Each record in this dump carries the following columns (types and observed ranges as reported by the dataset viewer; ⌀ marks nullable columns):

| Column | Type | Observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable (⌀) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable (⌀) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
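As a quick orientation (not part of the dump itself), here is a minimal sketch of filtering records with this schema, assuming the rows were exported to a Parquet file; the `records.parquet` path is hypothetical:

```python
# A minimal sketch, assuming the records below were exported to a Parquet
# file named "records.parquet" (a hypothetical path, not part of this dump).
import pandas as pd

df = pd.read_parquet("records.parquet")

# Keep only non-vendored, non-generated Python files under a permissive license.
mask = (
    (df["language"] == "Python")
    & (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```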
**Record 1 · librosa/librosa · /tests/test_segment.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `615f06e35c0bead8c5720a05b833738f2b174c43` · `30050476426b416b7bbcf1e2e6da2c39de5f5f71` · `4602d150d3a10cd76ee8ffa7ebfc4803974eeddc` |
| snapshot_id · revision_id · branch_name | `b2cdbfde4692d980c1ea28b68ca829912610a9ac` · `09e4a622456bbad314e2e6d5c75879f56d728b9b` · refs/heads/main |
| detected_licenses · license_type · gha_license_id | ["ISC"] · permissive · ISC |
| visit_date · revision_date · committer_date | 2023-08-27T12:53:38.102224 · 2023-08-15T17:52:04 · 2023-08-15T17:52:04 |
| github_id · star_events_count · fork_events_count | 6,309,729 · 5,907 · 1,048 |
| gha_created_at · gha_event_created_at · gha_language | 2012-10-20T14:21:01 · 2023-09-14T19:13:28 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | test_segment.py · py · 16,449 |
```python
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for segmentation functions"""
from typing import Union
import warnings

# Disable cache
import os

try:
    os.environ.pop("LIBROSA_CACHE_DIR")
except:
    pass

import numpy as np
import scipy
from scipy.spatial.distance import cdist, pdist, squareform

import pytest
from test_core import srand

import librosa

__EXAMPLE_FILE = os.path.join("tests", "data", "test1_22050.wav")


@pytest.mark.parametrize("n", [20, 250])
@pytest.mark.parametrize("k", [None, 5])
@pytest.mark.parametrize("metric", ["l2", "cosine"])
def test_cross_similarity(n, k, metric):
    srand()

    # Make a data matrix
    data_ref = np.random.randn(3, n)
    data = np.random.randn(3, n + 7)

    D = librosa.segment.cross_similarity(data, data_ref, k=k, metric=metric)
    assert D.shape == (data_ref.shape[1], data.shape[1])

    if k is not None:
        real_k = min(k, n)
        assert not np.any(D.sum(axis=0) != real_k)


def test_cross_similarity_sparse():
    srand()
    data_ref = np.random.randn(3, 50)
    data = np.random.randn(3, 100)

    D_sparse = librosa.segment.cross_similarity(data, data_ref, sparse=True)
    D_dense = librosa.segment.cross_similarity(data, data_ref, sparse=False)

    assert scipy.sparse.isspmatrix(D_sparse)
    assert np.allclose(D_sparse.todense(), D_dense)


def test_cross_similarity_distance():
    srand()
    data_ref = np.random.randn(3, 50)
    data = np.random.randn(3, 70)

    distance = cdist(data.T, data_ref.T, metric="sqeuclidean").T
    rec = librosa.segment.cross_similarity(
        data, data_ref, mode="distance", metric="sqeuclidean", sparse=True
    )

    i, j, vals = scipy.sparse.find(rec)
    assert np.allclose(vals, distance[i, j])


@pytest.mark.parametrize("metric", ["sqeuclidean", "cityblock"])
@pytest.mark.parametrize("bandwidth", [None, 1])
def test_cross_similarity_affinity(metric, bandwidth):
    srand()
    data_ref = np.random.randn(3, 70)
    data = np.random.randn(3, 50)

    distance = cdist(data_ref.T, data.T, metric=metric)
    rec = librosa.segment.cross_similarity(
        data, data_ref, mode="affinity", metric=metric, sparse=True, bandwidth=bandwidth
    )

    i, j, vals = scipy.sparse.find(rec)
    logvals = np.log(vals)
    ratio = -logvals / distance[i, j]

    if bandwidth is None:
        assert np.allclose(-logvals, distance[i, j] * np.nanmax(ratio))
    else:
        assert np.allclose(-logvals, distance[i, j] * bandwidth)


def test_cross_similarity_full():
    data = np.eye(10)
    data_ref = np.eye(10)

    rec = librosa.segment.cross_similarity(data, data_ref, mode="distance", full=True)
    assert np.all(rec >= 0)


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_cross_similarity_badmode():
    srand()
    data_ref = np.random.randn(3, 70)
    data = np.random.randn(3, 50)

    rec = librosa.segment.cross_similarity(
        data, data_ref, mode="NOT A MODE", metric="sqeuclidean", sparse=True
    )


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_cross_similarity_bad_bandwidth():
    srand()
    data_ref = np.random.randn(3, 50)
    data = np.random.randn(3, 70)
    rec = librosa.segment.cross_similarity(data, data_ref, bandwidth=-2, mode='affinity')


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_cross_similarity_fail_mismatch():
    D1 = np.zeros((3, 3))
    D2 = np.zeros((2, 3))
    librosa.segment.cross_similarity(D1, D2)


def test_cross_similarity_multi():
    srand()
    X1 = np.random.randn(2, 10, 100)
    X2 = np.random.randn(2, 10, 50)

    R = librosa.segment.cross_similarity(X1, X2, mode='affinity')

    # This should give the same output as if we stacked out the leading channel
    X1f = np.concatenate([X1[0], X1[1]], axis=0)
    X2f = np.concatenate([X2[0], X2[1]], axis=0)
    Rf = librosa.segment.cross_similarity(X1f, X2f, mode='affinity')

    assert np.allclose(R, Rf)


@pytest.mark.parametrize("n", [20, 250])
@pytest.mark.parametrize("k", [None, 5])
@pytest.mark.parametrize("sym", [False, True])
@pytest.mark.parametrize("width", [1, 5])
@pytest.mark.parametrize("metric", ["l2", "cosine"])
@pytest.mark.parametrize("self", [False, True])
def test_recurrence_matrix(n, k, width, sym, metric, self):
    srand()

    # Make a data matrix
    data = np.random.randn(3, n)

    D = librosa.segment.recurrence_matrix(
        data, k=k, width=width, sym=sym, axis=-1, metric=metric, self=self
    )

    # First test for symmetry
    if sym:
        assert np.allclose(D, D.T)

    # Test for target-axis invariance
    D_trans = librosa.segment.recurrence_matrix(
        data.T, k=k, width=width, sym=sym, axis=0, metric=metric, self=self
    )
    assert np.allclose(D, D_trans)

    # If not symmetric, test for correct number of links
    if not sym and k is not None:
        real_k = min(k, n - width)
        if self:
            real_k += 1
        assert not np.any(D.sum(axis=0) != real_k)

    if self:
        assert np.allclose(np.diag(D), True)

    # Make sure the +- width diagonal is hollow
    # It's easier to test if zeroing out the triangles leaves nothing
    idx = np.tril_indices(n, k=width)

    D[idx] = False
    D.T[idx] = False
    assert not np.any(D)


@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("data", [np.ones((3, 10))])
@pytest.mark.parametrize("width", [-1, 0, 11])
def test_recurrence_badwidth(data, width):
    librosa.segment.recurrence_matrix(data, width=width)


@pytest.mark.parametrize("self", [False, True])
def test_recurrence_sparse(self):
    srand()
    data = np.random.randn(3, 100)
    D_sparse = librosa.segment.recurrence_matrix(data, sparse=True, self=self)
    D_dense = librosa.segment.recurrence_matrix(data, sparse=False, self=self)

    assert scipy.sparse.isspmatrix(D_sparse)
    assert np.allclose(D_sparse.todense(), D_dense)

    if self:
        assert np.allclose(D_sparse.diagonal(), True)
    else:
        assert np.allclose(D_sparse.diagonal(), False)


@pytest.mark.parametrize("self", [False, True])
def test_recurrence_distance(self):
    srand()
    data = np.random.randn(3, 100)
    distance = squareform(pdist(data.T, metric="sqeuclidean"))
    rec = librosa.segment.recurrence_matrix(
        data, mode="distance", metric="sqeuclidean", sparse=True, self=self
    )
    i, j, vals = scipy.sparse.find(rec)

    assert np.allclose(vals, distance[i, j])
    assert np.allclose(rec.diagonal(), 0.0)


@pytest.mark.parametrize("metric", ["sqeuclidean", "cityblock"])
@pytest.mark.parametrize("bandwidth", [None, 1])
@pytest.mark.parametrize("self", [False, True])
def test_recurrence_affinity(metric, bandwidth, self):
    srand()
    data = np.random.randn(3, 100)
    distance = squareform(pdist(data.T, metric=metric))
    rec = librosa.segment.recurrence_matrix(
        data,
        mode="affinity",
        metric=metric,
        sparse=True,
        bandwidth=bandwidth,
        self=self,
    )

    if self:
        assert np.allclose(rec.diagonal(), 1.0)
    else:
        assert np.allclose(rec.diagonal(), 0.0)

    i, j, vals = scipy.sparse.find(rec)
    logvals = np.log(vals)

    # After log-scaling, affinity will match distance up to a constant factor
    ratio = -logvals / distance[i, j]
    if bandwidth is None:
        # Estimate the global bandwidth using non-zero distances
        assert np.allclose(-logvals, distance[i, j] * np.nanmax(ratio))
    else:
        assert np.allclose(-logvals, distance[i, j] * bandwidth)


def test_recurrence_full():
    data = np.eye(10)
    rec = librosa.segment.recurrence_matrix(
        data, mode="distance", metric="euclidean", sparse=False, full=True
    )
    assert np.all(rec >= 0)


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_big_width():
    srand()
    data = np.random.randn(3, 100)
    width = 55
    auto_k_rec = librosa.segment.recurrence_matrix(data, mode="affinity", width=width)


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_empty_data_recurrence():
    data = np.zeros((10, 10))
    librosa.segment.recurrence_matrix(data, mode="affinity")


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_empty_rows_recurrence():
    data = np.zeros((10, 10))
    data[0, 5] = 1
    librosa.segment.recurrence_matrix(data, mode="affinity", bandwidth="mean_k")


def test_empty_rows_recurrence_okay():
    data = np.zeros((10, 10))
    data[0, 5] = 1
    librosa.segment.recurrence_matrix(data, mode="affinity", bandwidth="med_k_scalar")


def test_recurrence_multi():
    srand()
    X = np.random.randn(2, 10, 100)
    R = librosa.segment.recurrence_matrix(X, mode='affinity')

    # This should give the same output as if we stacked out the leading channel
    Xf = np.concatenate([X[0], X[1]], axis=0)
    Rf = librosa.segment.recurrence_matrix(Xf, mode='affinity')
    assert np.allclose(R, Rf)


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_recurrence_badmode():
    srand()
    data = np.random.randn(3, 100)

    rec = librosa.segment.recurrence_matrix(
        data, mode="NOT A MODE", metric="sqeuclidean", sparse=True
    )


@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
    "bandwidth", [-2, 'FAKE', np.random.randn(2, 5), -1 * np.random.randn(100, 100)]
)
def test_recurrence_bad_bandwidth(bandwidth):
    srand()
    data = np.random.randn(3, 100)
    rec = librosa.segment.recurrence_matrix(data, bandwidth=bandwidth, mode='affinity')


def test_recurrence_array_bandwidth():
    srand()
    data = np.random.randn(3, 100)
    bw = np.random.random((100, 100)) + 0.1
    rec = librosa.segment.recurrence_matrix(data, bandwidth=bw, mode='affinity')


@pytest.mark.parametrize(
    "bw_mode", ['mean_k', 'gmean_k', 'mean_k_avg', 'gmean_k_avg', 'mean_k_avg_and_pair']
)
def test_automatic_bandwidth(bw_mode):
    srand()
    data = np.random.randn(3, 100)
    rec = librosa.segment.recurrence_matrix(data, bandwidth=bw_mode, mode='affinity')


@pytest.mark.parametrize("n", [10, 100, 500])
@pytest.mark.parametrize("pad", [False, True])
def test_recurrence_to_lag(n, pad):
    srand()
    data = np.random.randn(17, n)
    rec = librosa.segment.recurrence_matrix(data)

    lag = librosa.segment.recurrence_to_lag(rec, pad=pad, axis=-1)
    lag2 = librosa.segment.recurrence_to_lag(rec.T, pad=pad, axis=0)

    assert np.allclose(lag, lag2.T)

    x: Union[ellipsis, slice] = Ellipsis
    if pad:
        x = slice(n)

    for i in range(n):
        assert np.allclose(rec[:, i], np.roll(lag[:, i], i)[x])


@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("size", [(17,), (17, 34), (17, 17, 17)])
def test_recurrence_to_lag_fail(size):
    librosa.segment.recurrence_to_lag(np.zeros(size))


@pytest.mark.parametrize("pad", [False, True])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize(
    "rec", [librosa.segment.recurrence_matrix(np.random.randn(3, 100), sparse=True)]
)
@pytest.mark.parametrize("fmt", ["csc", "csr", "lil", "bsr", "dia"])
def test_recurrence_to_lag_sparse(pad, axis, rec, fmt):
    rec_dense = rec.toarray()
    rec = rec.asformat(fmt)

    lag_sparse = librosa.segment.recurrence_to_lag(rec, pad=pad, axis=axis)
    lag_dense = librosa.segment.recurrence_to_lag(rec_dense, pad=pad, axis=axis)

    assert scipy.sparse.issparse(lag_sparse)
    assert rec.format == lag_sparse.format
    assert rec.dtype == lag_sparse.dtype
    assert np.allclose(lag_sparse.toarray(), lag_dense)


@pytest.mark.parametrize("n", [10, 100])
@pytest.mark.parametrize("pad", [False, True])
def test_lag_to_recurrence(n, pad):
    srand()
    data = np.random.randn(17, n)
    rec = librosa.segment.recurrence_matrix(data)

    lag = librosa.segment.recurrence_to_lag(rec, pad=pad, axis=-1)
    lag2 = librosa.segment.recurrence_to_lag(rec.T, pad=pad, axis=0).T

    rec2 = librosa.segment.lag_to_recurrence(lag)

    assert np.allclose(rec, rec2)
    assert np.allclose(lag, lag2)


@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("size", [(17,), (17, 35), (17, 17, 17)])
def test_lag_to_recurrence_badsize(size):
    librosa.segment.lag_to_recurrence(np.zeros(size))


@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("pad", [False, True])
def test_lag_to_recurrence_sparse(axis, pad):
    srand()
    data = np.random.randn(3, 10)
    rec = librosa.segment.recurrence_matrix(data, sparse=True)
    lag = librosa.segment.recurrence_to_lag(rec, pad=pad, axis=axis)
    lag_dense = lag.toarray()

    rec_sparse = librosa.segment.lag_to_recurrence(lag, axis=axis)
    rec_dense = librosa.segment.lag_to_recurrence(lag_dense, axis=axis)

    assert scipy.sparse.issparse(rec_sparse)
    assert rec_sparse.format == lag.format
    assert rec_sparse.dtype == lag.dtype
    assert np.allclose(rec_sparse.toarray(), rec_dense)


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_lag_to_recurrence_sparse_badaxis():
    srand()
    data = np.random.randn(3, 100)
    R = librosa.segment.recurrence_matrix(data, sparse=True)
    L = librosa.segment.recurrence_to_lag(R)
    librosa.segment.lag_to_recurrence(L, axis=2)


def test_timelag_filter():
    srand()
    X = np.random.randn(15, 15)

    d_pos0 = librosa.segment.timelag_filter(lambda X: X)
    assert np.allclose(X, d_pos0(X))


def test_timelag_filter_pos1():
    srand()
    X = np.random.randn(15, 15)

    d_pos1 = librosa.segment.timelag_filter(lambda _, X: X, index=1)
    assert np.allclose(X, d_pos1(None, X))


@pytest.fixture(scope="module")
def ysr():
    return librosa.load(__EXAMPLE_FILE)


@pytest.fixture(scope="module")
def mfcc(ysr):
    y, sr = ysr
    return librosa.feature.mfcc(y=y, sr=sr)


@pytest.fixture(scope="module")
def beats(ysr):
    y, sr = ysr
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    return beats


@pytest.mark.parametrize("n_segments", [1, 2, 3, 4, 100])
def test_subsegment(mfcc, beats, n_segments):
    subseg = librosa.segment.subsegment(mfcc, beats, n_segments=n_segments, axis=-1)

    # Make sure that the boundaries are within range
    assert subseg.min() >= 0
    assert subseg.max() <= mfcc.shape[-1]

    # Make sure that all input beats are retained
    for b in beats:
        assert b in subseg

    # Do we have a 0 marker?
    assert 0 in subseg

    # Did we over-segment?  +2 here for 0- and end-padding
    assert len(subseg) <= n_segments * (len(beats) + 2)

    # Verify that running on the transpose gives the same answer
    ss2 = librosa.segment.subsegment(mfcc.T, beats, n_segments=n_segments, axis=0)
    assert np.allclose(subseg, ss2)


@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("n_segments", [-1, 0])
def test_subsegment_badn(mfcc, beats, n_segments):
    librosa.segment.subsegment(mfcc, beats, n_segments=n_segments, axis=-1)


@pytest.fixture
def R_input():
    X = np.random.randn(30, 5)
    return X.dot(X.T)


@pytest.mark.parametrize("window", ["rect", "hann"])
@pytest.mark.parametrize("n", [5, 9])
@pytest.mark.parametrize("max_ratio", [1.0, 1.5, 2.0])
@pytest.mark.parametrize("min_ratio", [None, 1.0])
@pytest.mark.parametrize("n_filters", [1, 2, 5])
@pytest.mark.parametrize("zero_mean", [False, True])
@pytest.mark.parametrize("clip", [False, True])
@pytest.mark.parametrize("kwargs", [dict(), dict(mode="reflect")])
def test_path_enhance(
    R_input, window, n, max_ratio, min_ratio, n_filters, zero_mean, clip, kwargs
):
    R_smooth = librosa.segment.path_enhance(
        R_input,
        window=window,
        n=n,
        max_ratio=max_ratio,
        min_ratio=min_ratio,
        n_filters=n_filters,
        zero_mean=zero_mean,
        clip=clip,
        **kwargs,
    )

    assert R_smooth.shape == R_input.shape
    assert np.all(np.isfinite(R_smooth))
    assert R_smooth.dtype == R_input.dtype

    if clip:
        assert np.min(R_smooth) >= 0


@pytest.mark.xfail(raises=librosa.ParameterError)
def test_path_enhance_badratio(R_input):
    # We can't have min_ratio > max_ratio
    librosa.segment.path_enhance(R_input, n=5, min_ratio=3, max_ratio=2)


def test_path_enhance_multi():
    srand()
    R = np.random.randn(2, 100, 100)
    Rs0 = librosa.segment.path_enhance(R[0], n=5)
    Rs1 = librosa.segment.path_enhance(R[1], n=5)
    Rs = librosa.segment.path_enhance(R, n=5)

    assert np.allclose(Rs0, Rs[0])
    assert np.allclose(Rs1, Rs[1])
    assert not np.allclose(Rs0, Rs1)
```
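As a pointer for reading the tests above, here is a minimal sketch of the `librosa.segment.recurrence_matrix` call they exercise, on synthetic features shaped like the test data (the bandwidth remark is hedged from the test assertions, not the library docs):

```python
# Minimal sketch of the API exercised above, on synthetic features of shape
# (d, n): d feature dimensions, n frames, as in the tests.
import numpy as np
import librosa

data = np.random.randn(3, 100)

# Boolean recurrence matrix: R[i, j] is True when frames i and j are near neighbors.
R = librosa.segment.recurrence_matrix(data)

# Affinity mode returns soft weights (roughly exp(-distance / bandwidth))
# instead of booleans, as the affinity tests above verify.
A = librosa.segment.recurrence_matrix(data, mode="affinity", metric="sqeuclidean")

print(R.shape, A.shape)  # both (100, 100)
```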
**Record 2 · anka-213/fuckeveryword · /tweepy/error.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `7eb5881e09255e448e12c9cc1d7910c84273762b` · `6c6a9aea1443e99dd85caa67d8b53f8ce5436461` · `753e2fe676cf2f581a9fe032a71d9c4d638cc471` |
| snapshot_id · revision_id · branch_name | `72bcaf2404b65c6d49014de9700ac2b92c98627e` · `1fbfd329b950c1a8f71b812273d1d08281063848` · refs/heads/master |
| detected_licenses · license_type · gha_license_id | ["MIT"] · permissive · MIT |
| visit_date · revision_date · committer_date | 2023-07-17T00:23:59.982075 · 2018-11-27T13:45:41 · 2018-11-27T13:45:41 |
| github_id · star_events_count · fork_events_count | 115,251,644 · 152 · 48 |
| gha_created_at · gha_event_created_at · gha_language | 2017-12-24T09:02:58 · 2021-09-07T01:45:26 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | error.py · py · 344 |
```python
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.


class TweepError(Exception):
    """Tweepy exception"""

    def __init__(self, reason, response=None):
        # NOTE: unicode() is the Python 2 builtin; this file predates Python 3 support.
        self.reason = unicode(reason)
        self.response = response
        Exception.__init__(self, reason)

    def __str__(self):
        return self.reason
```
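A hypothetical usage sketch for the exception class above; the `api.user_timeline` call stands in for any Tweepy API call and is not part of this file:

```python
# Hypothetical sketch: callers catch TweepError around any Tweepy API call.
def fetch_timeline(api):
    try:
        return api.user_timeline("someone")
    except TweepError as e:
        # e.reason carries the message; e.response the HTTP response, if any.
        print("Twitter API call failed:", e)
        return []
```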
**Record 3 · facebookresearch/CompilerGym · /examples/llvm_rl/model/benchmarks.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `e22f79030f82173d1f7f47c57d7a6846e098f553` · `d7fd46dfd8aab520c4958fa065367e168b6bfee7` · `48f500c8e14fbb688c82b6fd28d3721515434e8b` |
| snapshot_id · revision_id · branch_name | `f04a79fbfdbaf8afd6920ec205db6f1b6003d073` · `9e0c0beb12da1e1ea82ae6ce920713ee28dda4c9` · refs/heads/development |
| detected_licenses · license_type · gha_license_id | ["MIT"] · permissive · MIT |
| visit_date · revision_date · committer_date | 2023-08-31T09:17:48.967970 · 2023-03-10T19:29:56 · 2023-03-10T19:29:56 |
| github_id · star_events_count · fork_events_count | 312,059,069 · 787 · 126 |
| gha_created_at · gha_event_created_at · gha_language | 2020-11-11T18:44:35 · 2023-03-10T19:29:58 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | benchmarks.py · py · 4,197 |
```python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import islice
from typing import Iterable, List, Union

from pydantic import BaseModel, Field, root_validator, validator

from compiler_gym.datasets import Benchmark, BenchmarkUri
from compiler_gym.envs import CompilerEnv


class Benchmarks(BaseModel):
    """Represents a set of benchmarks to use for training/validation/testing.

    There are two ways of describing benchmarks, either as a list of benchmark
    URIs:

        benchmarks:
            uris:
                - benchmark://cbench-v1/adpcm
                - benchmark://cbench-v1/ghostscript

    Or as a dataset to iterate over:

        benchmarks:
            dataset: benchmark://cbench-v1
            max_benchmarks: 20
    """

    # === Start of fields list. ===

    dataset: str = Field(default=None, allow_mutation=False)
    """The name of a dataset to iterate over. If set, benchmarks are produced
    by iterating over this dataset in order. If not set, the :code:`uris` list
    must be provided.
    """

    uris: List[str] = Field(default=[], allow_mutation=False)
    """A list of URIs to iterate over."""

    max_benchmarks: int = Field(default=0, ge=0, allow_mutation=False)
    """The maximum number of benchmarks to yield from the given dataset or URIs
    list.
    """

    benchmarks_start_at: int = Field(default=0, ge=0, allow_mutation=False)
    """An offset into the dataset or URIs list to start iterating from.

    Note that using very large offsets will slow things down as the
    implementation still has to iterate over the excluded benchmarks.
    """

    # === Start of public API. ===

    def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
        """Return an iterator over the benchmarks."""
        return self._benchmark_iterator(env)

    def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
        """Return an iterator over the URIs of the benchmarks."""
        return self._benchmark_iterator(env, uris=True)

    # === Start of implementation details. ===

    @root_validator
    def check_that_either_dataset_or_uris_is_set(cls, values):
        assert values.get("dataset") or values.get(
            "uris"
        ), "Neither dataset or uris given"
        return values

    @validator("uris", pre=True)
    def validate_uris(cls, value, *, values, **kwargs):
        del kwargs
        for uri in value:
            uri = BenchmarkUri.from_string(uri)
            assert uri.scheme and uri.dataset, f"Invalid benchmark URI: {uri}"
        return list(value)

    def _benchmark_iterator(
        self, env: CompilerEnv, uris: bool = False
    ) -> Union[Iterable[Benchmark], Iterable[str]]:
        return (
            self._uris_iterator(env, uris)
            if self.uris
            else self._dataset_iterator(env, uris)
        )

    def _uris_iterator(
        self, env: CompilerEnv, uris: bool = False
    ) -> Union[Iterable[Benchmark], Iterable[str]]:
        """Iterate from a URIs list."""
        start = self.benchmarks_start_at
        n = len(self.uris)
        if self.max_benchmarks:
            n = min(self.max_benchmarks, n)

        if uris:
            # Shortcut in case we already have a list of URIs that we can slice
            # rather than iterating over.
            return iter(self.uris[start:n])

        return islice((env.datasets.benchmark(u) for u in self.uris), start, start + n)

    def _dataset_iterator(
        self, env: CompilerEnv, uris: bool = False
    ) -> Union[Iterable[Benchmark], Iterable[str]]:
        """Iterate from a dataset name."""
        dataset = env.datasets[self.dataset]
        dataset.install()
        n = dataset.size or self.max_benchmarks  # dataset.size == 0 for inf
        if self.max_benchmarks:
            n = min(self.max_benchmarks, n)
        start = self.benchmarks_start_at
        iterator = dataset.benchmark_uris if uris else dataset.benchmarks
        return islice(iterator(), start, start + n)

    class Config:
        validate_assignment = True
```
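A hedged sketch of constructing this `Benchmarks` model both ways its docstring describes; `compiler_gym.make("llvm-v0")` is the library's standard environment constructor, and the URIs are taken from the docstring above:

```python
# Sketch, assuming the Benchmarks class from the file above is importable.
import compiler_gym

from_uris = Benchmarks(uris=["benchmark://cbench-v1/adpcm"])
from_dataset = Benchmarks(dataset="benchmark://cbench-v1", max_benchmarks=20)

with compiler_gym.make("llvm-v0") as env:
    # Iterates at most 20 benchmarks from the cbench-v1 dataset.
    for benchmark in from_dataset.benchmarks_iterator(env):
        print(benchmark.uri)
```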
**Record 4 · WolfireGames/overgrowth · /Projects/bullet3-2.89/examples/pybullet/examples/internalEdge.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `4fd364d4cbb1123d13d15a2cde5eb8a50bae93e8` · `c475cd8531a94ffae69cc92371d41531dbbddb6c` · `fa8dbcb9bd5899e47254eaa6e4fd2506b630d8ad` |
| snapshot_id · revision_id · branch_name | `72d3dd29cbd7254337265c29f8de3e5c32400114` · `594a2a4f9da0855304ee8cd5335d042f8e954ce1` · refs/heads/main |
| detected_licenses · license_type · gha_license_id | ["Apache-2.0", "LicenseRef-scancode-free-unknown", "Zlib"] · permissive · Apache-2.0 |
| visit_date · revision_date · committer_date | 2023-08-15T19:36:56.156578 · 2023-05-17T08:17:53 · 2023-05-17T08:20:36 |
| github_id · star_events_count · fork_events_count | 467,448,492 · 2,264 · 245 |
| gha_created_at · gha_event_created_at · gha_language | 2022-03-08T09:38:54 · 2023-05-09T07:29:58 · C++ |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | internalEdge.py · py · 1,375 |
```python
import pybullet as p
import time

p.connect(p.GUI)

if (1):
    box_collision_shape_id = p.createCollisionShape(shapeType=p.GEOM_BOX,
                                                    halfExtents=[0.01, 0.01, 0.055])
    box_mass = 0.1
    box_visual_shape_id = -1
    box_position = [0, 0.1, 1]
    box_orientation = [0, 0, 0, 1]
    p.createMultiBody(box_mass,
                      box_collision_shape_id,
                      box_visual_shape_id,
                      box_position,
                      box_orientation,
                      useMaximalCoordinates=True)

terrain_mass = 0
terrain_visual_shape_id = -1
terrain_position = [0, 0, 0]
terrain_orientation = [0, 0, 0, 1]
terrain_collision_shape_id = p.createCollisionShape(shapeType=p.GEOM_MESH,
                                                    fileName="terrain.obj",
                                                    flags=p.GEOM_FORCE_CONCAVE_TRIMESH |
                                                    p.GEOM_CONCAVE_INTERNAL_EDGE,
                                                    meshScale=[0.5, 0.5, 0.5])
p.createMultiBody(terrain_mass, terrain_collision_shape_id, terrain_visual_shape_id,
                  terrain_position, terrain_orientation)

p.setGravity(0, 0, -10)
pts = p.getContactPoints()
print("num points=", len(pts))
print(pts)
while (p.isConnected()):
    time.sleep(1. / 240.)
    p.stepSimulation()
```
**Record 5 · ethereum/web3.py · /web3/providers/base.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `2bea0705f51b2f0bae7f560ca3eeee02c6a131e0` · `e441a2f416c83f04889ecd43d6b6bdcf5172b287` · `3f8c175dbfba6c46a6887583abb86ae90cee3129` |
| snapshot_id · revision_id · branch_name | `f8d66eefaa84d30fa51a0978d1d1c44c6807b355` · `76da2146267fa03760f35c33ca8b9a96d9e24835` · refs/heads/main |
| detected_licenses · license_type · gha_license_id | ["MIT"] · permissive · MIT |
| visit_date · revision_date · committer_date | 2023-08-31T18:34:30.144026 · 2023-08-29T15:43:25 · 2023-08-29T15:43:25 |
| github_id · star_events_count · fork_events_count | 56,251,096 · 4,403 · 1,680 |
| gha_created_at · gha_event_created_at · gha_language | 2016-04-14T15:59:35 · 2023-09-14T20:46:08 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | base.py · py · 4,149 |
```python
import itertools
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Sequence,
    Tuple,
    cast,
)

from eth_utils import (
    to_bytes,
    to_text,
)

from web3._utils.encoding import (
    FriendlyJsonSerde,
)
from web3.exceptions import (
    ProviderConnectionError,
)
from web3.middleware import (
    combine_middlewares,
)
from web3.types import (
    Middleware,
    MiddlewareOnion,
    RPCEndpoint,
    RPCResponse,
)

if TYPE_CHECKING:
    from web3 import Web3  # noqa: F401


class BaseProvider:
    _middlewares: Tuple[Middleware, ...] = ()
    # a tuple of (all_middlewares, request_func)
    _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (
        None,
        None,
    )
    is_async = False
    has_persistent_connection = False
    global_ccip_read_enabled: bool = True
    ccip_read_max_redirects: int = 4

    @property
    def middlewares(self) -> Tuple[Middleware, ...]:
        return self._middlewares

    @middlewares.setter
    def middlewares(self, values: MiddlewareOnion) -> None:
        # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...]
        self._middlewares = tuple(values)  # type: ignore

    def request_func(
        self, w3: "Web3", outer_middlewares: MiddlewareOnion
    ) -> Callable[..., RPCResponse]:
        """
        @param outer_middlewares is an iterable of middlewares,
            ordered by first to execute
        @returns a function that calls all the middleware and
            eventually self.make_request()
        """
        # type ignored b/c tuple(MiddlewareOnion) converts to tuple of middlewares
        all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares)  # type: ignore # noqa: E501

        cache_key = self._request_func_cache[0]
        if cache_key is None or cache_key != all_middlewares:
            self._request_func_cache = (
                all_middlewares,
                self._generate_request_func(w3, all_middlewares),
            )
        return self._request_func_cache[-1]

    def _generate_request_func(
        self, w3: "Web3", middlewares: Sequence[Middleware]
    ) -> Callable[..., RPCResponse]:
        return combine_middlewares(
            middlewares=middlewares,
            w3=w3,
            provider_request_fn=self.make_request,
        )

    def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
        raise NotImplementedError("Providers must implement this method")

    def is_connected(self, show_traceback: bool = False) -> bool:
        raise NotImplementedError("Providers must implement this method")


class JSONBaseProvider(BaseProvider):
    def __init__(self) -> None:
        self.request_counter = itertools.count()

    def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:
        text_response = to_text(raw_response)
        return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))

    def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:
        rpc_dict = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params or [],
            "id": next(self.request_counter),
        }
        encoded = FriendlyJsonSerde().json_encode(rpc_dict)
        return to_bytes(text=encoded)

    def is_connected(self, show_traceback: bool = False) -> bool:
        try:
            response = self.make_request(RPCEndpoint("web3_clientVersion"), [])
        except OSError as e:
            if show_traceback:
                raise ProviderConnectionError(
                    f"Problem connecting to provider with error: {type(e)}: {e}"
                )
            return False

        if "error" in response:
            if show_traceback:
                raise ProviderConnectionError(
                    f"Error received from provider: {response}"
                )
            return False

        if response["jsonrpc"] == "2.0":
            return True
        else:
            if show_traceback:
                raise ProviderConnectionError(f"Bad jsonrpc version: {response}")
            return False
```
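A minimal sketch, not part of web3.py, of the subclassing contract spelled out by the `NotImplementedError` stubs above: a concrete provider only needs to implement `make_request`. The `EchoProvider` name and canned response are illustrative:

```python
# Illustrative sketch: a toy provider that answers every request with a
# canned JSON-RPC result, built on the JSONBaseProvider defined above.
from web3.types import RPCEndpoint, RPCResponse


class EchoProvider(JSONBaseProvider):
    """Toy provider returning a fixed result for any method."""

    def make_request(self, method: RPCEndpoint, params) -> RPCResponse:
        # request_counter comes from JSONBaseProvider.__init__.
        return {"jsonrpc": "2.0", "id": next(self.request_counter), "result": None}
```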
**Record 6 · maks-sh/scikit-uplift · /sklift/viz/__init__.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `59ea819364168ec42fbb6280fce9eb5d6293ce1a` · `a1d5bf1b155cd2de95e57e9734c961465ce901b4` · `51fd7263a92575f3821b3327068e32ae2c3d04b1` |
| snapshot_id · revision_id · branch_name | `88792a757f34290407fe9a82b19dc6ae96ca7312` · `0038e659428f6e7a49b935b850651cd9a9db3f54` · refs/heads/master |
| detected_licenses · license_type · gha_license_id | ["MIT"] · permissive · MIT |
| visit_date · revision_date · committer_date | 2023-04-06T22:53:28.358650 · 2022-08-11T20:33:03 · 2022-08-11T20:33:03 |
| github_id · star_events_count · fork_events_count | 229,347,545 · 636 · 104 |
| gha_created_at · gha_event_created_at · gha_language | 2019-12-20T22:51:58 · 2023-03-30T02:39:09 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | __init__.py · py · 335 |
```python
from .base import (
    plot_uplift_curve, plot_qini_curve, plot_uplift_preds,
    plot_uplift_by_percentile, plot_treatment_balance_curve,
    UpliftCurveDisplay
)

__all__ = [
    'plot_uplift_curve', 'plot_qini_curve', 'plot_uplift_preds',
    'plot_uplift_by_percentile', 'plot_treatment_balance_curve',
    'UpliftCurveDisplay'
]
```
**Record 7 · thinkingmachines/geomancer · /geomancer/spells/length_of.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `6db00585f30048d4fd349a2c6901d40020aa3982` · `b1d185d32bb3b049c19390f478f7a4005fb1753b` · `70df5d100d55fc7bbd68f449d1f61d40627d91c9` |
| snapshot_id · revision_id · branch_name | `a48311bf8ac7c8c92db95117e9b0dcadef664e0b` · `d1748c4c4f8f77fc4456ce1deef23f8f017c9549` · refs/heads/master |
| detected_licenses · license_type · gha_license_id | ["MIT"] · permissive · MIT |
| visit_date · revision_date · committer_date | 2021-06-19T04:42:40.102835 · 2021-02-23T05:44:29 · 2021-02-23T06:04:44 |
| github_id · star_events_count · fork_events_count | 172,477,092 · 223 · 18 |
| gha_created_at · gha_event_created_at · gha_language | 2019-02-25T09:39:01 · 2021-02-23T06:04:45 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | length_of.py · py · 4,931 |
```python
# -*- coding: utf-8 -*-
"""
Spell LengthOf obtains the length of all Lines-of-Interest within a certain
radius. Suppose you want to find the length of residential roads
given a set of points:

.. code-block:: python

    from geomancer.spells import LengthOf
    from tests.conftest import sample_points

    # Load sample points
    df = sample_points()

    # Configure and cast the spell
    spell = LengthOf("residential",
                     source_table="geospatial.ph_osm.gis_osm_roads_free_1",
                     feature_name="len_residential")

    # Will create a new column, `len_residential` with the
    # appropriate features
    df_with_features = spell.cast(df, dburl="bigquery://geospatial")

.. warning::
    This spell currently doesn't work in BigQuery. In addition, the runtime for
    casting this spell is slow.
"""

# Import modules
from sqlalchemy import func
from sqlalchemy.sql import select

from .base import Spell
from ..backend.cores.bq import BigQueryCore

from loguru import logger


class LengthOf(Spell):
    """Obtain the length of all Lines-of-Interest within a certain radius"""

    def __init__(self, on, within=10 * 1000, **kwargs):
        """Spell constructor

        Parameters
        ----------
        on : str
            Feature class to compare upon
        within : float, optional
            Look for values within a particular range. Its value is in meters,
            the default is :code:`10,000` meters.
        source_table : str
            Table URI to run queries against.
        feature_name : str
            Column name for the output feature.
        column : str, optional
            Column to look the geometries from. The default is :code:`WKT`
        options : :class:`geomancer.backend.settings.Config`
            Specify configuration for interacting with the database backend.
            Default is a BigQuery Configuration
        """
        super(LengthOf, self).__init__(**kwargs)
        logger.warning(
            "ST_Buffer is not yet implemented so BigQueryCore won't work: groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ"
        )
        self.source_column, self.source_filter = self.extract_columns(on)
        self.within = within

    def query(self, source, target, core, column, pkey):
        # ST_Buffer is not yet implemented so BigQueryCore won't work
        # (groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ)
        if isinstance(core, BigQueryCore):
            raise ValueError(
                "The LengthOf feature is currently incompatible with \
BigQueryCore because ST_Buffer is not yet implemented"
            )

        # Get all lines-of-interests (LOIs) of fclass `on`
        lois = select(
            [source.c[self.source_id], source.c.WKT],
            source.c[self.source_column] == self.source_filter,
        ).cte("lois")

        # Create a buffer `within` a distance/radius around each centroid.
        # The point has to be converted to EPSG:3857 so that meters can be
        # used instead of decimal degrees for EPSG:4326.
        buff = select(
            [
                target,
                func.ST_Buffer(
                    core.ST_GeoFromText(target.c[column]), self.within
                ).label("__buffer__"),
            ]
        ).cte("buff")

        # Clip the LOIs with the buffers then calculate the length of all
        # LOIs inside each buffer.
        clip = select(
            [
                buff,
                func.ST_Intersection(
                    core.ST_GeoFromText(lois.c.WKT),
                    func.ST_Transform(buff.c["__buffer__"], 4326),
                ).label("__geom__"),
                func.ST_Length(
                    func.ST_Intersection(
                        func.ST_Transform(
                            core.ST_GeoFromText(lois.c.WKT), 3857
                        ),
                        buff.c["__buffer__"],
                    )
                ).label("__len__"),
            ],
            func.ST_Intersects(
                core.ST_GeoFromText(lois.c.WKT),
                func.ST_Transform(buff.c["__buffer__"], 4326),
            ),
        ).cte("clip")

        # Sum the length of all LOIs inside each buffer
        sum_length = (
            select(
                [
                    clip.c[pkey],
                    func.sum(clip.c["__len__"]).label(self.feature_name),
                ]
            )
            .select_from(clip)
            .group_by(clip.c[pkey])
            .cte("sum_length")
        )

        # Join the sum of the length of all LOIs inside each buffer
        query = select(
            [
                col
                for col in sum_length.columns
                if col.key not in ("__len__", "__geom__", "__buffer__")
            ],
            sum_length.c[pkey] == buff.c[pkey],
        )
        return query
```
**Record 8 · pantsbuild/pants · /src/python/pants/backend/python/goals/repl.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `86cd32c541081b4a1dc2baafe44d64f313856bd7` · `5db0fab37c2b8a618d85d3b60fab9f806c416474` · `0d3c6c4d50d26c48d871ee397070e2678f95a3a4` |
| snapshot_id · revision_id · branch_name | `4988d1ac5474ec95f94ce2218aeb759401e4b011` · `98cbda8545f0d58c586ed2daa76fefd729d5e0d5` · refs/heads/main |
| detected_licenses · license_type · gha_license_id | ["Apache-2.0"] · permissive · Apache-2.0 |
| visit_date · revision_date · committer_date | 2023-09-05T03:44:17.646899 · 2023-09-01T19:52:09 · 2023-09-01T19:52:09 |
| github_id · star_events_count · fork_events_count | 7,209,075 · 2,708 · 593 |
| gha_created_at · gha_event_created_at · gha_language | 2012-12-17T17:39:04 · 2023-09-14T19:33:33 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | repl.py · py · 7,504 |
```python
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import os
from typing import Iterable

from pants.backend.python.subsystems import ipython
from pants.backend.python.subsystems.ipython import IPython
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PythonResolveField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_from_targets import (
    InterpreterConstraintsRequest,
    RequirementsPexRequest,
)
from pants.backend.python.util_rules.python_sources import (
    PythonSourceFiles,
    PythonSourceFilesRequest,
)
from pants.core.goals.generate_lockfiles import NoCompatibleResolveException
from pants.core.goals.repl import ReplImplementation, ReplRequest
from pants.engine.fs import Digest, MergeDigests
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.util.docutil import bin_name
from pants.util.logging import LogLevel
from pants.util.strutil import softwrap


def validate_compatible_resolve(root_targets: Iterable[Target], python_setup: PythonSetup) -> None:
    """Eagerly validate that all roots are compatible.

    We already end up checking this in pex_from_targets.py, but this is a more eager check so that
    we have a better error message.
    """
    root_resolves = {
        root[PythonResolveField].normalized_value(python_setup)
        for root in root_targets
        if root.has_field(PythonResolveField)
    }

    def maybe_get_resolve(t: Target) -> str | None:
        if not t.has_field(PythonResolveField):
            return None
        return t[PythonResolveField].normalized_value(python_setup)

    if len(root_resolves) > 1:
        raise NoCompatibleResolveException.bad_input_roots(
            root_targets,
            maybe_get_resolve=maybe_get_resolve,
            doc_url_slug="python-third-party-dependencies#multiple-lockfiles",
            workaround=softwrap(
                f"""
                To work around this, choose which resolve you want to use from above. Then, run
                `{bin_name()} peek :: | jq -r \'.[] | select(.resolve == "example") |
                .["address"]\' | xargs {bin_name()} repl`, where you replace "example" with the
                resolve name, and possibly replace the specs `::` with what you were using
                before. If the resolve is the `[python].default_resolve`, use
                `select(.resolve == "example" or .resolve == null)`. These queries will result in
                opening a REPL with only targets using the desired resolve.
                """
            ),
        )


class PythonRepl(ReplImplementation):
    name = "python"


@rule(level=LogLevel.DEBUG)
async def create_python_repl_request(
    request: PythonRepl, pex_env: PexEnvironment, python_setup: PythonSetup
) -> ReplRequest:
    validate_compatible_resolve(request.targets, python_setup)

    interpreter_constraints, transitive_targets = await MultiGet(
        Get(InterpreterConstraints, InterpreterConstraintsRequest(request.addresses)),
        Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
    )

    requirements_request = Get(Pex, RequirementsPexRequest(request.addresses))
    local_dists_request = Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            request.addresses,
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
        ),
    )

    sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
    )

    requirements_pex, local_dists, sources = await MultiGet(
        requirements_request, local_dists_request, sources_request
    )
    merged_digest = await Get(
        Digest,
        MergeDigests(
            (requirements_pex.digest, local_dists.pex.digest, sources.source_files.snapshot.digest)
        ),
    )

    complete_pex_env = pex_env.in_workspace()
    args = complete_pex_env.create_argv(request.in_chroot(requirements_pex.name))

    chrooted_source_roots = [request.in_chroot(sr) for sr in sources.source_roots]
    extra_env = {
        **complete_pex_env.environment_dict(python=requirements_pex.python),
        "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
        "PEX_PATH": request.in_chroot(local_dists.pex.name),
        "PEX_INTERPRETER_HISTORY": "1" if python_setup.repl_history else "0",
    }

    return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)


class IPythonRepl(ReplImplementation):
    name = "ipython"


@rule(level=LogLevel.DEBUG)
async def create_ipython_repl_request(
    request: IPythonRepl, ipython: IPython, pex_env: PexEnvironment, python_setup: PythonSetup
) -> ReplRequest:
    validate_compatible_resolve(request.targets, python_setup)

    interpreter_constraints, transitive_targets = await MultiGet(
        Get(InterpreterConstraints, InterpreterConstraintsRequest(request.addresses)),
        Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
    )

    requirements_request = Get(Pex, RequirementsPexRequest(request.addresses))

    sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
    )

    ipython_request = Get(
        Pex, PexRequest, ipython.to_pex_request(interpreter_constraints=interpreter_constraints)
    )

    requirements_pex, sources, ipython_pex = await MultiGet(
        requirements_request, sources_request, ipython_request
    )

    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            request.addresses,
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
            sources=sources,
        ),
    )

    merged_digest = await Get(
        Digest,
        MergeDigests(
            (
                requirements_pex.digest,
                local_dists.pex.digest,
                local_dists.remaining_sources.source_files.snapshot.digest,
                ipython_pex.digest,
            )
        ),
    )

    complete_pex_env = pex_env.in_workspace()
    args = list(complete_pex_env.create_argv(request.in_chroot(ipython_pex.name)))
    if ipython.ignore_cwd:
        args.append("--ignore-cwd")

    chrooted_source_roots = [request.in_chroot(sr) for sr in sources.source_roots]
    extra_env = {
        **complete_pex_env.environment_dict(python=ipython_pex.python),
        "PEX_PATH": os.pathsep.join(
            [
                request.in_chroot(requirements_pex.name),
                request.in_chroot(local_dists.pex.name),
            ]
        ),
        "PEX_EXTRA_SYS_PATH": os.pathsep.join(chrooted_source_roots),
    }

    return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)


def rules():
    return [
        *collect_rules(),
        *ipython.rules(),
        UnionRule(ReplImplementation, PythonRepl),
        UnionRule(ReplImplementation, IPythonRepl),
    ]
```
**Record 9 · home-assistant/core · /homeassistant/components/skybell/coordinator.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `910474681b206842682160f336929a9e866ac9e5` · `96dcea595e7c16cec07b3f649afd65f3660a0bad` · `55e34df5c63ddc38f41b1dc8eeb042c363541785` |
| snapshot_id · revision_id · branch_name | `3455eac2e9d925c92d30178643b1aaccf3a6484f` · `80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743` · refs/heads/dev |
| detected_licenses · license_type · gha_license_id | ["Apache-2.0"] · permissive · Apache-2.0 |
| visit_date · revision_date · committer_date | 2023-08-31T15:41:06.299469 · 2023-08-31T14:50:53 · 2023-08-31T14:50:53 |
| github_id · star_events_count · fork_events_count | 12,888,993 · 35,501 · 20,617 |
| gha_created_at · gha_event_created_at · gha_language | 2013-09-17T07:29:48 · 2023-09-14T21:50:15 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | coordinator.py · py · 1,114 |
"""Data update coordinator for the Skybell integration."""
from datetime import timedelta
from aioskybell import SkybellDevice, SkybellException
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import LOGGER
class SkybellDataUpdateCoordinator(DataUpdateCoordinator[None]):
"""Data update coordinator for the Skybell integration."""
config_entry: ConfigEntry
def __init__(self, hass: HomeAssistant, device: SkybellDevice) -> None:
"""Initialize the coordinator."""
super().__init__(
hass=hass,
logger=LOGGER,
name=device.name,
update_interval=timedelta(seconds=30),
)
self.device = device
async def _async_update_data(self) -> None:
"""Fetch data from API endpoint."""
try:
await self.device.async_update()
except SkybellException as err:
raise UpdateFailed(f"Failed to communicate with device: {err}") from err
|
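For context, a hedged sketch of how an entity typically consumes such a coordinator through Home Assistant's `CoordinatorEntity` helper, which manages the subscribe/unsubscribe lifecycle; the entity class and attribute choice here are illustrative, not taken from the Skybell integration:

```python
# Illustrative sketch of the coordinator pattern, assuming the
# SkybellDataUpdateCoordinator class defined above.
from homeassistant.helpers.update_coordinator import CoordinatorEntity


class SkybellExampleEntity(CoordinatorEntity[SkybellDataUpdateCoordinator]):
    """Hypothetical entity refreshed by the coordinator every 30 seconds."""

    def __init__(self, coordinator: SkybellDataUpdateCoordinator) -> None:
        super().__init__(coordinator)
        self._attr_name = coordinator.device.name
```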
**Record 10 · PennyLaneAI/pennylane · /tests/measurements/test_sample.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `6b311fdcbe33f115f2ed8098afcb6f56e0ef9cd8` · `09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce` · `18b9d3f5ad3e86296581d730572a18f6a0d0d844` |
| snapshot_id · revision_id · branch_name | `458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7` · `0843183ff15a013c2622af5e61fea431d18076d3` · refs/heads/master |
| detected_licenses · license_type · gha_license_id | ["Apache-2.0"] · permissive · Apache-2.0 |
| visit_date · revision_date · committer_date | 2023-09-03T17:00:43.105784 · 2023-09-01T16:15:07 · 2023-09-01T16:15:07 |
| github_id · star_events_count · fork_events_count | 129,936,360 · 1,431 · 410 |
| gha_created_at · gha_event_created_at · gha_language | 2018-04-17T16:45:42 · 2023-09-14T21:30:56 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | test_sample.py · py · 16,023 |
```python
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the sample module"""
import numpy as np
import pytest

import pennylane as qml
from pennylane.measurements import MeasurementShapeError, Sample, Shots
from pennylane.operation import EigvalsUndefinedError, Operator

# pylint: disable=protected-access, no-member


# TODO: Remove this when new CustomMP are the default
def custom_measurement_process(device, spy):
    assert len(spy.call_args_list) > 0  # make sure method is mocked properly

    samples = device._samples
    call_args_list = list(spy.call_args_list)
    for call_args in call_args_list:
        meas = call_args.args[1]
        shot_range, bin_size = (call_args.kwargs["shot_range"], call_args.kwargs["bin_size"])
        if isinstance(meas, Operator):
            meas = qml.sample(op=meas)
        assert qml.math.allequal(
            device.sample(call_args.args[1], **call_args.kwargs),
            meas.process_samples(
                samples=samples,
                wire_order=device.wires,
                shot_range=shot_range,
                bin_size=bin_size,
            ),
        )


class TestSample:
    """Tests for the sample function"""

    @pytest.mark.parametrize("n_sample", (1, 10))
    def test_sample_dimension(self, mocker, n_sample):
        """Test that the sample function outputs samples of the right size"""
        dev = qml.device("default.qubit", wires=2, shots=n_sample)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.RX(0.54, wires=0)
            return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))

        output = circuit()

        assert len(output) == 2
        assert circuit._qfunc_output[0].shape(dev, Shots(n_sample)) == (
            (n_sample,) if not n_sample == 1 else ()
        )
        assert circuit._qfunc_output[1].shape(dev, Shots(n_sample)) == (
            (n_sample,) if not n_sample == 1 else ()
        )

        custom_measurement_process(dev, spy)

    @pytest.mark.filterwarnings("ignore:Creating an ndarray from ragged nested sequences")
    def test_sample_combination(self, mocker):
        """Test the output of combining expval, var and sample"""
        n_sample = 10

        dev = qml.device("default.qubit", wires=3, shots=n_sample)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev, diff_method="parameter-shift")
        def circuit():
            qml.RX(0.54, wires=0)
            return qml.sample(qml.PauliZ(0)), qml.expval(qml.PauliX(1)), qml.var(qml.PauliY(2))

        result = circuit()

        assert len(result) == 3
        assert np.array_equal(result[0].shape, (n_sample,))
        assert circuit._qfunc_output[0].shape(dev, Shots(n_sample)) == (n_sample,)
        assert isinstance(result[1], np.ndarray)
        assert isinstance(result[2], np.ndarray)

        custom_measurement_process(dev, spy)

    def test_single_wire_sample(self, mocker):
        """Test the return type and shape of sampling a single wire"""
        n_sample = 10

        dev = qml.device("default.qubit", wires=1, shots=n_sample)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.RX(0.54, wires=0)
            return qml.sample(qml.PauliZ(0))

        result = circuit()

        assert isinstance(result, np.ndarray)
        assert np.array_equal(result.shape, (n_sample,))
        assert circuit._qfunc_output.shape(dev, Shots(n_sample)) == (n_sample,)

        custom_measurement_process(dev, spy)

    def test_multi_wire_sample_regular_shape(self, mocker):
        """Test the return type and shape of sampling multiple wires
        where a rectangular array is expected"""
        n_sample = 10

        dev = qml.device("default.qubit", wires=3, shots=n_sample)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliZ(1)), qml.sample(qml.PauliZ(2))

        result = circuit()

        assert circuit._qfunc_output[0].shape(dev, Shots(n_sample)) == (n_sample,)
        assert circuit._qfunc_output[1].shape(dev, Shots(n_sample)) == (n_sample,)
        assert circuit._qfunc_output[2].shape(dev, Shots(n_sample)) == (n_sample,)

        # If all the dimensions are equal the result will end up to be a proper rectangular array
        assert isinstance(result, tuple)
        assert len(result) == 3
        assert result[0].dtype == np.dtype("int")

        custom_measurement_process(dev, spy)

    @pytest.mark.filterwarnings("ignore:Creating an ndarray from ragged nested sequences")
    def test_sample_output_type_in_combination(self, mocker):
        """Test the return type and shape of sampling multiple wires
        in combination with expvals and vars"""
        n_sample = 10

        dev = qml.device("default.qubit", wires=3, shots=n_sample)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev, diff_method="parameter-shift")
        def circuit():
            return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(1)), qml.sample(qml.PauliZ(2))

        result = circuit()

        # If all the dimensions are equal the result will end up to be a proper rectangular array
        assert len(result) == 3
        assert isinstance(result[0], np.ndarray)
        assert isinstance(result[1], np.ndarray)
        assert result[2].dtype == np.dtype("int")
        assert np.array_equal(result[2].shape, (n_sample,))

        custom_measurement_process(dev, spy)

    def test_not_an_observable(self, mocker):
        """Test that a UserWarning is raised if the provided
        argument might not be hermitian."""
        dev = qml.device("default.qubit", wires=2, shots=10)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.RX(0.52, wires=0)
            return qml.sample(qml.prod(qml.PauliX(0), qml.PauliZ(0)))

        with pytest.warns(UserWarning, match="Prod might not be hermitian."):
            _ = circuit()

        custom_measurement_process(dev, spy)

    def test_observable_return_type_is_sample(self, mocker):
        """Test that the return type of the observable is :attr:`ObservableReturnTypes.Sample`"""
        n_shots = 10
        dev = qml.device("default.qubit", wires=1, shots=n_shots)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            res = qml.sample(qml.PauliZ(0))
            assert res.return_type is Sample
            return res

        circuit()

        custom_measurement_process(dev, spy)

    def test_providing_observable_and_wires(self):
        """Test that a ValueError is raised if both an observable is provided and wires are specified"""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(wires=0)
            return qml.sample(qml.PauliZ(0), wires=[0, 1])

        with pytest.raises(
            ValueError,
            match="Cannot specify the wires to sample if an observable is provided."
            " The wires to sample will be determined directly from the observable.",
        ):
            _ = circuit()

    def test_providing_no_observable_and_no_wires(self, mocker):
        """Test that we can provide no observable and no wires to sample function"""
        dev = qml.device("default.qubit", wires=2, shots=1000)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(wires=0)
            res = qml.sample()
            assert res.obs is None
            assert res.wires == qml.wires.Wires([])
            return res

        circuit()

        custom_measurement_process(dev, spy)

    def test_providing_no_observable_and_no_wires_shot_vector(self, mocker):
        """Test that we can provide no observable and no wires to sample
        function when using a shot vector"""
        num_wires = 2

        shots1 = 1
        shots2 = 10
        shots3 = 1000
        dev = qml.device("default.qubit", wires=num_wires, shots=[shots1, shots2, shots3])

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(wires=0)
            qml.CNOT(wires=[0, 1])
            return qml.sample()

        res = circuit()

        assert isinstance(res, tuple)

        expected_shapes = [(num_wires,), (shots2, num_wires), (shots3, num_wires)]
        assert len(res) == len(expected_shapes)
        assert all(r.shape == exp_shape for r, exp_shape in zip(res, expected_shapes))

        # assert first wire is always the same as second
        # pylint: disable=unsubscriptable-object
        assert np.all(res[0][0] == res[0][1])
        assert np.all(res[1][:, 0] == res[1][:, 1])
        assert np.all(res[2][:, 0] == res[2][:, 1])

        custom_measurement_process(dev, spy)

    def test_providing_no_observable_and_wires(self, mocker):
        """Test that we can provide no observable but specify wires to the sample function"""
        wires = [0, 2]
        wires_obj = qml.wires.Wires(wires)
        dev = qml.device("default.qubit", wires=3, shots=1000)

        spy = mocker.spy(qml.QubitDevice, "sample")

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(wires=0)
            res = qml.sample(wires=wires)
            assert res.obs is None
            assert res.wires == wires_obj
            return res

        circuit()

        custom_measurement_process(dev, spy)

    @pytest.mark.parametrize(
        "obs,exp",
        [
            # Single observables
            (None, int),  # comp basis samples
            (qml.PauliX(0), int),
            (qml.PauliY(0), int),
            (qml.PauliZ(0), int),
            (qml.Hadamard(0), int),
            (qml.Identity(0), int),
            (qml.Hermitian(np.diag([1, 2]), 0), float),
            (qml.Hermitian(np.diag([1.0, 2.0]), 0), float),
            # Tensor product observables
            (
                qml.PauliX("c")
                @ qml.PauliY("a")
                @ qml.PauliZ(1)
                @ qml.Hadamard("wire1")
                @ qml.Identity("b"),
                int,
            ),
            (qml.Projector([0, 1], wires=[0, 1]) @ qml.PauliZ(2), float),
            (qml.Hermitian(np.array(np.eye(2)), wires=[0]) @ qml.PauliZ(2), float),
            (
                qml.Projector([0, 1], wires=[0, 1]) @ qml.Hermitian(np.array(np.eye(2)), wires=[2]),
                float,
            ),
        ],
    )
    def test_numeric_type(self, obs, exp):
        """Test that the numeric type is correct."""
        res = qml.sample(obs) if obs is not None else qml.sample()
        assert res.numeric_type is exp

    def test_shape_no_shots_error(self):
        """Test that the appropriate error is raised with no shots are specified"""
        dev = qml.device("default.qubit", wires=2, shots=None)
        shots = Shots(None)
        mp = qml.sample()

        with pytest.raises(
            MeasurementShapeError, match="Shots are required to obtain the shape of the measurement"
        ):
            _ = mp.shape(dev, shots)

    @pytest.mark.parametrize(
        "obs",
        [
            None,
            qml.PauliZ(0),
            qml.Hermitian(np.diag([1, 2]), 0),
            qml.Hermitian(np.diag([1.0, 2.0]), 0),
        ],
    )
    def test_shape(self, obs):
        """Test that the shape is correct."""
        shots = 10
        dev = qml.device("default.qubit", wires=3, shots=shots)
        res = qml.sample(obs) if obs is not None else qml.sample()
        expected = (shots,) if obs is not None else (shots, 3)
        assert res.shape(dev, Shots(shots)) == expected

    @pytest.mark.parametrize("n_samples", (1, 10))
    def test_shape_wires(self, n_samples):
        """Test that the shape is correct when wires are provided."""
        dev = qml.device("default.qubit", wires=3, shots=n_samples)
        mp = qml.sample(wires=(0, 1))
        assert mp.shape(dev, Shots(n_samples)) == (n_samples, 2) if n_samples != 1 else (2,)

    @pytest.mark.parametrize(
        "obs",
        [
            None,
            qml.PauliZ(0),
            qml.Hermitian(np.diag([1, 2]), 0),
            qml.Hermitian(np.diag([1.0, 2.0]), 0),
        ],
    )
    def test_shape_shot_vector(self, obs):
        """Test that the shape is correct with the shot vector too."""
        shot_vector = (1, 2, 3)
        dev = qml.device("default.qubit", wires=3, shots=shot_vector)
        res = qml.sample(obs) if obs is not None else qml.sample()
        expected = ((), (2,), (3,)) if obs is not None else ((3,), (2, 3), (3, 3))
        assert res.shape(dev, Shots(shot_vector)) == expected

    def test_shape_shot_vector_obs(self):
        """Test that the shape is correct with the shot vector and a observable too."""
        shot_vec = (2, 2)
        dev = qml.device("default.qubit", wires=3, shots=shot_vec)

        @qml.qnode(dev)
        def circuit():
            qml.Hadamard(wires=0)
            qml.PauliZ(0)
            return qml.sample(qml.PauliZ(0))

        binned_samples = circuit()

        assert isinstance(binned_samples, tuple)
        assert len(binned_samples) == len(shot_vec)
        # pylint: disable=unsubscriptable-object
        assert binned_samples[0].shape == (shot_vec[0],)

    def test_sample_empty_wires(self):
        """Test that using ``qml.sample`` with an empty wire list raises an error."""
        with pytest.raises(ValueError, match="Cannot set an empty list of wires."):
            qml.sample(wires=[])

    @pytest.mark.parametrize("shots", [2, 100])
    def test_sample_no_arguments(self, shots):
        """Test that using ``qml.sample`` with no arguments returns the samples of all wires."""
        dev = qml.device("default.qubit", wires=3, shots=shots)

        @qml.qnode(dev)
        def circuit():
            return qml.sample()

        res = circuit()

        # pylint: disable=comparison-with-callable
        assert res.shape == (shots, 3)

    def test_new_sample_with_operator_with_no_eigvals(self):
        """Test that calling process with an operator that has no eigvals defined raises an error."""

        class DummyOp(Operator):  # pylint: disable=too-few-public-methods
            num_wires = 1

        with pytest.raises(EigvalsUndefinedError, match="Cannot compute samples of"):
            qml.sample(op=DummyOp(0)).process_samples(samples=np.array([[1, 0]]), wire_order=[0])


@pytest.mark.jax
@pytest.mark.parametrize("samples", (1, 10))
def test_jitting_with_sampling_on_subset_of_wires(samples):
    """Test case covering bug in Issue #3904. Sampling should be jit-able
    when sampling occurs on a subset of wires. The bug was occurring due to an improperly
    set shape method."""
    import jax

    jax.config.update("jax_enable_x64", True)

    dev = qml.device("default.qubit", wires=3, shots=samples)

    @qml.qnode(dev, interface="jax")
    def circuit(x):
        qml.RX(x, wires=0)
        return qml.sample(wires=(0, 1))

    results = jax.jit(circuit)(jax.numpy.array(0.123, dtype=jax.numpy.float64))

    expected = (2,) if samples == 1 else (samples, 2)
    assert results.shape == expected
    assert (
        circuit._qfunc_output.shape(dev, Shots(samples)) == (samples, 2) if samples != 1 else (2,)
    )
```
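A small sketch of the measurement these tests target: on a finite-shot device, `qml.sample` returns one eigenvalue sample per shot:

```python
# Minimal sketch of qml.sample on a 5-shot device: the QNode returns an
# array of PauliZ eigenvalue samples, one entry per shot.
import pennylane as qml

dev = qml.device("default.qubit", wires=2, shots=5)


@qml.qnode(dev)
def circuit():
    qml.Hadamard(wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.sample(qml.PauliZ(0))


print(circuit())  # e.g. array([ 1, -1,  1,  1, -1])
```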
**Record 11 · mlrun/mlrun · /tests/system/runtimes/test_archives.py**

| Field | Value |
|---|---|
| blob_id · directory_id · content_id | `9ffa2a36efa3fee7b0b3b30763946899e165b931` · `7f620e7902c0b9ccb1fcfd1427acd5936ea33814` · `0d8a1152d77a33e55270ca886be66243484c90ce` |
| snapshot_id · revision_id · branch_name | `2074c230070129ce3becb211b92c90b29a2ce850` · `b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77` · refs/heads/development |
| detected_licenses · license_type · gha_license_id | ["Apache-2.0"] · permissive · Apache-2.0 |
| visit_date · revision_date · committer_date | 2023-09-06T00:09:21.546135 · 2023-09-05T19:38:13 · 2023-09-05T19:38:13 |
| github_id · star_events_count · fork_events_count | 205,706,595 · 1,093 · 229 |
| gha_created_at · gha_event_created_at · gha_language | 2019-09-01T16:59:19 · 2023-09-14T14:14:10 · Python |
| language · src_encoding · is_vendor · is_generated | Python · UTF-8 · false · false |
| filename · extension · length_bytes | test_archives.py · py · 13,765 |
```python
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile

import pytest

import mlrun
import tests.system.base
from mlrun.runtimes.constants import RunStates

git_uri = "git://github.com/mlrun/test-git-load.git"
base_image = "mlrun/mlrun"
tags = ["main", "refs/heads/tst"]
codepaths = [(None, "rootfn"), ("subdir", "func")]

job_cases = {
    # name: (command, workdir, handler, tag)
    "root-hndlr": ("", None, "rootfn.job_handler", tags[0]),
    "subdir-hndlr": ("", "subdir", "func.job_handler", tags[0]),
    "subdir-hndlr-ref": ("", "subdir", "func.job_handler", tags[1]),
    "root-cmd": ("rootfn.py", None, "job_handler", tags[1]),
}

# for private repo tests set the MLRUN_SYSTEM_TESTS_PRIVATE_REPO, MLRUN_SYSTEM_TESTS_PRIVATE_GIT_TOKEN env vars
private_repo = os.environ.get(
    "MLRUN_SYSTEM_TESTS_PRIVATE_REPO",
    "git://github.com/mlrun/private_git_tests.git#main",
)
has_private_source = (
    "MLRUN_SYSTEM_TESTS_PRIVATE_GIT_TOKEN" in os.environ and private_repo
)
need_private_git = pytest.mark.skipif(
    not has_private_source, reason="env vars for private git repo not set"
)


@tests.system.base.TestMLRunSystem.skip_test_if_env_not_configured
class TestArchiveSources(tests.system.base.TestMLRunSystem):
    project_name = "git-tests"
    custom_project_names_to_delete = []

    def custom_setup(self):
        self.remote_code_dir = f"v3io:///projects/{self.project_name}/code/"
        self.uploaded_code = False
        # upload test files to cluster
        if has_private_source:
            self.project.set_secrets(
                {
                    "GIT_TOKEN": os.environ["MLRUN_SYSTEM_TESTS_PRIVATE_GIT_TOKEN"],
                }
            )

    def custom_teardown(self):
        for name in self.custom_project_names_to_delete:
            self._delete_test_project(name)

    def _upload_code_to_cluster(self):
        if not self.uploaded_code:
            for file in [
                "source_archive.tar.gz",
                "source_archive.zip",
                "handler.py",
                "spark_session.tar.gz",
            ]:
                source_path = str(self.assets_path / file)
                mlrun.get_dataitem(self.remote_code_dir + file).upload(source_path)
            self.uploaded_code = True

    def _new_function(self, kind, name="run", command=""):
        return mlrun.new_function(
            f"{kind}-{name}",
            kind=kind,
            image=base_image if kind != "local" else None,
            command=command,
        )

    @pytest.mark.parametrize("artifact_format", ["git", "tar.gz", "zip"])
    @pytest.mark.parametrize("codepath", codepaths)
    def test_local_archive(self, artifact_format, codepath):
        workdir, module = codepath
        source = (
            f"{git_uri}#main"
            if artifact_format == "git"
            else str(self.assets_path / f"source_archive.{artifact_format}")
        )
        fn = self._new_function("local")
        fn.with_source_archive(
            source,
            workdir=workdir,
            handler=f"{module}.job_handler",
            target_dir=tempfile.mkdtemp(),
        )
        run = mlrun.run_function(fn)
        assert run.state() == "completed"
        assert run.output("tag")

    @pytest.mark.parametrize("load_mode", ["run", "build"])
    @pytest.mark.parametrize("case", job_cases.keys())
    def test_job_git(self, load_mode, case):
        command, workdir, handler, tag = job_cases[case]
        fn = self._new_function("job", f"{load_mode}-{case}", command)
        fn.with_source_archive(
            f"{git_uri}#{tag}",
            workdir=workdir,
            handler=handler,
            pull_at_runtime=load_mode == "run",
        )
        fn.spec.image_pull_policy = "Always"
        if load_mode == "build":
            mlrun.build_function(fn)
        run = mlrun.run_function(fn)
        assert run.state() == "completed"
        assert run.output("tag") == tag

    @pytest.mark.parametrize("codepath", [(None, "rootfn"), ("subdir", "func")])
    @pytest.mark.parametrize("tag", tags)
    def test_nuclio_deploy(self, codepath, tag):
        workdir, module = codepath
        fn = self._new_function("nuclio")
        fn.with_source_archive(
            f"{git_uri}#{tag}", workdir=workdir, handler=f"{module}:nuclio_handler"
        )
        mlrun.deploy_function(fn)
        resp = fn.invoke("")
        assert resp.decode() == f"tag={tag}"

    def test_serving_deploy(self):
        tag = "main"
        fn = self._new_function("serving")
        fn.with_source_archive(f"{git_uri}#{tag}", handler="srv")
        graph = fn.set_topology("flow")
        graph.to(name="echo", handler="echo").respond()
        mlrun.deploy_function(fn)
        resp = fn.invoke("")
        assert resp.decode() == f"tag={tag}"

    @need_private_git
    def test_private_repo_local(self):
        fn = self._new_function("local", "priv")
        fn.with_source_archive(
            private_repo,
            handler="rootfn.job_handler",
            target_dir=tempfile.mkdtemp(),
        )
        task = mlrun.new_task().with_secrets(
            "inline",
            {
                "GIT_TOKEN": os.environ.get("PRIVATE_GIT_TOKEN", ""),
            },
        )
        run = mlrun.run_function(fn, base_task=task)
        assert run.state() == "completed"
        assert run.output("tag")

    @need_private_git
    @pytest.mark.parametrize("load_mode", ["run", "build"])
    def test_private_repo_job(self, load_mode):
        fn = self._new_function("job", f"{load_mode}-priv")
        fn.with_source_archive(
            private_repo,
            handler="rootfn.job_handler",
            pull_at_runtime=load_mode == "run",
        )
        # fn.spec.image_pull_policy = "Always"
        if load_mode == "build":
            mlrun.build_function(fn)
        run = mlrun.run_function(fn)
        assert run.state() == "completed"
        assert run.output("tag")

    @need_private_git
    def test_private_repo_nuclio(self):
        fn = self._new_function("nuclio", "priv")
        fn.with_source_archive(
```
private_repo,
handler="rootfn:nuclio_handler",
)
mlrun.deploy_function(fn)
resp = fn.invoke("")
assert "tag=" in resp.decode()
@pytest.mark.enterprise
@pytest.mark.parametrize("load_mode", ["run", "build"])
@pytest.mark.parametrize("compression_format", ["zip", "tar.gz"])
def test_job_compressed(self, load_mode, compression_format):
self._upload_code_to_cluster()
fn = self._new_function("job", f"{load_mode}-compressed")
fn.with_source_archive(
self.remote_code_dir + f"source_archive.{compression_format}",
handler="rootfn.job_handler",
pull_at_runtime=load_mode == "run",
)
if load_mode == "build":
mlrun.build_function(fn)
run = mlrun.run_function(fn)
assert run.state() == "completed"
assert run.output("tag")
@pytest.mark.enterprise
def test_nuclio_tar(self):
self._upload_code_to_cluster()
fn = self._new_function("nuclio", "tar")
fn.with_source_archive(
self.remote_code_dir + "source_archive.tar.gz",
handler="rootfn:nuclio_handler",
)
fn.verbose = True
mlrun.deploy_function(fn)
resp = fn.invoke("")
assert "tag=" in resp.decode()
def test_job_project(self):
project = mlrun.new_project("git-proj-job1", user_project=True)
        # using project.name because this is a user project, meaning the project name gets concatenated with the user name
self.custom_project_names_to_delete.append(project.name)
project.save()
project.set_source(f"{git_uri}#main", True) # , workdir="gtst")
project.set_function(
name="myjob",
handler="rootfn.job_handler",
image=base_image,
kind="job",
with_repo=True,
)
run = project.run_function("myjob")
assert run.state() == "completed"
assert run.output("tag")
def test_run_function_with_auto_build_and_source_is_idempotent(self):
project = mlrun.get_or_create_project("run-with-source-and-auto-build")
        # using project.name because this is a user project, meaning the project name gets concatenated with the user name
self.custom_project_names_to_delete.append(project.name)
project.set_source(f"{git_uri}#main", False)
project.set_function(
name="myjob",
handler="rootfn.job_handler",
image=base_image,
kind="job",
with_repo=True,
requirements=["pandas"],
)
project.save()
project.run_function("myjob", auto_build=True)
project = mlrun.get_or_create_project("run-with-source-and-auto-build")
project.set_source(f"{git_uri}#main", False)
project.set_function(
name="myjob",
handler="rootfn.job_handler",
image=base_image,
kind="job",
with_repo=True,
requirements=["pandas"],
)
project.save()
project.run_function("myjob", auto_build=True)
def test_run_function_with_auto_build_and_source_is_idempotent_after_failure(self):
project = mlrun.get_or_create_project("run-with-source-and-auto-build")
        # using project.name because this is a user project, meaning the project name gets concatenated with the user name
self.custom_project_names_to_delete.append(project.name)
project.set_source(f"{git_uri}#main", False)
project.set_function(
name="myjob",
handler="rootfn.job_handler",
image=base_image,
kind="job",
with_repo=True,
            # non-existing package, expected to fail when running
requirements=["pandaasdasds"],
)
project.save()
with pytest.raises(mlrun.errors.MLRunRuntimeError):
project.run_function("myjob", auto_build=True)
project = mlrun.get_or_create_project("run-with-source-and-auto-build")
project.set_source(f"{git_uri}#main", False)
project.set_function(
name="myjob",
handler="rootfn.job_handler",
image=base_image,
kind="job",
with_repo=True,
requirements=["pandas"],
)
project.save()
project.run_function("myjob", auto_build=True)
def test_nuclio_project(self):
project = mlrun.new_project("git-proj-nuc", user_project=True)
        # using project.name because this is a user project, meaning the project name gets concatenated with the user name
self.custom_project_names_to_delete.append(project.name)
project.save()
project.set_source(f"{git_uri}#main")
project.set_function(
name="mynuclio",
handler="rootfn:nuclio_handler",
image=base_image,
kind="nuclio",
with_repo=True,
)
deployment = project.deploy_function("mynuclio")
resp = deployment.function.invoke("")
assert "tag=" in resp.decode()
def test_project_subdir(self):
# load project into a tmp dir, look for the project.yaml in the subpath
project = mlrun.load_project(
tempfile.mkdtemp(),
f"{git_uri}#main",
name="git-proj2",
user_project=True,
subpath="subdir",
)
        # using project.name because this is a user project, meaning the project name gets concatenated with the user name
self.custom_project_names_to_delete.append(project.name)
project.save()
# run job locally (from cloned source)
run = project.run_function("myjob", local=True)
assert run.state() == "completed"
assert run.output("tag")
# build and run job on the cluster
project.build_function("myjob")
run = project.run_function("myjob")
assert run.state() == "completed"
assert run.output("tag")
# deploy Nuclio function and invoke
deployment = project.deploy_function("mynuclio")
resp = deployment.function.invoke("")
assert "tag=" in resp.decode()
@pytest.mark.enterprise
@pytest.mark.parametrize("pull_at_runtime", [False])
def test_with_igz_spark_from_source(self, pull_at_runtime):
self._upload_code_to_cluster()
fn = mlrun.new_function(
name="spark-test", kind="spark", command="spark_session.py"
)
fn.with_igz_spark()
# spark requires requests
fn.with_driver_limits(cpu="1300m")
fn.with_driver_requests(cpu=1, mem="512m")
fn.with_executor_limits(cpu="1400m")
fn.with_executor_requests(cpu=1, mem="512m")
fn.with_source_archive(
source=self.remote_code_dir + "spark_session.tar.gz",
pull_at_runtime=pull_at_runtime,
)
fn.set_image_pull_configuration(image_pull_policy="Always")
spark_run = fn.run(auto_build=True)
assert spark_run.status.state == RunStates.completed
======================================================================
blob_id: 3e190eb589c4e7a85fbeaf2b3252c2b4c4999368 | directory_id: b26c41926fa3a7c2c061132d80e91a2750f2f468
path: /tensorflow_probability/python/distributions/lkj.py | filename: lkj.py
content_id: ad94401e0ed65c2d864fff03145f3ae4538c4cf5
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
repo_name: tensorflow/probability | branch_name: refs/heads/main
snapshot_id: 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | revision_id: 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5
visit_date: 2023-09-04T02:06:08.174935 | revision_date: 2023-08-31T20:30:00 | committer_date: 2023-08-31T20:31:33
github_id: 108,053,674 | star_events_count: 4,055 | fork_events_count: 1,269
gha_event_created_at: 2023-09-13T21:49:49 | gha_created_at: 2017-10-23T23:50:54 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 27,178 | extension: py
content:
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Lewandowski-Kurowicka-Joe distribution on correlation matrices.
The sampler follows the 'onion' method from
[1] Daniel Lewandowski, Dorota Kurowicka, and Harry Joe,
'Generating random correlation matrices based on vines and extended
onion method,' Journal of Multivariate Analysis 100 (2009), pp
1989-2001.
"""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import cholesky_outer_product as cholesky_outer_product_bijector
from tensorflow_probability.python.bijectors import correlation_cholesky as correlation_cholesky_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import beta
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.math import linalg
from tensorflow_probability.python.math import special
from tensorflow_probability.python.math.numeric import clip_by_value_preserve_gradient
from tensorflow_probability.python.random import random_ops
from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import
__all__ = [
'LKJ',
]
class _ClipByValue(bijector_lib.AutoCompositeTensorBijector):
"""A bijector that clips by value.
This class is intended for minute numerical issues where `|clip(x) - x| <=
eps`, as it defines the derivative of its application to be exactly 1.
"""
def __init__(self,
clip_value_min,
clip_value_max,
validate_args=False,
name='clip_by_value'):
"""Instantiates the `ClipByValue` bijector.
Args:
clip_value_min: Floating-point `Tensor`.
clip_value_max: Floating-point `Tensor`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([clip_value_min, clip_value_max],
dtype_hint=tf.float32)
self._clip_value_min = tensor_util.convert_nonref_to_tensor(
clip_value_min, dtype=dtype, name='clip_value_min')
self._clip_value_max = tensor_util.convert_nonref_to_tensor(
clip_value_max, dtype=dtype, name='clip_value_max')
super(_ClipByValue, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
dtype=dtype,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _is_increasing(cls):
return False
def _forward(self, x):
return clip_by_value_preserve_gradient(x, self._clip_value_min,
self._clip_value_max)
def _inverse(self, y):
return y
def _forward_log_det_jacobian(self, x):
# We deliberately ignore the clipping operation.
return tf.zeros([], dtype=dtype_util.base_dtype(x.dtype))
def _tril_spherical_uniform(dimension, batch_shape, dtype, seed):
"""Returns a `Tensor` of samples of lower triangular matrices.
Each row of the lower triangular part follows a spherical uniform
distribution.
Args:
dimension: Scalar `int` `Tensor`, representing the dimensionality of the
output matrices.
batch_shape: Vector-shaped, `int` `Tensor` representing batch shape of
output. The output will have shape `batch_shape + [dimension, dimension]`.
dtype: TF `dtype` representing `dtype` of output.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
Returns:
tril_spherical_uniform: `Tensor` with specified `batch_shape` and `dtype`
consisting of real values drawn row-wise from a spherical uniform
distribution.
"""
# Essentially, we will draw lower triangular samples where each lower
# triangular entry follows a normal distribution, then apply `x / norm(x)`
# for each row of the samples.
# To avoid possible NaNs, we will use spherical_uniform directly for
# the first two rows.
assert dimension > 0, '`dimension` needs to be positive.'
num_seeds = min(dimension, 3)
seeds = list(samplers.split_seed(seed, n=num_seeds, salt='sample_lkj'))
rows = []
paddings_prepend = [[0, 0]] * len(batch_shape)
for n in range(1, min(dimension, 2) + 1):
rows.append(
tf.pad(
random_ops.spherical_uniform(
shape=batch_shape, dimension=n, dtype=dtype, seed=seeds.pop()),
paddings_prepend + [[0, dimension - n]],
constant_values=0.))
samples = tf.stack(rows, axis=-2)
if dimension > 2:
normal_shape = ps.concat(
[batch_shape, [dimension * (dimension + 1) // 2 - 3]], axis=0)
normal_samples = samplers.normal(
shape=normal_shape, dtype=dtype, seed=seeds.pop())
# We fill the first two rows of the triangular matrix with ones.
# Note that fill_triangular fills elements in a clockwise spiral.
normal_samples = tf.concat([
normal_samples[..., :dimension],
tf.ones(ps.concat([batch_shape, [1]], axis=0), dtype=dtype),
normal_samples[..., dimension:(2 * dimension - 1)],
tf.ones(ps.concat([batch_shape, [2]], axis=0), dtype=dtype),
normal_samples[..., (2 * dimension - 1):],
],
axis=-1)
normal_samples = linalg.fill_triangular(
normal_samples, upper=False)[..., 2:, :]
remaining_rows = normal_samples / tf.norm(
normal_samples, ord=2, axis=-1, keepdims=True)
samples = tf.concat([samples, remaining_rows], axis=-2)
return samples
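
# For intuition (illustrative NumPy sketch, not used by this module):
# normalizing i.i.d. standard-normal rows yields rows distributed uniformly
# on the unit sphere, which is the row-wise construction used above.
#
#   rows = np.random.standard_normal((5, 3))
#   sphere = rows / np.linalg.norm(rows, axis=-1, keepdims=True)
#   np.testing.assert_allclose(np.linalg.norm(sphere, axis=-1), 1.0)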
def sample_lkj(
num_samples,
dimension,
concentration,
cholesky_space=False,
seed=None,
name=None):
"""Returns a Tensor of samples from an LKJ distribution.
Args:
num_samples: Python `int`. The number of samples to draw.
dimension: Python `int`. The dimension of correlation matrices.
concentration: `Tensor` representing the concentration of the LKJ
distribution.
cholesky_space: Python `bool`. Whether to take samples from LKJ or
Chol(LKJ).
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to Ops created by this function.
Returns:
samples: A Tensor of correlation matrices (or Cholesky factors of
correlation matrices if `cholesky_space = True`) with shape
`[n] + B + [D, D]`, where `B` is the shape of the `concentration`
parameter, and `D` is the `dimension`.
Raises:
ValueError: If `dimension` is negative.
"""
if dimension < 0:
raise ValueError(
'Cannot sample negative-dimension correlation matrices.')
# Notation below: B is the batch shape, i.e., tf.shape(concentration)
  with tf.name_scope(name or 'sample_lkj'):
concentration = tf.convert_to_tensor(concentration)
if not dtype_util.is_floating(concentration.dtype):
raise TypeError(
'The concentration argument should have floating type, not '
'{}'.format(dtype_util.name(concentration.dtype)))
batch_shape = ps.concat([[num_samples], ps.shape(concentration)], axis=0)
dtype = concentration.dtype
if dimension <= 1:
# For any dimension <= 1, there is only one possible correlation matrix.
shape = ps.concat([batch_shape, [dimension, dimension]], axis=0)
return tf.ones(shape=shape, dtype=dtype)
# We need 1 seed for beta and 1 seed for tril_spherical_uniform.
beta_seed, tril_spherical_uniform_seed = samplers.split_seed(
seed, n=2, salt='sample_lkj')
# Note that the sampler below deviates from [1], by doing the sampling in
# cholesky space. This does not change the fundamental logic of the
# sampler, but does speed up the sampling.
# In addition, we also vectorize the computation to make the sampler
# more feasible to use in problems where `dimension` is large.
beta_conc = concentration + (dimension - 2.) / 2.
dimension_range = np.arange(
1., dimension, dtype=dtype_util.as_numpy_dtype(dtype))
beta_conc1 = dimension_range / 2.
beta_conc0 = beta_conc[..., tf.newaxis] - (dimension_range - 1) / 2.
beta_dist = beta.Beta(concentration1=beta_conc1, concentration0=beta_conc0)
# norm is y in reference [1].
norm = beta_dist.sample(sample_shape=[num_samples], seed=beta_seed)
# distance shape: B + [dimension - 1, 1] for broadcast
distance = tf.sqrt(norm)[..., tf.newaxis]
# direction is u in reference [1].
# direction follows the spherical uniform distribution and will be stored
# in a lower triangular matrix, hence it will have shape:
# B + [dimension - 1, dimension - 1]
direction = _tril_spherical_uniform(dimension - 1, batch_shape, dtype,
tril_spherical_uniform_seed)
# raw_correlation is w in reference [1].
# shape: B + [dimension - 1, dimension - 1]
raw_correlation = distance * direction
# This is the rows in the cholesky of the result,
# which differs from the construction in reference [1].
# In the reference, the new row `z` = chol_result @ raw_correlation^T
# = C @ raw_correlation^T (where as short hand we use C = chol_result).
# We prove that the below equation is the right row to add to the
# cholesky, by showing equality with reference [1].
# Let S be the sample constructed so far, and let `z` be as in
# reference [1]. Then at this iteration, the new sample S' will be
# [[S z^T]
# [z 1]]
# In our case we have the cholesky decomposition factor C, so
# we want our new row x (same size as z) to satisfy:
    #  [[S z^T]    [[C 0]  [[C^T x^T]    [[CC^T  Cx^T]
    #   [z 1]]   =  [x k]]  [0   k]]   =  [xC^T  xx^T + k**2]]
# Since C @ raw_correlation^T = z = C @ x^T, and C is invertible,
# we have that x = raw_correlation. Also 1 = xx^T + k**2, so k
# = sqrt(1 - xx^T) = sqrt(1 - |raw_correlation|**2) = sqrt(1 -
# distance**2).
paddings_prepend = [[0, 0]] * len(batch_shape)
diag = tf.pad(
tf.sqrt(1. - norm), paddings_prepend + [[1, 0]], constant_values=1.)
chol_result = tf.pad(
raw_correlation,
paddings_prepend + [[1, 0], [0, 1]],
constant_values=0.)
chol_result = tf.linalg.set_diag(chol_result, diag)
if cholesky_space:
return chol_result
result = tf.matmul(chol_result, chol_result, transpose_b=True)
# The diagonal for a correlation matrix should always be ones. Due to
# numerical instability the matmul might not achieve that, so manually set
# these to ones.
result = tf.linalg.set_diag(
result, tf.ones(shape=ps.shape(result)[:-1], dtype=result.dtype))
# This sampling algorithm can produce near-PSD matrices on which standard
# algorithms such as `tf.linalg.cholesky` or
# `tf.linalg.self_adjoint_eigvals` fail. Specifically, as documented in
# b/116828694, around 2% of trials of 900,000 5x5 matrices (distributed
# according to 9 different concentration parameter values) contained at
# least one matrix on which the Cholesky decomposition failed.
return result
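
# Illustrative usage sketch (assumes an eager TF/TFP environment; the call
# below is hypothetical example code, not part of the library):
#
#   samples = sample_lkj(num_samples=3, dimension=4, concentration=2.0, seed=42)
#   # samples.shape == [3, 4, 4]; each matrix has unit diagonal and is a
#   # (numerically near-)PSD correlation matrix.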
class LKJ(distribution.AutoCompositeTensorDistribution):
"""The LKJ distribution on correlation matrices.
This is a one-parameter family of distributions on correlation matrices. The
probability density is proportional to the determinant raised to the power of
the parameter: `pdf(X; eta) = Z(eta) * det(X) ** (eta - 1)`, where `Z(eta)` is
a normalization constant. The uniform distribution on correlation matrices is
the special case `eta = 1`.
The distribution is named after Lewandowski, Kurowicka, and Joe, who gave a
sampler for the distribution in [(Lewandowski, Kurowicka, Joe, 2009)][1].
Note: For better numerical stability, it is recommended that you use
`CholeskyLKJ` instead.
#### Examples
```python
# Initialize a single 3x3 LKJ with concentration parameter 1.5
dist = tfp.distributions.LKJ(dimension=3, concentration=1.5)
# Evaluate this at a batch of two observations, each in R^{3x3}.
x = ... # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Draw 6 LKJ-distributed 3x3 correlation matrices
ans = dist.sample(sample_shape=[2, 3], seed=42)
# shape of ans is [2, 3, 3, 3]
```
"""
def __init__(self,
dimension,
concentration,
input_output_cholesky=False,
validate_args=False,
allow_nan_stats=True,
name='LKJ'):
"""Construct LKJ distributions.
Args:
dimension: Python `int`. The dimension of the correlation matrices
to sample.
concentration: `float` or `double` `Tensor`. The positive concentration
parameter of the LKJ distributions. The pdf of a sample matrix `X` is
proportional to `det(X) ** (concentration - 1)`.
input_output_cholesky: Python `bool`. If `True`, functions whose input or
output have the semantics of samples assume inputs are in Cholesky form
and return outputs in Cholesky form. In particular, if this flag is
`True`, input to `log_prob` is presumed of Cholesky form and output from
`sample` is of Cholesky form. Setting this argument to `True` is purely
a computational optimization and does not change the underlying
distribution. Additionally, validation checks which are only defined on
the multiplied-out form are omitted, even if `validate_args` is `True`.
Default value: `False` (i.e., input/output does not have Cholesky
semantics). WARNING: Do not set this boolean to true, when using
`tfp.mcmc`. The density is not the density of Cholesky factors of
correlation matrices drawn via LKJ.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value `NaN` to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `dimension` is negative.
"""
if dimension < 0:
raise ValueError(
'There are no negative-dimension correlation matrices.')
if dimension > 65536:
raise ValueError(
('Given dimension ({}) is greater than 65536, and will overflow '
'int32 array sizes.').format(dimension))
parameters = dict(locals())
self._input_output_cholesky = input_output_cholesky
with tf.name_scope(name):
dtype = dtype_util.common_dtype([concentration], tf.float32)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, name='concentration', dtype=dtype)
self._dimension = dimension
super(LKJ, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
concentration=parameter_properties.ParameterProperties(
shape_fn=lambda sample_shape: sample_shape[:-2],
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(
low=tf.convert_to_tensor(
1. + dtype_util.eps(dtype), dtype=dtype)))))
# pylint: enable=g-long-lambda
@property
def dimension(self):
"""Dimension of returned correlation matrices."""
return self._dimension
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def input_output_cholesky(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._input_output_cholesky
def _event_shape_tensor(self):
return tf.constant([self.dimension, self.dimension], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([self.dimension, self.dimension])
def _sample_n(self, num_samples, seed=None, name=None):
"""Returns a Tensor of samples from an LKJ distribution.
Args:
num_samples: Python `int`. The number of samples to draw.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to Ops created by this function.
Returns:
samples: A Tensor of correlation matrices with shape `[n, B, D, D]`,
where `B` is the shape of the `concentration` parameter, and `D`
is the `dimension`.
Raises:
ValueError: If `dimension` is negative.
"""
return sample_lkj(
num_samples=num_samples,
dimension=self.dimension,
concentration=self.concentration,
cholesky_space=self.input_output_cholesky,
seed=seed,
name=name)
def _log_prob(self, x):
# Despite what one might infer from Eq 15 in [1], the formula
# given for the normalization constant should be read in the sense
# of division, not multiplication.
concentration = tf.convert_to_tensor(self.concentration)
normalizer = self._log_normalization(concentration=concentration)
return self._log_unnorm_prob(x, concentration) - normalizer
def _log_unnorm_prob(self, x, concentration, name=None):
"""Returns the unnormalized log density of an LKJ distribution.
Args:
x: `float` or `double` `Tensor` of correlation matrices. The shape of `x`
must be `B + [D, D]`, where `B` broadcasts with the shape of
`concentration`.
concentration: `float` or `double` `Tensor`. The positive concentration
parameter of the LKJ distributions.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_p: A Tensor of the unnormalized log density of each matrix element of
`x`, with respect to an LKJ distribution with parameter the
corresponding element of `concentration`.
"""
with tf.name_scope(name or 'log_unnorm_prob_lkj'):
x = tf.convert_to_tensor(x, name='x')
# The density is det(matrix) ** (concentration - 1).
# Computing the determinant with `logdet` is usually fine, since
# correlation matrices are Hermitian and PSD. But in some cases, for a
# PSD matrix whose eigenvalues are close to zero, `logdet` raises an error
# complaining that it is not PSD. The root cause is the computation of the
# cholesky decomposition in `logdet`. Hence, we use the less efficient but
# more robust `slogdet` which does not use `cholesky`.
#
# An alternative would have been to check allow_nan_stats and use
# eigenvalues = tf.linalg.self_adjoint_eigvals(x)
# psd_mask = tf.cast(
# tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
# tf.where(psd_mask, answer, float('-inf'))
# to emit probability 0 for inputs that are not PSD, without ever raising
# an error. More care must be taken, as due to numerical stability issues,
# self_adjoint_eigvals can return slightly negative eigenvalues even for
# a PSD matrix.
if self.input_output_cholesky:
logdet = 2.0 * tf.reduce_sum(
tf.math.log(tf.linalg.diag_part(x)), axis=[-1])
else:
# TODO(b/162937268): Remove the hackaround.
if (not tf.executing_eagerly() and
control_flow_util.GraphOrParentsInXlaContext(
tf1.get_default_graph())):
s = tf.linalg.svd(x, compute_uv=False)
logdet = tf.math.reduce_sum(tf.math.log(s), -1)
else:
logdet = tf.linalg.slogdet(x).log_abs_determinant
answer = (concentration - 1.) * logdet
return answer
def _log_normalization(self, concentration=None, name='log_normalization'):
"""Returns the log normalization of an LKJ distribution.
Args:
concentration: `float` or `double` `Tensor`. The positive concentration
parameter of the LKJ distributions.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_z: A Tensor of the same shape and dtype as `concentration`, containing
the corresponding log normalizers.
"""
# The formula is from D. Lewandowski et al [1], p. 1999, from the
# proof that eqs 16 and 17 are equivalent.
# Instead of using a for loop for k from 1 to (dimension - 1), we will
# vectorize the computation by performing operations on the vector
# `dimension_range = np.arange(1, dimension)`.
with tf.name_scope(name or 'log_normalization_lkj'):
concentration = (
tf.convert_to_tensor(self.concentration
if concentration is None else concentration))
logpi = float(np.log(np.pi))
dimension_range = np.arange(
1.,
self.dimension,
dtype=dtype_util.as_numpy_dtype(concentration.dtype))
effective_concentration = (
concentration[..., tf.newaxis] +
(self.dimension - 1 - dimension_range) / 2.)
ans = tf.reduce_sum(
special.log_gamma_difference(
dimension_range / 2., effective_concentration),
axis=-1)
# Then we add to `ans` the sum of `logpi / 2 * k` for `k` run from 1 to
# `dimension - 1`.
ans = ans + logpi * (self.dimension * (self.dimension - 1) / 4.)
return ans
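
  # For reference, the computation above in math form (d = dimension,
  # c = concentration, with log_gamma_difference(x, y) = lgamma(y) - lgamma(x + y)):
  #
  #   log Z = sum_{k=1}^{d-1} [lgamma(c + (d-1-k)/2) - lgamma(c + (d-1-k)/2 + k/2)]
  #           + (d * (d - 1) / 4) * log(pi)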
def _mean(self):
# The mean of the LKJ distribution (with any concentration parameter) is the
# identity matrix. Proof: Imagine a correlation matrix on D variables, and
# imagine reversing the sense of the kth of those variables. The
# off-diagonal entries in row and column k change sign, but LKJ is symmetric
# with respect to this operation (because the determinant doesn't change).
# Ergo, the mean must be invariant under it (for any k), and hence all the
# off-diagonal entries must be 0.
concentration = tf.convert_to_tensor(self.concentration)
batch = ps.shape(concentration)
answer = tf.eye(
num_rows=self.dimension, batch_shape=batch,
dtype=concentration.dtype)
return answer
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector.
cholesky_bijector = correlation_cholesky_bijector.CorrelationCholesky(
validate_args=self.validate_args)
if self.input_output_cholesky:
return cholesky_bijector
return chain_bijector.Chain([
        # We need to explicitly clip the output of this bijector because the
# other two bijectors sometimes return values that exceed the bounds by
# an epsilon due to minute numerical errors. Even numerically stable
# algorithms (which the other two bijectors employ) allow for symmetric
# errors about the true value, which is inappropriate for a one-sided
# validity constraint associated with correlation matrices.
_ClipByValue(-1., tf.ones([], self.dtype)),
cholesky_outer_product_bijector.CholeskyOuterProduct(
validate_args=self.validate_args),
cholesky_bijector
], validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
assertions = []
if not self.validate_args:
return assertions
if is_init != tensor_util.is_ref(self.concentration):
# concentration >= 1
# TODO(b/111451422, b/115950951) Generalize to concentration > 0.
assertions.append(assert_util.assert_non_negative(
self.concentration - 1,
message='Argument `concentration` must be >= 1.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if tensorshape_util.is_fully_defined(x.shape[-2:]):
if not (tensorshape_util.dims(x.shape)[-2] ==
tensorshape_util.dims(x.shape)[-1] ==
self.dimension):
raise ValueError(
'Input dimension mismatch: expected [..., {}, {}], got {}'.format(
self.dimension, self.dimension, tensorshape_util.dims(x.shape)))
elif self.validate_args:
msg = 'Input dimension mismatch: expected [..., {}, {}], got {}'.format(
self.dimension, self.dimension, tf.shape(x))
assertions.append(assert_util.assert_equal(
tf.shape(x)[-2], self.dimension, message=msg))
assertions.append(assert_util.assert_equal(
tf.shape(x)[-1], self.dimension, message=msg))
if self.validate_args and not self.input_output_cholesky:
assertions.append(assert_util.assert_less_equal(
dtype_util.as_numpy_dtype(x.dtype)(-1),
x,
message='Correlations must be >= -1.',
summarize=30))
assertions.append(assert_util.assert_less_equal(
x,
dtype_util.as_numpy_dtype(x.dtype)(1),
message='Correlations must be <= 1.',
summarize=30))
assertions.append(assert_util.assert_near(
tf.linalg.diag_part(x),
dtype_util.as_numpy_dtype(x.dtype)(1),
message='Self-correlations must be = 1.',
summarize=30))
assertions.append(assert_util.assert_near(
x,
tf.linalg.matrix_transpose(x),
message='Correlation matrices must be symmetric.',
summarize=30))
return assertions
======================================================================
blob_id: 27d68bba5f3f27b6de585b1141d8965ce6a7b8ec | directory_id: e9869359c839c8c175ae7877bc35dcfdfe4058f8
path: /kornia/augmentation/_2d/mix/jigsaw.py | filename: jigsaw.py
content_id: 8cc3423af714ef546f83b2a56c73268aaa7cc11d
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
repo_name: kornia/kornia | branch_name: refs/heads/master
snapshot_id: 80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae | revision_id: 1e0f8baa7318c05b17ea6dbb48605691bca8972f
visit_date: 2023-08-31T06:32:45.960859 | revision_date: 2023-08-30T21:59:41 | committer_date: 2023-08-30T21:59:41
github_id: 145,693,916 | star_events_count: 7,351 | fork_events_count: 833
gha_event_created_at: 2023-09-12T21:59:29 | gha_created_at: 2018-08-22T10:31:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,580 | extension: py
content:
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from kornia.augmentation import random_generator as rg
from kornia.augmentation._2d.mix.base import MixAugmentationBaseV2
from kornia.constants import DataKey
from kornia.core import Tensor
__all__ = ["RandomJigsaw"]
class RandomJigsaw(MixAugmentationBaseV2):
r"""RandomJigsaw augmentation.
.. image:: _static/img/RandomJigsaw.png
    Makes a jigsaw puzzle from each image individually. To mix patches across
    different images in a batch, refer to :class:`kornia.augmentation.RandomMosaic`.
Args:
grid: the Jigsaw puzzle grid. e.g. (2, 2) means
each output will mix image patches in a 2x2 grid.
        ensure_perm: if ``True``, ensure that the generated patch permutation
            differs from the original (identity) ordering.
data_keys: the input type sequential for applying augmentations.
Accepts "input", "image", "mask", "bbox", "bbox_xyxy", "bbox_xywh", "keypoints".
p: probability of applying the transformation for the whole batch.
same_on_batch: apply the same transformation across the batch.
keepdim: whether to keep the output shape the same as input ``True`` or broadcast it
to the batch form ``False``.
Examples:
>>> jigsaw = RandomJigsaw((4, 4))
>>> input = torch.randn(8, 3, 256, 256)
>>> out = jigsaw(input)
>>> out.shape
torch.Size([8, 3, 256, 256])
"""
def __init__(
self,
grid: Tuple[int, int] = (4, 4),
data_keys: List[Union[str, int, DataKey]] = [DataKey.INPUT],
p: float = 0.5,
same_on_batch: bool = False,
keepdim: bool = False,
ensure_perm: bool = True,
) -> None:
super().__init__(p=p, p_batch=1.0, same_on_batch=same_on_batch, keepdim=keepdim, data_keys=data_keys)
self._param_generator = rg.JigsawGenerator(grid, ensure_perm)
self.flags = {"grid": grid}
def apply_transform(
self, input: Tensor, params: Dict[str, Tensor], maybe_flags: Optional[Dict[str, Any]] = None
) -> Tensor:
# different from the Base class routine. This function will not refer to any non-transformation images.
batch_prob = params['batch_prob']
to_apply = batch_prob > 0.5 # NOTE: in case of Relaxed Distributions.
input = input[to_apply].clone()
b, c, h, w = input.shape
perm = params["permutation"]
        # Note: with a 100x100 image and a 3x3 grid this could still work if we set
        # h = piece_size_h * self.flags["grid"][0] (losing one pixel) and then resized
        # back to 100x100. It is probably worth deciding whether to tolerate such
        # truncation or to raise it as an error.
piece_size_h, piece_size_w = input.shape[-2] // self.flags["grid"][0], input.shape[-1] // self.flags["grid"][1]
# Convert to C BxN H' W'
input = (
input.unfold(2, piece_size_h, piece_size_h)
.unfold(3, piece_size_w, piece_size_w)
.reshape(b, c, -1, piece_size_h, piece_size_w)
.permute(1, 0, 2, 3, 4)
.reshape(c, -1, piece_size_h, piece_size_w)
)
perm = (perm + torch.arange(0, b, device=perm.device)[:, None] * perm.shape[1]).view(-1)
input = input[:, perm, :, :]
input = (
input.reshape(-1, b, self.flags["grid"][1], h, piece_size_w)
.permute(0, 1, 2, 4, 3)
.reshape(-1, b, w, h)
.permute(0, 1, 3, 2)
.permute(1, 0, 2, 3)
)
return input
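
# A standalone sketch (hypothetical, not part of kornia) of the same
# unfold -> permute -> stitch idea on a single image:
#
#   img = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
#   gh, gw = 2, 2                                      # grid
#   ph, pw = img.shape[-2] // gh, img.shape[-1] // gw  # piece sizes
#   pieces = img.unfold(2, ph, ph).unfold(3, pw, pw).reshape(1, 1, gh * gw, ph, pw)
#   pieces = pieces[:, :, torch.randperm(gh * gw)]     # shuffle the pieces
#   out = (pieces.view(1, 1, gh, gw, ph, pw)
#          .permute(0, 1, 2, 4, 3, 5)
#          .reshape(1, 1, gh * ph, gw * pw))           # stitch back together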
======================================================================
blob_id: 15937f4968b44765a674f572b4a68a320de19547 | directory_id: a173777f4ba02c1e683d75810fa6932487ba42cc
path: /2022/plaid-ctf-2022/flagsong/solve.py | filename: solve.py
content_id: fdb28cf4e2d99b7c794a52dd34eb9132184d4cf3
detected_licenses: [] | license_type: no_license | gha_license_id: null
repo_name: perfectblue/ctf-writeups | branch_name: refs/heads/master
snapshot_id: ba9454ef06e1004253f004154fba6ae00d88ca09 | revision_id: 3f2a8a2c2598d700f33cb3f39ceb515e2ba46312
visit_date: 2023-06-25T19:28:05.222110 | revision_date: 2022-12-11T04:55:13 | committer_date: 2022-12-11T04:55:13
github_id: 133,306,580 | star_events_count: 606 | fork_events_count: 75
gha_event_created_at: 2023-01-20T22:38:17 | gha_created_at: 2018-05-14T04:53:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4,034 | extension: py
content:
from pathlib import Path
import time
import pyautogui
from python_speech_features import mfcc, logfbank
import scipy.io.wavfile as wav
import lib_generate
# for python_speech_features :|
import logging
logging.getLogger().setLevel(logging.CRITICAL)
WINDOW_POS = (0, 0)
def ustx_to_wav() -> Path:
"""
NOTE: Very hardcoded to my own setup
"""
# Reload current file
pyautogui.moveTo(WINDOW_POS[0] + 20, WINDOW_POS[1] + 50)
pyautogui.click()
pyautogui.move(0, 125)
pyautogui.click()
# time.sleep(0.5)
pyautogui.move(200, 0)
pyautogui.click()
# Export
pyautogui.moveTo(WINDOW_POS[0] + 20, WINDOW_POS[1] + 50)
pyautogui.click()
pyautogui.move(0, 270)
pyautogui.click()
time.sleep(0.25)
return Path('Export/song-01.wav')
def compare_mfcc_frames(frame_1, frame_2):
x = 0
for a, b in zip(frame_1, frame_2):
x += abs(a - b) ** 2
return x ** 0.5
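
# (compare_mfcc_frames is the Euclidean/L2 distance between two MFCC frames;
# with NumPy it would be numpy.linalg.norm(numpy.asarray(frame_1) - numpy.asarray(frame_2)).)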
VOWELS = '3AIOao{}'
CONSONANTS = '45DGLNSTfglprw'
DUMMY_VOWEL = '@'
DUMMY_CONSONANT = 'h'
all_possible_diphones = set()
all_possible_diphones |= set(VOWELS)
for c in CONSONANTS:
all_possible_diphones.add(c)
for v in VOWELS:
all_possible_diphones.add(c + v)
ENOTE = 240
MELODY = [
(69, ENOTE*2), # A
(64, ENOTE*2), # E
(72, ENOTE*2), # C
(71, ENOTE*1), # B
(69, ENOTE*1), # A
(71, ENOTE*2), # B
(67, ENOTE*2), # G
(62, ENOTE*2), # D
(71, ENOTE*2), # B
(65, ENOTE*2), # F
(60, ENOTE*2), # C
(69, ENOTE*2), # A
(67, ENOTE*1), # G
(65, ENOTE*1), # F
(67, ENOTE*2), # G
(69, ENOTE*2), # A
(71, ENOTE*2), # B
(74, ENOTE*2), # D
(72, ENOTE*4), # C
]
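
# (MIDI note numbers: 60 = C4, 64 = E4, 69 = A4, 72 = C5. ENOTE = 240 is a tick
# length; assuming OpenUtau's 480 ticks per quarter note, ENOTE is an eighth
# note and ENOTE*2 a quarter note.)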
def main():
NFFT = 1103
DO_GENERATE = True
real_wav_fp = Path('song-01-real.wav')
real_rate, real_sig = wav.read(real_wav_fp)
real_mfcc = mfcc(real_sig, real_rate, nfft=NFFT)
flag_list = []
for syllable_num in range(len(flag_list), len(flag_list) + 1):
syllable_dir = Path('Syllables') / f'{syllable_num:02d}'
if DO_GENERATE: syllable_dir.mkdir()
print(f'Syllable {syllable_num}')
# Create all WAVs
print('Creating wavs...')
dp_to_wav_fp = {}
for i, dp in enumerate(sorted(all_possible_diphones)):
print(f'[{syllable_num} {flag_list}] {i}/134')
flag = ''.join(flag_list + [dp])
if lib_generate.convert_to_diphones(flag)[-1] not in [dp, dp + '@', 'h' + dp]:
print('Combines wrong. SKIPPING')
continue
lib_generate.generate(flag, MELODY, check_assertions=False)
if DO_GENERATE: wav_path = ustx_to_wav()
new_wav_path = syllable_dir / f'{flag}.wav'
if DO_GENERATE:
for _ in range(10):
try:
wav_path.rename(new_wav_path)
break
except FileNotFoundError:
time.sleep(0.5)
else:
raise
dp_to_wav_fp[dp] = new_wav_path
# Select best WAV
print('Analyzing wavs...')
        best_diff = float("inf")
best_dp = None
dps_and_diffs = []
for i, dp in enumerate(sorted(all_possible_diphones)):
print(f'{i}/134')
if dp not in dp_to_wav_fp:
continue
dp_rate, dp_sig = wav.read(dp_to_wav_fp[dp])
dp_mfcc = mfcc(dp_sig, dp_rate, nfft=NFFT)
difference = 0
for j, (real_frame, dp_frame) in enumerate(zip(real_mfcc, dp_mfcc)):
difference += compare_mfcc_frames(real_frame, dp_frame)
if difference < best_diff:
best_diff = difference
best_dp = dp
dps_and_diffs.append((dp, difference))
for dp, diff in sorted(dps_and_diffs, key=lambda x: x[1]):
print(dp, diff)
flag_list.append(best_dp)
print(''.join(flag_list))
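
# Overall approach: a greedy, syllable-by-syllable search -- for each position,
# synthesize every candidate diphone, render it to a WAV via UI automation, and
# keep the candidate whose MFCC frames are closest (frame-wise L2 distance) to
# the target recording.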
main()
======================================================================
blob_id: 14d722c0aaed620ba9055f1f8523f75de0319dc1 | directory_id: ed865aed525556fd7aa5ac5a024af720de8438e3
path: /cli/tests/pcluster/aws/test_elb.py | filename: test_elb.py
content_id: 3fa7d582b75f6ab099c5c22f2dc7473ce69ebf95
detected_licenses: ["Python-2.0", "GPL-1.0-or-later", "MPL-2.0", "MIT", "LicenseRef-scancode-python-cwi", "BSD-3-Clause", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-free-unknown", "Apache-2.0", "MIT-0", "BSD-2-Clause"] | license_type: permissive | gha_license_id: Apache-2.0
repo_name: aws/aws-parallelcluster | branch_name: refs/heads/develop
snapshot_id: 7bb33a6e175168f63a1e0acb1a9a7e9cbc405eff | revision_id: a213978a09ea7fc80855bf55c539861ea95259f9
visit_date: 2023-09-05T15:12:18.533270 | revision_date: 2023-09-05T14:38:59 | committer_date: 2023-09-05T14:38:59
github_id: 19,718,034 | star_events_count: 520 | fork_events_count: 226
gha_event_created_at: 2023-09-14T15:56:30 | gha_created_at: 2014-05-12T22:42:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 6,900 | extension: py
content:
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from pcluster.aws.elb import ElbClient
from tests.utils import MockedBoto3Request
@pytest.fixture()
def boto3_stubber_path():
return "pcluster.aws.common.boto3"
@pytest.mark.parametrize("generate_error", [True, False])
def test_list_load_balancers(boto3_stubber, generate_error):
"""Verify that list_instance_types behaves as expected."""
dummy_message = "dummy error message"
dummy_load_balancer = {
"LoadBalancerArn": "dummy_load_balancer_arn",
"DNSName": "dummy_dns_name",
"LoadBalancerName": "dummy-load-balancer",
"Scheme": "internet-facing",
"State": {"Code": "active"},
}
dummy_load_balancer_2 = {
"LoadBalancerArn": "dummy_load_balancer_arn_2",
"DNSName": "dummy_dns_name",
"LoadBalancerName": "dummy-load-balancer-2",
"Scheme": "internal",
"State": {"Code": "provisioning"},
}
dummy_next_marker = "next_marker"
mocked_requests = [
MockedBoto3Request(
method="describe_load_balancers",
expected_params={},
response={"LoadBalancers": [dummy_load_balancer], "NextMarker": dummy_next_marker, "ResponseMetadata": {}},
generate_error=False,
),
MockedBoto3Request(
method="describe_load_balancers",
expected_params={"Marker": dummy_next_marker},
response=dummy_message
if generate_error
else {"LoadBalancers": [dummy_load_balancer_2], "ResponseMetadata": {}},
generate_error=generate_error,
),
]
boto3_stubber("elbv2", mocked_requests)
if generate_error:
with pytest.raises(BaseException, match=dummy_message):
ElbClient().list_load_balancers()
else:
return_value = ElbClient().list_load_balancers()
assert_that(return_value).is_equal_to([dummy_load_balancer, dummy_load_balancer_2])
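
# The two mocked requests above mirror standard elbv2 pagination: call
# describe_load_balancers once, and while the response includes "NextMarker",
# call again with Marker=<previous NextMarker> and extend the accumulated list.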
@pytest.mark.parametrize("generate_error", [True, False])
def test_describe_tags(boto3_stubber, generate_error):
"""Verify that list_instance_types behaves as expected."""
dummy_load_balancer_arns = ["dummy_load_balancer_arn", "another_dummy_load_balancer_arn"]
dummy_message = "dummy error message"
dummy_tags_description = [
{
"ResourceArn": "dummy_load_balancer_arn",
"Tags": [
{
"Key": "parallelcluster:cluster-name",
"Value": "pcluster-name-1",
},
],
},
{
"ResourceArn": "another_dummy_load_balancer_arn",
"Tags": [
{
"Key": "parallelcluster:cluster-name",
"Value": "pcluster-name-2",
},
],
},
]
mocked_requests = [
MockedBoto3Request(
method="describe_tags",
expected_params={"ResourceArns": dummy_load_balancer_arns},
response=dummy_message
if generate_error
else {"TagDescriptions": dummy_tags_description, "ResponseMetadata": {}},
generate_error=generate_error,
)
]
boto3_stubber("elbv2", mocked_requests)
if generate_error:
with pytest.raises(BaseException, match=dummy_message):
ElbClient().describe_tags(dummy_load_balancer_arns)
else:
return_value = ElbClient().describe_tags(dummy_load_balancer_arns)
assert_that(return_value).is_equal_to(dummy_tags_description)
@pytest.mark.parametrize("generate_error", [True, False])
def test_describe_targets_group(boto3_stubber, generate_error):
"""Verify that describe_target_groups behaves as expected."""
dummy_load_balancer_arn = "dummy_load_balancer_arn"
dummy_message = "dummy error message"
dummy_target_groups = [
{
"HealthCheckPort": "22",
"LoadBalancerArns": [dummy_load_balancer_arn],
},
{
"HealthCheckPort": "22",
"LoadBalancerArns": [dummy_load_balancer_arn],
},
]
mocked_requests = [
MockedBoto3Request(
method="describe_target_groups",
expected_params={"LoadBalancerArn": dummy_load_balancer_arn},
response=dummy_message if generate_error else {"TargetGroups": dummy_target_groups, "ResponseMetadata": {}},
generate_error=generate_error,
)
]
boto3_stubber("elbv2", mocked_requests)
if generate_error:
with pytest.raises(BaseException, match=dummy_message):
ElbClient().describe_target_groups(dummy_load_balancer_arn)
else:
return_value = ElbClient().describe_target_groups(dummy_load_balancer_arn)
assert_that(return_value).is_equal_to(dummy_target_groups)
@pytest.mark.parametrize("generate_error", [True, False])
def test_describe_target_health(boto3_stubber, generate_error):
"""Verify that describe_target_health behaves as expected."""
dummy_target_arn = "dummy_target_arn"
dummy_message = "dummy error message"
dummy_targets_health = [
{
"HealthCheckPort": "22",
"Target": {
"Id": "i-123456",
"Port": 22,
},
"TargetHealth": {
"State": "healthy",
},
},
{
"HealthCheckPort": "22",
"Target": {
"Id": "i-789101",
"Port": 22,
},
"TargetHealth": {
"State": "healthy",
},
},
]
mocked_requests = [
MockedBoto3Request(
method="describe_target_health",
expected_params={"TargetGroupArn": "dummy_target_arn"},
response=dummy_message
if generate_error
else {"TargetHealthDescriptions": dummy_targets_health, "ResponseMetadata": {}},
generate_error=generate_error,
)
]
boto3_stubber("elbv2", mocked_requests)
if generate_error:
with pytest.raises(BaseException, match=dummy_message):
ElbClient().describe_target_health(dummy_target_arn)
else:
return_value = ElbClient().describe_target_health(dummy_target_arn)
assert_that(return_value).is_equal_to(dummy_targets_health)
======================================================================
blob_id: 6449cead461638987a08b225d7cf47249a1cd2dc | directory_id: 965efc4d7a83c2b5592417aa7e0d25a51f5a8108
path: /backend/metering_billing/migrations/0209_remove_historicalplan_addon_spec_and_more.py | filename: 0209_remove_historicalplan_addon_spec_and_more.py
content_id: 52c597555b87464859558c1b9360500730878f60
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
repo_name: uselotus/lotus | branch_name: refs/heads/main
snapshot_id: f4ee23bb828605215f18aacd1d6fcff8e0986c53 | revision_id: c065fb33ee1a870d72bbd2adfddc08d50ca049b6
visit_date: 2023-08-17T03:38:35.770580 | revision_date: 2023-07-26T18:50:17 | committer_date: 2023-07-26T18:50:17
github_id: 516,192,901 | star_events_count: 1,447 | fork_events_count: 100
gha_event_created_at: 2023-06-25T22:53:06 | gha_created_at: 2022-07-21T02:06:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,726 | extension: py
content:
# Generated by Django 4.0.5 on 2023-02-24 23:06
from django.db import migrations, models
import metering_billing.utils.utils
class Migration(migrations.Migration):
dependencies = [
("metering_billing", "0208_copy_addon_specs"),
]
operations = [
migrations.RemoveField(
model_name="historicalplan",
name="addon_spec",
),
migrations.RemoveField(
model_name="plan",
name="addon_spec",
),
migrations.AddField(
model_name="historicalplan",
name="active_to",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="historicalplan",
name="active_from",
field=models.DateTimeField(
blank=True, default=metering_billing.utils.utils.now_utc
),
),
migrations.AddField(
model_name="plan",
name="active_to",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="plan",
name="active_from",
field=models.DateTimeField(
blank=True, default=metering_billing.utils.utils.now_utc
),
),
migrations.AddField(
model_name="planversion",
name="active_to",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="planversion",
name="active_from",
field=models.DateTimeField(
blank=True, null=True, default=metering_billing.utils.utils.now_utc
),
),
]
======================================================================
blob_id: 2b3ecb033c8be7bfee661d7596fd7d1442302007 | directory_id: 73a0f661f1423d63e86489d4b2673f0103698aab
path: /python/oneflow/test/graph/test_graph_nccl_logical_fusion.py | filename: test_graph_nccl_logical_fusion.py
content_id: 81cc91d99670092771e704c8c078e12b3160f8e3
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
repo_name: Oneflow-Inc/oneflow | branch_name: refs/heads/master
snapshot_id: 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | revision_id: 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
visit_date: 2023-08-25T16:58:30.576596 | revision_date: 2023-08-22T14:15:46 | committer_date: 2023-08-22T14:15:46
github_id: 81,634,683 | star_events_count: 5,495 | fork_events_count: 786
gha_event_created_at: 2023-09-14T09:44:31 | gha_created_at: 2017-02-11T06:09:53 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 6,529 | extension: py
content:
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
from oneflow import nn
import os
import numpy as np
import oneflow.unittest
@flow.unittest.skip_unless_1n4d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestGraphNcclLogicalFusion(flow.unittest.TestCase):
def test_graph_nccl_fusion_1d(test_case):
x_list = []
local_np = np.arange(4 * 8, dtype=float).reshape(4, 8)
P1d = flow.placement("cuda", ranks=[0, 1, 2, 3])
B = flow.sbp.broadcast()
S0 = flow.sbp.split(0)
S1 = flow.sbp.split(1)
P = flow.sbp.partial_sum()
in_0 = (
flow.tensor(local_np / 4.0)
.to(flow.device("cuda"))
.to_global(sbp=P, placement=P1d)
)
flow.boxing.nccl.enable_use_compute_stream(True)
class TestNcclFusion1DGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
def build(self, x):
# fuse group 0:
x0 = x * 0.5
y0 = x0.to_global(sbp=B, placement=P1d) # P->B
x1 = x * 1.0
y1 = x1.to_global(sbp=S0, placement=P1d) # P->S0
x2 = x * 2.0
y2 = x2.to_global(sbp=S1, placement=P1d) # P->S1
x3 = x * 3.0
y3 = x3.to_global(sbp=S1, placement=P1d) # P->S1
x4 = x * 4.0
y4 = x4.to_global(sbp=S0, placement=P1d) # P->S0
# fuse group 1:
x5 = y1 * 5.0
y5 = x5.to_global(sbp=B, placement=P1d) # S0->B
x6 = y2 * (6.0 / 2.0)
y6 = x6.to_global(sbp=B, placement=P1d) # S1->B
x7 = y3 * (9.0 / 3.0)
y7 = x7.to_global(sbp=S0, placement=P1d) # S1->S0
x8 = y4 * (8.0 / 4.0)
y8 = x8.to_global(sbp=S1, placement=P1d) # S0->S1
y = y0 + y1 + y2 + y3 + y4 + y5 + y6 + y7 + y8
return y, y0, y1, y2, y3, y4, y5, y6, y7, y8
graph = TestNcclFusion1DGraph()
out, out_0, out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8 = graph(in_0)
test_case.assertTrue(np.array_equal(out_0.numpy(), local_np * 0.5))
test_case.assertTrue(np.array_equal(out_1.numpy(), local_np * 1.0))
test_case.assertTrue(np.array_equal(out_2.numpy(), local_np * 2.0))
test_case.assertTrue(np.array_equal(out_3.numpy(), local_np * 3.0))
test_case.assertTrue(np.array_equal(out_4.numpy(), local_np * 4.0))
test_case.assertTrue(np.array_equal(out_5.numpy(), local_np * 5.0))
test_case.assertTrue(np.array_equal(out_6.numpy(), local_np * 6.0))
test_case.assertTrue(np.array_equal(out_7.numpy(), local_np * 9.0))
test_case.assertTrue(np.array_equal(out_8.numpy(), local_np * 8.0))
flow.boxing.nccl.enable_use_compute_stream(False)
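
    # SBP shorthand used above: B = broadcast, S0/S1 = split along dim 0/1,
    # P = partial sum. Each to_global() boxing lowers to an NCCL collective
    # (presumably P->B as all-reduce, P->S0 as reduce-scatter, S0->B as
    # all-gather, S0<->S1 as all-to-all), and enable_use_compute_stream(True)
    # places these on the compute stream so adjacent collectives can be fused
    # -- the behavior under test here.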
def test_graph_nccl_fusion_2d(test_case):
x_list = []
local_np = np.arange(4 * 8, dtype=float).reshape(4, 8)
P2d = flow.placement("cuda", ranks=[[0, 1], [2, 3]])
B = flow.sbp.broadcast()
S0 = flow.sbp.split(0)
S1 = flow.sbp.split(1)
P = flow.sbp.partial_sum()
in_BP = (
flow.tensor(local_np / 2.0)
.to(flow.device("cuda"))
.to_global(sbp=(B, P), placement=P2d)
)
in_PB = (
flow.tensor(local_np / 2.0)
.to(flow.device("cuda"))
.to_global(sbp=(P, B), placement=P2d)
)
in_S0P = in_BP.to_global(sbp=(S0, P), placement=P2d)
in_PS0 = in_PB.to_global(sbp=(P, S0), placement=P2d)
flow.boxing.nccl.enable_use_compute_stream(True)
class TestNcclFusion2DGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
def build(self, x, xsd1):
# fuse group 0:
x0 = x * 0.5
y0 = x0.to_global(sbp=(S0, B), placement=P2d) # same dim0 P->B
x1 = x * 1.0
y1 = x1.to_global(sbp=(S0, B), placement=P2d) # same dim0 P->B
xss0 = x.to_global(sbp=(S0, S0), placement=P2d)
xss1 = x.to_global(sbp=(S0, S1), placement=P2d)
x2 = xss0 * 2.0
y2 = x2.to_global(sbp=(S0, B), placement=P2d) # same dim0 S0->B
x3 = xss1 * 3.0
y3 = x3.to_global(sbp=(S0, B), placement=P2d) # same dim0 S1->B
x4 = xss0 * 4.0
y4 = x4.to_global(sbp=(S0, S1), placement=P2d) # same dim0 S0->S1
x5 = xss1 * 5.0
y5 = x5.to_global(sbp=(S0, S0), placement=P2d) # same dim0 S1->S0
x6 = xsd1 * 6.0
y6 = x6.to_global(sbp=(B, S0), placement=P2d) # same dim1 P-> B
x7 = xsd1 * 7.0
y7 = x7.to_global(sbp=(B, S0), placement=P2d) # same dim1 P-> B
y = y0 + y1 + y2 + y3 + y4 + y5 + y6 + y7
return y, y0, y1, y2, y3, y4, y5, y6, y7
graph = TestNcclFusion2DGraph()
out, out_0, out_1, out_2, out_3, out_4, out_5, out_6, out_7 = graph(
in_S0P, in_PS0
)
test_case.assertTrue(np.array_equal(out_0.numpy(), local_np * 0.5))
test_case.assertTrue(np.array_equal(out_1.numpy(), local_np * 1.0))
test_case.assertTrue(np.array_equal(out_2.numpy(), local_np * 2.0))
test_case.assertTrue(np.array_equal(out_3.numpy(), local_np * 3.0))
test_case.assertTrue(np.array_equal(out_4.numpy(), local_np * 4.0))
test_case.assertTrue(np.array_equal(out_5.numpy(), local_np * 5.0))
test_case.assertTrue(np.array_equal(out_6.numpy(), local_np * 6.0))
test_case.assertTrue(np.array_equal(out_7.numpy(), local_np * 7.0))
flow.boxing.nccl.enable_use_compute_stream(False)
if __name__ == "__main__":
unittest.main()
======================================================================
blob_id: cca997aeadb78f369b2bffff8ef492deb1b15e26 | directory_id: 80a3d98eae1d755d6914b5cbde63fd10f5cc2046
path: /autox/autox_video/mmaction2/configs/skeleton/stgcn/stgcn_80e_babel120_wfl.py | filename: stgcn_80e_babel120_wfl.py
content_id: 63516b2e1f73f680e01d1cd0fe0fbe27a86371b8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
repo_name: 4paradigm/AutoX | branch_name: refs/heads/master
snapshot_id: efda57b51b586209e1d58e1dab7d0797083aadc5 | revision_id: 7eab9f4744329a225ff01bb5ec360c4662e1e52e
visit_date: 2023-05-24T00:53:37.109036 | revision_date: 2023-02-14T14:21:50 | committer_date: 2023-02-14T14:21:50
github_id: 388,068,949 | star_events_count: 752 | fork_events_count: 162
gha_event_created_at: 2022-07-12T08:28:09 | gha_created_at: 2021-07-21T09:45:41 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,056 | extension: py
content:
samples_per_cls = [
518, 1993, 6260, 508, 208, 3006, 431, 724, 4527, 2131, 199, 1255, 487, 302,
136, 571, 267, 646, 1180, 405, 72, 731, 842, 1619, 271, 27, 1198, 1012,
110, 865, 462, 526, 405, 487, 101, 24, 84, 64, 168, 271, 609, 503, 76, 167,
415, 137, 421, 283, 2069, 715, 196, 66, 44, 989, 122, 43, 599, 396, 245,
380, 34, 236, 260, 325, 127, 133, 119, 66, 125, 50, 206, 191, 394, 69, 98,
145, 38, 21, 29, 64, 277, 65, 39, 31, 35, 85, 54, 80, 133, 66, 39, 64, 268,
34, 172, 54, 33, 21, 110, 19, 40, 55, 146, 39, 37, 75, 101, 20, 46, 55, 43,
21, 43, 87, 29, 36, 24, 37, 28, 39
]
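
# samples_per_cls parameterizes the class-balanced focal loss configured below
# ('CBFocalLoss'). Assuming it follows the class-balanced loss of Cui et al.
# (CVPR 2019), each class c is reweighted by (1 - beta) / (1 - beta ** n_c),
# where n_c is the per-class sample count listed above.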
model = dict(
type='SkeletonGCN',
backbone=dict(
type='STGCN',
in_channels=3,
edge_importance_weighting=True,
graph_cfg=dict(layout='ntu-rgb+d', strategy='spatial')),
cls_head=dict(
type='STGCNHead',
num_classes=120,
in_channels=256,
num_person=1,
loss_cls=dict(type='CBFocalLoss', samples_per_cls=samples_per_cls)),
train_cfg=None,
test_cfg=None)
dataset_type = 'PoseDataset'
ann_file_train = 'data/babel/babel120_train.pkl'
ann_file_val = 'data/babel/babel120_val.pkl'
train_pipeline = [
dict(type='PoseDecode'),
dict(type='FormatGCNInput', input_format='NCTVM', num_person=1),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
dict(type='PoseDecode'),
dict(type='FormatGCNInput', input_format='NCTVM', num_person=1),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
dict(type='PoseDecode'),
dict(type='FormatGCNInput', input_format='NCTVM', num_person=1),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
videos_per_gpu=16,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix='',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[10, 14])
total_epochs = 16
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/stgcn_80e_babel120_wfl/'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
f20187e92f37d9a5a30b9cc8715252b3d95c0028
|
56d6257e932e1397ab03b1e7ccc6231378665b04
|
/_Komplete_Kontrol/selection_linked_session_ring_component.py
|
4970719c5bfde6f2547152ee8b505bcfe1322e58
|
[] |
no_license
|
gluon/AbletonLive10.1_MIDIRemoteScripts
|
e6c8dc4956cff9630aaa36f3667994387ad1d0cf
|
2468b51eba7e5082b06f9e381b3e72027c5f272c
|
refs/heads/master
| 2023-01-10T18:37:46.504180
| 2022-12-23T09:21:48
| 2022-12-23T09:21:48
| 213,423,555
| 205
| 59
| null | 2021-02-12T16:15:01
| 2019-10-07T15:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
selection_linked_session_ring_component.py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Komplete_Kontrol/selection_linked_session_ring_component.py
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens
from ableton.v2.control_surface.components import SessionRingComponent
class SelectionLinkedSessionRingComponent(SessionRingComponent):
def __init__(self, *a, **k):
super(SelectionLinkedSessionRingComponent, self).__init__(*a, **k)
self.__on_selected_track_changed.subject = self.song.view
self.__on_selected_track_changed()
@listens(u'selected_track')
def __on_selected_track_changed(self):
selected_track = self.song.view.selected_track
if selected_track not in self.controlled_tracks():
all_tracks = list(self.tracks_to_use())
self.track_offset = all_tracks.index(selected_track)
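# Behaviour note (reader aid, not in the original source): when the selected
# track falls outside the ring, the ring is moved to start at that track by
# setting track_offset to the track's index within tracks_to_use().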
|
460dd622c0920f69d3e0391e51bb3997685253a4
|
e79cb86744b9cc5d46912f2f9acdb5ffd434f745
|
/src/mpl2/test/plot_floorplan.py
|
01d6782a7fa20d119157a58f1d351f9ee8792142
|
[
"BSD-3-Clause",
"MPL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
The-OpenROAD-Project/OpenROAD
|
555cbb00ec250bb09b9e4f9a7d1454e7ac7a01ab
|
1f6ccc9066e7df4509ed391d87b01eadb4b3b197
|
refs/heads/master
| 2023-08-31T05:35:25.363354
| 2023-08-31T05:04:27
| 2023-08-31T05:04:27
| 218,110,222
| 979
| 461
|
BSD-3-Clause
| 2023-09-14T21:51:36
| 2019-10-28T17:48:14
|
Verilog
|
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
plot_floorplan.py
|
import os
import matplotlib.pyplot as plt
from math import log
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--directory", default = "./results/mp_test1", help = "result directory")
parser.add_argument("--net_threshold", default = -1, help = "nets with weight below net_threshold will not be displayed")
parser.add_argument("--top_n", default = 50, help = "only show the top_n nets")
args = parser.parse_args()
design_dir = args.directory
top_n = args.top_n
net_threshold = args.net_threshold
# users can set this threshold manually
# if it's not set, it will be updated automatically
# only highlight the top n critical edges
#top_n = 50
#net_threshold = -1
highlight_list = []
#design_dir = "./results/mp_test1"
file_name = design_dir + "/root.fp.txt"
macro_map = { }
terminal_map = { }
with open(file_name) as f:
    content = f.read().splitlines()
for line in content:
items = line.split()
macro_map[items[0]] = [float(items[1]), float(items[2]), float(items[3]), float(items[4])]
terminal_map[items[0]] = [float(items[1]) + float(items[3]) / 2.0, float(items[2]) + float(items[4]) / 2.0]
file_name = design_dir + "/root.net.txt"
net_map = [ ]
with open(file_name) as f:
    content = f.read().splitlines()
net_values = []
for line in content:
items = line.split()
net_map.append([items[0], items[1], float(items[2]) + 1])
net_values.append(float(items[2]) + 1)
if (top_n > 0 and net_threshold <= 1 and len(net_values) > top_n):
net_values.sort(reverse = True)
net_threshold = net_values[top_n]
print("Only highlight the top ", top_n, " edges")
print("Reset net_threshold to ", net_threshold)
lx = 1e9
ly = 1e9
ux = 0.0
uy = 0.0
plt.figure()
for macro_name, size in macro_map.items():
color = "r"
if (macro_name in highlight_list):
color = "yellow"
rectangle = plt.Rectangle((size[0], size[1]), size[2], size[3], fc = color, ec = "blue")
lx = min(lx, size[0])
ly = min(ly, size[1])
ux = max(ux, size[0] + size[2])
uy = max(uy, size[1] + size[3])
plt.gca().add_patch(rectangle)
for net in net_map:
source = net[0]
target = net[1]
weight = net[2]
x = []
y = []
x.append(terminal_map[source][0])
x.append(terminal_map[target][0])
y.append(terminal_map[source][1])
y.append(terminal_map[target][1])
if weight > net_threshold:
plt.plot(x,y,'k', lw = log(weight))
# draw the dashed bounding box of the floorplan
for x, y in [([lx, ux], [ly, ly]),
             ([lx, ux], [uy, uy]),
             ([lx, lx], [ly, uy]),
             ([ux, ux], [ly, uy])]:
    plt.plot(x, y, '--k')
plt.xlim(lx, ux)
plt.ylim(ly, uy)
plt.axis("scaled")
plt.show()
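# Reader note (not in the original script): plt.show() requires a display; for
# headless runs one could save the figure instead, e.g. (output path is
# illustrative):
#   plt.savefig(os.path.join(design_dir, "floorplan.png"), dpi=300)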
|
d87d8a4c8329829a6899b173ae521fad7107e4a4
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/test_lib/fleetspeak_test_lib.py
|
d2950a7189a33e2bebc2eb49b338d51cdb5312fd
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
fleetspeak_test_lib.py
|
#!/usr/bin/env python
"""Fleetspeak-related helpers for use in tests."""
import collections
import functools
import threading
from typing import Optional, Text
from unittest import mock
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_proto import jobs_pb2
from grr_response_server import fleetspeak_connector
from grr_response_server import fleetspeak_utils
from fleetspeak.src.common.proto.fleetspeak import common_pb2
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2
_message_lock = threading.Lock()
_messages_by_client_id = {}
def StoreMessage(fs_msg: common_pb2.Message):
"""Emulates sending of a message to Fleetspeak by storing it in-memory."""
if not fs_msg.destination.client_id:
raise ValueError("No destination set for Fleetspeak message:\n%s" % fs_msg)
grr_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_msg.destination.client_id)
raw_grr_msg = jobs_pb2.GrrMessage()
fs_msg.data.Unpack(raw_grr_msg)
grr_msg = rdf_flows.GrrMessage.FromSerializedBytes(
raw_grr_msg.SerializeToString())
with _message_lock:
try:
_messages_by_client_id[grr_id].append(grr_msg)
except KeyError:
_messages_by_client_id[grr_id] = collections.deque([grr_msg])
def PopMessage(client_id: Text) -> Optional[rdf_flows.GrrMessage]:
"""Returns a message sent to the given Fleetspeak client.
The returned message is removed from the in-memory store. Messages for
any given client are returned in the order in which they are inserted. If
a client has no pending messages, None is returned.
Args:
client_id: GRR id of the Fleetspeak client to return a message for.
"""
try:
with _message_lock:
return _messages_by_client_id[client_id].popleft()
except (KeyError, IndexError):
return None
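# Illustrative usage sketch (not part of the library; the client id below is
# made up). Messages stored for a client come back in FIFO order:
#
#   StoreMessage(fs_msg)                    # fs_msg: a common_pb2.Message
#   msg = PopMessage("C.0123456789abcdef")  # -> rdf_flows.GrrMessage or None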
def WithFleetspeakConnector(func):
"""A decorator for Fleetspeak connector-dependent test methods.
This decorator is intended for tests that might involve sending
Fleetspeak messages or interacting with Fleetspeak connector.
Args:
func: A test method to be decorated.
Returns:
A Fleetspeak connector-aware function.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
with mock.patch.object(fleetspeak_connector, "CONN") as mock_conn:
mock_conn.outgoing.InsertMessage.side_effect = (
lambda msg, **_: StoreMessage(msg)
)
mock_conn.outgoing.ListClients.side_effect = (
lambda msg, **_: admin_pb2.ListClientsResponse()
)
Reset()
func(*(args + (mock_conn,)), **kwargs)
return Wrapper
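# Illustrative decorator usage (test class and method names are made up):
#
#   class FleetspeakFlowTest(SomeTestCase):
#
#     @WithFleetspeakConnector
#     def testSendsMessage(self, mock_conn):
#       ...  # mock_conn is appended as the last positional argument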
def Reset():
"""Resets the test queue."""
global _messages_by_client_id
with _message_lock:
_messages_by_client_id = {}
|
a21004dfcd9c013db64d18c728f4c34bddb1942b
|
70e9a7da3d4e2a41b30544516e166dab2495253c
|
/l10n_br_sicoob/models/res_bank.py
|
f61a06dffe0966439b49565403690498d9db30a7
|
[
"MIT"
] |
permissive
|
Trust-Code/odoo-brasil
|
bf06ea58a4e0376cb5c297c18bf48eaf97104e54
|
d456a10e32f56e259061afbd989942ea1aae2c2d
|
refs/heads/16.0
| 2023-08-31T16:06:21.038792
| 2023-01-26T19:31:31
| 2023-01-26T19:31:31
| 72,882,959
| 206
| 253
|
MIT
| 2023-08-18T17:05:49
| 2016-11-04T20:28:03
|
Python
|
UTF-8
|
Python
| false
| false
| 160
|
py
|
res_bank.py
|
from odoo import models, fields
class ResBank(models.Model):
_inherit = 'res.partner.bank'
l10n_br_branch_number = fields.Char("Agência Bancária")
|
0f2f97e547d1861f7e4675b52acd74d7d4a3b3df
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/tests/test_packages/test_skills/test_registration_aw1/test_handlers.py
|
602c8ecb1a8248338ff456babc8381ca93efc2d4
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 15,192
|
py
|
test_handlers.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the handler classes of the registration_aw1 skill."""
import logging
from pathlib import Path
from typing import cast
from unittest.mock import patch
from aea.helpers.transaction.base import Terms
from aea.protocols.dialogue.base import DialogueMessage
from packages.fetchai.protocols.register.message import RegisterMessage
from packages.fetchai.protocols.signing.message import SigningMessage
from packages.fetchai.skills.registration_aw1.dialogues import (
RegisterDialogue,
SigningDialogue,
)
from tests.conftest import ROOT_DIR
from tests.test_packages.test_skills.test_registration_aw1.intermediate_class import (
RegiatrationAW1TestCase,
)
class TestAW1RegistrationHandler(RegiatrationAW1TestCase):
"""Test registration handler of registration_aw1."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "registration_aw1")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
cls.signature_of_ethereum_address = "some_signature_of_ethereum_address"
cls.info = {
"ethereum_address": cls.ethereum_address,
"fetchai_address": cls._skill.skill_context.agent_address,
"signature_of_ethereum_address": cls.signature_of_ethereum_address,
"signature_of_fetchai_address": cls.signature_of_fetchai_address,
"developer_handle": cls.developer_handle,
"tweet": cls.tweet,
}
cls.list_of_messages = (
DialogueMessage(RegisterMessage.Performative.REGISTER, {"info": cls.info}),
)
def test_setup(self):
"""Test the setup method of the registration_aw1 handler."""
assert self.register_handler.setup() is None
self.assert_quantity_in_outbox(0)
def test_handle_unidentified_dialogue(self):
"""Test the _handle_unidentified_dialogue method of the registration_aw1 handler."""
# setup
incorrect_dialogue_reference = ("", "")
incoming_message = self.build_incoming_message(
message_type=RegisterMessage,
dialogue_reference=incorrect_dialogue_reference,
performative=RegisterMessage.Performative.REGISTER,
info={"some_key": "some_value"},
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_handler.handle(incoming_message)
# after
mock_logger.assert_any_call(
logging.INFO,
f"received invalid register_msg message={incoming_message}, unidentified dialogue.",
)
def test_handle_success_i(self):
"""Test the _handle_success method of the registration_aw1 handler where announce_termination_key IS None."""
# setup
self.strategy.announce_termination_key = None
register_dialogue = cast(
RegisterDialogue,
self.prepare_skill_dialogue(
dialogues=self.register_dialogues,
messages=self.list_of_messages,
is_agent_to_agent_messages=True,
),
)
incoming_message = cast(
RegisterMessage,
self.build_incoming_message_for_skill_dialogue(
dialogue=register_dialogue,
performative=RegisterMessage.Performative.SUCCESS,
info={"transaction_digest": "some_transaction_digest"},
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_handler.handle(incoming_message)
# after
self.assert_quantity_in_outbox(0)
mock_logger.assert_any_call(
logging.DEBUG,
f"received register_msg success message={incoming_message} in dialogue={register_dialogue}.",
)
mock_logger.assert_any_call(
logging.INFO,
f"received register message success, info={incoming_message.info}. Stop me now!",
)
assert self.strategy.is_registered is True
assert self.strategy.is_registration_pending is False
assert self.strategy.is_ready_to_register is False
def test_handle_success_ii(self):
"""Test the _handle_success method of the registration_aw1 handler where announce_termination_key is NOT None."""
# setup
key = "some_key"
self.strategy.announce_termination_key = key
register_dialogue = cast(
RegisterDialogue,
self.prepare_skill_dialogue(
dialogues=self.register_dialogues,
messages=self.list_of_messages,
is_agent_to_agent_messages=True,
),
)
incoming_message = cast(
RegisterMessage,
self.build_incoming_message_for_skill_dialogue(
dialogue=register_dialogue,
performative=RegisterMessage.Performative.SUCCESS,
info={"transaction_digest": "some_transaction_digest"},
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_handler.handle(incoming_message)
# after
self.assert_quantity_in_outbox(0)
mock_logger.assert_any_call(
logging.DEBUG,
f"received register_msg success message={incoming_message} in dialogue={register_dialogue}.",
)
mock_logger.assert_any_call(
logging.INFO,
f"received register message success, info={incoming_message.info}. Stop me now!",
)
assert self.strategy.is_registered is True
assert self.strategy.is_registration_pending is False
assert self.strategy.is_ready_to_register is False
assert self.skill.skill_context.shared_state[key] is True
def test_handle_error(self):
"""Test the _handle_error method of the registration_aw1 handler."""
# setup
register_dialogue = cast(
RegisterDialogue,
self.prepare_skill_dialogue(
dialogues=self.register_dialogues,
messages=self.list_of_messages,
is_agent_to_agent_messages=True,
),
)
incoming_message = cast(
RegisterMessage,
self.build_incoming_message_for_skill_dialogue(
dialogue=register_dialogue,
performative=RegisterMessage.Performative.ERROR,
error_code=1,
error_msg="some_error_msg",
info={"some_key": "some_value"},
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_handler.handle(incoming_message)
# after
self.assert_quantity_in_outbox(0)
mock_logger.assert_any_call(
logging.DEBUG,
f"received register_msg error message={incoming_message} in dialogue={register_dialogue}.",
)
mock_logger.assert_any_call(
logging.INFO,
f"received register message error, error_msg={incoming_message.error_msg}. Stop me now!",
)
assert self.strategy.is_registration_pending is False
assert self.strategy.is_ready_to_register is False
def test_handle_invalid(self):
"""Test the _handle_invalid method of the registration_aw1 handler."""
# setup
incoming_message = cast(
RegisterMessage,
self.build_incoming_message(
message_type=RegisterMessage,
performative=RegisterMessage.Performative.REGISTER,
info=self.info,
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_handler.handle(incoming_message)
# after
self.assert_quantity_in_outbox(0)
register_dialogue = self.register_dialogues.get_dialogue(incoming_message)
mock_logger.assert_any_call(
logging.WARNING,
f"cannot handle register_msg message of performative={incoming_message.performative} in dialogue={register_dialogue}.",
)
def test_teardown(self):
"""Test the teardown method of the registration_aw1 handler."""
assert self.register_handler.teardown() is None
self.assert_quantity_in_outbox(0)
class TestSigningHandler(RegiatrationAW1TestCase):
"""Test signing handler of registration_aw1."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "registration_aw1")
is_agent_to_agent_messages = False
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
cls.ledger_id = "some_ledger_id"
cls.body_bytes = b"some_body"
cls.body_str = "some_body"
cls.terms = Terms(
"some_ledger_id",
cls._skill.skill_context.agent_address,
"counterprty",
{"currency_id": 50},
{"good_id": -10},
"some_nonce",
)
cls.list_of_signing_msg_messages = (
DialogueMessage(
SigningMessage.Performative.SIGN_MESSAGE,
{
"terms": cls.terms,
"raw_message": SigningMessage.RawMessage(
cls.ledger_id, cls.body_bytes
),
},
),
)
def test_setup(self):
"""Test the setup method of the signing handler."""
assert self.signing_handler.setup() is None
self.assert_quantity_in_outbox(0)
def test_handle_unidentified_dialogue(self):
"""Test the _handle_unidentified_dialogue method of the signing handler."""
# setup
incorrect_dialogue_reference = ("", "")
incoming_message = self.build_incoming_message(
message_type=SigningMessage,
dialogue_reference=incorrect_dialogue_reference,
performative=SigningMessage.Performative.ERROR,
error_code=SigningMessage.ErrorCode.UNSUCCESSFUL_MESSAGE_SIGNING,
to=str(self.skill.skill_context.skill_id),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.signing_handler.handle(incoming_message)
# after
mock_logger.assert_any_call(
logging.INFO,
f"received invalid signing message={incoming_message}, unidentified dialogue.",
)
def test_handle_signed_message(self):
"""Test the _handle_signed_message method of the signing handler."""
# setup
signing_dialogue = cast(
SigningDialogue,
self.prepare_skill_dialogue(
dialogues=self.signing_dialogues,
messages=self.list_of_signing_msg_messages[:1],
counterparty=self.skill.skill_context.decision_maker_address,
),
)
incoming_message = cast(
SigningMessage,
self.build_incoming_message_for_skill_dialogue(
dialogue=signing_dialogue,
performative=SigningMessage.Performative.SIGNED_MESSAGE,
signed_message=SigningMessage.SignedMessage(
self.ledger_id, self.body_str
),
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.signing_handler.handle(incoming_message)
# after
self.assert_quantity_in_outbox(0)
mock_logger.assert_any_call(
logging.DEBUG,
f"received signing message from decision maker, message={incoming_message} in dialogue={signing_dialogue}",
)
mock_logger.assert_any_call(
logging.INFO,
f"received signing message from decision maker, signature={incoming_message.signed_message.body} stored!",
)
assert self.strategy.signature_of_ethereum_address == self.body_str
assert self.strategy.is_ready_to_register is True
def test_handle_error(self):
"""Test the _handle_error method of the signing handler."""
# setup
signing_counterparty = self.skill.skill_context.decision_maker_address
signing_dialogue = self.prepare_skill_dialogue(
dialogues=self.signing_dialogues,
messages=self.list_of_signing_msg_messages[:1],
counterparty=signing_counterparty,
)
incoming_message = cast(
SigningMessage,
self.build_incoming_message_for_skill_dialogue(
dialogue=signing_dialogue,
performative=SigningMessage.Performative.ERROR,
error_code=SigningMessage.ErrorCode.UNSUCCESSFUL_TRANSACTION_SIGNING,
),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.signing_handler.handle(incoming_message)
# after
mock_logger.assert_any_call(
logging.INFO,
f"transaction signing was not successful. Error_code={incoming_message.error_code} in dialogue={signing_dialogue}",
)
def test_handle_invalid(self):
"""Test the _handle_invalid method of the signing handler."""
# setup
invalid_performative = SigningMessage.Performative.SIGN_TRANSACTION
incoming_message = self.build_incoming_message(
message_type=SigningMessage,
dialogue_reference=("1", ""),
performative=invalid_performative,
terms=self.terms,
raw_transaction=SigningMessage.RawTransaction(
"some_ledger_id", {"some_key": "some_value"}
),
to=str(self.skill.skill_context.skill_id),
)
# operation
with patch.object(self.logger, "log") as mock_logger:
self.signing_handler.handle(incoming_message)
# after
mock_logger.assert_any_call(
logging.WARNING,
f"cannot handle signing message of performative={incoming_message.performative} in dialogue={self.signing_dialogues.get_dialogue(incoming_message)}.",
)
def test_teardown(self):
"""Test the teardown method of the signing handler."""
assert self.signing_handler.teardown() is None
self.assert_quantity_in_outbox(0)
|
8629aaef45a20ac217580bdd0326b89bd53ebf2a
|
0dddc0508138396c740901be4a0f9eebefb8fded
|
/ax/core/tests/test_optimization_config.py
|
e5597f5594335c77406276656cd82b28111f7399
|
[
"MIT"
] |
permissive
|
facebook/Ax
|
473beb143016f95f4ec381ed1bd95b32c1ca31f8
|
6443cee30cbf8cec290200a7420a3db08e4b5445
|
refs/heads/main
| 2023-09-01T09:29:13.684709
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 169,880,381
| 2,207
| 315
|
MIT
| 2023-09-14T21:26:51
| 2019-02-09T15:23:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 22,183
|
py
|
test_optimization_config.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.optimization_config import (
_NO_RISK_MEASURE,
MultiObjectiveOptimizationConfig,
OptimizationConfig,
)
from ax.core.outcome_constraint import (
ObjectiveThreshold,
OutcomeConstraint,
ScalarizedOutcomeConstraint,
)
from ax.core.risk_measures import RiskMeasure
from ax.core.types import ComparisonOp
from ax.exceptions.core import UserInputError
from ax.utils.common.testutils import TestCase
OC_STR = (
'OptimizationConfig(objective=Objective(metric_name="m1", minimize=False), '
"outcome_constraints=[OutcomeConstraint(m3 >= -0.25%), "
"OutcomeConstraint(m4 <= 0.25%), "
"ScalarizedOutcomeConstraint(metric_names=['m3', 'm4'], "
"weights=[0.5, 0.5], >= -0.25%)])"
)
MOOC_STR = (
"MultiObjectiveOptimizationConfig(objective=MultiObjective(objectives="
'[Objective(metric_name="m1", minimize=True), '
'Objective(metric_name="m2", minimize=False)]), '
"outcome_constraints=[OutcomeConstraint(m3 >= -0.25%), "
"OutcomeConstraint(m3 <= 0.25%)], objective_thresholds=[])"
)
class OptimizationConfigTest(TestCase):
def setUp(self) -> None:
self.metrics = {
"m1": Metric(name="m1"),
"m2": Metric(name="m2"),
"m3": Metric(name="m3"),
"m4": Metric(name="m4"),
}
self.objective = Objective(metric=self.metrics["m1"], minimize=False)
self.alt_objective = Objective(metric=self.metrics["m3"], minimize=False)
self.multi_objective = MultiObjective(
objectives=[self.objective, self.alt_objective],
)
self.m2_objective = ScalarizedObjective(
metrics=[self.metrics["m1"], self.metrics["m2"]]
)
self.outcome_constraint = OutcomeConstraint(
metric=self.metrics["m3"], op=ComparisonOp.GEQ, bound=-0.25
)
self.additional_outcome_constraint = OutcomeConstraint(
metric=self.metrics["m4"], op=ComparisonOp.LEQ, bound=0.25
)
self.scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
metrics=[self.metrics["m3"], self.metrics["m4"]],
weights=[0.5, 0.5],
op=ComparisonOp.GEQ,
bound=-0.25,
)
self.outcome_constraints = [
self.outcome_constraint,
self.additional_outcome_constraint,
self.scalarized_outcome_constraint,
]
self.single_output_risk_measure = RiskMeasure(
risk_measure="Expectation",
options={"n_w": 2},
)
self.multi_output_risk_measure = RiskMeasure(
risk_measure="MultiOutputExpectation",
options={"n_w": 2},
)
def testInit(self) -> None:
config1 = OptimizationConfig(
objective=self.objective, outcome_constraints=self.outcome_constraints
)
self.assertEqual(str(config1), OC_STR)
with self.assertRaises(ValueError):
            config1.objective = self.alt_objective  # its metric is constrained.
# updating constraints is fine.
config1.outcome_constraints = [self.outcome_constraint]
self.assertEqual(len(config1.metrics), 2)
# objective without outcome_constraints is also supported
config2 = OptimizationConfig(objective=self.objective)
self.assertEqual(config2.outcome_constraints, [])
        # setting objective is fine too, if it's compatible with constraints.
config2.objective = self.m2_objective
# setting constraints on objectives is fine for MultiObjective components.
config2.outcome_constraints = self.outcome_constraints
self.assertEqual(config2.outcome_constraints, self.outcome_constraints)
# Risk measure is correctly registered.
self.assertIsNone(config2.risk_measure)
config3 = OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.single_output_risk_measure,
)
expected_str = (
OC_STR[:-1] + ", risk_measure=RiskMeasure(risk_measure=Expectation, "
"options={'n_w': 2}))"
)
self.assertEqual(str(config3), expected_str)
self.assertIs(config3.risk_measure, self.single_output_risk_measure)
def testEq(self) -> None:
config1 = OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.single_output_risk_measure,
)
config2 = OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.single_output_risk_measure,
)
self.assertEqual(config1, config2)
new_outcome_constraint = OutcomeConstraint(
metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.5
)
config3 = OptimizationConfig(
objective=self.objective,
outcome_constraints=[self.outcome_constraint, new_outcome_constraint],
)
self.assertNotEqual(config1, config3)
def testConstraintValidation(self) -> None:
        # Can't build OptimizationConfig with a MultiObjective.
with self.assertRaises(ValueError):
OptimizationConfig(objective=self.multi_objective)
# Can't constrain on objective metric.
objective_constraint = OutcomeConstraint(
metric=self.objective.metric, op=ComparisonOp.GEQ, bound=0
)
with self.assertRaises(ValueError):
OptimizationConfig(
objective=self.objective, outcome_constraints=[objective_constraint]
)
# Using an outcome constraint for ScalarizedObjective should also raise
with self.assertRaisesRegex(
ValueError, "Cannot constrain on objective metric."
):
OptimizationConfig(
objective=self.m2_objective,
outcome_constraints=[objective_constraint],
)
# Two outcome_constraints on the same metric with the same op
# should raise.
duplicate_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
op=self.outcome_constraint.op,
bound=self.outcome_constraint.bound + 1,
)
with self.assertRaises(ValueError):
OptimizationConfig(
objective=self.objective,
outcome_constraints=[self.outcome_constraint, duplicate_constraint],
)
# Three outcome_constraints on the same metric should raise.
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound,
)
with self.assertRaises(ValueError):
OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints + [opposing_constraint],
)
# Two outcome_constraints on the same metric with different ops and
# flipped bounds (lower < upper) should raise.
add_bound = 1 if self.outcome_constraint.op == ComparisonOp.LEQ else -1
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound + add_bound,
)
with self.assertRaises(ValueError):
OptimizationConfig(
objective=self.objective,
outcome_constraints=([self.outcome_constraint, opposing_constraint]),
)
# Two outcome_constraints on the same metric with different ops and
# bounds should not raise.
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound + 1,
)
config = OptimizationConfig(
objective=self.objective,
outcome_constraints=([self.outcome_constraint, opposing_constraint]),
)
self.assertEqual(
config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
)
def testClone(self) -> None:
config1 = OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.single_output_risk_measure,
)
self.assertEqual(config1, config1.clone())
def testCloneWithArgs(self) -> None:
config1 = OptimizationConfig(
objective=self.objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.single_output_risk_measure,
)
config2 = OptimizationConfig(
objective=self.objective,
)
config3 = OptimizationConfig(
objective=self.objective, risk_measure=_NO_RISK_MEASURE.clone()
)
# Empty args produce exact clone
self.assertEqual(
config1.clone_with_args(),
config1,
)
        # None args are not treated as defaults
self.assertEqual(
config1.clone_with_args(
outcome_constraints=None,
risk_measure=None,
),
config2,
)
        # Arguments that equal the default value are still applied, not treated as defaults
self.assertEqual(
config1.clone_with_args(
outcome_constraints=None,
risk_measure=config3.risk_measure,
),
config3,
)
class MultiObjectiveOptimizationConfigTest(TestCase):
def setUp(self) -> None:
self.metrics = {
"m1": Metric(name="m1", lower_is_better=True),
"m2": Metric(name="m2", lower_is_better=False),
"m3": Metric(name="m3", lower_is_better=False),
}
self.objectives = {
"o1": Objective(metric=self.metrics["m1"]),
"o2": Objective(metric=self.metrics["m2"], minimize=False),
"o3": Objective(metric=self.metrics["m3"], minimize=False),
}
self.objective = Objective(metric=self.metrics["m1"], minimize=False)
self.multi_objective = MultiObjective(
objectives=[self.objectives["o1"], self.objectives["o2"]]
)
self.multi_objective_just_m2 = MultiObjective(
objectives=[self.objectives["o2"]]
)
self.scalarized_objective = ScalarizedObjective(
metrics=list(self.metrics.values()), weights=[1.0, 1.0, 1.0]
)
self.outcome_constraint = OutcomeConstraint(
metric=self.metrics["m3"], op=ComparisonOp.GEQ, bound=-0.25
)
self.additional_outcome_constraint = OutcomeConstraint(
metric=self.metrics["m3"], op=ComparisonOp.LEQ, bound=0.25
)
self.outcome_constraints = [
self.outcome_constraint,
self.additional_outcome_constraint,
]
self.objective_thresholds = [
ObjectiveThreshold(metric=self.metrics["m1"], bound=-1.0, relative=False),
ObjectiveThreshold(metric=self.metrics["m2"], bound=-1.0, relative=False),
]
self.relative_objective_thresholds = [
ObjectiveThreshold(metric=self.metrics["m1"], bound=-1.0, relative=True),
ObjectiveThreshold(
metric=self.metrics["m2"],
op=ComparisonOp.GEQ,
bound=-1.0,
relative=True,
),
]
self.m1_constraint = OutcomeConstraint(
metric=self.metrics["m1"], op=ComparisonOp.LEQ, bound=0.1, relative=True
)
self.m3_constraint = OutcomeConstraint(
metric=self.metrics["m3"], op=ComparisonOp.GEQ, bound=0.1, relative=True
)
self.single_output_risk_measure = RiskMeasure(
risk_measure="Expectation",
options={"n_w": 2},
)
self.multi_output_risk_measure = RiskMeasure(
risk_measure="MultiOutputExpectation",
options={"n_w": 2},
)
def testInit(self) -> None:
config1 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective, outcome_constraints=self.outcome_constraints
)
self.assertEqual(str(config1), MOOC_STR)
with self.assertRaises(TypeError):
config1.objective = self.objective # Wrong objective type
# updating constraints is fine.
config1.outcome_constraints = [self.outcome_constraint]
self.assertEqual(len(config1.metrics), 3)
# objective without outcome_constraints is also supported
config2 = MultiObjectiveOptimizationConfig(objective=self.multi_objective)
# setting objective is fine too, if it's compatible with constraints.
config2.objective = self.multi_objective
# setting constraints on objectives is fine for MultiObjective components.
config2.outcome_constraints = [self.outcome_constraint]
self.assertEqual(config2.outcome_constraints, [self.outcome_constraint])
# construct constraints with objective_thresholds:
config3 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
objective_thresholds=self.objective_thresholds,
)
self.assertEqual(config3.all_constraints, self.objective_thresholds)
# objective_thresholds and outcome constraints together.
config4 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
objective_thresholds=self.objective_thresholds,
outcome_constraints=[self.m3_constraint],
)
self.assertEqual(
config4.all_constraints, [self.m3_constraint] + self.objective_thresholds
)
self.assertEqual(config4.outcome_constraints, [self.m3_constraint])
self.assertEqual(config4.objective_thresholds, self.objective_thresholds)
# verify relative_objective_thresholds works:
config5 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
objective_thresholds=self.relative_objective_thresholds,
)
threshold = config5.objective_thresholds[0]
self.assertTrue(threshold.relative)
self.assertEqual(threshold.bound, -1.0)
# ValueError on wrong direction constraints
with self.assertRaises(UserInputError):
MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
# pyre-fixme[6]: For 2nd param expected
# `Optional[List[ObjectiveThreshold]]` but got
# `List[OutcomeConstraint]`.
objective_thresholds=[self.additional_outcome_constraint],
)
# Test with risk measures.
self.assertIsNone(config5.risk_measure)
config6 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=self.outcome_constraints,
risk_measure=self.multi_output_risk_measure,
)
self.assertIs(config6.risk_measure, self.multi_output_risk_measure)
expected_str = (
MOOC_STR[:-1] + ", risk_measure=RiskMeasure(risk_measure="
"MultiOutputExpectation, options={'n_w': 2}))"
)
self.assertEqual(str(config6), expected_str)
# With scalarized objective.
config7 = MultiObjectiveOptimizationConfig(
objective=self.scalarized_objective,
risk_measure=self.single_output_risk_measure,
)
self.assertIs(config7.risk_measure, self.single_output_risk_measure)
def testEq(self) -> None:
config1 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective, outcome_constraints=self.outcome_constraints
)
config2 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective, outcome_constraints=self.outcome_constraints
)
self.assertEqual(config1, config2)
new_outcome_constraint = OutcomeConstraint(
metric=self.metrics["m3"], op=ComparisonOp.LEQ, bound=0.5
)
config3 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=[self.outcome_constraint, new_outcome_constraint],
)
self.assertNotEqual(config1, config3)
def testConstraintValidation(self) -> None:
# Cannot build with non-MultiObjective
with self.assertRaises(TypeError):
MultiObjectiveOptimizationConfig(objective=self.objective)
# Using an outcome constraint for an objective should raise
outcome_constraint_m1 = OutcomeConstraint(
metric=self.metrics["m1"], op=ComparisonOp.LEQ, bound=1234, relative=False
)
with self.assertRaisesRegex(
ValueError, "Cannot constrain on objective metric."
):
MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=[outcome_constraint_m1],
)
# Two outcome_constraints on the same metric with the same op
# should raise.
duplicate_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
op=self.outcome_constraint.op,
bound=self.outcome_constraint.bound + 1,
)
with self.assertRaises(ValueError):
MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=[self.outcome_constraint, duplicate_constraint],
)
# Three outcome_constraints on the same metric should raise.
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound,
)
with self.assertRaises(ValueError):
MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=self.outcome_constraints + [opposing_constraint],
)
# Two outcome_constraints on the same metric with different ops and
# flipped bounds (lower < upper) should raise.
add_bound = 1 if self.outcome_constraint.op == ComparisonOp.LEQ else -1
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound + add_bound,
)
with self.assertRaises(ValueError):
MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=([self.outcome_constraint, opposing_constraint]),
)
# Two outcome_constraints on the same metric with different ops and
# bounds should not raise.
opposing_constraint = OutcomeConstraint(
metric=self.outcome_constraint.metric,
# pyre-fixme[6]: For 2nd param expected `ComparisonOp` but got `bool`.
op=not self.outcome_constraint.op,
bound=self.outcome_constraint.bound + 1,
)
config = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
outcome_constraints=([self.outcome_constraint, opposing_constraint]),
)
self.assertEqual(
config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
)
def testClone(self) -> None:
config1 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective, outcome_constraints=self.outcome_constraints
)
self.assertEqual(config1, config1.clone())
config2 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
objective_thresholds=self.objective_thresholds,
)
self.assertEqual(config2, config2.clone())
def testCloneWithArgs(self) -> None:
config1 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
objective_thresholds=self.objective_thresholds,
outcome_constraints=self.outcome_constraints,
risk_measure=self.multi_output_risk_measure,
)
config2 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective,
)
config3 = MultiObjectiveOptimizationConfig(
objective=self.multi_objective, risk_measure=_NO_RISK_MEASURE.clone()
)
# Empty args produce exact clone
self.assertEqual(
config1.clone_with_args(),
config1,
)
        # None args are not treated as defaults
self.assertEqual(
config1.clone_with_args(
outcome_constraints=None,
objective_thresholds=None,
risk_measure=None,
),
config2,
)
        # Arguments that equal the default value are still applied, not treated as defaults
self.assertEqual(
config1.clone_with_args(
outcome_constraints=None,
objective_thresholds=None,
risk_measure=config3.risk_measure,
),
config3,
)
|
bf69c8a5f361370d9f593572194a124133638eb7
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/perfkitbenchmarker/linux_packages/pgbench.py
|
c325ddc0de029230acc6978599cd6236625a1cfd
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,583
|
py
|
pgbench.py
|
# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing pgbench installation, cleanup and run functions."""
import time
from perfkitbenchmarker import publisher
from perfkitbenchmarker import sample
APT_PACKAGES = (
'postgresql-client-common',
'postgresql-client',
'postgresql-contrib',
)
def AptInstall(vm):
"""Installs pgbench on the Debian VM."""
for package in APT_PACKAGES:
vm.InstallPackages(package)
def YumInstall(vm):
"""Raises exception when trying to install on yum-based VMs."""
raise NotImplementedError(
'PKB currently only supports the installation of pgbench on '
'Debian-based VMs')
def AptUninstall(vm):
"""Removes pgbench from the Debian VM."""
remove_str = 'sudo apt-get --purge autoremove -y '
for package in APT_PACKAGES:
vm.RemoteCommand(remove_str + package)
def MakeSamplesFromOutput(pgbench_stderr, num_clients, num_jobs,
additional_metadata):
"""Creates sample objects from the given pgbench output and metadata.
Two samples will be returned, one containing a latency list and
the other a tps (transactions per second) list. Each will contain
N floating point samples, where N = FLAGS.pgbench_seconds_per_test.
Args:
pgbench_stderr: stderr from the pgbench run command
num_clients: number of pgbench clients used
num_jobs: number of pgbench jobs (threads) used
additional_metadata: additional metadata to add to each sample
Returns:
A list containing a latency sample and a tps sample. Each sample
consists of a list of floats, sorted by time that were collected
by running pgbench with the given client and job counts.
"""
lines = pgbench_stderr.splitlines()[2:]
tps_numbers = [float(line.split(' ')[3]) for line in lines]
latency_numbers = [float(line.split(' ')[6]) for line in lines]
metadata = additional_metadata.copy()
metadata.update({'clients': num_clients, 'jobs': num_jobs})
tps_metadata = metadata.copy()
tps_metadata.update({'tps': tps_numbers})
latency_metadata = metadata.copy()
latency_metadata.update({'latency': latency_numbers})
tps_sample = sample.Sample('tps_array', -1, 'tps', tps_metadata)
latency_sample = sample.Sample('latency_array', -1, 'ms', latency_metadata)
return [tps_sample, latency_sample]
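# Illustrative sketch of the parsing above (not from the source; the line
# follows pgbench's --progress output format, values made up):
#
#   progress: 1.0 s, 2438.0 tps, lat 12.405 ms stddev 3.2
#
# Split on spaces, field 3 is the tps value and field 6 the latency in ms,
# which is why the two banner lines at the top of stderr are skipped.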
def RunPgBench(benchmark_spec,
relational_db,
vm,
test_db_name,
client_counts,
job_counts,
seconds_to_pause,
seconds_per_test,
metadata,
file=None,
path=None):
"""Run Pgbench on the client VM.
Args:
benchmark_spec: Benchmark spec of the run
relational_db: Relational database object
vm: Client VM
test_db_name: The name of the database
client_counts: Number of client
job_counts: Number of job
seconds_to_pause: Seconds to pause between test
seconds_per_test: Seconds per test
metadata: Metadata of the benchmark
file: Filename of the benchmark
path: File path of the benchmar.
"""
connection_string = relational_db.client_vm_query_tools.GetConnectionString(
database_name=test_db_name)
if file and path:
metadata['pgbench_file'] = file
if job_counts and len(client_counts) != len(job_counts):
raise ValueError('Length of clients and jobs must be the same.')
for i in range(len(client_counts)):
time.sleep(seconds_to_pause)
client = client_counts[i]
if job_counts:
jobs = job_counts[i]
else:
jobs = min(client, 16)
command = (
f'ulimit -n 10000 && pgbench {connection_string} --client={client} '
f'--jobs={jobs} --time={seconds_per_test} --progress=1 '
'-r'
)
if file and path:
command = f'cd {path} && {command} --file={file}'
_, stderr = vm.RobustRemoteCommand(command)
samples = MakeSamplesFromOutput(stderr, client, jobs, metadata)
publisher.PublishRunStageSamples(benchmark_spec, samples)
|
1c1110069a94d2fcd3394821ee5e8dd209f0b105
|
9868f287cfa54a8ed6c67b91b59d4f09bbd9410c
|
/retired_benchmarks/minigo/tensorflow/minigo/bigtable_output.py
|
421b13f52a24be6aa423242aac4e3ee6a851c895
|
[
"Apache-2.0"
] |
permissive
|
mlcommons/training
|
41c7e21ea074b5f5bb040d3602e621c3e987cc0e
|
2f4a93fb4888180755a8ef55f4b977ef8f60a89e
|
refs/heads/master
| 2023-09-05T12:45:53.020925
| 2023-08-03T15:43:54
| 2023-08-03T15:43:54
| 127,351,529
| 431
| 162
|
Apache-2.0
| 2023-09-07T23:35:53
| 2018-03-29T21:56:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
bigtable_output.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to write data to Bigtable.
"""
import sgf_wrapper
def process_game(path):
"""
Get CBT metadata from a SGF file.
Calling function should probably overwrite 'tool'.
"""
with open(path) as f:
sgf_contents = f.read()
root_node = sgf_wrapper.get_sgf_root_node(sgf_contents)
assert root_node.properties['FF'] == ['4'], ("Bad game record", path)
result = root_node.properties['RE'][0]
assert result.lower()[0] in 'bw', result
assert result.lower()[1] == '+', result
black_won = result.lower()[0] == 'b'
length = 0
node = root_node.next
while node:
props = node.properties
length += 1 if props.get('B') or props.get('W') else 0
node = node.next
return {
"black": root_node.properties['PB'][0],
"white": root_node.properties['PW'][0],
# All values are strings, "1" for true and "0" for false here
"black_won": '1' if black_won else '0',
"white_won": '0' if black_won else '1',
"result": result,
"length": str(length),
"sgf": path,
"tag": "",
"tool": "bigtable_output",
}
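# Illustrative usage (the path is hypothetical):
#
#   row = process_game("sgf/2019-01-01/000001.sgf")
#   row["tool"] = "my_pipeline"  # callers are expected to overwrite 'tool'
#   # row holds string-valued CBT metadata, e.g. row["black_won"] in {"0", "1"}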
|
e0591d6d3a33ca7d151f2df99186565f9be65be2
|
96cc2d2af983dd2df0e59cf8ecd1181b0a091ab2
|
/02分词/分词算法/02HMM分词/HMM_segment.py
|
e0bf587aeffa38c18ff65352e2eacc50806361f7
|
[] |
no_license
|
zhang17173/Event-Extraction
|
37400d4e46363340d8e61a9dc7eca300caebe26a
|
9fb0780868b9b605ed5ace2855d6b5d4cb05dd51
|
refs/heads/master
| 2023-02-18T10:54:53.847749
| 2023-02-15T12:39:16
| 2023-02-15T12:39:16
| 199,839,243
| 532
| 137
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,457
|
py
|
HMM_segment.py
|
#!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Author:Zhang Shiwei
import numpy as np
import math
# Each character has 4 hidden states, coded 0/1/2/3: start (first character of
# a word), middle (inside a word), end (last character of a word), single (the
# character is a word on its own).
# Supervised learning: the training corpus is already segmented into words.
# Note: ord() returns the Unicode code point of a character.
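# Worked example of the tagging (illustrative): segmenting "我爱北京" labels
# 我 -> single, 爱 -> single, 北 -> start, 京 -> end, i.e. the hidden state
# sequence [3, 3, 0, 2].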
infinite = float(-2.0 ** 31)  # negative infinity
# Estimate the parameters (supervised learning, with both O and I known); a
# simple count over the corpus is enough.
# Takes the corpus as a string that is already word-segmented, plus the
# separator used between words.
def mle(train_data, split_char=" "):
tokens = train_data.split(split_char)
    # model parameters
    pi = np.zeros(4)  # initial state probabilities
    A = np.zeros((4, 4))  # state transition matrix
    B = np.zeros((4, 65536))  # emission matrix: observation distribution per hidden state
last_token = tokens[0]
for token in tokens:
token = token.strip()
token_len = len(token)
        # the previous token ends in state single if its length is 1, otherwise in state end
        last_token_state = 3 if len(last_token) == 1 else 2
        # skip empty tokens
if token_len == 0:
continue
        # single-character word
        if token_len == 1:
            pi[3] += 1
            A[last_token_state][3] += 1  # previous state transitions to single
            # count the observed character under state single
            B[3][ord(token)] += 1
elif token_len == 2:
pi[0] += 1
pi[2] += 1
            # start transitions to end
            A[0][2] += 1
            A[last_token_state][0] += 1
            # count the observed characters under states start and end
B[0][ord(token[0])] += 1
B[2][ord(token[1])] += 1
else:
pi[0] += 1
pi[2] += 1
pi[1] += token_len - 2
            # transitions start->middle, middle->middle, middle->end
A[0][1] += 1
A[1][1] += (token_len - 3)
A[1][2] += 1
A[last_token_state][0] += 1
            # count the observed characters under states start, middle, end
            B[0][ord(token[0])] += 1  # start
            B[2][ord(token[token_len - 1])] += 1  # end
            for i in range(1, token_len - 1):  # middle
B[1][ord(token[i])] += 1
last_token = token
    # take logarithms
sum1 = np.sum(pi)
for i in range(len(pi)):
pi[i] = math.log(pi[i] / sum1)
log_val(A)
log_val(B)
return pi, A, B
# Take the logarithm of pi, A, B, because the raw probabilities are tiny and
# would underflow.
def log_val(data):
    # Each row of the matrix sums to 1; convert every row to log probabilities.
col_len = data.shape[1]
for k, line in enumerate(data):
sum1 = np.sum(line)
log_sum = math.log(sum1)
for i in range(col_len):
if data[k][i] == 0:
data[k][i] = infinite
else:
data[k][i] = math.log(data[k][i]) - log_sum
# Prediction: the Viterbi algorithm
def viterbi(pi, A, B, O):
O = O.strip()
O_len = len(O)
pi_len = len(pi)
if O_len == 0:
return
    # states[t][i] records which previous state produced the maximum when
    # computing delta[t](i), i.e. which delta[t-1](q) it came from
    states = np.full(shape=(O_len, pi_len), fill_value=0.0)
    # deltas stores all computed delta values
    deltas = np.full(shape=(O_len, pi_len), fill_value=0.0)
    # initialization: P(I, O1) = max{P(O1|I) * P(I)}
for j in range(0, pi_len):
        deltas[0][j] = pi[j] + B[j][ord(O[0])]  # addition instead of multiplication, since we work in log space
    # DP step: compute P(I | O1, O2, ..., Ot, I1, I2, ..., It-1)
    for t in range(1, O_len):
        # delta[t](i) = max_j{ delta[t-1](j) + A[j][i] } + B[i][ord(O[t])], with j over all states
        for i in range(0, pi_len):
deltas[t][i] = deltas[t - 1][0] + A[0][i]
            # find the maximal delta[t](i)
for j in range(1, pi_len):
current = deltas[t - 1][j] + A[j][i]
if current > deltas[t][i]:
deltas[t][i] = current
                    # remember which previous state gave the current maximum of delta[t](i)
states[t][i] = j
deltas[t][i] += B[i][ord(O[t])]
    # backtrack to recover the most probable path
max1 = deltas[O_len - 1][0]
best_state = np.zeros(O_len)
    # first find the most likely state for the last observation
for i in range(1, pi_len):
if deltas[O_len - 1][i] > max1:
max1 = deltas[O_len - 1][i]
best_state[O_len - 1] = i
    # backtrack from the best final state to recover the full state sequence
for i in range(O_len - 2, -1, -1):
best_state[i] = states[i + 1][int(best_state[i + 1])]
return best_state
def output_words(decode, O):
T = len(O)
with open("result.txt", "a", encoding='utf-8') as f:
for i in range(0, T):
            # insert a word boundary when the predicted state is end or single
if decode[i] == 2 or decode[i] == 3:
f.write(O[i]+' ')
else:
f.write(O[i])
# start training
f = open("./pku_training.utf8", "r", encoding="utf-8")
data = f.read()[3:]
f.close()
pi, A, B = mle(data)  # training finished
# testing
f2 = open("./test.txt", "r", encoding="utf-8")
O = f2.read().strip()
states = viterbi(pi, A, B, O)
output_words(states, O)
|
b1510c65349c19945695927b189f93fb768261d1
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-mixpanel/unit_tests/test_streams.py
|
a44bd95af01340c3c626783d27d8a4469873dcc6
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Elastic-2.0"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 16,707
|
py
|
test_streams.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
from datetime import timedelta
from unittest.mock import MagicMock
import pendulum
import pytest
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from source_mixpanel.streams import (
Annotations,
CohortMembers,
Cohorts,
Engage,
EngageSchema,
Export,
ExportSchema,
Funnels,
FunnelsList,
IncrementalMixpanelStream,
MixpanelStream,
Revenue,
)
from source_mixpanel.utils import read_full_refresh
from .utils import get_url_to_mock, read_incremental, setup_response
logger = AirbyteLogger()
MIXPANEL_BASE_URL = "https://mixpanel.com/api/2.0/"
@pytest.fixture
def patch_base_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(MixpanelStream, "path", "v0/example_endpoint")
mocker.patch.object(MixpanelStream, "primary_key", "test_primary_key")
mocker.patch.object(MixpanelStream, "__abstractmethods__", set())
@pytest.fixture
def patch_incremental_base_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(IncrementalMixpanelStream, "path", "v0/example_endpoint")
mocker.patch.object(IncrementalMixpanelStream, "primary_key", "test_primary_key")
mocker.patch.object(IncrementalMixpanelStream, "cursor_field", "date")
mocker.patch.object(IncrementalMixpanelStream, "__abstractmethods__", set())
@pytest.fixture(autouse=True)
def time_sleep_mock(mocker):
time_mock = mocker.patch("time.sleep", lambda x: None)
yield time_mock
def test_url_base(patch_base_class, config):
stream = MixpanelStream(authenticator=MagicMock(), **config)
assert stream.url_base == "https://mixpanel.com/api/2.0/"
def test_request_headers(patch_base_class, config):
stream = MixpanelStream(authenticator=MagicMock(), **config)
assert stream.request_headers(stream_state={}) == {"Accept": "application/json"}
def test_updated_state(patch_incremental_base_class, config):
stream = IncrementalMixpanelStream(authenticator=MagicMock(), **config)
updated_state = stream.get_updated_state(
current_stream_state={"date": "2021-01-25T00:00:00Z"}, latest_record={"date": "2021-02-25T00:00:00Z"}
)
assert updated_state == {"date": "2021-02-25T00:00:00Z"}
@pytest.fixture
def cohorts_response():
return setup_response(
200,
[
{
"count": 150,
"is_visible": 1,
"description": "This cohort is visible, has an id = 1000, and currently has 150 users.",
"created": "2019-03-19 23:49:51",
"project_id": 1,
"id": 1000,
"name": "Cohort One",
},
{
"count": 25,
"is_visible": 0,
"description": "This cohort isn't visible, has an id = 2000, and currently has 25 users.",
"created": "2019-04-02 23:22:01",
"project_id": 1,
"id": 2000,
"name": "Cohort Two",
},
],
)
def test_cohorts_stream_incremental(requests_mock, cohorts_response, config):
requests_mock.register_uri("GET", MIXPANEL_BASE_URL + "cohorts/list", cohorts_response)
stream = Cohorts(authenticator=MagicMock(), **config)
records = read_incremental(stream, stream_state={"created": "2019-04-02 23:22:01"}, cursor_field=["created"])
records_length = sum(1 for _ in records)
assert records_length == 1
@pytest.fixture
def engage_response():
return setup_response(
200,
{
"page": 0,
"page_size": 1000,
"session_id": "1234567890-EXAMPL",
"status": "ok",
"total": 2,
"results": [
{
"$distinct_id": "9d35cd7f-3f06-4549-91bf-198ee58bb58a",
"$properties": {
"$created": "2008-12-12T11:20:47",
"$browser": "Chrome",
"$browser_version": "83.0.4103.116",
"$email": "clark@asw.com",
"$first_name": "Clark",
"$last_name": "Kent",
"$name": "Clark Kent",
},
},
{
"$distinct_id": "cd9d357f-3f06-4549-91bf-158bb598ee8a",
"$properties": {
"$created": "2008-11-12T11:20:47",
"$browser": "Firefox",
"$browser_version": "83.0.4103.116",
"$email": "bruce@asw.com",
"$first_name": "Bruce",
"$last_name": "Wayne",
"$name": "Bruce Wayne",
},
},
],
},
)
def test_engage_stream_incremental(requests_mock, engage_response, config):
requests_mock.register_uri("POST", MIXPANEL_BASE_URL + "engage?page_size=1000", engage_response)
stream = Engage(authenticator=MagicMock(), **config)
stream_state = {"created": "2008-12-12T11:20:47"}
records = list(read_incremental(stream, stream_state, cursor_field=["created"]))
assert len(records) == 1
assert stream.get_updated_state(current_stream_state=stream_state, latest_record=records[-1]) == {"created": "2008-12-12T11:20:47"}
def test_cohort_members_stream_incremental(requests_mock, engage_response, cohorts_response, config):
requests_mock.register_uri("POST", MIXPANEL_BASE_URL + "engage?page_size=1000", engage_response)
requests_mock.register_uri("GET", MIXPANEL_BASE_URL + "cohorts/list", cohorts_response)
stream = CohortMembers(authenticator=MagicMock(), **config)
stream.set_cursor(["created"])
stream_state = {"created": "2008-12-12T11:20:47"}
records = stream.read_records(
sync_mode=SyncMode.incremental, cursor_field=["created"], stream_state=stream_state, stream_slice={"id": 1000}
)
records = [item for item in records]
assert len(records) == 1
assert stream.get_updated_state(current_stream_state=stream_state, latest_record=records[-1]) == {"created": "2008-12-12T11:20:47"}
@pytest.fixture
def funnels_list_response():
return setup_response(200, [{"funnel_id": 1, "name": "Signup funnel"}])
def test_funnels_list_stream(requests_mock, config, funnels_list_response):
stream = FunnelsList(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), funnels_list_response)
records = stream.read_records(sync_mode=SyncMode.full_refresh)
records_length = sum(1 for _ in records)
assert records_length == 1
@pytest.fixture
def funnels_list_url(config):
funnel_list = FunnelsList(authenticator=MagicMock(), **config)
return get_url_to_mock(funnel_list)
@pytest.fixture
def funnels_response(start_date):
first_date = start_date + timedelta(days=1)
second_date = start_date + timedelta(days=10)
return setup_response(
200,
{
"meta": {"dates": [str(first_date), str(second_date)]},
"data": {
str(first_date): {
"steps": [],
"analysis": {
"completion": 20524,
"starting_amount": 32688,
"steps": 2,
"worst": 1,
},
},
str(second_date): {
"steps": [],
"analysis": {
"completion": 20500,
"starting_amount": 34750,
"steps": 2,
"worst": 1,
},
},
},
},
)
def test_funnels_stream(requests_mock, config, funnels_response, funnels_list_response, funnels_list_url):
stream = Funnels(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", funnels_list_url, funnels_list_response)
requests_mock.register_uri("GET", get_url_to_mock(stream), funnels_response)
stream_slices = stream.stream_slices(sync_mode=SyncMode.incremental)
records_arr = []
for stream_slice in stream_slices:
records = stream.read_records(sync_mode=SyncMode.incremental, stream_slice=stream_slice)
for record in records:
records_arr.append(record)
assert len(records_arr) == 4
last_record = records_arr[-1]
    # Test without a current state date
new_state = stream.get_updated_state(current_stream_state={}, latest_record=records_arr[-1])
assert new_state == {str(last_record["funnel_id"]): {"date": last_record["date"]}}
    # Test with a current state earlier than the last record date
last_record_date = pendulum.parse(last_record["date"]).date()
new_state = stream.get_updated_state(
current_stream_state={str(last_record["funnel_id"]): {"date": str(last_record_date - timedelta(days=1))}},
latest_record=records_arr[-1],
)
assert new_state == {str(last_record["funnel_id"]): {"date": last_record["date"]}}
    # Test with a current state later than the last record date
new_state = stream.get_updated_state(
current_stream_state={str(last_record["funnel_id"]): {"date": str(last_record_date + timedelta(days=1))}},
latest_record=records_arr[-1],
)
assert new_state == {str(last_record["funnel_id"]): {"date": str(last_record_date + timedelta(days=1))}}
@pytest.fixture
def engage_schema_response():
return setup_response(
200,
{
"results": {
"$browser": {"count": 124, "type": "string"},
"$browser_version": {"count": 124, "type": "string"},
"$created": {"count": 124, "type": "string"},
}
},
)
def test_engage_schema(requests_mock, engage_schema_response, config):
stream = EngageSchema(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), engage_schema_response)
records = stream.read_records(sync_mode=SyncMode.full_refresh)
records_length = sum(1 for _ in records)
assert records_length == 3
def test_update_engage_schema(requests_mock, config):
stream = EngageSchema(authenticator=MagicMock(), **config)
requests_mock.register_uri(
"GET",
get_url_to_mock(stream),
setup_response(
200,
{
"results": {
"$someNewSchemaField": {"count": 124, "type": "string"},
}
},
),
)
engage_stream = Engage(authenticator=MagicMock(), **config)
engage_schema = engage_stream.get_json_schema()
assert "someNewSchemaField" in engage_schema["properties"]
@pytest.fixture
def annotations_response():
return setup_response(
200,
{
"annotations": [
{"id": 640999, "project_id": 2117889, "date": "2021-06-16 00:00:00", "description": "Looks good"},
{"id": 640000, "project_id": 2117889, "date": "2021-06-16 00:00:00", "description": "Looks bad"},
]
},
)
def test_annotations_stream(requests_mock, annotations_response, config):
stream = Annotations(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), annotations_response)
stream_slice = {"start_date": "2017-01-25T00:00:00Z", "end_date": "2017-02-25T00:00:00Z"}
    # read records for a single slice
records = stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice)
records_length = sum(1 for _ in records)
assert records_length == 2
@pytest.fixture
def revenue_response():
return setup_response(
200,
{
"computed_at": "2021-07-03T12:43:48.889421+00:00",
"results": {
"$overall": {"amount": 0.0, "count": 124, "paid_count": 0},
"2021-06-01": {"amount": 0.0, "count": 124, "paid_count": 0},
"2021-06-02": {"amount": 0.0, "count": 124, "paid_count": 0},
},
"session_id": "162...",
"status": "ok",
},
)
def test_revenue_stream(requests_mock, revenue_response, config):
stream = Revenue(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), revenue_response)
stream_slice = {"start_date": "2017-01-25T00:00:00Z", "end_date": "2017-02-25T00:00:00Z"}
    # read records for a single slice
records = stream.read_records(sync_mode=SyncMode.incremental, stream_slice=stream_slice)
records_length = sum(1 for _ in records)
assert records_length == 2
@pytest.fixture
def export_schema_response():
return setup_response(
200,
{
"$browser": {"count": 6},
"$browser_version": {"count": 6},
"$current_url": {"count": 6},
"mp_lib": {"count": 6},
"noninteraction": {"count": 6},
"$event_name": {"count": 6},
"$duration_s": {},
"$event_count": {},
"$origin_end": {},
"$origin_start": {},
},
)
def test_export_schema(requests_mock, export_schema_response, config):
stream = ExportSchema(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), export_schema_response)
records = stream.read_records(sync_mode=SyncMode.full_refresh)
records_length = sum(1 for _ in records)
assert records_length == 10
@pytest.fixture
def export_response():
return setup_response(
200,
{
"event": "Viewed E-commerce Page",
"properties": {
"time": 1623860880, # 2021-06-16T16:28:00
"distinct_id": "1d694fd9-31a5-4b99-9eef-ae63112063ed",
"$browser": "Chrome",
"$browser_version": "91.0.4472.101",
"$current_url": "https://unblockdata.com/solutions/e-commerce/",
"$insert_id": "c5eed127-c747-59c8-a5ed-d766f48e39a4",
"$mp_api_endpoint": "api.mixpanel.com",
"mp_lib": "Segment: analytics-wordpress",
"mp_processing_time_ms": 1623886083321,
"noninteraction": True,
},
},
)
def test_export_stream(requests_mock, export_response, config):
stream = Export(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), export_response)
stream_slice = {"start_date": "2017-01-25T00:00:00Z", "end_date": "2017-02-25T00:00:00Z"}
    # read records for a single slice
records = stream.read_records(sync_mode=SyncMode.incremental, stream_slice=stream_slice)
records_length = sum(1 for _ in records)
assert records_length == 1
def test_export_stream_request_params(config):
stream = Export(authenticator=MagicMock(), **config)
stream_slice = {"start_date": "2017-01-25T00:00:00Z", "end_date": "2017-02-25T00:00:00Z"}
stream_state = {"date": "2021-06-16T17:00:00"}
request_params = stream.request_params(stream_state=None, stream_slice=stream_slice)
assert "where" not in request_params
request_params = stream.request_params(stream_state={}, stream_slice=stream_slice)
assert "where" not in request_params
request_params = stream.request_params(stream_state=stream_state, stream_slice=stream_slice)
assert "where" in request_params
timestamp = int(pendulum.parse("2021-06-16T17:00:00Z").timestamp())
assert request_params.get("where") == f'properties["$time"]>=datetime({timestamp})'
def test_export_terminated_early(requests_mock, config):
stream = Export(authenticator=MagicMock(), **config)
requests_mock.register_uri("GET", get_url_to_mock(stream), text="terminated early\n")
assert list(read_full_refresh(stream)) == []
def test_export_iter_dicts(config):
stream = Export(authenticator=MagicMock(), **config)
record = {"key1": "value1", "key2": "value2"}
record_string = json.dumps(record)
assert list(stream.iter_dicts([record_string, record_string])) == [record, record]
    # combine a record that was split across two adjacent parts
assert list(stream.iter_dicts([record_string, record_string[:2], record_string[2:], record_string])) == [record, record, record]
    # drop record fragments whose parts are not adjacent
assert list(stream.iter_dicts([record_string, record_string[:2], record_string, record_string[2:]])) == [record, record]
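# The two cases above pin down iter_dicts: a JSON line split across two
# adjacent chunks is reassembled, while fragments separated by a complete
# record are dropped. A minimal reference sketch honoring both cases
# (hypothetical -- not the connector's actual implementation):
def iter_dicts_sketch(lines):
    buffer = ""
    for line in lines:
        try:
            yield json.loads(line)
            buffer = ""  # a complete record invalidates any pending fragment
        except json.JSONDecodeError:
            buffer += line
            try:
                yield json.loads(buffer)
                buffer = ""
            except json.JSONDecodeError:
                pass  # still incomplete; wait for the next chunk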
# --- cms-sw/cmssw: /EventFilter/DTRawToDigi/python/dtunpackerCommissioning_cfi.py (Apache-2.0) ---
import FWCore.ParameterSet.Config as cms
import EventFilter.DTRawToDigi.dtUnpackingModule_cfi
# Module for DT data unpacking: produces a DTDigiCollection and - on demand -
# a DTLocalTriggerCollection
# Configuration for Commissioning data
dtunpacker = EventFilter.DTRawToDigi.dtUnpackingModule_cfi.dtUnpackingModule.clone()
dtunpacker.dataType = cms.string('DDU')
dtunpacker.inputLabel = cms.InputTag('rawDataCollector')
dtunpacker.useStandardFEDid = cms.untracked.bool(True)
dtunpacker.dqmOnly = cms.bool(False)
dtunpacker.readOutParameters = cms.PSet(
debug = cms.untracked.bool(False),
rosParameters = cms.PSet(
writeSC = cms.untracked.bool(True),
readingDDU = cms.untracked.bool(True),
performDataIntegrityMonitor = cms.untracked.bool(False),
readDDUIDfromDDU = cms.untracked.bool(True),
debug = cms.untracked.bool(False),
localDAQ = cms.untracked.bool(True)
),
localDAQ = cms.untracked.bool(True),
performDataIntegrityMonitor = cms.untracked.bool(False)
)
# --- strengthen/LeetCode: /Python3/111.py (MIT) ---
__________________________________________________________________________________________________
sample 28 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
if not root:
return 0
if not root.left:
return 1 + self.minDepth(root.right)
if not root.right:
return 1 + self.minDepth(root.left)
return 1 + min(self.minDepth(root.left), self.minDepth(root.right))
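# Note: the one-sided cases above matter because minimum depth is measured to
# a *leaf*. When a child is missing, taking min() over both sides would wrongly
# return 1 + 0 through the absent subtree, so we recurse only into the child
# that exists.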
__________________________________________________________________________________________________
sample 14236 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
if root is None:
return 0
node_queue = []
node_queue.append((root, 1))
depth = 0
min_depth = None
while len(node_queue) > 0:
(curr_node, dep_lvl) = node_queue.pop(0)
if not curr_node.left and not curr_node.right:
min_depth = dep_lvl if not min_depth or min_depth > dep_lvl else min_depth
if curr_node.left:
node_queue.append((curr_node.left, dep_lvl+1))
if curr_node.right:
node_queue.append((curr_node.right, dep_lvl+1))
return min_depth
__________________________________________________________________________________________________
# --- home-assistant/core: /tests/components/snooz/test_init.py (Apache-2.0) ---
"""Test Snooz configuration."""
from __future__ import annotations
import pytest
from homeassistant.core import HomeAssistant
from . import SnoozFixture
# This test needs to be adjusted to remove lingering tasks
@pytest.mark.parametrize("expected_lingering_tasks", [True])
async def test_removing_entry_cleans_up_connections(
hass: HomeAssistant, mock_connected_snooz: SnoozFixture
) -> None:
"""Tests setup and removal of a config entry, ensuring connections are cleaned up."""
await hass.config_entries.async_remove(mock_connected_snooz.entry.entry_id)
await hass.async_block_till_done()
assert not mock_connected_snooz.device.is_connected
# This test needs to be adjusted to remove lingering tasks
@pytest.mark.parametrize("expected_lingering_tasks", [True])
async def test_reloading_entry_cleans_up_connections(
hass: HomeAssistant, mock_connected_snooz: SnoozFixture
) -> None:
"""Test reloading an entry disconnects any existing connections."""
await hass.config_entries.async_reload(mock_connected_snooz.entry.entry_id)
await hass.async_block_till_done()
assert not mock_connected_snooz.device.is_connected
# --- pycket/pycket: /pycket/ast_visitor.py (MIT) ---
from pycket.interpreter import (
App,
Begin,
Begin0,
BeginForSyntax,
CaseLambda,
Cell,
CellRef,
DefineValues,
If,
Lambda,
Let,
Letrec,
LexicalVar,
Module,
ModuleVar,
LinkletVar,
Quote,
QuoteSyntax,
Require,
SetBang,
ToplevelVar,
VariableReference,
WithContinuationMark,
make_let,
make_letrec,
)
from rpython.rlib.objectmodel import specialize
class ASTVisitor(object):
"""
    An abstract visitor class for the AST classes imported above.
A subclass need only define handler functions for the relevant portions
of the AST, as the default implementations in this class pass along the
relevant data.
"""
@specialize.argtype(0)
def visit_cell(self, ast, *args):
assert isinstance(ast, Cell)
expr = ast.expr.visit(self, *args)
return Cell(expr, need_cell_flags=ast.need_cell_flags)
@specialize.argtype(0)
def visit_quote(self, ast, *args):
assert isinstance(ast, Quote)
return ast
@specialize.argtype(0)
def visit_quote_syntax(self, ast, *args):
assert isinstance(ast, QuoteSyntax)
return ast
@specialize.argtype(0)
def visit_variable_reference(self, ast, *args):
assert isinstance(ast, VariableReference)
return ast
@specialize.argtype(0)
def visit_with_continuation_mark(self, ast, *args):
assert isinstance(ast, WithContinuationMark)
key = ast.key.visit(self, *args)
value = ast.value.visit(self, *args)
body = ast.body.visit(self, *args)
return WithContinuationMark(key, value, body)
@specialize.argtype(0)
def visit_app(self, ast, *args):
assert isinstance(ast, App)
rator = ast.rator.visit(self, *args)
rands = [a.visit(self, *args) for a in ast.rands]
return App.make(rator, rands, ast.env_structure)
@specialize.argtype(0)
def visit_begin0(self, ast, *args):
assert isinstance(ast, Begin0)
first = ast.first.visit(self, *args)
body = [b.visit(self, *args) for b in ast.body]
return Begin0.make(first, body)
@specialize.argtype(0)
def visit_begin(self, ast, *args):
assert isinstance(ast, Begin)
body = [b.visit(self, *args) for b in ast.body]
return Begin.make(body)
@specialize.argtype(0)
def visit_begin_for_syntax(self, ast, *args):
assert isinstance(ast, BeginForSyntax)
return ast
@specialize.argtype(0)
def visit_cell_ref(self, ast, *args):
assert isinstance(ast, CellRef)
return ast
@specialize.argtype(0)
def visit_lexical_var(self, ast, *args):
assert isinstance(ast, LexicalVar)
return ast
@specialize.argtype(0)
def visit_module_var(self, ast, *args):
assert isinstance(ast, ModuleVar)
return ast
@specialize.argtype(0)
def visit_linklet_var(self, ast, *args):
assert isinstance(ast, LinkletVar)
return ast
@specialize.argtype(0)
def visit_toplevel_var(self, ast, *args):
assert isinstance(ast, ToplevelVar)
return ast
@specialize.argtype(0)
def visit_set_bang(self, ast, *args):
assert isinstance(ast, SetBang)
var = ast.var.visit(self, *args)
rhs = ast.rhs.visit(self, *args)
return SetBang(var, rhs)
@specialize.argtype(0)
def visit_if(self, ast, *args):
assert isinstance(ast, If)
tst = ast.tst.visit(self, *args)
thn = ast.thn.visit(self, *args)
els = ast.els.visit(self, *args)
return If.make(tst, thn, els)
@specialize.argtype(0)
def visit_case_lambda(self, ast, *args):
assert isinstance(ast, CaseLambda)
lams = [l.visit(self, *args) for l in ast.lams]
return CaseLambda(lams, recursive_sym=ast.recursive_sym, arity=ast._arity)
@specialize.argtype(0)
def visit_lambda(self, ast, *args):
from pycket.interpreter import make_lambda
assert isinstance(ast, Lambda)
body = [b.visit(self, *args) for b in ast.body]
return make_lambda(ast.formals, ast.rest, body, sourceinfo=ast.sourceinfo)
@specialize.argtype(0)
def visit_letrec(self, ast, *args):
assert isinstance(ast, Letrec)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_letrec(vars, rhss, body)
@specialize.argtype(0)
def visit_let(self, ast, *args):
assert isinstance(ast, Let)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_let(vars, rhss, body)
@specialize.argtype(0)
def visit_define_values(self, ast, *args):
assert isinstance(ast, DefineValues)
rhs = ast.rhs.visit(self, *args)
return DefineValues(ast.names, rhs, ast.display_names)
@specialize.argtype(0)
def visit_module(self, ast, *args):
""" Must not produce a new module AST """
assert isinstance(ast, Module)
for i, b in enumerate(ast.body):
ast.body[i] = b.visit(self, *args)
for i, r in enumerate(ast.requires):
ast.requires[i] = r.visit(self, *args)
return ast
@specialize.argtype(0)
def visit_require(self, ast, *args):
assert isinstance(ast, Require)
return ast
class CopyVisitor(ASTVisitor):
def visit_variable_reference(self, ast):
assert isinstance(ast, VariableReference)
return VariableReference(ast.var, ast.path, ast.is_mut)
def visit_quote(self, ast):
assert isinstance(ast, Quote)
return Quote(ast.w_val)
def visit_lexical_var(self, ast):
assert isinstance(ast, LexicalVar)
return LexicalVar(ast.sym, ast.env_structure)
def visit_module_var(self, ast):
assert isinstance(ast, ModuleVar)
var = ModuleVar(ast.sym, ast.srcmod, ast.srcsym, ast.path)
var.modenv = ast.modenv
var.w_value = ast.w_value
return var
def visit_cell_ref(self, ast):
assert isinstance(ast, CellRef)
return CellRef(ast.sym, ast.env_structure)
def visit_let(self, ast):
assert isinstance(ast, Let)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Let(ast.args,
ast.counts,
rhss,
body,
ast.remove_num_envs)
result.copy_body_pruning(ast)
return result
def visit_letrec(self, ast):
assert isinstance(ast, Letrec)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Letrec(ast.args,
ast.counts,
rhss,
body)
result.copy_body_pruning(ast)
return result
def visit_begin(self, ast):
assert isinstance(ast, Begin)
body = [b.visit(self) for b in ast.body]
result = Begin(body)
result.copy_body_pruning(ast)
return result
def visit_begin0(self, ast):
assert isinstance(ast, Begin0)
fst = ast.first.visit(self)
rst = [r.visit(self) for r in ast.body]
result = Begin0(fst, rst)
result.copy_body_pruning(ast)
return result
def copy_ast(ast):
visitor = CopyVisitor()
return ast.visit(visitor)
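# A minimal (hypothetical) subclass illustrating the pattern described in the
# ASTVisitor docstring: override only the handlers you need and inherit the
# tree-rebuilding defaults for everything else. QuoteCounter is not part of
# pycket; it only sketches how a pass is wired up via `ast.visit(visitor)`.
class QuoteCounter(ASTVisitor):
    """Counts Quote nodes while returning the tree unchanged."""
    def __init__(self):
        self.count = 0
    def visit_quote(self, ast, *args):
        assert isinstance(ast, Quote)
        self.count += 1
        return ast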
# --- mymarilyn/clickhouse-driver: /tests/columns/test_low_cardinality.py (MIT, Python-2.0) ---
from datetime import date, timedelta
from decimal import Decimal
from uuid import UUID
from tests.testcase import BaseTestCase
from tests.util import require_server_version
class LowCardinalityTestCase(BaseTestCase):
required_server_version = (19, 3, 3)
stable_support_version = (19, 9, 2)
def cli_client_kwargs(self):
if self.server_version >= self.stable_support_version:
return {'allow_suspicious_low_cardinality_types': 1}
def test_uint8(self):
with self.create_table('a LowCardinality(UInt8)'):
data = [(x, ) for x in range(255)]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'\n'.join(str(x[0]) for x in data) + '\n'
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_int8(self):
with self.create_table('a LowCardinality(Int8)'):
data = [(x - 127, ) for x in range(255)]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'\n'.join(str(x[0]) for x in data) + '\n'
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_nullable_int8(self):
with self.create_table('a LowCardinality(Nullable(Int8))'):
data = [(None, ), (-1, ), (0, ), (1, ), (None, )]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(inserted, '\\N\n-1\n0\n1\n\\N\n')
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_date(self):
with self.create_table('a LowCardinality(Date)'):
start = date(1970, 1, 1)
data = [(start + timedelta(x), ) for x in range(300)]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_nullable_date(self):
with self.create_table('a LowCardinality(Nullable(Date))'):
data = [(date(2023, 4, 1), ), (None, ), (date(1970, 1, 1), )]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
@require_server_version(21, 6)
def test_nullable_uuid(self):
with self.create_table('a LowCardinality(Nullable(UUID))'):
data = [(UUID('2efcead4-ff55-4db5-bdb4-6b36a308d8e0'), ), (None, )]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_float(self):
with self.create_table('a LowCardinality(Float)'):
data = [(float(x),) for x in range(300)]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_decimal(self):
with self.create_table('a LowCardinality(Float)'):
data = [(Decimal(x),) for x in range(300)]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_array(self):
with self.create_table('a Array(LowCardinality(Int16))'):
data = [([100, 500], )]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(inserted, '[100,500]\n')
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_empty_array(self):
with self.create_table('a Array(LowCardinality(Int16))'):
data = [([], )]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(inserted, '[]\n')
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_string(self):
with self.create_table('a LowCardinality(String)'):
data = [
('test', ), ('low', ), ('cardinality', ),
('test', ), ('test', ), ('', )
]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'test\nlow\ncardinality\ntest\ntest\n\n'
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_fixed_string(self):
with self.create_table('a LowCardinality(FixedString(12))'):
data = [
('test', ), ('low', ), ('cardinality', ),
('test', ), ('test', ), ('', )
]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'test\\0\\0\\0\\0\\0\\0\\0\\0\n'
'low\\0\\0\\0\\0\\0\\0\\0\\0\\0\n'
'cardinality\\0\n'
'test\\0\\0\\0\\0\\0\\0\\0\\0\n'
'test\\0\\0\\0\\0\\0\\0\\0\\0\n'
'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\n'
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_nullable_string(self):
with self.create_table('a LowCardinality(Nullable(String))'):
data = [
('test', ), ('', ), (None, )
]
self.client.execute('INSERT INTO test (a) VALUES', data)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'test\n\n\\N\n'
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
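# For reference, each column spec passed to create_table above presumably ends
# up in DDL of roughly this shape (the engine choice is an assumption; the
# helper's actual engine may differ):
#     CREATE TABLE test (a LowCardinality(Nullable(String))) ENGINE = Memory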
# --- manga-py/manga-py: /manga_py/providers/mangamew_com.py (MIT) ---
from manga_py.provider import Provider
from .helpers.std import Std
class MangaMewCom(Provider, Std):
_type = 'manga'
def get_chapter_index(self) -> str:
re = r'%s/[^/]+/.+?-(\d+(?:-\d+)?)-\d+' % self._type
return self.re.search(re, self.chapter).group(1)
def get_content(self):
url = self.get_url()
if url.find('/' + self._type + '/') == -1: # not found
a = self.html_fromstring(url, 'h1.name a', 0)
url = a.get('href')
return self.http_get(url)
def get_manga_name(self) -> str:
content = self.http_get(self.get_url())
return self.text_content_full(content, 'h1.name a,h1.title')
def get_chapters(self):
return self._elements('.chapter .item a')[::-1]
def get_files(self):
parser = self.html_fromstring(self.chapter)
return self._images_helper(parser, '#content .item > img')
def get_cover(self) -> str:
return self._cover_from_content('.images img')
def book_meta(self) -> dict:
pass
main = MangaMewCom
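# Hypothetical sanity check of the chapter-index pattern used above (the URL
# is made up for illustration): the capture group grabs the chapter number,
# including an optional sub-chapter suffix, before the trailing page count.
import re as _re
_m = _re.search(r'manga/[^/]+/.+?-(\d+(?:-\d+)?)-\d+',
                'https://example.com/manga/some-title/chapter-12-3-45')
assert _m is not None and _m.group(1) == '12-3'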
# --- alteryx/featuretools: /featuretools/primitives/standard/transform/__init__.py (BSD-3-Clause) ---
# flake8: noqa
from featuretools.primitives.standard.transform.absolute_diff import AbsoluteDiff
from featuretools.primitives.standard.transform.binary import *
from featuretools.primitives.standard.transform.cumulative import *
from featuretools.primitives.standard.transform.datetime import *
from featuretools.primitives.standard.transform.email import *
from featuretools.primitives.standard.transform.exponential import *
from featuretools.primitives.standard.transform.file_extension import FileExtension
from featuretools.primitives.standard.transform.full_name_to_first_name import (
FullNameToFirstName,
)
from featuretools.primitives.standard.transform.full_name_to_last_name import (
FullNameToLastName,
)
from featuretools.primitives.standard.transform.full_name_to_title import (
FullNameToTitle,
)
from featuretools.primitives.standard.transform.is_in import IsIn
from featuretools.primitives.standard.transform.is_null import IsNull
from featuretools.primitives.standard.transform.latlong import *
from featuretools.primitives.standard.transform.natural_language import *
from featuretools.primitives.standard.transform.not_primitive import Not
from featuretools.primitives.standard.transform.nth_week_of_month import NthWeekOfMonth
from featuretools.primitives.standard.transform.numeric import *
from featuretools.primitives.standard.transform.percent_change import PercentChange
from featuretools.primitives.standard.transform.postal import *
from featuretools.primitives.standard.transform.savgol_filter import SavgolFilter
from featuretools.primitives.standard.transform.time_series import *
from featuretools.primitives.standard.transform.url import *
# --- huawei-noah/xingtian: /zeus/networks/pytorch/losses/reduce_loss.py (MIT) ---
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Reduce Loss."""
import functools
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss compute.
:param loss: losses
:param reduction: reduce funtion
:return: loss
"""
reduction_function = F._Reduction.get_enum(reduction)
if reduction_function == 0:
return loss
elif reduction_function == 1:
return loss.mean()
elif reduction_function == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Weight reduce loss.
:param loss: losses
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
if reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Weight loss compute.
:param loss_func: loss function
:return: loss
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
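# Hypothetical usage sketch of the decorator above (not part of the original
# module): |1-1| + |2-1| + |3-1| = 3, so the mean reduction yields 1.0.
import torch
@weighted_loss
def _l1_loss_example(pred, target):
    # element-wise loss; weighting and reduction are handled by the wrapper
    return torch.abs(pred - target)
assert torch.isclose(
    _l1_loss_example(torch.tensor([1.0, 2.0, 3.0]),
                     torch.tensor([1.0, 1.0, 1.0]),
                     reduction='mean'),
    torch.tensor(1.0),
)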
# --- square/bionic: /bionic/protocol.py (Apache-2.0) ---
from . import protocols
from .utils.misc import oneline
# These are callable with or without arguments. See BaseProtocol.__call__ for
# why we instantiate them here.
picklable = protocols.PicklableProtocol() # noqa: F401
dillable = protocols.DillableProtocol() # noqa: F401
dask = protocols.DaskProtocol() # noqa: F401
image = protocols.ImageProtocol() # noqa: F401
numpy = protocols.NumPyProtocol() # noqa: F401
yaml = protocols.YamlProtocol() # noqa: F401
path = protocols.PathProtocol() # noqa: F401
geodataframe = protocols.GeoPandasProtocol() # noqa: F401
json = protocols.JsonProtocol() # noqa: F401
def frame(func=None, file_format=None, check_dtypes=None):
"""
Decorator indicating that an entity will always have a pandas DataFrame
type.
The frame values will be serialized to either Parquet (default) or Feather.
Parquet is more popular, but some types of data or frame structures are
only supported by one format or the other. In particular, ordered
categorical columns are supported by Feather and not Parquet.
This decorator can be used with or without arguments:
.. code-block:: python
@frame
def dataframe(...):
...
@frame(file_format='feather')
def dataframe(...):
...
Parameters
----------
file_format: {'parquet', 'feather'} (default: 'parquet')
Which file format to use when saving values to disk.
check_dtypes: boolean (default: True)
Check for column types not supported by the file format. This
check is best-effort and not guaranteed to catch all problems. If
an unsupported data type is found, an exception will be thrown at
serialization time.
"""
# If the first argument is present, we were (hopefully) used as a decorator
# without any other arguments.
if func is not None:
if file_format is not None or check_dtypes is not None:
raise ValueError("frame can't be called with both a function and keywords")
if not callable(func):
raise ValueError(
oneline(
"""
frame must be used either (a) directly as a decorator or
(b) with keyword arguments;
it can't take positional arguments.
"""
)
)
return protocols.ParquetDataFrameProtocol()(func)
# Otherwise, we have arguments and should return a decorator.
if file_format is None or file_format == "parquet":
kwargs = {}
if check_dtypes is not None:
kwargs["check_dtypes"] = check_dtypes
return protocols.ParquetDataFrameProtocol(**kwargs)
elif file_format == "feather":
return protocols.FeatherDataFrameProtocol()
else:
raise ValueError(
oneline(
f"""
                file_format must be one of {{'parquet', 'feather'}};
got {file_format!r}"""
)
)
# These need to be called with arguments.
enum = protocols.EnumProtocol # noqa: F401
type = protocols.TypeProtocol # noqa: F401
# --- sphinx-gallery/sphinx-gallery: /sphinx_gallery/tests/tinybuild/examples/plot_webp.py (BSD-3-Clause) ---
"""
============
Save as WebP
============
"""
import matplotlib.pyplot as plt
plt.plot([1, 2])
# --- PacktPublishing/Mastering-Flask-Web-Development-Second-Edition: /Chapter10/webapp/blog/tasks.py (MIT) ---
import datetime
import logging
from flask import render_template
from flask_mail import Message
from .. import celery, mail
from .models import Reminder, Post
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(logging.DEBUG)
logs = logging.getLogger(__name__)
@celery.task()
def log(msg):
return msg
@celery.task()
def multiply(x, y):
return x * y
@celery.task(
bind=True,
ignore_result=True,
default_retry_delay=300,
max_retries=5
)
def remind(self, pk):
logs.info("Remind worker %d" % pk)
reminder = Reminder.query.get(pk)
msg = Message(body="Text %s" % str(reminder.text), recipients=[reminder.email], subject="Your reminder")
try:
mail.send(msg)
logs.info("Email sent to %s" % reminder.email)
return
except Exception as e:
logs.error(e)
self.retry(exc=e)
@celery.task(
bind=True,
ignore_result=True,
default_retry_delay=300,
max_retries=5
)
def digest(self):
    # find the Monday..Sunday bounds of the current ISO week: locate the first
    # day of ISO week 1 for this year, then offset by (week - 1) whole weeks
year, week = datetime.datetime.now().isocalendar()[0:2]
date = datetime.date(year, 1, 1)
if (date.weekday() > 3):
date = date + datetime.timedelta(7 - date.weekday())
else:
date = date - datetime.timedelta(date.weekday())
delta = datetime.timedelta(days=(week - 1) * 7)
start, end = date + delta, date + delta + datetime.timedelta(days=6)
posts = Post.query.filter(
Post.publish_date >= start,
Post.publish_date <= end
).all()
if (len(posts) == 0):
return
msg = Message()
msg.html = render_template("digest.html", posts=posts)
msg.recipients = ['']
msg.subject = "Weekly Digest"
try:
mail.send(msg)
return
except Exception as e:
logs.error(e)
self.retry(exc=e)
def on_reminder_save(mapper, connect, self):
remind.apply_async(args=(self.id,), eta=self.date)
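# Worked example of the ISO-week arithmetic in digest() above (a sanity note,
# not part of the original app): for 2021, Jan 1 falls on a Friday
# (weekday() == 4 > 3), so `date` advances to Monday 2021-01-04, the start of
# ISO week 1; week 3 then spans 2021-01-18 .. 2021-01-24.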
# --- hpyproject/hpy: /test/test_hpyunicode.py (MIT) ---
# -*- encoding: utf-8 -*-
import itertools
import re
import sys
import pytest
from .support import HPyTest
class TestUnicode(HPyTest):
def test_Check(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
if (HPyUnicode_Check(ctx, arg))
return HPy_Dup(ctx, ctx->h_True);
return HPy_Dup(ctx, ctx->h_False);
}
@EXPORT(f)
@INIT
""")
class MyUnicode(str):
pass
assert mod.f('hello') is True
assert mod.f(b'hello') is False
assert mod.f(MyUnicode('hello')) is True
def test_FromString(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_NOARGS)
static HPy f_impl(HPyContext *ctx, HPy self)
{
return HPyUnicode_FromString(ctx, "foobar");
}
@EXPORT(f)
@INIT
""")
assert mod.f() == "foobar"
def test_FromWideChar(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
const wchar_t buf[] = { 'h', 'e', 'l', 'l', 0xf2, ' ',
'w', 'o', 'r', 'l', 'd', 0 };
long n = HPyLong_AsLong(ctx, arg);
return HPyUnicode_FromWideChar(ctx, buf, n);
}
@EXPORT(f)
@INIT
""")
assert mod.f(-1) == "hellò world"
assert mod.f(11) == "hellò world"
assert mod.f(5) == "hellò"
def test_AsUTF8String(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_AsUTF8String(ctx, arg);
}
@EXPORT(f)
@INIT
""")
s = 'hellò'
b = mod.f(s)
assert type(b) is bytes
assert b == s.encode('utf-8')
def test_AsASCIIString(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_AsASCIIString(ctx, arg);
}
@EXPORT(f)
@INIT
""")
s = 'world'
b = mod.f(s)
assert type(b) is bytes
assert b == s.encode('ascii')
def test_AsLatin1String(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_AsLatin1String(ctx, arg);
}
@EXPORT(f)
@INIT
""")
s = "Müller"
b = mod.f(s)
assert type(b) is bytes
assert b == s.encode('latin1')
def test_AsUTF8AndSize(self):
mod = self.make_module("""
#include <string.h>
static HPy as_utf8_and_size(HPyContext *ctx, HPy arg, HPy_ssize_t *size)
{
HPy_ssize_t n;
const char* buf = HPyUnicode_AsUTF8AndSize(ctx, arg, size);
long res = 0;
if (size)
n = *size;
else
n = strlen(buf);
for(int i=0; i<n; i++)
res = (res * 10) + buf[i];
return HPyLong_FromLong(ctx, res);
}
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
HPy_ssize_t n;
return as_utf8_and_size(ctx, arg, &n);
}
HPyDef_METH(g, "g", HPyFunc_O)
static HPy g_impl(HPyContext *ctx, HPy self, HPy arg)
{
return as_utf8_and_size(ctx, arg, NULL);
}
@EXPORT(f)
@EXPORT(g)
@INIT
""")
assert mod.f('ABC') == 100*ord('A') + 10*ord('B') + ord('C')
assert mod.f(b'A\0C'.decode('utf-8')) == 100*ord('A') + ord('C')
assert mod.g('ABC') == 100*ord('A') + 10*ord('B') + ord('C')
assert mod.g(b'A'.decode('utf-8')) == ord('A')
assert mod.g(b'A\0'.decode('utf-8')) == ord('A')
def test_DecodeLatin1(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
const char* buf = HPyBytes_AS_STRING(ctx, arg);
HPy_ssize_t n = HPyBytes_Size(ctx, arg);
return HPyUnicode_DecodeLatin1(ctx, buf, n, "");
}
@EXPORT(f)
@INIT
""")
assert mod.f(b'M\xfcller') == "Müller"
def test_DecodeASCII(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
const char* buf = HPyBytes_AS_STRING(ctx, arg);
HPy_ssize_t n = HPyBytes_Size(ctx, arg);
return HPyUnicode_DecodeASCII(ctx, buf, n, "");
}
@EXPORT(f)
@INIT
""")
assert mod.f(b'hello') == "hello"
def test_ReadChar(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
long c = HPyUnicode_ReadChar(ctx, arg, 1);
return HPyLong_FromLong(ctx, c);
}
@EXPORT(f)
@INIT
""")
assert mod.f('ABC') == 66
def test_EncodeFSDefault(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_EncodeFSDefault(ctx, arg);
}
@EXPORT(f)
@INIT
""")
assert mod.f('ABC') == b'ABC'
def test_DecodeFSDefault(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
HPy_ssize_t n;
const char* buf = HPyUnicode_AsUTF8AndSize(ctx, arg, &n);
return HPyUnicode_DecodeFSDefault(ctx, buf);
}
HPyDef_METH(g, "g", HPyFunc_NOARGS)
static HPy g_impl(HPyContext *ctx, HPy self)
{
const char buf[5] = { 'a', 'b', '\\0', 'c' };
return HPyUnicode_DecodeFSDefaultAndSize(ctx, buf, 4);
}
@EXPORT(f)
@EXPORT(g)
@INIT
""")
assert mod.f('ABC') == "ABC"
assert mod.g().encode('ascii') == b'ab\0c'
def test_FromFormat(self, hpy_abi):
# Later we generate an HPy function for each case described below:
# Most of the test cases are taken from CPython:Modules/_testcapi/unicode.c
# Future work can improve this to add tests from Lib/test/test_capi/test_unicode.py
cases = [
# Unrecognized
( "%y%d", (SystemError, "invalid format string"), "'w', 42"),
("%04.2y%d", (SystemError, "invalid format string"), "'w', 42"),
("%u %? %u", (SystemError, "invalid format string"), "1, 2"),
# "%%" (options are rejected)
( "%%", "%", "0"),
("%%%c", "%w", "'w'"),
( "%0%", (SystemError, "invalid format string"), "0"),
("%00%", (SystemError, "invalid format string"), "0"),
( "%2%", (SystemError, "invalid format string"), "0"),
("%02%", (SystemError, "invalid format string"), "0"),
("%.0%", (SystemError, "invalid format string"), "0"),
("%.2%", (SystemError, "invalid format string"), "0"),
# "%c"
( "%c", "c", "'c'"),
# Integers
("%d", "123", '(int)123'),
("%i", "123", '(int)123'),
("%u", "123", '(unsigned int)123'),
("%ld", "123", '(long)123'),
("%li", "123", '(long)123'),
("%lu", "123", '(unsigned long)123'),
("%lld", "123", '(long long)123'),
("%lli", "123", '(long long)123'),
("%llu", "123",'(unsigned long long)123'),
("%zd", "123", '(HPy_ssize_t)123'),
("%zi", "123", '(HPy_ssize_t)123'),
("%zu", "123", '(size_t)123'),
("%x", "7b", '(int)123'),
("%d", "-123", '(int)-123'),
("%i", "-123", '(int)-123'),
("%ld", "-123", '(long)-123'),
("%li", "-123", '(long)-123'),
("%lld", "-123", '(long long)-123'),
("%lli", "-123", '(long long)-123'),
("%zd", "-123", '(HPy_ssize_t)-123'),
("%zi", "-123", '(HPy_ssize_t)-123'),
("%x", "ffffff85", '(int)-123'),
# Integers: width < length
("%1d", "123", '(int)123'),
("%1i", "123", '(int)123'),
("%1u", "123", '(unsigned int)123'),
("%1ld", "123", '(long)123'),
("%1li", "123", '(long)123'),
("%1lu", "123", '(unsigned long)123'),
("%1lld", "123", '(long long)123'),
("%1lli", "123", '(long long)123'),
("%1llu", "123",'(unsigned long long)123'),
("%1zd", "123", '(HPy_ssize_t)123'),
("%1zi", "123", '(HPy_ssize_t)123'),
("%1zu", "123", '(size_t)123'),
("%1x", "7b", '(int)123'),
("%1d", "-123", '(int)-123'),
("%1i", "-123", '(int)-123'),
("%1ld", "-123", '(long)-123'),
("%1li", "-123", '(long)-123'),
("%1lld", "-123", '(long long)-123'),
("%1lli", "-123", '(long long)-123'),
("%1zd", "-123", '(HPy_ssize_t)-123'),
("%1zi", "-123", '(HPy_ssize_t)-123'),
("%1x", "ffffff85", '(int)-123'),
# Integers: width > length
("%5d", " 123", '(int)123'),
("%5i", " 123", '(int)123'),
("%5u", " 123", '(unsigned int)123'),
("%5ld", " 123", '(long)123'),
("%5li", " 123", '(long)123'),
("%5lu", " 123", '(unsigned long)123'),
("%5lld", " 123", '(long long)123'),
("%5lli", " 123", '(long long)123'),
("%5llu", " 123",'(unsigned long long)123'),
("%5zd", " 123", '(HPy_ssize_t)123'),
("%5zi", " 123", '(HPy_ssize_t)123'),
("%5zu", " 123", '(size_t)123'),
("%5x", " 7b", '(int)123'),
("%5d", " -123", '(int)-123'),
("%5i", " -123", '(int)-123'),
("%5ld", " -123", '(long)-123'),
("%5li", " -123", '(long)-123'),
("%5lld", " -123", '(long long)-123'),
("%5lli", " -123", '(long long)-123'),
("%5zd", " -123", '(HPy_ssize_t)-123'),
("%5zi", " -123", '(HPy_ssize_t)-123'),
("%9x", " ffffff85", '(int)-123'),
# Integers: width > length, 0-flag
("%05d", "00123", '(int)123'),
("%05i", "00123", '(int)123'),
("%05u", "00123", '(unsigned int)123'),
("%05ld", "00123", '(long)123'),
("%05li", "00123", '(long)123'),
("%05lu", "00123", '(unsigned long)123'),
("%05lld", "00123", '(long long)123'),
("%05lli", "00123", '(long long)123'),
("%05llu", "00123",'(unsigned long long)123'),
("%05zd", "00123", '(HPy_ssize_t)123'),
("%05zi", "00123", '(HPy_ssize_t)123'),
("%05zu", "00123", '(size_t)123'),
("%05x", "0007b", '(int)123'),
("%05d", "-0123", '(int)-123'),
("%05i", "-0123", '(int)-123'),
("%05ld", "-0123", '(long)-123'),
("%05li", "-0123", '(long)-123'),
("%05lld", "-0123", '(long long)-123'),
("%05lli", "-0123", '(long long)-123'),
("%05zd", "-0123", '(HPy_ssize_t)-123'),
("%05zi", "-0123", '(HPy_ssize_t)-123'),
("%09x", "0ffffff85", '(int)-123'),
# Integers: precision < length
("%.1d", "123", '(int)123'),
("%.1i", "123", '(int)123'),
("%.1u", "123", '(unsigned int)123'),
("%.1ld", "123", '(long)123'),
("%.1li", "123", '(long)123'),
("%.1lu", "123", '(unsigned long)123'),
("%.1lld", "123", '(long long)123'),
("%.1lli", "123", '(long long)123'),
("%.1llu", "123",'(unsigned long long)123'),
("%.1zd", "123", '(HPy_ssize_t)123'),
("%.1zi", "123", '(HPy_ssize_t)123'),
("%.1zu", "123", '(size_t)123'),
("%.1x", "7b", '(int)123'),
("%.1d", "-123", '(int)-123'),
("%.1i", "-123", '(int)-123'),
("%.1ld", "-123", '(long)-123'),
("%.1li", "-123", '(long)-123'),
("%.1lld", "-123", '(long long)-123'),
("%.1lli", "-123", '(long long)-123'),
("%.1zd", "-123", '(HPy_ssize_t)-123'),
("%.1zi", "-123", '(HPy_ssize_t)-123'),
("%.1x", "ffffff85", '(int)-123'),
# Integers: precision > length
("%.5d", "00123", '(int)123'),
("%.5i", "00123", '(int)123'),
("%.5u", "00123", '(unsigned int)123'),
("%.5ld", "00123", '(long)123'),
("%.5li", "00123", '(long)123'),
("%.5lu", "00123", '(unsigned long)123'),
("%.5lld", "00123", '(long long)123'),
("%.5lli", "00123", '(long long)123'),
("%.5llu", "00123",'(unsigned long long)123'),
("%.5zd", "00123", '(HPy_ssize_t)123'),
("%.5zi", "00123", '(HPy_ssize_t)123'),
("%.5zu", "00123", '(size_t)123'),
("%.5x", "0007b", '(int)123'),
("%.5d", "-00123", '(int)-123'),
("%.5i", "-00123", '(int)-123'),
("%.5ld", "-00123", '(long)-123'),
("%.5li", "-00123", '(long)-123'),
("%.5lld", "-00123", '(long long)-123'),
("%.5lli", "-00123", '(long long)-123'),
("%.5zd", "-00123", '(HPy_ssize_t)-123'),
("%.5zi", "-00123", '(HPy_ssize_t)-123'),
("%.9x", "0ffffff85", '(int)-123'),
# Integers: width > precision > length
("%7.5d", " 00123", '(int)123'),
("%7.5i", " 00123", '(int)123'),
("%7.5u", " 00123", '(unsigned int)123'),
("%7.5ld", " 00123", '(long)123'),
("%7.5li", " 00123", '(long)123'),
("%7.5lu", " 00123", '(unsigned long)123'),
("%7.5lld", " 00123", '(long long)123'),
("%7.5lli", " 00123", '(long long)123'),
("%7.5llu", " 00123",'(unsigned long long)123'),
("%7.5zd", " 00123", '(HPy_ssize_t)123'),
("%7.5zi", " 00123", '(HPy_ssize_t)123'),
("%7.5zu", " 00123", '(size_t)123'),
("%7.5x", " 0007b", '(int)123'),
("%7.5d", " -00123", '(int)-123'),
("%7.5i", " -00123", '(int)-123'),
("%7.5ld", " -00123", '(long)-123'),
("%7.5li", " -00123", '(long)-123'),
("%7.5lld", " -00123", '(long long)-123'),
("%7.5lli", " -00123", '(long long)-123'),
("%7.5zd", " -00123", '(HPy_ssize_t)-123'),
("%7.5zi", " -00123", '(HPy_ssize_t)-123'),
("%10.9x", " 0ffffff85", '(int)-123'),
# Integers: width > precision > length, 0-flag
("%07.5d", "0000123", '(int)123'),
("%07.5i", "0000123", '(int)123'),
("%07.5u", "0000123", '(unsigned int)123'),
("%07.5ld", "0000123", '(long)123'),
("%07.5li", "0000123", '(long)123'),
("%07.5lu", "0000123", '(unsigned long)123'),
("%07.5lld", "0000123", '(long long)123'),
("%07.5lli", "0000123", '(long long)123'),
("%07.5llu", "0000123",'(unsigned long long)123'),
("%07.5zd", "0000123", '(HPy_ssize_t)123'),
("%07.5zi", "0000123", '(HPy_ssize_t)123'),
("%07.5zu", "0000123", '(size_t)123'),
("%07.5x", "000007b", '(int)123'),
("%07.5d", "-000123", '(int)-123'),
("%07.5i", "-000123", '(int)-123'),
("%07.5ld", "-000123", '(long)-123'),
("%07.5li", "-000123", '(long)-123'),
("%07.5lld", "-000123", '(long long)-123'),
("%07.5lli", "-000123", '(long long)-123'),
("%07.5zd", "-000123", '(HPy_ssize_t)-123'),
("%07.5zi", "-000123", '(HPy_ssize_t)-123'),
("%010.9x","00ffffff85", '(int)-123'),
# Integers: precision > width > length
("%5.7d", "0000123", '(int)123'),
("%5.7i", "0000123", '(int)123'),
("%5.7u", "0000123", '(unsigned int)123'),
("%5.7ld", "0000123", '(long)123'),
("%5.7li", "0000123", '(long)123'),
("%5.7lu", "0000123", '(unsigned long)123'),
("%5.7lld", "0000123", '(long long)123'),
("%5.7lli", "0000123", '(long long)123'),
("%5.7llu", "0000123",'(unsigned long long)123'),
("%5.7zd", "0000123", '(HPy_ssize_t)123'),
("%5.7zi", "0000123", '(HPy_ssize_t)123'),
("%5.7zu", "0000123", '(size_t)123'),
("%5.7x", "000007b", '(int)123'),
("%5.7d", "-0000123", '(int)-123'),
("%5.7i", "-0000123", '(int)-123'),
("%5.7ld", "-0000123", '(long)-123'),
("%5.7li", "-0000123", '(long)-123'),
("%5.7lld", "-0000123", '(long long)-123'),
("%5.7lli", "-0000123", '(long long)-123'),
("%5.7zd", "-0000123", '(HPy_ssize_t)-123'),
("%5.7zi", "-0000123", '(HPy_ssize_t)-123'),
("%9.10x", "00ffffff85", '(int)-123'),
# Integers: precision > width > length, 0-flag
("%05.7d", "0000123", '(int)123'),
("%05.7i", "0000123", '(int)123'),
("%05.7u", "0000123", '(unsigned int)123'),
("%05.7ld", "0000123", '(long)123'),
("%05.7li", "0000123", '(long)123'),
("%05.7lu", "0000123", '(unsigned long)123'),
("%05.7lld", "0000123", '(long long)123'),
("%05.7lli", "0000123", '(long long)123'),
("%05.7llu", "0000123",'(unsigned long long)123'),
("%05.7zd", "0000123", '(HPy_ssize_t)123'),
("%05.7zi", "0000123", '(HPy_ssize_t)123'),
("%05.7zu", "0000123", '(size_t)123'),
("%05.7x", "000007b", '(int)123'),
("%05.7d", "-0000123", '(int)-123'),
("%05.7i", "-0000123", '(int)-123'),
("%05.7ld", "-0000123", '(long)-123'),
("%05.7li", "-0000123", '(long)-123'),
("%05.7lld", "-0000123", '(long long)-123'),
("%05.7lli", "-0000123", '(long long)-123'),
("%05.7zd", "-0000123", '(HPy_ssize_t)-123'),
("%05.7zi", "-0000123", '(HPy_ssize_t)-123'),
("%09.10x","00ffffff85", '(int)-123'),
# Integers: precision = 0, arg = 0 (empty string in C)
("%.0d", "0", '(int)0'),
("%.0i", "0", '(int)0'),
("%.0u", "0", '(unsigned int)0'),
("%.0ld", "0", '(long)0'),
("%.0li", "0", '(long)0'),
("%.0lu", "0", '(unsigned long)0'),
("%.0lld", "0", '(long long)0'),
("%.0lli", "0", '(long long)0'),
("%.0llu", "0", '(unsigned long long)0'),
("%.0zd", "0", '(HPy_ssize_t)0'),
("%.0zi", "0", '(HPy_ssize_t)0'),
("%.0zu", "0", '(size_t)0'),
("%.0x", "0", '(int)0'),
# Strings
("%s", "None", ' "None"'),
("%U", "None", 'unicode'),
("%A", "None", 'ctx->h_None'),
("%S", "None", 'ctx->h_None'),
("%R", "None", 'ctx->h_None'),
("%V", "None", 'unicode, "ignored"'),
("%V", "None", ' NULL, "None"'),
# Strings: width < length
("%1s", "None", ' "None"'),
("%1U", "None", 'unicode'),
("%1A", "None", 'ctx->h_None'),
("%1S", "None", 'ctx->h_None'),
("%1R", "None", 'ctx->h_None'),
("%1V", "None", 'unicode, "ignored"'),
("%1V", "None", ' NULL, "None"'),
# Strings: width > length
("%5s", " None", ' "None"'),
("%5U", " None", 'unicode'),
("%5A", " None", 'ctx->h_None'),
("%5S", " None", 'ctx->h_None'),
("%5R", " None", 'ctx->h_None'),
("%5V", " None", 'unicode, "ignored"'),
("%5V", " None", ' NULL, "None"'),
# Strings: precision < length
("%.1s", "N", ' "None"'),
("%.1U", "N", 'unicode'),
("%.1A", "N", 'ctx->h_None'),
("%.1S", "N", 'ctx->h_None'),
("%.1R", "N", 'ctx->h_None'),
("%.1V", "N", 'unicode, "ignored"'),
("%.1V", "N", ' NULL, "None"'),
# Strings: precision > length
("%.5s", "None", ' "None"'),
("%.5U", "None", 'unicode'),
("%.5A", "None", 'ctx->h_None'),
("%.5S", "None", 'ctx->h_None'),
("%.5R", "None", 'ctx->h_None'),
("%.5V", "None", 'unicode, "ignored"'),
("%.5V", "None", ' NULL, "None"'),
# Strings: precision < length, width > length
("%5.1s", " N", ' "None"'),
("%5.1U", " N", 'unicode'),
("%5.1A", " N", 'ctx->h_None'),
("%5.1S", " N", 'ctx->h_None'),
("%5.1R", " N", 'ctx->h_None'),
("%5.1V", " N", 'unicode, "ignored"'),
("%5.1V", " N", ' NULL, "None"'),
# Strings: width < length, precision > length
("%1.5s", "None", ' "None"'),
("%1.5U", "None", 'unicode'),
("%1.5A", "None", 'ctx->h_None'),
("%1.5S", "None", 'ctx->h_None'),
("%1.5R", "None", 'ctx->h_None'),
("%1.5V", "None", 'unicode, "ignored"'),
("%1.5V", "None", ' NULL, "None"'),
# Additional HPy tests:
("%c", (OverflowError, re.escape("character argument not in range(0x110000)")), "0x10ffff + 2"),
("check if %5d %s %6.3d is %5S or %6.3S",
"check if 42 == -042 is True or Fal",
'42, "==", -42, ctx->h_True, ctx->h_False')
]
cpython_incompatible_cases = [
( "%s", (SystemError, "null c string passed as value for formatting unit '%s'"), "NULL"),
( '%4p', (SystemError, "formatting unit '%p' does not support width nor precision"), "0"),
( '%04p', (SystemError, "formatting unit '%p' does not support 0-padding"), "0"),
( '%.4p', (SystemError, "formatting unit '%p' does not support width nor precision"), "0"),
( '%8.4p', (SystemError, "formatting unit '%p' does not support width nor precision"), "0"),
('%08.4p', (SystemError, "formatting unit '%p' does not support 0-padding"), "0"),
( '%4c', (SystemError, "formatting unit '%c' does not support width nor precision"), "0"),
( '%04c', (SystemError, "formatting unit '%c' does not support 0-padding"), "0"),
( '%.4c', (SystemError, "formatting unit '%c' does not support width nor precision"), "0"),
( '%8.4c', (SystemError, "formatting unit '%c' does not support width nor precision"), "0"),
('%08.4c', (SystemError, "formatting unit '%c' does not support 0-padding"), "0"),
("%U", (SystemError, ".*HPy_NULL passed.*"), "HPy_NULL"),
("%S", (SystemError, ".*HPy_NULL passed.*"), "HPy_NULL"),
("%R", (SystemError, ".*HPy_NULL passed.*"), "HPy_NULL"),
("%A", (SystemError, ".*HPy_NULL passed.*"), "HPy_NULL"),
("%0s", (SystemError, "formatting unit '%s' does not support 0-padding"), "0"),
("%0p", (SystemError, "formatting unit '%p' does not support 0-padding"), "0"),
("%0U", (SystemError, "formatting unit '%U' does not support 0-padding"), "0"),
("%0V", (SystemError, "formatting unit '%V' does not support 0-padding"), "0"),
("%0S", (SystemError, "formatting unit '%S' does not support 0-padding"), "0"),
("%0R", (SystemError, "formatting unit '%R' does not support 0-padding"), "0"),
("%0A", (SystemError, "formatting unit '%A' does not support 0-padding"), "0"),
]
cases += cpython_incompatible_cases
cpython_incompatible_cases = set(cpython_incompatible_cases)
# Generate a unique name for each test that is also a valid C identifier
names = ['a' + str(i) for i in range(len(cases))]
cases = {name: case for (name, case) in itertools.zip_longest(names, cases)}
# ---
# Generate the test code from the cases:
def makefun(name, fmt, arg):
cpy_arg = arg.replace("ctx->h_None", "Py_None").replace("HPy_ssize_t", "Py_ssize_t")
return """
HPyDef_METH({name}, "{name}", HPyFunc_NOARGS)
static HPy {name}_impl(HPyContext *ctx, HPy self)
{{
HPy unicode = HPyUnicode_FromString(ctx, "None");
if (HPy_IsNull(unicode)) return HPy_NULL;
HPy result = HPyUnicode_FromFormat(ctx, "{fmt}", {arg});
HPy_Close(ctx, unicode);
return result;
}}
#ifdef CPM_WITH_CPYTHON
HPyDef_METH({name}_cpython, "{name}_cpython", HPyFunc_NOARGS)
static HPy {name}_cpython_impl(HPyContext *ctx, HPy self)
{{
PyObject *unicode = PyUnicode_FromString("None");
PyObject *py = PyUnicode_FromFormat("{fmt}", {cpy_arg});
HPy hpy = HPy_NULL;
if (py != NULL) {{
hpy = HPy_FromPyObject(ctx, py);
Py_DECREF(py);
}}
Py_DECREF(unicode);
return hpy;
}}
#endif
""".format(name=name, fmt=fmt, arg=arg, cpy_arg=cpy_arg)
# Change False->True to also check comparison with CPython.
# Works only for 3.12 or higher, lower versions have bugs that are
# fixed in HPy
compare_with_cpython = False and \
hpy_abi == 'cpython' and \
sys.implementation.name == 'cpython' and \
sys.implementation.version.major >= 3 and \
sys.implementation.version.minor >= 12
# Create functions for each case using the "makefun" template, export them
lines = ['#define CPM_WITH_CPYTHON'] if compare_with_cpython else []
lines += [makefun(name, fmt, arg) for (name, (fmt, _, arg)) in cases.items()]
lines += ["@EXPORT({})".format(name) for name in cases.keys()]
if compare_with_cpython:
lines += ["@EXPORT({}_cpython)".format(name) for name in cases.keys()]
lines += ["@INIT"]
mod = self.make_module("\n".join(lines))
def check_cpython_raises_any(name):
try:
getattr(mod, name + "_cpython")()
return False
except Exception:
return True
for (name, case) in cases.items():
(_, expected, _) = case
if isinstance(expected, tuple):
(expected_type, expected_message) = expected
with pytest.raises(expected_type, match=expected_message):
getattr(mod, name)()
if compare_with_cpython and case not in cpython_incompatible_cases:
assert check_cpython_raises_any(name), "CPython check (expected raise): " + name
continue
assert getattr(mod, name)() == expected, name + ":" + repr(case)
if compare_with_cpython and case not in cpython_incompatible_cases:
assert getattr(mod, name)() == getattr(mod, name + "_cpython")(), \
"CPython check: " + name + ":" + repr(case)
def test_FromFormat_Ptr(self):
# '%p' is platform dependent to some extent, so we need to use regex
mod = self.make_module("""
HPyDef_METH(p, "p", HPyFunc_NOARGS)
static HPy p_impl(HPyContext *ctx, HPy self)
{
return HPyUnicode_FromFormat(ctx, "prefix-%p-suffix", (void*) 0xbeef);
}
@EXPORT(p)
@INIT
""")
assert re.match(r'prefix-0x[0]{,60}[bB][eE][eE][fF]-suffix', mod.p())
def test_FromFormat_PyObjs(self):
mod = self.make_module("""
HPyDef_METH(S, "S", HPyFunc_O)
static HPy S_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_FromFormat(ctx, "prefix-%S-suffix", arg);
}
HPyDef_METH(R, "R", HPyFunc_O)
static HPy R_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_FromFormat(ctx, "prefix-%R-suffix", arg);
}
HPyDef_METH(A, "A", HPyFunc_O)
static HPy A_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_FromFormat(ctx, "prefix-%A-suffix", arg);
}
@EXPORT(S)
@EXPORT(R)
@EXPORT(A)
@INIT
""")
class MyObj:
def __str__(self):
return "MyObj.__str__"
def __repr__(self):
return "MyObj.__repr__ü"
assert mod.S('ABC') == 'prefix-ABC-suffix'
assert mod.S(42) == 'prefix-42-suffix'
assert mod.S(MyObj()) == 'prefix-MyObj.__str__-suffix'
assert mod.R('ABC') == "prefix-'ABC'-suffix"
assert mod.R(42) == 'prefix-42-suffix'
assert mod.R(MyObj()) == 'prefix-MyObj.__repr__ü-suffix'
assert mod.A('ABC') == "prefix-'ABC'-suffix"
assert mod.A(42) == 'prefix-42-suffix'
assert mod.A(MyObj()) == 'prefix-MyObj.__repr__\\xfc-suffix'
def test_FromFormat_NoAsciiEncodedFmt(self):
mod = self.make_module("""
HPyDef_METH(no_ascii_fmt, "no_ascii_fmt", HPyFunc_O)
static HPy no_ascii_fmt_impl(HPyContext *ctx, HPy self, HPy arg)
{
HPy_ssize_t s;
const char *fmt = HPyUnicode_AsUTF8AndSize(ctx, arg, &s);
return HPyUnicode_FromFormat(ctx, fmt);
}
@EXPORT(no_ascii_fmt)
@INIT
""")
with pytest.raises(ValueError, match="expected an ASCII-encoded format string, got a non-ASCII byte: 0xc3"):
mod.no_ascii_fmt("format ü")
def test_FromFormat_Unicode(self):
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_O)
static HPy f_impl(HPyContext *ctx, HPy self, HPy arg)
{
return HPyUnicode_FromFormat(ctx, "%10.5S", arg);
}
@EXPORT(f)
@INIT
""")
assert mod.f("€urΘpe") == " €urΘp"
def test_FromFormat_LongFormat(self):
chunk_size = 1000
chunks_count = 5
total_c_size = (chunk_size + 1) * chunks_count + 1
args = ','.join([str(i) for i in range(1, chunks_count+1)])
mod = self.make_module("""
#include <string.h>
HPyDef_METH(f, "f", HPyFunc_NOARGS)
static HPy f_impl(HPyContext *ctx, HPy self)
{{
const size_t chunk_size = {chunk_size} + 1; // for the '%d'
const size_t total_size = {total_size};
char fmt[{total_size}];
memset(fmt, 'a', total_size);
fmt[total_size - 1] = '\\0';
for (size_t i = 0; i < {chunks_count}; i++) {{
fmt[i * chunk_size] = '%';
fmt[(i * chunk_size)+1] = 'd';
}}
return HPyUnicode_FromFormat(ctx, fmt, {args});
}}
@EXPORT(f)
@INIT
""".format(chunk_size=chunk_size, chunks_count=chunks_count, total_size=total_c_size, args=args))
assert mod.f() == ''.join([str(i) + ("a" * (chunk_size - 1)) for i in range(1,chunks_count+1)])
def test_FromFormat_Limits(self):
import sys
mod = self.make_module("""
#include <stdio.h>
HPyDef_METH(width, "width", HPyFunc_NOARGS)
static HPy width_impl(HPyContext *ctx, HPy self)
{{
char fmt[512];
sprintf(fmt, "%%%llud", ((unsigned long long) HPY_SSIZE_T_MAX) + 1ull);
return HPyUnicode_FromFormat(ctx, fmt, 42);
}}
HPyDef_METH(precision, "precision", HPyFunc_NOARGS)
static HPy precision_impl(HPyContext *ctx, HPy self)
{{
char fmt[512];
sprintf(fmt, "%%.%llud", ((unsigned long long) HPY_SSIZE_T_MAX) + 1ull);
return HPyUnicode_FromFormat(ctx, fmt, 42);
}}
HPyDef_METH(memory_err_width, "memory_err_width", HPyFunc_NOARGS)
static HPy memory_err_width_impl(HPyContext *ctx, HPy self)
{{
return HPyUnicode_FromFormat(ctx, "%{max_size}d", 42);
}}
HPyDef_METH(memory_err_precision, "memory_err_precision", HPyFunc_NOARGS)
static HPy memory_err_precision_impl(HPyContext *ctx, HPy self)
{{
return HPyUnicode_FromFormat(ctx, "%.{max_size}d", 42);
}}
@EXPORT(width)
@EXPORT(precision)
@INIT
""".format(max_size = str(sys.maxsize + 1)))
with pytest.raises(ValueError) as exc:
mod.width()
assert str(exc.value) == "width too big"
with pytest.raises(ValueError) as exc:
mod.precision()
assert str(exc.value) == "precision too big"
def test_FromEncodedObject(self):
import pytest
mod = self.make_module("""
static const char *as_string(HPyContext *ctx, HPy h)
{
const char *res = HPyUnicode_AsUTF8AndSize(ctx, h, NULL);
if (res == NULL)
HPyErr_Clear(ctx);
return res;
}
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self, const HPy *args, size_t nargs)
{
HPy h_obj;
const char *encoding, *errors;
if (nargs != 3) {
HPyErr_SetString(ctx, ctx->h_TypeError, "expected exactly 3 arguments");
return HPy_NULL;
}
h_obj = HPy_Is(ctx, args[0], ctx->h_None) ? HPy_NULL : args[0];
encoding = as_string(ctx, args[1]);
errors = as_string(ctx, args[2]);
return HPyUnicode_FromEncodedObject(ctx, h_obj, encoding, errors);
}
@EXPORT(f)
@INIT
""")
# "hellö" as UTF-8 encoded bytes
utf8_bytes = b"hell\xc3\xb6"
# "hellö" as UTF-16 encoded bytes
utf16_bytes = b'\xff\xfeh\x00e\x00l\x00l\x00\xf6\x00'
ascii_codepoints = bytes(range(1, 128))
# note: None (if passed to arguments 'encoding' or 'errors') will be
# translated to a NULL pointer
for errors in (None, "strict", "ignore", "replace"):
assert mod.f(b"hello", "ascii", errors) == "hello"
assert mod.f(utf8_bytes, "utf8", errors) == "hellö"
assert mod.f(utf16_bytes, "utf16", errors) == "hellö"
assert len(mod.f(ascii_codepoints, "ascii", errors)) == 127
assert len(mod.f(ascii_codepoints, "utf8", errors)) == 127
# None will be translated to NULL and then defaults to UTF-8 encoding
for encoding in (None, "utf8"):
assert mod.f(utf8_bytes, encoding, None) == "hellö"
with pytest.raises(UnicodeDecodeError):
mod.f(utf16_bytes, encoding, None)
assert mod.f(utf16_bytes, encoding, "replace") == '��h\x00e\x00l\x00l\x00�\x00'
assert mod.f(utf16_bytes, encoding, "ignore") == 'h\x00e\x00l\x00l\x00\x00'
# test unknown encoding
with pytest.raises(LookupError):
mod.f(b"hello", "qwertyasdf13", None)
with pytest.raises(SystemError):
mod.f(None, None, None)
with pytest.raises(TypeError):
mod.f("hello", None, None)
with pytest.raises(TypeError):
mod.f(123, None, None)
def test_Substring(self):
import pytest
import string
mod = self.make_module("""
HPyDef_METH(f, "f", HPyFunc_VARARGS)
static HPy f_impl(HPyContext *ctx, HPy self, const HPy *args, size_t nargs)
{
HPy_ssize_t start, end;
if (nargs != 3) {
HPyErr_SetString(ctx, ctx->h_TypeError, "expected exactly 3 arguments");
return HPy_NULL;
}
start = HPyLong_AsSsize_t(ctx, args[1]);
if (start == -1 && HPyErr_Occurred(ctx))
return HPy_NULL;
end = HPyLong_AsSsize_t(ctx, args[2]);
if (end == -1 && HPyErr_Occurred(ctx))
return HPy_NULL;
return HPyUnicode_Substring(ctx, args[0], start, end);
}
@EXPORT(f)
@INIT
""")
# start == end
assert mod.f("hello", 0, 0) == ""
assert mod.f("hello", 4, 4) == ""
assert mod.f("hello", 5, 0) == ""
# start < end
assert mod.f("hello", 0, 5) == "hello"
assert mod.f("hello", 0, 100) == "hello"
assert mod.f('hello', 0, 1) == 'h'
assert mod.f("hello", 0, 2) == "he"
assert mod.f("hello", 2, 5) == "llo"
assert mod.f("hello", 2, 4) == "ll"
assert mod.f("hello", 100, 105) == ""
# start > end
assert mod.f("hello", 2000, 1000) == ""
assert mod.f("hello", 2, 1) == ""
with pytest.raises(IndexError):
mod.f("hello", -2, 5)
with pytest.raises(IndexError):
mod.f("hello", 2, -1)
# The following block is a variation of CPython's
# 'string_tests.py: test_extended_getslice'. This compares substrings
# with list slicing.
s = string.ascii_letters + string.digits
n = len(s)
indices = (0, 1, 3, 41, 1000, n-1, n-2, n-37)
for start in indices:
for stop in indices:
L = list(s)[start:stop]
assert mod.f(s, start, stop) == "".join(L)
|
b487cde8cbb1062c033c3806000deff7d03371c3
|
1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d
|
/src/arch/arm/ArmFsWorkload.py
|
a9474fe119ae37868d7a951b37e364f7a551e05f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
gem5/gem5
|
9ec715ae036c2e08807b5919f114e1d38d189bce
|
48a40cf2f5182a82de360b7efa497d82e06b1631
|
refs/heads/stable
| 2023-09-03T15:56:25.819189
| 2023-08-31T05:53:03
| 2023-08-31T05:53:03
| 27,425,638
| 1,185
| 1,177
|
BSD-3-Clause
| 2023-09-14T08:29:31
| 2014-12-02T09:46:00
|
C++
|
UTF-8
|
Python
| false
| false
| 4,180
|
py
|
ArmFsWorkload.py
|
# Copyright (c) 2009, 2012-2013, 2015-2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.options import *
from m5.SimObject import *
from m5.objects.Workload import KernelWorkload
class ArmMachineType(Enum):
map = {"VExpress_EMM": 2272, "VExpress_EMM64": 2272, "DTOnly": -1}
class ArmFsWorkload(KernelWorkload):
type = "ArmFsWorkload"
cxx_header = "arch/arm/fs_workload.hh"
cxx_class = "gem5::ArmISA::FsWorkload"
boot_loader = VectorParam.String(
[],
"File that contains the boot loader code. Zero or more files may be "
"specified. The first boot loader that matches the kernel's "
"architecture will be used.",
)
dtb_filename = Param.String(
"", "File that contains the Device Tree Blob. Don't use DTB if empty."
)
dtb_addr = Param.Addr(0, "DTB or ATAGS address")
initrd_filename = Param.String(
"",
"File that contains the initial ramdisk. Don't use initrd if empty.",
)
initrd_addr = Param.Addr(0, "initrd/initramfs address")
cpu_release_addr = Param.Addr(0, "cpu-release-addr property")
machine_type = Param.ArmMachineType(
"DTOnly",
"Machine id from http://www.arm.linux.org.uk/developer/machines/",
)
early_kernel_symbols = Param.Bool(
False, "enable early kernel symbol tables before MMU"
)
enable_context_switch_stats_dump = Param.Bool(
False, "enable stats/task info dumping at context switch boundaries"
)
panic_on_panic = Param.Bool(
False, "Trigger a gem5 panic if the guest kernel panics"
)
panic_on_oops = Param.Bool(
False, "Trigger a gem5 panic if the guest kernel oopses"
)
class ArmFsLinux(ArmFsWorkload):
type = "ArmFsLinux"
cxx_header = "arch/arm/linux/fs_workload.hh"
cxx_class = "gem5::ArmISA::FsLinux"
load_addr_mask = 0
@cxxMethod
def dumpDmesg(self):
"""Dump dmesg from the simulated kernel to standard out"""
pass
class ArmFsFreebsd(ArmFsWorkload):
type = "ArmFsFreebsd"
cxx_header = "arch/arm/freebsd/fs_workload.hh"
cxx_class = "gem5::ArmISA::FsFreebsd"
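# Illustrative usage sketch (not from the original file): attaching one of
# these workloads in a full-system config script. `object_file` and
# `command_line` come from the KernelWorkload base class; the kernel path,
# DTB path, and boot arguments below are placeholders.
#
# system.workload = ArmFsLinux()
# system.workload.object_file = "/path/to/vmlinux"
# system.workload.dtb_filename = "/path/to/platform.dtb"
# system.workload.command_line = "console=ttyAMA0 root=/dev/vda1"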
|
bce696fde65119ccba5467402171ff33a2560687
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/tests2/tests/cloudripper/test_fscd.py
|
3572d1d4ccde218a724565bb1de31a7ecabe8303
|
[] |
no_license
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 7,873
|
py
|
test_fscd.py
|
#!/usr/bin/env python3
#
# Copyright 2021-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import time
import unittest
from common.base_fscd_test import BaseFscdTest
from utils.cit_logger import Logger
from utils.shell_util import run_shell_cmd
from utils.test_utils import qemu_check, tests_dir
@unittest.skipIf(qemu_check(), "test env is QEMU, skipped")
class FscdTest(BaseFscdTest, unittest.TestCase):
TEST_DATA_PATH = None
DEFAULT_TEMP = 28000
def setUp(self, config=None, test_data_path=None):
self.TEST_DATA_PATH = "{}/cloudripper/test_data/fscd".format(
tests_dir()
)
super().setUp(config, test_data_path)
def power_host_on(self):
retry = 5
for num_retry in range(retry):
# If the host is already on, the script simply returns
cmd = "/usr/local/bin/wedge_power.sh on"
data = run_shell_cmd(cmd)
Logger.info(
"[FSCD Testing] Try {} Executing cmd= [{}]".format(num_retry, cmd)
)
Logger.info("[FSCD Testing] Received data= [{}]".format(data))
time.sleep(5)
if self.is_host_on():
return
self.assertTrue(
self.is_host_on(),
"[FSCD Testing] Retry for {} times"
" and host failed to power on".format(retry),
)
def is_host_on(self):
"""
Method to test if host power is on
"""
status = False
cmd = "/usr/local/bin/wedge_power.sh status"
data = run_shell_cmd(cmd)
Logger.info("[FSCD Testing] Executing cmd= [{}]".format(cmd))
Logger.info("[FSCD Testing] Received data= [{}]".format(data))
if "on" in data:
status = True
Logger.info("[FSCD Testing] userver power status {}".format(status))
return status
def get_fan_pwm(self, pwm_val=None):
"""
In Cloudripper there is only 1 zone and all fans belong to it.
PWM effect applies to all fans. Test if all fans report
the expected PWM
"""
self.assertNotEqual(pwm_val, None, "Expected PWM value needs to be set")
data = run_shell_cmd("/usr/local/bin/get_fan_speed.sh")
data = data.split("\n")
for line in data:
if len(line) == 0:
continue
line = line.split("(")
line = line[1].split("%")
if abs(int(line[0]) - int(pwm_val)) < 2:
continue
else:
return [False, data]
return [True, None]
def run_pwm_test(
self,
userver_temp=-DEFAULT_TEMP,
switch_temp=DEFAULT_TEMP,
inlet_temp=DEFAULT_TEMP,
json_max_temp=DEFAULT_TEMP,
expected_pwm=30,
):
PWM_VAL = expected_pwm
Logger.info(
"[FSCD Testing] Setting (userver={}C, switch={}C ,"
"inlet={}C,max_json={}C,expected pwm={})".format(
int(userver_temp) / 1000,
int(switch_temp) / 1000,
int(inlet_temp) / 1000,
int(json_max_temp) / 1000,
int(expected_pwm),
)
)
# Initialize PWM to 30 to test the normal ramp-up curve in the following steps
run_shell_cmd(
"echo {} > {}/inlet/temp1_input".format(20000, self.TEST_DATA_PATH)
)
# Wait for fans to change PWM
time.sleep(20)
run_shell_cmd(
"echo {} > {}/userver/temp1_input".format(userver_temp, self.TEST_DATA_PATH)
)
run_shell_cmd(
"echo {} > {}/switch/temp1_input".format(switch_temp, self.TEST_DATA_PATH)
)
run_shell_cmd(
"echo {} > {}/json-max/temp1_input".format(
json_max_temp, self.TEST_DATA_PATH
)
)
run_shell_cmd(
"echo {} > {}/inlet/temp1_input".format(inlet_temp, self.TEST_DATA_PATH)
)
# Wait for fans to change PWM
time.sleep(60)
return self.get_fan_pwm(pwm_val=PWM_VAL)
class FscdTestPwmCloudripper(FscdTest):
TEST_CONFIG_PATH = "{}/cloudripper/test_data/fscd".format(tests_dir())
def setUp(self):
config_file = "FSC-Cloudripper-config.json"
# Backup original config
run_shell_cmd("cp /etc/fsc/zone.fsc /etc/fsc/zone.fsc.orig")
# Overwrite fscd config
run_shell_cmd("cp {}/zone.fsc /etc/fsc/zone.fsc".format(self.TEST_CONFIG_PATH))
super().setUp(config=config_file, test_data_path=self.TEST_CONFIG_PATH)
def tearDown(self):
# Recover original config
run_shell_cmd("mv /etc/fsc/zone.fsc.orig /etc/fsc/zone.fsc")
super().tearDown()
def test_fscd_inlet_27dot99_duty_cycle_40(self):
# sub-test1: pwm when inlet temp<28C => duty_cycle=40
PWM_VAL = 40
status, pwm_output = self.run_pwm_test(
userver_temp=-68000,
switch_temp=28000,
inlet_temp=27990,
json_max_temp=28000,
expected_pwm=PWM_VAL,
)
self.assertTrue(
status,
"Expected {} for all fans but " "received {}".format(PWM_VAL, pwm_output),
)
def test_fscd_inlet_28_duty_cycle_45(self):
# sub-test2: pwm when inlet temp=28C => duty_cycle=45
PWM_VAL = 45
status, pwm_output = self.run_pwm_test(
userver_temp=-68000,
switch_temp=28000,
inlet_temp=28000,
json_max_temp=28000,
expected_pwm=PWM_VAL,
)
self.assertTrue(
status,
"Expected {} for all fans but " "received {}".format(PWM_VAL, pwm_output),
)
def test_fscd_inlet_30_duty_cycle_50(self):
# sub-test3: pwm when inlet temp=30C => duty_cycle=50
PWM_VAL = 50
status, pwm_output = self.run_pwm_test(
userver_temp=-68000,
switch_temp=28000,
inlet_temp=30000,
json_max_temp=28000,
expected_pwm=PWM_VAL,
)
self.assertTrue(
status,
"Expected {} for all fans but " "received {}".format(PWM_VAL, pwm_output),
)
def test_fscd_inlet_33_duty_cycle_55(self):
# sub-test4: pwm when inlet temp=33C => duty_cycle=55
PWM_VAL = 55
status, pwm_output = self.run_pwm_test(
userver_temp=-68000,
switch_temp=28000,
inlet_temp=33000,
json_max_temp=28000,
expected_pwm=PWM_VAL,
)
self.assertTrue(
status,
"Expected {} for all fans but " "received {}".format(PWM_VAL, pwm_output),
)
def test_fscd_inlet_35_duty_cycle_60(self):
# sub-test5: pwm when inlet temp=35C => duty_cycle=60
PWM_VAL = 60
status, pwm_output = self.run_pwm_test(
userver_temp=-68000,
switch_temp=28000,
inlet_temp=35000,
json_max_temp=28000,
expected_pwm=PWM_VAL,
)
self.assertTrue(
status,
"Expected {} for all fans but " "received {}".format(PWM_VAL, pwm_output),
)
|
62fa59116e00d489c0ed80b316f872afbed0ce30
|
89021435261d62f34c4d4d619e03409df72f156b
|
/sample/u2_demo.py
|
eb9bca27868de448d164ee8f0db5c86184fcbe79
|
[
"Apache-2.0"
] |
permissive
|
SeldomQA/poium
|
7018d080a55be112cc0e56a3d33476f8c9a485a5
|
fedcfa4a046a21d159ca5cffa6fa865569757e6a
|
refs/heads/master
| 2023-09-01T15:08:41.802432
| 2023-08-23T15:20:27
| 2023-08-23T15:20:27
| 154,795,650
| 251
| 96
|
Apache-2.0
| 2023-03-11T17:24:16
| 2018-10-26T07:35:21
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
u2_demo.py
|
"""
uiautomator2 Library test demo
https://github.com/openatx/uiautomator2
"""
import uiautomator2 as u2
from poium.u2 import Page, Element, Setting
Setting.click_screenshots = False
class BBSPage(Page):
search_input = Element(resourceId="com.meizu.flyme.flymebbs:id/ko", describe="search input box")
search_button = Element(resourceId="com.meizu.flyme.flymebbs:id/o1", describe="search button")
search_result = Element(resourceId="com.meizu.flyme.flymebbs:id/a29", describe="search result")
d = u2.connect()
d.app_start("com.meizu.flyme.flymebbs")
page = BBSPage(d)
page.search_input.click()
page.search_input.send_keys("flyme")
page.search_button.click()
result = page.search_result.get_text()
print(result)
d.app_stop("com.meizu.flyme.flymebbs")
|
7977b9644a3d597f7c71e219219cb8c1268cec58
|
cfedc55053631e336cd3639f5a8fff25bff7d373
|
/chapter2/solutions/expenditures_barchart.py
|
54a88177e7b2857508782bb3eead3f79a6066166
|
[
"MIT"
] |
permissive
|
doingmathwithpython/code
|
53db61c74eeb421fe8ba50e8f1aba3f3013e5aea
|
03ed863cd94e1a64912c0c273e3972e2019f9e60
|
refs/heads/master
| 2022-05-22T00:26:13.151556
| 2022-05-09T01:47:10
| 2022-05-09T01:47:10
| 41,512,525
| 118
| 84
|
MIT
| 2021-05-01T07:25:49
| 2015-08-27T21:34:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 997
|
py
|
expenditures_barchart.py
|
'''
expenditures_barchart.py
Visualizing the weekly expenditure using a bar chart
'''
import matplotlib.pyplot as plt
def create_bar_chart(data, labels):
# number of bars
num_bars = len(data)
# this list gives the points on the y-axis where each
# bar is centered. Here it will be [1, 2, 3, ...]
positions = range(1, num_bars+1)
plt.barh(positions, data, align='center')
# set the label of each bar
plt.yticks(positions, labels)
plt.xlabel('Amount')
plt.ylabel('Categories')
plt.title('Weekly expenditures')
# Turns on the grid which may assist in visual estimation
plt.grid()
plt.show()
if __name__ == '__main__':
n = int(input('Enter the number of categories: '))
labels = []
expenditures = []
for i in range(n):
category = input('Enter category: ')
expenditure = float(input('Expenditure: '))
labels.append(category)
expenditures.append(expenditure)
create_bar_chart(expenditures, labels)
|
a5f9a062515879efece9a76eff327551025ccd74
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scipy/py3/scipy/stats/_hypotests.py
|
a5dd37b08f774d025719b6b2eb94c9a8d9febcd6
|
[
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Qhull",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 53,545
|
py
|
_hypotests.py
|
from collections import namedtuple
from dataclasses import make_dataclass
import numpy as np
import warnings
from itertools import combinations
import scipy.stats
from scipy.optimize import shgo
from . import distributions
from ._continuous_distns import chi2, norm
from scipy.special import gamma, kv, gammaln
from . import _wilcoxon_data
__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp']
Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
('statistic', 'pvalue'))
def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
"""Compute the Epps-Singleton (ES) test statistic.
Test the null hypothesis that two samples have the same underlying
probability distribution.
Parameters
----------
x, y : array-like
The two samples of observations to be tested. Input must not have more
than one dimension. Samples can have different lengths.
t : array-like, optional
The points (t1, ..., tn) where the empirical characteristic function is
to be evaluated. It should contain positive, distinct numbers. The default
value (0.4, 0.8) is proposed in [1]_. Input must not have more than
one dimension.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The associated p-value based on the asymptotic chi2-distribution.
See Also
--------
ks_2samp, anderson_ksamp
Notes
-----
Testing whether two samples are generated by the same underlying
distribution is a classical question in statistics. A widely used test is
the Kolmogorov-Smirnov (KS) test which relies on the empirical
distribution function. Epps and Singleton introduce a test based on the
empirical characteristic function in [1]_.
One advantage of the ES test compared to the KS test is that it does
not assume a continuous distribution. In [1]_, the authors conclude
that the test also has a higher power than the KS test in many
examples. They recommend the use of the ES test for discrete samples as
well as continuous samples with at least 25 observations each, whereas
`anderson_ksamp` is recommended for smaller sample sizes in the
continuous case.
The p-value is computed from the asymptotic distribution of the test
statistic which follows a `chi2` distribution. If the sample size of both
`x` and `y` is below 25, the small sample correction proposed in [1]_ is
applied to the test statistic.
The default values of `t` are determined in [1]_ by considering
various distributions and finding good values that lead to a high power
of the test in general. Table III in [1]_ gives the optimal values for
the distributions tested in that study. The values of `t` are scaled by
the semi-interquartile range in the implementation, see [1]_.
References
----------
.. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
problem using the empirical characteristic function", Journal of
Statistical Computation and Simulation 26, p. 177--203, 1986.
.. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
- the Epps-Singleton two-sample test using the empirical characteristic
function", The Stata Journal 9(3), p. 454--465, 2009.
"""
x, y, t = np.asarray(x), np.asarray(y), np.asarray(t)
# check if x and y are valid inputs
if x.ndim > 1:
raise ValueError('x must be 1d, but x.ndim equals {}.'.format(x.ndim))
if y.ndim > 1:
raise ValueError('y must be 1d, but y.ndim equals {}.'.format(y.ndim))
nx, ny = len(x), len(y)
if (nx < 5) or (ny < 5):
raise ValueError('x and y should have at least 5 elements, but len(x) '
'= {} and len(y) = {}.'.format(nx, ny))
if not np.isfinite(x).all():
raise ValueError('x must not contain nonfinite values.')
if not np.isfinite(y).all():
raise ValueError('y must not contain nonfinite values.')
n = nx + ny
# check if t is valid
if t.ndim > 1:
raise ValueError('t must be 1d, but t.ndim equals {}.'.format(t.ndim))
if np.less_equal(t, 0).any():
raise ValueError('t must contain positive elements only.')
# rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
# circular import
from scipy.stats import iqr
sigma = iqr(np.hstack((x, y))) / 2
ts = np.reshape(t, (-1, 1)) / sigma
# covariance estimation of ES test
gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T # shape = (nx, 2*len(t))
gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
cov_x = np.cov(gx.T, bias=True) # the test uses biased cov-estimate
cov_y = np.cov(gy.T, bias=True)
est_cov = (n/nx)*cov_x + (n/ny)*cov_y
est_cov_inv = np.linalg.pinv(est_cov)
r = np.linalg.matrix_rank(est_cov_inv)
if r < 2*len(t):
warnings.warn('Estimated covariance matrix does not have full rank. '
'This indicates a bad choice of the input t and the '
'test might not be consistent.') # see p. 183 in [1]_
# compute test statistic w distributed asympt. as chisquare with df=r
g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))
# apply small-sample correction
if (max(nx, ny) < 25):
corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
w = corr * w
p = chi2.sf(w, r)
return Epps_Singleton_2sampResult(w, p)
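# Illustrative usage sketch (not part of the original module; the docstring
# above has no Examples section). Outputs are omitted on purpose.
#
# >>> import numpy as np
# >>> from scipy.stats import epps_singleton_2samp
# >>> rng = np.random.default_rng(12345)
# >>> x, y = rng.normal(size=100), rng.normal(loc=0.5, size=100)
# >>> statistic, pvalue = epps_singleton_2samp(x, y)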
class CramerVonMisesResult:
def __init__(self, statistic, pvalue):
self.statistic = statistic
self.pvalue = pvalue
def __repr__(self):
return (f"{self.__class__.__name__}(statistic={self.statistic}, "
f"pvalue={self.pvalue})")
def _psi1_mod(x):
"""
psi1 is defined in equation 1.10 in Csorgo, S. and Faraway, J. (1996).
This implements a modified version by excluding the term V(x) / 12
(here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
twice in _cdf_cvm.
Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.
"""
def _ed2(y):
z = y**2 / 4
b = kv(1/4, z) + kv(3/4, z)
return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)
def _ed3(y):
z = y**2 / 4
c = np.exp(-z) / np.sqrt(np.pi)
return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))
def _Ak(k, x):
m = 2*k + 1
sx = 2 * np.sqrt(x)
y1 = x**(3/4)
y2 = x**(5/4)
e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)
return e1 + e2 + e3 + e4 + e5
x = np.asarray(x)
tot = np.zeros_like(x, dtype='float')
cond = np.ones_like(x, dtype='bool')
k = 0
while np.any(cond):
z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
tot[cond] = tot[cond] + z
cond[cond] = np.abs(z) >= 1e-7
k += 1
return tot
def _cdf_cvm_inf(x):
"""
Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).
See equation 1.2 in Csorgo, S. and Faraway, J. (1996).
Implementation based on MAPLE code of Julian Faraway and R code of the
function pCvM in the package goftest (v1.1.1), permission granted
by Adrian Baddeley. Main difference in the implementation: the code
here keeps adding terms of the series until the terms are small enough.
The function is not expected to be accurate for large values of x, say
x > 4, when the cdf is very close to 1.
"""
x = np.asarray(x)
def term(x, k):
# this expression can be found in [2], second line of (1.3)
u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
y = 4*k + 1
q = y**2 / (16*x)
b = kv(0.25, q)
return u * np.sqrt(y) * np.exp(-q) * b
tot = np.zeros_like(x, dtype='float')
cond = np.ones_like(x, dtype='bool')
k = 0
while np.any(cond):
z = term(x[cond], k)
tot[cond] = tot[cond] + z
cond[cond] = np.abs(z) >= 1e-7
k += 1
return tot
def _cdf_cvm(x, n=None):
"""
Calculate the cdf of the Cramér-von Mises statistic for a finite sample
size n. If N is None, use the asymptotic cdf (n=inf).
See equation 1.8 in Csorgo, S. and Faraway, J. (1996) for finite samples,
1.2 for the asymptotic cdf.
The function is not expected to be accurate for large values of x, say
x > 2, when the cdf is very close to 1 and it might return values > 1
in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846.
"""
x = np.asarray(x)
if n is None:
y = _cdf_cvm_inf(x)
else:
# support of the test statistic is [1/(12*n), n/3], see 1.1 in [2]
y = np.zeros_like(x, dtype='float')
sup = (1./(12*n) < x) & (x < n/3.)
# note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12
# therefore, we need to add it here
y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
y[x >= n/3] = 1
if y.ndim == 0:
return y[()]
return y
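# Worked relation (illustrative): `cramervonmises` below obtains its p-value
# from the upper tail of this cdf, i.e. p = 1 - _cdf_cvm(w, n) for observed
# statistic w and sample size n, clipped at 0 to absorb approximation error.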
def cramervonmises(rvs, cdf, args=()):
"""Perform the one-sample Cramér-von Mises test for goodness of fit.
This performs a test of the goodness of fit of a cumulative distribution
function (cdf) :math:`F` compared to the empirical distribution function
:math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
assumed to be independent and identically distributed ([1]_).
The null hypothesis is that the :math:`X_i` have cumulative distribution
:math:`F`.
Parameters
----------
rvs : array_like
A 1-D array of observed values of the random variables :math:`X_i`.
cdf : str or callable
The cumulative distribution function :math:`F` to test the
observations against. If a string, it should be the name of a
distribution in `scipy.stats`. If a callable, that callable is used
to calculate the cdf: ``cdf(x, *args) -> float``.
args : tuple, optional
Distribution parameters. These are assumed to be known; see Notes.
Returns
-------
res : object with attributes
statistic : float
Cramér-von Mises statistic.
pvalue : float
The p-value.
See Also
--------
kstest, cramervonmises_2samp
Notes
-----
.. versionadded:: 1.6.0
The p-value relies on the approximation given by equation 1.8 in [2]_.
It is important to keep in mind that the p-value is only accurate if
one tests a simple hypothesis, i.e. the parameters of the reference
distribution are known. If the parameters are estimated from the data
(composite hypothesis), the computed p-value is not reliable.
References
----------
.. [1] Cramér-von Mises criterion, Wikipedia,
https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
.. [2] Csorgo, S. and Faraway, J. (1996). The Exact and Asymptotic
Distribution of Cramér-von Mises Statistics. Journal of the
Royal Statistical Society, pp. 221-234.
Examples
--------
Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
were, in fact, drawn from the standard normal distribution. We choose a
significance level of alpha=0.05.
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = stats.norm.rvs(size=500, random_state=rng)
>>> res = stats.cramervonmises(x, 'norm')
>>> res.statistic, res.pvalue
(0.07400330012187435, 0.7274595666160468)
The p-value 0.73 exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed sample is drawn from the
standard normal distribution.
Now suppose we wish to check whether the same sample shifted by 2.1 is
consistent with being drawn from a normal distribution with a mean of 2.
>>> y = x + 2.1
>>> res = stats.cramervonmises(y, 'norm', args=(2,))
>>> res.statistic, res.pvalue
(0.49121480855028343, 0.04189256516661377)
Here we have used the `args` keyword to specify the mean (``loc``)
of the normal distribution to test the data against. This is equivalent
to the following, in which we create a frozen normal distribution with
mean 2, then pass its ``cdf`` method as an argument.
>>> frozen_dist = stats.norm(loc=2)
>>> res = stats.cramervonmises(y, frozen_dist.cdf)
>>> res.statistic, res.pvalue
(0.49121480855028343, 0.04189256516661377)
In either case, we would reject the null hypothesis that the observed
sample is drawn from a normal distribution with a mean of 2 (and default
variance of 1) because the p-value 0.04 is less than our chosen
significance level.
"""
if isinstance(cdf, str):
cdf = getattr(distributions, cdf).cdf
vals = np.sort(np.asarray(rvs))
if vals.size <= 1:
raise ValueError('The sample must contain at least two observations.')
if vals.ndim > 1:
raise ValueError('The sample must be one-dimensional.')
n = len(vals)
cdfvals = cdf(vals, *args)
u = (2*np.arange(1, n+1) - 1)/(2*n)
w = 1/(12*n) + np.sum((u - cdfvals)**2)
# avoid small negative values that can occur due to the approximation
p = max(0, 1. - _cdf_cvm(w, n))
return CramerVonMisesResult(statistic=w, pvalue=p)
def _get_wilcoxon_distr(n):
"""
Distribution of counts of the Wilcoxon signed-rank statistic r_plus (sum of
ranks of positive differences).
Returns an array with the counts/frequencies of all the possible ranks
r = 0, ..., n*(n+1)/2
"""
cnt = _wilcoxon_data.COUNTS.get(n)
if cnt is None:
raise ValueError("The exact distribution of the Wilcoxon test "
"statistic is not implemented for n={}".format(n))
return np.array(cnt, dtype=int)
def _Aij(A, i, j):
"""Sum of upper-left and lower right blocks of contingency table."""
# See [2] bottom of page 309
return A[:i, :j].sum() + A[i+1:, j+1:].sum()
def _Dij(A, i, j):
"""Sum of lower-left and upper-right blocks of contingency table."""
# See [2] bottom of page 309
return A[i+1:, :j].sum() + A[:i, j+1:].sum()
def _P(A):
"""Twice the number of concordant pairs, excluding ties."""
# See [2] bottom of page 309
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*_Aij(A, i, j)
return count
def _Q(A):
"""Twice the number of discordant pairs, excluding ties."""
# See [2] bottom of page 309
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*_Dij(A, i, j)
return count
def _a_ij_Aij_Dij2(A):
"""A term that appears in the ASE of Kendall's tau and Somers' D."""
# See [2] section 4: Modified ASEs to test the null hypothesis...
m, n = A.shape
count = 0
for i in range(m):
for j in range(n):
count += A[i, j]*(_Aij(A, i, j) - _Dij(A, i, j))**2
return count
def _tau_b(A):
"""Calculate Kendall's tau-b and p-value from contingency table."""
# See [2] 2.2 and 4.2
# contingency table must be truly 2D
if A.shape[0] == 1 or A.shape[1] == 1:
return np.nan, np.nan
NA = A.sum()
PA = _P(A)
QA = _Q(A)
Sri2 = (A.sum(axis=1)**2).sum()
Scj2 = (A.sum(axis=0)**2).sum()
denominator = (NA**2 - Sri2)*(NA**2 - Scj2)
tau = (PA-QA)/(denominator)**0.5
numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA)
s02_tau_b = numerator/denominator
if s02_tau_b == 0: # Avoid divide by zero
return tau, 0
Z = tau/s02_tau_b**0.5
p = 2*norm.sf(abs(Z)) # 2-sided p-value
return tau, p
def _somers_d(A):
"""Calculate Somers' D and p-value from contingency table."""
# See [3] page 1740
# contingency table must be truly 2D
if A.shape[0] <= 1 or A.shape[1] <= 1:
return np.nan, np.nan
NA = A.sum()
NA2 = NA**2
PA = _P(A)
QA = _Q(A)
Sri2 = (A.sum(axis=1)**2).sum()
d = (PA - QA)/(NA2 - Sri2)
S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA
if S == 0: # Avoid divide by zero
return d, 0
Z = (PA - QA)/(4*(S))**0.5
p = 2*norm.sf(abs(Z)) # 2-sided p-value
return d, p
SomersDResult = make_dataclass("SomersDResult",
("statistic", "pvalue", "table"))
def somersd(x, y=None):
r"""Calculates Somers' D, an asymmetric measure of ordinal association.
Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
correspondence between two rankings. Both statistics consider the
difference between the number of concordant and discordant pairs in two
rankings :math:`X` and :math:`Y`, and both are normalized such that values
close to 1 indicate strong agreement and values close to -1 indicate
strong disagreement. They differ in how they are normalized. To show the
relationship, Somers' :math:`D` can be defined in terms of Kendall's
:math:`\tau_a`:
.. math::
D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}
Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
:math:`n` rankings can also be viewed as an :math:`r \times s` contingency
table in which element :math:`i, j` is the number of rank pairs with rank
:math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
Accordingly, `somersd` also allows the input data to be supplied as a
single, 2D contingency table instead of as two separate, 1D rankings.
Note that the definition of Somers' :math:`D` is asymmetric: in general,
:math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
:math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
variable, and the "column" variable :math:`Y` is dependent. For Somers'
:math:`D(X|Y)`, swap the input lists or transpose the input table.
Parameters
----------
x: array_like
1D array of rankings, treated as the (row) independent variable.
Alternatively, a 2D contingency table.
y: array_like
If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
same length, treated as the (column) dependent variable.
If `x` is 2D, `y` is ignored.
Returns
-------
res : SomersDResult
A `SomersDResult` object with the following fields:
statistic : float
The Somers' :math:`D` statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null
hypothesis is an absence of association, :math:`D=0`.
See notes for more information.
table : 2D array
The contingency table formed from rankings `x` and `y` (or the
provided contingency table, if `x` is a 2D array)
See Also
--------
kendalltau : Calculates Kendall's tau, another correlation measure.
weightedtau : Computes a weighted version of Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
pearsonr : Calculates a Pearson correlation coefficient.
Notes
-----
This function follows the contingency table approach of [2]_ and
[3]_. *p*-values are computed based on an asymptotic approximation of
the test statistic distribution under the null hypothesis :math:`D=0`.
Theoretically, hypothesis tests based on Kendall's :math:`\tau` and Somers'
:math:`D` should be identical.
However, the *p*-values returned by `kendalltau` are based
on the null hypothesis of *independence* between :math:`X` and :math:`Y`
(i.e. the population from which pairs in :math:`X` and :math:`Y` are
sampled contains equal numbers of all possible pairs), which is more
specific than the null hypothesis :math:`D=0` used here. If the null
hypothesis of independence is desired, it is acceptable to use the
*p*-value returned by `kendalltau` with the statistic returned by
`somersd` and vice versa. For more information, see [2]_.
Contingency tables are formatted according to the convention used by
SAS and R: the first ranking supplied (``x``) is the "row" variable, and
the second ranking supplied (``y``) is the "column" variable. This is
opposite the convention of Somers' original paper [1]_.
References
----------
.. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
pp. 799--811, 1962.
.. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
Tests for Correlation in Two-Way Contingency Tables", *Journal of
the American Statistical Association* Vol. 72, No. 358, pp.
309--315, 1977.
.. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
*SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
.. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
Statistics Tutorials and Statistical Guides*,
https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
Accessed July 31, 2020.
Examples
--------
We calculate Somers' D for the example given in [4]_, in which a hotel
chain owner seeks to determine the association between hotel room
cleanliness and customer satisfaction. The independent variable, hotel
room cleanliness, is ranked on an ordinal scale: "below average (1)",
"average (2)", or "above average (3)". The dependent variable, customer
satisfaction, is ranked on a second scale: "very dissatisfied (1)",
"moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
"moderately satisfied (4)", or "very satisfied (5)". 189 customers
respond to the survey, and the results are cast into a contingency table
with the hotel room cleanliness as the "row" variable and customer
satisfaction as the "column" variable.
+-----+-----+-----+-----+-----+-----+
| | (1) | (2) | (3) | (4) | (5) |
+=====+=====+=====+=====+=====+=====+
| (1) | 27 | 25 | 14 | 7 | 0 |
+-----+-----+-----+-----+-----+-----+
| (2) | 7 | 14 | 18 | 35 | 12 |
+-----+-----+-----+-----+-----+-----+
| (3) | 1 | 3 | 2 | 7 | 17 |
+-----+-----+-----+-----+-----+-----+
For example, 27 customers assigned their room a cleanliness ranking of
"below average (1)" and a corresponding satisfaction of "very
dissatisfied (1)". We perform the analysis as follows.
>>> from scipy.stats import somersd
>>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
>>> res = somersd(table)
>>> res.statistic
0.6032766111513396
>>> res.pvalue
1.0007091191074533e-27
The value of the Somers' D statistic is approximately 0.6, indicating
a positive correlation between room cleanliness and customer satisfaction
in the sample.
The *p*-value is very small, indicating a very small probability of
observing such an extreme value of the statistic under the null
hypothesis that the statistic of the entire population (from which
our sample of 189 customers is drawn) is zero. This supports the
alternative hypothesis that the true value of Somers' D for the population
is nonzero.
"""
x, y = np.array(x), np.array(y)
if x.ndim == 1:
if x.size != y.size:
raise ValueError("Rankings must be of equal length.")
table = scipy.stats.contingency.crosstab(x, y)[1]
elif x.ndim == 2:
if np.any(x < 0):
raise ValueError("All elements of the contingency table must be "
"non-negative.")
if np.any(x != x.astype(int)):
raise ValueError("All elements of the contingency table must be "
"integer.")
if x.nonzero()[0].size < 2:
raise ValueError("At least two elements of the contingency table "
"must be nonzero.")
table = x
else:
raise ValueError("x must be either a 1D or 2D array")
d, p = _somers_d(table)
return SomersDResult(d, p, table)
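# Illustrative sketch (not part of the original module): computing the
# reverse direction D(X|Y) by transposing the table, as the docstring above
# suggests.
#
# >>> import numpy as np
# >>> table = np.array([[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]])
# >>> d_xy = somersd(table.T).statistic  # Somers' D(X|Y)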
def _all_partitions(nx, ny):
"""
Partition a set of indices into two fixed-length sets in all possible ways
Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
ny in all possible ways (ignoring order of elements).
"""
z = np.arange(nx+ny)
for c in combinations(z, nx):
x = np.array(c)
mask = np.ones(nx+ny, bool)
mask[x] = False
y = z[mask]
yield x, y
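# Illustrative example (not part of the original module):
#
# >>> [(x.tolist(), y.tolist()) for x, y in _all_partitions(2, 1)]
# [([0, 1], [2]), ([0, 2], [1]), ([1, 2], [0])]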
def _compute_log_combinations(n):
"""Compute all log combination of C(n, k)."""
gammaln_arr = gammaln(np.arange(n + 1) + 1)
return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]
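# Illustrative check (not part of the original module): exponentiating the
# result recovers the binomial coefficients C(n, k) for k = 0..n.
#
# >>> np.exp(_compute_log_combinations(4)).round()
# array([1., 4., 6., 4., 1.])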
BarnardExactResult = make_dataclass(
"BarnardExactResult", [("statistic", float), ("pvalue", float)]
)
def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
r"""Perform a Barnard exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
pooled : bool, optional
Whether to compute score statistic with pooled variance (as in
Student's t-test, for example) or unpooled variance (as in Welch's
t-test). Default is ``True``.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
32 points are enough to reach good precision. More points come at a
performance cost.
Returns
-------
ber : BarnardExactResult
A result object with the following attributes.
statistic : float
The Wald statistic with pooled or unpooled variance, depending
on the user choice of `pooled`.
pvalue : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
which is a uniformly more powerful alternative to Fisher's exact test.
Notes
-----
Barnard's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a more powerful alternative than Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
Barnard's exact test, we can assert three different null hypotheses:
- :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
To compute Barnard's exact test, we use the Wald
statistic [3]_ with pooled or unpooled variance.
Under the default assumption that both variances are equal
(``pooled = True``), the statistic is computed as:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\hat{p}(1 - \hat{p})
(\frac{1}{c_1} +
\frac{1}{c_2})
}
}
with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
:math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
given the assumption that :math:`p_1 = p_2`.
If this assumption is invalid (``pooled = False``), the statistic is:
.. math::
T(X) = \frac{
\hat{p}_1 - \hat{p}_2
}{
\sqrt{
\frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
\frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
}
}
The p-value is then computed as:
.. math::
\sum
\binom{c_1}{x_{11}}
\binom{c_2}{x_{12}}
\pi^{x_{11} + x_{12}}
(1 - \pi)^{t - x_{11} - x_{12}}
where the sum is over all 2x2 contingency tables :math:`X` such that:
* :math:`T(X) \leq T(X_0)` when `alternative` = "less",
* :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
* :math:`T(X) \geq |T(X_0)|` when `alternative` = "two-sided".
Above, :math:`c_1, c_2` are the sums of columns 1 and 2,
and :math:`t` the grand total (the sum of all four table entries).
The returned p-value is the maximum p-value taken over the nuisance
parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
number of sample points.
References
----------
.. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
34.1/2 (1947): 123-138. :doi:`dpgkg3`
.. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
unconditional exact tests for comparing two binomials."
*Cytel Software Corporation* 675 (2003): 1-5.
.. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test
Examples
--------
An example use of Barnard's test is presented in [2]_.
Consider the following example of a vaccine efficacy study
(Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
inoculated with a recombinant DNA influenza vaccine and the remaining 15 were
inoculated with a placebo. Twelve of the 15 subjects in the placebo
group (80%) eventually became infected with influenza whereas for the
vaccine group, only 7 of the 15 subjects (47%) became infected. The
data are tabulated as a 2 x 2 table::
Vaccine Placebo
Yes 7 12
No 8 3
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that the vaccine will lower the chance of
becoming infected with the virus; that is, the probability :math:`p_1` of
catching the virus with the vaccine will be *less than* the probability
:math:`p_2` of catching the virus without the vaccine. Therefore, we call
`barnard_exact` with the ``alternative="less"`` option:
>>> import scipy.stats as stats
>>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
>>> res.statistic
-1.894...
>>> res.pvalue
0.03407...
Under the null hypothesis that the vaccine will not lower the chance of
becoming infected, the probability of obtaining test results at least as
extreme as the observed data is approximately 3.4%. Since this p-value is
less than our chosen significance level, we have evidence to reject
:math:`H_0` in favor of the alternative.
Suppose we had used Fisher's exact test instead:
>>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
>>> pvalue
0.0640...
With the same threshold significance of 5%, we would not have been able
to reject the null hypothesis in favor of the alternative. As stated in
[2]_, Barnard's test is uniformly more powerful than Fisher's exact test
because Barnard's test does not condition on any margin. Fisher's test
should only be used when both sets of marginals are fixed.
"""
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive, "
f"found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
# If both values in column are zero, the p-value is 1 and
# the score's statistic is NaN.
return BarnardExactResult(np.nan, 1.0)
total_col_1, total_col_2 = table.sum(axis=0)
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)
# We need to calculate the wald statistics for each combination of x1 and
# x2.
p1, p2 = x1 / total_col_1, x2 / total_col_2
if pooled:
p = (x1 + x2) / (total_col_1 + total_col_2)
variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
else:
variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2
# To avoid warning when dividing by 0
with np.errstate(divide="ignore", invalid="ignore"):
wald_statistic = np.divide((p1 - p2), np.sqrt(variances))
wald_statistic[p1 == p2] = 0 # Removing NaN values
wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]
if alternative == "two-sided":
index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
elif alternative == "less":
index_arr = wald_statistic <= wald_stat_obs
elif alternative == "greater":
index_arr = wald_statistic >= wald_stat_obs
else:
msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
x1_sum_x2 = x1 + x2
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log p-value, so convert it back before
# returning
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BarnardExactResult(wald_stat_obs, p_value)
BoschlooExactResult = make_dataclass(
"BoschlooExactResult", [("statistic", float), ("pvalue", float)]
)
def boschloo_exact(table, alternative="two-sided", n=32):
r"""Perform Boschloo's exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes section below.
n : int, optional
Number of sampling points used in the construction of the sampling
method. Note that this argument will automatically be converted to
the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. Using more points comes
        at a performance cost.
Returns
-------
ber : BoschlooExactResult
A result object with the following attributes.
statistic : float
The statistic used in Boschloo's test; that is, the p-value
from Fisher's exact test.
pvalue : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
fisher_exact : Fisher exact test on a 2x2 contingency table.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        to Fisher's exact test for 2x2 contingency tables.
Notes
-----
Boschloo's test is an exact test used in the analysis of contingency
tables. It examines the association of two categorical variables, and
is a uniformly more powerful alternative to Fisher's exact test
for 2x2 contingency tables.
Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
where each column stores the binomial experiment, as in the example
below. Let's also define :math:`p_1, p_2` the theoretical binomial
probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Boschloo's exact test, we can test three different null hypotheses:
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
with `alternative` = "less"
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
with `alternative` = "greater"
- :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
with `alternative` = "two-sided" (default one)
Boschloo's exact test uses the p-value of Fisher's exact test as a
statistic, and Boschloo's p-value is the probability under the null
hypothesis of observing such an extreme value of this statistic.
    Boschloo's and Barnard's tests are both uniformly more powerful than
    Fisher's exact test.
.. versionadded:: 1.7.0
References
----------
.. [1] R.D. Boschloo. "Raised conditional level of significance for the
2 x 2-table when testing the equality of two probabilities",
Statistica Neerlandica, 24(1), 1970
.. [2] "Boschloo's test", Wikipedia,
https://en.wikipedia.org/wiki/Boschloo%27s_test
.. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
Human Resource Management, 43(4), 395-407, 2004,
:doi:`10.1002/hrm.20032`.
Examples
--------
In the following example, we consider the article "Employee
attitudes and job satisfaction" [3]_
which reports the results of a survey from 63 scientists and 117 college
professors. Of the 63 scientists, 31 said they were very satisfied with
their jobs, whereas 74 of the college professors were very satisfied
with their work. Is this significant evidence that college
professors are happier with their work than scientists?
The following table summarizes the data mentioned above::
college professors scientists
Very Satisfied 74 31
Dissatisfied 43 32
When working with statistical hypothesis testing, we usually use a
threshold probability or significance level upon which we decide
to reject the null hypothesis :math:`H_0`. Suppose we choose the common
significance level of 5%.
Our alternative hypothesis is that college professors are truly more
satisfied with their work than scientists. Therefore, we expect
:math:`p_1` the proportion of very satisfied college professors to be
greater than :math:`p_2`, the proportion of very satisfied scientists.
We thus call `boschloo_exact` with the ``alternative="greater"`` option:
>>> import scipy.stats as stats
>>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
>>> res.statistic
0.0483...
>>> res.pvalue
0.0355...
    Under the null hypothesis that college professors are not truly more
    satisfied with their work than scientists, the probability of obtaining
    test results at least as extreme as the observed data is approximately
    3.55%.
Since this p-value is less than our chosen significance level, we have
evidence to reject :math:`H_0` in favor of the alternative hypothesis.
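    Since the statistic of Boschloo's test is defined as the p-value of
    Fisher's exact test, we can cross-check it directly (a quick sanity
    check, reusing `fisher_exact` as in the Barnard example above):
    >>> _, pvalue = stats.fisher_exact([[74, 31], [43, 32]], alternative="greater")
    >>> pvalue
    0.0483...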
"""
hypergeom = distributions.hypergeom
if n <= 0:
raise ValueError(
"Number of points `n` must be strictly positive,"
f" found {n!r}"
)
table = np.asarray(table, dtype=np.int64)
if not table.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(table < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in table.sum(axis=0):
        # If both values in a column are zero, the statistic and the p-value
        # are undefined, so NaN is returned for both.
return BoschlooExactResult(np.nan, np.nan)
total_col_1, total_col_2 = table.sum(axis=0)
total = total_col_1 + total_col_2
x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
x1_sum_x2 = x1 + x2
if alternative == 'less':
pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
elif alternative == 'two-sided':
boschloo_less = boschloo_exact(table, alternative="less", n=n)
boschloo_greater = boschloo_exact(table, alternative="greater", n=n)
res = (
boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
else boschloo_greater
)
# Two-sided p-value is defined as twice the minimum of the one-sided
# p-values
pvalue = 2 * res.pvalue
return BoschlooExactResult(res.statistic, pvalue)
else:
msg = (
f"`alternative` should be one of {'two-sided', 'less', 'greater'},"
f" found {alternative!r}"
)
raise ValueError(msg)
fisher_stat = pvalues[table[0, 0], table[0, 1]]
    # fisher_stat * (1+1e-13) guards us against small numerical error. It is
    # equivalent to np.isclose with a relative tol of 1e-13 and an absolute
    # tol of 0. For more thorough explanations, see gh-14178.
index_arr = pvalues <= fisher_stat * (1+1e-13)
x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
x1_log_comb = _compute_log_combinations(total_col_1)
x2_log_comb = _compute_log_combinations(total_col_2)
x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
result = shgo(
_get_binomial_log_p_value_with_nuisance_param,
args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
bounds=((0, 1),),
n=n,
sampling_method="sobol",
)
# result.fun is the negative log pvalue and therefore needs to be
# changed before return
p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
return BoschlooExactResult(fisher_stat, p_value)
def _get_binomial_log_p_value_with_nuisance_param(
nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
):
r"""
Compute the log pvalue in respect of a nuisance parameter considering
a 2x2 sample space.
Parameters
----------
nuisance_param : float
nuisance parameter used in the computation of the maximisation of
the p-value. Must be between 0 and 1
x1_sum_x2 : ndarray
Sum of x1 and x2 inside barnard_exact
x1_sum_x2_log_comb : ndarray
sum of the log combination of x1 and x2
index_arr : ndarray of boolean
Returns
-------
p_value : float
Return the maximum p-value considering every nuisance paramater
between 0 and 1
Notes
-----
Both Barnard's test and Boschloo's test iterate over a nuisance parameter
:math:`\pi \in [0, 1]` to find the maximum p-value. To search this
maxima, this function return the negative log pvalue with respect to the
nuisance parameter passed in params. This negative log p-value is then
used in `shgo` to find the minimum negative pvalue which is our maximum
pvalue.
Also, to compute the different combination used in the
p-values' computation formula, this function uses `gammaln` which is
more tolerant for large value than `scipy.special.comb`. `gammaln` gives
a log combination. For the little precision loss, performances are
improved a lot.
"""
t1, t2 = x1_sum_x2.shape
n = t1 + t2 - 2
with np.errstate(divide="ignore", invalid="ignore"):
log_nuisance = np.log(
nuisance_param,
out=np.zeros_like(nuisance_param),
where=nuisance_param >= 0,
)
log_1_minus_nuisance = np.log(
1 - nuisance_param,
out=np.zeros_like(nuisance_param),
where=1 - nuisance_param >= 0,
)
nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0
nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0
tmp_log_values_arr = (
x1_sum_x2_log_comb
+ nuisance_power_x1_x2
+ nuisance_power_n_minus_x1_x2
)
tmp_values_from_index = tmp_log_values_arr[index_arr]
    # To avoid dividing by zero in the log function and getting inf values,
    # values are centered according to the max
    max_value = tmp_values_from_index.max()
    # For better precision, the log of the p-value is taken here. The p-value
    # lies in the interval [0, 1]; passing it through the log maps it to the
    # much larger interval [-inf, 0], which helps us achieve better precision.
with np.errstate(divide="ignore", invalid="ignore"):
log_probs = np.exp(tmp_values_from_index - max_value).sum()
log_pvalue = max_value + np.log(
log_probs,
out=np.full_like(log_probs, -np.inf),
where=log_probs > 0,
)
    # Since shgo finds the minimum, the negative log p-value is returned
return -log_pvalue
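# Note: `_compute_log_combinations` (used by both exact tests above) is
# defined elsewhere in this module. A minimal sketch consistent with the
# `gammaln` approach described in the Notes (an assumption, not necessarily
# the verbatim implementation):
#
#     from scipy.special import gammaln
#     def _compute_log_combinations(n):
#         # log(C(n, k)) for k = 0..n, computed via log-gamma
#         gammaln_arr = gammaln(np.arange(n + 1) + 1)
#         return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]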
def _pval_cvm_2samp_exact(s, nx, ny):
"""
Compute the exact p-value of the Cramer-von Mises two-sample test
for a given value s (float) of the test statistic by enumerating
all possible combinations. nx and ny are the sizes of the samples.
"""
rangex = np.arange(nx)
rangey = np.arange(ny)
us = []
# x and y are all possible partitions of ranks from 0 to nx + ny - 1
# into two sets of length nx and ny
# Here, ranks are from 0 to nx + ny - 1 instead of 1 to nx + ny, but
# this does not change the value of the statistic.
for x, y in _all_partitions(nx, ny):
# compute the statistic
u = nx * np.sum((x - rangex)**2)
u += ny * np.sum((y - rangey)**2)
us.append(u)
# compute the values of u and the frequencies
u, cnt = np.unique(us, return_counts=True)
return np.sum(cnt[u >= s]) / np.sum(cnt)
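# Note: `_all_partitions` is defined elsewhere in scipy. A minimal sketch
# with the same semantics (an assumption about the exact implementation):
# it yields every split of the pooled ranks 0..nx+ny-1 into a sorted x-set
# of size nx and a sorted y-set of size ny:
#
#     from itertools import combinations
#     def _all_partitions(nx, ny):
#         z = np.arange(nx + ny)
#         for c in combinations(range(nx + ny), nx):
#             x = np.array(c)
#             mask = np.ones(nx + ny, dtype=bool)
#             mask[x] = False
#             yield x, z[mask]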
def cramervonmises_2samp(x, y, method='auto'):
"""Perform the two-sample Cramér-von Mises test for goodness of fit.
This is the two-sample version of the Cramér-von Mises test ([1]_):
for two independent samples :math:`X_1, ..., X_n` and
:math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
come from the same (unspecified) continuous distribution.
Parameters
----------
x : array_like
A 1-D array of observed values of the random variables :math:`X_i`.
y : array_like
A 1-D array of observed values of the random variables :math:`Y_i`.
method : {'auto', 'asymptotic', 'exact'}, optional
The method used to compute the p-value, see Notes for details.
The default is 'auto'.
Returns
-------
res : object with attributes
statistic : float
Cramér-von Mises statistic.
pvalue : float
The p-value.
See Also
--------
cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp
Notes
-----
.. versionadded:: 1.7.0
The statistic is computed according to equation 9 in [2]_. The
calculation of the p-value depends on the keyword `method`:
- ``asymptotic``: The p-value is approximated by using the limiting
distribution of the test statistic.
- ``exact``: The exact p-value is computed by enumerating all
possible combinations of the test statistic, see [2]_.
The exact calculation will be very slow even for moderate sample
sizes as the number of combinations increases rapidly with the
size of the samples. If ``method=='auto'``, the exact approach
is used if both samples contain less than 10 observations,
otherwise the asymptotic distribution is used.
If the underlying distribution is not continuous, the p-value is likely to
be conservative (Section 6.2 in [3]_). When ranking the data to compute
the test statistic, midranks are used if there are ties.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
.. [2] Anderson, T.W. (1962). On the distribution of the two-sample
Cramer-von-Mises criterion. The Annals of Mathematical
Statistics, pp. 1148-1159.
.. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.
Examples
--------
Suppose we wish to test whether two samples generated by
``scipy.stats.norm.rvs`` have the same distribution. We choose a
significance level of alpha=0.05.
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
>>> x = stats.norm.rvs(size=100, random_state=rng)
>>> y = stats.norm.rvs(size=70, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y)
>>> res.statistic, res.pvalue
(0.29376470588235293, 0.1412873014573014)
The p-value exceeds our chosen significance level, so we do not
reject the null hypothesis that the observed samples are drawn from the
same distribution.
For small sample sizes, one can compute the exact p-values:
>>> x = stats.norm.rvs(size=7, random_state=rng)
>>> y = stats.t.rvs(df=2, size=6, random_state=rng)
>>> res = stats.cramervonmises_2samp(x, y, method='exact')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.31643356643356646)
The p-value based on the asymptotic distribution is a good approximation
even though the sample size is small.
>>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
>>> res.statistic, res.pvalue
(0.197802197802198, 0.2966041181527128)
Independent of the method, one would not reject the null hypothesis at the
chosen significance level in this example.
"""
xa = np.sort(np.asarray(x))
ya = np.sort(np.asarray(y))
if xa.size <= 1 or ya.size <= 1:
raise ValueError('x and y must contain at least two observations.')
if xa.ndim > 1 or ya.ndim > 1:
raise ValueError('The samples must be one-dimensional.')
if method not in ['auto', 'exact', 'asymptotic']:
raise ValueError('method must be either auto, exact or asymptotic.')
nx = len(xa)
ny = len(ya)
if method == 'auto':
if max(nx, ny) > 10:
method = 'asymptotic'
else:
method = 'exact'
# get ranks of x and y in the pooled sample
z = np.concatenate([xa, ya])
# in case of ties, use midrank (see [1])
r = scipy.stats.rankdata(z, method='average')
rx = r[:nx]
ry = r[nx:]
# compute U (eq. 10 in [2])
u = nx * np.sum((rx - np.arange(1, nx+1))**2)
u += ny * np.sum((ry - np.arange(1, ny+1))**2)
# compute T (eq. 9 in [2])
k, N = nx*ny, nx + ny
t = u / (k*N) - (4*k - 1)/(6*N)
if method == 'exact':
p = _pval_cvm_2samp_exact(u, nx, ny)
else:
# compute expected value and variance of T (eq. 11 and 14 in [2])
et = (1 + 1/N)/6
vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k)
vt = vt / (45 * N**2 * 4 * k)
# computed the normalized statistic (eq. 15 in [2])
tn = 1/6 + (t - et) / np.sqrt(45 * vt)
# approximate distribution of tn with limiting distribution
# of the one-sample test statistic
        # if tn < 0.003, then _cdf_cvm_inf(tn) < 1.28e-18, so return 1.0 directly
if tn < 0.003:
p = 1.0
else:
p = max(0, 1. - _cdf_cvm_inf(tn))
return CramerVonMisesResult(statistic=t, pvalue=p)
| /examples/04_openmdao/betz_limit.py | WISDEM/WISDEM | Apache-2.0 | Python |
# Import the OpenMDAO library
import openmdao.api as om
# --
# Specify the Actuator Disc theory in a derived OpenMDAO class
class ActuatorDisc(om.ExplicitComponent):
# Inputs and Outputs
def setup(self):
        # Inputs into the model
        self.add_input("a", 0.5, desc="Induced velocity factor")
self.add_input("Area", 10.0, units="m**2", desc="Rotor disc area")
self.add_input("rho", 1.225, units="kg/m**3", desc="Air density")
self.add_input("Vu", 10.0, units="m/s", desc="Freestream air velocity, upstream of rotor")
# Outputs
self.add_output("Vr", 0.0, units="m/s", desc="Air velocity at rotor exit plane")
self.add_output("Vd", 0.0, units="m/s", desc="Slipstream air velocity, downstream of rotor")
self.add_output("Ct", 0.0, desc="Thrust coefficient")
self.add_output("Cp", 0.0, desc="Power coefficient")
self.add_output("power", 0.0, units="W", desc="Power produced by the rotor")
self.add_output("thrust", 0.0, units="m/s")
# Declare which outputs are dependent on which inputs
self.declare_partials("Vr", ["a", "Vu"])
self.declare_partials("Vd", "a")
self.declare_partials("Ct", "a")
self.declare_partials("thrust", ["a", "Area", "rho", "Vu"])
self.declare_partials("Cp", "a")
self.declare_partials("power", ["a", "Area", "rho", "Vu"])
# --------
# Core theory
def compute(self, inputs, outputs):
a = inputs["a"]
Vu = inputs["Vu"]
rho = inputs["rho"]
Area = inputs["Area"]
qA = 0.5 * rho * Area * Vu**2
outputs["Vd"] = Vd = Vu * (1 - 2 * a)
outputs["Vr"] = 0.5 * (Vu + Vd)
outputs["Ct"] = Ct = 4 * a * (1 - a)
outputs["thrust"] = Ct * qA
outputs["Cp"] = Cp = Ct * (1 - a)
outputs["power"] = Cp * qA * Vu
# --------
# Derivatives of outputs wrt inputs
def compute_partials(self, inputs, J):
a = inputs["a"]
Vu = inputs["Vu"]
Area = inputs["Area"]
rho = inputs["rho"]
a_times_area = a * Area
one_minus_a = 1.0 - a
a_area_rho_vu = a_times_area * rho * Vu
J["Vr", "a"] = -Vu
J["Vr", "Vu"] = one_minus_a
J["Vd", "a"] = -2.0 * Vu
J["Ct", "a"] = 4.0 - 8.0 * a
J["thrust", "a"] = 0.5 * rho * Vu**2 * Area * J["Ct", "a"]
J["thrust", "Area"] = 2.0 * Vu**2 * a * rho * one_minus_a
J["thrust", "Vu"] = 4.0 * a_area_rho_vu * one_minus_a
J["Cp", "a"] = 4.0 * a * (2.0 * a - 2.0) + 4.0 * one_minus_a**2
J["power", "a"] = (
2.0 * Area * Vu**3 * a * rho * (2.0 * a - 2.0) + 2.0 * Area * Vu**3 * rho * one_minus_a**2
)
J["power", "Area"] = 2.0 * Vu**3 * a * rho * one_minus_a**2
J["power", "rho"] = 2.0 * a_times_area * Vu**3 * (one_minus_a) ** 2
J["power", "Vu"] = 6.0 * Area * Vu**2 * a * rho * one_minus_a**2
# -- end the class
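# Because the partial derivatives above are hand-coded, it can be worth
# verifying them against finite differences. A minimal sketch using standard
# OpenMDAO checks (kept as a comment so the script's behavior is unchanged):
#
#     check_prob = om.Problem()
#     check_prob.model.add_subsystem("a_disk", ActuatorDisc())
#     check_prob.setup()
#     check_prob.run_model()
#     check_prob.check_partials(compact_print=True)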
# Optional: include underlying model in a group with Independent Variables
class Betz(om.Group):
"""
Group containing the actuator disc equations for deriving the Betz limit.
"""
def setup(self):
indeps = self.add_subsystem("indeps", om.IndepVarComp(), promotes=["*"])
indeps.add_output("a", 0.5)
indeps.add_output("Area", 10.0, units="m**2")
indeps.add_output("rho", 1.225, units="kg/m**3")
indeps.add_output("Vu", 10.0, units="m/s")
self.add_subsystem("a_disk", ActuatorDisc(), promotes=["a", "Area", "rho", "Vu"])
# --------
# Instantiate the model
prob = om.Problem()
prob.model = Betz()
# -----
# Specify the optimization 'driver'
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["optimizer"] = "SLSQP"
# -----
# Assign objective and design variables
prob.model.add_design_var("a", lower=0.0, upper=1.0)
prob.model.add_design_var("Area", lower=0.0, upper=1.0)
prob.model.add_objective("a_disk.Cp", scaler=-1.0)
# -----
# Execute!
prob.setup()
prob.run_driver()
# --------
# Display the output
print("Coefficient of power Cp = ", prob["a_disk.Cp"])
print("Induction factor a =", prob["a"])
print("Rotor disc Area =", prob["Area"], "m^2")
prob.model.list_inputs(units=True)
prob.model.list_outputs(units=True)
# --------
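# For reference, the optimization should recover the Betz limit: the power
# coefficient Cp converges to 16/27 ≈ 0.5926 at an induction factor a = 1/3.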
| /sample/WRS2018/script/T1M-AizuSpiderSA.py | choreonoid/choreonoid | Zlib, MIT | Python |
import WRSUtil
WRSUtil.loadProject(
"MultiSceneViews", "T1M", "AGXSimulator", "AizuSpiderSA")
| /pygrim/formulas/coulomb_wave.py | fredrik-johansson/fungrim | MIT | Python |
# -*- coding: utf-8 -*-
from ..expr import *
def_Topic(
Title("Coulomb wave functions"),
Section("Definitions"),
Entries(
"8b2cb9",
"f25e3d",
"16a4e7",
"2b12f4",
"512063",
),
Section("Differential equations"),
Entries(
"ad8df6",
"74274a",
),
Section("Connection formulas"),
Entries(
"192a3e",
"8547ab",
"01af55",
"e20938",
"304559",
),
Section("Normalization functions"),
Entries(
"4a4739",
"ed2bf6",
),
Section("Derivatives"),
Entries(
"a51a4b",
"2fec14",
"07a654",
"faa118",
"eca10b",
),
Section("Hypergeometric representations"),
Subsection("Kummer function"),
Entries(
"d280c5",
"2a2f18",
),
Subsection("Tricomi function"),
Entries(
"1976e1",
"e2efbf",
"8027e8",
"69e5fb",
"bcdfc6",
"f0414a",
"781eae",
"0cc301",
),
)
# Definitions
make_entry(ID("8b2cb9"),
SymbolDefinition(CoulombF, CoulombF(ell,eta,z), "Regular Coulomb wave function"))
make_entry(ID("f25e3d"),
SymbolDefinition(CoulombG, CoulombG(ell,eta,z), "Irregular Coulomb wave function"))
make_entry(ID("16a4e7"),
SymbolDefinition(CoulombH, CoulombH(omega,ell,eta,z), "Outgoing and ingoing Coulomb wave function"))
make_entry(ID("2b12f4"),
SymbolDefinition(CoulombC, CoulombC(ell,eta), "Coulomb wave function Gamow factor"))
make_entry(ID("512063"),
SymbolDefinition(CoulombSigma, CoulombSigma(ell,eta), "Coulomb wave function phase shift"))
coulomb_param_condition = And(NotElement(1+ell+ConstI*eta, ZZLessEqual(0)), NotElement(1+ell-ConstI*eta, ZZLessEqual(0)))
# Differential equations
C1 = Subscript(c, 1)
C2 = Subscript(c, 2)
make_entry(ID("ad8df6"),
Formula(Where(Equal(ComplexDerivative(y(z), For(z, z, 2)) + (1 - (2*eta)/z - (ell*(ell+1))/z**2)*y(z), 0), Equal(y(z), C1*CoulombF(ell,eta,z) + C2*CoulombG(ell,eta,z)))),
Variables(ell, eta, z, C1, C2),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))), Element(C1, CC), Element(C2, CC))))
make_entry(ID("74274a"),
Formula(Equal(CoulombG(ell,eta,z) * Parentheses(ComplexDerivative(CoulombF(ell,eta,z), For(z, z, 1))) -
Parentheses(ComplexDerivative(CoulombG(ell,eta,z), For(z, z, 1))) * CoulombF(ell,eta,z), 1)),
Variables(ell, eta, z),
Assumptions(And(coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
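# A quick numerical sanity check of the Wronskian identity "74274a" above,
# G(l,eta,z) * F'(l,eta,z) - G'(l,eta,z) * F(l,eta,z) = 1, as a sketch using
# mpmath (an assumed external dependency, not part of this module):
#
#     from mpmath import mp, coulombf, coulombg, diff
#     mp.dps = 25
#     ell, eta, z = 1, mp.mpf("0.5"), mp.mpf("2.0")
#     w = (coulombg(ell, eta, z) * diff(lambda t: coulombf(ell, eta, t), z)
#          - diff(lambda t: coulombg(ell, eta, t), z) * coulombf(ell, eta, z))
#     # w should agree with 1 to high precision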
# Connection formulas
make_entry(ID("192a3e"),
Formula(Equal(CoulombF(ell, eta, z), (CoulombH(1, ell, eta, z) - CoulombH(-1, ell, eta, z))/(2*ConstI))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("8547ab"),
Formula(Equal(CoulombG(ell, eta, z), (CoulombH(1, ell, eta, z) + CoulombH(-1, ell, eta, z))/2)),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("01af55"),
Formula(Equal(CoulombH(omega, ell, eta, z), CoulombG(ell, eta, z) + omega*ConstI*CoulombF(ell, eta, z))),
Variables(omega, ell, eta, z),
Assumptions(And(Element(omega, Set(-1,1)), Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("e20938"),
Formula(Where(Equal(CoulombG(ell, eta, z),
(CoulombF(ell,eta,z)*Cos(chi) - CoulombF(-ell-1,eta,z))/Sin(chi)),
Equal(chi, CoulombSigma(ell,eta) - CoulombSigma(-ell-1,eta) - (ell+Div(1,2))*Pi))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC),
NotElement(2*ell, ZZ),
NotElement(1+ell+ConstI*eta, ZZLessEqual(0)),
NotElement(1+ell-ConstI*eta, ZZLessEqual(0)),
NotElement(-ell+ConstI*eta, ZZLessEqual(0)),
NotElement(-ell-ConstI*eta, ZZLessEqual(0)), Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("304559"),
Formula(Where(Equal(CoulombH(omega, ell, eta, z),
(CoulombF(ell,eta,z)*Exp(omega*ConstI*chi) - CoulombF(-ell-1,eta,z))/Sin(chi)),
Equal(chi, CoulombSigma(ell,eta) - CoulombSigma(-ell-1,eta) - (ell+Div(1,2))*Pi))),
Variables(omega, ell, eta, z),
Assumptions(And(Element(omega, Set(-1,1)), Element(ell, CC), Element(eta, CC),
NotElement(2*ell, ZZ),
NotElement(1+ell+ConstI*eta, ZZLessEqual(0)),
NotElement(1+ell-ConstI*eta, ZZLessEqual(0)),
NotElement(-ell+ConstI*eta, ZZLessEqual(0)),
NotElement(-ell-ConstI*eta, ZZLessEqual(0)), Element(z, SetMinus(CC, Set(0))))))
# Normalization functions
make_entry(ID("4a4739"),
Formula(Equal(CoulombC(ell, eta), (2**ell / Gamma(2*ell+2)) * Exp((LogGamma(1+ell+ConstI*eta) + LogGamma(1+ell-ConstI*eta) - Pi*eta)/2))),
Variables(ell, eta),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition)))
make_entry(ID("ed2bf6"),
Formula(Equal(CoulombSigma(ell, eta), (LogGamma(1+ell+ConstI*eta) - LogGamma(1+ell-ConstI*eta))/(2*ConstI))),
Variables(ell, eta),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition)))
# Derivatives
make_entry(ID("a51a4b"),
Formula(Equal(ComplexDerivative(CoulombF(ell, eta, z), For(z, z, 1)),
((ell+1)/z + eta/(ell+1)) * CoulombF(ell,eta,z) - ((Sqrt(1+ell+ConstI*eta)*Sqrt(1+ell-ConstI*eta))/(ell+1)) * CoulombF(ell+1,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), NotEqual(ell, -1), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
make_entry(ID("2fec14"),
Formula(Equal(ComplexDerivative(CoulombG(ell, eta, z), For(z, z, 1)),
((ell+1)/z + eta/(ell+1)) * CoulombG(ell,eta,z) - ((Sqrt(1+ell+ConstI*eta)*Sqrt(1+ell-ConstI*eta))/(ell+1)) * CoulombG(ell+1,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), NotEqual(ell, -1), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
make_entry(ID("07a654"),
Formula(Where(Equal(ComplexDerivative(f(z), For(z, z, 2)),
((2*eta)/z + (ell*(ell+1))/z**2 - 1) * f(z)), Equal(f(z), C1 * CoulombF(ell,eta,z) + C2 * CoulombG(ell,eta,z)))),
Variables(ell, eta, C1, C2, z),
Assumptions(And(Element(C1, CC), Element(C2, CC), Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
make_entry(ID("faa118"),
Formula(Where(Equal(ComplexDerivative(f(z), For(z, z, 3)),
((2*eta)/z + (ell*(ell+1))/z**2 - 1) * ComplexDerivative(f(z), For(z, z, 1))
- 2*(eta/z**2 + (ell*(ell+1))/z**3) * f(z)), Equal(f(z), C1 * CoulombF(ell,eta,z) + C2 * CoulombG(ell,eta,z)))),
Variables(ell, eta, C1, C2, z),
Assumptions(And(Element(C1, CC), Element(C2, CC), Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
make_entry(ID("eca10b"),
Formula(Where(Equal(
ComplexDerivative(f(z), For(z, z, r+4)) / Factorial(r+4),
(-1/(z**2 * (r**2+7*r+12))) *
(2*(r**2+5*r+6)*z*(ComplexDerivative(f(z), For(z, z, r+3)) / Factorial(r+3)) +
(r**2+3*r + z**2 - 2*z*eta - ell*(ell+1) + 2)*(ComplexDerivative(f(z), For(z, z, r+2)) / Factorial(r+2)) +
2*(z-eta) * (ComplexDerivative(f(z), For(z, z, r+1)) / Factorial(r+1)) +
(ComplexDerivative(f(z), For(z, z, r)) / Factorial(r)))),
Equal(f(z), C1 * CoulombF(ell,eta,z) + C2 * CoulombG(ell,eta,z)))),
Variables(r, ell, eta, C1, C2, z),
Assumptions(And(Element(r, ZZGreaterEqual(0)),
Element(C1, CC), Element(C2, CC), Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, OpenClosedInterval(-Infinity, 0))))))
# Hypergeometric representations
make_entry(ID("d280c5"),
Formula(Equal(CoulombF(ell, eta, z), CoulombC(ell,eta) * z**(ell+1) * Exp(omega*ConstI*z) * Hypergeometric1F1(1+ell+omega*ConstI*eta, 2*ell+2, -(2*omega*ConstI*z)))),
Variables(omega, ell, eta, z),
Assumptions(And(Element(omega, Set(-1,1)), Element(ell, CC), Element(eta, CC), coulomb_param_condition, NotElement(2*ell+2, ZZLessEqual(0)), Element(z, SetMinus(CC, Set(0))))))
make_entry(ID("2a2f18"),
Formula(Equal(CoulombF(ell, eta, z), 2**ell * Exp((LogGamma(1+ell+ConstI*eta) + LogGamma(1+ell-ConstI*eta) - Pi*eta)/2) * z**(ell+1) * Exp(omega*ConstI*z) * Hypergeometric1F1Regularized(1+ell+omega*ConstI*eta, 2*ell+2, -(2*omega*ConstI*z)))),
Variables(omega, ell, eta, z),
Assumptions(And(Element(omega, Set(-1,1)), Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))))))
_U1 = HypergeometricUStar(u, 2*ell+2, -(2*ConstI*z))
_U2 = HypergeometricUStar(v, 2*ell+2, 2*ConstI*z)
make_entry(ID("1976e1"),
Formula(Equal(CoulombF(ell, eta, z), Where(
2**ell * z**(ell+1) * Exp((LogGamma(u)+LogGamma(v)-Pi*eta)/2) *
(
((Exp(ConstI*z) * _U1) / ((2*ConstI*z)**u * Gamma(v))) +
((Exp(-ConstI*z) * _U2) / ((-(2*ConstI*z))**v * Gamma(u)))
),
Equal(u, 1+ell+ConstI*eta), Equal(v, 1+ell-ConstI*eta)))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))))))
_H1 = (2*z)**(-(ConstI*eta)) * Exp(ConstI*(z-ell*Pi/2 + CoulombSigma(ell,eta))) * HypergeometricUStar(1+ell+ConstI*eta, 2*ell+2, -(2*ConstI*z))
_H2 = (2*z)**((ConstI*eta)) * Exp(-(ConstI*(z-ell*Pi/2 + CoulombSigma(ell,eta)))) * HypergeometricUStar(1+ell-ConstI*eta, 2*ell+2, (2*ConstI*z))
make_entry(ID("e2efbf"),
Formula(Equal(CoulombG(ell,eta,z), Div(1,2) * (_H1 + _H2))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Greater(Re(z), 0))))
make_entry(ID("8027e8"),
Formula(Equal(CoulombG(ell,eta,z), _H1 - ConstI*CoulombF(ell,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(GreaterEqual(Im(z), 0), Greater(Re(z), 0)))))
make_entry(ID("69e5fb"),
Formula(Equal(CoulombG(ell,eta,z), _H2 + ConstI*CoulombF(ell,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(Less(Im(z), 0), GreaterEqual(Re(z), 0)))))
make_entry(ID("bcdfc6"),
Formula(Equal(CoulombH(1,ell,eta,z), _H1)),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(GreaterEqual(Im(z), 0), Greater(Re(z), 0)))))
make_entry(ID("f0414a"),
Formula(Equal(CoulombH(1,ell,eta,z), _H2 + 2*ConstI*CoulombF(ell,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(Less(Im(z), 0), GreaterEqual(Re(z), 0)))))
make_entry(ID("781eae"),
Formula(Equal(CoulombH(-1,ell,eta,z), _H2)),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(Less(Im(z), 0), GreaterEqual(Re(z), 0)))))
make_entry(ID("0cc301"),
Formula(Equal(CoulombH(-1,ell,eta,z), _H1 - 2*ConstI*CoulombF(ell,eta,z))),
Variables(ell, eta, z),
Assumptions(And(Element(ell, CC), Element(eta, CC), coulomb_param_condition, Element(z, SetMinus(CC, Set(0))),
Or(GreaterEqual(Im(z), 0), Greater(Re(z), 0)))))
| /quarkchain/cluster/neighbor.py | QuarkChain/pyquarkchain | MIT | Python |
from quarkchain.core import Branch
from quarkchain.utils import is_p2, check
def is_neighbor(b1: Branch, b2: Branch, shard_size: int):
"""A naive algorithm to decide neighbor relationship
TODO: a better algorithm, because the current one ensures 32 neighbors ONLY when there are 2^32 shards
"""
if shard_size <= 32:
return True
if b1.get_chain_id() == b2.get_chain_id():
return is_p2(abs(b1.get_shard_id() - b2.get_shard_id()))
if b1.get_shard_id() == b2.get_shard_id():
return is_p2(abs(b1.get_chain_id() - b2.get_chain_id()))
return False
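# Example: with shard_size > 32 and two branches on the same chain, the
# branches are neighbors exactly when their shard-id distance is a power of
# two, e.g. shards 0 and 4 are neighbors (4 == 2**2), shards 0 and 5 are not.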
| /modulos/var_es.py | Zian25/UniTools-Termux | CC0-1.0 | Python |
import sys
import random
from Zawiencom import *
from osint import *
from dos import *
from phishing import *
from exploit import *
from bruteforce import *
from autoinstalador import *
def pedido_7_es():
os.system("clear")
print ("En breve...\n")
print("¿Qué será??")
print("Un terminal con diversas funciones Ej: Exploits/Scanners, webscanners. ¡Terminal todo en uno para los pentesters!\n")
print("¿Cuándo?")
print("Hasta ahora no hay una fecha específica, vendrá en la versión 1.2!\n")
input("Apretar algo para volver: ")
restart_program()
def pedido_9_es():
print ("Comprobando...")
att()
restart_program()
dicas_es_init = ["[Consejos] Nuevas actualizaciones están llegando", "[Consejos] Los errores encontrados en el programa pueden ser reportados en github!"
,"[Consejos] No te olvides de comprobar las actualizaciones!", "[Consejos] Puedes sugerir una función en la pestaña 'Discusión' de github"]
dicas_menu1_es = ["[Consejos] La recopilación de información es esencial para el éxito de un ataque!", "[Consejos] Nmap es un buen comienzo!", "[Consejos] Camufló su IP?"]
dicas_menu2_es = ["[Consejos] Se recomienda un buen Internet"]
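# English glosses of the Spanish UI strings above (this is the intentionally
# Spanish-locale module, hence the "_es" suffix, so the literals are kept):
# - pedido_7_es announces an upcoming all-in-one pentesting terminal feature
#   ("Exploits/Scanners, webscanners"), planned for version 1.2 with no
#   specific date yet.
# - dicas_es_init: "New updates are coming", "Bugs found in the program can
#   be reported on GitHub!", "Don't forget to check for updates!", "You can
#   suggest a feature in the 'Discussion' tab on GitHub".
# - dicas_menu1_es: "Information gathering is essential for a successful
#   attack!", "Nmap is a good start!", "Did you camouflage your IP?".
# - dicas_menu2_es: "A good Internet connection is recommended".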
| /workflows/pipe-common/pipeline/hpc/logger.py | epam/cloud-pipeline | Apache-2.0 | Python |
# Copyright 2017-2023 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipeline.log.logger import PrintLogger
class Logger:
inner = PrintLogger()
@staticmethod
def info(message, crucial=False, trace=False):
if crucial:
Logger.inner.info(message, trace=trace)
else:
Logger.inner.debug(message, trace=trace)
@staticmethod
def warn(message, crucial=False, trace=False):
if crucial:
Logger.inner.warning(message, trace=trace)
else:
Logger.inner.debug(message, trace=trace)
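# Typical usage (a sketch based on the methods above): routine messages stay
# at debug level unless explicitly promoted with crucial=True:
#
#     Logger.info("Scaling the worker pool")              # debug level
#     Logger.warn("Grid queue is growing", crucial=True)  # warning level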
| /electrum_ltc/lnurl.py | pooler/electrum-ltc | MIT | Python |
"""Module for lnurl-related functionality."""
# https://github.com/sipa/bech32/tree/master/ref/python
# https://github.com/lnbits/lnurl
import asyncio
import json
from typing import Callable, Optional, NamedTuple, Any, TYPE_CHECKING
import re
import aiohttp.client_exceptions
from aiohttp import ClientResponse
from electrum_ltc.segwit_addr import bech32_decode, Encoding, convertbits
from electrum_ltc.lnaddr import LnDecodeException
from electrum_ltc.network import Network
if TYPE_CHECKING:
from collections.abc import Coroutine
class LNURLError(Exception):
pass
def decode_lnurl(lnurl: str) -> str:
"""Converts bech32 encoded lnurl to url."""
decoded_bech32 = bech32_decode(
lnurl, ignore_long_length=True
)
hrp = decoded_bech32.hrp
data = decoded_bech32.data
if decoded_bech32.encoding is None:
raise LnDecodeException("Bad bech32 checksum")
if decoded_bech32.encoding != Encoding.BECH32:
raise LnDecodeException("Bad bech32 encoding: must be using vanilla BECH32")
if not hrp.startswith("lnurl"):
raise LnDecodeException("Does not start with lnurl")
data = convertbits(data, 5, 8, False)
url = bytes(data).decode("utf-8")
return url
class LNURL6Data(NamedTuple):
callback_url: str
max_sendable_sat: int
min_sendable_sat: int
metadata_plaintext: str
comment_allowed: int
#tag: str = "payRequest"
async def _request_lnurl(url: str) -> dict:
"""Requests payment data from a lnurl."""
try:
response = await Network.async_send_http_on_proxy("get", url, timeout=10)
response = json.loads(response)
except asyncio.TimeoutError as e:
raise LNURLError("Server did not reply in time.") from e
except aiohttp.client_exceptions.ClientError as e:
raise LNURLError(f"Client error: {e}") from e
except json.JSONDecodeError:
raise LNURLError(f"Invalid response from server")
# TODO: handling of specific client errors
if "metadata" in response:
response["metadata"] = json.loads(response["metadata"])
status = response.get("status")
if status and status == "ERROR":
raise LNURLError(f"LNURL request encountered an error: {response['reason']}")
return response
async def request_lnurl(url: str) -> LNURL6Data:
lnurl_dict = await _request_lnurl(url)
tag = lnurl_dict.get('tag')
if tag != 'payRequest': # only LNURL6 is handled atm
raise LNURLError(f"Unknown subtype of lnurl. tag={tag}")
metadata = lnurl_dict.get('metadata')
metadata_plaintext = ""
for m in metadata:
if m[0] == 'text/plain':
metadata_plaintext = str(m[1])
data = LNURL6Data(
callback_url=lnurl_dict['callback'],
max_sendable_sat=int(lnurl_dict['maxSendable']) // 1000,
min_sendable_sat=int(lnurl_dict['minSendable']) // 1000,
metadata_plaintext=metadata_plaintext,
comment_allowed=int(lnurl_dict['commentAllowed']) if 'commentAllowed' in lnurl_dict else 0
)
return data
async def callback_lnurl(url: str, params: dict) -> dict:
"""Requests an invoice from a lnurl supporting server."""
try:
response = await Network.async_send_http_on_proxy("get", url, params=params)
except aiohttp.client_exceptions.ClientError as e:
raise LNURLError(f"Client error: {e}") from e
# TODO: handling of specific errors
response = json.loads(response)
status = response.get("status")
if status and status == "ERROR":
raise LNURLError(f"LNURL request encountered an error: {response['reason']}")
return response
def lightning_address_to_url(address: str) -> Optional[str]:
"""Converts an email-type lightning address to a decoded lnurl.
see https://github.com/fiatjaf/lnurl-rfc/blob/luds/16.md
"""
if re.match(r"[^@]+@[^@]+\.[^@]+", address):
username, domain = address.split("@")
return f"https://{domain}/.well-known/lnurlp/{username}"
| /crates/ruff/resources/test/fixtures/flake8_use_pathlib/py_path_2.py | astral-sh/ruff | BSD-3-Clause and others | Python |
from py.path import local as path
p = path("/foo")
| /owtf/scripts/verify_nikto.py | owtf/owtf | BSD-3-Clause | Python |
#!/usr/bin/env python
"""
Tries to put links around nikto findings to save a bit of time in manual verification
This files actually converts target urls & OSVDB codes into clickable links
"""
import re
import sys
import tornado.template
from owtf.utils.strings import to_str
if len(sys.argv) < 3:
print("Usage: %s <nikto.txt file> <top_url>" % sys.argv[0])
exit(-1)
osvdb_regexp = re.compile(r"\+ (OSVDB-[0-9]+):.*")
url_regexp = re.compile(r"(/[^ :]*):")
link_list = []
top_url = sys.argv[2]
origin = sys.argv[1] # The original nikto output file
destination = "Nikto_Verify.html"
output_template = tornado.template.Template(
"""
<html>
<title>Nikto Verification</title>
<body>
{% autoescape None %}
<p>{{ content }}</p>
</body>
</html>
"""
)
link_template = tornado.template.Template("<a href='{{ link }}' target='_blank'>{{ text }}</a>")
# Replace the text with links where needed
with open(origin, "r") as nikto_file:
    nikto_output = nikto_file.read()
for match in url_regexp.findall(nikto_output):
url = top_url + match
if url not in link_list:
link_list.append(url)
nikto_output = nikto_output.replace(match, link_template.generate(link=url, text=match).decode("utf-8"))
for match in osvdb_regexp.findall(nikto_output):
osvdb_id = match.split("-")[1]
osvdb_url = "http://osvdb.org/show/osvdb/" + osvdb_id
nikto_output = nikto_output.replace(match, link_template.generate(link=osvdb_url, text=match).decode("utf-8"))
if osvdb_url not in link_list:
link_list.append(osvdb_url)
# Print here, now that the links have been constructed, so the result is visible as command output
print(nikto_output)
# Replace newlines with breaks before writing to the html file
nikto_output = nikto_output.replace("\n", "<br/>")
# Write the HTML report to the destination file
with open(destination, "w") as file:
out = output_template.generate(content=nikto_output)
file.write(to_str(out))
| /approvaltests/command.py | approvals/ApprovalTests.Python | Apache-2.0 | Python |
import os
from typing import Optional
class Command:
def __init__(self, cmd: str) -> None:
self.command = cmd
@staticmethod
def executable(cmd: str) -> bool:
return os.path.isfile(cmd) and os.access(cmd, os.X_OK)
def locate(self) -> Optional[str]:
path, _ = os.path.split(self.command)
if path and self.executable(self.command):
return self.command
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe = os.path.join(path, self.command)
if self.executable(exe):
return exe
return None
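# Example usage (a sketch): resolve an executable on the PATH, similar in
# spirit to shutil.which:
#
#     exe = Command("python3").locate()
#     if exe is not None:
#         print(f"Found executable at {exe}")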
| /flags/tests/test_conditions_validators.py | cfpb/django-flags | CC0-1.0 | Python |
import django
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from flags.conditions.validators import (
validate_boolean,
validate_date,
validate_parameter,
validate_path_re,
validate_user,
)
class ValidateParameterTestCase(TestCase):
def test_invalid_parameter_strings(self):
with self.assertRaises(ValidationError):
validate_parameter("%20flag")
def test_valid_parameter_strings(self):
validate_parameter("myflag")
validate_parameter("my-flag")
validate_parameter("my_flag")
validate_parameter("my_flag=enable")
validate_parameter("myflág")
validate_parameter("myflág0")
class ValidatePathTestCase(TestCase):
def test_invalid_path_regexs(self):
with self.assertRaises(ValidationError):
validate_path_re("*foo/my/path")
def test_valid_path_regexs(self):
validate_path_re("/my/path")
validate_path_re("/my/path/")
validate_path_re("my/path/")
validate_path_re(r"^/my/(path)?$")
class ValidateBooleanTestCase(TestCase):
def test_invalid_boolean_strings(self):
"""ValidationError should be raised for invalid boolean values"""
with self.assertRaises(ValidationError):
validate_boolean("Flase")
with self.assertRaises(ValidationError):
validate_boolean("Ture")
with self.assertRaises(ValidationError):
validate_boolean(" True")
with self.assertRaises(ValidationError):
validate_boolean("True ")
with self.assertRaises(ValidationError):
validate_boolean(["foo"])
def test_valid_boolean_strings(self):
"""Valid boolean values should not raise ValidationError"""
validate_boolean(True)
validate_boolean(False)
validate_boolean(1)
validate_boolean(0)
validate_boolean("true")
class ValidateUserTestCase(TestCase):
def test_invalid_user(self):
with self.assertRaises(ValidationError):
validate_user("nottestuser")
def test_valid_user(self):
User = get_user_model()
User.objects.create_user(username="testuser", email="test@user")
validate_user("testuser")
@override_settings(AUTH_USER_MODEL="testapp.MyUserModel")
def test_custom_user_valid(self):
User = get_user_model()
u = User(identifier="customuser")
u.save()
validate_user("customuser")
@override_settings(AUTH_USER_MODEL="testapp.MyUserModel")
def test_custom_user_invalid(self):
with self.assertRaises(ValidationError):
validate_user("nottestuser")
class ValidateDateTestCase(TestCase):
def test_invalid_date_strings(self):
with self.assertRaises(ValidationError):
validate_date("tomorrowish")
# Django 4.0 relies on Python 3.7+'s `datetime.fromisoformat()`, which
# handles this where the old regex did not. This is now valid when on
# Django 4.0+. See https://github.com/django/django/pull/14582
if django.VERSION < (4, 0):
with self.assertRaises(ValidationError):
validate_date("2020-04-01")
def test_valid_date_strings(self):
validate_date("2020-04-01T12:00")
validate_date("2020-04-01T12:00+04:00")
| /src/python/m5/simulate.py | gem5/gem5 | BSD-3-Clause and others | Python |
# Copyright (c) 2012, 2019, 2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import os
import sys
# import the wrapped C++ functions
import _m5.drain
import _m5.core
from _m5.stats import updateEvents as updateStatEvents
from . import stats
from . import SimObject
from . import ticks
from . import objects
from . import params
from m5.util.dot_writer import do_dot, do_dvfs_dot
from m5.util.dot_writer_ruby import do_ruby_dot
from .util import fatal, warn
from .util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
_drain_manager = _m5.drain.DrainManager.instance()
_instantiated = False # Has m5.instantiate() been called?
# The final call to instantiate the SimObject graph and initialize the
# system.
def instantiate(ckpt_dir=None):
global _instantiated
from m5 import options
if _instantiated:
fatal("m5.instantiate() called twice.")
_instantiated = True
root = objects.Root.getInstance()
if not root:
fatal("Need to instantiate Root() before calling instantiate()")
# we need to fix the global frequency
ticks.fixGlobalFrequency()
# Make sure SimObject-valued params are in the configuration
# hierarchy so we catch them with future descendants() walks
for obj in root.descendants():
obj.adoptOrphanParams()
# Unproxy in sorted order for determinism
for obj in root.descendants():
obj.unproxyParams()
if options.dump_config:
ini_file = open(os.path.join(options.outdir, options.dump_config), "w")
# Print ini sections in sorted order for easier diffing
for obj in sorted(root.descendants(), key=lambda o: o.path()):
obj.print_ini(ini_file)
ini_file.close()
if options.json_config:
try:
import json
json_file = open(
os.path.join(options.outdir, options.json_config), "w"
)
d = root.get_config_as_dict()
json.dump(d, json_file, indent=4)
json_file.close()
except ImportError:
pass
if options.dot_config:
do_dot(root, options.outdir, options.dot_config)
do_ruby_dot(root, options.outdir, options.dot_config)
# Initialize the global statistics
stats.initSimStats()
# Create the C++ sim objects and connect ports
for obj in root.descendants():
obj.createCCObject()
for obj in root.descendants():
obj.connectPorts()
# Do a second pass to finish initializing the sim objects
for obj in root.descendants():
obj.init()
# Do a third pass to initialize statistics
stats._bindStatHierarchy(root)
root.regStats()
# Do a fourth pass to initialize probe points
for obj in root.descendants():
obj.regProbePoints()
# Do a fifth pass to connect probe listeners
for obj in root.descendants():
obj.regProbeListeners()
# We want to generate the DVFS diagram for the system. This can only be
# done once all of the CPP objects have been created and initialised so
# that we are able to figure out which object belongs to which domain.
if options.dot_dvfs_config:
do_dvfs_dot(root, options.outdir, options.dot_dvfs_config)
# We're done registering statistics. Enable the stats package now.
stats.enable()
# Restore checkpoint (if any)
if ckpt_dir:
_drain_manager.preCheckpointRestore()
ckpt = _m5.core.getCheckpoint(ckpt_dir)
for obj in root.descendants():
obj.loadState(ckpt)
else:
for obj in root.descendants():
obj.initState()
    # Check to see if any of the stat events are in the past after resuming
    # from a checkpoint. If so, this call will shift them to a valid time.
updateStatEvents()
need_startup = True
def simulate(*args, **kwargs):
global need_startup
global _instantiated
if not _instantiated:
fatal("m5.instantiate() must be called before m5.simulate().")
if need_startup:
root = objects.Root.getInstance()
for obj in root.descendants():
obj.startup()
need_startup = False
# Python exit handlers happen in reverse order.
# We want to dump stats last.
atexit.register(stats.dump)
# register our C++ exit callback function with Python
atexit.register(_m5.core.doExitCleanup)
# Reset to put the stats in a consistent state.
stats.reset()
if _drain_manager.isDrained():
_drain_manager.resume()
    # We flush stdout and stderr before and after the simulation to ensure
    # the output arrives in order.
sys.stdout.flush()
sys.stderr.flush()
sim_out = _m5.event.simulate(*args, **kwargs)
sys.stdout.flush()
sys.stderr.flush()
return sim_out
def setMaxTick(tick: int) -> None:
"""Sets the maximum tick the simulation may run to. When when using the
stdlib simulator module, reaching this max tick triggers a
`ExitEvent.MAX_TICK` exit event.
:param tick: the maximum tick (absolute, not relative to the current tick).
"""
if tick <= curTick():
warn("Max tick scheduled for the past. This will not be triggered.")
_m5.event.setMaxTick(tick=tick)
def getMaxTick() -> int:
"""Returns the current maximum tick."""
return _m5.event.getMaxTick()
def getTicksUntilMax() -> int:
"""Returns the current number of ticks until the maximum tick."""
return getMaxTick() - curTick()
def scheduleTickExitFromCurrent(
ticks: int, exit_string: str = "Tick exit reached"
) -> None:
"""Schedules a tick exit event from the current tick. I.e., if ticks == 100
then an exit event will be scheduled at tick `curTick() + 100`.
The default `exit_string` value is used by the stdlib Simulator module to
declare this exit event as `ExitEvent.SCHEDULED_TICK`.
:param ticks: The simulation ticks, from `curTick()` to schedule the exit
event.
:param exit_string: The exit string to return when the exit event is
triggered.
"""
scheduleTickExitAbsolute(tick=ticks + curTick(), exit_string=exit_string)
def scheduleTickExitAbsolute(
tick: int, exit_string: str = "Tick exit reached"
) -> None:
"""Schedules a tick exit event using absolute ticks. I.e., if tick == 100
then an exit event will be scheduled at tick 100.
The default `exit_string` value is used by the stdlib Simulator module to
declare this exit event as `ExitEvent.SCHEDULED_TICK`.
:param tick: The absolute simulation tick to schedule the exit event.
:param exit_string: The exit string to return when the exit event is
triggered.
"""
if tick <= curTick():
warn("Tick exit scheduled for the past. This will not be triggered.")
_m5.event.exitSimLoop(exit_string, 0, tick, 0, False)
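# Example (a sketch, not part of the original module): schedule an exit one
# million ticks from now, run the simulation, then inspect the cause of the
# returned exit event:
#
#     scheduleTickExitFromCurrent(1_000_000)
#     event = simulate()
#     print(event.getCause())  # "Tick exit reached"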
def drain():
"""Drain the simulator in preparation of a checkpoint or memory mode
switch.
This operation is a no-op if the simulator is already in the
Drained state.
"""
# Try to drain all objects. Draining might not be completed unless
# all objects return that they are drained on the first call. This
# is because as objects drain they may cause other objects to no
# longer be drained.
def _drain():
# Try to drain the system. The drain is successful if all
# objects are done without simulation. We need to simulate
# more if not.
if _drain_manager.tryDrain():
return True
# WARNING: if a valid exit event occurs while draining, it
# will not get returned to the user script
exit_event = _m5.event.simulate()
while exit_event.getCause() != "Finished drain":
exit_event = simulate()
return False
# Don't try to drain a system that is already drained
is_drained = _drain_manager.isDrained()
while not is_drained:
is_drained = _drain()
assert _drain_manager.isDrained(), "Drain state inconsistent"
def memWriteback(root):
for obj in root.descendants():
obj.memWriteback()
def memInvalidate(root):
for obj in root.descendants():
obj.memInvalidate()
def checkpoint(dir):
root = objects.Root.getInstance()
if not isinstance(root, objects.Root):
raise TypeError("Checkpoint must be called on a root object.")
drain()
memWriteback(root)
print("Writing checkpoint")
_m5.core.serializeAll(dir)
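# Minimal checkpoint/restore sketch (assumes a config script that has built
# a Root object; "outdir/cpt" is a hypothetical path):
#
#     m5.instantiate()             # fresh run
#     m5.simulate(1_000_000)
#     m5.checkpoint("outdir/cpt")  # drains, writes back memory, serializes
#     ...
#     m5.instantiate("outdir/cpt") # later run: restore from the checkpoint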
def _changeMemoryMode(system, mode):
if not isinstance(system, (objects.Root, objects.System)):
raise TypeError(
"Parameter of type '%s'. Must be type %s or %s."
% (type(system), objects.Root, objects.System)
)
if system.getMemoryMode() != mode:
system.setMemoryMode(mode)
else:
print("System already in target mode. Memory mode unchanged.")
def switchCpus(system, cpuList, verbose=True):
"""Switch CPUs in a system.
Note: This method may switch the memory mode of the system if that
is required by the CPUs. It may also flush all caches in the
system.
Arguments:
system -- Simulated system.
cpuList -- (old_cpu, new_cpu) tuples
"""
if verbose:
print("switching cpus")
if not isinstance(cpuList, list):
raise RuntimeError("Must pass a list to this function")
for item in cpuList:
if not isinstance(item, tuple) or len(item) != 2:
raise RuntimeError("List must have tuples of (oldCPU,newCPU)")
old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
old_cpu_set = set(old_cpus)
memory_mode_name = new_cpus[0].memory_mode()
for old_cpu, new_cpu in cpuList:
if not isinstance(old_cpu, objects.BaseCPU):
raise TypeError(f"{old_cpu} is not of type BaseCPU")
if not isinstance(new_cpu, objects.BaseCPU):
raise TypeError(f"{new_cpu} is not of type BaseCPU")
if new_cpu in old_cpu_set:
raise RuntimeError(
f"New CPU ({old_cpu}) is in the list of old CPUs."
)
if not new_cpu.switchedOut():
raise RuntimeError(f"New CPU ({new_cpu}) is already active.")
if not new_cpu.support_take_over():
raise RuntimeError(
f"New CPU ({old_cpu}) does not support CPU handover."
)
if new_cpu.memory_mode() != memory_mode_name:
raise RuntimeError(
f"{new_cpu} and {new_cpus[0]} require different memory modes."
)
if old_cpu.switchedOut():
raise RuntimeError(f"Old CPU ({new_cpu}) is inactive.")
if not old_cpu.support_take_over():
raise RuntimeError(
f"Old CPU ({old_cpu}) does not support CPU handover."
)
MemoryMode = params.allEnums["MemoryMode"]
try:
memory_mode = MemoryMode(memory_mode_name).getValue()
except KeyError:
raise RuntimeError(f"Invalid memory mode ({memory_mode_name})")
drain()
# Now all of the CPUs are ready to be switched out
for old_cpu, new_cpu in cpuList:
old_cpu.switchOut()
# Change the memory mode if required. We check if this is needed
# to avoid printing a warning if no switch was performed.
if system.getMemoryMode() != memory_mode:
# Flush the memory system if we are switching to a memory mode
# that disables caches. This typically happens when switching to a
# hardware virtualized CPU.
if memory_mode == MemoryMode("atomic_noncaching").getValue():
memWriteback(system)
memInvalidate(system)
_changeMemoryMode(system, memory_mode)
for old_cpu, new_cpu in cpuList:
new_cpu.takeOverFrom(old_cpu)
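# Usage sketch for switchCpus (assumes `system.cpu` holds the active CPUs
# and `system.switch_cpus` holds matching switched-out replacements, as in
# common gem5 config scripts; the attribute names are illustrative):
#
#     pairs = list(zip(system.cpu, system.switch_cpus))
#     switchCpus(system, pairs)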
def notifyFork(root):
for obj in root.descendants():
obj.notifyFork()
fork_count = 0
def fork(simout="%(parent)s.f%(fork_seq)i"):
"""Fork the simulator.
This function forks the simulator. After forking the simulator,
the child process gets its output files redirected to a new output
directory. The default name of the output directory is the same as
the parent with the suffix ".fN" added where N is the fork
sequence number. The name of the output directory can be
overridden using the simout keyword argument.
Output file formatting dictionary:
parent -- Path to the parent process's output directory.
fork_seq -- Fork sequence number.
pid -- PID of the child process.
Keyword Arguments:
simout -- New simulation output directory.
Return Value:
pid of the child process or 0 if running in the child.
"""
from m5 import options
global fork_count
if not _m5.core.listenersDisabled():
raise RuntimeError("Can not fork a simulator with listeners enabled")
drain()
# Terminate helper threads that service parallel event queues.
_m5.event.terminateEventQueueThreads()
    pid = os.fork()
if pid == 0:
# In child, notify objects of the fork
root = objects.Root.getInstance()
notifyFork(root)
# Setup a new output directory
parent = options.outdir
options.outdir = simout % {
"parent": parent,
"fork_seq": fork_count,
"pid": os.getpid(),
}
_m5.core.setOutputDir(options.outdir)
else:
fork_count += 1
return pid
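# Fork usage sketch (listeners must be disabled first, e.g. with
# disableAllListeners() imported below; illustrative only):
#
#     pid = fork()
#     if pid == 0:
#         simulate()          # child runs in its own output directory
#     else:
#         os.waitpid(pid, 0)  # parent waits for the child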
from _m5.core import disableAllListeners, listenersDisabled
from _m5.core import listenersLoopbackOnly
from _m5.core import curTick
# =========================================================================
# File: python/tvm/autotvm/task/topi_integration.py
# Repo: apache/tvm (Apache-2.0)
# =========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name,unused-argument
"""
Decorators for registering tunable templates to TOPI.
These decorators let a simple implementation use different configurations
for different workloads.
Here we directly use all arguments to the TOPI call as the "workload", so make
sure all the arguments (except tvm.te.Tensor) in your calls are hashable. For
tvm.te.Tensor, we will serialize it to a hashable tuple.
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
import functools
import tvm.te._ffi_api
from tvm.target import Target
from tvm.te import tensor
from .task import (
args_to_workload,
serialize_args,
DispatchContext,
_register_task_compute,
_register_task_schedule,
)
# Task extractor for relay program
class TaskExtractEnv:
"""Global environment for extracting tuning tasks from graph"""
current = None
registered = None
def __init__(self, allow_duplicate=False):
self.allow_duplicate = allow_duplicate
self.task_collection = []
self.wanted_relay_ops = None
self.modified_funcs = []
self.tracing = False
def __enter__(self):
self.task_collection = []
self.tracing = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.tracing = False
def reset(self, wanted_relay_ops=None):
"""Reset task collections
Parameters
----------
wanted_relay_ops: List of tvm.ir.Op
The relay ops to be extracted
"""
self.task_collection = []
self.wanted_relay_ops = wanted_relay_ops
def add_task(self, task_name, args):
"""Add AutoTVM task
Parameters
----------
task_name: str
AutoTVM task name.
args: tuple
Arguments to the TOPI function.
"""
key = (task_name, serialize_args(args))
if self.allow_duplicate or key not in self.task_collection:
self.task_collection.append(key)
def get_tasks(self):
"""Get collected tasks
Returns
-------
tasks: List of tuple(name, args)
A list of tasks extracted from the graph
"""
return self.task_collection
@staticmethod
def get(allow_duplicate=False):
"""Get the single instance of TaskExtractEnv
Parameters
----------
allow_duplicate : boolean
Whether to fetch all workloads in the network,
even though some of them are the same. This is
useful for graph tuning.
Returns
-------
env: TaskExtractEnv
The single instance of TaskExtractEnv
"""
if not TaskExtractEnv.current:
TaskExtractEnv.current = TaskExtractEnv(allow_duplicate)
else:
TaskExtractEnv.current.allow_duplicate = allow_duplicate
return TaskExtractEnv.current
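# Usage sketch: collect tasks while tracing (illustrative; real extraction
# enters this environment from the relay build hooks):
#
#     env = TaskExtractEnv.get()
#     with env:
#         ...  # run a compilation that calls registered TOPI templates
#     tasks = env.get_tasks()  # [(task_name, serialized_args), ...]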
def register_topi_compute(task_name, func=None):
"""Register a tunable template for a topi compute function.
    The registration will wrap this topi compute to take `cfg` as the first argument,
    followed by the original argument list. It uses all of its arguments as the
    workload and stores this "workload" in its final ComputeOp, which can be used
    to reconstruct the "workload" in the following topi_schedule call.
Parameters
----------
task_name: str
The AutoTVM task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
Examples
--------
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
def _decorate(topi_compute):
@functools.wraps(topi_compute)
@_register_task_compute(task_name)
def wrapper(*args, **kwargs):
"""wrapper function for topi compute"""
assert not kwargs, "Do not support kwargs in template function call"
task_env = TaskExtractEnv.current
if task_env is not None and task_env.tracing:
task_env.add_task(task_name, args)
workload = args_to_workload(args, task_name)
tgt = Target.current()
cfg = DispatchContext.current.query(tgt, workload)
node = topi_compute(cfg, *args)
# attach workload to return op
op = node.op
attrs = {}
for k, v in node.op.attrs.items():
attrs[k] = v
attrs["workload"] = workload
if isinstance(op, tensor.ComputeOp):
op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body)
elif isinstance(op, tensor.ExternOp):
op = tvm.te._ffi_api.ExternOp(
op.name,
op.tag,
attrs,
op.inputs,
op.input_placeholders,
op.output_placeholders,
op.body,
)
else:
raise RuntimeError("Unsupported op type: " + str(type(op)))
if isinstance(node, tensor.Tensor):
return op.output(0)
return [op.output(i) for i in range(len(node))]
return wrapper
if func:
return _decorate(func)
return _decorate
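# Decoration sketch (hypothetical template name and signature; the wrapped
# function receives `cfg` first, while callers pass only the original args):
#
#     @register_topi_compute("example.matmul")
#     def matmul(cfg, A, B):
#         ...  # read tunable knobs from cfg, return a te.Tensor
#
#     C = matmul(A, B)  # cfg is injected from DispatchContext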
def register_topi_schedule(task_name, func=None):
"""Register a tunable template for a topi schedule function.
The registration will wrap this topi schedule to take `cfg` as the first argument,
followed by the original argument list.
    Note that this function will try to find "workload" from all the ComputeOps in the input.
You can attach "workload" to your compute op by using :any:`register_topi_compute`.
The task name has to be the same as that of the corresponding topi compute function.
Parameters
----------
task_name: str
The AutoTVM task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
Examples
--------
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
def _decorate(topi_schedule):
@functools.wraps(topi_schedule)
@_register_task_schedule(task_name)
def wrapper(outs, *args, **kwargs):
"""wrapper function for topi schedule"""
workload = get_workload(outs, task_name)
if workload is None:
raise RuntimeError(
f"Cannot find TOPI workload {task_name}. "
"Is it registered with `register_topi_compute`?"
)
tgt = Target.current()
cfg = DispatchContext.current.query(tgt, workload)
return topi_schedule(cfg, outs, *args, **kwargs)
return wrapper
if func:
return _decorate(func)
return _decorate
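# Matching schedule sketch (the task name must equal the compute's, so the
# workload attached by register_topi_compute can be found in `outs`):
#
#     @register_topi_schedule("example.matmul")
#     def schedule_matmul(cfg, outs):
#         ...  # build and return a te.Schedule using cfg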
def get_workload(outs, task_name=None):
"""Retrieve the workload from outputs"""
visited = set()
def traverse(tensors):
"""traverse all ops to find attached workload"""
for t in tensors:
op = t.op
if op in visited:
continue
visited.add(op)
wkl = traverse(op.input_tensors)
if wkl is not None:
return wkl
if "workload" in op.attrs:
ret = args_to_workload(op.attrs["workload"])
if task_name is None or ret[0] == task_name:
return ret
return None
outs = [outs] if isinstance(outs, tensor.Tensor) else outs
return traverse(outs)
# =========================================================================
# File: nms_wrapper.py
# Repo: qhgz2013/anime-face-detector (MIT)
# =========================================================================
from enum import Enum
class NMSType(Enum):
PY_NMS = 1
CPU_NMS = 2
GPU_NMS = 3
default_nms_type = NMSType.PY_NMS
class NMSWrapper:
def __init__(self, nms_type=default_nms_type):
        assert isinstance(nms_type, NMSType)
if nms_type == NMSType.PY_NMS:
from nms.py_cpu_nms import py_cpu_nms
self._nms = py_cpu_nms
elif nms_type == NMSType.CPU_NMS:
from nms.cpu_nms import cpu_nms
self._nms = cpu_nms
elif nms_type == NMSType.GPU_NMS:
from nms.gpu_nms import gpu_nms
self._nms = gpu_nms
else:
raise ValueError('current nms type is not implemented yet')
def __call__(self, *args, **kwargs):
return self._nms(*args, **kwargs)
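# Usage sketch (assumes detections shaped (N, 5) as [x1, y1, x2, y2, score];
# the 0.3 overlap threshold is illustrative):
#
#     nms = NMSWrapper(NMSType.PY_NMS)
#     keep = nms(detections, 0.3)  # indices of the boxes kept after NMS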
# =========================================================================
# File: tests/test_drive.py
# Repo: picklepete/pyicloud (MIT)
# =========================================================================
"""Drive service tests."""
from unittest import TestCase
import pytest
from . import PyiCloudServiceMock
from .const import AUTHENTICATED_USER, VALID_PASSWORD
class DriveServiceTest(TestCase):
"""Drive service tests."""
service = None
def setUp(self):
"""Set up tests."""
self.service = PyiCloudServiceMock(AUTHENTICATED_USER, VALID_PASSWORD)
def test_root(self):
"""Test the root folder."""
drive = self.service.drive
assert drive.name == ""
assert drive.type == "folder"
assert drive.size is None
assert drive.date_changed is None
assert drive.date_modified is None
assert drive.date_last_open is None
assert drive.dir() == ["Keynote", "Numbers", "Pages", "Preview", "pyiCloud"]
def test_folder_app(self):
"""Test the /Preview folder."""
folder = self.service.drive["Preview"]
assert folder.name == "Preview"
assert folder.type == "app_library"
assert folder.size is None
assert folder.date_changed is None
assert folder.date_modified is None
assert folder.date_last_open is None
with pytest.raises(KeyError, match="No items in folder, status: ID_INVALID"):
assert folder.dir()
def test_folder_not_exists(self):
"""Test the /not_exists folder."""
with pytest.raises(KeyError, match="No child named 'not_exists' exists"):
self.service.drive["not_exists"] # pylint: disable=pointless-statement
def test_folder(self):
"""Test the /pyiCloud folder."""
folder = self.service.drive["pyiCloud"]
assert folder.name == "pyiCloud"
assert folder.type == "folder"
assert folder.size is None
assert folder.date_changed is None
assert folder.date_modified is None
assert folder.date_last_open is None
assert folder.dir() == ["Test"]
def test_subfolder(self):
"""Test the /pyiCloud/Test folder."""
folder = self.service.drive["pyiCloud"]["Test"]
assert folder.name == "Test"
assert folder.type == "folder"
assert folder.size is None
assert folder.date_changed is None
assert folder.date_modified is None
assert folder.date_last_open is None
assert folder.dir() == ["Document scanné 2.pdf", "Scanned document 1.pdf"]
def test_subfolder_file(self):
"""Test the /pyiCloud/Test/Scanned document 1.pdf file."""
folder = self.service.drive["pyiCloud"]["Test"]
file_test = folder["Scanned document 1.pdf"]
assert file_test.name == "Scanned document 1.pdf"
assert file_test.type == "file"
assert file_test.size == 21644358
assert str(file_test.date_changed) == "2020-05-03 00:16:17"
assert str(file_test.date_modified) == "2020-05-03 00:15:17"
assert str(file_test.date_last_open) == "2020-05-03 00:24:25"
assert file_test.dir() is None
def test_file_open(self):
"""Test the /pyiCloud/Test/Scanned document 1.pdf file open."""
file_test = self.service.drive["pyiCloud"]["Test"]["Scanned document 1.pdf"]
with file_test.open(stream=True) as response:
assert response.raw
# =========================================================================
# File: leo/modes/batch.py
# Repo: leo-editor/leo-editor (MIT / BSD-3-Clause)
# =========================================================================
#@+leo-ver=5-thin
#@+node:ekr.20221129095254.1: * @file ../modes/batch.py
#@@language python
# Leo colorizer control file for batch mode.
# This file is in the public domain.
# Properties for batch mode.
properties = {
"lineComment": "rem",
}
# Attributes dict for batch_main ruleset.
batch_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "false",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for batch mode.
attributesDictDict = {
"batch_main": batch_main_attributes_dict,
}
# Keywords dict for batch_main ruleset.
batch_main_keywords_dict = {
"append": "function",
"attrib": "function",
"aux": "keyword2",
"call": "keyword1",
"cd": "keyword1",
"chdir": "keyword1",
"chkdsk": "function",
"choice": "function",
"cls": "keyword1",
"copy": "keyword1",
"debug": "function",
"defined": "keyword2",
"defrag": "function",
"del": "keyword1",
"deltree": "function",
"diskcomp": "function",
"diskcopy": "function",
"do": "keyword2",
"doskey": "function",
"drvspace": "function",
"echo": "keyword1",
"echo.": "keyword1",
"else": "keyword2",
"emm386": "function",
"endlocal": "keyword1",
"errorlevel": "keyword2",
"exist": "keyword2",
"exit": "keyword1",
"expand": "function",
"fastopen": "function",
"fc": "function",
"fdisk": "function",
"find": "function",
"for": "keyword1",
"format": "function",
"goto": "keyword3",
"graphics": "function",
"if": "keyword1",
"in": "keyword2",
"keyb": "function",
"label": "function",
"loadfix": "function",
"md": "keyword1",
"mem": "function",
"mkdir": "keyword1",
"mode": "function",
"more": "function",
"move": "function",
"mscdex": "function",
"nlsfunc": "function",
"not": "keyword1",
"nul": "keyword2",
"pause": "keyword1",
"power": "function",
"print": "function",
"prn": "keyword2",
"rd": "function",
"ren": "keyword1",
"replace": "function",
"restore": "function",
"set": "keyword1",
"setlocal": "keyword1",
"setver": "function",
"share": "function",
"shift": "keyword1",
"sort": "function",
"subst": "function",
"sys": "function",
"tree": "function",
"undelete": "function",
"unformat": "function",
"vsafe": "function",
"xcopy": "function",
}
# Dictionary of keywords dictionaries for batch mode.
keywordsDictDict = {
"batch_main": batch_main_keywords_dict,
}
# Rules for batch_main ruleset.
#@+others
#@+node:ekr.20221129095311.1: ** batch_rule0
def batch_rule0(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword3", seq="@")
#@+node:ekr.20221129095311.2: ** batch_rule1
def batch_rule1(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="+")
#@+node:ekr.20221129095311.3: ** batch_rule2
def batch_rule2(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="|")
#@+node:ekr.20221129095311.4: ** batch_rule3
def batch_rule3(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="&")
#@+node:ekr.20221129095311.5: ** batch_rule4
def batch_rule4(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="!")
#@+node:ekr.20221129095311.6: ** batch_rule5
def batch_rule5(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=">")
#@+node:ekr.20221129095311.7: ** batch_rule6
def batch_rule6(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<")
#@+node:ekr.20221129095311.8: ** batch_rule7
def batch_rule7(colorer, s, i):
# Labels.
return colorer.match_eol_span(s, i, kind="label", seq=":",
at_line_start=True)
#@+node:ekr.20221129095311.9: ** batch_rule8
def batch_rule8(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="REM",
at_line_start=True, at_whitespace_end=True, at_word_start=True)
def batch_rule8a(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="rem",
at_line_start=True, at_whitespace_end=True, at_word_start=True)
#@+node:ekr.20221129095311.10: ** batch_rule9
def batch_rule9(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
no_line_break=True)
#@+node:ekr.20221129095311.11: ** batch_rule10
def batch_rule10(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%0")
#@+node:ekr.20221129095311.12: ** batch_rule11
def batch_rule11(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%1")
#@+node:ekr.20221129095311.13: ** batch_rule12
def batch_rule12(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%2")
#@+node:ekr.20221129095311.14: ** batch_rule13
def batch_rule13(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%3")
#@+node:ekr.20221129095311.15: ** batch_rule14
def batch_rule14(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%4")
#@+node:ekr.20221129095311.16: ** batch_rule15
def batch_rule15(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%5")
#@+node:ekr.20221129095311.17: ** batch_rule16
def batch_rule16(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%6")
#@+node:ekr.20221129095311.18: ** batch_rule17
def batch_rule17(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%7")
#@+node:ekr.20221129095311.19: ** batch_rule18
def batch_rule18(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%8")
#@+node:ekr.20221129095311.20: ** batch_rule19
def batch_rule19(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="keyword2", seq="%9")
#@+node:ekr.20221129095311.21: ** batch_rule20
def batch_rule20(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="%[", end="]")
#@+node:ekr.20221129095311.22: ** batch_rule21
def batch_rule21(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="%", end="%",
no_line_break=True)
#@+node:ekr.20221129095311.23: ** batch_rule22
def batch_rule22(colorer, s, i):
return colorer.match_keywords(s, i)
#@-others
# Rules dict for batch_main ruleset.
rulesDict1 = {
"!": [batch_rule4],
"\"": [batch_rule9],
"%": [
batch_rule10, batch_rule11, batch_rule12, batch_rule13, batch_rule14,
batch_rule15, batch_rule16, batch_rule17, batch_rule18, batch_rule19,
batch_rule20, batch_rule21
],
"&": [batch_rule3],
"+": [batch_rule1],
".": [batch_rule22],
"0": [batch_rule22],
"1": [batch_rule22],
"2": [batch_rule22],
"3": [batch_rule22],
"4": [batch_rule22],
"5": [batch_rule22],
"6": [batch_rule22],
"7": [batch_rule22],
"8": [batch_rule22],
"9": [batch_rule22],
":": [batch_rule7],
"<": [batch_rule6],
">": [batch_rule5],
"@": [batch_rule0, batch_rule22],
"A": [batch_rule22],
"B": [batch_rule22],
"C": [batch_rule22],
"D": [batch_rule22],
"E": [batch_rule22],
"F": [batch_rule22],
"G": [batch_rule22],
"H": [batch_rule22],
"I": [batch_rule22],
"J": [batch_rule22],
"K": [batch_rule22],
"L": [batch_rule22],
"M": [batch_rule22],
"N": [batch_rule22],
"O": [batch_rule22],
"P": [batch_rule22],
"Q": [batch_rule22],
"R": [batch_rule8, batch_rule22],
"S": [batch_rule22],
"T": [batch_rule22],
"U": [batch_rule22],
"V": [batch_rule22],
"W": [batch_rule22],
"X": [batch_rule22],
"Y": [batch_rule22],
"Z": [batch_rule22],
"a": [batch_rule22],
"b": [batch_rule22],
"c": [batch_rule22],
"d": [batch_rule22],
"e": [batch_rule22],
"f": [batch_rule22],
"g": [batch_rule22],
"h": [batch_rule22],
"i": [batch_rule22],
"j": [batch_rule22],
"k": [batch_rule22],
"l": [batch_rule22],
"m": [batch_rule22],
"n": [batch_rule22],
"o": [batch_rule22],
"p": [batch_rule22],
"q": [batch_rule22],
"r": [batch_rule8a, batch_rule22],
"s": [batch_rule22],
"t": [batch_rule22],
"u": [batch_rule22],
"v": [batch_rule22],
"w": [batch_rule22],
"x": [batch_rule22],
"y": [batch_rule22],
"z": [batch_rule22],
"|": [batch_rule2],
}
# x.rulesDictDict for batch mode.
rulesDictDict = {
"batch_main": rulesDict1,
}
# Import dict for batch mode.
importDict = {}
#@@language python
#@@tabwidth -4
#@-leo
# =========================================================================
# File: sklearn/cluster/_optics.py
# Repo: scikit-learn/scikit-learn (BSD-3-Clause)
# =========================================================================
"""Ordering Points To Identify the Clustering Structure (OPTICS)
These routines execute the OPTICS algorithm, and implement various
cluster extraction methods of the ordered list.
Authors: Shane Grigsby <refuge@rocktalus.com>
Adrin Jalali <adrinjalali@gmail.com>
Erich Schubert <erich@debian.org>
Hanmin Qin <qinhanmin2005@sina.com>
License: BSD 3 clause
"""
import warnings
from numbers import Integral, Real
import numpy as np
from scipy.sparse import SparseEfficiencyWarning, issparse
from ..base import BaseEstimator, ClusterMixin, _fit_context
from ..exceptions import DataConversionWarning
from ..metrics import pairwise_distances
from ..metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS
from ..neighbors import NearestNeighbors
from ..utils import gen_batches, get_chunk_n_rows
from ..utils._param_validation import (
HasMethods,
Interval,
RealNotInt,
StrOptions,
validate_params,
)
from ..utils.validation import check_memory
class OPTICS(ClusterMixin, BaseEstimator):
"""Estimate clustering structure from vector array.
    OPTICS (Ordering Points To Identify the Clustering Structure), closely
    related to DBSCAN, finds core samples of high density and expands clusters
    from them [1]_. Unlike DBSCAN, it keeps the cluster hierarchy for a variable
    neighborhood radius. Better suited for usage on large datasets than the
    current sklearn implementation of DBSCAN.
Clusters are then extracted using a DBSCAN-like method
(cluster_method = 'dbscan') or an automatic
technique proposed in [1]_ (cluster_method = 'xi').
This implementation deviates from the original OPTICS by first performing
k-nearest-neighborhood searches on all points to identify core sizes, then
computing only the distances to unprocessed points when constructing the
cluster order. Note that we do not employ a heap to manage the expansion
candidates, so the time complexity will be O(n^2).
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
min_samples : int > 1 or float between 0 and 1, default=5
The number of samples in a neighborhood for a point to be considered as
a core point. Also, up and down steep regions can't have more than
``min_samples`` consecutive non-steep points. Expressed as an absolute
number or a fraction of the number of samples (rounded to be at least
2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", `X` is assumed to be a distance matrix and must be
square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
Sparse matrices are only supported by scikit-learn metrics.
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
        `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
p : float, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
cluster_method : str, default='xi'
The extraction method used to extract clusters using the calculated
reachability and ordering. Possible values are "xi" and "dbscan".
eps : float, default=None
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. By default it assumes the same value
as ``max_eps``.
Used only when ``cluster_method='dbscan'``.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
Used only when ``cluster_method='xi'``.
predecessor_correction : bool, default=True
Correct clusters according to the predecessors calculated by OPTICS
[2]_. This parameter has minimal effect on most datasets.
Used only when ``cluster_method='xi'``.
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
Used only when ``cluster_method='xi'``.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
- 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
- 'brute' will use a brute-force search.
- 'auto' (default) will attempt to decide the most appropriate
algorithm based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
:class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
construction and query, as well as the memory required to store the
tree. The optimal value depends on the nature of the problem.
memory : str or object with the joblib.Memory interface, default=None
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
labels_ : ndarray of shape (n_samples,)
Cluster labels for each point in the dataset given to fit().
Noisy samples and points which are not included in a leaf cluster
of ``cluster_hierarchy_`` are labeled as -1.
reachability_ : ndarray of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
ordering_ : ndarray of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
predecessor_ : ndarray of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
cluster_hierarchy_ : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to
``(end, -start)`` (ascending) so that larger clusters encompassing
smaller clusters come after those smaller ones. Since ``labels_`` does
not reflect the hierarchy, usually
        ``len(cluster_hierarchy_) > len(np.unique(optics.labels_))``. Please also
note that these indices are of the ``ordering_``, i.e.
``X[ordering_][start:end + 1]`` form a cluster.
Only available when ``cluster_method='xi'``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
DBSCAN : A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
.. [2] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
Examples
--------
>>> from sklearn.cluster import OPTICS
>>> import numpy as np
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> clustering = OPTICS(min_samples=2).fit(X)
>>> clustering.labels_
array([0, 0, 0, 1, 1, 1])
For a more detailed example see
:ref:`sphx_glr_auto_examples_cluster_plot_optics.py`.
"""
_parameter_constraints: dict = {
"min_samples": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
],
"max_eps": [Interval(Real, 0, None, closed="both")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"p": [Interval(Real, 1, None, closed="left")],
"metric_params": [dict, None],
"cluster_method": [StrOptions({"dbscan", "xi"})],
"eps": [Interval(Real, 0, None, closed="both"), None],
"xi": [Interval(Real, 0, 1, closed="both")],
"predecessor_correction": ["boolean"],
"min_cluster_size": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="right"),
None,
],
"algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
"leaf_size": [Interval(Integral, 1, None, closed="left")],
"memory": [str, HasMethods("cache"), None],
"n_jobs": [Integral, None],
}
def __init__(
self,
*,
min_samples=5,
max_eps=np.inf,
metric="minkowski",
p=2,
metric_params=None,
cluster_method="xi",
eps=None,
xi=0.05,
predecessor_correction=True,
min_cluster_size=None,
algorithm="auto",
leaf_size=30,
memory=None,
n_jobs=None,
):
self.max_eps = max_eps
self.min_samples = min_samples
self.min_cluster_size = min_cluster_size
self.algorithm = algorithm
self.metric = metric
self.metric_params = metric_params
self.p = p
self.leaf_size = leaf_size
self.cluster_method = cluster_method
self.eps = eps
self.xi = xi
self.predecessor_correction = predecessor_correction
self.memory = memory
self.n_jobs = n_jobs
@_fit_context(
# Optics.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Perform OPTICS clustering.
Extracts an ordered list of points and reachability distances, and
performs initial clustering using ``max_eps`` distance specified at
OPTICS object instantiation.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'. If a sparse matrix is provided, it will be
converted into CSR format.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns a fitted instance of self.
"""
dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float
if dtype == bool and X.dtype != bool:
msg = (
"Data will be converted to boolean for"
f" metric {self.metric}, to avoid this warning,"
" you may convert the data prior to calling fit."
)
warnings.warn(msg, DataConversionWarning)
X = self._validate_data(X, dtype=dtype, accept_sparse="csr")
if self.metric == "precomputed" and issparse(X):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
# Set each diagonal to an explicit value so each point is its
# own neighbor
X.setdiag(X.diagonal())
memory = check_memory(self.memory)
(
self.ordering_,
self.core_distances_,
self.reachability_,
self.predecessor_,
) = memory.cache(compute_optics_graph)(
X=X,
min_samples=self.min_samples,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
metric_params=self.metric_params,
p=self.p,
n_jobs=self.n_jobs,
max_eps=self.max_eps,
)
# Extract clusters from the calculated orders and reachability
if self.cluster_method == "xi":
labels_, clusters_ = cluster_optics_xi(
reachability=self.reachability_,
predecessor=self.predecessor_,
ordering=self.ordering_,
min_samples=self.min_samples,
min_cluster_size=self.min_cluster_size,
xi=self.xi,
predecessor_correction=self.predecessor_correction,
)
self.cluster_hierarchy_ = clusters_
elif self.cluster_method == "dbscan":
if self.eps is None:
eps = self.max_eps
else:
eps = self.eps
if eps > self.max_eps:
raise ValueError(
"Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps)
)
labels_ = cluster_optics_dbscan(
reachability=self.reachability_,
core_distances=self.core_distances_,
ordering=self.ordering_,
eps=eps,
)
self.labels_ = labels_
return self
def _validate_size(size, n_samples, param_name):
if size > n_samples:
raise ValueError(
"%s must be no greater than the number of samples (%d). Got %d"
% (param_name, n_samples, size)
)
# OPTICS helper functions
def _compute_core_distances_(X, neighbors, min_samples, working_memory):
"""Compute the k-th nearest neighbor of each sample.
Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1]
but with more memory efficiency.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
neighbors : NearestNeighbors instance
The fitted nearest neighbors estimator.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
core_distances : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
"""
n_samples = X.shape[0]
core_distances = np.empty(n_samples)
core_distances.fill(np.nan)
chunk_n_rows = get_chunk_n_rows(
row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory
)
slices = gen_batches(n_samples, chunk_n_rows)
for sl in slices:
core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1]
return core_distances
@validate_params(
{
"X": [np.ndarray, "sparse matrix"],
"min_samples": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
],
"max_eps": [Interval(Real, 0, None, closed="both")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"p": [Interval(Real, 0, None, closed="right"), None],
"metric_params": [dict, None],
"algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
"leaf_size": [Interval(Integral, 1, None, closed="left")],
"n_jobs": [Integral, None],
},
prefer_skip_nested_validation=False, # metric is not validated yet
)
def compute_optics_graph(
X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
p : float, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
- 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to `fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
:class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
construction and query, as well as the memory required to store the
tree. The optimal value depends on the nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs,
)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(
X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
np.around(
core_distances_,
decimals=np.finfo(core_distances_.dtype).precision,
out=core_distances_,
)
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(
core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed,
X=X,
nbrs=nbrs,
metric=metric,
metric_params=metric_params,
p=p,
max_eps=max_eps,
)
if np.all(np.isinf(reachability_)):
warnings.warn(
(
"All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers."
),
UserWarning,
)
return ordering, core_distances_, reachability_, predecessor_
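# Usage sketch: compute the graph once, then run several extractions on top
# of it (arguments mirror the OPTICS estimator defaults):
#
#     ordering, core_d, reach, pred = compute_optics_graph(
#         X, min_samples=5, max_eps=np.inf, metric="minkowski", p=2,
#         metric_params=None, algorithm="auto", leaf_size=30, n_jobs=None,
#     )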
def _set_reach_dist(
core_distances_,
reachability_,
predecessor_,
point_index,
processed,
X,
nbrs,
metric,
metric_params,
p,
max_eps,
):
P = X[point_index : point_index + 1]
# Assume that radius_neighbors is faster without distances
# and we don't need all distances, nevertheless, this means
# we may be doing some work twice.
indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
# Getting indices of neighbors that have not been processed
unproc = np.compress(~np.take(processed, indices), indices)
# Neighbors of current point are already processed.
if not unproc.size:
return
# Only compute distances to unprocessed neighbors:
if metric == "precomputed":
dists = X[point_index, unproc]
if issparse(dists):
dists.sort_indices()
dists = dists.data
else:
_params = dict() if metric_params is None else metric_params.copy()
if metric == "minkowski" and "p" not in _params:
# the same logic as neighbors, p is ignored if explicitly set
# in the dict params
_params["p"] = p
dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
rdists = np.maximum(dists, core_distances_[point_index])
np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
improved = np.where(rdists < np.take(reachability_, unproc))
reachability_[unproc[improved]] = rdists[improved]
predecessor_[unproc[improved]] = point_index
@validate_params(
{
"reachability": [np.ndarray],
"core_distances": [np.ndarray],
"ordering": [np.ndarray],
"eps": [Interval(Real, 0, None, closed="both")],
},
prefer_skip_nested_validation=True,
)
def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
"""Perform DBSCAN extraction for an arbitrary epsilon.
Extracting the clusters runs in linear time. Note that this results in
``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (``reachability_``).
core_distances : ndarray of shape (n_samples,)
Distances at which points become core (``core_distances_``).
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (``ordering_``).
eps : float
DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
to one another.
Returns
-------
labels_ : array of shape (n_samples,)
The estimated labels.
"""
n_samples = len(core_distances)
labels = np.zeros(n_samples, dtype=int)
far_reach = reachability > eps
near_core = core_distances <= eps
labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
labels[far_reach & ~near_core] = -1
return labels
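# Usage sketch: extract DBSCAN-style labels at several eps values from one
# fitted OPTICS model without refitting (the eps values are illustrative):
#
#     clust = OPTICS(min_samples=5, max_eps=2.0).fit(X)
#     for eps in (0.5, 1.0, 1.5):
#         labels = cluster_optics_dbscan(
#             reachability=clust.reachability_,
#             core_distances=clust.core_distances_,
#             ordering=clust.ordering_,
#             eps=eps,
#         )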
@validate_params(
{
"reachability": [np.ndarray],
"predecessor": [np.ndarray],
"ordering": [np.ndarray],
"min_samples": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
],
"min_cluster_size": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
None,
],
"xi": [Interval(Real, 0, 1, closed="both")],
"predecessor_correction": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def cluster_optics_xi(
*,
reachability,
predecessor,
ordering,
min_samples,
min_cluster_size=None,
xi=0.05,
predecessor_correction=True,
):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
        not reflect the hierarchy, usually ``len(clusters) >
        len(np.unique(labels))``.
"""
n_samples = len(reachability)
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
if min_cluster_size is None:
min_cluster_size = min_samples
_validate_size(min_cluster_size, n_samples, "min_cluster_size")
if min_cluster_size <= 1:
min_cluster_size = max(2, int(min_cluster_size * n_samples))
clusters = _xi_cluster(
reachability[ordering],
predecessor[ordering],
ordering,
xi,
min_samples,
min_cluster_size,
predecessor_correction,
)
labels = _extract_xi_labels(ordering, clusters)
return labels, clusters
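# Usage sketch: re-extract xi clusters with a different xi from an already
# fitted model (`clust` as above; the values are illustrative):
#
#     labels, hierarchy = cluster_optics_xi(
#         reachability=clust.reachability_,
#         predecessor=clust.predecessor_,
#         ordering=clust.ordering_,
#         min_samples=5,
#         xi=0.1,
#     )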
def _extend_region(steep_point, xward_point, start, min_samples):
"""Extend the area until it's maximal.
    It's the same function for both upward and downward regions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
    To extend an upward region, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
        regions can't have more than ``min_samples`` consecutive non-steep
points.
Returns
-------
index : int
The current index iterating over all the samples, i.e. where we are up
to in our search.
end : int
The end of the region, which can be behind the index. The region
includes the ``end`` index.
"""
n_samples = len(steep_point)
non_xward_points = 0
index = start
end = start
# find a maximal area
while index < n_samples:
if steep_point[index]:
non_xward_points = 0
end = index
elif not xward_point[index]:
            # not a steep point, but still moving in the xward direction.
non_xward_points += 1
# region should include no more than min_samples consecutive
# non steep xward points.
if non_xward_points > min_samples:
break
else:
return end
index += 1
return end
def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):
"""Update steep down areas (SDAs) using the new maximum in between (mib)
value, and the given complement of xi, i.e. ``1 - xi``.
"""
if np.isinf(mib):
return []
res = [
sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement
]
for sda in res:
sda["mib"] = max(sda["mib"], mib)
return res
def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
"""Correct for predecessors.
Applies Algorithm 2 of [1]_.
    Input parameters are ordered by the computed OPTICS ordering.
.. [1] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
"""
while s < e:
if reachability_plot[s] > reachability_plot[e]:
return s, e
p_e = ordering[predecessor_plot[e]]
for i in range(s, e):
if p_e == ordering[i]:
return s, e
e -= 1
return None, None
def _xi_cluster(
reachability_plot,
predecessor_plot,
ordering,
xi,
min_samples,
min_cluster_size,
predecessor_correction,
):
"""Automatically extract clusters according to the Xi-steep method.
    This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
Predecessors ordered according to the calculated ordering.
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
"""
    # Our implementation appends an inf to the end of the reachability
    # plot; this helps to find potential clusters at the end of the
    # reachability plot even if there's no upward region at the end of it.
reachability_plot = np.hstack((reachability_plot, np.inf))
xi_complement = 1 - xi
sdas = [] # steep down areas, introduced in section 4.3.2 of the paper
clusters = []
index = 0
mib = 0.0 # maximum in between, section 4.3.2
    # Our implementation corrects a mistake in the original
    # paper, i.e., in Definition 9 (steep downward point),
    # r(p) * (1 - xi) <= r(p + 1) should be
    # r(p) * (1 - xi) >= r(p + 1)
with np.errstate(invalid="ignore"):
ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
    # The following loop follows Figure 19 of the paper almost exactly;
    # it jumps over the areas which are neither steep-down nor steep-up areas.
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
# just continue if steep_index has been a part of a discovered xward
# area.
if steep_index < index:
continue
mib = max(mib, np.max(reachability_plot[index : steep_index + 1]))
# steep downward areas
if steep_downward[steep_index]:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
D_start = steep_index
D_end = _extend_region(steep_downward, upward, D_start, min_samples)
D = {"start": D_start, "end": D_end, "mib": 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability_plot[index]
# steep upward areas
else:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
U_start = steep_index
U_end = _extend_region(steep_upward, downward, U_start, min_samples)
index = U_end + 1
mib = reachability_plot[index]
U_clusters = []
for D in sdas:
c_start = D["start"]
c_end = U_end
# line (**), sc2*
if reachability_plot[c_end + 1] * xi_complement < D["mib"]:
continue
# Definition 11: criterion 4
D_max = reachability_plot[D["start"]]
if D_max * xi_complement >= reachability_plot[c_end + 1]:
# Find the first index from the left side which is almost
# at the same level as the end of the detected cluster.
while (
reachability_plot[c_start + 1] > reachability_plot[c_end + 1]
and c_start < D["end"]
):
c_start += 1
elif reachability_plot[c_end + 1] * xi_complement >= D_max:
# Find the first index from the right side which is almost
# at the same level as the beginning of the detected
# cluster.
# Our implementation corrects a mistake in the original
# paper, i.e., in Definition 11 4c, r(x) < r(sD) should be
# r(x) > r(sD).
while reachability_plot[c_end - 1] > D_max and c_end > U_start:
c_end -= 1
# predecessor correction
if predecessor_correction:
c_start, c_end = _correct_predecessor(
reachability_plot, predecessor_plot, ordering, c_start, c_end
)
if c_start is None:
continue
# Definition 11: criterion 3.a
if c_end - c_start + 1 < min_cluster_size:
continue
# Definition 11: criterion 1
if c_start > D["end"]:
continue
# Definition 11: criterion 2
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
# add smaller clusters first.
U_clusters.reverse()
clusters.extend(U_clusters)
return np.array(clusters)
def _extract_xi_labels(ordering, clusters):
"""Extracts the labels from the clusters returned by `_xi_cluster`.
We rely on the fact that clusters are stored
with the smaller clusters coming before the larger ones.
Parameters
----------
ordering : array-like of shape (n_samples,)
        The ordering of points calculated by OPTICS.
clusters : array-like of shape (n_clusters, 2)
List of clusters i.e. (start, end) tuples,
as returned by `_xi_cluster`.
Returns
-------
labels : ndarray of shape (n_samples,)
"""
labels = np.full(len(ordering), -1, dtype=int)
label = 0
for c in clusters:
if not np.any(labels[c[0] : (c[1] + 1)] != -1):
labels[c[0] : (c[1] + 1)] = label
label += 1
labels[ordering] = labels.copy()
return labels
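# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). The helpers above back ``OPTICS(cluster_method="xi")``; a minimal
# end-to-end run through the public API, assuming scikit-learn and NumPy are
# installed, looks like this:
import numpy as np
from sklearn.cluster import OPTICS

rng = np.random.default_rng(0)
X = np.vstack([
    rng.normal(0.0, 0.3, size=(50, 2)),    # dense blob 1
    rng.normal(5.0, 0.3, size=(50, 2)),    # dense blob 2
    rng.uniform(-2.0, 7.0, size=(10, 2)),  # scattered background noise
])
clust = OPTICS(min_samples=10, xi=0.05, min_cluster_size=0.1).fit(X)
# labels_ is built by _extract_xi_labels(ordering_, clusters), where
# clusters is the [start, end] array produced by _xi_cluster above.
print(clust.labels_)             # -1 marks noise points
print(clust.cluster_hierarchy_)  # raw [start, end] cluster array
# ---------------------------------------------------------------------------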
# ===========================================================================
# File: /app/job/daka.py  (repo: CaoZ/JD-Coin, license: Apache-2.0, Python)
# ===========================================================================
import traceback
from requests import Session
import browser
import job
from .common import find_value, RequestError
class Daka:
job_name = '小白卡钢镚打卡'
index_url = 'https://bk.jd.com/m/channel/login/daka.html'
login_url = 'https://home.m.jd.com'
sign_url = 'https://bk.jd.com/m/channel/login/clock.html'
test_url = index_url
job_gb_url = 'https://bk.jd.com/m/channel/login/recDakaGb.html'
logger = job.logger
def __init__(self, session: Session):
self.session = session
self.job_success = False
def run(self):
self.logger.info('Job Start: {}'.format(self.job_name))
is_login = self.is_login()
self.logger.info('登录状态: {}'.format(is_login))
if not is_login:
self.logger.info('进行登录...')
try:
self.login()
is_login = True
self.logger.info('登录成功')
except Exception as e:
self.logger.error('登录失败: {}'.format(repr(e)))
if is_login:
if self.is_signed():
self.job_success = True
else:
self.job_success = self.sign()
self.logger.info('Job End.')
def is_login(self):
r = self.session.get(self.test_url, allow_redirects=False)
if r.is_redirect and '/login' in r.headers['Location']:
return False
else:
return True
def login(self):
cookies = browser.get_cookies(self.login_url)
self.session.cookies.update(cookies)
def is_signed(self):
r = self.session.get(self.index_url)
signed = False
if r.ok:
sign_pattern = r'dakaed:\s*(\w+)'
days_pattern = r'dakaNumber:\s*(\d+)'
try:
signed = ('true' == find_value(sign_pattern, r.text))
sign_days = int(find_value(days_pattern, r.text))
self.logger.info('今日已打卡: {}; 打卡天数: {}'.format(signed, sign_days))
except Exception as e:
self.logger.error('返回数据结构可能有变化, 获取打卡数据失败: {}'.format(e))
traceback.print_exc()
return signed
def sign(self):
try:
data = self.fetch_data(self.sign_url)
self.logger.info('打卡成功: ' + data['resultMessage'])
return True
except RequestError as e:
if e.code == '0003':
                # already checked in 7 times; a gangbeng (钢镚) collecting task under "Tasks" must be completed first...
self.logger.info('已打卡 7 次, 去完成领钢镚任务...')
pick_success = self.pick_gb()
if pick_success:
                    # gangbeng collected successfully; restart the check-in task
return self.sign()
else:
e.message = '钢镚领取任务未成功完成.'
self.logger.error('打卡失败: ' + e.message)
return False
def pick_gb(self):
        # the task list can be viewed at https://bk.jd.com/m/money/doJobMoney.html
        # the id of the gangbeng-collecting task is 82
try:
data = self.fetch_data(self.job_gb_url)
self.logger.info('钢镚领取成功: {}'.format(data['resultMessage']))
return True
except RequestError as e:
self.logger.error('领钢镚 -> 钢镚领取失败: {}'.format(e.message))
return False
def fetch_data(self, url, payload=None):
r = self.session.get(url, params=payload)
try:
as_json = r.json()
except ValueError:
raise RequestError('unexpected response: url: {}; http code: {}'.format(url, r.status_code), response=r)
if as_json['success']:
            # request succeeded
return as_json
else:
error_msg = as_json.get('resultMessage') or str(as_json)
error_code = as_json.get('resultCode')
raise RequestError(error_msg, error_code)
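# Illustrative driver sketch (added for exposition; not part of the original
# file). The project's job runner presumably invokes this class roughly like
# the following (module path and wiring are assumptions):
#
#     from requests import Session
#     from app.job.daka import Daka
#
#     task = Daka(Session())
#     task.run()
#     print('job_success =', task.job_success)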
# ===========================================================================
# File: /SimTracker/TrackTriggerAssociation/python/StubAssociator_cff.py
# (repo: cms-sw/cmssw, license: Apache-2.0, Python)
# ===========================================================================
#---------------------------------------------------------------------------------------------------------
# This defines StubAssociator, which produces the StubAssociation used by the L1Trigger/trackerDTC and
# trackerTFP analyzers to associate Stubs with MC truth, either via TTStubAssociationMap or
# TTClusterAssociationMap. The latter is more useful for debugging L1 tracking; the former is
# provided to allow the same association as in the standard workflow analyzer if wanted.
#---------------------------------------------------------------------------------------------------------
import FWCore.ParameterSet.Config as cms
from L1Trigger.TrackTrigger.ProducerSetup_cff import TrackTriggerSetup
from SimTracker.TrackTriggerAssociation.StubAssociator_cfi import StubAssociator_params
StubAssociator = cms.EDProducer('tt::StubAssociator', StubAssociator_params)
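# Illustrative wiring sketch (added for exposition; not part of the original
# file). A cmsRun configuration would typically pull this producer in via:
#
#     process.load('SimTracker.TrackTriggerAssociation.StubAssociator_cff')
#     process.assoc = cms.Path(process.StubAssociator)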
# ===========================================================================
# File: /api/paginator.py  (repo: erigones/esdc-ce, license: Apache-2.0)
# ===========================================================================
from collections import OrderedDict
from django.core.paginator import InvalidPage, Paginator as DjangoPaginator
from api.exceptions import NotFound
from api.utils.urls import replace_query_param
class Paginator(DjangoPaginator):
"""
API paginator that has a get_response_results() method used in api_views via api.utils.get_pager().
"""
page_query_param = 'page'
invalid_page_message = 'Invalid page.'
def __init__(self, request, object_list, per_page, **kwargs):
super(Paginator, self).__init__(object_list, per_page, **kwargs)
self._page = None
self.request = request
def get_page(self, page):
try:
self._page = self.page(page)
except InvalidPage:
raise NotFound(self.invalid_page_message)
else:
return self._page
def get_next_link(self):
if not self._page.has_next():
return None
url = self.request.build_absolute_uri()
page_number = self._page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self):
if not self._page.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = self._page.previous_page_number()
if page_number == 1:
return None
return replace_query_param(url, self.page_query_param, page_number)
def get_response_results(self, results):
return OrderedDict([
('count', self._page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', results),
])
def get_pager(request, qs, per_page=100, page=None):
"""
Return our paginator.page object for a queryset.
"""
paginator = Paginator(request, qs, per_page)
if page is None:
page = request.query_params.get('page', 1)
return paginator.get_page(page)
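# Illustrative usage sketch (added for exposition; not part of the original
# file). An api_view would typically combine the helpers above like this
# (``MyModel`` and ``serialize`` are hypothetical placeholders):
#
#     page = get_pager(request, MyModel.objects.all(), per_page=50)
#     results = serialize(page.object_list)
#     return page.paginator.get_response_results(results)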
# ===========================================================================
# File: /govern/data-meta/amundsen/frontend/tests/unit/api/mail/test_v0.py
# (repo: alldatacenter/alldata; licenses: Apache-2.0, BSD-3-Clause, MIT,
#  LicenseRef-scancode-unknown-license-reference)
# ===========================================================================
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import Mock
from http import HTTPStatus
from typing import Dict, List, Optional
from flask import Response, jsonify, make_response
from amundsen_application import create_app
from amundsen_application.base.base_mail_client import BaseMailClient
class MockMailClient(BaseMailClient):
    def __init__(self, status_code: int, recipients: Optional[List] = None) -> None:
self.status_code = status_code
def send_email(self,
html: str,
subject: str,
optional_data: Optional[Dict] = None,
recipients: Optional[List[str]] = None,
sender: Optional[str] = None) -> Response:
return make_response(jsonify({}), self.status_code)
class MockBadClient(BaseMailClient):
def __init__(self) -> None:
pass
def send_email(self,
html: str,
subject: str,
optional_data: Optional[Dict] = None,
recipients: Optional[List[str]] = None,
sender: Optional[str] = None) -> Response:
raise Exception('Bad client')
class MailTest(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app('amundsen_application.config.TestConfig', 'tests/templates')
self.app_context = self.app.app_context()
self.app_context.push()
def tearDown(self) -> None:
self.app_context.pop()
def test_feedback_client_not_implemented(self) -> None:
"""
Test mail client is not implemented, and endpoint should return appropriate code
:return:
"""
with self.app.test_client() as test:
response = test.post('/api/mail/v0/feedback', json={
'rating': '10', 'comment': 'test'
})
self.assertEqual(response.status_code, HTTPStatus.NOT_IMPLEMENTED)
def test_feedback_client_success(self) -> None:
"""
Test mail client success
:return:
"""
status_codes = [HTTPStatus.OK, HTTPStatus.ACCEPTED]
for status_code in status_codes:
self.app.config['MAIL_CLIENT'] = MockMailClient(status_code=status_code)
with self.subTest():
with self.app.test_client() as test:
response = test.post('/api/mail/v0/feedback', json={
'rating': '10', 'comment': 'test'
})
self.assertTrue(200 <= response.status_code <= 300)
def test_feedback_client_raise_exception(self) -> None:
"""
Test failure due to incorrect implementation of base_mail_client
:return:
"""
self.app.config['MAIL_CLIENT'] = MockBadClient()
with self.app.test_client() as test:
response = test.post('/api/mail/v0/feedback', json={
'rating': '10', 'comment': 'test'
})
self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
def test_feedback_client_propagate_status_code(self) -> None:
"""
Test that specific status codes returned from a custom mail client propagate,
so that they may be appropriately logged and surfaced to the React application
:return:
"""
expected_code = HTTPStatus.BAD_REQUEST
self.app.config['MAIL_CLIENT'] = MockMailClient(status_code=expected_code)
with self.app.test_client() as test:
response = test.post('/api/mail/v0/feedback', json={
'rating': '10', 'comment': 'test'
})
self.assertEqual(response.status_code, expected_code)
@unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
def test_notification_endpoint_calls_send_notification(self, send_notification_mock: Mock) -> None:
"""
Test that the endpoint calls send_notification with the correct information
from the request json
:return:
"""
test_recipients = ['test@test.com']
test_notification_type = 'added'
test_options: Dict = {}
with self.app.test_client() as test:
test.post('/api/mail/v0/notification', json={
'recipients': test_recipients,
'notificationType': test_notification_type,
'options': test_options,
})
send_notification_mock.assert_called_with(
notification_type=test_notification_type,
options=test_options,
recipients=test_recipients,
sender=self.app.config['AUTH_USER_METHOD'](self.app).email
)
@unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
def test_notification_endpoint_fails_missing_notification_type(self, send_notification_mock: Mock) -> None:
"""
Test that the endpoint fails if notificationType is not provided in the
request json
:return:
"""
test_recipients = ['test@test.com']
test_sender = 'test2@test.com'
test_options: Dict = {}
with self.app.test_client() as test:
response = test.post('/api/mail/v0/notification', json={
'recipients': test_recipients,
'sender': test_sender,
'options': test_options,
})
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
self.assertFalse(send_notification_mock.called)
@unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
def test_notification_endpoint_fails_with_exception(self, send_notification_mock: Mock) -> None:
"""
Test that the endpoint returns 500 exception when error occurs
and that send_notification is not called
:return:
"""
with self.app.test_client() as test:
# generates error
response = test.post('/api/mail/v0/notification', json=None)
self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
self.assertFalse(send_notification_mock.called)
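# Illustrative configuration sketch (added for exposition; not part of the
# original tests). A deployment supplies a concrete BaseMailClient through
# the MAIL_CLIENT config key, mirroring what these tests do with mocks:
#
#     class LoggingMailClient(BaseMailClient):
#         # toy client: logs instead of sending; a real one would talk to
#         # an SMTP server or mail API
#         def send_email(self, html, subject, optional_data=None,
#                        recipients=None, sender=None):
#             print('would send {!r} to {}'.format(subject, recipients))
#             return make_response(jsonify({}), HTTPStatus.OK)
#
#     app.config['MAIL_CLIENT'] = LoggingMailClient()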
# ===========================================================================
# File: /kuryr_kubernetes/cni/handlers.py
# (repo: openstack/kuryr-kubernetes, license: Apache-2.0)
# ===========================================================================
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_vif import objects as obj_vif
from oslo_concurrency import lockutils
from oslo_log import log as logging
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.handlers import dispatch as k_dis
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
class CNIKuryrPortHandler(k8s_base.ResourceEventHandler):
OBJECT_KIND = k_const.K8S_OBJ_KURYRPORT
def __init__(self, registry):
super().__init__()
self.registry = registry
def on_vif(self, kuryrport, vifs):
kp_name = utils.get_res_unique_name(kuryrport)
with lockutils.lock(kp_name, external=True):
if (kp_name not in self.registry or
self.registry[kp_name] == k_const.CNI_DELETED_POD_SENTINEL
or self.registry[kp_name]['kp']['metadata']['uid'] !=
kuryrport['metadata']['uid']):
self.registry[kp_name] = {'kp': kuryrport,
'vifs': vifs,
'containerid': None,
'vif_unplugged': False,
'del_received': False}
else:
old_vifs = self.registry[kp_name]['vifs']
for iface in vifs:
if old_vifs[iface].active != vifs[iface].active:
kp_dict = self.registry[kp_name]
kp_dict['vifs'] = vifs
self.registry[kp_name] = kp_dict
def on_deleted(self, kuryrport, *args, **kwargs):
kp_name = utils.get_res_unique_name(kuryrport)
try:
if (kp_name in self.registry and self.registry[kp_name]
!= k_const.CNI_DELETED_POD_SENTINEL):
# NOTE(ndesh): We need to lock here to avoid race condition
# with the deletion code for CNI DEL so that
# we delete the registry entry exactly once
with lockutils.lock(kp_name, external=True):
if self.registry[kp_name]['vif_unplugged']:
del self.registry[kp_name]
else:
kp_dict = self.registry[kp_name]
kp_dict['del_received'] = True
self.registry[kp_name] = kp_dict
except KeyError:
# This means someone else removed it. It's odd but safe to ignore.
LOG.debug('KuryrPort %s entry already removed from registry while '
'handling DELETED event. Ignoring.', kp_name)
def on_present(self, kuryrport, *args, **kwargs):
LOG.debug('MODIFIED event for KuryrPort %s',
utils.get_res_unique_name(kuryrport))
vifs = self._get_vifs(kuryrport)
if vifs:
self.on_vif(kuryrport, vifs)
def _get_vifs(self, kuryrport):
vifs_dict = {
k: obj_vif.base.VersionedObject.obj_from_primitive(v['vif'])
for k, v in kuryrport['status']['vifs'].items()}
LOG.debug("Got vifs: %r", vifs_dict)
return vifs_dict
class CNIPodHandler(k8s_base.ResourceEventHandler):
OBJECT_KIND = k_const.K8S_OBJ_POD
def __init__(self, registry):
super().__init__()
self.registry = registry
def on_finalize(self, pod, *args, **kwargs):
# TODO(dulek): Verify if this is the handler for such case.
kp_name = utils.get_res_unique_name(pod)
with lockutils.lock(kp_name, external=True):
            # If there was no KP and the Pod got deleted, we need to inform
            # the thread waiting for it about that. We'll insert a sentinel
            # value.
if kp_name not in self.registry:
self.registry[kp_name] = k_const.CNI_DELETED_POD_SENTINEL
class CNIPipeline(k_dis.EventPipeline):
def _wrap_dispatcher(self, dispatcher):
return dispatcher
def _wrap_consumer(self, consumer):
return consumer
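# Expository note (added; not part of the original module): the shared
# ``registry`` maps a KuryrPort's unique name either to
# k_const.CNI_DELETED_POD_SENTINEL or to an entry shaped like
#
#     {'kp': <KuryrPort dict>, 'vifs': {iface_name: os_vif VIF, ...},
#      'containerid': None, 'vif_unplugged': False, 'del_received': False}
#
# The CNI ADD/DEL code paths elsewhere in the plugin presumably read and
# flip the two booleans under the same per-name lock used by these handlers.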
# ===========================================================================
# File: /hwt/synthesizer/rtlLevel/__init__.py  (repo: Nic30/hwt, license: MIT)
# ===========================================================================
"""
rtlLevel is responsible for RtlSignal manipulation and design.
"""
# ===========================================================================
# File: /tests/test_edgeql_volatility.py
# (repo: edgedb/edgedb; licenses: Apache-2.0,
#  LicenseRef-scancode-unknown-license-reference)
# ===========================================================================
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os.path
import edgedb
from edb.testbase import server as tb
from edb.testbase import serutils
from edb.tools import test
class TestEdgeQLVolatility(tb.QueryTestCase):
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'volatility.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'volatility_setup.edgeql')
def _check_crossproduct(self, res):
ns = set()
pairs = set()
for row in res:
ns.add(row[0])
pairs.add((row[0], row[1]))
self.assertEqual(
pairs,
{(n1, n2) for n1 in ns for n2 in ns},
)
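    # For instance (added for exposition): rows [(1, 1), (1, 2), (2, 1),
    # (2, 2)] pass this check, while [(1, 1), (2, 2)] fail it, since with
    # ns == {1, 2} the full cross product contains four pairs.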
def _test_loop(self, n=None, *, single=False):
async def json_query(*args, **kwargs):
q = self.con.query_single_json if single else self.con.query_json
res = await q(*args, **kwargs)
return json.loads(res)
async def native_query(*args, **kwargs):
q = self.con.query_single if single else self.con.query
res = await q(*args, **kwargs)
return serutils.serialize(res)
async def native_query_typenames(*args, **kwargs):
res = await self.con._fetchall(*args, **kwargs, __typenames__=True)
if single:
assert len(res) == 1
res = res[0]
return serutils.serialize(res)
qs = [json_query, native_query, native_query_typenames]
if n is None:
n = len(qs)
for i in range(n):
yield qs[i % len(qs)]
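    # Typical use (added for exposition):
    #
    #     for query in self._test_loop(10):
    #         res = await query("SELECT ...")
    #
    # so that each assertion is exercised against the JSON, native, and
    # typename-annotated fetch paths in turn.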
async def test_edgeql_volatility_function_01(self):
result = await self.con.query(
r"""
SELECT Obj {
# immutable function should only be called once,
# generating the same value for all Objs
x := vol_immutable()
};
"""
)
self.assertEqual(
len(set(res.x for res in result)), 1,
'more than one value for the same vol_immutable() call'
)
async def test_edgeql_volatility_function_02(self):
result = await self.con.query(
r"""
SELECT Obj {
# stable function should only be called once,
# generating the same value for all Objs
x := vol_stable()
};
"""
)
self.assertEqual(
len(set(res.x for res in result)), 1,
'more than one value for the same vol_stable() call'
)
async def test_edgeql_volatility_function_03a(self):
result = await self.con.query(
r"""
SELECT Obj {
# volatile function should be called once for each
# Obj, generating different values
x := vol_volatile()
};
"""
)
self.assertNotEqual(
len(set(res.x for res in result)), 1,
'only one value for multiple vol_volatile() calls'
)
async def test_edgeql_volatility_function_03b(self):
result = await self.con.query(
r"""
SELECT Obj {
# volatile function should be called once for each
# Obj, generating different values
x := (vol_volatile(),)
};
"""
)
self.assertNotEqual(
len(set(res.x for res in result)), 1,
'only one value for multiple vol_volatile() calls'
)
async def test_edgeql_volatility_function_04(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable function call can be
# factored out and called once per query
x := 1 IF Obj.n > 0 ELSE err_immutable()
};
''')
async def test_edgeql_volatility_function_05(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# stable function call cannot be factored out
x := 1 IF Obj.n > 0 ELSE err_stable()
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_function_06(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# volatile function call cannot be factored out
x := 1 IF Obj.n > 0 ELSE err_volatile()
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_operator_01(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable operation can be factored out
# and called once per query
x := 1 IF Obj.n > 0 ELSE (1/0)
};
''')
async def test_edgeql_volatility_cast_01(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable cast can be factored out
# and called once per query
x := 1 IF Obj.n > 0 ELSE (<int64>(<float64>1)/0)
};
''')
async def test_edgeql_volatility_cast_02(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# stable cast (<json>) cannot be factored out
x := 1 IF Obj.n > 0 ELSE (<int64>(<json>1)/0)
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_for_01(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {1,2} UNION (
uuid_generate_v4())));
''',
[2],
)
async def test_edgeql_volatility_for_02(self):
await self.assert_query_result(
r'''
WITH X := (FOR x in {1,2} UNION (uuid_generate_v1mc(), x))
SELECT count(DISTINCT X.0);
''',
[2],
)
async def test_edgeql_volatility_for_03(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
FOR x in {1,2} UNION (uuid_generate_v1mc(), x)))
SELECT count(DISTINCT X.0);
''',
[4],
)
async def test_edgeql_volatility_for_04(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
(0,
(FOR x in {1,2} UNION (
uuid_generate_v1mc(), x)))))
SELECT count(DISTINCT X.1.0);
''',
[4],
)
async def test_edgeql_volatility_for_05(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
(uuid_generate_v1mc(),
(INSERT Obj { n := y }))))
SELECT count(DISTINCT X.0);
''',
[2],
)
async def test_edgeql_volatility_for_06(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {1,1} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_07(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {(),()} UNION (
uuid_generate_v4())));
''',
[2],
)
async def test_edgeql_volatility_for_08(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {({1,2}, 0).1} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_09(self):
await self.assert_query_result(
r'''
SELECT count(
DISTINCT (FOR x in {(Obj { x := random() }).x} UNION (
uuid_generate_v1mc())));
''',
[3],
)
async def test_edgeql_volatility_for_10(self):
res = await self.con.query(
r'''
WITH x := random() FOR y in {1,2,3} UNION (x);
''',
)
self.assertEqual(len(set(res)), 1)
async def test_edgeql_volatility_for_11(self):
await self.assert_query_result(
r'''
WITH X := ((FOR x in {(Obj { x := random() })} UNION (x.x))),
SELECT count(DISTINCT X)
''',
[3],
)
await self.assert_query_result(
r'''
WITH X := ((FOR x in {(Obj { x := random() })} UNION (x.x))),
SELECT count(X)
''',
[3],
)
async def test_edgeql_volatility_for_12(self):
await self.assert_query_result(
r'''
WITH X := ((FOR x in {(Obj { x := random() }).x} UNION (x))),
SELECT count(DISTINCT X)
''',
[3],
)
await self.assert_query_result(
r'''
WITH X := ((FOR x in {(Obj { x := random() }).x} UNION (x))),
SELECT count(X)
''',
[3],
)
async def test_edgeql_volatility_with_and_use_01(self):
await self.assert_query_result(
r'''
WITH X := (Obj { x := random() }).x,
SELECT count(DISTINCT X);
''',
[3],
)
await self.assert_query_result(
r'''
WITH X := (Obj { x := random() }).x,
SELECT count(X);
''',
[3],
)
async def test_edgeql_volatility_with_and_use_02(self):
await self.assert_query_result(
r'''
WITH X := (SELECT Obj { x := random() }).x,
SELECT count(DISTINCT X);
''',
[3],
)
await self.assert_query_result(
r'''
WITH X := (SELECT Obj { x := random() }).x,
SELECT count(X);
''',
[3],
)
async def test_edgeql_volatility_select_clause_01a(self):
# Spurious failure probability: 1/100!
        # We need a nested SELECT because of bug #1816,
        # which loses the ORDER BY otherwise
await self.assert_query_result(
r'''
WITH X := enumerate((SELECT _gen_series(0,99)
ORDER BY random()))
SELECT all(X.0 = X.1);
''',
[False],
)
async def test_edgeql_volatility_select_clause_01(self):
# Spurious failure probability: 1/100!
        # We need a nested SELECT because of bug #1816,
        # which loses the ORDER BY otherwise
await self.assert_query_result(
r'''
WITH X := enumerate((SELECT _gen_series(0,99)
ORDER BY random()))
SELECT all((FOR x in {X} UNION (x.0 = x.1)))
''',
[False],
)
async def test_edgeql_volatility_select_clause_02(self):
# Spurious failure probability: 1/2^99
await self.assert_query_result(
r'''
SELECT count((SELECT _gen_series(0,99) FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_select_clause_03(self):
# Spurious failure probability: 1/2^100 I think
# We want to test that the two SELECTs do separate FILTERs
# This is written in an awful way because of a bug with WITH.
await self.assert_query_result(
r'''
FOR X in {
array_agg(
(FOR x in {0, 1} UNION (SELECT _gen_series(0,100)
FILTER random() > 0.5)))}
UNION (
SELECT count(array_unpack(X))
!= 2*count(DISTINCT array_unpack(X)));
''',
[True],
)
async def test_edgeql_volatility_select_clause_04(self):
# Spurious failure probability: 1/2^100 I think
# This is just the test above but manually...
result = await self.con.query(
r'''
FOR x in {0, 1} UNION (
SELECT _gen_series(0,100) FILTER random() > 0.5
)
''',
)
self.assertNotEqual(
2 * len(set(result)), len(result),
'SELECT in FOR loop not doing independent filters'
)
async def test_edgeql_volatility_select_clause_05(self):
# Spurious failure probability: 1/2^99
await self.assert_query_result(
r'''
WITH X := (FOR x in {_gen_series(0,99)} UNION (()))
SELECT count((SELECT X FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_select_clause_06(self):
# Spurious failure probability: 1/2^99
await self.assert_query_result(
r'''
WITH X := (_gen_series(0,99), 0).1
SELECT count((SELECT X FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_with_01(self):
await self.assert_query_result(
r'''
WITH X := random() SELECT sum(X) = sum(X);
''',
[True],
)
async def test_edgeql_volatility_with_02(self):
await self.assert_query_result(
r'''
WITH X := random(), Y := X SELECT sum(Y) = sum(Y)
''',
[True],
)
async def test_edgeql_volatility_with_03(self):
await self.assert_query_result(
r'''
WITH W := random(),
Z := W,
SELECT W = Z;
''',
[True],
)
async def test_edgeql_volatility_with_04(self):
await self.assert_query_result(
r'''
WITH W := {random(), random()},
Z := W+0,
SELECT _ := (W = Z) ORDER BY _;
''',
[False, False, True, True],
)
async def test_edgeql_volatility_with_05(self):
await self.con.execute(r'''
CREATE TYPE Foo { CREATE PROPERTY asdf -> tuple<float64> };
''')
await self.con.query(r'''
WITH X := (random(),) SELECT X.0;
''')
await self.con.query(r'''
WITH X := {(random(),),(random(),)} SELECT X.0;
''')
async def test_edgeql_volatility_update_clause_01(self):
# Spurious failure probability: 1/2^99
await self.con.execute(r'''
FOR x in {_gen_series(4,100)} UNION (
INSERT Obj { n := x })
''')
await self.assert_query_result(
r'''
SELECT count(Obj)
''',
[100],
)
await self.assert_query_result(
r'''
WITH X := (UPDATE Obj FILTER random() > 0.5
SET { n := -1 })
SELECT count(X) NOT IN {0, 100}
''',
[True],
)
await self.assert_query_result(
r'''
WITH X := (SELECT Obj FILTER .n < 0)
SELECT count(X) != 0 AND count(X) != 100
''',
[True],
)
async def test_edgeql_volatility_delete_clause_01(self):
# Spurious failure probability: 1/2^99
await self.con.execute(r'''
FOR x in {_gen_series(4,100)} UNION (
INSERT Obj { n := x })
''')
await self.assert_query_result(
r'''
WITH X := (DELETE Obj FILTER random() > 0.5)
SELECT count(X) NOT IN {0, 100}
''',
[True],
)
await self.assert_query_result(
r'''
SELECT count(Obj) != 0 AND count(Obj) != 100
''',
[True],
)
async def test_edgeql_volatility_select_with_objects_01(self):
for query in self._test_loop(10):
res = await query("""
WITH W := (SELECT Obj FILTER random() > 0.5),
SELECT ((SELECT W {n}), (SELECT W {n}))
""")
self._check_crossproduct(
[(row[0]['n'], row[1]['n']) for row in res])
async def test_edgeql_volatility_select_with_objects_02(self):
for query in self._test_loop(10):
res = await query("""
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m;
""")
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_03(self):
for query in self._test_loop(10):
res = await query("""
SELECT {
o := (
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m
)
};
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_04(self):
for query in self._test_loop(10):
res = await query("""
SELECT {
o := (SELECT (
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m
))
}
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_05(self):
for query in self._test_loop(10):
res = await query("""
SELECT {
o := (SELECT (
SELECT Obj {n, m := random()}
FILTER .m > 0.3
) ORDER BY .m)
}
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_06(self):
for query in self._test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := random()}
) FILTER .m > 0.3 ORDER BY .m
""")
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_07(self):
for query in self._test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := {random(), random()}}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums, key=max))
async def test_edgeql_volatility_select_with_objects_08(self):
for query in self._test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := (random(), random())}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_09(self):
for query in self._test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := [random(), random()]}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_10(self):
for query in self._test_loop():
res = await query("""
WITH X := (Obj { m := random()},)
SELECT X.0;
""")
self.assertEqual(len(res), 3)
async def test_edgeql_volatility_select_objects_optional_01(self):
for _ in range(10):
await self.assert_query_result(
r'''
WITH X := (SELECT Obj {
m := (SELECT .n FILTER random() > 0.5) }),
SELECT count(X);
''',
[3],
)
async def test_edgeql_volatility_select_objects_optional_02(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH X := (SELECT Obj {
m := (SELECT .n FILTER random() > 0.5) }),
SELECT {
foo := (SELECT X {n, m}),
baz := (SELECT X.m),
};
""")
foos = [x['m'] for x in res['foo'] if x['m'] is not None]
self.assertEqual(set(foos), set(res['baz']))
async def test_edgeql_volatility_select_hard_objects_01a(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT (O.m, O.m);
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0], row[1])
# Make sure it is really volatile
self.assertNotEqual(res[0][0], res[1][0])
async def test_edgeql_volatility_select_hard_objects_01b(self):
for query in self._test_loop():
# one side in a subquery, one not
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O.m), O.m);
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0], row[1])
async def test_edgeql_volatility_select_hard_objects_02a(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O.m), (SELECT O.m));
""")
self.assertEqual(len(res), 9)
self._check_crossproduct(res)
async def test_edgeql_volatility_select_hard_objects_02b(self):
for query in self._test_loop(10):
res = await query("""
WITH O := (SELECT Obj {m := random()} FILTER .m > 0.3),
SELECT ((SELECT O.m), (SELECT O.m));
""")
for row in res:
self.assertGreater(row[0], 0.3)
self._check_crossproduct(res)
async def test_edgeql_volatility_select_hard_objects_03(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT (O {m}, O {m});
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_04a(self):
# TODO: this, but wrapped in DISTINCT
# (which breaks the serialization, ugh!)
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
self._check_crossproduct(
[(row[0]['m'], row[1]['m']) for row in res])
async def test_edgeql_volatility_select_hard_objects_04b(self):
# TODO: this, but wrapped in DISTINCT
# (which breaks the serialization, ugh!)
for query in self._test_loop(10):
res = await query("""
WITH O := (SELECT Obj {m := random()} FILTER .m > 0.3),
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
for row in res:
self.assertGreater(row[0]['m'], 0.3)
self._check_crossproduct(
[(row[0]['m'], row[1]['m']) for row in res])
async def test_edgeql_volatility_select_hard_objects_05a(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT {m := next()} LIMIT 1),
SELECT (O {m}, O {m});
""")
self.assertEqual(len(res), 1)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_05b(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT {m := next()} LIMIT 1),
SELECT assert_exists((O {m}, O {m}));
""")
self.assertEqual(len(res), 1)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_06(self):
# now let's try it with a multi prop
res = await self.con.query("""
WITH O := (SELECT Obj {m := {next(), next()} })
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
self._check_crossproduct(
[(tuple(row[0].m), tuple(row[1].m)) for row in res])
async def test_edgeql_volatility_select_hard_objects_07(self):
# now let's try it with a multi prop
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {m := {next(), next()} })
SELECT ((O {m}), (O {m}));
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_08a(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT {
foo := (SELECT O {n, m}),
bar := (SELECT O {n, m}),
};
""")
self.assertEqual(
{(x['n'], x['m']) for x in res['foo']},
{(x['n'], x['m']) for x in res['bar']},
)
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_hard_objects_08b(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {m := next()} LIMIT 1),
SELECT {
foo := (SELECT O {n, m}),
bar := (SELECT O {n, m}),
};
""")
self.assertEqual(res['foo']['n'], res['bar']['n'])
self.assertEqual(res['foo']['m'], res['bar']['m'])
async def test_edgeql_volatility_select_hard_objects_09(self):
await self.assert_query_result(r'''
WITH O := (SELECT Obj {m := next()}),
SELECT {
foo := (SELECT O),
bar := (SELECT O),
};
''', [
{
'foo': [{"id": str}, {"id": str}, {"id": str}],
'bar': [{"id": str}, {"id": str}, {"id": str}],
}
])
async def test_edgeql_volatility_select_nested_01a(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT Tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends: {n}} ORDER BY .m),
b := (SELECT O {m, friends: {n}} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['n'] for x in ra['friends']},
{x['n'] for x in rb['friends']},
)
async def test_edgeql_volatility_select_nested_1b(self):
        # same as 01a but without a shape on friends
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT Tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends} ORDER BY .m),
b := (SELECT O {m, friends} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['id'] for x in ra['friends']},
{x['id'] for x in rb['friends']},
)
self.assertLessEqual(len(ra['friends']), 4)
async def test_edgeql_volatility_select_nested_02(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT .tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends: {n}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends.n} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['n'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_03a(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT .tgt { x := random() })
}),
SELECT {
a := (SELECT O {m, friends: {x}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends.x} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['x'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_03b(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT (SELECT .tgt) { @x := next() })
}),
SELECT {
a := (SELECT O {m, friends: {@x}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends@x} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['@x'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_04a(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() } )
}),
SELECT {
a := (SELECT O {friends: {n, x}}),
b := (SELECT O {friends: {n, x}}),
};
""")
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(len(ra['friends']), 4)
self.assertEqual(
sorted((x['n'], x['x']) for x in ra['friends']),
sorted((x['n'], x['x']) for x in rb['friends']),
)
async def test_edgeql_volatility_select_nested_04b(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {
tgt: { x := next() }
}),
SELECT {
a := (SELECT O {tgt: {n, x}}),
b := (SELECT O {tgt: {n, x}}),
};
""")
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(len(ra['tgt']), 2)
self.assertEqual(
sorted((x['n'], x['x']) for x in ra['tgt']),
sorted((x['n'], x['x']) for x in rb['tgt']),
)
async def test_edgeql_volatility_select_nested_05(self):
for query in self._test_loop(10, single=True):
res = await query("""
WITH O := (SELECT Obj {
m := rand_int(100),
friends := (SELECT Tgt { x := next() }
FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, n, friends: {n, x}, ha := .friends.x}),
b := (SELECT O {
m,
friends_tuples := (.friends.n, .friends.x),
friend_sums := sum(.friends.x),
}),
c := (O.n, O.friends {n, x}, O.friends {n, x}),
};
""")
cs = {x['n']: [] for x in res['a']}
for rc in res['c']:
self.assertEqual(rc[1]['n'], rc[2]['n'])
self.assertEqual(rc[1]['x'], rc[2]['x'])
cs[rc[0]].append([rc[1]['n'], rc[1]['x']])
for ra, rb in zip(res['a'], res['b']):
self.assertLessEqual(len(ra['friends']), 4)
self.assertEqual(
sorted(x['x'] for x in ra['friends']),
sorted(ra['ha']),
)
self.assertEqual(
sorted([x['n'], x['x']] for x in ra['friends']),
sorted(rb['friends_tuples']),
)
self.assertEqual(
sorted(cs[ra['n']]),
sorted(rb['friends_tuples']),
)
self.assertEqual(sum(ra['ha']), rb['friend_sums'])
async def test_edgeql_volatility_select_nested_06a(self):
# here we want some deduplicating to happen
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT {
x := (O { friends: {x} }),
y := O.friends.x,
};
""")
self.assertEqual(len(res['y']), 4)
all_xs = {t['x'] for r in res['x'] for t in r['friends']}
self.assertTrue(set(res['y']).issubset(all_xs))
async def test_edgeql_volatility_select_nested_06b(self):
# here we want some deduplicating to happen
for query in self._test_loop(single=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT {
x := (O { friends: {n, x} }),
y := O.friends {n, x},
};
""")
self.assertEqual(len(res['y']), 4)
all_xs = {(t['n'], t['x']) for r in res['x'] for t in r['friends']}
y = {(t['n'], t['x']) for t in res['y']}
self.assertTrue(y.issubset(all_xs))
async def test_edgeql_volatility_select_nested_06c(self):
# here we want some deduplicating to happen
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT ((SELECT O.friends.x), (SELECT O.friends.x));
""")
self.assertEqual(len(res), 16)
async def test_edgeql_volatility_select_nested_06d(self):
# here we want some deduplicating to happen
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT O.friends;
""")
self.assertEqual(len(res), 4)
async def test_edgeql_volatility_select_nested_06e(self):
# here we want some deduplicating to happen
# same as above but with an extra select
for query in self._test_loop():
res = await query("""
WITH O := (SELECT (SELECT Obj {
friends := (SELECT Tgt { x := next() })
})),
SELECT O.friends;
""")
self.assertEqual(len(res), 4)
async def test_edgeql_volatility_select_nested_07a(self):
for query in self._test_loop(10):
res = await query("""
SELECT Obj {
n,
tgt: {
n,
} FILTER random() < 0.5
}
FILTER EXISTS (.tgt);
""")
for row in res:
self.assertGreater(len(row['tgt']), 0)
async def test_edgeql_volatility_select_nested_07b(self):
for query in self._test_loop(10):
res = await query("""
SELECT Obj {
n,
tgts := (SELECT .tgt {
n,
} FILTER random() < 0.5)
}
FILTER EXISTS (.tgts);
""")
for row in res:
self.assertGreater(len(row['tgts']), 0)
@test.xerror("Arrays containing objects are hard; TODO: fail?")
async def test_edgeql_volatility_select_arrays_01(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := [(SELECT Obj {m := next()})],
SELECT {
foo := (SELECT O[0] {m}),
bar := (SELECT O[0] {m}),
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_tuples_01(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := ((SELECT Obj {m := next()}),),
SELECT {
foo := (SELECT O.0 {n, m}),
bar := (SELECT O.0 {n, m}),
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_tuples_02(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (z := ((SELECT Obj {m := next()}),)),
SELECT {
foo := (SELECT O.z.0 {n, m}),
bar := (SELECT O.z.0 {n, m}),
os := O,
ms := O.z.0.m,
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
self.assertEqual(
{x['m'] for x in res['foo']},
set(res['ms']),
)
async def test_edgeql_volatility_select_tuples_03(self):
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := ((SELECT Obj { m := next() }),),
SELECT count((SELECT (X, Y) FILTER X = Y));
''', [
3,
])
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := ((SELECT Obj { m := next() }),),
SELECT count((SELECT (X, Y) FILTER X < Y));
''', [
3,
])
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := (Obj,),
SELECT count((SELECT (X, Y) FILTER X < Y));
''', [
3,
])
async def test_edgeql_volatility_insert_01(self):
for query in self._test_loop(single=True):
res = await query("""
WITH
Foo := (SELECT (
INSERT Obj {n := 10}
) { m := next() })
SELECT {
foo := Foo {n, m},
bar := Foo {n, m},
};
""")
self.assertEqual(res['foo']['n'], 10)
self.assertEqual(res['foo']['m'], res['bar']['m'])
async def test_edgeql_volatility_nested_link_01(self):
# next() should get called once for each Obj/Tgt pair
for query in self._test_loop():
res = await query(
r"""
SELECT Obj {
l := (SELECT Tgt { m := next() }),
};
"""
)
nums = [t['m'] for o in res for t in o['l']]
self.assertEqual(len(nums), len(set(nums)))
async def test_edgeql_volatility_hack_01a(self):
await self.assert_query_result(r'''
SELECT (FOR x IN {1,2} UNION (SELECT Obj { m := x }))
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_01b(self):
await self.assert_query_result(r'''
SELECT (FOR x IN {1,2} UNION ((SELECT Obj) { m := x }))
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_01c(self):
await self.assert_query_result(r'''
SELECT (FOR x IN {1,2} UNION (Obj { m := x }))
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_02(self):
await self.assert_query_result(r'''
WITH X := (FOR x IN {1,2} UNION (SELECT Obj { m := x }))
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_03a(self):
await self.assert_query_result(r'''
WITH X := (WITH x := {1,2}, SELECT (x, Obj {m := x})).1
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_03b(self):
await self.assert_query_result(r'''
WITH X := (WITH x := {1,2}, SELECT (x, Obj {m := x}).1)
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_04a(self):
await self.assert_query_result(r'''
SELECT (WITH x := {1,2}, SELECT (x, Obj {m := x})).1
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_04b(self):
await self.assert_query_result(r'''
SELECT (WITH x := {1,2}, SELECT (x, Obj {m := x}).1)
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_05a(self):
await self.assert_query_result(r'''
SELECT (WITH x := {(SELECT Tgt FILTER .n < 3)},
SELECT (x.n, Obj {m := x.n})).1
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_05b(self):
await self.assert_query_result(r'''
WITH X := (WITH x := {(SELECT Tgt FILTER .n < 3)},
SELECT (x.n, Obj {m := x.n})).1,
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_for_like_hard_01(self):
for query in self._test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Z := (O, (SELECT O { n, x, y := -.x })).1
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['x'], res[1]['x'])
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
@test.xerror("column definition list is only allowed ...")
async def test_edgeql_volatility_for_like_hard_02(self):
# Weird stuff is happening here!
# 1. Putting basically anything other than O as the 1st tuple el works
# 2. If we reorder the arguments it works
# 3. If we add a real shape to the nested O, it works
for query in self._test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Z := (O, ({ o := O })).1
SELECT Z { o: {n, x} };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['o']['x'], res[1]['o']['x'])
@test.xerror("column definition list is only allowed ...")
async def test_edgeql_volatility_for_like_hard_03(self):
for query in self._test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Za := (O, ({ o := O })),
Z := Za.1
SELECT Z { o: {n, x} };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['o']['x'], res[1]['o']['x'])
async def test_edgeql_volatility_for_hard_01(self):
for query in self._test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj { x := next() }
)} UNION (
SELECT O { y := -.x }
)),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['x'], res[1]['x'])
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
async def test_edgeql_volatility_for_hard_02(self):
for query in self._test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj { x := next() }
)} UNION (
SELECT { a := O { n, x, y := -.x } }
)),
SELECT Z { a: { n, x, y }};
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['a']['x'], res[1]['a']['x'])
for obj in res:
self.assertEqual(obj['a']['x'], -obj['a']['y'])

    async def test_edgeql_volatility_for_hard_03(self):
for query in self._test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj {
tgt: { x := next() }
}
)} UNION (
SELECT O {tgt: {n, x, y := -.x}}
)),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])

    async def test_edgeql_volatility_for_hard_04(self):
for query in self._test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj {
tgt: { x := next() }
}
)} UNION (
SELECT { a := (O {tgt: {n, x, y := -.x}}) }
)),
SELECT Z { a: {tgt: {n, x, y} } };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['a']['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])

    async def test_edgeql_volatility_for_hard_05(self):
for query in self._test_loop(single=True):
res = await query("""
WITH Z := Obj { m := next() },
Y := (FOR k in {Z} UNION (k.m)),
SELECT { z := Z.m, y := Y };
""")
self.assertEqual(set(res['z']), set(res['y']))

    async def test_edgeql_volatility_rebind_flat_01(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {y := -.x}),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
for obj in res:
self.assertEqual(obj['x'], -obj['y'])

    async def test_edgeql_volatility_rebind_flat_02(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {x, y := -.x}),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
for obj in res:
self.assertEqual(obj['x'], -obj['y'])

    async def test_edgeql_volatility_rebind_flat_03(self):
for query in self._test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {x := .x}),
SELECT (Z.n, (SELECT Z.x), (SELECT Z.x));
""")
self.assertEqual(len(res), 3)
for _, x1, x2 in res:
self.assertEqual(x1, x2)

    async def test_edgeql_volatility_rebind_nested_01(self):
for query in self._test_loop():
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := (SELECT O {tgt: {n, x, y := -.x}}),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])

    async def test_edgeql_volatility_rebind_nested_02(self):
for query in self._test_loop():
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := (SELECT O {tgt: {n, y := -.x}}),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])

    async def test_edgeql_volatility_rebind_nested_03(self):
for query in self._test_loop(single=True):
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := { o := (SELECT O {tgt: {n, y := -.x}}) },
SELECT Z { o: {tgt: {n, x, y}} };
""")
for obj in res['o']:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])

    async def test_edgeql_volatility_shape_array_01(self):
for query in self._test_loop():
res = await query("""
WITH X := { multi x := [next()] },
SELECT ((SELECT X.x), (SELECT X.x));
""")
self.assertEqual(len(res), 1)
for obj in res:
self.assertEqual(obj[0], obj[1])

    async def test_edgeql_volatility_shape_array_02(self):
for query in self._test_loop():
res = await query("""
WITH X := { x := [next()] },
SELECT ((SELECT X.x), (SELECT X.x));
""")
self.assertEqual(len(res), 1)
for obj in res:
self.assertEqual(obj[0], obj[1])

    async def test_edgeql_volatility_in_func_01(self):
await self.con.execute('''
create function foo() -> float64 using (
with Z := Obj { v := random() },
select sum(Z.v)
);
''')
await self.con.query('select foo()')

    async def test_edgeql_volatility_errors_01(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT Obj.n + random()
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT (Obj.n, random())
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation"):
await self.con.execute(
r"""
SELECT ({1,2}, random())
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=28):
await self.con.execute(
r"""
SELECT random() + Obj.n
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=37):
await self.con.execute(
r"""
SELECT {1,2} + (FOR x in {1,2,3} UNION (x*random()))
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=37):
await self.con.execute(
r"""
SELECT ({1,2}, (INSERT Obj { n := 100 }))
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=65):
await self.con.execute(
r"""
SELECT ({1,2},
(FOR i in {1,2,3} UNION (
INSERT Obj { n := i })))
"""
)

        async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation"):
await self.con.execute(
r"""
WITH X := (WITH x := {1,2},
SELECT (x, Obj {m := vol_id(x)})).1
SELECT X;
"""
)
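
    # Note (added commentary): each case above asserts that crossing a
    # volatile expression (random(), INSERT, vol_id()) with a set raises
    # a QueryError at compile time instead of being silently evaluated
    # once per row of the cross product.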

[file: /sa/profiles/Alentis/NetPing/get_chassis_id.py (repo: nocproject/noc, license: BSD-3-Clause, Python, 920 bytes)]

# ---------------------------------------------------------------------
# Alentis.NetPing.get_chassis_id
# ---------------------------------------------------------------------
# Copyright (C) 2007-2014 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# NOC modules
from noc.sa.profiles.Generic.get_chassis_id import Script as BaseScript
from noc.sa.interfaces.igetchassisid import IGetChassisID
from noc.core.mib import mib


class Script(BaseScript):
name = "Alentis.NetPing.get_chassis_id"
interface = IGetChassisID
cache = True
always_prefer = "S"
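    # Added note: "S" prefers the SNMP source over CLI/HTTP; the OID below is
    # assumed to be IF-MIB::ifPhysAddress for interface index 1, i.e. the
    # device's base MAC address.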
SNMP_GET_OIDS = {"SNMP": [mib["IF-MIB::ifPhysAddress", 1]]}

    def execute_cli(self, **kwargs):
# Fallback to HTTP
data = self.profile.var_data(self, "/setup_get.cgi")
mac = data["mac"]
return {"first_chassis_mac": mac, "last_chassis_mac": mac}

[file: /uefi_firmware/guids/efiguids.py (repo: theopolis/uefi-firmware-parser, license: MIT, Python, 80,833 bytes)]

"""
efiguids.py
This is a giant list of protocol GUIDs I grepped out of the TianoCore source code. It should be relatively complete,
but won't contain any of Apple's proprietary GUIDs. I'll add those as I come across them.
See the following URL for more info and the latest version:
https://github.com/snarez/ida-efiutils
"""
GUIDs = {
'ACPI_TABLE_GUID': [0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'APPLE_REMOVABLE_MEDIA_PROTOCOL_GUID': [0x2ea9743a, 0x23d9, 0x425e, 0x87, 0x2c, 0xf6, 0x15, 0xaa, 0x19, 0x57, 0x88],
'ARM_GLOBAL_VARIABLE_PPI_GUID': [0xab1c1816, 0xd542, 0x4e6f, 0x9b, 0x1e, 0x8e, 0xcd, 0x92, 0x53, 0xe2, 0xe7],
'ARM_HOB_GLOBAL_VARIABLE_GUID': [0xc3253c90, 0xa24f, 0x4599, 0xa6, 0x64, 0x1f, 0x88, 0x13, 0x77, 0x8f, 0xc9],
'ARM_MP_CORE_INFO_GUID': [0xa4ee0728, 0xe5d7, 0x4ac5, 0xb2, 0x1e, 0x65, 0x8e, 0xd8, 0x57, 0xe8, 0x34],
'ARM_MP_CORE_INFO_PPI_GUID': [0x6847cc74, 0xe9ec, 0x4f8f, 0xa2, 0x9d, 0xab, 0x44, 0xe7, 0x54, 0xa8, 0xfc],
'BDS_LIB_STRING_PACKAGE_GUID': [0x3b4d9b23, 0x95ac, 0x44f6, 0x9f, 0xcd, 0xe, 0x95, 0x94, 0x58, 0x6c, 0x72],
'BLOCKIO_VENDOR_GUID': [0xcf31fac5, 0xc24e, 0x11d2, 0x85, 0xf3, 0x0, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'BLOCK_MMIO_PROTOCOL_GUID': [0x6b558ce3, 0x69e5, 0x4c67, 0xa6, 0x34, 0xf7, 0xfe, 0x72, 0xad, 0xbe, 0x84],
'BOOT_MAINT_FORMSET_GUID': [0x642237c7, 0x35d4, 0x472d, 0x83, 0x65, 0x12, 0xe0, 0xcc, 0xf2, 0x7a, 0x22],
'BOOT_MANAGER_FORMSET_GUID': [0x847bc3fe, 0xb974, 0x446d, 0x94, 0x49, 0x5a, 0xd5, 0x41, 0x2e, 0x99, 0x3b],
'CONNECT_CONIN_EVENT_GUID': [0xdb4e8151, 0x57ed, 0x4bed, 0x88, 0x33, 0x67, 0x51, 0xb5, 0xd1, 0xa8, 0xd7],
'DEVICE_MANAGER_FORMSET_GUID': [0x3ebfa8e6, 0x511d, 0x4b5b, 0xa9, 0x5f, 0xfb, 0x38, 0x26, 0xf, 0x1c, 0x27],
'DP_HII_GUID': [0xeb832fd9, 0x9089, 0x4898, 0x83, 0xc9, 0x41, 0x61, 0x8f, 0x5c, 0x48, 0xb9],
'DRIVER_HEALTH_FORMSET_GUID': [0xf76e0a70, 0xb5ed, 0x4c38, 0xac, 0x9a, 0xe5, 0xf5, 0x4b, 0xf1, 0x6e, 0x34],
'DRIVER_SAMPLE_FORMSET_GUID': [0xA04A27f4, 0xDF00, 0x4D42, 0xB5, 0x52, 0x39, 0x51, 0x13, 0x02, 0x11, 0x3D],
'DRIVER_SAMPLE_INVENTORY_GUID': [0xb3f56470, 0x6141, 0x4621, 0x8f, 0x19, 0x70, 0x4e, 0x57, 0x7a, 0xa9, 0xe8],
'DUET_CONSOLEOUT_CONFIG_GUID': [0xED150714, 0xDF30, 0x407D, 0xB2, 0x4A, 0x4B, 0x74, 0x2F, 0xD5, 0xCE, 0xA2],
'DXE_CORE_FILE_NAME_GUID': [0xD6A2CB7F, 0x6A18, 0x4e2f, 0xB4, 0x3B, 0x99, 0x20, 0xA7, 0x33, 0x70, 0x0A],
'DXE_SERVICES_TABLE_GUID': [0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9],
'EBL_ADD_COMMAND_PROTOCOL_GUID': [0xaeda2428, 0x9a22, 0x4637, 0x9b, 0x21, 0x54, 0x5e, 0x28, 0xfb, 0xb8, 0x29],
'ECP_PEI_PCI_CFG_PPI_GUID': [0xb0ee53d4, 0xa049, 0x4a79, 0xb2, 0xff, 0x19, 0xd9, 0xfa, 0xef, 0xaa, 0x94],
'EFI_ABSOLUTE_POINTER_PROTOCOL_GUID': [0x8D59D32B, 0xC655, 0x4AE9, 0x9B, 0x15, 0xF2, 0x59, 0x04, 0x99, 0x2A, 0x43],
'EFI_ACPI_20_TABLE_GUID': [0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ACPI_S3_CONTEXT_GUID': [0xef98d3a, 0x3e33, 0x497a, 0xa4, 0x1, 0x77, 0xbe, 0x3e, 0xb7, 0x4f, 0x38],
'EFI_ACPI_S3_SAVE_GUID': [0x125f2de1, 0xfb85, 0x440c, 0xa5, 0x4c, 0x4d, 0x99, 0x35, 0x8a, 0x8d, 0x38],
'EFI_ACPI_SDT_PROTOCOL_GUID': [0xeb97088e, 0xcfdf, 0x49c6, 0xbe, 0x4b, 0xd9, 0x6, 0xa5, 0xb2, 0xe, 0x86],
'EFI_ACPI_SUPPORT_GUID': [0xdbff9d55, 0x89b7, 0x46da, 0xbd, 0xdf, 0x67, 0x7d, 0x3d, 0xc0, 0x24, 0x1d],
'EFI_ACPI_TABLE_GUID': [0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ACPI_TABLE_GUID': [0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_ACPI_TABLE_PROTOCOL_GUID': [0xffe06bdd, 0x6107, 0x46a6, 0x7b, 0xb2, 0x5a, 0x9c, 0x7e, 0xc5, 0x27, 0x5c],
'EFI_ACPI_TABLE_STORAGE_GUID': [0x7e374e25, 0x8e01, 0x4fee, 0x87, 0xf2, 0x39, 0xc, 0x23, 0xc6, 0x6, 0xcd],
'EFI_ACPI_VARIABLE_COMPATIBILITY_GUID': [0xc020489e, 0x6db2, 0x4ef2, 0x9a, 0xa5, 0xca, 0x6, 0xfc, 0x11, 0xd3, 0x6a],
'EFI_ALTERNATE_FV_BLOCK_GUID': [0xf496922d, 0x172f, 0x4bbc, 0xa1, 0xeb, 0xe, 0xeb, 0x94, 0x9c, 0x34, 0x86],
'EFI_APRIORI_GUID': [0xfc510ee7, 0xffdc, 0x11d4, 0xbd, 0x41, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ARP_PROTOCOL_GUID': [0xf4b427bb, 0xba21, 0x4f16, 0xbc, 0x4e, 0x43, 0xe4, 0x16, 0xab, 0x61, 0x9c],
'EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID': [0xf44c00ee, 0x1f2c, 0x4a00, 0xaa, 0x9, 0x1c, 0x9f, 0x3e, 0x8, 0x0, 0xa3],
'EFI_ATA_PASS_THRU_PROTOCOL_GUID': [0x1d3de7f0, 0x807, 0x424f, 0xaa, 0x69, 0x11, 0xa5, 0x4e, 0x19, 0xa4, 0x6f],
'EFI_AUTHENTICATED_VARIABLE_GUID': [0xaaf32c78, 0x947b, 0x439a, 0xa1, 0x80, 0x2e, 0x14, 0x4e, 0xc3, 0x77, 0x92],
'EFI_AUTHENTICATION_CHAP_LOCAL_GUID': [0xc280c73e, 0x15ca, 0x11da, 0xb0, 0xca, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_AUTHENTICATION_CHAP_RADIUS_GUID': [0xd6062b50, 0x15ca, 0x11da, 0x92, 0x19, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_AUTHENTICATION_INFO_PROTOCOL_GUID': [0x7671d9d0, 0x53db, 0x4173, 0xaa, 0x69, 0x23, 0x27, 0xf2, 0x1f, 0x0b, 0xc7],
'EFI_BDS_ARCH_PROTOCOL_GUID': [0x665E3FF6, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_BIS_PROTOCOL_GUID': [0x0b64aab0, 0x5429, 0x11d4, 0x98, 0x16, 0x00, 0xa0, 0xc9, 0x1f, 0xad, 0xcf],
'EFI_BLOCK_IO2_PROTOCOL_GUID': [0xa77b2472, 0xe282, 0x4e9f, 0xa2, 0x45, 0xc2, 0xc0, 0xe2, 0x7b, 0xbc, 0xc1],
'EFI_BLOCK_IO_PROTOCOL_GUID': [0x964e5b21, 0x6459, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_BOOT_LOGO_PROTOCOL_GUID': [0xcdea2bd3, 0xfc25, 0x4c1c, 0xb9, 0x7c, 0xb3, 0x11, 0x86, 0x6, 0x49, 0x90],
'EFI_BOOT_SCRIPT_EXECUTOR_CONTEXT_GUID': [0x79cb58c4, 0xac51, 0x442f, 0xaf, 0xd7, 0x98, 0xe4, 0x7d, 0x2e, 0x99, 0x8],
'EFI_BOOT_SCRIPT_EXECUTOR_VARIABLE_GUID': [0x3079818c, 0x46d4, 0x4a73, 0xae, 0xf3, 0xe3, 0xe4, 0x6c, 0xf1, 0xee, 0xdb],
'EFI_BOOT_SCRIPT_SAVE_PROTOCOL_GUID': [0x470e1529, 0xb79e, 0x4e32, 0xa0, 0xfe, 0x6a, 0x15, 0x6d, 0x29, 0xf9, 0xb2],
'EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_GUID': [0x3bc1b285, 0x8a15, 0x4a82, 0xaa, 0xbf, 0x4d, 0x7d, 0x13, 0xfb, 0x32, 0x65],
'EFI_CACHE_SUBCLASS_GUID': [0x7f0013a7, 0xdc79, 0x4b22, 0x80, 0x99, 0x11, 0xf7, 0x5f, 0xdc, 0x82, 0x9d],
'EFI_CAPSULE_ARCH_PROTOCOL_GUID': [0x5053697e, 0x2cbc, 0x4819, 0x90, 0xd9, 0x05, 0x80, 0xde, 0xee, 0x57, 0x54],
'EFI_CAPSULE_ARCH_PROTOCOL_GUID': [0x5053697e, 0x2cbc, 0x4819, 0x90, 0xd9, 0x5, 0x80, 0xde, 0xee, 0x57, 0x54],
'EFI_CAPSULE_GUID': [0x3B6686BD, 0x0D76, 0x4030, 0xB7, 0x0E, 0xB5, 0x51, 0x9E, 0x2F, 0xC5, 0xA0],
'EFI_CAPSULE_INFO_GUID': [0x8B34EAC7, 0x2690, 0x460B, 0x8B, 0xA5, 0xD5, 0xCF, 0x32, 0x83, 0x17, 0x35],
'EFI_CAPSULE_VENDOR_GUID': [0x711C703F, 0xC285, 0x4B10, 0xA3, 0xB0, 0x36, 0xEC, 0xBD, 0x3C, 0x8B, 0xE2],
'EFI_CERT_RSA2048_GUID': [0x3c5766e8, 0x269c, 0x4e34, 0xaa, 0x14, 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6],
'EFI_CERT_RSA2048_SHA1_GUID': [0x67f8444f, 0x8743, 0x48f1, 0xa3, 0x28, 0x1e, 0xaa, 0xb8, 0x73, 0x60, 0x80],
'EFI_CERT_RSA2048_SHA256_GUID': [0xe2b36190, 0x879b, 0x4a3d, 0xad, 0x8d, 0xf2, 0xe7, 0xbb, 0xa3, 0x27, 0x84],
'EFI_CERT_SHA1_GUID': [0x826ca512, 0xcf10, 0x4ac9, 0xb1, 0x87, 0xbe, 0x1, 0x49, 0x66, 0x31, 0xbd],
'EFI_CERT_SHA224_GUID': [0xb6e5233, 0xa65c, 0x44c9, 0x94, 0x7, 0xd9, 0xab, 0x83, 0xbf, 0xc8, 0xbd],
'EFI_CERT_SHA256_GUID': [0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28],
'EFI_CERT_SHA384_GUID': [0xff3e5307, 0x9fd0, 0x48c9, 0x85, 0xf1, 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x1],
'EFI_CERT_SHA512_GUID': [0x93e0fae, 0xa6c4, 0x4f50, 0x9f, 0x1b, 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a],
'EFI_CERT_TYPE_PKCS7_GUID': [0x4aafd29d, 0x68df, 0x49ee, 0x8a, 0xa9, 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7],
'EFI_CERT_TYPE_RSA2048_SHA256_GUID': [0xa7717414, 0xc616, 0x4977, 0x94, 0x20, 0x84, 0x47, 0x12, 0xa7, 0x35, 0xbf],
'EFI_CERT_X509_GUID': [0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72],
'EFI_COMPATIBLE_MEMORY_TESTED_PROTOCOL_GUID': [0x64c475ef, 0x344b, 0x492c, 0x93, 0xad, 0xab, 0x9e, 0xb4, 0x39, 0x50, 0x4],
'EFI_COMPONENT_NAME2_PROTOCOL_GUID': [0x6a7a5cff, 0xe8d9, 0x4f70, 0xba, 0xda, 0x75, 0xab, 0x30, 0x25, 0xce, 0x14],
'EFI_COMPONENT_NAME_PROTOCOL_GUID': [0x107a772c, 0xd5e1, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CONFIG_FILE_NAME_GUID': [0x98B8D59B, 0xE8BA, 0x48EE, 0x98, 0xDD, 0xC2, 0x95, 0x39, 0x2F, 0x1E, 0xDB],
'EFI_CONSOLE_CONTROL_PROTOCOL_GUID': [0xf42f7782, 0x12e, 0x4c12, 0x99, 0x56, 0x49, 0xf9, 0x43, 0x4, 0xf7, 0x21],
'EFI_CONSOLE_IN_DEVICE_GUID': [0xd3b36f2b, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CONSOLE_OUT_DEVICE_GUID': [0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CPU_ARCH_PROTOCOL_GUID': [0x26baccb1, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_CPU_IO2_PROTOCOL_GUID': [0xad61f191, 0xae5f, 0x4c0e, 0xb9, 0xfa, 0xe8, 0x69, 0xd2, 0x88, 0xc6, 0x4f],
'EFI_CPU_IO_PROTOCOL_GUID': [0xB0732526, 0x38C8, 0x4b40, 0x88, 0x77, 0x61, 0xC7, 0xB0, 0x6A, 0xAC, 0x45],
'EFI_CRC32_GUIDED_SECTION_EXTRACTION_GUID': [0xFC1BCDB0, 0x7D31, 0x49aa, 0x93, 0x6A, 0xA4, 0x60, 0x0D, 0x9D, 0xD0, 0x83],
'EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID': [0xFC1BCDB0, 0x7D31, 0x49aa, 0x93, 0x6A, 0xA4, 0x60, 0x0D, 0x9D, 0xD0, 0x83],
'EFI_CUSTOMIZED_DECOMPRESS_PROTOCOL_GUID': [0x9a44198e, 0xa4a2, 0x44e6, 0x8a, 0x1f, 0x39, 0xbe, 0xfd, 0xac, 0x89, 0x6f],
'EFI_DATA_HUB_PROTOCOL_GUID': [0xae80d021, 0x618e, 0x11d4, 0xbc, 0xd7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_DATA_HUB_STATUS_CODE_RECORD_GUID': [0xd083e94c, 0x6560, 0x42e4, 0xb6, 0xd4, 0x2d, 0xf7, 0x5a, 0xdf, 0x6a, 0x2a],
'EFI_DEBUGPORT_PROTOCOL_GUID': [0xEBA4E8D2, 0x3858, 0x41EC, 0xA2, 0x81, 0x26, 0x47, 0xBA, 0x96, 0x60, 0xD0],
'EFI_DEBUG_AGENT_GUID': [0x865a5a9b, 0xb85d, 0x474c, 0x84, 0x55, 0x65, 0xd1, 0xbe, 0x84, 0x4b, 0xe2],
'EFI_DEBUG_ASSERT_PROTOCOL_GUID': [0xbe499c92, 0x7d4b, 0x11d4, 0xbc, 0xee, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_DEBUG_IMAGE_INFO_TABLE_GUID': [0x49152e77, 0x1ada, 0x4764, 0xb7, 0xa2, 0x7a, 0xfe, 0xfe, 0xd9, 0x5e, 0x8b],
'EFI_DEBUG_MASK_PROTOCOL_GUID': [0x4c8a2451, 0xc207, 0x405b, 0x96, 0x94, 0x99, 0xea, 0x13, 0x25, 0x13, 0x41],
'EFI_DEBUG_SERIAL_IO_PROTOCOL_GUID': [0xe683dc4f, 0x9ed, 0x4f22, 0x86, 0x6b, 0x8e, 0x40, 0x46, 0x94, 0x7c, 0x6c],
'EFI_DEBUG_SUPPORT_PERIODIC_CALLBACK_PROTOCOL_GUID': [0x9546e07c, 0x2cbb, 0x4c88, 0x98, 0x6c, 0xcd, 0x34, 0x10, 0x86, 0xf0, 0x44],
'EFI_DEBUG_SUPPORT_PROTOCOL_GUID': [0x2755590C, 0x6F3C, 0x42FA, 0x9E, 0xA4, 0xA3, 0xBA, 0x54, 0x3C, 0xDA, 0x25],
'EFI_DECOMPRESS_PROTOCOL_GUID': [0xd8117cfe, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DEFAULT_BMP_LOGO_GUID': [0x7BB28B99, 0x61BB, 0x11d5, 0x9A, 0x5D, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_DEFERRED_IMAGE_LOAD_PROTOCOL_GUID': [0x15853d7c, 0x3ddf, 0x43e0, 0xa1, 0xcb, 0xeb, 0xf8, 0x5b, 0x8f, 0x87, 0x2c],
'EFI_DEVICE_IO_PROTOCOL_GUID': [0xaf6ac311, 0x84c3, 0x11d2, 0x8e, 0x3c, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID': [0x5c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e],
'EFI_DEVICE_PATH_PROTOCOL_GUID': [0x9576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID': [0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c],
'EFI_DEVICE_PATH_UTILITIES_PROTOCOL_GUID': [0x379be4e, 0xd706, 0x437d, 0xb0, 0x37, 0xed, 0xb8, 0x2f, 0xb7, 0x72, 0xa4],
'EFI_DHCP4_PROTOCOL_GUID': [0x8a219718, 0x4ef5, 0x4761, 0x91, 0xc8, 0xc0, 0xf0, 0x4b, 0xda, 0x9e, 0x56],
'EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID': [0x9d9a39d8, 0xbd42, 0x4a73, 0xa4, 0xd5, 0x8e, 0xe9, 0x4b, 0xe1, 0x13, 0x80],
'EFI_DHCP6_PROTOCOL_GUID': [0x87c8bad7, 0x595, 0x4053, 0x82, 0x97, 0xde, 0xde, 0x39, 0x5f, 0x5d, 0x5b],
'EFI_DHCP6_SERVICE_BINDING_PROTOCOL_GUID': [0x9fb9a8a1, 0x2f4a, 0x43a6, 0x88, 0x9c, 0xd0, 0xf7, 0xb6, 0xc4, 0x7a, 0xd5],
'EFI_DISK_INFO_AHCI_INTERFACE_GUID': [0x9e498932, 0x4abc, 0x45af, 0xa3, 0x4d, 0x2, 0x47, 0x78, 0x7b, 0xe7, 0xc6],
'EFI_DISK_INFO_IDE_INTERFACE_GUID': [0x5e948fe3, 0x26d3, 0x42b5, 0xaf, 0x17, 0x61, 0x2, 0x87, 0x18, 0x8d, 0xec],
'EFI_DISK_INFO_PROTOCOL_GUID': [0xd432a67f, 0x14dc, 0x484b, 0xb3, 0xbb, 0x3f, 0x2, 0x91, 0x84, 0x93, 0x27],
'EFI_DISK_INFO_SCSI_INTERFACE_GUID': [0x8f74baa, 0xea36, 0x41d9, 0x95, 0x21, 0x21, 0xa7, 0xf, 0x87, 0x80, 0xbc],
'EFI_DISK_INFO_USB_INTERFACE_GUID': [0xcb871572, 0xc11a, 0x47b5, 0xb4, 0x92, 0x67, 0x5e, 0xaf, 0xa7, 0x77, 0x27],
'EFI_DISK_IO_PROTOCOL_GUID': [0xce345171, 0xba0b, 0x11d2, 0x8e, 0x4f, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DPC_PROTOCOL_GUID': [0x480f8ae9, 0xc46, 0x4aa9, 0xbc, 0x89, 0xdb, 0x9f, 0xba, 0x61, 0x98, 0x6],
'EFI_DRIVER_BINDING_PROTOCOL_GUID': [0x18a031ab, 0xb443, 0x4d1a, 0xa5, 0xc0, 0xc, 0x9, 0x26, 0x1e, 0x9f, 0x71],
'EFI_DRIVER_CONFIGURATION2_PROTOCOL_GUID': [0xbfd7dc1d, 0x24f1, 0x40d9, 0x82, 0xe7, 0x2e, 0x09, 0xbb, 0x6b, 0x4e, 0xbe],
'EFI_DRIVER_CONFIGURATION_PROTOCOL_GUID': [0x107a772b, 0xd5e1, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID': [0x4d330321, 0x025f, 0x4aac, 0x90, 0xd8, 0x5e, 0xd9, 0x0, 0x17, 0x3b, 0x63],
'EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID': [0x4d330321, 0x025f, 0x4aac, 0x90, 0xd8, 0x5e, 0xd9, 0x00, 0x17, 0x3b, 0x63],
'EFI_DRIVER_DIAGNOSTICS_PROTOCOL_GUID': [0x0784924f, 0xe296, 0x11d4, 0x9a, 0x49, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DRIVER_FAMILY_OVERRIDE_PROTOCOL_GUID': [0xb1ee129e, 0xda36, 0x4181, 0x91, 0xf8, 0x4, 0xa4, 0x92, 0x37, 0x66, 0xa7],
'EFI_DRIVER_HEALTH_PROTOCOL_GUID': [0x2a534210, 0x9280, 0x41d8, 0xae, 0x79, 0xca, 0xda, 0x1, 0xa2, 0xb1, 0x27],
'EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL_GUID': [0x5c198761, 0x16a8, 0x4e69, 0x97, 0x2c, 0x89, 0xd6, 0x79, 0x54, 0xf8, 0x1d],
'EFI_DXE_IPL_PPI_GUID': [0xae8ce5d, 0xe448, 0x4437, 0xa8, 0xd7, 0xeb, 0xf5, 0xf1, 0x94, 0xf7, 0x31],
'EFI_DXE_SERVICES_TABLE_GUID': [0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9],
'EFI_DXE_SMM_READY_TO_LOCK_PROTOCOL_GUID': [0x60ff8964, 0xe906, 0x41d0, 0xaf, 0xed, 0xf2, 0x41, 0xe9, 0x74, 0xe0, 0x8e],
'EFI_EAP_MANAGEMENT_PROTOCOL_GUID': [0xbb62e663, 0x625d, 0x40b2, 0xa0, 0x88, 0xbb, 0xe8, 0x36, 0x23, 0xa2, 0x45],
'EFI_EAP_PROTOCOL_GUID': [0x5d9f96db, 0xe731, 0x4caa, 0xa0, 0xd, 0x72, 0xe1, 0x87, 0xcd, 0x77, 0x62],
'EFI_EBC_INTERPRETER_PROTOCOL_GUID': [0x13AC6DD1, 0x73D0, 0x11D4, 0xB0, 0x6B, 0x00, 0xAA, 0x00, 0xBD, 0x6D, 0xE7],
'EFI_EBC_SIMPLE_DEBUGGER_PROTOCOL_GUID': [0x2a72d11e, 0x7376, 0x40f6, 0x9c, 0x68, 0x23, 0xfa, 0x2f, 0xe3, 0x63, 0xf1],
'EFI_EBC_VM_TEST_PROTOCOL_GUID': [0xAAEACCFD, 0xF27B, 0x4C17, 0xB6, 0x10, 0x75, 0xCA, 0x1F, 0x2D, 0xFB, 0x52],
'EFI_EBC_VM_TEST_PROTOCOL_GUID': [0xAAEACCFD, 0xF27B, 0x4C17, 0xB6, 0x10, 0x75, 0xCA, 0x1F, 0x2D, 0xFB, 0x52],
'EFI_EDID_ACTIVE_PROTOCOL_GUID': [0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86],
'EFI_EDID_DISCOVERED_PROTOCOL_GUID': [0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa],
'EFI_EDID_DISCOVERED_PROTOCOL_GUID': [0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa],
'EFI_EDID_OVERRIDE_PROTOCOL_GUID': [0x48ecb431, 0xfb72, 0x45c0, 0xa9, 0x22, 0xf4, 0x58, 0xfe, 0x4, 0xb, 0xd5],
'EFI_EMU_PHYSICAL_DISK_GUID': [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_EMU_SYSTEM_CONFIG_GUID': [0x9C4FB516, 0x3A1E, 0xD847, 0xA1, 0xA1, 0x70, 0x58, 0xB6, 0x98, 0x67, 0x32],
'EFI_EMU_VIRTUAL_DISK_GUID': [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_END_OF_DXE_EVENT_GROUP_GUID': [0x2ce967a, 0xdd7e, 0x4ffc, 0x9e, 0xe7, 0x81, 0x0c, 0xf0, 0x47, 0x8, 0x80],
'EFI_END_OF_DXE_EVENT_GROUP_GUID': [0x2ce967a, 0xdd7e, 0x4ffc, 0x9e, 0xe7, 0x81, 0xc, 0xf0, 0x47, 0x8, 0x80],
'EFI_ERROR_SECTION_DIRECTED_IO_DMAR_GUID': [0x71761d37, 0x32b2, 0x45cd, 0xa7, 0xd0, 0xb0, 0xfe, 0xdd, 0x93, 0xe8, 0xcf],
'EFI_ERROR_SECTION_DMAR_GENERIC_GUID': [0x5b51fef7, 0xc79d, 0x4434, 0x8f, 0x1b, 0xaa, 0x62, 0xde, 0x3e, 0x2c, 0x64],
'EFI_ERROR_SECTION_FW_ERROR_RECORD_GUID': [0x81212a96, 0x09ed, 0x4996, 0x94, 0x71, 0x8d, 0x72, 0x9c, 0x8e, 0x69, 0xed],
'EFI_ERROR_SECTION_IOMMU_DMAR_GUID': [0x036f84e1, 0x7f37, 0x428c, 0xa7, 0x9e, 0x57, 0x5f, 0xdf, 0xaa, 0x84, 0xec],
'EFI_ERROR_SECTION_PCIE_GUID': [0xd995e954, 0xbbc1, 0x430f, 0xad, 0x91, 0xb4, 0x4d, 0xcb, 0x3c, 0x6f, 0x35],
'EFI_ERROR_SECTION_PCI_DEVICE_GUID': [0xeb5e4685, 0xca66, 0x4769, 0xb6, 0xa2, 0x26, 0x06, 0x8b, 0x00, 0x13, 0x26],
'EFI_ERROR_SECTION_PCI_PCIX_BUS_GUID': [0xc5753963, 0x3b84, 0x4095, 0xbf, 0x78, 0xed, 0xda, 0xd3, 0xf9, 0xc9, 0xdd],
'EFI_ERROR_SECTION_PLATFORM_MEMORY_GUID': [0xa5bc1114, 0x6f64, 0x4ede, 0xb8, 0x63, 0x3e, 0x83, 0xed, 0x7c, 0x83, 0xb1],
'EFI_ERROR_SECTION_PROCESSOR_GENERIC_GUID': [0x9876ccad, 0x47b4, 0x4bdb, 0xb6, 0x5e, 0x16, 0xf1, 0x93, 0xc4, 0xf3, 0xdb],
'EFI_ERROR_SECTION_PROCESSOR_SPECIFIC_GUID': [0xdc3ea0b0, 0xa144, 0x4797, 0xb9, 0x5b, 0x53, 0xfa, 0x24, 0x2b, 0x6e, 0x1d],
'EFI_EVENT_GROUP_DXE_DISPATCH_GUID': [0x7081e22f, 0xcac6, 0x4053, 0x94, 0x68, 0x67, 0x57, 0x82, 0xcf, 0x88, 0xe5],
'EFI_EVENT_LEGACY_BOOT_GUID': [0x2a571201, 0x4966, 0x47f6, 0x8b, 0x86, 0xf3, 0x1e, 0x41, 0xf3, 0x2f, 0x10],
'EFI_EVENT_NOTIFICATION_TYEP_BOOT_GUID': [0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, 0xD4, 0x64, 0xB3, 0x8F],
'EFI_EVENT_NOTIFICATION_TYEP_CMC_GUID': [0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, 0xEB, 0xD4, 0xF8, 0x90],
'EFI_EVENT_NOTIFICATION_TYEP_CPE_GUID': [0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, 0xF2, 0x7E, 0xBE, 0xEE],
'EFI_EVENT_NOTIFICATION_TYEP_DMAR_GUID': [0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, 0x72, 0x2D, 0xEB, 0x41],
'EFI_EVENT_NOTIFICATION_TYEP_INIT_GUID': [0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, 0xD3, 0x9B, 0xC9, 0x8E],
'EFI_EVENT_NOTIFICATION_TYEP_MCE_GUID': [0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, 0xE1, 0x49, 0x13, 0xBB],
'EFI_EVENT_NOTIFICATION_TYEP_NMI_GUID': [0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, 0x85, 0xD6, 0xE9, 0x8A],
'EFI_EVENT_NOTIFICATION_TYEP_PCIE_GUID': [0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, 0xAF, 0x67, 0xC1, 0x04],
'EFI_EXTENDED_SAL_BASE_IO_SERVICES_PROTOCOL_GUID': [0x5aea42b5, 0x31e1, 0x4515, 0xbc, 0x31, 0xb8, 0xd5, 0x25, 0x75, 0x65, 0xa6],
'EFI_EXTENDED_SAL_BASE_SERVICES_PROTOCOL_GUID': [0xd9e9fa06, 0x0fe0, 0x41c3, 0x96, 0xfb, 0x83, 0x42, 0x5a, 0x33, 0x94, 0xf8],
'EFI_EXTENDED_SAL_CACHE_SERVICES_PROTOCOL_GUID': [0xedc9494, 0x2743, 0x4ba5, 0x88, 0x18, 0x0a, 0xef, 0x52, 0x13, 0xf1, 0x88],
'EFI_EXTENDED_SAL_ELOG_SERVICES_PROTOCOL_GUID': [0xd5e4ee5f, 0x3e0a, 0x453c, 0xa7, 0x25, 0xb6, 0x92, 0xbb, 0x6, 0x36, 0x5a],
'EFI_EXTENDED_SAL_FV_BLOCK_SERVICES_PROTOCOL_GUID': [0xa2271df1, 0xbcbb, 0x4f1d, 0x98, 0xa9, 0x06, 0xbc, 0x17, 0x2f, 0x07, 0x1a],
'EFI_EXTENDED_SAL_LOCK_SERVICES_PROTOCOL_GUID': [0x76b75c23, 0xfe4f, 0x4e17, 0xa2, 0xad, 0x1a, 0x65, 0x3d, 0xbb, 0x49, 0x4a],
'EFI_EXTENDED_SAL_MCA_LOG_SERVICES_PROTOCOL_GUID': [0xcb3fd86e, 0x38a3, 0x4c03, 0x9a, 0x5c, 0x90, 0xcf, 0xa3, 0xa2, 0xab, 0x7a],
'EFI_EXTENDED_SAL_MCA_SERVICES_PROTOCOL_GUID': [0x2a591128, 0x6cc7, 0x42b1, 0x8a, 0xf0, 0x58, 0x93, 0x3b, 0x68, 0x2d, 0xbb],
'EFI_EXTENDED_SAL_MP_SERVICES_PROTOCOL_GUID': [0x697d81a2, 0xcf18, 0x4dc0, 0x9e, 0x0d, 0x06, 0x11, 0x3b, 0x61, 0x8a, 0x3f],
'EFI_EXTENDED_SAL_MTC_SERVICES_PROTOCOL_GUID': [0x899afd18, 0x75e8, 0x408b, 0xa4, 0x1a, 0x6e, 0x2e, 0x7e, 0xcd, 0xf4, 0x54],
'EFI_EXTENDED_SAL_PAL_SERVICES_PROTOCOL_GUID': [0xe1cd9d21, 0x0fc2, 0x438d, 0x97, 0x03, 0x04, 0xe6, 0x6d, 0x96, 0x1e, 0x57],
'EFI_EXTENDED_SAL_PCI_SERVICES_PROTOCOL_GUID': [0xa46b1a31, 0xad66, 0x4905, 0x92, 0xf6, 0x2b, 0x46, 0x59, 0xdc, 0x30, 0x63],
'EFI_EXTENDED_SAL_RESET_SERVICES_PROTOCOL_GUID': [0x7d019990, 0x8ce1, 0x46f5, 0xa7, 0x76, 0x3c, 0x51, 0x98, 0x67, 0x6a, 0xa0],
'EFI_EXTENDED_SAL_RTC_SERVICES_PROTOCOL_GUID': [0x7e97a470, 0xefdb, 0x4d02, 0x8f, 0xce, 0x61, 0x90, 0xd2, 0x7b, 0xa2, 0x96],
'EFI_EXTENDED_SAL_SENSOR_SERVICES_PROTOCOL_GUID': [0x4a153b6e, 0x85a1, 0x4982, 0x98, 0xf4, 0x6a, 0x8c, 0xfc, 0xa4, 0xab, 0xa1],
'EFI_EXTENDED_SAL_SM_COM_LAYER_SERVICES_PROTOCOL_GUID': [0x4356799, 0x81b7, 0x4e08, 0xa3, 0x8d, 0xd9, 0x78, 0xfa, 0x47, 0xba, 0x42],
'EFI_EXTENDED_SAL_SST_GUID': [0x38802700, 0x868a, 0x4b4e, 0x81, 0xd4, 0x4f, 0x1b, 0xdc, 0xcf, 0xb4, 0x6f],
'EFI_EXTENDED_SAL_STALL_SERVICES_PROTOCOL_GUID': [0x53a58d06, 0xac27, 0x4d8c, 0xb5, 0xe9, 0xf0, 0x8a, 0x80, 0x65, 0x41, 0x70],
'EFI_EXTENDED_SAL_STATUS_CODE_SERVICES_PROTOCOL_GUID': [0xdbd91d, 0x55e9, 0x420f, 0x96, 0x39, 0x5e, 0x9f, 0x84, 0x37, 0xb4, 0x4f],
'EFI_EXTENDED_SAL_VARIABLE_SERVICES_PROTOCOL_GUID': [0x4ecb6c53, 0xc641, 0x4370, 0x8c, 0xb2, 0x3b, 0x0e, 0x49, 0x6e, 0x83, 0x78],
'EFI_EXTENDED_SAL_VIRTUAL_SERVICES_PROTOCOL_GUID': [0xc1a74056, 0x260e, 0x4871, 0xa0, 0x31, 0xe6, 0x45, 0xa6, 0x5b, 0x6e, 0x11],
'EFI_EXT_SCSI_PASS_THRU_PROTOCOL_GUID': [0x143b7632, 0xb81b, 0x4cb7, 0xab, 0xd3, 0xb6, 0x25, 0xa5, 0xb9, 0xbf, 0xfe],
'EFI_FAULT_TOLERANT_WRITE_PROTOCOL_GUID': [0x3ebd9e82, 0x2c78, 0x4de6, 0x97, 0x86, 0x8d, 0x4b, 0xfc, 0xb7, 0xc8, 0x81],
'EFI_FFS_VOLUME_TOP_FILE_GUID': [0x1BA0062E, 0xC779, 0x4582, 0x85, 0x66, 0x33, 0x6A, 0xE8, 0xF7, 0x8F, 0x09],
'EFI_FILE_SYSTEM_INFO_ID_GUID': [0x9576e93, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_FILE_SYSTEM_VOLUME_LABEL_INFO_ID_GUID': [0xDB47D7D3, 0xFE81, 0x11d3, 0x9A, 0x35, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_FIND_FV_PPI_GUID': [0x36164812, 0xa023, 0x44e5, 0xbd, 0x85, 0x5, 0xbf, 0x3c, 0x77, 0x0, 0xaa],
'EFI_FIRMWARE_CONTENTS_SIGNED_GUID': [0xf9d89e8, 0x9259, 0x4f76, 0xa5, 0xaf, 0xc, 0x89, 0xe3, 0x40, 0x23, 0xdf],
'EFI_FIRMWARE_FILE_SYSTEM2_GUID': [0x8c8ce578, 0x8a3d, 0x4f1c, 0x99, 0x35, 0x89, 0x61, 0x85, 0xc3, 0x2d, 0xd3],
'EFI_FIRMWARE_FILE_SYSTEM3_GUID': [0x5473c07a, 0x3dcb, 0x4dca, 0xbd, 0x6f, 0x1e, 0x96, 0x89, 0xe7, 0x34, 0x9a],
'EFI_FIRMWARE_FILE_SYSTEM_GUID': [0x7A9354D9, 0x0468, 0x444a, 0x81, 0xCE, 0x0B, 0xF6, 0x17, 0xD8, 0x90, 0xDF],
'EFI_FIRMWARE_MANAGEMENT_PROTOCOL_GUID': [0x86c77a67, 0xb97, 0x4633, 0xa1, 0x87, 0x49, 0x10, 0x4d, 0x6, 0x85, 0xc7],
'EFI_FIRMWARE_PERFORMANCE_GUID': [0xc095791a, 0x3001, 0x47b2, 0x80, 0xc9, 0xea, 0xc7, 0x31, 0x9f, 0x2f, 0xa4],
'EFI_FIRMWARE_VOLUME2_PROTOCOL_GUID': [0x220e73b6, 0x6bdb, 0x4413, 0x84, 0x5, 0xb9, 0x74, 0xb1, 0x8, 0x61, 0x9a],
'EFI_FIRMWARE_VOLUME_BLOCK2_PROTOCOL_GUID': [0x8f644fa9, 0xe850, 0x4db1, 0x9c, 0xe2, 0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4],
'EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID': [0x8f644fa9, 0xe850, 0x4db1, 0x9c, 0xe2, 0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4],
'EFI_FIRMWARE_VOLUME_DISPATCH_PROTOCOL_GUID': [0x7aa35a69, 0x506c, 0x444f, 0xa7, 0xaf, 0x69, 0x4b, 0xf5, 0x6f, 0x71, 0xc8],
'EFI_FIRMWARE_VOLUME_PROTOCOL_GUID': [0x389F751F, 0x1838, 0x4388, 0x83, 0x90, 0xCD, 0x81, 0x54, 0xBD, 0x27, 0xF8],
'EFI_FORM_BROWSER2_PROTOCOL_GUID': [0xb9d4c360, 0xbcfb, 0x4f9b, 0x92, 0x98, 0x53, 0xc1, 0x36, 0x98, 0x22, 0x58],
'EFI_FORM_BROWSER_COMPATIBILITY_PROTOCOL_GUID': [0xfb7c852, 0xadca, 0x4853, 0x8d, 0xf, 0xfb, 0xa7, 0x1b, 0x1c, 0xe1, 0x1a],
'EFI_FORM_BROWSER_PROTOCOL_GUID': [0xe5a1333e, 0xe1b4, 0x4d55, 0xce, 0xeb, 0x35, 0xc3, 0xef, 0x13, 0x34, 0x43],
'EFI_FORM_BROWSER_PROTOCOL_GUID': [0xfb7c852, 0xadca, 0x4853, 0x8d, 0xf, 0xfb, 0xa7, 0x1b, 0x1c, 0xe1, 0x1a],
'EFI_FORM_CALLBACK_PROTOCOL_GUID': [0xf3e4543d, 0xcf35, 0x6cef, 0x35, 0xc4, 0x4f, 0xe6, 0x34, 0x4d, 0xfc, 0x54],
'EFI_FRAMEWORK_DEVICE_PATH_GUID': [0xb7084e63, 0x46b7, 0x4d1a, 0x86, 0x77, 0xe3, 0x0b, 0x53, 0xdb, 0xf0, 0x50],
'EFI_FTP4_PROTOCOL_GUID': [0xeb338826, 0x681b, 0x4295, 0xb3, 0x56, 0x2b, 0x36, 0x4c, 0x75, 0x7b, 0x9],
'EFI_FTP4_SERVICE_BINDING_PROTOCOL_GUID': [0xfaaecb1, 0x226e, 0x4782, 0xaa, 0xce, 0x7d, 0xb9, 0xbc, 0xbf, 0x4d, 0xaf],
'EFI_FTW_LITE_PROTOCOL_GUID': [0x3f557189, 0x8dae, 0x45ae, 0xa0, 0xb3, 0x2b, 0x99, 0xca, 0x7a, 0xa7, 0xa0],
'EFI_FVB_EXTENSION_PROTOCOL_GUID': [0x53a4c71b, 0xb581, 0x4170, 0x91, 0xb3, 0x8d, 0xb8, 0x7a, 0x4b, 0x5c, 0x46],
'EFI_GENERIC_MEMORY_TEST_PROTOCOL_GUID': [0x309de7f1, 0x7f5e, 0x4ace, 0xb4, 0x9c, 0x53, 0x1b, 0xe5, 0xaa, 0x95, 0xef],
'EFI_GENERIC_VARIABLE_GUID': [0x59d1c24f, 0x50f1, 0x401a, 0xb1, 0x01, 0xf3, 0x3e, 0x0d, 0xae, 0xd4, 0x43],
'EFI_GLOBAL_VARIABLE_GUID': [0x8BE4DF61, 0x93CA, 0x11d2, 0xAA, 0x0D, 0x00, 0xE0, 0x98, 0x03, 0x2B, 0x8C],
'EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID': [0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a],
'EFI_HARDWARE_ERROR_VARIABLE_GUID': [0x414E6BDD, 0xE47B, 0x47cc, 0xB2, 0x44, 0xBB, 0x61, 0x02, 0x0C, 0xF5, 0x16],
'EFI_HASH_ALGORITHM_SHA1_GUID': [0x2ae9d80f, 0x3fb2, 0x4095, 0xb7, 0xb1, 0xe9, 0x31, 0x57, 0xb9, 0x46, 0xb6],
'EFI_HASH_ALGORITHM_SHA1_NOPAD_GUID': [0x24c5dc2f, 0x53e2, 0x40ca, 0x9e, 0xd6, 0xa5, 0xd9, 0xa4, 0x9f, 0x46, 0x3b],
'EFI_HASH_ALGORITHM_SHA224_GUID': [0x8df01a06, 0x9bd5, 0x4bf7, 0xb0, 0x21, 0xdb, 0x4f, 0xd9, 0xcc, 0xf4, 0x5b],
'EFI_HASH_ALGORITHM_SHA256_GUID': [0x51aa59de, 0xfdf2, 0x4ea3, 0xbc, 0x63, 0x87, 0x5f, 0xb7, 0x84, 0x2e, 0xe9],
'EFI_HASH_ALGORITHM_SHA256_NOPAD_GUID': [0x8628752a, 0x6cb7, 0x4814, 0x96, 0xfc, 0x24, 0xa8, 0x15, 0xac, 0x22, 0x26],
'EFI_HASH_ALGORITHM_SHA384_GUID': [0xefa96432, 0xde33, 0x4dd2, 0xae, 0xe6, 0x32, 0x8c, 0x33, 0xdf, 0x77, 0x7a],
'EFI_HASH_ALGORITHM_SHA512_GUID': [0xcaa4381e, 0x750c, 0x4770, 0xb8, 0x70, 0x7a, 0x23, 0xb4, 0xe4, 0x21, 0x30],
'EFI_HASH_ALGORTIHM_MD5_GUID': [0xaf7c79c, 0x65b5, 0x4319, 0xb0, 0xae, 0x44, 0xec, 0x48, 0x4e, 0x4a, 0xd7],
'EFI_HASH_PROTOCOL_GUID': [0xc5184932, 0xdba5, 0x46db, 0xa5, 0xba, 0xcc, 0x0b, 0xda, 0x9c, 0x14, 0x35],
'EFI_HASH_SERVICE_BINDING_PROTOCOL_GUID': [0x42881c98, 0xa4f3, 0x44b0, 0xa3, 0x9d, 0xdf, 0xa1, 0x86, 0x67, 0xd8, 0xcd],
'EFI_HII_COMPATIBILITY_PROTOCOL_GUID': [0x5542cce1, 0xdf5c, 0x4d1b, 0xab, 0xca, 0x36, 0x4f, 0x77, 0xd3, 0x99, 0xfb],
'EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID': [0x330d4706, 0xf2a0, 0x4e4f, 0xa3, 0x69, 0xb6, 0x6f, 0xa8, 0xd5, 0x43, 0x85],
'EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID': [0x587e72d7, 0xcc50, 0x4f79, 0x82, 0x09, 0xca, 0x29, 0x1f, 0xc1, 0xa1, 0x0f],
'EFI_HII_DATABASE_PROTOCOL_GUID': [0xef9fc172, 0xa1b2, 0x4693, 0xb3, 0x27, 0x6d, 0x32, 0xfc, 0x41, 0x60, 0x42],
'EFI_HII_DRIVER_HEALTH_FORMSET_GUID': [0xf22fc20c, 0x8cf4, 0x45eb, 0x8e, 0x6, 0xad, 0x4e, 0x50, 0xb9, 0x5d, 0xd3],
'EFI_HII_FONT_PROTOCOL_GUID': [0xe9ca4775, 0x8657, 0x47fc, 0x97, 0xe7, 0x7e, 0xd6, 0x5a, 0x8, 0x43, 0x24],
'EFI_HII_FRONT_PAGE_CLASS_GUID': [0x94d411b7, 0x7669, 0x45c3, 0xba, 0x3b, 0xf3, 0xa5, 0x8a, 0x71, 0x56, 0x81],
'EFI_HII_IMAGE_PROTOCOL_GUID': [0x31a6406a, 0x6bdf, 0x4e46, 0xb2, 0xa2, 0xeb, 0xaa, 0x89, 0xc4, 0x9, 0x20],
'EFI_HII_PACKAGE_LIST_PROTOCOL_GUID': [0x6a1ee763, 0xd47a, 0x43b4, 0xaa, 0xbe, 0xef, 0x1d, 0xe2, 0xab, 0x56, 0xfc],
'EFI_HII_PLATFORM_SETUP_FORMSET_GUID': [0x93039971, 0x8545, 0x4b04, 0xb4, 0x5e, 0x32, 0xeb, 0x83, 0x26, 0x4, 0xe],
'EFI_HII_PROTOCOL_GUID': [0x5542cce1, 0xdf5c, 0x4d1b, 0xab, 0xca, 0x36, 0x4f, 0x77, 0xd3, 0x99, 0xfb],
'EFI_HII_PROTOCOL_GUID': [0xd7ad636e, 0xb997, 0x459b, 0xbf, 0x3f, 0x88, 0x46, 0x89, 0x79, 0x80, 0xe1],
'EFI_HII_SET_KEYBOARD_LAYOUT_EVENT_GUID': [0x14982a4f, 0xb0ed, 0x45b8, 0xa8, 0x11, 0x5a, 0x7a, 0x9b, 0xc2, 0x32, 0xdf],
'EFI_HII_STANDARD_FORM_GUID': [0x3bd2f4ec, 0xe524, 0x46e4, 0xa9, 0xd8, 0x51, 0x1, 0x17, 0x42, 0x55, 0x62],
'EFI_HII_STRING_PROTOCOL_GUID': [0xfd96974, 0x23aa, 0x4cdc, 0xb9, 0xcb, 0x98, 0xd1, 0x77, 0x50, 0x32, 0x2a],
'EFI_HII_USER_CREDENTIAL_FORMSET_GUID': [0x337f4407, 0x5aee, 0x4b83, 0xb2, 0xa7, 0x4e, 0xad, 0xca, 0x30, 0x88, 0xcd],
'EFI_HOB_LIST_GUID': [0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_HOB_MEMORY_ALLOC_BSP_STORE_GUID': [0x564b33cd, 0xc92a, 0x4593, 0x90, 0xbf, 0x24, 0x73, 0xe4, 0x3c, 0x63, 0x22],
'EFI_HOB_MEMORY_ALLOC_STACK_GUID': [0x4ed4bf27, 0x4092, 0x42e9, 0x80, 0x7d, 0x52, 0x7b, 0x1d, 0x0, 0xc9, 0xbd],
'EFI_IA32_X64_ERROR_TYPE_BUS_CHECK_GUID': [0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF, 0x92, 0xFF, 0xA6, 0x3C],
'EFI_IA32_X64_ERROR_TYPE_CACHE_CHECK_GUID': [0xA55701F5, 0xE3EF, 0x43de, 0xAC, 0x72, 0x24, 0x9B, 0x57, 0x3F, 0xAD, 0x2C],
'EFI_IA32_X64_ERROR_TYPE_MS_CHECK_GUID': [0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5, 0xB0, 0xA7, 0x43, 0x14],
'EFI_IA32_X64_ERROR_TYPE_TLB_CHECK_GUID': [0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B, 0x9A, 0xDB, 0x63, 0xC3],
'EFI_IDE_CONTROLLER_INIT_PROTOCOL_GUID': [0xa1e37052, 0x80d9, 0x4e65, 0xa3, 0x17, 0x3e, 0x9a, 0x55, 0xc4, 0x3e, 0xc9],
'EFI_IFR_FRAMEWORK_GUID': [0x31ca5d1a, 0xd511, 0x4931, 0xb7, 0x82, 0xae, 0x6b, 0x2b, 0x17, 0x8c, 0xd7],
'EFI_IFR_REFRESH_ID_OP_GUID': [0xF5E655D9, 0x02A6, 0x46f2, 0x9E, 0x76, 0xB8, 0xBE, 0x8E, 0x60, 0xAB, 0x22],
'EFI_IFR_TIANO_GUID': [0xf0b1735, 0x87a0, 0x4193, 0xb2, 0x66, 0x53, 0x8c, 0x38, 0xaf, 0x48, 0xce],
'EFI_IMAGE_SECURITY_DATABASE_GUID': [0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0xe, 0x67, 0x65, 0x6f],
'EFI_INCOMPATIBLE_PCI_DEVICE_SUPPORT_PROTOCOL_GUID': [0xeb23f55a, 0x7863, 0x4ac2, 0x8d, 0x3d, 0x95, 0x65, 0x35, 0xde, 0x03, 0x75],
'EFI_IOBASE_HOB_GUID': [0xd4a28a3e, 0xdcf2, 0x43cf, 0xa2, 0xb7, 0xf3, 0x57, 0x2a, 0x7c, 0xab, 0x9],
'EFI_IP4_CONFIG_PROTOCOL_GUID': [0x3b95aa31, 0x3793, 0x434b, 0x86, 0x67, 0xc8, 0x07, 0x08, 0x92, 0xe0, 0x5e],
'EFI_IP4_PROTOCOL_GUID': [0x41d94cd2, 0x35b6, 0x455a, 0x82, 0x58, 0xd4, 0xe5, 0x13, 0x34, 0xaa, 0xdd],
'EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID': [0xc51711e7, 0xb4bf, 0x404a, 0xbf, 0xb8, 0x0a, 0x04, 0x8e, 0xf1, 0xff, 0xe4],
'EFI_IP6_CONFIG_PROTOCOL_GUID': [0x937fe521, 0x95ae, 0x4d1a, 0x89, 0x29, 0x48, 0xbc, 0xd9, 0x0a, 0xd3, 0x1a],
'EFI_IP6_PROTOCOL_GUID': [0x2c8759d5, 0x5c2d, 0x66ef, 0x92, 0x5f, 0xb6, 0x6c, 0x10, 0x19, 0x57, 0xe2],
'EFI_IP6_SERVICE_BINDING_PROTOCOL_GUID': [0xec835dd3, 0xfe0f, 0x617b, 0xa6, 0x21, 0xb3, 0x50, 0xc3, 0xe1, 0x33, 0x88],
'EFI_IPSEC2_PROTOCOL_GUID': [0xa3979e64, 0xace8, 0x4ddc, 0xbc, 0x7, 0x4d, 0x66, 0xb8, 0xfd, 0x9, 0x77],
'EFI_IPSEC_CONFIG_PROTOCOL_GUID': [0xce5e5929, 0xc7a3, 0x4602, 0xad, 0x9e, 0xc9, 0xda, 0xf9, 0x4e, 0xbf, 0xcf],
'EFI_IPSEC_PROTOCOL_GUID': [0xdfb386f7, 0xe100, 0x43ad, 0x9c, 0x9a, 0xed, 0x90, 0xd0, 0x8a, 0x5e, 0x12],
'EFI_ISA_ACPI_PROTOCOL_GUID': [0x64a892dc, 0x5561, 0x4536, 0x92, 0xc7, 0x79, 0x9b, 0xfc, 0x18, 0x33, 0x55],
'EFI_ISA_IO_PROTOCOL_GUID': [0x7ee2bd44, 0x3da0, 0x11d4, 0x9a, 0x38, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_ISCSI_INITIATOR_NAME_PROTOCOL_GUID': [0x59324945, 0xec44, 0x4c0d, 0xb1, 0xcd, 0x9d, 0xb1, 0x39, 0xdf, 0x7, 0xc],
'EFI_KMS_FORMAT_AESCBC_128_GUID': [0xa0e8ee6a, 0x0e92, 0x44d4, 0x86, 0x1b, 0x0e, 0xaa, 0x4a, 0xca, 0x44, 0xa2],
'EFI_KMS_FORMAT_AESCBC_256_GUID': [0xd7e69789, 0x1f68, 0x45e8, 0x96, 0xef, 0x3b, 0x64, 0x07, 0xa5, 0xb2, 0xdc],
'EFI_KMS_FORMAT_AESXTS_128_GUID': [0x4776e33f, 0xdb47, 0x479a, 0xa2, 0x5f, 0xa1, 0xcd, 0x0a, 0xfa, 0xb3, 0x8b],
'EFI_KMS_FORMAT_AESXTS_256_GUID': [0xdc7e8613, 0xc4bb, 0x4db0, 0x84, 0x62, 0x13, 0x51, 0x13, 0x57, 0xab, 0xe2],
'EFI_KMS_FORMAT_GENERIC_1024_GUID': [0x43be0b44, 0x874b, 0x4ead, 0xb0, 0x9c, 0x24, 0x1a, 0x4f, 0xbd, 0x7e, 0xb3],
'EFI_KMS_FORMAT_GENERIC_128_GUID': [0xec8a3d69, 0x6ddf, 0x4108, 0x94, 0x76, 0x73, 0x37, 0xfc, 0x52, 0x21, 0x36],
'EFI_KMS_FORMAT_GENERIC_160_GUID': [0xa3b3e6f8, 0xefca, 0x4bc1, 0x88, 0xfb, 0xcb, 0x87, 0x33, 0x9b, 0x25, 0x79],
'EFI_KMS_FORMAT_GENERIC_2048_GUID': [0x40093f23, 0x630c, 0x4626, 0x9c, 0x48, 0x40, 0x37, 0x3b, 0x19, 0xcb, 0xbe],
'EFI_KMS_FORMAT_GENERIC_256_GUID': [0x70f64793, 0xc323, 0x4261, 0xac, 0x2c, 0xd8, 0x76, 0xf2, 0x7c, 0x53, 0x45],
'EFI_KMS_FORMAT_GENERIC_3072_GUID': [0xb9237513, 0x6c44, 0x4411, 0xa9, 0x90, 0x21, 0xe5, 0x56, 0xe0, 0x5a, 0xde],
'EFI_KMS_FORMAT_GENERIC_512_GUID': [0x978fe043, 0xd7af, 0x422e, 0x8a, 0x92, 0x2b, 0x48, 0xe4, 0x63, 0xbd, 0xe6],
'EFI_KMS_FORMAT_MD2_128_GUID': [0x78be11c4, 0xee44, 0x4a22, 0x9f, 0x05, 0x03, 0x85, 0x2e, 0xc5, 0xc9, 0x78],
'EFI_KMS_FORMAT_MD4_128_GUID': [0xd1c17aa1, 0xcac5, 0x400f, 0xbe, 0x17, 0xe2, 0xa2, 0xae, 0x06, 0x67, 0x7c],
'EFI_KMS_FORMAT_MD5SHA_128_GUID': [0x1c178237, 0x6897, 0x459e, 0x9d, 0x36, 0x67, 0xce, 0x8e, 0xf9, 0x4f, 0x76],
'EFI_KMS_FORMAT_MD5_128_GUID': [0xdcbc3662, 0x9cda, 0x4b52, 0xa0, 0x4c, 0x82, 0xeb, 0x1d, 0x23, 0x48, 0xc7],
'EFI_KMS_FORMAT_MDC2_128_GUID': [0xf7ad60f8, 0xefa8, 0x44a3, 0x91, 0x13, 0x23, 0x1f, 0x39, 0x9e, 0xb4, 0xc7],
'EFI_KMS_FORMAT_MDC4_128_GUID': [0x3fa4f847, 0xd8eb, 0x4df4, 0xbd, 0x49, 0x10, 0x3a, 0x0a, 0x84, 0x7b, 0xbc],
'EFI_KMS_FORMAT_RSASHA1_1024_GUID': [0x56417bed, 0x6bbe, 0x4882, 0x86, 0xa0, 0x3a, 0xe8, 0xbb, 0x17, 0xf8, 0xf9],
'EFI_KMS_FORMAT_RSASHA1_2048_GUID': [0xf66447d4, 0x75a6, 0x463e, 0xa8, 0x19, 0x07, 0x7f, 0x2d, 0xda, 0x05, 0xe9],
'EFI_KMS_FORMAT_RSASHA256_2048_GUID': [0xa477af13, 0x877d, 0x4060, 0xba, 0xa1, 0x25, 0xd1, 0xbe, 0xa0, 0x8a, 0xd3],
'EFI_KMS_FORMAT_SHA1_160_GUID': [0x453c5e5a, 0x482d, 0x43f0, 0x87, 0xc9, 0x59, 0x41, 0xf3, 0xa3, 0x8a, 0xc2],
'EFI_KMS_FORMAT_SHA256_256_GUID': [0x6bb4f5cd, 0x8022, 0x448d, 0xbc, 0x6d, 0x77, 0x1b, 0xae, 0x93, 0x5f, 0xc6],
'EFI_KMS_FORMAT_SHA512_512_GUID': [0x2f240e12, 0xe14d, 0x475c, 0x83, 0xb0, 0xef, 0xff, 0x22, 0xd7, 0x7b, 0xe7],
'EFI_KMS_PROTOCOL_GUID': [0xEC3A978D, 0x7C4E, 0x48FA, 0x9A, 0xBE, 0x6A, 0xD9, 0x1C, 0xC8, 0xF8, 0x11],
'EFI_LEGACY_8259_PROTOCOL_GUID': [0x38321dba, 0x4fe0, 0x4e17, 0x8a, 0xec, 0x41, 0x30, 0x55, 0xea, 0xed, 0xc1],
'EFI_LEGACY_BIOS_GUID': [0x2e3044ac, 0x879f, 0x490f, 0x97, 0x60, 0xbb, 0xdf, 0xaf, 0x69, 0x5f, 0x50],
'EFI_LEGACY_BIOS_PLATFORM_PROTOCOL_GUID': [0x783658a3, 0x4172, 0x4421, 0xa2, 0x99, 0xe0, 0x9, 0x7, 0x9c, 0xc, 0xb4],
'EFI_LEGACY_BIOS_PROTOCOL_GUID': [0xdb9a1e3d, 0x45cb, 0x4abb, 0x85, 0x3b, 0xe5, 0x38, 0x7f, 0xdb, 0x2e, 0x2d],
'EFI_LEGACY_BIOS_THUNK_PROTOCOL_GUID': [0x4c51a7ba, 0x7195, 0x442d, 0x87, 0x92, 0xbe, 0xea, 0x6e, 0x2f, 0xf6, 0xec],
'EFI_LEGACY_DEV_ORDER_VARIABLE_GUID': [0xa56074db, 0x65fe, 0x45f7, 0xbd, 0x21, 0x2d, 0x2b, 0xdd, 0x8e, 0x96, 0x52],
'EFI_LEGACY_INTERRUPT_PROTOCOL_GUID': [0x31ce593d, 0x108a, 0x485d, 0xad, 0xb2, 0x78, 0xf2, 0x1f, 0x29, 0x66, 0xbe],
'EFI_LEGACY_REGION2_PROTOCOL_GUID': [0x70101eaf, 0x85, 0x440c, 0xb3, 0x56, 0x8e, 0xe3, 0x6f, 0xef, 0x24, 0xf0],
'EFI_LEGACY_REGION_PROTOCOL_GUID': [0xfc9013a, 0x568, 0x4ba9, 0x9b, 0x7e, 0xc9, 0xc3, 0x90, 0xa6, 0x60, 0x9b],
'EFI_LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID': [0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf],
'EFI_LOADED_IMAGE_PROTOCOL_GUID': [0x5B1B31A1, 0x9562, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'EFI_LOAD_FILE2_PROTOCOL_GUID': [0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d],
'EFI_LOAD_FILE_PROTOCOL_GUID': [0x56EC3091, 0x954C, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'EFI_LOAD_FIXED_ADDRESS_CONFIGURATION_TABLE_GUID': [0x2CA88B53, 0xD296, 0x4080, 0xA4, 0xA5, 0xCA, 0xD9, 0xBA, 0xE2, 0x4B, 0x9],
'EFI_LOCK_BOX_PROTOCOL_GUID': [0xbd445d79, 0xb7ad, 0x4f04, 0x9a, 0xd8, 0x29, 0xbd, 0x20, 0x40, 0xeb, 0x3c],
'EFI_MANAGED_NETWORK_PROTOCOL_GUID': [0x7ab33a91, 0xace5, 0x4326, 0xb5, 0x72, 0xe7, 0xee, 0x33, 0xd3, 0x9f, 0x16],
'EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID': [0xf36ff770, 0xa7e1, 0x42cf, 0x9e, 0xd2, 0x56, 0xf0, 0xf2, 0x71, 0xf4, 0x4c],
'EFI_MEASURED_FV_HOB_GUID': [0xb2360b42, 0x7173, 0x420a, 0x86, 0x96, 0x46, 0xca, 0x6b, 0xab, 0x10, 0x60],
'EFI_MEMORY_PRODUCER_GUID': [0x1d7add6e, 0xb2da, 0x4b0b, 0xb2, 0x9f, 0x49, 0xcb, 0x42, 0xf4, 0x63, 0x56],
'EFI_MEMORY_SUBCLASS_GUID': [0x4E8F4EBB, 0x64B9, 0x4e05, 0x9B, 0x18, 0x4C, 0xFE, 0x49, 0x23, 0x50, 0x97],
'EFI_MEMORY_TYPE_INFORMATION_GUID': [0x4c19049f, 0x4137, 0x4dd3, 0x9c, 0x10, 0x8b, 0x97, 0xa8, 0x3f, 0xfd, 0xfa],
'EFI_METRONOME_ARCH_PROTOCOL_GUID': [0x26baccb2, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_MINI_SHELL_FILE_GUID': [0x86ad232b, 0xd33a, 0x465c, 0xbf, 0x5f, 0x41, 0x37, 0xb, 0xa9, 0x2f, 0xe2],
'EFI_MISC_PRODUCER_GUID': [0x62512c92, 0x63c4, 0x4d80, 0x82, 0xb1, 0xc1, 0xa4, 0xdc, 0x44, 0x80, 0xe5],
'EFI_MISC_SUBCLASS_GUID': [0x772484B2, 0x7482, 0x4b91, 0x9F, 0x9A, 0xAD, 0x43, 0xF8, 0x1C, 0x58, 0x81],
'EFI_MMC_HOST_PROTOCOL_GUID': [0x3e591c00, 0x9e4a, 0x11df, 0x92, 0x44, 0x00, 0x02, 0xA5, 0xD5, 0xC5, 0x1B],
'EFI_MONOTONIC_COUNTER_ARCH_PROTOCOL_GUID': [0x1da97072, 0xbddc, 0x4b30, 0x99, 0xf1, 0x72, 0xa0, 0xb5, 0x6f, 0xff, 0x2a],
'EFI_MONTONIC_COUNTER_ARCH_PROTOCOL_GUID': [0x1da97072, 0xbddc, 0x4b30, 0x99, 0xf1, 0x72, 0xa0, 0xb5, 0x6f, 0xff, 0x2a],
'EFI_MPS_TABLE_GUID': [0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_MP_SERVICES_PROTOCOL_GUID': [0x3fdda605, 0xa76e, 0x4f46, 0xad, 0x29, 0x12, 0xf4, 0x53, 0x1b, 0x3d, 0x08],
'EFI_MTFTP4_PROTOCOL_GUID': [0x78247c57, 0x63db, 0x4708, 0x99, 0xc2, 0xa8, 0xb4, 0xa9, 0xa6, 0x1f, 0x6b],
'EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID': [0x2FE800BE, 0x8F01, 0x4aa6, 0x94, 0x6B, 0xD7, 0x13, 0x88, 0xE1, 0x83, 0x3F],
'EFI_MTFTP6_PROTOCOL_GUID': [0xbf0a78ba, 0xec29, 0x49cf, 0xa1, 0xc9, 0x7a, 0xe5, 0x4e, 0xab, 0x6a, 0x51],
'EFI_MTFTP6_SERVICE_BINDING_PROTOCOL_GUID': [0xd9760ff3, 0x3cca, 0x4267, 0x80, 0xf9, 0x75, 0x27, 0xfa, 0xfa, 0x42, 0x23],
'EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID': [0x1ACED566, 0x76ED, 0x4218, 0xBC, 0x81, 0x76, 0x7F, 0x1F, 0x97, 0x7A, 0x89],
'EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID': [0xE18541CD, 0xF755, 0x4f73, 0x92, 0x8D, 0x64, 0x3C, 0x8A, 0x79, 0xB2, 0x29],
'EFI_NIC_IP4_CONFIG_NVDATA_GUID': [0x9d5b53f, 0xf4b0, 0x4f59, 0xa0, 0xb1, 0x7b, 0x57, 0xd3, 0x5c, 0xe, 0x5],
'EFI_NIC_IP4_CONFIG_PROTOCOL_GUID': [0xdca3d4d, 0x12da, 0x4728, 0xbf, 0x7e, 0x86, 0xce, 0xb9, 0x28, 0xd0, 0x67],
'EFI_NIC_IP4_CONFIG_VARIABLE_GUID': [0xd8944553, 0xc4dd, 0x41f4, 0x9b, 0x30, 0xe1, 0x39, 0x7c, 0xfb, 0x26, 0x7b],
'EFI_NT_LOAD_AS_DLL_PPI_GUID': [0xccc53f6b, 0xa03a, 0x4ed8, 0x83, 0x9a, 0x3, 0xd9, 0x9c, 0x2, 0xb4, 0xe3],
'EFI_OEM_BADGING_PROTOCOL_GUID': [0x170e13c0, 0xbf1b, 0x4218, 0x87, 0x1d, 0x2a, 0xbd, 0xc6, 0xf8, 0x87, 0xbc],
'EFI_PART_TYPE_EFI_SYSTEM_PART_GUID': [0xc12a7328, 0xf81f, 0x11d2, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'EFI_PART_TYPE_LEGACY_MBR_GUID': [0x024dee41, 0x33e7, 0x11d3, 0x9d, 0x69, 0x00, 0x08, 0xc7, 0x81, 0xf3, 0x9f],
'EFI_PATH_FILE_NAME_GUID': [0x7644C181, 0xFA6E, 0x46DA, 0x80, 0xCB, 0x04, 0xB9, 0x90, 0x40, 0x62, 0xE8],
'EFI_PCD_PROTOCOL_GUID': [0x13a3f0f6, 0x264a, 0x3ef0, 0xf2, 0xe0, 0xde, 0xc5, 0x12, 0x34, 0x2f, 0x34],
'EFI_PCI_ENUMERATION_COMPLETE_GUID': [0x30cfe3e7, 0x3de1, 0x4586, 0xbe, 0x20, 0xde, 0xab, 0xa1, 0xb3, 0xb7, 0x93],
'EFI_PCI_EXPRESS_BASE_ADDRESS_GUID': [0x3677d529, 0x326f, 0x4603, 0xa9, 0x26, 0xea, 0xac, 0xe0, 0x1d, 0xcb, 0xb0],
'EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GUID': [0xCF8034BE, 0x6768, 0x4d8b, 0xB7, 0x39, 0x7C, 0xCE, 0x68, 0x3A, 0x9F, 0xBE],
'EFI_PCI_HOTPLUG_DEVICE_GUID': [0x0b280816, 0x52e7, 0x4e51, 0xaa, 0x57, 0x11, 0xbd, 0x41, 0xcb, 0xef, 0xc3],
'EFI_PCI_HOTPLUG_REQUEST_PROTOCOL_GUID': [0x19cb87ab, 0x2cb9, 0x4665, 0x83, 0x60, 0xdd, 0xcf, 0x60, 0x54, 0xf7, 0x9d],
'EFI_PCI_HOT_PLUG_INIT_PROTOCOL_GUID': [0xaa0e8bc1, 0xdabc, 0x46b0, 0xa8, 0x44, 0x37, 0xb8, 0x16, 0x9b, 0x2b, 0xea],
'EFI_PCI_IO_PROTOCOL_GUID': [0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a],
'EFI_PCI_OPTION_ROM_TABLE_GUID': [0x7462660f, 0x1cbd, 0x48da, 0xad, 0x11, 0x91, 0x71, 0x79, 0x13, 0x83, 0x1c],
'EFI_PCI_OVERRIDE_GUID': [0xb5b35764, 0x460c, 0x4a06, 0x99, 0xfc, 0x77, 0xa1, 0x7c, 0x1b, 0x5c, 0xeb],
'EFI_PCI_PLATFORM_PROTOCOL_GUID': [0x7d75280, 0x27d4, 0x4d69, 0x90, 0xd0, 0x56, 0x43, 0xe2, 0x38, 0xb3, 0x41],
'EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID': [0x2f707ebb, 0x4a1a, 0x11d4, 0x9a, 0x38, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PC_ANSI_GUID': [0xe0c14753, 0xf9be, 0x11d2, 0x9a, 0x0c, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_APRIORI_FILE_NAME_GUID': [0x1b45cc0a, 0x156a, 0x428a, 0xaf, 0x62, 0x49, 0x86, 0x4d, 0xa0, 0xe6, 0xe6],
'EFI_PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID': [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0x0b, 0xfb, 0xff],
'EFI_PEI_CPU_IO_PPI_INSTALLED_GUID': [0xe6af1f7b, 0xfc3f, 0x46da, 0xa8, 0x28, 0xa3, 0xb4, 0x57, 0xa4, 0x42, 0x82],
'EFI_PEI_DECOMPRESS_PPI_GUID': [0x1a36e4e7, 0xfab6, 0x476a, 0x8e, 0x75, 0x69, 0x5a, 0x5, 0x76, 0xfd, 0xd7],
'EFI_PEI_DEVICE_RECOVERY_MODULE_PPI_GUID': [0x0DE2CE25, 0x446A, 0x45a7, 0xBF, 0xC9, 0x37, 0xDA, 0x26, 0x34, 0x4B, 0x37],
'EFI_PEI_END_OF_PEI_PHASE_PPI_GUID': [0x605EA650, 0xC65C, 0x42e1, 0xBA, 0x80, 0x91, 0xA5, 0x2A, 0xB6, 0x18, 0xC6],
'EFI_PEI_FIND_FV_PPI_GUID': [0x36164812, 0xa023, 0x44e5, 0xbd, 0x85, 0x5, 0xbf, 0x3c, 0x77, 0x0, 0xaa],
'EFI_PEI_FIRMWARE_VOLUME_INFO_PPI_GUID': [0x49edb1c1, 0xbf21, 0x4761, 0xbb, 0x12, 0xeb, 0x0, 0x31, 0xaa, 0xbb, 0x39],
'EFI_PEI_FLUSH_INSTRUCTION_CACHE_GUID': [0xd8117cfc, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_LOADED_IMAGE_PPI_GUID': [0xc1fcd448, 0x6300, 0x4458, 0xb8, 0x64, 0x28, 0xdf, 0x01, 0x53, 0x64, 0xbc],
'EFI_PEI_LOAD_FILE_GUID': [0xb9e0abfe, 0x5979, 0x4914, 0x97, 0x7f, 0x6d, 0xee, 0x78, 0xc2, 0x78, 0xa6],
'EFI_PEI_LOAD_FILE_PPI_GUID': [0xb9e0abfe, 0x5979, 0x4914, 0x97, 0x7f, 0x6d, 0xee, 0x78, 0xc2, 0x78, 0xa6],
'EFI_PEI_PCD_PPI_GUID': [0x1f34d25, 0x4de2, 0x23ad, 0x3f, 0xf3, 0x36, 0x35, 0x3f, 0xf3, 0x23, 0xf1],
'EFI_PEI_PCI_CFG2_PPI_GUID': [0x57a449a, 0x1fdc, 0x4c06, 0xbf, 0xc9, 0xf5, 0x3f, 0x6a, 0x99, 0xbb, 0x92],
'EFI_PEI_PCI_CFG_PPI_INSTALLED_GUID': [0xe1f2eba0, 0xf7b9, 0x4a26, 0x86, 0x20, 0x13, 0x12, 0x21, 0x64, 0x2a, 0x90],
'EFI_PEI_PERFORMANCE_HOB_GUID': [0x10f432de, 0xdeec, 0x4631, 0x80, 0xcd, 0x47, 0xf6, 0x5d, 0x8f, 0x80, 0xbb],
'EFI_PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID': [0xf894643d, 0xc449, 0x42d1, 0x8e, 0xa8, 0x85, 0xbd, 0xd8, 0xc6, 0x5b, 0xde],
'EFI_PEI_PE_COFF_LOADER_GUID': [0xd8117cff, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_READ_ONLY_VARIABLE2_PPI_GUID': [0x2ab86ef5, 0xecb5, 0x4134, 0xb5, 0x56, 0x38, 0x54, 0xca, 0x1f, 0xe1, 0xb4],
'EFI_PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID': [0x3cdc90c6, 0x13fb, 0x4a75, 0x9e, 0x79, 0x59, 0xe9, 0xdd, 0x78, 0xb9, 0xfa],
'EFI_PEI_RECOVERY_BLOCK_IO_PPI_GUID': [0x695d8aa1, 0x42ee, 0x4c46, 0x80, 0x5c, 0x6e, 0xa6, 0xbc, 0xe7, 0x99, 0xe3],
'EFI_PEI_RECOVERY_MODULE_PPI_GUID': [0xFB6D9542, 0x612D, 0x4f45, 0x87, 0x2F, 0x5C, 0xFF, 0x52, 0xE9, 0x3D, 0xCF],
'EFI_PEI_REPORT_PROGRESS_CODE_PPI_GUID': [0x229832d3, 0x7a30, 0x4b36, 0xb8, 0x27, 0xf4, 0xc, 0xb7, 0xd4, 0x54, 0x36],
'EFI_PEI_RESET_PPI_GUID': [0xef398d58, 0x9dfd, 0x4103, 0xbf, 0x94, 0x78, 0xc6, 0xf4, 0xfe, 0x71, 0x2f],
'EFI_PEI_RSC_HANDLER_PPI_GUID': [0x65d394, 0x9951, 0x4144, 0x82, 0xa3, 0xa, 0xfc, 0x85, 0x79, 0xc2, 0x51],
'EFI_PEI_S3_RESUME2_PPI_GUID': [0x6D582DBC, 0xDB85, 0x4514, 0x8F, 0xCC, 0x5A, 0xDF, 0x62, 0x27, 0xB1, 0x47],
'EFI_PEI_S3_RESUME_PPI_GUID': [0x4426CCB2, 0xE684, 0x4a8a, 0xAE, 0x40, 0x20, 0xD4, 0xB0, 0x25, 0xB7, 0x10],
'EFI_PEI_SECTION_EXTRACTION_PPI_GUID': [0x4F89E208, 0xE144, 0x4804, 0x9E, 0xC8, 0x0F, 0x89, 0x4F, 0x7E, 0x36, 0xD7],
'EFI_PEI_SECURITY2_PPI_GUID': [0xdcd0be23, 0x9586, 0x40f4, 0xb6, 0x43, 0x06, 0x52, 0x2c, 0xed, 0x4e, 0xde],
'EFI_PEI_SECURITY_PPI_GUID': [0x1388066e, 0x3a57, 0x4efa, 0x98, 0xf3, 0xc1, 0x2f, 0x3a, 0x95, 0x8a, 0x29],
'EFI_PEI_SMBUS2_PPI_GUID': [0x9ca93627, 0xb65b, 0x4324, 0xa2, 0x2, 0xc0, 0xb4, 0x61, 0x76, 0x45, 0x43],
'EFI_PEI_SMBUS_PPI_GUID': [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0xb, 0xfb, 0xda],
'EFI_PEI_SMM_COMMUNICATION_PPI_GUID': [0xae933e1c, 0xcc47, 0x4e38, 0x8f, 0xe, 0xe2, 0xf6, 0x1d, 0x26, 0x5, 0xdf],
'EFI_PEI_STALL_PPI_GUID': [0x1f4c6f90, 0xb06b, 0x48d8, 0xa2, 0x01, 0xba, 0xe5, 0xf1, 0xcd, 0x7d, 0x56],
'EFI_PEI_TEMPORARY_RAM_DONE_PPI_GUID': [0xceab683c, 0xec56, 0x4a2d, 0xa9, 0x06, 0x40, 0x53, 0xfa, 0x4e, 0x9c, 0x16],
'EFI_PEI_TEMPORARY_RAM_SUPPORT_PPI_GUID': [0xdbe23aa9, 0xa345, 0x4b97, 0x85, 0xb6, 0xb2, 0x26, 0xf1, 0x61, 0x73, 0x89],
'EFI_PEI_TRANSFER_CONTROL_GUID': [0xd8117d02, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_VECTOR_HANDOFF_INFO_PPI_GUID': [0x3cd652b4, 0x6d33, 0x4dce, 0x89, 0xdb, 0x83, 0xdf, 0x97, 0x66, 0xfc, 0xca],
'EFI_PERFORMANCE_PROTOCOL_GUID': [0xFFECFFFF, 0x923C, 0x14d2, 0x9E, 0x3F, 0x22, 0xA0, 0xC9, 0x69, 0x56, 0x3B],
'EFI_PHYSICAL_PRESENCE_DATA_GUID': [0xf6499b1, 0xe9ad, 0x493d, 0xb9, 0xc2, 0x2f, 0x90, 0x81, 0x5c, 0x6c, 0xbc],
'EFI_PLATFORM_DRIVER_OVERRIDE_PROTOCOL_GUID': [0x6b30c738, 0xa391, 0x11d4, 0x9a, 0x3b, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PLATFORM_MEMTEST_PROTOCOL_GUID': [0x859ba18, 0x7dd7, 0x4ed7, 0xa8, 0x8e, 0x10, 0x9c, 0x63, 0x91, 0x7b, 0xdd],
'EFI_PLATFORM_TO_DRIVER_CONFIGURATION_CLP_GUID': [0x345ecc0e, 0x0cb6, 0x4b75, 0xbb, 0x57, 0x1b, 0x12, 0x9c, 0x47, 0x33, 0x3e],
'EFI_PLATFORM_TO_DRIVER_CONFIGURATION_PROTOCOL_GUID': [0x642cd590, 0x8059, 0x4c0a, 0xa9, 0x58, 0xc5, 0xec, 0x07, 0xd2, 0x3c, 0x4b],
'EFI_PRIMARY_CONSOLE_IN_DEVICE_GUID': [0xe451dcbe, 0x96a1, 0x4729, 0xa5, 0xcf, 0x6b, 0x9c, 0x2c, 0xff, 0x47, 0xfd],
'EFI_PRIMARY_CONSOLE_OUT_DEVICE_GUID': [0x62bdf38a, 0xe3d5, 0x492c, 0x95, 0xc, 0x23, 0xa7, 0xf6, 0x6e, 0x67, 0x2e],
'EFI_PRIMARY_STANDARD_ERROR_DEVICE_GUID': [0x5a68191b, 0x9b97, 0x4752, 0x99, 0x46, 0xe3, 0x6a, 0x5d, 0xa9, 0x42, 0xb1],
'EFI_PRINT2_PROTOCOL_GUID': [0xf05976ef, 0x83f1, 0x4f3d, 0x86, 0x19, 0xf7, 0x59, 0x5d, 0x41, 0xe5, 0x38],
'EFI_PRINT_PROTOCOL_GUID': [0xdf2d868e, 0x32fc, 0x4cf0, 0x8e, 0x6b, 0xff, 0xd9, 0x5d, 0x13, 0x43, 0xd0],
'EFI_PROCESSOR_PRODUCER_GUID': [0x1bf06aea, 0x5bec, 0x4a8d, 0x95, 0x76, 0x74, 0x9b, 0x09, 0x56, 0x2d, 0x30],
'EFI_PROCESSOR_SUBCLASS_GUID': [0x26fdeb7e, 0xb8af, 0x4ccf, 0xaa, 0x97, 0x02, 0x63, 0x3c, 0xe4, 0x8c, 0xa7],
'EFI_PS2_POLICY_PROTOCOL_GUID': [0x4df19259, 0xdc71, 0x4d46, 0xbe, 0xf1, 0x35, 0x7b, 0xb5, 0x78, 0xc4, 0x18],
'EFI_PXE_BASE_CODE_CALLBACK_PROTOCOL_GUID': [0x245dca21, 0xfb7b, 0x11d3, 0x8f, 0x01, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_PXE_BASE_CODE_PROTOCOL_GUID': [0x03c4e603, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PXE_DHCP4_CALLBACK_PROTOCOL_GUID': [0xc1544c01, 0x92a4, 0x4198, 0x8a, 0x84, 0x77, 0x85, 0x83, 0xc2, 0x36, 0x21],
'EFI_PXE_DHCP4_PROTOCOL_GUID': [0x03c4e624, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x29, 0x3f, 0xc1, 0x4d],
'EFI_REAL_TIME_CLOCK_ARCH_PROTOCOL_GUID': [0x27CFAC87, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_RESET_ARCH_PROTOCOL_GUID': [0x27CFAC88, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_RSC_HANDLER_PROTOCOL_GUID': [0x86212936, 0xe76, 0x41c8, 0xa0, 0x3a, 0x2a, 0xf2, 0xfc, 0x1c, 0x39, 0xe2],
'EFI_RUNTIME_ARCH_PROTOCOL_GUID': [0xb7dfb4e1, 0x52f, 0x449f, 0x87, 0xbe, 0x98, 0x18, 0xfc, 0x91, 0xb7, 0x33],
'EFI_RUNTIME_CRYPT_PROTOCOL_GUID': [0xe1475e0c, 0x1746, 0x4802, 0x86, 0x2e, 0x1, 0x1c, 0x2c, 0x2d, 0x9d, 0x86],
'EFI_S3_SAVE_STATE_PROTOCOL_GUID': [0xe857caf6, 0xc046, 0x45dc, 0xbe, 0x3f, 0xee, 0x7, 0x65, 0xfb, 0xa8, 0x87],
'EFI_S3_SMM_SAVE_STATE_PROTOCOL_GUID': [0x320afe62, 0xe593, 0x49cb, 0xa9, 0xf1, 0xd4, 0xc2, 0xf4, 0xaf, 0x1, 0x4c],
'EFI_SAL_MCA_INIT_PMI_PROTOCOL_GUID': [0xb60dc6e8, 0x3b6f, 0x11d5, 0xaf, 0x9, 0x0, 0xa0, 0xc9, 0x44, 0xa0, 0x5b],
'EFI_SAL_SYSTEM_TABLE_GUID': [0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SAS_DEVICE_PATH_GUID': [0xd487ddb4, 0x008b, 0x11d9, 0xaf, 0xdc, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_SCSI_BUS_PROTOCOL_GUID': [0x5261213D, 0x3A3D, 0x441E, 0xB3, 0xAF, 0x21, 0xD3, 0xF7, 0xA4, 0xCA, 0x17],
'EFI_SCSI_IO_PROTOCOL_GUID': [0x932f47e6, 0x2362, 0x4002, 0x80, 0x3e, 0x3c, 0xd5, 0x4b, 0x13, 0x8f, 0x85],
'EFI_SCSI_PASS_THRU_PROTOCOL_GUID': [0xa59e8fcf, 0xbda0, 0x43bb, 0x90, 0xb1, 0xd3, 0x73, 0x2e, 0xca, 0xa8, 0x77],
'EFI_SECTION_EXTRACTION_PROTOCOL_GUID': [0x448F5DA4, 0x6DD7, 0x4FE1, 0x93, 0x07, 0x69, 0x22, 0x41, 0x92, 0x21, 0x5D],
'EFI_SECURITY2_ARCH_PROTOCOL_GUID': [0x94ab2f58, 0x1438, 0x4ef1, 0x91, 0x52, 0x18, 0x94, 0x1a, 0x3a, 0x0e, 0x68],
'EFI_SECURITY_ARCH_PROTOCOL_GUID': [0xA46423E3, 0x4617, 0x49f1, 0xB9, 0xFF, 0xD1, 0xBF, 0xA9, 0x11, 0x58, 0x39],
'EFI_SECURITY_POLICY_PROTOCOL_GUID': [0x78E4D245, 0xCD4D, 0x4a05, 0xA2, 0xBA, 0x47, 0x43, 0xE8, 0x6C, 0xFC, 0xAB],
'EFI_SEC_PLATFORM_INFORMATION_GUID': [0x6f8c2b35, 0xfef4, 0x448d, 0x82, 0x56, 0xe1, 0x1b, 0x19, 0xd6, 0x10, 0x77],
'EFI_SERIAL_IO_PROTOCOL_GUID': [0xBB25CF6F, 0xF1D4, 0x11D2, 0x9A, 0x0C, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0xFD],
'EFI_SE_EXT_SIGNATURE_GUID': [0xd2c18636, 0x40e5, 0x4eb5, 0xa3, 0x1b, 0x36, 0x69, 0x5f, 0xd4, 0x2c, 0x87],
'EFI_SHELLPKG_TOKEN_SPACE_GUID': [0x171e9188, 0x31d3, 0x40f5, 0xb1, 0xc, 0x53, 0x9b, 0x2d, 0xb9, 0x40, 0xcd],
'EFI_SHELL_FILE_GUID': [0xc57ad6b7, 0x0515, 0x40a8, 0x9d, 0x21, 0x55, 0x16, 0x52, 0x85, 0x4e, 0x37],
'EFI_SHELL_PARAMETERS_PROTOCOL_GUID': [0x752f3136, 0x4e16, 0x4fdc, 0xa2, 0x2a, 0xe5, 0xf4, 0x68, 0x12, 0xf4, 0xca],
'EFI_SHELL_PROTOCOL_GUID': [0x6302d008, 0x7f9b, 0x4f30, 0x87, 0xac, 0x60, 0xc9, 0xfe, 0xf5, 0xda, 0x4e],
'EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID': [0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_NETWORK_PROTOCOL_GUID': [0xA19832B9, 0xAC25, 0x11D3, 0x9A, 0x2D, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_SIMPLE_POINTER_PROTOCOL_GUID': [0x31878c87, 0xb75, 0x11d5, 0x9a, 0x4f, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID': [0xdd9e7534, 0x7762, 0x4698, 0x8c, 0x14, 0xf5, 0x85, 0x17, 0xa6, 0x25, 0xaa],
'EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID': [0x387477c1, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_IN_PROTOCOL_GUID': [0x387477c1, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID': [0x387477c2, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_OUT_PROTOCOL_GUID': [0x387477c2, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIO_PROTOCOL_GUID': [0x215fdd18, 0xbd50, 0x4feb, 0x89, 0xb, 0x58, 0xca, 0xb, 0x47, 0x39, 0xe9],
'EFI_SMBIOS_PROTOCOL_GUID': [0x3583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7],
'EFI_SMBIOS_TABLE_GUID': [0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SMBUS_HC_PROTOCOL_GUID': [0xe49d33ed, 0x513d, 0x4634, 0xb6, 0x98, 0x6f, 0x55, 0xaa, 0x75, 0x1c, 0x1b],
'EFI_SMM_ACCESS2_PROTOCOL_GUID': [0xc2702b74, 0x800c, 0x4131, 0x87, 0x46, 0x8f, 0xb5, 0xb8, 0x9c, 0xe4, 0xac],
'EFI_SMM_ACCESS_PROTOCOL_GUID': [0x3792095a, 0xe309, 0x4c1e, 0xaa, 0x01, 0x85, 0xf5, 0x65, 0x5a, 0x17, 0xf1],
'EFI_SMM_BASE2_PROTOCOL_GUID': [0xf4ccbfb7, 0xf6e0, 0x47fd, 0x9d, 0xd4, 0x10, 0xa8, 0xf1, 0x50, 0xc1, 0x91],
'EFI_SMM_BASE_HELPER_READY_PROTOCOL_GUID': [0x910dca07, 0x1f94, 0x4ee7, 0xaf, 0x2f, 0xff, 0x72, 0xf3, 0x15, 0x43, 0x53],
'EFI_SMM_BASE_PROTOCOL_GUID': [0x1390954D, 0xda95, 0x4227, 0x93, 0x28, 0x72, 0x82, 0xc2, 0x17, 0xda, 0xa8],
'EFI_SMM_COMMUNICATION_PROTOCOL_GUID': [0xc68ed8e2, 0x9dc6, 0x4cbd, 0x9d, 0x94, 0xdb, 0x65, 0xac, 0xc5, 0xc3, 0x32],
'EFI_SMM_CONFIGURATION_PROTOCOL_GUID': [0x26eeb3de, 0xb689, 0x492e, 0x80, 0xf0, 0xbe, 0x8b, 0xd7, 0xda, 0x4b, 0xa7],
'EFI_SMM_CONTROL2_PROTOCOL_GUID': [0x843dc720, 0xab1e, 0x42cb, 0x93, 0x57, 0x8a, 0x0, 0x78, 0xf3, 0x56, 0x1b],
'EFI_SMM_CONTROL_PROTOCOL_GUID': [0x8d12e231, 0xc667, 0x4fd1, 0x98, 0xf2, 0x24, 0x49, 0xa7, 0xe7, 0xb2, 0xe5],
'EFI_SMM_CPU_IO2_PROTOCOL_GUID': [0x3242A9D8, 0xCE70, 0x4AA0, 0x95, 0x5D, 0x5E, 0x7B, 0x14, 0x0D, 0xE4, 0xD2],
'EFI_SMM_CPU_IO_GUID': [0x5f439a0b, 0x45d8, 0x4682, 0xa4, 0xf4, 0xf0, 0x57, 0x6b, 0x51, 0x34, 0x41],
'EFI_SMM_CPU_PROTOCOL_GUID': [0xeb346b97, 0x975f, 0x4a9f, 0x8b, 0x22, 0xf8, 0xe9, 0x2b, 0xb3, 0xd5, 0x69],
'EFI_SMM_CPU_SAVE_STATE_PROTOCOL_GUID': [0x21f302ad, 0x6e94, 0x471b, 0x84, 0xbc, 0xb1, 0x48, 0x0, 0x40, 0x3a, 0x1d],
'EFI_SMM_END_OF_DXE_PROTOCOL_GUID': [0x24e70042, 0xd5c5, 0x4260, 0x8c, 0x39, 0xa, 0xd3, 0xaa, 0x32, 0xe9, 0x3d],
'EFI_SMM_FAULT_TOLERANT_WRITE_PROTOCOL_GUID': [0x3868fc3b, 0x7e45, 0x43a7, 0x90, 0x6c, 0x4b, 0xa4, 0x7d, 0xe1, 0x75, 0x4d],
'EFI_SMM_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID': [0xd326d041, 0xbd31, 0x4c01, 0xb5, 0xa8, 0x62, 0x8b, 0xe8, 0x7f, 0x6, 0x53],
'EFI_SMM_GPI_DISPATCH2_PROTOCOL_GUID': [0x25566b03, 0xb577, 0x4cbf, 0x95, 0x8c, 0xed, 0x66, 0x3e, 0xa2, 0x43, 0x80],
'EFI_SMM_GPI_DISPATCH_PROTOCOL_GUID': [0xe0744b81, 0x9513, 0x49cd, 0x8c, 0xea, 0xe9, 0x24, 0x5e, 0x70, 0x39, 0xda],
'EFI_SMM_ICHN_DISPATCH_PROTOCOL_GUID': [0xc50b323e, 0x9075, 0x4f2a, 0xac, 0x8e, 0xd2, 0x59, 0x6a, 0x10, 0x85, 0xcc],
'EFI_SMM_IO_TRAP_DISPATCH2_PROTOCOL_GUID': [0x58dc368d, 0x7bfa, 0x4e77, 0xab, 0xbc, 0xe, 0x29, 0x41, 0x8d, 0xf9, 0x30],
'EFI_SMM_LOCK_BOX_COMMUNICATION_GUID': [0x2a3cfebd, 0x27e8, 0x4d0a, 0x8b, 0x79, 0xd6, 0x88, 0xc2, 0xa3, 0xe1, 0xc0],
'EFI_SMM_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID': [0x8bc1714d, 0xffcb, 0x41c3, 0x89, 0xdc, 0x6c, 0x74, 0xd0, 0x6d, 0x98, 0xea],
'EFI_SMM_PERIODIC_TIMER_DISPATCH2_PROTOCOL_GUID': [0x4cec368e, 0x8e8e, 0x4d71, 0x8b, 0xe1, 0x95, 0x8c, 0x45, 0xfc, 0x8a, 0x53],
'EFI_SMM_PERIODIC_TIMER_DISPATCH_PROTOCOL_GUID': [0x9cca03fc, 0x4c9e, 0x4a19, 0x9b, 0x6, 0xed, 0x7b, 0x47, 0x9b, 0xde, 0x55],
'EFI_SMM_POWER_BUTTON_DISPATCH2_PROTOCOL_GUID': [0x1b1183fa, 0x1823, 0x46a7, 0x88, 0x72, 0x9c, 0x57, 0x87, 0x55, 0x40, 0x9d],
'EFI_SMM_POWER_BUTTON_DISPATCH_PROTOCOL_GUID': [0xb709efa0, 0x47a6, 0x4b41, 0xb9, 0x31, 0x12, 0xec, 0xe7, 0xa8, 0xee, 0x56],
'EFI_SMM_READY_TO_LOCK_PROTOCOL_GUID': [0x47b7fa8c, 0xf4bd, 0x4af6, 0x82, 0x00, 0x33, 0x30, 0x86, 0xf0, 0xd2, 0xc8],
'EFI_SMM_RSC_HANDLER_PROTOCOL_GUID': [0x2ff29fa7, 0x5e80, 0x4ed9, 0xb3, 0x80, 0x1, 0x7d, 0x3c, 0x55, 0x4f, 0xf4],
'EFI_SMM_STANDBY_BUTTON_DISPATCH2_PROTOCOL_GUID': [0x7300c4a1, 0x43f2, 0x4017, 0xa5, 0x1b, 0xc8, 0x1a, 0x7f, 0x40, 0x58, 0x5b],
'EFI_SMM_STANDBY_BUTTON_DISPATCH_PROTOCOL_GUID': [0x78965b98, 0xb0bf, 0x449e, 0x8b, 0x22, 0xd2, 0x91, 0x4e, 0x49, 0x8a, 0x98],
'EFI_SMM_STATUS_CODE_PROTOCOL_GUID': [0x6afd2b77, 0x98c1, 0x4acd, 0xa6, 0xf9, 0x8a, 0x94, 0x39, 0xde, 0xf, 0xb1],
'EFI_SMM_SWAP_ADDRESS_RANGE_PROTOCOL_GUID': [0x67c4f112, 0x3385, 0x4e55, 0x9c, 0x5b, 0xc0, 0x5b, 0x71, 0x7c, 0x42, 0x28],
'EFI_SMM_SW_DISPATCH2_PROTOCOL_GUID': [0x18a3c6dc, 0x5eea, 0x48c8, 0xa1, 0xc1, 0xb5, 0x33, 0x89, 0xf9, 0x89, 0x99],
'EFI_SMM_SW_DISPATCH_PROTOCOL_GUID': [0xe541b773, 0xdd11, 0x420c, 0xb0, 0x26, 0xdf, 0x99, 0x36, 0x53, 0xf8, 0xbf],
'EFI_SMM_SX_DISPATCH2_PROTOCOL_GUID': [0x456d2859, 0xa84b, 0x4e47, 0xa2, 0xee, 0x32, 0x76, 0xd8, 0x86, 0x99, 0x7d],
'EFI_SMM_SX_DISPATCH_PROTOCOL_GUID': [0x14fc52be, 0x1dc, 0x426c, 0x91, 0xae, 0xa2, 0x3c, 0x3e, 0x22, 0xa, 0xe8],
'EFI_SMM_USB_DISPATCH2_PROTOCOL_GUID': [0xee9b8d90, 0xc5a6, 0x40a2, 0xbd, 0xe2, 0x52, 0x55, 0x8d, 0x33, 0xcc, 0xa1],
'EFI_SMM_USB_DISPATCH_PROTOCOL_GUID': [0xa05b6ffd, 0x87af, 0x4e42, 0x95, 0xc9, 0x62, 0x28, 0xb6, 0x3c, 0xf3, 0xf3],
'EFI_SMM_VARIABLE_PROTOCOL_GUID': [0xed32d533, 0x99e6, 0x4209, 0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7],
'EFI_SMM_VARIABLE_WRITE_GUID': [0x93ba1826, 0xdffb, 0x45dd, 0x82, 0xa7, 0xe7, 0xdc, 0xaa, 0x3b, 0xbd, 0xf3],
'EFI_STANDARD_CALLER_ID_GUID': [0xC9DCF469, 0xA7C4, 0x11D5, 0x87, 0xDA, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xB9],
'EFI_STANDARD_ERROR_DEVICE_GUID': [0xd3b36f2d, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_STATUS_CODE_DATA_TYPE_ASSERT_GUID': [0xDA571595, 0x4D99, 0x487C, 0x82, 0x7C, 0x26, 0x22, 0x67, 0x7D, 0x33, 0x07],
'EFI_STATUS_CODE_DATA_TYPE_DEBUG_GUID': [0x9A4E9246, 0xD553, 0x11D5, 0x87, 0xE2, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xb9],
'EFI_STATUS_CODE_DATA_TYPE_ERROR_GUID': [0xAB359CE3, 0x99B3, 0xAE18, 0xC8, 0x9D, 0x95, 0xD3, 0xB0, 0x72, 0xE1, 0x9B],
'EFI_STATUS_CODE_DATA_TYPE_EXCEPTION_HANDLER_GUID': [0x3BC2BD12, 0xAD2E, 0x11D5, 0x87, 0xDD, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xB9],
'EFI_STATUS_CODE_DATA_TYPE_PROGRESS_CODE_GUID': [0xA356AB39, 0x35C4, 0x35DA, 0xB3, 0x7A, 0xF8, 0xEA, 0x9E, 0x8B, 0x36, 0xA3],
'EFI_STATUS_CODE_DATA_TYPE_STRING_GUID': [0x92D11080, 0x496F, 0x4D95, 0xBE, 0x7E, 0x03, 0x74, 0x88, 0x38, 0x2B, 0x0A],
'EFI_STATUS_CODE_GUID': [0xd083e94c, 0x6560, 0x42e4, 0xb6, 0xd4, 0x2d, 0xf7, 0x5a, 0xdf, 0x6a, 0x2a],
'EFI_STATUS_CODE_RUNTIME_PROTOCOL_GUID': [0xd2b2b828, 0x826, 0x48a7, 0xb3, 0xdf, 0x98, 0x3c, 0x0, 0x60, 0x24, 0xf0],
'EFI_STATUS_CODE_SPECIFIC_DATA_GUID': [0x335984bd, 0xe805, 0x409a, 0xb8, 0xf8, 0xd2, 0x7e, 0xce, 0x5f, 0xf7, 0xa6],
'EFI_STORAGE_SECURITY_COMMAND_PROTOCOL_GUID': [0xC88B0B6D, 0x0DFC, 0x49A7, 0x9C, 0xB4, 0x49, 0x07, 0x4B, 0x4C, 0x3A, 0x78],
'EFI_SWAP_ADDRESS_RANGE_PROTOCOL_GUID': [0x1259f60d, 0xb754, 0x468e, 0xa7, 0x89, 0x4d, 0xb8, 0x5d, 0x55, 0xe8, 0x7e],
'EFI_SYSTEM_NV_DATA_FV_GUID': [0xfff12b8d, 0x7696, 0x4c8b, 0xa9, 0x85, 0x27, 0x47, 0x7, 0x5b, 0x4f, 0x50],
'EFI_SYSTEM_NV_DATA_HOB_GUID': [0xd6e5092d, 0xc7b2, 0x4872, 0xaf, 0x66, 0xfd, 0xc0, 0xe6, 0xf9, 0x5e, 0x78],
'EFI_TAPE_IO_PROTOCOL_GUID': [0x1e93e633, 0xd65a, 0x459e, 0xab, 0x84, 0x93, 0xd9, 0xec, 0x26, 0x6d, 0x18],
'EFI_TCG_EVENT_HOB_GUID': [0x2e3044ac, 0x879f, 0x490f, 0x97, 0x60, 0xbb, 0xdf, 0xaf, 0x69, 0x5f, 0x50],
'EFI_TCG_PLATFORM_PROTOCOL_GUID': [0x8c4c9a41, 0xbf56, 0x4627, 0x9e, 0xa, 0xc8, 0x38, 0x6d, 0x66, 0x11, 0x5c],
'EFI_TCG_PROTOCOL_GUID': [0xf541796d, 0xa62e, 0x4954, 0xa7, 0x75, 0x95, 0x84, 0xf6, 0x1b, 0x9c, 0xdd],
'EFI_TCP4_PROTOCOL_GUID': [0x65530BC7, 0xA359, 0x410f, 0xB0, 0x10, 0x5A, 0xAD, 0xC7, 0xEC, 0x2B, 0x62],
'EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID': [0x00720665, 0x67EB, 0x4a99, 0xBA, 0xF7, 0xD3, 0xC3, 0x3A, 0x1C, 0x7C, 0xC9],
'EFI_TCP6_PROTOCOL_GUID': [0x46e44855, 0xbd60, 0x4ab7, 0xab, 0x0d, 0xa6, 0x79, 0xb9, 0x44, 0x7d, 0x77],
'EFI_TCP6_SERVICE_BINDING_PROTOCOL_GUID': [0xec20eb79, 0x6c1a, 0x4664, 0x9a, 0x0d, 0xd2, 0xe4, 0xcc, 0x16, 0xd6, 0x64],
'EFI_TCP_PROTOCOL_GUID': [0x02b3d5f2, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_TIANO_DECOMPRESS_PROTOCOL_GUID': [0xe84cf29c, 0x191f, 0x4eae, 0x96, 0xe1, 0xf4, 0x6a, 0xec, 0xea, 0xea, 0x0b],
'EFI_TIMER_ARCH_PROTOCOL_GUID': [0x26baccb3, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_TSC_FREQUENCY_GUID': [0xdba6a7e3, 0xbb57, 0x4be7, 0x8a, 0xf8, 0xd5, 0x78, 0xdb, 0x7e, 0x56, 0x87],
'EFI_UART_DEVICE_PATH_GUID': [0x37499a9d, 0x542f, 0x4c89, 0xa0, 0x26, 0x35, 0xda, 0x14, 0x20, 0x94, 0xe4],
'EFI_UDP4_PROTOCOL_GUID': [0x3ad9df29, 0x4501, 0x478d, 0xb1, 0xf8, 0x7f, 0x7f, 0xe7, 0x0e, 0x50, 0xf3],
'EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID': [0x83f01464, 0x99bd, 0x45e5, 0xb3, 0x83, 0xaf, 0x63, 0x05, 0xd8, 0xe9, 0xe6],
'EFI_UDP6_PROTOCOL_GUID': [0x4f948815, 0xb4b9, 0x43cb, 0x8a, 0x33, 0x90, 0xe0, 0x60, 0xb3, 0x49, 0x55],
'EFI_UDP6_SERVICE_BINDING_PROTOCOL_GUID': [0x66ed4721, 0x3c98, 0x4d3e, 0x81, 0xe3, 0xd0, 0x3d, 0xd3, 0x9a, 0x72, 0x54],
'EFI_UGA_DRAW_PROTOCOL_GUID': [0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39],
'EFI_UGA_IO_PROTOCOL_GUID': [0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2],
'EFI_UGA_SPLASH_PROTOCOL_GUID': [0xa45b3a0d, 0x2e55, 0x4c03, 0xad, 0x9c, 0x27, 0xd4, 0x82, 0xb, 0x50, 0x7e],
'EFI_UNICODE_COLLATION2_PROTOCOL_GUID': [0xa4c751fc, 0x23ae, 0x4c3e, 0x92, 0xe9, 0x49, 0x64, 0xcf, 0x63, 0xf3, 0x49],
'EFI_UNICODE_COLLATION_PROTOCOL2_GUID': [0xa4c751fc, 0x23ae, 0x4c3e, 0x92, 0xe9, 0x49, 0x64, 0xcf, 0x63, 0xf3, 0x49],
'EFI_UNICODE_COLLATION_PROTOCOL_GUID': [0x1d85cd7f, 0xf43d, 0x11d2, 0x9a, 0xc, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_UNIX_CONSOLE_GUID': [0xf2cc5d06, 0x8985, 0x11db, 0xbb, 0x19, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_CPU_MODEL_GUID': [0xf2d3b330, 0x8985, 0x11db, 0x8a, 0xa3, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_CPU_SPEED_GUID': [0xf2d74e5a, 0x8985, 0x11db, 0x97, 0x05, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_FILE_SYSTEM_GUID': [0xf2c16b9e, 0x8985, 0x11db, 0x92, 0xc8, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_GOP_GUID': [0xbace07c2, 0x8987, 0x11db, 0xa5, 0x9a, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_IO_PROTOCOL_GUID': [0xf2e23f54, 0x8985, 0x11db, 0xac, 0x79, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_MEMORY_GUID': [0xf2d006cc, 0x8985, 0x11db, 0xa4, 0x72, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_NETWORK_GUID': [0x081603B4, 0x0F1D, 0x4022, 0xB6, 0xFD, 0x4C, 0xE3, 0x5E, 0x09, 0xA1, 0xA6],
'EFI_UNIX_PHYSICAL_DISKS_GUID': [0xf2bdcc96, 0x8985, 0x11db, 0x87, 0x19, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_SERIAL_PORT_GUID': [0x6d3a727d, 0x66c8, 0x4d19, 0x87, 0xe6, 0x2, 0x15, 0x86, 0x14, 0x90, 0xf3],
'EFI_UNIX_THUNK_PROTOCOL_GUID': [0xf2e98868, 0x8985, 0x11db, 0x9a, 0x59, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_UGA_GUID': [0xf2c8b80e, 0x8985, 0x11db, 0x93, 0xf1, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_UGA_IO_PROTOCOL_GUID': [0xf2e5e2c6, 0x8985, 0x11db, 0xa1, 0x91, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_VIRTUAL_DISKS_GUID': [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UPDATE_DATA_FILE_GUID': [0x283fa2ee, 0x532c, 0x484d, 0x93, 0x83, 0x9f, 0x93, 0xb3, 0x6f, 0xb, 0x7e],
'EFI_USB2_HC_PROTOCOL_GUID': [0x3e745226, 0x9818, 0x45b6, 0xa2, 0xac, 0xd7, 0xcd, 0xe, 0x8b, 0xa2, 0xbc],
'EFI_USB_ATAPI_PROTOCOL_GUID': [0x2B2F68DA, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USB_BUS_PROTOCOL_GUID': [0x2B2F68CC, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USB_HC_PROTOCOL_GUID': [0xf5089266, 0x1aa0, 0x4953, 0x97, 0xd8, 0x56, 0x2f, 0x8a, 0x73, 0xb5, 0x19],
'EFI_USB_IO_PROTOCOL_GUID': [0x2B2F68D6, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USER_CREDENTIAL2_PROTOCOL_GUID': [0xe98adb03, 0xb8b9, 0x4af8, 0xba, 0x20, 0x26, 0xe9, 0x11, 0x4c, 0xbc, 0xe5],
'EFI_USER_CREDENTIAL_PROTOCOL_GUID': [0x71ee5e94, 0x65b9, 0x45d5, 0x82, 0x1a, 0x3a, 0x4d, 0x86, 0xcf, 0xe6, 0xbe],
'EFI_USER_INFO_ACCESS_SETUP_ADMIN_GUID': [0x85b75607, 0xf7ce, 0x471e, 0xb7, 0xe4, 0x2a, 0xea, 0x5f, 0x72, 0x32, 0xee],
'EFI_USER_INFO_ACCESS_SETUP_NORMAL_GUID': [0x1db29ae0, 0x9dcb, 0x43bc, 0x8d, 0x87, 0x5d, 0xa1, 0x49, 0x64, 0xdd, 0xe2],
'EFI_USER_INFO_ACCESS_SETUP_RESTRICTED_GUID': [0xbdb38125, 0x4d63, 0x49f4, 0x82, 0x12, 0x61, 0xcf, 0x5a, 0x19, 0xa, 0xf8],
'EFI_USER_MANAGER_PROTOCOL_GUID': [0x6fd5b00c, 0xd426, 0x4283, 0x98, 0x87, 0x6c, 0xf5, 0xcf, 0x1c, 0xb1, 0xfe],
'EFI_UXIX_SYSTEM_CONFIG_GUID': [0x375ea976, 0x3ccd, 0x4e74, 0xa8, 0x45, 0x26, 0xb9, 0xb3, 0x24, 0xb1, 0x3c],
'EFI_VARIABLE_ARCH_PROTOCOL_GUID': [0x1e5668e2, 0x8481, 0x11d4, 0xbc, 0xf1, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_VARIABLE_GUID': [0xddcf3616, 0x3275, 0x4164, 0x98, 0xb6, 0xfe, 0x85, 0x70, 0x7f, 0xfe, 0x7d],
'EFI_VARIABLE_INDEX_TABLE_GUID': [0x8cfdb8c8, 0xd6b2, 0x40f3, 0x8e, 0x97, 0x02, 0x30, 0x7c, 0xc9, 0x8b, 0x7c],
'EFI_VARIABLE_STORE_PROTOCOL_GUID': [0xf088cd91, 0xa046, 0x11d2, 0x8e, 0x42, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_VARIABLE_WRITE_ARCH_PROTOCOL_GUID': [0x6441f818, 0x6362, 0x4e44, 0xb5, 0x70, 0x7d, 0xba, 0x31, 0xdd, 0x24, 0x53],
'EFI_VGA_MINI_PORT_PROTOCOL_GUID': [0xc7735a2f, 0x88f5, 0x4882, 0xae, 0x63, 0xfa, 0xac, 0x8c, 0x8b, 0x86, 0xb3],
'EFI_VIRTUAL_MEMORY_ACCESS_PROTOCOL_GUID': [0x745d377a, 0xb988, 0x47b2, 0xb1, 0x8f, 0xbb, 0xc8, 0xd, 0xc5, 0x66, 0x98],
'EFI_VLAN_CONFIG_PROTOCOL_GUID': [0x9e23d768, 0xd2f3, 0x4366, 0x9f, 0xc3, 0x3a, 0x7a, 0xba, 0x86, 0x43, 0x74],
'EFI_VT_100_GUID': [0xdfa66065, 0xb419, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_VT_100_PLUS_GUID': [0x7baec70b, 0x57e0, 0x4c76, 0x8e, 0x87, 0x2f, 0x9e, 0x28, 0x08, 0x83, 0x43],
'EFI_VT_UTF8_GUID': [0xad15a0d6, 0x8bec, 0x4acf, 0xa0, 0x73, 0xd0, 0x1d, 0xe7, 0x7e, 0x2d, 0x88],
'EFI_WATCHDOG_TIMER_ARCH_PROTOCOL_GUID': [0x665E3FF5, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_WIN_NT_CONSOLE_GUID': [0xba73672c, 0xa5d3, 0x11d4, 0xbd, 0x0, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_CPU_MODEL_GUID': [0xbee9b6ce, 0x2f8a, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_CPU_SPEED_GUID': [0xd4f29055, 0xe1fb, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_FILE_SYSTEM_GUID': [0xc95a935, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_GOP_GUID': [0x4e11e955, 0xccca, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_IO_PROTOCOL_GUID': [0x96eb4ad6, 0xa32a, 0x11d4, 0xbc, 0xfd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_MEMORY_GUID': [0x99042912, 0x122a, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_PASS_THROUGH_GUID': [0xcc664eb8, 0x3c24, 0x4086, 0xb6, 0xf6, 0x34, 0xe8, 0x56, 0xbc, 0xe3, 0x6e],
'EFI_WIN_NT_PHYSICAL_DISKS_GUID': [0xc95a92f, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_SERIAL_PORT_GUID': [0xc95a93d, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_SYSTEM_CONFIG_GUID': [0xb347f047, 0xaf8c, 0x490e, 0xac, 0x07, 0x0a, 0xa9, 0xb7, 0xe5, 0x38, 0x58],
'EFI_WIN_NT_THUNK_PROTOCOL_GUID': [0x58c518b1, 0x76f3, 0x11d4, 0xbc, 0xea, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_UGA_GUID': [0xab248e99, 0xabe1, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_VIRTUAL_DISKS_GUID': [0xc95a928, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_XEN_INFO_GUID': [0xd3b46f3b, 0xd441, 0x1244, 0x9a, 0x12, 0x0, 0x12, 0x27, 0x3f, 0xc1, 0x4d],
'EMBEDDED_DEVICE_PROTOCOL_GUID': [0xbf4b9d10, 0x13ec, 0x43dd, 0x88, 0x80, 0xe9, 0xb, 0x71, 0x8f, 0x27, 0xde],
'EMBEDDED_EXTERNAL_DEVICE_PROTOCOL_GUID': [0x735F8C64, 0xD696, 0x44D0, 0xBD, 0xF2, 0x44, 0x7F, 0xD0, 0x5A, 0x54, 0x06],
'EMU_BLOCK_IO_PROTOCOL_GUID': [0x6888A4AE, 0xAFCE, 0xE84B, 0x91, 0x02, 0xF7, 0xB9, 0xDA, 0xE6, 0xA0, 0x30],
'EMU_GRAPHICS_WINDOW_PROTOCOL_GUID': [0x30FD316A, 0x6728, 0x2E41, 0xA6, 0x90, 0x0D, 0x13, 0x33, 0xD8, 0xCA, 0xC1],
'EMU_IO_THUNK_PROTOCO_GUID': [0x453368F6, 0x7C85, 0x434A, 0xA9, 0x8A, 0x72, 0xD1, 0xB7, 0xFF, 0xA9, 0x26],
'EMU_SNP_PROTOCOL_GUID': [0xFD5FBE54, 0x8C35, 0xB345, 0x8A, 0x0F, 0x7A, 0xC8, 0xA5, 0xFD, 0x05, 0x21],
'EMU_THUNK_PPI_GUID': [0xB958B78C, 0x1D3E, 0xEE40, 0x8B, 0xF4, 0xF0, 0x63, 0x2D, 0x06, 0x39, 0x16],
'EMU_THUNK_PROTOCOL_GUID': [0x5CF32E0B, 0x8EDF, 0x2E44, 0x9C, 0xDA, 0x93, 0x20, 0x5E, 0x99, 0xEC, 0x1C],
'EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID': [0xde0ee9a4, 0x3c7a, 0x44f2, 0xb7, 0x8b, 0xe3, 0xcc, 0xd6, 0x9c, 0x3a, 0xf7],
'EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID': [0xde0ee9a4, 0x3c7a, 0x44f2, 0xb7, 0x8b, 0xe3, 0xcc, 0xd6, 0x9c, 0x3a, 0xf7],
'FFS_GUID': [0xac05bf33, 0x995a, 0x4ed4, 0xaa, 0xb8, 0xef, 0x7a, 0xe8, 0xf, 0x5c, 0xb0],
'FILE_EXPLORE_FORMSET_GUID': [0x1f2d63e1, 0xfebd, 0x4dc7, 0x9c, 0xc5, 0xba, 0x2b, 0x1c, 0xef, 0x9c, 0x5b],
'FILE_GUID': [0xcbd2e4d5, 0x7068, 0x4ff5, 0xb4, 0x62, 0x98, 0x22, 0xb4, 0xad, 0x8d, 0x60],
'FORM_BROWSER_EXTENSION_PROTOCOL_GUID': [0x1f73b18d, 0x4630, 0x43c1, 0xa1, 0xde, 0x6f, 0x80, 0x85, 0x5d, 0x7d, 0xa4],
'FRAMEWORK_BDS_FRONTPAGE_FORMSET_GUID': [0x9e0c30bc, 0x3f06, 0x4ba6, 0x82, 0x88, 0x9, 0x17, 0x9b, 0x85, 0x5d, 0xbe],
'FRAMEWORK_EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID': [0xDE28BC59, 0x6228, 0x41BD, 0xBD, 0xF6, 0xA3, 0xB9, 0xAD, 0xB5, 0x8D, 0xA1],
'FRAMEWORK_EFI_MP_SERVICES_PROTOCOL_GUID': [0xf33261e7, 0x23cb, 0x11d5, 0xbd, 0x5c, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'FRONT_PAGE_FORMSET_GUID': [0x9e0c30bc, 0x3f06, 0x4ba6, 0x82, 0x88, 0x9, 0x17, 0x9b, 0x85, 0x5d, 0xbe],
'HANDLE_PARSING_HII_GUID': [0xb8969637, 0x81de, 0x43af, 0xbc, 0x9a, 0x24, 0xd9, 0x89, 0x13, 0xf2, 0xf6],
'HD_BOOT_DEVICE_PATH_VARIABLE_GUID': [0xfab7e9e1, 0x39dd, 0x4f2b, 0x84, 0x8, 0xe2, 0xe, 0x90, 0x6c, 0xb6, 0xde],
'HII_RESOURCE_SAMPLE_FORM_SET_GUID': [0x4f4ef7f0, 0xaa29, 0x4ce9, 0xba, 0x41, 0x64, 0x3e, 0x1, 0x23, 0xa9, 0x9f],
'HOB_LIST_GUID': [0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'HOT_PLUG_DEVICE_GUID': [0x220ac432, 0x1d43, 0x49e5, 0xa7, 0x4f, 0x4c, 0x9d, 0xa6, 0x7a, 0xd2, 0x3b],
'IDLE_LOOP_EVENT_GUID': [0x3c8d294c, 0x5fc3, 0x4451, 0xbb, 0x31, 0xc4, 0xc0, 0x32, 0x29, 0x5e, 0x6c],
'INTEL_FRAMEWORK_MODULEPKG_TOKEN_SPACE_GUID': [0xD3705011, 0xBC19, 0x4af7, 0xBE, 0x16, 0xF6, 0x80, 0x30, 0x37, 0x8C, 0x15],
'IP4_ISCSI_CONFIG_GUID': [0x6456ed61, 0x3579, 0x41c9, 0x8a, 0x26, 0x0a, 0x0b, 0xd6, 0x2b, 0x78, 0xfc],
'IP6_CONFIG_NVDATA_GUID': [0x2eea107, 0x98db, 0x400e, 0x98, 0x30, 0x46, 0xa, 0x15, 0x42, 0xd7, 0x99],
'ISCSI_CHAP_AUTH_INFO_GUID': [0x786ec0ac, 0x65ae, 0x4d1b, 0xb1, 0x37, 0xd, 0x11, 0xa, 0x48, 0x37, 0x97],
'ISCSI_CONFIG_GUID': [0x4b47d616, 0xa8d6, 0x4552, 0x9d, 0x44, 0xcc, 0xad, 0x2e, 0xf, 0x4c, 0xf9],
'ISCSI_V4_PRIVATE_GUID': [0xfa3cde4c, 0x87c2, 0x427d, 0xae, 0xde, 0x7d, 0xd0, 0x96, 0xc8, 0x8c, 0x58],
'ISCSI_V6_PRIVATE_GUID': [0x28be27e5, 0x66cc, 0x4a31, 0xa3, 0x15, 0xdb, 0x14, 0xc3, 0x74, 0x4d, 0x85],
'LAST_ENUM_LANGUAGE_GUID': [0xe8c545b, 0xa2ee, 0x470d, 0x8e, 0x26, 0xbd, 0xa1, 0xa1, 0x3c, 0xa, 0xa3],
'LDR_MEMORY_DESCRIPTOR_GUID': [0x7701d7e5, 0x7d1d, 0x4432, 0xa4, 0x68, 0x67, 0x3d, 0xab, 0x8a, 0xde, 0x60],
'LOAD_FILE_PROTOCOL_GUID': [0x56EC3091, 0x954C, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'LOCAL_EFI_WIN_NT_BUS_DRIVER_IO_PROTOCOL_GUID': [0x96eb4ad6, 0xa32a, 0x11d4, 0xbc, 0xfd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LOCAL_EFI_WIN_NT_SERIAL_PORT_GUID': [0xc95a93d, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LOCAL_EFI_WIN_NT_THUNK_PROTOCOL_GUID': [0x58c518b1, 0x76f3, 0x11d4, 0xbc, 0xea, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LZMAF86_CUSTOM_DECOMPRESS_GUID': [0xD42AE6BD, 0x1352, 0x4bfb, 0x90, 0x9A, 0xCA, 0x72, 0xA6, 0xEA, 0xE8, 0x89],
'LZMA_CUSTOM_DECOMPRESS_GUID': [0xEE4E5898, 0x3914, 0x4259, 0x9D, 0x6E, 0xDC, 0x7B, 0xD7, 0x94, 0x03, 0xCF],
'MDEMODULEPKG_TOKEN_SPACE_GUID': [0xA1AFF049, 0xFDEB, 0x442a, 0xB3, 0x20, 0x13, 0xAB, 0x4C, 0xB7, 0x2B, 0xBC],
'MDEPKG_TOKEN_SPACE_GUID': [0x914AEBE7, 0x4635, 0x459b, 0xAA, 0x1C, 0x11, 0xE2, 0x19, 0xB0, 0x3A, 0x10],
'MEMORY_ONLY_RESET_CONTROL_GUID': [0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29],
'MEMORY_STATUS_CODE_RECORD_GUID': [0x60cc026, 0x4c0d, 0x4dda, 0x8f, 0x41, 0x59, 0x5f, 0xef, 0x0, 0xa5, 0x2],
'MTC_VENDOR_GUID': [0xeb704011, 0x1402, 0x11d3, 0x8e, 0x77, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'MY_GUID': [0x12345678, 0xAABB, 0xCCDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66],
'NT_FWH_PPI_GUID': [0x4e76928f, 0x50ad, 0x4334, 0xb0, 0x6b, 0xa8, 0x42, 0x13, 0x10, 0x8a, 0x57],
'PCATCHIPSET_TOKEN_SPACE_GUID': [0x326ae723, 0xae32, 0x4589, 0x98, 0xb8, 0xca, 0xc2, 0x3c, 0xdc, 0xc1, 0xb1],
'PCD_DATABASE_HOB_GUID': [0xEA296D92, 0x0B69, 0x423C, 0x8C, 0x28, 0x33, 0xB4, 0xE0, 0xA9, 0x12, 0x68],
'PCD_PPI_GUID': [0x6e81c58, 0x4ad7, 0x44bc, 0x83, 0x90, 0xf1, 0x2, 0x65, 0xf7, 0x24, 0x80],
'PCD_PROTOCOL_GUID': [0x11b34006, 0xd85b, 0x4d0a, 0xa2, 0x90, 0xd5, 0xa5, 0x71, 0x31, 0xe, 0xf7],
'PE32_IMAGE_PROTOCOL_GUID': [0x5cb5c776, 0x60d5, 0x45ee, 0x88, 0x3c, 0x45, 0x27, 0x8, 0xcd, 0x74, 0x3f],
'PEI_ATA_CONTROLLER_PPI_GUID': [0xa45e60d1, 0xc719, 0x44aa, 0xb0, 0x7a, 0xaa, 0x77, 0x7f, 0x85, 0x90, 0x6d],
'PEI_BASE_MEMORY_TEST_GUID': [0xb6ec423c, 0x21d2, 0x490d, 0x85, 0xc6, 0xdd, 0x58, 0x64, 0xea, 0xa6, 0x74],
'PEI_BLOCK_IO_PPI_GUID': [0x695d8aa1, 0x42ee, 0x4c46, 0x80, 0x5c, 0x6e, 0xa6, 0xbc, 0xe7, 0x99, 0xe3],
'PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID': [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0x0b, 0xfb, 0xff],
'PEI_CAPSULE_PPI_GUID': [0x3acf33ee, 0xd892, 0x40f4, 0xa2, 0xfc, 0x38, 0x54, 0xd2, 0xe1, 0x32, 0x3d],
'PEI_CPU_IO_PPI_GUID': [0xe6af1f7b, 0xfc3f, 0x46da, 0xa8, 0x28, 0xa3, 0xb4, 0x57, 0xa4, 0x42, 0x82],
'PEI_END_OF_PEI_PHASE_PPI_GUID': [0x605EA650, 0xC65C, 0x42e1, 0xBA, 0x80, 0x91, 0xA5, 0x2A, 0xB6, 0x18, 0xC6],
'PEI_FLASH_MAP_PPI_GUID': [0xf34c2fa0, 0xde88, 0x4270, 0x84, 0x14, 0x96, 0x12, 0x22, 0xf4, 0x52, 0x1c],
'PEI_IN_MEMORY_GUID': [0x643b8786, 0xb417, 0x48d2, 0x8f, 0x5e, 0x78, 0x19, 0x93, 0x1c, 0xae, 0xd8],
'PEI_LOCK_PHYSICAL_PRESENCE_PPI_GUID': [0xef9aefe5, 0x2bd3, 0x4031, 0xaf, 0x7d, 0x5e, 0xfe, 0x5a, 0xbb, 0x9a, 0xd],
'PEI_NT_THUNK_GUID': [0x98c281e5, 0xf906, 0x43dd, 0xa9, 0x2b, 0xb0, 0x3, 0xbf, 0x27, 0x65, 0xda],
'PEI_NT_THUNK_PPI_GUID': [0x98c281e5, 0xf906, 0x43dd, 0xa9, 0x2b, 0xb0, 0x3, 0xbf, 0x27, 0x65, 0xda],
'PEI_OPERATOR_PRESENCE_PPI_GUID': [0x20a7378c, 0xaa83, 0x4ce1, 0x82, 0x1f, 0x47, 0x40, 0xee, 0x1b, 0x3f, 0x9f],
'PEI_PCI_CFG_PPI_GUID': [0xe1f2eba0, 0xf7b9, 0x4a26, 0x86, 0x20, 0x13, 0x12, 0x21, 0x64, 0x2a, 0x90],
'PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID': [0xf894643d, 0xc449, 0x42d1, 0x8e, 0xa8, 0x85, 0xbd, 0xd8, 0xc6, 0x5b, 0xde],
'PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID': [0x3cdc90c6, 0x13fb, 0x4a75, 0x9e, 0x79, 0x59, 0xe9, 0xdd, 0x78, 0xb9, 0xfa],
'PEI_RESET_PPI_GUID': [0xef398d58, 0x9dfd, 0x4103, 0xbf, 0x94, 0x78, 0xc6, 0xf4, 0xfe, 0x71, 0x2f],
'PEI_S3_RESUME_PPI_GUID': [0x4426CCB2, 0xE684, 0x4a8a, 0xAE, 0x40, 0x20, 0xD4, 0xB0, 0x25, 0xB7, 0x10],
'PEI_SECURITY_PPI_GUID': [0x1388066e, 0x3a57, 0x4efa, 0x98, 0xf3, 0xc1, 0x2f, 0x3a, 0x95, 0x8a, 0x29],
'PEI_SEC_PERFORMANCE_PPI_GUID': [0x0ecc666b, 0x4662, 0x47f9, 0x9d, 0xd5, 0xd0, 0x96, 0xff, 0x7d, 0xa4, 0x9e],
'PEI_SMBUS2_PPI_GUID': [0x9ca93627, 0xb65b, 0x4324, 0xa2, 0x2, 0xc0, 0xb4, 0x61, 0x76, 0x45, 0x43],
'PEI_SMBUS_PPI_GUID': [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0xb, 0xfb, 0xda],
'PEI_SMM_ACCESS_PPI_GUID': [0x268f33a9, 0xcccd, 0x48be, 0x88, 0x17, 0x86, 0x5, 0x3a, 0xc3, 0x2e, 0xd6],
'PEI_SMM_CONTROL_PPI_GUID': [0x61c68702, 0x4d7e, 0x4f43, 0x8d, 0xef, 0xa7, 0x43, 0x5, 0xce, 0x74, 0xc5],
'PEI_STALL_PPI_GUID': [0x1f4c6f90, 0xb06b, 0x48d8, 0xa2, 0x01, 0xba, 0xe5, 0xf1, 0xcd, 0x7d, 0x56],
'PEI_STATUS_CODE_MEMORY_PPI_GUID': [0x26f8ab01, 0xd3cd, 0x489c, 0x98, 0x4f, 0xdf, 0xde, 0xf7, 0x68, 0x39, 0x5b],
'PEI_STATUS_CODE_PPI_GUID': [0x229832d3, 0x7a30, 0x4b36, 0xb8, 0x27, 0xf4, 0xc, 0xb7, 0xd4, 0x54, 0x36],
'PEI_TPM_INITIALIZED_PPI_GUID': [0xe9db0d58, 0xd48d, 0x47f6, 0x9c, 0x6e, 0x6f, 0x40, 0xe8, 0x6c, 0x7b, 0x41],
'PEI_UNIX_AUTOSCAN_PPI_GUID': [0xf2ed3d14, 0x8985, 0x11db, 0xb0, 0x57, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'PEI_UNIX_THUNK_PPI_GUID': [0xf2f830f2, 0x8985, 0x11db, 0x80, 0x6b, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'PEI_USB2_HOST_CONTROLLER_PPI_GUID': [0xa7d09fe1, 0x74d4, 0x4ba5, 0x84, 0x7c, 0x12, 0xed, 0x5b, 0x19, 0xad, 0xe4],
'PEI_USB_CONTROLLER_PPI_GUID': [0x3bc1f6de, 0x693e, 0x4547, 0xa3, 0x0, 0x21, 0x82, 0x3c, 0xa4, 0x20, 0xb2],
'PEI_USB_HOST_CONTROLLER_PPI_GUID': [0x652b38a9, 0x77f4, 0x453f, 0x89, 0xd5, 0xe7, 0xbd, 0xc3, 0x52, 0xfc, 0x53],
'PEI_USB_IO_PPI_GUID': [0x7c29785c, 0x66b9, 0x49fc, 0xb7, 0x97, 0x1c, 0xa5, 0x55, 0xe, 0xf2, 0x83],
'PERFORMANCEPKG_TOKEN_SPACE_GUID': [0x669346ef, 0xFDad, 0x4aeb, 0x08, 0xa6, 0x21, 0x46, 0x2d, 0x3f, 0xef, 0x7d],
'PERFORMANCE_EX_PROTOCOL_GUID': [0x1ea81bec, 0xf01a, 0x4d98, 0xa2, 0x1, 0x4a, 0x61, 0xce, 0x2f, 0xc0, 0x22],
'PERFORMANCE_PROTOCOL_GUID': [0x76b6bdfa, 0x2acd, 0x4462, 0x9E, 0x3F, 0xcb, 0x58, 0xC9, 0x69, 0xd9, 0x37],
'PE_COFF_LOADER_PROTOCOL_GUID': [0xB323179B, 0x97FB, 0x477E, 0xB0, 0xFE, 0xD8, 0x85, 0x91, 0xFA, 0x11, 0xAB],
'PLAT_OVER_MNGR_GUID': [0x8614567d, 0x35be, 0x4415, 0x8d, 0x88, 0xbd, 0x7d, 0xc, 0x9c, 0x70, 0xc0],
'PRE_PI_EXTRACT_GUIDED_SECTION_DATA_GUID': [0x385A982C, 0x2F49, 0x4043, 0xA5, 0x1E, 0x49, 0x01, 0x02, 0x5C, 0x8B, 0x6B],
'PWD_CREDENTIAL_PROVIDER_GUID': [0x78b9ec8b, 0xc000, 0x46c5, 0xac, 0x93, 0x24, 0xa0, 0xc1, 0xbb, 0x0, 0xce],
'RECOVERY_ON_DATA_CD_GUID': [0x5cac0099, 0x0dc9, 0x48e5, 0x80, 0x68, 0xbb, 0x95, 0xf5, 0x40, 0x0a, 0x9f],
'RECOVERY_ON_FAT_FLOPPY_DISK_GUID': [0x2e3d2e75, 0x9b2e, 0x412d, 0xb4, 0xb1, 0x70, 0x41, 0x6b, 0x87, 0x0, 0xff],
'RECOVERY_ON_FAT_IDE_DISK_GUID': [0xb38573b6, 0x6200, 0x4ac5, 0xb5, 0x1d, 0x82, 0xe6, 0x59, 0x38, 0xd7, 0x83],
'RECOVERY_ON_FAT_USB_DISK_GUID': [0x0ffbce19, 0x324c, 0x4690, 0xa0, 0x09, 0x98, 0xc6, 0xae, 0x2e, 0xb1, 0x86],
'SAL_SYSTEM_TABLE_GUID': [0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'SECUREBOOT_CONFIG_FORM_SET_GUID': [0x5daf50a5, 0xea81, 0x4de2, 0x8f, 0x9b, 0xca, 0xbd, 0xa9, 0xcf, 0x5c, 0x14],
'SECURITYPKG_TOKEN_SPACE_GUID': [0xd3fb176, 0x9569, 0x4d51, 0xa3, 0xef, 0x7d, 0x61, 0xc6, 0x4f, 0xea, 0xba],
'SHELLPKG_SHELL_ENV2_EXT_GUID': [0xd2c18636, 0x40e5, 0x4eb5, 0xa3, 0x1b, 0x36, 0x69, 0x5f, 0xd4, 0x2c, 0x87],
'SHELL_ALIAS_VARIABLE_GUID': [0x0053d9d6, 0x2659, 0x4599, 0xa2, 0x6b, 0xef, 0x45, 0x36, 0xe6, 0x31, 0xa9],
'SHELL_DEBUG1_HII_GUID': [0x25f200aa, 0xd3cb, 0x470a, 0xbf, 0x51, 0xe7, 0xd1, 0x62, 0xd2, 0x2e, 0x6f],
'SHELL_DRIVER1_HII_GUID': [0xaf0b742, 0x63ec, 0x45bd, 0x8d, 0xb6, 0x71, 0xad, 0x7f, 0x2f, 0xe8, 0xe8],
'SHELL_ENVIRONMENT_PROTOCOL_GUID': [0x47c7b221, 0xc42a, 0x11d2, 0x8e, 0x57, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'SHELL_INSTALL1_HII_GUID': [0x7d574d54, 0xd364, 0x4d4a, 0x95, 0xe3, 0x49, 0x45, 0xdb, 0x7a, 0xd3, 0xee],
'SHELL_INTERFACE_PROTOCOL_GUID': [0x47c7b223, 0xc42a, 0x11d2, 0x8e, 0x57, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'SHELL_LEVEL1_HII_GUID': [0xdec5daa4, 0x6781, 0x4820, 0x9c, 0x63, 0xa7, 0xb0, 0xe4, 0xf1, 0xdb, 0x31],
'SHELL_LEVEL2_HII_GUID': [0xf95a7ccc, 0x4c55, 0x4426, 0xa7, 0xb4, 0xdc, 0x89, 0x61, 0x95, 0xb, 0xae],
'SHELL_LEVEL3_HII_GUID': [0x4344558d, 0x4ef9, 0x4725, 0xb1, 0xe4, 0x33, 0x76, 0xe8, 0xd6, 0x97, 0x4f],
'SHELL_MAP_GUID': [0x51271e13, 0x7de3, 0x43af, 0x8b, 0xc2, 0x71, 0xad, 0x3b, 0x82, 0x43, 0x25],
'SHELL_NETWORK1_HII_GUID': [0xf3d301bb, 0xf4a5, 0x45a8, 0xb0, 0xb7, 0xfa, 0x99, 0x9c, 0x62, 0x37, 0xae],
'SHELL_VARIABLE_GUID': [0x158def5a, 0xf656, 0x419c, 0xb0, 0x27, 0x7a, 0x31, 0x92, 0xc0, 0x79, 0xd2],
'SMBIOS_TABLE_GUID': [0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'SMM_COMMUNICATE_HEADER_GUID': [0xf328e36c, 0x23b6, 0x4a95, 0x85, 0x4b, 0x32, 0xe1, 0x95, 0x34, 0xcd, 0x75],
'SMM_PERFORMANCE_EX_PROTOCOL_GUID': [0x931fc048, 0xc71d, 0x4455, 0x89, 0x30, 0x47, 0x6, 0x30, 0xe3, 0xe, 0xe5],
'SMM_PERFORMANCE_PROTOCOL_GUID': [0xf866226a, 0xeaa5, 0x4f5a, 0xa9, 0xa, 0x6c, 0xfb, 0xa5, 0x7c, 0x58, 0x8e],
'STATUS_CODE_CALLBACK_GUID': [0xe701458c, 0x4900, 0x4ca5, 0xb7, 0x72, 0x3d, 0x37, 0x94, 0x9f, 0x79, 0x27],
'SYSTEM_ROM_FILE_GUID': [0x1547B4F3, 0x3E8A, 0x4FEF, 0x81, 0xC8, 0x32, 0x8E, 0xD6, 0x47, 0xAB, 0x1A],
'TCG_CONFIG_FORM_SET_GUID': [0xb0f901e4, 0xc424, 0x45de, 0x90, 0x81, 0x95, 0xe2, 0xb, 0xde, 0x6f, 0xb5],
'TEMPORARY_RAM_SUPPORT_PPI_GUID': [0xdbe23aa9, 0xa345, 0x4b97, 0x85, 0xb6, 0xb2, 0x26, 0xf1, 0x61, 0x73, 0x89],
'TIANO_CUSTOM_DECOMPRESS_GUID': [0xA31280AD, 0x481E, 0x41B6, 0x95, 0xE8, 0x12, 0x7F, 0x4C, 0x98, 0x47, 0x79],
'UNIX_FWH_PPI_GUID': [0xf2f0dc30, 0x8985, 0x11db, 0xa1, 0x5b, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'UNIX_PEI_LOAD_FILE_GUID': [0xf2f48768, 0x8985, 0x11db, 0xb8, 0xda, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'UNKNOWN_DEVICE_GUID': [0xcf31fac5, 0xc24e, 0x11d2, 0x85, 0xf3, 0x0, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'USB_CREDENTIAL_PROVIDER_GUID': [0xd0849ed1, 0xa88c, 0x4ba6, 0xb1, 0xd6, 0xab, 0x50, 0xe2, 0x80, 0xb7, 0xa9],
'USB_KEYBOARD_LAYOUT_PACKAGE_GUID': [0xc0f3b43, 0x44de, 0x4907, 0xb4, 0x78, 0x22, 0x5f, 0x6f, 0x62, 0x89, 0xdc],
'USER_IDENTIFY_MANAGER_GUID': [0x3ccd3dd8, 0x8d45, 0x4fed, 0x96, 0x2d, 0x2b, 0x38, 0xcd, 0x82, 0xb3, 0xc4],
'USER_PROFILE_MANAGER_GUID': [0xc35f272c, 0x97c2, 0x465a, 0xa2, 0x16, 0x69, 0x6b, 0x66, 0x8a, 0x8c, 0xfe],
'VIRTUAL_UNCACHED_PAGES_PROTOCOL_GUID': [0xAD651C7D, 0x3C22, 0x4DBF, 0x92, 0xe8, 0x38, 0xa7, 0xcd, 0xae, 0x87, 0xb2],
'VLAN_CONFIG_FORM_SET_GUID': [0xd79df6b0, 0xef44, 0x43bd, 0x97, 0x97, 0x43, 0xe9, 0x3b, 0xcf, 0x5f, 0xa8],
}
|
1123f4bb237219dea693931b6e026ece250a971c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/web/web_client_api/ui/util.py
|
33fc0cd2ae703ee7e50aee80222762718e1772f4
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 12,141
|
py
|
util.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web/web_client_api/ui/util.py
import typing
from account_helpers import AccountSettings
from account_helpers.AccountSettings import NEW_LOBBY_TAB_COUNTER
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.header.LobbyHeader import HEADER_BUTTONS_COUNTERS_CHANGED_EVENT
from gui.Scaleform.daapi.view.lobby.vehicle_preview.items_kit_helper import lookupItem, showItemTooltip, getCDFromId, canInstallStyle, showAwardsTooltip
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS as TC
from gui.Scaleform.daapi.view.lobby.header import battle_selector_items
from gui.server_events.bonuses import getNonQuestBonuses
from gui.shared import g_eventBus
from gui.shared.events import HasCtxEvent
from gui.shared.gui_items.dossier import dumpDossier
from gui.shared.gui_items.dossier.achievements.abstract import isRareAchievement
from gui.shared.utils import showInvitationInWindowsBar
from gui.shared.event_dispatcher import runSalesChain
from gui.shared.view_helpers import UsersInfoHelper
from gui.shared.utils.functions import makeTooltip
from helpers import time_utils
from helpers import dependency
from helpers.gui_utils import getMousePosition
from messenger.storage import storage_getter
from skeletons.gui.app_loader import IAppLoader
from skeletons.gui.game_control import IExternalLinksController
from skeletons.gui.goodies import IGoodiesCache
from skeletons.gui.shared import IItemsCache
from skeletons.gui.web import IWebController
from web.web_client_api import w2c, W2CSchema, Field, WebCommandException
from web.web_client_api.common import ItemPackType, ItemPackEntry, SPA_ID_TYPES
from gui.wgcg.utils.contexts import SPAAccountAttributeCtx, PlatformFetchProductListCtx
from web.web_client_api.ui.vehicle import _VehicleCustomizationPreviewSchema
from items import makeIntCompactDescrByID
from items.components.crew_books_constants import CrewBookCacheType
if typing.TYPE_CHECKING:
from gui.Scaleform.framework.entities.abstract.ToolTipMgrMeta import ToolTipMgrMeta
_COUNTER_IDS_MAP = {'shop': VIEW_ALIAS.LOBBY_STORE}
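# Field validators for the schemas below; each raises WebCommandException on
# unsupported input so malformed web commands never reach a handler.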
def _itemTypeValidator(itemType, _=None):
if not ItemPackType.hasValue(itemType):
raise WebCommandException('unsupported item type "{}"'.format(itemType))
return True
def _counterIdValidator(counterId, _=None):
if counterId not in _COUNTER_IDS_MAP:
raise WebCommandException('unsupported counter id "{}"'.format(counterId))
return True
def _counterIdsValidator(idList, _=None):
return all((_counterIdValidator(id) for id in idList))
class _SetCounterSchema(W2CSchema):
id = Field(required=True, type=basestring, validator=_counterIdValidator)
value = Field(required=True, type=(int, basestring))
class _GetCountersSchema(W2CSchema):
id_list = Field(required=False, type=list, validator=_counterIdsValidator)
class _RunTriggerChainSchema(W2CSchema):
trigger_chain_id = Field(required=True, type=basestring)
class _ShowToolTipSchema(W2CSchema):
tooltipType = Field(required=True, type=basestring)
itemId = Field(type=(int, basestring))
blockId = Field(type=basestring, validator=lambda value, _: value in ACHIEVEMENT_BLOCK.ALL)
isWulfTooltip = Field(type=bool)
class _ShowCustomTooltipSchema(W2CSchema):
header = Field(required=True, type=basestring)
body = Field(required=True, type=basestring)
class _ShowSimpleTooltipSchema(W2CSchema):
body = Field(required=True, type=basestring)
class _ShowItemTooltipSchema(W2CSchema):
id = Field(required=True, type=(basestring, int))
type = Field(required=True, type=basestring, validator=_itemTypeValidator)
count = Field(required=False, type=int)
extra = Field(required=False, type=dict)
class _ShowAwardsTooltipSchema(W2CSchema):
type = Field(required=True, type=basestring, validator=_itemTypeValidator)
data = Field(required=True, type=dict)
class _ChatAvailabilitySchema(W2CSchema):
receiver_id = Field(required=True, type=SPA_ID_TYPES)
class _AccountAttribute(W2CSchema):
attr_prefix = Field(required=True, type=basestring)
class _PlatformProductListSchema(W2CSchema):
storefront = Field(required=True, type=basestring)
wgid = Field(required=True, type=basestring)
language = Field(required=True, type=basestring)
additional_data = Field(required=True, type=dict)
country = Field(required=True, type=basestring)
response_fields = Field(required=True, type=dict)
response_fields_profile = Field(required=False, type=basestring)
category = Field(required=False, type=basestring)
product_codes = Field(required=False, type=list)
class _SelectBattleTypeSchema(W2CSchema):
battle_type = Field(required=True, type=basestring)
class _UrlInfoSchema(W2CSchema):
url = Field(required=True, type=basestring)
class _ShowAdditionalRewardsTooltipSchema(W2CSchema):
rewards = Field(required=True, type=dict)
x = Field(required=True, type=int)
y = Field(required=True, type=int)
class UtilWebApiMixin(object):
itemsCache = dependency.descriptor(IItemsCache)
goodiesCache = dependency.descriptor(IGoodiesCache)
_webCtrl = dependency.descriptor(IWebController)
_lnkCtrl = dependency.descriptor(IExternalLinksController)
def __init__(self):
super(UtilWebApiMixin, self).__init__()
self.__usersInfoHelper = UsersInfoHelper()
@w2c(_SetCounterSchema, 'set_counter')
def setCounterState(self, cmd):
alias = _COUNTER_IDS_MAP.get(cmd.id)
if alias is not None:
g_eventBus.handleEvent(HasCtxEvent(eventType=HEADER_BUTTONS_COUNTERS_CHANGED_EVENT, ctx={'alias': alias,
'value': cmd.value or ''}))
return
@w2c(_GetCountersSchema, 'get_counters')
def getCountersInfo(self, cmd):
ids = cmd.id_list or _COUNTER_IDS_MAP.keys()
counters = AccountSettings.getCounters(NEW_LOBBY_TAB_COUNTER)
return {id:counters.get(_COUNTER_IDS_MAP[id]) for id in ids if id in _COUNTER_IDS_MAP}
@w2c(W2CSchema, 'blink_taskbar')
def blinkTaskbar(self, _):
showInvitationInWindowsBar()
@w2c(_RunTriggerChainSchema, 'run_trigger_chain')
def runTriggerChain(self, cmd):
chainID = cmd.trigger_chain_id
runSalesChain(chainID, reloadIfRun=True, isStopForced=True)
@w2c(_ShowToolTipSchema, 'show_tooltip')
def showTooltip(self, cmd):
tooltipType = cmd.tooltipType
itemId = cmd.itemId
isWulfTooltip = cmd.isWulfTooltip
args = []
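        # The argument list expected by the tooltip manager depends on the
        # tooltip type: some take (itemId, count), some take only itemId, and
        # achievement tooltips need the full dossier context built below.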
withLongIntArgs = (TC.AWARD_SHELL,)
withLongOnlyArgs = (TC.AWARD_VEHICLE,
TC.AWARD_MODULE,
TC.INVENTORY_BATTLE_BOOSTER,
TC.BOOSTERS_BOOSTER_INFO,
TC.BADGE,
TC.TECH_CUSTOMIZATION_ITEM,
TC.EVENT_BATTLES_TICKET,
TC.EVENT_LOOTBOX)
if tooltipType in withLongIntArgs:
args = [itemId, 0]
elif tooltipType in withLongOnlyArgs:
args = [itemId]
elif tooltipType == TC.ACHIEVEMENT:
dossier = self.itemsCache.items.getAccountDossier()
            dossierCompDescr = dumpDossier(dossier)
achievement = dossier.getTotalStats().getAchievement((cmd.blockId, itemId))
args = [dossier.getDossierType(),
dossierCompDescr,
achievement.getBlock(),
cmd.itemId,
isRareAchievement(achievement)]
if isWulfTooltip:
mouseX, mouseY = getMousePosition()
self.__getTooltipMgr().onCreateWulfTooltip(tooltipType, args, mouseX, mouseY)
else:
self.__getTooltipMgr().onCreateTypedTooltip(tooltipType, args, 'INFO')
@w2c(_ShowItemTooltipSchema, 'show_item_tooltip')
def showItemTooltip(self, cmd):
itemType = cmd.type
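        # Crew books are special-cased: their compact descriptor is built from
        # the crew-book cache type instead of going through getCDFromId.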
if itemType == ItemPackType.CREW_BOOK:
itemId = makeIntCompactDescrByID('crewBook', CrewBookCacheType.CREW_BOOK, cmd.id)
else:
itemId = getCDFromId(itemType=cmd.type, itemId=cmd.id)
rawItem = ItemPackEntry(type=itemType, id=itemId, count=cmd.count or 1, extra=cmd.extra or {})
item = lookupItem(rawItem, self.itemsCache, self.goodiesCache)
showItemTooltip(self.__getTooltipMgr(), rawItem, item)
@w2c(_ShowAwardsTooltipSchema, 'show_awards_tooltip')
def showAwardsTooltip(self, cmd):
showAwardsTooltip(self.__getTooltipMgr(), cmd.type, cmd.data)
@w2c(_ShowCustomTooltipSchema, 'show_custom_tooltip')
def showCustomTooltip(self, cmd):
self.__getTooltipMgr().onCreateComplexTooltip(makeTooltip(header=cmd.header, body=cmd.body), 'INFO')
@w2c(_ShowSimpleTooltipSchema, 'show_simple_tooltip')
def showSimpleTooltip(self, cmd):
self.__getTooltipMgr().onCreateComplexTooltip(makeTooltip(body=cmd.body), 'INFO')
@w2c(W2CSchema, 'hide_tooltip')
def hideToolTip(self, _):
self.__getTooltipMgr().hide()
@w2c(W2CSchema, 'hide_window_tooltip')
def hideWulfToolTip(self, _):
self.__getTooltipMgr().onHideTooltip('')
@w2c(_ShowAdditionalRewardsTooltipSchema, 'show_additional_rewards_tooltip')
def showAdditionalRewardsTooltip(self, cmd):
bonuses = []
for key, value in cmd.rewards.iteritems():
bonuses.extend(getNonQuestBonuses(key, value))
self.__getTooltipMgr().onCreateWulfTooltip(TC.ADDITIONAL_REWARDS, [bonuses], cmd.x, cmd.y)
@w2c(W2CSchema, 'server_timestamp')
def getCurrentLocalServerTimestamp(self, _):
return time_utils.getCurrentLocalServerTimestamp()
@w2c(_PlatformProductListSchema, name='fetch_product_list')
def handleFetchProductList(self, cmd):
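        # Generator-based async handler: yield sends the web request and
        # resumes with its response; the final yield returns the result dict.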
ctx = PlatformFetchProductListCtx(cmd)
response = yield self._webCtrl.sendRequest(ctx=ctx)
if response.isSuccess():
yield {'result': response.getData()}
else:
yield {'error': self.__getErrorResponse(response.data, 'Unable to fetch product list.')}
@w2c(_AccountAttribute, name='get_account_attribute_by_prefix')
def handleGetAccountAttributeByPrefix(self, cmd):
ctx = SPAAccountAttributeCtx(cmd)
response = yield self._webCtrl.sendRequest(ctx=ctx)
if response.isSuccess():
yield {'result': response.getData()}
else:
yield {'error': self.__getErrorResponse(response.data, 'Unable to obtain account attrs.')}
@storage_getter('users')
def usersStorage(self):
return None
@w2c(_ChatAvailabilitySchema, 'check_if_chat_available')
def checkIfChatAvailable(self, cmd, ctx):
callback = ctx.get('callback')
receiverId = cmd.receiver_id
def isAvailable():
receiver = self.__usersInfoHelper.getContact(receiverId)
return receiver.hasValidName() and not receiver.isIgnored()
def onNamesReceivedCallback():
callback(isAvailable())
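        # If the receiver's name is not cached yet, answer asynchronously once
        # user info has been synced; otherwise answer right away.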
if not bool(self.__usersInfoHelper.getUserName(receiverId)):
self.__usersInfoHelper.onNamesReceived += onNamesReceivedCallback
self.__usersInfoHelper.syncUsersInfo()
else:
return isAvailable()
@w2c(_VehicleCustomizationPreviewSchema, 'can_install_style')
def canStyleBeInstalled(self, cmd):
result = canInstallStyle(cmd.style_id)
return {'can_install': result.canInstall}
def __getTooltipMgr(self):
appLoader = dependency.instance(IAppLoader)
return appLoader.getApp().getToolTipMgr()
@staticmethod
def __getErrorResponse(data, defaultError=''):
return data if data else {'description': defaultError}
@w2c(_SelectBattleTypeSchema, 'select_battle_type')
def selectBattleType(self, cmd):
battle_selector_items.getItems().select(cmd.battle_type, onlyActive=True)
@w2c(_UrlInfoSchema, 'get_url_info')
def getUrlInfo(self, cmd):
external = self._lnkCtrl.externalAllowed(cmd.url)
return {'external_allowed': external}
|
727152c0a05dabe91231c593cfca029ea11e5aa4
|
8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
|
/projectq/cengines/_cmdmodifier.py
|
1f8769304227f6138cb1f7c02b2248450eaee7a6
|
[
"Apache-2.0"
] |
permissive
|
ProjectQ-Framework/ProjectQ
|
2e342da0622d4b5d513c15504556e95d3d0e2aea
|
67c660ca18725d23ab0b261a45e34873b6a58d03
|
refs/heads/develop
| 2023-09-04T02:18:25.581119
| 2023-03-09T16:03:57
| 2023-03-09T16:03:57
| 77,520,796
| 886
| 335
|
Apache-2.0
| 2023-07-24T07:07:15
| 2016-12-28T09:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
_cmdmodifier.py
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A CommandModifier engine that can be used to apply a user-defined transformation to all incoming commands.
A CommandModifier engine can be used to, e.g., modify the tags of all commands which pass by (see the
AutoReplacer for an example).
"""
from ._basics import BasicEngine
class CommandModifier(BasicEngine):
"""
Compiler engine applying a user-defined transformation to all incoming commands.
CommandModifier is a compiler engine which applies a function to all incoming commands, sending on the resulting
command instead of the original one.
"""
def __init__(self, cmd_mod_fun):
"""
Initialize the CommandModifier.
Args:
cmd_mod_fun (function): Function which, given a command cmd, returns the command it should send instead.
Example:
.. code-block:: python
                def cmd_mod_fun(cmd):
                    cmd.tags += [MyOwnTag()]
                    return cmd
                compiler_engine = CommandModifier(cmd_mod_fun)
...
"""
super().__init__()
self._cmd_mod_fun = cmd_mod_fun
def receive(self, command_list):
"""
Receive a list of commands.
Receive a list of commands from the previous engine, modify all commands, and send them on to the next engine.
Args:
command_list (list<Command>): List of commands to receive and then (after modification) send on.
"""
new_command_list = [self._cmd_mod_fun(cmd) for cmd in command_list]
self.send(new_command_list)
|
ab646a6a29a0f613bb02b7bd295afbaf40834360
|
c94d31edf92c1f7a85e5f132ebeddfd2678801bf
|
/aiomisc/version.py
|
13502ee9a6d19214df3a9fde368b36046912ae7f
|
[
"MIT"
] |
permissive
|
aiokitchen/aiomisc
|
56abb33ec32be7278888f1a5e1fadca80aa95925
|
d9dcce797ac1be7ae7737c5478c152f7aec72ee7
|
refs/heads/master
| 2023-08-04T11:26:38.779711
| 2023-07-24T10:51:09
| 2023-07-24T10:51:09
| 129,414,094
| 314
| 25
|
MIT
| 2023-07-25T23:17:09
| 2018-04-13T14:33:37
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
version.py
|
# THIS FILE WAS GENERATED AUTOMATICALLY
# BY: poem-plugins "git" plugin
# NEVER EDIT THIS FILE MANUALLY
version_info = (17, 3, 0)
__version__ = "17.3.0"
|
3b12f1ba0443989fb1e46a2adc8e853bd9f1f7e5
|
e579188f958ae43ee5bbcce5e85a3494b829a6d3
|
/tests/plantuml/test_note.py
|
778e6f728c3161e94b822bfd8fb055c4fc2c0639
|
[
"MIT"
] |
permissive
|
pinetr2e/napkin
|
2797542abeadefff5ade9945961eba2e34208695
|
fb1da3d3b3b9fceb59a4adc1287a93393d0baa4c
|
refs/heads/master
| 2021-07-19T12:32:06.746980
| 2021-07-18T07:21:34
| 2021-07-18T07:21:34
| 27,030,611
| 203
| 9
|
MIT
| 2021-07-18T03:57:20
| 2014-11-23T11:43:17
|
Python
|
UTF-8
|
Python
| false
| false
| 915
|
py
|
test_note.py
|
def test_over_object(check_puml):
def f(c):
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.note('blah')
bar.func()
check_puml(f, """
participant foo
participant bar
note over foo : blah
foo -> bar : func()
""")
def test_multiple_line_text(check_puml):
def f(c):
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.note('blah\nblah')
bar.func()
check_puml(f, """
participant foo
participant bar
note over foo
blah
blah
end note
foo -> bar : func()
""")
def test_callee_caller(check_puml):
def f(c):
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func().note(callee='callee side', caller='caller side')
check_puml(f, """
participant foo
participant bar
foo -> bar : func()
note right : callee side
note left : caller side
""")
|
a7eb502d90f9a22dcc60e12859b3de4a9a805365
|
6dd5027d9f02b2c40c96fdea9796a4fba6ee7e46
|
/tests/polybench/gramschmidt.py
|
02aa5f77f250ca2047359b85d2c299df38b595c6
|
[
"Apache-2.0"
] |
permissive
|
cornell-zhang/heterocl
|
fb4fd3c9cdbb7c7ccbdb2a8a09f47b436200c8f6
|
b794409e68e326cafa6c3eaec2e3560ff066e129
|
refs/heads/main
| 2023-07-22T16:33:57.900104
| 2023-07-19T19:58:13
| 2023-07-19T19:58:13
| 114,906,951
| 312
| 111
|
Apache-2.0
| 2023-07-19T19:58:15
| 2017-12-20T16:13:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
gramschmidt.py
|
# Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import heterocl as hcl
def top_gramschmidt(M=30, N=20, dtype=hcl.Int(), target=None):
hcl.init(dtype)
# Rank N matrix
A = hcl.placeholder((M, N), "A")
# Orthogonal matrix
Q = hcl.placeholder((M, N), "Q")
# Upper triangular matrix
R = hcl.placeholder((N, N), "R")
def kernel_gramschmidt(A, Q, R):
def loop_1():
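            # Classical Gram-Schmidt over columns: compute the norm R[k][k],
            # normalize column k into Q, then subtract its projection from the
            # remaining columns of A.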
with hcl.for_(0, N, name="l1") as k:
nrm = hcl.scalar(0.0)
with hcl.for_(0, M, name="l2") as i:
nrm.v = nrm.v + A[i][k] * A[i][k]
R[k][k] = hcl.sqrt(nrm.v * 1.0)
with hcl.for_(0, M, name="l3") as i:
Q[i][k] = A[i][k] / R[k][k]
with hcl.for_(k + 1, N, name="l4") as j:
R[k][j] = 0.0
with hcl.for_(0, M, name="l5") as i:
R[k][j] = R[k][j] + Q[i][k] * A[i][j]
with hcl.for_(0, M, name="l6") as i:
A[i][j] = A[i][j] - Q[i][k] * R[k][j]
hcl.mutate((1,), lambda x: loop_1(), name="L1")
s = hcl.create_schedule([A, Q, R], kernel_gramschmidt)
#### Apply customizations ####
L1 = kernel_gramschmidt.L1
s[L1].pipeline(L1.l1)
#### Apply customizations ####
return hcl.build(s, target=target)
import numpy as np
import math as mt
def gramschmidt_golden(M, N, A, Q, R, DATA_TYPE):
    # NOTE: NDATA_TYPE_DICT is referenced but never defined in this file; the
    # mapping below is an assumed type-name -> numpy dtype table.
    NDATA_TYPE_DICT = {"int": np.int32, "float": np.float32, "double": np.float64}
    dtype = NDATA_TYPE_DICT[DATA_TYPE.lower()]
for k in range(N):
nrm = (dtype)(0.0)
for i in range(M):
nrm += A[i][k] * A[i][k]
R[k][k] = mt.sqrt(nrm)
for i in range(M):
Q[i][k] = A[i][k] / R[k][k]
for j in range(k + 1, N):
R[k][j] = (dtype)(0.0)
for i in range(M):
R[k][j] += Q[i][k] * A[i][j]
for i in range(M):
A[i][j] = A[i][j] - Q[i][k] * R[k][j]
|
c6263c299bc837778080b69e16a175a7f7130cfe
|
8d37f57da7c991381c9fc3d7d5d3c9f610ac10d4
|
/snoop/utils.py
|
478ab550c691e6fdc91acfd97b029f213d6b7fbc
|
[
"MIT"
] |
permissive
|
alexmojaki/snoop
|
b91615844ed9f8c9f34071774ed4aaeb197d3a6a
|
98102bde87d092640828590927ef144d069dc56f
|
refs/heads/master
| 2023-08-21T11:59:00.497603
| 2022-12-22T13:48:56
| 2022-12-22T13:48:56
| 186,476,740
| 975
| 40
|
MIT
| 2022-12-22T13:47:56
| 2019-05-13T18:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
utils.py
|
import ast
import inspect
import os
import sys
from itertools import chain
import six
from cheap_repr import cheap_repr, try_register_repr
PY34 = sys.version_info[:2] == (3, 4)
NO_ASTTOKENS = PY34
PYPY = 'pypy' in sys.version.lower()
NO_BIRDSEYE = NO_ASTTOKENS or PYPY
pp_name_prefix = '__deep_pp_hidden__'
file_reading_errors = (
IOError,
OSError,
ValueError # IronPython weirdness.
)
def shitcode(s):
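    # Keep only characters whose code point fits in 1..255; everything else
    # (including NUL) is replaced with '?'.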
return ''.join(
(c if (0 < ord(c) < 256) else '?') for c in s
)
def truncate(seq, max_length, middle):
if len(seq) > max_length:
left = (max_length - len(middle)) // 2
right = max_length - len(middle) - left
seq = seq[:left] + middle + seq[-right:]
return seq
def truncate_string(string, max_length):
return truncate(string, max_length, '...')
def truncate_list(lst, max_length):
return truncate(lst, max_length, ['...'])
def ensure_tuple(x, split=False):
if split and isinstance(x, six.string_types):
x = x.replace(',', ' ').split()
if not isinstance(x, (list, set, tuple)):
x = (x,)
return tuple(x)
def short_filename(code):
result = os.path.basename(code.co_filename)
if result.endswith('.pyc'):
result = result[:-1]
return result
def is_comprehension_frame(frame):
return frame.f_code.co_name in ('<listcomp>', '<dictcomp>', '<setcomp>')
def needs_parentheses(source):
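    # Decide by comparing bytecode: if '<source>.x' fails to compile or
    # compiles differently from '(<source>).x', the expression needs
    # parentheses to bind as a single unit.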
def code(s):
return compile(s.format(source), '<variable>', 'eval').co_code
try:
without_parens = code('{}.x')
except SyntaxError:
# Likely a multiline expression that needs parentheses to be valid
code('({})')
return True
else:
return without_parens != code('({}).x')
def with_needed_parentheses(source):
if needs_parentheses(source):
return '({})'.format(source)
else:
return source
REPR_TARGET_LENGTH = 100
def my_cheap_repr(x):
return cheap_repr(x, target_length=REPR_TARGET_LENGTH)
class ArgDefaultDict(dict):
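    # Like collections.defaultdict, except the factory is called with the
    # missing key as its argument.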
def __init__(self, factory):
super(ArgDefaultDict, self).__init__()
self.factory = factory
def __missing__(self, key):
result = self[key] = self.factory(key)
return result
def optional_numeric_label(i, lst):
if len(lst) == 1:
return ''
else:
return ' ' + str(i + 1)
def is_pathlike(x):
if hasattr(os, 'PathLike'):
return isinstance(x, os.PathLike)
return (
hasattr(x, '__fspath__') or
# Make a concession for older `pathlib` versions:
(hasattr(x, 'open') and
'path' in x.__class__.__name__.lower())
)
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
def iscoroutinefunction(_):
return False
try:
try_statement = ast.Try
except AttributeError:
try_statement = ast.TryExcept
try:
builtins = __import__("__builtin__")
except ImportError:
builtins = __import__("builtins")
try:
FormattedValue = ast.FormattedValue
except Exception:
class FormattedValue(object):
pass
def no_args_decorator(args, kwargs):
return len(args) == 1 and inspect.isfunction(args[0]) and not kwargs
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
if six.PY2:
# noinspection PyUnresolvedReferences
from collections import Mapping, Sequence, Set
else:
# noinspection PyUnresolvedReferences,PyCompatibility
from collections.abc import Mapping, Sequence, Set
class DirectRepr(str):
def __repr__(self):
return self
try:
from django.db.models import QuerySet
except ImportError:
class QuerySet(object):
pass
def _register_cheap_reprs():
def _sample_indices(length, max_length):
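        # Take indices from both ends of a long sequence so the repr shows the
        # head and tail with an ellipsis in the middle.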
if length <= max_length + 2:
return range(length)
else:
return chain(range(max_length // 2),
range(length - max_length // 2,
length))
@try_register_repr('pandas', 'Series')
def _repr_series_one_line(x, helper):
n = len(x)
if n == 0:
return repr(x)
newlevel = helper.level - 1
pieces = []
maxparts = _repr_series_one_line.maxparts
for i in _sample_indices(n, maxparts):
try:
k = x.index[i:i + 1].format(sparsify=False)[0]
except TypeError:
k = x.index[i:i + 1].format()[0]
v = x.iloc[i]
pieces.append('%s = %s' % (k, cheap_repr(v, newlevel)))
if n > maxparts + 2:
pieces.insert(maxparts // 2, '...')
return '; '.join(pieces)
_register_cheap_reprs()
|
f1a39ac9463a6eabdd788af044c1f84a7666df58
|
75b050312d5367b8ebfd287d78e90b02421a389c
|
/rl/monte_carlo_no_es.py
|
245ccb2f237a97bd0d21009de483e799764d2127
|
[] |
no_license
|
lazyprogrammer/machine_learning_examples
|
2fd0b0fc4da95a8a5940be62fca70bd44d148d43
|
ed82ac3cc886fc06060ed459bfd528a057256fbc
|
refs/heads/master
| 2023-09-01T07:17:11.476040
| 2023-08-21T06:57:28
| 2023-08-21T06:57:28
| 22,489,033
| 8,242
| 7,095
| null | 2023-06-29T11:51:47
| 2014-07-31T23:40:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,369
|
py
|
monte_carlo_no_es.py
|
# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
def epsilon_greedy(policy, s, eps=0.1):
p = np.random.random()
if p < (1 - eps):
return policy[s]
else:
return np.random.choice(ALL_POSSIBLE_ACTIONS)
def play_game(grid, policy, max_steps=20):
# start state
s = grid.reset()
# choose action
a = epsilon_greedy(policy, s)
states = [s]
actions = [a]
rewards = [0]
for _ in range(max_steps):
r = grid.move(a)
s = grid.current_state()
rewards.append(r)
states.append(s)
if grid.game_over():
break
else:
a = epsilon_greedy(policy, s)
actions.append(a)
# we want to return:
# states = [s(0), s(1), ..., s(T-1), s(T)]
# actions = [a(0), a(1), ..., a(T-1), ]
# rewards = [ 0, R(1), ..., R(T-1), R(T)]
return states, actions, rewards
def max_dict(d):
# returns the argmax (key) and max (value) from a dictionary
# put this into a function since we are using it so often
# find max val
max_val = max(d.values())
# find keys corresponding to max val
max_keys = [key for key, val in d.items() if val == max_val]
### slow version
# max_keys = []
# for key, val in d.items():
# if val == max_val:
# max_keys.append(key)
return np.random.choice(max_keys), max_val
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# try the negative grid too, to see if agent will learn to go past the "bad spot"
# in order to minimize number of steps
# grid = negative_grid(step_cost=-0.1)
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# initialize a random policy
policy = {}
for s in grid.actions.keys():
policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
# initialize Q(s,a) and returns
Q = {}
sample_counts = {}
state_sample_count = {}
states = grid.all_states()
for s in states:
if s in grid.actions: # not a terminal state
Q[s] = {}
sample_counts[s] = {}
state_sample_count[s] = 0
for a in ALL_POSSIBLE_ACTIONS:
Q[s][a] = 0
sample_counts[s][a] = 0
else:
# terminal state or state we can't otherwise get to
pass
# repeat until convergence
deltas = []
for it in range(10000):
if it % 1000 == 0:
print(it)
# generate an episode using pi
biggest_change = 0
states, actions, rewards = play_game(grid, policy)
# create a list of only state-action pairs for lookup
states_actions = list(zip(states, actions))
T = len(states)
G = 0
for t in range(T - 2, -1, -1):
# retrieve current s, a, r tuple
s = states[t]
a = actions[t]
# update G
G = rewards[t+1] + GAMMA * G
# check if we have already seen (s, a) ("first-visit")
if (s, a) not in states_actions[:t]:
old_q = Q[s][a]
sample_counts[s][a] += 1
lr = 1 / sample_counts[s][a]
Q[s][a] = old_q + lr * (G - old_q)
# update policy
policy[s] = max_dict(Q[s])[0]
# update state sample count
state_sample_count[s] += 1
# update delta
biggest_change = max(biggest_change, np.abs(old_q - Q[s][a]))
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
print("final policy:")
print_policy(policy, grid)
# find V
V = {}
for s, Qs in Q.items():
V[s] = max_dict(Q[s])[1]
print("final values:")
print_values(V, grid)
print("state_sample_count:")
state_sample_count_arr = np.zeros((grid.rows, grid.cols))
for i in range(grid.rows):
for j in range(grid.cols):
if (i, j) in state_sample_count:
state_sample_count_arr[i,j] = state_sample_count[(i, j)]
df = pd.DataFrame(state_sample_count_arr)
print(df)
|
798f9fb9826a055d77d18f9b54facd85e393ddfb
|
1ad268817e4f048815df6e7b7669c45257a37b0e
|
/tests/core/test_builder.py
|
fa2d9300de56ab3326bee2e915f1d1e49de6dfdf
|
[
"MIT"
] |
permissive
|
JDASoftwareGroup/kartothek
|
07c7f2fceb3dcee5cf8d0a6a93f4c1060eb0bcf4
|
1821ea5df60d4079d3911b3c2f17be11d8780e22
|
refs/heads/master
| 2023-05-26T11:43:04.781173
| 2021-12-10T09:15:19
| 2021-12-10T09:15:19
| 184,608,549
| 178
| 59
|
MIT
| 2023-05-15T21:56:50
| 2019-05-02T15:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,562
|
py
|
test_builder.py
|
# -*- coding: utf-8 -*-
import pytest
import simplejson
import kartothek.core._zmsgpack as msgpack
from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBuilder
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.partition import Partition
from kartothek.core.testing import TIME_TO_FREEZE_ISO
@pytest.mark.parametrize("explicit_partitions", [True, False])
def test_builder_empty(explicit_partitions, metadata_version, frozen_time):
creation_time = TIME_TO_FREEZE_ISO
expected = {
"dataset_uuid": "uuid",
"dataset_metadata_version": metadata_version,
"metadata": {"creation_time": creation_time},
}
if explicit_partitions:
expected["partitions"] = {}
key, result = DatasetMetadataBuilder(
"uuid",
metadata_version=metadata_version,
explicit_partitions=explicit_partitions,
).to_json()
result = simplejson.loads(result)
assert key == "uuid.by-dataset-metadata.json"
assert result == expected
def test_builder_msgpack(metadata_version, frozen_time):
creation_time = TIME_TO_FREEZE_ISO
expected = {
"dataset_uuid": "uuid",
"dataset_metadata_version": metadata_version,
"metadata": {"creation_time": creation_time},
"partitions": {},
}
key, result = DatasetMetadataBuilder(
"uuid", metadata_version=metadata_version
).to_msgpack()
result = msgpack.unpackb(result)
assert key == "uuid.by-dataset-metadata.msgpack.zstd"
assert result == expected
def test_builder_to_dataset(metadata_version, frozen_time):
expected = {
"dataset_uuid": "uuid",
"dataset_metadata_version": metadata_version,
"partitions": {"part_2": {"files": {"core": "uuid/core/part_2.parquet"}}},
"metadata": {"key": "value", "creation_time": TIME_TO_FREEZE_ISO},
"indices": {"col1": {"a": ["part1"], "b": ["part2"]}},
}
builder = DatasetMetadataBuilder("uuid", metadata_version=metadata_version)
part_2 = Partition("part_2", {"core": "uuid/core/part_2.parquet"})
builder.add_partition("part_2", part_2)
builder.add_metadata("key", "value")
builder.add_embedded_index(
"col1", ExplicitSecondaryIndex("col1", {"a": ["part1"], "b": ["part2"]})
)
result = builder.to_dataset()
expected_from_dict = DatasetMetadata.from_dict(expected)
assert result == expected_from_dict
def test_builder_modify_uuid_embedded_index(metadata_version, frozen_time):
expected = {
"dataset_uuid": "uuid_new",
"dataset_metadata_version": metadata_version,
"partitions": {"part_2": {"files": {"core": "uuid_new/core/part_2.parquet"}}},
"metadata": {"key": "value", "creation_time": TIME_TO_FREEZE_ISO},
"indices": {"col1": {"a": ["part1"], "b": ["part2"]}},
}
builder = DatasetMetadataBuilder("uuid", metadata_version=metadata_version)
part_2 = Partition("part_2", {"core": "uuid/core/part_2.parquet"})
builder.add_partition("part_2", part_2)
builder.add_metadata("key", "value")
builder.add_embedded_index(
"col1", ExplicitSecondaryIndex("col1", {"a": ["part1"], "b": ["part2"]})
)
builder.modify_uuid("uuid_new")
result = builder.to_dataset()
expected_from_dict = DatasetMetadata.from_dict(expected)
assert result == expected_from_dict
def test_builder_modify_uuid_external_index(metadata_version, frozen_time):
expected = {
"dataset_uuid": "uuid_new",
"dataset_metadata_version": metadata_version,
"partitions": {"part_2": {"files": {"core": "uuid_new/core/part_2.parquet"}}},
"metadata": {"key": "value", "creation_time": TIME_TO_FREEZE_ISO},
"indices": {"col1": "uuid_new.col1.by-dataset-index.parquet"},
}
builder = DatasetMetadataBuilder("uuid", metadata_version=metadata_version)
part_2 = Partition("part_2", {"core": "uuid/core/part_2.parquet"})
builder.add_partition("part_2", part_2)
builder.add_metadata("key", "value")
builder.add_external_index("col1")
builder.modify_uuid("uuid_new")
result = builder.to_dataset()
expected_from_dict = DatasetMetadata.from_dict(expected)
assert result == expected_from_dict
def test_builder_full(metadata_version, frozen_time):
expected = {
"dataset_uuid": "uuid",
"dataset_metadata_version": metadata_version,
"partitions": {
"run_id=1/L=1/P=1/part_1": {
"files": {
"core": "uuid/core/run_id=1/L=1/P=1/part_1.parquet",
"helper": "uuid/helper/run_id=1/L=1/P=1/part_1.parquet",
}
}
},
"metadata": {"key": "value", "creation_time": TIME_TO_FREEZE_ISO},
"indices": {
"col1": {
"a": ["run_id=1/L=1/P=1/part_1"],
"b": ["run_id=2/L=1/P=1/part_1"],
},
"col2": "uuid.col2.by-dataset-index.parquet",
},
"partition_keys": ["L", "P"],
}
builder = DatasetMetadataBuilder(
"uuid", metadata_version=metadata_version, partition_keys=["L", "P"]
)
part_2 = Partition(
label="run_id=1/L=1/P=1/part_1",
files={
"core": "uuid/core/run_id=1/L=1/P=1/part_1.parquet",
"helper": "uuid/helper/run_id=1/L=1/P=1/part_1.parquet",
},
)
builder.add_partition("run_id=1/L=1/P=1/part_1", part_2)
builder.add_metadata("key", "value")
builder.add_external_index("col2")
builder.add_embedded_index(
"col1",
ExplicitSecondaryIndex(
"col1", {"a": ["run_id=1/L=1/P=1/part_1"], "b": ["run_id=2/L=1/P=1/part_1"]}
),
)
key, result = builder.to_json()
result = simplejson.loads(result)
assert key == "uuid.by-dataset-metadata.json"
assert result == expected
def test_builder_empty_partition_keys(store, metadata_version, frozen_time):
expected = {
"dataset_uuid": "uuid",
"dataset_metadata_version": metadata_version,
"metadata": {"creation_time": TIME_TO_FREEZE_ISO},
"partition_keys": ["L", "P"],
"partitions": {},
}
builder = DatasetMetadataBuilder(
"uuid", metadata_version=metadata_version, partition_keys=["L", "P"]
)
key, result = builder.to_json()
result = simplejson.loads(result)
assert key == "uuid.by-dataset-metadata.json"
assert result == expected
result_from_dict = DatasetMetadata.load_from_dict(result, store).to_dict()
assert result_from_dict == expected
|
53ef24dbc8d3b85143a9a67aa5e4ea081a236a19
|
7d232f51e2330a4f537c50ede9c6bc023d656fd4
|
/examples/python/auth/async_customized_auth_server.py
|
71dbe544841392fa18feddccc2268aaad47f2490
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
grpc/grpc
|
6975af3ba6f07a6fe965b875a0c09abf18999a52
|
e4d598ab64aa54f1da78c6ed6133b741742d11d4
|
refs/heads/master
| 2023-08-31T01:10:22.666618
| 2023-08-30T22:35:17
| 2023-08-30T22:35:17
| 27,729,880
| 42,330
| 13,022
|
Apache-2.0
| 2023-09-14T21:54:19
| 2014-12-08T18:58:53
|
C++
|
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
async_customized_auth_server.py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server of the Python AsyncIO example of customizing authentication mechanism."""
import argparse
import asyncio
import logging
from typing import Awaitable, Callable, Tuple
import _credentials
import grpc
helloworld_pb2, helloworld_pb2_grpc = grpc.protos_and_services(
"helloworld.proto"
)
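# helloworld.proto is located and compiled at runtime (this requires grpcio-tools)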
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
_LISTEN_ADDRESS_TEMPLATE = "localhost:%d"
_SIGNATURE_HEADER_KEY = "x-signature"
class SignatureValidationInterceptor(grpc.aio.ServerInterceptor):
def __init__(self):
def abort(ignored_request, context: grpc.aio.ServicerContext) -> None:
context.abort(grpc.StatusCode.UNAUTHENTICATED, "Invalid signature")
self._abort_handler = grpc.unary_unary_rpc_method_handler(abort)
async def intercept_service(
self,
continuation: Callable[
[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]
],
handler_call_details: grpc.HandlerCallDetails,
) -> grpc.RpcMethodHandler:
# Example HandlerCallDetails object:
# _HandlerCallDetails(
# method=u'/helloworld.Greeter/SayHello',
# invocation_metadata=...)
method_name = handler_call_details.method.split("/")[-1]
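# toy signature scheme used by this example: the client is expected to send
# the reversed method name in the x-signature metadata header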
expected_metadata = (_SIGNATURE_HEADER_KEY, method_name[::-1])
if expected_metadata in handler_call_details.invocation_metadata:
return await continuation(handler_call_details)
else:
return self._abort_handler
class SimpleGreeter(helloworld_pb2_grpc.GreeterServicer):
async def SayHello(
self, request: helloworld_pb2.HelloRequest, unused_context
) -> helloworld_pb2.HelloReply:
return helloworld_pb2.HelloReply(message="Hello, %s!" % request.name)
async def run_server(port: int) -> Tuple[grpc.aio.Server, int]:
# Bind interceptor to server
server = grpc.aio.server(interceptors=(SignatureValidationInterceptor(),))
helloworld_pb2_grpc.add_GreeterServicer_to_server(SimpleGreeter(), server)
# Loading credentials
server_credentials = grpc.ssl_server_credentials(
(
(
_credentials.SERVER_CERTIFICATE_KEY,
_credentials.SERVER_CERTIFICATE,
),
)
)
# Pass down credentials
port = server.add_secure_port(
_LISTEN_ADDRESS_TEMPLATE % port, server_credentials
)
await server.start()
return server, port
async def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--port", nargs="?", type=int, default=50051, help="the listening port"
)
args = parser.parse_args()
server, port = await run_server(args.port)
logging.info("Server is listening at port :%d", port)
await server.wait_for_termination()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.run(main())
|
ef9410a8e2034eb279f228925cefce7226108e2b
|
d5e1591a6b96ec0e35ea223269da38b15fffe600
|
/tests/web/test_wsgi_application_yield.py
|
db7a2277657dfca522a77ba2e376d9b3e5a1dc04
|
[
"MIT"
] |
permissive
|
circuits/circuits
|
630cfa0fa13b19f84bfb96705912f3f6a26c69e1
|
87fb5a3380069d907d2ac500d13418b1abdeb2f2
|
refs/heads/master
| 2023-07-31T07:17:06.706151
| 2023-02-07T19:39:20
| 2023-02-07T19:39:20
| 12,450,349
| 310
| 80
|
NOASSERTION
| 2023-01-06T00:08:37
| 2013-08-29T03:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
test_wsgi_application_yield.py
|
#!/usr/bin/env python
from circuits.web import Controller
from circuits.web.wsgi import Application
from .helpers import urlopen
class Root(Controller):
def index(self):
yield "Hello "
yield "World!"
application = Application() + Root()
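# yielding from the controller streams the response body in parts; the test
# checks the parts are joined and chunked transfer encoding is not used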
def test(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
assert f.headers.get('Transfer-Encoding') != 'chunked'
|
a0d0767b0270913d16b01e0c44b158d56b80ac08
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/datamanager/dataquality/sampling_consume.py
|
1c80a2f813b9684723bb28c1c40cc74a9c5b3dcf
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,008
|
py
|
sampling_consume.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
# apply gevent monkey patching before importing modules that may create
# sockets or threads, as the gevent docs recommend patching as early as possible
from gevent import monkey
monkey.patch_all()
from dataquality.sample.task import SamplingConsumeTaskGreenlet
def sampling_consume_datasets(params):
logging.info("Start to execute sampling consume kafka task")
sampling_task_config = {
"consume_count": 100,  # number of records to sample on each pass
"consume_interval": 60,  # interval between sampling passes
"consume_hash_values": params.get("consume_hash_values", [0]),  # hash values of the sampled result tables
"consume_task_count": params.get("consume_task_count", 1),  # total number of sampling tasks
"produce_bk_biz_id": params.get("produce_bk_biz_id", 2),  # business ID the produced data belongs to
"produce_raw_data_name": params.get(
"produce_raw_data_name", "bkdata_sampled_datasets"
),
"produce_partition_count": params.get(
"produce_partition_count", 1
),  # partition count for the produced data
"filtered_data_sets": params.get("filtered_data_sets", []),  # datasets to be filtered out
"task_pool_size": 300,  # size of the sampling coroutine pool
"heat_score": None,  # heat score threshold for sampled datasets
"heat_rate": None,  # heat rate threshold for sampled datasets
"recent_data_time": 3600,  # datasets must have had data within this recent window
"multiple_partitions": False,
}
try:
task = SamplingConsumeTaskGreenlet(sampling_task_config)
task.start()
task.join()
except Exception as e:
logging.error(
"Raise exception({error}) when init sampling consume task".format(error=e),
exc_info=True,
)
|
803e89f64d2711a04a5b5486392d1941f03b1a07
|
bb3b304edd0c41247ea405ad9614417defc75076
|
/pythonz/commands/help.py
|
bafae83cf59d23ea2140875bf3744c10e47af3d6
|
[
"MIT"
] |
permissive
|
saghul/pythonz
|
9f2fbc7e1971de06ba92ae9ab1daddb5ba416a98
|
b22abb33b106e599c8ac56c8e82e067548bf0e3d
|
refs/heads/master
| 2023-08-27T15:09:20.312374
| 2022-07-08T08:36:08
| 2022-12-12T07:05:55
| 3,885,342
| 371
| 48
|
MIT
| 2022-12-12T07:05:56
| 2012-03-31T12:31:20
|
Python
|
UTF-8
|
Python
| false
| false
| 902
|
py
|
help.py
|
from pythonz.commands import Command, command_map
from pythonz.log import logger
class HelpCommand(Command):
name = "help"
usage = "%prog [COMMAND]"
summary = "Show available commands"
def run_command(self, options, args):
if args:
command = args[0]
if command not in command_map:
self.parser.error("Unknown command: `%s`" % command)
return
command = command_map[command]
command.parser.print_help()
return
self.parser.print_help()
logger.log("\nCommands available:")
commands = [command_map[key] for key in sorted(command_map.keys())]
for command in commands:
logger.log(" %s: %s" % (command.name, command.summary))
logger.log("\nFurther Instructions:")
logger.log(" https://github.com/saghul/pythonz")
HelpCommand()
|
2c6280f88fc8727893c0f8de73f842e3f5be991c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/squad_action_button_state_vo.py
|
cd8d2fe4053ae3656c257b5b9d7089823a9e53db
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
squad_action_button_state_vo.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/squad_action_button_state_vo.py
from gui.Scaleform.daapi.view.lobby.rally.action_button_state_vo import ActionButtonStateVO
from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
from gui.prb_control.settings import UNIT_RESTRICTION
_VALID_RESTRICTIONS = (UNIT_RESTRICTION.COMMANDER_VEHICLE_NOT_SELECTED, UNIT_RESTRICTION.UNIT_NOT_FULL, UNIT_RESTRICTION.NOT_READY_IN_SLOTS)
class SquadActionButtonStateVO(ActionButtonStateVO):
def _isEnabled(self, isValid, restriction):
return isValid or restriction in _VALID_RESTRICTIONS
def _getLabel(self):
if self._playerInfo.isReady:
label = CYBERSPORT.WINDOW_UNIT_NOTREADY
else:
label = CYBERSPORT.WINDOW_UNIT_READY
return label
def _getArenaStateStr(self):
return (CYBERSPORT.WINDOW_UNIT_MESSAGE_SQUADINBATTLE, {})
def _getReadyValidInSlotStateStr(self):
return (CYBERSPORT.WINDOW_UNIT_MESSAGE_GETNOTREADY, {})
def _getIdleStateStr(self):
return (CYBERSPORT.SQUADWINDOW_WAITINGFORBATTLE, {})
|
833911069c17c37df7529bce9de23c4f30db48b0
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/devhub/permissions.py
|
ac5610f605e16ee982d179cd09fbf8cc9de08331
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 836
|
py
|
permissions.py
|
from rest_framework.permissions import BasePermission
from olympia.users.utils import RestrictionChecker
class IsSubmissionAllowedFor(BasePermission):
"""
Like is_submission_allowed_for_request, but in Permission form for use in
the API. If the client is disallowed, a message property specifying the
reason is set on the permission instance to be returned to the client in
the 403 response.
"""
def has_permission(self, request, view):
checker = RestrictionChecker(request=request)
if not checker.is_submission_allowed():
self.message = checker.get_error_message()
self.code = 'permission_denied_restriction'
return False
return True
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
|
09bc0e8435b404b6c98bbdf7816c4c931ae8cb67
|
faf7ed9d56d408a261a69fcc9eb3b5f9c6e38873
|
/tests/test_customers.py
|
1c849b039cc5b892120765869a0f6da5155d6639
|
[
"Apache-2.0"
] |
permissive
|
alerta/alerta
|
53eaf5e491da46a8faae37824eebd02e92b25dac
|
5b572c3aa9b086f02e366e3f8a2173174b0c5a87
|
refs/heads/master
| 2023-08-19T19:21:55.272638
| 2023-06-30T08:38:25
| 2023-06-30T08:38:25
| 3,877,327
| 1,468
| 282
|
Apache-2.0
| 2023-09-04T17:32:20
| 2012-03-30T14:19:34
|
Python
|
UTF-8
|
Python
| false
| false
| 17,393
|
py
|
test_customers.py
|
import json
import unittest
from flask import g
from alerta.app import create_app, db, plugins
from alerta.exceptions import ApiError
from alerta.models.enums import Scope
from alerta.models.key import ApiKey
from alerta.utils.api import assign_customer
class CustomersTestCase(unittest.TestCase):
def setUp(self):
test_config = {
'TESTING': True,
'AUTH_REQUIRED': True,
'CUSTOMER_VIEWS': True,
'ADMIN_USERS': ['admin@alerta.io'],
'ALLOWED_EMAIL_DOMAINS': ['alerta.io', 'foo.com', 'bar.com']
}
self.app = create_app(test_config)
self.client = self.app.test_client()
self.foo_alert = {
'event': 'foo1',
'resource': 'foo1',
'environment': 'Production',
'service': ['Web']
}
self.bar_alert = {
'event': 'bar1',
'resource': 'bar1',
'environment': 'Production',
'service': ['Web']
}
with self.app.test_request_context('/'):
self.app.preprocess_request()
self.api_key = ApiKey(
user='admin@alerta.io',
scopes=[Scope.admin, Scope.read, Scope.write],
text='admin-key'
)
self.api_key.create()
self.admin_headers = {
'Authorization': f'Key {self.api_key.key}',
'Content-type': 'application/json'
}
def tearDown(self):
plugins.plugins.clear()
db.destroy()
def test_customers(self):
# add customer mappings
payload = {
'customer': 'Bar Corp',
'match': 'bar.com'
}
response = self.client.post('/customer', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
payload = {
'customer': 'Foo Bar Corp',
'match': 'foo@bar.com'
}
response = self.client.post('/customer', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
response = self.client.get('/customers', headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
# create users
payload = {
'name': 'Bar User',
'email': 'user@bar.com',
'password': 'b8rb8r',
'text': ''
}
response = self.client.post('/auth/signup', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 200, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data, 'Failed to create user')
self.bar_bearer_headers = {
'Authorization': f"Bearer {data['token']}",
'Content-type': 'application/json'
}
payload = {
'name': 'Foo Bar User',
'email': 'foo@bar.com',
'password': 'f00b8r',
'text': ''
}
response = self.client.post('/auth/signup', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data, 'Failed to create user')
self.foobar_bearer_headers = {
'Authorization': f"Bearer {data['token']}",
'Content-type': 'application/json'
}
# create API key for user@bar.com
payload = {
'user': 'user@bar.com',
'scopes': ['read', 'write'],
'text': ''
}
response = self.client.post('/key', data=json.dumps(payload),
content_type='application/json', headers=self.bar_bearer_headers)
self.assertEqual(response.status_code, 201, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data['key'], 'Failed to create read-write key')
self.bar_api_key_headers = {
'Authorization': f"Key {data['key']}",
'Content-type': 'application/json'
}
# create API keys for foo@bar.com
payload = {
'user': 'foo@bar.com',
'scopes': ['read', 'write'],
'text': '',
'customer': 'Foo Bar Corp'
}
response = self.client.post('/key', data=json.dumps(payload),
content_type='application/json', headers=self.foobar_bearer_headers)
self.assertEqual(response.status_code, 201, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data['key'], 'Failed to create read-write key')
self.foobar_api_key_headers = {
'Authorization': f"Key {data['key']}",
'Content-type': 'application/json'
}
payload = {
'user': 'foo@bar.com',
'scopes': ['read', 'write'],
'text': '',
'customer': 'Bar Corp'
}
response = self.client.post('/key', data=json.dumps(payload),
content_type='application/json', headers=self.foobar_bearer_headers)
self.assertEqual(response.status_code, 201, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data['key'], 'Failed to create read-write key')
self.foobar_bar_only_api_key_headers = {
'Authorization': f"Key {data['key']}",
'Content-type': 'application/json'
}
# get list of customers for users
response = self.client.get('/customers', headers=self.bar_api_key_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual([c['customer'] for c in data['customers']], ['Bar Corp'])
response = self.client.get('/customers', headers=self.foobar_api_key_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual([c['customer'] for c in data['customers']], ['Foo Bar Corp'])
# create alerts using API keys
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=self.bar_api_key_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Bar Corp')
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=self.foobar_api_key_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Foo Bar Corp')
response = self.client.post('/alert', data=json.dumps(self.foo_alert),
headers=self.foobar_bar_only_api_key_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Bar Corp')
response = self.client.post('/alert', data=json.dumps(self.foo_alert),
headers=self.foobar_bar_only_api_key_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Bar Corp')
# create alerts using Bearer tokens
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=self.bar_bearer_headers)
self.assertEqual(response.status_code, 201, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Bar Corp')
self.foo_alert['customer'] = 'Foo Bar Corp'
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=self.foobar_bearer_headers)
self.assertEqual(response.status_code, 201, response.data)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['customer'], 'Foo Bar Corp')
def test_blackouts(self):
# add customer mappings
payload = {
'customer': 'Foo Corp',
'match': 'foo.com'
}
response = self.client.post('/customer', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
payload = {
'customer': 'Bar Corp',
'match': 'bar.com'
}
response = self.client.post('/customer', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
# create users
payload = {
'name': 'Foo User',
'email': 'user@foo.com',
'password': 'f00f00',
'text': ''
}
response = self.client.post('/auth/signup', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data, 'Failed to create user')
foo_user_headers = {
'Authorization': f"Bearer {data['token']}",
'Content-type': 'application/json'
}
payload = {
'name': 'Bar User',
'email': 'user@bar.com',
'password': 'b8rb8r',
'text': ''
}
response = self.client.post('/auth/signup', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(data, 'Failed to create user')
bar_user_headers = {
'Authorization': f"Bearer {data['token']}",
'Content-type': 'application/json'
}
# create customer blackout by foo user
response = self.client.post(
'/blackout', data=json.dumps({'environment': 'Production'}), headers=foo_user_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
blackout_id = data['id']
# new alert by foo user should be suppressed
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=foo_user_headers)
self.assertEqual(response.status_code, 202)
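# 202 Accepted indicates the alert was received but suppressed by the blackout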
# new alert by bar user should not be suppressed
response = self.client.post('/alert', data=json.dumps(self.bar_alert), headers=bar_user_headers)
self.assertEqual(response.status_code, 201)
# delete blackout by id
response = self.client.delete('/blackout/' + blackout_id, headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
# create global blackout by admin user
response = self.client.post(
'/blackout', data=json.dumps({'environment': 'Production'}), headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
blackout_id = data['id']
# new alert by foo user should be suppressed
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=foo_user_headers)
self.assertEqual(response.status_code, 202)
# new alert by bar user should be suppressed
response = self.client.post('/alert', data=json.dumps(self.bar_alert), headers=bar_user_headers)
self.assertEqual(response.status_code, 202)
# delete blackout by id
response = self.client.delete('/blackout/' + blackout_id, headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
def test_assign_customer(self):
with self.app.test_request_context('/'):
self.app.preprocess_request()
# nothing wanted, assign one
g.customers = ['Customer1']
g.scopes = []
self.assertEqual(assign_customer(wanted=None), 'Customer1')
# nothing wanted, but too many, throw error
g.customers = ['Customer1', 'Customer2']
g.scopes = []
with self.assertRaises(ApiError) as e:
assign_customer(wanted=None)
exc = e.exception
self.assertEqual(str(exc), 'must define customer as more than one possibility')
# customer wanted, matches so allow
g.customers = ['Customer1']
g.scopes = []
self.assertEqual(assign_customer(wanted='Customer1'), 'Customer1')
# customer wanted, in list so allow
g.customers = ['Customer1', 'Customer2']
g.scopes = []
self.assertEqual(assign_customer(wanted='Customer2'), 'Customer2')
# customer wanted not in list, throw exception
g.customers = ['Customer1', 'Customer2']
g.scopes = []
with self.assertRaises(ApiError) as e:
assign_customer(wanted='Customer3')
exc = e.exception
self.assertEqual(str(exc), "not allowed to set customer to 'Customer3'")
# no customers, admin scope so allow
g.customers = []
g.scopes = ['admin']
self.assertEqual(assign_customer(wanted=None), None)
self.assertEqual(assign_customer(wanted='Customer1'), 'Customer1')
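# an admin may set any customer, but must still choose one explicitly when
# more than one is possible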
g.customers = ['Customer1', 'Customer2']
g.scopes = ['admin']
with self.assertRaises(ApiError) as e:
assign_customer(wanted=None)
exc = e.exception
self.assertEqual(str(exc), 'must define customer as more than one possibility')
self.assertEqual(assign_customer(wanted='Customer3'), 'Customer3')
# wrong scope
g.customers = ['Customer1']
g.scopes = ['read:keys', 'write:keys']
with self.assertRaises(ApiError) as e:
assign_customer(wanted='Customer2', permission=Scope.admin_keys)
exc = e.exception
self.assertEqual(str(exc), "not allowed to set customer to 'Customer2'")
# right scope
g.customers = ['Customer1']
g.scopes = ['admin:keys', 'read:keys', 'write:keys']
self.assertEqual(assign_customer(wanted='Customer2', permission=Scope.admin_keys), 'Customer2')
def test_invalid_customer(self):
self.foo_alert['customer'] = ''
response = self.client.post('/alert', data=json.dumps(self.foo_alert), headers=self.admin_headers)
self.assertEqual(response.status_code, 400)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['message'], 'customer must not be an empty string')
def test_edit_customer(self):
# add customer mappings
payload = {
'customer': 'Foo Corp',
'match': 'foo.com'
}
response = self.client.post('/customer', data=json.dumps(payload),
content_type='application/json', headers=self.admin_headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
customer_id = data['id']
# change customer name
update = {
'customer': 'Bar Corp'
}
response = self.client.put('/customer/' + customer_id, data=json.dumps(update), headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['status'], 'ok')
# check updates worked and didn't change anything else
response = self.client.get('/customer/' + customer_id, headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['customer']['customer'], 'Bar Corp')
self.assertEqual(data['customer']['match'], 'foo.com')
# change customer lookup
update = {
'match': 'bar.com'
}
response = self.client.put('/customer/' + customer_id, data=json.dumps(update), headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['status'], 'ok')
# check updates worked and didn't change anything else
response = self.client.get('/customer/' + customer_id, headers=self.admin_headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['customer']['customer'], 'Bar Corp')
self.assertEqual(data['customer']['match'], 'bar.com')
|
b3066ec98a7d68dcf0ac3152db9d52dd302f3a02
|
b20dcf585fcda752d567a17fe1e0eb3b3dcdbf81
|
/tllib/vision/__init__.py
|
89b87c7d98f1b40109b810d9df57d70ec10cd165
|
[
"MIT"
] |
permissive
|
thuml/Transfer-Learning-Library
|
1dc1402025ac842e361221f4fe1ed72bc36e9eac
|
ed03f0b11c16062e7faacb547f6eb9f83ce5f15e
|
refs/heads/master
| 2023-08-18T00:46:42.764139
| 2023-08-09T02:31:20
| 2023-08-09T02:31:20
| 240,494,185
| 2,786
| 527
|
MIT
| 2023-05-04T09:53:41
| 2020-02-14T11:33:06
|
Python
|
UTF-8
|
Python
| false
| false
| 47
|
py
|
__init__.py
|
__all__ = ['datasets', 'models', 'transforms']
|
424d0f08ab3ece2901ad0cbe6def858d1750956b
|
f779e1efe1f9b737dcb26fe712ee5c4392df30c0
|
/cmake/translation_tmpl.py
|
1ec22c6a5bbaa551c531bf80d186c96fd514eeed
|
[
"Apache-2.0",
"GPL-3.0-or-later",
"BSD-3-Clause",
"BSL-1.0",
"GPL-2.0-or-later",
"MIT",
"LicenseRef-scancode-public-domain",
"Autoconf-exception-3.0",
"FSFUL"
] |
permissive
|
znc/znc
|
0ef033e8271e4788ccb8a92b7015696eb527a27e
|
41032f895581c541fdbf6b1847e46699ae22aa77
|
refs/heads/master
| 2023-09-02T23:13:04.457263
| 2023-08-13T10:31:46
| 2023-08-13T10:31:46
| 465,681
| 1,552
| 439
|
Apache-2.0
| 2023-09-10T21:17:08
| 2010-01-09T23:44:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
translation_tmpl.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2004-2023 ZNC, see the NOTICE file for details.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
import re
parser = argparse.ArgumentParser(
description='Extract translatable strings from .tmpl files')
parser.add_argument('--directory', action='store')
parser.add_argument('--output', action='store')
args = parser.parse_args()
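# Matches <? FORMAT "text" ?> and <? PLURAL "singular" "plural" ?> tags, each
# with an optional CTX="..." context: group 1 is set only for PLURAL, group 2
# captures the context, group 3 the msgid and group 4 the plural form.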
pattern = re.compile(r'<\?\s*(?:FORMAT|(PLURAL))\s+(?:CTX="([^"]+?)"\s+)?"([^"]+?)"(?(1)\s+"([^"]+?)"|).*?\?>')
result = []
for fname in glob.iglob(args.directory + '/*.tmpl'):
fbase = os.path.basename(fname)
with open(fname) as f:
for linenum, line in enumerate(f):
for x in pattern.finditer(line):
text, plural, context = x.group(3), x.group(4), x.group(2)
result.append('#: {}:{}'.format(fbase, linenum + 1))
if context:
result.append('msgctxt "{}"'.format(context))
result.append('msgid "{}"'.format(text))
if plural:
result.append('msgid_plural "{}"'.format(plural))
result.append('msgstr[0] ""')
result.append('msgstr[1] ""')
else:
result.append('msgstr ""')
result.append('')
if result:
with open(args.output, 'w') as f:
for line in result:
print(line, file=f)
|
95b31fcd07d7549c1ce9fb1dae53453b456a0cb0
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/services/scheduling/supervisor/tests/test_assigner.py
|
37660694980af2d690b0c8cb3077d6b79d538f6f
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 13,613
|
py
|
test_assigner.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import asyncio
import pytest
from ..... import oscar as mo
from .....core import ChunkGraph
from .....tensor.fetch import TensorFetch
from .....tensor.arithmetic import TensorTreeAdd
from ....cluster import ClusterAPI
from ....cluster.core import NodeRole, NodeStatus
from ....cluster.uploader import NodeInfoUploaderActor
from ....cluster.supervisor.locator import SupervisorPeerLocatorActor
from ....cluster.supervisor.node_info import NodeInfoCollectorActor
from ....meta import MockMetaAPI
from ....session import MockSessionAPI
from ....subtask import Subtask
from ...supervisor import AssignerActor
from ...errors import NoMatchingSlots, NoAvailableBand
class MockNodeInfoCollectorActor(NodeInfoCollectorActor):
def __init__(self, timeout=None, check_interval=None, with_gpu=False):
super().__init__(timeout=timeout, check_interval=check_interval)
self.ready_bands = {
("address0", "numa-0"): 2,
("address1", "numa-0"): 2,
("address2", "numa-0"): 2,
("address3", "numa-0"): 2,
}
if with_gpu:
self.ready_bands[("address0", "gpu-0")] = 1
self.all_bands = self.ready_bands.copy()
async def update_node_info(
self, address, role, env=None, resource=None, detail=None, status=None
):
if "address" in address and status == NodeStatus.STOPPING:
del self.ready_bands[(address, "numa-0")]
await super().update_node_info(address, role, env, resource, detail, status)
def get_all_bands(self, role=None, statuses=None):
if statuses == {NodeStatus.READY}:
return self.ready_bands
else:
return self.all_bands
class FakeClusterAPI(ClusterAPI):
@classmethod
async def create(cls, address: str, **kw):
dones, _ = await asyncio.wait(
[
mo.create_actor(
SupervisorPeerLocatorActor,
"fixed",
address,
uid=SupervisorPeerLocatorActor.default_uid(),
address=address,
),
mo.create_actor(
MockNodeInfoCollectorActor,
with_gpu=kw.get("with_gpu", False),
uid=NodeInfoCollectorActor.default_uid(),
address=address,
),
mo.create_actor(
NodeInfoUploaderActor,
NodeRole.WORKER,
interval=kw.get("upload_interval"),
band_to_resource=kw.get("band_to_resource"),
use_gpu=kw.get("use_gpu", False),
uid=NodeInfoUploaderActor.default_uid(),
address=address,
),
]
)
for task in dones:
try:
task.result()
except mo.ActorAlreadyExist: # pragma: no cover
pass
api = await super().create(address=address)
await api.mark_node_ready()
return api
@pytest.fixture
async def actor_pool(request):
pool = await mo.create_actor_pool("127.0.0.1", n_process=0)
with_gpu = request.param
async with pool:
session_id = "test_session"
cluster_api = await FakeClusterAPI.create(
pool.external_address, with_gpu=with_gpu
)
await MockSessionAPI.create(pool.external_address, session_id=session_id)
meta_api = await MockMetaAPI.create(session_id, pool.external_address)
assigner_ref = await mo.create_actor(
AssignerActor,
session_id,
uid=AssignerActor.gen_uid(session_id),
address=pool.external_address,
)
try:
yield pool, session_id, assigner_ref, cluster_api, meta_api
finally:
await mo.destroy_actor(assigner_ref)
@pytest.mark.asyncio
@pytest.mark.parametrize("actor_pool", [False], indirect=True)
async def test_assign_cpu_tasks(actor_pool):
pool, session_id, assigner_ref, cluster_api, meta_api = actor_pool
input1 = TensorFetch(key="a", source_key="a", dtype=np.dtype(int)).new_chunk([])
input2 = TensorFetch(key="b", source_key="b", dtype=np.dtype(int)).new_chunk([])
input3 = TensorFetch(key="c", source_key="c", dtype=np.dtype(int)).new_chunk([])
result_chunk = TensorTreeAdd(args=[input1, input2, input3]).new_chunk(
[input1, input2, input3]
)
chunk_graph = ChunkGraph([result_chunk])
chunk_graph.add_node(input1)
chunk_graph.add_node(input2)
chunk_graph.add_node(input3)
chunk_graph.add_node(result_chunk)
chunk_graph.add_edge(input1, result_chunk)
chunk_graph.add_edge(input2, result_chunk)
chunk_graph.add_edge(input3, result_chunk)
await meta_api.set_chunk_meta(
input1, memory_size=200, store_size=200, bands=[("address0", "numa-0")]
)
await meta_api.set_chunk_meta(
input2, memory_size=400, store_size=400, bands=[("address1", "numa-0")]
)
await meta_api.set_chunk_meta(
input3, memory_size=400, store_size=400, bands=[("address2", "numa-0")]
)
await cluster_api.set_node_status(
node="address1", role=NodeRole.WORKER, status=NodeStatus.STOPPING
)
await cluster_api.set_node_status(
node="address3", role=NodeRole.WORKER, status=NodeStatus.STOPPING
)
subtask = Subtask("test_task", session_id, chunk_graph=chunk_graph)
[result] = await assigner_ref.assign_subtasks([subtask])
assert result in (("address0", "numa-0"), ("address2", "numa-0"))
subtask.expect_bands = [("address0", "numa-0")]
[result] = await assigner_ref.assign_subtasks([subtask])
assert result == ("address0", "numa-0")
subtask.expect_bands = [("address0", "numa-0"), ("address1", "numa-0")]
[result] = await assigner_ref.assign_subtasks([subtask])
assert result == ("address0", "numa-0")
subtask.expect_bands = [("address1", "numa-0")]
[result] = await assigner_ref.assign_subtasks([subtask])
assert result in (("address0", "numa-0"), ("address2", "numa-0"))
[result] = await assigner_ref.assign_subtasks(
[subtask], exclude_bands={("address0", "numa-0"), ("address2", "numa-0")}
)
assert result in (("address0", "numa-0"), ("address2", "numa-0"))
[result] = await assigner_ref.assign_subtasks(
[subtask], exclude_bands={("address0", "numa-0")}, random_when_unavailable=False
)
assert result == ("address2", "numa-0")
with pytest.raises(NoAvailableBand):
await assigner_ref.assign_subtasks(
[subtask],
exclude_bands={("address0", "numa-0"), ("address2", "numa-0")},
random_when_unavailable=False,
)
subtask.bands_specified = True
assert result == ("address2", "numa-0")
with pytest.raises(NoAvailableBand):
await assigner_ref.assign_subtasks([subtask])
subtask.bands_specified = False
result_chunk.op.gpu = True
subtask = Subtask("test_task", session_id, chunk_graph=chunk_graph)
with pytest.raises(NoMatchingSlots) as err:
await assigner_ref.assign_subtasks([subtask])
assert "gpu" in str(err.value)
@pytest.mark.asyncio
@pytest.mark.parametrize("actor_pool", [False], indirect=True)
async def test_assign_broadcaster(actor_pool):
pool, session_id, assigner_ref, cluster_api, meta_api = actor_pool
broadcaster = TensorFetch(key="x", source_key="x", dtype=np.dtype(int)).new_chunk(
[], is_broadcaster=True
)
input_chunk = TensorFetch(key="a", source_key="a", dtype=np.dtype(int)).new_chunk(
[]
)
result_chunk = TensorTreeAdd(args=[broadcaster, input_chunk]).new_chunk(
[broadcaster, input_chunk]
)
chunk_graph = ChunkGraph([result_chunk])
chunk_graph.add_node(broadcaster)
chunk_graph.add_node(input_chunk)
chunk_graph.add_node(result_chunk)
chunk_graph.add_edge(broadcaster, result_chunk)
chunk_graph.add_edge(input_chunk, result_chunk)
await meta_api.set_chunk_meta(
broadcaster, memory_size=1000, store_size=200, bands=[("address0", "numa-0")]
)
await meta_api.set_chunk_meta(
input_chunk, memory_size=200, store_size=200, bands=[("address1", "numa-0")]
)
subtask = Subtask("test_task", session_id, chunk_graph=chunk_graph)
[result] = await assigner_ref.assign_subtasks([subtask])
assert result == ("address1", "numa-0")
@pytest.mark.asyncio
@pytest.mark.parametrize("actor_pool", [True], indirect=True)
async def test_assign_gpu_tasks(actor_pool):
pool, session_id, assigner_ref, cluster_api, meta_api = actor_pool
input1 = TensorFetch(key="a", source_key="a", dtype=np.dtype(int)).new_chunk([])
input2 = TensorFetch(key="b", source_key="b", dtype=np.dtype(int)).new_chunk([])
result_chunk = TensorTreeAdd(args=[input1, input2], gpu=True).new_chunk(
[input1, input2]
)
chunk_graph = ChunkGraph([result_chunk])
chunk_graph.add_node(input1)
chunk_graph.add_node(input2)
chunk_graph.add_node(result_chunk)
chunk_graph.add_edge(input1, result_chunk)
chunk_graph.add_edge(input2, result_chunk)
await meta_api.set_chunk_meta(
input1, memory_size=200, store_size=200, bands=[("address0", "numa-0")]
)
await meta_api.set_chunk_meta(
input2, memory_size=200, store_size=200, bands=[("address0", "numa-0")]
)
subtask = Subtask("test_task", session_id, chunk_graph=chunk_graph)
[result] = await assigner_ref.assign_subtasks([subtask])
assert result[1].startswith("gpu")
@pytest.mark.asyncio
@pytest.mark.parametrize("actor_pool", [False], indirect=True)
async def test_reassign_subtasks(actor_pool):
pool, session_id, assigner_ref, cluster_api, meta_api = actor_pool
# ('address0', 'numa-0'), ('address1', 'numa-0'), ('address2', 'numa-0') are ready
await cluster_api.set_node_status(
node="address3", role=NodeRole.WORKER, status=NodeStatus.STOPPING
)
band_num_queued_subtasks = {("address0", "numa-0"): 3, ("address1", "numa-0"): 4}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks in (
{
("address1", "numa-0"): -1,
("address0", "numa-0"): -1,
("address2", "numa-0"): 2,
},
{
("address1", "numa-0"): -2,
("address0", "numa-0"): 0,
("address2", "numa-0"): 2,
},
{
("address1", "numa-0"): -2,
("address0", "numa-0"): -1,
("address2", "numa-0"): 3,
},
)
# ('address0', 'numa-0'), ('address2', 'numa-0') are ready
await cluster_api.set_node_status(
node="address1", role=NodeRole.WORKER, status=NodeStatus.STOPPING
)
band_num_queued_subtasks = {
("address0", "numa-0"): 9,
("address1", "numa-0"): 7,
("address2", "numa-0"): 0,
}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks in (
{
("address1", "numa-0"): -7,
("address0", "numa-0"): 3,
("address2", "numa-0"): 4,
},
{
("address1", "numa-0"): -7,
("address0", "numa-0"): 4,
("address2", "numa-0"): 3,
},
)
band_num_queued_subtasks = {("address0", "numa-0"): 9, ("address1", "numa-0"): 7}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks == {
("address1", "numa-0"): -7,
("address0", "numa-0"): -1,
("address2", "numa-0"): 8,
}
band_num_queued_subtasks = {("address1", "numa-0"): 8}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks == {
("address1", "numa-0"): -8,
("address0", "numa-0"): 4,
("address2", "numa-0"): 4,
}
band_num_queued_subtasks = {("address1", "numa-0"): 0}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks == {("address1", "numa-0"): 0}
# only ('address0', 'numa-0') is ready, i.e. there's only one band initially
await cluster_api.set_node_status(
node="address2", role=NodeRole.WORKER, status=NodeStatus.STOPPING
)
band_num_queued_subtasks = {("address0", "numa-0"): 8}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks == {("address0", "numa-0"): 0}
band_num_queued_subtasks = {("address1", "numa-0"): 8}
move_queued_subtasks = await assigner_ref.reassign_subtasks(
band_num_queued_subtasks
)
assert move_queued_subtasks == {
("address1", "numa-0"): -8,
("address0", "numa-0"): 8,
}
|
e01582bcc41605b5131008bef678ec2967aa6540
|
1e6d68e3bfe8cdc54cec1f28317eb4419ef44afa
|
/09_Deep_Learning_Prediction/ch09_09_RNN_StockPrediction.py
|
4d84aa211bd9b1b5deefa3eff88b654a997226a6
|
[] |
no_license
|
INVESTAR/StockAnalysisInPython
|
5018dd54937050e0c19a9a0d95f5b6bd8f3f9ba5
|
0077b3d7ad2d195b914eb2e6d4d831928f81504f
|
refs/heads/master
| 2023-02-24T23:20:53.570309
| 2023-01-23T14:25:51
| 2023-01-23T14:25:51
| 250,962,342
| 484
| 462
| null | 2023-02-16T01:32:14
| 2020-03-29T05:40:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
ch09_09_RNN_StockPrediction.py
|
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
import numpy as np
import matplotlib.pyplot as plt
from Investar import Analyzer
mk = Analyzer.MarketDB()
raw_df = mk.get_daily_price('삼성전자', '2018-05-04', '2020-01-22')
window_size = 10
data_size = 5
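# predict the next close from a sliding window of the past 10 days of
# 5 features (open, high, low, volume, close)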
def MinMaxScaler(data):
"""Scale values to the 0-1 range using the column-wise min and max."""
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
# add a tiny epsilon (1e-7) to the denominator to avoid division by zero
return numerator / (denominator + 1e-7)
dfx = raw_df[['open','high','low','volume', 'close']]
dfx = MinMaxScaler(dfx)
dfy = dfx[['close']]
x = dfx.values.tolist()
y = dfy.values.tolist()
data_x = []
data_y = []
for i in range(len(y) - window_size):
_x = x[i : i + window_size]  # the next day's close (index i + window_size) is excluded
_y = y[i + window_size]  # the next day's closing price
data_x.append(_x)
data_y.append(_y)
print(_x, "->", _y)
train_size = int(len(data_y) * 0.7)
train_x = np.array(data_x[0 : train_size])
train_y = np.array(data_y[0 : train_size])
test_size = len(data_y) - train_size
test_x = np.array(data_x[train_size : len(data_x)])
test_y = np.array(data_y[train_size : len(data_y)])
# build the model
model = Sequential()
model.add(LSTM(units=10, activation='relu', return_sequences=True, input_shape=(window_size, data_size)))
model.add(Dropout(0.1))
model.add(LSTM(units=10, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(units=1))
model.summary()
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(train_x, train_y, epochs=60, batch_size=30)
pred_y = model.predict(test_x)
# Visualising the results
plt.figure()
plt.plot(test_y, color='red', label='real SEC stock price')
plt.plot(pred_y, color='blue', label='predicted SEC stock price')
plt.title('SEC stock price prediction')
plt.xlabel('time')
plt.ylabel('stock price')
plt.legend()
plt.show()
# raw_df.close[-1] : dfy.close[-1] = x : pred_y[-1]
print("Tomorrow's SEC price :", raw_df.close[-1] * pred_y[-1] / dfy.close[-1], 'KRW')
|
a1f1de464dfada8927f0f12d5c4361cdc2016513
|
4b27890b4ce6e7fd0791eeba09e9ddd6bd23e658
|
/src/core/tests/test_distr_1d.py
|
53e0586c89d70a14a5158890393f8dc5bc96819f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
mitsuba-renderer/mitsuba3
|
f7008bd9056c8f4c2c5f93c3219e8330294a33b7
|
91b0b7e7c2732a131fac9149bf1db81429e946b0
|
refs/heads/master
| 2023-08-18T23:13:51.877647
| 2023-08-17T09:44:00
| 2023-08-18T13:08:41
| 362,484,572
| 1,510
| 171
|
NOASSERTION
| 2023-09-14T15:28:50
| 2021-04-28T13:50:41
|
C++
|
UTF-8
|
Python
| false
| false
| 9,356
|
py
|
test_distr_1d.py
|
import pytest
import drjit as dr
import mitsuba as mi
def test01_discr_empty(variants_all_backends_once):
# Test that operations involving the empty distribution throw
d = mi.DiscreteDistribution()
assert d.empty()
with pytest.raises(RuntimeError) as excinfo:
d.update()
assert 'empty distribution' in str(excinfo.value)
def test02_discr_zero_prob(variants_all_backends_once):
# Test that operations involving zero probability mass throw
with pytest.raises(RuntimeError) as excinfo:
mi.DiscreteDistribution([0, 0, 0])
assert "no probability mass found" in str(excinfo.value)
def test03_discr_neg_prob(variants_all_backends_once):
# Test that operations involving negative probability mass throw
with pytest.raises(RuntimeError) as excinfo:
mi.DiscreteDistribution([1, -1, 1])
assert "entries must be non-negative" in str(excinfo.value)
def test04_discr_basic(variants_vec_backends_once):
# Validate discrete distribution cdf/pmf against hand-computed reference
x = mi.DiscreteDistribution([1, 3, 2])
assert len(x) == 3
assert x.sum() == 6
assert dr.allclose(x.normalization(), 1.0 / 6.0)
assert x.pmf() == [1, 3, 2]
assert x.cdf() == [1, 4, 6]
assert x.eval_pmf([1, 2, 0]) == [3, 2, 1]
assert dr.allclose(
x.eval_pmf_normalized([1, 2, 0]),
mi.Float([3, 2, 1]) / 6.0
)
assert dr.allclose(
x.eval_cdf_normalized([1, 2, 0]),
mi.Float([4, 6, 1]) / 6.0
)
assert repr(x) == 'DiscreteDistribution[\n size = 3,' \
'\n sum = [6],\n pmf = [1, 3, 2]\n]'
x.pmf()[:] = [1, 1, 1]
x.update()
assert x.cdf() == [1, 2, 3]
assert x.sum() == 3
assert dr.allclose(x.normalization(), 1.0 / 3.0)
def test05_discr_sample(variants_vec_backends_once):
# Validate discrete distribution sampling against hand-computed reference
eps = 1e-7
x = mi.DiscreteDistribution([1, 3, 2])
assert x.sample([-1, 0, 1, 2]) == [0, 0, 2, 2]
assert x.sample([1 / 6.0 - eps, 1 / 6.0 + eps]) == [0, 1]
assert x.sample([4 / 6.0 - eps, 4 / 6.0 + eps]) == [1, 2]
assert dr.allclose(
x.sample_pmf([-1, 0, 1, 2]),
([0, 0, 2, 2], mi.Float([1, 1, 2, 2]) / 6)
)
assert dr.allclose(
x.sample_pmf([1 / 6.0 - eps, 1 / 6.0 + eps]),
([0, 1], mi.Float([1, 3]) / 6)
)
assert dr.allclose(
x.sample_pmf([4 / 6.0 - eps, 4 / 6.0 + eps]),
([1, 2], mi.Float([3, 2]) / 6)
)
assert dr.allclose(
x.sample_reuse([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
([0, 0, 0, 1], mi.Float([0, .5, 1, 0])),
atol=3 * eps
)
assert dr.allclose(
x.sample_reuse_pmf([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
([0, 0, 0, 1], mi.Float([0, .5, 1, 0]), mi.Float([1, 1, 1, 3]) / 6),
atol=3 * eps
)
def test06_discr_bruteforce(variants_vec_backends_once):
# Brute force validation of discrete distribution sampling, PCG32, UInt64
rng = mi.PCG32(initseq=dr.arange(mi.UInt64, 50))
for size in range(2, 20, 5):
for i in range(2, 50, 5):
density = mi.Float(rng.next_uint32_bounded(i)[0:size])
if dr.sum(density)[0] == 0:
continue
ddistr = mi.DiscreteDistribution(density)
x = dr.linspace(mi.Float, 0, 1, 20)
y = ddistr.sample(x)
z = dr.gather(mi.Float, ddistr.cdf(), y - 1, y > 0)
x *= ddistr.sum()
# Did we sample the right interval?
assert dr.all((x > z) | (dr.eq(x, 0) & (x >= z)))
def test07_discr_leading_trailing_zeros(variants_vec_backends_once):
# Check that sampling still works when there are zero-valued buckets
x = mi.DiscreteDistribution([0, 0, 1, 0, 1, 0, 0, 0])
index, pmf = x.sample_pmf([-100, 0, 0.5, 0.5 + 1e-6, 1, 100])
assert index == [2, 2, 2, 4, 4, 4]
assert pmf == [.5] * 6
def test08_cont_empty(variants_all_backends_once):
# Test that operations involving the empty distribution throw
d = mi.ContinuousDistribution()
assert d.empty()
d.range()[:] = [1, 2]
with pytest.raises(RuntimeError) as excinfo:
d.update()
assert 'needs at least two entries' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
mi.ContinuousDistribution([1, 2], [1])
assert 'needs at least two entries' in str(excinfo.value)
def test09_cont_empty_invalid_range(variants_all_backends_once):
# Test that invalid range specifications throw an exception
with pytest.raises(RuntimeError) as excinfo:
mi.ContinuousDistribution([1, 1], [1, 1])
assert 'invalid range' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
mi.ContinuousDistribution([2, 1], [1, 1])
assert 'invalid range' in str(excinfo.value)
def test10_cont_zero_prob(variants_all_backends_once):
# Test that operations involving zero probability mass throw
with pytest.raises(RuntimeError) as excinfo:
mi.ContinuousDistribution([1, 2], [0, 0, 0])
assert "no probability mass found" in str(excinfo.value)
def test11_cont_neg_prob(variants_all_backends_once):
# Test that operations involving negative probability mass throw
with pytest.raises(RuntimeError) as excinfo:
mi.ContinuousDistribution([1, 2], [1, -1, 1])
assert "entries must be non-negative" in str(excinfo.value)
def test12_cont_eval(variants_vec_backends_once):
# Test continuous 1D distribution pdf/cdf against hand-computed reference
d = mi.ContinuousDistribution([2, 3], [1, 2])
eps = 1e-6
assert dr.allclose(d.max(), 2.0)
assert dr.allclose(d.integral(), 3.0 / 2.0)
assert dr.allclose(d.normalization(), 2.0 / 3.0)
assert dr.allclose(
d.eval_pdf_normalized([1, 2 - eps, 2, 2.5, 3, 3 + eps, 4]),
[0, 0, 2.0 / 3.0, 1.0, 4.0 / 3.0, 0, 0]
)
assert dr.allclose(
d.eval_cdf_normalized([1, 2, 2.5, 3, 4]),
[0, 0, 5.0 / 12.0, 1, 1]
)
assert d.sample([0, 1]) == [2, 3]
x, pdf = d.sample_pdf([0, 0.5, 1])
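# inverse CDF at u = 0.5 for the linear pdf on [2, 3]: solve t + t**2 / 2 = 0.75,
# giving t = (sqrt(10) - 2) / 2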
dx = (dr.sqrt(10) - 2) / 2
assert x == [2, 2 + dx, 3]
assert dr.allclose(
pdf,
[2.0 / 3.0, (4 * dx + 2 * (1 - dx)) / 3.0, 4.0 / 3.0]
)
def test13_cont_func(variants_vec_backends_once):
# Test continuous 1D distribution integral against analytic result
x = dr.linspace(mi.Float, -2, 2, 513)
y = dr.exp(-dr.sqr(x))
d = mi.ContinuousDistribution([-2, 2], y)
assert dr.allclose(d.max(), 1.0)
assert dr.allclose(d.integral(), dr.sqrt(dr.pi) * dr.erf(2.0))
assert dr.allclose(d.eval_pdf([1]), [dr.exp(-1)])
assert dr.allclose(d.sample([0, 0.5, 1]), [-2, 0, 2])
def test14_irrcont_empty(variants_all_backends_once):
# Test that operations involving the empty distribution throw
d = mi.IrregularContinuousDistribution()
assert d.empty()
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([1], [1])
assert 'needs at least two entries' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([1, 2], [1])
assert 'size mismatch' in str(excinfo.value)
def test15_irrcont_empty_invalid_range(variants_all_backends_once):
# Test that invalid range specifications throw an exception
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([2, 1], [1, 1])
assert 'strictly increasing' in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([1, 1], [1, 1])
assert 'strictly increasing' in str(excinfo.value)
def test16_irrcont_zero_prob(variants_all_backends_once):
# Test that operations involving the empty distribution throw
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([1, 2, 3], [0, 0, 0])
assert "no probability mass found" in str(excinfo.value)
def test17_irrcont_neg_prob(variants_all_backends_once):
# Test that operations involving negative probability mass throw
with pytest.raises(RuntimeError) as excinfo:
mi.IrregularContinuousDistribution([1, 2, 3], [1, -1, 1])
assert "entries must be non-negative" in str(excinfo.value)
def test18_irrcont_simple_function(variants_vec_backends_once):
# Reference from Mathematica, mi.Float
d = mi.IrregularContinuousDistribution([1, 1.5, 1.8, 5], [1, 3, 0, 1])
assert dr.allclose(d.max(), 3.0)
assert dr.allclose(d.integral(), 3.05)
assert dr.allclose(
d.eval_pdf([0, 1, 2, 3, 4, 5, 6]),
[0, 1, 0.0625, 0.375, 0.6875, 1, 0]
)
assert dr.allclose(
d.eval_cdf([0, 1, 2, 3, 4, 5, 6]),
[0, 0, 1.45625, 1.675, 2.20625, 3.05, 3.05]
)
assert dr.allclose(
d.sample(dr.linspace(mi.Float, 0, 1, 11)),
[1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
3.35949, 3.8938, 4.31714, 4.67889, 5.]
)
assert dr.allclose(
d.sample_pdf(dr.linspace(mi.Float, 0, 1, 11)),
([1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
3.35949, 3.8938, 4.31714, 4.67889, 5.],
mi.Float([1., 1.85472, 2.42487, 2.88444, 2.14476, 0.216506,
0.48734, 0.654313, 0.786607, 0.899653, 1.])
* d.normalization())
)
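# A hand-check of the piecewise-linear (trapezoid) model these tests assume,
# in plain Python (no mitsuba needed; mirrors mi.ContinuousDistribution([2, 3], [1, 2])):
#
#   xs, ys = [2.0, 3.0], [1.0, 2.0]
#   integral = sum(0.5 * (ys[i] + ys[i + 1]) * (xs[i + 1] - xs[i])
#                  for i in range(len(xs) - 1))
#   assert integral == 1.5  # matches d.integral() in test12 above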
# === spcl/ncc | /task_utils.py | BSD-3-Clause ===
# NCC: Neural Code Comprehension
# https://github.com/spcl/ncc
# Copyright 2018 ETH Zurich
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==============================================================================
"""Helper variables and functions for NCC task training"""
import struct
import pickle
import os
import re
import wget
import zipfile
import rgx_utils as rgx
from inst2vec import inst2vec_preprocess as i2v_prep
from collections import defaultdict
from absl import flags
# Embedding and vocabulary file paths
flags.DEFINE_string('embeddings_file', 'published_results/emb.p',
'Path to the embeddings file')
flags.DEFINE_string('vocabulary_dir', 'published_results/vocabulary',
'Path to the vocabulary folder associated with those embeddings')
FLAGS = flags.FLAGS
########################################################################################################################
# Downloading data sets
########################################################################################################################
def download_and_unzip(url, dataset_name, data_folder):
"""
Download and unzip data set folder from url
:param url: from which to download
:param dataset_name: name of data set (for printing)
:param data_folder: folder in which to put the downloaded data
"""
print('Downloading', dataset_name, 'data set...')
if not os.path.exists(data_folder):
os.makedirs(data_folder)
data_zip = wget.download(url, out=data_folder)
print('\tunzipping...')
zip_ = zipfile.ZipFile(data_zip, 'r')
assert os.path.isdir(data_folder), data_folder
zip_.extractall(data_folder)
zip_.close()
print('\tdone')
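# A minimal usage sketch (URL and folder are illustrative placeholders):
#
#   download_and_unzip(
#       url='https://example.com/datasets/demo.zip',  # hypothetical URL
#       dataset_name='demo',
#       data_folder='data/demo')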
########################################################################################################################
# Reading, writing and dumping files
########################################################################################################################
def get_embeddings():
"""
Load embedding matrix from file
:return:
"""
assert os.path.exists(FLAGS.embeddings_file), "File " + FLAGS.embeddings_file + " does not exist"
print('Loading pre-trained embeddings from', FLAGS.embeddings_file)
with open(FLAGS.embeddings_file, 'rb') as f:
embedding_matrix = pickle.load(f)
vocabulary_size, embedding_dimension = embedding_matrix.shape
print('\n--- Loaded embeddings with vocabulary size : {}\n'.format(vocabulary_size),
'\t with embedding dimension: {}'.format(embedding_dimension),
'\n\tfrom file:', FLAGS.embeddings_file)
return embedding_matrix
########################################################################################################################
# Utils (Preprocess files)
########################################################################################################################
def inline_struct_types_in_file(data, dic):
"""
Inline structure types in the whole file
:param data: list of strings representing the content of one file
:param dic: dictionary ["structure name", "corresponding literal structure"]
:return: modified data
"""
# Remove all "... = type {..." statements since we don't need them anymore
data = [stmt for stmt in data if not re.match('.* = type ', stmt)]
# Inline the named structures throughout the file
for i in range(len(data)):
possible_struct = re.findall('(' + rgx.struct_name + ')', data[i])
if len(possible_struct) > 0:
for s in possible_struct:
                if s in dic and not re.match(re.escape(s) + r'\d* = ', data[i]):
# Replace them by their value in dictionary
data[i] = re.sub(re.escape(s) + rgx.struct_lookahead, dic[s], data[i])
return data
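# Illustrative before/after for the inlining above (assumed dictionary entry):
#
#   dic = {'%struct.Pair': '{ i32, i32 }'}
#   '%p = alloca %struct.Pair'  ->  '%p = alloca { i32, i32 }'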
def inline_struct_types_txt(data, data_with_structure_def):
"""
Inline structure types so that the code has no more named structures but only explicit aggregate types
And construct a dictionary of these named structures
:param data: input data as a list of files where each file is a list of strings
:return: data: modified input data
dictio: list of dictionaries corresponding to source files,
where each dictionary has entries ["structure name", "corresponding literal structure"]
"""
print('\tConstructing dictionary of structures and inlining structures...')
dictio = defaultdict(list)
# Loop on all files in the dataset
for i in range(len(data)):
# Construct a dictionary ["structure name", "corresponding literal structure"]
data_with_structure_def[i], dict_temp = \
i2v_prep.construct_struct_types_dictionary_for_file(data_with_structure_def[i])
# If the dictionary is empty
if not dict_temp:
found_type = False
for l in data[i]:
                if re.match(rgx.struct_name + r' = type (<?\{ .* \}|opaque|{})', l):
found_type = True
break
            assert not found_type, "Structures' dictionary is empty for file containing type definitions: \n" + \
                                   '\n'.join(data[i][:3]) + '\n'
# Use the constructed dictionary to substitute named structures
# by their corresponding literal structure throughout the program
data[i] = inline_struct_types_in_file(data[i], dict_temp)
# Add the entries of the dictionary to the big dictionary
for k, v in dict_temp.items():
dictio[k].append(v)
return data, dictio
def abstract_statements_from_identifiers_txt(data):
"""
Simplify lines of code by stripping them from their identifiers,
unnamed values, etc. so that LLVM IR statements can be abstracted from them
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
data = remove_local_identifiers(data)
data = remove_global_identifiers(data)
data = remove_labels(data)
data = replace_unnamed_values(data)
data = remove_index_types(data)
return data
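# Net effect of the abstraction pipeline on one LLVM IR statement (illustrative):
#
#   '%5 = add nsw i32 %3, 7'  ->  '<%ID> = add nsw i32 <%ID>, <INT>'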
def remove_local_identifiers(data):
"""
Replace all local identifiers (%## expressions) by "<%ID>"
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
print('\tRemoving local identifiers ...')
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j] = re.sub(rgx.local_id, "<%ID>", data[i][j])
return data
def remove_global_identifiers(data):
"""
    Replace all global identifiers (@## expressions) by "<@ID>"
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
print('\tRemoving global identifiers ...')
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j] = re.sub(rgx.global_id, "<@ID>", data[i][j])
return data
def remove_labels(data):
"""
Replace label declarations by token '<LABEL>'
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
print('\tRemoving labels ...')
for i in range(len(data)):
for j in range(len(data[i])):
if re.match(r'; <label>:\d+:?(\s+; preds = )?', data[i][j]):
data[i][j] = re.sub(r":\d+", ":<LABEL>", data[i][j])
data[i][j] = re.sub("<%ID>", "<LABEL>", data[i][j])
elif re.match(rgx.local_id_no_perc + r':(\s+; preds = )?', data[i][j]):
data[i][j] = re.sub(rgx.local_id_no_perc + ':', "<LABEL>:", data[i][j])
data[i][j] = re.sub("<%ID>", "<LABEL>", data[i][j])
if '; preds = ' in data[i][j]:
s = data[i][j].split(' ')
if s[-1][0] == ' ':
data[i][j] = s[0] + s[-1]
else:
data[i][j] = s[0] + ' ' + s[-1]
return data
def replace_unnamed_values(data):
"""
Replace unnamed_values by abstract token:
integers: <INT>
floating points: <FLOAT> (whether in decimal or hexadecimal notation)
string: <STRING>
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
print('\tRemoving immediate values ...')
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j] = re.sub(r' ' + rgx.immediate_value_float_hexa, " <FLOAT>", data[i][j]) # hexadecimal notation
data[i][j] = re.sub(r' ' + rgx.immediate_value_float_sci, " <FLOAT>", data[i][j]) # decimal / scientific
if re.match("<%ID> = extractelement", data[i][j]) is None and \
re.match("<%ID> = extractvalue", data[i][j]) is None and \
re.match("<%ID> = insertelement", data[i][j]) is None and \
re.match("<%ID> = insertvalue", data[i][j]) is None:
data[i][j] = re.sub(r'(?<!align)(?<!\[) ' + rgx.immediate_value_int, " <INT>", data[i][j])
data[i][j] = re.sub(rgx.immediate_value_string, " <STRING>", data[i][j])
return data
def remove_index_types(data):
"""
Replace the index type in expressions containing "extractelement" or "insertelement" by token <TYP>
:param data: input data as a list of files where each file is a list of strings
:return: modified input data
"""
print('\tRemoving index types ...')
for i in range(len(data)):
for j in range(len(data[i])):
if re.match("<%ID> = extractelement", data[i][j]) is not None or \
re.match("<%ID> = insertelement", data[i][j]) is not None:
data[i][j] = re.sub(r'i\d+ ', '<TYP> ', data[i][j])
return data
########################################################################################################################
# Transform a folder of raw IR into trainable data to be used as input data in tasks
########################################################################################################################
def llvm_ir_to_trainable(folder_ir):
####################################################################################################################
# Setup
assert len(folder_ir) > 0, "Please specify a folder containing the raw LLVM IR"
assert os.path.exists(folder_ir), "Folder not found: " + folder_ir
    # NOTE: replaces only the first occurrence of 'ir' anywhere in the path, so
    # this assumes inputs of the form '.../ir...' with no earlier 'ir' substring
    folder_seq = re.sub('ir', 'seq', folder_ir)
if len(folder_seq) > 0:
print('Preparing to write LLVM IR index sequences to', folder_seq)
if not os.path.exists(folder_seq):
os.makedirs(folder_seq)
# Get sub-folders if there are any
listing = os.listdir(folder_ir + '/')
folders_ir = list()
folders_seq = list()
found_subfolder = False
for path in listing:
if os.path.isdir(os.path.join(folder_ir, path)):
folders_ir.append(os.path.join(folder_ir, path))
folders_seq.append(os.path.join(folder_seq, path))
found_subfolder = True
if found_subfolder:
print('Found', len(folders_ir), 'subfolders')
else:
print('No subfolders found in', folder_ir)
folders_ir = [folder_ir]
folders_seq = [folder_seq]
# Loop over sub-folders
summary = ''
num_folders = len(folders_ir)
for i, raw_ir_folder in enumerate(folders_ir):
        out_dir = folders_seq[i] + '/'
        if not os.path.exists(out_dir) or len(os.listdir(out_dir)) == 0:
############################################################################################################
# Read files
# Read data from folder
print('\n--- Read data from folder ', raw_ir_folder)
raw_data, file_names = i2v_prep.read_data_files_from_folder(raw_ir_folder)
# Print data statistics and release memory
source_data_list, source_data = i2v_prep.data_statistics(raw_data, descr="reading data from source files")
del source_data_list
# Source code transformation: simple pre-processing
print('\n--- Pre-process code')
preprocessed_data, functions_declared_in_files = i2v_prep.preprocess(raw_data)
preprocessed_data_with_structure_def = raw_data
############################################################################################################
# Load vocabulary and cut off statements
# Vocabulary files
folder_vocabulary = FLAGS.vocabulary_dir
dictionary_pickle = os.path.join(folder_vocabulary, 'dic_pickle')
cutoff_stmts_pickle = os.path.join(folder_vocabulary, 'cutoff_stmts_pickle')
# Load dictionary and cutoff statements
print('\tLoading dictionary from file', dictionary_pickle)
with open(dictionary_pickle, 'rb') as f:
dictionary = pickle.load(f)
print('\tLoading cut off statements from file', cutoff_stmts_pickle)
with open(cutoff_stmts_pickle, 'rb') as f:
stmts_cut_off = pickle.load(f)
stmts_cut_off = set(stmts_cut_off)
############################################################################################################
# IR processing (inline structures, abstract statements)
# Source code transformation: inline structure types
print('\n--- Inline structure types')
processed_data, structures_dictionary = inline_struct_types_txt(preprocessed_data,
preprocessed_data_with_structure_def)
# Source code transformation: identifier processing (abstract statements)
print('\n--- Abstract statements from identifiers')
processed_data = abstract_statements_from_identifiers_txt(processed_data)
############################################################################################################
# Write indexed sequence of statements
seq_folder = folders_seq[i]
if not os.path.exists(seq_folder):
os.makedirs(seq_folder)
# Write indexed sequence of statements to file
unknown_counter_folder = list()
seq_length_folder = list()
file_counter = 0
for file in processed_data:
stmt_indexed = list() # Construct indexed sequence
unknown_counter = 0 # Reset unknown counter
for stmt in file:
# check whether this is a label, in which case we ignore it
if re.match(r'((?:<label>:)?(<LABEL>):|; <label>:<LABEL>)', stmt):
continue
# check whether this is an unknown
if stmt in stmts_cut_off:
stmt = rgx.unknown_token
unknown_counter += 1
# lookup and add to list
if stmt not in dictionary.keys():
print("NOT IN DICTIONARY:", stmt)
stmt = rgx.unknown_token
unknown_counter += 1
stmt_indexed.append(dictionary[stmt])
# Write to csv
file_name_csv = os.path.join(seq_folder, file_names[file_counter][:-3] + '_seq.csv')
file_name_rec = os.path.join(seq_folder, file_names[file_counter][:-3] + '_seq.rec')
with open(file_name_csv, 'w') as csv, open(file_name_rec, 'wb') as rec:
for ind in stmt_indexed:
csv.write(str(ind) + '\n')
rec.write(struct.pack('I', int(ind)))
print('\tPrinted data pairs to file', file_name_csv)
print('\tPrinted data pairs to file', file_name_rec)
print('\t#UNKS', unknown_counter)
# Increment counter
unknown_counter_folder.append(unknown_counter)
seq_length_folder.append(len(stmt_indexed))
file_counter += 1
# Print stats
            out = '\n\nFolder: ' + raw_ir_folder + ' (' + str(i + 1) + '/' + str(num_folders) + ')'
out += '\n\nNumber of files processed: ' + str(len(seq_length_folder))
out += '\n--- Sequence length stats:'
out += '\nMin seq length : {}'.format(min(seq_length_folder))
out += '\nMax seq length : {}'.format(max(seq_length_folder))
out += '\nAvg seq length : {}'.format(sum(seq_length_folder) / len(seq_length_folder))
out += '\nTotal number stmts: {}'.format(sum(seq_length_folder))
out += '\n--- UNK count stats:'
out += '\nMin #UNKS in a sequence : {}'.format(min(unknown_counter_folder))
out += '\nMax #UNKS in a sequence : {}'.format(max(unknown_counter_folder))
out += '\nAvg #UNKS in a sequence : {}'.format(sum(unknown_counter_folder) / len(unknown_counter_folder))
out += '\nSum #UNKS in all sequence: {} / {}, {}%'.format(sum(unknown_counter_folder),
sum(seq_length_folder),
sum(unknown_counter_folder) * 100 / sum(
seq_length_folder))
print(out)
summary += '\n' + out
# When all is done, print a summary:
print(summary)
return folder_seq
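# A minimal usage sketch (folder name is illustrative; FLAGS must be parsed first):
#
#   seq_folder = llvm_ir_to_trainable('data/ir_train')
#   # writes one *_seq.csv / *_seq.rec pair per input file under 'data/seq_train'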
# === projectacrn/acrn-hypervisor | /misc/config_tools/scenario_config/validator.py | BSD-3-Clause ===
#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os
import argparse
import logging
from copy import copy
from collections import namedtuple
import re
try:
import elementpath
import elementpath_overlay
from elementpath.xpath_context import XPathContext
import xmlschema
except ImportError:
logging.error("Python package `xmlschema` is not installed.\n" +
"The scenario XML file will NOT be validated against the schema, which may cause build-time or runtime errors.\n" +
"To enable the validation, install the python package by executing: pip3 install xmlschema.")
sys.exit(0)
from pipeline import PipelineObject, PipelineStage, PipelineEngine
from schema_slicer import SlicingSchemaByVMTypeStage
from default_populator import DefaultValuePopulatingStage
def existing_file_type(parser):
def aux(arg):
if not os.path.exists(arg):
parser.error(f"can't open {arg}: No such file or directory")
elif not os.path.isfile(arg):
parser.error(f"can't open {arg}: Is not a file")
else:
return arg
return aux
def log_level_type(parser):
def aux(arg):
arg = arg.lower()
if arg in ["critical", "error", "warning", "info", "debug"]:
return arg
else:
parser.error(f"{arg} is not a valid log level")
return aux
class ValidationError(dict):
logging_fns = {
"critical": logging.critical,
"error": logging.error,
"warning": logging.warning,
"info": logging.info,
"debug": logging.debug,
}
def __init__(self, paths, message, severity):
super().__init__(paths = paths, message = message, severity = severity)
def __str__(self):
return f"{', '.join(self['paths'])}: {self['message']}"
def log(self):
try:
self.logging_fns[self['severity']](self)
except KeyError:
logging.debug(self)
class ScenarioValidator:
def __init__(self, schema_etree, datachecks_etree):
"""Initialize the validator with preprocessed schemas in ElementTree."""
self.schema = xmlschema.XMLSchema11(schema_etree)
self.datachecks = xmlschema.XMLSchema11(datachecks_etree) if datachecks_etree else None
def check_syntax(self, scenario_etree):
errors = []
it = self.schema.iter_errors(scenario_etree)
for error in it:
# Syntactic errors are always critical.
e = ValidationError([error.path], error.reason, "critical")
e.log()
errors.append(e)
return errors
def check_semantics(self, board_etree, scenario_etree):
errors = []
if self.datachecks:
unified_node = copy(scenario_etree.getroot())
parent_map = {c : p for p in unified_node.iter() for c in p}
unified_node.extend(board_etree.getroot())
it = self.datachecks.iter_errors(unified_node)
for error in it:
e = self.format_error(unified_node, parent_map, error)
e.log()
errors.append(e)
return errors
@staticmethod
def format_paths(unified_node, parent_map, report_on, variables):
elems = elementpath.select(unified_node, report_on, variables = variables, parser = elementpath.XPath2Parser)
paths = []
for elem in elems:
path = []
while elem is not None:
path_segment = elem.tag
parent = parent_map.get(elem, None)
if parent is not None:
children = parent.findall(elem.tag)
if len(children) > 1:
path_segment += f"[{children.index(elem) + 1}]"
path.insert(0, path_segment)
elem = parent
paths.append(f"/{'/'.join(path)}")
return paths
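    # Illustrative output: with a report-on expression selecting the name node of
    # the second VM, format_paths returns something like ['/acrn-config/vm[2]/name'].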
@staticmethod
def get_counter_example(error):
assertion = error.validator
if not isinstance(assertion, xmlschema.validators.assertions.XsdAssert):
return {}
elem = error.obj
context = XPathContext(elem, variables={'value': None})
context.counter_example = {}
result = assertion.token.evaluate(context)
if result == False:
return context.counter_example
else:
return {}
@staticmethod
def format_error(unified_node, parent_map, error):
def format_node(n):
if isinstance(n, str):
return n
elif isinstance(n, (int, float)):
return str(n)
elif isinstance(n, object) and n.__class__.__name__.endswith("Element"):
return n.text
else:
return str(n)
anno = error.validator.annotation
counter_example = ScenarioValidator.get_counter_example(error)
variables = {k.obj.source.strip("$"): v for k,v in counter_example.items()}
paths = ScenarioValidator.format_paths(unified_node, parent_map, anno.elem.get("{https://projectacrn.org}report-on"), variables)
description = anno.elem.find("{http://www.w3.org/2001/XMLSchema}documentation").text
severity = anno.elem.get("{https://projectacrn.org}severity")
expr_regex = re.compile("{[^{}]*}")
exprs = set(expr_regex.findall(description))
for expr in exprs:
result = elementpath.select(unified_node, expr.strip("{}"), variables = variables, parser = elementpath.XPath2Parser)
if isinstance(result, list):
if len(result) == 1:
value = format_node(result[0])
elif len(result) > 1:
s = ', '.join(map(format_node, result))
value = f"[{s}]"
else:
value = "{unknown}"
else:
value = str(result)
description = description.replace(expr, value)
return ValidationError(paths, description, severity)
class ValidatorConstructionStage(PipelineStage):
    # The schema etree may still be useful for schema-based transformation. Do not consume it.
uses = {"schema_etree"}
consumes = {"datachecks_etree"}
provides = {"validator"}
def run(self, obj):
validator = ScenarioValidator(obj.get("schema_etree"), obj.get("datachecks_etree"))
obj.set("validator", validator)
class ValidatorConstructionByFileStage(PipelineStage):
uses = {"schema_path", "datachecks_path"}
provides = {"validator"}
def run(self, obj):
validator = ScenarioValidator(obj.get("schema_path"), obj.get("datachecks_path"))
obj.set("validator", validator)
class SyntacticValidationStage(PipelineStage):
provides = {"syntactic_errors"}
def __init__(self, etree_tag = "scenario"):
self.etree_tag = f"{etree_tag}_etree"
self.uses = {"validator", self.etree_tag}
def run(self, obj):
errors = obj.get("validator").check_syntax(obj.get(self.etree_tag))
obj.set("syntactic_errors", errors)
class SemanticValidationStage(PipelineStage):
uses = {"validator", "board_etree", "scenario_etree"}
provides = {"semantic_errors"}
def run(self, obj):
errors = obj.get("validator").check_semantics(obj.get("board_etree"), obj.get("scenario_etree"))
obj.set("semantic_errors", errors)
class ReportValidationResultStage(PipelineStage):
consumes = {"board_etree", "scenario_etree", "syntactic_errors", "semantic_errors"}
provides = {"nr_all_errors"}
def run(self, obj):
board_name = obj.get("board_etree").getroot().get("board")
scenario_name = obj.get("scenario_etree").getroot().get("scenario")
nr_critical = len(obj.get("syntactic_errors"))
nr_error = len(list(filter(lambda e: e["severity"] == "error", obj.get("semantic_errors"))))
nr_warning = len(list(filter(lambda e: e["severity"] == "warning", obj.get("semantic_errors"))))
if nr_critical > 0 or nr_error > 0:
logging.error(f"Board {board_name} and scenario {scenario_name} are inconsistent: {nr_critical} syntax errors, {nr_error} data errors, {nr_warning} warnings.")
elif nr_warning > 0:
logging.warning(f"Board {board_name} and scenario {scenario_name} are potentially inconsistent: {nr_warning} warnings.")
else:
logging.info(f"Board {board_name} and scenario {scenario_name} are valid and consistent.")
obj.set("nr_all_errors", nr_critical + nr_error + nr_warning)
def validate_one(validation_pipeline, pipeline_obj, board_xml, scenario_xml):
pipeline_obj.set("board_path", board_xml)
pipeline_obj.set("scenario_path", scenario_xml)
validation_pipeline.run(pipeline_obj)
return pipeline_obj.consume("nr_all_errors")
def validate_board(validation_pipeline, pipeline_obj, board_xml):
board_dir = os.path.dirname(board_xml)
nr_all_errors = 0
for f in os.listdir(board_dir):
if not f.endswith(".xml"):
continue
if f == os.path.basename(board_xml) or "launch" in f:
continue
nr_all_errors += validate_one(validation_pipeline, pipeline_obj, board_xml, os.path.join(board_dir, f))
return nr_all_errors
def validate_all(validation_pipeline, pipeline_obj, data_dir):
nr_all_errors = 0
for f in os.listdir(data_dir):
board_xml = os.path.join(data_dir, f, f"{f}.xml")
if os.path.isfile(board_xml):
nr_all_errors += validate_board(validation_pipeline, pipeline_obj, board_xml)
else:
logging.warning(f"Cannot find a board XML under {os.path.join(data_dir, f)}")
return nr_all_errors
def main(args):
from lxml_loader import LXMLLoadStage
validator_construction_pipeline = PipelineEngine(["schema_path", "datachecks_path"])
validator_construction_pipeline.add_stages([
LXMLLoadStage("schema"),
LXMLLoadStage("datachecks"),
SlicingSchemaByVMTypeStage(),
ValidatorConstructionStage(),
])
validation_pipeline = PipelineEngine(["board_path", "scenario_path", "schema_etree", "validator"])
validation_pipeline.add_stages([
LXMLLoadStage("board"),
LXMLLoadStage("scenario"),
DefaultValuePopulatingStage(),
SyntacticValidationStage(),
SemanticValidationStage(),
ReportValidationResultStage(),
])
obj = PipelineObject(schema_path = args.schema, datachecks_path = args.datachecks)
validator_construction_pipeline.run(obj)
if args.board and args.scenario:
nr_all_errors = validate_one(validation_pipeline, obj, args.board, args.scenario)
elif args.board:
nr_all_errors = validate_board(validation_pipeline, obj, args.board)
else:
nr_all_errors = validate_all(validation_pipeline, obj, os.path.join(config_tools_dir, "data"))
sys.exit(1 if nr_all_errors > 0 else 0)
if __name__ == "__main__":
config_tools_dir = os.path.join(os.path.dirname(__file__), "..")
schema_dir = os.path.join(config_tools_dir, "schema")
parser = argparse.ArgumentParser()
parser.add_argument("board", nargs="?", type=existing_file_type(parser), help="the board XML file to be validated")
parser.add_argument("scenario", nargs="?", type=existing_file_type(parser), help="the scenario XML file to be validated")
parser.add_argument("--loglevel", default="warning", type=log_level_type(parser), help="choose log level, e.g. debug, info, warning or error")
parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs")
parser.add_argument("--datachecks", default=os.path.join(schema_dir, "datachecks.xsd"), help="the XML schema that defines the semantic rules against board and scenario data")
args = parser.parse_args()
logging.basicConfig(level=args.loglevel.upper())
main(args)
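# Example invocations (paths are illustrative):
#
#   python3 validator.py boards/my_board.xml scenarios/my_scenario.xml
#   python3 validator.py --loglevel debug boards/my_board.xml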
# === symengine/symengine.py | /benchmarks/expand4_sage.py | MIT ===
print("import...")
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print("constructing expression...")
for i in range(1, 351):
e *= (i+x)**3
print("running benchmark...")
t1 = clock()
f = e.expand()
t2 = clock()
print("Total time:", t2-t1, "s")
# === MyRobotLab/myrobotlab | /src/main/resources/resource/Esp8266/Esp8266.py | Apache-2.0 ===
# start the service
esp8266 = runtime.start("esp8266","Esp8266")
# === JetBrains/intellij-community | /python/testData/pyi/resolve/canonicalName/pkg/__init__.pyi | Apache-2.0 ===
from .mod import Exported as Exported
from .mod import Internal
# === ivanmmarkovic/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | /searching/binary-search-recursive-pointers.py | MIT ===
def binary_search(nums: list, target: int) -> bool:
    def binary_search_helper(numbers: list, element: int, start: int, end: int) -> bool:
        if start > end:
            return False
        else:
            # start + (end - start) // 2 mirrors the overflow-safe midpoint idiom
            # from fixed-width-integer languages; Python ints would not overflow
            midpoint: int = start + (end - start) // 2
            if numbers[midpoint] == element:
                return True
            else:
                if numbers[midpoint] > element:
                    return binary_search_helper(numbers, element, start, midpoint - 1)
                else:
                    return binary_search_helper(numbers, element, midpoint + 1, end)
return binary_search_helper(nums, target, 0, len(nums) - 1)
testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42]
print(binary_search(testlist, 3))
print(binary_search(testlist, 13))
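# Additional checks (42 is the last element, -1 is absent from the list)
print(binary_search(testlist, 42)) # expected: True
print(binary_search(testlist, -1)) # expected: False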
# === deepset-ai/haystack | /haystack/modeling/data_handler/data_silo.py | Apache-2.0 ===
from typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Union
import hashlib
import json
import logging
import random
from itertools import groupby
from pathlib import Path
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import ConcatDataset, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.processor import Processor, SquadProcessor
from haystack.utils.experiment_tracking import Tracker as tracker
from haystack.modeling.visual import TRACTOR_SMALL
if TYPE_CHECKING:
from haystack.nodes import FARMReader
logger = logging.getLogger(__name__)
class DataSilo:
"""Generates and stores PyTorch DataLoader objects for the train, dev and test datasets.
Relies upon functionality in the processor to do the conversion of the data. Will also
calculate and display some statistics.
"""
def __init__(
self,
processor: Processor,
batch_size: int,
eval_batch_size: Optional[int] = None,
distributed: bool = False,
automatic_loading: bool = True,
max_multiprocessing_chunksize: int = 512,
max_processes: int = 128,
multiprocessing_strategy: Optional[str] = None,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
):
"""
:param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
:param batch_size: The size of batch that should be returned by the DataLoader for the training set.
:param eval_batch_size: The size of batch that should be returned by the DataLoaders for the dev and test set.
        :param distributed: Set to True if you are running in a distributed environment, e.g. using DistributedDataParallel.
The DataSilo will init the DataLoader with a DistributedSampler() to distribute batches.
:param automatic_loading: Set to False, if you don't want to automatically load data at initialization.
:param max_multiprocessing_chunksize: max possible value for chunksize as calculated by `calc_chunksize()`
in `haystack.basics.utils`. For certain cases like lm_finetuning, a smaller value can be set, as the default chunksize
                                              values are rather large and might cause memory issues.
:param max_processes: the maximum number of processes to spawn in the multiprocessing.Pool used in DataSilo.
It can be set to 1 to disable the use of multiprocessing or make debugging easier.
.. deprecated:: 1.9
Multiprocessing has been removed in 1.9. This parameter will be ignored.
        :param multiprocessing_strategy: Set the multiprocessing sharing strategy; this can be one of file_descriptor/file_system depending on your OS.
If your system has low limits for the number of open file descriptors, and you can’t raise them,
you should use the file_system strategy.
.. deprecated:: 1.9
Multiprocessing has been removed in 1.9. This parameter will be ignored.
:param caching: save the processed datasets on disk to save time/compute if the same train data is used to run
multiple experiments. Each cache has a checksum based on the train_filename of the Processor
and the batch size.
:param cache_path: root dir for storing the datasets' cache.
"""
self.distributed = distributed
self.processor = processor
self.data = {} # type: Dict
self.batch_size = batch_size
self.class_weights = None
self.max_processes = max_processes
self.multiprocessing_strategy = multiprocessing_strategy
self.max_multiprocessing_chunksize = max_multiprocessing_chunksize
self.caching = caching
self.cache_path = cache_path
self.tensor_names = None
if eval_batch_size is None:
self.eval_batch_size = batch_size
else:
self.eval_batch_size = eval_batch_size
if len(self.processor.tasks) == 0:
raise Exception(
"No task initialized. Try initializing the processor with a metric and a label list. "
"Alternatively you can add a task using Processor.add_task()"
)
loaded_from_cache = False
if self.caching: # Check if DataSets are present in cache
checksum = self._get_checksum()
dataset_path = self.cache_path / checksum
if dataset_path.exists():
self._load_dataset_from_cache(dataset_path)
loaded_from_cache = True
if not loaded_from_cache and automatic_loading:
# In most cases we want to load all data automatically, but in some cases we rather want to do this
# later or load from dicts instead of file
self._load_data()
def _get_dataset(self, filename: Optional[Union[str, Path]], dicts: Optional[List[Dict]] = None):
if not filename and not dicts:
raise ValueError("You must either supply `filename` or `dicts`")
# loading dicts from file (default)
if dicts is None:
dicts = list(self.processor.file_to_dicts(filename)) # type: ignore
        # shuffle the list of dicts here if we later want to split off a random dev set from the train set
if str(self.processor.train_filename) in str(filename):
if not self.processor.dev_filename:
if self.processor.dev_split > 0.0:
random.shuffle(dicts)
num_dicts = len(dicts)
datasets = []
problematic_ids_all = set()
batch_size = self.max_multiprocessing_chunksize
for i in tqdm(range(0, num_dicts, batch_size), desc="Preprocessing dataset", unit=" Dicts"):
processing_batch = dicts[i : i + batch_size]
dataset, tensor_names, problematic_sample_ids = self.processor.dataset_from_dicts(
dicts=processing_batch, indices=list(range(len(processing_batch))) # TODO remove indices
)
datasets.append(dataset)
problematic_ids_all.update(problematic_sample_ids)
self.processor.log_problematic(problematic_ids_all)
datasets = [d for d in datasets if d]
concat_datasets = ConcatDataset(datasets) # type: Dataset
return concat_datasets, tensor_names
def _load_data(
self,
train_dicts: Optional[List[Dict]] = None,
dev_dicts: Optional[List[Dict]] = None,
test_dicts: Optional[List[Dict]] = None,
):
"""
Loading the train, dev and test datasets either from files (default) or from supplied dicts.
The processor is called to handle the full conversion from "raw data" to a Pytorch Dataset.
The resulting datasets are loaded into DataSilo.data
:param train_dicts: (Optional) dicts containing examples for training.
:param dev_dicts: (Optional) dicts containing examples for dev.
:param test_dicts: (Optional) dicts containing examples for test.
:return: None
"""
logger.info("\nLoading data into the data silo ... %s", TRACTOR_SMALL)
# train data
logger.info("LOADING TRAIN DATA")
logger.info("==================")
if train_dicts:
# either from supplied dicts
logger.info("Loading train set from supplied dicts ")
self.data["train"], self.tensor_names = self._get_dataset(filename=None, dicts=train_dicts)
elif self.processor.train_filename:
# or from a file (default)
train_file = self.processor.data_dir / self.processor.train_filename
logger.info("Loading train set from: %s ", train_file)
self.data["train"], self.tensor_names = self._get_dataset(train_file)
else:
logger.info("No train set is being loaded")
self.data["train"] = None
# dev data
logger.info("")
logger.info("LOADING DEV DATA")
logger.info("=================")
if dev_dicts:
# either from supplied dicts
logger.info("Loading train set from supplied dicts ")
self.data["dev"], self.tensor_names = self._get_dataset(filename=None, dicts=dev_dicts)
elif self.processor.dev_filename:
# or from file (default)
dev_file = self.processor.data_dir / self.processor.dev_filename
logger.info("Loading dev set from: %s", dev_file)
self.data["dev"], _ = self._get_dataset(dev_file)
elif self.processor.dev_split > 0.0:
# or split it apart from train set
logger.info("Loading dev set as a slice of train set")
self._create_dev_from_train()
else:
logger.info("No dev set is being loaded")
self.data["dev"] = None
logger.info("")
logger.info("LOADING TEST DATA")
logger.info("=================")
# test data
if test_dicts:
# either from supplied dicts
logger.info("Loading train set from supplied dicts ")
self.data["test"], self.tensor_names = self._get_dataset(filename=None, dicts=test_dicts)
elif self.processor.test_filename:
# or from file (default)
test_file = self.processor.data_dir / self.processor.test_filename
logger.info("Loading test set from: %s", test_file)
if self.tensor_names:
self.data["test"], _ = self._get_dataset(test_file)
else:
self.data["test"], self.tensor_names = self._get_dataset(test_file)
else:
logger.info("No test set is being loaded")
self.data["test"] = None
if self.caching:
self._save_dataset_to_cache()
# derive stats and meta data
self._calculate_statistics()
# self.calculate_class_weights()
self._initialize_data_loaders()
def _load_dataset_from_cache(self, cache_dir: Path):
"""
Load serialized dataset from a cache.
"""
logger.info("Loading datasets from cache at %s", cache_dir)
self.data["train"] = torch.load(cache_dir / "train_dataset")
dev_dataset_path = cache_dir / "dev_dataset"
if dev_dataset_path.exists():
self.data["dev"] = torch.load(dev_dataset_path)
else:
self.data["dev"] = None
test_dataset_path = cache_dir / "test_dataset"
if test_dataset_path.exists():
self.data["test"] = torch.load(test_dataset_path)
else:
self.data["test"] = None
self.tensor_names = torch.load(cache_dir / "tensor_names")
# derive stats and meta data
self._calculate_statistics()
# self.calculate_class_weights()
self._initialize_data_loaders()
def _get_checksum(self):
"""
Get checksum based on a dict to ensure validity of cached DataSilo
"""
# keys in the dict identifies uniqueness for a given DataSilo.
payload_dict = {
"train_filename": str(Path(self.processor.train_filename).absolute()),
"data_dir": str(self.processor.data_dir.absolute()),
"max_seq_len": self.processor.max_seq_len,
"dev_split": self.processor.dev_split,
"tasks": self.processor.tasks,
}
checksum = get_dict_checksum(payload_dict)
return checksum
def _save_dataset_to_cache(self):
"""
Serialize and save dataset to a cache.
"""
checksum = self._get_checksum()
cache_dir = self.cache_path / checksum
cache_dir.mkdir(parents=True, exist_ok=True)
torch.save(self.data["train"], cache_dir / "train_dataset")
if self.data["dev"]:
torch.save(self.data["dev"], cache_dir / "dev_dataset")
if self.data["test"]:
torch.save(self.data["test"], cache_dir / "test_dataset")
torch.save(self.tensor_names, cache_dir / "tensor_names")
logger.info("Cached the datasets at %s", cache_dir)
def _initialize_data_loaders(self):
"""
Initializing train, dev and test data loaders for the already loaded datasets.
"""
if self.data["train"] is not None:
if self.distributed:
sampler_train = DistributedSampler(self.data["train"])
else:
sampler_train = RandomSampler(self.data["train"])
data_loader_train = NamedDataLoader(
dataset=self.data["train"],
sampler=sampler_train,
batch_size=self.batch_size,
tensor_names=self.tensor_names,
)
else:
data_loader_train = None
if self.data["dev"] is not None:
data_loader_dev = NamedDataLoader(
dataset=self.data["dev"],
sampler=SequentialSampler(self.data["dev"]),
batch_size=self.eval_batch_size,
tensor_names=self.tensor_names,
)
else:
data_loader_dev = None
if self.data["test"] is not None:
data_loader_test = NamedDataLoader(
dataset=self.data["test"],
sampler=SequentialSampler(self.data["test"]),
batch_size=self.eval_batch_size,
tensor_names=self.tensor_names,
)
else:
data_loader_test = None
self.loaders = {"train": data_loader_train, "dev": data_loader_dev, "test": data_loader_test}
def _create_dev_from_train(self):
"""
Split a dev set apart from the train dataset.
"""
n_dev = int(self.processor.dev_split * len(self.data["train"]))
n_train = len(self.data["train"]) - n_dev
train_dataset, dev_dataset = self.random_split_ConcatDataset(self.data["train"], lengths=[n_train, n_dev])
self.data["train"] = train_dataset
if len(dev_dataset) > 0:
self.data["dev"] = dev_dataset
else:
logger.warning("No dev set created. Please adjust the dev_split parameter.")
logger.info(
"Took %s samples out of train set to create dev set (dev split is roughly %s)",
len(dev_dataset),
self.processor.dev_split,
)
def random_split_ConcatDataset(self, ds: ConcatDataset, lengths: List[int]):
"""
Roughly split a Concatdataset into non-overlapping new datasets of given lengths.
Samples inside Concatdataset should already be shuffled.
:param ds: Dataset to be split.
:param lengths: Lengths of splits to be produced.
"""
if sum(lengths) != len(ds):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
try:
idx_dataset = np.where(np.array(ds.cumulative_sizes) > lengths[0])[0][0]
except IndexError:
raise Exception(
"All dataset chunks are being assigned to train set leaving no samples for dev set. "
"Either consider increasing dev_split or setting it to 0.0\n"
f"Cumulative chunk sizes: {ds.cumulative_sizes}\n"
f"train/dev split: {lengths}"
)
assert idx_dataset >= 1, (
"Dev_split ratio is too large, there is no data in train set. "
f"Please lower dev_split = {self.processor.dev_split}"
)
train = ConcatDataset(ds.datasets[:idx_dataset]) # type: Dataset
test = ConcatDataset(ds.datasets[idx_dataset:]) # type: Dataset
return train, test
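    # Illustrative: for chunk datasets of sizes [100, 100, 100] (cumulative_sizes
    # [100, 200, 300]) and lengths=[250, 50], the first cumulative size exceeding
    # 250 sits at index 2, so the first split keeps two chunks (200 samples) and
    # the remaining chunk (100 samples) becomes the second split -- hence "roughly".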
def _calculate_statistics(self):
"""Calculate and log simple summary statistics of the datasets"""
logger.info("")
logger.info("DATASETS SUMMARY")
logger.info("================")
self.counts = {}
clipped = -1
ave_len = -1
if self.data["train"]:
self.counts["train"] = len(self.data["train"])
if "input_ids" in self.tensor_names:
clipped, ave_len, seq_lens, max_seq_len = self._calc_length_stats_single_encoder()
elif "query_input_ids" in self.tensor_names and "passage_input_ids" in self.tensor_names:
clipped, ave_len, seq_lens, max_seq_len = self._calc_length_stats_biencoder()
else:
logger.warning(
"Could not compute length statistics because 'input_ids' or 'query_input_ids' and 'passage_input_ids' are missing."
)
clipped = -1
ave_len = -1
else:
self.counts["train"] = 0
if self.data["dev"]:
self.counts["dev"] = len(self.data["dev"])
else:
self.counts["dev"] = 0
if self.data["test"]:
self.counts["test"] = len(self.data["test"])
else:
self.counts["test"] = 0
logger.info("Examples in train: %s", self.counts["train"])
logger.info("Examples in dev : %s", self.counts["dev"])
logger.info("Examples in test : %s", self.counts["test"])
logger.info("Total examples : %s", self.counts["train"] + self.counts["dev"] + self.counts["test"])
logger.info("")
if self.data["train"]:
# SquadProcessor does not clip sequences, but splits them into multiple samples
if "input_ids" in self.tensor_names and not isinstance(self.processor, SquadProcessor):
logger.info("Longest sequence length observed after clipping: %s", max(seq_lens))
logger.info("Average sequence length after clipping: %s", ave_len)
logger.info("Proportion clipped: %s", clipped)
if clipped > 0.5:
logger.info(
"[Haystack Tip] %s%% of your samples got cut down to %s tokens. "
"Consider increasing max_seq_len "
"(the maximum value allowed with the current model is max_seq_len=%s, "
"if this is not enough consider splitting the document in smaller units or changing the model). "
"This will lead to higher memory consumption but is likely to improve your model performance",
round(clipped * 100, 1),
max_seq_len,
self.processor.tokenizer.model_max_length,
)
elif "query_input_ids" in self.tensor_names and "passage_input_ids" in self.tensor_names:
logger.info(
"Longest query length observed after clipping: %s - for max_query_len: %s",
max(seq_lens[0]),
max_seq_len[0],
)
logger.info("Average query length after clipping: %s", ave_len[0])
logger.info("Proportion queries clipped: %s", clipped[0])
logger.info("")
logger.info(
"Longest passage length observed after clipping: %s - for max_passage_len: %s",
max(seq_lens[1]),
max_seq_len[1],
)
logger.info("Average passage length after clipping: %s", ave_len[1])
logger.info("Proportion passages clipped: %s", clipped[1])
tracker.track_params(
{
"n_samples_train": self.counts["train"],
"n_samples_dev": self.counts["dev"],
"n_samples_test": self.counts["test"],
"batch_size": self.batch_size,
"ave_seq_len": ave_len,
"clipped": clipped,
}
)
def _calc_length_stats_single_encoder(self):
seq_lens = []
for dataset in self.data["train"].datasets:
train_input_numpy = dataset[:][self.tensor_names.index("input_ids")].numpy()
seq_lens.extend(np.sum(train_input_numpy != self.processor.tokenizer.pad_token_id, axis=1))
max_seq_len = dataset[:][self.tensor_names.index("input_ids")].shape[1]
clipped = np.mean(np.array(seq_lens) == max_seq_len) if seq_lens else 0
ave_len = np.mean(seq_lens) if seq_lens else 0
return clipped, ave_len, seq_lens, max_seq_len
def _calc_length_stats_biencoder(self):
seq_lens = [[], []]
for dataset in self.data["train"].datasets:
query_input_numpy = dataset[:][self.tensor_names.index("query_input_ids")].numpy()
num_passages = dataset[:][self.tensor_names.index("passage_input_ids")].shape[1]
bs = dataset[:][self.tensor_names.index("passage_input_ids")].shape[0]
passage_input_numpy = (
dataset[:][self.tensor_names.index("passage_input_ids")].numpy().reshape((bs, -1), order="C")
)
qlen = np.sum(query_input_numpy != self.processor.query_tokenizer.pad_token_id, axis=1)
plen = np.sum(passage_input_numpy != self.processor.passage_tokenizer.pad_token_id, axis=1) / num_passages
seq_lens[0].extend(qlen)
seq_lens[1].extend(plen)
q_max_seq_len = dataset[:][self.tensor_names.index("query_input_ids")].shape[1]
p_max_seq_len = dataset[:][self.tensor_names.index("passage_input_ids")].shape[2]
clipped_q = np.mean(np.array(seq_lens[0]) == q_max_seq_len) if seq_lens[0] else 0
ave_len_q = np.mean(seq_lens[0]) if seq_lens[0] else 0
clipped_p = np.mean(np.array(seq_lens[1]) == p_max_seq_len) if seq_lens[1] else 0
ave_len_p = np.mean(seq_lens[1]) if seq_lens[1] else 0
clipped = [clipped_q, clipped_p]
ave_len = [ave_len_q, ave_len_p]
max_seq_len = [q_max_seq_len, p_max_seq_len]
return clipped, ave_len, seq_lens, max_seq_len
def get_data_loader(self, dataset_name: str):
"""
Returns data loader for specified split of dataset.
:param dataset_name: Split of dataset. Either 'train' or 'dev' or 'test'.
"""
return self.loaders[dataset_name]
def n_samples(self, dataset_name: str):
"""
Returns the number of samples in a given dataset.
:param dataset_name: Split of dataset. Choose from 'train', 'dev' or 'test'.
"""
return self.counts[dataset_name]
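# A minimal usage sketch (`my_processor` is an assumed, already-configured
# Processor instance; construction details omitted):
#
#   silo = DataSilo(processor=my_processor, batch_size=32)
#   train_loader = silo.get_data_loader("train")
#   print(silo.n_samples("train"))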
class DataSiloForCrossVal:
"""
Perform cross validation or nested cross validation.
    To perform cross validation or nested cross validation, we combine the instances
    from all the sets (or a chosen subset of them) and then create a separate
    data silo instance for each fold or nested fold.
Calling DataSiloForCrossVal.make() creates a list of DataSiloForCrossVal instances - one for each fold.
"""
def __init__(
self,
origsilo: DataSilo,
trainset: Union[List, Dataset],
devset: Union[List, Dataset],
testset: Union[List, Dataset],
):
self.tensor_names = origsilo.tensor_names
self.data = {"train": trainset, "dev": devset, "test": testset}
self.processor = origsilo.processor
self.batch_size = origsilo.batch_size
# should not be necessary, xval makes no sense with huge data
# sampler_train = DistributedSampler(self.data["train"])
sampler_train = RandomSampler(trainset) # type: ignore [arg-type]
self.data_loader_train = NamedDataLoader(
dataset=trainset, sampler=sampler_train, batch_size=self.batch_size, tensor_names=self.tensor_names # type: ignore [arg-type]
)
self.data_loader_dev = NamedDataLoader(
dataset=devset, # type: ignore [arg-type]
sampler=SequentialSampler(devset), # type: ignore [arg-type]
batch_size=self.batch_size,
tensor_names=self.tensor_names,
)
self.data_loader_test = NamedDataLoader(
dataset=testset, # type: ignore [arg-type]
sampler=SequentialSampler(testset), # type: ignore [arg-type]
batch_size=self.batch_size,
tensor_names=self.tensor_names,
)
self.loaders = {"train": self.data_loader_train, "dev": self.data_loader_dev, "test": self.data_loader_test}
def get_data_loader(self, which):
return self.loaders[which]
@classmethod
def make(
cls,
datasilo: DataSilo,
sets: Optional[List[str]] = None,
n_splits: int = 5,
shuffle: bool = True,
random_state: Optional[int] = None,
stratified: bool = True,
n_neg_answers_per_question: int = 1,
n_inner_splits: Optional[int] = None,
):
"""
Create number of folds data-silo-like objects which can be used for training from the
original data silo passed on.
:param datasilo: The data silo that contains the original data.
:param sets: Which sets to use to create the xval folds (strings). By default, "train", "dev", and "test" are used.
:param n_splits: number of folds to create
:param shuffle: shuffle each class' samples before splitting
:param random_state: random state for shuffling
:param stratified: If class stratification should be done.
It is never done with question answering.
:param n_neg_answers_per_question: number of negative answers per question to include for training
"""
if sets is None:
sets = ["train", "dev", "test"]
if "question_answering" in datasilo.processor.tasks and n_inner_splits is None: # type: ignore
return cls._make_question_answering(
datasilo, sets, n_splits, shuffle, random_state, n_neg_answers_per_question
)
else:
raise RuntimeError("Cross validation can not be done under these conditions!")
@classmethod
def _make_question_answering(
cls,
datasilo: DataSilo,
sets: Optional[List[str]] = None,
n_splits: int = 5,
shuffle: bool = True,
random_state: Optional[int] = None,
n_neg_answers_per_question: int = 1,
):
"""
Create number of folds data-silo-like objects which can be used for training from the
original data silo passed on. This function takes into account the characteristics of the
        data for question answering.
:param datasilo: The data silo that contains the original data.
:param sets: Which sets to use to create the xval folds (strings). By default, "train", "dev", and "test" are used.
:param n_splits: Number of folds to create.
:param shuffle: Shuffle each class' samples before splitting.
:param random_state: Random state for shuffling.
:param n_neg_answers_per_question: Number of negative answers per question to include for training.
"""
if sets is None:
sets = ["train", "dev", "test"]
assert "id" in datasilo.tensor_names, f"Expected tensor 'id' in tensor names, found {datasilo.tensor_names}" # type: ignore
assert "labels" in datasilo.tensor_names, f"Expected tensor 'labels' in tensor names, found {datasilo.tensor_names}" # type: ignore
id_index = datasilo.tensor_names.index("id") # type: ignore
label_index = datasilo.tensor_names.index("labels") # type:ignore
sets_to_concat = []
for setname in sets:
if datasilo.data[setname]:
sets_to_concat.extend(datasilo.data[setname])
all_data = ConcatDataset(sets_to_concat) # type: Dataset
documents = []
keyfunc = lambda x: x[id_index][0] # pylint: disable=unnecessary-lambda-assignment
all_data = sorted(all_data.datasets, key=keyfunc) # type: ignore
for _, document in groupby(all_data, key=keyfunc): # type: ignore
documents.append(list(document))
xval_split = cls._split_for_qa(
documents=documents, id_index=id_index, n_splits=n_splits, shuffle=shuffle, random_state=random_state
)
silos = []
for train_set, test_set in xval_split:
# Each training set is further divided into actual train and dev set
if datasilo.processor.dev_split > 0:
dev_split = datasilo.processor.dev_split
n_dev = int(np.ceil(dev_split * len(train_set)))
assert n_dev > 0, f"dev split of {dev_split} is not large enough to split away a development set"
n_actual_train = len(train_set) - n_dev
actual_train_set = train_set[:n_actual_train]
dev_set = train_set[n_actual_train:]
ds_dev = [sample for document in dev_set for sample in document]
else:
ds_dev = None # type: ignore
actual_train_set = train_set
train_samples = []
for doc in actual_train_set:
keyfunc = lambda x: x[id_index][1] # pylint: disable=unnecessary-lambda-assignment
doc = sorted(doc, key=keyfunc)
for _, question in groupby(doc, key=keyfunc):
                    # add all available answers to the train set
sample_list = list(question)
neg_answer_idx: List[int] = []
for index, sample in enumerate(sample_list):
if sample[label_index][0][0] or sample[label_index][0][1]:
train_samples.append(sample)
else:
neg_answer_idx.append(index)
# add random n_neg_answers_per_question samples to train set
if len(neg_answer_idx) <= n_neg_answers_per_question:
train_samples.extend([sample_list[idx] for idx in neg_answer_idx])
else:
neg_answer_idx = random.sample(neg_answer_idx, n_neg_answers_per_question)
train_samples.extend(
# For some reason pylint seems to be just wrong here. It's therefore silenced.
# Check if the issue persists in case of a future refactoring.
[sample_list[idx] for idx in neg_answer_idx] # pylint: disable=invalid-sequence-index
)
ds_train = train_samples
ds_test = [sample for document in test_set for sample in document]
silos.append(DataSiloForCrossVal(datasilo, ds_train, ds_dev, ds_test)) # type: ignore [arg-type]
return silos
@staticmethod
def _split_for_qa(
documents: List, id_index: int, n_splits: int = 5, shuffle: bool = True, random_state: Optional[int] = None
):
keyfunc = lambda x: x[id_index][1] # pylint: disable=unnecessary-lambda-assignment
if shuffle:
fixed_random = random.Random()
fixed_random.seed(random_state)
fixed_random.shuffle(documents)
questions_per_doc = []
for doc in documents:
# group samples in current doc by question id
doc = sorted(doc, key=keyfunc)
questions = list(groupby(doc, key=keyfunc))
questions_per_doc.append(len(questions))
# split documents into n_splits splits with approximately same number of questions per split
questions_per_doc = np.array(questions_per_doc) # type: ignore [assignment]
accumulated_questions_per_doc = questions_per_doc.cumsum() # type: ignore
questions_per_fold = accumulated_questions_per_doc[-1] // n_splits
accumulated_questions_per_fold = np.array(range(1, n_splits)) * questions_per_fold
if accumulated_questions_per_fold[0] < accumulated_questions_per_doc[0]:
accumulated_questions_per_fold[0] = accumulated_questions_per_doc[0] + 1
indices_to_split_at = np.searchsorted(
accumulated_questions_per_doc, accumulated_questions_per_fold, side="right"
)
splits = np.split(documents, indices_to_split_at)
for split in splits:
assert len(split) > 0
for idx, split in enumerate(splits):
current_test_set = split
current_train_set = np.hstack(np.delete(splits, idx, axis=0)) # type: ignore [call-overload]
yield current_train_set, current_test_set
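# Illustrative sketch (not part of the class above): how ``_split_for_qa``
# derives its fold boundaries. Suppose five documents contain
# [3, 1, 4, 2, 2] questions and n_splits=3. Then
# accumulated_questions_per_doc = [3, 4, 8, 10, 12],
# questions_per_fold = 12 // 3 = 4, and the boundary targets are [4, 8].
# ``np.searchsorted(..., side="right")`` maps those targets to document
# indices [2, 3], so ``np.split`` yields folds with [4, 4, 4] questions each.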
def get_dict_checksum(payload_dict):
"""
Get MD5 checksum for a dict.
"""
checksum = hashlib.md5(json.dumps(payload_dict, sort_keys=True).encode("utf-8")).hexdigest()
return checksum
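# Minimal usage sketch (illustrative): because the payload is serialized with
# ``sort_keys=True``, the checksum is independent of key insertion order.
#
# >>> get_dict_checksum({"a": 1, "b": 2}) == get_dict_checksum({"b": 2, "a": 1})
# True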
class DistillationDataSilo(DataSilo):
"""
    This data silo runs the full data set through a teacher model for model distillation.
    As this is done during preprocessing, it does not need to be repeated in each epoch and can be cached.
"""
def __init__(
self,
teacher_model: "FARMReader",
teacher_batch_size: int,
device: torch.device,
processor: Processor,
batch_size: int,
eval_batch_size: Optional[int] = None,
distributed: bool = False,
automatic_loading: bool = True,
max_processes: int = 128,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
):
self.teacher = teacher_model
self.teacher_batch_size = teacher_batch_size
self.device = device
        max_processes = 1  # fixed at 1 for as long as multiprocessing does not work with the teacher attribute
super().__init__(
max_processes=max_processes,
processor=processor,
batch_size=batch_size,
eval_batch_size=eval_batch_size,
distributed=distributed,
automatic_loading=automatic_loading,
caching=caching,
cache_path=cache_path,
)
def _run_teacher(self, batch: dict) -> List[torch.Tensor]:
"""
Run the teacher model on the given batch.
"""
params = {
"input_ids": batch["input_ids"],
"segment_ids": batch["segment_ids"],
"padding_mask": batch["padding_mask"],
}
if "output_hidden_states" in batch.keys():
params["output_hidden_states"] = batch["output_hidden_states"]
if "output_attentions" in batch.keys():
params["output_attentions"] = batch["output_attentions"]
return self.teacher.inferencer.model(**params)
def _pass_batches(
self,
batch: List[List[torch.Tensor]],
corresponding_chunks: List[int],
teacher_outputs: List[List[Tuple[torch.Tensor, ...]]],
tensor_names: List[str],
):
with torch.inference_mode():
batch_transposed = zip(*batch) # transpose dimensions (from batch, features, ... to features, batch, ...)
batch_transposed_list = [torch.stack(b) for b in batch_transposed] # create tensors for each feature
batch_dict = {
key: tensor.to(self.device) for key, tensor in zip(tensor_names, batch_transposed_list)
} # create input dict
y = self._run_teacher(batch=batch_dict) # run teacher model
            y = [y_i.cpu() for y_i in y]
self.output_len = len(y)
# grouping by chunk
for i, data in zip(corresponding_chunks, zip(*y)): # transpose back
teacher_outputs[i].append(data)
return
def _teacher_output_names(self) -> List[str]:
return ["teacher_output_" + str(i) for i in range(self.output_len)]
def _get_dataset(self, filename: Optional[Union[str, Path]], dicts: Optional[List[Dict]] = None):
concat_datasets, tensor_names = super()._get_dataset(filename, dicts)
batch = []
        corresponding_chunks = []  # associates batch elements with their chunks (elements may come from multiple chunks)
        teacher_outputs: List[List[Tuple[torch.Tensor, ...]]] = []  # teacher outputs, grouped into one list per chunk
# creating batches from chunks
for i, dataset in enumerate(tqdm(concat_datasets.datasets, desc="Doing forward pass on teacher model")):
teacher_outputs.append([])
for x in zip(*dataset.tensors): # loop through chunks
batch.append(x)
corresponding_chunks.append(i)
if len(batch) == self.teacher_batch_size:
self._pass_batches(
batch, corresponding_chunks, teacher_outputs, tensor_names
) # doing forward pass on teacher model
batch = []
corresponding_chunks = []
if batch:
self._pass_batches(batch, corresponding_chunks, teacher_outputs, tensor_names)
# appending teacher outputs to original dataset
for dataset, teacher_output in zip(concat_datasets.datasets, teacher_outputs):
dataset.tensors += tuple(torch.stack(tensors) for tensors in zip(*teacher_output))
tensor_names += self._teacher_output_names()
        concat_datasets = ConcatDataset(concat_datasets.datasets)  # rebuild so the ConcatDataset reflects the modified datasets
return concat_datasets, tensor_names
def _get_checksum(self):
"""
Get checksum based on a dict to ensure validity of cached DataSilo
"""
# keys in the dict identifies uniqueness for a given DataSilo.
payload_dict = {
"train_filename": str(Path(self.processor.train_filename).absolute()),
"data_dir": str(self.processor.data_dir.absolute()),
"max_seq_len": self.processor.max_seq_len,
"dev_split": self.processor.dev_split,
"tasks": self.processor.tasks,
"teacher_name_or_path": self.teacher.model_name_or_path,
"data_silo_type": self.__class__.__name__,
}
checksum = get_dict_checksum(payload_dict)
return checksum
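# Hedged usage sketch for DistillationDataSilo. The ``teacher`` and
# ``processor`` objects below are assumed to exist and are purely
# illustrative, not part of this module:
#
#   import torch
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   silo = DistillationDataSilo(
#       teacher_model=teacher,        # a FARMReader used as the teacher
#       teacher_batch_size=16,
#       device=device,
#       processor=processor,
#       batch_size=32,
#   )
#
# The teacher's outputs are appended to every dataset chunk as extra tensors
# named "teacher_output_0", "teacher_output_1", ..., ready to be consumed by a
# distillation loss during student training.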
File: /parlai/tasks/wizard_of_internet/mutators.py (repo: facebookresearch/ParlAI; license: CC-BY-4.0 / MIT; Python)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from parlai.core.mutators import register_mutator, MessageMutator, ManyEpisodeMutator
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.core.params import ParlaiParser
import parlai.tasks.wizard_of_internet.constants as CONST
from parlai.tasks.wizard_of_wikipedia.mutators import (
AddLabel as AddLabelWizWiki,
AddLabelLM as AddLabelLMWizWiki,
CheckedSentenceAsLabel as CheckedSentenceAsLabelWizWiki,
AddCheckedSentence as AddCheckedSentenceWizWiki,
)
@register_mutator("woi_add_checked_sentence_to_input")
class AddCheckedSentence(AddCheckedSentenceWizWiki):
"""
Adds the checked sentence to the end of the text.
    E.g. run with: parlai display_data -t wizard_of_internet -n 100 -dt valid --mutators
    flatten,woi_add_checked_sentence_to_input
"""
@property
def checked_sentence_kword(self):
return CONST.SELECTED_SENTENCES
@register_mutator("woi_checked_sentence_as_label")
class CheckedSentenceAsLabel(CheckedSentenceAsLabelWizWiki):
"""
Uses the checked sentence (knowledge) as label.
    E.g. run with: parlai display_data -t wizard_of_internet -n 100 -dt valid --mutators
    flatten,woi_checked_sentence_as_label
"""
@property
def checked_sentence_kword(self):
return CONST.SELECTED_SENTENCES
@register_mutator("woi_add_label_to_input")
class AddLabel(AddLabelWizWiki):
"""
Adds the dialogue sentence to the input.
    E.g. run with: parlai display_data -t wizard_of_internet -n 100 -dt valid --mutators
    flatten,woi_checked_sentence_as_label,woi_add_label_to_input
"""
pass
@register_mutator("woi_add_label_to_input_lm")
class AddLabelLM(AddLabelLMWizWiki):
"""
Adds the dialogue sentence to the input (language modeling version).
Language modeling version where a random piece of the label is sampled in
the input. The rest is placed inside special tokens.
    E.g. run with: parlai display_data -t wizard_of_internet -n 100 -dt valid --mutators
    flatten,woi_add_label_to_input_lm
    To add the checked sentence as the label, use:
    parlai display_data -t wizard_of_internet -n 100 -dt valid --mutators
    flatten,woi_add_label_to_input_lm,woi_checked_sentence_as_label
"""
pass
@register_mutator("woi_filter_no_passage_used")
class WoiFilterNoPassageUsed(ManyEpisodeMutator):
"""
    Filters out any examples where no passage was selected to base the wizard's reply
    on.
    This works best in flattened mode. E.g. run with: parlai display_data -t
    wizard_of_internet -n 100 -dt valid --mutators flatten+woi_filter_no_passage_used
"""
def many_episode_mutation(self, episode):
out_episodes = []
for e in episode:
checked_sentences = e.get(CONST.SELECTED_SENTENCES)
checked_sentences = ' '.join(checked_sentences)
if checked_sentences == CONST.NO_SELECTED_SENTENCES_TOKEN:
pass
else:
out_episodes.append([e])
return out_episodes
@register_mutator("woi_keep_only_no_passage_used")
class WoiKeepOnlyNoPassageUsed(ManyEpisodeMutator):
"""
Filter all examples where passages are used.
"""
def many_episode_mutation(self, episode):
out_episodes = []
for e in episode:
checked_sentences = e.get(CONST.SELECTED_SENTENCES)
checked_sentences = ' '.join(checked_sentences)
if checked_sentences == CONST.NO_SELECTED_SENTENCES_TOKEN:
out_episodes.append([e])
return out_episodes
@register_mutator("woi_filter_selected_knowledge_in_retrieved_docs")
class WoiFilterSelectedKnowledgeInRetrievedDocs(ManyEpisodeMutator):
"""
    Filters out any examples whose '__retrieved-docs__' field does not contain the
    '__selected-sentences__'.
"""
def many_episode_mutation(self, episode):
out_episodes = []
for e in episode:
checked_sentences = e.get(
CONST.SELECTED_SENTENCES,
e.get('labels', [CONST.NO_SELECTED_SENTENCES_TOKEN]),
)
docs = ' '.join(e.get('__retrieved-docs__'))
if ' '.join(checked_sentences) != CONST.NO_SELECTED_SENTENCES_TOKEN:
found = True
for sent in checked_sentences:
s = sent.lstrip(' ').rstrip(' ')
if s not in docs:
found = False
if found:
out_episodes.append([e])
else:
pass
return out_episodes
def chunk_docs_in_message(message, chunk_sz):
if CONST.RETRIEVED_DOCS not in message:
return message
new_message = message.copy()
docs = message[CONST.RETRIEVED_DOCS]
titles = message.get(CONST.RETRIEVED_DOCS_TITLES)
urls = message.get(CONST.RETRIEVED_DOCS_URLS)
new_docs = []
new_titles = []
new_urls = []
checked_sentences = list(
message.get(
CONST.SELECTED_SENTENCES,
message.get('labels', [CONST.NO_SELECTED_SENTENCES_TOKEN]),
)
)
for i in range(len(checked_sentences)):
checked_sentences[i] = checked_sentences[i].lstrip(' ').rstrip(' ')
if ' '.join(checked_sentences) == CONST.NO_SELECTED_SENTENCES_TOKEN:
checked_sentences = []
for ind in range(len(docs)):
d = docs[ind]
# Guarantees that checked sentences are not split in half (as we split by space).
for i in range(len(checked_sentences)):
            d = d.replace(checked_sentences[i], f"||CHECKED_SENTENCE_{i}||")
while True:
end_chunk = d.find(' ', chunk_sz)
if end_chunk == -1:
# last chunk
for i in range(len(checked_sentences)):
                    d = d.replace(f"||CHECKED_SENTENCE_{i}||", checked_sentences[i])
new_docs.append(d)
new_titles.append(titles[ind])
new_urls.append(urls[ind])
break
else:
new_d = d[0:end_chunk]
for i in range(len(checked_sentences)):
                    new_d = new_d.replace(
                        f"||CHECKED_SENTENCE_{i}||", checked_sentences[i]
                    )
new_docs.append(new_d)
new_titles.append(titles[ind])
new_urls.append(urls[ind])
                d = d[end_chunk + 1 :]  # drop only the space at the split point
new_message.force_set(CONST.RETRIEVED_DOCS, new_docs)
new_message.force_set(CONST.RETRIEVED_DOCS_TITLES, new_titles)
new_message.force_set(CONST.RETRIEVED_DOCS_URLS, new_urls)
return new_message
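# Worked sketch (illustrative): with chunk_sz=10, the document
# "alpha beta gamma delta" is cut at the first space at or after index 10,
# yielding the chunks "alpha beta" and "gamma delta". Checked sentences are
# shielded by unique ||CHECKED_SENTENCE_<i>|| placeholders before splitting,
# so a gold sentence can never straddle a chunk boundary.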
@register_mutator("woi_chunk_retrieved_docs")
class WoiChunkRetrievedDocs(MessageMutator):
"""
    Chunks '__retrieved-docs__' into smaller docs (default: roughly 500 characters each).
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser.add_argument(
'--woi-doc-chunk-size',
default=500,
type=int,
help='Document chunk size (in characters).',
        )
        return parser
def message_mutation(self, message: Message) -> Message:
chunk_sz = self.opt.get('woi_doc_chunk_size', 500)
return chunk_docs_in_message(message, chunk_sz)
@register_mutator("woi_dropout_retrieved_docs")
class WoiDropoutRetrievedDocs(MessageMutator):
"""
Drops out '__retrieved-docs__' to only keep a maximum number in each example.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser.add_argument(
'--woi-doc-max-chunks',
default=100,
type=int,
help='Largest number of chunks to use, others will be dropped out at random. Chunks containing gold checked sentences will not be removed.',
        )
        return parser
def message_mutation(self, message: Message) -> Message:
if CONST.RETRIEVED_DOCS not in message:
return message
new_message = message.copy()
docs = message.get(CONST.RETRIEVED_DOCS)
new_docs = []
max_chunks = self.opt.get('woi_doc_max_chunks', 100)
if max_chunks > 0:
keep = torch.randperm(len(docs))[0:max_chunks]
else:
keep = torch.randperm(len(docs))
remove = torch.ones(len(docs))
remove[keep] = 0
for i in range(len(docs)):
if remove[i] == 0:
new_docs.append(docs[i])
else:
# We may still keep the doc if it contains the gold checked sentence(s).
checked_sentences = message.get(
CONST.SELECTED_SENTENCES,
message.get('labels', [CONST.NO_SELECTED_SENTENCES_TOKEN]),
)
d = docs[i]
found = False
if ' '.join(checked_sentences) != CONST.NO_SELECTED_SENTENCES_TOKEN:
for sent in checked_sentences:
s = sent.lstrip(' ').rstrip(' ')
if s in d:
found = True
if found:
new_docs.append(docs[i])
new_message.force_set(CONST.RETRIEVED_DOCS, new_docs)
return new_message
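# Hedged usage sketch: the mutators in this module are applied by their
# registered names from the ParlAI command line, e.g.:
#
#   parlai display_data -t wizard_of_internet -dt valid \
#       --mutators flatten,woi_chunk_retrieved_docs,woi_dropout_retrieved_docs \
#       --woi-doc-chunk-size 500 --woi-doc-max-chunks 100
#
# The flag spellings follow the argument definitions above; verify them
# against your installed ParlAI version.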
File: /examples/03_faithful/plot-01-demo=vb_algs-model=mix_gauss.py (repo: bnpy/bnpy; license: BSD-3-Clause; Python)
"""
==============================================
Variational training for Mixtures of Gaussians
==============================================
Showcase of different models and algorithms applied to the same dataset.
In this example, we show how bnpy makes it easy to apply
different models and algorithms to the same dataset.
"""
import bnpy
import numpy as np
import os
from matplotlib import pylab
import seaborn as sns
SMALL_FIG_SIZE = (2.5, 2.5)
FIG_SIZE = (5, 5)
pylab.rcParams['figure.figsize'] = FIG_SIZE
np.set_printoptions(precision=3, suppress=1, linewidth=200)
###############################################################################
#
# Load dataset from file
dataset_path = os.path.join(bnpy.DATASET_PATH, 'faithful')
dataset = bnpy.data.XData.read_csv(
os.path.join(dataset_path, 'faithful.csv'))
###############################################################################
#
# Make a simple plot of the raw data
pylab.plot(dataset.X[:, 0], dataset.X[:, 1], 'k.')
pylab.xlabel(dataset.column_names[0])
pylab.ylabel(dataset.column_names[1])
pylab.tight_layout()
data_ax_h = pylab.gca()
###############################################################################
#
# Setup: Helper function to display the learned clusters
# ------------------------------------------------------
def show_clusters_over_time(
task_output_path=None,
query_laps=[0, 1, 2, 10, 20, None],
nrows=2):
''' Show 2D elliptical contours overlaid on raw data.
'''
    ncols = int(np.ceil(len(query_laps) / float(nrows)))
fig_handle, ax_handle_list = pylab.subplots(
figsize=(SMALL_FIG_SIZE[0] * ncols, SMALL_FIG_SIZE[1] * nrows),
nrows=nrows, ncols=ncols, sharex=True, sharey=True)
for plot_id, lap_val in enumerate(query_laps):
cur_model, lap_val = bnpy.load_model_at_lap(task_output_path, lap_val)
cur_ax_handle = ax_handle_list.flatten()[plot_id]
bnpy.viz.PlotComps.plotCompsFromHModel(
cur_model, dataset=dataset, ax_handle=cur_ax_handle)
cur_ax_handle.set_title("lap: %d" % lap_val)
cur_ax_handle.set_xlabel(dataset.column_names[0])
cur_ax_handle.set_ylabel(dataset.column_names[1])
cur_ax_handle.set_xlim(data_ax_h.get_xlim())
cur_ax_handle.set_ylim(data_ax_h.get_ylim())
pylab.tight_layout()
###############################################################################
#
# *DiagGauss* observation model
# -----------------------------
#
# Assume diagonal covariances.
#
# Start with too many clusters (K=20)
gamma = 5.0
sF = 5.0
K = 20
diag_trained_model, diag_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'DiagGauss', 'memoVB',
output_path='/tmp/faithful/showcase-K=20-lik=DiagGauss-ECovMat=5*eye/',
nLap=1000, nTask=1, nBatch=1, convergeThr=0.0001,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamples',
)
show_clusters_over_time(diag_info_dict['task_output_path'])
###############################################################################
#
# *Gauss* observations + VB
# -------------------------
#
# Assume full covariances.
#
# Start with too many clusters (K=20)
full_trained_model, full_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'Gauss', 'VB',
output_path='/tmp/faithful/showcase-K=20-lik=Gauss-ECovMat=5*eye/',
nLap=1000, nTask=1, nBatch=1, convergeThr=0.0001,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamples',
)
show_clusters_over_time(full_info_dict['task_output_path'])
###############################################################################
#
# *ZeroMeanGauss* observations + VB
# ---------------------------------
#
# Assume full covariances and fix all means to zero.
#
# Start with too many clusters (K=20)
zm_trained_model, zm_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'ZeroMeanGauss', 'VB',
output_path='/tmp/faithful/showcase-K=20-lik=ZeroMeanGauss-ECovMat=5*eye/',
nLap=1000, nTask=1, nBatch=1, convergeThr=0.0001,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamples',
)
show_clusters_over_time(zm_info_dict['task_output_path'])
###############################################################################
#
# *Gauss* observations + stochastic VB
# ------------------------------------
#
# Assume full covariances.
#
# Start with too many clusters (K=20)
stoch_trained_model, stoch_info_dict = bnpy.run(
dataset, 'DPMixtureModel', 'Gauss', 'soVB',
output_path=\
'/tmp/faithful/showcase-K=20-lik=Gauss-ECovMat=5*eye-alg=soVB/',
nLap=50, nTask=1, nBatch=50,
rhoexp=0.51, rhodelay=1.0,
gamma0=gamma, sF=sF, ECovMat='eye',
K=K, initname='randexamples',
)
show_clusters_over_time(stoch_info_dict['task_output_path'])
###############################################################################
#
# Compare loss function traces for all methods
# --------------------------------------------
#
pylab.figure()
pylab.plot(
zm_info_dict['lap_history'],
zm_info_dict['loss_history'], 'b.-',
label='full_covar zero_mean')
pylab.plot(
full_info_dict['lap_history'],
full_info_dict['loss_history'], 'k.-',
label='full_covar')
pylab.plot(
diag_info_dict['lap_history'],
diag_info_dict['loss_history'], 'r.-',
label='diag_covar')
pylab.plot(
stoch_info_dict['lap_history'],
stoch_info_dict['loss_history'], 'c.:',
label='full_covar stochastic')
pylab.legend(loc='upper right')
pylab.xlabel('num. laps')
pylab.ylabel('loss')
pylab.xlim([4, 100]) # avoid early iterations
pylab.ylim([2.34, 4.0]) # handpicked
pylab.draw()
pylab.tight_layout()
###############################################################################
#
# Inspect the learned distribution over appearance probabilities
# --------------------------------------------------------------
# E_proba_K : 1D array, size n_clusters
# Each entry gives expected probability of that cluster
E_proba_K = stoch_trained_model.allocModel.get_active_comp_probs()
print("probability of each cluster:")
print(E_proba_K)
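###############################################################################
#
# As a quick illustrative extension (not part of the original demo), rank the
# clusters by their expected probability:
ranked_ids = np.argsort(-E_proba_K)
print("clusters ranked by expected probability (most probable first):")
print(ranked_ids)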
###############################################################################
#
# Inspect the learned means and covariance distributions
# ------------------------------------------------------
#
# Remember that each cluster has the following approximate posterior
# over its mean vector $\mu$ and covariance matrix $\Sigma$:
#
# $$
# q(\mu, \Sigma) = \mathcal{N}(\mu \mid m, \kappa \Sigma)\, \mathcal{W}(\Sigma \mid \nu, S)
# $$
#
# We show here how to compute the expected mean of $\mu$ and $\Sigma$
# from a trained model.
for k in range(K):
E_mu_k = stoch_trained_model.obsModel.get_mean_for_comp(k)
E_Sigma_k = stoch_trained_model.obsModel.get_covar_mat_for_comp(k)
print("")
print("mean[k=%d]" % k)
print(E_mu_k)
print("covar[k=%d]" % k)
print(E_Sigma_k)