Dataset schema (one row per source file):

| column | dtype | observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4-721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 5-91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3-113 |
| content | string | length 6-10.2M |
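The schema reads naturally as one row per source file; below is a minimal sketch of slicing such a table with pandas, assuming the rows are materialized as a single parquet file (the name code_files.parquet is hypothetical, chosen only for illustration):

import pandas as pd

# Hypothetical file name - the actual storage layout of this dataset is an
# assumption made for illustration only.
df = pd.read_parquet("code_files.parquet")

# Keep permissively licensed Python files under 1 MB.
subset = df[
    (df["license_type"] == "permissive")
    & (df["language"] == "Python")
    & (df["length_bytes"] < 1_000_000)
]
print(subset[["repo_name", "path", "star_events_count"]].head())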
---
blob_id: 2532106e13739cc47d2b1f558f5d81b88e58769e
directory_id: 09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
path: /tests/transforms/test_hamiltonian_expand.py
content_id: b6d4581e157db6c5f240a2cd363965a713780af8
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: PennyLaneAI/pennylane
snapshot_id: 458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
revision_id: 0843183ff15a013c2622af5e61fea431d18076d3
branch_name: refs/heads/master
visit_date: 2023-09-03T17:00:43.105784
revision_date: 2023-09-01T16:15:07
committer_date: 2023-09-01T16:15:07
github_id: 129,936,360
star_events_count: 1,431
fork_events_count: 410
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:30:56
gha_created_at: 2018-04-17T16:45:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 20,552
extension: py
filename: test_hamiltonian_expand.py
content:
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ``hamiltonian_expand`` transform.
"""
import numpy as np
import pytest
import pennylane as qml
from pennylane import numpy as pnp
from pennylane.queuing import AnnotatedQueue
from pennylane.tape import QuantumScript
from pennylane.transforms import hamiltonian_expand, sum_expand
# Defines the device used for all tests
dev = qml.device("default.qubit", wires=4)
# Defines circuits to be used in queueing/output tests
with AnnotatedQueue() as q_tape1:
qml.PauliX(0)
H1 = qml.Hamiltonian([1.5], [qml.PauliZ(0) @ qml.PauliZ(1)])
qml.expval(H1)
tape1 = QuantumScript.from_queue(q_tape1)
with AnnotatedQueue() as q_tape2:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
H2 = qml.Hamiltonian(
[1, 3, -2, 1, 1],
[
qml.PauliX(0) @ qml.PauliZ(2),
qml.PauliZ(2),
qml.PauliX(0),
qml.PauliX(2),
qml.PauliZ(0) @ qml.PauliX(1),
],
)
qml.expval(H2)
tape2 = QuantumScript.from_queue(q_tape2)
H3 = 1.5 * qml.PauliZ(0) @ qml.PauliZ(1) + 0.3 * qml.PauliX(1)
with AnnotatedQueue() as q3:
qml.PauliX(0)
qml.expval(H3)
tape3 = QuantumScript.from_queue(q3)
H4 = (
qml.PauliX(0) @ qml.PauliZ(2)
+ 3 * qml.PauliZ(2)
- 2 * qml.PauliX(0)
+ qml.PauliZ(2)
+ qml.PauliZ(2)
)
H4 += qml.PauliZ(0) @ qml.PauliX(1) @ qml.PauliY(2)
with AnnotatedQueue() as q4:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
qml.expval(H4)
tape4 = QuantumScript.from_queue(q4)
TAPES = [tape1, tape2, tape3, tape4]
OUTPUTS = [-1.5, -6, -1.5, -8]
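# Sanity check for the expected values: tape1 applies PauliX(0), preparing |1>
# on wire 0, so <Z0 Z1> = (-1)(+1) = -1 and H1 evaluates to 1.5 * (-1) = -1.5.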
class TestHamiltonianExpand:
"""Tests for the hamiltonian_expand transform"""
def test_ham_with_no_terms_raises(self):
"""Tests that the hamiltonian_expand transform raises an error for a Hamiltonian with no terms."""
mps = [qml.expval(qml.Hamiltonian([], []))]
qscript = QuantumScript([], mps)
with pytest.raises(
ValueError,
match="The Hamiltonian in the tape has no terms defined - cannot perform the Hamiltonian expansion.",
):
qml.transforms.hamiltonian_expand(qscript)
@pytest.mark.parametrize(("tape", "output"), zip(TAPES, OUTPUTS))
def test_hamiltonians(self, tape, output):
"""Tests that the hamiltonian_expand transform returns the correct value"""
tapes, fn = hamiltonian_expand(tape)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
qs = QuantumScript(tape.operations, tape.measurements)
tapes, fn = hamiltonian_expand(qs)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
@pytest.mark.parametrize(("tape", "output"), zip(TAPES, OUTPUTS))
def test_hamiltonians_no_grouping(self, tape, output):
"""Tests that the hamiltonian_expand transform returns the correct value
if we switch grouping off"""
tapes, fn = hamiltonian_expand(tape, group=False)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
qs = QuantumScript(tape.operations, tape.measurements)
tapes, fn = hamiltonian_expand(qs, group=False)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
def test_grouping_is_used(self):
"""Test that the grouping in a Hamiltonian is used"""
H = qml.Hamiltonian(
[1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)], grouping_type="qwc"
)
assert H.grouping_indices is not None
with AnnotatedQueue() as q:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tape = QuantumScript.from_queue(q)
tapes, _ = hamiltonian_expand(tape, group=False)
assert len(tapes) == 2
qs = QuantumScript(tape.operations, tape.measurements)
tapes, _ = hamiltonian_expand(qs, group=False)
assert len(tapes) == 2
def test_number_of_tapes(self):
"""Tests that the the correct number of tapes is produced"""
H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
with AnnotatedQueue() as q:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tape = QuantumScript.from_queue(q)
tapes, _ = hamiltonian_expand(tape, group=False)
assert len(tapes) == 3
tapes, _ = hamiltonian_expand(tape, group=True)
assert len(tapes) == 2
def test_number_of_qscripts(self):
"""Tests the correct number of quantum scripts are produced."""
H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
qs = QuantumScript(measurements=[qml.expval(H)])
tapes, _ = hamiltonian_expand(qs, group=False)
assert len(tapes) == 3
tapes, _ = hamiltonian_expand(qs, group=True)
assert len(tapes) == 2
@pytest.mark.parametrize("shots", [None, 100])
@pytest.mark.parametrize("group", [True, False])
def test_shots_attribute(self, shots, group):
"""Tests that the shots attribute is copied to the new tapes"""
H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
with AnnotatedQueue() as q:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tape = QuantumScript.from_queue(q, shots=shots)
new_tapes, _ = hamiltonian_expand(tape, group=group)
assert all(new_tape.shots == tape.shots for new_tape in new_tapes)
def test_hamiltonian_error(self):
"""Tests that the script passed to hamiltonian_expand must end with a hamiltonian."""
qscript = QuantumScript(measurements=[qml.expval(qml.PauliZ(0))])
with pytest.raises(ValueError, match=r"Passed tape must end in"):
qml.transforms.hamiltonian_expand(qscript)
@pytest.mark.autograd
def test_hamiltonian_dif_autograd(self, tol):
"""Tests that the hamiltonian_expand tape transform is differentiable with the Autograd interface"""
H = qml.Hamiltonian(
[-0.2, 0.5, 1], [qml.PauliX(1), qml.PauliZ(1) @ qml.PauliY(2), qml.PauliZ(0)]
)
var = pnp.array([0.1, 0.67, 0.3, 0.4, -0.5, 0.7, -0.2, 0.5, 1.0], requires_grad=True)
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
0.0,
-0.43657,
0.64123,
]
with AnnotatedQueue() as q:
for _ in range(2):
qml.RX(np.array(0), wires=0)
qml.RX(np.array(0), wires=1)
qml.RX(np.array(0), wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 0])
qml.expval(H)
tape = QuantumScript.from_queue(q)
def cost(x):
new_tape = tape.bind_new_parameters(x, list(range(9)))
tapes, fn = hamiltonian_expand(new_tape)
res = qml.execute(tapes, dev, qml.gradients.param_shift)
return fn(res)
assert np.isclose(cost(var), output)
grad = qml.grad(cost)(var)
assert len(grad) == len(output2)
for g, o in zip(grad, output2):
assert np.allclose(g, o, atol=tol)
@pytest.mark.tf
def test_hamiltonian_dif_tensorflow(self):
"""Tests that the hamiltonian_expand tape transform is differentiable with the Tensorflow interface"""
import tensorflow as tf
H = qml.Hamiltonian(
[-0.2, 0.5, 1], [qml.PauliX(1), qml.PauliZ(1) @ qml.PauliY(2), qml.PauliZ(0)]
)
var = tf.Variable([[0.1, 0.67, 0.3], [0.4, -0.5, 0.7]], dtype=tf.float64)
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
]
with tf.GradientTape() as gtape:
with AnnotatedQueue() as q:
for i in range(2):
qml.RX(var[i, 0], wires=0)
qml.RX(var[i, 1], wires=1)
qml.RX(var[i, 2], wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 0])
qml.expval(H)
tape = QuantumScript.from_queue(q)
tapes, fn = hamiltonian_expand(tape)
res = fn(qml.execute(tapes, dev, qml.gradients.param_shift))
assert np.isclose(res, output)
g = gtape.gradient(res, var)
assert np.allclose(list(g[0]) + list(g[1]), output2)
with AnnotatedQueue() as s_tape1:
qml.PauliX(0)
S1 = qml.s_prod(1.5, qml.prod(qml.PauliZ(0), qml.PauliZ(1)))
qml.expval(S1)
qml.expval(S1)
qml.state()
with AnnotatedQueue() as s_tape2:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
S2 = qml.sum(
qml.prod(qml.PauliX(0), qml.PauliZ(2)),
qml.s_prod(3, qml.PauliZ(2)),
qml.s_prod(-2, qml.PauliX(0)),
qml.PauliX(2),
qml.prod(qml.PauliZ(0), qml.PauliX(1)),
)
qml.expval(S2)
qml.probs(op=qml.PauliZ(0))
qml.expval(S2)
S3 = qml.sum(
qml.s_prod(1.5, qml.prod(qml.PauliZ(0), qml.PauliZ(1))), qml.s_prod(0.3, qml.PauliX(1))
)
with AnnotatedQueue() as s_tape3:
qml.PauliX(0)
qml.expval(S3)
qml.probs(wires=[1, 3])
qml.expval(qml.PauliX(1))
qml.expval(S3)
qml.probs(op=qml.PauliY(0))
S4 = qml.sum(
qml.prod(qml.PauliX(0), qml.PauliZ(2)),
qml.s_prod(3, qml.PauliZ(2)),
qml.s_prod(-2, qml.PauliX(0)),
qml.PauliZ(2),
qml.PauliZ(2),
qml.prod(qml.PauliZ(0), qml.PauliX(1), qml.PauliY(2)),
)
with AnnotatedQueue() as s_tape4:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
qml.expval(S4)
qml.expval(qml.PauliX(2))
qml.expval(S4)
qml.expval(qml.PauliX(2))
s_qscript1 = QuantumScript.from_queue(s_tape1)
s_qscript2 = QuantumScript.from_queue(s_tape2)
s_qscript3 = QuantumScript.from_queue(s_tape3)
s_qscript4 = QuantumScript.from_queue(s_tape4)
SUM_QSCRIPTS = [s_qscript1, s_qscript2, s_qscript3, s_qscript4]
SUM_OUTPUTS = [
[
-1.5,
-1.5,
np.array(
[
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
1.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
0.0 + 0.0j,
]
),
],
[-6, np.array([0.5, 0.5]), -6],
[-1.5, np.array([1.0, 0.0, 0.0, 0.0]), 0.0, -1.5, np.array([0.5, 0.5])],
[-8, 0, -8, 0],
]
class TestSumExpand:
"""Tests for the sum_expand transform"""
def test_observables_on_same_wires(self):
"""Test that even if the observables are on the same wires, if they are different operations, they are separated.
        This tests a case that gave rise to a bug that occurred due to a problem in MeasurementProcess.hash.
"""
obs1 = qml.prod(qml.PauliX(0), qml.PauliX(1))
obs2 = qml.prod(qml.PauliX(0), qml.PauliY(1))
circuit = QuantumScript(measurements=[qml.expval(obs1), qml.expval(obs2)])
batch, _ = sum_expand(circuit)
assert len(batch) == 2
assert qml.equal(batch[0][0], qml.expval(obs1))
assert qml.equal(batch[1][0], qml.expval(obs2))
@pytest.mark.parametrize(("qscript", "output"), zip(SUM_QSCRIPTS, SUM_OUTPUTS))
def test_sums(self, qscript, output):
"""Tests that the sum_expand transform returns the correct value"""
tapes, fn = sum_expand(qscript)
results = dev.batch_execute(tapes)
expval = fn(results)
assert all(qml.math.allclose(o, e) for o, e in zip(output, expval))
@pytest.mark.parametrize(("qscript", "output"), zip(SUM_QSCRIPTS, SUM_OUTPUTS))
def test_sums_no_grouping(self, qscript, output):
"""Tests that the sum_expand transform returns the correct value
if we switch grouping off"""
tapes, fn = sum_expand(qscript, group=False)
results = dev.batch_execute(tapes)
expval = fn(results)
assert all(qml.math.allclose(o, e) for o, e in zip(output, expval))
def test_grouping(self):
"""Test the grouping functionality"""
S = qml.sum(qml.PauliZ(0), qml.s_prod(2, qml.PauliX(1)), qml.s_prod(3, qml.PauliX(0)))
with AnnotatedQueue() as q:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(S)
qscript = QuantumScript.from_queue(q)
tapes, _ = sum_expand(qscript, group=True)
assert len(tapes) == 2
def test_number_of_qscripts(self):
"""Tests the correct number of quantum scripts are produced."""
S = qml.sum(qml.PauliZ(0), qml.s_prod(2, qml.PauliX(1)), qml.s_prod(3, qml.PauliX(0)))
qs = QuantumScript(measurements=[qml.expval(S)])
tapes, _ = sum_expand(qs, group=False)
assert len(tapes) == 3
tapes, _ = sum_expand(qs, group=True)
assert len(tapes) == 2
@pytest.mark.parametrize("shots", [None, 100])
@pytest.mark.parametrize("group", [True, False])
def test_shots_attribute(self, shots, group):
"""Tests that the shots attribute is copied to the new tapes"""
H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
with AnnotatedQueue() as q:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tape = QuantumScript.from_queue(q, shots=shots)
new_tapes, _ = sum_expand(tape, group=group)
assert all(new_tape.shots == tape.shots for new_tape in new_tapes)
def test_non_sum_tape(self):
"""Test that the ``sum_expand`` function returns the input tape if it does not
contain a single measurement with the expectation value of a Sum."""
with AnnotatedQueue() as q:
qml.expval(qml.PauliZ(0))
tape = QuantumScript.from_queue(q)
tapes, fn = sum_expand(tape)
assert len(tapes) == 1
assert isinstance(list(tapes[0])[0].obs, qml.PauliZ)
# Old return types return a list for a single value:
# e.g. qml.expval(qml.PauliX(0)) = [1.23]
res = [1.23]
assert fn(res) == 1.23
def test_multiple_sum_tape(self):
"""Test that the ``sum_expand`` function can expand tapes with multiple sum observables"""
@pytest.mark.autograd
def test_sum_dif_autograd(self, tol):
"""Tests that the sum_expand tape transform is differentiable with the Autograd interface"""
S = qml.sum(
qml.s_prod(-0.2, qml.PauliX(1)),
qml.s_prod(0.5, qml.prod(qml.PauliZ(1), qml.PauliY(2))),
qml.s_prod(1, qml.PauliZ(0)),
)
var = pnp.array([0.1, 0.67, 0.3, 0.4, -0.5, 0.7, -0.2, 0.5, 1], requires_grad=True)
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
0.0,
-4.36578753e-01,
6.41233474e-01,
]
with AnnotatedQueue() as q:
for _ in range(2):
qml.RX(np.array(0), wires=0)
qml.RX(np.array(0), wires=1)
qml.RX(np.array(0), wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 0])
qml.expval(S)
qscript = QuantumScript.from_queue(q)
def cost(x):
new_qscript = qscript.bind_new_parameters(x, list(range(9)))
tapes, fn = sum_expand(new_qscript)
res = qml.execute(tapes, dev, qml.gradients.param_shift)
return fn(res)
assert np.isclose(cost(var), output)
grad = qml.grad(cost)(var)
assert len(grad) == len(output2)
for g, o in zip(grad, output2):
assert np.allclose(g, o, atol=tol)
@pytest.mark.tf
def test_sum_dif_tensorflow(self):
"""Tests that the sum_expand tape transform is differentiable with the Tensorflow interface"""
import tensorflow as tf
S = qml.sum(
qml.s_prod(-0.2, qml.PauliX(1)),
qml.s_prod(0.5, qml.prod(qml.PauliZ(1), qml.PauliY(2))),
qml.s_prod(1, qml.PauliZ(0)),
)
var = tf.Variable([[0.1, 0.67, 0.3], [0.4, -0.5, 0.7]], dtype=tf.float64)
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
]
with tf.GradientTape() as gtape:
with AnnotatedQueue() as q:
for i in range(2):
qml.RX(var[i, 0], wires=0)
qml.RX(var[i, 1], wires=1)
qml.RX(var[i, 2], wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 0])
qml.expval(S)
qscript = QuantumScript.from_queue(q)
tapes, fn = sum_expand(qscript)
res = fn(qml.execute(tapes, dev, qml.gradients.param_shift))
assert np.isclose(res, output)
g = gtape.gradient(res, var)
assert np.allclose(list(g[0]) + list(g[1]), output2)
@pytest.mark.jax
def test_sum_dif_jax(self, tol):
"""Tests that the sum_expand tape transform is differentiable with the Jax interface"""
import jax
from jax import numpy as jnp
S = qml.sum(
qml.s_prod(-0.2, qml.PauliX(1)),
qml.s_prod(0.5, qml.prod(qml.PauliZ(1), qml.PauliY(2))),
qml.s_prod(1, qml.PauliZ(0)),
)
var = jnp.array([0.1, 0.67, 0.3, 0.4, -0.5, 0.7, -0.2, 0.5, 1])
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
0.0,
-4.36578753e-01,
6.41233474e-01,
]
with AnnotatedQueue() as q:
for _ in range(2):
qml.RX(np.array(0), wires=0)
qml.RX(np.array(0), wires=1)
qml.RX(np.array(0), wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 0])
qml.expval(S)
qscript = QuantumScript.from_queue(q)
def cost(x):
new_qscript = qscript.bind_new_parameters(x, list(range(9)))
tapes, fn = sum_expand(new_qscript)
res = qml.execute(tapes, dev, qml.gradients.param_shift)
return fn(res)
assert np.isclose(cost(var), output)
grad = jax.grad(cost)(var)
assert len(grad) == len(output2)
for g, o in zip(grad, output2):
assert np.allclose(g, o, atol=tol)
---
blob_id: 0e562b78ae1578dc4f6ea5d3ca8791d858e7e6d9
directory_id: 5ea4a3a0e50d2cee386f497c8449d13cd80450f9
path: /xsdata/utils/text.py
content_id: 5f9ac680ebce72afd525f426e3def91a11d73bfb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tefra/xsdata
snapshot_id: 8df028ff79cd04b29ecf24401810562b8917b7be
revision_id: 31f672af84fd040a97996871916a41b1046fe46b
branch_name: refs/heads/main
visit_date: 2023-08-17T03:20:06.912750
revision_date: 2023-08-12T15:24:40
committer_date: 2023-08-12T15:24:40
github_id: 217,130,848
star_events_count: 243
fork_events_count: 49
gha_license_id: MIT
gha_event_created_at: 2023-08-30T15:25:31
gha_created_at: 2019-10-23T18:51:12
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,961
extension: py
filename: text.py
content:
import re
import string
from typing import Any
from typing import List
from typing import Match
from typing import Tuple
stop_words = {
"",
"Any",
"Decimal",
"Dict",
"Enum",
"False",
"List",
"Meta",
"None",
"Optional",
"QName",
"True",
"Type",
"Tuple",
"Union",
"and",
"as",
"assert",
"async",
"bool",
"break",
"class",
"continue",
"def",
"del",
"dict",
"elif",
"else",
"except",
"field",
"finally",
"float",
"for",
"from",
"global",
"if",
"import",
"in",
"int",
"is",
"lambda",
"list",
"nonlocal",
"not",
"object", # py36 specific
"or",
"pass",
"raise",
"return",
"self",
"str",
"try",
"type",
"while",
"with",
"yield",
}
is_reserved = stop_words.__contains__
def prefix(value: str, sep: str = ":") -> str:
"""Return the first part of the string before the separator."""
return split(value, sep)[0]
def suffix(value: str, sep: str = ":") -> str:
"""Return the last part of the string after the separator."""
return split(value, sep)[1]
def split(value: str, sep: str = ":") -> Tuple:
"""
Separate the given string with the given separator and return a tuple of
the prefix and suffix.
If the separator isn't present in the string return None as prefix.
"""
left, _, right = value.partition(sep)
return (left, right) if right else (None, left)
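# Illustrative examples (traced through the partition logic above):
#   split("xs:string") -> ("xs", "string")
#   split("string")    -> (None, "string")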
def capitalize(value: str, **kwargs: Any) -> str:
"""Capitalize the given string."""
return value[0].upper() + value[1:]
def original_case(value: str, **kwargs: Any) -> str:
"""Return the input string without any modifications."""
return value
def pascal_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to pascal case."""
return "".join(map(str.title, split_words(value)))
def camel_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to camel case."""
result = "".join(map(str.title, split_words(value)))
return result[0].lower() + result[1:]
def mixed_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to mixed case."""
return "".join(split_words(value))
def mixed_pascal_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to mixed pascal case."""
return capitalize(mixed_case(value))
def mixed_snake_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to mixed snake case."""
return "_".join(split_words(value))
def snake_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to snake case."""
return "_".join(map(str.lower, split_words(value)))
def screaming_snake_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to screaming snake case."""
return snake_case(value, **kwargs).upper()
def kebab_case(value: str, **kwargs: Any) -> str:
"""Convert the given string to kebab case."""
return "-".join(split_words(value))
def split_words(value: str) -> List[str]:
"""Split a string on new capital letters and not alphanumeric
characters."""
words: List[str] = []
buffer: List[str] = []
previous = None
def flush():
if buffer:
words.append("".join(buffer))
buffer.clear()
for char in value:
tp = classify(char)
if tp == StringType.OTHER:
flush()
elif not previous or tp == previous:
buffer.append(char)
elif tp == StringType.UPPER and previous != StringType.UPPER:
flush()
buffer.append(char)
else:
buffer.append(char)
previous = tp
flush()
return words
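# Illustrative examples (traced through the classifier below):
#   split_words("PowerPuff") -> ["Power", "Puff"]
#   split_words("foo_bar")   -> ["foo", "bar"]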
class StringType:
UPPER = 1
LOWER = 2
NUMERIC = 3
OTHER = 4
def classify(character: str) -> int:
"""String classifier."""
code_point = ord(character)
if 64 < code_point < 91:
return StringType.UPPER
if 96 < code_point < 123:
return StringType.LOWER
if 47 < code_point < 58:
return StringType.NUMERIC
return StringType.OTHER
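# The code-point ranges above are the ASCII letter and digit blocks:
#   classify("A") -> StringType.UPPER    (ord("A") == 65, in 65..90)
#   classify("a") -> StringType.LOWER    (ord("a") == 97, in 97..122)
#   classify("7") -> StringType.NUMERIC  (ord("7") == 55, in 48..57)
#   classify("-") -> StringType.OTHER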
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_DCT = {
"\\": "\\\\",
'"': '\\"',
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), f"\\u{i:04x}")
def escape_string(value: str) -> str:
"""
Escape a string for code generation.
Source: json.encoder.py_encode_basestring
"""
def replace(match: Match) -> str:
return ESCAPE_DCT[match.group(0)]
return ESCAPE.sub(replace, value)
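# Illustrative example: escape_string('say "hi"') -> 'say \\"hi\\"'
# (each double quote gains an escaping backslash in the generated code)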
__alnum_ascii__ = set(string.digits + string.ascii_letters)
def alnum(value: str) -> str:
"""Return a lower case version of the string only with ascii alphanumerical
characters."""
return "".join(filter(__alnum_ascii__.__contains__, value)).lower()
---
blob_id: 3acf7a6415478c3ee9d33ade1024ff1a653df1c1
directory_id: 444a9480bce2035565332d4d4654244c0b5cd47b
path: /research/cv/nnUNet/src/nnunet/preprocessing/cropping.py
content_id: 1c675a488a0c92df1aa2b79767a62f10a2f81f87
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license"]
license_type: permissive
repo_name: mindspore-ai/models
snapshot_id: 7ede9c6454e77e995e674628204e1c6e76bd7b27
revision_id: eab643f51336dbf7d711f02d27e6516e5affee59
branch_name: refs/heads/master
visit_date: 2023-07-20T01:49:34.614616
revision_date: 2023-07-17T11:43:18
committer_date: 2023-07-17T11:43:18
github_id: 417,393,380
star_events_count: 301
fork_events_count: 92
gha_license_id: Apache-2.0
gha_event_created_at: 2023-05-17T11:22:28
gha_created_at: 2021-10-15T06:38:37
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,710
extension: py
filename: cropping.py
content:
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cropping function"""
import shutil
from collections import OrderedDict
from multiprocessing import Pool
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import subfiles, maybe_mkdir_p, os, pickle
def create_nonzero_mask(data):
"""create nonzero mask for data"""
from scipy.ndimage import binary_fill_holes
assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
for c in range(data.shape[0]):
this_mask = data[c] != 0
nonzero_mask = nonzero_mask | this_mask
nonzero_mask = binary_fill_holes(nonzero_mask)
return nonzero_mask
def get_bbox_from_mask(mask, outside_value=0):
"""get bbox from mask"""
mask_voxel_coords = np.where(mask != outside_value)
minzidx = int(np.min(mask_voxel_coords[0]))
maxzidx = int(np.max(mask_voxel_coords[0])) + 1
minxidx = int(np.min(mask_voxel_coords[1]))
maxxidx = int(np.max(mask_voxel_coords[1])) + 1
minyidx = int(np.min(mask_voxel_coords[2]))
maxyidx = int(np.max(mask_voxel_coords[2])) + 1
return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
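# Illustrative example: a mask with a single nonzero voxel at (z, x, y) = (1, 2, 3)
# yields [[1, 2], [2, 3], [3, 4]], i.e. half-open bounds ready for slicing.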
def crop_to_bbox(image, bbox):
"""crop to box"""
assert len(image.shape) == 3, "only supports 3d images"
resizer = (slice(bbox[0][0], bbox[0][1]), slice(bbox[1][0], bbox[1][1]), slice(bbox[2][0], bbox[2][1]))
return image[resizer]
def get_case_identifier(case):
"""get case identifier"""
case_identifier = case[0].split("/")[-1].split(".nii.gz")[0][:-5]
return case_identifier
def get_case_identifier_from_npz(case):
"""get case identifier from npz"""
case_identifier = case.split("/")[-1][:-4]
return case_identifier
def load_case_from_list_of_files(data_files, seg_file=None):
"""load case from list of files"""
assert isinstance(data_files, (list, tuple)), "case must be either a list or a tuple"
properties = OrderedDict()
data_itk = [sitk.ReadImage(f) for f in data_files]
properties["original_size_of_raw_data"] = np.array(data_itk[0].GetSize())[[2, 1, 0]]
properties["original_spacing"] = np.array(data_itk[0].GetSpacing())[[2, 1, 0]]
properties["list_of_data_files"] = data_files
properties["seg_file"] = seg_file
properties["itk_origin"] = data_itk[0].GetOrigin()
properties["itk_spacing"] = data_itk[0].GetSpacing()
properties["itk_direction"] = data_itk[0].GetDirection()
data_npy = np.vstack([sitk.GetArrayFromImage(d)[None] for d in data_itk])
if seg_file is not None:
seg_itk = sitk.ReadImage(seg_file)
seg_npy = sitk.GetArrayFromImage(seg_itk)[None].astype(np.float32)
else:
seg_npy = None
return data_npy.astype(np.float32), seg_npy, properties
def crop_to_nonzero(data, seg=None, nonzero_label=-1):
"""crop data nonzero region"""
nonzero_mask = create_nonzero_mask(data)
bbox = get_bbox_from_mask(nonzero_mask, 0)
cropped_data = []
for c in range(data.shape[0]):
cropped = crop_to_bbox(data[c], bbox)
cropped_data.append(cropped[None])
data = np.vstack(cropped_data)
if seg is not None:
cropped_seg = []
for c in range(seg.shape[0]):
cropped = crop_to_bbox(seg[c], bbox)
cropped_seg.append(cropped[None])
seg = np.vstack(cropped_seg)
nonzero_mask = crop_to_bbox(nonzero_mask, bbox)[None]
if seg is not None:
seg[(seg == 0) & (nonzero_mask == 0)] = nonzero_label
else:
nonzero_mask = nonzero_mask.astype(int)
nonzero_mask[nonzero_mask == 0] = nonzero_label
nonzero_mask[nonzero_mask > 0] = 0
seg = nonzero_mask
return data, seg, bbox
def get_patient_identifiers_from_cropped_files(folder):
"""get patient identifiers from cropped files"""
return [i.split("/")[-1][:-4] for i in subfiles(folder, join=True, suffix=".npz")]
class ImageCropper():
def __init__(self, num_threads, output_folder=None):
"""
        This one finds a mask of nonzero elements (an element is kept if it is nonzero in any modality) and crops the image to that mask.
In the case of BRaTS and ISLES data this results in a significant reduction in image size
"""
self.output_folder = output_folder
self.num_threads = num_threads
if self.output_folder is not None:
maybe_mkdir_p(self.output_folder)
@staticmethod
def crop(data, properties, seg=None):
"""Image cropper for crop"""
shape_before = data.shape
data, seg, bbox = crop_to_nonzero(data, seg, nonzero_label=-1)
shape_after = data.shape
print("before crop:", shape_before, "after crop:", shape_after, "spacing:",
np.array(properties["original_spacing"]), "\n")
properties["crop_bbox"] = bbox
properties['classes'] = np.unique(seg)
seg[seg < -1] = 0
properties["size_after_cropping"] = data[0].shape
return data, seg, properties
@staticmethod
def crop_from_list_of_files(data_files, seg_file=None):
"crop from list of files"
data, seg, properties = load_case_from_list_of_files(data_files, seg_file)
return ImageCropper.crop(data, properties, seg)
def load_crop_save(self, case, case_identifier, overwrite_existing=False):
"""load crop save case"""
try:
print(case_identifier)
if overwrite_existing \
or (not os.path.isfile(os.path.join(self.output_folder, "%s.npz" % case_identifier))
or not os.path.isfile(os.path.join(self.output_folder, "%s.pkl" % case_identifier))):
data, seg, properties = self.crop_from_list_of_files(case[:-1], case[-1])
all_data = np.vstack((data, seg))
np.savez_compressed(os.path.join(self.output_folder, "%s.npz" % case_identifier), data=all_data)
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
except Exception as e:
print("Exception in", case_identifier, ":")
print(e)
raise e
def get_list_of_cropped_files(self):
"""get list of cropped files"""
return subfiles(self.output_folder, join=True, suffix=".npz")
def get_patient_identifiers_from_cropped_files(self):
"""get patient identifiers from cropped files"""
return [i.split("/")[-1][:-4] for i in self.get_list_of_cropped_files()]
def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):
"""
        Also copies the ground-truth nifti segmentations into the preprocessed folder so that they can be used
        for evaluation on the cluster.
"""
if output_folder is not None:
self.output_folder = output_folder
output_folder_gt = os.path.join(self.output_folder, "gt_segmentations")
maybe_mkdir_p(output_folder_gt)
for _, case in enumerate(list_of_files):
if case[-1] is not None:
shutil.copy(case[-1], output_folder_gt)
list_of_args = []
for _, case in enumerate(list_of_files):
case_identifier = get_case_identifier(case)
list_of_args.append((case, case_identifier, overwrite_existing))
p = Pool(self.num_threads)
p.starmap(self.load_crop_save, list_of_args)
p.close()
p.join()
def load_properties(self, case_identifier):
"""load properties from identifier"""
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return properties
def save_properties(self, case_identifier, properties):
"""save properties for case_identifier"""
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
---
blob_id: 6a13db60ad9cf8ea2294d7e2a87ec008de68daf3
directory_id: a825b6f0d5aced82d5c2a0ec8ba4ff14e1631456
path: /pysyncobj/tcp_connection.py
content_id: 6260b601be5e6ed4ddeb59dff6dddafc10586feb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: bakwc/PySyncObj
snapshot_id: 76cc6457d6b15c876b931372fdc64220378ab441
revision_id: 3ae622afbc92f506820a835a64595db51ba3de1e
branch_name: refs/heads/master
visit_date: 2023-08-20T00:50:31.259996
revision_date: 2023-07-27T09:15:06
committer_date: 2023-07-27T09:15:06
github_id: 50,359,688
star_events_count: 690
fork_events_count: 118
gha_license_id: MIT
gha_event_created_at: 2023-07-27T09:15:08
gha_created_at: 2016-01-25T15:26:03
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,895
extension: py
filename: tcp_connection.py
content:
import time
import socket
from sys import platform
import zlib
import struct
import pysyncobj.pickle as pickle
import pysyncobj.win_inet_pton
from .poller import POLL_EVENT_TYPE
from .monotonic import monotonic as monotonicTime
class CONNECTION_STATE:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
def _getAddrType(addr):
try:
socket.inet_aton(addr)
return socket.AF_INET
except socket.error:
pass
try:
socket.inet_pton(socket.AF_INET6, addr)
return socket.AF_INET6
except socket.error:
pass
raise Exception('unknown address type')
def set_keepalive_linux(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
def set_keepalive_osx(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
def set_keepalive_windows(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, after_idle_sec * 1000, interval_sec * 1000))
def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
if platform == "linux" or platform == "linux2":
set_keepalive_linux(sock, after_idle_sec, interval_sec, max_fails)
elif platform == "darwin":
set_keepalive_osx(sock, after_idle_sec, interval_sec, max_fails)
elif platform == "win32":
set_keepalive_windows(sock, after_idle_sec, interval_sec, max_fails)
class TcpConnection(object):
def __init__(self, poller, onMessageReceived = None, onConnected = None, onDisconnected = None,
socket=None, timeout=10.0, sendBufferSize = 2 ** 13, recvBufferSize = 2 ** 13,
keepalive=None):
self.sendRandKey = None
self.recvRandKey = None
self.recvLastTimestamp = 0
self.encryptor = None
self.__socket = socket
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
self.__timeout = timeout
self.__poller = poller
self.__keepalive = keepalive
if socket is not None:
self.__socket = socket
self.__fileno = socket.fileno()
self.__state = CONNECTION_STATE.CONNECTED
self.setSockoptKeepalive()
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
else:
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = None
self.__onMessageReceived = onMessageReceived
self.__onConnected = onConnected
self.__onDisconnected = onDisconnected
self.__sendBufferSize = sendBufferSize
self.__recvBufferSize = recvBufferSize
def setSockoptKeepalive(self):
if self.__socket is None:
return
if self.__keepalive is None:
return
set_keepalive(
self.__socket,
self.__keepalive[0],
self.__keepalive[1],
self.__keepalive[2],
)
def setOnConnectedCallback(self, onConnected):
self.__onConnected = onConnected
def setOnMessageReceivedCallback(self, onMessageReceived):
self.__onMessageReceived = onMessageReceived
def setOnDisconnectedCallback(self, onDisconnected):
self.__onDisconnected = onDisconnected
def connect(self, host, port):
if host is None:
return False
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = socket.socket(_getAddrType(host), socket.SOCK_STREAM)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize)
self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.setSockoptKeepalive()
self.__socket.setblocking(0)
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
try:
self.__socket.connect((host, port))
except socket.error as e:
if e.errno not in (socket.errno.EINPROGRESS, socket.errno.EWOULDBLOCK):
return False
self.__fileno = self.__socket.fileno()
self.__state = CONNECTION_STATE.CONNECTING
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
return True
def send(self, message):
if self.sendRandKey:
message = (self.sendRandKey, message)
data = zlib.compress(pickle.dumps(message), 3)
if self.encryptor:
data = self.encryptor.encrypt_at_time(data, int(monotonicTime()))
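        # Wire format: a 4-byte native-endian length header (struct 'i') followed
        # by the zlib-compressed (and optionally encrypted) pickled payload.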
data = struct.pack('i', len(data)) + data
self.__writeBuffer += data
self.__trySendBuffer()
def fileno(self):
return self.__fileno
def disconnect(self):
needCallDisconnect = False
if self.__onDisconnected is not None and self.__state != CONNECTION_STATE.DISCONNECTED:
needCallDisconnect = True
self.sendRandKey = None
self.recvRandKey = None
self.recvLastTimestamp = 0
if self.__socket is not None:
self.__socket.close()
self.__socket = None
if self.__fileno is not None:
self.__poller.unsubscribe(self.__fileno)
self.__fileno = None
self.__writeBuffer = bytes()
self.__readBuffer = bytes()
self.__state = CONNECTION_STATE.DISCONNECTED
if needCallDisconnect:
self.__onDisconnected()
def getSendBufferSize(self):
return len(self.__writeBuffer)
def __processConnection(self, descr, eventType):
poller = self.__poller
if descr != self.__fileno:
poller.unsubscribe(descr)
return
if eventType & POLL_EVENT_TYPE.ERROR:
self.disconnect()
return
self.__processConnectionTimeout()
if self.state == CONNECTION_STATE.DISCONNECTED:
return
if eventType & POLL_EVENT_TYPE.READ or eventType & POLL_EVENT_TYPE.WRITE:
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return
if self.__state == CONNECTION_STATE.CONNECTING:
if self.__onConnected is not None:
self.__onConnected()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
self.__state = CONNECTION_STATE.CONNECTED
self.__lastReadTime = monotonicTime()
return
if eventType & POLL_EVENT_TYPE.WRITE:
self.__trySendBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
event = POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR
if len(self.__writeBuffer) > 0:
event |= POLL_EVENT_TYPE.WRITE
poller.subscribe(descr, self.__processConnection, event)
if eventType & POLL_EVENT_TYPE.READ:
self.__tryReadBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
while True:
message = self.__processParseMessage()
if message is None:
break
if self.__onMessageReceived is not None:
self.__onMessageReceived(message)
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
def __processConnectionTimeout(self):
if monotonicTime() - self.__lastReadTime > self.__timeout:
self.disconnect()
return
def __trySendBuffer(self):
self.__processConnectionTimeout()
if self.state == CONNECTION_STATE.DISCONNECTED:
return
while self.__processSend():
pass
def __processSend(self):
if not self.__writeBuffer:
return False
try:
res = self.__socket.send(self.__writeBuffer)
if res < 0:
self.disconnect()
return False
if res == 0:
return False
self.__writeBuffer = self.__writeBuffer[res:]
return True
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
def __tryReadBuffer(self):
while self.__processRead():
pass
self.__lastReadTime = monotonicTime()
def __processRead(self):
try:
incoming = self.__socket.recv(self.__recvBufferSize)
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return False
if not incoming:
self.disconnect()
return False
self.__readBuffer += incoming
return True
def __processParseMessage(self):
if len(self.__readBuffer) < 4:
return None
l = struct.unpack('i', self.__readBuffer[:4])[0]
if len(self.__readBuffer) - 4 < l:
return None
data = self.__readBuffer[4:4 + l]
try:
if self.encryptor:
dataTimestamp = self.encryptor.extract_timestamp(data)
assert dataTimestamp >= self.recvLastTimestamp
self.recvLastTimestamp = dataTimestamp
# Unfortunately we can't get a timestamp and data in one go
data = self.encryptor.decrypt(data)
message = pickle.loads(zlib.decompress(data))
if self.recvRandKey:
randKey, message = message
assert randKey == self.recvRandKey
except:
# Why no logging of security errors?
self.disconnect()
return None
self.__readBuffer = self.__readBuffer[4 + l:]
return message
@property
def state(self):
return self.__state
---
blob_id: cfe8803fb0d0878cc21ec74d87d8fab115e7ecf7
directory_id: 2a1b8a671aceda6bc446f8ce26400aa84fa444a6
path: /Packs/CommonScripts/Scripts/ResolveShortenedURL/ResolveShortenedURL_test.py
content_id: c14f318a5947b270fd0cdc8cdf1b96699c44d216
detected_licenses: ["MIT"]
license_type: permissive
repo_name: demisto/content
snapshot_id: 6d4722d46f0ff0beea2748e9f7de585bf91a78b4
revision_id: 890def5a0e0ae8d6eaa538148249ddbc851dbb6b
branch_name: refs/heads/master
visit_date: 2023-09-04T00:02:25.618032
revision_date: 2023-09-03T21:56:22
committer_date: 2023-09-03T21:56:22
github_id: 60,525,392
star_events_count: 1,023
fork_events_count: 1,921
gha_license_id: MIT
gha_event_created_at: 2023-09-14T20:55:24
gha_created_at: 2016-06-06T12:17:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 12,600
extension: py
filename: ResolveShortenedURL_test.py
content:
from pathlib import Path
import pytest
from requests import Response
from ResolveShortenedURL import *
def load_test_data(folder: str, file_name: str) -> dict:
"""
A function for loading and returning data from json files within the "test_data" folder.
Args:
folder (str): Name of the parent folder of the file within `test_data`.
file_name (str): Name of a json file to load data from.
Returns:
dict: Dictionary data loaded from the json file.
"""
with open(Path("test_data") / folder / f"{file_name}.json", "r") as f:
return json.load(f)
class TestLongurlInService:
@pytest.mark.parametrize("args, mock_files_prefix, mock_files_count, expected_output",
[
( # Generic test
{"url": "https://short.url/a", "redirect_limit": 0},
"nested_unshorten",
3,
load_test_data("longurl.in", "nested_unshorten_expected_output"),
),
( # Test a case where redirect is stopped because of `redirect_limit`
{"url": "https://short.url/a", "redirect_limit": 1},
"nested_unshorten",
2,
load_test_data("longurl.in", "limited_unshorten_expected_output"),
),
( # Test a case where the URL is invalid
{"url": "https://short.url/a", "redirect_limit": 1},
"nested_unshorten",
2,
load_test_data("longurl.in", "limited_unshorten_expected_output"),
),
])
def test_nested_shortened_url(self, mocker, args: dict, mock_files_prefix: str,
mock_files_count: int, expected_output: dict):
"""
Given: Parameters for unshortening a URL that redirects to another shortened URL using longurl.in.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
"""
mock_data = [load_test_data("longurl.in", mock_files_prefix + f"_{i}")
for i in range(mock_files_count)]
# Add the last response again, as we try to unshorten the final URL since we don't know that it's not shortened.
mock_data.append(mock_data[-1])
def redirect_side_effect() -> dict:
for d in mock_data:
yield d
mocker.patch.object(BaseClient, "_http_request", side_effect=redirect_side_effect())
result = unshorten_url(service_name="longurl.in", url=args["url"], redirect_limit=args["redirect_limit"])
assert (result.outputs["RedirectCount"] <= args["redirect_limit"] or args["redirect_limit"] == 0)
assert result.outputs == expected_output
@pytest.mark.parametrize("args, response_mock, expected_output",
[
( # Test a case where the URL is invalid
{"url": "https://invalid.url", "redirect_limit": 0},
load_test_data("longurl.in", "invalid_url"),
load_test_data("longurl.in", "invalid_url_expected_output"),
),
])
def test_single_shortened_url(self, mocker, args: dict, response_mock: dict, expected_output: dict):
"""
Given: Parameters for unshortening a shortened URL using longurl.in.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
"""
mocker.patch.object(BaseClient, "_http_request", return_value=response_mock)
result = unshorten_url(service_name="longurl.in", url=args["url"], redirect_limit=args["redirect_limit"])
assert result.outputs == expected_output
class TestUnshortenMeService:
@pytest.mark.parametrize("args, mock_files_prefix, mock_files_count, expected_output",
[
( # Generic test
{"url": "https://short.url/a", "redirect_limit": 0},
"nested_unshorten",
3,
load_test_data("unshorten.me", "nested_unshorten_expected_output"),
),
( # Test a case where redirect is stopped because of `redirect_limit`
{"url": "https://short.url/a", "redirect_limit": 1},
"nested_unshorten",
2,
load_test_data("unshorten.me", "limited_unshorten_expected_output"),
),
])
def test_nested_shortened_url(self, mocker, args: dict, mock_files_prefix: str,
mock_files_count: int, expected_output: dict):
"""
Given: Parameters for unshortening a URL that redirects to another shortened URL using unshorten.me.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
"""
mock_data = [load_test_data("unshorten.me", mock_files_prefix + f"_{i}")
for i in range(mock_files_count)]
# Add the last response again, as we try to unshorten the final URL since we don't know that it's not shortened.
mock_data.append(mock_data[-1])
def redirect_side_effect() -> dict:
for d in mock_data:
yield d
mocker.patch.object(BaseClient, "_http_request", side_effect=redirect_side_effect())
result = unshorten_url(service_name="unshorten.me", url=args["url"], redirect_limit=args["redirect_limit"])
assert (result.outputs["RedirectCount"] <= args["redirect_limit"] or args["redirect_limit"] == 0)
assert result.outputs == expected_output
@pytest.mark.parametrize("args, response_mock, expected_output",
[
( # Test a case where the URL is invalid
{"url": "https://invalid.url", "redirect_limit": 0},
load_test_data("unshorten.me", "invalid_url"),
load_test_data("unshorten.me", "invalid_url_expected_output"),
),
])
def test_single_shortened_url(self, mocker, args: dict, response_mock: dict, expected_output: dict):
"""
Given: Parameters for unshortening a shortened URL using unshorten.me.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
"""
mocker.patch.object(BaseClient, "_http_request", return_value=response_mock)
result = unshorten_url(service_name="unshorten.me", url=args["url"], redirect_limit=args["redirect_limit"])
assert result.outputs == expected_output
class TestBuiltInService:
@staticmethod
def get_response_mock(url: str, redirect_url: str | None = None, response_code: int | None = 200) -> Response:
"""
Create a mock requests.Response object.
Args:
url (str): URL to set as the `url` attribute (should be the last URL in the redirect chain).
redirect_url (str | None, optional): URL to redirect to. Defaults to None.
response_code (int | None, optional): Response code to set as the `status_code` attribute.
            Not relevant if `redirect_url` is set (301 will be used). Defaults to 200.
Returns:
Response: A requests.Response object to use as a mock.
"""
response_mock = Response()
response_mock.url = url
if redirect_url is not None:
response_mock.status_code = 301
response_mock.headers["Location"] = redirect_url
else:
response_mock.status_code = response_code
return response_mock
@pytest.mark.parametrize("args, responses, expected_output",
[
( # Generic test
{"url": "https://short.url/a", "redirect_limit": 0},
[get_response_mock(url="https://short.url/a",
redirect_url="https://short.url/b"),
get_response_mock(url="https://short.url/b",
redirect_url="https://xsoar.pan.dev/"),
get_response_mock(url="https://xsoar.pan.dev/")],
load_test_data("built-in", "nested_unshorten_expected_output"),
),
( # Test a case where redirect is stopped because of `redirect_limit`
{"url": "https://short.url/a", "redirect_limit": 1},
[get_response_mock(url="https://short.url/a",
redirect_url="https://short.url/b"),
get_response_mock(url="https://short.url/b",
redirect_url="https://xsoar.pan.dev/"),
get_response_mock(url="https://xsoar.pan.dev/")],
load_test_data("built-in", "limited_unshorten_expected_output"),
)
])
def test_nested_shortened_url(self, mocker, args: dict, responses: list[Response], expected_output: dict):
"""
Given: Parameters for unshortening a URL that redirects to another shortened URL using Python's requests lib.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
"""
def redirect_side_effect() -> Response:
for response in responses:
yield response
mocker.patch.object(BaseClient, "_http_request", side_effect=redirect_side_effect())
result = unshorten_url(service_name="Built-In", url=args["url"], redirect_limit=args["redirect_limit"])
assert (result.outputs["RedirectCount"] <= args["redirect_limit"] or args["redirect_limit"] == 0)
assert result.outputs == expected_output
@pytest.mark.parametrize("args, response, expected_output",
[
( # Test a case where the URL is invalid
{"url": "https://invalid.url", "redirect_limit": 0},
None,
load_test_data("built-in", "invalid_url_expected_output"),
),
])
def test_single_shortened_url(self, mocker, args: dict, response: Response | None, expected_output: dict):
"""
Given: Parameters for unshortening a shortened URL using Python's requests lib.
When: Calling the `unshorten_url` function.
Then: Ensure the context output is returned as expected, and that redirect_limit is working as expected.
Note:
            Use `None` as the response to make the mock raise a connection error.
"""
if response is None:
mocker.patch.object(requests.sessions.Session, "request", side_effect=requests.exceptions.ConnectionError())
else:
mocker.patch.object(BaseClient, "_http_request", return_value=response)
result = unshorten_url(service_name="Built-In", url=args["url"], redirect_limit=args["redirect_limit"])
assert result.outputs == expected_output
---
blob_id: d46d6ef597f294daecce0c622e864bf21e0cb8c3
directory_id: edc1134436a79ca883a0d25f3c8dfffc4235c514
path: /tests/contrib/tracking/test_measurements.py
content_id: f946311906478c7e818281dfe04b999287753718
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: pyro-ppl/pyro
snapshot_id: 2283d8ca528fc090c724a3a6e0f344e505ebbf77
revision_id: 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81
branch_name: refs/heads/dev
visit_date: 2023-08-18T00:35:28.014919
revision_date: 2023-08-06T21:01:36
committer_date: 2023-08-06T21:01:36
github_id: 94,506,832
star_events_count: 3,647
fork_events_count: 606
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:52:14
gha_created_at: 2017-06-16T05:03:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 934
extension: py
filename: test_measurements.py
content:
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from pyro.contrib.tracking.measurements import PositionMeasurement
def test_PositionMeasurement():
dimension = 3
time = 0.232
frame_num = 5
measurement = PositionMeasurement(
mean=torch.rand(dimension),
cov=torch.eye(dimension),
time=time,
frame_num=frame_num,
)
assert measurement.dimension == dimension
x = torch.rand(2 * dimension)
assert measurement(x).shape == (dimension,)
assert measurement.mean.shape == (dimension,)
assert measurement.cov.shape == (dimension, dimension)
assert measurement.time == time
assert measurement.frame_num == frame_num
assert measurement.geodesic_difference(
torch.rand(dimension), torch.rand(dimension)
).shape == (dimension,)
assert measurement.jacobian().shape == (dimension, 2 * dimension)
---
blob_id: d34369185b7df8dbfc667583fe3a7694be35ec96
directory_id: d110546d747d7e3865ce5742d5fca09f404623c0
path: /tests/pytests/functional/states/file/test_pruned.py
content_id: 80d4f94b6c2ba9bec902e766705fa009aa12655e
detected_licenses: ["Apache-2.0", "MIT", "BSD-2-Clause"]
license_type: permissive
repo_name: saltstack/salt
snapshot_id: 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
revision_id: 1ef90cbdc7203f97775edb7666db86a41eb9fc15
branch_name: refs/heads/master
visit_date: 2023-07-19T20:56:20.210556
revision_date: 2023-06-29T23:12:28
committer_date: 2023-07-19T11:47:47
github_id: 1,390,248
star_events_count: 11,026
fork_events_count: 6,296
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:45:37
gha_created_at: 2011-02-20T20:16:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,502
extension: py
filename: test_pruned.py
content:
import pytest
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.slow_test,
]
@pytest.fixture(scope="module")
def file(states):
return states.file
@pytest.fixture(scope="function")
def single_dir_with_file(tmp_path):
file = tmp_path / "stuff.txt"
file.write_text("things")
yield str(tmp_path)
@pytest.fixture(scope="function")
def nested_empty_dirs(tmp_path):
num_root = 2
num_mid = 4
num_last = 2
for root in range(1, num_root + 1):
for mid in range(1, num_mid + 1):
for last in range(1, num_last + 1):
nest = (
tmp_path
/ "root{}".format(root)
/ "mid{}".format(mid)
/ "last{}".format(last)
)
nest.mkdir(parents=True, exist_ok=True)
yield str(tmp_path)
@pytest.fixture(scope="function")
def nested_dirs_with_files(tmp_path):
num_root = 2
num_mid = 4
num_last = 2
for root in range(1, num_root + 1):
for mid in range(1, num_mid + 1):
for last in range(1, num_last + 1):
nest = (
tmp_path
/ "root{}".format(root)
/ "mid{}".format(mid)
/ "last{}".format(last)
)
nest.mkdir(parents=True, exist_ok=True)
if last % 2:
last_file = nest / "stuff.txt"
last_file.write_text("things")
yield str(tmp_path)
def test_pruned_failure(file, single_dir_with_file):
ret = file.pruned(name=single_dir_with_file)
assert ret.result is False
assert not ret.changes["deleted"]
assert len(ret.changes["errors"]) == 1
assert ret.comment == "Failed to remove directory {}".format(single_dir_with_file)
def test_pruned_success_recurse_and_deleted(file, nested_empty_dirs):
ret = file.pruned(name=nested_empty_dirs, recurse=True)
assert ret.result is True
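    # 2 roots x 4 mids x 2 lasts = 16 leaf dirs, plus 8 mids, 2 roots and the
    # now-empty top-level directory itself: 16 + 8 + 2 + 1 = 27.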
assert len(ret.changes["deleted"]) == 27
assert ret.comment == "Recursively removed empty directories under {}".format(
nested_empty_dirs
)
def test_pruned_success_ignore_errors_and_deleted(file, nested_dirs_with_files):
ret = file.pruned(name=nested_dirs_with_files, ignore_errors=True)
assert ret.result is True
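    # Only the 8 leaf dirs with an even `last` index are empty (the odd ones
    # received a stuff.txt), so exactly 8 directories get pruned.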
assert len(ret.changes["deleted"]) == 8
assert ret.comment == "Recursively removed empty directories under {}".format(
nested_dirs_with_files
)
---
blob_id: eaab593330465305c05934a2de2bfb1ef5152829
directory_id: a2e87d991a31e103b6142c9cc71aeef7158e196d
path: /src/etc/server.py
content_id: f6cc4d62b0be4caa1542c33321b185c12b59eec5
detected_licenses: []
license_type: no_license
repo_name: ytanaka-/menthas
snapshot_id: 3b5ca7c486856a929d67703209ce73d99bee5667
revision_id: db07569241f5995f0582766ff118e5d2b4c2dde2
branch_name: refs/heads/master
visit_date: 2023-02-01T16:43:30.330526
revision_date: 2023-01-23T20:21:54
committer_date: 2023-01-23T20:21:54
github_id: 35,497,180
star_events_count: 164
fork_events_count: 21
gha_license_id: null
gha_event_created_at: 2023-09-11T17:52:55
gha_created_at: 2015-05-12T15:42:13
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,807
extension: py
filename: server.py
content:
from sklearn.metrics.pairwise import cosine_similarity
from bottle import route, request, abort, run
from gensim.models import Word2Vec as word2vec
import os
import json
import numpy as np
import pymongo
MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017/")
client = pymongo.MongoClient(MONGO_URL)
MONGO_DB_NAME = os.getenv("MONGO_DB_NAME", "menthas-example")
Page = client[MONGO_DB_NAME].pages
Categories = client[MONGO_DB_NAME].categories
MODEL_FILE_PATH = os.getenv("MODEL_FILE_PATH", "./src/etc/model/wiki-deepwalk.model")
_model = word2vec.load(MODEL_FILE_PATH)
vector_dimension = 32
def feat2vec(features):
vec = np.zeros(vector_dimension)
for w in features:
if w in _model.wv:
vec += _model.wv[w]
return vec
categories = Categories.find({})
category_feats = {}
for category in categories:
category_feats[category["name"]] = feat2vec(category["tags"])
@route('/api/similarity')
def similarity():
category = request.query.category
features = request.query.features
if not category or not features:
abort(500, 'invalid parameter.')
feature_list = features.split(",")
if category not in category_feats:
abort(500, 'category not found.')
category_vec = category_feats[category]
feature_vec = feat2vec(feature_list)
similarity = cosine_similarity([category_vec], [feature_vec])[0][0]
if similarity != 0.0:
        # With the deepwalk model, even the least related news only bottoms out around -0.10 to -0.15
        # Normalize to rescale the score into the -1 to 1 range
min_similarity = -0.15
similarity = 2.*(similarity - min_similarity)/(1 - min_similarity) - 1
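        # Worked example: a raw similarity of 0.425 maps to
        # 2 * (0.425 + 0.15) / 1.15 - 1 = 0.0, the midpoint of the new range.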
return json.dumps({ "category": category, "similarity": similarity })
run(host='localhost', port=5000)
---
blob_id: 7486b98dd31e79c3197cdbde96406a8f05aa42f2
directory_id: a5622dafafd782af153be2bc0bd19cb086fd07b2
path: /rest-service/manager_rest/test/endpoints/test_filters.py
content_id: 5cb80e5517d6e483b2cee1a39d0573050941ec41
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: cloudify-cosmo/cloudify-manager
snapshot_id: 8b2d226ad5a9dd8103d7690b2f8081bef24078e1
revision_id: c0de6442e1d7653fad824d75e571802a74eee605
branch_name: refs/heads/master
visit_date: 2023-09-06T09:11:51.753912
revision_date: 2023-09-04T08:01:58
committer_date: 2023-09-04T08:01:58
github_id: 18,326,574
star_events_count: 146
fork_events_count: 84
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-04T08:02:00
gha_created_at: 2014-04-01T11:06:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 23,983
extension: py
filename: test_filters.py
content:
from cloudify.models_states import VisibilityState
from cloudify_rest_client.exceptions import CloudifyClientError
from manager_rest.test import base_test
from manager_rest.utils import get_formatted_timestamp
from manager_rest.manager_exceptions import BadFilterRule
from manager_rest.storage import models
from manager_rest.constants import AttrsOperator, LabelsOperator
from manager_rest.rest.filters_utils import (FilterRule,
create_filter_rules_list)
FILTER_ID = 'filter'
LEGAL_FILTER_RULES = [
FilterRule('a', ['b'], LabelsOperator.ANY_OF, 'label'),
FilterRule('a', ['b@c#d& ,:'], LabelsOperator.ANY_OF, 'label'),
FilterRule('e', ['f', 'g*%'], LabelsOperator.ANY_OF, 'label'),
FilterRule('c', ['d'], LabelsOperator.NOT_ANY_OF, 'label'),
FilterRule('h', ['i', 'j'], LabelsOperator.NOT_ANY_OF, 'label'),
FilterRule('k', [], LabelsOperator.IS_NULL, 'label'),
FilterRule('l', [], LabelsOperator.IS_NOT_NULL, 'label'),
FilterRule('created_by', ['user'], AttrsOperator.ANY_OF, 'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.ANY_OF,
'attribute'),
FilterRule('created_by', ['user'], AttrsOperator.NOT_ANY_OF, 'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.NOT_ANY_OF,
'attribute'),
FilterRule('created_by', ['user'], AttrsOperator.CONTAINS, 'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.CONTAINS,
'attribute'),
FilterRule('created_by', ['user'], AttrsOperator.NOT_CONTAINS,
'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.NOT_CONTAINS,
'attribute'),
FilterRule('created_by', ['user'], AttrsOperator.STARTS_WITH, 'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.STARTS_WITH,
'attribute'),
FilterRule('created_by', ['user'], AttrsOperator.ENDS_WITH, 'attribute'),
FilterRule('created_by', ['user', 'admin'], AttrsOperator.ENDS_WITH,
'attribute'),
FilterRule('created_by', [], AttrsOperator.IS_NOT_EMPTY, 'attribute'),
]
BLUEPRINT_SPECIFIC_FILTER_RULES = [
FilterRule('state', ['uploaded'], AttrsOperator.ANY_OF, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.ANY_OF,
'attribute'),
FilterRule('state', ['uploaded'], AttrsOperator.NOT_ANY_OF, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.NOT_ANY_OF,
'attribute'),
FilterRule('state', ['uploaded'], AttrsOperator.CONTAINS, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.CONTAINS,
'attribute'),
FilterRule('state', ['uploaded'], AttrsOperator.NOT_CONTAINS, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.NOT_CONTAINS,
'attribute'),
FilterRule('state', ['uploaded'], AttrsOperator.STARTS_WITH, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.STARTS_WITH,
'attribute'),
FilterRule('state', ['uploaded'], AttrsOperator.ENDS_WITH, 'attribute'),
FilterRule('state', ['uploaded', 'invalid'], AttrsOperator.ENDS_WITH,
'attribute'),
]
class FiltersFunctionalityBaseCase(base_test.BaseServerTestCase):
__test__ = False
LABELS = [{'a': 'b'}, {'a': 'z'}, {'c': 'd'}]
LABELS_2 = [{'a': 'b'}, {'c': 'z'}, {'e': 'f'}]
LABELS_3 = [{'g': 'f'}]
def setUp(self, resource_model):
super().setUp()
self.resource_model = resource_model
def _test_labels_filters_applied(self,
res_1_id,
res_2_id,
res_3_id,
res_4_id):
self.assert_filters_applied([('a', ['b'], LabelsOperator.ANY_OF,
'label')],
{res_1_id, res_2_id}, self.resource_model)
self.assert_filters_applied([('c', ['z'], LabelsOperator.NOT_ANY_OF,
'label')],
{res_1_id}, self.resource_model)
self.assert_filters_applied([('a', ['y', 'z'], LabelsOperator.ANY_OF,
'label'),
('c', ['d'], LabelsOperator.ANY_OF,
'label')],
{res_1_id}, self.resource_model)
self.assert_filters_applied([('a', ['b'], LabelsOperator.ANY_OF,
'label'),
('e', [], LabelsOperator.IS_NOT_NULL,
'label')],
{res_2_id}, self.resource_model)
self.assert_filters_applied([('a', ['b'], LabelsOperator.ANY_OF,
'label'),
('e', [], LabelsOperator.IS_NULL,
'label')],
{res_1_id}, self.resource_model)
self.assert_filters_applied([('a', [], LabelsOperator.IS_NULL,
'label')], {res_3_id, res_4_id},
self.resource_model)
self.assert_filters_applied([('c', ['z'], LabelsOperator.IS_NOT,
'label')],
{res_1_id, res_3_id, res_4_id},
self.resource_model)
def test_filter_rule_not_dictionary_fails(self):
with self.assertRaisesRegex(BadFilterRule, 'not a dictionary'):
# calling with the wrong type on purpose
create_filter_rules_list(
['a'], self.resource_model) # type: ignore
def test_filter_rule_missing_entry_fails(self):
with self.assertRaisesRegex(BadFilterRule, 'missing'):
create_filter_rules_list([{'key': 'key1'}], self.resource_model)
def test_filter_rule_key_not_text_type_fails(self):
with self.assertRaisesRegex(BadFilterRule, 'must be a string'):
err_filter_rule = {'key': 1, 'values': ['b'],
'operator': LabelsOperator.ANY_OF,
'type': 'label'}
create_filter_rules_list([err_filter_rule], self.resource_model)
def test_filter_rule_value_not_list_fails(self):
with self.assertRaisesRegex(BadFilterRule, 'must be a list'):
err_filter_rule = {'key': 'a', 'values': 'b',
'operator': LabelsOperator.ANY_OF,
'type': 'label'}
create_filter_rules_list([err_filter_rule], self.resource_model)
def test_parse_filter_rules_fails(self):
err_filter_rules_params = [
(('a', ['b'], 'bad_operator', 'label'),
'operator for filtering by labels must be one of'),
(('a', ['b'], LabelsOperator.IS_NULL, 'label'),
'list must be empty if the operator'),
(('a', ['b'], LabelsOperator.IS_NOT_NULL, 'label'),
'list must be empty if the operator'),
(('a', [], LabelsOperator.ANY_OF, 'label'),
'list must include at least one item if the operator'),
(('blueprint_id', ['b'], 'bad_operator', 'attribute'),
'The operator for filtering by attributes must be'),
(('bad_attribute', ['dep1'], LabelsOperator.ANY_OF, 'attribute'),
'Allowed attributes to filter deployments|blueprints by are'),
(('a', ['b'], LabelsOperator.ANY_OF, 'bad_type'),
'Filter rule type must be one of'),
(('bad_attribute', ['dep1'], LabelsOperator.ANY_OF, 'bad_type'),
'Filter rule type must be one of')
]
for params, err_msg in err_filter_rules_params:
with self.assertRaisesRegex(BadFilterRule, err_msg):
create_filter_rules_list([FilterRule(*params)],
self.resource_model)
def test_key_and_value_validation_fails(self):
err_filter_rules_params = [
(('a b', ['b'], LabelsOperator.ANY_OF, 'label'), 'The key'),
(('a', ['b', '"'], LabelsOperator.ANY_OF, 'label'), 'The value'),
(('a', ['b', '\t'], LabelsOperator.ANY_OF, 'label'), 'The value'),
(('a', ['b', '\n'], LabelsOperator.ANY_OF, 'label'), 'The value')
]
for params, err_msg in err_filter_rules_params:
with self.assertRaisesRegex(BadFilterRule, err_msg):
create_filter_rules_list([FilterRule(*params)],
self.resource_model)
class BlueprintsFiltersFunctionalityCase(FiltersFunctionalityBaseCase):
__test__ = True
def setUp(self):
super().setUp(models.Blueprint)
def test_filters_applied(self):
bp_1 = self.put_blueprint_with_labels(self.LABELS, blueprint_id='bp_1')
bp_2 = self.put_blueprint_with_labels(self.LABELS_2,
blueprint_id='bp_2')
bp_3 = self.put_blueprint_with_labels(self.LABELS_3,
blueprint_id='bp_3')
bp_4 = self.put_blueprint(blueprint_id='bp_4')
self._test_labels_filters_applied(bp_1['id'], bp_2['id'], bp_3['id'],
bp_4['id'])
def test_filter_by_state_uploaded(self):
bp_1 = self.put_blueprint(blueprint_id='bp_1')
bp_2 = self.put_blueprint(blueprint_id='bp_2')
bp_3 = self.put_blueprint(blueprint_id='bp_3')
self.assert_filters_applied(
[('state', ['uploaded'], AttrsOperator.ANY_OF,
'attribute'),
             ('state', ['invalid', 'failed'], AttrsOperator.NOT_ANY_OF,
'attribute')],
{bp_1.id, bp_2.id, bp_3.id},
models.Blueprint
)
def test_filter_by_state_invalid(self):
bp = self.put_blueprint(blueprint_id='invalid_blueprint')
self.client.blueprints.update(bp.id, {'state': 'invalid'})
self.assert_filters_applied(
[('state', ['invalid'], AttrsOperator.ANY_OF, 'attribute'),
('state', ['uploaded'], AttrsOperator.NOT_ANY_OF, 'attribute'),
('state', ['invalid'], AttrsOperator.CONTAINS, 'attribute'),
('state', ['uploaded'], AttrsOperator.NOT_CONTAINS, 'attribute')],
{bp.id},
models.Blueprint
)
class DeploymentFiltersFunctionalityCase(FiltersFunctionalityBaseCase):
__test__ = True
def setUp(self):
super().setUp(models.Deployment)
def test_filters_applied(self):
site1 = models.Site(
id='site_1',
name='site_1',
creator=self.user,
tenant=self.tenant,
)
other_site = models.Site(
id='other_site',
name='other_site',
creator=self.user,
tenant=self.tenant,
)
bp1 = models.Blueprint(
id='res_1', creator=self.user, tenant=self.tenant)
dep1 = models.Deployment(
id='res_1',
blueprint=bp1,
labels=[
models.DeploymentLabel(key='a', value='b', creator=self.user),
models.DeploymentLabel(key='a', value='z', creator=self.user),
models.DeploymentLabel(key='c', value='d', creator=self.user),
],
site=site1,
creator=self.user,
tenant=self.tenant,
)
bp2 = models.Blueprint(
id='res_2', creator=self.user, tenant=self.tenant)
dep2 = models.Deployment(
id='res_2',
blueprint=bp2,
labels=[
models.DeploymentLabel(key='a', value='b', creator=self.user),
models.DeploymentLabel(key='c', value='z', creator=self.user),
models.DeploymentLabel(key='e', value='f', creator=self.user),
],
site=other_site,
creator=self.user,
tenant=self.tenant,
)
bp3 = models.Blueprint(
id='res_3', creator=self.user, tenant=self.tenant)
dep3 = models.Deployment(
id='res_3',
blueprint=bp3,
labels=[
models.DeploymentLabel(key='g', value='f', creator=self.user),
],
site=other_site,
creator=self.user,
tenant=self.tenant,
)
bp4 = models.Blueprint(
id='res_4', creator=self.user, tenant=self.tenant)
dep4 = models.Deployment(
id='res_4',
blueprint=bp4,
creator=self.user,
tenant=self.tenant,
)
self._test_labels_filters_applied(dep1.id, dep2.id, dep3.id, dep4.id)
self.assert_filters_applied(
[('a', ['b'], LabelsOperator.ANY_OF, 'label'),
('c', ['y', 'z'], LabelsOperator.NOT_ANY_OF, 'label')], {dep1.id})
self.assert_filters_applied(
[('blueprint_id', ['res_1', 'res_2'], AttrsOperator.ANY_OF,
'attribute'),
('created_by', ['not_user'], AttrsOperator.NOT_ANY_OF,
'attribute'),
('site_name', ['site'], AttrsOperator.CONTAINS, 'attribute'),
('a', ['b'], LabelsOperator.ANY_OF, 'label')],
{dep1.id, dep2.id},
)
self.assert_filters_applied(
[('a', ['b'], LabelsOperator.ANY_OF, 'label'),
('blueprint_id', ['res_1', 'res_2'], AttrsOperator.NOT_ANY_OF,
'attribute')],
set()
)
self.assert_filters_applied(
[('a', ['b'], LabelsOperator.ANY_OF, 'label'),
('blueprint_id', ['res'], AttrsOperator.CONTAINS, 'attribute')],
{dep1.id, dep2.id}
)
self.assert_filters_applied(
[('a', ['b'], LabelsOperator.ANY_OF, 'label'),
('blueprint_id', ['res_1'], AttrsOperator.CONTAINS, 'attribute')],
{dep1.id}
)
self.assert_filters_applied(
[('site_name', ['site_1'], AttrsOperator.NOT_CONTAINS,
'attribute')], {dep2.id, dep3.id}
)
self.assert_filters_applied(
[('site_name', ['site_1', 'site_3'], AttrsOperator.NOT_CONTAINS,
'attribute')], {dep2.id, dep3.id}
)
self.assert_filters_applied(
[('site_name', ['site'], AttrsOperator.STARTS_WITH, 'attribute')],
{dep1.id}
)
self.assert_filters_applied(
[('site_name', ['other', 'blah'], AttrsOperator.STARTS_WITH,
'attribute')], {dep2.id, dep3.id}
)
self.assert_filters_applied(
[('site_name', ['site'], AttrsOperator.ENDS_WITH, 'attribute')],
{dep2.id, dep3.id}
)
self.assert_filters_applied(
[('site_name', ['1', 'blah'], AttrsOperator.ENDS_WITH,
'attribute')], {dep1.id}
)
class FiltersBaseCase(base_test.BaseServerTestCase):
__test__ = False
LABELS_RULE = FilterRule('a', ['b'], LabelsOperator.ANY_OF, 'label')
ATTRS_RULE = FilterRule('created_by', ['admin'], AttrsOperator.NOT_ANY_OF,
'attribute')
FILTER_RULES = [LABELS_RULE, ATTRS_RULE]
NEW_LABELS_RULE = FilterRule('c', ['d'], LabelsOperator.ANY_OF, 'label')
NEW_ATTRS_RULE = FilterRule('created_by', ['user'], AttrsOperator.ANY_OF,
'attribute')
NEW_RULES = [NEW_LABELS_RULE, NEW_ATTRS_RULE]
def setUp(self, filters_resource, filters_model, legal_filter_rules):
super().setUp()
self.filters_client = getattr(self.client, filters_resource)
self.filters_model = filters_model
self.legal_filter_rules = legal_filter_rules
def test_create_legal_filter(self):
new_filter = self.create_filter(self.filters_client,
FILTER_ID,
self.legal_filter_rules)
self.assertEqual(new_filter.value, self.legal_filter_rules)
def test_list_filters(self):
for i in range(3):
self.create_filter(self.filters_client,
'{0}{1}'.format(FILTER_ID, i),
[FilterRule(f'a{i}', [f'b{i}'],
LabelsOperator.ANY_OF, 'label')])
filters_list = self.filters_client.list()
self.assertEqual(len(filters_list.items), 3)
for i in range(3):
self.assertEqual(filters_list.items[i].labels_filter_rules,
[FilterRule(f'a{i}', [f'b{i}'],
LabelsOperator.ANY_OF, 'label')])
def test_list_filters_sort(self):
filter_ids = ['a_filter', 'c_filter', 'b_filter']
for filter_id in filter_ids:
self.create_filter(self.filters_client,
filter_id,
self.FILTER_RULES)
sorted_asc_filters_list = self.filters_client.list(sort='id')
self.assertEqual(
[filter_elem.id for filter_elem in sorted_asc_filters_list],
sorted(filter_ids)
)
sorted_dsc_filters_list = self.filters_client.list(sort='id',
is_descending=True)
self.assertEqual(
[filter_elem.id for filter_elem in sorted_dsc_filters_list],
sorted(filter_ids, reverse=True)
)
def test_uppercase_filter_rules(self):
filter_rules = [
FilterRule('created_by', ['Joe'], AttrsOperator.ANY_OF,
'attribute'),
FilterRule('A', ['B'], LabelsOperator.ANY_OF, 'label')
]
new_filter = self.create_filter(self.filters_client,
FILTER_ID,
filter_rules)
expected_filter_rules = [
{'key': 'created_by', 'values': ['Joe'],
'operator': AttrsOperator.ANY_OF, 'type': 'attribute'},
{'key': 'a', 'values': ['B'],
'operator': AttrsOperator.ANY_OF, 'type': 'label'}
]
self.assertEqual(new_filter.value, expected_filter_rules)
def test_filter_create_with_invalid_filter_rule_fails(self):
err_filter_rule = [{'key': 'a', 'values': 'b', 'operator': 'any_of',
'type': 'label'}]
with self.assertRaisesRegex(CloudifyClientError, 'must be a list'):
self.create_filter(self.filters_client, FILTER_ID, err_filter_rule)
def test_filter_create_with_reserved_filter_id_fails(self):
with self.assertRaisesRegex(CloudifyClientError, 'prefix'):
self.create_filter(self.filters_client, 'csys-filter',
self.FILTER_RULES)
def test_create_filter_with_duplicate_filter_rules(self):
new_filter = self.create_filter(self.filters_client,
FILTER_ID,
[self.LABELS_RULE, self.ATTRS_RULE,
self.LABELS_RULE, self.ATTRS_RULE])
self.assertEqual(new_filter.value, self.FILTER_RULES)
def test_get_filter(self):
self.create_filter(self.filters_client, FILTER_ID, self.FILTER_RULES)
fetched_filter = self.filters_client.get(FILTER_ID)
self.assertEqual(fetched_filter.value, self.FILTER_RULES)
def test_delete_filter(self):
self.create_filter(self.filters_client, FILTER_ID, self.FILTER_RULES)
self.assertEqual(len(self.filters_client.list().items), 1)
self.filters_client.delete(FILTER_ID)
self.assertEqual(len(self.filters_client.list().items), 0)
def test_delete_system_filter_fails(self):
self._put_system_filter()
with self.assertRaisesRegex(CloudifyClientError, 'system filter'):
self.filters_client.delete('csys-test-filter')
def test_update_filter(self):
self._update_filter(new_filter_rules=self.NEW_RULES,
new_visibility=VisibilityState.GLOBAL)
def test_update_filter_only_visibility(self):
self._update_filter(new_visibility=VisibilityState.GLOBAL)
def test_update_filter_only_filter_rules(self):
self._update_filter(new_filter_rules=self.NEW_RULES)
def test_update_filter_no_args_fails(self):
with self.assertRaisesRegex(RuntimeError, 'to update a filter'):
self._update_filter()
def test_update_filter_narrower_visibility_fails(self):
with self.assertRaisesRegex(CloudifyClientError,
'has wider visibility'):
self._update_filter(new_visibility=VisibilityState.PRIVATE)
def test_update_filter_updates_only_labels_rules(self):
self._update_filter(new_filter_rules=[self.NEW_LABELS_RULE])
def test_update_filter_updates_only_attrs_rules(self):
self._update_filter(new_filter_rules=[self.NEW_ATTRS_RULE])
def test_update_system_filter_fails(self):
self._put_system_filter()
with self.assertRaisesRegex(CloudifyClientError, 'system filter'):
self.filters_client.update('csys-test-filter', self.NEW_RULES)
def _update_filter(self, new_filter_rules=None, new_visibility=None):
orig_filter = self.create_filter(self.filters_client,
FILTER_ID,
self.FILTER_RULES)
updated_filter = self.filters_client.update(FILTER_ID,
new_filter_rules,
new_visibility)
updated_visibility = new_visibility or VisibilityState.TENANT
if new_filter_rules:
new_attrs_filter_rules = self._get_filter_rules_by_type(
new_filter_rules, 'attribute')
new_labels_filter_rules = self._get_filter_rules_by_type(
new_filter_rules, 'label')
if new_attrs_filter_rules:
if new_labels_filter_rules:
updated_rules = new_filter_rules
else:
updated_rules = [self.LABELS_RULE] + new_attrs_filter_rules
elif new_labels_filter_rules:
updated_rules = [self.ATTRS_RULE] + new_labels_filter_rules
else:
raise Exception('Unknown filter rule type')
else:
updated_rules = self.FILTER_RULES
self.assertEqual(updated_filter.value, updated_rules)
self.assertEqual(updated_filter.visibility, updated_visibility)
self.assertGreater(updated_filter.updated_at, orig_filter.updated_at)
def _put_system_filter(self):
# We need to use the storage manager because system filters
# cannot (and should not) be created using the rest-service.
now = get_formatted_timestamp()
new_filter = self.filters_model(
id='csys-test-filter',
value=self.FILTER_RULES,
created_at=now,
updated_at=now,
visibility=VisibilityState.TENANT,
is_system_filter=True
)
self.sm.put(new_filter)
class BlueprintsFiltersCase(FiltersBaseCase):
__test__ = True
def setUp(self):
super().setUp('blueprints_filters', models.BlueprintsFilter,
LEGAL_FILTER_RULES + BLUEPRINT_SPECIFIC_FILTER_RULES)
class DeploymentsFiltersCase(FiltersBaseCase):
__test__ = True
def setUp(self):
super().setUp('deployments_filters', models.DeploymentsFilter,
LEGAL_FILTER_RULES)
|
e55b54ac0746dac749ac627bd1d8782dfed40a34
|
7c593f4cc70ee56106cc9cce105e6b9e7839431e
|
/objax/optimizer/adam.py
|
9e091b8b06747ac644343d387402e6a8570713d9
|
[
"Apache-2.0"
] |
permissive
|
google/objax
|
84e397cafb70813a1e89467f745facf828ed24b8
|
a2d025d9e1da8660a1883404207c41d4327d8c48
|
refs/heads/master
| 2023-09-02T07:04:26.801269
| 2023-06-12T22:12:53
| 2023-06-12T22:12:53
| 288,923,752
| 801
| 80
|
Apache-2.0
| 2023-06-12T22:12:54
| 2020-08-20T06:20:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
adam.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Adam']
from typing import List, Optional
from jax import numpy as jn
from objax import functional
from objax.module import Module, ModuleList
from objax.typing import JaxArray
from objax.util import class_name
from objax.variable import TrainRef, StateVar, TrainVar, VarCollection
class Adam(Module):
"""Adam optimizer."""
def __init__(self, vc: VarCollection, beta1: float = 0.9, beta2: float = 0.999, eps: float = 1e-8):
"""Constructor for Adam optimizer class.
Args:
vc: collection of variables to optimize.
beta1: value of Adam's beta1 hyperparameter. Defaults to 0.9.
beta2: value of Adam's beta2 hyperparameter. Defaults to 0.999.
eps: value of Adam's epsilon hyperparameter. Defaults to 1e-8.
"""
self.beta1 = beta1
self.beta2 = beta2
self.eps = eps
self.step = StateVar(jn.array(0, jn.uint32), reduce=lambda x: x[0])
self.train_vars = ModuleList(TrainRef(x) for x in vc.subset(TrainVar))
self.m = ModuleList(StateVar(jn.zeros_like(x.value)) for x in self.train_vars)
self.v = ModuleList(StateVar(jn.zeros_like(x.value)) for x in self.train_vars)
def __call__(self, lr: float, grads: List[JaxArray], beta1: Optional[float] = None, beta2: Optional[float] = None):
"""Updates variables and other state based on Adam algorithm.
Args:
lr: the learning rate.
grads: the gradients to apply.
beta1: optional, override the default beta1.
beta2: optional, override the default beta2.
"""
assert len(grads) == len(self.train_vars), 'Expecting as many gradients as trainable variables'
if beta1 is None:
beta1 = self.beta1
if beta2 is None:
beta2 = self.beta2
self.step.value += 1
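        # Fold Adam's bias correction for the first and second moment estimates
        # into the learning rate: lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t).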
lr *= jn.sqrt(1 - beta2 ** self.step.value) / (1 - beta1 ** self.step.value)
for g, p, m, v in zip(grads, self.train_vars, self.m, self.v):
m.value += (1 - beta1) * (g - m.value)
v.value += (1 - beta2) * (g ** 2 - v.value)
p.value -= lr * m.value * functional.rsqrt(v.value + self.eps)
def __repr__(self):
return f'{class_name(self)}(beta1={self.beta1}, beta2={self.beta2}, eps={self.eps})'
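# A minimal usage sketch (illustrative only; `model` and `gradients` are
# assumed to exist elsewhere and are not part of this module):
#
#   opt = Adam(model.vars())
#   ...inside the training step...
#   opt(lr=1e-3, grads=gradients)  # applies one Adam update in place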
|
877d95c9b31ff6b73350a64956adf781aaccb23f
|
f07e66293cc41a9fe71fc44f765b432fd7a0997c
|
/selfdrive/controls/plannerd.py
|
f592c213c5acb18cc47fb3b99ec8f7b7e5f833ba
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kegman/openpilot
|
c9ba96a72d905956f02c684e065091e023942883
|
b35291c91783657a5fc83abfff012d3bb49dd89f
|
refs/heads/kegman-ultimate
| 2022-05-22T17:07:16.656336
| 2021-10-25T13:35:28
| 2021-10-25T13:35:28
| 229,979,925
| 105
| 212
|
MIT
| 2022-03-13T05:47:51
| 2019-12-24T17:27:11
|
C
|
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
plannerd.py
|
#!/usr/bin/env python3
from cereal import car
from common.params import Params
from common.realtime import Priority, config_realtime_process
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.longitudinal_planner import Planner
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.lateral_planner import LateralPlanner
import cereal.messaging as messaging
def plannerd_thread(sm=None, pm=None):
config_realtime_process(2, Priority.CTRL_LOW)
cloudlog.info("plannerd is waiting for CarParams")
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
cloudlog.info("plannerd got CarParams: %s", CP.carName)
PL = Planner(CP)
PP = LateralPlanner(CP)
VM = VehicleModel(CP)
if sm is None:
sm = messaging.SubMaster(['carControl', 'carState', 'controlsState', 'radarState', 'modelV2', 'liveParameters'],
poll=['radarState', 'modelV2'])
if pm is None:
pm = messaging.PubMaster(['longitudinalPlan', 'liveLongitudinalMpc', 'lateralPlan', 'liveMpc'])
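  # Seed liveParameters with sane defaults so the planners have usable values
  # before the first liveParameters message arrives.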
sm['liveParameters'].valid = True
sm['liveParameters'].sensorValid = True
sm['liveParameters'].steerRatio = CP.steerRatio
sm['liveParameters'].stiffnessFactor = 1.0
while True:
sm.update()
if sm.updated['modelV2']:
PP.update(sm, CP, VM)
PP.publish(sm, pm)
if sm.updated['radarState']:
PL.update(sm, CP, VM, PP)
PL.publish(sm, pm)
def main(sm=None, pm=None):
plannerd_thread(sm, pm)
if __name__ == "__main__":
main()
|
6834203db3fae6ca572fc8737e77b6e08d22a039
|
3526b979d4860971f517edc12e307d714f48c354
|
/razorpay/resources/__init__.py
|
68917d25d2a08b8a20013718fb62203d1ce61a35
|
[
"MIT"
] |
permissive
|
razorpay/razorpay-python
|
9adda56cc9dd25b6ff5f7f1a5879844f35666811
|
c07bf8baf100541786998b87ada035eaec308aaf
|
refs/heads/master
| 2023-08-17T10:15:11.744848
| 2023-08-16T08:21:45
| 2023-08-16T08:21:45
| 46,411,248
| 153
| 93
|
MIT
| 2023-08-16T08:21:47
| 2015-11-18T10:20:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
__init__.py
|
from .payment import Payment
from .refund import Refund
from .order import Order
from .invoice import Invoice
from .payment_link import PaymentLink
from .customer import Customer
from .card import Card
from .token import Token
from .transfer import Transfer
from .virtual_account import VirtualAccount
from .addon import Addon
from .plan import Plan
from .subscription import Subscription
from .qrcode import Qrcode
from .registration_link import RegistrationLink
from .settlement import Settlement
from .item import Item
from .fund_account import FundAccount
from .account import Account
from .stakeholder import Stakeholder
from .product import Product
from .iin import Iin
from .webhook import Webhook
__all__ = [
'Payment',
'Refund',
'Order',
'Invoice',
'PaymentLink',
'Customer',
'Card',
'Token',
'Transfer',
'VirtualAccount',
'Addon',
'Plan',
'Subscription',
'RegistrationLink',
'Settlement',
'Item',
    'Qrcode',
'FundAccount',
'Account',
'Stakeholder',
'Product',
'Iin',
'Webhook'
]
|
82969ba06b4992e40ebd73af8730df7dc2676c8a
|
470f8a4fe5f7d38b9ed482d2c5260dbfd5d448a3
|
/app-tasks/rf/src/rf/models/scene.py
|
62a251d89ccca1b5634dc356d70c7cec7010dbc0
|
[
"Apache-2.0"
] |
permissive
|
raster-foundry/raster-foundry
|
4f817a5cbfaa8b0561ea119ac25dadfa96f3dce5
|
eee86bf45b9ebabbbee7bb36a73dd60bd8a807bf
|
refs/heads/develop
| 2022-11-21T13:24:28.723720
| 2022-11-07T16:36:37
| 2022-11-07T16:36:37
| 36,464,140
| 117
| 35
|
Apache-2.0
| 2022-11-04T21:58:44
| 2015-05-28T20:25:45
|
Scala
|
UTF-8
|
Python
| false
| false
| 7,727
|
py
|
scene.py
|
"""Python class representation of a Raster Foundry Scene"""
import uuid
from requests.exceptions import HTTPError
import logging
from rf.models.base import BaseModel
from rf.models.thumbnail import Thumbnail
from rf.models.image import Image
from rf.models.footprint import Footprint
logger = logging.getLogger(__name__)
class Scene(BaseModel):
URL_PATH = "/api/scenes/"
def __init__(
self,
visibility,
tags,
datasource,
sceneMetadata,
name,
thumbnailStatus,
boundaryStatus,
ingestStatus,
metadataFiles,
sunAzimuth=None,
sunElevation=None,
cloudCover=None,
acquisitionDate=None,
id=None,
thumbnails=None,
tileFootprint=None,
dataFootprint=None,
images=None,
createdAt=None,
modifiedAt=None,
createdBy=None,
ingestLocation=None,
owner=None,
sceneType="AVRO",
metadataFields=None,
):
"""Create a new Scene
Args:
visibility (str): level of access to search for/view scene
tags (List[str]): list of tags for scene
datasource (str): datasource that scene belongs to
sceneMetadata (dict): extra metadata associated with scene
name (str): name of scene (displayed to users)
thumbnailStatus (str): status of thumbnail creation
boundaryStatus (str): status of creating boundaries
ingestStatus (str): overall status of scene creation
sunAzimuth (float): azimuth of sun when scene was created from satellite/uav
sunElevation (float): elevation of sun when scene was created
cloudCover (float): percent of scene covered by clouds
acquisitionDate (datetime): date when scene was acquired
id (str): UUID primary key for scene
thumbnails (List[Thumbnail]): list of thumbnails associated with scene
tileFootprint (Footprint): footprint of the tile associated with scene
dataFootprint (Footprint): footprint of this scene's data
images (List[Image]): list of images associated with scene
            owner (str): user that owns a scene
            metadataFiles (List[str]): list of metadata files associated with scene
            ingestLocation (str): location the scene data was ingested to
            sceneType (str): type of the scene, defaults to "AVRO"
        """
self.ingestLocation = ingestLocation
self.createdBy = createdBy
self.visibility = visibility
self.tags = tags
self.datasource = datasource
self.sceneMetadata = sceneMetadata
self.name = name
self.thumbnailStatus = thumbnailStatus
self.boundaryStatus = boundaryStatus
self.ingestStatus = ingestStatus
self.metadataFiles = metadataFiles
self.owner = owner
self.sceneType = sceneType
# Optional - can be None
self.sunAzimuth = sunAzimuth
self.sunElevation = sunElevation
self.cloudCover = cloudCover
self.acquisitionDate = acquisitionDate
self.id = id or str(uuid.uuid4())
self.thumbnails = thumbnails
self.tileFootprint = tileFootprint
self.dataFootprint = dataFootprint
self.images = images
self.createdAt = createdAt
self.modifiedAt = modifiedAt
self.metadataFields = metadataFields
def __repr__(self):
return "<Scene: {}>".format(self.name)
@classmethod
def from_dict(cls, d):
statuses = d.get("statusFields")
filter_fields = d.get("filterFields")
images = [Image.from_dict(image) for image in d.get("images")]
thumbnails = [
Thumbnail.from_dict(thumbnail) for thumbnail in d.get("thumbnails")
]
tile_footprint_dict = d.get("tileFootprint")
data_footprint_dict = d.get("dataFootprint")
if tile_footprint_dict:
tile_footprint = Footprint.from_dict(tile_footprint_dict)
else:
tile_footprint = None
if data_footprint_dict:
data_footprint = Footprint.from_dict(data_footprint_dict)
else:
data_footprint = None
return cls(
d.get("visibility"),
d.get("tags"),
d.get("datasource")["id"],
d.get("sceneMetadata"),
d.get("name"),
statuses.get("thumbnailStatus"),
statuses.get("boundaryStatus"),
statuses.get("ingestStatus"),
d.get("metadataFiles"),
filter_fields.get("sunAzimuth"),
filter_fields.get("sunElevation"),
filter_fields.get("cloudCover"),
filter_fields.get("acquisitionDate"),
d.get("id"),
thumbnails,
tile_footprint,
data_footprint,
images,
d.get("createdAt"),
d.get("modifiedAt"),
d.get("createdBy"),
d.get("ingestLocation", ""),
owner=d.get("owner"),
sceneType=d.get("sceneType"),
metadataFields=d.get("metadataFields"),
)
def to_dict(self):
filterFields = {}
statusFields = dict(
thumbnailStatus=self.thumbnailStatus,
boundaryStatus=self.boundaryStatus,
ingestStatus=self.ingestStatus,
)
scene_dict = dict(
visibility=self.visibility,
tags=self.tags,
datasource=self.datasource,
sceneMetadata=self.sceneMetadata,
filterFields=filterFields,
name=self.name,
statusFields=statusFields,
metadataFiles=self.metadataFiles,
ingestLocation=self.ingestLocation,
owner=self.owner,
sceneType=self.sceneType,
metadataFields=self.metadataFields,
)
        if self.sunAzimuth is not None:
            filterFields["sunAzimuth"] = self.sunAzimuth
        if self.sunElevation is not None:
            filterFields["sunElevation"] = self.sunElevation
if self.cloudCover is not None:
filterFields["cloudCover"] = self.cloudCover
if self.acquisitionDate:
filterFields["acquisitionDate"] = self.acquisitionDate
if self.id:
scene_dict["id"] = self.id
if self.thumbnails:
scene_dict["thumbnails"] = [
thumbnail.to_dict() for thumbnail in self.thumbnails
]
else:
scene_dict["thumbnails"] = []
if self.images:
scene_dict["images"] = [image.to_dict() for image in self.images]
else:
scene_dict["images"] = []
if self.tileFootprint:
scene_dict["tileFootprint"] = self.tileFootprint.to_dict()
if self.dataFootprint:
scene_dict["dataFootprint"] = self.dataFootprint.to_dict()
if self.createdAt:
scene_dict["createdAt"] = self.createdAt
if self.modifiedAt:
scene_dict["modifiedAt"] = self.modifiedAt
if self.createdBy:
scene_dict["createdBy"] = self.createdBy
return scene_dict
def create(self):
try:
return super(Scene, self).create()
except HTTPError as exc:
if exc.response.status_code != 409:
raise
else:
logger.info("Tried to create duplicate object: %s", self)
return None
def get_extent(self):
"""Helper method to return extent of scene"""
assert self.tileFootprint, "Must have tile footprint to extract extent"
coords = self.tileFootprint.multipolygon[0][0]
longitudes = [coord[0] for coord in coords]
latitudes = [coord[1] for coord in coords]
return [min(longitudes), min(latitudes), max(longitudes), max(latitudes)]
|
07da3363afd16827136a7d4dd5e98f169690272c
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-2100_fetcher_timeout_test.py
|
d10d429e5430b4f385ed7a1136b7933b68972a44
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
MLDB-2100_fetcher_timeout_test.py
|
#
# MLDB-2100_fetcher_timeout_test.py
# Francois-Michel L'Heureux, 2016-11-20
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import socket
import threading
import time
class MyThread(threading.Thread):
def run(self):
try:
threading.Thread.run(self)
        except Exception as exc:
            # keep the exception so the main thread can re-raise it after join()
            self.err = exc
else:
self.err = None
# set a timeout so the test won't hang if MLDB never contacts the socket
socket.setdefaulttimeout(10)
from mldb import mldb
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('127.0.0.1', 0))
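# Binding to port 0 asks the OS for any free ephemeral port; the actual port
# number is read back via getsockname() below.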
serversocket.listen(1)
port_num = serversocket.getsockname()[1]
keep_going = threading.Event()
def sleeper():
while not keep_going.is_set():
time.sleep(1)
def client_thread(clientsocket):
return threading.Thread(target=sleeper)
def mldb_test():
mldb.log("MLDB querying")
res = mldb.query(
"SELECT fetcher('http://localhost:{}/toto')".format(port_num))
assert res[1][2].find("Timeout was reached") != -1
mldb_thread = MyThread(target=mldb_test)
mldb_thread.start()
# accept connections from outside
try:
(clientsocket, address) = serversocket.accept()
except socket.timeout:
mldb.log("MLDB did not contact the socket")
raise
# now do something with the clientsocket
# in this case, we'll pretend this is a threaded server
ct = client_thread(clientsocket)
ct.start()
mldb_thread.join()
keep_going.set()
ct.join()
if mldb_thread.err:
raise mldb_thread.err
request.set_return("success")
|
25ebab0a1895598ba7622713acc69b811d1858a9
|
082cb56436631f16585dc6c667a8b384cee3335f
|
/script/talk/source/t400262.py
|
a905952b7a98280675d557169a98cc455d0b8028
|
[] |
no_license
|
vawser/Cinders-DS3
|
abf2c5e1c163f2e556a0d89e437eead3ddd6992c
|
d086ebce45b27806f757e04778dad1615e405dab
|
refs/heads/master
| 2023-09-01T00:48:00.500866
| 2023-08-07T12:25:24
| 2023-08-07T12:25:24
| 230,333,994
| 192
| 203
| null | 2022-02-13T21:09:26
| 2019-12-26T22:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 10,847
|
py
|
t400262.py
|
#-------------------------------------------
#-- Irina of Carim
#-------------------------------------------
# -*- coding: utf-8 -*-
def t400262_1():
""" State 0,1 """
assert GetCurrentStateElapsedTime() > 1
""" State 2 """
while True:
call = t400262_x12()
assert IsClientPlayer() == 1
""" State 3 """
call = t400262_x13()
assert not IsClientPlayer()
def t400262_x0(z5=6260, flag3=1295, flag4=6000, flag5=6000, flag6=6000, flag7=6000):
""" State 0,1 """
while True:
assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())
""" State 3 """
assert (GetEventStatus(flag3) == 1 or GetEventStatus(flag4) == 1 or GetEventStatus(flag5) ==
1 or GetEventStatus(flag6) == 1 or GetEventStatus(flag7) == 1)
""" State 2 """
if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())):
pass
elif (not GetEventStatus(flag3) and not GetEventStatus(flag4) and not GetEventStatus(flag5) and
not GetEventStatus(flag6) and not GetEventStatus(flag7)):
pass
elif CheckActionButtonArea(z5):
break
""" State 4 """
return 0
def t400262_x1():
""" State 0,1 """
if not CheckSpecificPersonTalkHasEnded(0):
""" State 7 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
""" State 6 """
ReportConversationEndToHavokBehavior()
else:
pass
""" State 2 """
if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
""" State 3 """
ForceCloseGenericDialog()
else:
pass
""" State 4 """
if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
""" State 5 """
ForceCloseMenu()
else:
pass
""" State 8 """
return 0
def t400262_x2():
""" State 0,1 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
ForceCloseGenericDialog()
ForceCloseMenu()
ReportConversationEndToHavokBehavior()
""" State 2 """
return 0
def t400262_x3(text2=_, z4=_, flag2=0, mode2=1):
""" State 0,5 """
assert t400262_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 2 """
SetEventState(z4, 1)
""" State 1 """
TalkToPlayer(text2, -1, -1, flag2)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 4 """
if not mode2:
pass
else:
""" State 3 """
ReportConversationEndToHavokBehavior()
""" State 6 """
return 0
def t400262_x4(text1=_, flag1=0, mode1=1):
""" State 0,4 """
assert t400262_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 1 """
TalkToPlayer(text1, -1, -1, flag1)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 3 """
if not mode1:
pass
else:
""" State 2 """
ReportConversationEndToHavokBehavior()
""" State 5 """
return 0
def t400262_x5(action1=13016000):
""" State 0,1 """
OpenGenericDialog(7, action1, 1, 0, 1)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
return 0
def t400262_x6(z1=2119, z2=2120, z3=2121):
""" State 0,10 """
c1110()
""" State 1 """
while True:
ClearTalkListData()
assert (not CheckSpecificPersonGenericDialogIsOpen(2) and not (CheckSpecificPersonMenuIsOpen(-1,
2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2)))
""" State 2 """
# action:15002000:Level Up
AddTalkListData(1, 15002000, -1)
# action:15000005:Leave
AddTalkListData(99, 15000005, -1)
""" State 3 """
ShowShopMessage(1)
assert not (CheckSpecificPersonMenuIsOpen(1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
""" State 7 """
if GetTalkListEntryResult() == 1:
""" State 4 """
if GetEventStatus(2051) == 1 or IsMultiplayerInProgress() == 1:
""" State 16,20 """
assert t400262_x5(action1=13016000)
else:
""" State 15 """
assert not GetEventStatus(74000391) and not GetEventStatus(74000392)
""" State 19 """
SetEventState(74000389, 1)
call = t400262_x20()
def ExitPause():
SetEventState(74000389, 0)
SetEventState(74000390, 0)
if call.Get() == 1:
""" State 14 """
break
elif call.Done():
pass
elif GetTalkListEntryResult() == 2:
""" State 12,13 """
OpenRegularShop(160000, 169999)
assert not (CheckSpecificPersonMenuIsOpen(5, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
elif GetTalkListEntryResult() == 3:
""" State 5,9 """
CombineMenuFlagAndEventFlag(6001, 234)
CombineMenuFlagAndEventFlag(6001, 235)
""" State 8 """
OpenEnhanceShop(0)
assert not (CheckSpecificPersonMenuIsOpen(9, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
elif GetTalkListEntryResult() == 4:
""" State 11,18 """
assert t400262_x14()
elif not (CheckSpecificPersonMenuIsOpen(1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0)):
""" State 6 """
break
""" State 21 """
return 0
def t400262_x7():
""" State 0,1,2,3,4 """
# talk:26003100:Ohh, sweet Champion of Ash.
assert t400262_x4(text1=26003100, flag1=0, mode1=1)
""" State 5 """
assert t400262_x6(z1=2119, z2=2120, z3=2121)
""" State 6 """
return 0
def t400262_x8():
""" State 0,6 """
assert t400262_x1()
""" State 3 """
assert GetCurrentStateElapsedFrames() > 1
""" State 1,2 """
if GetDistanceToPlayer() < 10:
""" State 5,8 """
call = t400262_x19()
if call.Done():
pass
elif GetDistanceToPlayer() > 12:
""" State 7 """
assert t400262_x1()
else:
""" State 4 """
pass
""" State 9 """
return 0
def t400262_x9():
""" State 0,1 """
if GetEventStatus(1298) == 1:
""" State 2 """
pass
else:
""" State 3 """
if GetDistanceToPlayer() < 10:
""" State 4,7 """
call = t400262_x18()
if call.Done():
pass
elif GetDistanceToPlayer() > 12:
""" State 6 """
assert t400262_x1()
else:
""" State 5 """
pass
""" State 8 """
return 0
def t400262_x10():
""" State 0,2,1,3 """
return 0
def t400262_x11():
""" State 0,1,3,4 """
assert t400262_x1()
""" State 6 """
return 0
def t400262_x12():
""" State 0,1 """
while True:
call = t400262_x15()
assert not GetEventStatus(1286)
""" State 2 """
call = t400262_x16()
assert GetEventStatus(1286) == 1
def t400262_x13():
""" State 0,1 """
assert t400262_x1()
""" State 2 """
return 0
def t400262_x14():
""" State 0,1 """
# talk:26003101:Let souls be your strength...
assert t400262_x4(text1=26003101, flag1=0, mode1=1)
""" State 2 """
return 0
def t400262_x15():
""" State 0,2 """
call = t400262_x21()
assert CheckSelfDeath() == 1
""" State 1 """
t400262_x9()
def t400262_x16():
""" State 0 """
def t400262_x17():
""" State 0,1,2 """
if GetEventStatus(9210) == 1:
""" State 3 """
# talk:26002800:Godspeed, Champion of Ash.
assert t400262_x4(text1=26002800, flag1=0, mode1=1)
else:
""" State 4 """
# talk:26002700:Thank you ever so much, sweet Champion.
assert t400262_x4(text1=26002700, flag1=0, mode1=1)
""" State 5 """
return 0
def t400262_x18():
""" State 0,1,2,3 """
# talk:26003400:
assert t400262_x4(text1=26003400, flag1=0, mode1=1)
""" State 4 """
return 0
def t400262_x19():
""" State 0,1 """
if not GetEventStatus(74000371):
""" State 4 """
# talk:26000600:
assert t400262_x3(text2=26000600, z4=74000371, flag2=0, mode2=1)
else:
""" State 3 """
SetEventState(74000371, 0)
SetEventState(74000372, 0)
SetEventState(74000373, 0)
""" State 5 """
# talk:26000700:
assert t400262_x3(text2=26000700, z4=74000372, flag2=0, mode2=1)
""" State 8 """
return 0
def t400262_x20():
""" State 0,3 """
def WhilePaused():
SetTalkTime(0.1)
if not GetEventStatus(74000389):
pass
elif DoesSelfHaveSpEffect(150) == 1:
""" State 4 """
SetEventState(74000390, 1)
""" State 2 """
if GetEventStatus(74000391) == 1 and GetEventStatus(74000392) == 1:
""" State 1 """
OpenSoul()
assert not (CheckSpecificPersonMenuIsOpen(10, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
""" State 5 """
return 0
elif not GetEventStatus(74000389):
pass
""" State 6 """
return 1
def t400262_x21():
""" State 0,6 """
while True:
call = t400262_x0(z5=6260, flag3=1295, flag4=6000, flag5=6000, flag6=6000, flag7=6000)
if call.Done():
""" State 4 """
call = t400262_x7()
if call.Done():
pass
elif IsAttackedBySomeone() == 1:
""" State 2 """
Label('L0')
call = t400262_x8()
def ExitPause():
RemoveMyAggro()
if call.Done():
pass
elif IsPlayerDead() == 1:
break
elif IsPlayerDead() == 1:
break
elif GetDistanceToPlayer() > 3 or not GetEventStatus(74000393):
""" State 5 """
call = t400262_x11()
if call.Done() and GetDistanceToPlayer() < 4.9:
pass
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsPlayerDead() == 1:
break
elif not GetEventStatus(74000393):
""" State 1 """
if GetEventStatus(74000393) == 1:
pass
elif IsAttackedBySomeone() == 1:
Goto('L0')
""" State 3 """
t400262_x10()
|
6b10764b851f6799e85e5e15a5f61236a74a078e
|
61b95ee2aefbcfbd6c4abf9511d976d0b9d0e100
|
/faker/providers/automotive/es_CO/__init__.py
|
27ee8f825f7dcb42186069562dba1efcf2521b8d
|
[
"MIT"
] |
permissive
|
joke2k/faker
|
fed7472580ced2bce326fe4ea0c3d1c810853d5e
|
33e36b1b6cc9c6f039fe387988853771bab60624
|
refs/heads/master
| 2023-09-04T00:43:33.599705
| 2023-08-31T16:15:04
| 2023-08-31T16:15:04
| 6,662,075
| 14,544
| 2,215
|
MIT
| 2023-09-11T16:06:14
| 2012-11-12T23:00:09
|
Python
|
UTF-8
|
Python
| false
| false
| 359
|
py
|
__init__.py
|
from collections import OrderedDict
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
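    # In these plate patterns the base automotive provider replaces '?' with a
    # random letter and '#' with a random digit; the float is the relative
    # weight with which each format is sampled.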
license_formats = OrderedDict(
[
("???###", 0.6),
("???##?", 0.3),
("T####", 0.03),
("??####", 0.01),
("R#####", 0.03),
("S#####", 0.03),
]
)
|
7e4b64cdd22d529e477d6aac70047a877543d3c7
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/haystack/nodes/file_converter/csv.py
|
ac69203f2aa4d1e22e6178ac79cc34bd3d1199cb
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
csv.py
|
from typing import Union, List, Optional, Any, Dict
import logging
from pathlib import Path
import pandas as pd
from haystack import Document
from haystack.nodes.file_converter import BaseConverter
logger = logging.getLogger(__name__)
class CsvTextConverter(BaseConverter):
"""
    Converts a CSV file containing FAQs to text Documents. The CSV file must have
    two columns: 'question' and 'answer'. Use this node for FAQ-style question
    answering.
"""
outgoing_edges = 1
def convert(
self,
file_path: Union[Path, List[Path], str, List[str], List[Union[Path, str]]],
meta: Optional[Dict[str, Any]],
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
encoding: Optional[str] = "UTF-8",
id_hash_keys: Optional[List[str]] = None,
) -> List[Document]:
"""
Load a CSV file containing question-answer pairs and convert it to Documents.
:param file_path: Path to the CSV file you want to convert. The file must have two columns called 'question' and 'answer'.
The first will be interpreted as a question, the second as content.
:param meta: A dictionary of metadata key-value pairs that you want to append to the returned document. It's optional.
:param encoding: Specifies the file encoding. It's optional. The default value is `UTF-8`.
:param id_hash_keys: Generates the document ID from a custom list of strings that refer to the document's
attributes. To ensure you don't have duplicate documents in your DocumentStore when texts are
not unique, modify the metadata and pass, for example, "meta" to this field (example: ["content", "meta"]).
Then the ID is generated by using the content and the metadata you defined.
:param remove_numeric_tables: unused
:param valid_languages: unused
        :returns: List of Documents, one Document per row in the CSV.
"""
if not isinstance(file_path, list):
file_path = [file_path]
docs: List[Document] = []
for path in file_path:
df = pd.read_csv(path, encoding=encoding)
if len(df.columns) != 2 or df.columns[0] != "question" or df.columns[1] != "answer":
raise ValueError("The CSV must contain two columns named 'question' and 'answer'")
df.fillna(value="", inplace=True)
df["question"] = df["question"].apply(lambda x: x.strip())
df = df.rename(columns={"question": "content"})
docs_dicts = df.to_dict(orient="records")
for dictionary in docs_dicts:
if meta:
dictionary["meta"] = meta
if id_hash_keys:
dictionary["id_hash_keys"] = id_hash_keys
docs.append(Document.from_dict(dictionary))
return docs
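# A minimal usage sketch (illustrative; "faq.csv" is an assumed file with a
# header row of exactly "question,answer"):
#
#   converter = CsvTextConverter()
#   docs = converter.convert(file_path="faq.csv", meta=None)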
|
9eb33ece9833448becca0710af3dcd8473a7744c
|
3eb78230ac1700a95c0d4fcad3653251088a2b57
|
/unfurl/lib/blackboxprotobuf/lib/types/varint.py
|
45eaafbd8175fcf3c5f8e0e04ffe94696b52945b
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
obsidianforensics/unfurl
|
61d9f90eab0996af34b4617ae1d473449b5da28f
|
3363420baf689a665715cd748c4f5b9880822a04
|
refs/heads/main
| 2023-09-01T15:06:12.996876
| 2023-03-04T03:30:49
| 2023-03-04T03:30:49
| 219,613,650
| 532
| 67
|
Apache-2.0
| 2023-09-13T17:23:38
| 2019-11-04T23:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
varint.py
|
"""Classes for encoding and decoding varint types"""
import binascii
import struct
from google.protobuf.internal import wire_format, encoder, decoder
import six
from unfurl.lib.blackboxprotobuf.lib.exceptions import EncoderException, DecoderException
def _gen_append_bytearray(arr):
def _append_bytearray(x):
if isinstance(x, (str, int)):
arr.append(x)
elif isinstance(x, bytes):
arr.extend(x)
else:
raise EncoderException("Unknown type returned by protobuf library")
return _append_bytearray
def encode_uvarint(value):
"""Encode a long or int into a bytearray."""
output = bytearray()
if value < 0:
raise EncoderException(
"Error encoding %d as uvarint. Value must be positive" % value
)
try:
encoder._EncodeVarint(_gen_append_bytearray(output), value)
except (struct.error, ValueError) as exc:
six.raise_from(EncoderException("Error encoding %d as uvarint." % value), exc)
return output
def decode_uvarint(buf, pos):
"""Decode bytearray into a long."""
# Convert buffer to string
if six.PY2:
buf = str(buf)
try:
value, pos = decoder._DecodeVarint(buf, pos)
except (TypeError, IndexError, decoder._DecodeError) as exc:
six.raise_from(
DecoderException(
"Error decoding uvarint from %s..."
% binascii.hexlify(buf[pos : pos + 8])
),
exc,
)
return (value, pos)
def encode_varint(value):
"""Encode a long or int into a bytearray."""
output = bytearray()
if value > (2 ** 63) or value < -(2 ** 63):
raise EncoderException("Value %d above maximum varint size" % value)
try:
encoder._EncodeSignedVarint(_gen_append_bytearray(output), value)
except (struct.error, ValueError) as exc:
six.raise_from(
EncoderException("Error encoding %d as signed varint." % value), exc
)
return output
def decode_varint(buf, pos):
"""Decode bytearray into a long."""
# Convert buffer to string
if six.PY2:
buf = str(buf)
try:
value, pos = decoder._DecodeSignedVarint(buf, pos)
except (TypeError, IndexError, decoder._DecodeError) as exc:
six.raise_from(
DecoderException(
"Error decoding varint from %s..."
% binascii.hexlify(buf[pos : pos + 8])
),
exc,
)
return (value, pos)
def encode_svarint(value):
"""Zigzag encode the potentially signed value prior to encoding"""
# zigzag encode value
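    # ZigZag maps signed to unsigned so small magnitudes stay small:
    # 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...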
return encode_uvarint(wire_format.ZigZagEncode(value))
def decode_svarint(buf, pos):
"""Decode bytearray into a long."""
output, pos = decode_uvarint(buf, pos)
# zigzag encode value
return wire_format.ZigZagDecode(output), pos
|
8cee960656e4a3cabce628766d2c1295549ef533
|
50203b4a349dcb2ed1e72c9f5463d84db8a6e983
|
/skyline/mirage/mirage_labelled_metrics.py
|
fa60661baa71c496e5cf9ef7f6ad5177d2dfc8ae
|
[
"MIT"
] |
permissive
|
earthgecko/skyline
|
97e43df824d7c92d68086f529f0f3d051a7debb0
|
c2edc451e63d5eb57117ddcfbc6e79100e706460
|
refs/heads/master
| 2023-08-30T08:36:50.740285
| 2023-06-28T15:33:47
| 2023-06-28T15:33:47
| 20,475,900
| 482
| 74
|
NOASSERTION
| 2023-06-28T15:33:49
| 2014-06-04T08:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 161,139
|
py
|
mirage_labelled_metrics.py
|
"""
mirage_labelled_metrics.py
"""
import logging
try:
from Queue import Empty
except:
from queue import Empty
# from redis import StrictRedis
from time import time, sleep, strftime, gmtime
from threading import Thread
from collections import defaultdict
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list() to reduce memory and number of
# processes
# from multiprocessing import Process, Manager, Queue
from multiprocessing import Process, Queue
import traceback
import re
# imports required for surfacing graphite JSON formatted timeseries for use in
# Mirage
import json
import sys
import os
from shutil import rmtree
from ast import literal_eval
from math import ceil
import datetime
# @added 20220722 - Task #4624: Change all dict copy to deepcopy
import copy
import settings
# @modified 20160922 - Branch #922: Ionosphere
# Added the send_anomalous_metric_to skyline_functions.py
from skyline_functions import (
write_data_to_file, fail_check, send_anomalous_metric_to,
# @modified 20220726 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
# Moved send_graphite_metric
# mkdir_p, send_graphite_metric, filesafe_metricname,
mkdir_p, filesafe_metricname,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
)
from mirage_algorithms import run_selected_algorithm
from algorithm_exceptions import TooShort, Stale, Boring
# @added 20220504 - Feature #2580: illuminance
from functions.illuminance.add_illuminance_entries import add_illuminance_entries
# @added 20220715 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
from functions.victoriametrics.get_victoriametrics_metric import get_victoriametrics_metric
from functions.prometheus.metric_name_labels_parser import metric_name_labels_parser
# @added 20220726 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
from functions.graphite.send_graphite_metric import send_graphite_metric
from functions.metrics.get_base_name_from_labelled_metrics_name import get_base_name_from_labelled_metrics_name
# @added 20221105 - Feature #4724: custom_algorithms - anomalous_daily_peak
from custom_algorithms import run_custom_algorithm_on_timeseries
# @added 20230419 - Feature #4848: mirage - analyse.irregular.unstable.timeseries.at.30days
from functions.timeseries.normalized_variance import normalized_variance
# @added 20230522 - metric_type.longterm_expire
from functions.timeseries.strictly_increasing_monotonicity import strictly_increasing_monotonicity
skyline_app = 'mirage'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(sys.version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES)
except:
BATCH_PROCESSING_NAMESPACES = []
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
# from settings import KNOWN_NEGATIVE_METRICS
KNOWN_NEGATIVE_METRICS = list(settings.KNOWN_NEGATIVE_METRICS)
except:
KNOWN_NEGATIVE_METRICS = []
# @added 20200607 - Feature #3566: custom_algorithms
try:
CUSTOM_ALGORITHMS = settings.CUSTOM_ALGORITHMS
except:
CUSTOM_ALGORITHMS = None
try:
DEBUG_CUSTOM_ALGORITHMS = settings.DEBUG_CUSTOM_ALGORITHMS
except:
DEBUG_CUSTOM_ALGORITHMS = False
# @added 20200913 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Info #1792: Shapelet extraction
try:
SNAB_ENABLED = settings.SNAB_ENABLED
except:
SNAB_ENABLED = False
try:
# @modified 20220722 - Task #4624: Change all dict copy to deepcopy
# SNAB_CHECKS = settings.SNAB_CHECKS.copy()
SNAB_CHECKS = copy.deepcopy(settings.SNAB_CHECKS)
except:
SNAB_CHECKS = {}
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
mirage_snab_only_checks_redis_set = 'mirage.snab_only_checks'
# @added 20201026 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
# Handle feedback metrics in a similar style to Ionosphere
try:
SKYLINE_FEEDBACK_NAMESPACES = list(settings.SKYLINE_FEEDBACK_NAMESPACES)
except:
# Let us take a guess
try:
graphite_host = str(settings.GRAPHITE_HOST)
graphite_hostname = graphite_host.split('.', -1)[0]
SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
except:
SKYLINE_FEEDBACK_NAMESPACES = [this_host]
# @added 20210701 - Feature #4152: DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES
try:
DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES = list(settings.DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES)
except:
DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES = []
# @added 20201208 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# @modified 20220414 - Feature #3866: MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Task #3868: POC MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# Not introduced as a settings, making this the default behaviour
# try:
# MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS = settings.MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS
# except:
# MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS = False
MIRAGE_ENABLE_HIGH_RESOLUTION_ANALYSIS = True
# @added 20210323 - Feature #3642: Anomaly type classification
try:
LUMINOSITY_CLASSIFY_ANOMALIES = settings.LUMINOSITY_CLASSIFY_ANOMALIES
except:
LUMINOSITY_CLASSIFY_ANOMALIES = False
# @added 20221105 - Feature #4724: custom_algorithms - anomalous_daily_peak
try:
MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS = settings.MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS
except:
MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS = 0
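# NOTE: the hard-coded value below overrides whatever was loaded from settings
# above.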
MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS = 3
# @added 20230424 - Feature #4724: custom_algorithms - anomalous_daily_peak
# Added an expiry to record metrics identified as normal by anomalous_daily_peaks,
# allowing Mirage to set a key in a hash and Analyzer labelled_metrics to skip
# analysis of those metrics for the expiry period. This stops metrics that are
# experiencing a normal anomalous daily peak from being analysed by Analyzer on
# every run and pushed to Mirage, which would otherwise receive lots of
# unnecessary checks and cause feedback for the period in question.
anomalous_daily_peak_expiry = 180
skyline_app_graphite_namespace = 'skyline.%s%s.labelled_metrics' % (skyline_app, SERVER_METRIC_PATH)
failed_checks_dir = '%s_failed' % settings.MIRAGE_CHECK_PATH
# @added 20191107 - Branch #3262: py3
alert_test_file = '%s/%s_alert_test.txt' % (settings.SKYLINE_TMP_DIR, skyline_app)
# In Skyline a metric is either a counter (derivative) or a gauge
skyline_metric_types = {'COUNTER': 1, 'GAUGE': 0}
skyline_metric_types_by_id = {}
for o_key in list(skyline_metric_types.keys()):
skyline_metric_types_by_id[skyline_metric_types[o_key]] = o_key
MIRAGE_LABELLED_CHECK_PATH = '%s_labelled_metrics' % settings.MIRAGE_CHECK_PATH
class MirageLabelledMetrics(Thread):
"""
The MirageLabelledMetrics thread
"""
def __init__(self, parent_pid):
"""
Initialize the Mirage
"""
super().__init__()
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = os.getpid()
self.mirage_labelled_metrics_exceptions_q = Queue()
self.mirage_labelled_metrics_anomaly_breakdown_q = Queue()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
os.kill(self.current_pid, 0)
os.kill(self.parent_pid, 0)
except:
# @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics
# Log warning
logger.warning('warning :: parent or current process dead')
sys.exit(0)
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def mirage_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
# @added 20200429 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Allow the check file to already hold a valid python list on one line
# so that a check can be added by simply echoing to debug metric_vars
# line from to log for any failed checks into a new Mirage check file
# The original above pattern is still the default, this is for the check
# files to be added by the operator from the log or for debugging.
try_literal_eval = False
if metric_vars:
if isinstance(metric_vars, list):
pass
else:
try_literal_eval = True
logger.info('mirage_labelled_metrics :: metric_vars is not a list, set to try_literal_eval')
if len(metric_vars) < 2:
try_literal_eval = True
logger.info('mirage_labelled_metrics :: metric_vars is not a list of lists, set to try_literal_eval')
else:
try_literal_eval = True
logger.info('mirage_labelled_metrics :: metric_vars is not defined, set to try_literal_eval')
if try_literal_eval:
try:
with open(metric_vars_file) as f:
for line in f:
metric_vars = literal_eval(line)
if metric_vars:
break
except:
logger.error(traceback.format_exc())
logger.error('metric_vars not loaded with literal_eval')
metric_vars = []
string_keys = ['metric']
float_keys = ['value']
int_keys = ['hours_to_resolve', 'metric_timestamp']
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
boolean_keys = ['snab_only_check']
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
list_keys = ['triggered_algorithms']
        metric_vars_array = []
        metric = None
for var_array in metric_vars:
# @modified 20181023 - Feature #2618: alert_slack
# Wrapped in try except for debugging issue where the
# hours_to_resolve was interpolating to hours_to_resolve = "t"
try:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
_value_str = str(var_array[1]).replace("'", '')
value_str = str(_value_str).replace('"', '')
value = int(float(value_str))
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# Handle new snab_only_check boolean
if var_array[0] in boolean_keys:
key = var_array[0]
logger.debug(
'debug :: boolean key - key: %s, value: %s' % (
str(var_array[0]), str(var_array[1])))
if str(var_array[1]) == '"True"':
value = True
else:
value = False
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
if var_array[0] in list_keys:
key = var_array[0]
logger.debug(
'debug :: list key - key: %s, value: %s' % (
str(var_array[0]), str(var_array[1])))
_value_str = str(var_array[1]).replace("'", '')
try:
value = literal_eval(var_array[1])
except Exception as e:
logger.error(
'error :: loading metric variables - failed to literal_eval list for %s, %s - %s' % (
str(key), str(var_array[1]), e))
value = []
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables - none found from %s' % (
str(metric_vars_file)))
return False
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to load metric variables from check file - %s' % (metric_vars_file))
return False
logger.info('mirage_labelled_metrics :: debug :: metric_vars for %s' % str(metric))
logger.info('mirage_labelled_metrics :: debug :: %s' % str(metric_vars_array))
return metric_vars_array
def create_check_file_from_hash_key(self, check_item):
"""
Create a check file from an analyzer_labelled_metrics.mirage_check key
"""
check = None
hash_key = None
redis_hash = 'analyzer_labelled_metrics.mirage_check'
metric_prefix = None
if check_item.startswith('analyzer_labelled_metrics.mirage_check.'):
hash_key = check_item.replace('analyzer_labelled_metrics.mirage_check.', '', 1)
metric_prefix = 'labelled_metrics'
metric_data = {}
try:
metric_data_str = self.redis_conn_decoded.hget(redis_hash, hash_key)
if metric_data_str:
metric_data = literal_eval(metric_data_str)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: create_check_file_from_hash_key :: failed to get %s from %s from Redis hash - %s' % (
str(hash_key), redis_hash, err))
if metric_data:
try:
self.redis_conn_decoded.hdel(redis_hash, hash_key)
logger.info('mirage_labelled_metrics :: create_check_file_from_hash_key :: removed %s from %s from Redis hash' % (
str(hash_key), redis_hash))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: create_check_file_from_hash_key :: failed to hdel %s from %s from Redis hash - %s' % (
str(hash_key), redis_hash, err))
try:
base_name = metric_data['metric']
metric_id = metric_data['metric_id']
if metric_prefix:
metric = '%s.%s' % (metric_prefix, str(metric_id))
else:
metric = str(base_name)
metric_timestamp = metric_data['timestamp']
value = metric_data['value']
triggered_algorithms = metric_data['triggered_algorithms']
try:
use_hours_to_resolve = metric_data['hours_to_resolve']
except:
use_hours_to_resolve = 168
try:
snab_only_check = metric_data['snab_only_check']
except:
snab_only_check = False
if metric_prefix == 'labelled_metrics':
metric_data['source'] = 'victoriametrics'
metric_dict = {}
try:
metric_dict = metric_name_labels_parser(skyline_app, base_name)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: create_check_file_from_hash_key :: metric_name_labels_parser failed for %s - %s' % (
base_name, err))
                    if 'metric_dict' not in metric_data:
                        metric_data['metric_dict'] = {}
                    for i_key in list(metric_dict.keys()):
                        metric_data['metric_dict'][i_key] = metric_dict[i_key]
                anomaly_check_file = '%s/%s.%s.txt' % (MIRAGE_LABELLED_CHECK_PATH, str(metric_timestamp), metric)
                mirage_anomaly_check_file_created = False
try:
with open(anomaly_check_file, 'w') as fh:
fh.write('metric = "%s"\nvalue = "%s"\nhours_to_resolve = "%s"\nmetric_timestamp = "%s"\nsnab_only_check = "%s"\ntriggered_algorithms = %s\n' % (
metric, str(value), str(use_hours_to_resolve),
str(metric_timestamp), str(snab_only_check),
str(triggered_algorithms)))
mirage_anomaly_check_file_created = True
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to write anomaly_check_file %s - %s' % (
anomaly_check_file, err))
if mirage_anomaly_check_file_created:
os.chmod(anomaly_check_file, mode=0o644)
check = '%s.%s.txt' % (str(metric_timestamp), metric)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to create anomaly check file from metric_data: %s - %s' % (
str(metric_data), err))
return check, metric_data
# @added 20221105 - Feature #4724: custom_algorithms - anomalous_daily_peak
def clear_trigger_history(self, metric):
"""
Clear last item from the trigger history
"""
trigger_history = {}
try:
raw_trigger_history = self.redis_conn_decoded.hget('mirage.trigger_history', metric)
if raw_trigger_history:
trigger_history = literal_eval(raw_trigger_history)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to evaluate data from mirage.trigger_history Redis hash key - %s' % (
str(err)))
return False
if trigger_history:
            # trigger_history keys are timestamps added in insertion order so
            # the most recent event is the last key
            last_history_key = list(trigger_history.keys())[-1]
new_trigger_history = {}
for history_timestamp in list(trigger_history.keys()):
if history_timestamp != last_history_key:
new_trigger_history[history_timestamp] = trigger_history[history_timestamp]
if new_trigger_history:
try:
self.redis_conn_decoded.hset('mirage.trigger_history', metric, str(new_trigger_history))
logger.info('mirage_labelled_metrics :: removed last event for %s from mirage.trigger_history' % metric)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to set key in mirage.trigger_history Redis hash key - %s' % (
str(err)))
return True
# @modified 20200909 - Task #3730: Validate Mirage running multiple processes
# def spin_process(self, i, run_timestamp):
def spin_process(self, i, run_timestamp, assigned_checks):
"""
        Assign metrics to a process to analyze.
"""
if not assigned_checks:
logger.info('mirage_labelled_metrics :: no checks to assign to process, nothing to do')
return
process_start_timestamp = int(time())
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
# TODO - testing
redis_metrics_processed_key = 'mirage_labelled_metrics.%s.metrics_processed' % str(i)
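        # Preserve the previous run data under a .last key before recording
        # the metrics processed in this run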
try:
exists = self.redis_conn_decoded.exists(redis_metrics_processed_key)
if exists:
last_redis_metrics_processed_key = 'mirage_labelled_metrics.%s.metrics_processed.last' % str(i)
self.redis_conn_decoded.rename(redis_metrics_processed_key, last_redis_metrics_processed_key)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to rename %s Redis hash - %s' % (
redis_metrics_processed_key, err))
# @added 20230425 - Feature #4894: labelled_metrics - SKYLINE_FEEDBACK_NAMESPACES
feedback_labelled_metric_ids = []
feedback_labelled_metric_ids_skipped = []
analyzer_labelled_metrics_busy = False
if settings.SKYLINE_FEEDBACK_NAMESPACES:
try:
analyzer_labelled_metrics_busy = self.redis_conn_decoded.get('analyzer_labelled_metrics.busy')
except Exception as err:
                logger.error('error :: mirage_labelled_metrics :: failed to get analyzer_labelled_metrics.busy Redis key - %s' % (
                    err))
# @added 20230605 - Feature #4932: mute_alerts_on
mute_alerts_on = []
if not analyzer_labelled_metrics_busy:
mute_alerts_on_dict = {}
try:
mute_alerts_on_dict = self.redis_conn_decoded.hgetall('metrics_manager.mute_alerts_on')
except Exception as err:
logger.error('error :: failed to hgetall metrics_manager.mute_alerts_on - %s' % (
err))
mute_alerts_on = [i_metric for i_metric in list(mute_alerts_on_dict.keys()) if i_metric.startswith('labelled_metrics.')]
if mute_alerts_on:
                logger.info('mirage_labelled_metrics :: there are %s mute_alert_on labelled_metrics currently which shall be added to feedback_labelled_metric_ids if it is not set' % str(len(mute_alerts_on)))
if analyzer_labelled_metrics_busy:
logger.info('mirage_labelled_metrics :: analyzer_labelled_metrics_busy found')
try:
feedback_labelled_metric_ids = list(self.redis_conn_decoded.smembers('aet.metrics_manager.feedback.labelled_metric_ids'))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: smembers failed on Redis set aet.metrics_manager.feedback.labelled_metric_ids - %s' % (
err))
# @added 20230605 - Feature #4932: mute_alerts_on
if not feedback_labelled_metric_ids and mute_alerts_on:
for labelled_metric in mute_alerts_on:
metric_id = labelled_metric.split('.')[-1]
feedback_labelled_metric_ids.append(str(metric_id))
            logger.info('mirage_labelled_metrics :: added %s mute_alert_on metric ids to feedback_labelled_metric_ids as it was not set' % str(len(mute_alerts_on)))
if not analyzer_labelled_metrics_busy:
analyzer_labelled_metrics_busy = True
logger.info('mirage_labelled_metrics :: analyzer_labelled_metrics_busy set to True because mute_alert_on labelled_metrics were found')
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
literal_analyzer_waterfall_alerts = []
try:
literal_analyzer_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_analyzer_waterfall_alerts = []
analyzer_waterfall_alerts = []
for literal_waterfall_alert in literal_analyzer_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
analyzer_waterfall_alerts.append(waterfall_alert)
# @added 20230424 - Feature #4724: custom_algorithms - anomalous_daily_peak
# Added expiry to record metrics identified as normal by anomalous_daily_peaks
current_now = int(time())
current_aligned_ts = int(process_start_timestamp // 60 * 60)
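        # e.g. a process_start_timestamp of 1682424037 aligns to 1682424000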
normal_daily_peaks_keys = []
        # Do not use i as the loop variable here as it would shadow the
        # process number argument which is used for per process Redis keys
        for minutes_ago in sorted(list(range(1, 3)), reverse=True):
            key_ts = current_aligned_ts - (60 * minutes_ago)
            key = 'mirage.normal_daily_peak_metrics.%s' % str(key_ts)
            normal_daily_peaks_keys.append(key)
normal_daily_peak_metrics_expiry = {}
        for normal_daily_peaks_key in normal_daily_peaks_keys:
            current_key_data = {}
            try:
                current_key_data = self.redis_conn_decoded.hgetall(normal_daily_peaks_key)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: hgetall failed on Redis hash %s - %s' % (
str(normal_daily_peaks_key), err))
for labelled_metric in list(current_key_data):
if current_now > int(float(current_key_data[labelled_metric])):
try:
self.redis_conn_decoded.hdel(normal_daily_peaks_key, labelled_metric)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to remove %s from Redis hash %s - %s' % (
labelled_metric, normal_daily_peaks_key, err))
else:
normal_daily_peak_metrics_expiry[labelled_metric] = int(float(current_key_data[labelled_metric]))
checks_processed = 0
ionosphere_unique_metrics = []
for check_item in assigned_checks:
check = str(check_item)
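            # Stop processing new checks if the run time limit of 50 seconds
            # is reached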
if int(time()) >= (process_start_timestamp + 50):
logger.info('mirage_labelled_metrics :: run time limit reached - stopping')
break
checks_processed += 1
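            # Default the data source to graphite, for labelled metric checks
            # this is overridden to victoriametrics in create_check_file_from_hash_key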
metric_data = {
'source': 'graphite',
}
is_labelled_metric = False
if check_item.startswith('analyzer_labelled_metrics.mirage_check.'):
is_labelled_metric = True
if is_labelled_metric:
# Create a check file for backwards compatibility
try:
check, metric_data = self.create_check_file_from_hash_key(check_item)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: create_check_file_from_hash_key failed for %s - %s' % (
str(check_item), err))
continue
else:
continue
logger.info('mirage_labelled_metrics :: checking metric_data: %s' % str(metric_data))
metric_id = 0
try:
metric_id = metric_data['metric_id']
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to determine metric_id from metric_data for %s - %s' % (
check_item, err))
# TODO - testing
try:
key = 'labelled_metrics.%s-%s' % (str(metric_id), str(metric_data['timestamp']))
self.redis_conn_decoded.hset(redis_metrics_processed_key, key, str(metric_data))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add metric to %s Redis hash - %s' % (redis_metrics_processed_key, err))
metric_check_file = '%s/%s' % (
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# settings.MIRAGE_CHECK_PATH, str(metric_var_files_sorted[0]))
MIRAGE_LABELLED_CHECK_PATH, check)
check_file_name = os.path.basename(str(metric_check_file))
check_file_timestamp = check_file_name.split('.', 1)[0]
check_file_metricname_txt = check_file_name.split('.', 1)[1]
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
check_file_metricname_dir = check_file_metricname.replace('.', '/')
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
# Load metric variables
# @modified 20160822 - Bug #1460: panorama check file fails
# Changed to panorama style skyline_functions load_metric_vars
# self.load_metric_vars(metric_check_file)
# Load and validate metric variables
try:
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.mirage_load_metric_vars(str(metric_check_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to load metric variables from check file - %s' % str(metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
metric = None
# @added 20200106 - Branch #3262: py3
# Task #3034: Reduce multiprocessing Manager list usage
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# redis_set_to_delete = 'mirage.metric_variables'
redis_metric_variables_set = 'mirage_labelled_metrics.%s.metric_variables' % str(i)
redis_set_to_delete = redis_metric_variables_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('mirage_labelled_metrics :: deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to delete Redis set - %s' % redis_set_to_delete)
try:
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
metric_name = ['metric_name', metric]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_name)
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage_labelled_metrics.metric_variables'
data = str(metric_name)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
                    logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
                        str(data), str(redis_metric_variables_set)))
logger.info('mirage_labelled_metrics :: debug :: added metric_name %s from check file - %s' % (str(metric_name), metric_check_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to read metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
if not metric:
logger.error('error :: mirage_labelled_metrics :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
value = None
try:
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
metric_value = ['metric_value', value]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_value)
redis_set = 'mirage_labelled_metrics.metric_variables'
data = str(metric_value)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
                    logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
                        str(data), str(redis_metric_variables_set)))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to read value variable from check file - %s - %s' % (metric_check_file, err))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: mirage_labelled_metrics :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
hours_to_resolve = None
try:
key = 'hours_to_resolve'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
hours_to_resolve = int(value_list[0])
hours_to_resolve_list = ['hours_to_resolve', hours_to_resolve]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(hours_to_resolve_list)
redis_set = 'mirage_labelled_metrics.metric_variables'
data = str(hours_to_resolve_list)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
                    logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
                        str(data), str(redis_metric_variables_set)))
except:
logger.error('error :: mirage_labelled_metrics :: failed to read hours_to_resolve variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
if not hours_to_resolve:
logger.error('error :: mirage_labelled_metrics :: failed to load hours_to_resolve variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
metric_timestamp = None
try:
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
metric_timestamp_list = ['metric_timestamp', metric_timestamp]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.metric_variables.append(metric_timestamp_list)
redis_set = 'mirage_labelled_metrics.metric_variables'
data = str(metric_timestamp_list)
try:
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# self.redis_conn.sadd(redis_set, data)
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
                    logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
                        str(data), str(redis_metric_variables_set)))
except:
logger.error('error :: mirage_labelled_metrics :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
if not metric_timestamp:
logger.error('error :: mirage_labelled_metrics :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
continue
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
snab_only_check = None
try:
key = 'snab_only_check'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
snab_only_check = value_list[0]
except:
snab_only_check = None
snab_only_check_list = ['snab_only_check', snab_only_check]
redis_set = 'mirage_labelled_metrics.metric_variables'
data = str(snab_only_check_list)
try:
self.redis_conn.sadd(redis_metric_variables_set, data)
except:
logger.error(traceback.format_exc())
                logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
                    str(data), str(redis_metric_variables_set)))
# @added 20210304 - Feature #3642: Anomaly type classification
# Feature #3970: custom_algorithm - adtk_level_shift
# Added triggered_algorithms to mirage_check_file
try:
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
except:
triggered_algorithms = []
metric_data_dir = '%s/%s' % (settings.MIRAGE_DATA_FOLDER, str(metric))
            # Ignore any metric check with a timestamp older than MIRAGE_STALE_SECONDS
int_metric_timestamp = int(metric_timestamp)
int_run_timestamp = int(run_timestamp)
metric_timestamp_age = int_run_timestamp - int_metric_timestamp
if metric_timestamp_age > settings.MIRAGE_STALE_SECONDS:
logger.info('mirage_labelled_metrics :: stale check :: %s check request is %s seconds old - discarding' % (metric, str(metric_timestamp_age)))
# Remove metric check file
if os.path.isfile(metric_check_file):
os.remove(metric_check_file)
logger.info('mirage_labelled_metrics :: removed check file - %s' % (metric_check_file))
else:
logger.info('mirage_labelled_metrics :: could not remove check file - %s' % (metric_check_file))
# Remove the metric directory
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('mirage_labelled_metrics :: removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: mirage_labelled_metrics :: failed to rmtree - %s' % metric_data_dir)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage_labelled_metrics.stale_check_discarded'
try:
self.redis_conn.sadd(redis_set, str(metric))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(metric), str(redis_set)))
continue
# Calculate hours second order resolution to seconds
second_order_resolution_seconds = int(hours_to_resolve) * 3600
int_second_order_resolution_seconds = int(float(second_order_resolution_seconds))
second_resolution_timestamp = int_metric_timestamp - int_second_order_resolution_seconds
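            # e.g. hours_to_resolve of 168 gives a second_order_resolution_seconds
            # of 604800 (7 days) and second_resolution_timestamp is the metric
            # timestamp minus that period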
labelled_metric_base_name = None
labelled_metric_name = str(metric)
try:
labelled_metric_base_name = get_base_name_from_labelled_metrics_name(skyline_app, labelled_metric_name)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: get_base_name_from_labelled_metrics_name failed for %s - %s' % (
str(labelled_metric_name), str(err)))
trigger_anomaly = False
test_alert_and_trigger = False
test_alert = False
if 'test_alert' in metric_data:
test_alert = metric_data['test_alert']
if 'trigger_anomaly' in metric_data:
trigger_anomaly = metric_data['trigger_anomaly']
test_alert_and_trigger = True
if test_alert:
                logger.info('mirage_labelled_metrics :: test_alert found for %s, set trigger_anomaly: %s' % (
                    str(metric), str(trigger_anomaly)))
alert_tested_key = 'mirage.test_alerts.done.%s' % metric
try:
self.redis_conn_decoded.setex(alert_tested_key, 300, int(time()))
logger.info('test_alert created Redis key %s' % alert_tested_key)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: failed to create Redis key %s - %s' % (
alert_tested_key, err))
# Remove any old json file related to the metric
metric_json_file = '%s/%s.json' % (metric_data_dir, str(metric))
try:
os.remove(metric_json_file)
except OSError:
pass
# @added 20230425 - Feature #4894: labelled_metrics - SKYLINE_FEEDBACK_NAMESPACES
feedback_metric = False
feedback_key = 'mirage_labelled_metrics.feedback.expiry.%s' % str(metric_id)
if analyzer_labelled_metrics_busy:
if str(metric_id) in feedback_labelled_metric_ids:
feedback_metric = True
feedback_metric_expiry = 0
if feedback_metric:
try:
feedback_metric_expiry = self.redis_conn_decoded.get(feedback_key)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to get Redis key %s - %s' % (
str(feedback_key), err))
feedback_metric_expiry = 0
if feedback_metric_expiry:
logger.info('mirage_labelled_metrics :: feedback metric expiry exists, removing check :: %s ' % metric)
# Remove metric check file
if os.path.isfile(metric_check_file):
os.remove(metric_check_file)
logger.info('mirage_labelled_metrics :: removed check file - %s' % (metric_check_file))
else:
logger.info('mirage_labelled_metrics :: could not remove check file - %s' % (metric_check_file))
# Remove the metric directory
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('mirage_labelled_metrics :: removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: mirage_labelled_metrics :: failed to rmtree - %s' % metric_data_dir)
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == metric:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('mirage_labelled_metrics :: removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
metric, str(metric_timestamp), redis_set))
continue
data_source = metric_data['source']
# Get data from graphite
if data_source == 'graphite':
logger.error('error :: mirage_labelled_metrics :: data_source set to graphite for %s' % (
metric))
# Get data from victoriametrics
if data_source == 'victoriametrics':
base_name = metric_data['metric']
logger.info(
'retrieve data :: surfacing %s time series from victoriametrics for %s seconds' % (
base_name, str(second_order_resolution_seconds)))
# @added 20230522 - metric_type.longterm_expire
                # Check the monotonicity of the metric at second_order_resolution_seconds
                # to determine whether it differs from the recorded longterm type
current_metric_type = None
try:
current_metric_type = self.redis_conn_decoded.hget('skyline.labelled_metrics.id.type', str(metric_id))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to determine metric_type from skyline.labelled_metrics.id.type - %s' % (
err))
test_timeseries = []
update_metric_type = False
save_test_data = False
if current_metric_type:
try:
test_timeseries = get_victoriametrics_metric(
skyline_app, base_name, second_resolution_timestamp,
metric_timestamp, 'list', 'object', metric_data={},
plot_parameters={}, do_not_type=True)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: get_victoriametrics_metric failed getting test_timeseries for %s - %s' % (
base_name, err))
if test_timeseries:
is_strictly_increasing_monotonic = None
try:
is_strictly_increasing_monotonic = strictly_increasing_monotonicity(test_timeseries)
except Exception as err:
                            logger.error('error :: mirage_labelled_metrics :: strictly_increasing_monotonicity failed on test_timeseries for %s - %s' % (
                                base_name, err))
determined_metric_type = None
if str(is_strictly_increasing_monotonic) != 'None':
if is_strictly_increasing_monotonic:
determined_metric_type = '1'
else:
determined_metric_type = '0'
if str(determined_metric_type) != 'None':
if current_metric_type != determined_metric_type:
update_metric_type = True
                            else:
                                # Type unchanged, for a gauge the untyped test
                                # data is the same as the typed data so it can
                                # be saved and reused rather than refetched
                                if determined_metric_type == '0':
                                    save_test_data = True
if update_metric_type:
                    logger.info('mirage_labelled_metrics :: detected change in metric_type, updating from %s to %s for %s' % (
                        current_metric_type, determined_metric_type, metric))
try:
self.redis_conn_decoded.hset('skyline.labelled_metrics.id.type', str(metric_id), determined_metric_type)
except Exception as err:
                        logger.error('error :: mirage_labelled_metrics :: failed to update skyline.labelled_metrics.id.type with metric_type - %s' % (
                            err))
                    # Set a new expire value in the past in the longterm_expire
                    # hash so that metrics_manager rechecks the metric type
                    # rather than waiting for the 30 day expiry
try:
new_expire = int(metric_timestamp) - 3600
self.redis_conn_decoded.hset('skyline.labelled_metrics.id.type.longterm_expire', str(metric_id), str(new_expire))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to update skyline.labelled_metrics.id.type.longterm_expire with new expire value - %s' % (
err))
                if save_test_data:
                    output_object_path = os.path.dirname(metric_json_file)
                    if not os.path.isdir(output_object_path):
                        try:
                            mkdir_p(output_object_path)
                            logger.info(
                                'output_object_path - %s' % str(output_object_path))
                        except Exception as err:
                            logger.error(
                                'error :: failed to create output_object_path - %s - %s' % (
                                    str(output_object_path), err))
                    with open(metric_json_file, 'w') as f:
                        f.write(json.dumps(test_timeseries))
                    os.chmod(metric_json_file, mode=0o644)
                    metric_json_file_saved = metric_json_file
# @modified 20230522 - metric_type.longterm_expire
# Only surface if the test data was not saved
if not save_test_data:
try:
# get_victoriametrics_metric automatically applies the rate and
# step required no downsampling or nonNegativeDerivative is
# required.
metric_json_file_saved = get_victoriametrics_metric(
skyline_app, base_name, second_resolution_timestamp,
metric_timestamp, 'json', metric_json_file, metric_data)
if metric_json_file_saved:
logger.info('mirage_labelled_metrics :: %s time series data saved to %s' % (metric, metric_json_file_saved))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: get_victoriametrics_metric failed for %s - %s' % (
str(metric_json_file), err))
# Check there is a json timeseries file to test
if not os.path.isfile(metric_json_file):
logger.error(
'error :: retrieve failed - failed to surface %s time series from %s' % (
metric, data_source))
# @added 20200905 - Feature #3734: waterfall alerts
# Try a metric 3 times before removing the check file
remove_check_file = True
check_failed_key = 'mirage.check.data_retrieval_failed.%s.%s' % (
str(int_metric_timestamp), metric)
fail_count = 0
                try:
                    # Redis returns the value as a str so cast to int
                    fail_count = int(self.redis_conn.get(check_failed_key))
                except:
                    fail_count = 0
if not fail_count:
fail_count = 0
fail_count += 1
if fail_count < 3:
remove_check_file = False
try:
self.redis_conn.setex(check_failed_key, 300, fail_count)
logger.info('mirage_labelled_metrics :: updated fail_count to %s in %s' % (str(fail_count), check_failed_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to set Redis key %s with %s' % (
str(check_failed_key), str(fail_count)))
else:
logger.error('error :: mirage_labelled_metrics :: fail_count is %s in %s, removing check file' % (str(fail_count), check_failed_key))
if remove_check_file:
# Remove metric check file
try:
os.remove(metric_check_file)
except OSError:
pass
# Remove the metric directory
try:
rmtree(metric_data_dir)
logger.info('mirage_labelled_metrics :: removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: mirage_labelled_metrics :: failed to rmtree %s' % metric_data_dir)
continue
logger.info('mirage_labelled_metrics :: retrieved data :: for %s at %s seconds' % (
metric, str(second_order_resolution_seconds)))
# Make process-specific dicts
# exceptions = defaultdict(int)
# anomaly_breakdown = defaultdict(int)
self.check_if_parent_is_alive()
timeseries = []
try:
with open((metric_json_file), 'r') as f:
timeseries = json.loads(f.read())
logger.info('mirage_labelled_metrics :: data points surfaced :: %s' % (str(len(timeseries))))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to create timeseries from %s - %s' % (
str(metric_json_file), err))
timeseries = []
# @added 20170212 - Feature #1886: Ionosphere learn
# Only process if the metric has sufficient data
first_timestamp = None
try:
first_timestamp = int(timeseries[0][0])
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: could not determine first timestamp - %s' % err)
timestamp_now = int(time())
valid_if_before_timestamp = timestamp_now - int(settings.FULL_DURATION)
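            # To be valid for Mirage analysis the timeseries must start at
            # least FULL_DURATION seconds ago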
valid_mirage_timeseries = True
if first_timestamp:
if first_timestamp > valid_if_before_timestamp:
valid_mirage_timeseries = False
            else:
                logger.warning('warning :: no first_timestamp, however overriding valid_mirage_timeseries to True and analysing the check anyway')
                valid_mirage_timeseries = True
redis_metric_name = '%s' % str(metric)
metric_id = 0
if is_labelled_metric:
metric_id = metric_data['metric_id']
redis_metric_name = 'labelled_metrics.%s' % str(metric_id)
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
            # Determine if any metrics have negative values so that they can
            # be added to the ionosphere.untrainable_metrics Redis set
run_negatives_present = False
# @added 20200607 - Feature #3566: custom_algorithms
algorithms_run = list(settings.MIRAGE_ALGORITHMS)
# @added 20200904 - Feature #3734: waterfall alerts
anomalous = None
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Add timings
analysis_start_time = time()
try:
if valid_mirage_timeseries:
logger.info('mirage_labelled_metrics :: analyzing :: %s at %s seconds' % (metric, second_order_resolution_seconds))
# @modified 20230118 - Task #4786: Switch from matrixprofile to stumpy
# Task #4778: v4.0.0 - update dependencies
# Added current_func
anomalous, ensemble, datapoint, negatives_found, algorithms_run = run_selected_algorithm(timeseries, metric, second_order_resolution_seconds, run_negatives_present, triggered_algorithms, current_func='mirage_labelled_metrics')
logger.info('mirage_labelled_metrics :: analysed :: %s - anomalous: %s' % (metric, str(anomalous)))
else:
                    logger.info('mirage_labelled_metrics :: not analyzing :: %s at %s seconds as there are not sufficiently old datapoints in the timeseries - not valid_mirage_timeseries' % (metric, second_order_resolution_seconds))
anomalous = False
if timeseries:
datapoint = timeseries[-1][1]
else:
datapoint = 0
# @added 20220315 - Feature #4482: Test alerts
# Allow to test on sparse metrics
if test_alert or test_alert_and_trigger:
anomalous = True
logger.info('mirage_labelled_metrics :: test_alert - setting anomalous to True for %s' % metric)
ensemble = [True]
triggered_algorithms = ['testing']
algorithms_run = ['histogram_bins']
negatives_found = False
# It could have been deleted by the Roomba
except TypeError:
# @added 20200430 - Feature #3480: batch_processing
# Added logging here as the DeletedByRoomba exception is
# generally not related to that but related to some other fail
# in the processing of the run algorithms phase.
# It could have been deleted by the Roomba, but Mirage does not use
# Redis data so probably, definitely was not :)
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: added as DeletedByRoomba but possibly not see traceback above')
exceptions['DeletedByRoomba'] += 1
logger.info('mirage_labelled_metrics :: exceptions :: DeletedByRoomba')
except TooShort:
exceptions['TooShort'] += 1
logger.info('mirage_labelled_metrics :: exceptions :: TooShort')
except Stale:
exceptions['Stale'] += 1
logger.info('mirage_labelled_metrics :: exceptions :: Stale')
except Boring:
exceptions['Boring'] += 1
logger.info('mirage_labelled_metrics :: exceptions :: Boring')
except Exception as err:
exceptions['Other'] += 1
logger.info('mirage_labelled_metrics :: exceptions :: Other')
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: unhandled error - %s' % err)
# @added 20230425 - Feature #4894: labelled_metrics - SKYLINE_FEEDBACK_NAMESPACES
if feedback_metric:
try:
self.redis_conn_decoded.setex(feedback_key, 300, int(time()))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: setex failed on %s - %s' % (
feedback_key, err))
# @added 20220420 - Feature #4530: namespace.analysed_events
parent_namespace = metric.split('.', maxsplit=1)[0]
if is_labelled_metric:
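                # For labelled metrics the parent namespace is the _tenant_id label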
try:
parent_namespace = metric_data['metric_dict']['labels']['_tenant_id']
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to determine parent_namespace from metric_data: %s - %s' % (
str(metric_data), err))
date_string = str(strftime('%Y-%m-%d', gmtime()))
namespace_analysed_events_hash = 'namespace.analysed_events.%s.%s' % (skyline_app, date_string)
try:
self.redis_conn.hincrby(namespace_analysed_events_hash, parent_namespace, 1)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to increment %s Redis hash - %s' % (
namespace_analysed_events_hash, err))
try:
self.redis_conn.expire(namespace_analysed_events_hash, (86400 * 15))
logger.info('mirage_labelled_metrics :: updated %s Redis hash' % namespace_analysed_events_hash)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to set expire %s Redis hash - %s' % (
namespace_analysed_events_hash, err))
# @added 20201001 - Branch #3068: SNAB
# Task #3748: POC SNAB
# Add timings
analysis_run_time = time() - analysis_start_time
logger.info('mirage_labelled_metrics :: algorithms analysis completed in %.2f seconds' % (
analysis_run_time))
# @added 20210309 - Task #3730: Validate Mirage running multiple processes
# Reimplement mirage.checks.done count
try:
self.redis_conn.incr('mirage_labelled_metrics.checks.done')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to increment mirage_labelled_metrics.checks.done Redis key - %s' % str(err))
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
if metric.startswith(settings.FULL_NAMESPACE):
base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
            # waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
get_waterfall_alerts_once = True
if not get_waterfall_alerts_once:
literal_analyzer_waterfall_alerts = []
try:
literal_analyzer_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_analyzer_waterfall_alerts = []
analyzer_waterfall_alerts = []
for literal_waterfall_alert in literal_analyzer_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
analyzer_waterfall_alerts.append(waterfall_alert)
if test_alert or test_alert_and_trigger:
anomalous = True
logger.info('mirage_labelled_metrics :: test_alert - setting anomalous to True for %s' % metric)
ensemble = [True]
triggered_algorithms = ['testing']
algorithms_run = ['histogram_bins']
negatives_found = False
# @added 20230419 - Feature #4848: mirage - analyse.irregular.unstable.timeseries.at.30days
irregular_unstable_timeseries = False
if anomalous:
low_variance = 0.009
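                # Timeseries with a normalized variance at or below this
                # threshold are considered irregular and unstable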
normalized_var = None
start_normalized_var = time()
try:
normalized_var = normalized_variance(timeseries)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: normalized_variance failed on timeseries for %s - %s' % (
metric, err))
if isinstance(normalized_var, dict):
err = normalized_var['error']
logger.error('error :: mirage_labelled_metrics :: normalized_variance reported an error with timeseries for %s - %s' % (
metric, err))
normalized_var = None
if isinstance(normalized_var, float):
if normalized_var <= low_variance:
irregular_unstable_timeseries = True
logger.info('mirage_labelled_metrics :: normalized_variance ran with result: %s (took %.6f seconds), for %s' % (
str(normalized_var), (time() - start_normalized_var), metric))
# @added 20230515 - Feature #4848: mirage - analyse.irregular.unstable.timeseries.at.30days
if anomalous and irregular_unstable_timeseries:
logger.info('mirage_labelled_metrics :: checking irregular_unstable %s' % (
metric))
result = True
start_irregular_unstable = time()
try:
custom_algorithm = 'irregular_unstable'
custom_algorithms_to_run = {}
custom_algorithms_to_run[custom_algorithm] = {
'namespaces': ['labelled_metrics'],
'algorithm_source': '/opt/skyline/github/skyline/skyline/custom_algorithms/irregular_unstable.py',
'algorithm_parameters': {
'low_variance': 0.009, 'labelled_metric_name': labelled_metric_name,
'metric': labelled_metric_base_name,
'debug_logging': True,
},
'max_execution_time': 3.0,
'consensus': 1,
'algorithms_allowed_in_consensus': ['irregular_unstable'],
# 'debug_logging': False,
'debug_logging': True,
'run_3sigma_algorithms': False,
'run_before_3sigma': False,
'run_only_if_consensus': False,
'trigger_history_override': False,
'use_with': ['mirage'],
}
# use_debug_logging_here = False
use_debug_logging_here = True
result, anomalyScore = run_custom_algorithm_on_timeseries(skyline_app, os.getpid(), labelled_metric_name, timeseries, 'irregular_unstable', custom_algorithms_to_run[custom_algorithm], use_debug_logging_here, current_func='mirage_labelled_metrics')
algorithms_run.append(custom_algorithm)
ensemble.append(result)
if DEBUG_CUSTOM_ALGORITHMS or use_debug_logging_here:
logger.debug('debug :: mirage_labelled_metrics :: run_custom_algorithm_on_timeseries run irregular_unstable with result - %s, anomalyScore - %s' % (
str(result), str(anomalyScore)))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: run_custom_algorithm_on_timeseries irregular_unstable failed on %s - %s' % (
str(metric), err))
result = None
logger.info('mirage_labelled_metrics :: irregular_unstable ran with result: %s (took %.6f seconds), for %s - %s' % (
str(result), (time() - start_irregular_unstable), labelled_metric_name, labelled_metric_base_name))
# Although fine in a notebook does not have the desired effect
# in the runtime so convert to a str and check
# if result is False:
if str(result) == 'False':
                    logger.info('mirage_labelled_metrics :: irregular_unstable is overriding anomalous result for %s' % (
                        labelled_metric_name))
anomalous = False
                    # Clear the last item from the trigger history as the
                    # anomalous result that triggered the check was a 3sigma
                    # result that has now been overridden
try:
cleared_trigger_history = self.clear_trigger_history(labelled_metric_name)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: clear_trigger_history failed on %s - %s' % (
str(labelled_metric_name), err))
# @added 20221105 - Feature #4724: custom_algorithms - anomalous_daily_peak
# Determine if an anomaly is a normal peak value of normal magnitude
# that occurs daily in a 7 day period
if anomalous and MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS:
logger.info('mirage_labelled_metrics :: checking anomalous_daily_peak %s' % (
metric))
result = True
start_anomalous_daily_peak = time()
try:
custom_algorithm = 'anomalous_daily_peak'
custom_algorithms_to_run = {}
custom_algorithms_to_run[custom_algorithm] = {
'namespaces': ['labelled_metrics'],
'algorithm_source': '/opt/skyline/github/skyline/skyline/custom_algorithms/anomalous_daily_peak.py',
'algorithm_parameters': {
'number_of_daily_peaks': MIRAGE_CHECK_REPETITIVE_DAILY_PEAKS,
'within_percent_of_normal_peaks': 20.0,
# @added 20230424 - Feature #4724: custom_algorithms - anomalous_daily_peak
# Added expiry to record metrics identified as normal by anomalous_daily_peaks
'expiry': anomalous_daily_peak_expiry,
'debug_logging': True,
},
'max_execution_time': 2.0,
'consensus': 1,
'algorithms_allowed_in_consensus': ['anomalous_daily_peak'],
# 'debug_logging': False,
'debug_logging': True,
'run_3sigma_algorithms': False,
'run_before_3sigma': False,
'run_only_if_consensus': False,
'trigger_history_override': False,
'use_with': ['mirage'],
}
# use_debug_logging_here = False
use_debug_logging_here = True
# @modified 20230118 - Task #4786: Switch from matrixprofile to stumpy
# Task #4778: v4.0.0 - update dependencies
# Added current_func
result, anomalyScore = run_custom_algorithm_on_timeseries(skyline_app, os.getpid(), metric, timeseries, 'anomalous_daily_peak', custom_algorithms_to_run[custom_algorithm], use_debug_logging_here, current_func='mirage_labelled_metrics')
algorithms_run.append(custom_algorithm)
ensemble.append(result)
if DEBUG_CUSTOM_ALGORITHMS or use_debug_logging_here:
logger.debug('debug :: mirage_labelled_metrics :: run_custom_algorithm_on_timeseries run anomalous_daily_peak with result - %s, anomalyScore - %s' % (
str(result), str(anomalyScore)))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: run_custom_algorithm_on_timeseries anomalous_daily_peak failed on %s - %s' % (
str(metric), err))
result = None
logger.info('mirage_labelled_metrics :: anomalous_daily_peak ran with result: %s (took %.6f seconds), for %s' % (
str(result), (time() - start_anomalous_daily_peak), metric))
# Although fine in a notebook does not have the desired effect
# in the runtime so convert to a str and check
# if result is False:
if str(result) == 'False':
                    logger.info('mirage_labelled_metrics :: anomalous_daily_peak is overriding anomalous result for %s' % (
                        metric))
anomalous = False
                    # Clear the last item from the trigger history as the
                    # 3sigma triggered event was determined to be a normal
                    # daily peak
try:
cleared_trigger_history = self.clear_trigger_history(metric)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: clear_trigger_history failed on %s - %s' % (
str(metric), err))
# @added 20230424 - Feature #4724: custom_algorithms - anomalous_daily_peak
# Added expiry to record metrics identified as normal by anomalous_daily_peaks
current_now = int(time())
current_aligned_ts = int(process_start_timestamp // 60 * 60)
expire_at = current_now + anomalous_daily_peak_expiry
redis_hash = 'mirage.normal_daily_peak_metrics.%s' % str(current_aligned_ts)
try:
self.redis_conn_decoded.hset(redis_hash, redis_metric_name, str(expire_at))
self.redis_conn_decoded.expire(redis_hash, str(anomalous_daily_peak_expiry * 2))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis hash %s - %s' % (
str(redis_metric_name), str(redis_hash), err))
# @added 20220504 - Feature #2580: illuminance
# @modified 20230419 - Feature #2580: illuminance
# Moved out of the if anomalous block. Record illuminance for all
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
for index, boolean_value in enumerate(ensemble):
if boolean_value:
# @modified 20200607 - Feature #3566: custom_algorithms
# algorithm = settings.MIRAGE_ALGORITHMS[index]
algorithm = algorithms_run[index]
anomaly_breakdown[algorithm] += 1
triggered_algorithms.append(algorithm)
if test_alert:
triggered_algorithms = ['testing']
if triggered_algorithms:
illuminance_dict = {}
use_key = str(base_name)
if is_labelled_metric:
use_key = str(metric_id)
illuminance_dict[use_key] = {
'timestamp': int(metric_timestamp),
'value': float(datapoint),
'triggered_algorithms_count': len(triggered_algorithms)}
logger.info('mirage_labelled_metrics :: calling add_illuminance_entries with %s entries to add' % (
str(len(illuminance_dict))))
current_illuminance_dict = {}
try:
current_illuminance_dict = add_illuminance_entries(self, skyline_app, int(run_timestamp), illuminance_dict)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: add_illuminance_entries failed - %s' % (
err))
logger.info('mirage_labelled_metrics :: illuminance Redis hash now has %s entries' % (
str(len(current_illuminance_dict))))
if not anomalous:
not_anomalous_metric = [datapoint, base_name]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous_metrics.append(not_anomalous_metric)
redis_set = 'mirage_labelled_metrics.not_anomalous_metrics'
data = str(not_anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('mirage_labelled_metrics :: removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20201128 - Feature #3734: waterfall alerts
                        # If the check just done is newer than an existing
# waterfall alert metric timestamp remove those keys as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('mirage_labelled_metrics :: removed waterfall alert item with older timestamp from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20210330 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the mirage.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous. This provides
# data for /panorama?not_anomalous and /panorama?not_anomalous_metric
# method which are used for plots in the webapp and json response.
# The mirage.panorama.not_anomalous_metrics Redis hash is managed in
# analyzer/metrics_manager
not_anomalous_timestamp = None
try:
not_anomalous_timestamp = int(timeseries[-1][0])
except:
not_anomalous_timestamp = int(metric_timestamp)
redis_hash = 'mirage_labelled_metrics.panorama.not_anomalous_metrics'
try:
data = {
base_name: {
'timestamp': not_anomalous_timestamp,
'value': datapoint,
'hours_to_resolve': int(hours_to_resolve),
}
}
self.redis_conn.hset(redis_hash, time(), str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis hash %s' % (
str(data), str(redis_hash)))
logger.info('mirage_labelled_metrics :: not anomalous :: %s with %s (at full duration), %s (at SECOND_ORDER_RESOLUTION_HOURS)' % (
metric, value, str(datapoint)))
# If it's anomalous, add it to list
if anomalous:
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
if metric.startswith(settings.FULL_NAMESPACE):
base_name = metric.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric
# metric_timestamp = int(timeseries[-1][0])
metric_timestamp = int_metric_timestamp
# Get the anomaly breakdown - who returned True?
# @modified 20230419 - Feature #2580: illuminance
# Moved out of the if anomalous block. Determine
# triggered_algorithms for all to record illuminance
# triggered_algorithms = []
# for index, boolean_value in enumerate(ensemble):
# if boolean_value:
# # @modified 20200607 - Feature #3566: custom_algorithms
# # algorithm = settings.MIRAGE_ALGORITHMS[index]
# algorithm = algorithms_run[index]
# anomaly_breakdown[algorithm] += 1
# triggered_algorithms.append(algorithm)
# if test_alert:
# triggered_algorithms = ['testing']
# @modified 20201007 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added second_order_resolution_seconds, triggered_algorithms and algorithms_run
# anomalous_metric = [datapoint, base_name, metric_timestamp]
# @modified 20230419 - Feature #4848: mirage - analyse.irregular.unstable.timeseries.at.30days
# Added snab_algorithms_to_run
snab_algorithms_to_run = []
if SNAB_ENABLED:
if irregular_unstable_timeseries:
snab_algorithms_to_run = ['irregular_unstable']
anomalous_metric = [datapoint, base_name, metric_timestamp, second_order_resolution_seconds, triggered_algorithms, algorithms_run, snab_algorithms_to_run]
if not ionosphere_unique_metrics:
try:
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
ionosphere_unique_metrics = []
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(anomalous_metric)
if metric not in ionosphere_unique_metrics:
redis_set = 'mirage.anomalous_metrics'
data = str(anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
                        logger.info('mirage_labelled_metrics :: added %s to mirage.anomalous_metrics Redis set' % (
                            str(data)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to mirage.anomalous_metrics Redis set' % (
str(data)))
redis_set = 'mirage_labelled_metrics.anomalous_metrics'
data = str(anomalous_metric)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
                    logger.error('error :: mirage_labelled_metrics :: failed to add %s to mirage_labelled_metrics.anomalous_metrics Redis set' % (
                        str(data)))
# @added 20220504 - Feature #2580: illuminance
# @modified 20230419 - Feature #2580: illuminance
# Moved out of the if anomalous block to above. Determine
# triggered_algorithms and record illuminance for all that
# triggered
# illuminance_dict = {}
# use_key = str(base_name)
# if is_labelled_metric:
# use_key = str(metric_id)
# illuminance_dict[use_key] = {
# 'timestamp': int(metric_timestamp),
# 'value': float(datapoint),
# 'triggered_algorithms_count': len(triggered_algorithms)}
# logger.info('mirage_labelled_metrics :: calling add_illuminance_entries with %s entries to add' % (
# str(len(illuminance_dict))))
# current_illuminance_dict = {}
# try:
# current_illuminance_dict = add_illuminance_entries(self, skyline_app, int(run_timestamp), illuminance_dict)
# except Exception as err:
# logger.error('error :: mirage_labelled_metrics :: add_illuminance_entries failed - %s' % (
# err))
# logger.info('mirage_labelled_metrics :: illuminance Redis hash now has %s entries' % (
# str(len(current_illuminance_dict))))
logger.info('mirage_labelled_metrics :: anomaly detected :: %s with %s (at SECOND_ORDER_RESOLUTION_HOURS), %s (at FULL_DURATION)' % (
metric, str(datapoint), str(value)))
# It runs so fast, this allows us to process 30 anomalies/min
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Removed limit
# sleep(2)
# @added 20170206 - Bug #1904: Handle non filesystem friendly metric names in check files
sane_metricname = filesafe_metricname(str(base_name))
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
if run_negatives_present and negatives_found:
redis_set = 'ionosphere.untrainable_metrics'
try:
last_negative_timestamp = int(negatives_found[-1][0])
last_negative_value = negatives_found[-1][1]
remove_after_timestamp = int(last_negative_timestamp + second_order_resolution_seconds)
data = str([base_name, metric_timestamp, datapoint, last_negative_timestamp, last_negative_value, second_order_resolution_seconds, remove_after_timestamp])
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# If Crucible or Panorama are enabled determine details
determine_anomaly_details = False
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
determine_anomaly_details = True
if settings.PANORAMA_ENABLED:
determine_anomaly_details = True
# If Ionosphere is enabled determine details
try:
ionosphere_enabled = settings.IONOSPHERE_ENABLED
if settings.IONOSPHERE_ENABLED:
determine_anomaly_details = True
except:
ionosphere_enabled = False
if determine_anomaly_details:
# metric_timestamp = str(int(timeseries[-1][0]))
from_timestamp = str(int(timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
cache_key = 'mirage.last_alert.smtp.%s' % (base_name)
last_alert = False
try:
# @modified 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Changed the last_alert cache key to hold the last
# anomaly timestamp
# last_alert = self.redis_conn.get(cache_key)
last_alert = self.redis_conn_decoded.get(cache_key)
except Exception as e:
logger.error('error :: mirage_labelled_metrics :: could not query Redis for cache_key: %s' % str(e))
# @added 20170308 - Feature #1960: ionosphere_layers
# Allow Ionosphere to send Panorama checks, it is an ionosphere_metric
if not ionosphere_unique_metrics:
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
ionosphere_unique_metrics = []
added_at = str(int(time()))
# If Panorama is enabled - create a Panorama check
# @modified 20170308 - Feature #1960: ionosphere_layers
# Allow Ionosphere to send Panorama checks for ionosphere_metrics
# if settings.PANORAMA_ENABLED:
send_to_panorama = False
redis_metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(base_name))
# TODO - testing
if is_labelled_metric:
redis_metric_name = 'labelled_metrics.%s' % str(metric_id)
if settings.PANORAMA_ENABLED:
send_to_panorama = True
if redis_metric_name in ionosphere_unique_metrics:
send_to_panorama = False
# @added 20220315 - Feature #4482: Test alerts
# Allow for full testing with the injection of an anomaly on a
# metric
if test_alert or test_alert_and_trigger:
logger.info('mirage_labelled_metrics :: test_alert sending triggered anomaly on %s to Panorama' % (
metric))
send_to_panorama = True
# Panorama must have at least one triggered algorithm
original_triggered_algorithms = list(triggered_algorithms)
if len(triggered_algorithms) == 0:
triggered_algorithms = [algorithms_run[0]]
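# original_triggered_algorithms preserves the real, possibly empty,
# list so it can be restored in the test_alert block below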
if send_to_panorama:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are intentionally enclosed in single quotes
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
source = 'graphite'
if base_name.startswith('labelled_metrics.'):
source = 'victoriametrics'
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200607 - Feature #3566: custom_algorithms
# str(int_metric_timestamp), str(settings.MIRAGE_ALGORITHMS),
str(int_metric_timestamp), str(algorithms_run),
triggered_algorithms, skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at, sane_metricname)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('mirage_labelled_metrics :: added panorama anomaly file :: %s' % (panaroma_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Move to Redis set block below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: mirage_labelled_metrics :: failed to add panorama anomaly file :: %s' % (panaroma_anomaly_file))
logger.error(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_panorama
redis_set = 'mirage_labelled_metrics.sent_to_panorama'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20210323 - Feature #3642: Anomaly type classification
if LUMINOSITY_CLASSIFY_ANOMALIES:
redis_set = 'luminosity.classify_anomalies'
data_dict = {
'metric': metric,
'timestamp': int_metric_timestamp,
'value': datapoint,
'algorithms': algorithms_run,
'triggered_algorithms': triggered_algorithms,
'app': skyline_app,
'added_at': int(added_at),
}
data = [metric, int_metric_timestamp, int(added_at), data_dict]
try:
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20220315 - Feature #4482: Test alerts
# Allow for full testing with the injection of an anomaly on a
# metric
if test_alert or test_alert_and_trigger:
triggered_algorithms = list(original_triggered_algorithms)
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('mirage_labelled_metrics :: removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# If crucible is enabled - save timeseries and create a
# crucible check
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
from_timestamp = str(int(timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
crucible_anomaly_dir = str(settings.CRUCIBLE_DATA_FOLDER) + '/' + timeseries_dir + '/' + metric_timestamp
if not os.path.exists(crucible_anomaly_dir):
mkdir_p(crucible_anomaly_dir)
# Note:
# The value is intentionally enclosed in single quotes
# as the imp.load_source used in crucible results in a
# shift in the decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
crucible_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'anomaly_dir = \'%s\'\n' \
'graphite_metric = True\n' \
'run_crucible_tests = False\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200607 - Feature #3566: custom_algorithms
# str(int_metric_timestamp), str(settings.MIRAGE_ALGORITHMS),
str(int_metric_timestamp), str(algorithms_run),
triggered_algorithms, crucible_anomaly_dir,
skyline_app, added_at)
# Create an anomaly file with details about the anomaly
crucible_anomaly_file = '%s/%s.txt' % (crucible_anomaly_dir, sane_metricname)
try:
write_data_to_file(
skyline_app, crucible_anomaly_file, 'w',
crucible_anomaly_data)
logger.info('mirage_labelled_metrics :: added crucible anomaly file :: %s' % (crucible_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.sent_to_crucible.append(base_name)
except:
logger.error('error :: mirage_labelled_metrics :: failed to add crucible anomaly file :: %s' % (crucible_anomaly_file))
logger.error(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_crucible
redis_set = 'mirage.sent_to_crucible'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Create timeseries json file with the timeseries
json_file = '%s/%s.json' % (crucible_anomaly_dir, base_name)
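# Convert the list brackets to parentheses, presumably so the
# downstream loader can evaluate the data as Python tuple literals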
timeseries_json = str(timeseries).replace('[', '(').replace(']', ')')
try:
write_data_to_file(skyline_app, json_file, 'w', timeseries_json)
logger.info('mirage_labelled_metrics :: added crucible timeseries file :: %s' % (json_file))
except:
logger.error('error :: mirage_labelled_metrics :: failed to add crucible timeseries file :: %s' % (json_file))
logger.error(traceback.format_exc())
# Create a crucible check file
crucible_check_file = '%s/%s.%s.txt' % (settings.CRUCIBLE_CHECK_PATH, metric_timestamp, sane_metricname)
try:
write_data_to_file(
skyline_app, crucible_check_file, 'w',
crucible_anomaly_data)
logger.info('mirage_labelled_metrics :: added crucible check :: %s,%s' % (base_name, metric_timestamp))
except:
logger.error('error :: mirage_labelled_metrics :: failed to add crucible check file :: %s' % (crucible_check_file))
logger.error(traceback.format_exc())
# @added 20230510 - Feature #4902: Prevent training on metrics newer than 7 days
new_metric_added_at = False
if ionosphere_enabled and not last_alert:
try:
new_metric_added_at = self.redis_conn_decoded.hget('metrics_manager.untrainable_new_metrics', labelled_metric_base_name)
except Exception as err:
logger.error('error :: failed to hget from metrics_manager.untrainable_new_metrics - %s' % (
err))
if new_metric_added_at:
try:
new_until = int(float(new_metric_added_at)) + (86400 * 7)
new_until_date = datetime.datetime.fromtimestamp(new_until).strftime('%Y-%m-%d %H:%M:%S')
logger.info('mirage_labelled_metrics :: not sending %s to Ionosphere as still a new metric until %s' % (
    labelled_metric_base_name, new_until_date))
except Exception as err:
logger.error('error :: failed to determine when %s matures - %s' % (
labelled_metric_base_name, err))
ionosphere_enabled = False
# @added 20160922 - Branch #922: Ionosphere
# Also added the send_anomalous_metric_to skyline_functions.py
# function
if ionosphere_enabled:
if not last_alert:
# @modified 20161228 Feature #1830: Ionosphere alerts
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
full_duration = str(second_order_resolution_seconds)
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = 0
send_anomalous_metric_to(
skyline_app, 'ionosphere', timeseries_dir,
str(int_metric_timestamp), base_name, str(datapoint),
from_timestamp, triggered_algorithms, timeseries,
full_duration, str(ionosphere_parent_id),
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert
algorithms_run)
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the mirage.sent_to_ionosphere Redis set Redis set
# block below
# self.sent_to_ionosphere.append(base_name)
# @added 20200804 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Add training data to the ionosphere.training_data so that
# the ionosphere purge_old_data_dirs can happen less
# frequently for reduced I/O
redis_set = 'ionosphere.training_data'
data = [base_name, int(int_metric_timestamp), second_order_resolution_seconds]
try:
logger.info('mirage_labelled_metrics :: adding to Redis set %s - %s' % (
redis_set, str(data)))
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to %s Redis set' % (str(data), redis_set))
else:
logger.info('mirage_labelled_metrics :: alert expiry key exists, not sending to Ionosphere :: %s' % base_name)
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
# Do not remove if this is only for training_data creation
if redis_metric_name in ionosphere_unique_metrics:
redis_set = 'analyzer.waterfall_alerts.sent_to_mirage'
mirage_waterfall_data = []
for waterfall_alert in analyzer_waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == metric_timestamp:
mirage_waterfall_data = waterfall_alert
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('mirage_labelled_metrics :: removed waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_set))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved from the above self.sent_to_ionosphere
if not last_alert:
redis_set = 'mirage_labelled_metrics.sent_to_ionosphere'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20220315 - Feature #4482: Test alerts
# Allow for full testing with the injection of an anomaly on a
# metric
if test_alert or test_alert_and_trigger:
logger.info('mirage_labelled_metrics :: test_alert not sending anomaly on %s to ionosphere' % (
metric))
ionosphere_unique_metrics = []
# @added 20200904 - Feature #3734: waterfall alerts
# Add mirage waterfall alert
# Only add if this is an ionosphere_enabled metric_check_file
if redis_metric_name in ionosphere_unique_metrics:
if mirage_waterfall_data:
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp, waterfall_panorama_data]
waterfall_data = mirage_waterfall_data
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
try:
self.redis_conn.sadd(redis_set, str(waterfall_data))
logger.info('mirage_labelled_metrics :: added to Redis set %s - %s' % (redis_set, str(waterfall_data)))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(waterfall_data), str(redis_set)))
metric_var_files = []
timeseries = []
if os.path.isfile(metric_check_file):
# Remove metric check file
try:
os.remove(metric_check_file)
logger.info('mirage_labelled_metrics :: removed check file - %s' % metric_check_file)
except OSError:
logger.error('error :: mirage_labelled_metrics :: failed to remove check file - %s' % metric_check_file)
# Remove the metric directory
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('mirage_labelled_metrics :: removed data dir - %s' % metric_data_dir)
except:
logger.error('error :: mirage_labelled_metrics :: failed to rmtree %s' % metric_data_dir)
# Add values to the queue so the parent process can collate
try:
for key, ab_value in anomaly_breakdown.items():
self.mirage_labelled_metrics_anomaly_breakdown_q.put((key, ab_value))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to iterate and put to self.mirage_labelled_metrics_anomaly_breakdown_q - %s' % err)
try:
for key, e_value in exceptions.items():
self.mirage_labelled_metrics_exceptions_q.put((key, e_value))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to iterate and put to self.mirage_labelled_metrics_exceptions_q - %s' % err)
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('mirage_labelled_metrics :: starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('mirage_labelled_metrics :: log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
else:
logger.info('mirage_labelled_metrics :: bin/%s.d log management done' % skyline_app)
if not os.path.exists(MIRAGE_LABELLED_CHECK_PATH):
mkdir_p(MIRAGE_LABELLED_CHECK_PATH)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
last_sent_to_graphite = int(time())
# @added 20220421 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
filesafe_names_dict = {}
last_redis_self_key_update = 0
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error('error :: mirage_labelled_metrics :: skyline can not connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
logger.info('mirage_labelled_metrics :: attempting to connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
try:
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to connect to Redis - %s' % err)
try:
self.redis_conn.ping()
logger.info('mirage_labelled_metrics :: connected to redis')
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to ping Redis - %s' % err)
# Determine if any metric to analyze or Ionosphere alerts to be sent
while True:
now = time()
# Report app up
update_redis_self_key = False
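# The self reporting key is refreshed at most once every 20 seconds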
if not last_redis_self_key_update:
update_redis_self_key = True
if last_redis_self_key_update and now >= (last_redis_self_key_update + 20):
update_redis_self_key = True
if update_redis_self_key:
try:
# redis_is_up = self.redis_conn.setex(skyline_app, 120, now)
redis_is_up = self.redis_conn.setex('mirage_labelled_metrics', 120, now)
last_redis_self_key_update = int(now)
if redis_is_up:
try:
self.redis_conn.setex('redis', 120, now)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: could not update the Redis redis key - %s' % (
err))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to update Redis key for mirage_labelled_metrics up - %s' % err)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# If Ionosphere is going to pass alerts back to the app
# here we are going to have to break out and force an alerting
# only run.
ionosphere_alerts_returned = False
# @added 20220315 - Feature #4482: Test alerts
# Allow for full testing with the injection of an anomaly on a
# metric
test_alerts = {}
test_alert_metrics = []
metric_var_files = []
ionosphere_alerts_returned = False
# @modified 20190408 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Move this len(metric_var_files) from above and apply the
# appropriate sleep
if len(metric_var_files) == 0:
sleep_for = 10
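# Shorten the sleep as the once-a-minute send to Graphite approaches
# and break out immediately when it is due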
next_send_to_graphite = last_sent_to_graphite + 60
seconds_to_next_send_to_graphite = next_send_to_graphite - int(time())
if seconds_to_next_send_to_graphite < 10:
if seconds_to_next_send_to_graphite > 1:
sleep_for = seconds_to_next_send_to_graphite
else:
break
logger.info('mirage_labelled_metrics :: sleeping no metrics...')
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# sleep(10)
sleep(sleep_for)
batch_processing_metrics = []
# Clean up old files
now_timestamp = time()
stale_age = now_timestamp - settings.MIRAGE_STALE_SECONDS
for current_file in os.listdir(MIRAGE_LABELLED_CHECK_PATH):
if os.path.isfile(MIRAGE_LABELLED_CHECK_PATH + "/" + current_file):
t = os.stat(MIRAGE_LABELLED_CHECK_PATH + "/" + current_file)
c = t.st_ctime
# @added 20220113 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Do not remove batch_processing checks
skip_batch_check = False
for b_metric in batch_processing_metrics:
    if b_metric in current_file:
        skip_batch_check = True
        break
if skip_batch_check:
    continue
# delete the check file if it is older than MIRAGE_STALE_SECONDS
if c < stale_age:
os.remove(MIRAGE_LABELLED_CHECK_PATH + "/" + current_file)
logger.info('mirage_labelled_metrics :: removed stale check - %s' % (current_file))
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
redis_set = 'mirage_labelled_metrics.stale_check_discarded'
try:
self.redis_conn.sadd(redis_set, str(current_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to add %s to Redis set %s' % (
str(current_file), str(redis_set)))
# @added 20220421 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
# Handle filesafe names
filesafe_names_list = []
if filesafe_names_dict:
filesafe_names_list = list(filesafe_names_dict.keys())
# Discover metric to analyze
metric_var_files = []
labelled_metrics_to_check_dict = {}
try:
labelled_metrics_to_check_dict = self.redis_conn_decoded.hgetall('analyzer_labelled_metrics.mirage_check')
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to hgetall analyzer_labelled_metrics.mirage_check Redis hash - %s' % err)
if labelled_metrics_to_check_dict:
for key in list(labelled_metrics_to_check_dict.keys()):
check_key = 'analyzer_labelled_metrics.mirage_check.%s' % key
metric_var_files.append(check_key)
if len(metric_var_files) > 0:
break
process_metric_check_files = False
metric_var_files_sorted = sorted(metric_var_files)
# metric_check_file = settings.MIRAGE_CHECK_PATH + "/" + metric_var_files_sorted[0]
if metric_var_files_sorted:
process_metric_check_files = True
# @added 20221014 - Bug #4696: analyzer - anomalous metrics sets not flushing
# Task #4614: Support labelled metrics
# Set the default dicts before the if as they were not always being set inside,
# at times causing the log entries to hang.
exceptions = {}
anomaly_breakdown = {}
if process_metric_check_files:
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
check_files_to_process = len(metric_var_files_sorted)
logger.info('mirage_labelled_metrics :: %s checks to process' % str(check_files_to_process))
# Remove any existing algorithm.error files from any previous runs
# that did not cleanup for any reason
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('mirage_labelled_metrics :: cleaning up old error file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('failed to cleanup mirage_algorithm.error files - %s' % (traceback.format_exc()))
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# MIRAGE_PROCESSES = 1
if len(metric_var_files) > 1:
try:
MIRAGE_PROCESSES = int(settings.MIRAGE_PROCESSES)
if len(metric_var_files) < MIRAGE_PROCESSES:
MIRAGE_PROCESSES = len(metric_var_files)
except:
MIRAGE_PROCESSES = 1
else:
MIRAGE_PROCESSES = 1
# Was testing just 1 process
# MIRAGE_PROCESSES = 1
# @modified 20230120 - Task #4786: Switch from matrixprofile to stumpy
# Task #4778: v4.0.0 - update dependencies
# Removed this limitation because mirage_labelled_metrics can be
# assigned 100s of checks and with the change to stumpy analysis
# with skyline_matrixprofile has increased the run time.
# With matrixprofile being run with skyline_matrixprofile via
# run_custom_algorithm_on_timeseries was achieving around 91
# checks in 41 seconds, now running skyline_matrixproile direct
# with "stumpy-mp.stump" is taking around 65 seconds to do 88
# checks. Do not limit to 1 process unless there are under 10
# checks. The reason being that initialisation of stumpy.stump
# even with jit caching takes between 1 and 3 seconds,
# thereafter any further metrics analysed with stump in the run
# take between 0.02 and 0.6 seconds (no init required), this is
# more on the 0.6 second side when busy. Therefore if there are
# less than 6 checks (ballpark figure, depends on load) it is
# more efficient and quicker to just use 1 process rather than
# more.
# MIRAGE_PROCESSES = 1
if len(metric_var_files) <= 6:
MIRAGE_PROCESSES = 1
run_timestamp = int(time())
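# Distribute the sorted checks evenly across the processes, with the
# final process picking up any remainder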
for i in range(1, MIRAGE_PROCESSES + 1):
checks_per_processor = int(ceil(float(len(metric_var_files_sorted)) / float(MIRAGE_PROCESSES)))
if i == MIRAGE_PROCESSES:
assigned_max = len(metric_var_files_sorted)
else:
assigned_max = min(len(metric_var_files_sorted), i * checks_per_processor)
assigned_min = (i - 1) * checks_per_processor
assigned_keys = range(assigned_min, assigned_max)
# Compile assigned metrics
assigned_checks = [metric_var_files_sorted[index] for index in assigned_keys]
logger.info('mirage_labelled_metrics :: processing %s checks' % str(len(assigned_checks)))
p = Process(target=self.spin_process, args=(i, run_timestamp, assigned_checks))
pids.append(p)
pid_count += 1
logger.info('mirage_labelled_metrics :: starting %s of %s spin_process/es' % (str(pid_count), str(MIRAGE_PROCESSES)))
p.start()
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# spawned_pids.append(p.pid)
spawned_pids.append([p.pid, i])
logger.info('mirage_labelled_metrics :: started spin_process %s with pid %s' % (str(pid_count), str(p.pid)))
# Self monitor processes and terminate if any spin_process has run
# for longer than settings.MAX_ANALYZER_PROCESS_RUNTIME - 20160512 @earthgecko
p_starts = time()
while time() - p_starts <= settings.MAX_ANALYZER_PROCESS_RUNTIME:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('mirage_labelled_metrics :: %s :: %s spin_process/es completed in %.2f seconds' % (
skyline_app, str(MIRAGE_PROCESSES), time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('mirage_labelled_metrics :: %s :: timed out, killing all spin_process processes' % (skyline_app))
for p in pids:
try:
# @modified 20230410 - moved p.join before p.terminate
p.join()
p.terminate()
# @modified 20221125 - added join back
# p.join()
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: %s :: error terminating pid - %s' % (
skyline_app, err))
for p in pids:
try:
if p.is_alive():
logger.info('mirage_labelled_metrics :: %s :: stopping spin_process - %s' % (skyline_app, str(p.is_alive())))
p.join()
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: %s :: error joining pid - %s' % (
skyline_app, err))
# @added 20200607 - Feature #3508: ionosphere.untrainable_metrics
# Check to non 3sigma algorithm errors too
check_algorithm_errors = ['negatives_present']
for algorithm in list(settings.MIRAGE_ALGORITHMS):
check_algorithm_errors.append(algorithm)
# @added 20200607 - Feature #3566: custom_algorithms
if CUSTOM_ALGORITHMS:
for custom_algorithm in settings.CUSTOM_ALGORITHMS:
check_algorithm_errors.append(custom_algorithm)
# Grab data from the queue and populate dictionaries
exceptions = {}
anomaly_breakdown = {}
while 1:
try:
key, value = self.mirage_labelled_metrics_anomaly_breakdown_q.get_nowait()
if key not in list(anomaly_breakdown.keys()):
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
# @added 20191113 - Branch #3262: py3
# Log
logger.info('mirage_labelled_metrics :: anomaly_breakdown queue is empty')
break
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: grabbing data from the queue and populating anomaly_breakdown - %s' % err)
break
while 1:
try:
key, value = self.mirage_labelled_metrics_exceptions_q.get_nowait()
if key not in list(exceptions.keys()):
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
# @added 20191113 - Branch #3262: py3
# Log
logger.info('mirage_labelled_metrics :: exceptions queue is empty')
break
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: grabbing data from the queue and populating exceptions - %s' % err)
break
# @added 20191021 - Bug #3288: Always send anomaly_breakdown and exception metrics
# Branch #3262: py3
exceptions_metrics = ['Boring', 'Stale', 'TooShort', 'Other']
for i_exception in exceptions_metrics:
if i_exception not in list(exceptions.keys()):
exceptions[i_exception] = 0
# @modified 20200607 - Feature #3566: custom_algorithms
# for i_anomaly_breakdown in settings.MIRAGE_ALGORITHMS:
for i_anomaly_breakdown in check_algorithm_errors:
if i_anomaly_breakdown not in list(anomaly_breakdown.keys()):
anomaly_breakdown[i_anomaly_breakdown] = 0
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# for completed_pid in spawned_pids:
for completed_pid, mirage_process in spawned_pids:
logger.info('mirage_labelled_metrics :: spin_process with pid %s completed' % (str(completed_pid)))
# @modified 20200607 - Feature #3566: custom_algorithms
# Feature #3508: ionosphere.untrainable_metrics
# Check to non 3sigma algorithm errors too and wrapped in try
try:
# for algorithm in settings.MIRAGE_ALGORITHMS:
for algorithm in check_algorithm_errors:
algorithm_error_file = '%s/%s.%s.%s.algorithm.error' % (
settings.SKYLINE_TMP_DIR, skyline_app,
str(completed_pid), algorithm)
if os.path.isfile(algorithm_error_file):
logger.error(
    'error :: spin_process with pid %s has reported an error with the %s algorithm' % (
        str(completed_pid), algorithm))
try:
with open(algorithm_error_file, 'r') as f:
error_string = f.read()
logger.error('%s' % str(error_string))
except:
logger.error('error :: mirage_labelled_metrics :: failed to read %s error file' % algorithm)
try:
os.remove(algorithm_error_file)
except OSError:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to check algorithm errors')
redis_metrics_processed_key = 'mirage_labelled_metrics.%s.metrics_processed' % str(mirage_process)
redis_metrics_processed = {}
try:
redis_metrics_processed = self.redis_conn_decoded.hgetall(redis_metrics_processed_key)
# if redis_metrics_processed:
# self.redis_conn_decoded.delete(redis_metrics_processed_key)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: %s Redis hash operation failed - %s' % (redis_metrics_processed_key, err))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set and not self.metric_variables
metric_variables = []
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_metric_variables = list(self.redis_conn.smembers('mirage.metric_variables'))
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle per process
# literal_metric_variables = list(self.redis_conn_decoded.smembers('mirage.metric_variables'))
metric_variable_redis_set = 'mirage_labelled_metrics.%s.metric_variables' % str(mirage_process)
try:
literal_metric_variables = list(self.redis_conn_decoded.smembers(metric_variable_redis_set))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: smembers failed on Redis %s - %s' % (
metric_variable_redis_set, err))
literal_metric_variables = []
for item_list_string in literal_metric_variables:
list_item = literal_eval(item_list_string)
metric_variables.append(list_item)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle per process
try:
self.redis_conn.delete(metric_variable_redis_set)
# logger.info('mirage_labelled_metrics :: deleted Redis set - %s' % metric_variable_redis_set)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to delete Redis set - %s' % metric_variable_redis_set)
# @added 20191113 - Branch #3262: py3
# Set default values
metric_name = None
metric_value = None
hours_to_resolve = 0
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# for metric_variable in self.metric_variables:
for metric_variable in metric_variables:
if metric_variable[0] == 'metric_name':
metric_name = metric_variable[1]
if metric_variable[0] == 'metric_value':
metric_value = metric_variable[1]
if metric_variable[0] == 'hours_to_resolve':
hours_to_resolve = metric_variable[1]
# if metric_variable[0] == 'metric_timestamp':
# metric_timestamp = metric_variable[1]
# logger.info('mirage_labelled_metrics :: analysis done - %s' % str(metric_name))
logger.info('mirage_labelled_metrics :: process %s checked %s metrics' % (
str(mirage_process), str(len(redis_metrics_processed))))
# Send alerts
# Calculate hours second order resolution to seconds
# @modified 20191113 - Branch #3262: py3
# Only if set
if hours_to_resolve:
logger.info('mirage_labelled_metrics :: analyzed at %s hours resolution' % hours_to_resolve)
second_order_resolution_seconds = int(hours_to_resolve) * 3600
logger.info('mirage_labelled_metrics :: analyzed at %s seconds resolution' % str(second_order_resolution_seconds))
# Remove metric check files
for check_item in list(redis_metrics_processed.keys()):
metric_check_file = 'None'
try:
metric_data = literal_eval(redis_metrics_processed[check_item])
metric_id = metric_data['metric_id']
metric_name = 'labelled_metrics.%s' % str(metric_id)
processing_check_file = '%s.txt' % str(metric_name)
metric_check_file = '%s/%s' % (MIRAGE_LABELLED_CHECK_PATH, processing_check_file)
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to interpolate metric_check_file - %s' % err)
if os.path.isfile(metric_check_file):
try:
os.remove(metric_check_file)
logger.info('mirage_labelled_metrics :: removed check file - %s' % metric_check_file)
except OSError:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to remove metric_check_file - %s' % metric_check_file)
# Remove the metric directory
# @modified 20191113 - Branch #3262: py3
# Convert None to str
# timeseries_dir = metric_name.replace('.', '/')
metric_data_dir = 'None'
try:
metric_name_str = str(metric_name)
timeseries_dir = metric_name_str.replace('.', '/')
metric_data_dir = '%s/%s' % (MIRAGE_LABELLED_CHECK_PATH, timeseries_dir)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to interpolate metric_data_dir')
metric_data_dir = 'None'
if os.path.exists(metric_data_dir):
try:
rmtree(metric_data_dir)
logger.info('mirage_labelled_metrics :: removed - %s' % metric_data_dir)
except:
logger.error('error :: mirage_labelled_metrics :: failed to rmtree %s' % metric_data_dir)
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
mirage_anomalous_metrics = []
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_mirage_anomalous_metrics = list(self.redis_conn.smembers('mirage.anomalous_metrics'))
literal_mirage_anomalous_metrics = list(self.redis_conn_decoded.smembers('mirage_labelled_metrics.anomalous_metrics'))
for metric_list_string in literal_mirage_anomalous_metrics:
metric = literal_eval(metric_list_string)
mirage_anomalous_metrics.append(metric)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to determine list from mirage_labelled_metrics.anomalous_metrics Redis set')
mirage_anomalous_metrics = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
mirage_not_anomalous_metrics = []
try:
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# literal_mirage_not_anomalous_metrics = list(self.redis_conn.smembers('mirage.not_anomalous_metrics'))
literal_mirage_not_anomalous_metrics = list(self.redis_conn_decoded.smembers('mirage_labelled_metrics.not_anomalous_metrics'))
for metric_list_string in literal_mirage_not_anomalous_metrics:
metric = literal_eval(metric_list_string)
mirage_not_anomalous_metrics.append(metric)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to determine list from mirage_labelled_metrics.not_anomalous_metrics Redis set')
mirage_not_anomalous_metrics = []
# Log progress
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if len(self.anomalous_metrics) > 0:
if len(mirage_anomalous_metrics) > 0:
logger.info('mirage_labelled_metrics :: seconds since last anomaly :: %.2f' % (time() - now))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('mirage_labelled_metrics :: total anomalies :: %d' % len(self.anomalous_metrics))
logger.info('mirage_labelled_metrics :: total anomalies :: %d' % len(mirage_anomalous_metrics))
logger.info('mirage_labelled_metrics :: exception stats :: %s' % str(exceptions))
logger.info('mirage_labelled_metrics :: anomaly breakdown :: %s' % str(anomaly_breakdown))
# Log to Graphite
if process_metric_check_files:
n_time = time()
run_time = n_time - run_timestamp
logger.info('mirage_labelled_metrics :: process took %.2f seconds to run' % run_time)
# graphite_run_time = '%.2f' % run_time
# send_metric_name = skyline_app_graphite_namespace + '.run_time'
# send_graphite_metric(self, skyline_app, send_metric_name, graphite_run_time)
try:
self.redis_conn_decoded.hset('mirage_labelled_metrics.run_times', str(n_time), str(run_time))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to add run_time to mirage_labelled_metrics.run_times Redis hash - %s' % err)
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'mirage_labelled_metrics.anomalous_metrics',
'mirage_labelled_metrics.not_anomalous_metrics',
'mirage_labelled_metrics.metric_variables',
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Handle once per minute
# 'mirage.sent_to_crucible',
# 'mirage.sent_to_panorama',
# 'mirage.sent_to_ionosphere',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('mirage_labelled_metrics :: deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to delete Redis set - %s' % redis_set_to_delete)
# @added 20200903 - Task #3730: Validate Mirage running multiple processes
# Send checks.stale_discarded and checks.pending metrics
if int(time()) >= (last_sent_to_graphite + 60):
stale_check_discarded = []
try:
stale_check_discarded = list(self.redis_conn_decoded.smembers('mirage_labelled_metrics.stale_check_discarded'))
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to get mirage_labelled_metrics.stale_check_discarded set from Redis')
stale_check_discarded = []
stale_check_discarded_count = len(stale_check_discarded)
logger.info('mirage_labelled_metrics :: checks.stale_discarded :: %s' % str(stale_check_discarded_count))
send_metric_name = '%s.checks.stale_discarded' % skyline_app_graphite_namespace
send_graphite_metric(self, skyline_app, send_metric_name, str(stale_check_discarded_count))
checks_pending = [f_pending for f_pending in os.listdir(MIRAGE_LABELLED_CHECK_PATH) if os.path.isfile(os.path.join(MIRAGE_LABELLED_CHECK_PATH, f_pending))]
checks_pending_count = len(checks_pending)
logger.info('mirage_labelled_metrics :: checks.pending :: %s' % str(checks_pending_count))
send_metric_name = '%s.checks.pending' % skyline_app_graphite_namespace
send_graphite_metric(self, skyline_app, send_metric_name, str(checks_pending_count))
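# Aggregate the per run durations recorded in the
# mirage_labelled_metrics.run_times hash over the last minute and
# report the total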
run_times = []
try:
run_times_dict = self.redis_conn_decoded.hgetall('mirage_labelled_metrics.run_times')
self.redis_conn_decoded.delete('mirage_labelled_metrics.run_times')
if run_times_dict:
for n_time, run_time_str in run_times_dict.items():
run_times.append(float(run_time_str))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to determine run_times from mirage_labelled_metrics.run_times Redis hash - %s' % err)
if run_times:
run_time = sum(run_times)
else:
run_time = 0
logger.info('mirage_labelled_metrics :: seconds to run :: %.2f' % run_time)
graphite_run_time = '%.2f' % run_time
send_metric_name = skyline_app_graphite_namespace + '.run_time'
send_graphite_metric(self, skyline_app, send_metric_name, graphite_run_time)
try:
self.redis_conn_decoded.hset('mirage_labelled_metrics.run_time', 'timestamp', str(time()))
self.redis_conn_decoded.hset('mirage_labelled_metrics.run_time', 'value', str(run_time))
except Exception as err:
logger.error('error :: mirage_labelled_metrics :: failed to add keys to mirage_labelled_metrics.run_time Redis hash - %s' % err)
# @modified 20210309 - Task #3730: Validate Mirage running multiple processes
# Reimplement mirage.checks.done count as increment key
# checks_done = []
# try:
# checks_done = list(self.redis_conn_decoded.smembers('mirage.checks.done'))
checks_done = 0
try:
# @modified 20230205 - Task #4844: Replace Redis getset with set with get
# As of Redis version 6.2.0, this command is regarded as deprecated.
# It can be replaced by SET with the GET argument when migrating or writing new code.
# checks_done_str = self.redis_conn_decoded.getset('mirage_labelled_metrics.checks.done', 0)
checks_done_str = self.redis_conn_decoded.set('mirage_labelled_metrics.checks.done', 0, get=True)
if checks_done_str:
checks_done = int(checks_done_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to get mirage_labelled_metrics.checks.done key from Redis')
checks_done = 0
# checks_done_count = len(checks_done)
# logger.info('mirage_labelled_metrics :: checks.done :: %s' % str(checks_done_count))
logger.info('mirage_labelled_metrics :: checks.done :: %s' % str(checks_done))
send_metric_name = '%s.checks.done' % skyline_app_graphite_namespace
# send_graphite_metric(self, skyline_app, send_metric_name, str(checks_done_count))
send_graphite_metric(self, skyline_app, send_metric_name, str(checks_done))
# @modified 20200903 - Task #3730: Validate Mirage running multiple processes
# Only send panorama, ionosphere and crucible metrics once a minute
if settings.ENABLE_CRUCIBLE and settings.MIRAGE_CRUCIBLE_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_crucible = str(len(self.sent_to_crucible))#
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_crucible = str(len(list(self.redis_conn.smembers('mirage.sent_to_crucible'))))
sent_to_crucible = str(len(list(self.redis_conn_decoded.smembers('mirage.sent_to_crucible'))))
except:
sent_to_crucible = '0'
logger.info('mirage_labelled_metrics :: sent_to_crucible :: %s' % sent_to_crucible)
send_metric_name = '%s.sent_to_crucible' % skyline_app_graphite_namespace
send_graphite_metric(self, skyline_app, send_metric_name, sent_to_crucible)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_panorama = str(len(list(self.redis_conn.smembers('mirage.sent_to_panorama'))))
sent_to_panorama = str(len(list(self.redis_conn_decoded.smembers('mirage_labelled_metrics.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('mirage_labelled_metrics :: sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(self, skyline_app, send_metric_name, sent_to_panorama)
if settings.IONOSPHERE_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_ionosphere = str(len(self.sent_to_ionosphere))
# @modified 20191022 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_ionosphere = str(len(list(self.redis_conn.smembers('mirage.sent_to_ionosphere'))))
sent_to_ionosphere = str(len(list(self.redis_conn_decoded.smembers('mirage_labelled_metrics.sent_to_ionosphere'))))
except Exception as e:
logger.error('error :: mirage_labelled_metrics :: could not determine sent_to_ionosphere: %s' % e)
sent_to_ionosphere = '0'
logger.info('mirage_labelled_metrics :: sent_to_ionosphere :: %s' % sent_to_ionosphere)
send_metric_name = '%s.sent_to_ionosphere' % skyline_app_graphite_namespace
send_graphite_metric(self, skyline_app, send_metric_name, sent_to_ionosphere)
last_sent_to_graphite = int(time())
delete_redis_sets = [
'mirage_labelled_metrics.sent_to_crucible',
'mirage_labelled_metrics.sent_to_panorama',
'mirage_labelled_metrics.sent_to_ionosphere',
'mirage_labelled_metrics.stale_check_discarded',
# @modified 20210309 - Task #3730: Validate Mirage running multiple processes
# Reimplement mirage.checks.done count as increment key
# 'mirage.checks.done',
# @added 20200916 - Branch #3068: SNAB
# Task #3744: POC matrixprofile
# The main mirage process deletes this set not mirage_labelled_metrics
# mirage_snab_only_checks_redis_set,
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('mirage_labelled_metrics :: deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: failed to delete Redis set - %s' % redis_set_to_delete)
# @added 20220421 - Task #3800: Handle feedback metrics in Mirage and waterfall alerts
# Refresh
try:
filesafe_names_dict = self.redis_conn_decoded.hgetall('metrics_manager.filesafe_base_names')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: mirage_labelled_metrics :: hgetall metrics_manager.filesafe_base_names failed - %s' % err)
# Sleep if it went too fast
# if time() - now < 1:
if time() - now < 59:
logger.info('mirage_labelled_metrics :: sleeping due to low run time...')
# sleep(10)
sleep(1)
# --- /dec_to_hex.py :: geekcomputers/Python (MIT / GPL-1.0-or-later) ---
dec_num = input("Enter the decimal number\n")
print(hex(int(dec_num)))
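# e.g. entering 255 prints 0xff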
# --- /codalab/apps/web/tests/test_competition_download_csv.py :: codalab/codalab-competitions (Apache-2.0) ---
import mock
import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import get_user_model
from apps.customizer.models import Configuration
from apps.web.models import (Competition,
CompetitionParticipant,
CompetitionPhase,
CompetitionSubmission,
CompetitionSubmissionStatus,
ParticipantStatus,
PhaseLeaderBoard,
PhaseLeaderBoardEntry,
SubmissionResultGroup,
SubmissionResultGroupPhase,
SubmissionScore,
SubmissionScoreDef,
SubmissionScoreDefGroup,
SubmissionScoreSet,)
User = get_user_model()
class CompetitionDownloadCSVTests(TestCase):
def setUp(self):
super(CompetitionDownloadCSVTests, self).setUp()
self.customizer_configuration = Configuration.objects.create(disable_all_submissions=False)
self.user = User.objects.create(email='test@user.com', username='testuser\u2020', password='pass')
self.user.set_password('pass')
self.user.save()
self.other_user = User.objects.create(email='other@user.com', username='other')
self.competition = Competition.objects.create(creator=self.user, modified_by=self.user)
self.participant_1 = CompetitionParticipant.objects.create(
user=self.user,
competition=self.competition,
status=ParticipantStatus.objects.get_or_create(name='approved', codename=ParticipantStatus.APPROVED)[0]
)
self.phase_1 = CompetitionPhase.objects.create(
competition=self.competition,
phasenumber=1,
start_date=datetime.datetime.now() - datetime.timedelta(days=30),
)
submission_finished = CompetitionSubmissionStatus.objects.create(name="finished", codename="finished")
self.submission_1 = CompetitionSubmission.objects.create(
participant=self.participant_1,
phase=self.phase_1,
status=submission_finished,
submitted_at=datetime.datetime.now() - datetime.timedelta(days=29),
description="Some description with unicode \u2020"
)
self.leader_board = PhaseLeaderBoard.objects.create(phase=self.phase_1)
self.leader_board_entry_1 = PhaseLeaderBoardEntry.objects.create(
board=self.leader_board,
result=self.submission_1
)
result_group = SubmissionResultGroup.objects.create(
competition=self.competition,
key="Key",
label="Test \u2020",
ordering=1
)
submission_result_group_phase = SubmissionResultGroupPhase.objects.create(phase=self.phase_1, group=result_group)
score_def = SubmissionScoreDef.objects.create(
competition=self.competition,
key="Key",
label="Test \u2020",
)
SubmissionScoreDefGroup.objects.create(
scoredef=score_def,
group=result_group,
)
SubmissionScore.objects.create(
result=self.submission_1,
scoredef=score_def,
value=123,
)
SubmissionScoreSet.objects.create(
competition=self.competition,
key="Key",
label="Test \u2020",
scoredef=score_def,
)
self.url = reverse("competitions:competition_results_complete_download", kwargs={"id": self.competition.pk,
"phase": self.phase_1.pk})
def test_download_competition_csv_returns_200_with_unicode_labels(self):
'''Unicode set in setUp method'''
result = self.client.login(username="{}".format(self.user.username), password="pass")
assert result
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
# --- /examples/with_pyspark_emr/with_pyspark_emr/definitions.py :: dagster-io/dagster (Apache-2.0) ---
from pathlib import Path
from typing import Any
from dagster import ConfigurableIOManager, Definitions, ResourceParam, asset
from dagster_aws.emr import emr_pyspark_step_launcher
from dagster_aws.s3 import S3Resource
from dagster_pyspark import PySparkResource
from pyspark.sql import DataFrame, Row
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
class ParquetIOManager(ConfigurableIOManager):
pyspark: PySparkResource
path_prefix: str
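# Writes each asset to parquet at <path_prefix>/<asset key path> and
# reads upstream assets back from the same layout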
def _get_path(self, context) -> str:
return "/".join([context.resource_config["path_prefix"], *context.asset_key.path])
def handle_output(self, context, obj):
obj.write.parquet(self._get_path(context))
def load_input(self, context):
spark = self.pyspark.spark_session
return spark.read.parquet(self._get_path(context.upstream_output))
@asset
def people(pyspark: PySparkResource, pyspark_step_launcher: ResourceParam[Any]) -> DataFrame:
schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
rows = [Row(name="Thom", age=51), Row(name="Jonny", age=48), Row(name="Nigel", age=49)]
return pyspark.spark_session.createDataFrame(rows, schema)
emr_pyspark = PySparkResource(spark_config={"spark.executor.memory": "2g"})
@asset
def people_over_50(pyspark_step_launcher: ResourceParam[Any], people: DataFrame) -> DataFrame:
return people.filter(people["age"] > 50)
defs = Definitions(
assets=[people, people_over_50],
resources={
"pyspark_step_launcher": emr_pyspark_step_launcher.configured(
{
"cluster_id": {"env": "EMR_CLUSTER_ID"},
"local_pipeline_package_path": str(Path(__file__).parent),
"deploy_local_pipeline_package": True,
"region_name": "us-west-1",
"staging_bucket": "my_staging_bucket",
"wait_for_logs": True,
}
),
"pyspark": emr_pyspark,
"s3": S3Resource(),
"io_manager": ParquetIOManager(pyspark=emr_pyspark, path_prefix="s3://my-s3-bucket"),
},
)
# --- /Sorting Algorithm/Counting Sort/Python/counting_sort.py :: iam-abbas/cs-algorithms (MIT) ---
def counting_sort(array: list) -> list:
"""
Implementation of the linear O(n) Counting Sort algorithm
Arguments:
array - array of integers to be sorted
Returns:
A new list containing the elements of array in ascending sorted order
"""
# Number of items to be sorted
n: int = len(array)
# Get maximum value in array - represented by k in textbook
max_val: int = max(array)
# Create resultant sorting array, reserving space for n items
sorted_arr: list = [None] * n
    # Count array is "temporary working storage" with max_val + 1 slots
count: list = [0] * (max_val + 1)
# Set count[val] to contain number of elements equal to val in array
for val in array:
count[val] += 1
# Set count[i] equal to number of elements <= i
for i in range(len(count)):
# Avoid attempting to access count[-1] if on first iteration
count[i] += count[i - 1] if i != 0 else 0
# Do sorting from end of array down
for i in range(len(array) - 1, -1, -1):
sorted_arr[count[array[i]] - 1] = array[i]
count[array[i]] -= 1
return sorted_arr
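# Worked example (illustrative): counting_sort([4, 2, 2, 8]) first builds
# count = [0, 0, 2, 0, 1, 0, 0, 0, 1], prefix-sums it into
# [0, 0, 2, 2, 3, 3, 3, 3, 4], and then places items from the right,
# yielding [2, 2, 4, 8].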
if __name__ == "__main__":
import random
NUM_VALUES = 10
MAX_VALUE = 100
# Uncomment to generate consistent random values between each run
# random.seed(0xd34db33f)
    # Generate a list of NUM_VALUES random values in range [0, MAX_VALUE]
    unsorted = [random.randint(0, MAX_VALUE) for _ in range(NUM_VALUES)]
# Perform sort, asserting result matches Python's default sorting functionality
sorted_arr = counting_sort(unsorted)
print("Sorted successfully!" if sorted_arr == sorted(unsorted) else "Did not sort!")
|
d2a38fd2d4344cf944f06de96b7469aee779f94c
|
37a420e606a51195b9d82d4dd8682b6010523fee
|
/keras_unet_collection/_model_unet_3plus_2d.py
|
c52a5c21c1a66d32412b147d604928446d7528cd
|
[
"MIT"
] |
permissive
|
yingkaisha/keras-unet-collection
|
cee50c21dae011c5312f87b589e6058ddb4def50
|
d30f14a259656d2f26ea11ed978255d6a7d0ce37
|
refs/heads/main
| 2023-04-18T15:55:13.460006
| 2022-07-05T21:43:04
| 2022-07-05T21:43:04
| 323,426,984
| 548
| 161
|
MIT
| 2023-02-22T08:58:53
| 2020-12-21T19:16:34
|
Python
|
UTF-8
|
Python
| false
| false
| 18,136
|
py
|
_model_unet_3plus_2d.py
|
from __future__ import absolute_import
import warnings
from keras_unet_collection.layer_utils import *
from keras_unet_collection.activations import GELU, Snake
from keras_unet_collection._backbone_zoo import backbone_zoo, bach_norm_checker
from keras_unet_collection._model_unet_2d import UNET_left, UNET_right
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
def unet_3plus_2d_base(input_tensor, filter_num_down, filter_num_skip, filter_num_aggregate,
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='unet3plus'):
'''
    The base of UNET 3+ with an optional ImageNet-trained backbone.
unet_3plus_2d_base(input_tensor, filter_num_down, filter_num_skip, filter_num_aggregate,
stack_num_down=2, stack_num_up=1, activation='ReLU', batch_norm=False, pool=True, unpool=True,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='unet3plus')
----------
Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y.W. and Wu, J., 2020.
UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation.
In ICASSP 2020-2020 IEEE International Conference on Acoustics,
Speech and Signal Processing (ICASSP) (pp. 1055-1059). IEEE.
Input
----------
    input_tensor: the input tensor of the base, e.g., `keras.layers.Input((None, None, 3))`.
filter_num_down: a list that defines the number of filters for each
downsampling level. e.g., `[64, 128, 256, 512, 1024]`.
the network depth is expected as `len(filter_num_down)`
filter_num_skip: a list that defines the number of filters after each
full-scale skip connection. Number of elements is expected to be `depth-1`.
i.e., the bottom level is not included.
* Huang et al. (2020) applied the same numbers for all levels.
e.g., `[64, 64, 64, 64]`.
filter_num_aggregate: an int that defines the number of channels of full-scale aggregations.
stack_num_down: number of convolutional layers per downsampling level/block.
stack_num_up: number of convolutional layers (after full-scale concat) per upsampling level/block.
activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interfaces, e.g., ReLU
batch_norm: True for batch normalization.
pool: True or 'max' for MaxPooling2D.
'ave' for AveragePooling2D.
False for strided conv + batch norm + activation.
unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.
'nearest' for Upsampling2D with nearest interpolation.
False for Conv2DTranspose + batch norm + activation.
name: prefix of the created keras model and its layers.
---------- (keywords of backbone options) ----------
    backbone: the backbone model name. Should be one of the `tensorflow.keras.applications` classes.
None (default) means no backbone.
Currently supported backbones are:
(1) VGG16, VGG19
(2) ResNet50, ResNet101, ResNet152
(3) ResNet50V2, ResNet101V2, ResNet152V2
(4) DenseNet121, DenseNet169, DenseNet201
(5) EfficientNetB[0-7]
weights: one of None (random initialization), 'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
freeze_backbone: True for a frozen backbone.
freeze_batch_norm: False for not freezing batch normalization layers.
* Downsampling is achieved through maxpooling and can be replaced by strided convolutional layers here.
* Upsampling is achieved through bilinear interpolation and can be replaced by transpose convolutional layers here.
Output
----------
A list of tensors with the first/second/third tensor obtained from
the deepest/second deepest/third deepest upsampling block, etc.
* The feature map sizes of these tensors are different,
      with the first tensor having the smallest size.
'''
depth_ = len(filter_num_down)
X_encoder = []
X_decoder = []
# no backbone cases
if backbone is None:
X = input_tensor
# stacked conv2d before downsampling
X = CONV_stack(X, filter_num_down[0], kernel_size=3, stack_num=stack_num_down,
activation=activation, batch_norm=batch_norm, name='{}_down0'.format(name))
X_encoder.append(X)
# downsampling levels
for i, f in enumerate(filter_num_down[1:]):
# UNET-like downsampling
X = UNET_left(X, f, kernel_size=3, stack_num=stack_num_down, activation=activation,
pool=pool, batch_norm=batch_norm, name='{}_down{}'.format(name, i+1))
X_encoder.append(X)
else:
# handling VGG16 and VGG19 separately
if 'VGG' in backbone:
backbone_ = backbone_zoo(backbone, weights, input_tensor, depth_, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor,])
depth_encode = len(X_encoder)
# for other backbones
else:
backbone_ = backbone_zoo(backbone, weights, input_tensor, depth_-1, freeze_backbone, freeze_batch_norm)
# collecting backbone feature maps
X_encoder = backbone_([input_tensor,])
depth_encode = len(X_encoder) + 1
# extra conv2d blocks are applied
# if downsampling levels of a backbone < user-specified downsampling levels
if depth_encode < depth_:
# begins at the deepest available tensor
X = X_encoder[-1]
# extra downsamplings
for i in range(depth_-depth_encode):
i_real = i + depth_encode
X = UNET_left(X, filter_num_down[i_real], stack_num=stack_num_down, activation=activation, pool=pool,
batch_norm=batch_norm, name='{}_down{}'.format(name, i_real+1))
X_encoder.append(X)
# treat the last encoded tensor as the first decoded tensor
X_decoder.append(X_encoder[-1])
# upsampling levels
X_encoder = X_encoder[::-1]
depth_decode = len(X_encoder)-1
# loop over upsampling levels
for i in range(depth_decode):
f = filter_num_skip[i]
# collecting tensors for layer fusion
X_fscale = []
# for each upsampling level, loop over all available downsampling levels (similar to the unet++)
for lev in range(depth_decode):
# counting scale difference between the current down- and upsampling levels
pool_scale = lev-i-1 # -1 for python indexing
# deeper tensors are obtained from **decoder** outputs
if pool_scale < 0:
pool_size = 2**(-1*pool_scale)
X = decode_layer(X_decoder[lev], f, pool_size, unpool,
activation=activation, batch_norm=batch_norm, name='{}_up_{}_en{}'.format(name, i, lev))
# unet skip connection (identity mapping)
elif pool_scale == 0:
X = X_encoder[lev]
# shallower tensors are obtained from **encoder** outputs
else:
pool_size = 2**(pool_scale)
X = encode_layer(X_encoder[lev], f, pool_size, pool, activation=activation,
batch_norm=batch_norm, name='{}_down_{}_en{}'.format(name, i, lev))
# a conv layer after feature map scale change
X = CONV_stack(X, f, kernel_size=3, stack_num=1,
activation=activation, batch_norm=batch_norm, name='{}_down_from{}_to{}'.format(name, i, lev))
X_fscale.append(X)
# layer fusion at the end of each level
# stacked conv layers after concat. BatchNormalization is fixed to True
X = concatenate(X_fscale, axis=-1, name='{}_concat_{}'.format(name, i))
X = CONV_stack(X, filter_num_aggregate, kernel_size=3, stack_num=stack_num_up,
activation=activation, batch_norm=True, name='{}_fusion_conv_{}'.format(name, i))
X_decoder.append(X)
    # if there are not enough tensors for concatenation,
    # then use upsampling without concatenation
if depth_decode < depth_-1:
for i in range(depth_-depth_decode-1):
i_real = i + depth_decode
X = UNET_right(X, None, filter_num_aggregate, stack_num=stack_num_up, activation=activation,
unpool=unpool, batch_norm=batch_norm, concat=False, name='{}_plain_up{}'.format(name, i_real))
X_decoder.append(X)
# return decoder outputs
return X_decoder
def unet_3plus_2d(input_size, n_labels, filter_num_down, filter_num_skip='auto', filter_num_aggregate='auto',
stack_num_down=2, stack_num_up=1, activation='ReLU', output_activation='Sigmoid',
batch_norm=False, pool=True, unpool=True, deep_supervision=False,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='unet3plus'):
'''
UNET 3+ with an optional ImageNet-trained backbone.
unet_3plus_2d(input_size, n_labels, filter_num_down, filter_num_skip='auto', filter_num_aggregate='auto',
stack_num_down=2, stack_num_up=1, activation='ReLU', output_activation='Sigmoid',
batch_norm=False, pool=True, unpool=True, deep_supervision=False,
backbone=None, weights='imagenet', freeze_backbone=True, freeze_batch_norm=True, name='unet3plus')
----------
Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y.W. and Wu, J., 2020.
UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation.
In ICASSP 2020-2020 IEEE International Conference on Acoustics,
Speech and Signal Processing (ICASSP) (pp. 1055-1059). IEEE.
Input
----------
input_size: the size/shape of network input, e.g., `(128, 128, 3)`.
filter_num_down: a list that defines the number of filters for each
downsampling level. e.g., `[64, 128, 256, 512, 1024]`.
the network depth is expected as `len(filter_num_down)`
filter_num_skip: a list that defines the number of filters after each
full-scale skip connection. Number of elements is expected to be `depth-1`.
i.e., the bottom level is not included.
* Huang et al. (2020) applied the same numbers for all levels.
e.g., `[64, 64, 64, 64]`.
filter_num_aggregate: an int that defines the number of channels of full-scale aggregations.
stack_num_down: number of convolutional layers per downsampling level/block.
stack_num_up: number of convolutional layers (after full-scale concat) per upsampling level/block.
activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interfaces, e.g., 'ReLU'
    output_activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interfaces, e.g., 'Sigmoid'.
                       Default option is 'Sigmoid'.
                       if None is received, then linear activation is applied.
batch_norm: True for batch normalization.
pool: True or 'max' for MaxPooling2D.
'ave' for AveragePooling2D.
False for strided conv + batch norm + activation.
unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.
'nearest' for Upsampling2D with nearest interpolation.
False for Conv2DTranspose + batch norm + activation.
deep_supervision: True for a model that supports deep supervision. Details see Huang et al. (2020).
name: prefix of the created keras model and its layers.
---------- (keywords of backbone options) ----------
    backbone: the backbone model name. Should be one of the `tensorflow.keras.applications` classes.
None (default) means no backbone.
Currently supported backbones are:
(1) VGG16, VGG19
(2) ResNet50, ResNet101, ResNet152
(3) ResNet50V2, ResNet101V2, ResNet152V2
(4) DenseNet121, DenseNet169, DenseNet201
(5) EfficientNetB[0-7]
weights: one of None (random initialization), 'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
freeze_backbone: True for a frozen backbone.
freeze_batch_norm: False for not freezing batch normalization layers.
* The Classification-guided Module (CGM) is not implemented.
See https://github.com/yingkaisha/keras-unet-collection/tree/main/examples for a relevant example.
* Automated mode is applied for determining `filter_num_skip`, `filter_num_aggregate`.
* The default output activation is sigmoid, consistent with Huang et al. (2020).
* Downsampling is achieved through maxpooling and can be replaced by strided convolutional layers here.
* Upsampling is achieved through bilinear interpolation and can be replaced by transpose convolutional layers here.
Output
----------
model: a keras model.
'''
depth_ = len(filter_num_down)
verbose = False
if filter_num_skip == 'auto':
verbose = True
filter_num_skip = [filter_num_down[0] for num in range(depth_-1)]
if filter_num_aggregate == 'auto':
verbose = True
filter_num_aggregate = int(depth_*filter_num_down[0])
if verbose:
print('Automated hyper-parameter determination is applied with the following details:\n----------')
print('\tNumber of convolution filters after each full-scale skip connection: filter_num_skip = {}'.format(filter_num_skip))
print('\tNumber of channels of full-scale aggregated feature maps: filter_num_aggregate = {}'.format(filter_num_aggregate))
if backbone is not None:
bach_norm_checker(backbone, batch_norm)
X_encoder = []
X_decoder = []
IN = Input(input_size)
X_decoder = unet_3plus_2d_base(IN, filter_num_down, filter_num_skip, filter_num_aggregate,
stack_num_down=stack_num_down, stack_num_up=stack_num_up, activation=activation,
batch_norm=batch_norm, pool=pool, unpool=unpool,
backbone=backbone, weights=weights, freeze_backbone=freeze_backbone,
freeze_batch_norm=freeze_batch_norm, name=name)
X_decoder = X_decoder[::-1]
if deep_supervision:
# ----- frozen backbone issue checker ----- #
if ('{}_backbone_'.format(backbone) in X_decoder[0].name) and freeze_backbone:
            backbone_warn = '\n\nThe deepest UNET 3+ deep supervision branch directly connects to a frozen backbone.\nTesting your configurations on `keras_unet_collection.base.unet_3plus_2d_base` is recommended.'
            warnings.warn(backbone_warn)
# ----------------------------------------- #
OUT_stack = []
L_out = len(X_decoder)
print('----------\ndeep_supervision = True\nnames of output tensors are listed as follows ("sup0" is the shallowest supervision layer;\n"final" is the final output layer):\n')
# conv2d --> upsampling --> output activation.
# index 0 is final output
for i in range(1, L_out):
pool_size = 2**(i)
X = Conv2D(n_labels, 3, padding='same', name='{}_output_conv_{}'.format(name, i-1))(X_decoder[i])
X = decode_layer(X, n_labels, pool_size, unpool,
activation=None, batch_norm=False, name='{}_output_sup{}'.format(name, i-1))
if output_activation:
print('\t{}_output_sup{}_activation'.format(name, i-1))
if output_activation == 'Sigmoid':
X = Activation('sigmoid', name='{}_output_sup{}_activation'.format(name, i-1))(X)
else:
activation_func = eval(output_activation)
X = activation_func(name='{}_output_sup{}_activation'.format(name, i-1))(X)
else:
if unpool is False:
print('\t{}_output_sup{}_trans_conv'.format(name, i-1))
else:
print('\t{}_output_sup{}_unpool'.format(name, i-1))
OUT_stack.append(X)
X = CONV_output(X_decoder[0], n_labels, kernel_size=3,
activation=output_activation, name='{}_output_final'.format(name))
OUT_stack.append(X)
if output_activation:
print('\t{}_output_final_activation'.format(name))
else:
print('\t{}_output_final'.format(name))
model = Model([IN,], OUT_stack)
else:
OUT = CONV_output(X_decoder[0], n_labels, kernel_size=3,
activation=output_activation, name='{}_output_final'.format(name))
model = Model([IN,], [OUT,])
return model
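if __name__ == '__main__':
    # Minimal usage sketch (illustrative hyper-parameters, not part of the
    # original library): a 4-level UNET 3+ for binary segmentation using the
    # automated sizing of `filter_num_skip` and `filter_num_aggregate`, which
    # here resolves to [16, 16, 16] and 64 respectively.
    demo_model = unet_3plus_2d((128, 128, 3), n_labels=2,
                               filter_num_down=[16, 32, 64, 128],
                               filter_num_skip='auto', filter_num_aggregate='auto',
                               batch_norm=True, deep_supervision=False,
                               name='unet3plus_demo')
    demo_model.summary()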
|
dd82ab59c11873ecb7386b56eddca0feadb0b43e
|
a40ad075eeadf753167a7e9be2bb41253bb443e9
|
/lit_nlp/components/minimal_targeted_counterfactuals_test.py
|
4cba2f6800ce2bea741bc6060f25486aec9fb7c8
|
[
"Apache-2.0"
] |
permissive
|
PAIR-code/lit
|
1cd55c5471bd24a8205174d3a40a2ec91ea56d27
|
a41130960d6ccb92acf6ffc603377eaecce8a62b
|
refs/heads/main
| 2023-09-05T15:35:22.731062
| 2022-12-02T19:48:37
| 2022-12-02T19:48:37
| 283,215,238
| 3,201
| 351
|
Apache-2.0
| 2023-09-14T06:08:56
| 2020-07-28T13:07:26
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 16,111
|
py
|
minimal_targeted_counterfactuals_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lit_nlp.components.minimal_targeted_counterfactuals."""
from typing import List
import unittest.mock as mock
from absl.testing import absltest
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.components import minimal_targeted_counterfactuals
from lit_nlp.lib import caching
import numpy as np
import scipy.special as scipy_special
ANIMALS = ['unknown', 'elephant', 'ant', 'whale', 'seal']
class ClassificationTestDataset(lit_dataset.Dataset):
"""A test dataset for classification testing."""
def spec(self) -> lit_types.Spec:
return {
'size': lit_types.CategoryLabel(vocab=['small', 'medium', 'large']),
'weight': lit_types.Scalar(),
'legs': lit_types.Boolean(),
'description': lit_types.StringLitType(),
'animal': lit_types.CategoryLabel(vocab=ANIMALS),
}
@property
def examples(self) -> List[lit_types.JsonDict]:
return [
{
'size': 'small',
'weight': 0.01,
'legs': True,
'description': 'small but strong',
'animal': 'ant'
},
{
'size': 'large',
'weight': 0.8,
'legs': True,
'description': 'has a trunk',
'animal': 'elephant'
},
{
'size': 'medium',
'weight': 0.2,
'legs': False,
'description': 'makes strange sounds',
'animal': 'seal'
},
{
'size': 'large',
'weight': 2.5,
'legs': False,
'description': 'excellent water displacement',
'animal': 'whale'
},
]
class ClassificationTestModel(lit_model.Model):
"""A test model for testing tabular hot-flips on classification tasks."""
def __init__(self, dataset: lit_dataset.Dataset) -> None:
super().__init__()
self._dataset = dataset
def max_minibatch_size(self, **unused) -> int:
return 2
def input_spec(self) -> lit_types.Spec:
return {
'size': lit_types.CategoryLabel(vocab=['small', 'medium', 'large']),
'weight': lit_types.Scalar(),
'legs': lit_types.Boolean(),
'description': lit_types.StringLitType(),
}
def output_spec(self) -> lit_types.Spec:
return {
'preds':
lit_types.MulticlassPreds(
parent='animal', vocab=ANIMALS, null_idx=0)
}
def predict_minibatch(self, inputs: List[lit_types.JsonDict],
**unused) -> List[lit_types.JsonDict]:
output = []
def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:
"""Returns model predictions for a given example.
The method uses the animal test dataset as the ground truth. The method
compares the given example features to the dataset features for all
animals. The closer the feature values are, the higher the contribution to
the corresponding class logit is.
Args:
ex: an example to run prediction for.
Returns:
The softmax values for the animal class prediction.
"""
      # Logit values for ['unknown', 'elephant', 'ant', 'whale', 'seal'].
logits = np.zeros((len(ANIMALS),))
for db_rec in self._dataset.examples:
animal_index = ANIMALS.index(db_rec['animal'])
for field_name in self._dataset.spec():
if ex[field_name] is None or db_rec[field_name] is None:
continue
if field_name == 'animal':
continue
field_spec_value = self._dataset.spec()[field_name]
if (isinstance(field_spec_value, lit_types.CategoryLabel) or
isinstance(field_spec_value, lit_types.Boolean)) and (
ex[field_name] == db_rec[field_name]):
logits[animal_index] += 1
if isinstance(field_spec_value, lit_types.Scalar):
logits[animal_index] += 1.0 - abs(ex[field_name] -
db_rec[field_name])
return scipy_special.softmax(logits)
for example in inputs:
output.append({'preds': predict_example(example)})
return output
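# Illustrative trace of the similarity scoring above (not part of the original
# tests): for the dataset's 'ant' record, an input with size='small',
# legs=True and weight=0.01 scores +1 for each matching categorical/boolean
# field and 1 - |0.01 - 0.01| = 1 for the scalar, so the 'ant' logit is 3.0
# and the softmax peaks at ANIMALS.index('ant') == 2.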
class RegressionTestDataset(lit_dataset.Dataset):
"""A test dataset for regression testing."""
def spec(self) -> lit_types.Spec:
return {
'x_1': lit_types.Scalar(),
'x_2': lit_types.Scalar(),
'y': lit_types.Scalar(),
}
@property
def examples(self) -> List[lit_types.JsonDict]:
return [
{
'x_1': 0.0,
'x_2': 0.0,
'y': 0.0
},
{
'x_1': 0.5,
'x_2': 0.4,
'y': 1.0
},
]
class RegressionTestModel(lit_model.Model):
"""A test model for testing tabular hot-flips on regression tasks."""
def max_minibatch_size(self, **unused) -> int:
return 2
def input_spec(self) -> lit_types.Spec:
return {
'x_1': lit_types.Scalar(),
'x_2': lit_types.Scalar(),
}
def output_spec(self) -> lit_types.Spec:
return {'score': lit_types.RegressionScore(parent='y')}
def predict_minibatch(self, inputs: List[lit_types.JsonDict],
**unused) -> List[lit_types.JsonDict]:
output = []
def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:
x1 = ex['x_1']
x2 = ex['x_2']
return 2 * x1**2 + x2
for example in inputs:
output.append({'score': predict_example(example)})
return output
class ClassificationTabularMtcTest(absltest.TestCase):
"""Tests tabular hot-flips on classification tasks."""
def setUp(self):
super().setUp()
dataset = lit_dataset.IndexedDataset(
base=ClassificationTestDataset(), id_fn=caching.input_hash)
self._dataset = dataset
self._model = ClassificationTestModel(self._dataset)
self._gen = minimal_targeted_counterfactuals.TabularMTC()
self._example = {
'size': 'large',
'weight': 1.2,
'legs': False,
'description': 'big water animal',
'animal': 'whale'
}
self._config = {
'Prediction key': 'preds',
'dataset_name': 'classification_test_dataset'
}
def test_test_model(self):
"""Tests the tests model predict method."""
dataset = ClassificationTestDataset()
model = ClassificationTestModel(dataset)
preds = list(model.predict(dataset.examples))
self.assertEqual(np.argmax(preds[0]['preds']), 2)
self.assertEqual(np.argmax(preds[1]['preds']), 1)
self.assertEqual(np.argmax(preds[2]['preds']), 4)
self.assertEqual(np.argmax(preds[3]['preds']), 3)
def test_prediction_key_required(self):
"""Tests the case when the client doesn't specify the prediction key."""
self._config['Prediction key'] = ''
with self.assertRaisesRegex(ValueError,
'Please provide the prediction key'):
self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
def test_incorrect_prediction_key(self):
"""Tests the case when the client specifies a key that doesn't exist."""
self._config['Prediction key'] = 'wrong_key'
with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):
self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
def test_unsupported_model(self):
"""Tests the case when the passed model is not supported."""
mocked_model = mock.MagicMock()
output_spec = {'preds': lit_types.ImageBytes}
mocked_model.output_spec = mock.MagicMock(return_value=output_spec)
with self.assertRaisesRegex(
ValueError, 'Only classification and regression models are supported'):
self._gen.generate(
example=self._example,
model=mocked_model,
dataset=self._dataset,
config=self._config)
def test_no_model(self):
"""Tests the case when no model is passed."""
with self.assertRaisesRegex(ValueError,
'Please provide a model for this generator'):
self._gen.generate(
example=self._example,
model=None,
dataset=self._dataset,
config=self._config)
def test_max_number_of_records(self):
"""Tests that a client can specify a desired number of flips to return."""
self._config['Number of examples'] = '2'
result = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertLen(result, 2)
def test_text_fields_equal_to_target(self):
"""Tests that non-scalar non-categorical features has correct value.
The values of non-scalar, non-categorical features should be the same as in
the input example.
"""
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
s = {o['description'] for o in output}
self.assertLen(s, 1)
self.assertIn('big water animal', s)
def test_mtc_prediction_is_argmax(self):
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
y_actual = output[0]['animal']
y_expected = self._predict_and_return_argmax_label(output[0])
self.assertEqual(y_actual, y_expected)
def test_output_is_counterfactuals(self):
"""Tests that the returned values are indeed counterfactuals."""
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertGreaterEqual(len(output), 1)
target_prediction = self._predict_and_return_argmax_label(self._example)
for cf_example in output:
cf_prediction = self._predict_and_return_argmax_label(cf_example)
self.assertNotEqual(cf_prediction, target_prediction)
def test_config_spec(self):
"""Tests that the generator returns spec with correct fields."""
spec = self._gen.config_spec()
self.assertIn('Number of examples', spec)
self.assertIn('Maximum number of columns to change', spec)
self.assertIn('Regression threshold', spec)
self.assertIn('Prediction key', spec)
def test_example_field_is_none(self):
"""Tests the case when a feature is assigned None value."""
self._example['weight'] = None
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertNotEmpty(output)
def _predict_and_return_argmax_label(self, example):
"""Given an example, returns the index of the top prediction."""
model_out = self._model.predict([example])
softmax = list(model_out)[0]['preds']
argmax = np.argmax(softmax)
return self._model.output_spec()['preds'].vocab[argmax]
class RegressionTabularMtcTest(absltest.TestCase):
"""Tests tabular hot-flips with regression models."""
def setUp(self):
super().setUp()
dataset = lit_dataset.IndexedDataset(
base=RegressionTestDataset(), id_fn=caching.input_hash)
self._dataset = dataset
self._model = RegressionTestModel()
self._gen = minimal_targeted_counterfactuals.TabularMTC()
self._example = {'x_1': 1.0, 'x_2': 1.0}
self._config = {
'Prediction key': 'score',
'dataset_name': 'regression_test_dataset'
}
def test_test_regression_model(self):
"""Tests the predict method of the regression model."""
model = RegressionTestModel()
example = {'x_1': 3, 'x_2': 2}
pred = list(model.predict([example]))[0]
self.assertEqual(pred['score'], 20)
def test_output_is_below_threshold_counterfactuals(self):
"""Tests the case when the target prediction is above the threshold.
If the target (reference) prediction is above the decision boundary
threshold, the predictions for all counterfactuals should be below the
threshold.
"""
threshold = 2.8
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 1.0, 'x_2': 1.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
target_score = self._predict_and_return_score(self._example)
self.assertGreaterEqual(target_score, threshold)
self.assertNotEmpty(output)
for cf_example in output:
cf_score = self._predict_and_return_score(cf_example)
self.assertLess(cf_score, threshold)
def test_output_is_above_threshold_counterfactuals(self):
"""Tests the case when the target prediction is below the threshold.
If the target (reference) prediction is below the decision boundary
    threshold, the predictions for all counterfactuals should be above or
    equal to the threshold.
"""
threshold = 0.1
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 0.0, 'x_2': -5.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
target_score = self._predict_and_return_score(self._example)
self.assertLess(target_score, threshold)
self.assertNotEmpty(output)
for cf_example in output:
cf_score = self._predict_and_return_score(cf_example)
self.assertGreaterEqual(cf_score, threshold)
def test_no_counterfactuals_found(self):
"""Tests the case when there no counterfactuals in the database."""
threshold = 4.0
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 1.0, 'x_2': 1.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertEmpty(output)
def test_max_num_of_changed_columns(self):
"""Tests the client can set the number of features that can be changed."""
self._config['Regression threshold'] = '0.25'
self._config['Maximum number of columns to change'] = '1'
self._example = {'x_1': 0.3, 'x_2': 0.3}
output_1 = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self._config['Maximum number of columns to change'] = '2'
output_2 = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertNotEmpty(output_1)
self.assertNotEmpty(output_2)
self.assertGreater(len(output_2), len(output_1))
def test_parent_field_updated(self):
threshold = 0.8
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 0.0, 'x_2': 0.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
y_actual = output[0]['y']
y_expected = self._predict_and_return_score(output[0])
self.assertEqual(y_actual, y_expected)
def _predict_and_return_score(self, example):
"""Given an example, returns the regression score."""
model_out = self._model.predict([example])
return list(model_out)[0]['score']
if __name__ == '__main__':
absltest.main()
|
cb80d453551b673af8391dddd3d8c9b47b71eca6
|
fa0bd730981a4a7333e7858c03e2a16c75e9cf5c
|
/Chapter 2/batch_datasets.py
|
2c118a9a35ca9710d8fbd92a11b1ba4484141bf4
|
[
"MIT"
] |
permissive
|
PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras
|
4cb5f7249dcd1efe6ea5a5263fb862240ce303bb
|
e23d2b4a4292386b70977473805acb2f93ef16ca
|
refs/heads/master
| 2023-02-13T04:04:57.531730
| 2023-02-07T19:23:47
| 2023-02-07T19:23:47
| 228,759,428
| 311
| 214
|
MIT
| 2021-06-01T14:06:06
| 2019-12-18T04:42:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
batch_datasets.py
|
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
builder = tfds.builder('imdb_reviews')
builder.download_and_prepare()
datasets, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
train_dataset = datasets['train']
train_dataset = train_dataset.batch(5).shuffle(50).take(2)
for data in train_dataset:
print(data)
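# Note (illustrative alternative, not part of the original snippet): calling
# shuffle() after batch() permutes whole 5-element batches; to shuffle
# individual reviews before batching, reverse the order of the calls:
# train_dataset = datasets['train'].shuffle(50).batch(5).take(2)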
|
35d89b29dfc334409599b486bd28314a59f61d7c
|
59bb398c5f23770e4725f35f932f3a5fd013efae
|
/jwst/ami/tests/test_ami_analyze.py
|
8fb464b44ec9b377852ef5c76524df59b311939a
|
[
"BSD-2-Clause"
] |
permissive
|
spacetelescope/jwst
|
9826d86781c6e01aced951882471f8b967fa1f6e
|
a4a0e8ad2b88249f01445ee1dcf175229c51033f
|
refs/heads/master
| 2023-09-04T09:54:04.504036
| 2023-08-31T20:19:27
| 2023-08-31T20:19:27
| 60,551,519
| 449
| 106
|
NOASSERTION
| 2023-09-14T21:21:33
| 2016-06-06T18:34:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 33,018
|
py
|
test_ami_analyze.py
|
"""
Unit tests for ami_analyze
"""
import numpy as np
import math
import pytest
from stdatamodels.jwst import datamodels
from jwst.ami import utils, leastsqnrm, hexee, webb_psf
from jwst.ami.leastsqnrm import hexpb, ffc, ffs, closure_amplitudes
from jwst.ami.leastsqnrm import closurephase, redundant_cps
from jwst.ami.leastsqnrm import populate_symmamparray
from jwst.ami.leastsqnrm import populate_antisymmphasearray
from jwst.ami.leastsqnrm import tan2visibilities, model_array
from jwst.ami.analyticnrm2 import interf, psf, phasor, asf_hex
from numpy.testing import assert_allclose
# ---------------------------------------------------------------
# utils module tests:
#
def test_utils_rebin():
''' Test of rebin() and krebin() in utils module '''
arr = np.arange(24).reshape((3, 8)) / 10.0
    rc = (2, 2)
binned_arr = utils.rebin(arr, rc)
true_arr = np.array([[5.1, 6.3, 7.5, 8.7]])
assert_allclose(binned_arr, true_arr)
def test_utils_quadratic():
''' Test of quadratic in utils module '''
x = np.array([0.5, 0.55, 0.55, 0.65, 0.70, 0.8, 0.85, 1.0, 1.01, 1.02, 1.03, 1.04, 1.05])
p = np.array([-2.0, 3.0, 7.0])
maxx, maxy, fit_vals = utils.quadratic(p, x)
true_maxx = 0.75
true_maxy = 8.125
assert_allclose([maxx, maxy], [true_maxx, true_maxy])
true_fit_vals = np.array(
[8.0, 8.045, 8.045, 8.105, 8.12, 8.12, 8.105, 8.0, 7.9898, 7.9792, 7.9682, 7.9568, 7.945]
)
assert_allclose(fit_vals, true_fit_vals)
def test_utils_findmax():
''' Test of findmax in utils module '''
mag = np.arange(9) + 1.0
delt = 1.0e-7
mag[2] += delt
mag[5] += delt # Add a bit of deterministic noise
mag[1] -= delt
mag[7] -= delt
vals = (mag - 3.0) ** 2 + 5 # Is quadratic ...
vals[1] += delt
vals[6] += delt # ... with a bit more noise
vals[4] -= delt
vals[3] -= delt
maxx, maxy = utils.findmax(mag, vals)
true_maxx = 3.0
true_maxy = 5.0
assert_allclose([maxx, maxy], [true_maxx, true_maxy])
def test_utils_makeA():
''' Test of makeA in utils module '''
nh = 4 # number of holes
arr = utils.makeA(nh)
true_arr = np.array(
[
[-1.0, 1.0, 0.0, 0.0],
[-1.0, 0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0, 1.0],
[0.0, -1.0, 1.0, 0.0],
[0.0, -1.0, 0.0, 1.0],
[0.0, 0.0, -1.0, 1.0],
]
)
assert_allclose(arr, true_arr)
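# Each row of the matrix above encodes one of the nh*(nh-1)/2 = 6 hole pairs
# (baselines), so multiplying it by a vector of per-hole pistons yields the
# piston difference for every baseline.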
def test_utils_fringes2pistons():
''' Test of fringes2pistons in utils module '''
fringephases = np.array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1])
nholes = 5
result = utils.fringes2pistons(fringephases, nholes)
true_result = np.array([-0.02, -0.034, -0.02, 0.014, 0.06])
assert_allclose(result, true_result)
def test_utils_rcrosscorrelate():
''' Test of rcrosscorrelate() in utils module '''
a = np.array(
[
[2.0, 3.0, 4.0, 5.0],
[6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0],
[14.0, 15.0, 16.0, 17.0],
]
)
b = np.array(
[
[-5.0, -4.0, -3.0, -2.0],
[-1.0, 0.0, 1.0, 2.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 9.0, 10.0],
]
)
result = utils.rcrosscorrelate(a, b)
true_result = np.array(
[
[0.19865015, 0.20767971, 0.23476836, 0.20767971],
[0.34312299, 0.35215254, 0.3792412, 0.35215254],
[0.77654151, 0.78557106, 0.81265972, 0.78557106],
[0.34312299, 0.35215254, 0.3792412, 0.35215254],
]
)
assert_allclose(result, true_result)
def test_utils_crosscorrelate():
''' Test of crosscorrelate() in utils module '''
a = np.array(
[
[2.0, 3.0, 4.0, 5.0],
[6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0],
[14.0, 15.0, 16.0, 17.0],
]
)
b = np.array(
[
[-5.0, -4.0, -3.0, -2.0],
[-1.0, 0.0, 1.0, 2.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 9.0, 10.0],
]
)
result = utils.crosscorrelate(a, b)
true_result = np.array(
[
[176.0 + 0.0j, 184.0 + 0.0j, 208.0 + 0.0j, 184.0 + 0.0j],
[304.0 + 0.0j, 312.0 + 0.0j, 336.0 + 0.0j, 312.0 + 0.0j],
[688.0 + 0.0j, 696.0 + 0.0j, 720.0 + 0.0j, 696.0 + 0.0j],
[304.0 + 0.0j, 312.0 + 0.0j, 336.0 + 0.0j, 312.0 + 0.0j],
]
)
assert_allclose(result, true_result)
def test_utils_imgmedian():
'''Test of img_median_replace() in utils module'''
# create input image model containing NaN's and DO_NOT_USE flags
data = np.array(
[
[1.0, 2.0, 3.0, 4.0, 5.0],
[6.0, 0.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0],
[16.0, 17.0, np.nan, 0.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0],
], dtype=np.float32,
)
dq = np.array(
[
[0, 0, 0, 0, 0],
[0, 1, 4, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=np.uint32,
)
input_model = datamodels.ImageModel(data=data, dq=dq)
# send to img_median_replace to replace bad pixels
input_model = utils.img_median_replace(input_model, box_size=3)
expected_result = np.array(
[
[1.0, 2.0, 3.0, 4.0, 5.0],
[6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0],
[16.0, 17.0, 17.0, 18.5, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0],
], dtype=np.float32,
)
assert_allclose(input_model.data, expected_result)
# ---------------------------------------------------------------
# leastsqnrm module tests:
#
def test_leastsqnrm_rotatevectors():
"""Test of rotatevectors() in leastsqnrm module.
Positive x decreases under slight rotation, and positive y
increases under slight rotation.
"""
vec = np.arange(8).reshape((4, 2)) + 1.0
rot_vec = leastsqnrm.rotatevectors(vec, thetarad=0.001)
true_rot_vec = np.array(
[[0.9979995, 2.000999], [2.9959985, 4.002998], [4.9939975, 6.004997], [6.9919965, 8.006996]]
)
assert_allclose(rot_vec, true_rot_vec)
rot_vec = leastsqnrm.rotatevectors(vec, thetarad=math.pi / 2.0)
true_rot_vec = np.array([[-2.0, 1.0], [-4.0, 3.0], [-6.0, 5.0], [-8.0, 7.0]])
assert_allclose(rot_vec, true_rot_vec)
def test_leastsqnrm_flip():
"""Test of flip() in leastsqnrm module.
Change sign of 2nd coordinate of holes.
"""
vec = np.arange(8).reshape((4, 2)) + 1.0
flip_vec = leastsqnrm.flip(vec)
true_flip_vec = np.array([[1.0, -2.0], [3.0, -4.0], [5.0, -6.0], [7.0, -8.0]])
assert_allclose(flip_vec, true_flip_vec)
def test_leastsqnrm_mas2rad():
"""Test of mas2rad() in leastsqnrm module.
Convert angle in milli arc-sec to radians.
"""
mas = 1.0e8
theta_rad = leastsqnrm.mas2rad(mas)
true_theta_rad = mas * (10 ** (-3)) / (3600 * 180 / np.pi)
assert_allclose(theta_rad, true_theta_rad)
def test_leastsqnrm_rad2mas():
"""Test of rad2mas() in leastsqnrm module.
Convert input angle in radians to milli arc sec.
"""
theta_rad = 1.0e-6
mas = leastsqnrm.rad2mas(theta_rad)
true_mas = theta_rad * (3600.0 * 180 / np.pi) * 10.0 ** 3
assert_allclose(mas, true_mas)
def test_leastsqnrm_sin2deltapistons():
"""Test of sin2deltapistons() in leastsqnrm module.
Each baseline has one sine and one cosine fringe with a coefficient
that depends on the piston difference between the two holes that make
the baseline. For a 7-hole mask there are 21 baselines and therefore
there are 42 sine and cosine terms that contribute to the fringe model.
This function calculates the sine of this piston difference.
"""
# 4 holes (6 baselines) plus average flux per hole and DC offset
coeffs = np.array([1.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
delta = leastsqnrm.sin2deltapistons(coeffs)
true_delta = np.array([0.04849334, 0.08333333, 0.12340834])
assert_allclose(delta, true_delta)
def test_leastsqnrm_cos2deltapistons():
"""Test of cos2deltapistons() in leastsqnrm module.
Each baseline has one sine and one cosine fringe with a coefficient
that depends on the piston difference between the two holes that make
the baseline. For a 7-hole mask there are 21 baselines and therefore
there are 42 sine and cosine terms that contribute to the fringe model.
    This function calculates the cosine of this piston difference.
"""
# 4 holes (6 baselines) plus average flux per hole and DC offset
coeffs = np.array([1.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
delta = leastsqnrm.cos2deltapistons(coeffs)
true_delta = np.array([0.21795289, 0.18450506, 0.14758362])
assert_allclose(delta, true_delta)
def test_leastsqnrm_replacenan():
"""Test of replacenan() in leastsqnrm module.
    Replace singularities encountered in the analytical hexagon Fourier
    transform with the analytically derived limit (pi/4).
"""
arr = np.array([1.0, 5.6, np.nan, 5.3])
rep_arr = leastsqnrm.replacenan(arr)
true_rep_arr = np.array([1.0, 5.6, math.pi / 4.0, 5.3])
assert_allclose(rep_arr, true_rep_arr)
def test_leastsqnrm_hexpb():
"""Test of hexpb() in leastsqnrm module.
Calculate the primary beam for hexagonal holes.
"""
# Clean up any attributes that may have been added earlier
for kk in list((hexpb.__dict__).keys()):
delattr(hexpb, kk)
hexpb.d = 0.5
hexpb.lam = 2.0e-06
hexpb.offx = 28.0
hexpb.offy = 28.0
hexpb.pitch = 1.0e-07
hexpb.shape = 'hex'
hexpb.size = (3, 3)
hexpb_arr = hexpb()
true_hexpb_arr = np.array(
[
[0.01520087, 0.01901502, 0.02328432],
[0.01912038, 0.02356723, 0.02850747],
[0.02349951, 0.02861771, 0.03426836],
]
)
assert_allclose(hexpb_arr, true_hexpb_arr, atol=1e-7)
# Clean up attributes that have been added
for kk in list((hexpb.__dict__).keys()):
delattr(hexpb, kk)
def test_leastsqnrm_model_array():
"""Test of model_array in leastsqnrm module.
Create a model using the specified wavelength.
"""
import warnings
test_res = [] # to accumulate subtest comparisons
modelctrs = np.array(
[
[-0.01540951, -2.63995503],
[-2.28627105, 0.01334504],
[2.2785663, -1.33332266],
[-2.2785663, 1.33332266],
[-1.1315734, 1.98663876],
[2.29397581, 1.30663257],
[1.15468766, 1.97329378],
]
)
lam = 2.3965000082171173e-06
oversample = 3 # oversample factor
modelpix = 2.729940189982537e-07
fov = 19
hole_d = 0.8 # hole diameter
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value*", RuntimeWarning)
warnings.filterwarnings("ignore", "divide by zero*", RuntimeWarning)
pb, ff = model_array(
modelctrs,
lam,
oversample,
modelpix,
fov,
hole_d,
centering="PIXELCENTERED",
shape="hex",
)
pb_1 = pb[15:18, 20:23]
true_pb_1 = np.array(
[
[0.40298655, 0.42031716, 0.43582878],
[0.43220726, 0.45055339, 0.46696826],
[0.46060077, 0.47992491, 0.49720953],
]
)
test_res.append(np.allclose(pb_1, true_pb_1, rtol=1.0e-7))
pb_2 = pb[25:28, 10:13]
true_pb_2 = np.array(
[
[0.30173028, 0.33450152, 0.36798482],
[0.30622563, 0.33939216, 0.37327326],
[0.30894849, 0.34235409, 0.37647573],
]
)
test_res.append(np.allclose(pb_2, true_pb_2, rtol=1.0e-7))
ff_1 = ff[1][15:18, 20:23] # slice 1
true_ff_1 = np.array(
[
[-1.45863979, -0.5441426, 0.52620696],
[-1.98551072, -1.57725007, -0.71723623],
[-1.74296371, -1.9991473, -1.68273863],
]
)
test_res.append(np.allclose(ff_1, true_ff_1, rtol=1.0e-7))
ff_5 = ff[5][25:28, 10:13] # slice 5
true_ff_5 = np.array(
[
[1.65967899, 1.99729328, 1.76662698],
[0.06173976, 1.0806429, 1.79207573],
[-1.58764681, -0.73650014, 0.32419949],
]
)
test_res.append(np.allclose(ff_5, true_ff_5, rtol=1.0e-7))
assert np.all(test_res)
def test_leastsqnrm_ffc():
"""Test of ffc in leastsqnrm module.
Calculate cosine terms of analytic model.
"""
ASIZE = 4
kx = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
ky = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
vv = np.arange(ASIZE)
for ii in np.arange(ASIZE):
kx[:, ii] = vv
ky[ii, :] = vv
# Clean up any attributes that may have been added earlier
for kk in list((ffc.__dict__).keys()):
delattr(ffc, kk)
ffc.N = 7
ffc.lam = 2.3965000082171173e-06
ffc.offx = 28.0
ffc.offy = 28.0
ffc.over = 3
ffc.pitch = 9.099800633275124e-08
ffc.ri = np.array([-0.01540951, -2.63995503])
ffc.rj = np.array([-2.28627105, 0.01334504])
ffc.size = (57, 57)
ffc_arr = ffc(kx, ky)
true_ffc_arr = np.array(
[
[-1.66542264, -0.68760215, 0.55667544, 1.58523217],
[-1.99797288, -1.55759213, -0.51361892, 0.72939004],
[-1.75826723, -1.98145931, -1.43680344, -0.3353627],
[-1.01496177, -1.83780038, -1.94846124, -1.30406145],
]
)
assert_allclose(ffc_arr, true_ffc_arr)
for kk in list((ffc.__dict__).keys()):
delattr(ffc, kk)
def test_leastsqnrm_ffs():
"""Test of ffs in leastsqnrm module.
Calculate sine terms of analytic model.
"""
ASIZE = 4
kx = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
ky = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
vv = np.arange(ASIZE)
for ii in np.arange(ASIZE):
kx[:, ii] = vv
ky[ii, :] = vv
for kk in list((ffs.__dict__).keys()):
delattr(ffs, kk)
ffs.N = 7
ffs.lam = 2.3965000082171173e-06
ffs.offx = 28.0
ffs.offy = 28.0
ffs.over = 3
ffs.pitch = 9.099800633275124e-08
ffs.ri = np.array([-0.01540951, -2.63995503])
ffs.rj = np.array([-2.28627105, 0.01334504])
ffs.size = (57, 57)
ffs_arr = ffs(kx, ky)
true_ffs_arr = np.array(
[
[-1.10741476, -1.878085, -1.92096654, -1.21944207],
[-0.09002431, -1.2545544, -1.93292411, -1.86225406],
[0.95315074, -0.27169652, -1.39125694, -1.97168249],
[1.72332603, 0.7889802, -0.45110839, -1.51638509],
]
)
assert_allclose(ffs_arr, true_ffs_arr)
for kk in list((ffs.__dict__).keys()):
delattr(ffs, kk)
def test_leastsqnrm_closure_amplitudes():
"""Test of closure_amplitudes() in leastsqnrm module.
Calculate the closure amplitudes.
"""
amps = np.array([0.1, 0.2, 0.3, 1.0, 0.9, 0.5, 1.1, 0.7, 0.1, 1.0])
n = 5 # number of holes
cas = closure_amplitudes(amps, n=n)
true_cas = np.array([0.7, 0.04545455, 0.3030303, 6.66666667, 18.0])
assert_allclose(cas, true_cas, atol=1e-7)
def test_leastsqnrm_closurephase():
"""Test of closurephase in leastsqnrm module.
Calculate closure phases between each pair of holes.
"""
n = 7 # number of holes
deltap = np.array(
[
0.1,
-0.2,
0.3,
0.2,
0.05,
-0.7,
-0.05,
0.7,
0.1,
0.02,
-0.5,
-0.05,
0.7,
0.1,
0.3,
0.4,
-0.2,
-0.3,
0.2,
0.5,
0.3,
]
)
cps = closurephase(deltap, n=n)
true_cps = np.array(
[0.25, 0.5, 0.0, 0.07, 0.3, -0.8, 0.55, 0.03, 0.75, -0.35, -0.35, -0.65, 0.8, 1.2, 0.0]
)
assert_allclose(cps, true_cps, atol=1e-8)
def test_leastsqnrm_redundant_cps():
"""Test of redundant_cps in leastsqnrm module.
Calculate closure phases for each set of 3 holes.
"""
n = 7 # number of holes
deltaps = np.array(
[
0.1,
-0.2,
0.3,
0.2,
0.05,
-0.7,
-0.05,
0.7,
0.1,
0.02,
-0.5,
-0.05,
0.7,
0.1,
0.3,
0.4,
-0.2,
-0.3,
0.2,
0.5,
0.3,
]
)
cps = redundant_cps(deltaps, n=n)
true_cps = np.array(
[
0.25,
0.5,
0.0,
0.07,
0.3,
-0.55,
0.3,
-0.15,
0.8,
0.5,
0.05,
0.7,
0.35,
1.4,
1.05,
-0.8,
0.55,
0.03,
0.75,
1.0,
0.48,
0.9,
0.28,
1.1,
0.82,
-0.35,
-0.35,
-0.65,
0.8,
0.9,
0.1,
0.8,
1.2,
0.4,
0.0,
]
)
assert_allclose(cps, true_cps, atol=1e-8)
def test_leastsqnrm_populate_symmamparray():
"""Test of populate_symmamparray in leastsqnrm module.
Populate the symmetric fringe amplitude array.
"""
amps = np.array([0.1, 0.2, 0.3, 0.2, 0.05, 0.7, 0.3, 0.1, 0.2, 0.8])
n = 5
arr = populate_symmamparray(amps, n=n)
true_arr = np.array(
[
[0.0, 0.1, 0.2, 0.3, 0.2],
[0.1, 0.0, 0.05, 0.7, 0.3],
[0.2, 0.05, 0.0, 0.1, 0.2],
[0.3, 0.7, 0.1, 0.0, 0.8],
[0.2, 0.3, 0.2, 0.8, 0.0],
]
)
assert_allclose(arr, true_arr, atol=1e-8)
def test_leastsqnrm_populate_antisymmphasearray():
"""Test of populate_antisymmphasearray in leastsqnrm module.
Populate the antisymmetric fringe phase array.
"""
deltaps = np.array([0.1, 0.2, 0.3, 0.2, 0.05, 0.7, 0.3, 0.1, 0.2, 0.8])
n = 5
arr = populate_antisymmphasearray(deltaps, n=n)
true_arr = np.array(
[
[0.0, 0.1, 0.2, 0.3, 0.2],
[-0.1, 0.0, 0.05, 0.7, 0.3],
[-0.2, -0.05, 0.0, 0.1, 0.2],
[-0.3, -0.7, -0.1, 0.0, 0.8],
[-0.2, -0.3, -0.2, -0.8, 0.0],
]
)
assert_allclose(arr, true_arr, atol=1e-8)
def test_leastsqnrm_tan2visibilities():
"""Test of tan2visibilities in leastsqnrm module.
From the solution to the fit, calculate the fringe amplitude and phase.
"""
test_res = [] # to accumulate subtest comparisons
coeffs = np.array([1.0, 0.2, -0.3, -0.1, 0.4, 0.2, -0.5, -0.1, 0.2, 0.4])
amp, delta = tan2visibilities(coeffs)
true_amp = np.array([0.36055513, 0.41231056, 0.53851648, 0.2236068])
test_res.append(np.allclose(amp, true_amp, rtol=1.0e-7))
true_delta = np.array([-0.98279372, 1.81577499, -1.19028995, 2.03444394])
test_res.append(np.allclose(delta, true_delta, rtol=1.0e-7))
assert np.all(test_res)
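# For reference, each (cosine, sine) coefficient pair (c, s) after the leading
# coefficient maps to amplitude sqrt(c**2 + s**2) and phase arctan2(s, c);
# e.g. the first pair (0.2, -0.3) gives sqrt(0.13) ~= 0.3606 and
# arctan2(-0.3, 0.2) ~= -0.9828, matching true_amp[0] and true_delta[0] above.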
def test_leastsqnrm_multiplyenv():
"""Test of multiplyenv in leastsqnrm module.
Multiply the envelope by each fringe 'image'.
"""
env = np.array(
[
[1.00e-06, 1.00e-06, 1.01e-04],
[1.00e-06, 1.00e-06, 1.01e-04],
[1.10e-05, 1.10e-05, 1.00e-06],
]
)
ft = np.array(
[
[[4.0, 4.0, 4.0], [4.0, 4.0, 4.0], [4.0, 4.0, 4.0]],
[[0.3, 0.3, 0.3], [0.3, 0.3, 0.4], [0.3, 0.3, 0.3]],
[[-0.4, -0.4, -0.4], [-0.4, -0.4, -0.6], [-0.4, -0.4, -0.9]],
[[0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [1.0, 0.8, 1.3]],
]
)
# function is expecting a list, so make it one
fringeterms = list(ft)
full = leastsqnrm.multiplyenv(env, fringeterms)
true_full = np.array(
[
[
[4.00e-06, 3.00e-07, -4.00e-07, 8.00e-07, 1.00e00],
[4.00e-06, 3.00e-07, -4.00e-07, 8.00e-07, 1.00e00],
[4.04e-04, 3.03e-05, -4.04e-05, 8.08e-05, 1.00e00],
],
[
[4.00e-06, 3.00e-07, -4.00e-07, 8.00e-07, 1.00e00],
[4.00e-06, 3.00e-07, -4.00e-07, 8.00e-07, 1.00e00],
[4.04e-04, 4.04e-05, -6.06e-05, 8.08e-05, 1.00e00],
],
[
[4.40e-05, 3.30e-06, -4.40e-06, 1.10e-05, 1.00e00],
[4.40e-05, 3.30e-06, -4.40e-06, 8.80e-06, 1.00e00],
[4.00e-06, 3.00e-07, -9.00e-07, 1.30e-06, 1.00e00],
],
]
)
assert_allclose(full, true_full, atol=1e-8)
# ---------------------------------------------------------------
# hexee module tests:
#
def test_hexee_g_eeAG():
"""Test of g_eeAG() in the hexee module.
Calculate the Fourier transform of one half of a hexagon that is
bisected from one corner to its diametrically opposite corner.
"""
xi, eta, kwargs = setup_hexee()
g = hexee.g_eeAG(xi, eta, **kwargs)
true_g = np.array(
[
[-0.04454286 + 0.05015766j, -0.04164985 + 0.06041733j, -0.03830953 + 0.07099764j],
[-0.04072437 + 0.05375103j, -0.03729262 + 0.06415232j, -0.03340318 + 0.07486623j],
[-0.03657856 + 0.05703437j, -0.03258885 + 0.06754246j, -0.02813134 + 0.07835476j],
]
)
assert_allclose(g, true_g)
def test_hexee_glimit():
"""Test of glimit() in the hexee module.
Calculate the analytic limit of the Fourier transform of one half of the
hexagon along eta=0.
"""
xi, eta, kwargs = setup_hexee()
g = hexee.glimit(xi, eta, **kwargs)
true_g = np.array(
[
[0.07105571 + 0.28088478j, 0.07105571 + 0.28088478j, 0.07105571 + 0.28088478j],
[0.08609692 + 0.28598645j, 0.08609692 + 0.28598645j, 0.08609692 + 0.28598645j],
[0.10178022 + 0.29008864j, 0.10178022 + 0.29008864j, 0.10178022 + 0.29008864j],
]
)
assert_allclose(g, true_g)
# ---------------------------------------------------------------
# analyticnrm2 module tests:
#
def test_analyticnrm2_psf(setup_sf):
''' Test of psf() in the analyticnrm2 module '''
pixel, fov, oversample, ctrs, d, lam, phi, psf_offset, aff_obj = setup_sf
shape = "hex"
computed_psf = psf(pixel, fov, oversample, ctrs, d, lam, phi, psf_offset, aff_obj, shape=shape)
true_psf = np.array(
[
[1.14249135, 0.65831385, 0.45119464, 0.66864436, 1.10501352, 2.04851966],
[2.2221824, 0.62716999, 0.87062628, 1.97855142, 1.72666739, 0.28363866],
[4.37562298, 2.64951632, 6.40126821, 12.22910105, 13.17326852, 7.49323549],
[5.93942383, 4.58894785, 12.68235611, 24.87843624, 29.17900067, 20.64525322],
[5.38441424, 3.73680387, 13.26524812, 28.96518165, 36.75, 28.96518165],
[3.98599305, 1.08124031, 7.38628086, 20.64525322, 29.17900067, 24.87843625],
]
)
assert_allclose(computed_psf, true_psf, atol=1e-7)
def test_analyticnrm2_asf_hex(setup_sf):
''' Test of asf_hex() in the analyticnrm2 module FOR HEX '''
pixel, fov, oversample, ctrs, d, lam, phi, psf_offset, aff_obj = setup_sf
asf = asf_hex(pixel, fov, oversample, ctrs, d, lam, phi, psf_offset, aff_obj)
true_asf = np.array(
[
[
0.82125698 + 7.84095011e-16j,
0.83091456 + 2.48343013e-14j,
0.83785899 - 2.49800181e-16j,
0.84204421 - 1.80411242e-16j,
0.8434424 - 2.91433544e-16j,
0.84204424 - 1.24900090e-16j,
],
[
0.83091447 + 4.09394740e-16j,
0.84064761 + 1.38777878e-15j,
0.8476463 + 1.29063427e-15j,
0.85186417 - 6.17561557e-16j,
0.85327325 + 2.98372438e-16j,
0.85186418 + 1.90125693e-15j,
],
[
0.83785894 + 1.68268177e-16j,
0.84764629 + 1.07552856e-16j,
0.8546839 + 6.38378239e-16j,
0.8589252 - 1.65145675e-15j,
0.8603421 - 9.29811783e-16j,
0.8589252 + 1.15185639e-15j,
],
[
0.84204421 - 6.59194921e-17j,
0.85186417 - 6.70470623e-16j,
0.8589252 + 8.91214186e-16j,
0.86318061 - 3.46944695e-16j,
0.86460222 + 2.08166817e-17j,
0.86318061 - 5.34294831e-16j,
],
[
0.84344243 + 2.28983499e-16j,
0.85327326 + 2.98719383e-15j,
0.8603421 + 5.02722863e-15j,
0.86460222 + 5.48866508e-15j,
0.8660254 + 0.00000000e00j,
0.86460222 + 5.29611077e-15j,
],
[
0.84204425 - 1.48492330e-15j,
0.85186418 + 6.03683770e-16j,
0.8589252 + 5.68989300e-16j,
0.86318061 + 2.77555756e-16j,
0.86460222 - 1.72431514e-15j,
0.86318061 - 5.54070678e-15j,
],
]
)
assert_allclose(asf, true_asf, atol=1e-7)
def test_analyticnrm2_interf(setup_sf):
''' Test of interf() in the analyticnrm2 module '''
ASIZE = 4
kx = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
ky = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
vv = np.arange(ASIZE)
for ii in np.arange(ASIZE):
kx[:, ii] = vv
ky[ii, :] = vv
# Clean up any attributes that may have been added earlier
for kk in list((interf.__dict__).keys()):
delattr(interf, kk)
pixel, fov, oversample, ctrs, d, lam, phi, centering, aff_obj = setup_sf
pitch = pixel / float(oversample)
interf.lam = lam
interf.offx = 0.5
interf.offy = 0.5
interf.ctrs = ctrs
interf.d = d
interf.phi = phi
interf.pitch = pixel / float(oversample)
    c = (ASIZE / 2.0, ASIZE / 2.0)
    interf.c = c
interference = interf(kx, ky, ctrs=ctrs, phi=phi, lam=lam, pitch=pitch, c=c, affine2d=aff_obj)
true_interference = np.array(
[
[
2.6870043 + 1.24219632j,
4.01721904 + 0.66189711j,
4.2132531 + 0.21372447j,
3.18675131 - 0.03818252j,
],
[
3.8517604 + 1.53442862j,
5.71582424 + 0.84829672j,
6.24380079 + 0.2201634j,
5.25470657 - 0.31113349j,
],
[
4.02194801 + 1.32112798j,
6.1888738 + 0.66733046j,
7.0 + 0.0j,
6.1888738 - 0.66733046j,
],
[
3.07194559 + 0.75829976j,
5.25470657 + 0.31113349j,
6.24380079 - 0.2201634j,
5.71582424 - 0.84829672j,
],
]
)
assert_allclose(interference, true_interference, atol=1e-7)
def test_analyticnrm2_phasor():
''' Test of phasor() in the analyticnrm2 module '''
ASIZE = 4
kx = np.arange(ASIZE * ASIZE).reshape((ASIZE, ASIZE))
for ii in np.arange(ASIZE):
kx[:, ii] = ii
ky = kx.transpose()
hx = 0.06864653345335156
hy = -2.6391073592116028
lam = 2.3965000082171173e-06
phi = 0.0
pitch = 1.0375012775744072e-07
aff_obj = utils.Affine2d(rotradccw=0.4)
result = phasor(kx, ky, hx, hy, lam, phi, pitch, aff_obj)
true_result = np.array(
[
[
1.0 + 0.0j,
0.96578202 + 0.25935515j,
0.86546981 + 0.50096108j,
0.70592834 + 0.70828326j,
],
[
0.78476644 + 0.61979161j,
0.59716716 + 0.80211681j,
0.36870018 + 0.92954837j,
0.11500085 + 0.99336539j,
],
[
0.23171672 + 0.97278331j,
-0.02850852 + 0.99959355j,
-0.28678275 + 0.95799564j,
-0.52543073 + 0.85083638j,
],
[
-0.42107943 + 0.90702377j,
-0.64191223 + 0.76677812j,
-0.81881514 + 0.57405728j,
-0.93968165 + 0.34205027j,
],
]
)
assert_allclose(result, true_result, atol=1e-7)
# ---------------------------------------------------------------
# webb_psf module test:
#
def test_webb_psf():
"""Test of PSF() in the webb_psf module:
    Create a Throughput datamodel with dummy filter bandpass data
that peaks at 1.0 at the center and decreases in the wings.
"""
min_wl = 5000.0 # lowest wavelength
max_wl = 100000.0 # highest wavelength
nelem = 28
wavelength = np.linspace(min_wl, max_wl, nelem, endpoint=True, dtype=np.float32)
throughput = create_throughput(nelem)
dtype = np.dtype([('wavelength', '<f4'), ('throughput', '<f4')])
filt_tab = np.array(list(zip(wavelength, throughput)), dtype=dtype)
filter_model = datamodels.ThroughputModel(filter_table=filt_tab)
bindown = 4
band = webb_psf.get_webbpsf_filter(filter_model, specbin=bindown)
true_band = np.array(
[
[4.05621603e-01, 1.37962969e-06],
[8.10614496e-01, 2.78703703e-06],
[9.50576201e-01, 4.19444444e-06],
[9.74027127e-01, 5.60185185e-06],
[9.01925057e-01, 7.00925932e-06],
[6.51473783e-01, 8.41666679e-06],
]
)
assert_allclose(band, true_band, atol=1e-7)
# ---------------------------------------------------------------
# utility functions:
@pytest.fixture
def setup_sf():
"""Initialize values for these parameters needed for the analyticnrm2 tests.
Returns
-------
pixel (optional, via **kwargs) : float
pixel scale
fov : integer
number of detector pixels on a side
oversample : integer
oversampling factor
ctrs : float, float
coordinates of hole centers
d : float
hole diameter
lam : float
wavelength
phi : float
distance of fringe from hole center in units of waves
centering : string
if set to 'PIXELCENTERED' or unspecified, the offsets will be set to
(0.5,0.5); if set to 'PIXELCORNER', the offsets will be set to
(0.0,0.0).
aff : Affine2d object
Affine2d object
"""
pixel = 3.1125038327232215e-07
fov = 2
oversample = 3
ctrs = np.array(
[
[0.06864653, -2.63910736],
[-2.28553695, -0.05944972],
[2.31986022, -1.26010406],
[-2.31986022, 1.26010406],
[-1.19424838, 1.94960579],
[2.25121368, 1.3790035],
[1.09127858, 2.00905525],
]
)
d = 0.8
lam = 2.3965000082171173e-06
phi = np.zeros(7, dtype=np.float32)
centering = (0.5, 0.5)
aff_obj = utils.Affine2d(rotradccw=0.4)
return pixel, fov, oversample, ctrs, d, lam, phi, centering, aff_obj
def setup_hexee():
"""Initialize values for parameters needed for the hexee tests.
Returns
-------
xi : 2D float array
hexagon's coordinate center at center of symmetry, along flat edge
eta : 2D float array
hexagon's coordinate center at center of symmetry, normal to xi;
(not currently used)
c (optional, via **kwargs) : tuple(float, float)
coordinates of center
pixel (optional, via **kwargs) : float
pixel scale
d (optional, via **kwargs) : float
flat-to-flat distance across hexagon
lam : (optional, via **kwargs) : float
wavelength
minus : (optional, via **kwargs) boolean
if set, use flipped sign of xi in calculation
"""
xdim, ydim = 3, 3
xi = np.zeros(ydim * xdim).reshape((ydim, xdim))
eta = np.zeros(ydim * xdim).reshape((ydim, xdim))
for ii in range(ydim):
xi[ii, :] = ii
eta[:, ii] = ii
kwargs = {
'd': 0.8,
'c': (28.0, 28.0),
'lam': 2.3965000082171173e-06,
'pixel': 1.0375012775744072e-07,
'minus': False,
}
return xi, eta, kwargs
def create_throughput(nelem):
"""Create a symmetric dummy throughput function that has values near
0 on the wings and near 1 at the center.
"""
ctr = int(nelem / 2.0)
lower_half = [2.0 / (1.0 + math.e ** (-5.0 * i / ctr)) - 1.0 for i in range(ctr)]
throughput = np.zeros(nelem, dtype=np.float32)
throughput[:ctr] = lower_half
throughput[ctr:] = lower_half[::-1] # mirror image for upper half
return throughput
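# A minimal sanity-check sketch (not part of the original suite), assuming
# assert_allclose is available at module level as in the tests above: verify
# that create_throughput() is symmetric, near 0 on the wings, and near 1 at
# the center.
def test_create_throughput_properties():
    """Sanity check of the dummy throughput built by create_throughput()."""
    nelem = 28
    throughput = create_throughput(nelem)
    assert throughput.shape == (nelem,)
    # the mirror-image construction implies symmetry
    assert_allclose(throughput, throughput[::-1], atol=1e-7)
    assert throughput[0] < 0.1  # wing
    assert throughput[nelem // 2 - 1] > 0.9  # center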
--- NVIDIA/DeepLearningExamples :: /TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/balanced_positive_negative_sampler.py (Apache-2.0, Python, 12,648 bytes) ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is subsample(self, indicator, batch_size, labels).
For convenience one can also call SubsampleWeights(self, weights, labels),
which is defined in the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures that the length of the subsample output is always batch_size,
even when the number of examples set to True in indicator is less than
batch_size.
This was originally implemented in the TensorFlow Object Detection API.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import minibatch_sampler, ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
      sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
        the signed indices of the examples, where the sign is based on the
        label value. The examples that cannot be sampled are set to 0. It
        samples at most sample_size*positive_fraction positive examples and
        the rest from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(input=sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input=input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile-time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. It ensures the length of output of the subsample is always
batch_size, even when number of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
      raise ValueError('indicator must be static in shape when is_static is '
                       'True')
if not labels.shape.is_fully_defined():
      raise ValueError('labels must be static in shape when is_static is '
                       'True')
if not isinstance(batch_size, int):
      raise ValueError('batch_size has to be an integer when is_static is '
                       'True.')
input_length = tf.shape(input=indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(input_tensor=tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random.shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
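    # Worked example (illustrative, not from the original source): with
    # indicator = [T, T, F, T] and labels = [+, -, -, +], indicator_idx is
    # [1, 2, 0, 4] and signed_indicator_idx is [1, -2, 0, 4]; top_k then
    # yields [4, 1, 0, -2] -- positive indices first, zeros (unsampled
    # entries) in the middle, negated negative indices at the end.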
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
input_tensor=tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
      batch_size: desired batch size. If None, keeps all positive samples and
        randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(input_tensor=tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.cast(
negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32)
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
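# A minimal usage sketch (illustration only, not part of the original file),
# assuming TF2 eager execution: draw a balanced minibatch of 8 from 16
# eligible examples, 4 of which are positive.
def _example_balanced_subsample():
  indicator = tf.cast(tf.ones(16), tf.bool)  # every example is eligible
  labels = tf.cast(tf.concat([tf.ones(4), tf.zeros(12)], axis=0), tf.bool)
  sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
  sampled = sampler.subsample(indicator, batch_size=8, labels=labels)
  # At most 4 of the 8 sampled entries are positive; the rest are negative.
  return tf.reduce_sum(
      input_tensor=tf.cast(tf.logical_and(sampled, labels), tf.int32))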
--- renatahodovan/grammarinator :: /grammarinator/runtime/dispatching_listener.py (BSD-3-Clause, Python, 1,104 bytes) ---
# Copyright (c) 2020-2023 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from .listener import Listener
class DispatchingListener(Listener):
"""
Base class of custom listeners that aim to override the enter and exit
    actions for specific rules. Subclassing ``DispatchingListener`` makes it
    possible to define the enter and exit methods of a rule in the form of
``[enter|exit]_{rule_name}``.
"""
def enter_rule(self, node):
"""
Trampoline to the ``enter_{node.name}`` method of the subclassed listener, if it is defined.
"""
fn = 'enter_' + node.name
if hasattr(self, fn):
getattr(self, fn)(node)
def exit_rule(self, node):
"""
Trampoline to the ``exit_{node.name}`` method of the subclassed listener, if it is defined.
"""
fn = 'exit_' + node.name
if hasattr(self, fn):
getattr(self, fn)(node)
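# A minimal usage sketch (illustration only, not part of the original module):
# a listener that reacts to a hypothetical grammar rule named "expression".
class _ExamplePrintingListener(DispatchingListener):
    def enter_expression(self, node):
        # dispatched by enter_rule when a node named 'expression' is entered
        print('entering expression')
    def exit_expression(self, node):
        # dispatched by exit_rule when the same node is left
        print('leaving expression')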
--- TencentCloud/tencentcloud-sdk-python :: /tencentcloud/dlc/v20210125/models.py (Apache-2.0, Python, 572,416 bytes) ---
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddDMSPartitionsRequest(AbstractModel):
"""AddDMSPartitions请求参数结构体
"""
def __init__(self):
r"""
        :param _Partitions: Partitions
:type Partitions: list of DMSPartition
"""
self._Partitions = None
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
def _deserialize(self, params):
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AddDMSPartitionsResponse(AbstractModel):
"""AddDMSPartitions返回参数结构体
"""
def __init__(self):
r"""
        :param _Total: Number of partitions added successfully
        :type Total: int
        :param _Partitions: Partition values
        :type Partitions: list of DMSPartition
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._Total = None
self._Partitions = None
self._RequestId = None
@property
def Total(self):
return self._Total
@Total.setter
def Total(self, Total):
self._Total = Total
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Total = params.get("Total")
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._RequestId = params.get("RequestId")
class AddUsersToWorkGroupRequest(AbstractModel):
"""AddUsersToWorkGroup请求参数结构体
"""
def __init__(self):
r"""
        :param _AddInfo: Work group and user information to operate on
:type AddInfo: :class:`tencentcloud.dlc.v20210125.models.UserIdSetOfWorkGroupId`
"""
self._AddInfo = None
@property
def AddInfo(self):
return self._AddInfo
@AddInfo.setter
def AddInfo(self, AddInfo):
self._AddInfo = AddInfo
def _deserialize(self, params):
if params.get("AddInfo") is not None:
self._AddInfo = UserIdSetOfWorkGroupId()
self._AddInfo._deserialize(params.get("AddInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AddUsersToWorkGroupResponse(AbstractModel):
"""AddUsersToWorkGroup返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class AlterDMSDatabaseRequest(AbstractModel):
"""AlterDMSDatabase请求参数结构体
"""
def __init__(self):
r"""
        :param _CurrentName: Current name
        :type CurrentName: str
        :param _SchemaName: Schema name
        :type SchemaName: str
        :param _Location: Path
        :type Location: str
        :param _Asset: Base object
:type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
"""
self._CurrentName = None
self._SchemaName = None
self._Location = None
self._Asset = None
@property
def CurrentName(self):
return self._CurrentName
@CurrentName.setter
def CurrentName(self, CurrentName):
self._CurrentName = CurrentName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
def _deserialize(self, params):
self._CurrentName = params.get("CurrentName")
self._SchemaName = params.get("SchemaName")
self._Location = params.get("Location")
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AlterDMSDatabaseResponse(AbstractModel):
"""AlterDMSDatabase返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class AlterDMSPartitionRequest(AbstractModel):
"""AlterDMSPartition请求参数结构体
"""
def __init__(self):
r"""
        :param _CurrentDbName: Current name, i.e. the database name before the change
        :type CurrentDbName: str
        :param _CurrentTableName: Current name, i.e. the table name before the change
        :type CurrentTableName: str
        :param _CurrentValues: Current name, i.e. the partition name before the change
        :type CurrentValues: str
        :param _Partition: Partition
:type Partition: :class:`tencentcloud.dlc.v20210125.models.DMSPartition`
"""
self._CurrentDbName = None
self._CurrentTableName = None
self._CurrentValues = None
self._Partition = None
@property
def CurrentDbName(self):
return self._CurrentDbName
@CurrentDbName.setter
def CurrentDbName(self, CurrentDbName):
self._CurrentDbName = CurrentDbName
@property
def CurrentTableName(self):
return self._CurrentTableName
@CurrentTableName.setter
def CurrentTableName(self, CurrentTableName):
self._CurrentTableName = CurrentTableName
@property
def CurrentValues(self):
return self._CurrentValues
@CurrentValues.setter
def CurrentValues(self, CurrentValues):
self._CurrentValues = CurrentValues
@property
def Partition(self):
return self._Partition
@Partition.setter
def Partition(self, Partition):
self._Partition = Partition
def _deserialize(self, params):
self._CurrentDbName = params.get("CurrentDbName")
self._CurrentTableName = params.get("CurrentTableName")
self._CurrentValues = params.get("CurrentValues")
if params.get("Partition") is not None:
self._Partition = DMSPartition()
self._Partition._deserialize(params.get("Partition"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AlterDMSPartitionResponse(AbstractModel):
"""AlterDMSPartition返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class AlterDMSTableRequest(AbstractModel):
"""AlterDMSTable请求参数结构体
"""
def __init__(self):
r"""
        :param _CurrentName: Current name
        :type CurrentName: str
        :param _CurrentDbName: Current database name
        :type CurrentDbName: str
        :param _Asset: Base object
        :type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
        :param _Type: Table type
        :type Type: str
        :param _DbName: Database name
        :type DbName: str
        :param _StorageSize: Storage size
        :type StorageSize: int
        :param _RecordCount: Record count
        :type RecordCount: int
        :param _LifeTime: Lifetime
        :type LifeTime: int
        :param _DataUpdateTime: Data update time
        :type DataUpdateTime: str
        :param _StructUpdateTime: Structure update time
        :type StructUpdateTime: str
        :param _LastAccessTime: Last access time
        :type LastAccessTime: str
        :param _Sds: Storage object
        :type Sds: :class:`tencentcloud.dlc.v20210125.models.DMSSds`
        :param _Columns: Columns
        :type Columns: list of DMSColumn
        :param _PartitionKeys: Partition keys
        :type PartitionKeys: list of DMSColumn
        :param _ViewOriginalText: Original view text
        :type ViewOriginalText: str
        :param _ViewExpandedText: Expanded view text
        :type ViewExpandedText: str
        :param _Partitions: Partitions
        :type Partitions: list of DMSPartition
        :param _Name: Current table name
:type Name: str
"""
self._CurrentName = None
self._CurrentDbName = None
self._Asset = None
self._Type = None
self._DbName = None
self._StorageSize = None
self._RecordCount = None
self._LifeTime = None
self._DataUpdateTime = None
self._StructUpdateTime = None
self._LastAccessTime = None
self._Sds = None
self._Columns = None
self._PartitionKeys = None
self._ViewOriginalText = None
self._ViewExpandedText = None
self._Partitions = None
self._Name = None
@property
def CurrentName(self):
return self._CurrentName
@CurrentName.setter
def CurrentName(self, CurrentName):
self._CurrentName = CurrentName
@property
def CurrentDbName(self):
return self._CurrentDbName
@CurrentDbName.setter
def CurrentDbName(self, CurrentDbName):
self._CurrentDbName = CurrentDbName
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def LifeTime(self):
return self._LifeTime
@LifeTime.setter
def LifeTime(self, LifeTime):
self._LifeTime = LifeTime
@property
def DataUpdateTime(self):
return self._DataUpdateTime
@DataUpdateTime.setter
def DataUpdateTime(self, DataUpdateTime):
self._DataUpdateTime = DataUpdateTime
@property
def StructUpdateTime(self):
return self._StructUpdateTime
@StructUpdateTime.setter
def StructUpdateTime(self, StructUpdateTime):
self._StructUpdateTime = StructUpdateTime
@property
def LastAccessTime(self):
return self._LastAccessTime
@LastAccessTime.setter
def LastAccessTime(self, LastAccessTime):
self._LastAccessTime = LastAccessTime
@property
def Sds(self):
return self._Sds
@Sds.setter
def Sds(self, Sds):
self._Sds = Sds
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def PartitionKeys(self):
return self._PartitionKeys
@PartitionKeys.setter
def PartitionKeys(self, PartitionKeys):
self._PartitionKeys = PartitionKeys
@property
def ViewOriginalText(self):
return self._ViewOriginalText
@ViewOriginalText.setter
def ViewOriginalText(self, ViewOriginalText):
self._ViewOriginalText = ViewOriginalText
@property
def ViewExpandedText(self):
return self._ViewExpandedText
@ViewExpandedText.setter
def ViewExpandedText(self, ViewExpandedText):
self._ViewExpandedText = ViewExpandedText
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
def _deserialize(self, params):
self._CurrentName = params.get("CurrentName")
self._CurrentDbName = params.get("CurrentDbName")
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
self._Type = params.get("Type")
self._DbName = params.get("DbName")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._LifeTime = params.get("LifeTime")
self._DataUpdateTime = params.get("DataUpdateTime")
self._StructUpdateTime = params.get("StructUpdateTime")
self._LastAccessTime = params.get("LastAccessTime")
if params.get("Sds") is not None:
self._Sds = DMSSds()
self._Sds._deserialize(params.get("Sds"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = DMSColumn()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("PartitionKeys") is not None:
self._PartitionKeys = []
for item in params.get("PartitionKeys"):
obj = DMSColumn()
obj._deserialize(item)
self._PartitionKeys.append(obj)
self._ViewOriginalText = params.get("ViewOriginalText")
self._ViewExpandedText = params.get("ViewExpandedText")
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Name = params.get("Name")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AlterDMSTableResponse(AbstractModel):
"""AlterDMSTable返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class Asset(AbstractModel):
"""元数据基本对象
"""
def __init__(self):
r"""
        :param _Id: Primary key
Note: this field may return null, indicating that no valid value can be obtained.
        :type Id: int
        :param _Name: Name
Note: this field may return null, indicating that no valid value can be obtained.
        :type Name: str
        :param _Guid: Object GUID value
Note: this field may return null, indicating that no valid value can be obtained.
        :type Guid: str
        :param _Catalog: Data catalog
Note: this field may return null, indicating that no valid value can be obtained.
        :type Catalog: str
        :param _Description: Description
        :type Description: str
        :param _Owner: Object owner
        :type Owner: str
        :param _OwnerAccount: Object owner account
        :type OwnerAccount: str
        :param _PermValues: Permissions
        :type PermValues: list of KVPair
        :param _Params: Additional properties
        :type Params: list of KVPair
        :param _BizParams: Additional business properties
        :type BizParams: list of KVPair
        :param _DataVersion: Data version
        :type DataVersion: int
        :param _CreateTime: Creation time
        :type CreateTime: str
        :param _ModifiedTime: Modification time
        :type ModifiedTime: str
        :param _DatasourceId: Data source primary key
        :type DatasourceId: int
"""
self._Id = None
self._Name = None
self._Guid = None
self._Catalog = None
self._Description = None
self._Owner = None
self._OwnerAccount = None
self._PermValues = None
self._Params = None
self._BizParams = None
self._DataVersion = None
self._CreateTime = None
self._ModifiedTime = None
self._DatasourceId = None
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Guid(self):
return self._Guid
@Guid.setter
def Guid(self, Guid):
self._Guid = Guid
@property
def Catalog(self):
return self._Catalog
@Catalog.setter
def Catalog(self, Catalog):
self._Catalog = Catalog
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Owner(self):
return self._Owner
@Owner.setter
def Owner(self, Owner):
self._Owner = Owner
@property
def OwnerAccount(self):
return self._OwnerAccount
@OwnerAccount.setter
def OwnerAccount(self, OwnerAccount):
self._OwnerAccount = OwnerAccount
@property
def PermValues(self):
return self._PermValues
@PermValues.setter
def PermValues(self, PermValues):
self._PermValues = PermValues
@property
def Params(self):
return self._Params
@Params.setter
def Params(self, Params):
self._Params = Params
@property
def BizParams(self):
return self._BizParams
@BizParams.setter
def BizParams(self, BizParams):
self._BizParams = BizParams
@property
def DataVersion(self):
return self._DataVersion
@DataVersion.setter
def DataVersion(self, DataVersion):
self._DataVersion = DataVersion
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
@property
def DatasourceId(self):
return self._DatasourceId
@DatasourceId.setter
def DatasourceId(self, DatasourceId):
self._DatasourceId = DatasourceId
def _deserialize(self, params):
self._Id = params.get("Id")
self._Name = params.get("Name")
self._Guid = params.get("Guid")
self._Catalog = params.get("Catalog")
self._Description = params.get("Description")
self._Owner = params.get("Owner")
self._OwnerAccount = params.get("OwnerAccount")
if params.get("PermValues") is not None:
self._PermValues = []
for item in params.get("PermValues"):
obj = KVPair()
obj._deserialize(item)
self._PermValues.append(obj)
if params.get("Params") is not None:
self._Params = []
for item in params.get("Params"):
obj = KVPair()
obj._deserialize(item)
self._Params.append(obj)
if params.get("BizParams") is not None:
self._BizParams = []
for item in params.get("BizParams"):
obj = KVPair()
obj._deserialize(item)
self._BizParams.append(obj)
self._DataVersion = params.get("DataVersion")
self._CreateTime = params.get("CreateTime")
self._ModifiedTime = params.get("ModifiedTime")
self._DatasourceId = params.get("DatasourceId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AttachUserPolicyRequest(AbstractModel):
"""AttachUserPolicy请求参数结构体
"""
def __init__(self):
r"""
        :param _UserId: User ID, same as the sub-user UIN; the user must first be created with the CreateUser API and can be viewed with the DescribeUsers API.
        :type UserId: str
        :param _PolicySet: Set of authorization policies
:type PolicySet: list of Policy
"""
self._UserId = None
self._PolicySet = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
def _deserialize(self, params):
self._UserId = params.get("UserId")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AttachUserPolicyResponse(AbstractModel):
"""AttachUserPolicy返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class AttachWorkGroupPolicyRequest(AbstractModel):
"""AttachWorkGroupPolicy请求参数结构体
"""
def __init__(self):
r"""
        :param _WorkGroupId: Work group ID
        :type WorkGroupId: int
        :param _PolicySet: Set of policies to bind
:type PolicySet: list of Policy
"""
self._WorkGroupId = None
self._PolicySet = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AttachWorkGroupPolicyResponse(AbstractModel):
"""AttachWorkGroupPolicy返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class BindWorkGroupsToUserRequest(AbstractModel):
"""BindWorkGroupsToUser请求参数结构体
"""
def __init__(self):
r"""
        :param _AddInfo: User and work group binding information
:type AddInfo: :class:`tencentcloud.dlc.v20210125.models.WorkGroupIdSetOfUserId`
"""
self._AddInfo = None
@property
def AddInfo(self):
return self._AddInfo
@AddInfo.setter
def AddInfo(self, AddInfo):
self._AddInfo = AddInfo
def _deserialize(self, params):
if params.get("AddInfo") is not None:
self._AddInfo = WorkGroupIdSetOfUserId()
self._AddInfo._deserialize(params.get("AddInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class BindWorkGroupsToUserResponse(AbstractModel):
"""BindWorkGroupsToUser返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CSV(AbstractModel):
"""CSV类型数据格式
"""
def __init__(self):
r"""
        :param _CodeCompress: Compression format, one of ["Snappy", "Gzip", "None"].
        :type CodeCompress: str
        :param _CSVSerde: CSV serialization/deserialization structure.
        :type CSVSerde: :class:`tencentcloud.dlc.v20210125.models.CSVSerde`
        :param _HeadLines: Number of header lines, 0 by default.
        :type HeadLines: int
        :param _Format: Format, CSV by default
:type Format: str
"""
self._CodeCompress = None
self._CSVSerde = None
self._HeadLines = None
self._Format = None
@property
def CodeCompress(self):
return self._CodeCompress
@CodeCompress.setter
def CodeCompress(self, CodeCompress):
self._CodeCompress = CodeCompress
@property
def CSVSerde(self):
return self._CSVSerde
@CSVSerde.setter
def CSVSerde(self, CSVSerde):
self._CSVSerde = CSVSerde
@property
def HeadLines(self):
return self._HeadLines
@HeadLines.setter
def HeadLines(self, HeadLines):
self._HeadLines = HeadLines
@property
def Format(self):
return self._Format
@Format.setter
def Format(self, Format):
self._Format = Format
def _deserialize(self, params):
self._CodeCompress = params.get("CodeCompress")
if params.get("CSVSerde") is not None:
self._CSVSerde = CSVSerde()
self._CSVSerde._deserialize(params.get("CSVSerde"))
self._HeadLines = params.get("HeadLines")
self._Format = params.get("Format")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CSVSerde(AbstractModel):
"""CSV序列化及反序列化数据结构
"""
def __init__(self):
r"""
        :param _Escape: CSV escape character, "\\" by default, at most 8 characters, e.g. Escape: "/\"
        :type Escape: str
        :param _Quote: CSV quote character, "'" by default, at most 8 characters, e.g. Quote: "\""
        :type Quote: str
        :param _Separator: CSV field separator, "\t" by default, at most 8 characters, e.g. Separator: "\t"
:type Separator: str
"""
self._Escape = None
self._Quote = None
self._Separator = None
@property
def Escape(self):
return self._Escape
@Escape.setter
def Escape(self, Escape):
self._Escape = Escape
@property
def Quote(self):
return self._Quote
@Quote.setter
def Quote(self, Quote):
self._Quote = Quote
@property
def Separator(self):
return self._Separator
@Separator.setter
def Separator(self, Separator):
self._Separator = Separator
def _deserialize(self, params):
self._Escape = params.get("Escape")
self._Quote = params.get("Quote")
self._Separator = params.get("Separator")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
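# A minimal usage sketch (illustration only, not part of the generated SDK):
# describing a Gzip-compressed, tab-separated CSV with one header line, using
# the CSV/CSVSerde models defined above.
def _example_csv_format():
    serde = CSVSerde()
    serde.Separator = "\t"
    fmt = CSV()
    fmt.CodeCompress = "Gzip"
    fmt.CSVSerde = serde
    fmt.HeadLines = 1
    return fmt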
class CancelNotebookSessionStatementBatchRequest(AbstractModel):
"""CancelNotebookSessionStatementBatch请求参数结构体
"""
def __init__(self):
r"""
        :param _SessionId: Unique session identifier
        :type SessionId: str
        :param _BatchId: Unique batch task identifier
:type BatchId: str
"""
self._SessionId = None
self._BatchId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CancelNotebookSessionStatementBatchResponse(AbstractModel):
"""CancelNotebookSessionStatementBatch返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CancelNotebookSessionStatementRequest(AbstractModel):
"""CancelNotebookSessionStatement请求参数结构体
"""
def __init__(self):
r"""
        :param _SessionId: Unique session identifier
        :type SessionId: str
        :param _StatementId: Unique session statement identifier
:type StatementId: str
"""
self._SessionId = None
self._StatementId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def StatementId(self):
return self._StatementId
@StatementId.setter
def StatementId(self, StatementId):
self._StatementId = StatementId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._StatementId = params.get("StatementId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CancelNotebookSessionStatementResponse(AbstractModel):
"""CancelNotebookSessionStatement返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CancelSparkSessionBatchSQLRequest(AbstractModel):
"""CancelSparkSessionBatchSQL请求参数结构体
"""
def __init__(self):
r"""
        :param _BatchId: Unique batch task identifier
:type BatchId: str
"""
self._BatchId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CancelSparkSessionBatchSQLResponse(AbstractModel):
"""CancelSparkSessionBatchSQL返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CancelTaskRequest(AbstractModel):
"""CancelTask请求参数结构体
"""
def __init__(self):
r"""
        :param _TaskId: Task ID, globally unique
:type TaskId: str
"""
self._TaskId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CancelTaskResponse(AbstractModel):
"""CancelTask返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
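# A minimal usage sketch (illustration only, not part of the generated SDK):
# round-tripping a request model through JSON, assuming AbstractModel exposes
# from_json_string()/to_json_string() as in the published SDK.
def _example_round_trip():
    req = CancelTaskRequest()
    req.from_json_string('{"TaskId": "task-123"}')
    assert req.TaskId == "task-123"
    return req.to_json_string()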
class CheckLockMetaDataRequest(AbstractModel):
"""CheckLockMetaData请求参数结构体
"""
def __init__(self):
r"""
        :param _LockId: Lock ID
        :type LockId: int
        :param _DatasourceConnectionName: Data source name
        :type DatasourceConnectionName: str
        :param _TxnId: Transaction ID
        :type TxnId: int
        :param _ElapsedMs: Expiration time, in ms
:type ElapsedMs: int
"""
self._LockId = None
self._DatasourceConnectionName = None
self._TxnId = None
self._ElapsedMs = None
@property
def LockId(self):
return self._LockId
@LockId.setter
def LockId(self, LockId):
self._LockId = LockId
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def TxnId(self):
return self._TxnId
@TxnId.setter
def TxnId(self, TxnId):
self._TxnId = TxnId
@property
def ElapsedMs(self):
return self._ElapsedMs
@ElapsedMs.setter
def ElapsedMs(self, ElapsedMs):
self._ElapsedMs = ElapsedMs
def _deserialize(self, params):
self._LockId = params.get("LockId")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._TxnId = params.get("TxnId")
self._ElapsedMs = params.get("ElapsedMs")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckLockMetaDataResponse(AbstractModel):
"""CheckLockMetaData返回参数结构体
"""
def __init__(self):
r"""
        :param _LockId: Lock ID
        :type LockId: int
        :param _LockState: Lock state: ACQUIRED, WAITING, ABORT, NOT_ACQUIRED
        :type LockState: str
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._LockId = None
self._LockState = None
self._RequestId = None
@property
def LockId(self):
return self._LockId
@LockId.setter
def LockId(self, LockId):
self._LockId = LockId
@property
def LockState(self):
return self._LockState
@LockState.setter
def LockState(self, LockState):
self._LockState = LockState
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._LockId = params.get("LockId")
self._LockState = params.get("LockState")
self._RequestId = params.get("RequestId")
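# A minimal usage sketch (illustration only, not part of the generated SDK):
# interpreting a CheckLockMetaData response, again assuming AbstractModel's
# from_json_string() helper from the published SDK.
def _example_lock_acquired(resp_json):
    resp = CheckLockMetaDataResponse()
    resp.from_json_string(resp_json)
    # LockState is one of ACQUIRED / WAITING / ABORT / NOT_ACQUIRED
    return resp.LockState == "ACQUIRED"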
class Column(AbstractModel):
"""数据表列信息。
"""
def __init__(self):
r"""
        :param _Name: Column name, case-insensitive, at most 25 characters.
        :type Name: str
        :param _Type: Column type; the following type definitions are supported:
string|tinyint|smallint|int|bigint|boolean|float|double|decimal|timestamp|date|binary|array<data_type>|map<primitive_type, data_type>|struct<col_name : data_type [COMMENT col_comment], ...>|uniontype<data_type, data_type, ...>.
        :type Type: str
        :param _Comment: Comment on the column.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Comment: str
        :param _Precision: Length of the whole numeric value
Note: this field may return null, indicating that no valid value can be obtained.
        :type Precision: int
        :param _Scale: Length of the fractional part
Note: this field may return null, indicating that no valid value can be obtained.
        :type Scale: int
        :param _Nullable: Whether the column is nullable
Note: this field may return null, indicating that no valid value can be obtained.
        :type Nullable: str
        :param _Position: Field position; smaller values come first
Note: this field may return null, indicating that no valid value can be obtained.
        :type Position: int
        :param _CreateTime: Field creation time
Note: this field may return null, indicating that no valid value can be obtained.
        :type CreateTime: str
        :param _ModifiedTime: Field modification time
Note: this field may return null, indicating that no valid value can be obtained.
        :type ModifiedTime: str
        :param _IsPartition: Whether the field is a partition field
Note: this field may return null, indicating that no valid value can be obtained.
        :type IsPartition: bool
"""
self._Name = None
self._Type = None
self._Comment = None
self._Precision = None
self._Scale = None
self._Nullable = None
self._Position = None
self._CreateTime = None
self._ModifiedTime = None
self._IsPartition = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def Precision(self):
return self._Precision
@Precision.setter
def Precision(self, Precision):
self._Precision = Precision
@property
def Scale(self):
return self._Scale
@Scale.setter
def Scale(self, Scale):
self._Scale = Scale
@property
def Nullable(self):
return self._Nullable
@Nullable.setter
def Nullable(self, Nullable):
self._Nullable = Nullable
@property
def Position(self):
return self._Position
@Position.setter
def Position(self, Position):
self._Position = Position
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
@property
def IsPartition(self):
return self._IsPartition
@IsPartition.setter
def IsPartition(self, IsPartition):
self._IsPartition = IsPartition
def _deserialize(self, params):
self._Name = params.get("Name")
self._Type = params.get("Type")
self._Comment = params.get("Comment")
self._Precision = params.get("Precision")
self._Scale = params.get("Scale")
self._Nullable = params.get("Nullable")
self._Position = params.get("Position")
self._CreateTime = params.get("CreateTime")
self._ModifiedTime = params.get("ModifiedTime")
self._IsPartition = params.get("IsPartition")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CommonMetrics(AbstractModel):
"""任务公共指标
"""
def __init__(self):
r"""
        :param _CreateTaskTime: Task creation duration, in ms
Note: this field may return null, indicating that no valid value can be obtained.
        :type CreateTaskTime: float
        :param _ProcessTime: Total preprocessing duration, in ms
Note: this field may return null, indicating that no valid value can be obtained.
        :type ProcessTime: float
        :param _QueueTime: Queuing duration, in ms
Note: this field may return null, indicating that no valid value can be obtained.
        :type QueueTime: float
        :param _ExecutionTime: Execution duration, in ms
Note: this field may return null, indicating that no valid value can be obtained.
        :type ExecutionTime: float
        :param _IsResultCacheHit: Whether the result cache was hit
Note: this field may return null, indicating that no valid value can be obtained.
        :type IsResultCacheHit: bool
        :param _MatchedMVBytes: Amount of materialized view data matched
Note: this field may return null, indicating that no valid value can be obtained.
        :type MatchedMVBytes: int
        :param _MatchedMVs: List of matched materialized views
Note: this field may return null, indicating that no valid value can be obtained.
        :type MatchedMVs: str
        :param _AffectedBytes: Result data size, in bytes
Note: this field may return null, indicating that no valid value can be obtained.
        :type AffectedBytes: str
        :param _AffectedRows: Number of result rows
Note: this field may return null, indicating that no valid value can be obtained.
        :type AffectedRows: int
        :param _ProcessedBytes: Amount of data scanned, in bytes
Note: this field may return null, indicating that no valid value can be obtained.
        :type ProcessedBytes: int
        :param _ProcessedRows: Number of rows scanned
Note: this field may return null, indicating that no valid value can be obtained.
        :type ProcessedRows: int
"""
self._CreateTaskTime = None
self._ProcessTime = None
self._QueueTime = None
self._ExecutionTime = None
self._IsResultCacheHit = None
self._MatchedMVBytes = None
self._MatchedMVs = None
self._AffectedBytes = None
self._AffectedRows = None
self._ProcessedBytes = None
self._ProcessedRows = None
@property
def CreateTaskTime(self):
return self._CreateTaskTime
@CreateTaskTime.setter
def CreateTaskTime(self, CreateTaskTime):
self._CreateTaskTime = CreateTaskTime
@property
def ProcessTime(self):
return self._ProcessTime
@ProcessTime.setter
def ProcessTime(self, ProcessTime):
self._ProcessTime = ProcessTime
@property
def QueueTime(self):
return self._QueueTime
@QueueTime.setter
def QueueTime(self, QueueTime):
self._QueueTime = QueueTime
@property
def ExecutionTime(self):
return self._ExecutionTime
@ExecutionTime.setter
def ExecutionTime(self, ExecutionTime):
self._ExecutionTime = ExecutionTime
@property
def IsResultCacheHit(self):
return self._IsResultCacheHit
@IsResultCacheHit.setter
def IsResultCacheHit(self, IsResultCacheHit):
self._IsResultCacheHit = IsResultCacheHit
@property
def MatchedMVBytes(self):
return self._MatchedMVBytes
@MatchedMVBytes.setter
def MatchedMVBytes(self, MatchedMVBytes):
self._MatchedMVBytes = MatchedMVBytes
@property
def MatchedMVs(self):
return self._MatchedMVs
@MatchedMVs.setter
def MatchedMVs(self, MatchedMVs):
self._MatchedMVs = MatchedMVs
@property
def AffectedBytes(self):
return self._AffectedBytes
@AffectedBytes.setter
def AffectedBytes(self, AffectedBytes):
self._AffectedBytes = AffectedBytes
@property
def AffectedRows(self):
return self._AffectedRows
@AffectedRows.setter
def AffectedRows(self, AffectedRows):
self._AffectedRows = AffectedRows
@property
def ProcessedBytes(self):
return self._ProcessedBytes
@ProcessedBytes.setter
def ProcessedBytes(self, ProcessedBytes):
self._ProcessedBytes = ProcessedBytes
@property
def ProcessedRows(self):
return self._ProcessedRows
@ProcessedRows.setter
def ProcessedRows(self, ProcessedRows):
self._ProcessedRows = ProcessedRows
def _deserialize(self, params):
self._CreateTaskTime = params.get("CreateTaskTime")
self._ProcessTime = params.get("ProcessTime")
self._QueueTime = params.get("QueueTime")
self._ExecutionTime = params.get("ExecutionTime")
self._IsResultCacheHit = params.get("IsResultCacheHit")
self._MatchedMVBytes = params.get("MatchedMVBytes")
self._MatchedMVs = params.get("MatchedMVs")
self._AffectedBytes = params.get("AffectedBytes")
self._AffectedRows = params.get("AffectedRows")
self._ProcessedBytes = params.get("ProcessedBytes")
self._ProcessedRows = params.get("ProcessedRows")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CosPermission(AbstractModel):
"""cos权限描述
"""
def __init__(self):
r"""
        :param _CosPath: COS path
Note: this field may return null, indicating that no valid value can be obtained.
        :type CosPath: str
        :param _Permissions: Permissions: ["read", "write"]
Note: this field may return null, indicating that no valid value can be obtained.
:type Permissions: list of str
"""
self._CosPath = None
self._Permissions = None
@property
def CosPath(self):
return self._CosPath
@CosPath.setter
def CosPath(self, CosPath):
self._CosPath = CosPath
@property
def Permissions(self):
return self._Permissions
@Permissions.setter
def Permissions(self, Permissions):
self._Permissions = Permissions
def _deserialize(self, params):
self._CosPath = params.get("CosPath")
self._Permissions = params.get("Permissions")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDMSDatabaseRequest(AbstractModel):
"""CreateDMSDatabase请求参数结构体
"""
def __init__(self):
r"""
        :param _Asset: Basic metadata object
        :type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
        :param _SchemaName: Schema directory
        :type SchemaName: str
        :param _Location: Database storage path
        :type Location: str
        :param _Name: Database name
:type Name: str
"""
self._Asset = None
self._SchemaName = None
self._Location = None
self._Name = None
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
def _deserialize(self, params):
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
self._SchemaName = params.get("SchemaName")
self._Location = params.get("Location")
self._Name = params.get("Name")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDMSDatabaseResponse(AbstractModel):
"""CreateDMSDatabase返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CreateDMSTableRequest(AbstractModel):
"""CreateDMSTable请求参数结构体
"""
def __init__(self):
r"""
        :param _Asset: Base object
        :type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
        :param _Type: Table type
        :type Type: str
        :param _DbName: Database name
        :type DbName: str
        :param _StorageSize: Storage size
        :type StorageSize: int
        :param _RecordCount: Record count
        :type RecordCount: int
        :param _LifeTime: Lifetime
        :type LifeTime: int
        :param _DataUpdateTime: Data update time
        :type DataUpdateTime: str
        :param _StructUpdateTime: Structure update time
        :type StructUpdateTime: str
        :param _LastAccessTime: Last access time
        :type LastAccessTime: str
        :param _Sds: Storage object
        :type Sds: :class:`tencentcloud.dlc.v20210125.models.DMSSds`
        :param _Columns: Columns
        :type Columns: list of DMSColumn
        :param _PartitionKeys: Partition keys
        :type PartitionKeys: list of DMSColumn
        :param _ViewOriginalText: Original view text
        :type ViewOriginalText: str
        :param _ViewExpandedText: Expanded view text
        :type ViewExpandedText: str
        :param _Partitions: Partitions
        :type Partitions: list of DMSPartition
        :param _Name: Table name
:type Name: str
"""
self._Asset = None
self._Type = None
self._DbName = None
self._StorageSize = None
self._RecordCount = None
self._LifeTime = None
self._DataUpdateTime = None
self._StructUpdateTime = None
self._LastAccessTime = None
self._Sds = None
self._Columns = None
self._PartitionKeys = None
self._ViewOriginalText = None
self._ViewExpandedText = None
self._Partitions = None
self._Name = None
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def LifeTime(self):
return self._LifeTime
@LifeTime.setter
def LifeTime(self, LifeTime):
self._LifeTime = LifeTime
@property
def DataUpdateTime(self):
return self._DataUpdateTime
@DataUpdateTime.setter
def DataUpdateTime(self, DataUpdateTime):
self._DataUpdateTime = DataUpdateTime
@property
def StructUpdateTime(self):
return self._StructUpdateTime
@StructUpdateTime.setter
def StructUpdateTime(self, StructUpdateTime):
self._StructUpdateTime = StructUpdateTime
@property
def LastAccessTime(self):
return self._LastAccessTime
@LastAccessTime.setter
def LastAccessTime(self, LastAccessTime):
self._LastAccessTime = LastAccessTime
@property
def Sds(self):
return self._Sds
@Sds.setter
def Sds(self, Sds):
self._Sds = Sds
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def PartitionKeys(self):
return self._PartitionKeys
@PartitionKeys.setter
def PartitionKeys(self, PartitionKeys):
self._PartitionKeys = PartitionKeys
@property
def ViewOriginalText(self):
return self._ViewOriginalText
@ViewOriginalText.setter
def ViewOriginalText(self, ViewOriginalText):
self._ViewOriginalText = ViewOriginalText
@property
def ViewExpandedText(self):
return self._ViewExpandedText
@ViewExpandedText.setter
def ViewExpandedText(self, ViewExpandedText):
self._ViewExpandedText = ViewExpandedText
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
def _deserialize(self, params):
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
self._Type = params.get("Type")
self._DbName = params.get("DbName")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._LifeTime = params.get("LifeTime")
self._DataUpdateTime = params.get("DataUpdateTime")
self._StructUpdateTime = params.get("StructUpdateTime")
self._LastAccessTime = params.get("LastAccessTime")
if params.get("Sds") is not None:
self._Sds = DMSSds()
self._Sds._deserialize(params.get("Sds"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = DMSColumn()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("PartitionKeys") is not None:
self._PartitionKeys = []
for item in params.get("PartitionKeys"):
obj = DMSColumn()
obj._deserialize(item)
self._PartitionKeys.append(obj)
self._ViewOriginalText = params.get("ViewOriginalText")
self._ViewExpandedText = params.get("ViewExpandedText")
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Name = params.get("Name")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDMSTableResponse(AbstractModel):
"""CreateDMSTable返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CreateDataEngineRequest(AbstractModel):
"""CreateDataEngine请求参数结构体
"""
def __init__(self):
r"""
:param _EngineType: 引擎类型spark/presto
:type EngineType: str
:param _DataEngineName: 虚拟集群名称
:type DataEngineName: str
:param _ClusterType: 集群类型 spark_private/presto_private/presto_cu/spark_cu
:type ClusterType: str
:param _Mode: 计费模式 0=共享模式 1=按量计费 2=包年包月
:type Mode: int
:param _AutoResume: 是否自动启动集群
:type AutoResume: bool
:param _MinClusters: 最小资源
:type MinClusters: int
:param _MaxClusters: 最大资源
:type MaxClusters: int
:param _DefaultDataEngine: 是否为默认虚拟集群
:type DefaultDataEngine: bool
:param _CidrBlock: VPC网段
:type CidrBlock: str
:param _Message: 描述信息
:type Message: str
:param _Size: 集群规模
:type Size: int
:param _PayMode: 计费类型,后付费:0,预付费:1。当前只支持后付费,不填默认为后付费。
:type PayMode: int
:param _TimeSpan: 资源使用时长,后付费:固定填3600,预付费:最少填1,代表购买资源一个月,最长不超过120。默认3600
:type TimeSpan: int
:param _TimeUnit: 资源使用时长的单位,后付费:s,预付费:m。默认为s
:type TimeUnit: str
:param _AutoRenew: 资源的自动续费标志。后付费无需续费,固定填0;预付费下:0表示手动续费、1代表自动续费、2代表不续费,在0下如果是大客户,会自动帮大客户续费。默认为0
:type AutoRenew: int
:param _Tags: 创建资源的时候需要绑定的标签信息
:type Tags: list of TagInfo
:param _AutoSuspend: 是否自定挂起集群:false(默认):不自动挂起、true:自动挂起
:type AutoSuspend: bool
:param _CrontabResumeSuspend: 定时启停集群策略:0(默认):关闭定时策略、1:开启定时策略(注:定时启停策略与自动挂起策略互斥)
:type CrontabResumeSuspend: int
:param _CrontabResumeSuspendStrategy: 定时启停策略,复杂类型:包含启停时间、挂起集群策略
:type CrontabResumeSuspendStrategy: :class:`tencentcloud.dlc.v20210125.models.CrontabResumeSuspendStrategy`
:param _EngineExecType: 引擎执行任务类型,有效值:SQL/BATCH,默认为SQL
:type EngineExecType: str
:param _MaxConcurrency: 单个集群最大并发任务数,默认5
:type MaxConcurrency: int
:param _TolerableQueueTime: 可容忍的排队时间,默认0。当任务排队的时间超过可容忍的时间时可能会触发扩容。如果该参数为0,则表示一旦有任务排队就可能立即触发扩容。
:type TolerableQueueTime: int
:param _AutoSuspendTime: 集群自动挂起时间,默认10分钟
:type AutoSuspendTime: int
:param _ResourceType: 资源类型。Standard_CU:标准型;Memory_CU:内存型
:type ResourceType: str
:param _DataEngineConfigPairs: 集群高级配置
:type DataEngineConfigPairs: list of DataEngineConfigPair
:param _ImageVersionName: 集群镜像版本名字。如SuperSQL-P 1.1;SuperSQL-S 3.2等,不传,默认创建最新镜像版本的集群
:type ImageVersionName: str
:param _MainClusterName: 主集群名称,创建容灾集群时指定
:type MainClusterName: str
:param _ElasticSwitch: spark jar 包年包月集群是否开启弹性
:type ElasticSwitch: bool
:param _ElasticLimit: spark jar 包年包月集群弹性上限
:type ElasticLimit: int
:param _SessionResourceTemplate: spark作业集群session资源配置模板
:type SessionResourceTemplate: :class:`tencentcloud.dlc.v20210125.models.SessionResourceTemplate`
"""
self._EngineType = None
self._DataEngineName = None
self._ClusterType = None
self._Mode = None
self._AutoResume = None
self._MinClusters = None
self._MaxClusters = None
self._DefaultDataEngine = None
self._CidrBlock = None
self._Message = None
self._Size = None
self._PayMode = None
self._TimeSpan = None
self._TimeUnit = None
self._AutoRenew = None
self._Tags = None
self._AutoSuspend = None
self._CrontabResumeSuspend = None
self._CrontabResumeSuspendStrategy = None
self._EngineExecType = None
self._MaxConcurrency = None
self._TolerableQueueTime = None
self._AutoSuspendTime = None
self._ResourceType = None
self._DataEngineConfigPairs = None
self._ImageVersionName = None
self._MainClusterName = None
self._ElasticSwitch = None
self._ElasticLimit = None
self._SessionResourceTemplate = None
@property
def EngineType(self):
return self._EngineType
@EngineType.setter
def EngineType(self, EngineType):
self._EngineType = EngineType
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def ClusterType(self):
return self._ClusterType
@ClusterType.setter
def ClusterType(self, ClusterType):
self._ClusterType = ClusterType
@property
def Mode(self):
return self._Mode
@Mode.setter
def Mode(self, Mode):
self._Mode = Mode
@property
def AutoResume(self):
return self._AutoResume
@AutoResume.setter
def AutoResume(self, AutoResume):
self._AutoResume = AutoResume
@property
def MinClusters(self):
return self._MinClusters
@MinClusters.setter
def MinClusters(self, MinClusters):
self._MinClusters = MinClusters
@property
def MaxClusters(self):
return self._MaxClusters
@MaxClusters.setter
def MaxClusters(self, MaxClusters):
self._MaxClusters = MaxClusters
@property
def DefaultDataEngine(self):
warnings.warn("parameter `DefaultDataEngine` is deprecated", DeprecationWarning)
return self._DefaultDataEngine
@DefaultDataEngine.setter
def DefaultDataEngine(self, DefaultDataEngine):
warnings.warn("parameter `DefaultDataEngine` is deprecated", DeprecationWarning)
self._DefaultDataEngine = DefaultDataEngine
@property
def CidrBlock(self):
return self._CidrBlock
@CidrBlock.setter
def CidrBlock(self, CidrBlock):
self._CidrBlock = CidrBlock
@property
def Message(self):
return self._Message
@Message.setter
def Message(self, Message):
self._Message = Message
@property
def Size(self):
return self._Size
@Size.setter
def Size(self, Size):
self._Size = Size
@property
def PayMode(self):
return self._PayMode
@PayMode.setter
def PayMode(self, PayMode):
self._PayMode = PayMode
@property
def TimeSpan(self):
return self._TimeSpan
@TimeSpan.setter
def TimeSpan(self, TimeSpan):
self._TimeSpan = TimeSpan
@property
def TimeUnit(self):
return self._TimeUnit
@TimeUnit.setter
def TimeUnit(self, TimeUnit):
self._TimeUnit = TimeUnit
@property
def AutoRenew(self):
return self._AutoRenew
@AutoRenew.setter
def AutoRenew(self, AutoRenew):
self._AutoRenew = AutoRenew
@property
def Tags(self):
return self._Tags
@Tags.setter
def Tags(self, Tags):
self._Tags = Tags
@property
def AutoSuspend(self):
return self._AutoSuspend
@AutoSuspend.setter
def AutoSuspend(self, AutoSuspend):
self._AutoSuspend = AutoSuspend
@property
def CrontabResumeSuspend(self):
return self._CrontabResumeSuspend
@CrontabResumeSuspend.setter
def CrontabResumeSuspend(self, CrontabResumeSuspend):
self._CrontabResumeSuspend = CrontabResumeSuspend
@property
def CrontabResumeSuspendStrategy(self):
return self._CrontabResumeSuspendStrategy
@CrontabResumeSuspendStrategy.setter
def CrontabResumeSuspendStrategy(self, CrontabResumeSuspendStrategy):
self._CrontabResumeSuspendStrategy = CrontabResumeSuspendStrategy
@property
def EngineExecType(self):
return self._EngineExecType
@EngineExecType.setter
def EngineExecType(self, EngineExecType):
self._EngineExecType = EngineExecType
@property
def MaxConcurrency(self):
return self._MaxConcurrency
@MaxConcurrency.setter
def MaxConcurrency(self, MaxConcurrency):
self._MaxConcurrency = MaxConcurrency
@property
def TolerableQueueTime(self):
return self._TolerableQueueTime
@TolerableQueueTime.setter
def TolerableQueueTime(self, TolerableQueueTime):
self._TolerableQueueTime = TolerableQueueTime
@property
def AutoSuspendTime(self):
return self._AutoSuspendTime
@AutoSuspendTime.setter
def AutoSuspendTime(self, AutoSuspendTime):
self._AutoSuspendTime = AutoSuspendTime
@property
def ResourceType(self):
return self._ResourceType
@ResourceType.setter
def ResourceType(self, ResourceType):
self._ResourceType = ResourceType
@property
def DataEngineConfigPairs(self):
return self._DataEngineConfigPairs
@DataEngineConfigPairs.setter
def DataEngineConfigPairs(self, DataEngineConfigPairs):
self._DataEngineConfigPairs = DataEngineConfigPairs
@property
def ImageVersionName(self):
return self._ImageVersionName
@ImageVersionName.setter
def ImageVersionName(self, ImageVersionName):
self._ImageVersionName = ImageVersionName
@property
def MainClusterName(self):
return self._MainClusterName
@MainClusterName.setter
def MainClusterName(self, MainClusterName):
self._MainClusterName = MainClusterName
@property
def ElasticSwitch(self):
return self._ElasticSwitch
@ElasticSwitch.setter
def ElasticSwitch(self, ElasticSwitch):
self._ElasticSwitch = ElasticSwitch
@property
def ElasticLimit(self):
return self._ElasticLimit
@ElasticLimit.setter
def ElasticLimit(self, ElasticLimit):
self._ElasticLimit = ElasticLimit
@property
def SessionResourceTemplate(self):
return self._SessionResourceTemplate
@SessionResourceTemplate.setter
def SessionResourceTemplate(self, SessionResourceTemplate):
self._SessionResourceTemplate = SessionResourceTemplate
def _deserialize(self, params):
self._EngineType = params.get("EngineType")
self._DataEngineName = params.get("DataEngineName")
self._ClusterType = params.get("ClusterType")
self._Mode = params.get("Mode")
self._AutoResume = params.get("AutoResume")
self._MinClusters = params.get("MinClusters")
self._MaxClusters = params.get("MaxClusters")
self._DefaultDataEngine = params.get("DefaultDataEngine")
self._CidrBlock = params.get("CidrBlock")
self._Message = params.get("Message")
self._Size = params.get("Size")
self._PayMode = params.get("PayMode")
self._TimeSpan = params.get("TimeSpan")
self._TimeUnit = params.get("TimeUnit")
self._AutoRenew = params.get("AutoRenew")
if params.get("Tags") is not None:
self._Tags = []
for item in params.get("Tags"):
obj = TagInfo()
obj._deserialize(item)
self._Tags.append(obj)
self._AutoSuspend = params.get("AutoSuspend")
self._CrontabResumeSuspend = params.get("CrontabResumeSuspend")
if params.get("CrontabResumeSuspendStrategy") is not None:
self._CrontabResumeSuspendStrategy = CrontabResumeSuspendStrategy()
self._CrontabResumeSuspendStrategy._deserialize(params.get("CrontabResumeSuspendStrategy"))
self._EngineExecType = params.get("EngineExecType")
self._MaxConcurrency = params.get("MaxConcurrency")
self._TolerableQueueTime = params.get("TolerableQueueTime")
self._AutoSuspendTime = params.get("AutoSuspendTime")
self._ResourceType = params.get("ResourceType")
if params.get("DataEngineConfigPairs") is not None:
self._DataEngineConfigPairs = []
for item in params.get("DataEngineConfigPairs"):
obj = DataEngineConfigPair()
obj._deserialize(item)
self._DataEngineConfigPairs.append(obj)
self._ImageVersionName = params.get("ImageVersionName")
self._MainClusterName = params.get("MainClusterName")
self._ElasticSwitch = params.get("ElasticSwitch")
self._ElasticLimit = params.get("ElasticLimit")
if params.get("SessionResourceTemplate") is not None:
self._SessionResourceTemplate = SessionResourceTemplate()
self._SessionResourceTemplate._deserialize(params.get("SessionResourceTemplate"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDataEngineResponse(AbstractModel):
"""CreateDataEngine返回参数结构体
"""
def __init__(self):
r"""
:param _DataEngineId: 虚拟引擎id
:type DataEngineId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DataEngineId = None
self._RequestId = None
@property
def DataEngineId(self):
return self._DataEngineId
@DataEngineId.setter
def DataEngineId(self, DataEngineId):
self._DataEngineId = DataEngineId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._DataEngineId = params.get("DataEngineId")
self._RequestId = params.get("RequestId")
class CreateDatabaseRequest(AbstractModel):
"""CreateDatabase请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseInfo: 数据库基础信息
:type DatabaseInfo: :class:`tencentcloud.dlc.v20210125.models.DatabaseInfo`
:param _DatasourceConnectionName: 数据源名称,默认为DataLakeCatalog
:type DatasourceConnectionName: str
"""
self._DatabaseInfo = None
self._DatasourceConnectionName = None
@property
def DatabaseInfo(self):
return self._DatabaseInfo
@DatabaseInfo.setter
def DatabaseInfo(self, DatabaseInfo):
self._DatabaseInfo = DatabaseInfo
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
def _deserialize(self, params):
if params.get("DatabaseInfo") is not None:
self._DatabaseInfo = DatabaseInfo()
self._DatabaseInfo._deserialize(params.get("DatabaseInfo"))
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDatabaseResponse(AbstractModel):
"""CreateDatabase返回参数结构体
"""
def __init__(self):
r"""
:param _Execution: 生成的建库执行语句对象。
:type Execution: :class:`tencentcloud.dlc.v20210125.models.Execution`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Execution = None
self._RequestId = None
@property
def Execution(self):
return self._Execution
@Execution.setter
def Execution(self, Execution):
self._Execution = Execution
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Execution") is not None:
self._Execution = Execution()
self._Execution._deserialize(params.get("Execution"))
self._RequestId = params.get("RequestId")
class CreateExportTaskRequest(AbstractModel):
"""CreateExportTask请求参数结构体
"""
def __init__(self):
r"""
:param _InputType: 数据来源,lakefsStorage、taskResult
:type InputType: str
:param _InputConf: 导出任务输入配置
:type InputConf: list of KVPair
:param _OutputConf: 导出任务输出配置
:type OutputConf: list of KVPair
:param _OutputType: 目标数据源的类型,目前支持导出到cos
:type OutputType: str
"""
self._InputType = None
self._InputConf = None
self._OutputConf = None
self._OutputType = None
@property
def InputType(self):
return self._InputType
@InputType.setter
def InputType(self, InputType):
self._InputType = InputType
@property
def InputConf(self):
return self._InputConf
@InputConf.setter
def InputConf(self, InputConf):
self._InputConf = InputConf
@property
def OutputConf(self):
return self._OutputConf
@OutputConf.setter
def OutputConf(self, OutputConf):
self._OutputConf = OutputConf
@property
def OutputType(self):
return self._OutputType
@OutputType.setter
def OutputType(self, OutputType):
self._OutputType = OutputType
def _deserialize(self, params):
self._InputType = params.get("InputType")
if params.get("InputConf") is not None:
self._InputConf = []
for item in params.get("InputConf"):
obj = KVPair()
obj._deserialize(item)
self._InputConf.append(obj)
if params.get("OutputConf") is not None:
self._OutputConf = []
for item in params.get("OutputConf"):
obj = KVPair()
obj._deserialize(item)
self._OutputConf.append(obj)
self._OutputType = params.get("OutputType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateExportTaskResponse(AbstractModel):
"""CreateExportTask返回参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务id
:type TaskId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskId = None
self._RequestId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._RequestId = params.get("RequestId")
class CreateImportTaskRequest(AbstractModel):
"""CreateImportTask请求参数结构体
"""
def __init__(self):
r"""
:param _InputType: 数据来源,cos
:type InputType: str
:param _InputConf: 输入配置
:type InputConf: list of KVPair
:param _OutputConf: 输出配置
:type OutputConf: list of KVPair
:param _OutputType: 目标数据源的类型,目前支持导入到托管存储,即lakefsStorage
:type OutputType: str
"""
self._InputType = None
self._InputConf = None
self._OutputConf = None
self._OutputType = None
@property
def InputType(self):
return self._InputType
@InputType.setter
def InputType(self, InputType):
self._InputType = InputType
@property
def InputConf(self):
return self._InputConf
@InputConf.setter
def InputConf(self, InputConf):
self._InputConf = InputConf
@property
def OutputConf(self):
return self._OutputConf
@OutputConf.setter
def OutputConf(self, OutputConf):
self._OutputConf = OutputConf
@property
def OutputType(self):
return self._OutputType
@OutputType.setter
def OutputType(self, OutputType):
self._OutputType = OutputType
def _deserialize(self, params):
self._InputType = params.get("InputType")
if params.get("InputConf") is not None:
self._InputConf = []
for item in params.get("InputConf"):
obj = KVPair()
obj._deserialize(item)
self._InputConf.append(obj)
if params.get("OutputConf") is not None:
self._OutputConf = []
for item in params.get("OutputConf"):
obj = KVPair()
obj._deserialize(item)
self._OutputConf.append(obj)
self._OutputType = params.get("OutputType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateImportTaskResponse(AbstractModel):
"""CreateImportTask返回参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务id
:type TaskId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskId = None
self._RequestId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._RequestId = params.get("RequestId")
class CreateInternalTableRequest(AbstractModel):
"""CreateInternalTable请求参数结构体
"""
def __init__(self):
r"""
:param _TableBaseInfo: 表基本信息
:type TableBaseInfo: :class:`tencentcloud.dlc.v20210125.models.TableBaseInfo`
:param _Columns: 表字段信息
:type Columns: list of TColumn
:param _Partitions: 表分区信息
:type Partitions: list of TPartition
:param _Properties: 表属性信息
:type Properties: list of Property
"""
self._TableBaseInfo = None
self._Columns = None
self._Partitions = None
self._Properties = None
@property
def TableBaseInfo(self):
return self._TableBaseInfo
@TableBaseInfo.setter
def TableBaseInfo(self, TableBaseInfo):
self._TableBaseInfo = TableBaseInfo
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
def _deserialize(self, params):
if params.get("TableBaseInfo") is not None:
self._TableBaseInfo = TableBaseInfo()
self._TableBaseInfo._deserialize(params.get("TableBaseInfo"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = TColumn()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = TPartition()
obj._deserialize(item)
self._Partitions.append(obj)
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateInternalTableResponse(AbstractModel):
"""CreateInternalTable返回参数结构体
"""
def __init__(self):
r"""
:param _Execution: 创建托管存储内表sql语句描述
:type Execution: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Execution = None
self._RequestId = None
@property
def Execution(self):
return self._Execution
@Execution.setter
def Execution(self, Execution):
self._Execution = Execution
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Execution = params.get("Execution")
self._RequestId = params.get("RequestId")
class CreateNotebookSessionRequest(AbstractModel):
"""CreateNotebookSession请求参数结构体
"""
def __init__(self):
r"""
:param _Name: Session名称
:type Name: str
:param _Kind: 类型,当前支持:spark、pyspark、sparkr、sql
:type Kind: str
:param _DataEngineName: DLC Spark作业引擎名称
:type DataEngineName: str
:param _ProgramDependentFiles: session文件地址,当前支持:cosn://和lakefs://两种路径
:type ProgramDependentFiles: list of str
:param _ProgramDependentJars: 依赖的jar程序地址,当前支持:cosn://和lakefs://两种路径
:type ProgramDependentJars: list of str
:param _ProgramDependentPython: 依赖的python程序地址,当前支持:cosn://和lakefs://两种路径
:type ProgramDependentPython: list of str
:param _ProgramArchives: 依赖的pyspark虚拟环境地址,当前支持:cosn://和lakefs://两种路径
:type ProgramArchives: list of str
:param _DriverSize: 指定的Driver规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type DriverSize: str
:param _ExecutorSize: 指定的Executor规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type ExecutorSize: str
:param _ExecutorNumbers: 指定的Executor数量,默认为1
:type ExecutorNumbers: int
:param _Arguments: Session相关配置,当前支持:
1. dlc.eni: 用户配置的eni网关信息,可以通过该字段设置;
2. dlc.role.arn: 用户配置的roleArn鉴权策略配置信息,可以通过该字段设置;
3. dlc.sql.set.config: 用户配置的集群配置信息,可以通过该字段设置;
:type Arguments: list of KVPair
:param _ProxyUser: 代理用户,默认为root
:type ProxyUser: str
:param _TimeoutInSecond: 指定的Session超时时间,单位秒,默认3600秒
:type TimeoutInSecond: int
:param _ExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于ExecutorNumbers
:type ExecutorMaxNumbers: int
:param _SparkImage: 指定spark版本名称,当前任务使用该spark镜像运行
:type SparkImage: str
:param _IsInherit: 是否继承集群的资源类配置:0:自定义(默认),1:继承集群;
:type IsInherit: int
"""
self._Name = None
self._Kind = None
self._DataEngineName = None
self._ProgramDependentFiles = None
self._ProgramDependentJars = None
self._ProgramDependentPython = None
self._ProgramArchives = None
self._DriverSize = None
self._ExecutorSize = None
self._ExecutorNumbers = None
self._Arguments = None
self._ProxyUser = None
self._TimeoutInSecond = None
self._ExecutorMaxNumbers = None
self._SparkImage = None
self._IsInherit = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Kind(self):
return self._Kind
@Kind.setter
def Kind(self, Kind):
self._Kind = Kind
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def ProgramDependentFiles(self):
return self._ProgramDependentFiles
@ProgramDependentFiles.setter
def ProgramDependentFiles(self, ProgramDependentFiles):
self._ProgramDependentFiles = ProgramDependentFiles
@property
def ProgramDependentJars(self):
return self._ProgramDependentJars
@ProgramDependentJars.setter
def ProgramDependentJars(self, ProgramDependentJars):
self._ProgramDependentJars = ProgramDependentJars
@property
def ProgramDependentPython(self):
return self._ProgramDependentPython
@ProgramDependentPython.setter
def ProgramDependentPython(self, ProgramDependentPython):
self._ProgramDependentPython = ProgramDependentPython
@property
def ProgramArchives(self):
return self._ProgramArchives
@ProgramArchives.setter
def ProgramArchives(self, ProgramArchives):
self._ProgramArchives = ProgramArchives
@property
def DriverSize(self):
return self._DriverSize
@DriverSize.setter
def DriverSize(self, DriverSize):
self._DriverSize = DriverSize
@property
def ExecutorSize(self):
return self._ExecutorSize
@ExecutorSize.setter
def ExecutorSize(self, ExecutorSize):
self._ExecutorSize = ExecutorSize
@property
def ExecutorNumbers(self):
return self._ExecutorNumbers
@ExecutorNumbers.setter
def ExecutorNumbers(self, ExecutorNumbers):
self._ExecutorNumbers = ExecutorNumbers
@property
def Arguments(self):
return self._Arguments
@Arguments.setter
def Arguments(self, Arguments):
self._Arguments = Arguments
@property
def ProxyUser(self):
return self._ProxyUser
@ProxyUser.setter
def ProxyUser(self, ProxyUser):
self._ProxyUser = ProxyUser
@property
def TimeoutInSecond(self):
return self._TimeoutInSecond
@TimeoutInSecond.setter
def TimeoutInSecond(self, TimeoutInSecond):
self._TimeoutInSecond = TimeoutInSecond
@property
def ExecutorMaxNumbers(self):
return self._ExecutorMaxNumbers
@ExecutorMaxNumbers.setter
def ExecutorMaxNumbers(self, ExecutorMaxNumbers):
self._ExecutorMaxNumbers = ExecutorMaxNumbers
@property
def SparkImage(self):
return self._SparkImage
@SparkImage.setter
def SparkImage(self, SparkImage):
self._SparkImage = SparkImage
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
def _deserialize(self, params):
self._Name = params.get("Name")
self._Kind = params.get("Kind")
self._DataEngineName = params.get("DataEngineName")
self._ProgramDependentFiles = params.get("ProgramDependentFiles")
self._ProgramDependentJars = params.get("ProgramDependentJars")
self._ProgramDependentPython = params.get("ProgramDependentPython")
self._ProgramArchives = params.get("ProgramArchives")
self._DriverSize = params.get("DriverSize")
self._ExecutorSize = params.get("ExecutorSize")
self._ExecutorNumbers = params.get("ExecutorNumbers")
if params.get("Arguments") is not None:
self._Arguments = []
for item in params.get("Arguments"):
obj = KVPair()
obj._deserialize(item)
self._Arguments.append(obj)
self._ProxyUser = params.get("ProxyUser")
self._TimeoutInSecond = params.get("TimeoutInSecond")
self._ExecutorMaxNumbers = params.get("ExecutorMaxNumbers")
self._SparkImage = params.get("SparkImage")
self._IsInherit = params.get("IsInherit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateNotebookSessionResponse(AbstractModel):
"""CreateNotebookSession返回参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _SparkAppId: Spark任务返回的AppId
注意:此字段可能返回 null,表示取不到有效值。
:type SparkAppId: str
:param _State: Session状态,包含:not_started(未启动)、starting(已启动)、idle(等待输入)、busy(正在运行statement)、shutting_down(停止)、error(异常)、dead(已退出)、killed(被杀死)、success(正常停止)
:type State: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._SessionId = None
self._SparkAppId = None
self._State = None
self._RequestId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._SparkAppId = params.get("SparkAppId")
self._State = params.get("State")
self._RequestId = params.get("RequestId")
class CreateNotebookSessionStatementRequest(AbstractModel):
"""CreateNotebookSessionStatement请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _Code: 执行的代码
:type Code: str
:param _Kind: 类型,当前支持:spark、pyspark、sparkr、sql
:type Kind: str
"""
self._SessionId = None
self._Code = None
self._Kind = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def Code(self):
return self._Code
@Code.setter
def Code(self, Code):
self._Code = Code
@property
def Kind(self):
return self._Kind
@Kind.setter
def Kind(self, Kind):
self._Kind = Kind
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._Code = params.get("Code")
self._Kind = params.get("Kind")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateNotebookSessionStatementResponse(AbstractModel):
"""CreateNotebookSessionStatement返回参数结构体
"""
def __init__(self):
r"""
:param _NotebookSessionStatement: Session Statement详情
:type NotebookSessionStatement: :class:`tencentcloud.dlc.v20210125.models.NotebookSessionStatementInfo`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._NotebookSessionStatement = None
self._RequestId = None
@property
def NotebookSessionStatement(self):
return self._NotebookSessionStatement
@NotebookSessionStatement.setter
def NotebookSessionStatement(self, NotebookSessionStatement):
self._NotebookSessionStatement = NotebookSessionStatement
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("NotebookSessionStatement") is not None:
self._NotebookSessionStatement = NotebookSessionStatementInfo()
self._NotebookSessionStatement._deserialize(params.get("NotebookSessionStatement"))
self._RequestId = params.get("RequestId")
class CreateNotebookSessionStatementSupportBatchSQLRequest(AbstractModel):
"""CreateNotebookSessionStatementSupportBatchSQL请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _Code: 执行的代码
:type Code: str
:param _Kind: 类型,当前支持:sql
:type Kind: str
:param _SaveResult: 是否保存运行结果
:type SaveResult: bool
"""
self._SessionId = None
self._Code = None
self._Kind = None
self._SaveResult = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def Code(self):
return self._Code
@Code.setter
def Code(self, Code):
self._Code = Code
@property
def Kind(self):
return self._Kind
@Kind.setter
def Kind(self, Kind):
self._Kind = Kind
@property
def SaveResult(self):
return self._SaveResult
@SaveResult.setter
def SaveResult(self, SaveResult):
self._SaveResult = SaveResult
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._Code = params.get("Code")
self._Kind = params.get("Kind")
self._SaveResult = params.get("SaveResult")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateNotebookSessionStatementSupportBatchSQLResponse(AbstractModel):
"""CreateNotebookSessionStatementSupportBatchSQL返回参数结构体
"""
def __init__(self):
r"""
:param _NotebookSessionStatementBatches: Session Statement详情
:type NotebookSessionStatementBatches: :class:`tencentcloud.dlc.v20210125.models.NotebookSessionStatementBatchInformation`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._NotebookSessionStatementBatches = None
self._RequestId = None
@property
def NotebookSessionStatementBatches(self):
return self._NotebookSessionStatementBatches
@NotebookSessionStatementBatches.setter
def NotebookSessionStatementBatches(self, NotebookSessionStatementBatches):
self._NotebookSessionStatementBatches = NotebookSessionStatementBatches
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("NotebookSessionStatementBatches") is not None:
self._NotebookSessionStatementBatches = NotebookSessionStatementBatchInformation()
self._NotebookSessionStatementBatches._deserialize(params.get("NotebookSessionStatementBatches"))
self._RequestId = params.get("RequestId")
class CreateResultDownloadRequest(AbstractModel):
"""CreateResultDownload请求参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 查询结果任务Id
:type TaskId: str
:param _Format: 下载格式
:type Format: str
:param _Force: 是否重新生成下载文件,仅当之前任务为 Timout | Error 时有效
:type Force: bool
"""
self._TaskId = None
self._Format = None
self._Force = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def Format(self):
return self._Format
@Format.setter
def Format(self, Format):
self._Format = Format
@property
def Force(self):
return self._Force
@Force.setter
def Force(self, Force):
self._Force = Force
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._Format = params.get("Format")
self._Force = params.get("Force")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateResultDownloadResponse(AbstractModel):
"""CreateResultDownload返回参数结构体
"""
def __init__(self):
r"""
:param _DownloadId: 下载任务Id
:type DownloadId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DownloadId = None
self._RequestId = None
@property
def DownloadId(self):
return self._DownloadId
@DownloadId.setter
def DownloadId(self, DownloadId):
self._DownloadId = DownloadId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._DownloadId = params.get("DownloadId")
self._RequestId = params.get("RequestId")
class CreateScriptRequest(AbstractModel):
"""CreateScript请求参数结构体
"""
def __init__(self):
r"""
:param _ScriptName: 脚本名称,最大不能超过255个字符。
:type ScriptName: str
:param _SQLStatement: base64编码后的sql语句
:type SQLStatement: str
:param _ScriptDesc: 脚本描述, 不能超过50个字符
:type ScriptDesc: str
:param _DatabaseName: 数据库名称
:type DatabaseName: str
"""
self._ScriptName = None
self._SQLStatement = None
self._ScriptDesc = None
self._DatabaseName = None
@property
def ScriptName(self):
return self._ScriptName
@ScriptName.setter
def ScriptName(self, ScriptName):
self._ScriptName = ScriptName
@property
def SQLStatement(self):
return self._SQLStatement
@SQLStatement.setter
def SQLStatement(self, SQLStatement):
self._SQLStatement = SQLStatement
@property
def ScriptDesc(self):
return self._ScriptDesc
@ScriptDesc.setter
def ScriptDesc(self, ScriptDesc):
self._ScriptDesc = ScriptDesc
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
def _deserialize(self, params):
self._ScriptName = params.get("ScriptName")
self._SQLStatement = params.get("SQLStatement")
self._ScriptDesc = params.get("ScriptDesc")
self._DatabaseName = params.get("DatabaseName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateScriptResponse(AbstractModel):
"""CreateScript返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CreateSparkAppRequest(AbstractModel):
"""CreateSparkApp请求参数结构体
"""
def __init__(self):
r"""
:param _AppName: spark作业名
:type AppName: str
:param _AppType: spark作业类型,1代表spark jar作业,2代表spark streaming作业
:type AppType: int
:param _DataEngine: 执行spark作业的数据引擎名称
:type DataEngine: str
:param _AppFile: spark作业程序包文件路径
:type AppFile: str
:param _RoleArn: 数据访问策略,CAM Role arn
:type RoleArn: int
:param _AppDriverSize: 指定的Driver规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type AppDriverSize: str
:param _AppExecutorSize: 指定的Executor规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type AppExecutorSize: str
:param _AppExecutorNums: spark作业executor个数
:type AppExecutorNums: int
:param _Eni: 该字段已下线,请使用字段Datasource
:type Eni: str
:param _IsLocal: spark作业程序包是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocal: str
:param _MainClass: spark作业主类
:type MainClass: str
:param _AppConf: spark配置,以换行符分隔
:type AppConf: str
:param _IsLocalJars: spark 作业依赖jar包是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalJars: str
:param _AppJars: spark 作业依赖jar包(--jars),以逗号分隔
:type AppJars: str
:param _IsLocalFiles: spark作业依赖文件资源是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalFiles: str
:param _AppFiles: spark作业依赖文件资源(--files)(非jar、zip),以逗号分隔
:type AppFiles: str
:param _CmdArgs: spark作业程序入参,空格分割
:type CmdArgs: str
:param _MaxRetries: 最大重试次数,只对spark流任务生效
:type MaxRetries: int
:param _DataSource: 数据源名称
:type DataSource: str
:param _IsLocalPythonFiles: pyspark:依赖上传方式,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalPythonFiles: str
:param _AppPythonFiles: pyspark作业依赖python资源(--py-files),支持py/zip/egg等归档格式,多文件以逗号分隔
:type AppPythonFiles: str
:param _IsLocalArchives: spark作业依赖archives资源是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalArchives: str
:param _AppArchives: spark作业依赖archives资源(--archives),支持tar.gz/tgz/tar等归档格式,以逗号分隔
:type AppArchives: str
:param _SparkImage: Spark Image 版本号
:type SparkImage: str
:param _SparkImageVersion: Spark Image 版本名称
:type SparkImageVersion: str
:param _AppExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于AppExecutorNums
:type AppExecutorMaxNumbers: int
:param _SessionId: 关联dlc查询脚本id
:type SessionId: str
:param _IsInherit: 任务资源配置是否继承集群模板,0(默认)不继承,1:继承
:type IsInherit: int
:param _IsSessionStarted: 是否使用session脚本的sql运行任务:false:否,true:是
:type IsSessionStarted: bool
"""
self._AppName = None
self._AppType = None
self._DataEngine = None
self._AppFile = None
self._RoleArn = None
self._AppDriverSize = None
self._AppExecutorSize = None
self._AppExecutorNums = None
self._Eni = None
self._IsLocal = None
self._MainClass = None
self._AppConf = None
self._IsLocalJars = None
self._AppJars = None
self._IsLocalFiles = None
self._AppFiles = None
self._CmdArgs = None
self._MaxRetries = None
self._DataSource = None
self._IsLocalPythonFiles = None
self._AppPythonFiles = None
self._IsLocalArchives = None
self._AppArchives = None
self._SparkImage = None
self._SparkImageVersion = None
self._AppExecutorMaxNumbers = None
self._SessionId = None
self._IsInherit = None
self._IsSessionStarted = None
@property
def AppName(self):
return self._AppName
@AppName.setter
def AppName(self, AppName):
self._AppName = AppName
@property
def AppType(self):
return self._AppType
@AppType.setter
def AppType(self, AppType):
self._AppType = AppType
@property
def DataEngine(self):
return self._DataEngine
@DataEngine.setter
def DataEngine(self, DataEngine):
self._DataEngine = DataEngine
@property
def AppFile(self):
return self._AppFile
@AppFile.setter
def AppFile(self, AppFile):
self._AppFile = AppFile
@property
def RoleArn(self):
return self._RoleArn
@RoleArn.setter
def RoleArn(self, RoleArn):
self._RoleArn = RoleArn
@property
def AppDriverSize(self):
return self._AppDriverSize
@AppDriverSize.setter
def AppDriverSize(self, AppDriverSize):
self._AppDriverSize = AppDriverSize
@property
def AppExecutorSize(self):
return self._AppExecutorSize
@AppExecutorSize.setter
def AppExecutorSize(self, AppExecutorSize):
self._AppExecutorSize = AppExecutorSize
@property
def AppExecutorNums(self):
return self._AppExecutorNums
@AppExecutorNums.setter
def AppExecutorNums(self, AppExecutorNums):
self._AppExecutorNums = AppExecutorNums
@property
def Eni(self):
return self._Eni
@Eni.setter
def Eni(self, Eni):
self._Eni = Eni
@property
def IsLocal(self):
return self._IsLocal
@IsLocal.setter
def IsLocal(self, IsLocal):
self._IsLocal = IsLocal
@property
def MainClass(self):
return self._MainClass
@MainClass.setter
def MainClass(self, MainClass):
self._MainClass = MainClass
@property
def AppConf(self):
return self._AppConf
@AppConf.setter
def AppConf(self, AppConf):
self._AppConf = AppConf
@property
def IsLocalJars(self):
return self._IsLocalJars
@IsLocalJars.setter
def IsLocalJars(self, IsLocalJars):
self._IsLocalJars = IsLocalJars
@property
def AppJars(self):
return self._AppJars
@AppJars.setter
def AppJars(self, AppJars):
self._AppJars = AppJars
@property
def IsLocalFiles(self):
return self._IsLocalFiles
@IsLocalFiles.setter
def IsLocalFiles(self, IsLocalFiles):
self._IsLocalFiles = IsLocalFiles
@property
def AppFiles(self):
return self._AppFiles
@AppFiles.setter
def AppFiles(self, AppFiles):
self._AppFiles = AppFiles
@property
def CmdArgs(self):
return self._CmdArgs
@CmdArgs.setter
def CmdArgs(self, CmdArgs):
self._CmdArgs = CmdArgs
@property
def MaxRetries(self):
return self._MaxRetries
@MaxRetries.setter
def MaxRetries(self, MaxRetries):
self._MaxRetries = MaxRetries
@property
def DataSource(self):
return self._DataSource
@DataSource.setter
def DataSource(self, DataSource):
self._DataSource = DataSource
@property
def IsLocalPythonFiles(self):
return self._IsLocalPythonFiles
@IsLocalPythonFiles.setter
def IsLocalPythonFiles(self, IsLocalPythonFiles):
self._IsLocalPythonFiles = IsLocalPythonFiles
@property
def AppPythonFiles(self):
return self._AppPythonFiles
@AppPythonFiles.setter
def AppPythonFiles(self, AppPythonFiles):
self._AppPythonFiles = AppPythonFiles
@property
def IsLocalArchives(self):
return self._IsLocalArchives
@IsLocalArchives.setter
def IsLocalArchives(self, IsLocalArchives):
self._IsLocalArchives = IsLocalArchives
@property
def AppArchives(self):
return self._AppArchives
@AppArchives.setter
def AppArchives(self, AppArchives):
self._AppArchives = AppArchives
@property
def SparkImage(self):
return self._SparkImage
@SparkImage.setter
def SparkImage(self, SparkImage):
self._SparkImage = SparkImage
@property
def SparkImageVersion(self):
return self._SparkImageVersion
@SparkImageVersion.setter
def SparkImageVersion(self, SparkImageVersion):
self._SparkImageVersion = SparkImageVersion
@property
def AppExecutorMaxNumbers(self):
return self._AppExecutorMaxNumbers
@AppExecutorMaxNumbers.setter
def AppExecutorMaxNumbers(self, AppExecutorMaxNumbers):
self._AppExecutorMaxNumbers = AppExecutorMaxNumbers
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
@property
def IsSessionStarted(self):
return self._IsSessionStarted
@IsSessionStarted.setter
def IsSessionStarted(self, IsSessionStarted):
self._IsSessionStarted = IsSessionStarted
def _deserialize(self, params):
self._AppName = params.get("AppName")
self._AppType = params.get("AppType")
self._DataEngine = params.get("DataEngine")
self._AppFile = params.get("AppFile")
self._RoleArn = params.get("RoleArn")
self._AppDriverSize = params.get("AppDriverSize")
self._AppExecutorSize = params.get("AppExecutorSize")
self._AppExecutorNums = params.get("AppExecutorNums")
self._Eni = params.get("Eni")
self._IsLocal = params.get("IsLocal")
self._MainClass = params.get("MainClass")
self._AppConf = params.get("AppConf")
self._IsLocalJars = params.get("IsLocalJars")
self._AppJars = params.get("AppJars")
self._IsLocalFiles = params.get("IsLocalFiles")
self._AppFiles = params.get("AppFiles")
self._CmdArgs = params.get("CmdArgs")
self._MaxRetries = params.get("MaxRetries")
self._DataSource = params.get("DataSource")
self._IsLocalPythonFiles = params.get("IsLocalPythonFiles")
self._AppPythonFiles = params.get("AppPythonFiles")
self._IsLocalArchives = params.get("IsLocalArchives")
self._AppArchives = params.get("AppArchives")
self._SparkImage = params.get("SparkImage")
self._SparkImageVersion = params.get("SparkImageVersion")
self._AppExecutorMaxNumbers = params.get("AppExecutorMaxNumbers")
self._SessionId = params.get("SessionId")
self._IsInherit = params.get("IsInherit")
self._IsSessionStarted = params.get("IsSessionStarted")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateSparkAppResponse(AbstractModel):
"""CreateSparkApp返回参数结构体
"""
def __init__(self):
r"""
:param _SparkAppId: App唯一标识
注意:此字段可能返回 null,表示取不到有效值。
:type SparkAppId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._SparkAppId = None
self._RequestId = None
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._SparkAppId = params.get("SparkAppId")
self._RequestId = params.get("RequestId")
class CreateSparkAppTaskRequest(AbstractModel):
"""CreateSparkAppTask请求参数结构体
"""
def __init__(self):
r"""
:param _JobName: spark作业名
:type JobName: str
:param _CmdArgs: spark作业程序入参,以空格分隔;一般用于周期性调用使用
:type CmdArgs: str
"""
self._JobName = None
self._CmdArgs = None
@property
def JobName(self):
return self._JobName
@JobName.setter
def JobName(self, JobName):
self._JobName = JobName
@property
def CmdArgs(self):
return self._CmdArgs
@CmdArgs.setter
def CmdArgs(self, CmdArgs):
self._CmdArgs = CmdArgs
def _deserialize(self, params):
self._JobName = params.get("JobName")
self._CmdArgs = params.get("CmdArgs")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateSparkAppTaskResponse(AbstractModel):
"""CreateSparkAppTask返回参数结构体
"""
def __init__(self):
r"""
:param _BatchId: 批Id
:type BatchId: str
:param _TaskId: 任务Id
:type TaskId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._BatchId = None
self._TaskId = None
self._RequestId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
self._TaskId = params.get("TaskId")
self._RequestId = params.get("RequestId")
class CreateSparkSessionBatchSQLRequest(AbstractModel):
"""CreateSparkSessionBatchSQL请求参数结构体
"""
def __init__(self):
r"""
:param _DataEngineName: DLC Spark作业引擎名称
:type DataEngineName: str
:param _ExecuteSQL: 运行sql
:type ExecuteSQL: str
:param _DriverSize: 指定的Driver规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type DriverSize: str
:param _ExecutorSize: 指定的Executor规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type ExecutorSize: str
:param _ExecutorNumbers: 指定的Executor数量,默认为1
:type ExecutorNumbers: int
:param _ExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于ExecutorNumbers
:type ExecutorMaxNumbers: int
:param _TimeoutInSecond: 指定的Session超时时间,单位秒,默认3600秒
:type TimeoutInSecond: int
:param _SessionId: Session唯一标识,当指定sessionid,则使用该session运行任务。
:type SessionId: str
:param _SessionName: 指定要创建的session名称
:type SessionName: str
:param _Arguments: Session相关配置,当前支持:1.dlc.eni:用户配置的eni网关信息,可以用过该字段设置;
2.dlc.role.arn:用户配置的roleArn鉴权策略配置信息,可以用过该字段设置;
3.dlc.sql.set.config:用户配置的集群配置信息,可以用过该字段设置;
:type Arguments: list of KVPair
:param _IsInherit: 是否继承集群的资源类配置:0:自定义(默认),1:继承集群;
:type IsInherit: int
"""
self._DataEngineName = None
self._ExecuteSQL = None
self._DriverSize = None
self._ExecutorSize = None
self._ExecutorNumbers = None
self._ExecutorMaxNumbers = None
self._TimeoutInSecond = None
self._SessionId = None
self._SessionName = None
self._Arguments = None
self._IsInherit = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def ExecuteSQL(self):
return self._ExecuteSQL
@ExecuteSQL.setter
def ExecuteSQL(self, ExecuteSQL):
self._ExecuteSQL = ExecuteSQL
@property
def DriverSize(self):
return self._DriverSize
@DriverSize.setter
def DriverSize(self, DriverSize):
self._DriverSize = DriverSize
@property
def ExecutorSize(self):
return self._ExecutorSize
@ExecutorSize.setter
def ExecutorSize(self, ExecutorSize):
self._ExecutorSize = ExecutorSize
@property
def ExecutorNumbers(self):
return self._ExecutorNumbers
@ExecutorNumbers.setter
def ExecutorNumbers(self, ExecutorNumbers):
self._ExecutorNumbers = ExecutorNumbers
@property
def ExecutorMaxNumbers(self):
return self._ExecutorMaxNumbers
@ExecutorMaxNumbers.setter
def ExecutorMaxNumbers(self, ExecutorMaxNumbers):
self._ExecutorMaxNumbers = ExecutorMaxNumbers
@property
def TimeoutInSecond(self):
return self._TimeoutInSecond
@TimeoutInSecond.setter
def TimeoutInSecond(self, TimeoutInSecond):
self._TimeoutInSecond = TimeoutInSecond
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def SessionName(self):
return self._SessionName
@SessionName.setter
def SessionName(self, SessionName):
self._SessionName = SessionName
@property
def Arguments(self):
return self._Arguments
@Arguments.setter
def Arguments(self, Arguments):
self._Arguments = Arguments
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._ExecuteSQL = params.get("ExecuteSQL")
self._DriverSize = params.get("DriverSize")
self._ExecutorSize = params.get("ExecutorSize")
self._ExecutorNumbers = params.get("ExecutorNumbers")
self._ExecutorMaxNumbers = params.get("ExecutorMaxNumbers")
self._TimeoutInSecond = params.get("TimeoutInSecond")
self._SessionId = params.get("SessionId")
self._SessionName = params.get("SessionName")
if params.get("Arguments") is not None:
self._Arguments = []
for item in params.get("Arguments"):
obj = KVPair()
obj._deserialize(item)
self._Arguments.append(obj)
self._IsInherit = params.get("IsInherit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateSparkSessionBatchSQLResponse(AbstractModel):
"""CreateSparkSessionBatchSQL返回参数结构体
"""
def __init__(self):
r"""
:param _BatchId: 批任务唯一标识
:type BatchId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._BatchId = None
self._RequestId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
self._RequestId = params.get("RequestId")
class CreateStoreLocationRequest(AbstractModel):
"""CreateStoreLocation请求参数结构体
"""
def __init__(self):
r"""
:param _StoreLocation: 计算结果存储cos路径,如:cosn://bucketname/
:type StoreLocation: str
"""
self._StoreLocation = None
@property
def StoreLocation(self):
return self._StoreLocation
@StoreLocation.setter
def StoreLocation(self, StoreLocation):
self._StoreLocation = StoreLocation
def _deserialize(self, params):
self._StoreLocation = params.get("StoreLocation")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateStoreLocationResponse(AbstractModel):
"""CreateStoreLocation返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CreateTableRequest(AbstractModel):
"""CreateTable请求参数结构体
"""
def __init__(self):
r"""
:param _TableInfo: 数据表配置信息
:type TableInfo: :class:`tencentcloud.dlc.v20210125.models.TableInfo`
"""
self._TableInfo = None
@property
def TableInfo(self):
return self._TableInfo
@TableInfo.setter
def TableInfo(self, TableInfo):
self._TableInfo = TableInfo
def _deserialize(self, params):
if params.get("TableInfo") is not None:
self._TableInfo = TableInfo()
self._TableInfo._deserialize(params.get("TableInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateTableResponse(AbstractModel):
"""CreateTable返回参数结构体
"""
def __init__(self):
r"""
:param _Execution: 生成的建表执行语句对象。
:type Execution: :class:`tencentcloud.dlc.v20210125.models.Execution`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Execution = None
self._RequestId = None
@property
def Execution(self):
return self._Execution
@Execution.setter
def Execution(self, Execution):
self._Execution = Execution
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Execution") is not None:
self._Execution = Execution()
self._Execution._deserialize(params.get("Execution"))
self._RequestId = params.get("RequestId")
class CreateTaskRequest(AbstractModel):
"""CreateTask请求参数结构体
"""
def __init__(self):
r"""
:param _Task: 计算任务,该参数中包含任务类型及其相关配置信息
:type Task: :class:`tencentcloud.dlc.v20210125.models.Task`
:param _DatabaseName: 数据库名称。如果SQL语句中有数据库名称,优先使用SQL语句中的数据库,否则使用该参数指定的数据库(注:当提交建库sql时,该字段传空字符串)。
:type DatabaseName: str
:param _DatasourceConnectionName: 默认数据源名称。
:type DatasourceConnectionName: str
:param _DataEngineName: 数据引擎名称,不填提交到默认集群
:type DataEngineName: str
"""
self._Task = None
self._DatabaseName = None
self._DatasourceConnectionName = None
self._DataEngineName = None
@property
def Task(self):
return self._Task
@Task.setter
def Task(self, Task):
self._Task = Task
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
def _deserialize(self, params):
if params.get("Task") is not None:
self._Task = Task()
self._Task._deserialize(params.get("Task"))
self._DatabaseName = params.get("DatabaseName")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._DataEngineName = params.get("DataEngineName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateTaskResponse(AbstractModel):
"""CreateTask返回参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务ID
注意:此字段可能返回 null,表示取不到有效值。
:type TaskId: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskId = None
self._RequestId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._RequestId = params.get("RequestId")
class CreateTasksInOrderRequest(AbstractModel):
"""CreateTasksInOrder请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称。如果SQL语句中有数据库名称,优先使用SQL语句中的数据库,否则使用该参数指定的数据库。
:type DatabaseName: str
:param _Tasks: SQL任务信息
:type Tasks: :class:`tencentcloud.dlc.v20210125.models.TasksInfo`
:param _DatasourceConnectionName: 数据源名称,默认为COSDataCatalog
:type DatasourceConnectionName: str
"""
self._DatabaseName = None
self._Tasks = None
self._DatasourceConnectionName = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Tasks(self):
return self._Tasks
@Tasks.setter
def Tasks(self, Tasks):
self._Tasks = Tasks
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
if params.get("Tasks") is not None:
self._Tasks = TasksInfo()
self._Tasks._deserialize(params.get("Tasks"))
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateTasksInOrderResponse(AbstractModel):
"""CreateTasksInOrder返回参数结构体
"""
def __init__(self):
r"""
:param _BatchId: 本批次提交的任务的批次Id
:type BatchId: str
:param _TaskIdSet: 任务Id集合,按照执行顺序排列
:type TaskIdSet: list of str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._BatchId = None
self._TaskIdSet = None
self._RequestId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
@property
def TaskIdSet(self):
return self._TaskIdSet
@TaskIdSet.setter
def TaskIdSet(self, TaskIdSet):
self._TaskIdSet = TaskIdSet
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
self._TaskIdSet = params.get("TaskIdSet")
self._RequestId = params.get("RequestId")
class CreateTasksRequest(AbstractModel):
"""CreateTasks请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称。如果SQL语句中有数据库名称,优先使用SQL语句中的数据库,否则使用该参数指定的数据库(注:当提交建库sql时,该字段传空字符串)。
:type DatabaseName: str
:param _Tasks: SQL任务信息
:type Tasks: :class:`tencentcloud.dlc.v20210125.models.TasksInfo`
:param _DatasourceConnectionName: 数据源名称,默认为DataLakeCatalog
:type DatasourceConnectionName: str
:param _DataEngineName: 计算引擎名称,不填任务提交到默认集群
:type DataEngineName: str
"""
self._DatabaseName = None
self._Tasks = None
self._DatasourceConnectionName = None
self._DataEngineName = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Tasks(self):
return self._Tasks
@Tasks.setter
def Tasks(self, Tasks):
self._Tasks = Tasks
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
if params.get("Tasks") is not None:
self._Tasks = TasksInfo()
self._Tasks._deserialize(params.get("Tasks"))
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._DataEngineName = params.get("DataEngineName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateTasksResponse(AbstractModel):
"""CreateTasks返回参数结构体
"""
def __init__(self):
r"""
:param _BatchId: 本批次提交的任务的批次Id
:type BatchId: str
:param _TaskIdSet: 任务Id集合,按照执行顺序排列
:type TaskIdSet: list of str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._BatchId = None
self._TaskIdSet = None
self._RequestId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
@property
def TaskIdSet(self):
return self._TaskIdSet
@TaskIdSet.setter
def TaskIdSet(self, TaskIdSet):
self._TaskIdSet = TaskIdSet
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
self._TaskIdSet = params.get("TaskIdSet")
self._RequestId = params.get("RequestId")
class CreateUserRequest(AbstractModel):
"""CreateUser请求参数结构体
"""
def __init__(self):
r"""
:param _UserId: 需要授权的子用户uin,可以通过腾讯云控制台右上角 → “账号信息” → “账号ID进行查看”。
:type UserId: str
:param _UserDescription: 用户描述信息,方便区分不同用户
:type UserDescription: str
:param _PolicySet: 绑定到用户的权限集合
:type PolicySet: list of Policy
:param _UserType: 用户类型。ADMIN:管理员 COMMON:一般用户。当用户类型为管理员的时候,不能设置权限集合和绑定的工作组集合,管理员默认拥有所有权限。该参数不填默认为COMMON
:type UserType: str
:param _WorkGroupIds: 绑定到用户的工作组ID集合。
:type WorkGroupIds: list of int
:param _UserAlias: 用户别名,字符长度小50
:type UserAlias: str
"""
self._UserId = None
self._UserDescription = None
self._PolicySet = None
self._UserType = None
self._WorkGroupIds = None
self._UserAlias = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def UserDescription(self):
return self._UserDescription
@UserDescription.setter
def UserDescription(self, UserDescription):
self._UserDescription = UserDescription
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
@property
def UserType(self):
return self._UserType
@UserType.setter
def UserType(self, UserType):
self._UserType = UserType
@property
def WorkGroupIds(self):
return self._WorkGroupIds
@WorkGroupIds.setter
def WorkGroupIds(self, WorkGroupIds):
self._WorkGroupIds = WorkGroupIds
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._UserDescription = params.get("UserDescription")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
self._UserType = params.get("UserType")
self._WorkGroupIds = params.get("WorkGroupIds")
self._UserAlias = params.get("UserAlias")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateUserResponse(AbstractModel):
"""CreateUser返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class CreateWorkGroupRequest(AbstractModel):
"""CreateWorkGroup请求参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupName: 工作组名称
:type WorkGroupName: str
:param _WorkGroupDescription: 工作组描述
:type WorkGroupDescription: str
:param _PolicySet: 工作组绑定的鉴权策略集合
:type PolicySet: list of Policy
:param _UserIds: 需要绑定到工作组的用户Id集合
:type UserIds: list of str
"""
self._WorkGroupName = None
self._WorkGroupDescription = None
self._PolicySet = None
self._UserIds = None
@property
def WorkGroupName(self):
return self._WorkGroupName
@WorkGroupName.setter
def WorkGroupName(self, WorkGroupName):
self._WorkGroupName = WorkGroupName
@property
def WorkGroupDescription(self):
return self._WorkGroupDescription
@WorkGroupDescription.setter
def WorkGroupDescription(self, WorkGroupDescription):
self._WorkGroupDescription = WorkGroupDescription
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
@property
def UserIds(self):
return self._UserIds
@UserIds.setter
def UserIds(self, UserIds):
self._UserIds = UserIds
def _deserialize(self, params):
self._WorkGroupName = params.get("WorkGroupName")
self._WorkGroupDescription = params.get("WorkGroupDescription")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
self._UserIds = params.get("UserIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateWorkGroupResponse(AbstractModel):
"""CreateWorkGroup返回参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupId: 工作组Id,全局唯一
:type WorkGroupId: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._WorkGroupId = None
self._RequestId = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
self._RequestId = params.get("RequestId")
class CrontabResumeSuspendStrategy(AbstractModel):
"""定时启停策略信息
"""
def __init__(self):
r"""
:param _ResumeTime: 定时拉起时间:如:周一8点
注意:此字段可能返回 null,表示取不到有效值。
:type ResumeTime: str
:param _SuspendTime: 定时挂起时间:如:周一20点
注意:此字段可能返回 null,表示取不到有效值。
:type SuspendTime: str
:param _SuspendStrategy: 挂起配置:0(默认):等待任务结束后挂起、1:强制挂起
注意:此字段可能返回 null,表示取不到有效值。
:type SuspendStrategy: int
"""
self._ResumeTime = None
self._SuspendTime = None
self._SuspendStrategy = None
@property
def ResumeTime(self):
return self._ResumeTime
@ResumeTime.setter
def ResumeTime(self, ResumeTime):
self._ResumeTime = ResumeTime
@property
def SuspendTime(self):
return self._SuspendTime
@SuspendTime.setter
def SuspendTime(self, SuspendTime):
self._SuspendTime = SuspendTime
@property
def SuspendStrategy(self):
return self._SuspendStrategy
@SuspendStrategy.setter
def SuspendStrategy(self, SuspendStrategy):
self._SuspendStrategy = SuspendStrategy
def _deserialize(self, params):
self._ResumeTime = params.get("ResumeTime")
self._SuspendTime = params.get("SuspendTime")
self._SuspendStrategy = params.get("SuspendStrategy")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
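

# Illustrative sketch (an assumption, not generated code): models that run the
# member_set check above tolerate unknown keys in the input dict; they emit a
# "fields are useless" warning instead of raising. The time strings below are
# hypothetical examples in the "weekday, hour" style described by the docstring.
def _example_crontab_strategy_with_unknown_key():
    strategy = CrontabResumeSuspendStrategy()
    strategy._deserialize({
        "ResumeTime": "Monday 08:00",
        "SuspendTime": "Monday 20:00",
        "SuspendStrategy": 0,
        "Unknown": "ignored",  # no matching attribute, so this only warns
    })
    return strategy.SuspendStrategy
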
class DMSColumn(AbstractModel):
"""迁移列对象
"""
def __init__(self):
r"""
:param _Name: 名称
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
:param _Description: 描述
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param _Type: 类型
注意:此字段可能返回 null,表示取不到有效值。
:type Type: str
:param _Position: 排序
注意:此字段可能返回 null,表示取不到有效值。
:type Position: int
:param _Params: 附加参数
注意:此字段可能返回 null,表示取不到有效值。
:type Params: list of KVPair
:param _BizParams: 业务参数
注意:此字段可能返回 null,表示取不到有效值。
:type BizParams: list of KVPair
:param _IsPartition: 是否分区
注意:此字段可能返回 null,表示取不到有效值。
:type IsPartition: bool
"""
self._Name = None
self._Description = None
self._Type = None
self._Position = None
self._Params = None
self._BizParams = None
self._IsPartition = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Position(self):
return self._Position
@Position.setter
def Position(self, Position):
self._Position = Position
@property
def Params(self):
return self._Params
@Params.setter
def Params(self, Params):
self._Params = Params
@property
def BizParams(self):
return self._BizParams
@BizParams.setter
def BizParams(self, BizParams):
self._BizParams = BizParams
@property
def IsPartition(self):
return self._IsPartition
@IsPartition.setter
def IsPartition(self, IsPartition):
self._IsPartition = IsPartition
def _deserialize(self, params):
self._Name = params.get("Name")
self._Description = params.get("Description")
self._Type = params.get("Type")
self._Position = params.get("Position")
if params.get("Params") is not None:
self._Params = []
for item in params.get("Params"):
obj = KVPair()
obj._deserialize(item)
self._Params.append(obj)
if params.get("BizParams") is not None:
self._BizParams = []
for item in params.get("BizParams"):
obj = KVPair()
obj._deserialize(item)
self._BizParams.append(obj)
self._IsPartition = params.get("IsPartition")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
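

# Illustrative sketch: list-valued members such as Params are rebuilt during
# _deserialize by instantiating one child model (here KVPair) per element. The
# {"Key": ..., "Value": ...} payload shape is an assumption based on the
# KVPair class name; it is not defined in this excerpt.
def _example_dms_column_with_params():
    col = DMSColumn()
    col._deserialize({
        "Name": "id",
        "Type": "bigint",
        "Position": 0,
        "IsPartition": False,
        "Params": [{"Key": "comment", "Value": "primary key"}],  # hypothetical
    })
    return col.Params
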
class DMSColumnOrder(AbstractModel):
"""列排序对象
"""
def __init__(self):
r"""
:param _Col: 列名
注意:此字段可能返回 null,表示取不到有效值。
:type Col: str
:param _Order: 排序
注意:此字段可能返回 null,表示取不到有效值。
:type Order: int
"""
self._Col = None
self._Order = None
@property
def Col(self):
return self._Col
@Col.setter
def Col(self, Col):
self._Col = Col
@property
def Order(self):
return self._Order
@Order.setter
def Order(self, Order):
self._Order = Order
def _deserialize(self, params):
self._Col = params.get("Col")
self._Order = params.get("Order")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DMSPartition(AbstractModel):
"""迁移元数据分区对象
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称
:type DatabaseName: str
:param _SchemaName: 数据目录名称
:type SchemaName: str
:param _TableName: 表名称
:type TableName: str
:param _DataVersion: 数据版本
:type DataVersion: int
:param _Name: 分区名称
:type Name: str
:param _Values: 值列表
:type Values: list of str
:param _StorageSize: 存储大小
:type StorageSize: int
:param _RecordCount: 记录数量
:type RecordCount: int
:param _CreateTime: 创建时间
:type CreateTime: str
:param _ModifiedTime: 修改时间
:type ModifiedTime: str
:param _LastAccessTime: 最后访问时间
:type LastAccessTime: str
:param _Params: 附件属性
:type Params: list of KVPair
:param _Sds: 存储对象
:type Sds: :class:`tencentcloud.dlc.v20210125.models.DMSSds`
"""
self._DatabaseName = None
self._SchemaName = None
self._TableName = None
self._DataVersion = None
self._Name = None
self._Values = None
self._StorageSize = None
self._RecordCount = None
self._CreateTime = None
self._ModifiedTime = None
self._LastAccessTime = None
self._Params = None
self._Sds = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def DataVersion(self):
return self._DataVersion
@DataVersion.setter
def DataVersion(self, DataVersion):
self._DataVersion = DataVersion
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Values(self):
return self._Values
@Values.setter
def Values(self, Values):
self._Values = Values
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
@property
def LastAccessTime(self):
return self._LastAccessTime
@LastAccessTime.setter
def LastAccessTime(self, LastAccessTime):
self._LastAccessTime = LastAccessTime
@property
def Params(self):
return self._Params
@Params.setter
def Params(self, Params):
self._Params = Params
@property
def Sds(self):
return self._Sds
@Sds.setter
def Sds(self, Sds):
self._Sds = Sds
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._SchemaName = params.get("SchemaName")
self._TableName = params.get("TableName")
self._DataVersion = params.get("DataVersion")
self._Name = params.get("Name")
self._Values = params.get("Values")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._CreateTime = params.get("CreateTime")
self._ModifiedTime = params.get("ModifiedTime")
self._LastAccessTime = params.get("LastAccessTime")
if params.get("Params") is not None:
self._Params = []
for item in params.get("Params"):
obj = KVPair()
obj._deserialize(item)
self._Params.append(obj)
if params.get("Sds") is not None:
self._Sds = DMSSds()
self._Sds._deserialize(params.get("Sds"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DMSSds(AbstractModel):
"""元数据存储描述属性
"""
def __init__(self):
r"""
:param _Location: 存储地址
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
:param _InputFormat: 输入格式
注意:此字段可能返回 null,表示取不到有效值。
:type InputFormat: str
:param _OutputFormat: 输出格式
注意:此字段可能返回 null,表示取不到有效值。
:type OutputFormat: str
:param _NumBuckets: bucket数量
注意:此字段可能返回 null,表示取不到有效值。
:type NumBuckets: int
:param _Compressed: 是是否压缩
注意:此字段可能返回 null,表示取不到有效值。
:type Compressed: bool
:param _StoredAsSubDirectories: 是否有子目录
注意:此字段可能返回 null,表示取不到有效值。
:type StoredAsSubDirectories: bool
:param _SerdeLib: 序列化lib
注意:此字段可能返回 null,表示取不到有效值。
:type SerdeLib: str
:param _SerdeName: 序列化名称
注意:此字段可能返回 null,表示取不到有效值。
:type SerdeName: str
:param _BucketCols: 桶名称
注意:此字段可能返回 null,表示取不到有效值。
:type BucketCols: list of str
:param _SerdeParams: 序列化参数
注意:此字段可能返回 null,表示取不到有效值。
:type SerdeParams: list of KVPair
:param _Params: 附加参数
注意:此字段可能返回 null,表示取不到有效值。
:type Params: list of KVPair
:param _SortCols: 列排序(Expired)
注意:此字段可能返回 null,表示取不到有效值。
:type SortCols: :class:`tencentcloud.dlc.v20210125.models.DMSColumnOrder`
:param _Cols: 列
注意:此字段可能返回 null,表示取不到有效值。
:type Cols: list of DMSColumn
:param _SortColumns: 列排序字段
注意:此字段可能返回 null,表示取不到有效值。
:type SortColumns: list of DMSColumnOrder
"""
self._Location = None
self._InputFormat = None
self._OutputFormat = None
self._NumBuckets = None
self._Compressed = None
self._StoredAsSubDirectories = None
self._SerdeLib = None
self._SerdeName = None
self._BucketCols = None
self._SerdeParams = None
self._Params = None
self._SortCols = None
self._Cols = None
self._SortColumns = None
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def InputFormat(self):
return self._InputFormat
@InputFormat.setter
def InputFormat(self, InputFormat):
self._InputFormat = InputFormat
@property
def OutputFormat(self):
return self._OutputFormat
@OutputFormat.setter
def OutputFormat(self, OutputFormat):
self._OutputFormat = OutputFormat
@property
def NumBuckets(self):
return self._NumBuckets
@NumBuckets.setter
def NumBuckets(self, NumBuckets):
self._NumBuckets = NumBuckets
@property
def Compressed(self):
return self._Compressed
@Compressed.setter
def Compressed(self, Compressed):
self._Compressed = Compressed
@property
def StoredAsSubDirectories(self):
return self._StoredAsSubDirectories
@StoredAsSubDirectories.setter
def StoredAsSubDirectories(self, StoredAsSubDirectories):
self._StoredAsSubDirectories = StoredAsSubDirectories
@property
def SerdeLib(self):
return self._SerdeLib
@SerdeLib.setter
def SerdeLib(self, SerdeLib):
self._SerdeLib = SerdeLib
@property
def SerdeName(self):
return self._SerdeName
@SerdeName.setter
def SerdeName(self, SerdeName):
self._SerdeName = SerdeName
@property
def BucketCols(self):
return self._BucketCols
@BucketCols.setter
def BucketCols(self, BucketCols):
self._BucketCols = BucketCols
@property
def SerdeParams(self):
return self._SerdeParams
@SerdeParams.setter
def SerdeParams(self, SerdeParams):
self._SerdeParams = SerdeParams
@property
def Params(self):
return self._Params
@Params.setter
def Params(self, Params):
self._Params = Params
@property
def SortCols(self):
return self._SortCols
@SortCols.setter
def SortCols(self, SortCols):
self._SortCols = SortCols
@property
def Cols(self):
return self._Cols
@Cols.setter
def Cols(self, Cols):
self._Cols = Cols
@property
def SortColumns(self):
return self._SortColumns
@SortColumns.setter
def SortColumns(self, SortColumns):
self._SortColumns = SortColumns
def _deserialize(self, params):
self._Location = params.get("Location")
self._InputFormat = params.get("InputFormat")
self._OutputFormat = params.get("OutputFormat")
self._NumBuckets = params.get("NumBuckets")
self._Compressed = params.get("Compressed")
self._StoredAsSubDirectories = params.get("StoredAsSubDirectories")
self._SerdeLib = params.get("SerdeLib")
self._SerdeName = params.get("SerdeName")
self._BucketCols = params.get("BucketCols")
if params.get("SerdeParams") is not None:
self._SerdeParams = []
for item in params.get("SerdeParams"):
obj = KVPair()
obj._deserialize(item)
self._SerdeParams.append(obj)
if params.get("Params") is not None:
self._Params = []
for item in params.get("Params"):
obj = KVPair()
obj._deserialize(item)
self._Params.append(obj)
if params.get("SortCols") is not None:
self._SortCols = DMSColumnOrder()
self._SortCols._deserialize(params.get("SortCols"))
if params.get("Cols") is not None:
self._Cols = []
for item in params.get("Cols"):
obj = DMSColumn()
obj._deserialize(item)
self._Cols.append(obj)
if params.get("SortColumns") is not None:
self._SortColumns = []
for item in params.get("SortColumns"):
obj = DMSColumnOrder()
obj._deserialize(item)
self._SortColumns.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
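

# Illustrative sketch: nested single-object members are deserialized
# recursively, so a DMSPartition payload can carry its DMSSds storage
# descriptor inline. All literal values below are hypothetical.
def _example_dms_partition_with_sds():
    partition = DMSPartition()
    partition._deserialize({
        "DatabaseName": "demo_db",
        "TableName": "demo_table",
        "Name": "dt=2021-01-25",
        "Values": ["2021-01-25"],
        "Sds": {
            "Location": "cosn://bucketname/demo_table/dt=2021-01-25",
            "InputFormat": "org.apache.hadoop.mapred.TextInputFormat",
        },
    })
    return partition.Sds.Location
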
class DMSTable(AbstractModel):
"""DMSTable基本信息
"""
def __init__(self):
r"""
:param _ViewOriginalText: 视图文本
注意:此字段可能返回 null,表示取不到有效值。
:type ViewOriginalText: str
:param _ViewExpandedText: 视图文本
注意:此字段可能返回 null,表示取不到有效值。
:type ViewExpandedText: str
:param _Retention: hive维护版本
注意:此字段可能返回 null,表示取不到有效值。
:type Retention: int
:param _Sds: 存储对象
注意:此字段可能返回 null,表示取不到有效值。
:type Sds: :class:`tencentcloud.dlc.v20210125.models.DMSSds`
:param _PartitionKeys: 分区列
注意:此字段可能返回 null,表示取不到有效值。
:type PartitionKeys: list of DMSColumn
:param _Partitions: 分区
注意:此字段可能返回 null,表示取不到有效值。
:type Partitions: list of DMSPartition
:param _Type: 表类型
注意:此字段可能返回 null,表示取不到有效值。
:type Type: str
:param _DbName: 数据库名称
注意:此字段可能返回 null,表示取不到有效值。
:type DbName: str
:param _SchemaName: Schema名称
注意:此字段可能返回 null,表示取不到有效值。
:type SchemaName: str
:param _StorageSize: 存储大小
注意:此字段可能返回 null,表示取不到有效值。
:type StorageSize: int
:param _RecordCount: 记录数量
注意:此字段可能返回 null,表示取不到有效值。
:type RecordCount: int
:param _LifeTime: 生命周期
注意:此字段可能返回 null,表示取不到有效值。
:type LifeTime: int
:param _LastAccessTime: 最后访问时间
注意:此字段可能返回 null,表示取不到有效值。
:type LastAccessTime: str
:param _DataUpdateTime: 数据更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type DataUpdateTime: str
:param _StructUpdateTime: 结构更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type StructUpdateTime: str
:param _Columns: 列
注意:此字段可能返回 null,表示取不到有效值。
:type Columns: list of DMSColumn
:param _Name: 表名
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
"""
self._ViewOriginalText = None
self._ViewExpandedText = None
self._Retention = None
self._Sds = None
self._PartitionKeys = None
self._Partitions = None
self._Type = None
self._DbName = None
self._SchemaName = None
self._StorageSize = None
self._RecordCount = None
self._LifeTime = None
self._LastAccessTime = None
self._DataUpdateTime = None
self._StructUpdateTime = None
self._Columns = None
self._Name = None
@property
def ViewOriginalText(self):
return self._ViewOriginalText
@ViewOriginalText.setter
def ViewOriginalText(self, ViewOriginalText):
self._ViewOriginalText = ViewOriginalText
@property
def ViewExpandedText(self):
return self._ViewExpandedText
@ViewExpandedText.setter
def ViewExpandedText(self, ViewExpandedText):
self._ViewExpandedText = ViewExpandedText
@property
def Retention(self):
return self._Retention
@Retention.setter
def Retention(self, Retention):
self._Retention = Retention
@property
def Sds(self):
return self._Sds
@Sds.setter
def Sds(self, Sds):
self._Sds = Sds
@property
def PartitionKeys(self):
return self._PartitionKeys
@PartitionKeys.setter
def PartitionKeys(self, PartitionKeys):
self._PartitionKeys = PartitionKeys
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def LifeTime(self):
return self._LifeTime
@LifeTime.setter
def LifeTime(self, LifeTime):
self._LifeTime = LifeTime
@property
def LastAccessTime(self):
return self._LastAccessTime
@LastAccessTime.setter
def LastAccessTime(self, LastAccessTime):
self._LastAccessTime = LastAccessTime
@property
def DataUpdateTime(self):
return self._DataUpdateTime
@DataUpdateTime.setter
def DataUpdateTime(self, DataUpdateTime):
self._DataUpdateTime = DataUpdateTime
@property
def StructUpdateTime(self):
return self._StructUpdateTime
@StructUpdateTime.setter
def StructUpdateTime(self, StructUpdateTime):
self._StructUpdateTime = StructUpdateTime
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
def _deserialize(self, params):
self._ViewOriginalText = params.get("ViewOriginalText")
self._ViewExpandedText = params.get("ViewExpandedText")
self._Retention = params.get("Retention")
if params.get("Sds") is not None:
self._Sds = DMSSds()
self._Sds._deserialize(params.get("Sds"))
if params.get("PartitionKeys") is not None:
self._PartitionKeys = []
for item in params.get("PartitionKeys"):
obj = DMSColumn()
obj._deserialize(item)
self._PartitionKeys.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Type = params.get("Type")
self._DbName = params.get("DbName")
self._SchemaName = params.get("SchemaName")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._LifeTime = params.get("LifeTime")
self._LastAccessTime = params.get("LastAccessTime")
self._DataUpdateTime = params.get("DataUpdateTime")
self._StructUpdateTime = params.get("StructUpdateTime")
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = DMSColumn()
obj._deserialize(item)
self._Columns.append(obj)
self._Name = params.get("Name")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DMSTableInfo(AbstractModel):
"""DMSTable信息
"""
def __init__(self):
r"""
:param _Table: DMS表信息
注意:此字段可能返回 null,表示取不到有效值。
:type Table: :class:`tencentcloud.dlc.v20210125.models.DMSTable`
:param _Asset: 基础对象信息
注意:此字段可能返回 null,表示取不到有效值。
:type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
"""
self._Table = None
self._Asset = None
@property
def Table(self):
return self._Table
@Table.setter
def Table(self, Table):
self._Table = Table
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
def _deserialize(self, params):
if params.get("Table") is not None:
self._Table = DMSTable()
self._Table._deserialize(params.get("Table"))
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DataEngineConfigPair(AbstractModel):
"""引擎配置
"""
class DataEngineInfo(AbstractModel):
"""DataEngine详细信息
"""
def __init__(self):
r"""
:param _DataEngineName: DataEngine名称
:type DataEngineName: str
:param _EngineType: 引擎类型 spark/presto
:type EngineType: str
:param _ClusterType: 集群资源类型 spark_private/presto_private/presto_cu/spark_cu
:type ClusterType: str
:param _QuotaId: 引用ID
:type QuotaId: str
:param _State: 数据引擎状态 -2已删除 -1失败 0初始化中 1挂起 2运行中 3准备删除 4删除中
:type State: int
:param _CreateTime: 创建时间
:type CreateTime: int
:param _UpdateTime: 更新时间
:type UpdateTime: int
:param _Size: 集群规格
注意:此字段可能返回 null,表示取不到有效值。
:type Size: int
:param _Mode: 计费模式 0共享模式 1按量计费 2包年包月
:type Mode: int
:param _MinClusters: 最小集群数
注意:此字段可能返回 null,表示取不到有效值。
:type MinClusters: int
:param _MaxClusters: 最大集群数
注意:此字段可能返回 null,表示取不到有效值。
:type MaxClusters: int
:param _AutoResume: 是否自动恢复
注意:此字段可能返回 null,表示取不到有效值。
:type AutoResume: bool
:param _SpendAfter: 自动恢复时间
注意:此字段可能返回 null,表示取不到有效值。
:type SpendAfter: int
:param _CidrBlock: 集群网段
注意:此字段可能返回 null,表示取不到有效值。
:type CidrBlock: str
:param _DefaultDataEngine: 是否为默认引擎
注意:此字段可能返回 null,表示取不到有效值。
:type DefaultDataEngine: bool
:param _Message: 返回信息
注意:此字段可能返回 null,表示取不到有效值。
:type Message: str
:param _DataEngineId: 引擎id
:type DataEngineId: str
:param _SubAccountUin: 操作者
:type SubAccountUin: str
:param _ExpireTime: 到期时间
:type ExpireTime: str
:param _IsolatedTime: 隔离时间
:type IsolatedTime: str
:param _ReversalTime: 冲正时间
:type ReversalTime: str
:param _UserAlias: 用户名称
注意:此字段可能返回 null,表示取不到有效值。
:type UserAlias: str
:param _TagList: 标签对集合
注意:此字段可能返回 null,表示取不到有效值。
:type TagList: list of TagInfo
:param _Permissions: 引擎拥有的权限
注意:此字段可能返回 null,表示取不到有效值。
:type Permissions: list of str
:param _AutoSuspend: 是否自定挂起集群:false(默认):不自动挂起、true:自动挂起
注意:此字段可能返回 null,表示取不到有效值。
:type AutoSuspend: bool
:param _CrontabResumeSuspend: 定时启停集群策略:0(默认):关闭定时策略、1:开启定时策略(注:定时启停策略与自动挂起策略互斥)
注意:此字段可能返回 null,表示取不到有效值。
:type CrontabResumeSuspend: int
:param _CrontabResumeSuspendStrategy: 定时启停策略,复杂类型:包含启停时间、挂起集群策略
注意:此字段可能返回 null,表示取不到有效值。
:type CrontabResumeSuspendStrategy: :class:`tencentcloud.dlc.v20210125.models.CrontabResumeSuspendStrategy`
:param _EngineExecType: 引擎执行任务类型,有效值:SQL/BATCH
注意:此字段可能返回 null,表示取不到有效值。
:type EngineExecType: str
:param _RenewFlag: 自动续费标志,0,初始状态,默认不自动续费,若用户有预付费不停服特权,自动续费。1:自动续费。2:明确不自动续费
注意:此字段可能返回 null,表示取不到有效值。
:type RenewFlag: int
:param _AutoSuspendTime: 集群自动挂起时间
注意:此字段可能返回 null,表示取不到有效值。
:type AutoSuspendTime: int
:param _NetworkConnectionSet: 网络连接配置
注意:此字段可能返回 null,表示取不到有效值。
:type NetworkConnectionSet: list of NetworkConnection
:param _UiURL: ui的跳转地址
注意:此字段可能返回 null,表示取不到有效值。
:type UiURL: str
:param _ResourceType: 引擎的资源类型
注意:此字段可能返回 null,表示取不到有效值。
:type ResourceType: str
:param _ImageVersionId: 集群镜像版本ID
注意:此字段可能返回 null,表示取不到有效值。
:type ImageVersionId: str
:param _ChildImageVersionId: 集群镜像小版本ID
注意:此字段可能返回 null,表示取不到有效值。
:type ChildImageVersionId: str
:param _ImageVersionName: 集群镜像版本名字
注意:此字段可能返回 null,表示取不到有效值。
:type ImageVersionName: str
:param _StartStandbyCluster: 是否开启备集群
注意:此字段可能返回 null,表示取不到有效值。
:type StartStandbyCluster: bool
:param _ElasticSwitch: spark jar 包年包月集群是否开启弹性
注意:此字段可能返回 null,表示取不到有效值。
:type ElasticSwitch: bool
:param _ElasticLimit: spark jar 包年包月集群弹性上限
注意:此字段可能返回 null,表示取不到有效值。
:type ElasticLimit: int
:param _DefaultHouse: 是否为默认引擎
注意:此字段可能返回 null,表示取不到有效值。
:type DefaultHouse: bool
:param _MaxConcurrency: 单个集群任务最大并发数
注意:此字段可能返回 null,表示取不到有效值。
:type MaxConcurrency: int
:param _TolerableQueueTime: 任务排队上限时间
注意:此字段可能返回 null,表示取不到有效值。
:type TolerableQueueTime: int
:param _UserAppId: 用户appid
注意:此字段可能返回 null,表示取不到有效值。
:type UserAppId: int
:param _UserUin: 用户uin
注意:此字段可能返回 null,表示取不到有效值。
:type UserUin: str
:param _SessionResourceTemplate: SessionResourceTemplate
注意:此字段可能返回 null,表示取不到有效值。
:type SessionResourceTemplate: :class:`tencentcloud.dlc.v20210125.models.SessionResourceTemplate`
"""
self._DataEngineName = None
self._EngineType = None
self._ClusterType = None
self._QuotaId = None
self._State = None
self._CreateTime = None
self._UpdateTime = None
self._Size = None
self._Mode = None
self._MinClusters = None
self._MaxClusters = None
self._AutoResume = None
self._SpendAfter = None
self._CidrBlock = None
self._DefaultDataEngine = None
self._Message = None
self._DataEngineId = None
self._SubAccountUin = None
self._ExpireTime = None
self._IsolatedTime = None
self._ReversalTime = None
self._UserAlias = None
self._TagList = None
self._Permissions = None
self._AutoSuspend = None
self._CrontabResumeSuspend = None
self._CrontabResumeSuspendStrategy = None
self._EngineExecType = None
self._RenewFlag = None
self._AutoSuspendTime = None
self._NetworkConnectionSet = None
self._UiURL = None
self._ResourceType = None
self._ImageVersionId = None
self._ChildImageVersionId = None
self._ImageVersionName = None
self._StartStandbyCluster = None
self._ElasticSwitch = None
self._ElasticLimit = None
self._DefaultHouse = None
self._MaxConcurrency = None
self._TolerableQueueTime = None
self._UserAppId = None
self._UserUin = None
self._SessionResourceTemplate = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def EngineType(self):
return self._EngineType
@EngineType.setter
def EngineType(self, EngineType):
self._EngineType = EngineType
@property
def ClusterType(self):
return self._ClusterType
@ClusterType.setter
def ClusterType(self, ClusterType):
self._ClusterType = ClusterType
@property
def QuotaId(self):
return self._QuotaId
@QuotaId.setter
def QuotaId(self, QuotaId):
self._QuotaId = QuotaId
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def UpdateTime(self):
return self._UpdateTime
@UpdateTime.setter
def UpdateTime(self, UpdateTime):
self._UpdateTime = UpdateTime
@property
def Size(self):
return self._Size
@Size.setter
def Size(self, Size):
self._Size = Size
@property
def Mode(self):
return self._Mode
@Mode.setter
def Mode(self, Mode):
self._Mode = Mode
@property
def MinClusters(self):
return self._MinClusters
@MinClusters.setter
def MinClusters(self, MinClusters):
self._MinClusters = MinClusters
@property
def MaxClusters(self):
return self._MaxClusters
@MaxClusters.setter
def MaxClusters(self, MaxClusters):
self._MaxClusters = MaxClusters
@property
def AutoResume(self):
return self._AutoResume
@AutoResume.setter
def AutoResume(self, AutoResume):
self._AutoResume = AutoResume
@property
def SpendAfter(self):
return self._SpendAfter
@SpendAfter.setter
def SpendAfter(self, SpendAfter):
self._SpendAfter = SpendAfter
@property
def CidrBlock(self):
return self._CidrBlock
@CidrBlock.setter
def CidrBlock(self, CidrBlock):
self._CidrBlock = CidrBlock
@property
def DefaultDataEngine(self):
return self._DefaultDataEngine
@DefaultDataEngine.setter
def DefaultDataEngine(self, DefaultDataEngine):
self._DefaultDataEngine = DefaultDataEngine
@property
def Message(self):
return self._Message
@Message.setter
def Message(self, Message):
self._Message = Message
@property
def DataEngineId(self):
return self._DataEngineId
@DataEngineId.setter
def DataEngineId(self, DataEngineId):
self._DataEngineId = DataEngineId
@property
def SubAccountUin(self):
return self._SubAccountUin
@SubAccountUin.setter
def SubAccountUin(self, SubAccountUin):
self._SubAccountUin = SubAccountUin
@property
def ExpireTime(self):
return self._ExpireTime
@ExpireTime.setter
def ExpireTime(self, ExpireTime):
self._ExpireTime = ExpireTime
@property
def IsolatedTime(self):
return self._IsolatedTime
@IsolatedTime.setter
def IsolatedTime(self, IsolatedTime):
self._IsolatedTime = IsolatedTime
@property
def ReversalTime(self):
return self._ReversalTime
@ReversalTime.setter
def ReversalTime(self, ReversalTime):
self._ReversalTime = ReversalTime
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
@property
def TagList(self):
return self._TagList
@TagList.setter
def TagList(self, TagList):
self._TagList = TagList
@property
def Permissions(self):
return self._Permissions
@Permissions.setter
def Permissions(self, Permissions):
self._Permissions = Permissions
@property
def AutoSuspend(self):
return self._AutoSuspend
@AutoSuspend.setter
def AutoSuspend(self, AutoSuspend):
self._AutoSuspend = AutoSuspend
@property
def CrontabResumeSuspend(self):
return self._CrontabResumeSuspend
@CrontabResumeSuspend.setter
def CrontabResumeSuspend(self, CrontabResumeSuspend):
self._CrontabResumeSuspend = CrontabResumeSuspend
@property
def CrontabResumeSuspendStrategy(self):
return self._CrontabResumeSuspendStrategy
@CrontabResumeSuspendStrategy.setter
def CrontabResumeSuspendStrategy(self, CrontabResumeSuspendStrategy):
self._CrontabResumeSuspendStrategy = CrontabResumeSuspendStrategy
@property
def EngineExecType(self):
return self._EngineExecType
@EngineExecType.setter
def EngineExecType(self, EngineExecType):
self._EngineExecType = EngineExecType
@property
def RenewFlag(self):
return self._RenewFlag
@RenewFlag.setter
def RenewFlag(self, RenewFlag):
self._RenewFlag = RenewFlag
@property
def AutoSuspendTime(self):
return self._AutoSuspendTime
@AutoSuspendTime.setter
def AutoSuspendTime(self, AutoSuspendTime):
self._AutoSuspendTime = AutoSuspendTime
@property
def NetworkConnectionSet(self):
return self._NetworkConnectionSet
@NetworkConnectionSet.setter
def NetworkConnectionSet(self, NetworkConnectionSet):
self._NetworkConnectionSet = NetworkConnectionSet
@property
def UiURL(self):
return self._UiURL
@UiURL.setter
def UiURL(self, UiURL):
self._UiURL = UiURL
@property
def ResourceType(self):
return self._ResourceType
@ResourceType.setter
def ResourceType(self, ResourceType):
self._ResourceType = ResourceType
@property
def ImageVersionId(self):
return self._ImageVersionId
@ImageVersionId.setter
def ImageVersionId(self, ImageVersionId):
self._ImageVersionId = ImageVersionId
@property
def ChildImageVersionId(self):
return self._ChildImageVersionId
@ChildImageVersionId.setter
def ChildImageVersionId(self, ChildImageVersionId):
self._ChildImageVersionId = ChildImageVersionId
@property
def ImageVersionName(self):
return self._ImageVersionName
@ImageVersionName.setter
def ImageVersionName(self, ImageVersionName):
self._ImageVersionName = ImageVersionName
@property
def StartStandbyCluster(self):
return self._StartStandbyCluster
@StartStandbyCluster.setter
def StartStandbyCluster(self, StartStandbyCluster):
self._StartStandbyCluster = StartStandbyCluster
@property
def ElasticSwitch(self):
return self._ElasticSwitch
@ElasticSwitch.setter
def ElasticSwitch(self, ElasticSwitch):
self._ElasticSwitch = ElasticSwitch
@property
def ElasticLimit(self):
return self._ElasticLimit
@ElasticLimit.setter
def ElasticLimit(self, ElasticLimit):
self._ElasticLimit = ElasticLimit
@property
def DefaultHouse(self):
return self._DefaultHouse
@DefaultHouse.setter
def DefaultHouse(self, DefaultHouse):
self._DefaultHouse = DefaultHouse
@property
def MaxConcurrency(self):
return self._MaxConcurrency
@MaxConcurrency.setter
def MaxConcurrency(self, MaxConcurrency):
self._MaxConcurrency = MaxConcurrency
@property
def TolerableQueueTime(self):
return self._TolerableQueueTime
@TolerableQueueTime.setter
def TolerableQueueTime(self, TolerableQueueTime):
self._TolerableQueueTime = TolerableQueueTime
@property
def UserAppId(self):
return self._UserAppId
@UserAppId.setter
def UserAppId(self, UserAppId):
self._UserAppId = UserAppId
@property
def UserUin(self):
return self._UserUin
@UserUin.setter
def UserUin(self, UserUin):
self._UserUin = UserUin
@property
def SessionResourceTemplate(self):
return self._SessionResourceTemplate
@SessionResourceTemplate.setter
def SessionResourceTemplate(self, SessionResourceTemplate):
self._SessionResourceTemplate = SessionResourceTemplate
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._EngineType = params.get("EngineType")
self._ClusterType = params.get("ClusterType")
self._QuotaId = params.get("QuotaId")
self._State = params.get("State")
self._CreateTime = params.get("CreateTime")
self._UpdateTime = params.get("UpdateTime")
self._Size = params.get("Size")
self._Mode = params.get("Mode")
self._MinClusters = params.get("MinClusters")
self._MaxClusters = params.get("MaxClusters")
self._AutoResume = params.get("AutoResume")
self._SpendAfter = params.get("SpendAfter")
self._CidrBlock = params.get("CidrBlock")
self._DefaultDataEngine = params.get("DefaultDataEngine")
self._Message = params.get("Message")
self._DataEngineId = params.get("DataEngineId")
self._SubAccountUin = params.get("SubAccountUin")
self._ExpireTime = params.get("ExpireTime")
self._IsolatedTime = params.get("IsolatedTime")
self._ReversalTime = params.get("ReversalTime")
self._UserAlias = params.get("UserAlias")
if params.get("TagList") is not None:
self._TagList = []
for item in params.get("TagList"):
obj = TagInfo()
obj._deserialize(item)
self._TagList.append(obj)
self._Permissions = params.get("Permissions")
self._AutoSuspend = params.get("AutoSuspend")
self._CrontabResumeSuspend = params.get("CrontabResumeSuspend")
if params.get("CrontabResumeSuspendStrategy") is not None:
self._CrontabResumeSuspendStrategy = CrontabResumeSuspendStrategy()
self._CrontabResumeSuspendStrategy._deserialize(params.get("CrontabResumeSuspendStrategy"))
self._EngineExecType = params.get("EngineExecType")
self._RenewFlag = params.get("RenewFlag")
self._AutoSuspendTime = params.get("AutoSuspendTime")
if params.get("NetworkConnectionSet") is not None:
self._NetworkConnectionSet = []
for item in params.get("NetworkConnectionSet"):
obj = NetworkConnection()
obj._deserialize(item)
self._NetworkConnectionSet.append(obj)
self._UiURL = params.get("UiURL")
self._ResourceType = params.get("ResourceType")
self._ImageVersionId = params.get("ImageVersionId")
self._ChildImageVersionId = params.get("ChildImageVersionId")
self._ImageVersionName = params.get("ImageVersionName")
self._StartStandbyCluster = params.get("StartStandbyCluster")
self._ElasticSwitch = params.get("ElasticSwitch")
self._ElasticLimit = params.get("ElasticLimit")
self._DefaultHouse = params.get("DefaultHouse")
self._MaxConcurrency = params.get("MaxConcurrency")
self._TolerableQueueTime = params.get("TolerableQueueTime")
self._UserAppId = params.get("UserAppId")
self._UserUin = params.get("UserUin")
if params.get("SessionResourceTemplate") is not None:
self._SessionResourceTemplate = SessionResourceTemplate()
self._SessionResourceTemplate._deserialize(params.get("SessionResourceTemplate"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DataFormat(AbstractModel):
"""数据表数据格式。
"""
def __init__(self):
r"""
:param _TextFile: 文本格式,TextFile。
注意:此字段可能返回 null,表示取不到有效值。
:type TextFile: :class:`tencentcloud.dlc.v20210125.models.TextFile`
:param _CSV: 文本格式,CSV。
注意:此字段可能返回 null,表示取不到有效值。
:type CSV: :class:`tencentcloud.dlc.v20210125.models.CSV`
:param _Json: 文本格式,Json。
注意:此字段可能返回 null,表示取不到有效值。
:type Json: :class:`tencentcloud.dlc.v20210125.models.Other`
:param _Parquet: Parquet格式
注意:此字段可能返回 null,表示取不到有效值。
:type Parquet: :class:`tencentcloud.dlc.v20210125.models.Other`
:param _ORC: ORC格式
注意:此字段可能返回 null,表示取不到有效值。
:type ORC: :class:`tencentcloud.dlc.v20210125.models.Other`
:param _AVRO: AVRO格式
注意:此字段可能返回 null,表示取不到有效值。
:type AVRO: :class:`tencentcloud.dlc.v20210125.models.Other`
"""
self._TextFile = None
self._CSV = None
self._Json = None
self._Parquet = None
self._ORC = None
self._AVRO = None
@property
def TextFile(self):
return self._TextFile
@TextFile.setter
def TextFile(self, TextFile):
self._TextFile = TextFile
@property
def CSV(self):
return self._CSV
@CSV.setter
def CSV(self, CSV):
self._CSV = CSV
@property
def Json(self):
return self._Json
@Json.setter
def Json(self, Json):
self._Json = Json
@property
def Parquet(self):
return self._Parquet
@Parquet.setter
def Parquet(self, Parquet):
self._Parquet = Parquet
@property
def ORC(self):
return self._ORC
@ORC.setter
def ORC(self, ORC):
self._ORC = ORC
@property
def AVRO(self):
return self._AVRO
@AVRO.setter
def AVRO(self, AVRO):
self._AVRO = AVRO
def _deserialize(self, params):
if params.get("TextFile") is not None:
self._TextFile = TextFile()
self._TextFile._deserialize(params.get("TextFile"))
if params.get("CSV") is not None:
self._CSV = CSV()
self._CSV._deserialize(params.get("CSV"))
if params.get("Json") is not None:
self._Json = Other()
self._Json._deserialize(params.get("Json"))
if params.get("Parquet") is not None:
self._Parquet = Other()
self._Parquet._deserialize(params.get("Parquet"))
if params.get("ORC") is not None:
self._ORC = Other()
self._ORC._deserialize(params.get("ORC"))
if params.get("AVRO") is not None:
self._AVRO = Other()
self._AVRO._deserialize(params.get("AVRO"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
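

# Illustrative sketch: a DataFormat payload typically carries just one of its
# format members. The Other model's fields are not shown in this excerpt, so an
# empty dict stands in for the Parquet-specific options (an assumption).
def _example_data_format_parquet():
    fmt = DataFormat()
    fmt._deserialize({"Parquet": {}})
    return fmt.Parquet
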
class DataGovernPolicy(AbstractModel):
"""数据治理规则
"""
def __init__(self):
r"""
:param _RuleType: 治理规则类型,Customize: 自定义;Intelligence: 智能治理
注意:此字段可能返回 null,表示取不到有效值。
:type RuleType: str
:param _GovernEngine: 治理引擎
注意:此字段可能返回 null,表示取不到有效值。
:type GovernEngine: str
"""
self._RuleType = None
self._GovernEngine = None
@property
def RuleType(self):
return self._RuleType
@RuleType.setter
def RuleType(self, RuleType):
self._RuleType = RuleType
@property
def GovernEngine(self):
return self._GovernEngine
@GovernEngine.setter
def GovernEngine(self, GovernEngine):
self._GovernEngine = GovernEngine
def _deserialize(self, params):
self._RuleType = params.get("RuleType")
self._GovernEngine = params.get("GovernEngine")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
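

# Illustrative sketch: the RuleType values come from the docstring above
# (Customize / Intelligence); the engine name is a hypothetical placeholder.
def _example_data_govern_policy():
    policy = DataGovernPolicy()
    policy._deserialize({"RuleType": "Customize", "GovernEngine": "spark"})
    return policy.RuleType
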
class DatabaseInfo(AbstractModel):
"""数据库对象
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称,长度0~128,支持数字、字母下划线,不允许数字大头,统一转换为小写。
:type DatabaseName: str
:param _Comment: 数据库描述信息,长度 0~500。
注意:此字段可能返回 null,表示取不到有效值。
:type Comment: str
:param _Properties: 数据库属性列表。
注意:此字段可能返回 null,表示取不到有效值。
:type Properties: list of Property
:param _Location: 数据库cos路径
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
"""
self._DatabaseName = None
self._Comment = None
self._Properties = None
self._Location = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._Comment = params.get("Comment")
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
self._Location = params.get("Location")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DatabaseResponseInfo(AbstractModel):
"""数据库对象
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称。
:type DatabaseName: str
:param _Comment: 数据库描述信息,长度 0~256。
注意:此字段可能返回 null,表示取不到有效值。
:type Comment: str
:param _Properties: 允许针对数据库的属性元数据信息进行指定。
注意:此字段可能返回 null,表示取不到有效值。
:type Properties: list of Property
:param _CreateTime: 数据库创建时间戳,单位:s。
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: str
:param _ModifiedTime: 数据库更新时间戳,单位:s。
注意:此字段可能返回 null,表示取不到有效值。
:type ModifiedTime: str
:param _Location: cos存储路径
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
:param _UserAlias: 建库用户昵称
注意:此字段可能返回 null,表示取不到有效值。
:type UserAlias: str
:param _UserSubUin: 建库用户ID
注意:此字段可能返回 null,表示取不到有效值。
:type UserSubUin: str
:param _GovernPolicy: 数据治理配置项
注意:此字段可能返回 null,表示取不到有效值。
:type GovernPolicy: :class:`tencentcloud.dlc.v20210125.models.DataGovernPolicy`
:param _DatabaseId: 数据库ID(无效字段)
注意:此字段可能返回 null,表示取不到有效值。
:type DatabaseId: str
"""
self._DatabaseName = None
self._Comment = None
self._Properties = None
self._CreateTime = None
self._ModifiedTime = None
self._Location = None
self._UserAlias = None
self._UserSubUin = None
self._GovernPolicy = None
self._DatabaseId = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
@property
def UserSubUin(self):
return self._UserSubUin
@UserSubUin.setter
def UserSubUin(self, UserSubUin):
self._UserSubUin = UserSubUin
@property
def GovernPolicy(self):
return self._GovernPolicy
@GovernPolicy.setter
def GovernPolicy(self, GovernPolicy):
self._GovernPolicy = GovernPolicy
@property
def DatabaseId(self):
return self._DatabaseId
@DatabaseId.setter
def DatabaseId(self, DatabaseId):
self._DatabaseId = DatabaseId
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._Comment = params.get("Comment")
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
self._CreateTime = params.get("CreateTime")
self._ModifiedTime = params.get("ModifiedTime")
self._Location = params.get("Location")
self._UserAlias = params.get("UserAlias")
self._UserSubUin = params.get("UserSubUin")
if params.get("GovernPolicy") is not None:
self._GovernPolicy = DataGovernPolicy()
self._GovernPolicy._deserialize(params.get("GovernPolicy"))
self._DatabaseId = params.get("DatabaseId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteNotebookSessionRequest(AbstractModel):
"""DeleteNotebookSession请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
"""
self._SessionId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteNotebookSessionResponse(AbstractModel):
"""DeleteNotebookSession返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DeleteScriptRequest(AbstractModel):
"""DeleteScript请求参数结构体
"""
def __init__(self):
r"""
:param _ScriptIds: 脚本id,其可以通过DescribeScripts接口提取
:type ScriptIds: list of str
"""
self._ScriptIds = None
@property
def ScriptIds(self):
return self._ScriptIds
@ScriptIds.setter
def ScriptIds(self, ScriptIds):
self._ScriptIds = ScriptIds
def _deserialize(self, params):
self._ScriptIds = params.get("ScriptIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteScriptResponse(AbstractModel):
"""DeleteScript返回参数结构体
"""
def __init__(self):
r"""
:param _ScriptsAffected: 删除的脚本数量
:type ScriptsAffected: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._ScriptsAffected = None
self._RequestId = None
@property
def ScriptsAffected(self):
return self._ScriptsAffected
@ScriptsAffected.setter
def ScriptsAffected(self, ScriptsAffected):
self._ScriptsAffected = ScriptsAffected
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._ScriptsAffected = params.get("ScriptsAffected")
self._RequestId = params.get("RequestId")
class DeleteSparkAppRequest(AbstractModel):
"""DeleteSparkApp请求参数结构体
"""
def __init__(self):
r"""
:param _AppName: spark作业名
:type AppName: str
"""
self._AppName = None
@property
def AppName(self):
return self._AppName
@AppName.setter
def AppName(self, AppName):
self._AppName = AppName
def _deserialize(self, params):
self._AppName = params.get("AppName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteSparkAppResponse(AbstractModel):
"""DeleteSparkApp返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DeleteUserRequest(AbstractModel):
"""DeleteUser请求参数结构体
"""
def __init__(self):
r"""
:param _UserIds: 需要删除的用户的Id
:type UserIds: list of str
"""
self._UserIds = None
@property
def UserIds(self):
return self._UserIds
@UserIds.setter
def UserIds(self, UserIds):
self._UserIds = UserIds
def _deserialize(self, params):
self._UserIds = params.get("UserIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteUserResponse(AbstractModel):
"""DeleteUser返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DeleteUsersFromWorkGroupRequest(AbstractModel):
"""DeleteUsersFromWorkGroup请求参数结构体
"""
def __init__(self):
r"""
:param _AddInfo: 要删除的用户信息
:type AddInfo: :class:`tencentcloud.dlc.v20210125.models.UserIdSetOfWorkGroupId`
"""
self._AddInfo = None
@property
def AddInfo(self):
return self._AddInfo
@AddInfo.setter
def AddInfo(self, AddInfo):
self._AddInfo = AddInfo
def _deserialize(self, params):
if params.get("AddInfo") is not None:
self._AddInfo = UserIdSetOfWorkGroupId()
self._AddInfo._deserialize(params.get("AddInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteUsersFromWorkGroupResponse(AbstractModel):
"""DeleteUsersFromWorkGroup返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DeleteWorkGroupRequest(AbstractModel):
"""DeleteWorkGroup请求参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupIds: 要删除的工作组Id集合
:type WorkGroupIds: list of int
"""
self._WorkGroupIds = None
@property
def WorkGroupIds(self):
return self._WorkGroupIds
@WorkGroupIds.setter
def WorkGroupIds(self, WorkGroupIds):
self._WorkGroupIds = WorkGroupIds
def _deserialize(self, params):
self._WorkGroupIds = params.get("WorkGroupIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteWorkGroupResponse(AbstractModel):
"""DeleteWorkGroup返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DescribeDMSDatabaseRequest(AbstractModel):
"""DescribeDMSDatabase请求参数结构体
"""
def __init__(self):
r"""
:param _Name: 数据库名称
:type Name: str
:param _SchemaName: schema名称
:type SchemaName: str
:param _Pattern: 匹配规则
:type Pattern: str
"""
self._Name = None
self._SchemaName = None
self._Pattern = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Pattern(self):
return self._Pattern
@Pattern.setter
def Pattern(self, Pattern):
self._Pattern = Pattern
def _deserialize(self, params):
self._Name = params.get("Name")
self._SchemaName = params.get("SchemaName")
self._Pattern = params.get("Pattern")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDMSDatabaseResponse(AbstractModel):
"""DescribeDMSDatabase返回参数结构体
"""
def __init__(self):
r"""
:param _Name: 数据库名称
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
:param _SchemaName: schema名称
注意:此字段可能返回 null,表示取不到有效值。
:type SchemaName: str
:param _Location: 存储地址
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
:param _Asset: 数据对象
注意:此字段可能返回 null,表示取不到有效值。
:type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Name = None
self._SchemaName = None
self._Location = None
self._Asset = None
self._RequestId = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Name = params.get("Name")
self._SchemaName = params.get("SchemaName")
self._Location = params.get("Location")
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
self._RequestId = params.get("RequestId")
class DescribeDMSPartitionsRequest(AbstractModel):
"""DescribeDMSPartitions请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名
:type DatabaseName: str
:param _TableName: 表名称
:type TableName: str
:param _SchemaName: schema名称
:type SchemaName: str
:param _Name: 名称
:type Name: str
:param _Values: 单个分区名称,精准匹配
:type Values: list of str
:param _PartitionNames: 多个分区名称,精准匹配
:type PartitionNames: list of str
:param _PartValues: 多个分区字段的匹配,模糊匹配
:type PartValues: list of str
:param _Filter: 过滤SQL
:type Filter: str
:param _MaxParts: 最大分区数量
:type MaxParts: int
:param _Offset: 翻页跳过数量
:type Offset: int
:param _Limit: 页面数量
:type Limit: int
:param _Expression: 表达式
:type Expression: str
"""
self._DatabaseName = None
self._TableName = None
self._SchemaName = None
self._Name = None
self._Values = None
self._PartitionNames = None
self._PartValues = None
self._Filter = None
self._MaxParts = None
self._Offset = None
self._Limit = None
self._Expression = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Values(self):
return self._Values
@Values.setter
def Values(self, Values):
self._Values = Values
@property
def PartitionNames(self):
return self._PartitionNames
@PartitionNames.setter
def PartitionNames(self, PartitionNames):
self._PartitionNames = PartitionNames
@property
def PartValues(self):
return self._PartValues
@PartValues.setter
def PartValues(self, PartValues):
self._PartValues = PartValues
@property
def Filter(self):
return self._Filter
@Filter.setter
def Filter(self, Filter):
self._Filter = Filter
@property
def MaxParts(self):
return self._MaxParts
@MaxParts.setter
def MaxParts(self, MaxParts):
self._MaxParts = MaxParts
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Expression(self):
return self._Expression
@Expression.setter
def Expression(self, Expression):
self._Expression = Expression
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._TableName = params.get("TableName")
self._SchemaName = params.get("SchemaName")
self._Name = params.get("Name")
self._Values = params.get("Values")
self._PartitionNames = params.get("PartitionNames")
self._PartValues = params.get("PartValues")
self._Filter = params.get("Filter")
self._MaxParts = params.get("MaxParts")
self._Offset = params.get("Offset")
self._Limit = params.get("Limit")
self._Expression = params.get("Expression")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
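# Editorial sketch (assumes a DlcClient with a DescribeDMSPartitions method, as in
# tencentcloud.dlc.v20210125.dlc_client): pages through the partitions of one
# table using the Offset/Limit fields documented above.
def _example_list_partitions(client, database, table, page_size=100):
    """Yield every DMSPartition of `table` by paging DescribeDMSPartitions."""
    offset = 0
    while True:
        req = DescribeDMSPartitionsRequest()
        req.DatabaseName = database
        req.TableName = table
        req.Limit = page_size
        req.Offset = offset
        resp = client.DescribeDMSPartitions(req)
        for partition in resp.Partitions or []:
            yield partition
        offset += page_size
        if offset >= resp.Total:
            break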
class DescribeDMSPartitionsResponse(AbstractModel):
"""DescribeDMSPartitions返回参数结构体
"""
def __init__(self):
r"""
:param _Partitions: 分区信息
:type Partitions: list of DMSPartition
:param _Total: 总数
:type Total: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Partitions = None
self._Total = None
self._RequestId = None
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Total(self):
return self._Total
@Total.setter
def Total(self, Total):
self._Total = Total
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Total = params.get("Total")
self._RequestId = params.get("RequestId")
class DescribeDMSTableRequest(AbstractModel):
"""DescribeDMSTable请求参数结构体
"""
def __init__(self):
r"""
:param _DbName: 数据库名称
:type DbName: str
:param _SchemaName: 数据库schema名称
:type SchemaName: str
:param _Name: 表名称
:type Name: str
:param _Catalog: 数据目录
:type Catalog: str
:param _Keyword: 查询关键词
:type Keyword: str
:param _Pattern: 查询模式
:type Pattern: str
:param _Type: 表类型
:type Type: str
"""
self._DbName = None
self._SchemaName = None
self._Name = None
self._Catalog = None
self._Keyword = None
self._Pattern = None
self._Type = None
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Catalog(self):
return self._Catalog
@Catalog.setter
def Catalog(self, Catalog):
self._Catalog = Catalog
@property
def Keyword(self):
return self._Keyword
@Keyword.setter
def Keyword(self, Keyword):
self._Keyword = Keyword
@property
def Pattern(self):
return self._Pattern
@Pattern.setter
def Pattern(self, Pattern):
self._Pattern = Pattern
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
def _deserialize(self, params):
self._DbName = params.get("DbName")
self._SchemaName = params.get("SchemaName")
self._Name = params.get("Name")
self._Catalog = params.get("Catalog")
self._Keyword = params.get("Keyword")
self._Pattern = params.get("Pattern")
self._Type = params.get("Type")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDMSTableResponse(AbstractModel):
"""DescribeDMSTable返回参数结构体
"""
def __init__(self):
r"""
:param _Asset: 基础对象
注意:此字段可能返回 null,表示取不到有效值。
:type Asset: :class:`tencentcloud.dlc.v20210125.models.Asset`
:param _ViewOriginalText: 视图文本
注意:此字段可能返回 null,表示取不到有效值。
:type ViewOriginalText: str
:param _ViewExpandedText: 视图文本
注意:此字段可能返回 null,表示取不到有效值。
:type ViewExpandedText: str
:param _Retention: hive维护版本
注意:此字段可能返回 null,表示取不到有效值。
:type Retention: int
:param _Sds: 存储对象
注意:此字段可能返回 null,表示取不到有效值。
:type Sds: :class:`tencentcloud.dlc.v20210125.models.DMSSds`
:param _PartitionKeys: 分区列
注意:此字段可能返回 null,表示取不到有效值。
:type PartitionKeys: list of DMSColumn
:param _Partitions: 分区
注意:此字段可能返回 null,表示取不到有效值。
:type Partitions: list of DMSPartition
:param _Type: 表类型
注意:此字段可能返回 null,表示取不到有效值。
:type Type: str
:param _DbName: 数据库名称
注意:此字段可能返回 null,表示取不到有效值。
:type DbName: str
:param _SchemaName: Schame名称
注意:此字段可能返回 null,表示取不到有效值。
:type SchemaName: str
:param _StorageSize: 存储大小
注意:此字段可能返回 null,表示取不到有效值。
:type StorageSize: int
:param _RecordCount: 记录数量
注意:此字段可能返回 null,表示取不到有效值。
:type RecordCount: int
:param _LifeTime: 生命周期
注意:此字段可能返回 null,表示取不到有效值。
:type LifeTime: int
:param _LastAccessTime: 最后访问时间
注意:此字段可能返回 null,表示取不到有效值。
:type LastAccessTime: str
:param _DataUpdateTime: 数据更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type DataUpdateTime: str
:param _StructUpdateTime: 结构更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type StructUpdateTime: str
:param _Columns: 列
注意:此字段可能返回 null,表示取不到有效值。
:type Columns: list of DMSColumn
:param _Name: 表名称
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Asset = None
self._ViewOriginalText = None
self._ViewExpandedText = None
self._Retention = None
self._Sds = None
self._PartitionKeys = None
self._Partitions = None
self._Type = None
self._DbName = None
self._SchemaName = None
self._StorageSize = None
self._RecordCount = None
self._LifeTime = None
self._LastAccessTime = None
self._DataUpdateTime = None
self._StructUpdateTime = None
self._Columns = None
self._Name = None
self._RequestId = None
@property
def Asset(self):
return self._Asset
@Asset.setter
def Asset(self, Asset):
self._Asset = Asset
@property
def ViewOriginalText(self):
return self._ViewOriginalText
@ViewOriginalText.setter
def ViewOriginalText(self, ViewOriginalText):
self._ViewOriginalText = ViewOriginalText
@property
def ViewExpandedText(self):
return self._ViewExpandedText
@ViewExpandedText.setter
def ViewExpandedText(self, ViewExpandedText):
self._ViewExpandedText = ViewExpandedText
@property
def Retention(self):
return self._Retention
@Retention.setter
def Retention(self, Retention):
self._Retention = Retention
@property
def Sds(self):
return self._Sds
@Sds.setter
def Sds(self, Sds):
self._Sds = Sds
@property
def PartitionKeys(self):
return self._PartitionKeys
@PartitionKeys.setter
def PartitionKeys(self, PartitionKeys):
self._PartitionKeys = PartitionKeys
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def LifeTime(self):
return self._LifeTime
@LifeTime.setter
def LifeTime(self, LifeTime):
self._LifeTime = LifeTime
@property
def LastAccessTime(self):
return self._LastAccessTime
@LastAccessTime.setter
def LastAccessTime(self, LastAccessTime):
self._LastAccessTime = LastAccessTime
@property
def DataUpdateTime(self):
return self._DataUpdateTime
@DataUpdateTime.setter
def DataUpdateTime(self, DataUpdateTime):
self._DataUpdateTime = DataUpdateTime
@property
def StructUpdateTime(self):
return self._StructUpdateTime
@StructUpdateTime.setter
def StructUpdateTime(self, StructUpdateTime):
self._StructUpdateTime = StructUpdateTime
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Asset") is not None:
self._Asset = Asset()
self._Asset._deserialize(params.get("Asset"))
self._ViewOriginalText = params.get("ViewOriginalText")
self._ViewExpandedText = params.get("ViewExpandedText")
self._Retention = params.get("Retention")
if params.get("Sds") is not None:
self._Sds = DMSSds()
self._Sds._deserialize(params.get("Sds"))
if params.get("PartitionKeys") is not None:
self._PartitionKeys = []
for item in params.get("PartitionKeys"):
obj = DMSColumn()
obj._deserialize(item)
self._PartitionKeys.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = DMSPartition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Type = params.get("Type")
self._DbName = params.get("DbName")
self._SchemaName = params.get("SchemaName")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._LifeTime = params.get("LifeTime")
self._LastAccessTime = params.get("LastAccessTime")
self._DataUpdateTime = params.get("DataUpdateTime")
self._StructUpdateTime = params.get("StructUpdateTime")
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = DMSColumn()
obj._deserialize(item)
self._Columns.append(obj)
self._Name = params.get("Name")
self._RequestId = params.get("RequestId")
class DescribeDMSTablesRequest(AbstractModel):
"""DescribeDMSTables请求参数结构体
"""
def __init__(self):
r"""
:param _DbName: 数据库名称
:type DbName: str
:param _SchemaName: 数据库schema名称
:type SchemaName: str
:param _Name: 表名称
:type Name: str
:param _Catalog: 数据目录
:type Catalog: str
:param _Keyword: 查询关键词
:type Keyword: str
:param _Pattern: 查询模式
:type Pattern: str
:param _Type: 表类型
:type Type: str
:param _StartTime: 筛选参数:更新开始时间
:type StartTime: str
:param _EndTime: 筛选参数:更新结束时间
:type EndTime: str
:param _Limit: 分页参数
:type Limit: int
:param _Offset: 分页参数
:type Offset: int
:param _Sort: 排序字段:create_time:创建时间
:type Sort: str
:param _Asc: 排序字段:true:升序(默认),false:降序
:type Asc: bool
"""
self._DbName = None
self._SchemaName = None
self._Name = None
self._Catalog = None
self._Keyword = None
self._Pattern = None
self._Type = None
self._StartTime = None
self._EndTime = None
self._Limit = None
self._Offset = None
self._Sort = None
self._Asc = None
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Catalog(self):
return self._Catalog
@Catalog.setter
def Catalog(self, Catalog):
self._Catalog = Catalog
@property
def Keyword(self):
return self._Keyword
@Keyword.setter
def Keyword(self, Keyword):
self._Keyword = Keyword
@property
def Pattern(self):
return self._Pattern
@Pattern.setter
def Pattern(self, Pattern):
self._Pattern = Pattern
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Sort(self):
return self._Sort
@Sort.setter
def Sort(self, Sort):
self._Sort = Sort
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
def _deserialize(self, params):
self._DbName = params.get("DbName")
self._SchemaName = params.get("SchemaName")
self._Name = params.get("Name")
self._Catalog = params.get("Catalog")
self._Keyword = params.get("Keyword")
self._Pattern = params.get("Pattern")
self._Type = params.get("Type")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
self._Sort = params.get("Sort")
self._Asc = params.get("Asc")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDMSTablesResponse(AbstractModel):
"""DescribeDMSTables返回参数结构体
"""
def __init__(self):
r"""
:param _TableList: DMS元数据列表信息
注意:此字段可能返回 null,表示取不到有效值。
:type TableList: list of DMSTableInfo
:param _TotalCount: 统计值
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TableList = None
self._TotalCount = None
self._RequestId = None
@property
def TableList(self):
return self._TableList
@TableList.setter
def TableList(self, TableList):
self._TableList = TableList
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("TableList") is not None:
self._TableList = []
for item in params.get("TableList"):
obj = DMSTableInfo()
obj._deserialize(item)
self._TableList.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeDataEnginesRequest(AbstractModel):
"""DescribeDataEngines请求参数结构体
"""
def __init__(self):
r"""
:param _Offset: 偏移量,默认为0。
:type Offset: int
:param _Filters: 过滤类型,支持如下的过滤类型,传参Name应为以下其中一个, data-engine-name - String(数据引擎名称):engine-type - String(引擎类型:spark:spark 引擎,presto:presto引擎),state - String (数据引擎状态 -2已删除 -1失败 0初始化中 1挂起 2运行中 3准备删除 4删除中) , mode - String(计费模式 0共享模式 1按量计费 2包年包月) , create-time - String(创建时间,10位时间戳) message - String (描述信息),cluster-type - String (集群资源类型 spark_private/presto_private/presto_cu/spark_cu),engine-id - String(数据引擎ID),key-word - String(数据引擎名称或集群资源类型或描述信息模糊搜索),engine-exec-type - String(引擎执行任务类型,SQL/BATCH)
:type Filters: list of Filter
:param _SortBy: 排序字段,支持如下字段类型,create-time
:type SortBy: str
:param _Sorting: 排序方式,desc表示正序,asc表示反序, 默认为asc。
:type Sorting: str
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _DatasourceConnectionName: 已废弃,请使用DatasourceConnectionNameSet
:type DatasourceConnectionName: str
:param _ExcludePublicEngine: 是否不返回共享引擎,true不返回共享引擎,false可以返回共享引擎
:type ExcludePublicEngine: bool
:param _AccessTypes: 参数应该为引擎权限类型,有效类型:"USE", "MODIFY", "OPERATE", "MONITOR", "DELETE"
:type AccessTypes: list of str
:param _EngineExecType: 引擎执行任务类型,有效值:SQL/BATCH,默认为SQL
:type EngineExecType: str
:param _EngineType: 引擎类型,有效值:spark/presto
:type EngineType: str
:param _DatasourceConnectionNameSet: 网络配置列表,若传入该参数,则返回网络配置关联的计算引擎
:type DatasourceConnectionNameSet: list of str
"""
self._Offset = None
self._Filters = None
self._SortBy = None
self._Sorting = None
self._Limit = None
self._DatasourceConnectionName = None
self._ExcludePublicEngine = None
self._AccessTypes = None
self._EngineExecType = None
self._EngineType = None
self._DatasourceConnectionNameSet = None
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def ExcludePublicEngine(self):
return self._ExcludePublicEngine
@ExcludePublicEngine.setter
def ExcludePublicEngine(self, ExcludePublicEngine):
self._ExcludePublicEngine = ExcludePublicEngine
@property
def AccessTypes(self):
return self._AccessTypes
@AccessTypes.setter
def AccessTypes(self, AccessTypes):
self._AccessTypes = AccessTypes
@property
def EngineExecType(self):
return self._EngineExecType
@EngineExecType.setter
def EngineExecType(self, EngineExecType):
self._EngineExecType = EngineExecType
@property
def EngineType(self):
return self._EngineType
@EngineType.setter
def EngineType(self, EngineType):
self._EngineType = EngineType
@property
def DatasourceConnectionNameSet(self):
return self._DatasourceConnectionNameSet
@DatasourceConnectionNameSet.setter
def DatasourceConnectionNameSet(self, DatasourceConnectionNameSet):
self._DatasourceConnectionNameSet = DatasourceConnectionNameSet
def _deserialize(self, params):
self._Offset = params.get("Offset")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
self._Limit = params.get("Limit")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._ExcludePublicEngine = params.get("ExcludePublicEngine")
self._AccessTypes = params.get("AccessTypes")
self._EngineExecType = params.get("EngineExecType")
self._EngineType = params.get("EngineType")
self._DatasourceConnectionNameSet = params.get("DatasourceConnectionNameSet")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
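# Editorial sketch (assumes a DlcClient with a DescribeDataEngines method): lists
# the running Spark engines by combining the EngineType field with the `state`
# filter documented above (state 2 means "running").
def _example_list_running_spark_engines(client):
    """Return DataEngineInfo objects for running Spark engines."""
    state_filter = Filter()
    state_filter.Name = "state"
    state_filter.Values = ["2"]  # 2: running
    req = DescribeDataEnginesRequest()
    req.EngineType = "spark"
    req.Filters = [state_filter]
    resp = client.DescribeDataEngines(req)
    return resp.DataEngines or []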
class DescribeDataEnginesResponse(AbstractModel):
"""DescribeDataEngines返回参数结构体
"""
def __init__(self):
r"""
:param _DataEngines: 数据引擎列表
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngines: list of DataEngineInfo
:param _TotalCount: 总条数
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DataEngines = None
self._TotalCount = None
self._RequestId = None
@property
def DataEngines(self):
return self._DataEngines
@DataEngines.setter
def DataEngines(self, DataEngines):
self._DataEngines = DataEngines
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("DataEngines") is not None:
self._DataEngines = []
for item in params.get("DataEngines"):
obj = DataEngineInfo()
obj._deserialize(item)
self._DataEngines.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeDatabasesRequest(AbstractModel):
"""DescribeDatabases请求参数结构体
"""
def __init__(self):
r"""
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _Offset: 数据偏移量,从0开始,默认为0。
:type Offset: int
:param _KeyWord: 模糊匹配,库名关键字。
:type KeyWord: str
:param _DatasourceConnectionName: 数据源唯名称,该名称可以通过DescribeDatasourceConnection接口查询到。默认为DataLakeCatalog
:type DatasourceConnectionName: str
:param _Sort: 排序字段,CreateTime:创建时间,Name:数据库名称
:type Sort: str
:param _Asc: 排序类型:false:降序(默认)、true:升序
:type Asc: bool
"""
self._Limit = None
self._Offset = None
self._KeyWord = None
self._DatasourceConnectionName = None
self._Sort = None
self._Asc = None
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def KeyWord(self):
return self._KeyWord
@KeyWord.setter
def KeyWord(self, KeyWord):
self._KeyWord = KeyWord
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def Sort(self):
return self._Sort
@Sort.setter
def Sort(self, Sort):
self._Sort = Sort
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
def _deserialize(self, params):
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
self._KeyWord = params.get("KeyWord")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._Sort = params.get("Sort")
self._Asc = params.get("Asc")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
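# Editorial sketch (assumes a DlcClient with a DescribeDatabases method): collects
# every database by paging with the Limit/Offset fields documented above.
def _example_list_all_databases(client, page_size=100):
    """Return every DatabaseResponseInfo visible to the caller."""
    databases = []
    offset = 0
    while True:
        req = DescribeDatabasesRequest()
        req.Limit = page_size
        req.Offset = offset
        resp = client.DescribeDatabases(req)
        databases.extend(resp.DatabaseList or [])
        offset += page_size
        if offset >= resp.TotalCount:
            return databases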
class DescribeDatabasesResponse(AbstractModel):
"""DescribeDatabases返回参数结构体
"""
def __init__(self):
r"""
:param _DatabaseList: 数据库对象列表。
:type DatabaseList: list of DatabaseResponseInfo
:param _TotalCount: 实例总数。
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DatabaseList = None
self._TotalCount = None
self._RequestId = None
@property
def DatabaseList(self):
return self._DatabaseList
@DatabaseList.setter
def DatabaseList(self, DatabaseList):
self._DatabaseList = DatabaseList
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("DatabaseList") is not None:
self._DatabaseList = []
for item in params.get("DatabaseList"):
obj = DatabaseResponseInfo()
obj._deserialize(item)
self._DatabaseList.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeEngineUsageInfoRequest(AbstractModel):
"""DescribeEngineUsageInfo请求参数结构体
"""
def __init__(self):
r"""
:param _DataEngineId: 数据引擎ID
:type DataEngineId: str
"""
self._DataEngineId = None
@property
def DataEngineId(self):
return self._DataEngineId
@DataEngineId.setter
def DataEngineId(self, DataEngineId):
self._DataEngineId = DataEngineId
def _deserialize(self, params):
self._DataEngineId = params.get("DataEngineId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeEngineUsageInfoResponse(AbstractModel):
"""DescribeEngineUsageInfo返回参数结构体
"""
def __init__(self):
r"""
:param _Total: 集群总规格
:type Total: int
:param _Used: 已占用集群规格
:type Used: int
:param _Available: 剩余集群规格
:type Available: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Total = None
self._Used = None
self._Available = None
self._RequestId = None
@property
def Total(self):
return self._Total
@Total.setter
def Total(self, Total):
self._Total = Total
@property
def Used(self):
return self._Used
@Used.setter
def Used(self, Used):
self._Used = Used
@property
def Available(self):
return self._Available
@Available.setter
def Available(self, Available):
self._Available = Available
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Total = params.get("Total")
self._Used = params.get("Used")
self._Available = params.get("Available")
self._RequestId = params.get("RequestId")
class DescribeForbiddenTableProRequest(AbstractModel):
"""DescribeForbiddenTablePro请求参数结构体
"""
class DescribeForbiddenTableProResponse(AbstractModel):
"""DescribeForbiddenTablePro返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DescribeLakeFsDirSummaryRequest(AbstractModel):
"""DescribeLakeFsDirSummary请求参数结构体
"""
class DescribeLakeFsDirSummaryResponse(AbstractModel):
"""DescribeLakeFsDirSummary返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DescribeLakeFsInfoRequest(AbstractModel):
"""DescribeLakeFsInfo请求参数结构体
"""
class DescribeLakeFsInfoResponse(AbstractModel):
"""DescribeLakeFsInfo返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DescribeNotebookSessionLogRequest(AbstractModel):
"""DescribeNotebookSessionLog请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _Limit: 分页参数,默认200
:type Limit: int
:param _Offset: 分页参数,默认0
:type Offset: int
"""
self._SessionId = None
self._Limit = None
self._Offset = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionLogResponse(AbstractModel):
"""DescribeNotebookSessionLog返回参数结构体
"""
def __init__(self):
r"""
:param _Logs: 日志信息,默认获取最新的200条
:type Logs: list of str
:param _Limit: 分页参数,默认200
:type Limit: int
:param _Offset: 分页参数,默认0
:type Offset: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Logs = None
self._Limit = None
self._Offset = None
self._RequestId = None
@property
def Logs(self):
return self._Logs
@Logs.setter
def Logs(self, Logs):
self._Logs = Logs
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Logs = params.get("Logs")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
self._RequestId = params.get("RequestId")
class DescribeNotebookSessionRequest(AbstractModel):
"""DescribeNotebookSession请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
"""
self._SessionId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionResponse(AbstractModel):
"""DescribeNotebookSession返回参数结构体
"""
def __init__(self):
r"""
:param _Session: Session详情信息
:type Session: :class:`tencentcloud.dlc.v20210125.models.NotebookSessionInfo`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Session = None
self._RequestId = None
@property
def Session(self):
return self._Session
@Session.setter
def Session(self, Session):
self._Session = Session
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Session") is not None:
self._Session = NotebookSessionInfo()
self._Session._deserialize(params.get("Session"))
self._RequestId = params.get("RequestId")
class DescribeNotebookSessionStatementRequest(AbstractModel):
"""DescribeNotebookSessionStatement请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _StatementId: Session Statement唯一标识
:type StatementId: str
:param _TaskId: 任务唯一标识
:type TaskId: str
"""
self._SessionId = None
self._StatementId = None
self._TaskId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def StatementId(self):
return self._StatementId
@StatementId.setter
def StatementId(self, StatementId):
self._StatementId = StatementId
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._StatementId = params.get("StatementId")
self._TaskId = params.get("TaskId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionStatementResponse(AbstractModel):
"""DescribeNotebookSessionStatement返回参数结构体
"""
def __init__(self):
r"""
:param _NotebookSessionStatement: Session Statement详情
:type NotebookSessionStatement: :class:`tencentcloud.dlc.v20210125.models.NotebookSessionStatementInfo`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._NotebookSessionStatement = None
self._RequestId = None
@property
def NotebookSessionStatement(self):
return self._NotebookSessionStatement
@NotebookSessionStatement.setter
def NotebookSessionStatement(self, NotebookSessionStatement):
self._NotebookSessionStatement = NotebookSessionStatement
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("NotebookSessionStatement") is not None:
self._NotebookSessionStatement = NotebookSessionStatementInfo()
self._NotebookSessionStatement._deserialize(params.get("NotebookSessionStatement"))
self._RequestId = params.get("RequestId")
class DescribeNotebookSessionStatementSqlResultRequest(AbstractModel):
"""DescribeNotebookSessionStatementSqlResult请求参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务唯一ID
:type TaskId: str
:param _MaxResults: 返回结果的最大行数,范围0~1000,默认为1000.
:type MaxResults: int
:param _NextToken: 上一次请求响应返回的分页信息。第一次可以不带,从头开始返回数据,每次返回MaxResults字段设置的数据量。
:type NextToken: str
"""
self._TaskId = None
self._MaxResults = None
self._NextToken = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def MaxResults(self):
return self._MaxResults
@MaxResults.setter
def MaxResults(self, MaxResults):
self._MaxResults = MaxResults
@property
def NextToken(self):
return self._NextToken
@NextToken.setter
def NextToken(self, NextToken):
self._NextToken = NextToken
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._MaxResults = params.get("MaxResults")
self._NextToken = params.get("NextToken")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionStatementSqlResultResponse(AbstractModel):
"""DescribeNotebookSessionStatementSqlResult返回参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务Id
:type TaskId: str
:param _ResultSet: 结果数据
:type ResultSet: str
:param _ResultSchema: schema
:type ResultSchema: list of Column
:param _NextToken: 分页信息
注意:此字段可能返回 null,表示取不到有效值。
:type NextToken: str
:param _OutputPath: 存储结果地址
注意:此字段可能返回 null,表示取不到有效值。
:type OutputPath: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskId = None
self._ResultSet = None
self._ResultSchema = None
self._NextToken = None
self._OutputPath = None
self._RequestId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def ResultSet(self):
return self._ResultSet
@ResultSet.setter
def ResultSet(self, ResultSet):
self._ResultSet = ResultSet
@property
def ResultSchema(self):
return self._ResultSchema
@ResultSchema.setter
def ResultSchema(self, ResultSchema):
self._ResultSchema = ResultSchema
@property
def NextToken(self):
return self._NextToken
@NextToken.setter
def NextToken(self, NextToken):
self._NextToken = NextToken
@property
def OutputPath(self):
return self._OutputPath
@OutputPath.setter
def OutputPath(self, OutputPath):
self._OutputPath = OutputPath
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._ResultSet = params.get("ResultSet")
if params.get("ResultSchema") is not None:
self._ResultSchema = []
for item in params.get("ResultSchema"):
obj = Column()
obj._deserialize(item)
self._ResultSchema.append(obj)
self._NextToken = params.get("NextToken")
self._OutputPath = params.get("OutputPath")
self._RequestId = params.get("RequestId")
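# Editorial sketch (assumes a DlcClient with a DescribeNotebookSessionStatementSqlResult
# method): drains a statement's SQL result by following NextToken, as described in
# the request documentation above.
def _example_fetch_all_sql_chunks(client, task_id):
    """Collect every ResultSet chunk for `task_id` by paging with NextToken."""
    chunks = []
    next_token = None
    while True:
        req = DescribeNotebookSessionStatementSqlResultRequest()
        req.TaskId = task_id
        req.MaxResults = 1000
        req.NextToken = next_token
        resp = client.DescribeNotebookSessionStatementSqlResult(req)
        chunks.append(resp.ResultSet)
        next_token = resp.NextToken
        if not next_token:
            return chunks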
class DescribeNotebookSessionStatementsRequest(AbstractModel):
"""DescribeNotebookSessionStatements请求参数结构体
"""
def __init__(self):
r"""
:param _SessionId: Session唯一标识
:type SessionId: str
:param _BatchId: 批任务id
:type BatchId: str
"""
self._SessionId = None
self._BatchId = None
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
self._SessionId = params.get("SessionId")
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionStatementsResponse(AbstractModel):
"""DescribeNotebookSessionStatements返回参数结构体
"""
def __init__(self):
r"""
:param _NotebookSessionStatements: Session Statement详情
:type NotebookSessionStatements: :class:`tencentcloud.dlc.v20210125.models.NotebookSessionStatementBatchInformation`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._NotebookSessionStatements = None
self._RequestId = None
@property
def NotebookSessionStatements(self):
return self._NotebookSessionStatements
@NotebookSessionStatements.setter
def NotebookSessionStatements(self, NotebookSessionStatements):
self._NotebookSessionStatements = NotebookSessionStatements
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("NotebookSessionStatements") is not None:
self._NotebookSessionStatements = NotebookSessionStatementBatchInformation()
self._NotebookSessionStatements._deserialize(params.get("NotebookSessionStatements"))
self._RequestId = params.get("RequestId")
class DescribeNotebookSessionsRequest(AbstractModel):
"""DescribeNotebookSessions请求参数结构体
"""
def __init__(self):
r"""
:param _DataEngineName: DLC Spark作业引擎名称
:type DataEngineName: str
:param _State: Session状态,包含:not_started(未启动)、starting(已启动)、idle(等待输入)、busy(正在运行statement)、shutting_down(停止)、error(异常)、dead(已退出)、killed(被杀死)、success(正常停止)
:type State: list of str
:param _SortFields: 排序字段(默认按创建时间)
:type SortFields: list of str
:param _Asc: 排序字段:true:升序、false:降序(默认)
:type Asc: bool
:param _Limit: 分页参数,默认10
:type Limit: int
:param _Offset: 分页参数,默认0
:type Offset: int
"""
self._DataEngineName = None
self._State = None
self._SortFields = None
self._Asc = None
self._Limit = None
self._Offset = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def SortFields(self):
return self._SortFields
@SortFields.setter
def SortFields(self, SortFields):
self._SortFields = SortFields
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._State = params.get("State")
self._SortFields = params.get("SortFields")
self._Asc = params.get("Asc")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeNotebookSessionsResponse(AbstractModel):
"""DescribeNotebookSessions返回参数结构体
"""
def __init__(self):
r"""
:param _TotalElements: session总数量
:type TotalElements: int
:param _TotalPages: 总页数
:type TotalPages: int
:param _Page: 当前页码
:type Page: int
:param _Size: 当前页数量
:type Size: int
:param _Sessions: session列表信息
:type Sessions: list of NotebookSessions
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TotalElements = None
self._TotalPages = None
self._Page = None
self._Size = None
self._Sessions = None
self._RequestId = None
@property
def TotalElements(self):
return self._TotalElements
@TotalElements.setter
def TotalElements(self, TotalElements):
self._TotalElements = TotalElements
@property
def TotalPages(self):
return self._TotalPages
@TotalPages.setter
def TotalPages(self, TotalPages):
self._TotalPages = TotalPages
@property
def Page(self):
return self._Page
@Page.setter
def Page(self, Page):
self._Page = Page
@property
def Size(self):
return self._Size
@Size.setter
def Size(self, Size):
self._Size = Size
@property
def Sessions(self):
return self._Sessions
@Sessions.setter
def Sessions(self, Sessions):
self._Sessions = Sessions
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TotalElements = params.get("TotalElements")
self._TotalPages = params.get("TotalPages")
self._Page = params.get("Page")
self._Size = params.get("Size")
if params.get("Sessions") is not None:
self._Sessions = []
for item in params.get("Sessions"):
obj = NotebookSessions()
obj._deserialize(item)
self._Sessions.append(obj)
self._RequestId = params.get("RequestId")
class DescribeResultDownloadRequest(AbstractModel):
"""DescribeResultDownload请求参数结构体
"""
def __init__(self):
r"""
:param _DownloadId: 查询任务Id
:type DownloadId: str
"""
self._DownloadId = None
@property
def DownloadId(self):
return self._DownloadId
@DownloadId.setter
def DownloadId(self, DownloadId):
self._DownloadId = DownloadId
def _deserialize(self, params):
self._DownloadId = params.get("DownloadId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeResultDownloadResponse(AbstractModel):
"""DescribeResultDownload返回参数结构体
"""
def __init__(self):
r"""
:param _Path: 下载文件路径
注意:此字段可能返回 null,表示取不到有效值。
:type Path: str
:param _Status: 任务状态 init | queue | format | compress | success| timeout | error
:type Status: str
:param _Reason: 任务异常原因
注意:此字段可能返回 null,表示取不到有效值。
:type Reason: str
:param _SecretId: 临时AK
注意:此字段可能返回 null,表示取不到有效值。
:type SecretId: str
:param _SecretKey: 临时SK
注意:此字段可能返回 null,表示取不到有效值。
:type SecretKey: str
:param _Token: 临时Token
注意:此字段可能返回 null,表示取不到有效值。
:type Token: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Path = None
self._Status = None
self._Reason = None
self._SecretId = None
self._SecretKey = None
self._Token = None
self._RequestId = None
@property
def Path(self):
return self._Path
@Path.setter
def Path(self, Path):
self._Path = Path
@property
def Status(self):
return self._Status
@Status.setter
def Status(self, Status):
self._Status = Status
@property
def Reason(self):
return self._Reason
@Reason.setter
def Reason(self, Reason):
self._Reason = Reason
@property
def SecretId(self):
return self._SecretId
@SecretId.setter
def SecretId(self, SecretId):
self._SecretId = SecretId
@property
def SecretKey(self):
return self._SecretKey
@SecretKey.setter
def SecretKey(self, SecretKey):
self._SecretKey = SecretKey
@property
def Token(self):
return self._Token
@Token.setter
def Token(self, Token):
self._Token = Token
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Path = params.get("Path")
self._Status = params.get("Status")
self._Reason = params.get("Reason")
self._SecretId = params.get("SecretId")
self._SecretKey = params.get("SecretKey")
self._Token = params.get("Token")
self._RequestId = params.get("RequestId")
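# Editorial sketch (assumes a DlcClient with a DescribeResultDownload method):
# polls a download task until it leaves the in-flight states listed in the Status
# documentation above, then hands back the path and temporary credentials.
def _example_wait_for_download(client, download_id, interval_s=2.0):
    """Poll DescribeResultDownload until the task succeeds, times out, or errors."""
    import time
    while True:
        req = DescribeResultDownloadRequest()
        req.DownloadId = download_id
        resp = client.DescribeResultDownload(req)
        if resp.Status in ("success", "timeout", "error"):
            return resp
        time.sleep(interval_s)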
class DescribeScriptsRequest(AbstractModel):
"""DescribeScripts请求参数结构体
"""
def __init__(self):
r"""
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _Offset: 偏移量,默认为0。
:type Offset: int
:param _SortBy: 按字段排序,支持如下字段类型,update-time
:type SortBy: str
:param _Sorting: 排序方式,desc表示正序,asc表示反序,默认asc
:type Sorting: str
:param _Filters: 过滤条件,如下支持的过滤类型,传参Name应为其一
script-id - String - (过滤条件)script-id取值形如:157de0d1-26b4-4df2-a2d0-b64afc406c25。
script-name-keyword - String - (过滤条件)数据表名称,形如:script-test。
:type Filters: list of Filter
"""
self._Limit = None
self._Offset = None
self._SortBy = None
self._Sorting = None
self._Filters = None
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
def _deserialize(self, params):
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeScriptsResponse(AbstractModel):
"""DescribeScripts返回参数结构体
"""
def __init__(self):
r"""
:param _Scripts: Script列表
注意:此字段可能返回 null,表示取不到有效值。
:type Scripts: list of Script
:param _TotalCount: 实例总数
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Scripts = None
self._TotalCount = None
self._RequestId = None
@property
def Scripts(self):
return self._Scripts
@Scripts.setter
def Scripts(self, Scripts):
self._Scripts = Scripts
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Scripts") is not None:
self._Scripts = []
for item in params.get("Scripts"):
obj = Script()
obj._deserialize(item)
self._Scripts.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeSparkAppJobRequest(AbstractModel):
"""DescribeSparkAppJob请求参数结构体
"""
def __init__(self):
r"""
:param _JobId: spark作业Id,与JobName同时存在时,JobName无效,JobId与JobName至少存在一个
:type JobId: str
:param _JobName: spark作业名
:type JobName: str
"""
self._JobId = None
self._JobName = None
@property
def JobId(self):
return self._JobId
@JobId.setter
def JobId(self, JobId):
self._JobId = JobId
@property
def JobName(self):
return self._JobName
@JobName.setter
def JobName(self, JobName):
self._JobName = JobName
def _deserialize(self, params):
self._JobId = params.get("JobId")
self._JobName = params.get("JobName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSparkAppJobResponse(AbstractModel):
"""DescribeSparkAppJob返回参数结构体
"""
def __init__(self):
r"""
:param _Job: spark作业详情
注意:此字段可能返回 null,表示取不到有效值。
:type Job: :class:`tencentcloud.dlc.v20210125.models.SparkJobInfo`
:param _IsExists: 查询的spark作业是否存在
:type IsExists: bool
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Job = None
self._IsExists = None
self._RequestId = None
@property
def Job(self):
return self._Job
@Job.setter
def Job(self, Job):
self._Job = Job
@property
def IsExists(self):
return self._IsExists
@IsExists.setter
def IsExists(self, IsExists):
self._IsExists = IsExists
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Job") is not None:
self._Job = SparkJobInfo()
self._Job._deserialize(params.get("Job"))
self._IsExists = params.get("IsExists")
self._RequestId = params.get("RequestId")
class DescribeSparkAppJobsRequest(AbstractModel):
"""DescribeSparkAppJobs请求参数结构体
"""
def __init__(self):
r"""
:param _SortBy: 返回结果按照该字段排序
:type SortBy: str
:param _Sorting: 正序或者倒序,例如:desc
:type Sorting: str
:param _Filters: 过滤条件,如下支持的过滤类型,传参Name应为其一:spark-job-name(作业名称),spark-job-id(作业id),spark-app-type(作业类型,1:批任务,2:流任务,4:SQL作业),user-name(创建人),key-word(作业名称或ID关键词模糊搜索)
:type Filters: list of Filter
:param _StartTime: 更新时间起始点,支持格式:yyyy-MM-dd HH:mm:ss
:type StartTime: str
:param _EndTime: 更新时间截止点,支持格式:yyyy-MM-dd HH:mm:ss
:type EndTime: str
:param _Offset: 查询列表偏移量, 默认值0
:type Offset: int
:param _Limit: 查询列表限制数量, 默认值100
:type Limit: int
"""
self._SortBy = None
self._Sorting = None
self._Filters = None
self._StartTime = None
self._EndTime = None
self._Offset = None
self._Limit = None
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
def _deserialize(self, params):
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
self._Offset = params.get("Offset")
self._Limit = params.get("Limit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSparkAppJobsResponse(AbstractModel):
"""DescribeSparkAppJobs返回参数结构体
"""
def __init__(self):
r"""
:param _SparkAppJobs: spark作业列表详情
:type SparkAppJobs: list of SparkJobInfo
:param _TotalCount: spark作业总数
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._SparkAppJobs = None
self._TotalCount = None
self._RequestId = None
@property
def SparkAppJobs(self):
return self._SparkAppJobs
@SparkAppJobs.setter
def SparkAppJobs(self, SparkAppJobs):
self._SparkAppJobs = SparkAppJobs
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("SparkAppJobs") is not None:
self._SparkAppJobs = []
for item in params.get("SparkAppJobs"):
obj = SparkJobInfo()
obj._deserialize(item)
self._SparkAppJobs.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeSparkAppTasksRequest(AbstractModel):
"""DescribeSparkAppTasks请求参数结构体
"""
def __init__(self):
r"""
:param _JobId: spark作业Id
:type JobId: str
:param _Offset: 分页查询偏移量
:type Offset: int
:param _Limit: 分页查询Limit
:type Limit: int
:param _TaskId: 执行实例id
:type TaskId: str
:param _StartTime: 更新时间起始点,支持格式:yyyy-MM-dd HH:mm:ss
:type StartTime: str
:param _EndTime: 更新时间截止点,支持格式:yyyy-MM-dd HH:mm:ss
:type EndTime: str
:param _Filters: 按照该参数过滤,支持task-state
:type Filters: list of Filter
"""
self._JobId = None
self._Offset = None
self._Limit = None
self._TaskId = None
self._StartTime = None
self._EndTime = None
self._Filters = None
@property
def JobId(self):
return self._JobId
@JobId.setter
def JobId(self, JobId):
self._JobId = JobId
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
def _deserialize(self, params):
self._JobId = params.get("JobId")
self._Offset = params.get("Offset")
self._Limit = params.get("Limit")
self._TaskId = params.get("TaskId")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSparkAppTasksResponse(AbstractModel):
"""DescribeSparkAppTasks返回参数结构体
"""
def __init__(self):
r"""
:param _Tasks: 任务结果(该字段已废弃)
注意:此字段可能返回 null,表示取不到有效值。
:type Tasks: :class:`tencentcloud.dlc.v20210125.models.TaskResponseInfo`
:param _TotalCount: 任务总数
:type TotalCount: int
:param _SparkAppTasks: 任务结果列表
注意:此字段可能返回 null,表示取不到有效值。
:type SparkAppTasks: list of TaskResponseInfo
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Tasks = None
self._TotalCount = None
self._SparkAppTasks = None
self._RequestId = None
@property
def Tasks(self):
return self._Tasks
@Tasks.setter
def Tasks(self, Tasks):
self._Tasks = Tasks
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def SparkAppTasks(self):
return self._SparkAppTasks
@SparkAppTasks.setter
def SparkAppTasks(self, SparkAppTasks):
self._SparkAppTasks = SparkAppTasks
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Tasks") is not None:
self._Tasks = TaskResponseInfo()
self._Tasks._deserialize(params.get("Tasks"))
self._TotalCount = params.get("TotalCount")
if params.get("SparkAppTasks") is not None:
self._SparkAppTasks = []
for item in params.get("SparkAppTasks"):
obj = TaskResponseInfo()
obj._deserialize(item)
self._SparkAppTasks.append(obj)
self._RequestId = params.get("RequestId")
class DescribeSparkSessionBatchSqlLogRequest(AbstractModel):
"""DescribeSparkSessionBatchSqlLog请求参数结构体
"""
def __init__(self):
r"""
:param _BatchId: SparkSQL唯一标识
:type BatchId: str
"""
self._BatchId = None
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSparkSessionBatchSqlLogResponse(AbstractModel):
"""DescribeSparkSessionBatchSqlLog返回参数结构体
"""
def __init__(self):
r"""
:param _State: 状态:0:初始化、1:成功、2:失败、3:取消、4:异常;
:type State: int
:param _LogSet: 日志信息列表
注意:此字段可能返回 null,表示取不到有效值。
:type LogSet: list of SparkSessionBatchLog
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._State = None
self._LogSet = None
self._RequestId = None
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def LogSet(self):
return self._LogSet
@LogSet.setter
def LogSet(self, LogSet):
self._LogSet = LogSet
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._State = params.get("State")
if params.get("LogSet") is not None:
self._LogSet = []
for item in params.get("LogSet"):
obj = SparkSessionBatchLog()
obj._deserialize(item)
self._LogSet.append(obj)
self._RequestId = params.get("RequestId")
class DescribeStoreLocationRequest(AbstractModel):
"""DescribeStoreLocation请求参数结构体
"""
class DescribeStoreLocationResponse(AbstractModel):
"""DescribeStoreLocation返回参数结构体
"""
def __init__(self):
r"""
:param _StoreLocation: 返回用户设置的结果存储位置路径,如果未设置则返回空字符串:""
注意:此字段可能返回 null,表示取不到有效值。
:type StoreLocation: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._StoreLocation = None
self._RequestId = None
@property
def StoreLocation(self):
return self._StoreLocation
@StoreLocation.setter
def StoreLocation(self, StoreLocation):
self._StoreLocation = StoreLocation
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._StoreLocation = params.get("StoreLocation")
self._RequestId = params.get("RequestId")
class DescribeTableRequest(AbstractModel):
"""DescribeTable请求参数结构体
"""
def __init__(self):
r"""
:param _TableName: 查询对象表名称
:type TableName: str
:param _DatabaseName: 查询表所在的数据库名称。
:type DatabaseName: str
:param _DatasourceConnectionName: 查询表所在的数据源名称
:type DatasourceConnectionName: str
"""
self._TableName = None
self._DatabaseName = None
self._DatasourceConnectionName = None
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
def _deserialize(self, params):
self._TableName = params.get("TableName")
self._DatabaseName = params.get("DatabaseName")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTableResponse(AbstractModel):
"""DescribeTable返回参数结构体
"""
def __init__(self):
r"""
:param _Table: 数据表对象
:type Table: :class:`tencentcloud.dlc.v20210125.models.TableResponseInfo`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Table = None
self._RequestId = None
@property
def Table(self):
return self._Table
@Table.setter
def Table(self, Table):
self._Table = Table
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Table") is not None:
self._Table = TableResponseInfo()
self._Table._deserialize(params.get("Table"))
self._RequestId = params.get("RequestId")
class DescribeTablesRequest(AbstractModel):
"""DescribeTables请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 列出该数据库下所属数据表。
:type DatabaseName: str
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _Offset: 数据偏移量,从0开始,默认为0。
:type Offset: int
:param _Filters: 过滤条件,如下支持的过滤类型,传参Name应为其一
table-name - String - (过滤条件)数据表名称,形如:table-001。
table-id - String - (过滤条件)table id形如:12342。
:type Filters: list of Filter
:param _DatasourceConnectionName: 指定查询的数据源名称,默认为DataLakeCatalog
:type DatasourceConnectionName: str
:param _StartTime: 起始时间:用于对更新时间的筛选,格式为yyyy-mm-dd HH:MM:SS
:type StartTime: str
:param _EndTime: 终止时间:用于对更新时间的筛选,格式为yyyy-mm-dd HH:MM:SS
:type EndTime: str
:param _Sort: 排序字段,支持:CreateTime(创建时间)、UpdateTime(更新时间)、StorageSize(存储空间)、RecordCount(行数)、Name(表名称)(不传则默认按name升序)
:type Sort: str
:param _Asc: 排序字段,false:降序(默认);true:升序
:type Asc: bool
:param _TableType: table type,表类型查询,可用值:EXTERNAL_TABLE,INDEX_TABLE,MANAGED_TABLE,MATERIALIZED_VIEW,TABLE,VIEW,VIRTUAL_VIEW
:type TableType: str
:param _TableFormat: 筛选字段-表格式:不传(默认)为查全部;LAKEFS:托管表;ICEBERG:非托管iceberg表;HIVE:非托管hive表;OTHER:非托管其它;
:type TableFormat: str
"""
self._DatabaseName = None
self._Limit = None
self._Offset = None
self._Filters = None
self._DatasourceConnectionName = None
self._StartTime = None
self._EndTime = None
self._Sort = None
self._Asc = None
self._TableType = None
self._TableFormat = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Sort(self):
return self._Sort
@Sort.setter
def Sort(self, Sort):
self._Sort = Sort
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
@property
def TableType(self):
return self._TableType
@TableType.setter
def TableType(self, TableType):
self._TableType = TableType
@property
def TableFormat(self):
return self._TableFormat
@TableFormat.setter
def TableFormat(self, TableFormat):
self._TableFormat = TableFormat
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
self._Sort = params.get("Sort")
self._Asc = params.get("Asc")
self._TableType = params.get("TableType")
self._TableFormat = params.get("TableFormat")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTablesResponse(AbstractModel):
"""DescribeTables返回参数结构体
"""
def __init__(self):
r"""
:param _TableList: 数据表对象列表。
:type TableList: list of TableResponseInfo
:param _TotalCount: 实例总数。
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TableList = None
self._TotalCount = None
self._RequestId = None
@property
def TableList(self):
return self._TableList
@TableList.setter
def TableList(self, TableList):
self._TableList = TableList
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("TableList") is not None:
self._TableList = []
for item in params.get("TableList"):
obj = TableResponseInfo()
obj._deserialize(item)
self._TableList.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeTaskResultRequest(AbstractModel):
"""DescribeTaskResult请求参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 任务唯一ID
:type TaskId: str
:param _NextToken: 上一次请求响应返回的分页信息。第一次可以不带,从头开始返回数据,每次返回MaxResults字段设置的数据量。
:type NextToken: str
:param _MaxResults: 返回结果的最大行数,范围0~1000,默认为1000.
:type MaxResults: int
"""
self._TaskId = None
self._NextToken = None
self._MaxResults = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def NextToken(self):
return self._NextToken
@NextToken.setter
def NextToken(self, NextToken):
self._NextToken = NextToken
@property
def MaxResults(self):
return self._MaxResults
@MaxResults.setter
def MaxResults(self, MaxResults):
self._MaxResults = MaxResults
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._NextToken = params.get("NextToken")
self._MaxResults = params.get("MaxResults")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTaskResultResponse(AbstractModel):
"""DescribeTaskResult返回参数结构体
"""
def __init__(self):
r"""
:param _TaskInfo: 查询的任务信息,返回为空表示输入任务ID对应的任务不存在。只有当任务状态为成功(2)的时候,才会返回任务的结果。
注意:此字段可能返回 null,表示取不到有效值。
:type TaskInfo: :class:`tencentcloud.dlc.v20210125.models.TaskResultInfo`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskInfo = None
self._RequestId = None
@property
def TaskInfo(self):
return self._TaskInfo
@TaskInfo.setter
def TaskInfo(self, TaskInfo):
self._TaskInfo = TaskInfo
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("TaskInfo") is not None:
self._TaskInfo = TaskResultInfo()
self._TaskInfo._deserialize(params.get("TaskInfo"))
self._RequestId = params.get("RequestId")
class DescribeTasksRequest(AbstractModel):
"""DescribeTasks请求参数结构体
"""
def __init__(self):
r"""
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _Offset: 偏移量,默认为0。
:type Offset: int
:param _Filters: 过滤条件,如下支持的过滤类型,传参Name应为以下其中一个,其中task-id支持最大50个过滤个数,其他过滤参数支持的总数不超过5个。
task-id - String - (任务ID准确过滤)task-id取值形如:e386471f-139a-4e59-877f-50ece8135b99。
task-state - String - (任务状态过滤)取值范围 0(初始化), 1(运行中), 2(成功), -1(失败)。
task-sql-keyword - String - (SQL语句关键字模糊过滤)取值形如:DROP TABLE。
task-operator- string (子uin过滤)
task-kind - string (任务类型过滤)
:type Filters: list of Filter
:param _SortBy: 排序字段,支持如下字段类型,create-time(创建时间,默认)、update-time(更新时间)
:type SortBy: str
:param _Sorting: 排序方式,desc表示正序,asc表示反序, 默认为asc。
:type Sorting: str
:param _StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。默认为45天前的当前时刻
:type StartTime: str
:param _EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS时间跨度在(0,30天],支持最近45天数据查询。默认为当前时刻
:type EndTime: str
:param _DataEngineName: 数据引擎名称,用于筛选
:type DataEngineName: str
"""
self._Limit = None
self._Offset = None
self._Filters = None
self._SortBy = None
self._Sorting = None
self._StartTime = None
self._EndTime = None
self._DataEngineName = None
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
def _deserialize(self, params):
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
self._DataEngineName = params.get("DataEngineName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTasksResponse(AbstractModel):
"""DescribeTasks返回参数结构体
"""
def __init__(self):
r"""
:param _TaskList: 任务对象列表。
:type TaskList: list of TaskResponseInfo
:param _TotalCount: 实例总数。
:type TotalCount: int
:param _TasksOverview: 任务概览信息
注意:此字段可能返回 null,表示取不到有效值。
:type TasksOverview: :class:`tencentcloud.dlc.v20210125.models.TasksOverview`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TaskList = None
self._TotalCount = None
self._TasksOverview = None
self._RequestId = None
@property
def TaskList(self):
return self._TaskList
@TaskList.setter
def TaskList(self, TaskList):
self._TaskList = TaskList
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def TasksOverview(self):
return self._TasksOverview
@TasksOverview.setter
def TasksOverview(self, TasksOverview):
self._TasksOverview = TasksOverview
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("TaskList") is not None:
self._TaskList = []
for item in params.get("TaskList"):
obj = TaskResponseInfo()
obj._deserialize(item)
self._TaskList.append(obj)
self._TotalCount = params.get("TotalCount")
if params.get("TasksOverview") is not None:
self._TasksOverview = TasksOverview()
self._TasksOverview._deserialize(params.get("TasksOverview"))
self._RequestId = params.get("RequestId")
class DescribeUserRolesRequest(AbstractModel):
"""DescribeUserRoles请求参数结构体
"""
def __init__(self):
r"""
:param _Limit: 列举的数量限制
:type Limit: int
:param _Offset: 列举的偏移位置
:type Offset: int
:param _Fuzzy: 按照arn模糊列举
:type Fuzzy: str
:param _SortBy: 返回结果按照该字段排序
:type SortBy: str
:param _Sorting: 正序或者倒序,例如:desc
:type Sorting: str
"""
self._Limit = None
self._Offset = None
self._Fuzzy = None
self._SortBy = None
self._Sorting = None
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Fuzzy(self):
return self._Fuzzy
@Fuzzy.setter
def Fuzzy(self, Fuzzy):
self._Fuzzy = Fuzzy
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
def _deserialize(self, params):
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
self._Fuzzy = params.get("Fuzzy")
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeUserRolesResponse(AbstractModel):
"""DescribeUserRoles返回参数结构体
"""
def __init__(self):
r"""
:param _Total: 符合列举条件的总数量
:type Total: int
:param _UserRoles: 用户角色信息
:type UserRoles: list of UserRole
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Total = None
self._UserRoles = None
self._RequestId = None
@property
def Total(self):
return self._Total
@Total.setter
def Total(self, Total):
self._Total = Total
@property
def UserRoles(self):
return self._UserRoles
@UserRoles.setter
def UserRoles(self, UserRoles):
self._UserRoles = UserRoles
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Total = params.get("Total")
if params.get("UserRoles") is not None:
self._UserRoles = []
for item in params.get("UserRoles"):
obj = UserRole()
obj._deserialize(item)
self._UserRoles.append(obj)
self._RequestId = params.get("RequestId")
class DescribeUsersRequest(AbstractModel):
"""DescribeUsers请求参数结构体
"""
def __init__(self):
r"""
:param _UserId: 指定查询的子用户uin,用户需要通过CreateUser接口创建。
:type UserId: str
:param _Offset: 偏移量,默认为0
:type Offset: int
:param _Limit: 返回数量,默认20,最大值100
:type Limit: int
:param _SortBy: 排序字段,支持如下字段类型,create-time
:type SortBy: str
:param _Sorting: 排序方式,desc表示正序,asc表示反序, 默认为asc
:type Sorting: str
:param _Filters: 过滤条件,支持如下字段类型,user-type:根据用户类型过滤。user-keyword:根据用户名称过滤
:type Filters: list of Filter
"""
self._UserId = None
self._Offset = None
self._Limit = None
self._SortBy = None
self._Sorting = None
self._Filters = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._Offset = params.get("Offset")
self._Limit = params.get("Limit")
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeUsersResponse(AbstractModel):
"""DescribeUsers返回参数结构体
"""
def __init__(self):
r"""
:param _TotalCount: 查询到的用户总数
:type TotalCount: int
:param _UserSet: 查询到的授权用户信息集合
:type UserSet: list of UserInfo
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TotalCount = None
self._UserSet = None
self._RequestId = None
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def UserSet(self):
return self._UserSet
@UserSet.setter
def UserSet(self, UserSet):
self._UserSet = UserSet
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TotalCount = params.get("TotalCount")
if params.get("UserSet") is not None:
self._UserSet = []
for item in params.get("UserSet"):
obj = UserInfo()
obj._deserialize(item)
self._UserSet.append(obj)
self._RequestId = params.get("RequestId")
class DescribeViewsRequest(AbstractModel):
"""DescribeViews请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 列出该数据库下所属数据表。
:type DatabaseName: str
:param _Limit: 返回数量,默认为10,最大值为100。
:type Limit: int
:param _Offset: 数据偏移量,从0开始,默认为0。
:type Offset: int
:param _Filters: 过滤条件,如下支持的过滤类型,传参Name应为其一
view-name - String - (过滤条件)数据表名称,形如:view-001。
view-id - String - (过滤条件)view id形如:12342。
:type Filters: list of Filter
:param _DatasourceConnectionName: 数据库所属的数据源名称
:type DatasourceConnectionName: str
:param _Sort: 排序字段
:type Sort: str
:param _Asc: 排序规则,true:升序;false:降序
:type Asc: bool
:param _StartTime: 按视图更新时间筛选,开始时间,如2021-11-11 00:00:00
:type StartTime: str
:param _EndTime: 按视图更新时间筛选,结束时间,如2021-11-12 00:00:00
:type EndTime: str
"""
self._DatabaseName = None
self._Limit = None
self._Offset = None
self._Filters = None
self._DatasourceConnectionName = None
self._Sort = None
self._Asc = None
self._StartTime = None
self._EndTime = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def Sort(self):
return self._Sort
@Sort.setter
def Sort(self, Sort):
self._Sort = Sort
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._Limit = params.get("Limit")
self._Offset = params.get("Offset")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._Sort = params.get("Sort")
self._Asc = params.get("Asc")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeViewsResponse(AbstractModel):
"""DescribeViews返回参数结构体
"""
def __init__(self):
r"""
:param _ViewList: 视图对象列表。
:type ViewList: list of ViewResponseInfo
:param _TotalCount: 实例总数。
:type TotalCount: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._ViewList = None
self._TotalCount = None
self._RequestId = None
@property
def ViewList(self):
return self._ViewList
@ViewList.setter
def ViewList(self, ViewList):
self._ViewList = ViewList
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("ViewList") is not None:
self._ViewList = []
for item in params.get("ViewList"):
obj = ViewResponseInfo()
obj._deserialize(item)
self._ViewList.append(obj)
self._TotalCount = params.get("TotalCount")
self._RequestId = params.get("RequestId")
class DescribeWorkGroupsRequest(AbstractModel):
"""DescribeWorkGroups请求参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupId: 查询的工作组Id,不填或填0表示不过滤。
:type WorkGroupId: int
:param _Filters: 过滤条件,当前仅支持按照工作组名称进行模糊搜索。Key为workgroup-name
:type Filters: list of Filter
:param _Offset: 偏移量,默认为0
:type Offset: int
:param _Limit: 返回数量,默认20,最大值100
:type Limit: int
:param _SortBy: 排序字段,支持如下字段类型,create-time
:type SortBy: str
:param _Sorting: 排序方式,desc表示正序,asc表示反序, 默认为asc
:type Sorting: str
"""
self._WorkGroupId = None
self._Filters = None
self._Offset = None
self._Limit = None
self._SortBy = None
self._Sorting = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def Offset(self):
return self._Offset
@Offset.setter
def Offset(self, Offset):
self._Offset = Offset
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def SortBy(self):
return self._SortBy
@SortBy.setter
def SortBy(self, SortBy):
self._SortBy = SortBy
@property
def Sorting(self):
return self._Sorting
@Sorting.setter
def Sorting(self, Sorting):
self._Sorting = Sorting
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._Offset = params.get("Offset")
self._Limit = params.get("Limit")
self._SortBy = params.get("SortBy")
self._Sorting = params.get("Sorting")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeWorkGroupsResponse(AbstractModel):
"""DescribeWorkGroups返回参数结构体
"""
def __init__(self):
r"""
:param _TotalCount: 工作组总数
:type TotalCount: int
:param _WorkGroupSet: 工作组信息集合
:type WorkGroupSet: list of WorkGroupInfo
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._TotalCount = None
self._WorkGroupSet = None
self._RequestId = None
@property
def TotalCount(self):
return self._TotalCount
@TotalCount.setter
def TotalCount(self, TotalCount):
self._TotalCount = TotalCount
@property
def WorkGroupSet(self):
return self._WorkGroupSet
@WorkGroupSet.setter
def WorkGroupSet(self, WorkGroupSet):
self._WorkGroupSet = WorkGroupSet
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._TotalCount = params.get("TotalCount")
if params.get("WorkGroupSet") is not None:
self._WorkGroupSet = []
for item in params.get("WorkGroupSet"):
obj = WorkGroupInfo()
obj._deserialize(item)
self._WorkGroupSet.append(obj)
self._RequestId = params.get("RequestId")
class DetachUserPolicyRequest(AbstractModel):
"""DetachUserPolicy请求参数结构体
"""
def __init__(self):
r"""
:param _UserId: 用户Id,和CAM侧Uin匹配
:type UserId: str
:param _PolicySet: 解绑的权限集合
:type PolicySet: list of Policy
"""
self._UserId = None
self._PolicySet = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
def _deserialize(self, params):
self._UserId = params.get("UserId")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DetachUserPolicyResponse(AbstractModel):
"""DetachUserPolicy返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DetachWorkGroupPolicyRequest(AbstractModel):
"""DetachWorkGroupPolicy请求参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupId: 工作组Id
:type WorkGroupId: int
:param _PolicySet: 解绑的权限集合
:type PolicySet: list of Policy
"""
self._WorkGroupId = None
self._PolicySet = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DetachWorkGroupPolicyResponse(AbstractModel):
"""DetachWorkGroupPolicy返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DropDMSDatabaseRequest(AbstractModel):
"""DropDMSDatabase请求参数结构体
"""
def __init__(self):
r"""
:param _Name: 数据库名称
:type Name: str
:param _DeleteData: 是否删除数据
:type DeleteData: bool
:param _Cascade: 是否级联删除
:type Cascade: bool
"""
self._Name = None
self._DeleteData = None
self._Cascade = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def DeleteData(self):
return self._DeleteData
@DeleteData.setter
def DeleteData(self, DeleteData):
self._DeleteData = DeleteData
@property
def Cascade(self):
return self._Cascade
@Cascade.setter
def Cascade(self, Cascade):
self._Cascade = Cascade
def _deserialize(self, params):
self._Name = params.get("Name")
self._DeleteData = params.get("DeleteData")
self._Cascade = params.get("Cascade")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DropDMSDatabaseResponse(AbstractModel):
"""DropDMSDatabase返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class DropDMSPartitionsRequest(AbstractModel):
"""DropDMSPartitions请求参数结构体
"""
def __init__(self):
r"""
:param _DatabaseName: 数据库名称
:type DatabaseName: str
:param _SchemaName: 数据库Schema名称
:type SchemaName: str
:param _TableName: 数据表名称
:type TableName: str
:param _Name: 分区名称
:type Name: str
:param _Values: 单个分区名称
:type Values: list of str
:param _DeleteData: 是否删除分区数据
:type DeleteData: bool
"""
self._DatabaseName = None
self._SchemaName = None
self._TableName = None
self._Name = None
self._Values = None
self._DeleteData = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def SchemaName(self):
return self._SchemaName
@SchemaName.setter
def SchemaName(self, SchemaName):
self._SchemaName = SchemaName
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Values(self):
return self._Values
@Values.setter
def Values(self, Values):
self._Values = Values
@property
def DeleteData(self):
return self._DeleteData
@DeleteData.setter
def DeleteData(self, DeleteData):
self._DeleteData = DeleteData
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._SchemaName = params.get("SchemaName")
self._TableName = params.get("TableName")
self._Name = params.get("Name")
self._Values = params.get("Values")
self._DeleteData = params.get("DeleteData")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DropDMSPartitionsResponse(AbstractModel):
"""DropDMSPartitions返回参数结构体
"""
def __init__(self):
r"""
:param _Status: 状态
:type Status: bool
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Status = None
self._RequestId = None
@property
def Status(self):
return self._Status
@Status.setter
def Status(self, Status):
self._Status = Status
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Status = params.get("Status")
self._RequestId = params.get("RequestId")
class DropDMSTableRequest(AbstractModel):
"""DropDMSTable请求参数结构体
"""
def __init__(self):
r"""
:param _DbName: 数据库名称
:type DbName: str
:param _Name: 表名称
:type Name: str
:param _DeleteData: 是否删除数据
:type DeleteData: bool
:param _EnvProps: 环境属性
:type EnvProps: :class:`tencentcloud.dlc.v20210125.models.KVPair`
"""
self._DbName = None
self._Name = None
self._DeleteData = None
self._EnvProps = None
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def DeleteData(self):
return self._DeleteData
@DeleteData.setter
def DeleteData(self, DeleteData):
self._DeleteData = DeleteData
@property
def EnvProps(self):
return self._EnvProps
@EnvProps.setter
def EnvProps(self, EnvProps):
self._EnvProps = EnvProps
def _deserialize(self, params):
self._DbName = params.get("DbName")
self._Name = params.get("Name")
self._DeleteData = params.get("DeleteData")
if params.get("EnvProps") is not None:
self._EnvProps = KVPair()
self._EnvProps._deserialize(params.get("EnvProps"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DropDMSTableResponse(AbstractModel):
"""DropDMSTable返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class Execution(AbstractModel):
"""SQL语句对象
"""
def __init__(self):
r"""
:param _SQL: 自动生成SQL语句。
:type SQL: str
"""
self._SQL = None
@property
def SQL(self):
return self._SQL
@SQL.setter
def SQL(self, SQL):
self._SQL = SQL
def _deserialize(self, params):
self._SQL = params.get("SQL")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Filter(AbstractModel):
"""查询列表过滤条件参数
"""
def __init__(self):
r"""
:param _Name: 属性名称, 若存在多个Filter时,Filter间的关系为逻辑或(OR)关系。
:type Name: str
:param _Values: 属性值, 若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
:type Values: list of str
"""
self._Name = None
self._Values = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Values(self):
return self._Values
@Values.setter
def Values(self, Values):
self._Values = Values
def _deserialize(self, params):
self._Name = params.get("Name")
self._Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GenerateCreateMangedTableSqlRequest(AbstractModel):
"""GenerateCreateMangedTableSql请求参数结构体
"""
def __init__(self):
r"""
:param _TableBaseInfo: 表基本信息
:type TableBaseInfo: :class:`tencentcloud.dlc.v20210125.models.TableBaseInfo`
:param _Columns: 表字段信息
:type Columns: list of TColumn
:param _Partitions: 表分区信息
:type Partitions: list of TPartition
:param _Properties: 表属性信息
:type Properties: list of Property
:param _UpsertKeys: V2 upsert表 upsert键
:type UpsertKeys: list of str
"""
self._TableBaseInfo = None
self._Columns = None
self._Partitions = None
self._Properties = None
self._UpsertKeys = None
@property
def TableBaseInfo(self):
return self._TableBaseInfo
@TableBaseInfo.setter
def TableBaseInfo(self, TableBaseInfo):
self._TableBaseInfo = TableBaseInfo
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
@property
def UpsertKeys(self):
return self._UpsertKeys
@UpsertKeys.setter
def UpsertKeys(self, UpsertKeys):
self._UpsertKeys = UpsertKeys
def _deserialize(self, params):
if params.get("TableBaseInfo") is not None:
self._TableBaseInfo = TableBaseInfo()
self._TableBaseInfo._deserialize(params.get("TableBaseInfo"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = TColumn()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = TPartition()
obj._deserialize(item)
self._Partitions.append(obj)
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
self._UpsertKeys = params.get("UpsertKeys")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GenerateCreateMangedTableSqlResponse(AbstractModel):
"""GenerateCreateMangedTableSql返回参数结构体
"""
def __init__(self):
r"""
:param _Execution: 创建托管存储内表sql语句描述
:type Execution: :class:`tencentcloud.dlc.v20210125.models.Execution`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Execution = None
self._RequestId = None
@property
def Execution(self):
return self._Execution
@Execution.setter
def Execution(self, Execution):
self._Execution = Execution
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Execution") is not None:
self._Execution = Execution()
self._Execution._deserialize(params.get("Execution"))
self._RequestId = params.get("RequestId")
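
# Round-trip sketch (illustrative comment, not generated code): the response
# wraps the generated CREATE TABLE statement in an Execution object, so the
# SQL text is read via resp.Execution.SQL. `client` is an assumed DlcClient.
#
#     resp = client.GenerateCreateMangedTableSql(req)
#     create_sql = resp.Execution.SQL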
class JobLogResult(AbstractModel):
"""日志详情
"""
def __init__(self):
r"""
:param _Time: 日志时间戳,毫秒
注意:此字段可能返回 null,表示取不到有效值。
:type Time: int
:param _TopicId: 日志topic id
注意:此字段可能返回 null,表示取不到有效值。
:type TopicId: str
:param _TopicName: 日志topic name
注意:此字段可能返回 null,表示取不到有效值。
:type TopicName: str
:param _LogJson: 日志内容,json字符串
注意:此字段可能返回 null,表示取不到有效值。
:type LogJson: str
:param _PkgLogId: 日志ID
注意:此字段可能返回 null,表示取不到有效值。
:type PkgLogId: str
"""
self._Time = None
self._TopicId = None
self._TopicName = None
self._LogJson = None
self._PkgLogId = None
@property
def Time(self):
return self._Time
@Time.setter
def Time(self, Time):
self._Time = Time
@property
def TopicId(self):
return self._TopicId
@TopicId.setter
def TopicId(self, TopicId):
self._TopicId = TopicId
@property
def TopicName(self):
return self._TopicName
@TopicName.setter
def TopicName(self, TopicName):
self._TopicName = TopicName
@property
def LogJson(self):
return self._LogJson
@LogJson.setter
def LogJson(self, LogJson):
self._LogJson = LogJson
@property
def PkgLogId(self):
return self._PkgLogId
@PkgLogId.setter
def PkgLogId(self, PkgLogId):
self._PkgLogId = PkgLogId
def _deserialize(self, params):
self._Time = params.get("Time")
self._TopicId = params.get("TopicId")
self._TopicName = params.get("TopicName")
self._LogJson = params.get("LogJson")
self._PkgLogId = params.get("PkgLogId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KVPair(AbstractModel):
"""配置格式
"""
def __init__(self):
r"""
:param _Key: 配置的key值
注意:此字段可能返回 null,表示取不到有效值。
:type Key: str
:param _Value: 配置的value值
注意:此字段可能返回 null,表示取不到有效值。
:type Value: str
"""
self._Key = None
self._Value = None
@property
def Key(self):
return self._Key
@Key.setter
def Key(self, Key):
self._Key = Key
@property
def Value(self):
return self._Value
@Value.setter
def Value(self, Value):
self._Value = Value
def _deserialize(self, params):
self._Key = params.get("Key")
self._Value = params.get("Value")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ListTaskJobLogDetailRequest(AbstractModel):
"""ListTaskJobLogDetail请求参数结构体
"""
def __init__(self):
r"""
:param _TaskId: 列表返回的Id
:type TaskId: str
:param _StartTime: 开始运行时间,unix时间戳(毫秒)
:type StartTime: int
:param _EndTime: 结束运行时间,unix时间戳(毫秒)
:type EndTime: int
:param _Limit: 分页大小,最大1000,配合Context一起使用
:type Limit: int
:param _Context: 下一次分页参数,第一次传空
:type Context: str
:param _Asc: 最近1000条日志是否升序排列,true:升序排序,false:倒序,默认false,倒序排列
:type Asc: bool
:param _Filters: 预览日志的通用过滤条件
:type Filters: list of Filter
:param _BatchId: SparkSQL任务唯一ID
:type BatchId: str
"""
self._TaskId = None
self._StartTime = None
self._EndTime = None
self._Limit = None
self._Context = None
self._Asc = None
self._Filters = None
self._BatchId = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Limit(self):
return self._Limit
@Limit.setter
def Limit(self, Limit):
self._Limit = Limit
@property
def Context(self):
return self._Context
@Context.setter
def Context(self, Context):
self._Context = Context
@property
def Asc(self):
return self._Asc
@Asc.setter
def Asc(self, Asc):
self._Asc = Asc
@property
def Filters(self):
return self._Filters
@Filters.setter
def Filters(self, Filters):
self._Filters = Filters
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._StartTime = params.get("StartTime")
self._EndTime = params.get("EndTime")
self._Limit = params.get("Limit")
self._Context = params.get("Context")
self._Asc = params.get("Asc")
if params.get("Filters") is not None:
self._Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self._Filters.append(obj)
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ListTaskJobLogDetailResponse(AbstractModel):
"""ListTaskJobLogDetail返回参数结构体
"""
def __init__(self):
r"""
:param _Context: 下一次分页参数
注意:此字段可能返回 null,表示取不到有效值。
:type Context: str
:param _ListOver: 是否获取完结
注意:此字段可能返回 null,表示取不到有效值。
:type ListOver: bool
:param _Results: 日志详情
注意:此字段可能返回 null,表示取不到有效值。
:type Results: list of JobLogResult
:param _LogUrl: 日志url
注意:此字段可能返回 null,表示取不到有效值。
:type LogUrl: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Context = None
self._ListOver = None
self._Results = None
self._LogUrl = None
self._RequestId = None
@property
def Context(self):
return self._Context
@Context.setter
def Context(self, Context):
self._Context = Context
@property
def ListOver(self):
return self._ListOver
@ListOver.setter
def ListOver(self, ListOver):
self._ListOver = ListOver
@property
def Results(self):
return self._Results
@Results.setter
def Results(self, Results):
self._Results = Results
@property
def LogUrl(self):
return self._LogUrl
@LogUrl.setter
def LogUrl(self, LogUrl):
self._LogUrl = LogUrl
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Context = params.get("Context")
self._ListOver = params.get("ListOver")
if params.get("Results") is not None:
self._Results = []
for item in params.get("Results"):
obj = JobLogResult()
obj._deserialize(item)
self._Results.append(obj)
self._LogUrl = params.get("LogUrl")
self._RequestId = params.get("RequestId")
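# Illustrative paging sketch (not part of the generated module; assumes a
# configured DlcClient named `client`, whose ListTaskJobLogDetail method
# follows the SDK's one-method-per-action convention, and the request `req`
# built above):
#
#     while True:
#         resp = client.ListTaskJobLogDetail(req)
#         for result in resp.Results or []:
#             pass                        # consume each JobLogResult
#         if resp.ListOver:
#             break
#         req.Context = resp.Context      # carry the cursor to the next page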
class LockComponentInfo(AbstractModel):
"""元数据加锁内容
"""
def __init__(self):
r"""
:param _DbName: 数据库名称
:type DbName: str
:param _TableName: 表名称
:type TableName: str
:param _Partition: 分区
:type Partition: str
:param _LockType: 锁类型:SHARED_READ、SHARED_WRITE、EXCLUSIVE
:type LockType: str
:param _LockLevel: 锁级别:DB、TABLE、PARTITION
:type LockLevel: str
:param _DataOperationType: 锁操作:SELECT,INSERT,UPDATE,DELETE,UNSET,NO_TXN
:type DataOperationType: str
:param _IsAcid: 是否保持Acid
:type IsAcid: bool
:param _IsDynamicPartitionWrite: 是否动态分区写
:type IsDynamicPartitionWrite: bool
"""
self._DbName = None
self._TableName = None
self._Partition = None
self._LockType = None
self._LockLevel = None
self._DataOperationType = None
self._IsAcid = None
self._IsDynamicPartitionWrite = None
@property
def DbName(self):
return self._DbName
@DbName.setter
def DbName(self, DbName):
self._DbName = DbName
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def Partition(self):
return self._Partition
@Partition.setter
def Partition(self, Partition):
self._Partition = Partition
@property
def LockType(self):
return self._LockType
@LockType.setter
def LockType(self, LockType):
self._LockType = LockType
@property
def LockLevel(self):
return self._LockLevel
@LockLevel.setter
def LockLevel(self, LockLevel):
self._LockLevel = LockLevel
@property
def DataOperationType(self):
return self._DataOperationType
@DataOperationType.setter
def DataOperationType(self, DataOperationType):
self._DataOperationType = DataOperationType
@property
def IsAcid(self):
return self._IsAcid
@IsAcid.setter
def IsAcid(self, IsAcid):
self._IsAcid = IsAcid
@property
def IsDynamicPartitionWrite(self):
return self._IsDynamicPartitionWrite
@IsDynamicPartitionWrite.setter
def IsDynamicPartitionWrite(self, IsDynamicPartitionWrite):
self._IsDynamicPartitionWrite = IsDynamicPartitionWrite
def _deserialize(self, params):
self._DbName = params.get("DbName")
self._TableName = params.get("TableName")
self._Partition = params.get("Partition")
self._LockType = params.get("LockType")
self._LockLevel = params.get("LockLevel")
self._DataOperationType = params.get("DataOperationType")
self._IsAcid = params.get("IsAcid")
self._IsDynamicPartitionWrite = params.get("IsDynamicPartitionWrite")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
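# Illustrative usage sketch (not part of the generated module): a shared read
# lock on a single table; all names below are placeholders.
#
#     component = LockComponentInfo()
#     component.DbName = "demo_db"
#     component.TableName = "demo_table"
#     component.LockType = "SHARED_READ"
#     component.LockLevel = "TABLE"
#     component.DataOperationType = "SELECT"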
class LockMetaDataRequest(AbstractModel):
"""LockMetaData请求参数结构体
"""
def __init__(self):
r"""
:param _LockComponentList: 加锁内容
:type LockComponentList: list of LockComponentInfo
:param _DatasourceConnectionName: 数据源名称
:type DatasourceConnectionName: str
:param _TxnId: 事务id
:type TxnId: int
:param _AgentInfo: 客户端信息
:type AgentInfo: str
:param _Hostname: 主机名
:type Hostname: str
"""
self._LockComponentList = None
self._DatasourceConnectionName = None
self._TxnId = None
self._AgentInfo = None
self._Hostname = None
@property
def LockComponentList(self):
return self._LockComponentList
@LockComponentList.setter
def LockComponentList(self, LockComponentList):
self._LockComponentList = LockComponentList
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def TxnId(self):
return self._TxnId
@TxnId.setter
def TxnId(self, TxnId):
self._TxnId = TxnId
@property
def AgentInfo(self):
return self._AgentInfo
@AgentInfo.setter
def AgentInfo(self, AgentInfo):
self._AgentInfo = AgentInfo
@property
def Hostname(self):
return self._Hostname
@Hostname.setter
def Hostname(self, Hostname):
self._Hostname = Hostname
def _deserialize(self, params):
if params.get("LockComponentList") is not None:
self._LockComponentList = []
for item in params.get("LockComponentList"):
obj = LockComponentInfo()
obj._deserialize(item)
self._LockComponentList.append(obj)
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._TxnId = params.get("TxnId")
self._AgentInfo = params.get("AgentInfo")
self._Hostname = params.get("Hostname")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class LockMetaDataResponse(AbstractModel):
"""LockMetaData返回参数结构体
"""
def __init__(self):
r"""
:param _LockId: 锁id
:type LockId: int
:param _LockState: 锁状态:ACQUIRED、WAITING、ABORT、NOT_ACQUIRED
:type LockState: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._LockId = None
self._LockState = None
self._RequestId = None
@property
def LockId(self):
return self._LockId
@LockId.setter
def LockId(self, LockId):
self._LockId = LockId
@property
def LockState(self):
return self._LockState
@LockState.setter
def LockState(self, LockState):
self._LockState = LockState
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._LockId = params.get("LockId")
self._LockState = params.get("LockState")
self._RequestId = params.get("RequestId")
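# Illustrative locking sketch (not part of the generated module; assumes a
# configured DlcClient named `client` and the `component` built above):
#
#     req = LockMetaDataRequest()
#     req.LockComponentList = [component]
#     req.DatasourceConnectionName = "DataLakeCatalog"
#     resp = client.LockMetaData(req)
#     if resp.LockState == "WAITING":
#         pass    # poll or heartbeat using the returned resp.LockId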
class ModifyGovernEventRuleRequest(AbstractModel):
"""ModifyGovernEventRule请求参数结构体
"""
class ModifyGovernEventRuleResponse(AbstractModel):
"""ModifyGovernEventRule返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ModifySparkAppBatchRequest(AbstractModel):
"""ModifySparkAppBatch请求参数结构体
"""
def __init__(self):
r"""
:param _SparkAppId: 需要批量修改的Spark作业任务ID列表
:type SparkAppId: list of str
:param _DataEngine: 引擎ID
:type DataEngine: str
:param _AppDriverSize: driver规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
:type AppDriverSize: str
:param _AppExecutorSize: executor规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
:type AppExecutorSize: str
:param _AppExecutorNums: 指定executor数量,最小值为1,最大值小于集群规格
:type AppExecutorNums: int
:param _AppExecutorMaxNumbers: 指定executor max数量(动态配置场景下),最小值为1,最大值小于集群规格(当ExecutorMaxNumbers小于ExecutorNums时,改值设定为ExecutorNums)
:type AppExecutorMaxNumbers: int
:param _IsInherit: 任务资源配置是否继承集群模板,0(默认)不继承,1:继承
:type IsInherit: int
"""
self._SparkAppId = None
self._DataEngine = None
self._AppDriverSize = None
self._AppExecutorSize = None
self._AppExecutorNums = None
self._AppExecutorMaxNumbers = None
self._IsInherit = None
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def DataEngine(self):
return self._DataEngine
@DataEngine.setter
def DataEngine(self, DataEngine):
self._DataEngine = DataEngine
@property
def AppDriverSize(self):
return self._AppDriverSize
@AppDriverSize.setter
def AppDriverSize(self, AppDriverSize):
self._AppDriverSize = AppDriverSize
@property
def AppExecutorSize(self):
return self._AppExecutorSize
@AppExecutorSize.setter
def AppExecutorSize(self, AppExecutorSize):
self._AppExecutorSize = AppExecutorSize
@property
def AppExecutorNums(self):
return self._AppExecutorNums
@AppExecutorNums.setter
def AppExecutorNums(self, AppExecutorNums):
self._AppExecutorNums = AppExecutorNums
@property
def AppExecutorMaxNumbers(self):
return self._AppExecutorMaxNumbers
@AppExecutorMaxNumbers.setter
def AppExecutorMaxNumbers(self, AppExecutorMaxNumbers):
self._AppExecutorMaxNumbers = AppExecutorMaxNumbers
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
def _deserialize(self, params):
self._SparkAppId = params.get("SparkAppId")
self._DataEngine = params.get("DataEngine")
self._AppDriverSize = params.get("AppDriverSize")
self._AppExecutorSize = params.get("AppExecutorSize")
self._AppExecutorNums = params.get("AppExecutorNums")
self._AppExecutorMaxNumbers = params.get("AppExecutorMaxNumbers")
self._IsInherit = params.get("IsInherit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
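# Illustrative usage sketch (not part of the generated module): resizing two
# jobs in one call; all IDs and sizes below are placeholders.
#
#     req = ModifySparkAppBatchRequest()
#     req.SparkAppId = ["app-id-1", "app-id-2"]
#     req.DataEngine = "placeholder-engine"
#     req.AppDriverSize = "medium"
#     req.AppExecutorSize = "medium"
#     req.AppExecutorNums = 2
#     req.AppExecutorMaxNumbers = 4   # raised to AppExecutorNums if set lower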
class ModifySparkAppBatchResponse(AbstractModel):
"""ModifySparkAppBatch返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ModifySparkAppRequest(AbstractModel):
"""ModifySparkApp请求参数结构体
"""
def __init__(self):
r"""
:param _AppName: spark作业名
:type AppName: str
:param _AppType: spark作业类型,1代表spark jar作业,2代表spark streaming作业
:type AppType: int
:param _DataEngine: 执行spark作业的数据引擎名称
:type DataEngine: str
:param _AppFile: spark作业程序包文件路径
:type AppFile: str
:param _RoleArn: 数据访问策略,CAM Role arn
:type RoleArn: int
:param _AppDriverSize: 指定的Driver规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type AppDriverSize: str
:param _AppExecutorSize: 指定的Executor规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
:type AppExecutorSize: str
:param _AppExecutorNums: spark作业executor个数
:type AppExecutorNums: int
:param _SparkAppId: spark作业Id
:type SparkAppId: str
:param _Eni: 该字段已下线,请使用字段Datasource
:type Eni: str
:param _IsLocal: spark作业程序包是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocal: str
:param _MainClass: spark作业主类
:type MainClass: str
:param _AppConf: spark配置,以换行符分隔
:type AppConf: str
:param _IsLocalJars: spark 作业依赖jar包是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalJars: str
:param _AppJars: spark 作业依赖jar包(--jars),以逗号分隔
:type AppJars: str
:param _IsLocalFiles: spark作业依赖文件资源是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalFiles: str
:param _AppFiles: spark作业依赖文件资源(--files)(非jar、zip),以逗号分隔
:type AppFiles: str
:param _IsLocalPythonFiles: pyspark:依赖上传方式,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalPythonFiles: str
:param _AppPythonFiles: pyspark作业依赖python资源(--py-files),支持py/zip/egg等归档格式,多文件以逗号分隔
:type AppPythonFiles: str
:param _CmdArgs: spark作业程序入参
:type CmdArgs: str
:param _MaxRetries: 最大重试次数,只对spark流任务生效
:type MaxRetries: int
:param _DataSource: 数据源名
:type DataSource: str
:param _IsLocalArchives: spark作业依赖archives资源是否本地上传,cos:存放与cos,lakefs:本地上传(控制台使用,该方式不支持直接接口调用)
:type IsLocalArchives: str
:param _AppArchives: spark作业依赖archives资源(--archives),支持tar.gz/tgz/tar等归档格式,以逗号分隔
:type AppArchives: str
:param _SparkImage: Spark Image 版本号
:type SparkImage: str
:param _SparkImageVersion: Spark Image 版本名称
:type SparkImageVersion: str
:param _AppExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于AppExecutorNums
:type AppExecutorMaxNumbers: int
:param _SessionId: 关联dlc查询脚本
:type SessionId: str
:param _IsInherit: 任务资源配置是否继承集群配置模板:0(默认)不继承、1:继承
:type IsInherit: int
:param _IsSessionStarted: 是否使用session脚本的sql运行任务:false:否,true:是
:type IsSessionStarted: bool
"""
self._AppName = None
self._AppType = None
self._DataEngine = None
self._AppFile = None
self._RoleArn = None
self._AppDriverSize = None
self._AppExecutorSize = None
self._AppExecutorNums = None
self._SparkAppId = None
self._Eni = None
self._IsLocal = None
self._MainClass = None
self._AppConf = None
self._IsLocalJars = None
self._AppJars = None
self._IsLocalFiles = None
self._AppFiles = None
self._IsLocalPythonFiles = None
self._AppPythonFiles = None
self._CmdArgs = None
self._MaxRetries = None
self._DataSource = None
self._IsLocalArchives = None
self._AppArchives = None
self._SparkImage = None
self._SparkImageVersion = None
self._AppExecutorMaxNumbers = None
self._SessionId = None
self._IsInherit = None
self._IsSessionStarted = None
@property
def AppName(self):
return self._AppName
@AppName.setter
def AppName(self, AppName):
self._AppName = AppName
@property
def AppType(self):
return self._AppType
@AppType.setter
def AppType(self, AppType):
self._AppType = AppType
@property
def DataEngine(self):
return self._DataEngine
@DataEngine.setter
def DataEngine(self, DataEngine):
self._DataEngine = DataEngine
@property
def AppFile(self):
return self._AppFile
@AppFile.setter
def AppFile(self, AppFile):
self._AppFile = AppFile
@property
def RoleArn(self):
return self._RoleArn
@RoleArn.setter
def RoleArn(self, RoleArn):
self._RoleArn = RoleArn
@property
def AppDriverSize(self):
return self._AppDriverSize
@AppDriverSize.setter
def AppDriverSize(self, AppDriverSize):
self._AppDriverSize = AppDriverSize
@property
def AppExecutorSize(self):
return self._AppExecutorSize
@AppExecutorSize.setter
def AppExecutorSize(self, AppExecutorSize):
self._AppExecutorSize = AppExecutorSize
@property
def AppExecutorNums(self):
return self._AppExecutorNums
@AppExecutorNums.setter
def AppExecutorNums(self, AppExecutorNums):
self._AppExecutorNums = AppExecutorNums
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def Eni(self):
return self._Eni
@Eni.setter
def Eni(self, Eni):
self._Eni = Eni
@property
def IsLocal(self):
return self._IsLocal
@IsLocal.setter
def IsLocal(self, IsLocal):
self._IsLocal = IsLocal
@property
def MainClass(self):
return self._MainClass
@MainClass.setter
def MainClass(self, MainClass):
self._MainClass = MainClass
@property
def AppConf(self):
return self._AppConf
@AppConf.setter
def AppConf(self, AppConf):
self._AppConf = AppConf
@property
def IsLocalJars(self):
return self._IsLocalJars
@IsLocalJars.setter
def IsLocalJars(self, IsLocalJars):
self._IsLocalJars = IsLocalJars
@property
def AppJars(self):
return self._AppJars
@AppJars.setter
def AppJars(self, AppJars):
self._AppJars = AppJars
@property
def IsLocalFiles(self):
return self._IsLocalFiles
@IsLocalFiles.setter
def IsLocalFiles(self, IsLocalFiles):
self._IsLocalFiles = IsLocalFiles
@property
def AppFiles(self):
return self._AppFiles
@AppFiles.setter
def AppFiles(self, AppFiles):
self._AppFiles = AppFiles
@property
def IsLocalPythonFiles(self):
return self._IsLocalPythonFiles
@IsLocalPythonFiles.setter
def IsLocalPythonFiles(self, IsLocalPythonFiles):
self._IsLocalPythonFiles = IsLocalPythonFiles
@property
def AppPythonFiles(self):
return self._AppPythonFiles
@AppPythonFiles.setter
def AppPythonFiles(self, AppPythonFiles):
self._AppPythonFiles = AppPythonFiles
@property
def CmdArgs(self):
return self._CmdArgs
@CmdArgs.setter
def CmdArgs(self, CmdArgs):
self._CmdArgs = CmdArgs
@property
def MaxRetries(self):
return self._MaxRetries
@MaxRetries.setter
def MaxRetries(self, MaxRetries):
self._MaxRetries = MaxRetries
@property
def DataSource(self):
return self._DataSource
@DataSource.setter
def DataSource(self, DataSource):
self._DataSource = DataSource
@property
def IsLocalArchives(self):
return self._IsLocalArchives
@IsLocalArchives.setter
def IsLocalArchives(self, IsLocalArchives):
self._IsLocalArchives = IsLocalArchives
@property
def AppArchives(self):
return self._AppArchives
@AppArchives.setter
def AppArchives(self, AppArchives):
self._AppArchives = AppArchives
@property
def SparkImage(self):
return self._SparkImage
@SparkImage.setter
def SparkImage(self, SparkImage):
self._SparkImage = SparkImage
@property
def SparkImageVersion(self):
return self._SparkImageVersion
@SparkImageVersion.setter
def SparkImageVersion(self, SparkImageVersion):
self._SparkImageVersion = SparkImageVersion
@property
def AppExecutorMaxNumbers(self):
return self._AppExecutorMaxNumbers
@AppExecutorMaxNumbers.setter
def AppExecutorMaxNumbers(self, AppExecutorMaxNumbers):
self._AppExecutorMaxNumbers = AppExecutorMaxNumbers
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
@property
def IsSessionStarted(self):
return self._IsSessionStarted
@IsSessionStarted.setter
def IsSessionStarted(self, IsSessionStarted):
self._IsSessionStarted = IsSessionStarted
def _deserialize(self, params):
self._AppName = params.get("AppName")
self._AppType = params.get("AppType")
self._DataEngine = params.get("DataEngine")
self._AppFile = params.get("AppFile")
self._RoleArn = params.get("RoleArn")
self._AppDriverSize = params.get("AppDriverSize")
self._AppExecutorSize = params.get("AppExecutorSize")
self._AppExecutorNums = params.get("AppExecutorNums")
self._SparkAppId = params.get("SparkAppId")
self._Eni = params.get("Eni")
self._IsLocal = params.get("IsLocal")
self._MainClass = params.get("MainClass")
self._AppConf = params.get("AppConf")
self._IsLocalJars = params.get("IsLocalJars")
self._AppJars = params.get("AppJars")
self._IsLocalFiles = params.get("IsLocalFiles")
self._AppFiles = params.get("AppFiles")
self._IsLocalPythonFiles = params.get("IsLocalPythonFiles")
self._AppPythonFiles = params.get("AppPythonFiles")
self._CmdArgs = params.get("CmdArgs")
self._MaxRetries = params.get("MaxRetries")
self._DataSource = params.get("DataSource")
self._IsLocalArchives = params.get("IsLocalArchives")
self._AppArchives = params.get("AppArchives")
self._SparkImage = params.get("SparkImage")
self._SparkImageVersion = params.get("SparkImageVersion")
self._AppExecutorMaxNumbers = params.get("AppExecutorMaxNumbers")
self._SessionId = params.get("SessionId")
self._IsInherit = params.get("IsInherit")
self._IsSessionStarted = params.get("IsSessionStarted")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
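# Illustrative usage sketch (not part of the generated module; assumes a
# configured DlcClient named `client`; every value below is a placeholder):
#
#     req = ModifySparkAppRequest()
#     req.SparkAppId = "placeholder-app-id"
#     req.AppName = "daily-etl"
#     req.AppType = 1                                # 1: Spark JAR job
#     req.DataEngine = "placeholder-engine"
#     req.AppFile = "cosn://bucket/path/etl.jar"
#     req.MainClass = "com.example.Etl"
#     req.AppDriverSize = "small"
#     req.AppExecutorSize = "small"
#     req.AppExecutorNums = 1
#     resp = client.ModifySparkApp(req)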
class ModifySparkAppResponse(AbstractModel):
"""ModifySparkApp返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ModifyUserRequest(AbstractModel):
"""ModifyUser请求参数结构体
"""
def __init__(self):
r"""
:param _UserId: 用户Id,和CAM侧Uin匹配
:type UserId: str
:param _UserDescription: 用户描述
:type UserDescription: str
"""
self._UserId = None
self._UserDescription = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def UserDescription(self):
return self._UserDescription
@UserDescription.setter
def UserDescription(self, UserDescription):
self._UserDescription = UserDescription
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._UserDescription = params.get("UserDescription")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
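# Illustrative end-to-end sketch (not part of the generated module; assumes the
# standard tencentcloud-sdk-python credential/client flow with placeholder
# secrets and region):
#
#     from tencentcloud.common import credential
#     from tencentcloud.dlc.v20210125 import dlc_client
#
#     cred = credential.Credential("SecretId", "SecretKey")
#     client = dlc_client.DlcClient(cred, "ap-guangzhou")
#     req = ModifyUserRequest()
#     req.UserId = "100000000001"      # placeholder Uin
#     req.UserDescription = "data platform user"
#     resp = client.ModifyUser(req)
#     print(resp.RequestId)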
class ModifyUserResponse(AbstractModel):
"""ModifyUser返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ModifyWorkGroupRequest(AbstractModel):
"""ModifyWorkGroup请求参数结构体
"""
def __init__(self):
r"""
:param _WorkGroupId: 工作组Id
:type WorkGroupId: int
:param _WorkGroupDescription: 工作组描述,最大字符数限制50
:type WorkGroupDescription: str
"""
self._WorkGroupId = None
self._WorkGroupDescription = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def WorkGroupDescription(self):
return self._WorkGroupDescription
@WorkGroupDescription.setter
def WorkGroupDescription(self, WorkGroupDescription):
self._WorkGroupDescription = WorkGroupDescription
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
self._WorkGroupDescription = params.get("WorkGroupDescription")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class ModifyWorkGroupResponse(AbstractModel):
"""ModifyWorkGroup返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class NetworkConnection(AbstractModel):
"""网络配置
"""
def __init__(self):
r"""
:param _Id: 网络配置id
注意:此字段可能返回 null,表示取不到有效值。
:type Id: int
:param _AssociateId: 网络配置唯一标志符
注意:此字段可能返回 null,表示取不到有效值。
:type AssociateId: str
:param _HouseId: 计算引擎id
注意:此字段可能返回 null,表示取不到有效值。
:type HouseId: str
:param _DatasourceConnectionId: 数据源id(已废弃)
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionId: str
:param _State: 网络配置状态(0-初始化,1-正常)
注意:此字段可能返回 null,表示取不到有效值。
:type State: int
:param _CreateTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: int
:param _UpdateTime: 修改时间
注意:此字段可能返回 null,表示取不到有效值。
:type UpdateTime: int
:param _Appid: 创建用户Appid
注意:此字段可能返回 null,表示取不到有效值。
:type Appid: int
:param _HouseName: 计算引擎名称
注意:此字段可能返回 null,表示取不到有效值。
:type HouseName: str
:param _DatasourceConnectionName: 网络配置名称
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionName: str
:param _NetworkConnectionType: 网络配置类型
注意:此字段可能返回 null,表示取不到有效值。
:type NetworkConnectionType: int
:param _Uin: 创建用户uin
注意:此字段可能返回 null,表示取不到有效值。
:type Uin: str
:param _SubAccountUin: 创建用户SubAccountUin
注意:此字段可能返回 null,表示取不到有效值。
:type SubAccountUin: str
:param _NetworkConnectionDesc: 网络配置描述
注意:此字段可能返回 null,表示取不到有效值。
:type NetworkConnectionDesc: str
:param _DatasourceConnectionVpcId: 数据源vpcid
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionVpcId: str
:param _DatasourceConnectionSubnetId: 数据源SubnetId
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionSubnetId: str
:param _DatasourceConnectionCidrBlock: 数据源SubnetId
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionCidrBlock: str
:param _DatasourceConnectionSubnetCidrBlock: 数据源SubnetCidrBlock
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionSubnetCidrBlock: str
"""
self._Id = None
self._AssociateId = None
self._HouseId = None
self._DatasourceConnectionId = None
self._State = None
self._CreateTime = None
self._UpdateTime = None
self._Appid = None
self._HouseName = None
self._DatasourceConnectionName = None
self._NetworkConnectionType = None
self._Uin = None
self._SubAccountUin = None
self._NetworkConnectionDesc = None
self._DatasourceConnectionVpcId = None
self._DatasourceConnectionSubnetId = None
self._DatasourceConnectionCidrBlock = None
self._DatasourceConnectionSubnetCidrBlock = None
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
@property
def AssociateId(self):
return self._AssociateId
@AssociateId.setter
def AssociateId(self, AssociateId):
self._AssociateId = AssociateId
@property
def HouseId(self):
return self._HouseId
@HouseId.setter
def HouseId(self, HouseId):
self._HouseId = HouseId
@property
def DatasourceConnectionId(self):
return self._DatasourceConnectionId
@DatasourceConnectionId.setter
def DatasourceConnectionId(self, DatasourceConnectionId):
self._DatasourceConnectionId = DatasourceConnectionId
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def UpdateTime(self):
return self._UpdateTime
@UpdateTime.setter
def UpdateTime(self, UpdateTime):
self._UpdateTime = UpdateTime
@property
def Appid(self):
return self._Appid
@Appid.setter
def Appid(self, Appid):
self._Appid = Appid
@property
def HouseName(self):
return self._HouseName
@HouseName.setter
def HouseName(self, HouseName):
self._HouseName = HouseName
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def NetworkConnectionType(self):
return self._NetworkConnectionType
@NetworkConnectionType.setter
def NetworkConnectionType(self, NetworkConnectionType):
self._NetworkConnectionType = NetworkConnectionType
@property
def Uin(self):
return self._Uin
@Uin.setter
def Uin(self, Uin):
self._Uin = Uin
@property
def SubAccountUin(self):
return self._SubAccountUin
@SubAccountUin.setter
def SubAccountUin(self, SubAccountUin):
self._SubAccountUin = SubAccountUin
@property
def NetworkConnectionDesc(self):
return self._NetworkConnectionDesc
@NetworkConnectionDesc.setter
def NetworkConnectionDesc(self, NetworkConnectionDesc):
self._NetworkConnectionDesc = NetworkConnectionDesc
@property
def DatasourceConnectionVpcId(self):
return self._DatasourceConnectionVpcId
@DatasourceConnectionVpcId.setter
def DatasourceConnectionVpcId(self, DatasourceConnectionVpcId):
self._DatasourceConnectionVpcId = DatasourceConnectionVpcId
@property
def DatasourceConnectionSubnetId(self):
return self._DatasourceConnectionSubnetId
@DatasourceConnectionSubnetId.setter
def DatasourceConnectionSubnetId(self, DatasourceConnectionSubnetId):
self._DatasourceConnectionSubnetId = DatasourceConnectionSubnetId
@property
def DatasourceConnectionCidrBlock(self):
return self._DatasourceConnectionCidrBlock
@DatasourceConnectionCidrBlock.setter
def DatasourceConnectionCidrBlock(self, DatasourceConnectionCidrBlock):
self._DatasourceConnectionCidrBlock = DatasourceConnectionCidrBlock
@property
def DatasourceConnectionSubnetCidrBlock(self):
return self._DatasourceConnectionSubnetCidrBlock
@DatasourceConnectionSubnetCidrBlock.setter
def DatasourceConnectionSubnetCidrBlock(self, DatasourceConnectionSubnetCidrBlock):
self._DatasourceConnectionSubnetCidrBlock = DatasourceConnectionSubnetCidrBlock
def _deserialize(self, params):
self._Id = params.get("Id")
self._AssociateId = params.get("AssociateId")
self._HouseId = params.get("HouseId")
self._DatasourceConnectionId = params.get("DatasourceConnectionId")
self._State = params.get("State")
self._CreateTime = params.get("CreateTime")
self._UpdateTime = params.get("UpdateTime")
self._Appid = params.get("Appid")
self._HouseName = params.get("HouseName")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._NetworkConnectionType = params.get("NetworkConnectionType")
self._Uin = params.get("Uin")
self._SubAccountUin = params.get("SubAccountUin")
self._NetworkConnectionDesc = params.get("NetworkConnectionDesc")
self._DatasourceConnectionVpcId = params.get("DatasourceConnectionVpcId")
self._DatasourceConnectionSubnetId = params.get("DatasourceConnectionSubnetId")
self._DatasourceConnectionCidrBlock = params.get("DatasourceConnectionCidrBlock")
self._DatasourceConnectionSubnetCidrBlock = params.get("DatasourceConnectionSubnetCidrBlock")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class NotebookSessionInfo(AbstractModel):
"""Notebook Session详细信息。
"""
def __init__(self):
r"""
:param _Name: Session名称
:type Name: str
:param _Kind: 类型,当前支持:spark、pyspark、sparkr、sql
:type Kind: str
:param _DataEngineName: DLC Spark作业引擎名称
:type DataEngineName: str
:param _Arguments: Session相关配置,当前支持:eni、roleArn以及用户指定的配置
注意:此字段可能返回 null,表示取不到有效值。
:type Arguments: list of KVPair
:param _ProgramDependentFiles: 运行程序地址,当前支持:cosn://和lakefs://两种路径
注意:此字段可能返回 null,表示取不到有效值。
:type ProgramDependentFiles: list of str
:param _ProgramDependentJars: 依赖的jar程序地址,当前支持:cosn://和lakefs://两种路径
注意:此字段可能返回 null,表示取不到有效值。
:type ProgramDependentJars: list of str
:param _ProgramDependentPython: 依赖的python程序地址,当前支持:cosn://和lakefs://两种路径
注意:此字段可能返回 null,表示取不到有效值。
:type ProgramDependentPython: list of str
:param _ProgramArchives: 依赖的pyspark虚拟环境地址,当前支持:cosn://和lakefs://两种路径
注意:此字段可能返回 null,表示取不到有效值。
:type ProgramArchives: list of str
:param _DriverSize: 指定的Driver规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
注意:此字段可能返回 null,表示取不到有效值。
:type DriverSize: str
:param _ExecutorSize: 指定的Executor规格,当前支持:small(默认,1cu)、medium(2cu)、large(4cu)、xlarge(8cu)
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorSize: str
:param _ExecutorNumbers: 指定的Executor数量,默认为1
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorNumbers: int
:param _ProxyUser: 代理用户,默认为root
注意:此字段可能返回 null,表示取不到有效值。
:type ProxyUser: str
:param _TimeoutInSecond: 指定的Session超时时间,单位秒,默认3600秒
注意:此字段可能返回 null,表示取不到有效值。
:type TimeoutInSecond: int
:param _SparkAppId: Spark任务返回的AppId
注意:此字段可能返回 null,表示取不到有效值。
:type SparkAppId: str
:param _SessionId: Session唯一标识
:type SessionId: str
:param _State: Session状态,包含:not_started(未启动)、starting(已启动)、idle(等待输入)、busy(正在运行statement)、shutting_down(停止)、error(异常)、dead(已退出)、killed(被杀死)、success(正常停止)
:type State: str
:param _CreateTime: Session创建时间
:type CreateTime: str
:param _AppInfo: 其它信息
注意:此字段可能返回 null,表示取不到有效值。
:type AppInfo: list of KVPair
:param _SparkUiUrl: Spark ui地址
注意:此字段可能返回 null,表示取不到有效值。
:type SparkUiUrl: str
:param _ExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于ExecutorNumbers
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorMaxNumbers: int
"""
self._Name = None
self._Kind = None
self._DataEngineName = None
self._Arguments = None
self._ProgramDependentFiles = None
self._ProgramDependentJars = None
self._ProgramDependentPython = None
self._ProgramArchives = None
self._DriverSize = None
self._ExecutorSize = None
self._ExecutorNumbers = None
self._ProxyUser = None
self._TimeoutInSecond = None
self._SparkAppId = None
self._SessionId = None
self._State = None
self._CreateTime = None
self._AppInfo = None
self._SparkUiUrl = None
self._ExecutorMaxNumbers = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Kind(self):
return self._Kind
@Kind.setter
def Kind(self, Kind):
self._Kind = Kind
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def Arguments(self):
return self._Arguments
@Arguments.setter
def Arguments(self, Arguments):
self._Arguments = Arguments
@property
def ProgramDependentFiles(self):
return self._ProgramDependentFiles
@ProgramDependentFiles.setter
def ProgramDependentFiles(self, ProgramDependentFiles):
self._ProgramDependentFiles = ProgramDependentFiles
@property
def ProgramDependentJars(self):
return self._ProgramDependentJars
@ProgramDependentJars.setter
def ProgramDependentJars(self, ProgramDependentJars):
self._ProgramDependentJars = ProgramDependentJars
@property
def ProgramDependentPython(self):
return self._ProgramDependentPython
@ProgramDependentPython.setter
def ProgramDependentPython(self, ProgramDependentPython):
self._ProgramDependentPython = ProgramDependentPython
@property
def ProgramArchives(self):
return self._ProgramArchives
@ProgramArchives.setter
def ProgramArchives(self, ProgramArchives):
self._ProgramArchives = ProgramArchives
@property
def DriverSize(self):
return self._DriverSize
@DriverSize.setter
def DriverSize(self, DriverSize):
self._DriverSize = DriverSize
@property
def ExecutorSize(self):
return self._ExecutorSize
@ExecutorSize.setter
def ExecutorSize(self, ExecutorSize):
self._ExecutorSize = ExecutorSize
@property
def ExecutorNumbers(self):
return self._ExecutorNumbers
@ExecutorNumbers.setter
def ExecutorNumbers(self, ExecutorNumbers):
self._ExecutorNumbers = ExecutorNumbers
@property
def ProxyUser(self):
return self._ProxyUser
@ProxyUser.setter
def ProxyUser(self, ProxyUser):
self._ProxyUser = ProxyUser
@property
def TimeoutInSecond(self):
return self._TimeoutInSecond
@TimeoutInSecond.setter
def TimeoutInSecond(self, TimeoutInSecond):
self._TimeoutInSecond = TimeoutInSecond
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def AppInfo(self):
return self._AppInfo
@AppInfo.setter
def AppInfo(self, AppInfo):
self._AppInfo = AppInfo
@property
def SparkUiUrl(self):
return self._SparkUiUrl
@SparkUiUrl.setter
def SparkUiUrl(self, SparkUiUrl):
self._SparkUiUrl = SparkUiUrl
@property
def ExecutorMaxNumbers(self):
return self._ExecutorMaxNumbers
@ExecutorMaxNumbers.setter
def ExecutorMaxNumbers(self, ExecutorMaxNumbers):
self._ExecutorMaxNumbers = ExecutorMaxNumbers
def _deserialize(self, params):
self._Name = params.get("Name")
self._Kind = params.get("Kind")
self._DataEngineName = params.get("DataEngineName")
if params.get("Arguments") is not None:
self._Arguments = []
for item in params.get("Arguments"):
obj = KVPair()
obj._deserialize(item)
self._Arguments.append(obj)
self._ProgramDependentFiles = params.get("ProgramDependentFiles")
self._ProgramDependentJars = params.get("ProgramDependentJars")
self._ProgramDependentPython = params.get("ProgramDependentPython")
self._ProgramArchives = params.get("ProgramArchives")
self._DriverSize = params.get("DriverSize")
self._ExecutorSize = params.get("ExecutorSize")
self._ExecutorNumbers = params.get("ExecutorNumbers")
self._ProxyUser = params.get("ProxyUser")
self._TimeoutInSecond = params.get("TimeoutInSecond")
self._SparkAppId = params.get("SparkAppId")
self._SessionId = params.get("SessionId")
self._State = params.get("State")
self._CreateTime = params.get("CreateTime")
if params.get("AppInfo") is not None:
self._AppInfo = []
for item in params.get("AppInfo"):
obj = KVPair()
obj._deserialize(item)
self._AppInfo.append(obj)
self._SparkUiUrl = params.get("SparkUiUrl")
self._ExecutorMaxNumbers = params.get("ExecutorMaxNumbers")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class NotebookSessionStatementBatchInformation(AbstractModel):
"""按批提交Statement运行SQL任务。
"""
def __init__(self):
r"""
:param _NotebookSessionStatementBatch: 任务详情列表
注意:此字段可能返回 null,表示取不到有效值。
:type NotebookSessionStatementBatch: list of NotebookSessionStatementInfo
:param _IsAvailable: 当前批任务是否运行完成
注意:此字段可能返回 null,表示取不到有效值。
:type IsAvailable: bool
:param _SessionId: Session唯一标识
注意:此字段可能返回 null,表示取不到有效值。
:type SessionId: str
:param _BatchId: Batch唯一标识
注意:此字段可能返回 null,表示取不到有效值。
:type BatchId: str
"""
self._NotebookSessionStatementBatch = None
self._IsAvailable = None
self._SessionId = None
self._BatchId = None
@property
def NotebookSessionStatementBatch(self):
return self._NotebookSessionStatementBatch
@NotebookSessionStatementBatch.setter
def NotebookSessionStatementBatch(self, NotebookSessionStatementBatch):
self._NotebookSessionStatementBatch = NotebookSessionStatementBatch
@property
def IsAvailable(self):
return self._IsAvailable
@IsAvailable.setter
def IsAvailable(self, IsAvailable):
self._IsAvailable = IsAvailable
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
def _deserialize(self, params):
if params.get("NotebookSessionStatementBatch") is not None:
self._NotebookSessionStatementBatch = []
for item in params.get("NotebookSessionStatementBatch"):
obj = NotebookSessionStatementInfo()
obj._deserialize(item)
self._NotebookSessionStatementBatch.append(obj)
self._IsAvailable = params.get("IsAvailable")
self._SessionId = params.get("SessionId")
self._BatchId = params.get("BatchId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class NotebookSessionStatementInfo(AbstractModel):
"""NotebookSessionStatement详情。
"""
def __init__(self):
r"""
:param _Completed: 完成时间戳
注意:此字段可能返回 null,表示取不到有效值。
:type Completed: int
:param _Started: 开始时间戳
注意:此字段可能返回 null,表示取不到有效值。
:type Started: int
:param _Progress: 完成进度,百分制
注意:此字段可能返回 null,表示取不到有效值。
:type Progress: float
:param _StatementId: Session Statement唯一标识
:type StatementId: str
:param _State: Session Statement状态,包含:waiting(排队中)、running(运行中)、available(正常)、error(异常)、cancelling(取消中)、cancelled(已取消)
:type State: str
:param _OutPut: Statement输出信息
注意:此字段可能返回 null,表示取不到有效值。
:type OutPut: :class:`tencentcloud.dlc.v20210125.models.StatementOutput`
:param _BatchId: 批任务id
注意:此字段可能返回 null,表示取不到有效值。
:type BatchId: str
:param _Code: 运行语句
注意:此字段可能返回 null,表示取不到有效值。
:type Code: str
:param _TaskId: 任务ID
注意:此字段可能返回 null,表示取不到有效值。
:type TaskId: str
"""
self._Completed = None
self._Started = None
self._Progress = None
self._StatementId = None
self._State = None
self._OutPut = None
self._BatchId = None
self._Code = None
self._TaskId = None
@property
def Completed(self):
return self._Completed
@Completed.setter
def Completed(self, Completed):
self._Completed = Completed
@property
def Started(self):
return self._Started
@Started.setter
def Started(self, Started):
self._Started = Started
@property
def Progress(self):
return self._Progress
@Progress.setter
def Progress(self, Progress):
self._Progress = Progress
@property
def StatementId(self):
return self._StatementId
@StatementId.setter
def StatementId(self, StatementId):
self._StatementId = StatementId
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def OutPut(self):
return self._OutPut
@OutPut.setter
def OutPut(self, OutPut):
self._OutPut = OutPut
@property
def BatchId(self):
return self._BatchId
@BatchId.setter
def BatchId(self, BatchId):
self._BatchId = BatchId
@property
def Code(self):
return self._Code
@Code.setter
def Code(self, Code):
self._Code = Code
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
def _deserialize(self, params):
self._Completed = params.get("Completed")
self._Started = params.get("Started")
self._Progress = params.get("Progress")
self._StatementId = params.get("StatementId")
self._State = params.get("State")
if params.get("OutPut") is not None:
self._OutPut = StatementOutput()
self._OutPut._deserialize(params.get("OutPut"))
self._BatchId = params.get("BatchId")
self._Code = params.get("Code")
self._TaskId = params.get("TaskId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class NotebookSessions(AbstractModel):
"""notebook session列表信息。
"""
def __init__(self):
r"""
:param _Kind: 类型,当前支持:spark、pyspark、sparkr、sql
:type Kind: str
:param _SessionId: Session唯一标识
:type SessionId: str
:param _ProxyUser: 代理用户,默认为root
注意:此字段可能返回 null,表示取不到有效值。
:type ProxyUser: str
:param _State: Session状态,包含:not_started(未启动)、starting(已启动)、idle(等待输入)、busy(正在运行statement)、shutting_down(停止)、error(异常)、dead(已退出)、killed(被杀死)、success(正常停止)
:type State: str
:param _SparkAppId: Spark任务返回的AppId
注意:此字段可能返回 null,表示取不到有效值。
:type SparkAppId: str
:param _Name: Session名称
:type Name: str
:param _CreateTime: Session创建时间
:type CreateTime: str
:param _DataEngineName: 引擎名称
:type DataEngineName: str
:param _LastRunningTime: 最新的运行时间
注意:此字段可能返回 null,表示取不到有效值。
:type LastRunningTime: str
:param _Creator: 创建者
:type Creator: str
:param _SparkUiUrl: spark ui地址
注意:此字段可能返回 null,表示取不到有效值。
:type SparkUiUrl: str
"""
self._Kind = None
self._SessionId = None
self._ProxyUser = None
self._State = None
self._SparkAppId = None
self._Name = None
self._CreateTime = None
self._DataEngineName = None
self._LastRunningTime = None
self._Creator = None
self._SparkUiUrl = None
@property
def Kind(self):
return self._Kind
@Kind.setter
def Kind(self, Kind):
self._Kind = Kind
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def ProxyUser(self):
return self._ProxyUser
@ProxyUser.setter
def ProxyUser(self, ProxyUser):
self._ProxyUser = ProxyUser
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def SparkAppId(self):
return self._SparkAppId
@SparkAppId.setter
def SparkAppId(self, SparkAppId):
self._SparkAppId = SparkAppId
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def LastRunningTime(self):
return self._LastRunningTime
@LastRunningTime.setter
def LastRunningTime(self, LastRunningTime):
self._LastRunningTime = LastRunningTime
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def SparkUiUrl(self):
return self._SparkUiUrl
@SparkUiUrl.setter
def SparkUiUrl(self, SparkUiUrl):
self._SparkUiUrl = SparkUiUrl
def _deserialize(self, params):
self._Kind = params.get("Kind")
self._SessionId = params.get("SessionId")
self._ProxyUser = params.get("ProxyUser")
self._State = params.get("State")
self._SparkAppId = params.get("SparkAppId")
self._Name = params.get("Name")
self._CreateTime = params.get("CreateTime")
self._DataEngineName = params.get("DataEngineName")
self._LastRunningTime = params.get("LastRunningTime")
self._Creator = params.get("Creator")
self._SparkUiUrl = params.get("SparkUiUrl")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class Other(AbstractModel):
"""数据格式其它类型。
"""
def __init__(self):
r"""
:param _Format: 枚举类型,默认值为Json,可选值为[Json, Parquet, ORC, AVRD]之一。
:type Format: str
"""
self._Format = None
@property
def Format(self):
return self._Format
@Format.setter
def Format(self, Format):
self._Format = Format
def _deserialize(self, params):
self._Format = params.get("Format")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class Partition(AbstractModel):
"""数据表分块信息。
"""
def __init__(self):
r"""
:param _Name: 分区列名。
:type Name: str
:param _Type: 分区类型。
:type Type: str
:param _Comment: 对分区的描述。
:type Comment: str
:param _Transform: 隐式分区转换策略
注意:此字段可能返回 null,表示取不到有效值。
:type Transform: str
:param _TransformArgs: 转换策略参数
注意:此字段可能返回 null,表示取不到有效值。
:type TransformArgs: list of str
:param _CreateTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: int
"""
self._Name = None
self._Type = None
self._Comment = None
self._Transform = None
self._TransformArgs = None
self._CreateTime = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def Transform(self):
return self._Transform
@Transform.setter
def Transform(self, Transform):
self._Transform = Transform
@property
def TransformArgs(self):
return self._TransformArgs
@TransformArgs.setter
def TransformArgs(self, TransformArgs):
self._TransformArgs = TransformArgs
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
def _deserialize(self, params):
self._Name = params.get("Name")
self._Type = params.get("Type")
self._Comment = params.get("Comment")
self._Transform = params.get("Transform")
self._TransformArgs = params.get("TransformArgs")
self._CreateTime = params.get("CreateTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
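# Illustrative usage sketch (not part of the generated module): a day-grained
# partition column; the names below are placeholders.
#
#     part = Partition()
#     part.Name = "dt"
#     part.Type = "string"
#     part.Comment = "partitioned by day"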
class Policy(AbstractModel):
"""权限对象
"""
def __init__(self):
r"""
:param _Database: 需要授权的数据库名,填*代表当前Catalog下所有数据库。当授权类型为管理员级别时,只允许填“*”,当授权类型为数据连接级别时只允许填空,其他类型下可以任意指定数据库。
:type Database: str
:param _Catalog: 需要授权的数据源名称,管理员级别下只支持填*(代表该级别全部资源);数据源级别和数据库级别鉴权的情况下,只支持填COSDataCatalog或者*;在数据表级别鉴权下可以填写用户自定义数据源。不填情况下默认为DataLakeCatalog。注意:如果是对用户自定义数据源进行鉴权,DLC能够管理的权限是用户接入数据源的时候提供的账户的子集。
:type Catalog: str
:param _Table: 需要授权的表名,填*代表当前Database下所有表。当授权类型为管理员级别时,只允许填“*”,当授权类型为数据连接级别、数据库级别时只允许填空,其他类型下可以任意指定数据表。
:type Table: str
:param _Operation: 授权的权限操作,对于不同级别的鉴权提供不同操作。管理员权限:ALL,不填默认为ALL;数据连接级鉴权:CREATE;数据库级别鉴权:ALL、CREATE、ALTER、DROP;数据表权限:ALL、SELECT、INSERT、ALTER、DELETE、DROP、UPDATE。注意:在数据表权限下,指定的数据源不为COSDataCatalog的时候,只支持SELECT操作。
:type Operation: str
:param _PolicyType: 授权类型,现在支持八种授权类型:ADMIN:管理员级别鉴权 DATASOURCE:数据连接级别鉴权 DATABASE:数据库级别鉴权 TABLE:表级别鉴权 VIEW:视图级别鉴权 FUNCTION:函数级别鉴权 COLUMN:列级别鉴权 ENGINE:数据引擎鉴权。不填默认为管理员级别鉴权。
:type PolicyType: str
:param _Function: 需要授权的函数名,填*代表当前Catalog下所有函数。当授权类型为管理员级别时,只允许填“*”,当授权类型为数据连接级别时只允许填空,其他类型下可以任意指定函数。
注意:此字段可能返回 null,表示取不到有效值。
:type Function: str
:param _View: 需要授权的视图,填*代表当前Database下所有视图。当授权类型为管理员级别时,只允许填“*”,当授权类型为数据连接级别、数据库级别时只允许填空,其他类型下可以任意指定视图。
注意:此字段可能返回 null,表示取不到有效值。
:type View: str
:param _Column: 需要授权的列,填*代表当前所有列。当授权类型为管理员级别时,只允许填“*”
注意:此字段可能返回 null,表示取不到有效值。
:type Column: str
:param _DataEngine: 需要授权的数据引擎,填*代表当前所有引擎。当授权类型为管理员级别时,只允许填“*”
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngine: str
:param _ReAuth: 用户是否可以进行二次授权。当为true的时候,被授权的用户可以将本次获取的权限再次授权给其他子用户。默认为false
注意:此字段可能返回 null,表示取不到有效值。
:type ReAuth: bool
:param _Source: 权限来源,入参不填。USER:权限来自用户本身;WORKGROUP:权限来自绑定的工作组
注意:此字段可能返回 null,表示取不到有效值。
:type Source: str
:param _Mode: 授权模式,入参不填。COMMON:普通模式;SENIOR:高级模式。
注意:此字段可能返回 null,表示取不到有效值。
:type Mode: str
:param _Operator: 操作者,入参不填。
注意:此字段可能返回 null,表示取不到有效值。
:type Operator: str
:param _CreateTime: 权限创建的时间,入参不填
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: str
:param _SourceId: 权限所属工作组的ID,只有当该权限的来源为工作组时才会有值。即仅当Source字段的值为WORKGROUP时该字段才有值。
注意:此字段可能返回 null,表示取不到有效值。
:type SourceId: int
:param _SourceName: 权限所属工作组的名称,只有当该权限的来源为工作组时才会有值。即仅当Source字段的值为WORKGROUP时该字段才有值。
注意:此字段可能返回 null,表示取不到有效值。
:type SourceName: str
:param _Id: 策略ID
注意:此字段可能返回 null,表示取不到有效值。
:type Id: int
"""
self._Database = None
self._Catalog = None
self._Table = None
self._Operation = None
self._PolicyType = None
self._Function = None
self._View = None
self._Column = None
self._DataEngine = None
self._ReAuth = None
self._Source = None
self._Mode = None
self._Operator = None
self._CreateTime = None
self._SourceId = None
self._SourceName = None
self._Id = None
@property
def Database(self):
return self._Database
@Database.setter
def Database(self, Database):
self._Database = Database
@property
def Catalog(self):
return self._Catalog
@Catalog.setter
def Catalog(self, Catalog):
self._Catalog = Catalog
@property
def Table(self):
return self._Table
@Table.setter
def Table(self, Table):
self._Table = Table
@property
def Operation(self):
return self._Operation
@Operation.setter
def Operation(self, Operation):
self._Operation = Operation
@property
def PolicyType(self):
return self._PolicyType
@PolicyType.setter
def PolicyType(self, PolicyType):
self._PolicyType = PolicyType
@property
def Function(self):
return self._Function
@Function.setter
def Function(self, Function):
self._Function = Function
@property
def View(self):
return self._View
@View.setter
def View(self, View):
self._View = View
@property
def Column(self):
return self._Column
@Column.setter
def Column(self, Column):
self._Column = Column
@property
def DataEngine(self):
return self._DataEngine
@DataEngine.setter
def DataEngine(self, DataEngine):
self._DataEngine = DataEngine
@property
def ReAuth(self):
return self._ReAuth
@ReAuth.setter
def ReAuth(self, ReAuth):
self._ReAuth = ReAuth
@property
def Source(self):
return self._Source
@Source.setter
def Source(self, Source):
self._Source = Source
@property
def Mode(self):
return self._Mode
@Mode.setter
def Mode(self, Mode):
self._Mode = Mode
@property
def Operator(self):
return self._Operator
@Operator.setter
def Operator(self, Operator):
self._Operator = Operator
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def SourceId(self):
return self._SourceId
@SourceId.setter
def SourceId(self, SourceId):
self._SourceId = SourceId
@property
def SourceName(self):
return self._SourceName
@SourceName.setter
def SourceName(self, SourceName):
self._SourceName = SourceName
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
def _deserialize(self, params):
self._Database = params.get("Database")
self._Catalog = params.get("Catalog")
self._Table = params.get("Table")
self._Operation = params.get("Operation")
self._PolicyType = params.get("PolicyType")
self._Function = params.get("Function")
self._View = params.get("View")
self._Column = params.get("Column")
self._DataEngine = params.get("DataEngine")
self._ReAuth = params.get("ReAuth")
self._Source = params.get("Source")
self._Mode = params.get("Mode")
self._Operator = params.get("Operator")
self._CreateTime = params.get("CreateTime")
self._SourceId = params.get("SourceId")
self._SourceName = params.get("SourceName")
self._Id = params.get("Id")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
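# Illustrative usage sketch (not part of the generated module): a table-level
# SELECT grant; catalog, database, and table names below are placeholders.
#
#     policy = Policy()
#     policy.PolicyType = "TABLE"
#     policy.Catalog = "DataLakeCatalog"
#     policy.Database = "demo_db"
#     policy.Table = "demo_table"
#     policy.Operation = "SELECT"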
class PrestoMonitorMetrics(AbstractModel):
"""Presto监控指标
"""
def __init__(self):
r"""
:param _LocalCacheHitRate: Alluxio本地缓存命中率
注意:此字段可能返回 null,表示取不到有效值。
:type LocalCacheHitRate: float
:param _FragmentCacheHitRate: Fragment缓存命中率
注意:此字段可能返回 null,表示取不到有效值。
:type FragmentCacheHitRate: float
"""
self._LocalCacheHitRate = None
self._FragmentCacheHitRate = None
@property
def LocalCacheHitRate(self):
return self._LocalCacheHitRate
@LocalCacheHitRate.setter
def LocalCacheHitRate(self, LocalCacheHitRate):
self._LocalCacheHitRate = LocalCacheHitRate
@property
def FragmentCacheHitRate(self):
return self._FragmentCacheHitRate
@FragmentCacheHitRate.setter
def FragmentCacheHitRate(self, FragmentCacheHitRate):
self._FragmentCacheHitRate = FragmentCacheHitRate
def _deserialize(self, params):
self._LocalCacheHitRate = params.get("LocalCacheHitRate")
self._FragmentCacheHitRate = params.get("FragmentCacheHitRate")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class Property(AbstractModel):
"""数据库和数据表属性信息
"""
def __init__(self):
r"""
:param _Key: 属性key名称。
:type Key: str
:param _Value: 属性key对应的value。
:type Value: str
"""
self._Key = None
self._Value = None
@property
def Key(self):
return self._Key
@Key.setter
def Key(self, Key):
self._Key = Key
@property
def Value(self):
return self._Value
@Value.setter
def Value(self, Value):
self._Value = Value
def _deserialize(self, params):
self._Key = params.get("Key")
self._Value = params.get("Value")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class ReportHeartbeatMetaDataRequest(AbstractModel):
"""ReportHeartbeatMetaData请求参数结构体
"""
def __init__(self):
r"""
:param _DatasourceConnectionName: 数据源名称
:type DatasourceConnectionName: str
:param _LockId: 锁ID
:type LockId: int
:param _TxnId: 事务ID
:type TxnId: int
"""
self._DatasourceConnectionName = None
self._LockId = None
self._TxnId = None
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def LockId(self):
return self._LockId
@LockId.setter
def LockId(self, LockId):
self._LockId = LockId
@property
def TxnId(self):
return self._TxnId
@TxnId.setter
def TxnId(self, TxnId):
self._TxnId = TxnId
def _deserialize(self, params):
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._LockId = params.get("LockId")
self._TxnId = params.get("TxnId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
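# Illustrative heartbeat sketch (not part of the generated module; assumes a
# configured DlcClient named `client` and a LockId obtained from a prior
# LockMetaData call, here called `lock_resp`):
#
#     hb = ReportHeartbeatMetaDataRequest()
#     hb.DatasourceConnectionName = "DataLakeCatalog"
#     hb.LockId = lock_resp.LockId
#     client.ReportHeartbeatMetaData(hb)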
class ReportHeartbeatMetaDataResponse(AbstractModel):
"""ReportHeartbeatMetaData返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class SQLTask(AbstractModel):
"""SQL查询任务
"""
def __init__(self):
r"""
:param _SQL: base64加密后的SQL语句
:type SQL: str
:param _Config: 任务的配置信息
:type Config: list of KVPair
"""
self._SQL = None
self._Config = None
@property
def SQL(self):
return self._SQL
@SQL.setter
def SQL(self, SQL):
self._SQL = SQL
@property
def Config(self):
return self._Config
@Config.setter
def Config(self, Config):
self._Config = Config
def _deserialize(self, params):
self._SQL = params.get("SQL")
if params.get("Config") is not None:
self._Config = []
for item in params.get("Config"):
obj = KVPair()
obj._deserialize(item)
self._Config.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
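# Illustrative sketch (not generated SDK code): the SQL field of SQLTask must
# carry the statement base64-encoded, per the :param docs above. The payload
# and config values are hypothetical, assuming KVPair exposes the usual
# Key/Value fields like the other models in this module:
#
#     import base64
#     task = SQLTask()
#     task.SQL = base64.b64encode(b"SELECT 1").decode("utf-8")
#     kv = KVPair()
#     kv._deserialize({"Key": "spark.sql.shuffle.partitions", "Value": "8"})
#     task.Config = [kv]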
class Script(AbstractModel):
"""script实例。
"""
def __init__(self):
r"""
:param _ScriptId: 脚本Id,长度36字节。
注意:此字段可能返回 null,表示取不到有效值。
:type ScriptId: str
:param _ScriptName: 脚本名称,长度0-25。
注意:此字段可能返回 null,表示取不到有效值。
:type ScriptName: str
:param _ScriptDesc: 脚本描述,长度0-50。
注意:此字段可能返回 null,表示取不到有效值。
:type ScriptDesc: str
:param _DatabaseName: 默认关联数据库。
注意:此字段可能返回 null,表示取不到有效值。
:type DatabaseName: str
:param _SQLStatement: SQL描述,长度0-10000。
注意:此字段可能返回 null,表示取不到有效值。
:type SQLStatement: str
:param _UpdateTime: 更新时间戳, 单位:ms。
注意:此字段可能返回 null,表示取不到有效值。
:type UpdateTime: int
"""
self._ScriptId = None
self._ScriptName = None
self._ScriptDesc = None
self._DatabaseName = None
self._SQLStatement = None
self._UpdateTime = None
@property
def ScriptId(self):
return self._ScriptId
@ScriptId.setter
def ScriptId(self, ScriptId):
self._ScriptId = ScriptId
@property
def ScriptName(self):
return self._ScriptName
@ScriptName.setter
def ScriptName(self, ScriptName):
self._ScriptName = ScriptName
@property
def ScriptDesc(self):
return self._ScriptDesc
@ScriptDesc.setter
def ScriptDesc(self, ScriptDesc):
self._ScriptDesc = ScriptDesc
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def SQLStatement(self):
return self._SQLStatement
@SQLStatement.setter
def SQLStatement(self, SQLStatement):
self._SQLStatement = SQLStatement
@property
def UpdateTime(self):
return self._UpdateTime
@UpdateTime.setter
def UpdateTime(self, UpdateTime):
self._UpdateTime = UpdateTime
def _deserialize(self, params):
self._ScriptId = params.get("ScriptId")
self._ScriptName = params.get("ScriptName")
self._ScriptDesc = params.get("ScriptDesc")
self._DatabaseName = params.get("DatabaseName")
self._SQLStatement = params.get("SQLStatement")
self._UpdateTime = params.get("UpdateTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SessionResourceTemplate(AbstractModel):
"""Spark批作业集群Session资源配置模板;
"""
def __init__(self):
r"""
:param _DriverSize: driver规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
注意:此字段可能返回 null,表示取不到有效值。
:type DriverSize: str
:param _ExecutorSize: executor规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorSize: str
:param _ExecutorNums: 指定executor数量,最小值为1,最大值小于集群规格
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorNums: int
:param _ExecutorMaxNumbers: 指定executor max数量(动态配置场景下),最小值为1,最大值小于集群规格(当ExecutorMaxNumbers小于ExecutorNums时,改值设定为ExecutorNums)
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorMaxNumbers: int
"""
self._DriverSize = None
self._ExecutorSize = None
self._ExecutorNums = None
self._ExecutorMaxNumbers = None
@property
def DriverSize(self):
return self._DriverSize
@DriverSize.setter
def DriverSize(self, DriverSize):
self._DriverSize = DriverSize
@property
def ExecutorSize(self):
return self._ExecutorSize
@ExecutorSize.setter
def ExecutorSize(self, ExecutorSize):
self._ExecutorSize = ExecutorSize
@property
def ExecutorNums(self):
return self._ExecutorNums
@ExecutorNums.setter
def ExecutorNums(self, ExecutorNums):
self._ExecutorNums = ExecutorNums
@property
def ExecutorMaxNumbers(self):
return self._ExecutorMaxNumbers
@ExecutorMaxNumbers.setter
def ExecutorMaxNumbers(self, ExecutorMaxNumbers):
self._ExecutorMaxNumbers = ExecutorMaxNumbers
def _deserialize(self, params):
self._DriverSize = params.get("DriverSize")
self._ExecutorSize = params.get("ExecutorSize")
self._ExecutorNums = params.get("ExecutorNums")
self._ExecutorMaxNumbers = params.get("ExecutorMaxNumbers")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
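# Illustrative sketch of the ExecutorMaxNumbers rule documented above: when
# the requested maximum is below ExecutorNums, the service raises it to
# ExecutorNums (all values below are hypothetical):
#
#     tpl = SessionResourceTemplate()
#     tpl._deserialize({"DriverSize": "small", "ExecutorSize": "small",
#                       "ExecutorNums": 4, "ExecutorMaxNumbers": 2})
#     effective_max = max(tpl.ExecutorMaxNumbers, tpl.ExecutorNums)  # -> 4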
class SparkJobInfo(AbstractModel):
"""spark作业详情。
"""
def __init__(self):
r"""
:param _JobId: spark作业ID
:type JobId: str
:param _JobName: spark作业名
:type JobName: str
:param _JobType: spark作业类型,可去1或者2,1表示batch作业, 2表示streaming作业
:type JobType: int
:param _DataEngine: 引擎名
:type DataEngine: str
:param _Eni: 该字段已下线,请使用字段Datasource
:type Eni: str
:param _IsLocal: 程序包是否本地上传,cos或者lakefs
:type IsLocal: str
:param _JobFile: 程序包路径
:type JobFile: str
:param _RoleArn: 角色ID
:type RoleArn: int
:param _MainClass: spark作业运行主类
:type MainClass: str
:param _CmdArgs: 命令行参数,spark作业命令行参数,空格分隔
:type CmdArgs: str
:param _JobConf: spark原生配置,换行符分隔
:type JobConf: str
:param _IsLocalJars: 依赖jars是否本地上传,cos或者lakefs
:type IsLocalJars: str
:param _JobJars: spark作业依赖jars,逗号分隔
:type JobJars: str
:param _IsLocalFiles: 依赖文件是否本地上传,cos或者lakefs
:type IsLocalFiles: str
:param _JobFiles: spark作业依赖文件,逗号分隔
:type JobFiles: str
:param _JobDriverSize: spark作业driver资源大小
:type JobDriverSize: str
:param _JobExecutorSize: spark作业executor资源大小
:type JobExecutorSize: str
:param _JobExecutorNums: spark作业executor个数
:type JobExecutorNums: int
:param _JobMaxAttempts: spark流任务最大重试次数
:type JobMaxAttempts: int
:param _JobCreator: spark作业创建者
:type JobCreator: str
:param _JobCreateTime: spark作业创建时间
:type JobCreateTime: int
:param _JobUpdateTime: spark作业更新时间
:type JobUpdateTime: int
:param _CurrentTaskId: spark作业最近任务ID
:type CurrentTaskId: str
:param _JobStatus: spark作业最近运行状态
:type JobStatus: int
:param _StreamingStat: spark流作业统计
注意:此字段可能返回 null,表示取不到有效值。
:type StreamingStat: :class:`tencentcloud.dlc.v20210125.models.StreamingStatistics`
:param _DataSource: 数据源名
注意:此字段可能返回 null,表示取不到有效值。
:type DataSource: str
:param _IsLocalPythonFiles: pyspark:依赖上传方式,1、cos;2、lakefs(控制台使用,该方式不支持直接接口调用)
注意:此字段可能返回 null,表示取不到有效值。
:type IsLocalPythonFiles: str
:param _AppPythonFiles: 注:该返回值已废弃
注意:此字段可能返回 null,表示取不到有效值。
:type AppPythonFiles: str
:param _IsLocalArchives: archives:依赖上传方式,1、cos;2、lakefs(控制台使用,该方式不支持直接接口调用)
注意:此字段可能返回 null,表示取不到有效值。
:type IsLocalArchives: str
:param _JobArchives: archives:依赖资源
注意:此字段可能返回 null,表示取不到有效值。
:type JobArchives: str
:param _SparkImage: Spark Image 版本
注意:此字段可能返回 null,表示取不到有效值。
:type SparkImage: str
:param _JobPythonFiles: pyspark:python依赖, 除py文件外,还支持zip/egg等归档格式,多文件以逗号分隔
注意:此字段可能返回 null,表示取不到有效值。
:type JobPythonFiles: str
:param _TaskNum: 当前job正在运行或准备运行的任务个数
注意:此字段可能返回 null,表示取不到有效值。
:type TaskNum: int
:param _DataEngineStatus: 引擎状态:-100(默认:未知状态),-2~11:引擎正常状态;
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngineStatus: int
:param _JobExecutorMaxNumbers: 指定的Executor数量(最大值),默认为1,当开启动态分配有效,若未开启,则该值等于JobExecutorNums
注意:此字段可能返回 null,表示取不到有效值。
:type JobExecutorMaxNumbers: int
:param _SparkImageVersion: 镜像版本
注意:此字段可能返回 null,表示取不到有效值。
:type SparkImageVersion: str
:param _SessionId: 查询脚本关联id
注意:此字段可能返回 null,表示取不到有效值。
:type SessionId: str
:param _DataEngineClusterType: spark_emr_livy
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngineClusterType: str
:param _DataEngineImageVersion: Spark 3.2-EMR
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngineImageVersion: str
:param _IsInherit: 任务资源配置是否继承集群模板,0(默认)不继承,1:继承
注意:此字段可能返回 null,表示取不到有效值。
:type IsInherit: int
:param _IsSessionStarted: 是否使用session脚本的sql运行任务:false:否,true:是
注意:此字段可能返回 null,表示取不到有效值。
:type IsSessionStarted: bool
"""
self._JobId = None
self._JobName = None
self._JobType = None
self._DataEngine = None
self._Eni = None
self._IsLocal = None
self._JobFile = None
self._RoleArn = None
self._MainClass = None
self._CmdArgs = None
self._JobConf = None
self._IsLocalJars = None
self._JobJars = None
self._IsLocalFiles = None
self._JobFiles = None
self._JobDriverSize = None
self._JobExecutorSize = None
self._JobExecutorNums = None
self._JobMaxAttempts = None
self._JobCreator = None
self._JobCreateTime = None
self._JobUpdateTime = None
self._CurrentTaskId = None
self._JobStatus = None
self._StreamingStat = None
self._DataSource = None
self._IsLocalPythonFiles = None
self._AppPythonFiles = None
self._IsLocalArchives = None
self._JobArchives = None
self._SparkImage = None
self._JobPythonFiles = None
self._TaskNum = None
self._DataEngineStatus = None
self._JobExecutorMaxNumbers = None
self._SparkImageVersion = None
self._SessionId = None
self._DataEngineClusterType = None
self._DataEngineImageVersion = None
self._IsInherit = None
self._IsSessionStarted = None
@property
def JobId(self):
return self._JobId
@JobId.setter
def JobId(self, JobId):
self._JobId = JobId
@property
def JobName(self):
return self._JobName
@JobName.setter
def JobName(self, JobName):
self._JobName = JobName
@property
def JobType(self):
return self._JobType
@JobType.setter
def JobType(self, JobType):
self._JobType = JobType
@property
def DataEngine(self):
return self._DataEngine
@DataEngine.setter
def DataEngine(self, DataEngine):
self._DataEngine = DataEngine
@property
def Eni(self):
return self._Eni
@Eni.setter
def Eni(self, Eni):
self._Eni = Eni
@property
def IsLocal(self):
return self._IsLocal
@IsLocal.setter
def IsLocal(self, IsLocal):
self._IsLocal = IsLocal
@property
def JobFile(self):
return self._JobFile
@JobFile.setter
def JobFile(self, JobFile):
self._JobFile = JobFile
@property
def RoleArn(self):
return self._RoleArn
@RoleArn.setter
def RoleArn(self, RoleArn):
self._RoleArn = RoleArn
@property
def MainClass(self):
return self._MainClass
@MainClass.setter
def MainClass(self, MainClass):
self._MainClass = MainClass
@property
def CmdArgs(self):
return self._CmdArgs
@CmdArgs.setter
def CmdArgs(self, CmdArgs):
self._CmdArgs = CmdArgs
@property
def JobConf(self):
return self._JobConf
@JobConf.setter
def JobConf(self, JobConf):
self._JobConf = JobConf
@property
def IsLocalJars(self):
return self._IsLocalJars
@IsLocalJars.setter
def IsLocalJars(self, IsLocalJars):
self._IsLocalJars = IsLocalJars
@property
def JobJars(self):
return self._JobJars
@JobJars.setter
def JobJars(self, JobJars):
self._JobJars = JobJars
@property
def IsLocalFiles(self):
return self._IsLocalFiles
@IsLocalFiles.setter
def IsLocalFiles(self, IsLocalFiles):
self._IsLocalFiles = IsLocalFiles
@property
def JobFiles(self):
return self._JobFiles
@JobFiles.setter
def JobFiles(self, JobFiles):
self._JobFiles = JobFiles
@property
def JobDriverSize(self):
return self._JobDriverSize
@JobDriverSize.setter
def JobDriverSize(self, JobDriverSize):
self._JobDriverSize = JobDriverSize
@property
def JobExecutorSize(self):
return self._JobExecutorSize
@JobExecutorSize.setter
def JobExecutorSize(self, JobExecutorSize):
self._JobExecutorSize = JobExecutorSize
@property
def JobExecutorNums(self):
return self._JobExecutorNums
@JobExecutorNums.setter
def JobExecutorNums(self, JobExecutorNums):
self._JobExecutorNums = JobExecutorNums
@property
def JobMaxAttempts(self):
return self._JobMaxAttempts
@JobMaxAttempts.setter
def JobMaxAttempts(self, JobMaxAttempts):
self._JobMaxAttempts = JobMaxAttempts
@property
def JobCreator(self):
return self._JobCreator
@JobCreator.setter
def JobCreator(self, JobCreator):
self._JobCreator = JobCreator
@property
def JobCreateTime(self):
return self._JobCreateTime
@JobCreateTime.setter
def JobCreateTime(self, JobCreateTime):
self._JobCreateTime = JobCreateTime
@property
def JobUpdateTime(self):
return self._JobUpdateTime
@JobUpdateTime.setter
def JobUpdateTime(self, JobUpdateTime):
self._JobUpdateTime = JobUpdateTime
@property
def CurrentTaskId(self):
return self._CurrentTaskId
@CurrentTaskId.setter
def CurrentTaskId(self, CurrentTaskId):
self._CurrentTaskId = CurrentTaskId
@property
def JobStatus(self):
return self._JobStatus
@JobStatus.setter
def JobStatus(self, JobStatus):
self._JobStatus = JobStatus
@property
def StreamingStat(self):
return self._StreamingStat
@StreamingStat.setter
def StreamingStat(self, StreamingStat):
self._StreamingStat = StreamingStat
@property
def DataSource(self):
return self._DataSource
@DataSource.setter
def DataSource(self, DataSource):
self._DataSource = DataSource
@property
def IsLocalPythonFiles(self):
return self._IsLocalPythonFiles
@IsLocalPythonFiles.setter
def IsLocalPythonFiles(self, IsLocalPythonFiles):
self._IsLocalPythonFiles = IsLocalPythonFiles
@property
def AppPythonFiles(self):
return self._AppPythonFiles
@AppPythonFiles.setter
def AppPythonFiles(self, AppPythonFiles):
self._AppPythonFiles = AppPythonFiles
@property
def IsLocalArchives(self):
return self._IsLocalArchives
@IsLocalArchives.setter
def IsLocalArchives(self, IsLocalArchives):
self._IsLocalArchives = IsLocalArchives
@property
def JobArchives(self):
return self._JobArchives
@JobArchives.setter
def JobArchives(self, JobArchives):
self._JobArchives = JobArchives
@property
def SparkImage(self):
return self._SparkImage
@SparkImage.setter
def SparkImage(self, SparkImage):
self._SparkImage = SparkImage
@property
def JobPythonFiles(self):
return self._JobPythonFiles
@JobPythonFiles.setter
def JobPythonFiles(self, JobPythonFiles):
self._JobPythonFiles = JobPythonFiles
@property
def TaskNum(self):
return self._TaskNum
@TaskNum.setter
def TaskNum(self, TaskNum):
self._TaskNum = TaskNum
@property
def DataEngineStatus(self):
return self._DataEngineStatus
@DataEngineStatus.setter
def DataEngineStatus(self, DataEngineStatus):
self._DataEngineStatus = DataEngineStatus
@property
def JobExecutorMaxNumbers(self):
return self._JobExecutorMaxNumbers
@JobExecutorMaxNumbers.setter
def JobExecutorMaxNumbers(self, JobExecutorMaxNumbers):
self._JobExecutorMaxNumbers = JobExecutorMaxNumbers
@property
def SparkImageVersion(self):
return self._SparkImageVersion
@SparkImageVersion.setter
def SparkImageVersion(self, SparkImageVersion):
self._SparkImageVersion = SparkImageVersion
@property
def SessionId(self):
return self._SessionId
@SessionId.setter
def SessionId(self, SessionId):
self._SessionId = SessionId
@property
def DataEngineClusterType(self):
return self._DataEngineClusterType
@DataEngineClusterType.setter
def DataEngineClusterType(self, DataEngineClusterType):
self._DataEngineClusterType = DataEngineClusterType
@property
def DataEngineImageVersion(self):
return self._DataEngineImageVersion
@DataEngineImageVersion.setter
def DataEngineImageVersion(self, DataEngineImageVersion):
self._DataEngineImageVersion = DataEngineImageVersion
@property
def IsInherit(self):
return self._IsInherit
@IsInherit.setter
def IsInherit(self, IsInherit):
self._IsInherit = IsInherit
@property
def IsSessionStarted(self):
return self._IsSessionStarted
@IsSessionStarted.setter
def IsSessionStarted(self, IsSessionStarted):
self._IsSessionStarted = IsSessionStarted
def _deserialize(self, params):
self._JobId = params.get("JobId")
self._JobName = params.get("JobName")
self._JobType = params.get("JobType")
self._DataEngine = params.get("DataEngine")
self._Eni = params.get("Eni")
self._IsLocal = params.get("IsLocal")
self._JobFile = params.get("JobFile")
self._RoleArn = params.get("RoleArn")
self._MainClass = params.get("MainClass")
self._CmdArgs = params.get("CmdArgs")
self._JobConf = params.get("JobConf")
self._IsLocalJars = params.get("IsLocalJars")
self._JobJars = params.get("JobJars")
self._IsLocalFiles = params.get("IsLocalFiles")
self._JobFiles = params.get("JobFiles")
self._JobDriverSize = params.get("JobDriverSize")
self._JobExecutorSize = params.get("JobExecutorSize")
self._JobExecutorNums = params.get("JobExecutorNums")
self._JobMaxAttempts = params.get("JobMaxAttempts")
self._JobCreator = params.get("JobCreator")
self._JobCreateTime = params.get("JobCreateTime")
self._JobUpdateTime = params.get("JobUpdateTime")
self._CurrentTaskId = params.get("CurrentTaskId")
self._JobStatus = params.get("JobStatus")
if params.get("StreamingStat") is not None:
self._StreamingStat = StreamingStatistics()
self._StreamingStat._deserialize(params.get("StreamingStat"))
self._DataSource = params.get("DataSource")
self._IsLocalPythonFiles = params.get("IsLocalPythonFiles")
self._AppPythonFiles = params.get("AppPythonFiles")
self._IsLocalArchives = params.get("IsLocalArchives")
self._JobArchives = params.get("JobArchives")
self._SparkImage = params.get("SparkImage")
self._JobPythonFiles = params.get("JobPythonFiles")
self._TaskNum = params.get("TaskNum")
self._DataEngineStatus = params.get("DataEngineStatus")
self._JobExecutorMaxNumbers = params.get("JobExecutorMaxNumbers")
self._SparkImageVersion = params.get("SparkImageVersion")
self._SessionId = params.get("SessionId")
self._DataEngineClusterType = params.get("DataEngineClusterType")
self._DataEngineImageVersion = params.get("DataEngineImageVersion")
self._IsInherit = params.get("IsInherit")
self._IsSessionStarted = params.get("IsSessionStarted")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
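# Illustrative sketch (hypothetical payload): nested message fields such as
# StreamingStat are themselves models, so _deserialize instantiates the child
# class and recurses, as the code above does for StreamingStatistics:
#
#     job = SparkJobInfo()
#     job._deserialize({"JobId": "batch_000001", "JobType": 1,
#                       "StreamingStat": {"StartTime": "2023-01-01 00:00:00",
#                                         "Receivers": 0}})
#     # job.StreamingStat is a StreamingStatistics instance, not a dict.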
class SparkMonitorMetrics(AbstractModel):
"""Spark监控数据
"""
def __init__(self):
r"""
:param _ShuffleWriteBytesCos: shuffle写溢出到COS数据量,单位:byte
注意:此字段可能返回 null,表示取不到有效值。
:type ShuffleWriteBytesCos: int
:param _ShuffleWriteBytesTotal: shuffle写数据量,单位:byte
注意:此字段可能返回 null,表示取不到有效值。
:type ShuffleWriteBytesTotal: int
"""
self._ShuffleWriteBytesCos = None
self._ShuffleWriteBytesTotal = None
@property
def ShuffleWriteBytesCos(self):
return self._ShuffleWriteBytesCos
@ShuffleWriteBytesCos.setter
def ShuffleWriteBytesCos(self, ShuffleWriteBytesCos):
self._ShuffleWriteBytesCos = ShuffleWriteBytesCos
@property
def ShuffleWriteBytesTotal(self):
return self._ShuffleWriteBytesTotal
@ShuffleWriteBytesTotal.setter
def ShuffleWriteBytesTotal(self, ShuffleWriteBytesTotal):
self._ShuffleWriteBytesTotal = ShuffleWriteBytesTotal
def _deserialize(self, params):
self._ShuffleWriteBytesCos = params.get("ShuffleWriteBytesCos")
self._ShuffleWriteBytesTotal = params.get("ShuffleWriteBytesTotal")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
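# Illustrative sketch: the two counters above make it easy to compute how much
# of the shuffle write spilled to COS (the ratio variable is local to this
# example, not part of the SDK; values are hypothetical):
#
#     m = SparkMonitorMetrics()
#     m._deserialize({"ShuffleWriteBytesCos": 1024, "ShuffleWriteBytesTotal": 4096})
#     cos_spill_ratio = m.ShuffleWriteBytesCos / m.ShuffleWriteBytesTotal  # 0.25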
class SparkSessionBatchLog(AbstractModel):
"""SparkSQL批任务运行日志
"""
def __init__(self):
r"""
:param _Step: 日志步骤:BEG/CS/DS/DSS/DSF/FINF/RTO/CANCEL/CT/DT/DTS/DTF/FINT/EXCE
注意:此字段可能返回 null,表示取不到有效值。
:type Step: str
:param _Time: 时间
注意:此字段可能返回 null,表示取不到有效值。
:type Time: str
:param _Message: 日志提示
注意:此字段可能返回 null,表示取不到有效值。
:type Message: str
:param _Operate: 日志操作
注意:此字段可能返回 null,表示取不到有效值。
:type Operate: list of SparkSessionBatchLogOperate
"""
self._Step = None
self._Time = None
self._Message = None
self._Operate = None
@property
def Step(self):
return self._Step
@Step.setter
def Step(self, Step):
self._Step = Step
@property
def Time(self):
return self._Time
@Time.setter
def Time(self, Time):
self._Time = Time
@property
def Message(self):
return self._Message
@Message.setter
def Message(self, Message):
self._Message = Message
@property
def Operate(self):
return self._Operate
@Operate.setter
def Operate(self, Operate):
self._Operate = Operate
def _deserialize(self, params):
self._Step = params.get("Step")
self._Time = params.get("Time")
self._Message = params.get("Message")
if params.get("Operate") is not None:
self._Operate = []
for item in params.get("Operate"):
obj = SparkSessionBatchLogOperate()
obj._deserialize(item)
self._Operate.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SparkSessionBatchLogOperate(AbstractModel):
"""SparkSQL批任务日志操作信息。
"""
def __init__(self):
r"""
:param _Text: 操作提示
注意:此字段可能返回 null,表示取不到有效值。
:type Text: str
:param _Operate: 操作类型:COPY、LOG、UI、RESULT、List、TAB
注意:此字段可能返回 null,表示取不到有效值。
:type Operate: str
:param _Supplement: 补充信息:如:taskid、sessionid、sparkui等
注意:此字段可能返回 null,表示取不到有效值。
:type Supplement: list of KVPair
"""
self._Text = None
self._Operate = None
self._Supplement = None
@property
def Text(self):
return self._Text
@Text.setter
def Text(self, Text):
self._Text = Text
@property
def Operate(self):
return self._Operate
@Operate.setter
def Operate(self, Operate):
self._Operate = Operate
@property
def Supplement(self):
return self._Supplement
@Supplement.setter
def Supplement(self, Supplement):
self._Supplement = Supplement
def _deserialize(self, params):
self._Text = params.get("Text")
self._Operate = params.get("Operate")
if params.get("Supplement") is not None:
self._Supplement = []
for item in params.get("Supplement"):
obj = KVPair()
obj._deserialize(item)
self._Supplement.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
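# Illustrative sketch (hypothetical payload): list-valued message fields such
# as SparkSessionBatchLog.Operate are deserialized element by element into the
# child model, so nested dicts become SparkSessionBatchLogOperate instances:
#
#     log = SparkSessionBatchLog()
#     log._deserialize({"Step": "BEG", "Time": "2023-01-01 00:00:00",
#                       "Message": "batch started",
#                       "Operate": [{"Text": "view log", "Operate": "LOG"}]})
#     # log.Operate[0].Operate == "LOG"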
class StatementOutput(AbstractModel):
"""notebook session statement输出信息。
"""
def __init__(self):
r"""
:param _ExecutionCount: 执行总数
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutionCount: int
:param _Data: Statement数据
注意:此字段可能返回 null,表示取不到有效值。
:type Data: list of KVPair
:param _Status: Statement状态:ok,error
注意:此字段可能返回 null,表示取不到有效值。
:type Status: str
:param _ErrorName: 错误名称
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorName: str
:param _ErrorValue: 错误类型
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorValue: str
:param _ErrorMessage: 错误堆栈信息
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorMessage: list of str
:param _SQLResult: SQL类型任务结果返回
注意:此字段可能返回 null,表示取不到有效值。
:type SQLResult: str
"""
self._ExecutionCount = None
self._Data = None
self._Status = None
self._ErrorName = None
self._ErrorValue = None
self._ErrorMessage = None
self._SQLResult = None
@property
def ExecutionCount(self):
return self._ExecutionCount
@ExecutionCount.setter
def ExecutionCount(self, ExecutionCount):
self._ExecutionCount = ExecutionCount
@property
def Data(self):
return self._Data
@Data.setter
def Data(self, Data):
self._Data = Data
@property
def Status(self):
return self._Status
@Status.setter
def Status(self, Status):
self._Status = Status
@property
def ErrorName(self):
return self._ErrorName
@ErrorName.setter
def ErrorName(self, ErrorName):
self._ErrorName = ErrorName
@property
def ErrorValue(self):
return self._ErrorValue
@ErrorValue.setter
def ErrorValue(self, ErrorValue):
self._ErrorValue = ErrorValue
@property
def ErrorMessage(self):
return self._ErrorMessage
@ErrorMessage.setter
def ErrorMessage(self, ErrorMessage):
self._ErrorMessage = ErrorMessage
@property
def SQLResult(self):
return self._SQLResult
@SQLResult.setter
def SQLResult(self, SQLResult):
self._SQLResult = SQLResult
def _deserialize(self, params):
self._ExecutionCount = params.get("ExecutionCount")
if params.get("Data") is not None:
self._Data = []
for item in params.get("Data"):
obj = KVPair()
obj._deserialize(item)
self._Data.append(obj)
self._Status = params.get("Status")
self._ErrorName = params.get("ErrorName")
self._ErrorValue = params.get("ErrorValue")
self._ErrorMessage = params.get("ErrorMessage")
self._SQLResult = params.get("SQLResult")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class StreamingStatistics(AbstractModel):
"""spark流任务统计信息
"""
def __init__(self):
r"""
:param _StartTime: 任务开始时间
:type StartTime: str
:param _Receivers: 数据接收器数
:type Receivers: int
:param _NumActiveReceivers: 运行中的接收器数
:type NumActiveReceivers: int
:param _NumInactiveReceivers: 不活跃的接收器数
:type NumInactiveReceivers: int
:param _NumActiveBatches: 运行中的批数
:type NumActiveBatches: int
:param _NumRetainedCompletedBatches: 待处理的批数
:type NumRetainedCompletedBatches: int
:param _NumTotalCompletedBatches: 已完成的批数
:type NumTotalCompletedBatches: int
:param _AverageInputRate: 平均输入速率
:type AverageInputRate: float
:param _AverageSchedulingDelay: 平均等待时长
:type AverageSchedulingDelay: float
:param _AverageProcessingTime: 平均处理时长
:type AverageProcessingTime: float
:param _AverageTotalDelay: 平均延时
:type AverageTotalDelay: float
"""
self._StartTime = None
self._Receivers = None
self._NumActiveReceivers = None
self._NumInactiveReceivers = None
self._NumActiveBatches = None
self._NumRetainedCompletedBatches = None
self._NumTotalCompletedBatches = None
self._AverageInputRate = None
self._AverageSchedulingDelay = None
self._AverageProcessingTime = None
self._AverageTotalDelay = None
@property
def StartTime(self):
return self._StartTime
@StartTime.setter
def StartTime(self, StartTime):
self._StartTime = StartTime
@property
def Receivers(self):
return self._Receivers
@Receivers.setter
def Receivers(self, Receivers):
self._Receivers = Receivers
@property
def NumActiveReceivers(self):
return self._NumActiveReceivers
@NumActiveReceivers.setter
def NumActiveReceivers(self, NumActiveReceivers):
self._NumActiveReceivers = NumActiveReceivers
@property
def NumInactiveReceivers(self):
return self._NumInactiveReceivers
@NumInactiveReceivers.setter
def NumInactiveReceivers(self, NumInactiveReceivers):
self._NumInactiveReceivers = NumInactiveReceivers
@property
def NumActiveBatches(self):
return self._NumActiveBatches
@NumActiveBatches.setter
def NumActiveBatches(self, NumActiveBatches):
self._NumActiveBatches = NumActiveBatches
@property
def NumRetainedCompletedBatches(self):
return self._NumRetainedCompletedBatches
@NumRetainedCompletedBatches.setter
def NumRetainedCompletedBatches(self, NumRetainedCompletedBatches):
self._NumRetainedCompletedBatches = NumRetainedCompletedBatches
@property
def NumTotalCompletedBatches(self):
return self._NumTotalCompletedBatches
@NumTotalCompletedBatches.setter
def NumTotalCompletedBatches(self, NumTotalCompletedBatches):
self._NumTotalCompletedBatches = NumTotalCompletedBatches
@property
def AverageInputRate(self):
return self._AverageInputRate
@AverageInputRate.setter
def AverageInputRate(self, AverageInputRate):
self._AverageInputRate = AverageInputRate
@property
def AverageSchedulingDelay(self):
return self._AverageSchedulingDelay
@AverageSchedulingDelay.setter
def AverageSchedulingDelay(self, AverageSchedulingDelay):
self._AverageSchedulingDelay = AverageSchedulingDelay
@property
def AverageProcessingTime(self):
return self._AverageProcessingTime
@AverageProcessingTime.setter
def AverageProcessingTime(self, AverageProcessingTime):
self._AverageProcessingTime = AverageProcessingTime
@property
def AverageTotalDelay(self):
return self._AverageTotalDelay
@AverageTotalDelay.setter
def AverageTotalDelay(self, AverageTotalDelay):
self._AverageTotalDelay = AverageTotalDelay
def _deserialize(self, params):
self._StartTime = params.get("StartTime")
self._Receivers = params.get("Receivers")
self._NumActiveReceivers = params.get("NumActiveReceivers")
self._NumInactiveReceivers = params.get("NumInactiveReceivers")
self._NumActiveBatches = params.get("NumActiveBatches")
self._NumRetainedCompletedBatches = params.get("NumRetainedCompletedBatches")
self._NumTotalCompletedBatches = params.get("NumTotalCompletedBatches")
self._AverageInputRate = params.get("AverageInputRate")
self._AverageSchedulingDelay = params.get("AverageSchedulingDelay")
self._AverageProcessingTime = params.get("AverageProcessingTime")
self._AverageTotalDelay = params.get("AverageTotalDelay")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SuspendResumeDataEngineRequest(AbstractModel):
"""SuspendResumeDataEngine请求参数结构体
"""
def __init__(self):
r"""
:param _DataEngineName: 虚拟集群名称
:type DataEngineName: str
:param _Operate: 操作类型 suspend/resume
:type Operate: str
"""
self._DataEngineName = None
self._Operate = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def Operate(self):
return self._Operate
@Operate.setter
def Operate(self, Operate):
self._Operate = Operate
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._Operate = params.get("Operate")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
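# Illustrative sketch: building a suspend request. The cluster name below is
# hypothetical; the JSON round-trip assumes the AbstractModel base class
# provides to_json_string, as elsewhere in this SDK:
#
#     req = SuspendResumeDataEngineRequest()
#     req.DataEngineName = "my-engine"
#     req.Operate = "suspend"   # or "resume"
#     payload = req.to_json_string()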
class SuspendResumeDataEngineResponse(AbstractModel):
"""SuspendResumeDataEngine返回参数结构体
"""
def __init__(self):
r"""
:param _DataEngineName: 虚拟集群详细信息
:type DataEngineName: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DataEngineName = None
self._RequestId = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._RequestId = params.get("RequestId")
class SwitchDataEngineRequest(AbstractModel):
"""SwitchDataEngine请求参数结构体
"""
def __init__(self):
r"""
:param _DataEngineName: 主集群名称
:type DataEngineName: str
:param _StartStandbyCluster: 是否开启备集群
:type StartStandbyCluster: bool
"""
self._DataEngineName = None
self._StartStandbyCluster = None
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def StartStandbyCluster(self):
return self._StartStandbyCluster
@StartStandbyCluster.setter
def StartStandbyCluster(self, StartStandbyCluster):
self._StartStandbyCluster = StartStandbyCluster
def _deserialize(self, params):
self._DataEngineName = params.get("DataEngineName")
self._StartStandbyCluster = params.get("StartStandbyCluster")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SwitchDataEngineResponse(AbstractModel):
"""SwitchDataEngine返回参数结构体
"""
def __init__(self):
r"""
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class TColumn(AbstractModel):
"""表字段描述信息
"""
def __init__(self):
r"""
:param _Name: 字段名称
:type Name: str
:param _Type: 字段类型
:type Type: str
:param _Comment: 字段描述
:type Comment: str
:param _Default: 字段默认值
:type Default: str
:param _NotNull: 字段是否是非空
:type NotNull: bool
"""
self._Name = None
self._Type = None
self._Comment = None
self._Default = None
self._NotNull = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def Default(self):
return self._Default
@Default.setter
def Default(self, Default):
self._Default = Default
@property
def NotNull(self):
return self._NotNull
@NotNull.setter
def NotNull(self, NotNull):
self._NotNull = NotNull
def _deserialize(self, params):
self._Name = params.get("Name")
self._Type = params.get("Type")
self._Comment = params.get("Comment")
self._Default = params.get("Default")
self._NotNull = params.get("NotNull")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
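# Illustrative sketch: describing a two-column schema with TColumn. All field
# values below are hypothetical:
#
#     cols = []
#     for spec in ({"Name": "id", "Type": "bigint", "NotNull": True},
#                  {"Name": "name", "Type": "string", "Default": ""}):
#         col = TColumn()
#         col._deserialize(spec)
#         cols.append(col)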
class TPartition(AbstractModel):
"""表分区字段信息
"""
def __init__(self):
r"""
:param _Name: 字段名称
:type Name: str
:param _Type: 字段类型
:type Type: str
:param _Comment: 字段描述
:type Comment: str
:param _PartitionType: 分区类型
:type PartitionType: str
:param _PartitionFormat: 分区格式
:type PartitionFormat: str
:param _PartitionDot: 分区分隔数
:type PartitionDot: int
:param _Transform: 分区转换策略
:type Transform: str
:param _TransformArgs: 策略参数
:type TransformArgs: list of str
"""
self._Name = None
self._Type = None
self._Comment = None
self._PartitionType = None
self._PartitionFormat = None
self._PartitionDot = None
self._Transform = None
self._TransformArgs = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Comment(self):
return self._Comment
@Comment.setter
def Comment(self, Comment):
self._Comment = Comment
@property
def PartitionType(self):
return self._PartitionType
@PartitionType.setter
def PartitionType(self, PartitionType):
self._PartitionType = PartitionType
@property
def PartitionFormat(self):
return self._PartitionFormat
@PartitionFormat.setter
def PartitionFormat(self, PartitionFormat):
self._PartitionFormat = PartitionFormat
@property
def PartitionDot(self):
return self._PartitionDot
@PartitionDot.setter
def PartitionDot(self, PartitionDot):
self._PartitionDot = PartitionDot
@property
def Transform(self):
return self._Transform
@Transform.setter
def Transform(self, Transform):
self._Transform = Transform
@property
def TransformArgs(self):
return self._TransformArgs
@TransformArgs.setter
def TransformArgs(self, TransformArgs):
self._TransformArgs = TransformArgs
def _deserialize(self, params):
self._Name = params.get("Name")
self._Type = params.get("Type")
self._Comment = params.get("Comment")
self._PartitionType = params.get("PartitionType")
self._PartitionFormat = params.get("PartitionFormat")
self._PartitionDot = params.get("PartitionDot")
self._Transform = params.get("Transform")
self._TransformArgs = params.get("TransformArgs")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableBaseInfo(AbstractModel):
"""数据表配置信息
"""
def __init__(self):
r"""
:param _DatabaseName: 该数据表所属数据库名字
:type DatabaseName: str
:param _TableName: 数据表名字
:type TableName: str
:param _DatasourceConnectionName: 该数据表所属数据源名字
注意:此字段可能返回 null,表示取不到有效值。
:type DatasourceConnectionName: str
:param _TableComment: 该数据表备注
注意:此字段可能返回 null,表示取不到有效值。
:type TableComment: str
:param _Type: 具体类型,表or视图
注意:此字段可能返回 null,表示取不到有效值。
:type Type: str
:param _TableFormat: 数据格式类型,hive,iceberg等
注意:此字段可能返回 null,表示取不到有效值。
:type TableFormat: str
:param _UserAlias: 建表用户昵称
注意:此字段可能返回 null,表示取不到有效值。
:type UserAlias: str
:param _UserSubUin: 建表用户ID
注意:此字段可能返回 null,表示取不到有效值。
:type UserSubUin: str
:param _GovernPolicy: 数据治理配置项
注意:此字段可能返回 null,表示取不到有效值。
:type GovernPolicy: :class:`tencentcloud.dlc.v20210125.models.DataGovernPolicy`
:param _DbGovernPolicyIsDisable: 库数据治理是否关闭,关闭:true,开启:false
注意:此字段可能返回 null,表示取不到有效值。
:type DbGovernPolicyIsDisable: str
"""
self._DatabaseName = None
self._TableName = None
self._DatasourceConnectionName = None
self._TableComment = None
self._Type = None
self._TableFormat = None
self._UserAlias = None
self._UserSubUin = None
self._GovernPolicy = None
self._DbGovernPolicyIsDisable = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def TableName(self):
return self._TableName
@TableName.setter
def TableName(self, TableName):
self._TableName = TableName
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def TableComment(self):
return self._TableComment
@TableComment.setter
def TableComment(self, TableComment):
self._TableComment = TableComment
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def TableFormat(self):
return self._TableFormat
@TableFormat.setter
def TableFormat(self, TableFormat):
self._TableFormat = TableFormat
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
@property
def UserSubUin(self):
return self._UserSubUin
@UserSubUin.setter
def UserSubUin(self, UserSubUin):
self._UserSubUin = UserSubUin
@property
def GovernPolicy(self):
return self._GovernPolicy
@GovernPolicy.setter
def GovernPolicy(self, GovernPolicy):
self._GovernPolicy = GovernPolicy
@property
def DbGovernPolicyIsDisable(self):
return self._DbGovernPolicyIsDisable
@DbGovernPolicyIsDisable.setter
def DbGovernPolicyIsDisable(self, DbGovernPolicyIsDisable):
self._DbGovernPolicyIsDisable = DbGovernPolicyIsDisable
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._TableName = params.get("TableName")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._TableComment = params.get("TableComment")
self._Type = params.get("Type")
self._TableFormat = params.get("TableFormat")
self._UserAlias = params.get("UserAlias")
self._UserSubUin = params.get("UserSubUin")
if params.get("GovernPolicy") is not None:
self._GovernPolicy = DataGovernPolicy()
self._GovernPolicy._deserialize(params.get("GovernPolicy"))
self._DbGovernPolicyIsDisable = params.get("DbGovernPolicyIsDisable")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableInfo(AbstractModel):
"""返回数据表的相关信息。
"""
def __init__(self):
r"""
:param _TableBaseInfo: 数据表配置信息。
:type TableBaseInfo: :class:`tencentcloud.dlc.v20210125.models.TableBaseInfo`
:param _DataFormat: 数据表格式。每次入参可选如下其一的KV结构,[TextFile,CSV,Json, Parquet, ORC, AVRD]。
:type DataFormat: :class:`tencentcloud.dlc.v20210125.models.DataFormat`
:param _Columns: 数据表列信息。
:type Columns: list of Column
:param _Partitions: 数据表分块信息。
:type Partitions: list of Partition
:param _Location: 数据存储路径。当前仅支持cos路径,格式如下:cosn://bucket-name/filepath。
:type Location: str
"""
self._TableBaseInfo = None
self._DataFormat = None
self._Columns = None
self._Partitions = None
self._Location = None
@property
def TableBaseInfo(self):
return self._TableBaseInfo
@TableBaseInfo.setter
def TableBaseInfo(self, TableBaseInfo):
self._TableBaseInfo = TableBaseInfo
@property
def DataFormat(self):
return self._DataFormat
@DataFormat.setter
def DataFormat(self, DataFormat):
self._DataFormat = DataFormat
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
def _deserialize(self, params):
if params.get("TableBaseInfo") is not None:
self._TableBaseInfo = TableBaseInfo()
self._TableBaseInfo._deserialize(params.get("TableBaseInfo"))
if params.get("DataFormat") is not None:
self._DataFormat = DataFormat()
self._DataFormat._deserialize(params.get("DataFormat"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = Column()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = Partition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Location = params.get("Location")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
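# Illustrative sketch (hypothetical payload): a TableInfo is typically built
# from a nested dict, with TableBaseInfo, Columns, and Partitions deserialized
# into their own model classes as the code above shows:
#
#     info = TableInfo()
#     info._deserialize({
#         "TableBaseInfo": {"DatabaseName": "demo_db", "TableName": "demo_tbl"},
#         "Columns": [{"Name": "id", "Type": "bigint"}],
#         "Partitions": [],
#         "Location": "cosn://bucket-name/filepath",
#     })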
class TableResponseInfo(AbstractModel):
"""查询表信息对象
"""
def __init__(self):
r"""
:param _TableBaseInfo: 数据表基本信息。
:type TableBaseInfo: :class:`tencentcloud.dlc.v20210125.models.TableBaseInfo`
:param _Columns: 数据表列信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Columns: list of Column
:param _Partitions: 数据表分块信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Partitions: list of Partition
:param _Location: 数据存储路径。
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
:param _Properties: 数据表属性信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Properties: list of Property
:param _ModifiedTime: 数据表更新时间, 单位: ms。
注意:此字段可能返回 null,表示取不到有效值。
:type ModifiedTime: str
:param _CreateTime: 数据表创建时间,单位: ms。
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: str
:param _InputFormat: 数据格式。
注意:此字段可能返回 null,表示取不到有效值。
:type InputFormat: str
:param _StorageSize: 数据表存储大小(单位:Byte)
注意:此字段可能返回 null,表示取不到有效值。
:type StorageSize: int
:param _RecordCount: 数据表行数
注意:此字段可能返回 null,表示取不到有效值。
:type RecordCount: int
:param _MapMaterializedViewName: xxxx
注意:此字段可能返回 null,表示取不到有效值。
:type MapMaterializedViewName: str
"""
self._TableBaseInfo = None
self._Columns = None
self._Partitions = None
self._Location = None
self._Properties = None
self._ModifiedTime = None
self._CreateTime = None
self._InputFormat = None
self._StorageSize = None
self._RecordCount = None
self._MapMaterializedViewName = None
@property
def TableBaseInfo(self):
return self._TableBaseInfo
@TableBaseInfo.setter
def TableBaseInfo(self, TableBaseInfo):
self._TableBaseInfo = TableBaseInfo
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Partitions(self):
return self._Partitions
@Partitions.setter
def Partitions(self, Partitions):
self._Partitions = Partitions
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def InputFormat(self):
return self._InputFormat
@InputFormat.setter
def InputFormat(self, InputFormat):
self._InputFormat = InputFormat
@property
def StorageSize(self):
return self._StorageSize
@StorageSize.setter
def StorageSize(self, StorageSize):
self._StorageSize = StorageSize
@property
def RecordCount(self):
return self._RecordCount
@RecordCount.setter
def RecordCount(self, RecordCount):
self._RecordCount = RecordCount
@property
def MapMaterializedViewName(self):
return self._MapMaterializedViewName
@MapMaterializedViewName.setter
def MapMaterializedViewName(self, MapMaterializedViewName):
self._MapMaterializedViewName = MapMaterializedViewName
def _deserialize(self, params):
if params.get("TableBaseInfo") is not None:
self._TableBaseInfo = TableBaseInfo()
self._TableBaseInfo._deserialize(params.get("TableBaseInfo"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = Column()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("Partitions") is not None:
self._Partitions = []
for item in params.get("Partitions"):
obj = Partition()
obj._deserialize(item)
self._Partitions.append(obj)
self._Location = params.get("Location")
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
self._ModifiedTime = params.get("ModifiedTime")
self._CreateTime = params.get("CreateTime")
self._InputFormat = params.get("InputFormat")
self._StorageSize = params.get("StorageSize")
self._RecordCount = params.get("RecordCount")
self._MapMaterializedViewName = params.get("MapMaterializedViewName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TagInfo(AbstractModel):
"""标签对信息
"""
def __init__(self):
r"""
:param _TagKey: 标签键
注意:此字段可能返回 null,表示取不到有效值。
:type TagKey: str
:param _TagValue: 标签值
注意:此字段可能返回 null,表示取不到有效值。
:type TagValue: str
"""
self._TagKey = None
self._TagValue = None
@property
def TagKey(self):
return self._TagKey
@TagKey.setter
def TagKey(self, TagKey):
self._TagKey = TagKey
@property
def TagValue(self):
return self._TagValue
@TagValue.setter
def TagValue(self, TagValue):
self._TagValue = TagValue
def _deserialize(self, params):
self._TagKey = params.get("TagKey")
self._TagValue = params.get("TagValue")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Task(AbstractModel):
"""任务类型,任务如SQL查询等。
"""
def __init__(self):
r"""
:param _SQLTask: SQL查询任务
:type SQLTask: :class:`tencentcloud.dlc.v20210125.models.SQLTask`
:param _SparkSQLTask: Spark SQL查询任务
:type SparkSQLTask: :class:`tencentcloud.dlc.v20210125.models.SQLTask`
"""
self._SQLTask = None
self._SparkSQLTask = None
@property
def SQLTask(self):
return self._SQLTask
@SQLTask.setter
def SQLTask(self, SQLTask):
self._SQLTask = SQLTask
@property
def SparkSQLTask(self):
return self._SparkSQLTask
@SparkSQLTask.setter
def SparkSQLTask(self, SparkSQLTask):
self._SparkSQLTask = SparkSQLTask
def _deserialize(self, params):
if params.get("SQLTask") is not None:
self._SQLTask = SQLTask()
self._SQLTask._deserialize(params.get("SQLTask"))
if params.get("SparkSQLTask") is not None:
self._SparkSQLTask = SQLTask()
self._SparkSQLTask._deserialize(params.get("SparkSQLTask"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TaskResponseInfo(AbstractModel):
"""任务实例。
"""
def __init__(self):
r"""
:param _DatabaseName: 任务所属Database的名称。
:type DatabaseName: str
:param _DataAmount: 任务数据量。
:type DataAmount: int
:param _Id: 任务Id。
:type Id: str
:param _UsedTime: 计算耗时,单位: ms
:type UsedTime: int
:param _OutputPath: 任务输出路径。
:type OutputPath: str
:param _CreateTime: 任务创建时间。
:type CreateTime: str
:param _State: 任务状态:0 初始化, 1 执行中, 2 执行成功,3 数据写入中,4 排队中。-1 执行失败,-3 已取消。
:type State: int
:param _SQLType: 任务SQL类型,DDL|DML等
:type SQLType: str
:param _SQL: 任务SQL语句
:type SQL: str
:param _ResultExpired: 结果是否过期。
:type ResultExpired: bool
:param _RowAffectInfo: 数据影响统计信息。
:type RowAffectInfo: str
:param _DataSet: 任务结果数据表。
注意:此字段可能返回 null,表示取不到有效值。
:type DataSet: str
:param _Error: 失败信息, 例如:errorMessage。该字段已废弃。
:type Error: str
:param _Percentage: 任务执行进度num/100(%)
:type Percentage: int
:param _OutputMessage: 任务执行输出信息。
:type OutputMessage: str
:param _TaskType: 执行SQL的引擎类型
:type TaskType: str
:param _ProgressDetail: 任务进度明细
注意:此字段可能返回 null,表示取不到有效值。
:type ProgressDetail: str
:param _UpdateTime: 任务结束时间
注意:此字段可能返回 null,表示取不到有效值。
:type UpdateTime: str
:param _DataEngineId: 计算资源id
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngineId: str
:param _OperateUin: 执行sql的子uin
注意:此字段可能返回 null,表示取不到有效值。
:type OperateUin: str
:param _DataEngineName: 计算资源名字
注意:此字段可能返回 null,表示取不到有效值。
:type DataEngineName: str
:param _InputType: 导入类型是本地导入还是cos
注意:此字段可能返回 null,表示取不到有效值。
:type InputType: str
:param _InputConf: 导入配置
注意:此字段可能返回 null,表示取不到有效值。
:type InputConf: str
:param _DataNumber: 数据条数
注意:此字段可能返回 null,表示取不到有效值。
:type DataNumber: int
:param _CanDownload: 查询数据能不能下载
注意:此字段可能返回 null,表示取不到有效值。
:type CanDownload: bool
:param _UserAlias: 用户别名
注意:此字段可能返回 null,表示取不到有效值。
:type UserAlias: str
:param _SparkJobName: spark应用作业名
注意:此字段可能返回 null,表示取不到有效值。
:type SparkJobName: str
:param _SparkJobId: spark应用作业Id
注意:此字段可能返回 null,表示取不到有效值。
:type SparkJobId: str
:param _SparkJobFile: spark应用入口jar文件
注意:此字段可能返回 null,表示取不到有效值。
:type SparkJobFile: str
:param _UiUrl: spark ui url
注意:此字段可能返回 null,表示取不到有效值。
:type UiUrl: str
:param _TotalTime: 任务耗时,单位: ms
注意:此字段可能返回 null,表示取不到有效值。
:type TotalTime: int
:param _CmdArgs: spark app job执行task的程序入口参数
注意:此字段可能返回 null,表示取不到有效值。
:type CmdArgs: str
:param _ImageVersion: 集群镜像大版本名称
注意:此字段可能返回 null,表示取不到有效值。
:type ImageVersion: str
:param _DriverSize: driver规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
注意:此字段可能返回 null,表示取不到有效值。
:type DriverSize: str
:param _ExecutorSize: executor规格:small,medium,large,xlarge;内存型(引擎类型):m.small,m.medium,m.large,m.xlarge
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorSize: str
:param _ExecutorNums: 指定executor数量,最小值为1,最大值小于集群规格
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorNums: int
:param _ExecutorMaxNumbers: 指定executor max数量(动态配置场景下),最小值为1,最大值小于集群规格(当ExecutorMaxNumbers小于ExecutorNums时,改值设定为ExecutorNums)
注意:此字段可能返回 null,表示取不到有效值。
:type ExecutorMaxNumbers: int
:param _CommonMetrics: 任务公共指标数据
注意:此字段可能返回 null,表示取不到有效值。
:type CommonMetrics: :class:`tencentcloud.dlc.v20210125.models.CommonMetrics`
:param _SparkMonitorMetrics: spark任务指标数据
注意:此字段可能返回 null,表示取不到有效值。
:type SparkMonitorMetrics: :class:`tencentcloud.dlc.v20210125.models.SparkMonitorMetrics`
:param _PrestoMonitorMetrics: presto任务指标数据
注意:此字段可能返回 null,表示取不到有效值。
:type PrestoMonitorMetrics: :class:`tencentcloud.dlc.v20210125.models.PrestoMonitorMetrics`
"""
self._DatabaseName = None
self._DataAmount = None
self._Id = None
self._UsedTime = None
self._OutputPath = None
self._CreateTime = None
self._State = None
self._SQLType = None
self._SQL = None
self._ResultExpired = None
self._RowAffectInfo = None
self._DataSet = None
self._Error = None
self._Percentage = None
self._OutputMessage = None
self._TaskType = None
self._ProgressDetail = None
self._UpdateTime = None
self._DataEngineId = None
self._OperateUin = None
self._DataEngineName = None
self._InputType = None
self._InputConf = None
self._DataNumber = None
self._CanDownload = None
self._UserAlias = None
self._SparkJobName = None
self._SparkJobId = None
self._SparkJobFile = None
self._UiUrl = None
self._TotalTime = None
self._CmdArgs = None
self._ImageVersion = None
self._DriverSize = None
self._ExecutorSize = None
self._ExecutorNums = None
self._ExecutorMaxNumbers = None
self._CommonMetrics = None
self._SparkMonitorMetrics = None
self._PrestoMonitorMetrics = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def DataAmount(self):
return self._DataAmount
@DataAmount.setter
def DataAmount(self, DataAmount):
self._DataAmount = DataAmount
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
@property
def UsedTime(self):
return self._UsedTime
@UsedTime.setter
def UsedTime(self, UsedTime):
self._UsedTime = UsedTime
@property
def OutputPath(self):
return self._OutputPath
@OutputPath.setter
def OutputPath(self, OutputPath):
self._OutputPath = OutputPath
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def SQLType(self):
return self._SQLType
@SQLType.setter
def SQLType(self, SQLType):
self._SQLType = SQLType
@property
def SQL(self):
return self._SQL
@SQL.setter
def SQL(self, SQL):
self._SQL = SQL
@property
def ResultExpired(self):
return self._ResultExpired
@ResultExpired.setter
def ResultExpired(self, ResultExpired):
self._ResultExpired = ResultExpired
@property
def RowAffectInfo(self):
return self._RowAffectInfo
@RowAffectInfo.setter
def RowAffectInfo(self, RowAffectInfo):
self._RowAffectInfo = RowAffectInfo
@property
def DataSet(self):
return self._DataSet
@DataSet.setter
def DataSet(self, DataSet):
self._DataSet = DataSet
@property
def Error(self):
return self._Error
@Error.setter
def Error(self, Error):
self._Error = Error
@property
def Percentage(self):
return self._Percentage
@Percentage.setter
def Percentage(self, Percentage):
self._Percentage = Percentage
@property
def OutputMessage(self):
return self._OutputMessage
@OutputMessage.setter
def OutputMessage(self, OutputMessage):
self._OutputMessage = OutputMessage
@property
def TaskType(self):
return self._TaskType
@TaskType.setter
def TaskType(self, TaskType):
self._TaskType = TaskType
@property
def ProgressDetail(self):
return self._ProgressDetail
@ProgressDetail.setter
def ProgressDetail(self, ProgressDetail):
self._ProgressDetail = ProgressDetail
@property
def UpdateTime(self):
return self._UpdateTime
@UpdateTime.setter
def UpdateTime(self, UpdateTime):
self._UpdateTime = UpdateTime
@property
def DataEngineId(self):
return self._DataEngineId
@DataEngineId.setter
def DataEngineId(self, DataEngineId):
self._DataEngineId = DataEngineId
@property
def OperateUin(self):
return self._OperateUin
@OperateUin.setter
def OperateUin(self, OperateUin):
self._OperateUin = OperateUin
@property
def DataEngineName(self):
return self._DataEngineName
@DataEngineName.setter
def DataEngineName(self, DataEngineName):
self._DataEngineName = DataEngineName
@property
def InputType(self):
return self._InputType
@InputType.setter
def InputType(self, InputType):
self._InputType = InputType
@property
def InputConf(self):
return self._InputConf
@InputConf.setter
def InputConf(self, InputConf):
self._InputConf = InputConf
@property
def DataNumber(self):
return self._DataNumber
@DataNumber.setter
def DataNumber(self, DataNumber):
self._DataNumber = DataNumber
@property
def CanDownload(self):
return self._CanDownload
@CanDownload.setter
def CanDownload(self, CanDownload):
self._CanDownload = CanDownload
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
@property
def SparkJobName(self):
return self._SparkJobName
@SparkJobName.setter
def SparkJobName(self, SparkJobName):
self._SparkJobName = SparkJobName
@property
def SparkJobId(self):
return self._SparkJobId
@SparkJobId.setter
def SparkJobId(self, SparkJobId):
self._SparkJobId = SparkJobId
@property
def SparkJobFile(self):
return self._SparkJobFile
@SparkJobFile.setter
def SparkJobFile(self, SparkJobFile):
self._SparkJobFile = SparkJobFile
@property
def UiUrl(self):
return self._UiUrl
@UiUrl.setter
def UiUrl(self, UiUrl):
self._UiUrl = UiUrl
@property
def TotalTime(self):
return self._TotalTime
@TotalTime.setter
def TotalTime(self, TotalTime):
self._TotalTime = TotalTime
@property
def CmdArgs(self):
return self._CmdArgs
@CmdArgs.setter
def CmdArgs(self, CmdArgs):
self._CmdArgs = CmdArgs
@property
def ImageVersion(self):
return self._ImageVersion
@ImageVersion.setter
def ImageVersion(self, ImageVersion):
self._ImageVersion = ImageVersion
@property
def DriverSize(self):
return self._DriverSize
@DriverSize.setter
def DriverSize(self, DriverSize):
self._DriverSize = DriverSize
@property
def ExecutorSize(self):
return self._ExecutorSize
@ExecutorSize.setter
def ExecutorSize(self, ExecutorSize):
self._ExecutorSize = ExecutorSize
@property
def ExecutorNums(self):
return self._ExecutorNums
@ExecutorNums.setter
def ExecutorNums(self, ExecutorNums):
self._ExecutorNums = ExecutorNums
@property
def ExecutorMaxNumbers(self):
return self._ExecutorMaxNumbers
@ExecutorMaxNumbers.setter
def ExecutorMaxNumbers(self, ExecutorMaxNumbers):
self._ExecutorMaxNumbers = ExecutorMaxNumbers
@property
def CommonMetrics(self):
return self._CommonMetrics
@CommonMetrics.setter
def CommonMetrics(self, CommonMetrics):
self._CommonMetrics = CommonMetrics
@property
def SparkMonitorMetrics(self):
return self._SparkMonitorMetrics
@SparkMonitorMetrics.setter
def SparkMonitorMetrics(self, SparkMonitorMetrics):
self._SparkMonitorMetrics = SparkMonitorMetrics
@property
def PrestoMonitorMetrics(self):
return self._PrestoMonitorMetrics
@PrestoMonitorMetrics.setter
def PrestoMonitorMetrics(self, PrestoMonitorMetrics):
self._PrestoMonitorMetrics = PrestoMonitorMetrics
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._DataAmount = params.get("DataAmount")
self._Id = params.get("Id")
self._UsedTime = params.get("UsedTime")
self._OutputPath = params.get("OutputPath")
self._CreateTime = params.get("CreateTime")
self._State = params.get("State")
self._SQLType = params.get("SQLType")
self._SQL = params.get("SQL")
self._ResultExpired = params.get("ResultExpired")
self._RowAffectInfo = params.get("RowAffectInfo")
self._DataSet = params.get("DataSet")
self._Error = params.get("Error")
self._Percentage = params.get("Percentage")
self._OutputMessage = params.get("OutputMessage")
self._TaskType = params.get("TaskType")
self._ProgressDetail = params.get("ProgressDetail")
self._UpdateTime = params.get("UpdateTime")
self._DataEngineId = params.get("DataEngineId")
self._OperateUin = params.get("OperateUin")
self._DataEngineName = params.get("DataEngineName")
self._InputType = params.get("InputType")
self._InputConf = params.get("InputConf")
self._DataNumber = params.get("DataNumber")
self._CanDownload = params.get("CanDownload")
self._UserAlias = params.get("UserAlias")
self._SparkJobName = params.get("SparkJobName")
self._SparkJobId = params.get("SparkJobId")
self._SparkJobFile = params.get("SparkJobFile")
self._UiUrl = params.get("UiUrl")
self._TotalTime = params.get("TotalTime")
self._CmdArgs = params.get("CmdArgs")
self._ImageVersion = params.get("ImageVersion")
self._DriverSize = params.get("DriverSize")
self._ExecutorSize = params.get("ExecutorSize")
self._ExecutorNums = params.get("ExecutorNums")
self._ExecutorMaxNumbers = params.get("ExecutorMaxNumbers")
if params.get("CommonMetrics") is not None:
self._CommonMetrics = CommonMetrics()
self._CommonMetrics._deserialize(params.get("CommonMetrics"))
if params.get("SparkMonitorMetrics") is not None:
self._SparkMonitorMetrics = SparkMonitorMetrics()
self._SparkMonitorMetrics._deserialize(params.get("SparkMonitorMetrics"))
if params.get("PrestoMonitorMetrics") is not None:
self._PrestoMonitorMetrics = PrestoMonitorMetrics()
self._PrestoMonitorMetrics._deserialize(params.get("PrestoMonitorMetrics"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TaskResultInfo(AbstractModel):
    """Task result information.
    """
    def __init__(self):
        r"""
        :param _TaskId: Unique task ID
        :type TaskId: str
        :param _DatasourceConnectionName: Name of the default data source selected when the task was executed
        Note: this field may return null, indicating that no valid value can be obtained.
        :type DatasourceConnectionName: str
        :param _DatabaseName: Name of the default database selected when the task was executed
        Note: this field may return null, indicating that no valid value can be obtained.
        :type DatabaseName: str
        :param _SQL: SQL statement being executed; each task contains one SQL statement
        :type SQL: str
        :param _SQLType: Type of the executed task; currently one of DDL, DML, DQL
        :type SQLType: str
        :param _State: Current task state. 0: initializing, 1: running, 2: executed successfully, 3: writing data, 4: queued, -1: execution failed, -3: terminated manually by the user. Execution results are returned only when the task succeeds
        :type State: int
        :param _DataAmount: Amount of data scanned, in bytes
        :type DataAmount: int
        :param _UsedTime: Compute time, in ms
        :type UsedTime: int
        :param _OutputPath: Address of the COS bucket where task results are written
        :type OutputPath: str
        :param _CreateTime: Task creation time, as a timestamp
        :type CreateTime: str
        :param _OutputMessage: Task execution information: "success" on success, the failure reason on failure
        :type OutputMessage: str
        :param _RowAffectInfo: Number of affected rows
        :type RowAffectInfo: str
        :param _ResultSchema: Schema information of the result
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ResultSchema: list of Column
        :param _ResultSet: Result data; after unescaping, each element of the outer array is one row
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ResultSet: str
        :param _NextToken: Pagination token; empty when there is no more result data
        :type NextToken: str
        :param _Percentage: Task execution progress, num/100 (%)
        :type Percentage: int
        :param _ProgressDetail: Task progress details
        :type ProgressDetail: str
        :param _DisplayFormat: Console display format. table: tabular display; text: plain-text display
        :type DisplayFormat: str
        :param _TotalTime: Total task time, in ms
        :type TotalTime: int
        """
self._TaskId = None
self._DatasourceConnectionName = None
self._DatabaseName = None
self._SQL = None
self._SQLType = None
self._State = None
self._DataAmount = None
self._UsedTime = None
self._OutputPath = None
self._CreateTime = None
self._OutputMessage = None
self._RowAffectInfo = None
self._ResultSchema = None
self._ResultSet = None
self._NextToken = None
self._Percentage = None
self._ProgressDetail = None
self._DisplayFormat = None
self._TotalTime = None
@property
def TaskId(self):
return self._TaskId
@TaskId.setter
def TaskId(self, TaskId):
self._TaskId = TaskId
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def SQL(self):
return self._SQL
@SQL.setter
def SQL(self, SQL):
self._SQL = SQL
@property
def SQLType(self):
return self._SQLType
@SQLType.setter
def SQLType(self, SQLType):
self._SQLType = SQLType
@property
def State(self):
return self._State
@State.setter
def State(self, State):
self._State = State
@property
def DataAmount(self):
return self._DataAmount
@DataAmount.setter
def DataAmount(self, DataAmount):
self._DataAmount = DataAmount
@property
def UsedTime(self):
return self._UsedTime
@UsedTime.setter
def UsedTime(self, UsedTime):
self._UsedTime = UsedTime
@property
def OutputPath(self):
return self._OutputPath
@OutputPath.setter
def OutputPath(self, OutputPath):
self._OutputPath = OutputPath
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def OutputMessage(self):
return self._OutputMessage
@OutputMessage.setter
def OutputMessage(self, OutputMessage):
self._OutputMessage = OutputMessage
@property
def RowAffectInfo(self):
return self._RowAffectInfo
@RowAffectInfo.setter
def RowAffectInfo(self, RowAffectInfo):
self._RowAffectInfo = RowAffectInfo
@property
def ResultSchema(self):
return self._ResultSchema
@ResultSchema.setter
def ResultSchema(self, ResultSchema):
self._ResultSchema = ResultSchema
@property
def ResultSet(self):
return self._ResultSet
@ResultSet.setter
def ResultSet(self, ResultSet):
self._ResultSet = ResultSet
@property
def NextToken(self):
return self._NextToken
@NextToken.setter
def NextToken(self, NextToken):
self._NextToken = NextToken
@property
def Percentage(self):
return self._Percentage
@Percentage.setter
def Percentage(self, Percentage):
self._Percentage = Percentage
@property
def ProgressDetail(self):
return self._ProgressDetail
@ProgressDetail.setter
def ProgressDetail(self, ProgressDetail):
self._ProgressDetail = ProgressDetail
@property
def DisplayFormat(self):
return self._DisplayFormat
@DisplayFormat.setter
def DisplayFormat(self, DisplayFormat):
self._DisplayFormat = DisplayFormat
@property
def TotalTime(self):
return self._TotalTime
@TotalTime.setter
def TotalTime(self, TotalTime):
self._TotalTime = TotalTime
def _deserialize(self, params):
self._TaskId = params.get("TaskId")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
self._DatabaseName = params.get("DatabaseName")
self._SQL = params.get("SQL")
self._SQLType = params.get("SQLType")
self._State = params.get("State")
self._DataAmount = params.get("DataAmount")
self._UsedTime = params.get("UsedTime")
self._OutputPath = params.get("OutputPath")
self._CreateTime = params.get("CreateTime")
self._OutputMessage = params.get("OutputMessage")
self._RowAffectInfo = params.get("RowAffectInfo")
if params.get("ResultSchema") is not None:
self._ResultSchema = []
for item in params.get("ResultSchema"):
obj = Column()
obj._deserialize(item)
self._ResultSchema.append(obj)
self._ResultSet = params.get("ResultSet")
self._NextToken = params.get("NextToken")
self._Percentage = params.get("Percentage")
self._ProgressDetail = params.get("ProgressDetail")
self._DisplayFormat = params.get("DisplayFormat")
self._TotalTime = params.get("TotalTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TasksInfo(AbstractModel):
    """Collection of tasks to be executed sequentially in batch
    """
    def __init__(self):
        r"""
        :param _TaskType: Task type. SQLTask: SQL query task; SparkSQLTask: Spark SQL query task
        :type TaskType: str
        :param _FailureTolerance: Fault tolerance policy. Proceed: continue executing later tasks after an earlier task fails or is canceled. Terminate: terminate the execution of later tasks after an earlier task fails or is canceled, marking all of them as canceled.
        :type FailureTolerance: str
        :param _SQL: Base64-encoded SQL statements, separated by ";"; at most 50 tasks can be submitted at a time, and they are executed strictly in order
        :type SQL: str
        :param _Config: Task configuration information; currently only supported for SparkSQLTask tasks.
        :type Config: list of KVPair
        :param _Params: User-defined parameters of the task
        :type Params: list of KVPair
        """
self._TaskType = None
self._FailureTolerance = None
self._SQL = None
self._Config = None
self._Params = None
@property
def TaskType(self):
return self._TaskType
@TaskType.setter
def TaskType(self, TaskType):
self._TaskType = TaskType
@property
def FailureTolerance(self):
return self._FailureTolerance
@FailureTolerance.setter
def FailureTolerance(self, FailureTolerance):
self._FailureTolerance = FailureTolerance
@property
def SQL(self):
return self._SQL
@SQL.setter
def SQL(self, SQL):
self._SQL = SQL
@property
def Config(self):
return self._Config
@Config.setter
def Config(self, Config):
self._Config = Config
@property
def Params(self):
return self._Params
@Params.setter
def Params(self, Params):
self._Params = Params
def _deserialize(self, params):
self._TaskType = params.get("TaskType")
self._FailureTolerance = params.get("FailureTolerance")
self._SQL = params.get("SQL")
if params.get("Config") is not None:
self._Config = []
for item in params.get("Config"):
obj = KVPair()
obj._deserialize(item)
self._Config.append(obj)
if params.get("Params") is not None:
self._Params = []
for item in params.get("Params"):
obj = KVPair()
obj._deserialize(item)
self._Params.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TasksOverview(AbstractModel):
    """Task overview
    """
    def __init__(self):
        r"""
        :param _TaskQueuedCount: Number of queued tasks
        :type TaskQueuedCount: int
        :param _TaskInitCount: Number of initializing tasks
        :type TaskInitCount: int
        :param _TaskRunningCount: Number of running tasks
        :type TaskRunningCount: int
        :param _TotalTaskCount: Total number of tasks in the current time range
        :type TotalTaskCount: int
        """
self._TaskQueuedCount = None
self._TaskInitCount = None
self._TaskRunningCount = None
self._TotalTaskCount = None
@property
def TaskQueuedCount(self):
return self._TaskQueuedCount
@TaskQueuedCount.setter
def TaskQueuedCount(self, TaskQueuedCount):
self._TaskQueuedCount = TaskQueuedCount
@property
def TaskInitCount(self):
return self._TaskInitCount
@TaskInitCount.setter
def TaskInitCount(self, TaskInitCount):
self._TaskInitCount = TaskInitCount
@property
def TaskRunningCount(self):
return self._TaskRunningCount
@TaskRunningCount.setter
def TaskRunningCount(self, TaskRunningCount):
self._TaskRunningCount = TaskRunningCount
@property
def TotalTaskCount(self):
return self._TotalTaskCount
@TotalTaskCount.setter
def TotalTaskCount(self, TotalTaskCount):
self._TotalTaskCount = TotalTaskCount
def _deserialize(self, params):
self._TaskQueuedCount = params.get("TaskQueuedCount")
self._TaskInitCount = params.get("TaskInitCount")
self._TaskRunningCount = params.get("TaskRunningCount")
self._TotalTaskCount = params.get("TotalTaskCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TextFile(AbstractModel):
    """Text format
    """
    def __init__(self):
        r"""
        :param _Format: Text type; the value of this parameter is TextFile.
        :type Format: str
        :param _Regex: Regular expression used to process the text.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Regex: str
        """
self._Format = None
self._Regex = None
@property
def Format(self):
return self._Format
@Format.setter
def Format(self, Format):
self._Format = Format
@property
def Regex(self):
return self._Regex
@Regex.setter
def Regex(self, Regex):
self._Regex = Regex
def _deserialize(self, params):
self._Format = params.get("Format")
self._Regex = params.get("Regex")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UnbindWorkGroupsFromUserRequest(AbstractModel):
    """UnbindWorkGroupsFromUser request structure.
    """
    def __init__(self):
        r"""
        :param _AddInfo: Association between the work group IDs and the user ID to unbind
        :type AddInfo: :class:`tencentcloud.dlc.v20210125.models.WorkGroupIdSetOfUserId`
        """
self._AddInfo = None
@property
def AddInfo(self):
return self._AddInfo
@AddInfo.setter
def AddInfo(self, AddInfo):
self._AddInfo = AddInfo
def _deserialize(self, params):
if params.get("AddInfo") is not None:
self._AddInfo = WorkGroupIdSetOfUserId()
self._AddInfo._deserialize(params.get("AddInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UnbindWorkGroupsFromUserResponse(AbstractModel):
    """UnbindWorkGroupsFromUser response structure.
    """
    def __init__(self):
        r"""
        :param _RequestId: Unique request ID, returned with every request; the RequestId of a request is required when troubleshooting it.
        :type RequestId: str
        """
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
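# --- Editor's example (not part of the generated SDK) ---
# A minimal sketch of how the unbind request above is typically assembled;
# the Uin and work group IDs below are made-up values.
def _example_build_unbind_request():
    info = WorkGroupIdSetOfUserId()  # defined further down in this module
    info.UserId = "100001234567"     # made-up sub-user Uin
    info.WorkGroupIds = [1, 2]       # made-up work group IDs
    req = UnbindWorkGroupsFromUserRequest()
    req.AddInfo = info  # the property setter writes the underlying _AddInfo field
    return req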
class UnlockMetaDataRequest(AbstractModel):
"""UnlockMetaData请求参数结构体
"""
def __init__(self):
r"""
:param _LockId: 锁ID
:type LockId: int
:param _DatasourceConnectionName: 数据源名称
:type DatasourceConnectionName: str
"""
self._LockId = None
self._DatasourceConnectionName = None
@property
def LockId(self):
return self._LockId
@LockId.setter
def LockId(self, LockId):
self._LockId = LockId
@property
def DatasourceConnectionName(self):
return self._DatasourceConnectionName
@DatasourceConnectionName.setter
def DatasourceConnectionName(self, DatasourceConnectionName):
self._DatasourceConnectionName = DatasourceConnectionName
def _deserialize(self, params):
self._LockId = params.get("LockId")
self._DatasourceConnectionName = params.get("DatasourceConnectionName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UnlockMetaDataResponse(AbstractModel):
    """UnlockMetaData response structure.
    """
    def __init__(self):
        r"""
        :param _RequestId: Unique request ID, returned with every request; the RequestId of a request is required when troubleshooting it.
        :type RequestId: str
        """
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
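# --- Editor's example (not part of the generated SDK) ---
# _deserialize copies the keys it recognizes onto the model and warns about
# any it does not; "UnknownKey" below is a made-up key to trigger the warning.
def _example_unlock_deserialize():
    req = UnlockMetaDataRequest()
    req._deserialize({"LockId": 42, "DatasourceConnectionName": "demo-source", "UnknownKey": 1})
    # emits UserWarning: "UnknownKey fields are useless."
    return req.LockId  # -> 42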
class UpdateRowFilterRequest(AbstractModel):
"""UpdateRowFilter请求参数结构体
"""
def __init__(self):
r"""
:param _PolicyId: 行过滤策略的id,此值可以通过DescribeUserInfo或者DescribeWorkGroupInfo接口获取
:type PolicyId: int
:param _Policy: 新的过滤策略。
:type Policy: :class:`tencentcloud.dlc.v20210125.models.Policy`
"""
self._PolicyId = None
self._Policy = None
@property
def PolicyId(self):
return self._PolicyId
@PolicyId.setter
def PolicyId(self, PolicyId):
self._PolicyId = PolicyId
@property
def Policy(self):
return self._Policy
@Policy.setter
def Policy(self, Policy):
self._Policy = Policy
def _deserialize(self, params):
self._PolicyId = params.get("PolicyId")
if params.get("Policy") is not None:
self._Policy = Policy()
self._Policy._deserialize(params.get("Policy"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UpdateRowFilterResponse(AbstractModel):
    """UpdateRowFilter response structure.
    """
    def __init__(self):
        r"""
        :param _RequestId: Unique request ID, returned with every request; the RequestId of a request is required when troubleshooting it.
        :type RequestId: str
        """
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class UserIdSetOfWorkGroupId(AbstractModel):
"""绑定到同一个工作组的用户Id的集合
"""
def __init__(self):
r"""
:param _WorkGroupId: 工作组Id
:type WorkGroupId: int
:param _UserIds: 用户Id集合,和CAM侧Uin匹配
:type UserIds: list of str
"""
self._WorkGroupId = None
self._UserIds = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def UserIds(self):
return self._UserIds
@UserIds.setter
def UserIds(self, UserIds):
self._UserIds = UserIds
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
self._UserIds = params.get("UserIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserInfo(AbstractModel):
    """Authorized user information
    """
    def __init__(self):
        r"""
        :param _UserId: User ID, identical to the sub-user Uin
        :type UserId: str
        :param _UserDescription: User description, which helps distinguish between users
        Note: this field may return null, indicating that no valid value can be obtained.
        :type UserDescription: str
        :param _PolicySet: Collection of permissions bound directly to the user
        Note: this field may return null, indicating that no valid value can be obtained.
        :type PolicySet: list of Policy
        :param _Creator: Creator of the current user
        :type Creator: str
        :param _CreateTime: Creation time, in a format such as 2021-07-28 16:19:32
        :type CreateTime: str
        :param _WorkGroupSet: Collection of associated work groups
        Note: this field may return null, indicating that no valid value can be obtained.
        :type WorkGroupSet: list of WorkGroupMessage
        :param _IsOwner: Whether this is the root account
        Note: this field may return null, indicating that no valid value can be obtained.
        :type IsOwner: bool
        :param _UserType: User type. ADMIN: administrator; COMMON: ordinary user
        Note: this field may return null, indicating that no valid value can be obtained.
        :type UserType: str
        :param _UserAlias: User alias
        Note: this field may return null, indicating that no valid value can be obtained.
        :type UserAlias: str
        """
self._UserId = None
self._UserDescription = None
self._PolicySet = None
self._Creator = None
self._CreateTime = None
self._WorkGroupSet = None
self._IsOwner = None
self._UserType = None
self._UserAlias = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def UserDescription(self):
return self._UserDescription
@UserDescription.setter
def UserDescription(self, UserDescription):
self._UserDescription = UserDescription
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def WorkGroupSet(self):
return self._WorkGroupSet
@WorkGroupSet.setter
def WorkGroupSet(self, WorkGroupSet):
self._WorkGroupSet = WorkGroupSet
@property
def IsOwner(self):
return self._IsOwner
@IsOwner.setter
def IsOwner(self, IsOwner):
self._IsOwner = IsOwner
@property
def UserType(self):
return self._UserType
@UserType.setter
def UserType(self, UserType):
self._UserType = UserType
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._UserDescription = params.get("UserDescription")
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
self._Creator = params.get("Creator")
self._CreateTime = params.get("CreateTime")
if params.get("WorkGroupSet") is not None:
self._WorkGroupSet = []
for item in params.get("WorkGroupSet"):
obj = WorkGroupMessage()
obj._deserialize(item)
self._WorkGroupSet.append(obj)
self._IsOwner = params.get("IsOwner")
self._UserType = params.get("UserType")
self._UserAlias = params.get("UserAlias")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserMessage(AbstractModel):
    """Partial user information
    """
    def __init__(self):
        r"""
        :param _UserId: User ID, matching the sub-user Uin on the CAM side
        :type UserId: str
        :param _UserDescription: User description
        Note: this field may return null, indicating that no valid value can be obtained.
        :type UserDescription: str
        :param _Creator: Creator of the current user
        :type Creator: str
        :param _CreateTime: Creation time of the current user, in a format such as 2021-07-28 16:19:32
        :type CreateTime: str
        :param _UserAlias: User alias
        :type UserAlias: str
        """
self._UserId = None
self._UserDescription = None
self._Creator = None
self._CreateTime = None
self._UserAlias = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def UserDescription(self):
return self._UserDescription
@UserDescription.setter
def UserDescription(self, UserDescription):
self._UserDescription = UserDescription
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._UserDescription = params.get("UserDescription")
self._Creator = params.get("Creator")
self._CreateTime = params.get("CreateTime")
self._UserAlias = params.get("UserAlias")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserRole(AbstractModel):
    """User role
    """
    def __init__(self):
        r"""
        :param _RoleId: Role ID
        :type RoleId: int
        :param _AppId: User's AppId
        :type AppId: str
        :param _Uin: User ID
        :type Uin: str
        :param _Arn: Role permission (ARN)
        :type Arn: str
        :param _ModifyTime: Timestamp of the most recent modification
        :type ModifyTime: int
        :param _Desc: Role description
        :type Desc: str
        :param _RoleName: Role name
        Note: this field may return null, indicating that no valid value can be obtained.
        :type RoleName: str
        :param _Creator: Creator Uin
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Creator: str
        :param _CosPermissionList: List of authorized COS paths
        Note: this field may return null, indicating that no valid value can be obtained.
        :type CosPermissionList: list of CosPermission
        :param _PermissionJson: CAM policy in JSON
        Note: this field may return null, indicating that no valid value can be obtained.
        :type PermissionJson: str
        """
self._RoleId = None
self._AppId = None
self._Uin = None
self._Arn = None
self._ModifyTime = None
self._Desc = None
self._RoleName = None
self._Creator = None
self._CosPermissionList = None
self._PermissionJson = None
@property
def RoleId(self):
return self._RoleId
@RoleId.setter
def RoleId(self, RoleId):
self._RoleId = RoleId
@property
def AppId(self):
return self._AppId
@AppId.setter
def AppId(self, AppId):
self._AppId = AppId
@property
def Uin(self):
return self._Uin
@Uin.setter
def Uin(self, Uin):
self._Uin = Uin
@property
def Arn(self):
return self._Arn
@Arn.setter
def Arn(self, Arn):
self._Arn = Arn
@property
def ModifyTime(self):
return self._ModifyTime
@ModifyTime.setter
def ModifyTime(self, ModifyTime):
self._ModifyTime = ModifyTime
@property
def Desc(self):
return self._Desc
@Desc.setter
def Desc(self, Desc):
self._Desc = Desc
@property
def RoleName(self):
return self._RoleName
@RoleName.setter
def RoleName(self, RoleName):
self._RoleName = RoleName
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def CosPermissionList(self):
return self._CosPermissionList
@CosPermissionList.setter
def CosPermissionList(self, CosPermissionList):
self._CosPermissionList = CosPermissionList
@property
def PermissionJson(self):
return self._PermissionJson
@PermissionJson.setter
def PermissionJson(self, PermissionJson):
self._PermissionJson = PermissionJson
def _deserialize(self, params):
self._RoleId = params.get("RoleId")
self._AppId = params.get("AppId")
self._Uin = params.get("Uin")
self._Arn = params.get("Arn")
self._ModifyTime = params.get("ModifyTime")
self._Desc = params.get("Desc")
self._RoleName = params.get("RoleName")
self._Creator = params.get("Creator")
if params.get("CosPermissionList") is not None:
self._CosPermissionList = []
for item in params.get("CosPermissionList"):
obj = CosPermission()
obj._deserialize(item)
self._CosPermissionList.append(obj)
self._PermissionJson = params.get("PermissionJson")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ViewBaseInfo(AbstractModel):
    """Basic view configuration information
    """
    def __init__(self):
        r"""
        :param _DatabaseName: Name of the database the view belongs to
        :type DatabaseName: str
        :param _ViewName: View name
        :type ViewName: str
        :param _UserAlias: Alias of the view creator
        :type UserAlias: str
        :param _UserSubUin: ID of the view creator
        :type UserSubUin: str
        """
self._DatabaseName = None
self._ViewName = None
self._UserAlias = None
self._UserSubUin = None
@property
def DatabaseName(self):
return self._DatabaseName
@DatabaseName.setter
def DatabaseName(self, DatabaseName):
self._DatabaseName = DatabaseName
@property
def ViewName(self):
return self._ViewName
@ViewName.setter
def ViewName(self, ViewName):
self._ViewName = ViewName
@property
def UserAlias(self):
return self._UserAlias
@UserAlias.setter
def UserAlias(self, UserAlias):
self._UserAlias = UserAlias
@property
def UserSubUin(self):
return self._UserSubUin
@UserSubUin.setter
def UserSubUin(self, UserSubUin):
self._UserSubUin = UserSubUin
def _deserialize(self, params):
self._DatabaseName = params.get("DatabaseName")
self._ViewName = params.get("ViewName")
self._UserAlias = params.get("UserAlias")
self._UserSubUin = params.get("UserSubUin")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ViewResponseInfo(AbstractModel):
    """View information query object
    """
    def __init__(self):
        r"""
        :param _ViewBaseInfo: Basic view information.
        :type ViewBaseInfo: :class:`tencentcloud.dlc.v20210125.models.ViewBaseInfo`
        :param _Columns: View column information.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Columns: list of Column
        :param _Properties: View property information.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Properties: list of Property
        :param _CreateTime: View creation time.
        :type CreateTime: str
        :param _ModifiedTime: View update time.
        :type ModifiedTime: str
        """
self._ViewBaseInfo = None
self._Columns = None
self._Properties = None
self._CreateTime = None
self._ModifiedTime = None
@property
def ViewBaseInfo(self):
return self._ViewBaseInfo
@ViewBaseInfo.setter
def ViewBaseInfo(self, ViewBaseInfo):
self._ViewBaseInfo = ViewBaseInfo
@property
def Columns(self):
return self._Columns
@Columns.setter
def Columns(self, Columns):
self._Columns = Columns
@property
def Properties(self):
return self._Properties
@Properties.setter
def Properties(self, Properties):
self._Properties = Properties
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
@property
def ModifiedTime(self):
return self._ModifiedTime
@ModifiedTime.setter
def ModifiedTime(self, ModifiedTime):
self._ModifiedTime = ModifiedTime
def _deserialize(self, params):
if params.get("ViewBaseInfo") is not None:
self._ViewBaseInfo = ViewBaseInfo()
self._ViewBaseInfo._deserialize(params.get("ViewBaseInfo"))
if params.get("Columns") is not None:
self._Columns = []
for item in params.get("Columns"):
obj = Column()
obj._deserialize(item)
self._Columns.append(obj)
if params.get("Properties") is not None:
self._Properties = []
for item in params.get("Properties"):
obj = Property()
obj._deserialize(item)
self._Properties.append(obj)
self._CreateTime = params.get("CreateTime")
self._ModifiedTime = params.get("ModifiedTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class WorkGroupIdSetOfUserId(AbstractModel):
    """Collection of work groups bound to the same user
    """
    def __init__(self):
        r"""
        :param _UserId: User ID, matching the Uin on the CAM side
        :type UserId: str
        :param _WorkGroupIds: Collection of work group IDs
        :type WorkGroupIds: list of int
        """
self._UserId = None
self._WorkGroupIds = None
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
@property
def WorkGroupIds(self):
return self._WorkGroupIds
@WorkGroupIds.setter
def WorkGroupIds(self, WorkGroupIds):
self._WorkGroupIds = WorkGroupIds
def _deserialize(self, params):
self._UserId = params.get("UserId")
self._WorkGroupIds = params.get("WorkGroupIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class WorkGroupInfo(AbstractModel):
    """Work group information
    """
    def __init__(self):
        r"""
        :param _WorkGroupId: Unique ID of the queried work group
        :type WorkGroupId: int
        :param _WorkGroupName: Work group name
        :type WorkGroupName: str
        :param _WorkGroupDescription: Work group description
        Note: this field may return null, indicating that no valid value can be obtained.
        :type WorkGroupDescription: str
        :param _UserNum: Number of users associated with the work group
        :type UserNum: int
        :param _UserSet: Collection of users associated with the work group
        Note: this field may return null, indicating that no valid value can be obtained.
        :type UserSet: list of UserMessage
        :param _PolicySet: Collection of permissions bound to the work group
        Note: this field may return null, indicating that no valid value can be obtained.
        :type PolicySet: list of Policy
        :param _Creator: Creator of the work group
        :type Creator: str
        :param _CreateTime: Creation time of the work group, in a format such as 2021-07-28 16:19:32
        :type CreateTime: str
        """
self._WorkGroupId = None
self._WorkGroupName = None
self._WorkGroupDescription = None
self._UserNum = None
self._UserSet = None
self._PolicySet = None
self._Creator = None
self._CreateTime = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def WorkGroupName(self):
return self._WorkGroupName
@WorkGroupName.setter
def WorkGroupName(self, WorkGroupName):
self._WorkGroupName = WorkGroupName
@property
def WorkGroupDescription(self):
return self._WorkGroupDescription
@WorkGroupDescription.setter
def WorkGroupDescription(self, WorkGroupDescription):
self._WorkGroupDescription = WorkGroupDescription
@property
def UserNum(self):
return self._UserNum
@UserNum.setter
def UserNum(self, UserNum):
self._UserNum = UserNum
@property
def UserSet(self):
return self._UserSet
@UserSet.setter
def UserSet(self, UserSet):
self._UserSet = UserSet
@property
def PolicySet(self):
return self._PolicySet
@PolicySet.setter
def PolicySet(self, PolicySet):
self._PolicySet = PolicySet
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
self._WorkGroupName = params.get("WorkGroupName")
self._WorkGroupDescription = params.get("WorkGroupDescription")
self._UserNum = params.get("UserNum")
if params.get("UserSet") is not None:
self._UserSet = []
for item in params.get("UserSet"):
obj = UserMessage()
obj._deserialize(item)
self._UserSet.append(obj)
if params.get("PolicySet") is not None:
self._PolicySet = []
for item in params.get("PolicySet"):
obj = Policy()
obj._deserialize(item)
self._PolicySet.append(obj)
self._Creator = params.get("Creator")
self._CreateTime = params.get("CreateTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class WorkGroupMessage(AbstractModel):
    """Partial work group information
    """
    def __init__(self):
        r"""
        :param _WorkGroupId: Unique work group ID
        :type WorkGroupId: int
        :param _WorkGroupName: Work group name
        :type WorkGroupName: str
        :param _WorkGroupDescription: Work group description
        Note: this field may return null, indicating that no valid value can be obtained.
        :type WorkGroupDescription: str
        :param _Creator: Creator
        :type Creator: str
        :param _CreateTime: Creation time of the work group, in a format such as 2021-07-28 16:19:32
        :type CreateTime: str
        """
self._WorkGroupId = None
self._WorkGroupName = None
self._WorkGroupDescription = None
self._Creator = None
self._CreateTime = None
@property
def WorkGroupId(self):
return self._WorkGroupId
@WorkGroupId.setter
def WorkGroupId(self, WorkGroupId):
self._WorkGroupId = WorkGroupId
@property
def WorkGroupName(self):
return self._WorkGroupName
@WorkGroupName.setter
def WorkGroupName(self, WorkGroupName):
self._WorkGroupName = WorkGroupName
@property
def WorkGroupDescription(self):
return self._WorkGroupDescription
@WorkGroupDescription.setter
def WorkGroupDescription(self, WorkGroupDescription):
self._WorkGroupDescription = WorkGroupDescription
@property
def Creator(self):
return self._Creator
@Creator.setter
def Creator(self, Creator):
self._Creator = Creator
@property
def CreateTime(self):
return self._CreateTime
@CreateTime.setter
def CreateTime(self, CreateTime):
self._CreateTime = CreateTime
def _deserialize(self, params):
self._WorkGroupId = params.get("WorkGroupId")
self._WorkGroupName = params.get("WorkGroupName")
self._WorkGroupDescription = params.get("WorkGroupDescription")
self._Creator = params.get("Creator")
self._CreateTime = params.get("CreateTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
# ==== freebsd/freebsd-ports :: /devel/plasma/files/patch-setup.py (BSD-2-Clause) ====
--- setup.py.orig 2018-04-15 11:48:52 UTC
+++ setup.py
@@ -6,30 +6,33 @@ except ImportError:
from distutils.core import setup
-from pip.req import parse_requirements
+#from pip.req import parse_requirements
from distutils.core import Extension
import plasma
-requirements = parse_requirements('requirements.txt', session=False)
+#requirements = parse_requirements('requirements.txt', session=False)
requires = []
-for item in requirements:
+#for item in requirements:
# we want to handle package names and also repo urls
- if getattr(item, 'url', None): # older pip has url
- links.append(str(item.url))
- if getattr(item, 'link', None): # newer pip has link
- links.append(str(item.link))
- if item.req:
- requires.append(str(item.req))
+# if getattr(item, 'url', None): # older pip has url
+# links.append(str(item.url))
+# if getattr(item, 'link', None): # newer pip has link
+# links.append(str(item.link))
+# if item.req:
+# requires.append(str(item.req))
x86_analyzer = Extension('plasma.lib.arch.x86.analyzer',
+ include_dirs = ['%%PREFIX%%/include'],
sources = ['plasma/lib/arch/x86/analyzer.c'])
mips_analyzer = Extension('plasma.lib.arch.mips.analyzer',
+ include_dirs = ['%%PREFIX%%/include'],
sources = ['plasma/lib/arch/mips/analyzer.c'])
arm_analyzer = Extension('plasma.lib.arch.arm.analyzer',
+ include_dirs = ['%%PREFIX%%/include'],
sources = ['plasma/lib/arch/arm/analyzer.c'])
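# Editor's note (not part of the patch): pip removed the private pip.req
# module in pip 10, so parse_requirements can no longer be imported; the
# hunks above therefore comment it out. A dependency-free sketch that builds
# a comparable requires list from requirements.txt (assuming plain
# requirement lines, no includes or URLs):
#
#   requires = []
#   with open('requirements.txt') as fh:
#       for line in fh:
#           line = line.strip()
#           if line and not line.startswith('#'):  # skip blanks and comments
#               requires.append(line)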
# ==== facebookresearch/KILT :: /scripts/map_datasets.py (MIT) ====
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from kilt import dataset_mapper
from kilt.datasets import (
base_dataset,
entity_linking,
fact_verification,
natural_questions,
zero_shot_re,
hotpotqa,
wizard,
)
if __name__ == "__main__":
datasets = []
# NQ dev set
datasets.append(
natural_questions.NaturalQuestionsDataset.from_config_file(
"dev_natural_questions", "kilt/configs/mapping/dev_natural_questions.json"
)
)
for dataset in datasets:
dataset_mapper.map_dataset(dataset=dataset)
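# Editor's sketch (not in the original): further datasets are mapped by
# appending more entries before the loop; the config name and path below are
# hypothetical, following the pattern of the dev NQ entry above.
#
#     datasets.append(
#         natural_questions.NaturalQuestionsDataset.from_config_file(
#             "train_natural_questions", "kilt/configs/mapping/train_natural_questions.json"
#         )
#     )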
# ==== 18F/tock :: /tock/api/tests.py (CC0-1.0 / public-domain / MIT) ====
# -*- coding: utf-8 -*-
import datetime
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from django_webtest import WebTest
from api.views import get_timecardobjects, get_user_timecard_count, TimecardList
from employees.models import EmployeeGrade, UserData
from hours.models import Timecard
from hours.factories import (
UserFactory, ReportingPeriodFactory, TimecardFactory, TimecardObjectFactory,
)
from projects.factories import AccountingCodeFactory, ProjectFactory
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
User = get_user_model()
# common client for all API tests
def client():
request_user = User.objects.get_or_create(username='aaron.snow')[0]
token = Token.objects.get_or_create(user=request_user)[0].key
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token)
return client
# common fixtures for all API tests
FIXTURES = [
'tock/fixtures/prod_user.json',
'projects/fixtures/projects.json',
'hours/fixtures/timecards.json'
]
class ProjectsAPITests(TestCase):
fixtures = FIXTURES
def test_projects_json(self):
pass
class ProjectInstanceAPITests(WebTest):
fixtures = FIXTURES
def test_projects_json(self):
res = client().get(reverse('ProjectInstanceView', kwargs={'pk': '29'})).data
self.assertTrue('name' in res)
self.assertTrue('start_date' in res)
self.assertTrue('end_date' in res)
self.assertEqual(res['name'], "Consulting - Agile BPA")
self.assertEqual(res['start_date'], "2016-01-01")
self.assertEqual(res['end_date'], None)
class SubmissionsAPITests(WebTest):
fixtures = FIXTURES
def test_submissions_json_counts_punctual_timecard(self):
res = client().get(reverse('Submissions', kwargs={'num_past_reporting_periods': 2})).data
self.assertEqual(len(res), 1)
self.assertEqual(res[0]["on_time_submissions"], "1")
def test_submissions_json_no_late_timecards(self):
res = client().get(reverse('Submissions', kwargs={'num_past_reporting_periods': 1})).data
self.assertEqual(len(res), 0)
def test_submissions_json_too_many_periods(self):
res = client().get(reverse('Submissions', kwargs={'num_past_reporting_periods': 100})).data
self.assertEqual(len(res), 1)
def test_user_timecard_count(self):
""" Check with unfiltered query """
all_timecards = get_user_timecard_count(Timecard.objects.all())
self.assertEqual(all_timecards.first().tcount, 3)
class UsersAPITests(TestCase):
fixtures = FIXTURES
def test_users_json(self):
pass
def test_users_csv(self):
pass
class TimecardsAPITests(WebTest):
fixtures = FIXTURES
def test_timecards_json(self):
""" Check that the timecards are rendered in json format correctly """
res = client().get(reverse('TimecardList')).data
self.assertEqual(len(res), 2)
def test_timecards_grade_is_null_when_absent(self):
res = client().get(
reverse('TimecardList'),
kwargs={'date': '2016-06-01'}).data
self.assertEqual(res[1]['grade'], None)
def test_timecards_grade_is_populated_when_present(self):
res = client().get(
reverse('TimecardList'),
kwargs={'date': '2015-06-01'}).data
self.assertEqual(res[0]['grade'], 4)
# TODO: test with more diverse data
def test_get_timecardobjects(self):
""" Check that get time cards returns the correct queryset """
# Check with no params
queryset = get_timecardobjects(TimecardList.queryset)
self.assertEqual(len(queryset), 2)
# Check with after param
queryset = get_timecardobjects(TimecardList.queryset,
params={'after': '2020-12-31'})
self.assertEqual(len(queryset), 0)
# Check with date param
queryset = get_timecardobjects(TimecardList.queryset,
params={'date': '2000-01-01'})
self.assertEqual(len(queryset), 0)
queryset = get_timecardobjects(TimecardList.queryset,
params={'date': '2015-06-08'})
self.assertEqual(len(queryset), 1)
# Check with user param
queryset = get_timecardobjects(TimecardList.queryset,
params={'user': '1'})
self.assertEqual(len(queryset), 2)
queryset = get_timecardobjects(TimecardList.queryset,
params={'user': 'aaron.snow'})
self.assertEqual(len(queryset), 2)
queryset = get_timecardobjects(TimecardList.queryset,
params={'user': '22'})
self.assertEqual(len(queryset), 0)
# Check with project param
queryset = get_timecardobjects(TimecardList.queryset,
params={'project': '1'})
self.assertEqual(len(queryset), 2)
queryset = get_timecardobjects(TimecardList.queryset,
params={'project': 'Out Of Office'})
self.assertEqual(len(queryset), 2)
queryset = get_timecardobjects(TimecardList.queryset,
params={'project': '22'})
self.assertEqual(len(queryset), 0)
# Check with before param
queryset = get_timecardobjects(TimecardList.queryset,
params={'before': '2015-06-01'})
self.assertEqual(len(queryset), 1)
queryset = get_timecardobjects(TimecardList.queryset,
params={'before': '2015-05-31'})
self.assertEqual(len(queryset), 0)
# Check with a range using before and after param
queryset = get_timecardobjects(TimecardList.queryset,
params={'after': '2015-06-01', 'before': '2016-05-31'})
self.assertEqual(len(queryset), 1)
queryset = get_timecardobjects(TimecardList.queryset,
params={'after': '2015-06-01', 'before': '2016-06-01'})
self.assertEqual(len(queryset), 1)
def test_get_unsubmitted_timecards(self):
""" Check that get time cards returns the correct queryset """
queryset = get_timecardobjects(
TimecardList.queryset,
params={'submitted': 'no'}
)
self.assertEqual(len(queryset), 1)
queryset = get_timecardobjects(
TimecardList.queryset,
params={'submitted': 'yes'}
)
self.assertEqual(len(queryset), 2)
queryset = get_timecardobjects(
TimecardList.queryset,
params={'submitted': 'foo'}
)
self.assertEqual(len(queryset), 2)
"""
Adding data to the timecards.json fixture results in failing tests since many tests
assert on the length of a list returned. You can add tests here by creating mock data
inside of setUp() and not worry about breaking existing tests that rely on the timecard
fixture
"""
class FixturelessTimecardsAPITests(WebTest):
def setUp(self):
super(FixturelessTimecardsAPITests, self).setUp()
self.user = UserFactory()
self.userdata = UserData.objects.create(user=self.user)
self.billable_code = AccountingCodeFactory(billable=True)
self.weekly_billed_project = ProjectFactory(accounting_code=self.billable_code,is_weekly_bill=True)
self.period1 = ReportingPeriodFactory(start_date=datetime.datetime(2021, 9, 1))
self.period2 = ReportingPeriodFactory(start_date=datetime.datetime(2021, 9, 8))
self.period3 = ReportingPeriodFactory(start_date=datetime.datetime(2021, 9, 14))
self.period4 = ReportingPeriodFactory(start_date=datetime.datetime(2021, 9, 21))
self.period5 = ReportingPeriodFactory(start_date=datetime.datetime(2021, 9, 29))
self.full_allocation_timecard = TimecardFactory(user=self.user, reporting_period=self.period1)
self.three_quarter_allocation_timecard = TimecardFactory(user=self.user, reporting_period=self.period2)
self.half_allocation_timecard = TimecardFactory(user=self.user, reporting_period=self.period3)
self.one_quarter_allocation_timecard = TimecardFactory(user=self.user, reporting_period=self.period4)
self.one_eighth_allocation_timecard = TimecardFactory(user=self.user, reporting_period=self.period5)
self.full_allocation_timecard_objects = [
TimecardObjectFactory(
timecard=self.full_allocation_timecard,
project=self.weekly_billed_project,
hours_spent=0,
project_allocation=1.000
)
]
self.three_quarter_allocation_timecard_objects = [
TimecardObjectFactory(
timecard=self.three_quarter_allocation_timecard,
project=self.weekly_billed_project,
hours_spent=0,
project_allocation=0.750
)
]
self.half_allocation_timecard_objects = [
TimecardObjectFactory(
timecard=self.half_allocation_timecard,
project=self.weekly_billed_project,
hours_spent=0,
project_allocation=0.500
)
]
self.one_quarter_allocation_timecard_objects = [
TimecardObjectFactory(
timecard=self.one_quarter_allocation_timecard,
project=self.weekly_billed_project,
hours_spent=0,
project_allocation=0.250
)
]
self.one_eighth_allocation_timecard_objects = [
TimecardObjectFactory(
timecard=self.one_eighth_allocation_timecard,
project=self.weekly_billed_project,
hours_spent=0,
project_allocation=0.125
)
]
def test_project_allocation_scale_precision(self):
"""
project_allocation allows a scale of 6 digits and a precision of 3 digits
Test to make sure that the API, which relies on TimecardSerializer, follows this convention
"""
all_timecards = client().get(
reverse('TimecardList'),
kwargs={'date': '2021-09-01'}).data
full_allocation_timecard = all_timecards[0]
three_quarter_allocation_timecard = all_timecards[1]
half_allocation_timecard = all_timecards[2]
one_quarter_allocation_timecard = all_timecards[3]
one_eighth_allocation_timecard = all_timecards[4]
self.assertEqual(full_allocation_timecard['project_allocation'], "1.000")
self.assertEqual(three_quarter_allocation_timecard['project_allocation'], "0.750")
self.assertEqual(half_allocation_timecard['project_allocation'], "0.500")
self.assertEqual(one_quarter_allocation_timecard['project_allocation'], "0.250")
self.assertEqual(one_eighth_allocation_timecard['project_allocation'], "0.125")
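        # Editor's note (illustrative): the fixed three-decimal strings
        # asserted above are what a Django DecimalField presumably declared
        # with max_digits=6, decimal_places=3 serializes to; a standalone
        # equivalent of the rounding behaviour:
        #
        #   from decimal import Decimal
        #   assert str(Decimal("0.125").quantize(Decimal("0.001"))) == "0.125"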
class TestAggregates(WebTest):
def setUp(self):
super(TestAggregates, self).setUp()
self.user = UserFactory()
self.userdata = UserData.objects.create(user=self.user)
self.billable_code = AccountingCodeFactory(billable=True)
self.nonbillable_code = AccountingCodeFactory(billable=False)
self.billable_project = ProjectFactory(accounting_code=self.billable_code)
self.nonbillable_project = ProjectFactory(accounting_code=self.nonbillable_code)
self.period = ReportingPeriodFactory(start_date=datetime.datetime(2015, 11, 1))
self.timecard = TimecardFactory(user=self.user, reporting_period=self.period)
self.grade = EmployeeGrade.objects.create(employee=self.user, grade=15, g_start_date=datetime.datetime(2016, 1, 1))
self.timecard_objects = [
TimecardObjectFactory(
timecard=self.timecard,
project=self.billable_project,
hours_spent=15,
grade=self.grade,
),
TimecardObjectFactory(
timecard=self.timecard,
project=self.nonbillable_project,
hours_spent=5,
grade=self.grade
),
]
def test_hours_by_quarter(self):
response = client().get(reverse('HoursByQuarter')).data
self.assertEqual(len(response), 1)
row = response[0]
self.assertEqual(row['billable'], 15)
self.assertEqual(row['nonbillable'], 5)
self.assertEqual(row['total'], 20)
self.assertEqual(row['year'], 2016)
self.assertEqual(row['quarter'], 1)
def test_hours_by_quarter_with_unsubmitted_timecards(self):
""" Check that unsubmitted timecards are not counted """
timecard_unsubmit = TimecardFactory(
user=self.user,
reporting_period=ReportingPeriodFactory(
start_date=datetime.datetime(2015, 11, 2)
),
submitted=False
)
self.timecard_objects.append([
TimecardObjectFactory(
timecard=timecard_unsubmit,
project=self.billable_project,
hours_spent=10,
),
])
response = client().get(reverse('HoursByQuarter')).data
self.assertEqual(len(self.timecard_objects), 3)
self.assertEqual(response[0]['total'], 20)
def test_hours_by_quarter_by_user(self):
response = client().get(reverse('HoursByQuarterByUser')).data
self.assertEqual(len(response), 1)
row = response[0]
self.assertEqual(row['username'], str(self.user))
self.assertEqual(row['billable'], 15)
self.assertEqual(row['nonbillable'], 5)
self.assertEqual(row['total'], 20)
self.assertEqual(row['year'], 2016)
self.assertEqual(row['quarter'], 1)
def test_hours_by_quarter_by_user_with_unsubmitted_timecards(self):
""" Check that unsubmitted timecards are not counted """
# add one unsubmitted timecard + one additional submitted one
timecard_unsubmit = TimecardFactory(
user=self.user,
reporting_period=ReportingPeriodFactory(
start_date=datetime.datetime(2015, 11, 2)
),
submitted=False
)
self.timecard_objects.append([
TimecardObjectFactory(
timecard=timecard_unsubmit,
project=self.billable_project,
hours_spent=10,
),
])
timecard_submit = TimecardFactory(
user=self.user,
reporting_period=ReportingPeriodFactory(
start_date=datetime.datetime(2015, 11, 3)
),
submitted=True
)
self.timecard_objects.append([
TimecardObjectFactory(
timecard=timecard_submit,
project=self.billable_project,
hours_spent=40,
),
])
response = client().get(reverse('HoursByQuarterByUser')).data
row = response[0]
self.assertEqual(len(self.timecard_objects), 4)
self.assertEqual(row['total'], 60)
class ReportingPeriodList(WebTest):
fixtures = FIXTURES
def test_ReportingPeriodList_json(self):
""" Check that the reporting periods are listed """
res = client().get(reverse('ReportingPeriodList')).json()
self.assertGreater(len(res), 0)
def test_ReportingPeriodList_json_empty(self):
""" Check that the ReportingPeriodList is empty when all users
have filled out thier time cards"""
reporting_periods = client().get(reverse('ReportingPeriodList')).data
start_date = reporting_periods[0]['start_date']
res = client().get(reverse(
'ReportingPeriodAudit',
kwargs={'reporting_period_start_date': start_date}
)
).data
self.assertEqual(len(res), 0)
def test_ReportingPeriodList_json_missing_timesheet(self):
""" Check that the ReportingPeriodList shows users that have missing
time cards """
# Create a user
self.regular_user = User.objects.create(username='new.user')
userdata = UserData(user=self.regular_user)
userdata.save()
reporting_periods = client().get(reverse('ReportingPeriodList')).data
start_date = reporting_periods[0]['start_date']
res = client().get(reverse(
'ReportingPeriodAudit',
kwargs={'reporting_period_start_date': start_date}
)
).data
self.assertEqual(len(res), 1)
def test_ReportingPeriodList_json_no_longer_employed(self):
""" Check that the ReportingPeriodList shows users that have missing
time cards """
# Create a user, but set the user as unemployed
self.regular_user = User.objects.create(
username='new.user')
userdata = UserData(user=self.regular_user)
userdata.current_employee = False
userdata.save()
reporting_periods = client().get(reverse('ReportingPeriodList')).data
start_date = reporting_periods[0]['start_date']
res = client().get(reverse(
'ReportingPeriodAudit',
kwargs={'reporting_period_start_date': start_date}
)
).data
self.assertEqual(len(res), 0)
class FullTimecardsAPITests(WebTest):
fixtures = FIXTURES
def test_with_no_filters_only_returns_submitted_timecards(self):
res = client().get(reverse('FullTimecardList')).data
self.assertEqual(len(res), 2)
self.assertTrue(all(tc['submitted'] for tc in res))
def test_unsubmitted_filter(self):
res = client().get(
reverse('FullTimecardList'), {'submitted': 'no'}
).data
self.assertEqual(len(res), 1)
self.assertFalse(all(tc['submitted'] for tc in res))
def test_date_filter(self):
date_to_filter_on = '2015-06-04'
res = client().get(
reverse('FullTimecardList'), {'date': date_to_filter_on}
).data
self.assertEqual(len(res), 1)
self.assertTrue(res[0]['reporting_period_start_date'] < date_to_filter_on)
self.assertTrue(res[0]['reporting_period_end_date'] > date_to_filter_on)
def test_after_filter(self):
# Note that the default behavior is to only return completed timecards, so even though
# there may be another later timecard (in our fixtures), it may or may not be
# submitted (and if not, won't show up in the response)
date_to_filter_on = '2016-01-01'
res = client().get(
reverse('FullTimecardList'), {'after': date_to_filter_on}
).data
self.assertEqual(len(res), 1)
self.assertTrue(res[0]['reporting_period_start_date'] > date_to_filter_on)
self.assertTrue(res[0]['reporting_period_end_date'] > date_to_filter_on)
    def test_before_filter(self):
        date_to_filter_on = '2016-01-01'
        res = client().get(
            reverse('FullTimecardList'), {'before': date_to_filter_on}
        ).data
        self.assertEqual(len(res), 1)
        self.assertTrue(res[0]['reporting_period_start_date'] < date_to_filter_on)
        self.assertTrue(res[0]['reporting_period_end_date'] < date_to_filter_on)
def test_bad_date_format_returns_400(self):
res = client().get(
reverse('FullTimecardList'),
{'date': 'N0T-A-D8'}
)
self.assertEqual(res.status_code, 400)
def test_billable_expectation_is_present(self):
date_to_filter_on = '2016-01-01'
res = client().get(
reverse('FullTimecardList'), {'after': date_to_filter_on}
).data
self.assertTrue('billable_expectation' in res[0].keys())
# ==== pranab/avenir :: /python/app/svmsvc.py (no license) ====
#!/Users/pranab/Tools/anaconda/bin/python
# avenir-python: Machine Learning
# Author: Pranab Ghosh
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import sys
from flask import Flask, jsonify, request
from flask import current_app
#from flask.ext.cache import Cache
from flask_cache import Cache
sys.path.append(os.path.abspath("../supv"))
from rf import *
# REST service for support vector machine prediction
app = Flask(__name__)
cache = Cache()
app.config['CACHE_TYPE'] = 'simple'
cache.init_app(app)
configPath = sys.argv[1]
portNum = int(sys.argv[2])
@app.route('/svm/predict/<string:recs>', methods=['GET'])
def predict(recs):
    print(recs)
    nrecs = recs.replace(",,", "\n")
    print(nrecs)
resp = getResponse(nrecs)
return resp
@app.route('/svm/predict/batch', methods=['GET', 'POST'])
def batchPredict():
content = request.json
nrecs = content["recs"].replace(",,", "\n")
    print(nrecs)
resp = getResponse(nrecs)
return resp
def getResponse(nrecs):
classifier = getClassifier()
cls = classifier.predictProb(nrecs)
result = cls[:,1]
result = ["%.3f" %(r) for r in result]
result = ",".join(result)
return jsonify({'predictions': result})
def getClassifier():
classifier = cache.get('svm_classifier')
if (classifier is None):
print "creating and caching svm classifier"
classifier = SupportVectorMachine(configPath)
cache.set('svm_classifier', classifier, timeout=600)
return classifier
if __name__ == '__main__':
app.run(debug=True, port=portNum, threaded=True)
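# Editor's sketch (not in the original): one way to exercise the batch
# endpoint once the service is running; the config path, port, and record
# values below are made up.
#
#   python svmsvc.py config.json 5000
#   curl -X POST http://localhost:5000/svm/predict/batch \
#        -H "Content-Type: application/json" \
#        -d '{"recs": "1,2,3,,4,5,6"}'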
# ==== dguenms/Dawn-of-Civilization :: /Assets/Python/Victory/GoalHandlers.py (MIT) ====
from Core import *
from inspect import ismethod
from Events import events
class Handlers(object):
def __init__(self):
self.handlers = []
self.other_handlers = []
self.any_handlers = []
def __len__(self):
return len(self.handlers) + len(self.other_handlers) + len(self.any_handlers)
def add(self, event, func):
self.handlers.append((event, func))
def add_other(self, event, func):
self.other_handlers.append((event, func))
def add_any(self, event, func):
self.any_handlers.append((event, func))
class EventHandlerRegistry(object):
def __init__(self):
self.registered = appenddict()
def applicable(self, goal, iPlayer):
return iPlayer in goal.evaluator
def applicable_other(self, goal, iPlayer):
return iPlayer not in goal.evaluator
def applicable_any(self, goal, iPlayer):
return True
def get(self, goal, applicable, event, func):
if hasattr(self, event):
handler_func = getattr(self, event)
if ismethod(handler_func):
return handler_func(goal, applicable, func)
raise ValueError("No handler available for event '%s'" % event)
def register_handlers(self, instance, handlers, goal, applicable):
for event, handler in handlers:
handler_func = self.get(goal, applicable, event, handler)
self.registered[instance].append((event, handler_func))
events.addEventHandler(event, handler_func)
def register(self, instance, goal):
self.register_handlers(instance, instance.handlers.handlers, goal, self.applicable)
self.register_handlers(instance, instance.handlers.other_handlers, goal, self.applicable_other)
self.register_handlers(instance, instance.handlers.any_handlers, goal, self.applicable_any)
def deregister(self, instance):
if instance in self.registered:
for event, handler_func in self.registered[instance]:
events.removeEventHandler(event, handler_func)
del self.registered[instance]
def reset(self):
for instance in self.registered.keys():
self.deregister(instance)
def BeginPlayerTurn(self, goal, applicable, func):
def BeginPlayerTurn((iGameTurn, iPlayer)):
if applicable(goal, iPlayer):
func(goal, iGameTurn, iPlayer)
return BeginPlayerTurn
def blockade(self, goal, applicable, func):
def blockade((iPlayer, iGold)):
if applicable(goal, iPlayer):
func(goal, iGold)
return blockade
def buildingBuilt(self, goal, applicable, func):
def buildingBuilt((city, iBuilding)):
if applicable(goal, city.getOwner()):
func(goal, city, iBuilding)
return buildingBuilt
def cityAcquired(self, goal, applicable, func):
def cityAcquired((iOwner, iPlayer, city, bConquest, bTrade)):
if applicable(goal, iPlayer):
func(goal, city, bConquest)
return cityAcquired
def cityAcquiredAndKept(self, goal, applicable, func):
def cityAcquiredAndKept((iPlayer, city)):
if applicable(goal, iPlayer):
func(goal, city)
return cityAcquiredAndKept
def cityBuilt(self, goal, applicable, func):
def cityBuilt((city,)):
if applicable(goal, city.getOwner()):
func(goal, city)
return cityBuilt
def cityCaptureGold(self, goal, applicable, func):
def cityCaptureGold((city, iPlayer, iGold)):
if applicable(goal, iPlayer):
func(goal, iGold)
return cityCaptureGold
def cityLost(self, goal, applicable, func):
def cityLost((city,)):
if applicable(goal, city.getOwner()):
func(goal)
return cityLost
def cityRazed(self, goal, applicable, func):
def cityRazed((city, iPlayer)):
if applicable(goal, iPlayer):
func(goal)
return cityRazed
def combatFood(self, goal, applicable, func):
def combatFood((iPlayer, unit, iFood)):
if applicable(goal, iPlayer):
func(goal, iFood)
return combatFood
def combatGold(self, goal, applicable, func):
def combatGold((iPlayer, unit, iGold)):
if applicable(goal, iPlayer):
func(goal, iGold)
return combatGold
def combatResult(self, goal, applicable, func):
def combatResult((winningUnit, losingUnit)):
if applicable(goal, winningUnit.getOwner()):
func(goal, losingUnit)
return combatResult
def corporationSpread(self, goal, applicable, func):
def corporationSpread((iCorporation, iPlayer, city)):
if applicable(goal, iPlayer):
func(goal, iCorporation)
return corporationSpread
def enslave(self, goal, applicable, func):
def enslave((iPlayer, losingUnit)):
if applicable(goal, iPlayer):
func(goal, losingUnit)
return enslave
def firstContact(self, goal, applicable, func):
def firstContact((iTeam, iHasMetTeam)):
if applicable(goal, team(iTeam).getLeaderID()):
func(goal, team(iHasMetTeam).getLeaderID())
return firstContact
def greatPersonBorn(self, goal, applicable, func):
def greatPersonBorn((unit, iPlayer, city)):
if applicable(goal, iPlayer):
func(goal, unit)
return greatPersonBorn
def peaceBrokered(self, goal, applicable, func):
def peaceBrokered((iBroker, iPlayer1, iPlayer2)):
if applicable(goal, iBroker):
func(goal)
return peaceBrokered
def playerChangeStateReligion(self, goal, applicable, func):
def playerChangeStateReligion((iPlayer, iNewReligion, iOldReligion)):
if applicable(goal, iPlayer):
func(goal, iNewReligion)
return playerChangeStateReligion
def playerGoldTrade(self, goal, applicable, func):
def playerGoldTrade((iFrom, iTo, iGold)):
if applicable(goal, iTo):
func(goal, iGold)
return playerGoldTrade
def playerSlaveTrade(self, goal, applicable, func):
def playerSlaveTrade((iPlayer, iGold)):
if applicable(goal, iPlayer):
func(goal, iGold)
return playerSlaveTrade
def projectBuilt(self, goal, applicable, func):
def projectBuilt((city, iProject)):
if applicable(goal, city.getOwner()):
func(goal, iProject)
return projectBuilt
def sacrificeHappiness(self, goal, applicable, func):
def sacrificeHappiness((iPlayer, city)):
if applicable(goal, iPlayer):
func(goal)
return sacrificeHappiness
def techAcquired(self, goal, applicable, func):
def techAcquired((iTech, iTeam, iPlayer, bAnnounce)):
if applicable(goal, iPlayer):
func(goal, iTech)
return techAcquired
def tradeMission(self, goal, applicable, func):
def tradeMission((iUnit, iPlayer, iX, iY, iGold)):
if applicable(goal, iPlayer):
func(goal, (iX, iY), iGold)
return tradeMission
def unitPillage(self, goal, applicable, func):
def unitPillage((unit, iImprovement, iRoute, iPlayer, iGold)):
if applicable(goal, iPlayer):
func(goal, iGold)
return unitPillage
def vassalState(self, goal, applicable, func):
def vassalState((iMaster, iVassal, bVassal, bCapitulated)):
if applicable(goal, team(iMaster).getLeaderID()):
func(goal)
return vassalState
event_handler_registry = EventHandlerRegistry()
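# Hedged usage sketch (Python 2, matching the Civ4 runtime above); Goal and
# on_turn are hypothetical stand-ins. A real goal exposes .handlers and an
# .evaluator containing the applicable player ids:
#
#     def on_turn(goal, iGameTurn, iPlayer):
#         pass  # advance/check goal progress here
#
#     goal.handlers.add('BeginPlayerTurn', on_turn)
#     event_handler_registry.register(goal, goal)  # instance doubles as goal
#     ...
#     event_handler_registry.deregister(goal)      # detach on completion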
|
328dc55ce71f5481745078b03aa63b7bc51a8beb
|
0d48af5cb8012a79b3327a1d3e7ff0e65af54ea8
|
/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py
|
7f1d34efdeeba1b1a07a197e483d9e9868ebdbc0
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-unknown"
] |
permissive
|
apache/ozone
|
4426dd1ae18716d0585f1e846f7881970e3c4279
|
a3b89e73185894f905b4ad96cd6d78429497d0c9
|
refs/heads/master
| 2023-09-04T05:26:08.656890
| 2023-09-02T15:16:12
| 2023-09-02T15:16:12
| 212,382,406
| 509
| 386
|
Apache-2.0
| 2023-09-14T19:54:21
| 2019-10-02T15:56:19
|
Java
|
UTF-8
|
Python
| false
| false
| 5,458
|
py
|
test_blockade_datanode_isolation.py
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ozone.cluster import OzoneCluster
from ozone.exceptions import ContainerNotFoundError
logger = logging.getLogger(__name__)
def setup_function():
global cluster
cluster = OzoneCluster.create()
cluster.start()
def teardown_function():
cluster.stop()
def test_isolate_single_datanode():
"""
In this test case we will create a network partition in such a way that
one of the DN will not be able to communicate with other datanodes
but it will be able to communicate with SCM.
Once the network partition happens, SCM detects it and closes the pipeline,
which in-turn closes the containers.
The container on the first two DN will get CLOSED as they have quorum.
The container replica on the third node will be QUASI_CLOSED as it is not
able to connect with the other DNs and it doesn't have latest BCSID.
Once we restore the network, the stale replica on the third DN will be
deleted and a latest replica will be copied from any one of the other
DNs.
"""
om = cluster.om
scm = cluster.scm
dns = cluster.datanodes
client = cluster.client
oz_client = cluster.get_client()
oz_client.run_freon(1, 1, 1, 10240)
# Partition the network
first_set = [om, scm, dns[0], dns[1], client]
second_set = [om, scm, dns[2], client]
logger.info("Partitioning the network")
cluster.partition_network(first_set, second_set)
oz_client.run_freon(1, 1, 1, 10240)
containers = cluster.get_containers_on_datanode(dns[0])
# The same set of containers should also be in datanode[2]
for container in containers:
assert container.is_on(dns[2])
logger.info("Waiting for container to be CLOSED")
for container in containers:
container.wait_until_one_replica_is_closed()
for container in containers:
assert container.get_state(dns[0]) == 'CLOSED'
assert container.get_state(dns[1]) == 'CLOSED'
        try:
            assert container.get_state(dns[2]) in ('CLOSING', 'QUASI_CLOSED')
        except ContainerNotFoundError:
            pass  # acceptable: the stale replica may already be gone from dns[2]
# Since the replica in datanode[2] doesn't have the latest BCSID,
# ReplicationManager will delete it and copy a closed replica.
# We will now restore the network and datanode[2] should get a
# closed replica of the container
logger.info("Restoring the network")
cluster.restore_network()
logger.info("Waiting for the replica to be CLOSED")
for container in containers:
container.wait_until_replica_is_closed(dns[2])
for container in containers:
assert container.get_state(dns[0]) == 'CLOSED'
assert container.get_state(dns[1]) == 'CLOSED'
assert container.get_state(dns[2]) == 'CLOSED'
exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
assert exit_code == 0, "freon run failed with output=[%s]" % output
def test_datanode_isolation_all():
"""
In this test case we will create a network partition in such a way that
all DNs cannot communicate with each other.
All DNs will be able to communicate with SCM.
Once the network partition happens, SCM detects it and closes the pipeline,
which in-turn tries to close the containers.
At least one of the replica should be in closed state
Once we restore the network, there will be three closed replicas.
"""
om = cluster.om
scm = cluster.scm
dns = cluster.datanodes
client = cluster.client
oz_client = cluster.get_client()
oz_client.run_freon(1, 1, 1, 10240)
logger.info("Partitioning the network")
first_set = [om, scm, dns[0], client]
second_set = [om, scm, dns[1], client]
third_set = [om, scm, dns[2], client]
cluster.partition_network(first_set, second_set, third_set)
containers = cluster.get_containers_on_datanode(dns[0])
container = containers.pop()
logger.info("Waiting for a replica to be CLOSED")
container.wait_until_one_replica_is_closed()
# At least one of the replica should be in closed state
assert 'CLOSED' in container.get_datanode_states()
logger.info("Restoring the network")
cluster.restore_network()
logger.info("Waiting for the container to be replicated")
container.wait_until_all_replicas_are_closed()
# After restoring the network all the replicas should be in CLOSED state
for state in container.get_datanode_states():
assert state == 'CLOSED'
exit_code, output = oz_client.run_freon(1, 1, 1, 10240)
assert exit_code == 0, "freon run failed with output=[%s]" % output
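# Hedged sketch of the polling pattern the wait_until_* helpers above imply;
# the real implementations live in the ozone test library, this is a
# self-contained stand-in.
import time

def _wait_until(predicate, timeout=600, interval=5):
    """Poll `predicate` until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError("condition not met within %ss" % timeout)

# e.g. _wait_until(lambda: 'CLOSED' in container.get_datanode_states())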
|
d0dabc1b18fec846e0ed93e12c384f2e507c2507
|
3a16a7311f2df0a7ac4e1bb5f2fcea1c18facaf1
|
/setup.py
|
1f85bcc05969d4f11dd99248f3ac1cbf429f3466
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SAP/python-pyodata
|
5eb220e0aacebb8f9b240f05d9f7b75083112151
|
35f30025ad39a2b49d5f08d4d4f7ad98e8ea38b9
|
refs/heads/master
| 2023-09-01T12:08:11.429932
| 2023-08-02T09:41:47
| 2023-08-02T09:41:47
| 158,290,900
| 187
| 110
|
Apache-2.0
| 2023-08-15T16:36:04
| 2018-11-19T21:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
setup.py
|
# Copyright 2018 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import os.path
from setuptools import setup, find_packages
source_location = os.path.abspath(os.path.dirname(__file__))
NAME = 'python-pyodata'
HERE = os.path.abspath(os.path.dirname(__file__))
def _read(name):
with open(os.path.join(HERE, name), 'r', encoding='utf-8') as f:
return f.read()
setup(
name="pyodata",
version=_read('VERSION').strip(),
license="Apache License Version 2.0",
url="https://github.com/SAP/python-pyodata",
author="Jakub Filak, Michal Nezerka, Patrik Petrik, Petr Hanak",
author_email="jakub.filak@sap.com, michal.nezerka@sap.com, patrik.petrik@sap.com, petr.hanak@sap.com",
description="Enterprise ready Python OData client",
long_description=_read('README.md'),
long_description_content_type="text/markdown",
    packages=find_packages(exclude=("tests",)),  # trailing comma needed: ("tests") is just a string
zip_safe=False,
install_requires=[
"lxml>=4.1.1",
],
extras_require={
},
tests_require=[
"codecov",
"flake8",
"setuptools>=38.2.4",
"setuptools-scm>=1.15.6",
"requests>=2.28.1",
"responses>=0.21.0",
"pylint",
"pytest>=7.1.2",
"pytest-cov>=3.0.0",
"sphinx",
],
classifiers=[ # cf. http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
entry_points = {
},
)
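# Consumption sketch mirroring the upstream quickstart (the URL is the
# public Northwind demo service):
#
#     import requests
#     import pyodata
#
#     client = pyodata.Client(
#         'http://services.odata.org/V2/Northwind/Northwind.svc/',
#         requests.Session())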
|
3fd86a98538cec42dff6abd6d22e5321b65f5852
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/ContributeDetailInfo.py
|
03f82556c04a8230081649638557b5e1c7ec2804
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
ContributeDetailInfo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ContributeDetailInfo(object):
def __init__(self):
self._contribute_amount = None
self._contribute_type = None
@property
def contribute_amount(self):
return self._contribute_amount
@contribute_amount.setter
def contribute_amount(self, value):
self._contribute_amount = value
@property
def contribute_type(self):
return self._contribute_type
@contribute_type.setter
def contribute_type(self, value):
self._contribute_type = value
def to_alipay_dict(self):
params = dict()
if self.contribute_amount:
if hasattr(self.contribute_amount, 'to_alipay_dict'):
params['contribute_amount'] = self.contribute_amount.to_alipay_dict()
else:
params['contribute_amount'] = self.contribute_amount
if self.contribute_type:
if hasattr(self.contribute_type, 'to_alipay_dict'):
params['contribute_type'] = self.contribute_type.to_alipay_dict()
else:
params['contribute_type'] = self.contribute_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ContributeDetailInfo()
if 'contribute_amount' in d:
o.contribute_amount = d['contribute_amount']
if 'contribute_type' in d:
o.contribute_type = d['contribute_type']
return o
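if __name__ == '__main__':
    # Round-trip sketch using only the methods defined above
    # (field values are illustrative).
    info = ContributeDetailInfo()
    info.contribute_amount = '10.00'
    info.contribute_type = 'BONUS'
    d = info.to_alipay_dict()
    clone = ContributeDetailInfo.from_alipay_dict(d)
    assert clone.contribute_amount == '10.00'
    assert clone.contribute_type == 'BONUS'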
|
69ccdc6e99390fbb2a0a6a2745c735f6533b7597
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/sdk-pkg/src/genie/libs/sdk/apis/nxos/n3k/platform/execute.py
|
18ef8dd09dc15920a6d7ae715a4a1f49df251c68
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 722
|
py
|
execute.py
|
'''NXOS n3k execute functions for platform'''
# Python
import logging
from unicon.core.errors import SubCommandFailure
# Logger
log = logging.getLogger(__name__)
def execute_delete_boot_variable(device, timeout=300):
''' Delete the boot variables
Args:
device ('obj'): Device object
timeout ('int'): Max time to delete boot vars in seconds
'''
try:
device.configure("no boot nxos", timeout=timeout)
except SubCommandFailure as e:
raise SubCommandFailure("Failed to no boot system on '{}'\n{}".\
format(device.name, str(e)))
device.api.is_current_boot_variable_as_expected(device=device, system=None, kickstart=None)
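# Hedged usage sketch; assumes a connected pyATS/Unicon NXOS device
# (testbed file and device name below are hypothetical):
#
#     from genie.testbed import load
#     device = load('testbed.yaml').devices['n3k-1']
#     device.connect()
#     device.api.execute_delete_boot_variable(device=device, timeout=120)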
|
a6528c42b7ac62bc8958867ac24df89fa7e1559f
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/hqwebapp/doc_info.py
|
06ca2fe0aa0adef6e15bb3cb65f68c286995d067
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 9,421
|
py
|
doc_info.py
|
from django.urls import reverse
from django.utils.translation import gettext as _
from corehq.apps.hqwebapp.doc_lookup import lookup_doc_id
from corehq.apps.locations.permissions import can_edit_form_location, user_can_access_case
from corehq.apps.users.models import HqPermissions
from corehq.apps.users.util import raw_username
from corehq.form_processor.models import XFormInstance
from dimagi.ext.jsonobject import BooleanProperty, JsonObject, StringProperty
from dimagi.utils.couch.undo import DELETED_SUFFIX
class DomainMismatchException(Exception):
pass
class DocInfo(JsonObject):
id = StringProperty()
domain = StringProperty()
type = StringProperty()
display = StringProperty()
link = StringProperty()
type_display = StringProperty()
is_deleted = BooleanProperty()
def user_has_permissions(self, domain, user, doc):
if self.type == "XFormInstance":
return _check_form_permissions(domain, user, doc)
elif self.type == "CommCareCase":
return _check_case_permissions(domain, user, doc)
else:
return False
def get_doc_info_by_id(domain, id):
not_found_value = DocInfo(display=id, link=None, owner_type=None)
if not id:
return not_found_value
result = lookup_doc_id(id)
if not result:
return not_found_value
doc = result.doc
try:
return get_doc_info(doc, domain_hint=domain)
except DomainMismatchException:
return not_found_value
def get_doc_info(doc, domain_hint=None, cache=None):
"""
cache is just a dictionary that you can keep passing in to speed up info
retrieval.
"""
domains = ()
if isinstance(doc, dict):
domains = doc.get('domains', ())
domain_hint = domain_hint or (domains[0] if domains else None)
doc_info = get_doc_info_couch(doc, domain_hint, cache=cache)
else:
doc_info = get_doc_info_sql(doc, cache=cache)
if domain_hint and not (
doc_info.domain == domain_hint or domain_hint in domains
):
raise DomainMismatchException("Doc '%s' does not match the domain_hint '%s'" % (doc_info.id, domain_hint))
return doc_info
def get_doc_info_couch(doc, domain_hint=None, cache=None):
"""Return DocInfo objects for Couch doc dicts"""
domain = doc.get('domain') or domain_hint
doc_type = doc.get('doc_type')
doc_id = doc.get('_id')
generic_delete = doc_type.endswith(DELETED_SUFFIX)
def has_doc_type(doc_type, expected_doc_type):
return (doc_type == expected_doc_type
or doc_type == ('%s%s' % (expected_doc_type, DELETED_SUFFIX)))
if cache and doc_id in cache:
return cache[doc_id]
if (
has_doc_type(doc_type, 'Application')
or has_doc_type(doc_type, 'LinkedApplication')
or has_doc_type(doc_type, 'RemoteApp')
):
if doc.get('copy_of'):
doc_info = DocInfo(
display='%s (#%s)' % (doc['name'], doc['version']),
type_display=_('Application Build'),
link=reverse(
'download_index',
args=[domain, doc_id],
),
is_deleted=generic_delete,
)
else:
doc_info = DocInfo(
display=doc['name'],
type_display=_('Application'),
link=reverse(
'view_app',
args=[domain, doc_id],
),
is_deleted=generic_delete,
)
elif has_doc_type(doc_type, 'CommCareCaseGroup'):
from corehq.apps.data_interfaces.views import CaseGroupCaseManagementView
doc_info = DocInfo(
type_display=_('Case Group'),
display=doc['name'],
link=reverse(
CaseGroupCaseManagementView.urlname,
args=[domain, doc_id],
),
is_deleted=generic_delete,
)
elif has_doc_type(doc_type, 'CommCareCase'):
doc_info = case_docinfo(domain, doc_id, doc['name'], generic_delete)
elif any([has_doc_type(doc_type, d) for d in XFormInstance.DOC_TYPE_TO_STATE]):
doc_info = form_docinfo(domain, doc_id, generic_delete)
elif doc_type in ('CommCareUser',):
doc_info = DocInfo(
display=raw_username(doc['username']),
type_display=_('Mobile Worker'),
link=get_commcareuser_url(domain, doc_id),
is_deleted=doc.get('base_doc', '').endswith(DELETED_SUFFIX),
)
elif doc_type in ('WebUser',):
doc_info = DocInfo(
type_display=_('Web User'),
display=doc['username'],
link=get_webuser_url(domain, doc_id),
is_deleted=doc.get('base_doc', '').endswith(DELETED_SUFFIX),
)
elif has_doc_type(doc_type, 'Group'):
from corehq.apps.users.views.mobile import EditGroupMembersView
doc_info = DocInfo(
type_display=_('Group'),
display=doc['name'],
link=reverse(
EditGroupMembersView.urlname,
args=[domain, doc_id],
),
is_deleted=generic_delete,
)
elif has_doc_type(doc_type, 'Domain'):
if doc['is_snapshot'] and doc['published']:
urlname = 'project_info'
else:
urlname = 'domain_basic_info'
doc_info = DocInfo(
type_display=_('Domain'),
display=doc['name'],
link=reverse(
urlname,
kwargs={'domain': doc['name']}
),
is_deleted=generic_delete,
)
elif has_doc_type(doc_type, 'Location'):
from corehq.apps.locations.views import EditLocationView
doc_info = DocInfo(
type_display=_('Location'),
display=doc['name'],
link=reverse(
EditLocationView.urlname,
args=[domain, doc_id],
),
is_deleted=generic_delete,
)
else:
doc_info = DocInfo(
is_deleted=generic_delete,
)
doc_info.id = doc_id
doc_info.domain = domain
doc_info.type = doc_type
if cache:
cache[doc_id] = doc_info
return doc_info
def form_docinfo(domain, doc_id, is_deleted):
doc_info = DocInfo(
id=doc_id,
type="XFormInstance",
type_display=_('Form'),
link=reverse(
'render_form_data',
args=[domain, doc_id],
),
is_deleted=is_deleted,
)
return doc_info
def case_docinfo(domain, doc_id, name, is_deleted):
return DocInfo(
id=doc_id,
type="CommCareCase",
display=name,
type_display=_('Case'),
link=get_case_url(domain, doc_id),
is_deleted=is_deleted,
)
def get_case_url(domain, case_id):
return reverse(
'case_data',
args=[domain, case_id],
)
def get_commcareuser_url(domain, user_id):
return reverse(
'edit_commcare_user',
args=[domain, user_id],
)
def get_webuser_url(domain, user_id):
return reverse(
'user_account',
args=[domain, user_id],
)
def get_doc_info_sql(obj, cache=None):
"""
Return DocInfo objects for SQL models
"""
class_name = obj.__class__.__name__
cache_key = '%s-%s' % (class_name, obj.pk)
if cache and cache_key in cache:
return cache[cache_key]
from corehq.apps.locations.models import SQLLocation
from corehq.form_processor.models import CommCareCase
if isinstance(obj, SQLLocation):
from corehq.apps.locations.views import EditLocationView
doc_info = DocInfo(
id=obj.location_id,
type="Location",
type_display=_('Location'),
display=obj.name,
link=reverse(
EditLocationView.urlname,
args=[obj.domain, obj.location_id],
),
is_deleted=False,
)
elif isinstance(obj, CommCareCase):
doc_info = case_docinfo(obj.domain, obj.case_id, obj.name, obj.is_deleted)
elif isinstance(obj, XFormInstance):
doc_info = form_docinfo(obj.domain, obj.form_id, obj.is_deleted)
else:
doc_info = DocInfo(
is_deleted=False,
)
doc_info.id = doc_info.id or str(obj.pk)
doc_info.domain = obj.domain if hasattr(obj, 'domain') else None
doc_info.type = doc_info.type or class_name
if cache:
cache[cache_key] = doc_info
return doc_info
def get_object_url(domain, doc_type, doc_id):
if doc_type == 'CommCareCase':
return get_case_url(domain, doc_id)
elif doc_type == 'CommCareUser':
return get_commcareuser_url(domain, doc_id)
elif doc_type == 'WebUser':
return get_webuser_url(domain, doc_id)
return None
def _check_form_permissions(domain, user, form):
return (
user.has_permission(
domain, HqPermissions.view_report, "corehq.apps.reports.standard.inspect.SubmitHistory"
)
and can_edit_form_location(domain, user, form)
)
def _check_case_permissions(domain, user, case):
return (
user.has_permission(
domain, HqPermissions.view_report, "corehq.apps.reports.standard.cases.basic.CaseListReport"
)
and user_can_access_case(domain, user, case)
)
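# Hedged usage sketch: the `cache` argument threaded through the functions
# above is a plain dict, so one batch of lookups can share it (`docs` and
# `domain` below are hypothetical):
#
#     cache = {}
#     infos = [get_doc_info(doc, domain_hint=domain, cache=cache)
#              for doc in docs]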
|
9da27c7d2264fc057e64c0a234e37adb065b8ed3
|
ca93302d06425c04492e92b801277496ecde68d3
|
/redis/commands/search/indexDefinition.py
|
a668e85b42c99ce5f8e6bdbaa29bdd7ddfaec871
|
[
"MIT"
] |
permissive
|
redis/redis-py
|
2c34d4511e31033ed85b70437454ff5c0c2c67d1
|
e3de026a90ef2cc35a5b68934029a0ef2a5b2f53
|
refs/heads/master
| 2023-09-04T09:53:01.713856
| 2023-08-31T09:26:48
| 2023-08-31T09:26:48
| 363,150
| 2,213
| 513
|
MIT
| 2023-09-14T14:05:30
| 2009-11-06T10:22:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
indexDefinition.py
|
from enum import Enum
class IndexType(Enum):
"""Enum of the currently supported index types."""
HASH = 1
JSON = 2
class IndexDefinition:
    """IndexDefinition defines an index definition for automatic
    indexing on Hash or JSON document updates."""
    def __init__(
        self,
        prefix=None,  # default changed from [] to avoid a shared mutable default
        filter=None,
        language_field=None,
        language=None,
        score_field=None,
        score=1.0,
        payload_field=None,
        index_type=None,
    ):
        self.args = []
        self._append_index_type(index_type)
        self._append_prefix(prefix or [])
self._append_filter(filter)
self._append_language(language_field, language)
self._append_score(score_field, score)
self._append_payload(payload_field)
def _append_index_type(self, index_type):
"""Append `ON HASH` or `ON JSON` according to the enum."""
if index_type is IndexType.HASH:
self.args.extend(["ON", "HASH"])
elif index_type is IndexType.JSON:
self.args.extend(["ON", "JSON"])
elif index_type is not None:
raise RuntimeError(f"index_type must be one of {list(IndexType)}")
def _append_prefix(self, prefix):
"""Append PREFIX."""
if len(prefix) > 0:
self.args.append("PREFIX")
self.args.append(len(prefix))
for p in prefix:
self.args.append(p)
def _append_filter(self, filter):
"""Append FILTER."""
if filter is not None:
self.args.append("FILTER")
self.args.append(filter)
def _append_language(self, language_field, language):
"""Append LANGUAGE_FIELD and LANGUAGE."""
if language_field is not None:
self.args.append("LANGUAGE_FIELD")
self.args.append(language_field)
if language is not None:
self.args.append("LANGUAGE")
self.args.append(language)
def _append_score(self, score_field, score):
"""Append SCORE_FIELD and SCORE."""
if score_field is not None:
self.args.append("SCORE_FIELD")
self.args.append(score_field)
if score is not None:
self.args.append("SCORE")
self.args.append(score)
def _append_payload(self, payload_field):
"""Append PAYLOAD_FIELD."""
if payload_field is not None:
self.args.append("PAYLOAD_FIELD")
self.args.append(payload_field)
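if __name__ == "__main__":
    # Illustrative check of the args list assembled by the appends above.
    defn = IndexDefinition(prefix=["doc:"], index_type=IndexType.JSON, score=0.5)
    assert defn.args == ["ON", "JSON", "PREFIX", 1, "doc:", "SCORE", 0.5]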
|
eab8b3d7e3218028679c4e44ebdca1ac7ef1d4f7
|
8f267fe1157904023004aa1fcee8cdcaf1d69f74
|
/tempest/api/volume/test_volumes_backup.py
|
89ff4977619855d65af0b83dccb3d2de7c0c3a18
|
[
"Apache-2.0"
] |
permissive
|
openstack/tempest
|
a65737f3e62d4ebeb7e387feac7bcc636d3f5fe0
|
3932a799e620a20d7abf7b89e21b520683a1809b
|
refs/heads/master
| 2023-08-28T15:04:21.241805
| 2023-08-28T10:16:57
| 2023-08-28T10:16:57
| 2,356,406
| 270
| 407
|
Apache-2.0
| 2022-06-29T15:52:45
| 2011-09-09T15:56:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,789
|
py
|
test_volumes_backup.py
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from testtools import matchers
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesBackupsTest(base.BaseVolumeTest):
"""Test volumes backup"""
create_default_network = True
@classmethod
def skip_checks(cls):
super(VolumesBackupsTest, cls).skip_checks()
if not CONF.volume_feature_enabled.backup:
raise cls.skipException("Cinder backup feature disabled")
def restore_backup(self, backup_id):
# Restore a backup
restored_volume = self.backups_client.restore_backup(
backup_id)['restore']
# Delete backup
self.addCleanup(self.delete_volume, self.volumes_client,
restored_volume['volume_id'])
self.assertEqual(backup_id, restored_volume['backup_id'])
waiters.wait_for_volume_resource_status(self.backups_client,
backup_id, 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
restored_volume['volume_id'],
'available')
return restored_volume
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'ceph does not support arbitrary container names')
@decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
def test_volume_backup_create_get_detailed_list_restore_delete(self):
"""Test create/get/list/restore/delete volume backup
1. Create volume1 with metadata
2. Create backup1 from volume1
3. Show backup1
4. List backups with detail
5. Restore backup1
6. Verify backup1 has been restored successfully with the metadata
of volume1
"""
# Create a volume with metadata
metadata = {"vol-meta1": "value1",
"vol-meta2": "value2",
"vol-meta3": "value3"}
volume = self.create_volume(metadata=metadata)
self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
# Create a backup
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
description = data_utils.rand_name("volume-backup-description")
backup = self.create_backup(volume_id=volume['id'],
name=backup_name,
description=description,
container='container')
self.assertEqual(backup_name, backup['name'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# Get a given backup
backup = self.backups_client.show_backup(backup['id'])['backup']
self.assertEqual(backup_name, backup['name'])
self.assertEqual(description, backup['description'])
self.assertEqual('container', backup['container'])
# Get all backups with detail
backups = self.backups_client.list_backups(detail=True)['backups']
self.assertIn((backup['name'], backup['id']),
[(m['name'], m['id']) for m in backups])
restored_volume = self.restore_backup(backup['id'])
restored_volume_metadata = self.volumes_client.show_volume(
restored_volume['volume_id'])['volume']['metadata']
# Verify the backup has been restored successfully
# with the metadata of the source volume.
self.assertThat(restored_volume_metadata.items(),
matchers.ContainsAll(metadata.items()))
@decorators.idempotent_id('07af8f6d-80af-44c9-a5dc-c8427b1b62e6')
@utils.services('compute')
def test_backup_create_attached_volume(self):
"""Test backup create using force flag.
Cinder allows to create a volume backup, whether the volume status
is "available" or "in-use".
"""
# Create a server
volume = self.create_volume(wait_until=False)
self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
validation_resources = self.get_test_validation_resources(
self.os_primary)
server = self.create_server(wait_until='SSHABLE',
validation_resources=validation_resources,
validatable=True)
# Attach volume to instance
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
self.attach_volume(server['id'], volume['id'])
# Create backup using force flag
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'],
name=backup_name, force=True)
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
self.assertEqual(backup_name, backup['name'])
@decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
@utils.services('image')
def test_bootable_volume_backup_and_restore(self):
"""Test backuping and restoring a bootable volume
1. Create volume1 from image
2. Create backup1 from volume1
3. Restore backup1
4. Verify the restored backup volume is bootable
"""
# Create volume from image
img_uuid = CONF.compute.image_ref
volume = self.create_volume(imageRef=img_uuid)
volume_details = self.volumes_client.show_volume(
volume['id'])['volume']
self.assertTrue(volume_details['bootable'])
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
# Verify the restored backup volume is bootable
restored_volume_info = self.volumes_client.show_volume(
restored_volume_id)['volume']
self.assertTrue(restored_volume_info['bootable'])
class VolumesBackupsV39Test(base.BaseVolumeTest):
"""Test volumes backup with volume microversion greater than 3.8"""
volume_min_microversion = '3.9'
volume_max_microversion = 'latest'
@classmethod
def skip_checks(cls):
super(VolumesBackupsV39Test, cls).skip_checks()
if not CONF.volume_feature_enabled.backup:
raise cls.skipException("Cinder backup feature disabled")
@decorators.idempotent_id('9b374cbc-be5f-4d37-8848-7efb8a873dcc')
def test_update_backup(self):
"""Test updating backup's name and description"""
# Create volume and backup
volume = self.create_volume()
backup = self.create_backup(volume_id=volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# Update backup and assert response body for update_backup method
update_kwargs = {
'name': data_utils.rand_name(self.__class__.__name__ + '-Backup'),
'description': data_utils.rand_name("volume-backup-description")
}
update_backup = self.backups_client.update_backup(
backup['id'], **update_kwargs)['backup']
self.assertEqual(backup['id'], update_backup['id'])
self.assertEqual(update_kwargs['name'], update_backup['name'])
# Assert response body for show_backup method
retrieved_backup = self.backups_client.show_backup(
backup['id'])['backup']
for key in update_kwargs:
self.assertEqual(update_kwargs[key], retrieved_backup[key])
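# Condensed create -> back up -> restore flow, using only calls already
# exercised above (illustrative; assumes it runs inside a BaseVolumeTest
# method with the usual clients configured):
#
#     volume = self.create_volume()
#     backup = self.create_backup(volume_id=volume['id'])
#     waiters.wait_for_volume_resource_status(self.volumes_client,
#                                             volume['id'], 'available')
#     restored = self.restore_backup(backup['id'])  # new volume from backup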
|
ba68ab5865c4ede7e4688a543905485933916f91
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/Color/lp_Tweaky/lp_Tweaky.py
|
abeeb20285bec98dc4270fd1870f25dc33ab5d72
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 37,811
|
py
|
lp_Tweaky.py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named lp_TweakyExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from lp_TweakyExt import *
except ImportError:
pass
def getPluginID():
return "lp_Tweaky"
def getLabel():
return "lp_Tweaky"
def getVersion():
return 3
def getGrouping():
return "Color"
def getPluginDescription():
return "Provides a variety of little tweaking-options, like Vibrance, WB-Slider, Tint, etc.\n\nINPUTS\nimg = connect the main plate you want to treat\nmask = masks the effect by a connected alpha-channel\n\nHOW TO USE IT\nJust connect your plate and play with the controls, it\'s pretty straight forward :)\n\nHOW DOES IT WORK\nUnder the hood, the Tweaky transforms colourspaces left and right to give controls on things like white blance, tint, vibrance and other options you may or may not know from classic raw-development software. "
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(1, 0.3333, 0.498)
# Create the user parameters
lastNode.userNatron = lastNode.createPageParam("userNatron", "Controls")
param = lastNode.createColorParam("Grade1whitePoint", "wb picker", True)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(1, 0)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
param.setDisplayMinimum(0, 1)
param.setDisplayMaximum(1, 1)
param.setDefaultValue(1, 1)
param.restoreDefaultValue(1)
param.setDisplayMinimum(0, 2)
param.setDisplayMaximum(1, 2)
param.setDefaultValue(1, 2)
param.restoreDefaultValue(2)
param.setDisplayMinimum(0, 3)
param.setDisplayMaximum(1, 3)
param.setDefaultValue(1, 3)
param.restoreDefaultValue(3)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Pick a colour (preferably something white/grey) you want to balance the shot to.\n\nThe picked colour will be set to a neutral grey, based on the average RGB values of the picked colour.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Grade1whitePoint = param
del param
param = lastNode.createDoubleParam("whitebalance", "white balance")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(-1, 0)
param.setDisplayMaximum(1, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Adjust white balance (ratio of yellow/orange to blue).\nBest is to start with the picker above, and then work from there.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.whitebalance = param
del param
param = lastNode.createDoubleParam("colourtint", "tint")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(-1, 0)
param.setDisplayMaximum(1, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("adjust tint (ratio of green to magenta)")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.colourtint = param
del param
param = lastNode.createDoubleParam("colourvibrance", "vibrance")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(-1, 0)
param.setDisplayMaximum(1, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("vibrance tries to boost the saturation of less saturated areas more than already saturated areas. You know this from Photoshop.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.colourvibrance = param
del param
param = lastNode.createDoubleParam("hueshift", "hue shift")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(-360, 0)
param.setDisplayMaximum(360, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Shifts the hue of the image.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.hueshift = param
del param
param = lastNode.createSeparatorParam("sep01", "")
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.sep01 = param
del param
param = lastNode.createDoubleParam("highlightcorr", "highlight")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(1, 0)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Adjusts Highlights.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.highlightcorr = param
del param
param = lastNode.createDoubleParam("shadowcorr", "shadows")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(1, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Adjusts Shadows.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.shadowcorr = param
del param
param = lastNode.createSeparatorParam("sep02", "")
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.sep02 = param
del param
param = lastNode.createDoubleParam("contrastclarity", "clarity")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(-1, 0)
param.setDisplayMaximum(1, 0)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Tries to sharpen/soften with care. ")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.contrastclarity = param
del param
param = lastNode.createSeparatorParam("sep03", " ")
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp(" ")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.sep03 = param
del param
param = lastNode.createBooleanParam("invmask", "invert mask")
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("Inverts the connected mask.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.invmask = param
del param
param = lastNode.createStringParam("credit", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.userNatron.addParam(param)
# Set param properties
param.setHelp("lp_Tweaky 2.0\n(c)2016 by lucas pfaff")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.credit = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['userNatron', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output1"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output1")
lastNode.setPosition(1877, 2872)
lastNode.setSize(104, 31)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput1 = lastNode
del lastNode
# End of node "Output1"
# Start of node "img"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("img")
lastNode.setLabel("img")
lastNode.setPosition(1879, -868)
lastNode.setSize(104, 32)
lastNode.setColor(0.3, 0.5, 0.2)
groupimg = lastNode
del lastNode
# End of node "img"
# Start of node "RGBToHSV1"
lastNode = app.createNode("net.sf.openfx.RGBToHSV", 1, group)
lastNode.setScriptName("RGBToHSV1")
lastNode.setLabel("RGBToHSV1")
lastNode.setPosition(1884, 254)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupRGBToHSV1 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "RGBToHSV1"
# Start of node "VIBRANCE"
lastNode = app.createNode("net.sf.openfx.GradePlugin", 2, group)
lastNode.setScriptName("VIBRANCE")
lastNode.setLabel("VIBRANCE")
lastNode.setPosition(1884, 435)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupVIBRANCE = lastNode
param = lastNode.getParam("NatronOfxParamProcessR")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessB")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("gamma")
if param is not None:
param.setValue(1, 1)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "VIBRANCE"
# Start of node "mask"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("mask")
lastNode.setLabel("mask")
lastNode.setPosition(939, 1602)
lastNode.setSize(104, 32)
lastNode.setColor(0.3, 0.5, 0.2)
groupmask = lastNode
param = lastNode.getParam("optional")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("isMask")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "mask"
# Start of node "Invert1"
lastNode = app.createNode("net.sf.openfx.Invert", 2, group)
lastNode.setScriptName("Invert1")
lastNode.setLabel("Invert1")
lastNode.setPosition(939, 2055)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupInvert1 = lastNode
param = lastNode.getParam("premult")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Invert1"
# Start of node "Dot1"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot1")
lastNode.setLabel("Dot1")
lastNode.setPosition(1924, -710)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot1 = lastNode
del lastNode
# End of node "Dot1"
# Start of node "Dot2"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot2")
lastNode.setLabel("Dot2")
lastNode.setPosition(1527, -710)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot2 = lastNode
del lastNode
# End of node "Dot2"
# Start of node "Dot3"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot3")
lastNode.setLabel("Dot3")
lastNode.setPosition(984, 2434)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot3 = lastNode
del lastNode
# End of node "Dot3"
# Start of node "Merge1"
lastNode = app.createNode("net.sf.openfx.MergePlugin", 1, group)
lastNode.setScriptName("Merge1")
lastNode.setLabel("Merge1")
lastNode.setPosition(1879, 2408)
lastNode.setSize(104, 55)
lastNode.setColor(0.3, 0.37, 0.776)
groupMerge1 = lastNode
param = lastNode.getParam("operation")
if param is not None:
param.set("copy")
del param
param = lastNode.getParam("OutputChannelsA")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("enableMask_Mask")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Merge1"
# Start of node "HSVToRGB1"
lastNode = app.createNode("net.sf.openfx.HSVToRGB", 1, group)
lastNode.setScriptName("HSVToRGB1")
lastNode.setLabel("HSVToRGB1")
lastNode.setPosition(1884, 511)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupHSVToRGB1 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "HSVToRGB1"
# Start of node "SharpenInvDiff1"
lastNode = app.createNode("net.sf.cimg.CImgSharpenInvDiff", 2, group)
lastNode.setScriptName("SharpenInvDiff1")
lastNode.setLabel("SharpenInvDiff1")
lastNode.setPosition(2230, 909)
lastNode.setSize(104, 55)
lastNode.setColor(0.8, 0.5, 0.3)
groupSharpenInvDiff1 = lastNode
param = lastNode.getParam("amplitude")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "SharpenInvDiff1"
# Start of node "Blur1"
lastNode = app.createNode("net.sf.cimg.CImgBlur", 3, group)
lastNode.setScriptName("Blur1")
lastNode.setLabel("Blur1")
lastNode.setPosition(1879, 1245)
lastNode.setSize(104, 32)
lastNode.setColor(0.8, 0.5, 0.3)
groupBlur1 = lastNode
param = lastNode.getParam("NatronOfxParamProcessR")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessG")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessA")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("size")
if param is not None:
param.setValue(0, 0)
param.setValue(0, 1)
del param
param = lastNode.getParam("filter")
if param is not None:
param.set("quasigaussian")
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Blur1"
# Start of node "Dot5"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot5")
lastNode.setLabel("Dot5")
lastNode.setPosition(1527, 1582)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot5 = lastNode
del lastNode
# End of node "Dot5"
# Start of node "Dot6"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot6")
lastNode.setLabel("Dot6")
lastNode.setPosition(1755, 1582)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot6 = lastNode
del lastNode
# End of node "Dot6"
# Start of node "Saturation1"
lastNode = app.createNode("net.sf.openfx.SaturationPlugin", 2, group)
lastNode.setScriptName("Saturation1")
lastNode.setLabel("Saturation1")
lastNode.setPosition(1710, 1651)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupSaturation1 = lastNode
param = lastNode.getParam("saturation")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("clampWhite")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Saturation1"
# Start of node "HIGHLIGHT"
lastNode = app.createNode("net.sf.openfx.GradePlugin", 2, group)
lastNode.setScriptName("HIGHLIGHT")
lastNode.setLabel("HIGHLIGHT")
lastNode.setPosition(1879, 1885)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupHIGHLIGHT = lastNode
param = lastNode.getParam("multiply")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
param.setValue(1, 3)
del param
param = lastNode.getParam("clampBlack")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("enableMask_Mask")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "HIGHLIGHT"
# Start of node "Dot7"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot7")
lastNode.setLabel("Dot7")
lastNode.setPosition(1755, 1894)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot7 = lastNode
del lastNode
# End of node "Dot7"
# Start of node "SHADOW"
lastNode = app.createNode("net.sf.openfx.GradePlugin", 2, group)
lastNode.setScriptName("SHADOW")
lastNode.setLabel("SHADOW")
lastNode.setPosition(1879, 2054)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupSHADOW = lastNode
param = lastNode.getParam("gamma")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
param.setValue(1, 3)
del param
param = lastNode.getParam("maskInvert")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("enableMask_Mask")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "SHADOW"
# Start of node "Dot8"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot8")
lastNode.setLabel("Dot8")
lastNode.setPosition(1755, 2063)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot8 = lastNode
del lastNode
# End of node "Dot8"
# Start of node "Multiply2"
lastNode = app.createNode("net.sf.openfx.MultiplyPlugin", 2, group)
lastNode.setScriptName("Multiply2")
lastNode.setLabel("WB")
lastNode.setPosition(1879, -286)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupMultiply2 = lastNode
param = lastNode.getParam("NatronOfxParamProcessR")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessG")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("value")
if param is not None:
param.setValue(1, 2)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Multiply2"
# Start of node "Multiply3"
lastNode = app.createNode("net.sf.openfx.MultiplyPlugin", 2, group)
lastNode.setScriptName("Multiply3")
lastNode.setLabel("TINT")
lastNode.setPosition(1879, -176)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupMultiply3 = lastNode
param = lastNode.getParam("NatronOfxParamProcessR")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessB")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("value")
if param is not None:
param.setValue(1, 1)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(False)
del param
del lastNode
# End of node "Multiply3"
# Start of node "OCIOLogConvert2_2"
lastNode = app.createNode("fr.inria.openfx.OCIOLogConvert", 1, group)
lastNode.setScriptName("OCIOLogConvert2_2")
lastNode.setLabel("OCIOLogConvert2_2")
lastNode.setPosition(1879, 1411)
lastNode.setSize(104, 55)
lastNode.setColor(0.48, 0.66, 1)
groupOCIOLogConvert2_2 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "OCIOLogConvert2_2"
# Start of node "Dot9"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot9")
lastNode.setLabel("Dot9")
lastNode.setPosition(2275, 1069)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot9 = lastNode
del lastNode
# End of node "Dot9"
# Start of node "Dot10"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot10")
lastNode.setLabel("Dot10")
lastNode.setPosition(1924, 830)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot10 = lastNode
del lastNode
# End of node "Dot10"
# Start of node "Dot11"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot11")
lastNode.setLabel("Dot11")
lastNode.setPosition(2275, 830)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot11 = lastNode
del lastNode
# End of node "Dot11"
# Start of node "RGB709ToXYZ1"
lastNode = app.createNode("net.sf.openfx.RGB709ToXYZ", 1, group)
lastNode.setScriptName("RGB709ToXYZ1")
lastNode.setLabel("RGB709ToXYZ1")
lastNode.setPosition(1879, -360)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupRGB709ToXYZ1 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "RGB709ToXYZ1"
# Start of node "XYZToRGB7091"
lastNode = app.createNode("net.sf.openfx.XYZToRGB709", 1, group)
lastNode.setScriptName("XYZToRGB7091")
lastNode.setLabel("XYZToRGB7091")
lastNode.setPosition(1879, -87)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupXYZToRGB7091 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "XYZToRGB7091"
# Start of node "Constant1"
lastNode = app.createNode("net.sf.openfx.ConstantPlugin", 1, group)
lastNode.setScriptName("Constant1")
lastNode.setLabel("Constant1")
lastNode.setPosition(1257, 1731)
lastNode.setSize(104, 32)
lastNode.setColor(0.3, 0.5, 0.2)
groupConstant1 = lastNode
param = lastNode.getParam("NatronParamFormatChoice")
if param is not None:
param.set("HD")
del param
del lastNode
# End of node "Constant1"
# Start of node "Dot12"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot12")
lastNode.setLabel("Dot12")
lastNode.setPosition(1302, 1582)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot12 = lastNode
del lastNode
# End of node "Dot12"
# Start of node "Merge2"
lastNode = app.createNode("net.sf.openfx.MergePlugin", 1, group)
lastNode.setScriptName("Merge2")
lastNode.setLabel("Merge2")
lastNode.setPosition(939, 1841)
lastNode.setSize(104, 55)
lastNode.setColor(0.3, 0.37, 0.776)
groupMerge2 = lastNode
del lastNode
# End of node "Merge2"
# Start of node "Dot13"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot13")
lastNode.setLabel("Dot13")
lastNode.setPosition(1302, 1876)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot13 = lastNode
del lastNode
# End of node "Dot13"
# Start of node "Dot4"
lastNode = app.createNode("fr.inria.built-in.Dot", 1, group)
lastNode.setScriptName("Dot4")
lastNode.setLabel("Dot4")
lastNode.setPosition(1527, 2116)
lastNode.setSize(15, 15)
lastNode.setColor(0.7, 0.7, 0.7)
groupDot4 = lastNode
del lastNode
# End of node "Dot4"
# Start of node "wbPicker"
lastNode = app.createNode("net.sf.openfx.GradePlugin", 2, group)
lastNode.setScriptName("wbPicker")
lastNode.setLabel("wbPicker")
lastNode.setPosition(1879, -593)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupwbPicker = lastNode
param = lastNode.getParam("white")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
param.setValue(1, 3)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "wbPicker"
# Start of node "HUESHIFT"
lastNode = app.createNode("net.sf.openfx.GradePlugin", 2, group)
lastNode.setScriptName("HUESHIFT")
lastNode.setLabel("HUESHIFT")
lastNode.setPosition(1884, 326)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupHUESHIFT = lastNode
param = lastNode.getParam("NatronOfxParamProcessG")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("NatronOfxParamProcessB")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("offset")
if param is not None:
param.setValue(0, 0)
param.setValue(0, 1)
param.setValue(0, 2)
param.setValue(0, 3)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "HUESHIFT"
# Start of node "Shuffle1"
lastNode = app.createNode("net.sf.openfx.ShufflePlugin", 3, group)
lastNode.setScriptName("Shuffle1")
lastNode.setLabel("Shuffle1")
lastNode.setPosition(1879, 1060)
lastNode.setSize(104, 32)
lastNode.setColor(0.6, 0.24, 0.39)
groupShuffle1 = lastNode
param = lastNode.getParam("outputB")
if param is not None:
param.set("A.uk.co.thefoundry.OfxImagePlaneColour.B")
del param
param = lastNode.getParam("setGBAFromR")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Shuffle1"
# Start of node "OCIOLogConvert2"
lastNode = app.createNode("fr.inria.openfx.OCIOLogConvert", 1, group)
lastNode.setScriptName("OCIOLogConvert2")
lastNode.setLabel("OCIOLogConvert2")
lastNode.setPosition(1884, 634)
lastNode.setSize(104, 55)
lastNode.setColor(0.48, 0.66, 1)
groupOCIOLogConvert2 = lastNode
param = lastNode.getParam("operation")
if param is not None:
param.set("lin2log")
del param
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "OCIOLogConvert2"
# Start of node "RGBToHSV2"
lastNode = app.createNode("net.sf.openfx.RGBToHSV", 1, group)
lastNode.setScriptName("RGBToHSV2")
lastNode.setLabel("RGBToHSV2")
lastNode.setPosition(1884, 748)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupRGBToHSV2 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "RGBToHSV2"
# Start of node "HSVToRGB2"
lastNode = app.createNode("net.sf.openfx.HSVToRGB", 1, group)
lastNode.setScriptName("HSVToRGB2")
lastNode.setLabel("HSVToRGB2")
lastNode.setPosition(1879, 1330)
lastNode.setSize(104, 32)
lastNode.setColor(0.48, 0.66, 1)
groupHSVToRGB2 = lastNode
param = lastNode.getParam("disableNode")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "HSVToRGB2"
# Start of node "Shuffle2"
lastNode = app.createNode("net.sf.openfx.ShufflePlugin", 3, group)
lastNode.setScriptName("Shuffle2")
lastNode.setLabel("Shuffle2")
lastNode.setPosition(1710, 1779)
lastNode.setSize(104, 32)
lastNode.setColor(0.6, 0.24, 0.39)
groupShuffle2 = lastNode
param = lastNode.getParam("outputA")
if param is not None:
param.set("B.uk.co.thefoundry.OfxImagePlaneColour.R")
del param
del lastNode
# End of node "Shuffle2"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput1.connectInput(0, groupMerge1)
groupRGBToHSV1.connectInput(0, groupXYZToRGB7091)
groupVIBRANCE.connectInput(0, groupHUESHIFT)
groupInvert1.connectInput(0, groupMerge2)
groupDot1.connectInput(0, groupimg)
groupDot2.connectInput(0, groupDot1)
groupDot3.connectInput(0, groupInvert1)
groupMerge1.connectInput(0, groupSHADOW)
groupMerge1.connectInput(1, groupDot4)
groupMerge1.connectInput(2, groupDot3)
groupHSVToRGB1.connectInput(0, groupVIBRANCE)
groupSharpenInvDiff1.connectInput(0, groupDot11)
groupBlur1.connectInput(0, groupShuffle1)
groupDot5.connectInput(0, groupDot2)
groupDot6.connectInput(0, groupDot5)
groupSaturation1.connectInput(0, groupDot6)
groupHIGHLIGHT.connectInput(0, groupOCIOLogConvert2_2)
groupHIGHLIGHT.connectInput(1, groupDot7)
groupDot7.connectInput(0, groupShuffle2)
groupSHADOW.connectInput(0, groupHIGHLIGHT)
groupSHADOW.connectInput(1, groupDot8)
groupDot8.connectInput(0, groupDot7)
groupMultiply2.connectInput(0, groupRGB709ToXYZ1)
groupMultiply3.connectInput(0, groupMultiply2)
groupOCIOLogConvert2_2.connectInput(0, groupHSVToRGB2)
groupDot9.connectInput(0, groupSharpenInvDiff1)
groupDot10.connectInput(0, groupRGBToHSV2)
groupDot11.connectInput(0, groupDot10)
groupRGB709ToXYZ1.connectInput(0, groupwbPicker)
groupXYZToRGB7091.connectInput(0, groupMultiply3)
groupConstant1.connectInput(0, groupDot12)
groupDot12.connectInput(0, groupDot5)
groupMerge2.connectInput(0, groupmask)
groupMerge2.connectInput(1, groupDot13)
groupDot13.connectInput(0, groupConstant1)
groupDot4.connectInput(0, groupDot5)
groupwbPicker.connectInput(0, groupDot1)
groupHUESHIFT.connectInput(0, groupRGBToHSV1)
groupShuffle1.connectInput(0, groupDot10)
groupShuffle1.connectInput(1, groupDot9)
groupOCIOLogConvert2.connectInput(0, groupHSVToRGB1)
groupRGBToHSV2.connectInput(0, groupOCIOLogConvert2)
groupHSVToRGB2.connectInput(0, groupBlur1)
groupShuffle2.connectInput(0, groupSaturation1)
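# Note: the parameter expressions restored below share one pattern: each
# effect node's "disableNode" is driven by a group-level knob, so a stage is
# bypassed automatically whenever its knob sits at its neutral value and only
# active stages cost anything at render time.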
param = groupRGBToHSV1.getParam("disableNode")
param.setExpression("if thisGroup.colourvibrance.get() or thisGroup.hueshift.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupVIBRANCE.getParam("gamma")
param.setExpression("if thisGroup.colourvibrance.get() > 0:\n\tret = (1+thisGroup.colourvibrance.get()*2)\nelse:\n\tret = 1+thisGroup.colourvibrance.get()", True, 1)
del param
param = groupVIBRANCE.getParam("disableNode")
param.setExpression("if thisGroup.colourvibrance.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupInvert1.getParam("disableNode")
param.setExpression("1-thisGroup.invmask.get()", False, 0)
del param
param = groupHSVToRGB1.getParam("disableNode")
param.setExpression("if thisGroup.colourvibrance.get() or thisGroup.hueshift.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupSharpenInvDiff1.getParam("amplitude")
param.setExpression("thisGroup.contrastclarity.get()*3", False, 0)
del param
param = groupSharpenInvDiff1.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() > 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupBlur1.getParam("size")
param.setExpression("(thisGroup.contrastclarity.get()*-1)*2", False, 0)
param.setExpression("(thisGroup.contrastclarity.get()*-1)*2", False, 1)
del param
param = groupBlur1.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() < 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupHIGHLIGHT.getParam("multiply")
param.setExpression("thisGroup.highlightcorr.get()", False, 0)
param.setExpression("thisGroup.highlightcorr.get()", False, 1)
param.setExpression("thisGroup.highlightcorr.get()", False, 2)
param.setExpression("thisGroup.highlightcorr.get()", False, 3)
del param
param = groupHIGHLIGHT.getParam("disableNode")
param.setExpression("if thisNode.multiply.get()[dimension] != 1:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupSHADOW.getParam("gamma")
param.setExpression("1+thisGroup.shadowcorr.get()", False, 0)
param.setExpression("1+thisGroup.shadowcorr.get()", False, 1)
param.setExpression("1+thisGroup.shadowcorr.get()", False, 2)
param.setExpression("1+thisGroup.shadowcorr.get()", False, 3)
del param
param = groupSHADOW.getParam("disableNode")
param.setExpression("if thisNode.gamma.get()[dimension] != 1:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupMultiply2.getParam("value")
param.setExpression("1+thisGroup.whitebalance.get()", False, 2)
del param
param = groupMultiply2.getParam("disableNode")
param.setExpression("if thisGroup.whitebalance.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupMultiply3.getParam("value")
param.setExpression("1-thisGroup.colourtint.get()", False, 1)
del param
param = groupMultiply3.getParam("disableNode")
param.setExpression("if thisGroup.colourtint.get() != 0:\n\tret = 0\nelse:\n\tret = 1", False, 0)
del param
param = groupOCIOLogConvert2_2.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() == 0:\n\tret = 1\nelse:\n\tret = 0", True, 0)
del param
param = groupRGB709ToXYZ1.getParam("disableNode")
param.setExpression("if thisGroup.whitebalance.get() or thisGroup.colourtint.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupXYZToRGB7091.getParam("disableNode")
param.setExpression("if thisGroup.whitebalance.get() or thisGroup.colourtint.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupwbPicker.getParam("whitePoint")
group.getParam("Grade1whitePoint").setAsAlias(param)
del param
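# The "white" expressions below appear to neutralize the picked colour: every
# channel of the white gain is driven to the mean of the picked R, G and B
# values, so the picked colour maps to a neutral gray of the same brightness.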
param = groupwbPicker.getParam("white")
param.setExpression("channelsum = thisGroup.wbPicker.whitePoint.get()[0]+thisGroup.wbPicker.whitePoint.get()[1]+thisGroup.wbPicker.whitePoint.get()[2]\nret = channelsum / 3", True, 0)
param.setExpression("channelsum = thisGroup.wbPicker.whitePoint.get()[0]+thisGroup.wbPicker.whitePoint.get()[1]+thisGroup.wbPicker.whitePoint.get()[2]\nret = channelsum / 3", True, 1)
param.setExpression("channelsum = thisGroup.wbPicker.whitePoint.get()[0]+thisGroup.wbPicker.whitePoint.get()[1]+thisGroup.wbPicker.whitePoint.get()[2]\nret = channelsum / 3", True, 2)
param.setExpression("channelsum = thisGroup.wbPicker.whitePoint.get()[0]+thisGroup.wbPicker.whitePoint.get()[1]+thisGroup.wbPicker.whitePoint.get()[2]\nret = channelsum / 3", True, 3)
del param
param = groupwbPicker.getParam("disableNode")
param.setExpression("if thisNode.white.get()[dimension] != 1:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupHUESHIFT.getParam("offset")
param.setExpression("thisGroup.hueshift.get()/360", False, 0)
param.setExpression("thisGroup.hueshift.get()/360", False, 1)
param.setExpression("thisGroup.hueshift.get()/360", False, 2)
param.setExpression("thisGroup.hueshift.get()/360", False, 3)
del param
param = groupHUESHIFT.getParam("disableNode")
param.setExpression("if thisGroup.hueshift.get() != 0:\n\tret = 0\nelse:\n\tret = 1", True, 0)
del param
param = groupShuffle1.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() == 0:\n\tret = 1\nelse:\n\tret = 0", True, 0)
del param
param = groupOCIOLogConvert2.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() == 0:\n\tret = 1\nelse:\n\tret = 0", True, 0)
del param
param = groupRGBToHSV2.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() == 0:\n\tret = 1\nelse:\n\tret = 0", True, 0)
del param
param = groupHSVToRGB2.getParam("disableNode")
param.setExpression("if thisGroup.contrastclarity.get() == 0:\n\tret = 1\nelse:\n\tret = 0", True, 0)
del param
try:
extModule = sys.modules["lp_TweakyExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule, "createInstanceExt") and hasattr(extModule.createInstanceExt, "__call__"):
    extModule.createInstanceExt(app, group)
--- file record ---
repo: pulumi/pulumi-azure-native (refs/heads/master) | licenses: BSD-3-Clause, Apache-2.0 (permissive)
path: /sdk/python/pulumi_azure_native/synapse/v20210601preview/get_sql_pool_vulnerability_assessment_rule_baseline.py
language: Python (UTF-8) | size: 6,731 bytes | blob_id: 3e5e439fe90312f277c0c2b5e742c2ee37add0c1
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlPoolVulnerabilityAssessmentRuleBaselineResult',
'AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult',
'get_sql_pool_vulnerability_assessment_rule_baseline',
'get_sql_pool_vulnerability_assessment_rule_baseline_output',
]
@pulumi.output_type
class GetSqlPoolVulnerabilityAssessmentRuleBaselineResult:
"""
A Sql pool vulnerability assessment rule baseline.
"""
def __init__(__self__, baseline_results=None, id=None, name=None, type=None):
if baseline_results and not isinstance(baseline_results, list):
raise TypeError("Expected argument 'baseline_results' to be a list")
pulumi.set(__self__, "baseline_results", baseline_results)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="baselineResults")
def baseline_results(self) -> Sequence['outputs.SqlPoolVulnerabilityAssessmentRuleBaselineItemResponse']:
"""
The rule baseline result
"""
return pulumi.get(self, "baseline_results")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(GetSqlPoolVulnerabilityAssessmentRuleBaselineResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlPoolVulnerabilityAssessmentRuleBaselineResult(
baseline_results=self.baseline_results,
id=self.id,
name=self.name,
type=self.type)
def get_sql_pool_vulnerability_assessment_rule_baseline(baseline_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
rule_id: Optional[str] = None,
sql_pool_name: Optional[str] = None,
vulnerability_assessment_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult:
"""
Gets a SqlPool's vulnerability assessment rule baseline.
:param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str rule_id: The vulnerability assessment rule ID.
:param str sql_pool_name: SQL pool name
:param str vulnerability_assessment_name: The name of the vulnerability assessment.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['baselineName'] = baseline_name
__args__['resourceGroupName'] = resource_group_name
__args__['ruleId'] = rule_id
__args__['sqlPoolName'] = sql_pool_name
__args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:synapse/v20210601preview:getSqlPoolVulnerabilityAssessmentRuleBaseline', __args__, opts=opts, typ=GetSqlPoolVulnerabilityAssessmentRuleBaselineResult).value
return AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(
baseline_results=pulumi.get(__ret__, 'baseline_results'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_sql_pool_vulnerability_assessment_rule_baseline)
def get_sql_pool_vulnerability_assessment_rule_baseline_output(baseline_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
sql_pool_name: Optional[pulumi.Input[str]] = None,
vulnerability_assessment_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlPoolVulnerabilityAssessmentRuleBaselineResult]:
"""
Gets a SqlPool's vulnerability assessment rule baseline.
:param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str rule_id: The vulnerability assessment rule ID.
:param str sql_pool_name: SQL pool name
:param str vulnerability_assessment_name: The name of the vulnerability assessment.
:param str workspace_name: The name of the workspace.
"""
...
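# A minimal usage sketch (hypothetical resource names; invokes must run inside
# a Pulumi program, so the call below is illustrative only):
# baseline = get_sql_pool_vulnerability_assessment_rule_baseline(
#     baseline_name="default",
#     resource_group_name="my-rg",
#     rule_id="VA1001",
#     sql_pool_name="my-pool",
#     vulnerability_assessment_name="default",
#     workspace_name="my-workspace")
# pulumi.export("baselineResults", baseline.baseline_results)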
--- file record ---
repo: GoogleCloudPlatform/python-docs-samples (refs/heads/main) | licenses: Apache-2.0 (permissive)
path: /securitycenter/snippets/snippets_security_marks_test.py
language: Python (UTF-8) | size: 4,667 bytes | blob_id: 04f64a2a18a2e6b52cc191f3380ed3243d90f15a
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demos for working with security marks."""
import os
import random
import pytest
import snippets_security_marks
@pytest.fixture(scope="module")
def organization_id():
"""Gets Organization ID from the environment variable"""
return os.environ["GCLOUD_ORGANIZATION"]
@pytest.fixture(scope="module")
def asset_name(organization_id):
"""Returns a random asset name from existing assets."""
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization.
# organization_id=1234567777
org_name = f"organizations/{organization_id}"
assets = list(client.list_assets(request={"parent": org_name}))
# Select a random asset to avoid collision between integration tests.
asset = (random.sample(assets, 1)[0]).asset.name
# Set fresh marks.
update = client.update_security_marks(
request={
"security_marks": {
"name": f"{asset}/securityMarks",
"marks": {"other": "other_val"},
}
}
)
assert update.marks == {"other": "other_val"}
return asset
@pytest.fixture(scope="module")
def source_name(organization_id):
"""Creates a new source in the organization."""
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
org_name = f"organizations/{organization_id}"
source = client.create_source(
request={
"parent": org_name,
"source": {
"display_name": "Security marks Unit test source",
"description": "A new custom source that does X",
},
}
)
return source.name
@pytest.fixture(scope="module")
def finding_name(source_name):
"""Creates a new finding and returns it name."""
from google.cloud import securitycenter
from google.cloud.securitycenter_v1 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
client = securitycenter.SecurityCenterClient()
now_proto = Timestamp()
now_proto.GetCurrentTime()
finding = client.create_finding(
request={
"parent": source_name,
"finding_id": "scfinding",
"finding": {
"state": Finding.State.ACTIVE,
"category": "C1",
"event_time": now_proto,
"resource_name": "//cloudresourcemanager.googleapis.com/organizations/1234",
},
}
)
client.create_finding(
request={
"parent": source_name,
"finding_id": "untouched",
"finding": {
"state": Finding.State.ACTIVE,
"category": "MEDIUM_RISK_ONE",
"event_time": now_proto,
"resource_name": "//cloudresourcemanager.googleapis.com/organizations/1234",
},
}
)
return finding.name
def test_add_to_asset(asset_name):
updated_marks, marks = snippets_security_marks.add_to_asset(asset_name)
assert updated_marks.marks.keys() >= marks.keys()
def test_clear_from_asset(asset_name):
updated_marks = snippets_security_marks.clear_from_asset(asset_name)
assert "other" in updated_marks.marks
assert len(updated_marks.marks) == 1
def test_delete_and_update_marks(asset_name):
updated_marks = snippets_security_marks.delete_and_update_marks(asset_name)
assert updated_marks.marks == {"key_a": "new_value_for_a", "other": "other_val"}
def test_add_to_finding(finding_name):
updated_marks, marks = snippets_security_marks.add_to_finding(finding_name)
assert updated_marks.marks == marks
def test_list_assets_with_query_marks(organization_id, asset_name):
count = snippets_security_marks.list_assets_with_query_marks(
organization_id, asset_name
)
assert count >= 0
def test_list_findings_with_query_marks(source_name, finding_name):
count = snippets_security_marks.list_findings_with_query_marks(
source_name, finding_name
)
assert count == 0
--- file record ---
repo: RedHatInsights/insights-core (refs/heads/master) | licenses: Apache-2.0 (permissive)
path: /insights/client/subp.py
language: Python (UTF-8) | size: 521 bytes | blob_id: 03b7f8b8ffc3f3ca734167af1637dacb7a26ccb8
#!/usr/bin/python
import subprocess
import collections
ReturnTuple = collections.namedtuple('ReturnTuple',
['return_code', 'stdout', 'stderr'])
def subp(cmd):
"""
Run a command as a subprocess.
Return a triple of return code, standard out, standard err.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
return ReturnTuple(proc.returncode, stdout=out, stderr=err)
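# A minimal usage sketch (the command is illustrative; note that stdout and
# stderr come back as bytes):
# rc, out, err = subp(["uname", "-r"])
# if rc == 0:
#     print(out.decode("utf-8").strip())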
--- file record ---
repo: ForAllSecure/bncov (refs/heads/master) | licenses: MIT (permissive)
path: /scripts/timeline_coverage.py
language: Python (UTF-8) | size: 3,373 bytes | blob_id: f7c1022ae84128bcc790ff8afd8c054e913d7013
#!/usr/bin/env python
import sys
import os
import time
from binaryninja import *
# add the parent of the bncov repository to sys.path so `import bncov` works
# when this script is run directly (on either python2 or python3)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import bncov
USAGE = "USAGE: %s <target_file> <seed_directory> <coverage_directory> [log_name]" % sys.argv[0]
USAGE += "\n Break down timeline of when coverage increased based on modification time of seeds"
def format_duration(seconds):
return "%02d:%02d:%02d" % (seconds // 3600, (seconds // 60) % 60, seconds % 60)
def get_timestamp(path):
"""Return a timestamp to give a sense of time between seeds.
If you want to maintain timestamps via other means (db, flat file with times, etc),
just swap out this function for your implementation."""
return os.path.getmtime(path)
def get_coverage_timeline(covdb, seed_dir, cov_dir):
if not os.path.exists(seed_dir):
print("[!] Seed dir `%s` doesn't exist" % seed_dir)
exit(1)
seeds = os.listdir(seed_dir)
seed_paths = [os.path.join(seed_dir, seed_name) for seed_name in seeds]
seed_times = {path: get_timestamp(path) for path in seed_paths}
# Assume the bncov naming convention
seed_to_coverage = {}
for seed_path in seed_paths:
seed_name = os.path.basename(seed_path)
coverage_path = os.path.join(cov_dir, seed_name) + ".cov"
if coverage_path not in covdb.trace_dict:
print("[!] Didn't find matching trace (expected %s) for seed \"%s\", skipping" % (coverage_path, seed_path))
seed_times.pop(seed_path)
else:
seed_to_coverage[seed_path] = coverage_path
sorted_seeds = sorted(seed_times.items(), key=lambda kv: kv[1])
running_coverage = set()
initial_time = sorted_seeds[0][1]
datapoints = [] # list of (seconds_elapsed, total_blocks)
for seed_path, mod_time in sorted_seeds:
# print("[DBG] %s: %s" % (seed_path, time.asctime(time.localtime(mod_time))))
seed_name = os.path.basename(seed_path)
seed_coverage = covdb.trace_dict[seed_to_coverage[seed_path]]
new_coverage = seed_coverage - running_coverage
# print('[DBG] %s: %d total, %d new' % (seed_name, len(seed_coverage), len(new_coverage)))
if len(new_coverage) > 0:
seconds_elapsed = mod_time - initial_time
num_new_blocks = len(new_coverage)
running_coverage.update(new_coverage)
num_total_blocks = len(running_coverage)
print("[T+%s] %d new blocks from %s (%d)" %
(format_duration(seconds_elapsed), num_new_blocks, seed_name, num_total_blocks))
datapoints.append((int(seconds_elapsed)+1, num_total_blocks))
return datapoints
if __name__ == "__main__":
if len(sys.argv) not in [4, 5]:
print(USAGE)
exit(1)
target_filename, seed_dir, cov_dir = sys.argv[1:4]
log_name = None
if len(sys.argv) == 5:
log_name = sys.argv[4]
bv = bncov.make_bv(target_filename, quiet=False)
covdb = bncov.make_covdb(bv, cov_dir, quiet=False)
datapoints = get_coverage_timeline(covdb, seed_dir, cov_dir)
if log_name is not None:
with open(log_name, 'w') as f:
f.write(repr(datapoints))
print('[+] Wrote %d datapoints to %s' % (len(datapoints), log_name))
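# A minimal sketch for plotting the saved datapoints (assumes matplotlib is
# installed; "timeline.log" stands in for whatever log_name was used):
# import ast
# import matplotlib.pyplot as plt
# seconds, blocks = zip(*ast.literal_eval(open("timeline.log").read()))
# plt.step(seconds, blocks, where="post")
# plt.xlabel("seconds elapsed")
# plt.ylabel("total blocks covered")
# plt.show()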
--- file record ---
repo: BachiLi/diffvg (refs/heads/master) | licenses: Apache-2.0 (permissive)
path: /apps/single_curve_tf.py
language: Python (UTF-8) | size: 4,518 bytes | blob_id: 5d65395f78654d42c816c67414cd97e5b83afa44
import pydiffvg_tensorflow as pydiffvg
import tensorflow as tf
import skimage
import numpy as np
canvas_width, canvas_height = 256, 256
num_control_points = tf.constant([2, 2, 2])
points = tf.constant([[120.0, 30.0], # base
[150.0, 60.0], # control point
[ 90.0, 198.0], # control point
[ 60.0, 218.0], # base
[ 90.0, 180.0], # control point
[200.0, 65.0], # control point
[210.0, 98.0], # base
[220.0, 70.0], # control point
[130.0, 55.0]]) # control point
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
is_closed = True)
shapes = [path]
path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32),
fill_color = tf.constant([0.3, 0.6, 0.3, 1.0]))
shape_groups = [path_group]
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.render
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(0), # seed
*scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
pydiffvg.imwrite(img, 'results/single_curve_tf/target.png', gamma=2.2)
target = tf.identity(img)
# Move the path to produce initial guess
# normalize points for easier learning rate
points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base
[155.0/256.0, 65.0/256.0], # control point
[100.0/256.0, 180.0/256.0], # control point
[ 65.0/256.0, 238.0/256.0], # base
[100.0/256.0, 200.0/256.0], # control point
[170.0/256.0, 55.0/256.0], # control point
[220.0/256.0, 100.0/256.0], # base
[210.0/256.0, 80.0/256.0], # control point
[140.0/256.0, 60.0/256.0]]) # control point
color = tf.Variable([0.3, 0.2, 0.5, 1.0])
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(1), # seed
*scene_args)
pydiffvg.imwrite(img, 'results/single_curve_tf/init.png', gamma=2.2)
optimizer = tf.compat.v1.train.AdamOptimizer(1e-2)
for t in range(100):
print('iteration:', t)
with tf.GradientTape() as tape:
# Forward pass: render the image.
path.points = points_n * 256
path_group.fill_color = color
# Important to use a different seed every iteration, otherwise the result
# would be biased.
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(t+1), # seed,
*scene_args)
loss_value = tf.reduce_sum(tf.square(img - target))
print(f"loss_value: {loss_value}")
pydiffvg.imwrite(img, 'results/single_curve_tf/iter_{}.png'.format(t))
grads = tape.gradient(loss_value, [points_n, color])
print(grads)
optimizer.apply_gradients(zip(grads, [points_n, color]))
# Render the final result.
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(101), # seed
*scene_args)
# Save the images and differences.
pydiffvg.imwrite(img, 'results/single_curve_tf/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/single_curve_tf/iter_%d.png", "-vb", "20M",
"results/single_curve_tf/out.mp4"])
--- file record ---
repo: sourabhv/FlapPyBird (refs/heads/master) | licenses: MIT (permissive)
path: /src/entities/background.py
language: Python (UTF-8) | size: 329 bytes | blob_id: 9dc639ee1e56f34333dbd2150776ef70286e03c2
from ..utils import GameConfig
from .entity import Entity
class Background(Entity):
def __init__(self, config: GameConfig) -> None:
super().__init__(
config,
config.images.background,
0,
0,
config.window.width,
config.window.height,
)
|
c009ee790734193dc29d20ba6e35af27f065c57f
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_hexadecimal.py
|
57d61622e360f607e32ee80e3bdf72829b2ca052
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,878
|
py
|
expect_column_values_to_be_hexadecimal.py
|
from typing import Optional
from great_expectations.core import ExpectationConfiguration
from great_expectations.expectations.regex_based_column_map_expectation import (
RegexBasedColumnMapExpectation,
)
class ExpectColumnValuesToBeHexadecimal(RegexBasedColumnMapExpectation):
"""Expect column values to be valid hexadecimals."""
regex_camel_name = "HexadecimalNumber"
regex = r"^[0-9a-fA-F]+$"
semantic_type_name_plural = "hexadecimals"
map_metric = RegexBasedColumnMapExpectation.register_metric(
regex_camel_name=regex_camel_name,
regex_=regex,
)
library_metadata = {
"maturity": "experimental",
"tags": ["experimental"],
"contributors": [
"@andrewsx",
"@mkopec87",
],
}
examples = [
{
"data": {
"a": ["3", "aa", "ba", "5A", "60F", "Gh"],
"b": ["Verify", "String", "3Z", "X", "yy", "sun"],
"c": ["0", "BB", "21D", "ca", "20", "1521D"],
"d": ["c8", "ffB", "11x", "apple", "ran", "woven"],
"e": ["a8", "21", "2.0", "1B", "4AA", "31"],
"f": ["a8", "41", "ca", "", "0", "31"],
},
"suppress_test_for": ["mssql", "bigquery", "snowflake"],
"tests": [
{
"title": "positive_test_with_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "a", "mostly": 0.6},
"out": {
"success": True,
"unexpected_index_list": [5],
"unexpected_list": ["Gh"],
},
},
{
"title": "negative_test_without_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "b"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2, 3, 4, 5],
"unexpected_list": ["Verify", "String", "3Z", "X", "yy", "sun"],
},
},
{
"title": "positive_test_without_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "c"},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "d", "mostly": 0.6},
"out": {
"success": False,
"unexpected_index_list": [2, 3, 4, 5],
"unexpected_list": ["11x", "apple", "ran", "woven"],
},
},
{
"title": "negative_test_with_float",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "e"},
"out": {
"success": False,
"unexpected_index_list": [2],
"unexpected_list": ["2.0"],
},
},
{
"title": "negative_test_with_empty_value",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "f"},
"out": {
"success": False,
"unexpected_index_list": [3],
"unexpected_list": [""],
},
},
],
}
]
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if __name__ == "__main__":
ExpectColumnValuesToBeHexadecimal().print_diagnostic_checklist()
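# A minimal usage sketch (assumes this expectation has been registered and a
# Great Expectations validator is in scope; the column name is hypothetical):
# validator.expect_column_values_to_be_hexadecimal(column="tx_hash", mostly=0.9)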
--- file record ---
repo: bayespy/bayespy (refs/heads/develop) | licenses: MIT, AFL-3.0, GPL-1.0-or-later, BSD-3-Clause, Apache-2.0, LicenseRef-scancode-proprietary-license, LicenseRef-scancode-unknown-license-reference (permissive)
path: /bayespy/inference/vmp/nodes/deterministic.py
language: Python (UTF-8) | size: 11,968 bytes | blob_id: ab4bca087f2b0af22471871e59f916c4621c1908
################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import functools
import numpy as np
from bayespy.utils import misc
from .node import Node, Moments
class Deterministic(Node):
"""
Base class for deterministic nodes.
Sub-classes must implement:
1. For implementing the deterministic function:
_compute_moments(self, *u)
2. One of the following options:
a) Simple methods:
_compute_message_to_parent(self, index, m, *u)
b) More control with:
_compute_message_and_mask_to_parent(self, index, m, *u)
Sub-classes may need to re-implement:
1. If they manipulate plates:
_compute_weights_to_parent(index, mask)
_compute_plates_to_parent(self, index, plates)
_compute_plates_from_parent(self, index, plates)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, plates=None, notify_parents=False, **kwargs)
def _get_id_list(self):
"""
Returns the stochastic ID list.
This method is used to check that same stochastic nodes are not direct
parents of a node several times. It is only valid if there are
intermediate stochastic nodes.
To put it another way: each ID corresponds to one factor q(..) in the
posterior approximation. Different IDs mean different factors, thus they
mean independence. The parents must have independent factors.
Stochastic nodes should return their unique ID. Deterministic nodes
should return the IDs of their parents. Constant nodes should return
empty list of IDs.
"""
id_list = []
for parent in self.parents:
id_list = id_list + parent._get_id_list()
return id_list
def get_moments(self):
u_parents = self._message_from_parents()
return self._compute_moments(*u_parents)
def _compute_message_and_mask_to_parent(self, index, m_children, *u_parents):
# The following methods should be implemented by sub-classes.
m = self._compute_message_to_parent(index, m_children, *u_parents)
mask = self._compute_weights_to_parent(index, self.mask) != 0
return (m, mask)
def _get_message_and_mask_to_parent(self, index, u_parent=None):
u_parents = self._message_from_parents(exclude=index)
u_parents[index] = u_parent
if u_parent is not None:
u_self = self._compute_moments(*u_parents)
else:
u_self = None
m_children = self._message_from_children(u_self=u_self)
return self._compute_message_and_mask_to_parent(index,
m_children,
*u_parents)
def _compute_moments(self, *u_parents):
"""
Compute the moments given the moments of the parents.
"""
raise NotImplementedError()
def _compute_message_to_parent(self, index, m_children, *u_parents):
"""
Compute the message to a parent.
"""
raise NotImplementedError()
def _add_child(self, child, index):
"""
Add a child node.
Only child nodes that are stochastic (or have stochastic children
recursively) are counted as children because deterministic nodes without
stochastic children do not have any messages to send so the parents do
not need to know about the deterministic node.
A deterministic node does not notify its parents when created, but if it
gets a stochastic child node, then notify parents. This method is called
only if a stochastic child (recursively) node is added, thus there is at
least one stochastic node below this deterministic node.
Parameters
----------
child : node
index : int
The parent index of this node for the child node.
The child node recognizes its parents by their index
number.
"""
super()._add_child(child, index)
# Now that this deterministic node has non-deterministic children,
# notify parents
for (ind,parent) in enumerate(self.parents):
parent._add_child(self, ind)
def _remove_child(self, child, index):
"""
Remove a child node.
Only child nodes that are stochastic (or have stochastic children
recursively) are counted as children because deterministic nodes without
stochastic children do not have any messages to send so the parents do
not need to know about the deterministic node.
So, if the deterministic node does not have any stochastic children left
after removal, remove it from its parents.
"""
super()._remove_child(child, index)
# Check whether there are any children left. If not, remove from parents
if len(self.children) == 0:
for (ind, parent) in enumerate(self.parents):
parent._remove_child(self, ind)
def lower_bound_contribution(self, gradient=False, **kwargs):
# Deterministic functions are delta distributions so the lower bound
        # contribution is zero.
return 0
def random(self):
samples = [parent.random() for parent in self.parents]
return self._compute_function(*samples)
def tile(X, tiles):
"""
Tile the plates of the input node.
x = [a,b,c]
y = tile(x, 2) = [a,b,c,a,b,c]
There should be no need to tile plates that have unit length because they
are handled properly by the broadcasting rules already.
Parameters
----------
X : Node
Input node to be tiled.
tiles : int, tuple
Tiling of the plates (broadcasting rules for plates apply).
See also
--------
numpy.tile
"""
# Make sure `tiles` is tuple (even if an integer is given)
tiles = tuple(np.ravel(tiles))
class _Tile(Deterministic):
_parent_moments = (Moments(),)
def __init__(self, X, **kwargs):
self._moments = X._moments
super().__init__(X, dims=X.dims, **kwargs)
def _compute_plates_to_parent(self, index, plates):
plates = list(plates)
for i in range(-len(tiles), 0):
plates[i] = plates[i] // tiles[i]
return tuple(plates)
def _compute_plates_from_parent(self, index, plates):
return tuple(misc.multiply_shapes(plates, tiles))
def _compute_weights_to_parent(self, index, weights):
# Idea: Reshape the message array such that every other axis
# will be summed and every other kept.
# Make plates equal length
plates = self._plates_to_parent(index)
shape_m = np.shape(weights)
(plates, tiles_m, shape_m) = misc.make_equal_length(
plates,
tiles,
shape_m
)
# Handle broadcasting rules for axes that have unit length in
# the message (although the plate may be non-unit length). Also,
# compute the corresponding broadcasting_multiplier.
plates = list(plates)
tiles_m = list(tiles_m)
for j in range(len(plates)):
if shape_m[j] == 1:
plates[j] = 1
tiles_m[j] = 1
# Combine the tuples by picking every other from tiles_ind and
# every other from shape
shape = functools.reduce(lambda x,y: x+y,
zip(tiles_m, plates))
# ..and reshape the array, that is, every other axis corresponds
# to tiles and every other to plates/dimensions in parents
weights = np.reshape(weights, shape)
# Sum over every other axis
axes = tuple(range(0,len(shape),2))
weights = np.sum(weights, axis=axes)
# Remove extra leading axes
ndim_parent = len(self.parents[index].plates)
weights = misc.squeeze_to_dim(weights, ndim_parent)
return weights
def _compute_message_to_parent(self, index, m, u_X):
m = list(m)
for ind in range(len(m)):
# Idea: Reshape the message array such that every other axis
# will be summed and every other kept.
shape_ind = self._plates_to_parent(index) + self.dims[ind]
# Add variable dimensions to tiles
tiles_ind = tiles + (1,)*len(self.dims[ind])
# Make shape tuples equal length
shape_m = np.shape(m[ind])
(tiles_ind, shape, shape_m) = misc.make_equal_length(tiles_ind,
shape_ind,
shape_m)
# Handle broadcasting rules for axes that have unit length in
# the message (although the plate may be non-unit length). Also,
# compute the corresponding broadcasting multiplier.
r = 1
shape = list(shape)
tiles_ind = list(tiles_ind)
for j in range(len(shape)):
if shape_m[j] == 1:
r *= tiles_ind[j]
shape[j] = 1
tiles_ind[j] = 1
# Combine the tuples by picking every other from tiles_ind and
# every other from shape
shape = functools.reduce(lambda x,y: x+y,
zip(tiles_ind, shape))
# ..and reshape the array, that is, every other axis corresponds
# to tiles and every other to plates/dimensions in parents
m[ind] = np.reshape(m[ind], shape)
# Sum over every other axis
axes = tuple(range(0,len(shape),2))
m[ind] = r * np.sum(m[ind], axis=axes)
# Remove extra leading axes
ndim_parent = len(self.parents[index].get_shape(ind))
m[ind] = misc.squeeze_to_dim(m[ind], ndim_parent)
return m
def _compute_moments(self, u_X):
"""
Tile the plates of the parent's moments.
"""
# Utilize broadcasting: If a tiled axis is unit length in u_X, there
# is no need to tile it.
u = list()
for ind in range(len(u_X)):
ui = u_X[ind]
shape_u = np.shape(ui)
if np.ndim(ui) > 0:
# Add variable dimensions
tiles_ind = tiles + (1,)*len(self.dims[ind])
# Utilize broadcasting: Do not tile leading empty axes
nd = min(len(tiles_ind), np.ndim(ui))
tiles_ind = tiles_ind[(-nd):]
# For simplicity, make tiles and shape equal length
(tiles_ind, shape_u) = misc.make_equal_length(tiles_ind,
shape_u)
# Utilize broadcasting: Use tiling only if the parent's
# moment has non-unit axis length.
tiles_ind = [tile if sh > 1 else 1
for (tile, sh) in zip(tiles_ind, shape_u)]
# Tile
ui = np.tile(ui, tiles_ind)
u.append(ui)
return u
return _Tile(X, name="tile(%s, %s)" % (X.name, tiles))
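# A minimal usage sketch of tile() (the node choice is illustrative):
# from bayespy.nodes import GaussianARD
# X = GaussianARD(0, 1, plates=(3,))
# Y = tile(X, 2)  # Y's plates are (6,): X's plates repeated twice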
--- file record ---
repo: MasoniteFramework/exceptionite (refs/heads/master) | licenses: MIT (permissive)
path: /src/exceptionite/blocks/PackagesUpdates.py
language: Python (UTF-8) | size: 1,442 bytes | blob_id: ea23907eb44f0102cc14d86364a5014efde519fe
import pkg_resources
import requests
from ..Block import Block
def get_latest_version(name):
r = requests.get(f"https://pypi.org/pypi/{name}/json")
if r.status_code == 200:
version = r.json()["info"]["version"]
return version
return None
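# A minimal usage sketch of the helper above (the package name is illustrative):
# print(get_latest_version("requests"))  # e.g. "2.31.0", or None if the lookup fails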
class PackagesUpdates(Block):
id = "packages_updates"
name = "Packages to update"
icon = "ArrowCircleUpIcon"
component = "PackagesUpdatesBlock"
empty_msg = "Selected packages are up to date !"
disable_scrubbing = True
def build(self):
installed_packages = {
package.key: package.version for package in pkg_resources.working_set
}
packages_to_check = self.options.get("list", ["exceptionite"])
packages = {}
if packages_to_check:
for package_name in packages_to_check:
current_version = installed_packages.get(package_name)
latest_version = get_latest_version(package_name)
if current_version != latest_version:
packages.update(
{
package_name: {
"current": installed_packages.get(package_name),
"latest": latest_version,
}
}
)
return packages
def has_content(self):
return len(self.data.keys()) > 0
--- file record ---
repo: google/mediapipe (refs/heads/master) | licenses: Apache-2.0, dtoa (permissive)
path: /mediapipe/model_maker/python/text/text_classifier/dataset_test.py
language: Python (UTF-8) | size: 2,985 bytes | blob_id: 890339b4aa4e04bfaa4cfac83cfbf0a5e0aaa0df
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import tensorflow as tf
from mediapipe.model_maker.python.text.text_classifier import dataset
class DatasetTest(tf.test.TestCase):
def _get_csv_file(self):
labels_and_text = (('neutral', 'indifferent'), ('pos', 'extremely great'),
('neg', 'totally awful'), ('pos', 'super good'),
('neg', 'really bad'))
csv_file = os.path.join(self.get_temp_dir(), 'data.csv')
if os.path.exists(csv_file):
return csv_file
fieldnames = ['text', 'label']
with open(csv_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for label, text in labels_and_text:
writer.writerow({'text': text, 'label': label})
return csv_file
def test_from_csv(self):
csv_file = self._get_csv_file()
csv_params = dataset.CSVParameters(text_column='text', label_column='label')
data = dataset.Dataset.from_csv(filename=csv_file, csv_params=csv_params)
self.assertLen(data, 5)
self.assertEqual(data.num_classes, 3)
self.assertEqual(data.label_names, ['neg', 'neutral', 'pos'])
data_values = set([(text.numpy()[0], label.numpy()[0])
for text, label in data.gen_tf_dataset()])
expected_data_values = set([(b'indifferent', 1), (b'extremely great', 2),
(b'totally awful', 0), (b'super good', 2),
(b'really bad', 0)])
self.assertEqual(data_values, expected_data_values)
def test_split(self):
ds = tf.data.Dataset.from_tensor_slices(['good', 'bad', 'neutral', 'odd'])
data = dataset.Dataset(ds, ['pos', 'neg'], size=4)
train_data, test_data = data.split(0.5)
expected_train_data = [b'good', b'bad']
expected_test_data = [b'neutral', b'odd']
self.assertLen(train_data, 2)
train_data_values = [elem.numpy() for elem in train_data._dataset]
self.assertEqual(train_data_values, expected_train_data)
self.assertEqual(train_data.num_classes, 2)
self.assertEqual(train_data.label_names, ['pos', 'neg'])
self.assertLen(test_data, 2)
test_data_values = [elem.numpy() for elem in test_data._dataset]
self.assertEqual(test_data_values, expected_test_data)
self.assertEqual(test_data.num_classes, 2)
self.assertEqual(test_data.label_names, ['pos', 'neg'])
if __name__ == '__main__':
tf.test.main()
--- file record ---
repo: NeuromatchAcademy/course-content (refs/heads/main) | licenses: CC-BY-4.0, BSD-3-Clause (permissive)
path: /tutorials/W3D4_ReinforcementLearning/solutions/W3D4_Tutorial1_Solution_6e708fa1.py
language: Python (UTF-8) | size: 634 bytes | blob_id: 329305dc20015c77b77a73a0e14aa99c40f350fc
"""
The multi-reward and probabilistic reward environments are the same. You
could simulate a probabilistic reward of 10 units, delivered 50% of the time,
by having a mixture of 10 and 0 unit rewards, or vice versa. The take home message
from these last three exercises is that the *average* or expected reward is
what matters during TD learning.
Large values of alpha prevent the TD Learner from converging. As a result, the
value function seems implausible: one state may have an extremely low value while
the neighboring ones remain high. This pattern persists even if training continues
for hundreds of thousands of trials.
""";
--- file record ---
repo: matchms/matchms (refs/heads/master) | licenses: Apache-2.0 (permissive)
path: /tests/filtering/test_harmonize_undefined_inchikey.py
language: Python (UTF-8) | size: 1,173 bytes | blob_id: aa18bb9377b43c09ad18ee34ef0bf67094b2e656
import pytest
from matchms.filtering import harmonize_undefined_inchikey
from ..builder_Spectrum import SpectrumBuilder
@pytest.mark.parametrize("metadata, aliases, undefined, expected", [
[{"inchikey": ""}, ["", "N/A", "NA", "n/a", "no data"], "", ""],
[{"inchikey": "n/a"}, ["", "N/A", "NA", "n/a", "no data"], "", ""],
[{"inchikey": "N/A"}, ["", "N/A", "NA", "n/a", "no data"], "", ""],
[{"inchikey": "NA"}, ["", "N/A", "NA", "n/a", "no data"], "", ""],
[{"inchikey": "no data"}, ["", "N/A", "NA", "n/a", "no data"], "", ""],
[{"inchikey": "nan"}, ["nodata", "NaN", "Nan", "nan"], "", ""],
[{"inchikey": "nan"}, ["nodata", "NaN", "Nan", "nan"], "n/a", "n/a"]
])
def test_harmonize_undefined_inchikey(metadata, aliases, undefined, expected):
spectrum_in = SpectrumBuilder().with_metadata(metadata).build()
spectrum = harmonize_undefined_inchikey(spectrum_in, aliases=aliases, undefined=undefined)
assert spectrum.get("inchikey") == expected
def test_empty_spectrum():
spectrum_in = None
spectrum = harmonize_undefined_inchikey(spectrum_in)
assert spectrum is None, "Expected different handling of None spectrum."
--- file record ---
repo: Ptolemy-DL/Ptolemy (refs/heads/master) | licenses: Apache-2.0 (permissive)
path: /src/nninst/backend/tensorflow/model/__init__.py
language: Python (UTF-8) | size: 150 bytes | blob_id: f6d33c165555a21b301d95c76b83e5b9e1d74678
from .alexnet import AlexNet
from .inception_v4 import InceptionV4
from .lenet import LeNet
from .resnet_50 import ResNet50
from .vgg_16 import VGG16
--- file record ---
repo: googleapis/google-cloud-java (refs/heads/main) | licenses: Apache-2.0, LicenseRef-scancode-unknown-license-reference (permissive)
path: /java-errorreporting/owlbot.py
language: Python (UTF-8) | size: 3,494 bytes | blob_id: 2ce82eb3983944c43f40a2c5cf24dfd2b13e58ac
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import synthtool as s
from synthtool.languages import java
for library in s.get_staging_dirs():
# put any special-case replacements here
ERROR_GROUP_OVERLOAD = """
// Inserted by synthtool to preserve backwards-compatibility
/**
* Get the specified group.
*
* <p>Sample code:
*
* <pre><code>
* try (ErrorGroupServiceClient errorGroupServiceClient = ErrorGroupServiceClient.create()) {
* GroupName groupName = GroupName.of("[PROJECT]", "[GROUP]");
* ErrorGroup response = errorGroupServiceClient.getGroup(groupName);
* }
* </code></pre>
*
* @param groupName Required. The group resource name. Written as
* <code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
* Call <a href="/error-reporting/reference/rest/v1beta1/projects.groupStats/list">
* <code>groupStats.list</code></a> to return a list of groups belonging to
* this project.
* <p>Example: <code>projects/my-project-123/groups/my-group</code>
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
* @deprecated Use ErrorGroupServiceClient#getGroup(ErrorGroupName)
*/
@Deprecated
public final ErrorGroup getGroup(GroupName groupName) {
GetGroupRequest request =
GetGroupRequest.newBuilder()
.setGroupName(groupName == null ? null : groupName.toString())
.build();
return getGroup(request);
}
"""
ERROR_GROUP_OVERLOAD_PREVIOUS_METHOD = r'(\s+public ErrorGroupServiceStub getStub\(\) {\n\s+return stub;\n\s+})'
version = library.parts[len(library.parts) - 1]
service = 'errorreporting'
#java.fix_proto_headers('owl-bot-staging/v2beta1/proto-google-cloud-error-reporting-v1beta1')
#java.fix_grpc_headers('owl-bot-staging/v2beta1/grpc-google-cloud-error-reporting-v1beta1', "")
s.replace(
'owl-bot-staging/v1beta1/google-cloud-errorreporting/src/**/ErrorGroupServiceClient.java',
ERROR_GROUP_OVERLOAD_PREVIOUS_METHOD,
"\g<1>\n\n" + ERROR_GROUP_OVERLOAD
)
s.replace(
'owl-bot-staging/v1beta1/google-cloud-errorreporting/src/**/ErrorGroupServiceClient.java',
"import com.google.devtools.clouderrorreporting.v1beta1.ErrorGroupName;",
"import com.google.devtools.clouderrorreporting.v1beta1.ErrorGroupName;\nimport com.google.devtools.clouderrorreporting.v1beta1.GroupName;"
)
s.move(library)
s.remove_staging_dirs()
java.common_templates(monorepo=True, excludes=[
".github/*",
".kokoro/*",
"samples/*",
"CODE_OF_CONDUCT.md",
"CONTRIBUTING.md",
"LICENSE",
"SECURITY.md",
"java.header",
"license-checks.xml",
"renovate.json",
".gitignore"
])
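For reference, the effect of the s.replace calls above, sketched with plain
re (synthtool's s.replace applies a regex substitution to every file matching
the glob; the Java snippet here is illustrative):

import re

PREVIOUS_METHOD = r'(\s+public ErrorGroupServiceStub getStub\(\) {\n\s+return stub;\n\s+})'
source = "  public ErrorGroupServiceStub getStub() {\n    return stub;\n  }"
# \g<1> re-emits the matched method, then new text is appended after it.
patched = re.sub(PREVIOUS_METHOD, "\\g<1>\n\n  // deprecated overload inserted here", source)
print(patched)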
36cd9c4c469b25304fb641c6034d3b5d1c0e8df0 | 620323fc090cebaf7aca456ff3f7fbbe1e210394 | /search_for_similar_images__perceptual_hash__phash/ui/FieldsProgressDialog.py | 3c1a6e0dad2cd1cb31ae14b0fe63af6164f6f84f | ["CC-BY-4.0"] | permissive | gil9red/SimplePyScripts | bd2733372728bf9b9f00570e90316fa12116516b | 773c2c9724edd8827a1dbd91694d780e03fcb05a | refs/heads/master | 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 | Python | UTF-8 | Python | false | false | 1,077 | py | FieldsProgressDialog.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
from PyQt5.QtWidgets import QProgressDialog
from PyQt5.QtCore import Qt
from .KeyValueLabel import KeyValueLabel
class FieldsProgressDialog(QProgressDialog):
def __init__(
self,
minimum,
maximum,
window_title,
label_text="Operation in progress...",
parent=None,
):
super().__init__(parent)
self.setWindowModality(Qt.WindowModal)
self.setRange(minimum, maximum)
self.setWindowTitle(window_title)
self._label = KeyValueLabel(self)
self.setLabel(self._label)
self.setLabelText(label_text)
def setFields(self, fields: dict):
self._label.setFields(fields)
        # NOTE: Needed to trigger the internal ensureSizeIsAtLeastSizeHint,
        # without which the progress dialog's size is not updated.
# https://code.woboq.org/qt5/qtbase/src/widgets/dialogs/qprogressdialog.cpp.html#387
self.setLabelText("")
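A minimal driver sketch (hypothetical; it assumes KeyValueLabel.setFields
renders a dict as key/value rows and that FieldsProgressDialog is importable
from its package):

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
dialog = FieldsProgressDialog(0, 100, window_title="Hashing images")
dialog.show()
for i in range(101):
    if dialog.wasCanceled():
        break
    dialog.setValue(i)  # drives the built-in progress bar
    dialog.setFields({"Processed": i, "Remaining": 100 - i})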
04369402caa7a11e864c120de65973a113a393be | 07b4bfb23ab70df70c1657f075e33af5d6005f41 | /instant/tests/test_conf.py | 05a70e4d1178e5673011a0ed8aa450883b586b1e | ["MIT"] | permissive | synw/django-instant | 132bcabc29cdab958076635165d6a0d6b5b464fb | edd6b0164385d282aaa5805d5322fc59a3c556e8 | refs/heads/master | 2022-12-23T12:49:10.385945 | 2022-12-19T11:02:16 | 2022-12-19T11:02:16 | 64,665,381 | 128 | 15 | MIT | 2022-12-19T10:52:51 | 2016-08-01T12:35:34 | Python | UTF-8 | Python | false | false | 1,503 | py | test_conf.py |
from typing import Any, Dict
from .base import InstantBaseTest
from instant.init import generate_settings_from_conf
from instant.conf import (
CENTRIFUGO_API_KEY,
CENTRIFUGO_HMAC_KEY,
CENTRIFUGO_HOST,
CENTRIFUGO_PORT,
SITE_SLUG,
SITE_NAME,
)
class InstantTestConf(InstantBaseTest):
def test_default_conf(self):
self.assertEqual(CENTRIFUGO_API_KEY, "key")
self.assertEqual(CENTRIFUGO_HMAC_KEY, "key")
self.assertEqual(CENTRIFUGO_HOST, "http://localhost")
self.assertEqual(CENTRIFUGO_PORT, 8427)
self.assertEqual(SITE_SLUG, "site")
self.assertEqual(SITE_NAME, "Site")
def test_generate_settings_from_conf(self):
conf: Dict[str, Any] = {"token_hmac_secret_key": "key", "api_key": "key"}
s = generate_settings_from_conf(conf, "site")
self.assertListEqual(
[
'CENTRIFUGO_HOST = "http://localhost"',
"CENTRIFUGO_PORT = 8427",
'CENTRIFUGO_HMAC_KEY = "key"',
'CENTRIFUGO_API_KEY = "key"',
'SITE_NAME = "site"',
],
s,
)
s = generate_settings_from_conf(conf)
self.assertListEqual(
[
'CENTRIFUGO_HOST = "http://localhost"',
"CENTRIFUGO_PORT = 8427",
'CENTRIFUGO_HMAC_KEY = "key"',
'CENTRIFUGO_API_KEY = "key"',
'SITE_NAME = "tests"',
],
s,
)
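A sketch of how the generated lines might be consumed (the output file name
is hypothetical): write them into a settings module that Django imports.

from instant.init import generate_settings_from_conf

conf = {"token_hmac_secret_key": "key", "api_key": "key"}
lines = generate_settings_from_conf(conf, "site")
with open("instant_settings.py", "w") as fh:
    fh.write("\n".join(lines) + "\n")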
cc116ddf74eab9aba75e93cc8b206de31b74a4b4 | f2e3a255615a6360fc3bb9e285a2a8ab752e8b29 | /benchmarks/django/simple/test_f.py | 4ddf8f3d6a4eb3d0a009453e63a79d86c2031b7d | [] | no_license | tortoise/orm-benchmarks | cc32e3658b6e714c9d57e41c7123ccdeca7205c0 | ff2a5875f802ac0c6ea1bee57eb87efcc5b052cb | refs/heads/master | 2023-03-04T10:37:04.550558 | 2022-03-21T11:05:52 | 2022-03-21T11:05:52 | 148,795,219 | 148 | 21 | null | 2023-02-15T18:25:35 | 2018-09-14T13:50:21 | Python | UTF-8 | Python | false | false | 417 | py | test_f.py |
import django  # noqa
django.setup()  # noqa
import os
import time
from random import randint
from simple.models import Journal
count = int(os.environ.get("ITERATIONS", "1000"))
maxval = count - 1
count *= 2
start = time.time()
for _ in range(count):
Journal.objects.get(id=randint(1, maxval))
now = time.time()
print(f"Django, F: Rows/sec: {count / (now - start): 10.2f}")
3aaf7448c2eaa24f19dd573beb4f6098f495b967 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/ApiModules/Scripts/HTTPFeedApiModule/HTTPFeedApiModule.py | 2913b959587af74fd414d1e8c4d82f35592811d2 | ["MIT"] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 25,662 | py | HTTPFeedApiModule.py |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import urllib3
import requests
from typing import Optional, Pattern, List
# disable insecure warnings
urllib3.disable_warnings()
''' GLOBALS '''
TAGS = 'tags'
TLP_COLOR = 'trafficlightprotocol'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class Client(BaseClient):
def __init__(self, url: str, feed_name: str = 'http', insecure: bool = False, credentials: dict = None,
ignore_regex: str = None, encoding: str = None, indicator_type: str = '',
indicator: str = '', fields: str = '{}', feed_url_to_config: dict = None, polling_timeout: int = 20,
headers: dict = None, proxy: bool = False, custom_fields_mapping: dict = None, **kwargs):
"""Implements class for miners of plain text feeds over HTTP.
**Config parameters**
:param: url: URL of the feed.
:param: polling_timeout: timeout of the polling request in seconds.
Default: 20
:param feed_name: The name of the feed.
:param: custom_fields_mapping: Dict, the "fields" to be used in the indicator - where the keys
are the *current* keys of the fields returned feed data and the *values* are the *indicator fields in Demisto*.
:param: headers: dict, Optional list of headers to send in the request.
:param: ignore_regex: Python regular expression for lines that should be
ignored. Default: *null*
:param: insecure: boolean, if *false* feed HTTPS server certificate is
verified. Default: *false*
:param credentials: username and password used for basic authentication.
Can be also used as API key header and value by specifying _header in the username field.
:param: encoding: encoding of the feed, if not UTF-8. See
``str.decode`` for options. Default: *null*, meaning do
nothing, (Assumes UTF-8).
:param: indicator_type: Default indicator type
:param: indicator: an *extraction dictionary* to extract the indicator from
the line. If *null*, the text until the first whitespace or newline
character is used as indicator. Default: *null*
:param: fields: a dictionary of *extraction dictionaries* to extract
additional attributes from each line. Default: {}
:param: feed_url_to_config: For each service, a dictionary to process indicators by.
For example, ASN feed:
'https://www.spamhaus.org/drop/asndrop.txt': {
'indicator_type': ASN,
'indicator': { (Regex to extract the indicator by, if empty - the whole line is extracted)
'regex': r'^AS[0-9]+',
},
'fields': [{ (See Extraction dictionary below)
'asndrop_country': {
'regex': '^.*;\\W([a-zA-Z]+)\\W+',
'transform: r'\1'
}
}]
}
:param: proxy: Use proxy in requests.
**Extraction dictionary**
Extraction dictionaries contain the following keys:
:regex: Python regular expression for searching the text.
:transform: template to generate the final value from the result
of the regular expression. Default: the entire match of the regex
is used as extracted value.
See Python `re <https://docs.python.org/2/library/re.html>`_ module for
details about Python regular expressions and templates.
Example:
Example config in YAML where extraction dictionaries are used to
extract the indicator and additional fields::
url: https://www.dshield.org/block.txt
ignore_regex: "[#S].*"
indicator:
regex: '^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\t([0-9]
{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})'
transform: '\\1-\\2'
fields:
dshield_nattacks:
regex: '^.*\\t.*\\t[0-9]+\\t([0-9]+)'
transform: '\\1'
dshield_name:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t([^\\t]+)'
transform: '\\1'
dshield_country:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t([A-Z]+)'
transform: '\\1'
dshield_email:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t[A-Z]+\\t(\\S+)'
transform: '\\1'
Example config in YAML where the text in each line until the first
whitespace is used as indicator::
url: https://ransomwaretracker.abuse.ch/downloads/CW_C2_URLBL.txt
ignore_regex: '^#'
"""
super().__init__(base_url=url, verify=not insecure, proxy=proxy)
handle_proxy()
try:
self.polling_timeout = int(polling_timeout)
except (ValueError, TypeError):
raise ValueError('Please provide an integer value for "Request Timeout"')
self.headers = headers
self.encoding = encoding
self.feed_name = feed_name
if not credentials:
credentials = {}
self.username = None
self.password = None
username = credentials.get('identifier', '')
if username.startswith('_header:'):
if not self.headers:
self.headers = {}
header_field = username.split(':')
if len(header_field) < 2:
raise ValueError('An incorrect value was provided for an API key header.'
' The correct value is "_header:<header_name>"')
header_name: str = header_field[1]
header_value: str = credentials.get('password', '')
self.headers[header_name] = header_value
else:
self.username = username
self.password = credentials.get('password', None)
self.indicator_type = indicator_type
if feed_url_to_config:
self.feed_url_to_config = feed_url_to_config
else:
self.feed_url_to_config = {url: self.get_feed_config(fields, indicator)}
self.ignore_regex: Optional[Pattern] = None
if ignore_regex is not None:
self.ignore_regex = re.compile(ignore_regex)
if custom_fields_mapping is None:
custom_fields_mapping = {}
self.custom_fields_mapping = custom_fields_mapping
def get_feed_config(self, fields_json: str = '', indicator_json: str = ''):
"""
Get the feed configuration from the indicator and field JSON strings.
:param fields_json: JSON string of fields to extract, for example:
{
'fieldname': {
'regex': regex,
'transform': r'\1'
}
},
{
'asndrop_org': {
'regex': regex,
'transform': r'\1'
}
}
:param indicator_json: JSON string of the indicator to extract, for example:
{'regex': regex}
:return: The feed configuration.
"""
config = {}
if indicator_json:
indicator = json.loads(indicator_json)
if 'regex' in indicator:
indicator['regex'] = re.compile(indicator['regex'])
else:
raise ValueError(f'{self.feed_name} - indicator stanza should have a regex')
if 'transform' not in indicator:
if indicator['regex'].groups > 0:
LOG(f'{self.feed_name} - no transform string for indicator but pattern contains groups')
indicator['transform'] = r'\g<0>'
config['indicator'] = indicator
if fields_json:
fields = json.loads(fields_json)
config['fields'] = []
for f, fattrs in fields.items():
if 'regex' in fattrs:
fattrs['regex'] = re.compile(fattrs['regex'])
else:
raise ValueError(f'{self.feed_name} - {f} field does not have a regex')
if 'transform' not in fattrs:
if fattrs['regex'].groups > 0:
LOG(f'{self.feed_name} - no transform string for field {f} but pattern contains groups')
fattrs['transform'] = r'\g<0>'
config['fields'].append({
f: fattrs
})
return config
def build_iterator(self, **kwargs):
"""
For each URL (service), send an HTTP request to get indicators and return them after filtering by Regex
:param kwargs: Arguments to send to the HTTP API endpoint
:return: List of indicators
"""
kwargs['stream'] = True
kwargs['verify'] = self._verify
kwargs['timeout'] = self.polling_timeout
if self.headers is not None:
kwargs['headers'] = self.headers
if self.username is not None and self.password is not None:
kwargs['auth'] = (self.username, self.password)
try:
urls = self._base_url
url_to_response_list: List[dict] = []
if not isinstance(urls, list):
urls = [urls]
for url in urls:
if is_demisto_version_ge('6.5.0'):
# Set the If-None-Match and If-Modified-Since headers if we have etag or
# last_modified values in the context, for server version higher than 6.5.0.
last_run = demisto.getLastRun()
etag = last_run.get(url, {}).get('etag')
last_modified = last_run.get(url, {}).get('last_modified')
if etag:
if not kwargs.get('headers'):
kwargs['headers'] = {}
kwargs['headers']['If-None-Match'] = etag
if last_modified:
if not kwargs.get('headers'):
kwargs['headers'] = {}
kwargs['headers']['If-Modified-Since'] = last_modified
r = requests.get(
url,
**kwargs
)
try:
r.raise_for_status()
except Exception:
LOG(f'{self.feed_name!r} - exception in request:'
f' {r.status_code!r} {r.content!r}')
raise
no_update = get_no_update_value(r, url) if is_demisto_version_ge('6.5.0') else True
url_to_response_list.append({url: {'response': r, 'no_update': no_update}})
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
# in case the "Trust any certificate" is already checked
if not self._verify:
raise
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = 'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
'\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
.format(err_type, exception.errno, exception.strerror)
raise DemistoException(err_msg, exception)
results = []
for url_to_response in url_to_response_list:
for url, res_data in url_to_response.items():
lines = res_data.get('response')
result = lines.iter_lines()
if self.encoding is not None:
result = map(
lambda x: x.decode(self.encoding).encode('utf_8'),
result
)
else:
result = map(
lambda x: x.decode('utf_8'),
result
)
if self.ignore_regex is not None:
result = filter(
lambda x: self.ignore_regex.match(x) is None, # type: ignore[union-attr]
result
)
results.append({url: {'result': result, 'no_update': res_data.get('no_update')}})
return results
def custom_fields_creator(self, attributes: dict):
created_custom_fields = {}
for attribute in attributes.keys():
if attribute in self.custom_fields_mapping.keys() or attribute in [TAGS, TLP_COLOR]:
if attribute in [TAGS, TLP_COLOR]:
created_custom_fields[attribute] = attributes[attribute]
else:
created_custom_fields[self.custom_fields_mapping[attribute]] = attributes[attribute]
return created_custom_fields
def get_no_update_value(response: requests.Response, url: str) -> bool:
"""
    Detect whether the feed response has been modified, based on the ETag and Last-Modified headers.
For more information, see this:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
response: (requests.Response) The feed response.
url: (str) The feed URL (service).
Returns:
boolean with the value for noUpdate argument.
The value should be False if the response was modified.
"""
if response.status_code == 304:
demisto.debug('No new indicators fetched, createIndicators will be executed with noUpdate=True.')
return True
etag = response.headers.get('ETag')
last_modified = response.headers.get('Last-Modified')
if not etag and not last_modified:
        demisto.debug('Last-Modified and ETag headers do not exist, '
                      'createIndicators will be executed with noUpdate=False.')
return False
last_run = demisto.getLastRun()
last_run[url] = {'last_modified': last_modified, 'etag': etag}
demisto.setLastRun(last_run)
demisto.debug('New indicators fetched - the Last-Modified value has been updated,'
' createIndicators will be executed with noUpdate=False.')
return False
def datestring_to_server_format(date_string: str) -> str:
"""
    Formats a date string to the ISO-8601 format which the server expects to receive.
    :param date_string: Date represented as a string
:return: ISO-8601 date string
"""
parsed_date = dateparser.parse(date_string, settings={'TIMEZONE': 'UTC'})
return parsed_date.strftime(DATE_FORMAT) # type: ignore
def get_indicator_fields(line, url, feed_tags: list, tlp_color: Optional[str], client: Client):
"""
Extract indicators according to the feed type
:param line: The current line in the feed
:param url: The feed URL
:param client: The client
:param feed_tags: The indicator tags.
:param tlp_color: Traffic Light Protocol color.
:return: The indicator
"""
attributes = None
value: str = ''
indicator = None
fields_to_extract = []
feed_config = client.feed_url_to_config.get(url, {})
if feed_config:
if 'indicator' in feed_config:
indicator = feed_config['indicator']
if 'regex' in indicator:
indicator['regex'] = re.compile(indicator['regex'])
if 'transform' not in indicator:
indicator['transform'] = r'\g<0>'
if 'fields' in feed_config:
fields = feed_config['fields']
for field in fields:
for f, fattrs in field.items():
field = {f: {}}
if 'regex' in fattrs:
field[f]['regex'] = re.compile(fattrs['regex'])
if 'transform' not in fattrs:
field[f]['transform'] = r'\g<0>'
else:
field[f]['transform'] = fattrs['transform']
fields_to_extract.append(field)
line = line.strip()
if line:
extracted_indicator = line.split()[0]
if indicator:
extracted_indicator = indicator['regex'].search(line)
if extracted_indicator is None:
return attributes, value
if 'transform' in indicator:
extracted_indicator = extracted_indicator.expand(indicator['transform'])
attributes = {}
for field in fields_to_extract:
for f, fattrs in field.items():
m = fattrs['regex'].search(line)
if m is None:
continue
attributes[f] = m.expand(fattrs['transform'])
try:
i = int(attributes[f])
except Exception:
pass
else:
attributes[f] = i
attributes['value'] = value = extracted_indicator
attributes['type'] = feed_config.get('indicator_type', client.indicator_type)
attributes['tags'] = feed_tags
if tlp_color:
attributes['trafficlightprotocol'] = tlp_color
return attributes, value
def fetch_indicators_command(client, feed_tags, tlp_color, itype, auto_detect, create_relationships=False, **kwargs):
iterators = client.build_iterator(**kwargs)
indicators = []
# set noUpdate flag in createIndicators command True only when all the results from all the urls are True.
no_update = all([next(iter(iterator.values())).get('no_update', False) for iterator in iterators])
for iterator in iterators:
for url, lines in iterator.items():
for line in lines.get('result', []):
attributes, value = get_indicator_fields(line, url, feed_tags, tlp_color, client)
if value:
if 'lastseenbysource' in attributes.keys():
attributes['lastseenbysource'] = datestring_to_server_format(attributes['lastseenbysource'])
if 'firstseenbysource' in attributes.keys():
attributes['firstseenbysource'] = datestring_to_server_format(attributes['firstseenbysource'])
indicator_type = determine_indicator_type(
client.feed_url_to_config.get(url, {}).get('indicator_type'), itype, auto_detect, value)
indicator_data = {
"value": value,
"type": indicator_type,
"rawJSON": attributes,
}
if create_relationships and client.feed_url_to_config.get(url, {}).get('relationship_name'):
if attributes.get('relationship_entity_b'):
relationships_lst = EntityRelationship(
name=client.feed_url_to_config.get(url, {}).get('relationship_name'),
entity_a=value,
entity_a_type=indicator_type,
entity_b=attributes.get('relationship_entity_b'),
entity_b_type=FeedIndicatorType.indicator_type_by_server_version(
client.feed_url_to_config.get(url, {}).get('relationship_entity_b_type')),
)
relationships_of_indicator = [relationships_lst.to_indicator()]
indicator_data['relationships'] = relationships_of_indicator
if len(client.custom_fields_mapping.keys()) > 0 or TAGS in attributes.keys():
custom_fields = client.custom_fields_creator(attributes)
indicator_data["fields"] = custom_fields
indicators.append(indicator_data)
return indicators, no_update
def determine_indicator_type(indicator_type, default_indicator_type, auto_detect, value):
"""
Detect the indicator type of the given value.
Args:
indicator_type: (str) Indicator type given in the config.
default_indicator_type: Indicator type which was inserted as a param of the integration by user.
auto_detect: (bool) True whether auto detection of the indicator type is wanted.
value: (str) The value which we'd like to get indicator type of.
Returns:
Str which stands for the indicator type after detection.
"""
if auto_detect:
indicator_type = auto_detect_indicator_type(value)
if not indicator_type:
indicator_type = default_indicator_type
return indicator_type
def get_indicators_command(client: Client, args):
itype = args.get('indicator_type', client.indicator_type)
limit = int(args.get('limit'))
feed_tags = args.get('feedTags')
tlp_color = args.get('tlp_color')
auto_detect = demisto.params().get('auto_detect_type')
create_relationships = demisto.params().get('create_relationships')
    # fetch_indicators_command returns (indicators, no_update); the limit applies
    # to the indicators list, not to the returned tuple.
    indicators_list, _ = fetch_indicators_command(client, feed_tags, tlp_color, itype, auto_detect, create_relationships)
    indicators_list = indicators_list[:limit]
    entry_result = camelize(indicators_list)
hr = tableToMarkdown('Indicators', entry_result, headers=['Value', 'Type', 'Rawjson'])
return hr, {}, indicators_list
def test_module(client: Client, args):
if not client.feed_url_to_config:
indicator_type = args.get('indicator_type', demisto.params().get('indicator_type'))
if not FeedIndicatorType.is_valid_type(indicator_type):
indicator_types = []
for key, val in vars(FeedIndicatorType).items():
if not key.startswith('__') and type(val) == str:
indicator_types.append(val)
supported_values = ', '.join(indicator_types)
raise ValueError(f'Indicator type of {indicator_type} is not supported. Supported values are:'
f' {supported_values}')
client.build_iterator()
return 'ok', {}, {}
def feed_main(feed_name, params=None, prefix=''):
if not params:
params = assign_params(**demisto.params())
if 'feed_name' not in params:
params['feed_name'] = feed_name
feed_tags = argToList(demisto.params().get('feedTags'))
tlp_color = demisto.params().get('tlp_color')
client = Client(**params)
command = demisto.command()
if command != 'fetch-indicators':
demisto.info('Command being called is {}'.format(command))
if prefix and not prefix.endswith('-'):
prefix += '-'
# Switch case
commands: dict = {
'test-module': test_module,
f'{prefix}get-indicators': get_indicators_command
}
try:
if command == 'fetch-indicators':
indicators, no_update = fetch_indicators_command(client, feed_tags, tlp_color,
params.get('indicator_type'),
params.get('auto_detect_type'),
params.get('create_relationships'))
# check if the version is higher than 6.5.0 so we can use noUpdate parameter
if is_demisto_version_ge('6.5.0'):
if not indicators:
demisto.createIndicators(indicators, noUpdate=no_update) # type: ignore
else:
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b, noUpdate=no_update) # type: ignore
else:
# call createIndicators without noUpdate arg
if not indicators:
demisto.createIndicators(indicators) # type: ignore
else:
for b in batch(indicators, batch_size=2000): # type: ignore
demisto.createIndicators(b)
else:
args = demisto.args()
args['feed_name'] = feed_name
if feed_tags:
args['feedTags'] = feed_tags
if tlp_color:
args['tlp_color'] = tlp_color
readable_output, outputs, raw_response = commands[command](client, args)
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {feed_name} integration [{e}]'
return_error(err_msg, error=e)
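The extraction-dictionary mechanism documented in the Client docstring,
isolated as a standalone sketch (plain re, no demisto imports; the feed line
and field name are illustrative):

import re

indicator = {"regex": re.compile(r"^AS[0-9]+"), "transform": r"\g<0>"}
fields = {"asndrop_country": {"regex": re.compile(r";\W*([A-Z]{2})\b"), "transform": r"\1"}}

line = "AS12345 ; US example-org"
value = indicator["regex"].search(line).expand(indicator["transform"])  # 'AS12345'
attributes = {}
for name, fattrs in fields.items():
    m = fattrs["regex"].search(line)
    if m is not None:
        attributes[name] = m.expand(fattrs["transform"])  # {'asndrop_country': 'US'}
print(value, attributes)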
923d6a7715e6e97fb131b92a52eb5794d81fa877 | 72eda38ed90de892c13473963b8fe6a768862693 | /tests/test_transforms.py | 68dcce3f5b9c6c09c3ffd08a70e9d3768818d6f3 | ["MIT"] | permissive | wolny/pytorch-3dunet | 22af692a79d38fe10b2d9ad841b54d797ea96cc4 | 5137f914f8c0f2af30123aacfc3091bc6de1e062 | refs/heads/master | 2023-09-05T17:02:08.668258 | 2023-07-31T11:03:34 | 2023-07-31T11:03:34 | 149,826,542 | 1,754 | 503 | MIT | 2023-08-28T09:30:44 | 2018-09-21T22:53:31 | Jupyter Notebook | UTF-8 | Python | false | false | 9,040 | py | test_transforms.py |
import numpy as np
from pytorch3dunet.augment.transforms import RandomLabelToAffinities, LabelToAffinities, Transformer, Relabel, \
CropToFixed
class TestTransforms:
config = {'dtype': 'long'}
def test_random_label_to_boundary(self):
size = 20
label = _diagonal_label_volume(size)
transform = RandomLabelToAffinities(np.random.RandomState())
result = transform(label)
assert result.shape == (1,) + label.shape
def test_random_label_to_boundary_with_ignore(self):
size = 20
label = _diagonal_label_volume(size, init=-1)
transform = RandomLabelToAffinities(np.random.RandomState(), ignore_index=-1)
result = transform(label)
assert result.shape == (1,) + label.shape
assert -1 in np.unique(result)
def test_label_to_boundary(self):
size = 20
label = _diagonal_label_volume(size)
# this transform will produce 2 channels
transform = LabelToAffinities(offsets=(2, 4), aggregate_affinities=True)
result = transform(label)
assert result.shape == (2,) + label.shape
assert np.array_equal(np.unique(result), [0, 1])
def test_label_to_boundary_with_ignore(self):
size = 20
label = _diagonal_label_volume(size, init=-1)
transform = LabelToAffinities(offsets=(2, 4), ignore_index=-1, aggregate_affinities=True)
result = transform(label)
assert result.shape == (2,) + label.shape
assert np.array_equal(np.unique(result), [-1, 0, 1])
def test_label_to_boundary_no_aggregate(self):
size = 20
label = _diagonal_label_volume(size)
# this transform will produce 6 channels
transform = LabelToAffinities(offsets=(2, 4), aggregate_affinities=False)
result = transform(label)
assert result.shape == (6,) + label.shape
assert np.array_equal(np.unique(result), [0, 1])
def test_relabel(self):
label = np.array([[10, 10, 10], [0, 0, 0], [5, 5, 5]])
r = Relabel()
result = r(label)
assert np.array_equal(result, np.array([[1, 1, 1], [0, 0, 0], [2, 2, 2]]))
def test_BaseTransformer(self):
config = {
'raw': [{'name': 'Standardize'}, {'name': 'ToTensor', 'expand_dims': True}],
'label': [{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}],
'weight': [{'name': 'ToTensor', 'expand_dims': False}]
}
base_config = {'mean': 0, 'std': 1}
transformer = Transformer(config, base_config)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].expand_dims
label_transforms = transformer.label_transform().transforms
assert not label_transforms[0].expand_dims
assert label_transforms[0].dtype == 'long'
weight_transforms = transformer.weight_transform().transforms
assert not weight_transforms[0].expand_dims
def test_StandardTransformer(self):
config = {
'raw': [
{'name': 'Standardize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
base_config = {'mean': 0, 'std': 1}
transformer = Transformer(config, base_config)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].expand_dims
label_transforms = transformer.label_transform().transforms
assert len(label_transforms) == 3
def test_AnisotropicRotationTransformer(self):
config = {
'raw': [
{'name': 'Standardize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]]},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]]},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
base_config = {'mean': 0, 'std': 1}
transformer = Transformer(config, base_config)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].angle_spectrum == 17
assert raw_transforms[4].axes == [[2, 1]]
label_transforms = transformer.label_transform().transforms
assert len(label_transforms) == 4
def test_LabelToBoundaryTransformer(self):
config = {
'raw': [
{'name': 'Standardize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'LabelToAffinities', 'offsets': [2, 4, 6, 8]},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
base_config = {'mean': 0, 'std': 1}
transformer = Transformer(config, base_config)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].angle_spectrum == 17
assert raw_transforms[4].axes == [[2, 1]]
assert raw_transforms[4].mode == 'reflect'
label_transforms = transformer.label_transform().transforms
assert label_transforms[2].angle_spectrum == 17
assert label_transforms[2].axes == [[2, 1]]
assert label_transforms[2].mode == 'reflect'
# 3 conv kernels per offset
assert len(label_transforms[3].kernels) == 12
def test_RandomLabelToBoundaryTransformer(self):
config = {
'raw': [
{'name': 'Normalize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'RandomLabelToAffinities', 'max_offset': 4},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
base_config = {'mean': 0, 'std': 1}
transformer = Transformer(config, base_config)
label_transforms = transformer.label_transform().transforms
assert label_transforms[3].offsets == (1, 2, 3, 4)
def test_crop_to_fixed_when_crop_bigger_than_volume(self):
m = np.random.rand(200, 200, 200)
rs1 = np.random.RandomState(13)
t = CropToFixed(rs1, size=(256, 256))
expected = np.pad(m, ((0, 0), (28, 28), (28, 28)), mode='reflect')
assert np.array_equal(expected, t(m))
def test_crop_to_fixed(self):
m = np.random.rand(200, 200, 200)
rs1 = np.random.RandomState(13)
rs2 = np.random.RandomState(13)
t = CropToFixed(rs1, size=(128, 128))
r = 200 - 128
y_start = rs2.randint(r)
x_start = rs2.randint(r)
m_crop = m[:, y_start:y_start + 128, x_start:x_start + 128]
assert np.array_equal(m_crop, t(m))
def _diagonal_label_volume(size, init=1):
label = init * np.ones((size, size, size), dtype=np.int32)
for i in range(size):
for j in range(size):
for k in range(size):
if i + j > 2 * k:
label[i, j, k] = 3
return label
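A minimal sketch of the Transformer API the tests above exercise (same config
shape; the patch size is illustrative): build the pipelines from a config
dict and apply the raw pipeline to a volume.

import numpy as np
from pytorch3dunet.augment.transforms import Transformer

config = {
    'raw': [{'name': 'Standardize'}, {'name': 'ToTensor', 'expand_dims': True}],
    'label': [{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}],
}
transformer = Transformer(config, {'mean': 0, 'std': 1})
raw_patch = np.random.rand(32, 64, 64).astype('float32')
tensor = transformer.raw_transform()(raw_patch)  # standardized, shape (1, 32, 64, 64)
print(tensor.shape)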
1d096062db4e045a9b60bd48369f6f02700e4d44 | e8201f803bb23a1b9a3eab9fc0fc9b1709e65d2e | /manim_ml/neural_network/layers/convolutional_2d.py | 48849d89b71a4332919b767ea1fa49a46380dab2 | ["MIT"] | permissive | helblazer811/ManimML | 20bc3548ceab75745a8d8088929fec51057e130f | 5df233ea90aba16611d29c6a4b7717eb08ae7e09 | refs/heads/main | 2023-08-09T07:50:38.605540 | 2023-07-22T02:43:52 | 2023-07-22T02:43:52 | 454,906,591 | 1,339 | 73 | MIT | 2023-04-11T02:22:49 | 2022-02-02T19:26:55 | Python | UTF-8 | Python | false | false | 10,815 | py | convolutional_2d.py |
from typing import Union
from manim_ml.neural_network.activation_functions import get_activation_function_by_name
from manim_ml.neural_network.activation_functions.activation_function import (
ActivationFunction,
)
import numpy as np
from manim import *
import manim_ml
from manim_ml.neural_network.layers.parent_layers import (
ThreeDLayer,
VGroupNeuralNetworkLayer,
)
from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle
class FeatureMap(VGroup):
"""Class for making a feature map"""
def __init__(
self,
color=ORANGE,
feature_map_size=None,
fill_color=ORANGE,
fill_opacity=0.2,
cell_width=0.2,
padding=(0, 0),
stroke_width=2.0,
show_grid_lines=False,
padding_dashed=False,
):
super().__init__()
self.color = color
self.feature_map_size = feature_map_size
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.cell_width = cell_width
self.padding = padding
self.stroke_width = stroke_width
self.show_grid_lines = show_grid_lines
self.padding_dashed = padding_dashed
# Check if we have non-zero padding
if padding[0] > 0 or padding[1] > 0:
# Make the exterior rectangle dashed
width_with_padding = (
self.feature_map_size[0] + self.padding[0] * 2
) * self.cell_width
height_with_padding = (
self.feature_map_size[1] + self.padding[1] * 2
) * self.cell_width
self.untransformed_width = width_with_padding
self.untransformed_height = height_with_padding
self.exterior_rectangle = GriddedRectangle(
color=self.color,
width=width_with_padding,
height=height_with_padding,
fill_color=self.color,
fill_opacity=self.fill_opacity,
stroke_color=self.color,
stroke_width=self.stroke_width,
grid_xstep=self.cell_width,
grid_ystep=self.cell_width,
grid_stroke_width=self.stroke_width / 2,
grid_stroke_color=self.color,
show_grid_lines=self.show_grid_lines,
dotted_lines=self.padding_dashed,
)
self.add(self.exterior_rectangle)
# Add an interior rectangle with no fill color
self.interior_rectangle = GriddedRectangle(
color=self.color,
fill_opacity=0.0,
width=self.feature_map_size[0] * self.cell_width,
height=self.feature_map_size[1] * self.cell_width,
stroke_width=self.stroke_width,
)
self.add(self.interior_rectangle)
else:
# Just make an exterior rectangle with no dashes.
            self.untransformed_height = self.feature_map_size[1] * self.cell_width
            self.untransformed_width = self.feature_map_size[0] * self.cell_width
# Make the exterior rectangle
self.exterior_rectangle = GriddedRectangle(
color=self.color,
height=self.feature_map_size[1] * self.cell_width,
width=self.feature_map_size[0] * self.cell_width,
fill_color=self.color,
fill_opacity=self.fill_opacity,
stroke_color=self.color,
stroke_width=self.stroke_width,
grid_xstep=self.cell_width,
grid_ystep=self.cell_width,
grid_stroke_width=self.stroke_width / 2,
grid_stroke_color=self.color,
show_grid_lines=self.show_grid_lines,
)
self.add(self.exterior_rectangle)
def get_corners_dict(self):
"""Returns a dictionary of the corners"""
# Sort points through clockwise rotation of a vector in the xy plane
return self.exterior_rectangle.get_corners_dict()
class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
"""Handles rendering a convolutional layer for a nn"""
def __init__(
self,
num_feature_maps,
feature_map_size=None,
filter_size=None,
cell_width=0.2,
filter_spacing=0.1,
color=BLUE,
active_color=ORANGE,
filter_color=ORANGE,
show_grid_lines=False,
fill_opacity=0.3,
stride=1,
stroke_width=2.0,
activation_function=None,
padding=0,
padding_dashed=True,
**kwargs,
):
super().__init__(**kwargs)
self.num_feature_maps = num_feature_maps
self.filter_color = filter_color
if isinstance(padding, tuple):
assert len(padding) == 2
self.padding = padding
elif isinstance(padding, int):
self.padding = (padding, padding)
else:
raise Exception(f"Unrecognized type for padding: {type(padding)}")
if isinstance(feature_map_size, int):
self.feature_map_size = (feature_map_size, feature_map_size)
else:
self.feature_map_size = feature_map_size
if isinstance(filter_size, int):
self.filter_size = (filter_size, filter_size)
else:
self.filter_size = filter_size
self.cell_width = cell_width
self.filter_spacing = filter_spacing
self.color = color
self.active_color = active_color
self.stride = stride
self.stroke_width = stroke_width
self.show_grid_lines = show_grid_lines
self.activation_function = activation_function
self.fill_opacity = fill_opacity
self.padding_dashed = padding_dashed
def construct_layer(
self,
input_layer: "NeuralNetworkLayer",
output_layer: "NeuralNetworkLayer",
**kwargs,
):
# Make the feature maps
self.feature_maps = self.construct_feature_maps()
self.add(self.feature_maps)
# Rotate stuff properly
# normal_vector = self.feature_maps[0].get_normal_vector()
self.rotate(
manim_ml.config.three_d_config.rotation_angle,
about_point=self.get_center(),
axis=manim_ml.config.three_d_config.rotation_axis,
)
self.construct_activation_function()
super().construct_layer(input_layer, output_layer, **kwargs)
def construct_activation_function(self):
"""Construct the activation function"""
# Add the activation function
        if self.activation_function is not None:
# Check if it is a string
if isinstance(self.activation_function, str):
activation_function = get_activation_function_by_name(
self.activation_function
)()
else:
assert isinstance(self.activation_function, ActivationFunction)
activation_function = self.activation_function
# Plot the function above the rest of the layer
self.activation_function = activation_function
self.add(self.activation_function)
def construct_feature_maps(self):
"""Creates the neural network layer"""
# Draw rectangles that are filled in with opacity
feature_maps = []
for filter_index in range(self.num_feature_maps):
# Check if we need to add padding
"""
feature_map = GriddedRectangle(
color=self.color,
height=self.feature_map_size[1] * self.cell_width,
width=self.feature_map_size[0] * self.cell_width,
fill_color=self.color,
fill_opacity=self.fill_opacity,
stroke_color=self.color,
stroke_width=self.stroke_width,
grid_xstep=self.cell_width,
grid_ystep=self.cell_width,
grid_stroke_width=self.stroke_width / 2,
grid_stroke_color=self.color,
show_grid_lines=self.show_grid_lines,
)
"""
# feature_map = GriddedRectangle()
feature_map = FeatureMap(
color=self.color,
feature_map_size=self.feature_map_size,
cell_width=self.cell_width,
fill_color=self.color,
fill_opacity=self.fill_opacity,
padding=self.padding,
padding_dashed=self.padding_dashed,
)
# Move the feature map
feature_map.move_to([0, 0, filter_index * self.filter_spacing])
# rectangle.set_z_index(4)
feature_maps.append(feature_map)
return VGroup(*feature_maps)
def highlight_and_unhighlight_feature_maps(self):
"""Highlights then unhighlights feature maps"""
return Succession(
ApplyMethod(self.feature_maps.set_color, self.active_color),
ApplyMethod(self.feature_maps.set_color, self.color),
)
def make_forward_pass_animation(self, run_time=5, layer_args={}, **kwargs):
"""Convolution forward pass animation"""
# Note: most of this animation is done in the Convolution3DToConvolution3D layer
        if self.activation_function is not None:
animation_group = AnimationGroup(
self.activation_function.make_evaluate_animation(),
self.highlight_and_unhighlight_feature_maps(),
lag_ratio=0.0,
)
else:
animation_group = AnimationGroup()
return animation_group
def scale(self, scale_factor, **kwargs):
self.cell_width *= scale_factor
super().scale(scale_factor, **kwargs)
def get_center(self):
"""Overrides function for getting center
The reason for this is so that the center calculation
does not include the activation function.
"""
return self.feature_maps.get_center()
def get_width(self):
"""Overrides get width function"""
return self.feature_maps.length_over_dim(0)
def get_height(self):
"""Overrides get height function"""
return self.feature_maps.length_over_dim(1)
def move_to(self, mobject_or_point):
"""Moves the center of the layer to the given mobject or point"""
layer_center = self.feature_maps.get_center()
if isinstance(mobject_or_point, Mobject):
target_center = mobject_or_point.get_center()
else:
target_center = mobject_or_point
self.shift(target_center - layer_center)
@override_animation(Create)
def _create_override(self, **kwargs):
return FadeIn(self.feature_maps)
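A minimal scene sketch (assuming the NeuralNetwork container exported by
manim_ml.neural_network; the layer sizes are illustrative):

from manim import Scene
from manim_ml.neural_network import NeuralNetwork, Convolutional2DLayer

class ConvScene(Scene):
    def construct(self):
        nn = NeuralNetwork([
            Convolutional2DLayer(1, feature_map_size=7, filter_size=3),
            Convolutional2DLayer(3, feature_map_size=5, filter_size=3),
        ], layer_spacing=0.25)
        self.add(nn)
        self.play(nn.make_forward_pass_animation())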
f95c573b950795f59ec602ce43bd9848317d979e | c13063d05915b91650f08ef209fc89bd2ed617d6 | /uer/trainer.py | 34a345f875af77dc616fd650d939d550b50764d9 | ["MIT"] | permissive | autoliuweijie/K-BERT | aad3fadb382e51cf8df1ed58cd8c9c26fb35f3e9 | da9358edb3b3f59e3ad6b5aab6af6df624b881ab | refs/heads/master | 2023-02-07T15:40:46.979324 | 2022-08-15T06:45:47 | 2022-08-15T06:45:47 | 208,968,100 | 940 | 225 | null | 2023-01-27T09:41:49 | 2019-09-17T05:40:54 | Python | UTF-8 | Python | false | false | 20,038 | py | trainer.py |
# -*- encoding:utf-8 -*-
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import BertAdam
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def train_and_validate(args):
set_seed(args.seed)
# Load vocabulary.
vocab = Vocab()
vocab.load(args.vocab_path)
args.vocab = vocab
# Build model.
model = build_model(args)
# Load or initialize parameters.
if args.pretrained_model_path is not None:
# Initialize with pretrained model.
model.load_state_dict(torch.load(args.pretrained_model_path), strict=False)
else:
# Initialize with normal distribution.
for n, p in list(model.named_parameters()):
if 'gamma' not in n and 'beta' not in n:
p.data.normal_(0, 0.02)
if args.dist_train:
# Multiprocessing distributed mode.
mp.spawn(worker, nprocs=args.ranks_num, args=(args.gpu_ranks, args, model), daemon=False)
elif args.single_gpu:
# Single GPU mode.
worker(args.gpu_id, None, args, model)
else:
# CPU mode.
worker(None, None, args, model)
def worker(gpu_id, gpu_ranks, args, model):
"""
Args:
gpu_id: The id of GPU for single GPU mode;
The id of process (and GPU) for multiprocessing distributed mode.
gpu_ranks: List of ranks of each process.
"""
set_seed(args.seed)
if gpu_ranks is None:
train_loader = globals()[args.target.capitalize() + "DataLoader"](args, args.dataset_path, args.batch_size, gpu_id, 1, True)
else:
train_loader = globals()[args.target.capitalize() + "DataLoader"](args, args.dataset_path, args.batch_size, gpu_id, len(gpu_ranks), True)
if gpu_id is not None:
torch.cuda.set_device(gpu_id)
model.cuda(gpu_id)
# Build optimizer.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup, t_total=args.total_steps)
rank = -1 # Each process has a unique rank in multiprocessing distributed mode.
if args.dist_train:
rank = gpu_ranks[gpu_id]
# Initialize multiprocessing distributed training environment.
dist.init_process_group(backend=args.backend,
init_method=args.master_ip,
world_size=args.world_size,
rank=rank)
model = DistributedDataParallel(model, device_ids=[gpu_id])
print("Worker %d is training ... " % rank)
else:
print("Worker is training ...")
globals().get("train_"+args.target)(args, gpu_id, rank, train_loader, model, optimizer)
def train_bert(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
total_loss, total_loss_mlm, total_loss_nsp = 0., 0., 0.
# Calculate MLM accuracy.
total_correct_mlm, total_denominator = 0., 0.
# Calculate NSP accuracy.
total_correct_nsp, total_instances = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt_mlm, tgt_nsp, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt_mlm = tgt_mlm.cuda(gpu_id)
tgt_nsp = tgt_nsp.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, (tgt_mlm, tgt_nsp), seg)
loss_mlm, loss_nsp, correct_mlm, correct_nsp, denominator = loss_info
# Backward.
loss = loss_mlm + loss_nsp
total_loss += loss.item()
total_loss_mlm += loss_mlm.item()
total_loss_nsp += loss_nsp.item()
total_correct_mlm += correct_mlm.item()
total_correct_nsp += correct_nsp.item()
total_denominator += denominator.item()
total_instances += src.size(0)
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
loss_mlm = total_loss_mlm / args.report_steps
loss_nsp = total_loss_nsp / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| loss_mlm: {:3.3f}"
"| loss_nsp: {:3.3f}"
"| acc_mlm: {:3.3f}"
"| acc_nsp: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
loss_mlm,
loss_nsp,
total_correct_mlm / total_denominator,
total_correct_nsp / total_instances))
total_loss, total_loss_mlm, total_loss_nsp = 0., 0., 0.
total_correct_mlm, total_denominator = 0., 0.
total_correct_nsp, total_instances = 0., 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_lm(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
total_loss = 0.
    # Calculate LM accuracy.
    total_correct, total_denominator = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt = tgt.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, tgt, seg)
loss, correct, denominator = loss_info
# Backward.
total_loss += loss.item()
total_correct += correct.item()
total_denominator += denominator.item()
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| acc: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
total_correct / total_denominator))
total_loss = 0.
total_correct, total_denominator = 0., 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_bilm(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
total_loss, total_loss_forward, total_loss_backward = 0., 0., 0.
# Calculate BiLM accuracy.
total_correct_forward, total_correct_backward, total_denominator = 0., 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt_forward, tgt_backward, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt_forward = tgt_forward.cuda(gpu_id)
tgt_backward = tgt_backward.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, (tgt_forward, tgt_backward), seg)
loss_forward, loss_backward, correct_forward, correct_backward, denominator = loss_info
# Backward.
loss = loss_forward + loss_backward
total_loss += loss.item()
total_loss_forward += loss_forward.item()
total_loss_backward += loss_backward.item()
total_correct_forward += correct_forward.item()
total_correct_backward += correct_backward.item()
total_denominator += denominator.item()
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| loss_forward {:3.3f}"
"| loss_backward {:3.3f}"
"| acc_forward: {:3.3f}"
"| acc_backward: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
loss_forward,
loss_backward,
total_correct_forward / total_denominator,
total_correct_backward / total_denominator))
total_loss, total_loss_forward, total_loss_backward = 0., 0., 0.
total_correct_forward, total_correct_backward, total_denominator = 0., 0., 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_cls(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
total_loss = 0.
total_correct, total_instances = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt = tgt.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, tgt, seg)
loss, correct = loss_info
# Backward.
total_loss += loss.item()
total_correct += correct.item()
total_instances += src.size(0)
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| acc: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
total_correct / total_instances))
total_loss = 0.
total_correct = 0.
total_instances = 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_mlm(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
    total_loss = 0.
    # Calculate MLM accuracy.
    total_correct, total_denominator = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt = tgt.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, tgt, seg)
loss, correct, denominator = loss_info
# Backward.
total_loss += loss.item()
total_correct += correct.item()
total_denominator += denominator.item()
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| acc: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
total_correct / total_denominator))
total_loss = 0.
total_correct, total_denominator = 0., 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_nsp(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
total_loss = 0.
total_correct, total_instances = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt = tgt.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, tgt, seg)
loss, correct = loss_info
# Backward.
total_loss += loss.item()
total_correct += correct.item()
total_instances += src.size(0)
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| acc: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
total_correct / total_instances))
total_loss = 0.
total_correct = 0.
total_instances = 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
def train_s2s(args, gpu_id, rank, loader, model, optimizer):
model.train()
start_time = time.time()
    total_loss = 0.
total_correct, total_denominator = 0., 0.
steps = 1
total_steps = args.total_steps
loader_iter = iter(loader)
while True:
if steps == total_steps + 1:
break
src, tgt, seg = next(loader_iter)
if gpu_id is not None:
src = src.cuda(gpu_id)
tgt = tgt.cuda(gpu_id)
seg = seg.cuda(gpu_id)
# Forward.
loss_info = model(src, tgt, seg)
loss, correct, denominator = loss_info
# Backward.
total_loss += loss.item()
total_correct += correct.item()
total_denominator += denominator.item()
loss = loss / args.accumulation_steps
loss.backward()
if steps % args.accumulation_steps == 0:
optimizer.step()
model.zero_grad()
if steps % args.report_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
loss = total_loss / args.report_steps
elapsed = time.time() - start_time
done_tokens = \
args.batch_size * src.size(1) * args.report_steps * args.world_size \
if args.dist_train \
else args.batch_size * src.size(1) * args.report_steps
print("| {:8d}/{:8d} steps"
"| {:8.2f} tokens/s"
"| loss {:7.2f}"
"| acc: {:3.3f}".format(
steps,
total_steps,
done_tokens / elapsed,
loss,
total_correct / total_denominator))
total_loss = 0.
total_correct, total_denominator = 0., 0.
start_time = time.time()
if steps % args.save_checkpoint_steps == 0 and \
(not args.dist_train or (args.dist_train and rank == 0)):
save_model(model, args.output_model_path + "-" + str(steps))
steps += 1
---
blob_id: 1d3106b98beb1a341ce13d5cb52585914b45b6c8
directory_id: 80f94bea418d7956df1ba19d4d6a1d7715a94ade
path: /lib/galaxy/webapps/galaxy/services/quotas.py
content_id: ca6ef0545924b12dc3ae586ee0c48f52dd5a0f6f
detected_licenses: CC-BY-2.5, MIT, CC-BY-3.0, AFL-3.0
license_type: permissive
repo_name: galaxyproject/galaxy
snapshot_id: 5748409eb6693b1611f289d164f85e20c3237495
revision_id: b9ae7a16ba0465995e880ae9701b7e87226b9bab
branch_name: refs/heads/dev
visit_date: 2023-08-28T22:35:51.248138
revision_date: 2023-08-26T08:02:33
committer_date: 2023-08-26T08:02:33
github_id: 31211061
star_events_count: 1277
fork_events_count: 1137
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T19:39:01
gha_created_at: 2015-02-23T14:18:06
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6143
extension: py
filename: quotas.py
content:
import logging
from typing import Optional
from sqlalchemy import (
false,
true,
)
from galaxy import (
model,
util,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.quotas import QuotaManager
from galaxy.quota._schema import (
CreateQuotaParams,
CreateQuotaResult,
DefaultQuotaValues,
DeleteQuotaPayload,
QuotaDetails,
QuotaSummaryList,
UpdateQuotaParams,
)
from galaxy.schema.fields import DecodedDatabaseIdField
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.web import url_for
from galaxy.webapps.galaxy.services.base import ServiceBase
log = logging.getLogger(__name__)
class QuotasService(ServiceBase):
"""Interface/service object shared by controllers for interacting with quotas."""
def __init__(self, security: IdEncodingHelper, quota_manager: QuotaManager):
super().__init__(security)
self.quota_manager = quota_manager
def index(self, trans: ProvidesUserContext, deleted: bool = False) -> QuotaSummaryList:
"""Displays a list of quotas."""
rval = []
query = trans.sa_session.query(model.Quota)
if deleted:
route = "deleted_quota"
query = query.filter(model.Quota.deleted == true())
else:
route = "quota"
query = query.filter(model.Quota.deleted == false())
for quota in query:
item = quota.to_dict(value_mapper={"id": DecodedDatabaseIdField.encode})
encoded_id = DecodedDatabaseIdField.encode(quota.id)
item["url"] = url_for(route, id=encoded_id)
rval.append(item)
return QuotaSummaryList.construct(__root__=rval)
def show(self, trans: ProvidesUserContext, id: DecodedDatabaseIdField, deleted: bool = False) -> QuotaDetails:
"""Displays information about a quota."""
quota = self.quota_manager.get_quota(trans, id, deleted=deleted)
rval = quota.to_dict(
view="element", value_mapper={"id": DecodedDatabaseIdField.encode, "total_disk_usage": float}
)
return QuotaDetails.construct(**rval)
def create(self, trans: ProvidesUserContext, params: CreateQuotaParams) -> CreateQuotaResult:
"""Creates a new quota."""
payload = params.dict()
self.validate_in_users_and_groups(trans, payload)
quota, message = self.quota_manager.create_quota(payload)
item = quota.to_dict(value_mapper={"id": DecodedDatabaseIdField.encode})
item["url"] = url_for("quota", id=DecodedDatabaseIdField.encode(quota.id))
item["message"] = message
return CreateQuotaResult.construct(**item)
def update(self, trans: ProvidesUserContext, id: DecodedDatabaseIdField, params: UpdateQuotaParams) -> str:
"""Modifies a quota."""
payload = params.dict()
self.validate_in_users_and_groups(trans, payload)
quota = self.quota_manager.get_quota(trans, id, deleted=False)
params = UpdateQuotaParams(**payload)
# FIXME: Doing it this way makes the update non-atomic if a method fails after an earlier one has succeeded.
methods = []
if params.name or params.description:
methods.append(self.quota_manager.rename_quota)
if params.amount:
methods.append(self.quota_manager.edit_quota)
if params.default == DefaultQuotaValues.NO:
methods.append(self.quota_manager.unset_quota_default)
elif params.default:
methods.append(self.quota_manager.set_quota_default)
if params.in_users or params.in_groups:
methods.append(self.quota_manager.manage_users_and_groups_for_quota)
messages = []
for method in methods:
message = method(quota, params)
messages.append(message)
return "; ".join(messages)
def delete(
self, trans: ProvidesUserContext, id: DecodedDatabaseIdField, payload: Optional[DeleteQuotaPayload] = None
) -> str:
"""Marks a quota as deleted."""
quota = self.quota_manager.get_quota(
trans, id, deleted=False
) # deleted quotas are not technically members of this collection
message = self.quota_manager.delete_quota(quota)
if payload and payload.purge:
message += self.quota_manager.purge_quota(quota)
return message
def undelete(self, trans: ProvidesUserContext, id: DecodedDatabaseIdField) -> str:
"""Restores a previously deleted quota."""
quota = self.quota_manager.get_quota(trans, id, deleted=True)
return self.quota_manager.undelete_quota(quota)
def validate_in_users_and_groups(self, trans, payload):
"""
For convenience, in_users and in_groups can be encoded IDs or emails/group names in the API.
"""
def get_id(item, model_class, column):
try:
return trans.security.decode_id(item)
except Exception:
pass # maybe an email/group name
# this will raise if the item is invalid
return trans.sa_session.query(model_class).filter(column == item).first().id
new_in_users = []
new_in_groups = []
invalid = []
for item in util.listify(payload.get("in_users", [])):
try:
new_in_users.append(get_id(item, model.User, model.User.email))
except Exception:
invalid.append(item)
for item in util.listify(payload.get("in_groups", [])):
try:
new_in_groups.append(get_id(item, model.Group, model.Group.name))
except Exception:
invalid.append(item)
if invalid:
msg = (
f"The following value(s) for associated users and/or groups could not be parsed: {', '.join(invalid)}."
)
msg += " Valid values are email addresses of users, names of groups, or IDs of both."
raise Exception(msg)
payload["in_users"] = list(map(str, new_in_users))
payload["in_groups"] = list(map(str, new_in_groups))
---
blob_id: 7935e7e73e2f3e9ea639823afd139f2cf40f2af8
directory_id: 93b858db8d5fd4990c36a6c9eed3b75047152863
path: /magpylib/_src/obj_classes/class_BaseGeo.py
content_id: f6b3a57c137e1da0d7c2077552d76f4ef9f790e4
detected_licenses: BSD-2-Clause
license_type: permissive
repo_name: magpylib/magpylib
snapshot_id: c4dc908c4e1a0109f65f15cd73584693ddc71f4b
revision_id: 7fa3514afceb28e8b7ba94f2821f5e9c789cc996
branch_name: refs/heads/main
visit_date: 2023-09-01T00:04:39.420872
revision_date: 2023-08-27T05:32:14
committer_date: 2023-08-27T05:32:14
github_id: 169390106
star_events_count: 174
fork_events_count: 39
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-03T18:34:52
gha_created_at: 2019-02-06T10:39:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 12677
extension: py
filename: class_BaseGeo.py
content:
"""BaseGeo class code"""
# pylint: disable=cyclic-import
# pylint: disable=too-many-instance-attributes
# pylint: disable=protected-access
import numpy as np
from scipy.spatial.transform import Rotation as R
from magpylib._src.exceptions import MagpylibBadUserInput
from magpylib._src.input_checks import check_format_input_orientation
from magpylib._src.input_checks import check_format_input_vector
from magpylib._src.obj_classes.class_BaseTransform import BaseTransform
from magpylib._src.style import BaseStyle
from magpylib._src.utility import add_iteration_suffix
def pad_slice_path(path1, path2):
"""edge-pads or end-slices path 2 to fit path 1 format
path1: shape (N,x)
path2: shape (M,x)
return: path2 with format (N,x)
"""
delta_path = len(path1) - len(path2)
if delta_path > 0:
return np.pad(path2, ((0, delta_path), (0, 0)), "edge")
if delta_path < 0:
return path2[-delta_path:]
return path2
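# Illustrative sketch (added annotation, not part of the original module;
# assumes plain numpy arrays as inputs):
# >>> p1 = np.zeros((4, 3))
# >>> pad_slice_path(p1, np.ones((2, 3))).shape   # shorter path2 -> edge-padded
# (4, 3)
# >>> pad_slice_path(np.ones((2, 3)), p1).shape   # longer path2 -> end-sliced
# (2, 3)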
class BaseGeo(BaseTransform):
"""Initializes position and orientation properties
of an object in a global CS.
position is a ndarray with shape (3,).
orientation is a scipy.spatial.transformation.Rotation
object that gives the relative rotation to the init_state. The
init_state is defined by how the fields are implemented (e.g.
cyl upright in xy-plane)
Both attributes _position and _orientation.as_rotvec() are of shape (N,3),
and describe a path of length N. (N=1 if there is only one
object position).
Properties
----------
position: array_like, shape (N,3)
Position path
orientation: scipy.Rotation, shape (N,)
Rotation path
Methods
-------
- show
- move
- rotate
"""
_style_class = BaseStyle
def __init__(
self,
position=(
0.0,
0.0,
0.0,
),
orientation=None,
style=None,
**kwargs,
):
self._style_kwargs = {}
self._parent = None
# set _position and _orientation attributes
self._init_position_orientation(position, orientation)
if style is not None or kwargs: # avoid style creation cost if not needed
self._style_kwargs = self._process_style_kwargs(style=style, **kwargs)
@staticmethod
def _process_style_kwargs(style=None, **kwargs):
if kwargs:
if style is None:
style = {}
style_kwargs = {}
for k, v in kwargs.items():
if k.startswith("style_"):
style_kwargs[k[6:]] = v
else:
raise TypeError(
f"__init__() got an unexpected keyword argument {k!r}"
)
style.update(**style_kwargs)
return style
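    # Illustrative sketch (added annotation; `style_color` is just an example
    # kwarg, not a fixed API): BaseGeo(style_color="red") is routed through
    # _process_style_kwargs(style=None, style_color="red"), which strips the
    # "style_" prefix and returns {"color": "red"}. The dict is stored in
    # _style_kwargs and only applied when the lazily created `style` property
    # is first accessed.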
def _init_position_orientation(self, position, orientation):
"""tile up position and orientation input and set _position and
_orientation at class init. Because position and orientation inputs
    come at the same time, tiling is slightly different than with setters.
pos: position input
ori: orientation input
"""
# format position and orientation inputs
pos = check_format_input_vector(
position,
dims=(1, 2),
shape_m1=3,
sig_name="position",
sig_type="array_like (list, tuple, ndarray) with shape (3,) or (n,3)",
reshape=(-1, 3),
)
oriQ = check_format_input_orientation(orientation, init_format=True)
# padding logic: if one is longer than the other, edge-pad up the other
len_pos = pos.shape[0]
len_ori = oriQ.shape[0]
if len_pos > len_ori:
oriQ = np.pad(oriQ, ((0, len_pos - len_ori), (0, 0)), "edge")
elif len_pos < len_ori:
pos = np.pad(pos, ((0, len_ori - len_pos), (0, 0)), "edge")
# set attributes
self._position = pos
self._orientation = R.from_quat(oriQ)
# properties ----------------------------------------------------
@property
def parent(self):
"""The object is a child of it's parent collection."""
return self._parent
@parent.setter
def parent(self, inp):
# pylint: disable=import-outside-toplevel
from magpylib._src.obj_classes.class_Collection import Collection
if isinstance(inp, Collection):
inp.add(self, override_parent=True)
elif inp is None:
if self._parent is not None:
self._parent.remove(self)
self._parent = None
else:
raise MagpylibBadUserInput(
"Input `parent` must be `None` or a `Collection` object."
f"Instead received {type(inp)}."
)
@property
def position(self):
"""
        Object position(s) in the global coordinates in units of mm. For N>1, the
`position` and `orientation` attributes together represent an object path.
"""
return np.squeeze(self._position)
@position.setter
def position(self, inp):
"""
Set object position-path.
Use edge-padding and end-slicing to adjust orientation path
When a Collection position is set, then all children retain their
relative position to the Collection BaseGeo.
position: array_like, shape (3,) or (N,3)
Position-path of object.
"""
old_pos = self._position
# check and set new position
self._position = check_format_input_vector(
inp,
dims=(1, 2),
shape_m1=3,
sig_name="position",
sig_type="array_like (list, tuple, ndarray) with shape (3,) or (n,3)",
reshape=(-1, 3),
)
# pad/slice and set orientation path to same length
oriQ = self._orientation.as_quat()
self._orientation = R.from_quat(pad_slice_path(self._position, oriQ))
# when there are children include their relative position
for child in getattr(self, "children", []):
old_pos = pad_slice_path(self._position, old_pos)
child_pos = pad_slice_path(self._position, child._position)
rel_child_pos = child_pos - old_pos
# set child position (pad/slice orientation)
child.position = self._position + rel_child_pos
@property
def orientation(self):
"""
Object orientation(s) in the global coordinates. `None` corresponds to
        a unit-rotation. For N>1, the `position` and `orientation` attributes
together represent an object path.
"""
# cannot squeeze (its a Rotation object)
if len(self._orientation) == 1: # single path orientation - reduce dimension
return self._orientation[0]
return self._orientation # return full path
@orientation.setter
def orientation(self, inp):
"""Set object orientation-path.
inp: None or scipy Rotation, shape (1,) or (N,)
Set orientation-path of object. None generates a unit orientation
for every path step.
"""
old_oriQ = self._orientation.as_quat()
# set _orientation attribute with ndim=2 format
oriQ = check_format_input_orientation(inp, init_format=True)
self._orientation = R.from_quat(oriQ)
# pad/slice position path to same length
self._position = pad_slice_path(oriQ, self._position)
# when there are children they rotate about self.position
# after the old Collection orientation is rotated away.
for child in getattr(self, "children", []):
# pad/slice and set child path
child.position = pad_slice_path(self._position, child._position)
# compute rotation and apply
old_ori_pad = R.from_quat(np.squeeze(pad_slice_path(oriQ, old_oriQ)))
child.rotate(
self.orientation * old_ori_pad.inv(), anchor=self._position, start=0
)
@property
def style(self):
"""
Object style in the form of a BaseStyle object. Input must be
in the form of a style dictionary.
"""
if getattr(self, "_style", None) is None:
self._style = self._style_class()
if self._style_kwargs:
style_kwargs = self._style_kwargs.copy()
self._style_kwargs = {}
try:
self._style.update(style_kwargs)
except (AttributeError, ValueError) as e:
e.args = (
f"{self!r} has been initialized with some invalid style arguments.\n"
+ str(e),
)
raise
return self._style
@style.setter
def style(self, val):
self._style = self._validate_style(val)
def _validate_style(self, val=None):
val = {} if val is None else val
style = self.style # triggers style creation
if isinstance(val, dict):
style.update(val)
elif not isinstance(val, self._style_class):
raise ValueError(
f"Input parameter `style` must be of type {self._style_class}.\n"
f"Instead received type {type(val)}"
)
return style
# dunders -------------------------------------------------------
def __add__(self, obj):
"""Add up sources to a Collection object.
Returns
-------
Collection: Collection
"""
# pylint: disable=import-outside-toplevel
from magpylib import Collection
return Collection(self, obj)
# methods -------------------------------------------------------
def reset_path(self):
"""Set object position to (0,0,0) and orientation = unit rotation.
Returns
-------
self: magpylib object
Examples
--------
Demonstration of `reset_path` functionality:
>>> import magpylib as magpy
>>> obj = magpy.Sensor(position=(1,2,3))
>>> obj.rotate_from_angax(45, 'z')
Sensor...
>>> print(obj.position)
[1. 2. 3.]
>>> print(obj.orientation.as_euler('xyz', degrees=True))
[ 0. 0. 45.]
>>> obj.reset_path()
Sensor(id=...)
>>> print(obj.position)
[0. 0. 0.]
>>> print(obj.orientation.as_euler('xyz', degrees=True))
[0. 0. 0.]
"""
self.position = (0, 0, 0)
self.orientation = None
return self
def copy(self, **kwargs):
"""Returns a copy of the current object instance. The `copy` method returns a deep copy of
the object, that is independent of the original object.
Parameters
----------
kwargs: dict
Keyword arguments (for example `position=(1,2,3)`) are applied to the copy.
Examples
--------
        Create a `Sensor` object and copy it to another position:
>>> import magpylib as magpy
>>> sens1 = magpy.Sensor(style_label='sens1')
>>> sens2 = sens1.copy(position=(2,6,10), style_label='sens2')
>>> print(f"Instance {sens1.style.label} with position {sens1.position}.")
Instance sens1 with position [0. 0. 0.].
>>> print(f"Instance {sens2.style.label} with position {sens2.position}.")
Instance sens2 with position [ 2. 6. 10.].
"""
# pylint: disable=import-outside-toplevel
from copy import deepcopy
# avoid deepcopying the deep dependency upwards the tree structure
if self.parent is not None:
# using private attributes to avoid triggering `.add` method (see #530 bug)
parent = self._parent
self._parent = None
obj_copy = deepcopy(self)
self._parent = parent
else:
obj_copy = deepcopy(self)
if getattr(self, "_style", None) is not None or bool(
getattr(self, "_style_kwargs", False)
):
label = self.style.label
if label is None:
label = f"{type(self).__name__}_01"
else:
label = add_iteration_suffix(label)
obj_copy.style.label = label
style_kwargs = {}
for k, v in kwargs.items():
if k.startswith("style"):
style_kwargs[k] = v
else:
setattr(obj_copy, k, v)
if style_kwargs:
style_kwargs = self._process_style_kwargs(**style_kwargs)
obj_copy.style.update(style_kwargs)
return obj_copy
---
blob_id: 0d72ae3a2668968f8b37283f7e5f073f377011a6
directory_id: 549270020f6c8724e2ef1b12e38d11b025579f8d
path: /recipes/c-blosc2/all/conanfile.py
content_id: 9cfb0264ed537ac57e61aadda1289bf620e74fc3
detected_licenses: MIT
license_type: permissive
repo_name: conan-io/conan-center-index
snapshot_id: 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
revision_id: 3b17e69bb4e5601a850b6e006e44775e690bac33
branch_name: refs/heads/master
visit_date: 2023-08-31T11:34:45.403978
revision_date: 2023-08-31T11:13:23
committer_date: 2023-08-31T11:13:23
github_id: 204671232
star_events_count: 844
fork_events_count: 1820
gha_license_id: MIT
gha_event_created_at: 2023-09-14T21:22:42
gha_created_at: 2019-08-27T09:43:58
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5821
extension: py
filename: conanfile.py
content:
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import export_conandata_patches, apply_conandata_patches, get, copy, rm, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
import glob
required_conan_version = ">=1.53.0"
class CBlosc2Conan(ConanFile):
name = "c-blosc2"
description = "A fast, compressed, persistent binary data store library for C."
license = "BSD-3-Clause"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Blosc/c-blosc2"
topics = ("c-blosc", "blosc", "compression")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"simd_intrinsics": [None, "sse2", "avx2"],
"with_lz4": [True, False],
"with_zlib": [None, "zlib", "zlib-ng", "zlib-ng-compat"],
"with_zstd": [True, False],
"with_plugins": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"simd_intrinsics": "avx2",
"with_lz4": True,
"with_zlib": "zlib",
"with_zstd": True,
"with_plugins": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.arch not in ["x86", "x86_64"]:
del self.options.simd_intrinsics
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
# c-blosc2 uses zlib-ng with zlib compat options.
if self.options.with_zlib == "zlib-ng-compat":
self.options["zlib-ng"].zlib_compat = True
elif self.options.with_zlib == "zlib-ng":
self.options["zlib-ng"].zlib_compat = False
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if self.options.with_lz4:
self.requires("lz4/1.9.4")
if self.options.with_zlib in ["zlib-ng", "zlib-ng-compat"]:
self.requires("zlib-ng/2.1.3")
elif self.options.with_zlib == "zlib":
self.requires("zlib/1.2.13")
if self.options.with_zstd:
self.requires("zstd/1.5.5")
def build_requirements(self):
if Version(self.version) >= "2.4.1":
self.tool_requires("cmake/[>=3.16.3 <4]")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
env = VirtualBuildEnv(self)
env.generate()
tc = CMakeToolchain(self)
tc.cache_variables["BLOSC_IS_SUBPROJECT"] = False
tc.cache_variables["BLOSC_INSTALL"] = True
tc.cache_variables["BUILD_STATIC"] = not bool(self.options.shared)
tc.cache_variables["BUILD_SHARED"] = bool(self.options.shared)
tc.cache_variables["BUILD_TESTS"] = False
tc.cache_variables["BUILD_FUZZERS"] = False
tc.cache_variables["BUILD_BENCHMARKS"] = False
tc.cache_variables["BUILD_EXAMPLES"] = False
simd_intrinsics = self.options.get_safe("simd_intrinsics", False)
tc.cache_variables["DEACTIVATE_AVX2"] = simd_intrinsics != "avx2"
tc.cache_variables["DEACTIVATE_LZ4"] = not bool(self.options.with_lz4)
tc.cache_variables["PREFER_EXTERNAL_LZ4"] = True
tc.cache_variables["DEACTIVATE_ZLIB"] = self.options.with_zlib is None
tc.cache_variables["PREFER_EXTERNAL_ZLIB"] = True
tc.cache_variables["DEACTIVATE_ZSTD"] = not bool(self.options.with_zstd)
tc.cache_variables["PREFER_EXTERNAL_ZSTD"] = True
tc.cache_variables["BUILD_PLUGINS"] = bool(self.options.with_plugins)
if self.options.with_zlib == "zlib-ng-compat":
tc.preprocessor_definitions["ZLIB_COMPAT"] = "1"
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
for filename in glob.glob(os.path.join(self.source_folder, "cmake", "Find*.cmake")):
if os.path.basename(filename) not in [
"FindSIMD.cmake",
]:
rm(self, os.path.basename(filename), os.path.join(self.source_folder, "cmake"))
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
licenses = ["BLOSC.txt", "BITSHUFFLE.txt", "FASTLZ.txt", "LZ4.txt", "ZLIB.txt", "STDINT.txt"]
for license_file in licenses:
copy(self, pattern=license_file, dst=os.path.join(self.package_folder, "licenses"), src=os.path.join(self.source_folder, "LICENSES"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "cmake"))
# Remove MS runtime files
for dll_pattern_to_remove in ["concrt*.dll", "msvcp*.dll", "vcruntime*.dll"]:
rm(self, pattern=dll_pattern_to_remove, folder=os.path.join(self.package_folder, "bin"), recursive=True)
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "blosc2")
prefix = "lib" if is_msvc(self) and not self.options.shared else ""
self.cpp_info.libs = [f"{prefix}blosc2"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs = ["rt", "m", "pthread", "dl"]
---
blob_id: 3b88ff07b318cf3e6827c8db70427854350cfd31
directory_id: ced3f2cdcfe770f2d018a4837c42326fb6e0d27d
path: /examples/graph/test_prbcd_cora.py
content_id: ffcd99950c82a7f6b74cd4a00a0687cbf69dcec4
detected_licenses: MIT, CC0-1.0, GPL-1.0-or-later
license_type: permissive
repo_name: DSE-MSU/DeepRobust
snapshot_id: 308a4c03887eb1394a6d68b64ac3d7837b32f395
revision_id: d25d95b33724af9ab0385d5171c989f9b4ff2359
branch_name: refs/heads/master
visit_date: 2023-08-11T00:42:08.091214
revision_date: 2023-06-29T13:24:19
committer_date: 2023-06-29T13:24:19
github_id: 210014892
star_events_count: 978
fork_events_count: 200
gha_license_id: MIT
gha_event_created_at: 2023-09-11T02:56:14
gha_created_at: 2019-09-21T16:09:07
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2150
extension: py
filename: test_prbcd_cora.py
content:
from torch_geometric.datasets import Planetoid
from torch_geometric.utils import to_undirected
import torch_geometric.transforms as T
import argparse
import torch
import deeprobust.graph.utils as utils
from deeprobust.graph.global_attack import PRBCD
from deeprobust.graph.defense_pyg import GCN, SAGE, GAT
parser = argparse.ArgumentParser()
parser.add_argument('--ptb_rate', type=float, default=0.1, help='perturbation rate.')
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = Planetoid('./', 'cora')
dataset.transform = T.NormalizeFeatures()
data = dataset[0]
### we can also attack other models such as GCN, GAT, SAGE or GPRGNN
### (models in deeprobust.graph.defense_pyg), see below
print('now we choose to attack GCN model')
model = GCN(nfeat=data.x.shape[1], nhid=32, nclass=dataset.num_classes,
nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4,
device=device).to(device)
agent = PRBCD(data, model=model, device=device, epochs=50) # by default, we are attacking the GCN model
agent.pretrain_model(model) # use the function to pretrain the provided model
edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
print('now we choose to attack SAGE model')
model = SAGE(nfeat=data.x.shape[1], nhid=32, nclass=dataset.num_classes,
nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4,
device=device).to(device)
agent = PRBCD(data, model=model, device=device, epochs=50) # now attacking the SAGE model
agent.pretrain_model(model) # use the function to pretrain the provided model
edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
print('now we choose to attack GAT model')
model = GAT(nfeat=data.x.shape[1], nhid=8, heads=8, weight_decay=5e-4,
lr=0.005, nlayers=2, nclass=dataset.num_classes,
dropout=0.5, device=device).to(device)
agent = PRBCD(data, model=model, device=device, epochs=50) # now attacking the GAT model
agent.pretrain_model(model) # use the function to pretrain the provided model
edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
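# Minimal refactor sketch (added annotation; assumes the same PRBCD and
# deeprobust.graph.defense_pyg API used above): the three repeated
# pretrain-and-attack blocks could be folded into one helper.
#
# def pretrain_and_attack(model, epochs=50):
#     agent = PRBCD(data, model=model, device=device, epochs=epochs)
#     agent.pretrain_model(model)
#     return agent.attack(ptb_rate=args.ptb_rate)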
---
blob_id: e2b1f2d3199d79cd787fbe13e4c2276cac644f68
directory_id: ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
path: /mindspore/python/mindspore/ops/_vmap/vmap_convolution_ops.py
content_id: a3dfc2201f1ae31372cf7ca0281bd36007f9c0a2
detected_licenses: Apache-2.0, LicenseRef-scancode-proprietary-license, MPL-1.0, OpenSSL, LGPL-3.0-only, LicenseRef-scancode-warranty-disclaimer, BSD-3-Clause-Open-MPI, MIT, MPL-2.0-no-copyleft-exception, NTP, BSD-3-Clause, GPL-1.0-or-later, 0BSD, MPL-2.0, LicenseRef-scancode-free-unknown, AGPL-3.0-only, Libpng, MPL-1.1, IJG, GPL-2.0-only, BSL-1.0, Zlib, LicenseRef-scancode-public-domain, LicenseRef-scancode-python-cwi, BSD-2-Clause, LicenseRef-scancode-gary-s-brown, LGPL-2.1-only, LicenseRef-scancode-other-permissive, Python-2.0, LicenseRef-scancode-mit-nagy, LicenseRef-scancode-other-copyleft, LicenseRef-scancode-unknown-license-reference, Unlicense
license_type: permissive
repo_name: mindspore-ai/mindspore
snapshot_id: ca7d5bb51a3451c2705ff2e583a740589d80393b
revision_id: 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
branch_name: refs/heads/master
visit_date: 2023-07-29T09:17:11.051569
revision_date: 2023-07-17T13:14:15
committer_date: 2023-07-17T13:14:15
github_id: 239714835
star_events_count: 4178
fork_events_count: 768
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-26T22:31:11
gha_created_at: 2020-02-11T08:43:48
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 17797
extension: py
filename: vmap_convolution_ops.py
content:
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""convolution vmap impl"""
from __future__ import absolute_import
import mindspore.numpy as mnp
from mindspore.ops import constexpr
from mindspore.ops.primitive import _primexpr
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.operations import nn_ops as nps
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.primitive import Primitive
from mindspore.ops._vmap.vmap_base import vmap_rules_getters, vmap_general_preprocess, \
_raise_value_error, _vmap_update_prim_attr, _vmap_clone_prim
@vmap_rules_getters.register(P.Conv2D)
@vmap_rules_getters.register(P.Conv3D)
def get_conv_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv2D` and `Conv3D` operations."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(input_bdim, weight_bdim):
is_all_none, result = vmap_general_preprocess(prim, input_bdim, weight_bdim)
if is_all_none:
return result
return _conv_vmap_rule(new_prim, axis_size, input_bdim, weight_bdim, attr_list)
return vmap_rule
@vmap_rules_getters.register(P.Conv2DTranspose)
@vmap_rules_getters.register(P.Conv2DBackpropInput)
def get_conv2d_transpose_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv2DTranspose` and `Conv2DBackpropInput` operations."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(dout_bdim, weight_bdim, input_size_bdim):
is_all_none, result = vmap_general_preprocess(prim, dout_bdim, weight_bdim, input_size_bdim)
if is_all_none:
return result
return _conv_transpose_vmap_rule(new_prim, axis_size, dout_bdim, \
weight_bdim, input_size_bdim, attr_list)
return vmap_rule
@vmap_rules_getters.register(P.Conv3DTranspose)
def get_conv3d_transpose_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv3DTranspose` operation."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(dout_bdim, weight_bdim):
is_all_none, result = vmap_general_preprocess(prim, dout_bdim, weight_bdim)
if is_all_none:
return result
return _conv_transpose_vmap_rule(new_prim, axis_size, dout_bdim, weight_bdim, None, attr_list)
return vmap_rule
@vmap_rules_getters.register(nps.Conv3DBackpropInput)
def get_conv3d_backprop_input_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv3DBackpropInput` operation."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(weight_bdim, dout_bdim, input_size_bdim):
is_all_none, result = vmap_general_preprocess(prim, weight_bdim, dout_bdim, input_size_bdim)
if is_all_none:
return result
return _conv_transpose_vmap_rule(new_prim, axis_size, dout_bdim, \
weight_bdim, input_size_bdim, attr_list)
return vmap_rule
@vmap_rules_getters.register(G.Conv2DBackpropFilter)
def get_conv2d_backprop_filter_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv2DBackpropFilter` operation."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(dout_bdim, input_x_bdim, weight_size_bdim):
is_all_none, result = vmap_general_preprocess(prim, dout_bdim, input_x_bdim, weight_size_bdim)
if is_all_none:
return result
return _conv_backprop_filter_vmap_rule(new_prim, axis_size, dout_bdim, \
input_x_bdim, weight_size_bdim, attr_list)
return vmap_rule
@vmap_rules_getters.register(G.Conv3DBackpropFilter)
def get_conv3d_backprop_filter_vmap_rule(prim, axis_size):
"""Vmap rule for `Conv3DBackpropFilter` operation."""
if isinstance(prim, str):
prim = Primitive(prim)
attr_list = [prim.name, prim.group, prim.data_format]
new_prim = _vmap_clone_prim(prim)
def vmap_rule(input_x_bdim, dout_bdim, weight_size_bdim):
is_all_none, result = vmap_general_preprocess(prim, input_x_bdim, dout_bdim, weight_size_bdim)
if is_all_none:
return result
return _conv_backprop_filter_vmap_rule(new_prim, axis_size, dout_bdim, \
input_x_bdim, weight_size_bdim, attr_list)
return vmap_rule
@_primexpr
def _get_reshape_src_dim(data_dim, cmp_dim):
"""Get source dim for reshape"""
if data_dim > cmp_dim:
expand_dim = cmp_dim
merge_dim = data_dim + 1
else:
expand_dim = cmp_dim + 1
merge_dim = data_dim
return expand_dim, merge_dim
@_primexpr
def _get_merge_shape(src_dim, dst_dim, shape):
"""Get new shape for merging the src_dim and dst_dim. The dst_dim is the value after removing src_dim."""
new_shape = [shape[i] for i in range(len(shape)) if i != src_dim]
new_shape[dst_dim] *= shape[src_dim]
return tuple(new_shape)
def _reshape_merge_dims(src_dim, dst_dim, target):
"""Reshape target by merging the src_dim and dst_dim."""
shape = F.shape(target)
new_shape = _get_merge_shape(src_dim, dst_dim, shape)
new_target = mnp.moveaxis(target, src_dim, dst_dim)
output = F.reshape(new_target, new_shape)
return output, new_shape
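# Illustrative example (added annotation; the shape is an assumption): for a
# target of shape (3, 4, 5), _reshape_merge_dims(0, 1, target) moves axis 0 to
# position 1 (giving shape (4, 3, 5)) and folds it into that axis, returning
# an output of shape (4, 15) together with the new shape tuple.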
@_primexpr
def _get_expand_shape(src_dim, dst_size, shape, prim_name):
"""Get new shape for splitting src_dim into dst_size parts."""
dst_size2 = shape[src_dim] // dst_size
new_shape = list(shape)
new_shape[src_dim:(src_dim + 1)] = [dst_size, dst_size2]
return tuple(new_shape)
def _reshape_expand_dims(src_dim, dst_size, target, prim_name):
"""Reshape target by splitting src_dim into dst_size parts."""
shape = F.shape(target)
new_shape = _get_expand_shape(src_dim, dst_size, shape, prim_name)
return F.reshape(target, new_shape)
@_primexpr
def _get_new_size_by_index(input_size, batch_size, index):
"""Get the new size of input_size by multiplying input_size[index] by batch_size."""
new_size = ()
if input_size is None:
return new_size
new_size = list(input_size)
new_size[index] *= batch_size
return tuple(new_size)
@_primexpr
def _update_group_attr(prim, groups, batch_size):
"""Set new value for 'group' attribute of the convolution primitive."""
group = groups * batch_size
_vmap_update_prim_attr(prim, 'group', group)
_vmap_update_prim_attr(prim, 'groups', group)
@constexpr
def _get_channel_index(data_format, prim_name):
"""Get channel index by data_format, only supports NHWC/NCHW/NCDHW now."""
index = 0
if data_format == "NHWC":
index = 3
elif data_format in ("NCHW", "NCDHW"):
index = 1
else:
_raise_value_error("'data_format' in {} should be NHWC/NCHW/NCDHW, "
"but got {}.".format(prim_name, data_format))
return index
def _conv_vmap_rule(prim, batch_size, input_bdim, weight_bdim, attr_list):
"""Vmap rule for Convolution operations, such as `Conv2D` and `Conv3D`."""
input_x, x_dim = input_bdim
weight, w_dim = weight_bdim
prim_name = attr_list[0]
groups = attr_list[1]
data_format = attr_list[2]
c_axis = _get_channel_index(data_format, prim_name)
def _get_output_for_x_w_vmap():
new_input, _ = _reshape_merge_dims(x_dim, c_axis, input_x)
new_weight, new_w_shape = _reshape_merge_dims(w_dim, 0, weight)
_update_group_attr(prim, groups, batch_size)
_vmap_update_prim_attr(prim, 'out_channel', new_w_shape[0])
out = prim(new_input, new_weight)
out = _reshape_expand_dims(c_axis, batch_size, out, prim_name)
return out, c_axis
def _get_output_for_x_vmap():
new_input, _ = _reshape_merge_dims(x_dim, 0, input_x)
out = prim(new_input, weight)
out = _reshape_expand_dims(0, batch_size, out, prim_name)
return out, 0
def _get_output_for_w_vmap():
if groups > 1:
expand_dim, merge_dim = _get_reshape_src_dim(w_dim, 0)
new_weight = _reshape_expand_dims(expand_dim, groups, weight, prim_name)
new_weight, _ = _reshape_merge_dims(merge_dim, 1, new_weight)
new_weight, new_w_shape = _reshape_merge_dims(0, 0, new_weight)
_vmap_update_prim_attr(prim, 'out_channel', new_w_shape[0])
out = prim(input_x, new_weight)
out = _reshape_expand_dims(c_axis, groups, out, prim_name)
out = _reshape_expand_dims(c_axis + 1, batch_size, out, prim_name)
out, _ = _reshape_merge_dims(c_axis, c_axis + 1, out)
return out, c_axis
new_weight, new_w_shape = _reshape_merge_dims(w_dim, 0, weight)
_vmap_update_prim_attr(prim, 'out_channel', new_w_shape[0])
out = prim(input_x, new_weight)
out = _reshape_expand_dims(c_axis, batch_size, out, prim_name)
return out, c_axis
if x_dim is not None and w_dim is not None:
if prim_name == "Conv3D":
_raise_value_error("vmap in_axes of 'x' and 'weight in `{}` cannot be non-None at the same time,"
"but got {} and {}.".format(prim_name, x_dim, w_dim))
output = _get_output_for_x_w_vmap()
elif x_dim is not None:
output = _get_output_for_x_vmap()
else:
output = _get_output_for_w_vmap()
return output
def _conv_transpose_vmap_rule(prim, batch_size, dout_bdim, weight_bdim, input_size_bdim, attr_list):
"""
Vmap rule for transposed convolution operations, such as `Conv2DTranspose`,
`Conv2DBackpropInput`, `Conv3DTranspose` and `Conv3DBackpropInput`.
"""
prim_name = attr_list[0]
input_size = None
if input_size_bdim is not None:
input_size, input_size_dim = input_size_bdim
if input_size_dim is not None:
_raise_value_error("Vmap in_axes of 'input_size' in `{}` must be None, "
"but got {}.".format(prim_name, input_size_dim))
if not isinstance(input_size, tuple):
_raise_value_error("Unsupported vmap for dynamic shape of `{}` when "
"'input_size' is a tensor.".format(prim_name))
dout, dout_dim = dout_bdim
weight, w_dim = weight_bdim
groups = attr_list[1]
data_format = attr_list[2]
c_axis = _get_channel_index(data_format, prim_name)
def _get_conv_transpose_output(dout, weight, input_size):
out = None
if prim_name in ('Conv2DTranspose', 'Conv2DBackpropInput'):
out = prim(dout, weight, input_size)
elif prim_name == "Conv3DTranspose":
out = prim(dout, weight)
elif prim_name == "Conv3DBackpropInput":
out = prim(weight, dout, input_size)
else:
_raise_value_error("Unsupported the operation: `{}`.".format(prim_name))
return out
def _get_output_for_dout_weight_vmap():
_update_group_attr(prim, groups, batch_size)
new_dout, _ = _reshape_merge_dims(dout_dim, c_axis, dout)
new_weight, _ = _reshape_merge_dims(w_dim, 0, weight)
new_input_size = _get_new_size_by_index(input_size, batch_size, c_axis)
out = _get_conv_transpose_output(new_dout, new_weight, new_input_size)
out = _reshape_expand_dims(c_axis, batch_size, out, prim_name)
return out, c_axis
def _get_output_for_dout_vmap():
new_dout, _ = _reshape_merge_dims(dout_dim, 0, dout)
new_input_size = _get_new_size_by_index(input_size, batch_size, 0)
out = _get_conv_transpose_output(new_dout, weight, new_input_size)
out = _reshape_expand_dims(0, batch_size, out, prim_name)
return out, 0
def _get_output_for_weight_vmap():
new_weight, _ = _reshape_merge_dims(w_dim, c_axis, weight)
new_input_size = _get_new_size_by_index(input_size, batch_size, c_axis)
out = _get_conv_transpose_output(dout, new_weight, new_input_size)
if groups > 1:
out = _reshape_expand_dims(c_axis, groups, out, prim_name)
out = _reshape_expand_dims(c_axis + 1, batch_size, out, prim_name)
out, _ = _reshape_merge_dims(c_axis, c_axis + 1, out)
else:
out = _reshape_expand_dims(c_axis, batch_size, out, prim_name)
return out, c_axis
if dout_dim is not None and w_dim is not None:
if prim_name in ("Conv3DTranspose", "Conv3DBackpropInput"):
_raise_value_error("vmap in_axes of 'dout' and 'weight' in `{}` cannot be non-None at the same time,"
"but got {} and {}.".format(prim_name, dout_dim, w_dim))
output = _get_output_for_dout_weight_vmap()
elif dout_dim is not None:
output = _get_output_for_dout_vmap()
else:
output = _get_output_for_weight_vmap()
return output
def _conv_backprop_filter_vmap_rule(prim, batch_size, dout_bdim, input_bdim, weight_size_bdim, attr_list):
"""Vmap rule for `Conv2DBackpropFilter` and `Conv3DBackpropFilter` operations"""
dout, dout_dim = dout_bdim
input_x, x_dim = input_bdim
weight_size, w_size_dim = weight_size_bdim
prim_name = attr_list[0]
groups = attr_list[1]
data_format = attr_list[2]
c_axis = _get_channel_index(data_format, prim_name)
if w_size_dim is not None:
_raise_value_error("Vmap in_axes of 'weight_size' in `{}` must be None, "
"but got {}.".format(prim_name, w_size_dim))
if not isinstance(weight_size, tuple):
_raise_value_error("Unsupported vmap for dynamic shape of `{}` when "
"'weight_size' is a tensor.".format(prim_name))
def _get_conv_backprop_filter_output(dout, x, weight_size):
out = None
if prim_name == "Conv2DBackpropFilter":
out = prim(dout, x, weight_size)
elif prim_name == "Conv3DBackpropFilter":
out = prim(x, dout, weight_size)
else:
_raise_value_error("Unsupported the operation: `{}`.".format(prim_name))
return out
def _get_output_for_dout_x_vmap():
_update_group_attr(prim, groups, batch_size)
new_dout, _ = _reshape_merge_dims(dout_dim, c_axis, dout)
new_input, _ = _reshape_merge_dims(x_dim, c_axis, input_x)
new_w_size = _get_new_size_by_index(weight_size, batch_size, 0)
out = _get_conv_backprop_filter_output(new_dout, new_input, new_w_size)
out = _reshape_expand_dims(0, batch_size, out, prim_name)
return out, 0
def _get_output_for_x_vmap():
new_w_size = _get_new_size_by_index(weight_size, batch_size, c_axis)
if groups > 1:
expand_dim, merge_dim = _get_reshape_src_dim(x_dim, c_axis)
new_input = _reshape_expand_dims(expand_dim, groups, input_x, prim_name)
new_input, _ = _reshape_merge_dims(merge_dim, c_axis + 1, new_input)
new_input, _ = _reshape_merge_dims(c_axis, c_axis, new_input)
else:
new_input, _ = _reshape_merge_dims(x_dim, c_axis, input_x)
out = _get_conv_backprop_filter_output(dout, new_input, new_w_size)
out = _reshape_expand_dims(c_axis, batch_size, out, prim_name)
return out, c_axis
def _get_output_for_dout_vmap():
new_w_size = _get_new_size_by_index(weight_size, batch_size, 0)
if groups > 1:
expand_dim, merge_dim = _get_reshape_src_dim(dout_dim, c_axis)
new_dout = _reshape_expand_dims(expand_dim, groups, dout, prim_name)
new_dout, _ = _reshape_merge_dims(merge_dim, c_axis + 1, new_dout)
new_dout, _ = _reshape_merge_dims(c_axis, c_axis, new_dout)
out = _get_conv_backprop_filter_output(new_dout, input_x, new_w_size)
out = _reshape_expand_dims(0, groups, out, prim_name)
out = _reshape_expand_dims(1, batch_size, out, prim_name)
out, _ = _reshape_merge_dims(0, 1, out)
return out, 0
new_dout, _ = _reshape_merge_dims(dout_dim, c_axis, dout)
out = _get_conv_backprop_filter_output(new_dout, input_x, new_w_size)
out = _reshape_expand_dims(0, batch_size, out, prim_name)
return out, 0
if dout_dim is not None and x_dim is not None:
if prim_name == "Conv3DBackpropFilter":
_raise_value_error("vmap in_axes of 'dout' and 'x' in `{}` cannot be non-None at the same time,"
"but got {} and {}.".format(prim_name, dout_dim, x_dim))
output = _get_output_for_dout_x_vmap()
elif x_dim is not None:
output = _get_output_for_x_vmap()
else:
output = _get_output_for_dout_vmap()
return output
---
blob_id: 7ee27c1441fd74f6e6c59b8b073d821985567382
directory_id: f89de0a45083225cb4eb9525244c2b89a73f14f0
path: /django_comments_xtd/tests/test_models.py
content_id: a66edf07d6a9ced38f9e6f297ee34c9c70da9ac7
detected_licenses: BSD-2-Clause
license_type: permissive
repo_name: danirus/django-comments-xtd
snapshot_id: eb6f82b9476e4e94ae214892cac94ccd6eaa5692
revision_id: 4a92d06e7aca75917fccec23de81abbf740c7fb1
branch_name: refs/heads/master
visit_date: 2023-08-30T13:49:36.849509
revision_date: 2023-07-08T15:23:04
committer_date: 2023-07-08T15:23:04
github_id: 3033940
star_events_count: 559
fork_events_count: 172
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-02T06:49:19
gha_created_at: 2011-12-22T13:22:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 31852
extension: py
filename: test_models.py
content:
from unittest.mock import patch
from datetime import datetime, timedelta
from django.db.models.signals import pre_save
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase as DjangoTestCase
from django_comments_xtd import get_model
from django_comments_xtd.models import (XtdComment,
MaxThreadLevelExceededException,
publish_or_unpublish_on_pre_save)
from django_comments_xtd.tests.models import Article, Diary, MyComment
class ArticleManagerTestCase(DjangoTestCase):
def test_published(self):
self.article_1 = Article.objects.create(
title="September", slug="september", body="During September...",
publish=datetime.now() - timedelta(days=1))
self.article_2 = Article.objects.create(
title="October", slug="october", body="What I did on October...",
publish=datetime.now() + timedelta(days=1))
self.assertIn(self.article_1, Article.objects.published())
self.assertNotIn(self.article_2, Article.objects.published())
class ArticleBaseTestCase(DjangoTestCase):
def setUp(self):
self.article_1 = Article.objects.create(
title="September", slug="september", body="During September...")
self.article_2 = Article.objects.create(
title="October", slug="october", body="What I did on October...")
class XtdCommentManagerTestCase(ArticleBaseTestCase):
def setUp(self):
super(XtdCommentManagerTestCase, self).setUp()
self.article_ct = ContentType.objects.get(app_label="tests",
model="article")
self.site1 = Site.objects.get(pk=1)
self.site2 = Site.objects.create(domain='site2.com', name='site2.com')
def post_comment_1(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=self.site1,
comment="just a testing comment",
submit_date=datetime.now())
def post_comment_2(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_2.id,
content_object=self.article_2,
site=self.site1,
comment="yet another comment",
submit_date=datetime.now())
def post_comment_3(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_2.id,
content_object=self.article_2,
site=self.site1,
comment="and another one",
submit_date=datetime.now())
def post_comment_4(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=self.site2,
comment="just a testing comment in site2",
submit_date=datetime.now())
def test_for_app_models(self):
# there is no comment posted yet to article_1 nor article_2
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 0)
self.post_comment_1()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 1)
self.post_comment_2()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 2)
self.post_comment_3()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 3)
self.post_comment_4()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 4)
def test_multi_site_for_app_models(self):
self.post_comment_1() # To site1.
self.post_comment_4() # To site2.
count_site1 = XtdComment.objects.for_app_models("tests.article",
site=self.site1).count()
self.assertEqual(count_site1, 1)
count_site2 = XtdComment.objects.for_app_models("tests.article",
site=self.site2).count()
self.assertEqual(count_site2, 1)
# In order to test 'save' and '_calculate_thread_data' methods, simulate the
# following threads, in order of arrival:
#
# testcase cmt.id parent level-0 level-1 level-2 level-3
# step1 1 - c1 <- c1
# step1 2 - c2 <- c2
# step2 3 1 -- c3 <- c3.c1
# step2 4 1 -- c4 <- c4.c1
# step3 5 2 -- c5 <- c5.c2
# step4 6 5 -- -- c6 <- c6.c5.c2
# step4 7 4 -- -- c7 <- c7.c4.c1
# step5 8 3 -- -- c8 <- c8.c3.c1
# step5 9 - c9 <- c9
# step6 10 7 c10 <- c10.c7.c4.c1
# step6      11       8                                  c11  <- c11.c8.c3.c1
def thread_test_step_1(article, model=get_model(), **kwargs):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 1 with parent_id 0
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c1",
submit_date=datetime.now(),
**kwargs)
# post Comment 2 with parent_id 0
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c2",
submit_date=datetime.now(),
**kwargs)
def thread_test_step_2(article, model=get_model(), **kwargs):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 3 to parent_id 1
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c3.c1",
submit_date=datetime.now(),
parent_id=1,
**kwargs)
# post Comment 4 to parent_id 1
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c4.c1",
submit_date=datetime.now(),
parent_id=1,
**kwargs)
def thread_test_step_3(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 5 to parent_id 2
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c5.c2",
submit_date=datetime.now(),
parent_id=2)
def thread_test_step_4(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 6 to parent_id 5
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c6.c5.c2",
submit_date=datetime.now(),
parent_id=5)
# post Comment 7 to parent_id 4
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c7.c4.c1",
submit_date=datetime.now(),
parent_id=4)
def thread_test_step_5(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 8 to parent_id 3
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c8.c3.c1",
submit_date=datetime.now(),
parent_id=3)
# post Comment 9 with parent_id 0
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c3",
submit_date=datetime.now())
def thread_test_step_6(article):
model = get_model()
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 10 to parent_id 7
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
comment="c10.c7.c4.c1",
submit_date=datetime.now(),
parent_id=7,
site=site)
# post Comment 11 to parent_id 8
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
comment="c11.c8.c3.c1",
submit_date=datetime.now(),
parent_id=8,
site=site)
class BaseThreadStep1TestCase(ArticleBaseTestCase):
def setUp(self):
super(BaseThreadStep1TestCase, self).setUp()
thread_test_step_1(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 0
self.c2 # -> 2 2 2 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_1_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 0)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 0)
class ThreadStep2TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep2TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 2
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 0
self.c2 # -> 2 2 2 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_2_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 2)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 0)
def test_threaded_comments_step_2_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 0)
class ThreadStep3TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep3TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
( # -> content: cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 2
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 0
self.c2, # -> 2 2 2 0 1 1
self.c5 # -> 5 2 2 1 2 0
) = XtdComment.objects.all()
def test_threaded_comments_step_3_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 2)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 1)
def test_threaded_comments_step_3_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 0)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 0)
class ThreadStep4TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep4TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 3
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 1
self.c7, # -> 7 1 4 2 4 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6 # -> 6 2 5 2 3 0
) = XtdComment.objects.all()
def test_threaded_comments_step_4_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 3)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
def test_threaded_comments_step_4_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 1)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_4_level_2(self):
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 4)
self.assertEqual(self.c7.nested_count, 0)
class ThreadStep5TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep5TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 4
self.c3, # -> 3 1 1 1 2 1
self.c8, # -> 8 1 3 2 3 0
self.c4, # -> 4 1 1 1 4 1
self.c7, # -> 7 1 4 2 5 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6, # -> 6 2 5 2 3 0
self.c9 # -> 9 9 9 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_5_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 4)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
# comment 9
self.assertTrue(self.c9.parent_id == 9 and self.c9.thread_id == 9)
self.assertTrue(self.c9.level == 0 and self.c9.order == 1)
self.assertEqual(self.c9.nested_count, 0)
def test_threaded_comments_step_5_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 1)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 4) # changed
self.assertEqual(self.c4.nested_count, 1)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_5_level_2(self):
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 5) # changed
self.assertEqual(self.c7.nested_count, 0)
# comment 8
self.assertTrue(self.c8.parent_id == 3 and self.c8.thread_id == 1)
self.assertTrue(self.c8.level == 2 and self.c8.order == 3)
self.assertEqual(self.c8.nested_count, 0)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_MAX_THREAD_LEVEL=2)
def test_exceed_max_thread_level_raises_exception(self):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type=article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=site,
comment="cmt 1 to cmt 2 to cmt 1",
submit_date=datetime.now(),
parent_id=8) # already max thread level
def test_removing_c4_withdraws_c7_and_updates_nested_count(self):
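        # Removing c4 withdraws its nested comment c7: c1's nested_count
        # drops from 4 to 3, while c4 itself keeps counting its (now
        # withdrawn) child, as the assertions below verify.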
cm4 = XtdComment.objects.get(pk=4)
self.assertEqual(cm4.nested_count, 1)
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 4)
# Remove comment 4, save, and check again.
cm4.is_removed = True
cm4.save()
cm4 = XtdComment.objects.get(pk=4)
self.assertEqual(cm4.nested_count, 1)
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 3)
class ThreadStep6TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep6TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
thread_test_step_6(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 6
self.c3, # -> 3 1 1 1 2 2
self.c8, # -> 8 1 3 2 3 1
self.c11, # -> 11 1 8 3 4 0
self.c4, # -> 4 1 1 1 5 2
self.c7, # -> 7 1 4 2 6 1
self.c10, # -> 10 1 7 3 7 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6, # -> 6 2 5 2 3 0
self.c9 # -> 9 9 9 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_6_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 6)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
# comment 9
self.assertTrue(self.c9.parent_id == 9 and self.c9.thread_id == 9)
self.assertTrue(self.c9.level == 0 and self.c9.order == 1)
self.assertEqual(self.c9.nested_count, 0)
def test_threaded_comments_step_6_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 2)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 5)
self.assertEqual(self.c4.nested_count, 2)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_6_level_2(self):
# comment 8
self.assertTrue(self.c8.parent_id == 3 and self.c8.thread_id == 1)
self.assertTrue(self.c8.level == 2 and self.c8.order == 3)
self.assertEqual(self.c8.nested_count, 1)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 6)
self.assertEqual(self.c7.nested_count, 1)
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
def test_threaded_comments_step_6_level_3(self):
# comment 10
self.assertTrue(self.c10.parent_id == 7 and self.c10.thread_id == 1)
self.assertTrue(self.c10.level == 3 and self.c10.order == 7)
self.assertEqual(self.c10.nested_count, 0)
# comment 11
self.assertTrue(self.c11.parent_id == 8 and self.c11.thread_id == 1)
self.assertTrue(self.c11.level == 3 and self.c11.order == 4)
self.assertEqual(self.c11.nested_count, 0)
def add_comment_to_diary_entry(diary_entry):
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
XtdComment.objects.create(content_type=diary_ct,
object_pk=diary_entry.id,
content_object=diary_entry,
site=site,
comment="cmt to day in diary",
submit_date=datetime.now())
class DiaryBaseTestCase(DjangoTestCase):
def setUp(self):
self.day_in_diary = Diary.objects.create(body="About Today...")
add_comment_to_diary_entry(self.day_in_diary)
def test_max_thread_level_by_app_model(self):
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type=diary_ct,
object_pk=self.day_in_diary.id,
content_object=self.day_in_diary,
site=site,
comment="cmt to cmt to day in diary",
submit_date=datetime.now(),
parent_id=1) # already max thread level
class PublishOrUnpublishNestedCommentsOneTestCase(ArticleBaseTestCase):
    # Add a threaded comment structure (c1..c4) and verify that
    # removing c1 unpublishes c3 and c4.
def setUp(self):
super(PublishOrUnpublishNestedCommentsOneTestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
#
# These two lines create the following comments:
#
# ( # content -> cmt.id thread_id parent_id level order nested
# cm1, # -> 1 1 1 0 1 2
# cm3, # -> 3 1 1 1 2 0
# cm4, # -> 4 1 1 1 3 0
# cm2, # -> 2 2 2 0 1 0
# ) = XtdComment.objects.all()
def test_all_comments_are_public_and_have_not_been_removed(self):
for cm in XtdComment.objects.all():
self.assertTrue(cm.is_public)
self.assertFalse(cm.is_removed)
def test_removing_c1_unpublishes_c3_and_c4(self):
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 2) # nested_count should be 2.
cm1.is_removed = True
cm1.save()
cm1 = XtdComment.objects.get(pk=1)
self.assertTrue(cm1.is_public)
self.assertTrue(cm1.is_removed)
        # cm1 is still public, so its nested_count doesn't change.
self.assertEqual(cm1.nested_count, 2)
cm3 = XtdComment.objects.get(pk=3)
self.assertFalse(cm3.is_public)
self.assertFalse(cm3.is_removed)
cm4 = XtdComment.objects.get(pk=4)
self.assertFalse(cm4.is_public)
self.assertFalse(cm4.is_removed)
_model = "django_comments_xtd.tests.models.MyComment"
class PublishOrUnpublishNestedCommentsTwoTestCase(ArticleBaseTestCase):
    # Mock the settings so that the project uses a customized comment
    # model (django_comments_xtd.tests.models.MyComment), and repeat the
    # same setup with MyComment instances. Then remove c1 and verify
    # that c3 and c4 get unpublished.
def setUp(self):
super(PublishOrUnpublishNestedCommentsTwoTestCase, self).setUp()
thread_test_step_1(self.article_1, model=MyComment,
title="Can't be empty 1")
thread_test_step_2(self.article_1, model=MyComment,
title="Can't be empty 2")
#
# These two lines create the following comments:
#
# ( # content -> cmt.id thread_id parent_id level order nested
# cm1, # -> 1 1 1 0 1 2
# cm3, # -> 3 1 1 1 2 0
# cm4, # -> 4 1 1 1 3 0
# cm2, # -> 2 2 2 0 1 0
# ) = MyComment.objects.all()
def test_all_comments_are_public_and_have_not_been_removed(self):
for cm in MyComment.objects.all():
self.assertTrue(cm.is_public)
self.assertFalse(cm.is_removed)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_MODEL=_model)
def test_removing_c1_unpublishes_c3_and_c4(self):
        # Register the receiver again. It was already registered in apps.py,
        # but we have patched COMMENTS_XTD_MODEL and we don't fake the app's
        # ready() step. It's easier to simply re-register the receiver, so
        # the test exercises only what depends on django-comments-xtd.
model_app_label = get_model()._meta.label
pre_save.connect(publish_or_unpublish_on_pre_save,
sender=model_app_label)
cm1 = MyComment.objects.get(pk=1)
cm1.is_removed = True
cm1.save()
self.assertTrue(cm1.is_public)
self.assertTrue(cm1.is_removed)
cm3 = MyComment.objects.get(pk=3)
self.assertFalse(cm3.is_public)
self.assertFalse(cm3.is_removed)
cm4 = MyComment.objects.get(pk=4)
self.assertFalse(cm4.is_public)
self.assertFalse(cm4.is_removed)
|
9c64b2f060bee09256749b642379bc3707077bef
|
7d605f3eec7059bedc98b5962c7fe7ae16fdf54b
|
/tests/monitors/logstash_tcp/logstash_tcp_test.py
|
46156d3ae0dbd7aef4ce20738c1cd8a706500e45
|
[
"Apache-2.0"
] |
permissive
|
signalfx/signalfx-agent
|
d1cc49d8e945252132d36e0734f438f5f7f30a76
|
fbc24b0fdd3884bd0bbfbd69fe3c83f49d4c0b77
|
refs/heads/main
| 2023-08-10T08:07:38.518125
| 2023-06-30T22:40:19
| 2023-06-30T23:10:27
| 80,042,799
| 129
| 230
|
Apache-2.0
| 2023-07-07T23:43:17
| 2017-01-25T18:17:17
|
Go
|
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
logstash_tcp_test.py
|
import re
from functools import partial as p
from pathlib import Path
from textwrap import dedent
import pytest
from tests.helpers.agent import Agent
from tests.helpers.assertions import has_datapoint, tcp_socket_open
from tests.helpers.util import (
container_ip,
copy_file_content_into_container,
get_host_ip,
run_container,
wait_for,
wait_for_value,
)
PIPELINE_CONF = Path(__file__).parent.joinpath("pipeline.conf").resolve()
SAMPLE_EVENTS = """
Logged in
Took 1 seconds
Logged in
Took 2 seconds
Logged in
Took 3 seconds
Logged in
Took 4 seconds
Logged in
Took 5 seconds
Logged in
Took 6 seconds
Logged in
Took 7 seconds
"""
@pytest.mark.parametrize("version", ["7.3.0", "5.6.16"])
def test_logstash_tcp_client(version):
with run_container(
f"docker.elastic.co/logstash/logstash:{version}",
environment={"XPACK_MONITORING_ENABLED": "false", "CONFIG_RELOAD_AUTOMATIC": "true"},
) as logstash_cont:
copy_file_content_into_container(SAMPLE_EVENTS, logstash_cont, "tmp/events.log")
copy_file_content_into_container(
PIPELINE_CONF.read_text(encoding="utf-8"), logstash_cont, "/usr/share/logstash/pipeline/test.conf"
)
host = container_ip(logstash_cont)
config = dedent(
f"""
monitors:
- type: logstash-tcp
mode: client
host: {host}
port: 8900
"""
)
with Agent.run(config) as agent:
assert wait_for(p(tcp_socket_open, host, 8900), timeout_seconds=180), "logstash didn't start"
assert wait_for(p(has_datapoint, agent.fake_services, "logins.count", value=7, dimensions={}))
assert wait_for(p(has_datapoint, agent.fake_services, "process_time.count", value=7, dimensions={}))
assert wait_for(p(has_datapoint, agent.fake_services, "process_time.mean", value=4, dimensions={}))
LISTEN_LOG_RE = re.compile(r"Listening for Logstash events on .*:(\d+)")
@pytest.mark.parametrize("version", ["7.3.0", "5.6.16"])
def test_logstash_tcp_server(version):
with run_container(
f"docker.elastic.co/logstash/logstash:{version}",
environment={"XPACK_MONITORING_ENABLED": "false", "CONFIG_RELOAD_AUTOMATIC": "true"},
) as logstash_cont:
agent_host = get_host_ip()
copy_file_content_into_container(SAMPLE_EVENTS, logstash_cont, "tmp/events.log")
config = dedent(
f"""
monitors:
- type: logstash-tcp
mode: server
host: 0.0.0.0
port: 0
"""
)
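        # Port 0 makes the monitor bind an ephemeral port; the actual port
        # is scraped from the agent log via LISTEN_LOG_RE below.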
with Agent.run(config) as agent:
log_match = wait_for_value(lambda: LISTEN_LOG_RE.search(agent.output))
assert log_match is not None
listen_port = int(log_match.groups()[0])
copy_file_content_into_container(
# The pipeline conf is written for server mode so patch it to
# act as a client.
PIPELINE_CONF.read_text(encoding="utf-8")
.replace('mode => "server"', 'mode => "client"')
.replace('host => "0.0.0.0"', f'host => "{agent_host}"')
.replace("port => 8900", f"port => {listen_port}"),
logstash_cont,
"/usr/share/logstash/pipeline/test.conf",
)
assert wait_for(
p(has_datapoint, agent.fake_services, "logins.count", value=7, dimensions={}), timeout_seconds=180
)
assert wait_for(p(has_datapoint, agent.fake_services, "process_time.count", value=7, dimensions={}))
assert wait_for(p(has_datapoint, agent.fake_services, "process_time.mean", value=4, dimensions={}))
|
a1c21b0ea8a7e89340dcc0e20adf2e048edafe74
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/upstream/decoar2/hubconf.py
|
612b3900afd1fc87f69c2affc095916b4fa98a2d
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
hubconf.py
|
import os
import torch
from s3prl.util.download import _urls_to_filepaths
from .expert import UpstreamExpert as _UpstreamExpert
def decoar2_custom(ckpt: str, refresh=False, *args, **kwargs):
if ckpt.startswith("http"):
ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
return _UpstreamExpert(ckpt, *args, **kwargs)
def decoar2_local(*args, **kwargs):
"""
The model from local ckpt
ckpt (str): PATH
feature_selection (str): 'c' (default) or 'z'
"""
return decoar2_custom(*args, **kwargs)
def decoar2_url(*args, **kwargs):
"""
The model from URL
ckpt (str): URL
"""
return decoar2_custom(*args, **kwargs)
def decoar2(*args, refresh=False, **kwargs):
"""
    The default decoar2 model
    refresh (bool): whether to download the ckpt/config again if they already exist
"""
kwargs[
"ckpt"
] = "https://huggingface.co/s3prl/converted_ckpts/resolve/main/checkpoint_decoar2.pt"
return decoar2_url(*args, refresh=refresh, **kwargs)
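# A minimal usage sketch (illustrative only):
# model = decoar2()  # downloads checkpoint_decoar2.pt on first use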
|
d07e5b8c51c029145b5aa8e9a344992ac466a7d2
|
d7fd46dfd8aab520c4958fa065367e168b6bfee7
|
/tests/service/connection_test.py
|
c51e58463de8458051f118a3ea1013ec60fdbe97
|
[
"MIT"
] |
permissive
|
facebookresearch/CompilerGym
|
f04a79fbfdbaf8afd6920ec205db6f1b6003d073
|
9e0c0beb12da1e1ea82ae6ce920713ee28dda4c9
|
refs/heads/development
| 2023-08-31T09:17:48.967970
| 2023-03-10T19:29:56
| 2023-03-10T19:29:56
| 312,059,069
| 787
| 126
|
MIT
| 2023-03-10T19:29:58
| 2020-11-11T18:44:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
connection_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/service:connection."""
import gym
import pytest
import compiler_gym.envs # noqa Register LLVM environments.
from compiler_gym.errors import ServiceError
from compiler_gym.service import CompilerGymServiceConnection, ConnectionOpts
from compiler_gym.service.proto import GetSpacesRequest
from tests.test_main import main
@pytest.fixture(scope="function")
def connection() -> CompilerGymServiceConnection:
"""Yields a connection to a local service."""
with gym.make("llvm-v0") as env:
yield env.service
@pytest.fixture(scope="function")
def dead_connection() -> CompilerGymServiceConnection:
"""Yields a connection to a dead local service service."""
with gym.make("llvm-v0") as env:
# Kill the service.
env.service.connection.process.terminate()
env.service.connection.process.communicate()
yield env.service
def test_create_invalid_options():
with pytest.raises(TypeError, match="No endpoint provided for service connection"):
CompilerGymServiceConnection("")
def test_create_channel_failed_subprocess(
dead_connection: CompilerGymServiceConnection,
):
with pytest.raises(
(ServiceError, TimeoutError), match="Failed to create connection to localhost:"
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=1,
init_max_attempts=2,
rpc_init_max_seconds=0.1,
),
)
def test_create_channel_failed_subprocess_rpc_timeout(
dead_connection: CompilerGymServiceConnection,
):
"""Same as the above test, but RPC timeout is long enough that only a single
attempt can be made.
"""
with pytest.raises(
OSError,
match=(
r"Failed to create connection to localhost:\d+ after "
r"[\d\.]+ seconds \(1 attempt made\)"
),
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=0.1,
init_max_attempts=2,
rpc_init_max_seconds=1,
),
)
def test_call_stub_invalid_type(connection: CompilerGymServiceConnection):
with pytest.raises(
TypeError, match="Exception serializing request! Request type: type"
):
connection(connection.stub.GetSpaces, int)
def test_call_stub_negative_timeout(connection: CompilerGymServiceConnection):
with pytest.raises(TimeoutError, match=r"Deadline Exceeded \(-10.0 seconds\)"):
connection(connection.stub.GetSpaces, GetSpacesRequest(), timeout=-10)
def test_ManagedConnection_repr(connection: CompilerGymServiceConnection):
cnx = connection.connection
assert (
repr(cnx)
== f"Connection to service at {cnx.url} running on PID {cnx.process.pid}"
)
# Kill the service.
cnx.process.terminate()
cnx.process.communicate()
assert repr(cnx) == f"Connection to dead service at {cnx.url}"
if __name__ == "__main__":
main()
|
b1574c0b74cb7f0dc18a652d9ad32553d65ef179
|
d70e3750c600da2cd14f5a07f6cc4bc4d2261cd3
|
/src/nba_api/stats/static/__init__.py
|
725f3f696bfac950f868567fa7feb8686b1b0dd6
|
[
"MIT"
] |
permissive
|
swar/nba_api
|
93d794123a856dcf2005e6c2856c2a369d29839e
|
8480b574c286a22c9064c014505ae61f7dd84fc1
|
refs/heads/master
| 2023-09-05T07:03:01.612102
| 2023-09-01T00:59:16
| 2023-09-01T00:59:16
| 149,062,453
| 1,909
| 554
|
MIT
| 2023-09-14T18:56:46
| 2018-09-17T03:13:07
|
Python
|
UTF-8
|
Python
| false
| false
| 31
|
py
|
__init__.py
|
__all__ = ['players', 'teams']
|
fd2314e72dd3801e0eb07289c288c173e49b900d
|
dff8e90423ad3883f37f1ffe282ace59ed54c72d
|
/lambdas/molecule/setup.py
|
ebfe8e05bb0fc4fb3e45b8a5123efeb88a73b4db
|
[
"Apache-2.0"
] |
permissive
|
quiltdata/quilt
|
50ba1c97486b6865a63558b684aa6e83c179938c
|
9dc817aea82355a07e598e18007eea7bbebf5b32
|
refs/heads/master
| 2023-09-03T18:09:02.933492
| 2023-08-31T20:02:12
| 2023-08-31T20:02:12
| 81,518,954
| 1,261
| 100
|
Apache-2.0
| 2023-09-14T13:30:25
| 2017-02-10T02:46:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
setup.py
|
from setuptools import find_packages, setup
setup(
name="t4_lambda_molecule",
version="0.0.1",
packages=find_packages(where="src"),
package_dir={"": "src"},
install_requires=[
"requests==2.31.0",
],
)
|
e387c3d09769aaefc5170775ae19461178fba166
|
3dc3bbe607ab7b583eb52dbaae86636eb642960a
|
/configs/detection/slowonly/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py
|
344668f8528efe5dabe475c1921fc4ef5dce6ae8
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmaction2
|
659c36c6083fd3d9d072e074a8d4b3a50342b9bd
|
582b78fd6c3240500d5cacd292339d7d1ddbb056
|
refs/heads/main
| 2023-08-28T18:14:50.423980
| 2023-08-10T09:20:06
| 2023-08-10T09:20:06
| 278,810,244
| 3,498
| 1,028
|
Apache-2.0
| 2023-09-07T06:50:44
| 2020-07-11T07:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py
|
_base_ = ['slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py']
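# with_global=True appends a global-context feature to each RoI feature,
# which doubles the bbox head input channels (hence in_channels=4096).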
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(with_global=True),
bbox_head=dict(in_channels=4096)))
|
c3db0d4f552e78daf8e82e77d5b780902d3848f3
|
58cfad962e57b935e7782bb214a2008d689751d6
|
/xero_python/file/models/__init__.py
|
1e354185b250b6ede19fdfb79b2e5157db661c02
|
[
"MIT"
] |
permissive
|
XeroAPI/xero-python
|
ce43c060c216a42efd5f47159987468deb0e4622
|
07efa3bfc87a3bd08ba217dd2b642f6a3515ddff
|
refs/heads/master
| 2023-07-21T04:01:27.461727
| 2023-07-11T02:35:44
| 2023-07-11T02:35:44
| 240,158,613
| 109
| 42
|
MIT
| 2023-07-11T02:35:45
| 2020-02-13T02:17:05
|
Python
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
__init__.py
|
# coding: utf-8
# flake8: noqa
"""
Xero Files API
These endpoints are specific to Xero Files API # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
# import models into model package
from xero_python.file.models.association import Association
from xero_python.file.models.file_object import FileObject
from xero_python.file.models.files import Files
from xero_python.file.models.folder import Folder
from xero_python.file.models.folders import Folders
from xero_python.file.models.object_group import ObjectGroup
from xero_python.file.models.object_type import ObjectType
from xero_python.file.models.upload_object import UploadObject
from xero_python.file.models.user import User
|
b43dd76192268813ae41b9b9f45b732b35c2f14f
|
6e964d46b8fab9bccbd199ea7ade41297282b0a7
|
/plugin/PySrc/space_tracer/traced_finder.py
|
8b30f7e148e0029967367eb6c5ba6a35533cf527
|
[
"MIT"
] |
permissive
|
donkirkby/live-py-plugin
|
1a4cb87a796983245094d7c97c3e72f3cea0c540
|
165b447cc1288c94f24f1e660e0c45a6ef476826
|
refs/heads/master
| 2023-08-29T15:14:37.585327
| 2023-07-23T21:12:19
| 2023-07-23T21:12:19
| 4,332,096
| 257
| 59
|
MIT
| 2023-09-09T18:18:40
| 2012-05-15T04:41:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
traced_finder.py
|
from ast import parse, NodeVisitor
PSEUDO_FILENAME = 'live_source.py'
DEFAULT_MODULE_NAME = '__main__'
LIVE_MODULE_NAME = '__live_coding__'
class TracedFinder(object):
""" Find which nodes to trace in a module. """
def __init__(self, source_code, traced, filename=None):
""" Initialize the finder.
:param str source_code: the source code that will be traced, or None if
the source code should be read from the normal path.
:param str traced: the module, method, or function name to trace
:param str filename: the file the source code was read from
"""
self.source_code = source_code
self.traced = traced
self.traced_node = None
self.source_tree = parse(source_code, filename or PSEUDO_FILENAME)
visitor = TreeVisitor(self)
visitor.visit(self.source_tree)
self.is_tracing = self.traced_node is not None
# noinspection PyPep8Naming
class TreeVisitor(NodeVisitor):
def __init__(self, finder):
self.finder = finder
self.target = finder.traced.split('.')
self.context = []
def visit_FunctionDef(self, node):
self.visit_node(node)
def visit_ClassDef(self, node):
self.visit_node(node)
def visit_node(self, node):
name = node.name
self.context.append(name)
self.generic_visit(node)
if self.target == self.context:
self.finder.traced_node = node
self.context.pop()
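# A minimal usage sketch (illustrative only; the names below are made up):
# find a nested method by its dotted path and check that tracing is on.
if __name__ == '__main__':
    demo_source = 'class Outer:\n    def inner(self):\n        pass\n'
    demo_finder = TracedFinder(demo_source, 'Outer.inner')
    assert demo_finder.is_tracing
    assert demo_finder.traced_node.name == 'inner'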
|
650798f790f15fec337d9cb90ea83413d4041bdd
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyUnresolvedReferencesInspection/ConditionalImports/a.py
|
24a1622f48efab3fbf1a801c00692c03fdf850b6
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
a.py
|
def f(c, x):
if c > 2:
from lib1 import f #pass
elif c > 1:
from lib2 import f #pass
else:
<warning descr="Unused import statement 'from lib2 import g #fail'">from lib2 import g #fail</warning>
return f(x)
|
33110d55f7bb0ebee62d7d5a33af76d52d9578de
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/reporting/provider/ocp/costs/models.py
|
1e0380f2d0853c6209903c63ec140cd4ec57367c
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
models.py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Models for OCP cost view tables."""
from django.contrib.postgres.indexes import GinIndex
from django.db import models
from django.db.models import JSONField
class CostSummary(models.Model):
"""A summary table of OCP costs."""
class Meta:
"""Meta for CostSummary."""
db_table = "reporting_ocpcosts_summary"
indexes = [
models.Index(fields=["usage_start"], name="ocpcostsum_usage_start_idx"),
models.Index(fields=["namespace"], name="ocpcostsum_namespace_idx", opclasses=["varchar_pattern_ops"]),
models.Index(fields=["node"], name="ocpcostsum_node_idx", opclasses=["varchar_pattern_ops"]),
GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
]
report_period = models.ForeignKey("OCPUsageReportPeriod", on_delete=models.CASCADE, null=True)
cluster_id = models.CharField(max_length=50, null=True)
cluster_alias = models.CharField(max_length=256, null=True)
# Kubernetes objects by convention have a max name length of 253 chars
namespace = models.CharField(max_length=253, null=True)
pod = models.CharField(max_length=253, null=True)
node = models.CharField(max_length=253, null=True)
usage_start = models.DateField(null=False)
usage_end = models.DateField(null=False)
pod_charge_cpu_core_hours = models.DecimalField(max_digits=27, decimal_places=9, null=True)
pod_charge_memory_gigabyte_hours = models.DecimalField(max_digits=27, decimal_places=9, null=True)
persistentvolumeclaim_charge_gb_month = models.DecimalField(max_digits=27, decimal_places=9, null=True)
    # Need more precision on calculated fields, otherwise there will be
    # rounding errors.
infra_cost = models.DecimalField(max_digits=33, decimal_places=15, null=True)
    # This field is used in place of infra_cost when
    # grouping by project
project_infra_cost = models.DecimalField(max_digits=33, decimal_places=15, null=True)
markup_cost = models.DecimalField(max_digits=27, decimal_places=9, null=True)
project_markup_cost = models.DecimalField(max_digits=27, decimal_places=9, null=True)
pod_labels = JSONField(null=True)
monthly_cost = models.DecimalField(max_digits=33, decimal_places=15, null=True)
|
b9d6ba3eb0ba902d499338701d8e1fa0b27368b9
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/qnap_qsw/button.py
|
acd8d3bd1ef1b676fedfb8e029779f5f6555f5c9
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,296
|
py
|
button.py
|
"""Support for the QNAP QSW buttons."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import Final
from aioqsw.localapi import QnapQswApi
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN, QSW_COORD_DATA, QSW_REBOOT
from .coordinator import QswDataCoordinator
from .entity import QswDataEntity
@dataclass
class QswButtonDescriptionMixin:
"""Mixin to describe a Button entity."""
press_action: Callable[[QnapQswApi], Awaitable[bool]]
@dataclass
class QswButtonDescription(ButtonEntityDescription, QswButtonDescriptionMixin):
"""Class to describe a Button entity."""
BUTTON_TYPES: Final[tuple[QswButtonDescription, ...]] = (
QswButtonDescription(
device_class=ButtonDeviceClass.RESTART,
entity_category=EntityCategory.CONFIG,
key=QSW_REBOOT,
press_action=lambda qsw: qsw.reboot(),
),
)
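# Each description couples the entity metadata with a press_action
# coroutine; QswButton.async_press below simply awaits it against the API.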
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Add QNAP QSW buttons from a config_entry."""
coordinator: QswDataCoordinator = hass.data[DOMAIN][entry.entry_id][QSW_COORD_DATA]
async_add_entities(
QswButton(coordinator, description, entry) for description in BUTTON_TYPES
)
class QswButton(QswDataEntity, ButtonEntity):
"""Define a QNAP QSW button."""
_attr_has_entity_name = True
entity_description: QswButtonDescription
def __init__(
self,
coordinator: QswDataCoordinator,
description: QswButtonDescription,
entry: ConfigEntry,
) -> None:
"""Initialize."""
super().__init__(coordinator, entry)
self._attr_unique_id = f"{entry.unique_id}_{description.key}"
self.entity_description = description
async def async_press(self) -> None:
"""Triggers the QNAP QSW button action."""
await self.entity_description.press_action(self.coordinator.qsw)
|
aa0956c1c3725fb756edb1696f075a484923a227
|
e4523061fa8e4dc20ec74f7fbebdd407dabd6443
|
/colmap_runner/run_colmap_posed.py
|
eadd60953faf889791c26959c630392c2a4cbd78
|
[
"BSD-2-Clause"
] |
permissive
|
Kai-46/nerfplusplus
|
a3d19f4d966449cc30b139fa8606691879736bd4
|
ebf2f3e75fd6c5dfc8c9d0b533800daaf17bd95f
|
refs/heads/master
| 2022-07-22T20:26:42.549489
| 2022-03-16T14:44:03
| 2022-03-16T14:44:03
| 303,247,907
| 841
| 114
| null | 2022-02-21T15:17:21
| 2020-10-12T01:31:04
|
Python
|
UTF-8
|
Python
| false
| false
| 9,983
|
py
|
run_colmap_posed.py
|
import os
import json
from database import COLMAPDatabase
from pyquaternion import Quaternion
import numpy as np
import imageio
import subprocess
def bash_run(cmd):
# local install of colmap
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = '/home/zhangka2/code/colmap/build/__install__/lib'
colmap_bin = '/home/zhangka2/code/colmap/build/__install__/bin/colmap'
cmd = colmap_bin + ' ' + cmd
print('\nRunning cmd: ', cmd)
subprocess.check_call(['/bin/bash', '-c', cmd], env=env)
gpu_index = '-1'
def run_sift_matching(img_dir, db_file):
print('Running sift matching...')
# if os.path.exists(db_file): # otherwise colmap will skip sift matching
# os.remove(db_file)
# feature extraction
    # if there's no attached display, the feature extractor cannot use the GPU
cmd = ' feature_extractor --database_path {} \
--image_path {} \
--ImageReader.camera_model PINHOLE \
--SiftExtraction.max_image_size 5000 \
--SiftExtraction.estimate_affine_shape 0 \
--SiftExtraction.domain_size_pooling 1 \
--SiftExtraction.num_threads 32 \
--SiftExtraction.use_gpu 1 \
--SiftExtraction.gpu_index {}'.format(db_file, img_dir, gpu_index)
bash_run(cmd)
# feature matching
cmd = ' exhaustive_matcher --database_path {} \
--SiftMatching.guided_matching 1 \
--SiftMatching.use_gpu 1 \
--SiftMatching.gpu_index {}'.format(db_file, gpu_index)
bash_run(cmd)
def create_init_files(pinhole_dict_file, db_file, out_dir):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
# create template
with open(pinhole_dict_file) as fp:
pinhole_dict = json.load(fp)
template = {}
cameras_line_template = '{camera_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n'
images_line_template = '{image_id} {qw} {qx} {qy} {qz} {tx} {ty} {tz} {camera_id} {image_name}\n\n'
for img_name in pinhole_dict:
# w, h, fx, fy, cx, cy, qvec, t
params = pinhole_dict[img_name]
w = params[0]
h = params[1]
fx = params[2]
fy = params[3]
cx = params[4]
cy = params[5]
qvec = params[6:10]
tvec = params[10:13]
cam_line = cameras_line_template.format(camera_id="{camera_id}", width=w, height=h, fx=fx, fy=fy, cx=cx, cy=cy)
img_line = images_line_template.format(image_id="{image_id}", qw=qvec[0], qx=qvec[1], qy=qvec[2], qz=qvec[3],
tx=tvec[0], ty=tvec[1], tz=tvec[2], camera_id="{camera_id}", image_name=img_name)
template[img_name] = (cam_line, img_line)
# read database
db = COLMAPDatabase.connect(db_file)
table_images = db.execute("SELECT * FROM images")
img_name2id_dict = {}
for row in table_images:
img_name2id_dict[row[1]] = row[0]
cameras_txt_lines = []
images_txt_lines = []
for img_name, img_id in img_name2id_dict.items():
camera_line = template[img_name][0].format(camera_id=img_id)
cameras_txt_lines.append(camera_line)
image_line = template[img_name][1].format(image_id=img_id, camera_id=img_id)
images_txt_lines.append(image_line)
with open(os.path.join(out_dir, 'cameras.txt'), 'w') as fp:
fp.writelines(cameras_txt_lines)
with open(os.path.join(out_dir, 'images.txt'), 'w') as fp:
fp.writelines(images_txt_lines)
fp.write('\n')
# create an empty points3D.txt
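    # (COLMAP's point_triangulator expects cameras.txt, images.txt and
    # points3D.txt to all exist in the input model, even if points3D.txt
    # is empty.)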
fp = open(os.path.join(out_dir, 'points3D.txt'), 'w')
fp.close()
def run_point_triangulation(img_dir, db_file, out_dir):
print('Running point triangulation...')
# triangulate points
cmd = ' point_triangulator --database_path {} \
--image_path {} \
--input_path {} \
--output_path {} \
--Mapper.tri_ignore_two_view_tracks 1'.format(db_file, img_dir, out_dir, out_dir)
bash_run(cmd)
# this step is optional
def run_global_ba(in_dir, out_dir):
print('Running global BA...')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
cmd = ' bundle_adjuster --input_path {in_dir} --output_path {out_dir}'.format(in_dir=in_dir, out_dir=out_dir)
bash_run(cmd)
def prepare_mvs(img_dir, sfm_dir, mvs_dir):
if not os.path.exists(mvs_dir):
os.mkdir(mvs_dir)
images_symlink = os.path.join(mvs_dir, 'images')
if os.path.exists(images_symlink):
os.unlink(images_symlink)
os.symlink(os.path.relpath(img_dir, mvs_dir),
images_symlink)
sparse_symlink = os.path.join(mvs_dir, 'sparse')
if os.path.exists(sparse_symlink):
os.unlink(sparse_symlink)
os.symlink(os.path.relpath(sfm_dir, mvs_dir),
sparse_symlink)
# prepare stereo directory
stereo_dir = os.path.join(mvs_dir, 'stereo')
for subdir in [stereo_dir,
os.path.join(stereo_dir, 'depth_maps'),
os.path.join(stereo_dir, 'normal_maps'),
os.path.join(stereo_dir, 'consistency_graphs')]:
if not os.path.exists(subdir):
os.mkdir(subdir)
# write patch-match.cfg and fusion.cfg
image_names = sorted(os.listdir(os.path.join(mvs_dir, 'images')))
with open(os.path.join(stereo_dir, 'patch-match.cfg'), 'w') as fp:
for img_name in image_names:
fp.write(img_name + '\n__auto__, 20\n')
# use all images
# fp.write(img_name + '\n__all__\n')
# randomly choose 20 images
# from random import shuffle
# candi_src_images = [x for x in image_names if x != img_name]
# shuffle(candi_src_images)
# max_src_images = 10
# fp.write(img_name + '\n' + ', '.join(candi_src_images[:max_src_images]) + '\n')
with open(os.path.join(stereo_dir, 'fusion.cfg'), 'w') as fp:
for img_name in image_names:
fp.write(img_name + '\n')
def run_photometric_mvs(mvs_dir, window_radius):
print('Running photometric MVS...')
cmd = ' patch_match_stereo --workspace_path {} \
--PatchMatchStereo.window_radius {} \
--PatchMatchStereo.min_triangulation_angle 3.0 \
--PatchMatchStereo.filter 1 \
--PatchMatchStereo.geom_consistency 1 \
--PatchMatchStereo.gpu_index={} \
--PatchMatchStereo.num_samples 15 \
--PatchMatchStereo.num_iterations 12'.format(mvs_dir,
window_radius, gpu_index)
bash_run(cmd)
def run_fuse(mvs_dir, out_ply):
print('Running depth fusion...')
cmd = ' stereo_fusion --workspace_path {} \
--output_path {} \
--input_type geometric'.format(mvs_dir, out_ply)
bash_run(cmd)
def run_poisson_mesher(in_ply, out_ply, trim):
    print('Running Poisson mesher...')
cmd = ' poisson_mesher \
--input_path {} \
--output_path {} \
--PoissonMeshing.trim {}'.format(in_ply, out_ply, trim)
bash_run(cmd)
def main(img_dir, pinhole_dict_file, out_dir):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
db_file = os.path.join(out_dir, 'database.db')
run_sift_matching(img_dir, db_file)
sfm_dir = os.path.join(out_dir, 'sfm')
create_init_files(pinhole_dict_file, db_file, sfm_dir)
run_point_triangulation(img_dir, db_file, sfm_dir)
# # optional
# run_global_ba(sfm_dir, sfm_dir)
mvs_dir = os.path.join(out_dir, 'mvs')
prepare_mvs(img_dir, sfm_dir, mvs_dir)
run_photometric_mvs(mvs_dir, window_radius=5)
out_ply = os.path.join(out_dir, 'fused.ply')
run_fuse(mvs_dir, out_ply)
out_mesh_ply = os.path.join(out_dir, 'meshed_trim_3.ply')
    run_poisson_mesher(out_ply, out_mesh_ply, trim=3)
def convert_cam_dict_to_pinhole_dict(cam_dict_file, pinhole_dict_file, img_dir):
print('Writing pinhole_dict to: ', pinhole_dict_file)
with open(cam_dict_file) as fp:
cam_dict = json.load(fp)
pinhole_dict = {}
for img_name in cam_dict:
data_item = cam_dict[img_name]
if 'img_size' in data_item:
w, h = data_item['img_size']
else:
im = imageio.imread(os.path.join(img_dir, img_name))
h, w = im.shape[:2]
K = np.array(data_item['K']).reshape((4, 4))
W2C = np.array(data_item['W2C']).reshape((4, 4))
# params
fx = K[0, 0]
fy = K[1, 1]
        assert np.isclose(K[0, 1], 0.)
cx = K[0, 2]
cy = K[1, 2]
print(img_name)
R = W2C[:3, :3]
print(R)
u, s_old, vh = np.linalg.svd(R, full_matrices=False)
s = np.round(s_old)
print('s: {} ---> {}'.format(s_old, s))
R = np.dot(u * s, vh)
qvec = Quaternion(matrix=R)
tvec = W2C[:3, 3]
params = [w, h, fx, fy, cx, cy,
qvec[0], qvec[1], qvec[2], qvec[3],
tvec[0], tvec[1], tvec[2]]
pinhole_dict[img_name] = params
with open(pinhole_dict_file, 'w') as fp:
json.dump(pinhole_dict, fp, indent=2, sort_keys=True)
if __name__ == '__main__':
img_dir = ''
cam_dict_file = ''
out_dir = ''
os.makedirs(out_dir, exist_ok=True)
pinhole_dict_file = os.path.join(out_dir, 'pinhole_dict.json')
convert_cam_dict_to_pinhole_dict(cam_dict_file, pinhole_dict_file, img_dir)
main(img_dir, pinhole_dict_file, out_dir)
|
f92d536eb3677efe8b5cc7b97dcfc7abb65402c6
|
008c1eaa9354c76bc42700c81df1a65b8ecc5f0d
|
/backend/projects/permissions.py
|
aa6691518cb40100cffb9cdbaf92b616610e0b1f
|
[
"MIT"
] |
permissive
|
doccano/doccano
|
d3417706fa8a431fe2ac36a2a9b87c8604a0e4d6
|
63870976cc62811807648075d04a2531a1a6734d
|
refs/heads/master
| 2023-08-18T04:33:46.271524
| 2023-08-10T04:32:53
| 2023-08-10T04:32:53
| 132,709,824
| 6,297
| 1,393
|
MIT
| 2023-09-06T05:55:55
| 2018-05-09T06:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,617
|
py
|
permissions.py
|
from django.conf import settings
from rest_framework.permissions import SAFE_METHODS, BasePermission
from .models import Member
class RolePermission(BasePermission):
UNSAFE_METHODS = ("POST", "PATCH", "DELETE")
unsafe_methods_check = True
role_name = ""
@classmethod
def get_project_id(cls, request, view):
return view.kwargs.get("project_id") or request.query_params.get("project_id")
def has_permission(self, request, view):
if request.user.is_superuser:
return True
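        # At this point the user is not a superuser, so unsafe methods are
        # effectively denied whenever unsafe_methods_check is enabled.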
if self.unsafe_methods_check and request.method in self.UNSAFE_METHODS:
return request.user.is_superuser
project_id = self.get_project_id(request, view)
if not project_id and request.method in SAFE_METHODS:
return True
return Member.objects.has_role(project_id, request.user, self.role_name)
class IsProjectAdmin(RolePermission):
unsafe_methods_check = False
role_name = settings.ROLE_PROJECT_ADMIN
class IsAnnotatorAndReadOnly(RolePermission):
role_name = settings.ROLE_ANNOTATOR
class IsAnnotator(RolePermission):
unsafe_methods_check = False
role_name = settings.ROLE_ANNOTATOR
class IsAnnotationApproverAndReadOnly(RolePermission):
role_name = settings.ROLE_ANNOTATION_APPROVER
class IsAnnotationApprover(RolePermission):
unsafe_methods_check = False
role_name = settings.ROLE_ANNOTATION_APPROVER
IsProjectMember = IsAnnotator | IsAnnotationApprover | IsProjectAdmin # type: ignore
IsProjectStaffAndReadOnly = IsAnnotatorAndReadOnly | IsAnnotationApproverAndReadOnly # type: ignore
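# A minimal usage sketch (illustrative only; the view below is hypothetical):
# the OR-combined permission classes above plug into any DRF view.
from rest_framework.views import APIView  # noqa: E402
class _ExampleProjectView(APIView):
    """Hypothetical view restricted to members of a project."""
    permission_classes = [IsProjectMember]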
|
1eef7aabeed3100792e53c8139c1a8845193ff5d
|
944a49e62bc79622fe01abee62403397a1b0504d
|
/openstackclient/tests/functional/compute/v2/test_aggregate.py
|
d5d7c579673a1ffaab85b0b3f3188ff59923305e
|
[
"Apache-2.0"
] |
permissive
|
openstack/python-openstackclient
|
1c22984f9b29ae8ff9bbea26067981e2130ed039
|
78988d1786c0634ee055714910d1e6187f941673
|
refs/heads/master
| 2023-08-28T15:10:05.542862
| 2023-08-26T12:44:20
| 2023-08-26T12:44:20
| 4,170,310
| 286
| 224
|
Apache-2.0
| 2022-09-19T13:29:49
| 2012-04-28T21:07:25
|
Python
|
UTF-8
|
Python
| false
| false
| 5,478
|
py
|
test_aggregate.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstackclient.tests.functional import base
class AggregateTests(base.TestCase):
"""Functional tests for aggregate"""
def test_aggregate_crud(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name1,
fail_ok=True,
)
cmd_output = self.openstack(
'aggregate create ' + '--zone nova ' + '--property a=b ' + name1,
parse_output=True,
)
self.assertEqual(name1, cmd_output['name'])
self.assertEqual('nova', cmd_output['availability_zone'])
self.assertIn('a', cmd_output['properties'])
cmd_output = self.openstack(
'aggregate show ' + name1,
parse_output=True,
)
self.assertEqual(name1, cmd_output['name'])
name2 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name2,
fail_ok=True,
)
cmd_output = self.openstack(
'aggregate create ' + '--zone external ' + name2,
parse_output=True,
)
self.assertEqual(name2, cmd_output['name'])
self.assertEqual('external', cmd_output['availability_zone'])
cmd_output = self.openstack(
'aggregate show ' + name2,
parse_output=True,
)
self.assertEqual(name2, cmd_output['name'])
# Test aggregate set
name3 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name3,
fail_ok=True,
)
raw_output = self.openstack(
'aggregate set '
+ '--name '
+ name3
+ ' '
+ '--zone internal '
+ '--no-property '
+ '--property c=d '
+ name1
)
self.assertOutput('', raw_output)
cmd_output = self.openstack(
'aggregate show ' + name3,
parse_output=True,
)
self.assertEqual(name3, cmd_output['name'])
self.assertEqual('internal', cmd_output['availability_zone'])
self.assertIn('c', cmd_output['properties'])
self.assertNotIn('a', cmd_output['properties'])
# Test aggregate list
cmd_output = self.openstack(
'aggregate list',
parse_output=True,
)
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
# Test aggregate list --long
cmd_output = self.openstack(
'aggregate list --long',
parse_output=True,
)
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
properties = [x['Properties'] for x in cmd_output]
self.assertNotIn({'a': 'b'}, properties)
self.assertIn({'c': 'd'}, properties)
# Test unset
raw_output = self.openstack(
'aggregate unset ' + '--property c ' + name3
)
self.assertOutput('', raw_output)
cmd_output = self.openstack(
'aggregate show ' + name3,
parse_output=True,
)
self.assertNotIn("c='d'", cmd_output['properties'])
# test aggregate delete
del_output = self.openstack('aggregate delete ' + name3 + ' ' + name2)
self.assertOutput('', del_output)
def test_aggregate_add_and_remove_host(self):
"""Test aggregate add and remove host"""
# Get a host
cmd_output = self.openstack(
'host list',
parse_output=True,
)
host_name = cmd_output[0]['Host Name']
# NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames
# are returned as rrr@host or ccc!rrr@host.
if '@' in host_name:
self.skipTest("Skip aggregates in a Nova cells v1 configuration")
name = uuid.uuid4().hex
self.addCleanup(self.openstack, 'aggregate delete ' + name)
self.openstack('aggregate create ' + name)
# Test add host
cmd_output = self.openstack(
'aggregate add host ' + name + ' ' + host_name,
parse_output=True,
)
self.assertIn(host_name, cmd_output['hosts'])
# Test remove host
cmd_output = self.openstack(
'aggregate remove host ' + name + ' ' + host_name,
parse_output=True,
)
self.assertNotIn(host_name, cmd_output['hosts'])
|
e2b40f4f32082cb0a26c95ccfc6909f0671deb7e
|
829a7b38243821a1d3f274952fd7c4e5140c9f40
|
/mahotas/zernike.py
|
7cc5a04264b1fc870a0a2e3079ea800aba143ac5
|
[
"MIT",
"BSL-1.0"
] |
permissive
|
luispedro/mahotas
|
e6f9c1e21acaded80946ebf8b1c0366cbcd91d2f
|
f7edeb22a4b47e6c5200c008a8e7386067d72443
|
refs/heads/master
| 2023-08-23T22:38:05.018281
| 2023-06-09T12:57:00
| 2023-06-09T22:11:37
| 495,613
| 648
| 175
|
NOASSERTION
| 2023-05-26T18:46:28
| 2010-01-31T00:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
zernike.py
|
import warnings
warnings.warn(
'''Use
from mahotas.features import zernike
''', DeprecationWarning)
from mahotas.features.zernike import *
|
e80a4b06d4b352d5227cbd1d8c25eb54ecb8044d
|
fc645876cbd921cef92768f1d73c7c7b97fba673
|
/learntools/game_ai/ex1.py
|
e7191ee1828b6b2996270d5f9e5e0aa63fd08bbb
|
[
"Apache-2.0"
] |
permissive
|
Kaggle/learntools
|
a91b07473f8244a2fda2a6be4f0f65d516ccddea
|
55574bf6512577688c9d0bcde665683903594910
|
refs/heads/master
| 2023-08-31T17:58:12.314620
| 2023-08-30T02:16:17
| 2023-08-30T02:16:17
| 126,507,129
| 447
| 258
|
Apache-2.0
| 2023-08-30T02:16:18
| 2018-03-23T15:51:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,877
|
py
|
ex1.py
|
from learntools.core import *
import numpy as np
import os
class MyConfig(object):
def __init__(self):
self.columns = 7
self.rows = 6
self.inarow = 4
config = MyConfig()
class MyBoard(object):
def __init__(self, board, mark):
self.board = board
self.mark = mark
def flip_mark(board):
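    # Swap the two players' marks (1 -> 2, 2 -> 1); empty cells (0) are
    # left untouched, e.g. [0, 1, 2] -> [0, 2, 1].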
return list(np.where(np.array(board)==2, 1, np.array(board)*2))
def check_column(agent, my_board, true_column):
sel_column = agent(my_board, config)
reshaped_board = np.array(my_board.board).reshape([config.rows,config.columns]).__str__().replace('[', '').replace(']', '').replace('\n ','\n')
assert sel_column == true_column, \
"""For the game board below, the agent has mark {}, and the opponent has mark {}. \nThe agent should have selected column {}, but it selected column {}. \n(_Recall that column indexing starts at 0: so, column 0 is the leftmost column, and column 6 is the rightmost column._)
\n`{}`
""".format(my_board.mark, my_board.mark%2+1, true_column, sel_column, reshaped_board)
pos_diag_board = [0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 1, 2, 0, 0,
0, 0, 1, 2, 2, 0, 0,
0, 0, 2, 1, 2, 0, 0]
pos_diag_col = 1
obs_pos_diag_win_1 = MyBoard(pos_diag_board, 1)
obs_pos_diag_win_2 = MyBoard(flip_mark(pos_diag_board), 2)
obs_pos_diag_block_1 = MyBoard(flip_mark(pos_diag_board), 1)
obs_pos_diag_block_2 = MyBoard(pos_diag_board, 2)
neg_diag_board = [0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0,
0, 0, 2, 1, 0, 0, 0,
0, 0, 2, 2, 1, 0, 0,
0, 0, 1, 1, 2, 0, 0]
neg_diag_col = 5
obs_neg_diag_win_1 = MyBoard(neg_diag_board, 1)
obs_neg_diag_win_2 = MyBoard(flip_mark(neg_diag_board), 2)
obs_neg_diag_block_1 = MyBoard(flip_mark(neg_diag_board), 1)
obs_neg_diag_block_2 = MyBoard(neg_diag_board, 2)
horizontal_board = [0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 2, 2, 0, 0, 0,
0, 2, 1, 1, 1, 0, 0]
horizontal_col = 5
obs_horizontal_win_1 = MyBoard(horizontal_board, 1)
obs_horizontal_win_2 = MyBoard(flip_mark(horizontal_board), 2)
obs_horizontal_block_1 = MyBoard(flip_mark(horizontal_board), 1)
obs_horizontal_block_2 = MyBoard(horizontal_board, 2)
vertical_board = [0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0,
0, 0, 1, 2, 0, 0, 0,
0, 0, 1, 2, 2, 0, 0]
vertical_col = 2
obs_vertical_win_1 = MyBoard(vertical_board, 1)
obs_vertical_win_2 = MyBoard(flip_mark(vertical_board), 2)
obs_vertical_block_1 = MyBoard(flip_mark(vertical_board), 1)
obs_vertical_block_2 = MyBoard(vertical_board, 2)
#################################################################################
class SelectWinning(CodingProblem):
_var = "agent_q1"
_hint = ("Use the `check_winning_move()` function, and set `piece=obs.mark`. You can check if "
"the agent can win the game by dropping its piece in a specific column by supplying the column "
"as the `col` argument to the function.")
_solution = CS(
"""def agent_q1(obs, config):
valid_moves = [col for col in range(config.columns) if obs.board[col] == 0]
for col in valid_moves:
if check_winning_move(obs, config, col, obs.mark):
return col
return random.choice(valid_moves)
""")
def check(self, agent_q1):
check_column(agent_q1, obs_pos_diag_win_1, pos_diag_col)
check_column(agent_q1, obs_pos_diag_win_2, pos_diag_col)
check_column(agent_q1, obs_neg_diag_win_1, neg_diag_col)
check_column(agent_q1, obs_neg_diag_win_2, neg_diag_col)
check_column(agent_q1, obs_horizontal_win_1, horizontal_col)
check_column(agent_q1, obs_horizontal_win_2, horizontal_col)
check_column(agent_q1, obs_vertical_win_1, vertical_col)
check_column(agent_q1, obs_vertical_win_2, vertical_col)
class BlockOpponent(CodingProblem):
_hint = ("Start with the code from the agent you created above. To check if the opponent can "
"win in its next move, use the same `check_winning_move()` function, and set `piece=obs.mark%2+1`.")
_solution = CS(
"""def agent_q2(obs, config):
valid_moves = [col for col in range(config.columns) if obs.board[col] == 0]
for col in valid_moves:
if check_winning_move(obs, config, col, obs.mark):
return col
for col in valid_moves:
if check_winning_move(obs, config, col, obs.mark%2+1):
return col
return random.choice(valid_moves)
""")
_var = 'agent_q2'
def check(self, agent_q2):
# win
check_column(agent_q2, obs_pos_diag_win_1, pos_diag_col)
check_column(agent_q2, obs_pos_diag_win_2, pos_diag_col)
check_column(agent_q2, obs_neg_diag_win_1, neg_diag_col)
check_column(agent_q2, obs_neg_diag_win_2, neg_diag_col)
check_column(agent_q2, obs_horizontal_win_1, horizontal_col)
check_column(agent_q2, obs_horizontal_win_2, horizontal_col)
check_column(agent_q2, obs_vertical_win_1, vertical_col)
check_column(agent_q2, obs_vertical_win_2, vertical_col)
# block
check_column(agent_q2, obs_pos_diag_block_1, pos_diag_col)
check_column(agent_q2, obs_pos_diag_block_2, pos_diag_col)
check_column(agent_q2, obs_neg_diag_block_1, neg_diag_col)
check_column(agent_q2, obs_neg_diag_block_2, neg_diag_col)
check_column(agent_q2, obs_horizontal_block_1, horizontal_col)
check_column(agent_q2, obs_horizontal_block_2, horizontal_col)
check_column(agent_q2, obs_vertical_block_1, vertical_col)
check_column(agent_q2, obs_vertical_block_2, vertical_col)
class WhyNotOptimal(ThoughtExperiment):
board1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1, 1, 0, 0]
board1_shaped = np.array(board1).reshape([config.rows,config.columns]).__str__().replace('[', '').replace(']', '').replace('\n ','\n')
board2 = [2, 1, 2, 2, 2, 0, 2, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2, 2, 2, 0, 2, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2, 2, 2, 0, 2, 1, 2, 1, 1, 2, 0, 1]
board2_shaped = np.array(board2).reshape([config.rows,config.columns]).__str__().replace('[', '').replace(']', '').replace('\n ','\n')
_hint = \
"""\
Consider this board: \n
`{}`\n
or this board: \n
`{}`
""".format(board1_shaped, board2_shaped)
_solution = (
"""The agent can still lose the game, if
- the opponent has set up the board so that it can win in the next move by dropping a disc in any of 2 or more columns, or
- the only move that is available to the agent is one where, once played, the opponent can win in the next move.
""")
class CreateAgentEx1(CodingProblem):
_hint = "Follow the instructions to create an agent."
_solution = "Follow the instructions to create an agent."
_congrats = "Thank you for creating an agent!"
_correct_message = ""
def check(self):
pass
class SubmissionEx1(CodingProblem):
_hint = "Follow the instructions to create a submission file."
_solution = "Follow the instructions to create a submission file."
_congrats = "Thank you for creating a submission file!"
_correct_message = ""
def check(self):
assert os.path.exists("./submission.py"), "You do not yet have a submission file."
qvars = bind_exercises(globals(), [
SelectWinning,
BlockOpponent,
WhyNotOptimal,
CreateAgentEx1,
SubmissionEx1
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
33aa7a3e71edf4f96b6848b4cd87511554e78ec0
|
607dc8df19fc5248f6289cdda97857b5d58ca16f
|
/examples/1_basics/5_continue.py
|
025856fee0059fd0626923a50cc4e07fd718bb61
|
[
"BSD-3-Clause",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
automl/SMAC3
|
7dce243a33023c52d6819deff966f7b502e90ed0
|
541ee7e0383b491b86d1a23dcff669f2efad616d
|
refs/heads/main
| 2023-08-31T17:36:06.067579
| 2023-08-01T13:02:51
| 2023-08-01T13:02:51
| 65,900,469
| 943
| 259
|
NOASSERTION
| 2023-09-11T02:36:57
| 2016-08-17T10:58:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,506
|
py
|
5_continue.py
|
"""
Continue an Optimization
^^^^^^^^^^^^^^^^^^^^^^^^
SMAC can also be continued from a previous run. To do so, it reads in old files (derived from the scenario's name,
output_directory, and seed) and sets the corresponding components. In this example, an optimization of a simple quadratic
function is continued.
First, after creating a scenario with 50 trials, we run SMAC with overwrite=True. This will
overwrite any previous runs (in case the example was called before). We use a custom callback to artificially stop
this first optimization after 10 trials.
Second, we again run the SMAC optimization using the same scenario, but this time with overwrite=False. As
there already is a previous run with the same metadata, this run will be continued until the 50 trials are reached.
"""
from __future__ import annotations
from ConfigSpace import Configuration, ConfigurationSpace, Float
from smac import Callback
from smac import HyperparameterOptimizationFacade as HPOFacade
from smac import Scenario
from smac.main.smbo import SMBO
from smac.runhistory import TrialInfo, TrialValue
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
class StopCallback(Callback):
def __init__(self, stop_after: int):
self._stop_after = stop_after
def on_tell_end(self, smbo: SMBO, info: TrialInfo, value: TrialValue) -> bool | None:
"""Called after the stats are updated and the trial is added to the runhistory. Optionally, returns false
to gracefully stop the optimization.
"""
if smbo.runhistory.finished == self._stop_after:
return False
return None
class QuadraticFunction:
@property
def configspace(self) -> ConfigurationSpace:
cs = ConfigurationSpace(seed=0)
x = Float("x", (-5, 5), default=-5)
cs.add_hyperparameters([x])
return cs
def train(self, config: Configuration, seed: int = 0) -> float:
"""Returns the y value of a quadratic function with a minimum at x=0."""
x = config["x"]
return x * x
if __name__ == "__main__":
model = QuadraticFunction()
# Scenario object specifying the optimization "environment"
scenario = Scenario(model.configspace, deterministic=True, n_trials=50)
stop_after = 10
# Now we use SMAC to find the best hyperparameters
smac = HPOFacade(
scenario,
model.train, # We pass the target function here
callbacks=[StopCallback(stop_after=stop_after)],
        overwrite=True,  # Overwrites any previous results that are inconsistent with the metadata
)
incumbent = smac.optimize()
assert smac.runhistory.finished == stop_after
# Now, we want to continue the optimization
# Make sure, we don't overwrite the last run
smac2 = HPOFacade(
scenario,
model.train,
overwrite=False,
)
# Check whether we get the same incumbent
assert smac.intensifier.get_incumbent() == smac2.intensifier.get_incumbent()
assert smac2.runhistory.finished == stop_after
# And now we finish the optimization
incumbent2 = smac2.optimize()
default_cost = smac.validate(model.configspace.get_default_configuration())
print(f"Default cost: {default_cost}")
incumbent_cost = smac.validate(incumbent)
print(f"Incumbent cost of first run: {incumbent_cost}")
incumbent_cost = smac2.validate(incumbent2)
print(f"Incumbent cost of continued run: {incumbent_cost}")
|
033ee974e119b645a713182eb48bb67548be2910
|
e9acc55fb26cb741dd9ac70562fdea3fc200a531
|
/tests/test_vector.py
|
ac9ff2b161b88ae80f3940ad1504097a1483b9f9
|
[
"MIT"
] |
permissive
|
ESA-PhiLab/OpenSarToolkit
|
3ce9bb89bf87a31905464f511ca170b34ba5673d
|
ecf7327638c5207f37ffc3240d13392bce2f3bdb
|
refs/heads/main
| 2023-07-19T17:40:41.448959
| 2022-05-06T10:44:48
| 2022-05-06T10:44:48
| 158,724,393
| 177
| 51
|
MIT
| 2023-07-06T22:56:57
| 2018-11-22T16:17:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
test_vector.py
|
# from ost.helpers import vector
# test aoi_to_wkt
# aoi = vector.aoi_to_wkt("IRL")
# assert type(aoi) == str and aoi.startswith("POLYGON")
# aoi = vector.aoi_to_wkt("POLYGON((1 2,1 4,3 4,3 2,1 2))")
# assert type(aoi) == str and aoi.startswith("POLYGON")
# aoi = vector.aoi_to_wkt("tests/testdata/test_polygon.geojson")
# assert type(aoi) == str and aoi.startswith("POLYGON")
# test latlon_to_wkt
# lat, lon = "-67", "-61"
# aoi = vector.latlon_to_wkt(lat, lon)
# assert type(aoi) == str and aoi.startswith("POINT")
# aoi = vector.latlon_to_wkt(lat, lon, buffer_degree=1)
# assert type(aoi) == str and aoi.startswith("POLYGON")
# aoi = vector.latlon_to_wkt(lat, lon, buffer_degree=1, envelope=False)
# assert type(aoi) == str and aoi.startswith("POLYGON")
# aoi = vector.latlon_to_wkt(lat, lon, buffer_degree=1, envelope=True)
# assert type(aoi) == str and aoi.startswith("POLYGON")
# aoi = vector.latlon_to_wkt(lat, lon, buffer_meter=1000, envelope=True)
# assert type(aoi) == str and aoi.startswith("POLYGON")
--- file: tsuru_dashboard/__init__.py (repo: tsuru/tsuru-dashboard, license: none) ---
from tsuru_dashboard import apps # NOQA
__version__ = "0.15.7"
--- file: qutip/settings.py (repo: qutip/qutip, license: BSD-3-Clause) ---
"""
This module contains settings for QuTiP graphics, multiprocessing, and
tidyup functionality, etc.
"""
import os
import sys
from ctypes import cdll
import platform
import numpy as np
__all__ = ['settings']
def _blas_info():
config = np.__config__
if hasattr(config, 'blas_ilp64_opt_info'):
blas_info = config.blas_ilp64_opt_info
elif hasattr(config, 'blas_opt_info'):
blas_info = config.blas_opt_info
else:
blas_info = {}
    def _in_libraries(name):
        return any(name in lib for lib in blas_info.get('libraries', []))
    if getattr(config, 'mkl_info', False) or _in_libraries("mkl"):
        blas = 'INTEL MKL'
    elif getattr(config, 'openblas_info', False) or _in_libraries('openblas'):
blas = 'OPENBLAS'
elif '-Wl,Accelerate' in blas_info.get('extra_link_args', []):
blas = 'Accelerate'
else:
blas = 'Generic'
return blas
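    # Note: the branches above rely on numpy.__config__ attributes
    # (blas_ilp64_opt_info / blas_opt_info / mkl_info / openblas_info) that
    # older numpy builds expose; on builds without them (e.g. numpy >= 1.26
    # built with Meson) every hasattr/getattr check fails and the function
    # falls through to the 'Generic' branch.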
def available_cpu_count():
"""
    Get the number of CPUs.
    It tries to get only the number available to qutip.
    """
    import multiprocessing
try:
import psutil
except ImportError:
psutil = None
num_cpu = 0
if 'QUTIP_NUM_PROCESSES' in os.environ:
# We consider QUTIP_NUM_PROCESSES=0 as unset.
num_cpu = int(os.environ['QUTIP_NUM_PROCESSES'])
if num_cpu == 0 and 'SLURM_CPUS_PER_TASK' in os.environ:
num_cpu = int(os.environ['SLURM_CPUS_PER_TASK'])
if num_cpu == 0 and hasattr(os, 'sched_getaffinity'):
num_cpu = len(os.sched_getaffinity(0))
if (
num_cpu == 0
and psutil is not None
and hasattr(psutil.Process(), "cpu_affinity")
):
num_cpu = len(psutil.Process().cpu_affinity())
if num_cpu == 0:
try:
num_cpu = multiprocessing.cpu_count()
except NotImplementedError:
pass
return num_cpu or 1
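# Example: given the lookup order above,
#     QUTIP_NUM_PROCESSES=4 python script.py
# pins the detected count to 4 regardless of SLURM or CPU-affinity settings,
# while QUTIP_NUM_PROCESSES=0 is treated as unset.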
def _find_mkl():
"""
Finds the MKL runtime library for the Anaconda and Intel Python
distributions.
"""
mkl_lib = None
if _blas_info() == 'INTEL MKL':
plat = sys.platform
python_dir = os.path.dirname(sys.executable)
if plat in ['darwin', 'linux2', 'linux']:
python_dir = os.path.dirname(python_dir)
if plat == 'darwin':
lib = '/libmkl_rt.dylib'
elif plat == 'win32':
lib = r'\mkl_rt.dll'
elif plat in ['linux2', 'linux']:
lib = '/libmkl_rt.so'
else:
            raise Exception('Unknown platform.')
if plat in ['darwin', 'linux2', 'linux']:
lib_dir = '/lib'
else:
lib_dir = r'\Library\bin'
# Try in default Anaconda location first
try:
mkl_lib = cdll.LoadLibrary(python_dir+lib_dir+lib)
except Exception:
pass
# Look in Intel Python distro location
if mkl_lib is None:
if plat in ['darwin', 'linux2', 'linux']:
lib_dir = '/ext/lib'
else:
lib_dir = r'\ext\lib'
try:
mkl_lib = \
cdll.LoadLibrary(python_dir + lib_dir + lib)
except Exception:
pass
return mkl_lib
class Settings:
"""
Qutip's settings and options.
"""
def __init__(self):
self._mkl_lib = ""
try:
self.tmproot = os.path.join(os.path.expanduser("~"), '.qutip')
except OSError:
self._tmproot = "."
self.core = None # set in qutip.core.options
self.compile = None # set in qutip.core.coefficient
self._debug = False
self._log_handler = "default"
self._colorblind_safe = False
@property
def has_mkl(self):
""" Whether qutip found an mkl installation. """
return self.mkl_lib is not None
@property
def mkl_lib(self):
""" Location of the mkl installation. """
if self._mkl_lib == "":
self._mkl_lib = _find_mkl()
return _find_mkl()
@property
def ipython(self):
""" Whether qutip is running in ipython. """
try:
__IPYTHON__
return True
except NameError:
return False
@property
def eigh_unsafe(self):
"""
        Whether the `eigh` call is reliable.
        Some BLAS implementations have issues on some operating systems.
"""
from packaging import version as pac_version
import scipy
is_old_scipy = (
pac_version.parse(scipy.__version__) < pac_version.parse("1.5")
)
return (
# macOS OpenBLAS eigh is unstable, see #1288
(_blas_info() == "OPENBLAS" and platform.system() == 'Darwin')
# The combination of scipy<1.5 and MKL causes wrong results when
# calling eigh for big matrices. See #1495, #1491 and #1498.
or (is_old_scipy and (_blas_info() == 'INTEL MKL'))
)
@property
def tmproot(self):
"""
        Location in which qutip places cython string coefficient folders.
The default is "$HOME/.qutip".
Can be updated.
"""
return self._tmproot
@tmproot.setter
def tmproot(self, root):
if not os.path.exists(root):
os.mkdir(root)
self._tmproot = root
@property
def coeffroot(self):
"""
        Location in which qutip saves cython string coefficient files.
Usually "{qutip.settings.tmproot}/qutip_coeffs_X.X".
Can be updated.
"""
return self._coeffroot
@coeffroot.setter
def coeffroot(self, root):
if not os.path.exists(root):
os.mkdir(root)
if root not in sys.path:
sys.path.insert(0, root)
self._coeffroot = root
@property
def coeff_write_ok(self):
""" Whether qutip has write acces to ``qutip.settings.coeffroot``."""
return os.access(self.coeffroot, os.W_OK)
@property
def has_openmp(self):
return False
# We keep this as a reminder for when openmp is restored: see Pull #652
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
@property
def idxint_size(self):
"""
Integer type used by ``CSR`` data.
Sparse ``CSR`` matrices can contain at most ``2**idxint_size``
        non-zero elements.
"""
from .core import data
return data.base.idxint_size
@property
def num_cpus(self):
"""
        Number of CPUs detected.
Use the solver options to control the number of cpus used.
"""
if 'QUTIP_NUM_PROCESSES' in os.environ:
num_cpus = int(os.environ['QUTIP_NUM_PROCESSES'])
else:
num_cpus = available_cpu_count()
os.environ['QUTIP_NUM_PROCESSES'] = str(num_cpus)
return num_cpus
@property
def debug(self):
"""
Debug mode for development.
"""
return self._debug
@debug.setter
def debug(self, value):
self._debug = value
@property
def log_handler(self):
"""
Define whether log handler should be:
- default: switch based on IPython detection
- stream: set up non-propagating StreamHandler
- basic: call basicConfig
- null: leave logging to the user
"""
return self._log_handler
@log_handler.setter
def log_handler(self, value):
self._log_handler = value
@property
def colorblind_safe(self):
"""
Allow for a colorblind mode that uses different colormaps
and plotting options by default.
"""
return self._colorblind_safe
@colorblind_safe.setter
def colorblind_safe(self, value):
self._colorblind_safe = value
def __str__(self):
lines = ["Qutip settings:"]
for attr in self.__dir__():
if not attr.startswith('_') and attr not in ["core", "compile"]:
lines.append(f" {attr}: {self.__getattribute__(attr)}")
lines.append(f" compile: {self.compile.__repr__(full=False)}")
return '\n'.join(lines)
def __repr__(self):
return self.__str__()
settings = Settings()
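A minimal usage sketch for the module above, assuming a regular qutip installation where this `settings` instance is re-exported as `qutip.settings`. Only attributes defined in this file are touched; the cache path is an arbitrary example, not a qutip default.

    from qutip import settings

    print(settings.num_cpus)     # detected CPU count (also cached in the env var)
    print(settings.has_mkl)      # True if an MKL runtime library was found
    settings.tmproot = "/tmp/qutip-cache"   # the setter creates the directory
    settings.colorblind_safe = True         # prefer colorblind-safe colormaps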
--- file: thrift/compiler/test/fixtures/py-kwargs/gen-py/module/ttypes.py (repo: facebook/fbthrift, license: Apache-2.0) ---
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'Foo']
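# Note: the read()/write() methods below follow the usual Thrift-generated
# pattern: try the C-accelerated `fastproto` codec first (binary protocol,
# then compact), and fall back to the explicit per-field loop otherwise.
# field1..field256 are all optional i32 fields, which is why the branches
# are identical apart from the field id.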
class Foo:
r"""
Attributes:
- field1
- field2
- field3
- field4
- field5
- field6
- field7
- field8
- field9
- field10
- field11
- field12
- field13
- field14
- field15
- field16
- field17
- field18
- field19
- field20
- field21
- field22
- field23
- field24
- field25
- field26
- field27
- field28
- field29
- field30
- field31
- field32
- field33
- field34
- field35
- field36
- field37
- field38
- field39
- field40
- field41
- field42
- field43
- field44
- field45
- field46
- field47
- field48
- field49
- field50
- field51
- field52
- field53
- field54
- field55
- field56
- field57
- field58
- field59
- field60
- field61
- field62
- field63
- field64
- field65
- field66
- field67
- field68
- field69
- field70
- field71
- field72
- field73
- field74
- field75
- field76
- field77
- field78
- field79
- field80
- field81
- field82
- field83
- field84
- field85
- field86
- field87
- field88
- field89
- field90
- field91
- field92
- field93
- field94
- field95
- field96
- field97
- field98
- field99
- field100
- field101
- field102
- field103
- field104
- field105
- field106
- field107
- field108
- field109
- field110
- field111
- field112
- field113
- field114
- field115
- field116
- field117
- field118
- field119
- field120
- field121
- field122
- field123
- field124
- field125
- field126
- field127
- field128
- field129
- field130
- field131
- field132
- field133
- field134
- field135
- field136
- field137
- field138
- field139
- field140
- field141
- field142
- field143
- field144
- field145
- field146
- field147
- field148
- field149
- field150
- field151
- field152
- field153
- field154
- field155
- field156
- field157
- field158
- field159
- field160
- field161
- field162
- field163
- field164
- field165
- field166
- field167
- field168
- field169
- field170
- field171
- field172
- field173
- field174
- field175
- field176
- field177
- field178
- field179
- field180
- field181
- field182
- field183
- field184
- field185
- field186
- field187
- field188
- field189
- field190
- field191
- field192
- field193
- field194
- field195
- field196
- field197
- field198
- field199
- field200
- field201
- field202
- field203
- field204
- field205
- field206
- field207
- field208
- field209
- field210
- field211
- field212
- field213
- field214
- field215
- field216
- field217
- field218
- field219
- field220
- field221
- field222
- field223
- field224
- field225
- field226
- field227
- field228
- field229
- field230
- field231
- field232
- field233
- field234
- field235
- field236
- field237
- field238
- field239
- field240
- field241
- field242
- field243
- field244
- field245
- field246
- field247
- field248
- field249
- field250
- field251
- field252
- field253
- field254
- field255
- field256
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.field1 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.field2 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.field3 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.field4 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.field5 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.field6 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.field7 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I32:
self.field8 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.field9 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.field10 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.I32:
self.field11 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.I32:
self.field12 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.I32:
self.field13 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.I32:
self.field14 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 15:
if ftype == TType.I32:
self.field15 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 16:
if ftype == TType.I32:
self.field16 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 17:
if ftype == TType.I32:
self.field17 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 18:
if ftype == TType.I32:
self.field18 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 19:
if ftype == TType.I32:
self.field19 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 20:
if ftype == TType.I32:
self.field20 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 21:
if ftype == TType.I32:
self.field21 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 22:
if ftype == TType.I32:
self.field22 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 23:
if ftype == TType.I32:
self.field23 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 24:
if ftype == TType.I32:
self.field24 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 25:
if ftype == TType.I32:
self.field25 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 26:
if ftype == TType.I32:
self.field26 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 27:
if ftype == TType.I32:
self.field27 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 28:
if ftype == TType.I32:
self.field28 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 29:
if ftype == TType.I32:
self.field29 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 30:
if ftype == TType.I32:
self.field30 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 31:
if ftype == TType.I32:
self.field31 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 32:
if ftype == TType.I32:
self.field32 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 33:
if ftype == TType.I32:
self.field33 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 34:
if ftype == TType.I32:
self.field34 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 35:
if ftype == TType.I32:
self.field35 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 36:
if ftype == TType.I32:
self.field36 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 37:
if ftype == TType.I32:
self.field37 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 38:
if ftype == TType.I32:
self.field38 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 39:
if ftype == TType.I32:
self.field39 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 40:
if ftype == TType.I32:
self.field40 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 41:
if ftype == TType.I32:
self.field41 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 42:
if ftype == TType.I32:
self.field42 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 43:
if ftype == TType.I32:
self.field43 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 44:
if ftype == TType.I32:
self.field44 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 45:
if ftype == TType.I32:
self.field45 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 46:
if ftype == TType.I32:
self.field46 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 47:
if ftype == TType.I32:
self.field47 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 48:
if ftype == TType.I32:
self.field48 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 49:
if ftype == TType.I32:
self.field49 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 50:
if ftype == TType.I32:
self.field50 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 51:
if ftype == TType.I32:
self.field51 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 52:
if ftype == TType.I32:
self.field52 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 53:
if ftype == TType.I32:
self.field53 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 54:
if ftype == TType.I32:
self.field54 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 55:
if ftype == TType.I32:
self.field55 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 56:
if ftype == TType.I32:
self.field56 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 57:
if ftype == TType.I32:
self.field57 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 58:
if ftype == TType.I32:
self.field58 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 59:
if ftype == TType.I32:
self.field59 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 60:
if ftype == TType.I32:
self.field60 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 61:
if ftype == TType.I32:
self.field61 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 62:
if ftype == TType.I32:
self.field62 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 63:
if ftype == TType.I32:
self.field63 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 64:
if ftype == TType.I32:
self.field64 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 65:
if ftype == TType.I32:
self.field65 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 66:
if ftype == TType.I32:
self.field66 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 67:
if ftype == TType.I32:
self.field67 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 68:
if ftype == TType.I32:
self.field68 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 69:
if ftype == TType.I32:
self.field69 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 70:
if ftype == TType.I32:
self.field70 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 71:
if ftype == TType.I32:
self.field71 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 72:
if ftype == TType.I32:
self.field72 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 73:
if ftype == TType.I32:
self.field73 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 74:
if ftype == TType.I32:
self.field74 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 75:
if ftype == TType.I32:
self.field75 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 76:
if ftype == TType.I32:
self.field76 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 77:
if ftype == TType.I32:
self.field77 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 78:
if ftype == TType.I32:
self.field78 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 79:
if ftype == TType.I32:
self.field79 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 80:
if ftype == TType.I32:
self.field80 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 81:
if ftype == TType.I32:
self.field81 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 82:
if ftype == TType.I32:
self.field82 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 83:
if ftype == TType.I32:
self.field83 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 84:
if ftype == TType.I32:
self.field84 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 85:
if ftype == TType.I32:
self.field85 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 86:
if ftype == TType.I32:
self.field86 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 87:
if ftype == TType.I32:
self.field87 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 88:
if ftype == TType.I32:
self.field88 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 89:
if ftype == TType.I32:
self.field89 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 90:
if ftype == TType.I32:
self.field90 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 91:
if ftype == TType.I32:
self.field91 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 92:
if ftype == TType.I32:
self.field92 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 93:
if ftype == TType.I32:
self.field93 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 94:
if ftype == TType.I32:
self.field94 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 95:
if ftype == TType.I32:
self.field95 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 96:
if ftype == TType.I32:
self.field96 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 97:
if ftype == TType.I32:
self.field97 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 98:
if ftype == TType.I32:
self.field98 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 99:
if ftype == TType.I32:
self.field99 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 100:
if ftype == TType.I32:
self.field100 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 101:
if ftype == TType.I32:
self.field101 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 102:
if ftype == TType.I32:
self.field102 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 103:
if ftype == TType.I32:
self.field103 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 104:
if ftype == TType.I32:
self.field104 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 105:
if ftype == TType.I32:
self.field105 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 106:
if ftype == TType.I32:
self.field106 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 107:
if ftype == TType.I32:
self.field107 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 108:
if ftype == TType.I32:
self.field108 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 109:
if ftype == TType.I32:
self.field109 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 110:
if ftype == TType.I32:
self.field110 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 111:
if ftype == TType.I32:
self.field111 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 112:
if ftype == TType.I32:
self.field112 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 113:
if ftype == TType.I32:
self.field113 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 114:
if ftype == TType.I32:
self.field114 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 115:
if ftype == TType.I32:
self.field115 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 116:
if ftype == TType.I32:
self.field116 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 117:
if ftype == TType.I32:
self.field117 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 118:
if ftype == TType.I32:
self.field118 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 119:
if ftype == TType.I32:
self.field119 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 120:
if ftype == TType.I32:
self.field120 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 121:
if ftype == TType.I32:
self.field121 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 122:
if ftype == TType.I32:
self.field122 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 123:
if ftype == TType.I32:
self.field123 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 124:
if ftype == TType.I32:
self.field124 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 125:
if ftype == TType.I32:
self.field125 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 126:
if ftype == TType.I32:
self.field126 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 127:
if ftype == TType.I32:
self.field127 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 128:
if ftype == TType.I32:
self.field128 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 129:
if ftype == TType.I32:
self.field129 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 130:
if ftype == TType.I32:
self.field130 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 131:
if ftype == TType.I32:
self.field131 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 132:
if ftype == TType.I32:
self.field132 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 133:
if ftype == TType.I32:
self.field133 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 134:
if ftype == TType.I32:
self.field134 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 135:
if ftype == TType.I32:
self.field135 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 136:
if ftype == TType.I32:
self.field136 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 137:
if ftype == TType.I32:
self.field137 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 138:
if ftype == TType.I32:
self.field138 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 139:
if ftype == TType.I32:
self.field139 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 140:
if ftype == TType.I32:
self.field140 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 141:
if ftype == TType.I32:
self.field141 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 142:
if ftype == TType.I32:
self.field142 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 143:
if ftype == TType.I32:
self.field143 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 144:
if ftype == TType.I32:
self.field144 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 145:
if ftype == TType.I32:
self.field145 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 146:
if ftype == TType.I32:
self.field146 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 147:
if ftype == TType.I32:
self.field147 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 148:
if ftype == TType.I32:
self.field148 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 149:
if ftype == TType.I32:
self.field149 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 150:
if ftype == TType.I32:
self.field150 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 151:
if ftype == TType.I32:
self.field151 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 152:
if ftype == TType.I32:
self.field152 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 153:
if ftype == TType.I32:
self.field153 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 154:
if ftype == TType.I32:
self.field154 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 155:
if ftype == TType.I32:
self.field155 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 156:
if ftype == TType.I32:
self.field156 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 157:
if ftype == TType.I32:
self.field157 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 158:
if ftype == TType.I32:
self.field158 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 159:
if ftype == TType.I32:
self.field159 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 160:
if ftype == TType.I32:
self.field160 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 161:
if ftype == TType.I32:
self.field161 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 162:
if ftype == TType.I32:
self.field162 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 163:
if ftype == TType.I32:
self.field163 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 164:
if ftype == TType.I32:
self.field164 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 165:
if ftype == TType.I32:
self.field165 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 166:
if ftype == TType.I32:
self.field166 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 167:
if ftype == TType.I32:
self.field167 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 168:
if ftype == TType.I32:
self.field168 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 169:
if ftype == TType.I32:
self.field169 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 170:
if ftype == TType.I32:
self.field170 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 171:
if ftype == TType.I32:
self.field171 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 172:
if ftype == TType.I32:
self.field172 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 173:
if ftype == TType.I32:
self.field173 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 174:
if ftype == TType.I32:
self.field174 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 175:
if ftype == TType.I32:
self.field175 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 176:
if ftype == TType.I32:
self.field176 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 177:
if ftype == TType.I32:
self.field177 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 178:
if ftype == TType.I32:
self.field178 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 179:
if ftype == TType.I32:
self.field179 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 180:
if ftype == TType.I32:
self.field180 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 181:
if ftype == TType.I32:
self.field181 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 182:
if ftype == TType.I32:
self.field182 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 183:
if ftype == TType.I32:
self.field183 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 184:
if ftype == TType.I32:
self.field184 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 185:
if ftype == TType.I32:
self.field185 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 186:
if ftype == TType.I32:
self.field186 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 187:
if ftype == TType.I32:
self.field187 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 188:
if ftype == TType.I32:
self.field188 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 189:
if ftype == TType.I32:
self.field189 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 190:
if ftype == TType.I32:
self.field190 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 191:
if ftype == TType.I32:
self.field191 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 192:
if ftype == TType.I32:
self.field192 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 193:
if ftype == TType.I32:
self.field193 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 194:
if ftype == TType.I32:
self.field194 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 195:
if ftype == TType.I32:
self.field195 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 196:
if ftype == TType.I32:
self.field196 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 197:
if ftype == TType.I32:
self.field197 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 198:
if ftype == TType.I32:
self.field198 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 199:
if ftype == TType.I32:
self.field199 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 200:
if ftype == TType.I32:
self.field200 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 201:
if ftype == TType.I32:
self.field201 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 202:
if ftype == TType.I32:
self.field202 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 203:
if ftype == TType.I32:
self.field203 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 204:
if ftype == TType.I32:
self.field204 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 205:
if ftype == TType.I32:
self.field205 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 206:
if ftype == TType.I32:
self.field206 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 207:
if ftype == TType.I32:
self.field207 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 208:
if ftype == TType.I32:
self.field208 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 209:
if ftype == TType.I32:
self.field209 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 210:
if ftype == TType.I32:
self.field210 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 211:
if ftype == TType.I32:
self.field211 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 212:
if ftype == TType.I32:
self.field212 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 213:
if ftype == TType.I32:
self.field213 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 214:
if ftype == TType.I32:
self.field214 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 215:
if ftype == TType.I32:
self.field215 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 216:
if ftype == TType.I32:
self.field216 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 217:
if ftype == TType.I32:
self.field217 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 218:
if ftype == TType.I32:
self.field218 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 219:
if ftype == TType.I32:
self.field219 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 220:
if ftype == TType.I32:
self.field220 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 221:
if ftype == TType.I32:
self.field221 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 222:
if ftype == TType.I32:
self.field222 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 223:
if ftype == TType.I32:
self.field223 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 224:
if ftype == TType.I32:
self.field224 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 225:
if ftype == TType.I32:
self.field225 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 226:
if ftype == TType.I32:
self.field226 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 227:
if ftype == TType.I32:
self.field227 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 228:
if ftype == TType.I32:
self.field228 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 229:
if ftype == TType.I32:
self.field229 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 230:
if ftype == TType.I32:
self.field230 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 231:
if ftype == TType.I32:
self.field231 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 232:
if ftype == TType.I32:
self.field232 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 233:
if ftype == TType.I32:
self.field233 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 234:
if ftype == TType.I32:
self.field234 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 235:
if ftype == TType.I32:
self.field235 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 236:
if ftype == TType.I32:
self.field236 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 237:
if ftype == TType.I32:
self.field237 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 238:
if ftype == TType.I32:
self.field238 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 239:
if ftype == TType.I32:
self.field239 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 240:
if ftype == TType.I32:
self.field240 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 241:
if ftype == TType.I32:
self.field241 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 242:
if ftype == TType.I32:
self.field242 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 243:
if ftype == TType.I32:
self.field243 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 244:
if ftype == TType.I32:
self.field244 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 245:
if ftype == TType.I32:
self.field245 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 246:
if ftype == TType.I32:
self.field246 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 247:
if ftype == TType.I32:
self.field247 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 248:
if ftype == TType.I32:
self.field248 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 249:
if ftype == TType.I32:
self.field249 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 250:
if ftype == TType.I32:
self.field250 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 251:
if ftype == TType.I32:
self.field251 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 252:
if ftype == TType.I32:
self.field252 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 253:
if ftype == TType.I32:
self.field253 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 254:
if ftype == TType.I32:
self.field254 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 255:
if ftype == TType.I32:
self.field255 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 256:
if ftype == TType.I32:
self.field256 = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('Foo')
if self.field1 != None:
oprot.writeFieldBegin('field1', TType.I32, 1)
oprot.writeI32(self.field1)
oprot.writeFieldEnd()
if self.field2 != None:
oprot.writeFieldBegin('field2', TType.I32, 2)
oprot.writeI32(self.field2)
oprot.writeFieldEnd()
if self.field3 != None:
oprot.writeFieldBegin('field3', TType.I32, 3)
oprot.writeI32(self.field3)
oprot.writeFieldEnd()
if self.field4 != None:
oprot.writeFieldBegin('field4', TType.I32, 4)
oprot.writeI32(self.field4)
oprot.writeFieldEnd()
if self.field5 != None:
oprot.writeFieldBegin('field5', TType.I32, 5)
oprot.writeI32(self.field5)
oprot.writeFieldEnd()
if self.field6 != None:
oprot.writeFieldBegin('field6', TType.I32, 6)
oprot.writeI32(self.field6)
oprot.writeFieldEnd()
if self.field7 != None:
oprot.writeFieldBegin('field7', TType.I32, 7)
oprot.writeI32(self.field7)
oprot.writeFieldEnd()
if self.field8 != None:
oprot.writeFieldBegin('field8', TType.I32, 8)
oprot.writeI32(self.field8)
oprot.writeFieldEnd()
if self.field9 != None:
oprot.writeFieldBegin('field9', TType.I32, 9)
oprot.writeI32(self.field9)
oprot.writeFieldEnd()
if self.field10 != None:
oprot.writeFieldBegin('field10', TType.I32, 10)
oprot.writeI32(self.field10)
oprot.writeFieldEnd()
if self.field11 != None:
oprot.writeFieldBegin('field11', TType.I32, 11)
oprot.writeI32(self.field11)
oprot.writeFieldEnd()
if self.field12 != None:
oprot.writeFieldBegin('field12', TType.I32, 12)
oprot.writeI32(self.field12)
oprot.writeFieldEnd()
if self.field13 != None:
oprot.writeFieldBegin('field13', TType.I32, 13)
oprot.writeI32(self.field13)
oprot.writeFieldEnd()
if self.field14 != None:
oprot.writeFieldBegin('field14', TType.I32, 14)
oprot.writeI32(self.field14)
oprot.writeFieldEnd()
if self.field15 != None:
oprot.writeFieldBegin('field15', TType.I32, 15)
oprot.writeI32(self.field15)
oprot.writeFieldEnd()
if self.field16 != None:
oprot.writeFieldBegin('field16', TType.I32, 16)
oprot.writeI32(self.field16)
oprot.writeFieldEnd()
if self.field17 != None:
oprot.writeFieldBegin('field17', TType.I32, 17)
oprot.writeI32(self.field17)
oprot.writeFieldEnd()
if self.field18 != None:
oprot.writeFieldBegin('field18', TType.I32, 18)
oprot.writeI32(self.field18)
oprot.writeFieldEnd()
if self.field19 != None:
oprot.writeFieldBegin('field19', TType.I32, 19)
oprot.writeI32(self.field19)
oprot.writeFieldEnd()
if self.field20 != None:
oprot.writeFieldBegin('field20', TType.I32, 20)
oprot.writeI32(self.field20)
oprot.writeFieldEnd()
if self.field21 != None:
oprot.writeFieldBegin('field21', TType.I32, 21)
oprot.writeI32(self.field21)
oprot.writeFieldEnd()
if self.field22 != None:
oprot.writeFieldBegin('field22', TType.I32, 22)
oprot.writeI32(self.field22)
oprot.writeFieldEnd()
if self.field23 != None:
oprot.writeFieldBegin('field23', TType.I32, 23)
oprot.writeI32(self.field23)
oprot.writeFieldEnd()
if self.field24 != None:
oprot.writeFieldBegin('field24', TType.I32, 24)
oprot.writeI32(self.field24)
oprot.writeFieldEnd()
if self.field25 != None:
oprot.writeFieldBegin('field25', TType.I32, 25)
oprot.writeI32(self.field25)
oprot.writeFieldEnd()
if self.field26 != None:
oprot.writeFieldBegin('field26', TType.I32, 26)
oprot.writeI32(self.field26)
oprot.writeFieldEnd()
if self.field27 != None:
oprot.writeFieldBegin('field27', TType.I32, 27)
oprot.writeI32(self.field27)
oprot.writeFieldEnd()
if self.field28 != None:
oprot.writeFieldBegin('field28', TType.I32, 28)
oprot.writeI32(self.field28)
oprot.writeFieldEnd()
if self.field29 != None:
oprot.writeFieldBegin('field29', TType.I32, 29)
oprot.writeI32(self.field29)
oprot.writeFieldEnd()
if self.field30 != None:
oprot.writeFieldBegin('field30', TType.I32, 30)
oprot.writeI32(self.field30)
oprot.writeFieldEnd()
if self.field31 != None:
oprot.writeFieldBegin('field31', TType.I32, 31)
oprot.writeI32(self.field31)
oprot.writeFieldEnd()
if self.field32 != None:
oprot.writeFieldBegin('field32', TType.I32, 32)
oprot.writeI32(self.field32)
oprot.writeFieldEnd()
if self.field33 != None:
oprot.writeFieldBegin('field33', TType.I32, 33)
oprot.writeI32(self.field33)
oprot.writeFieldEnd()
if self.field34 != None:
oprot.writeFieldBegin('field34', TType.I32, 34)
oprot.writeI32(self.field34)
oprot.writeFieldEnd()
if self.field35 != None:
oprot.writeFieldBegin('field35', TType.I32, 35)
oprot.writeI32(self.field35)
oprot.writeFieldEnd()
if self.field36 != None:
oprot.writeFieldBegin('field36', TType.I32, 36)
oprot.writeI32(self.field36)
oprot.writeFieldEnd()
if self.field37 != None:
oprot.writeFieldBegin('field37', TType.I32, 37)
oprot.writeI32(self.field37)
oprot.writeFieldEnd()
if self.field38 != None:
oprot.writeFieldBegin('field38', TType.I32, 38)
oprot.writeI32(self.field38)
oprot.writeFieldEnd()
if self.field39 != None:
oprot.writeFieldBegin('field39', TType.I32, 39)
oprot.writeI32(self.field39)
oprot.writeFieldEnd()
if self.field40 != None:
oprot.writeFieldBegin('field40', TType.I32, 40)
oprot.writeI32(self.field40)
oprot.writeFieldEnd()
if self.field41 != None:
oprot.writeFieldBegin('field41', TType.I32, 41)
oprot.writeI32(self.field41)
oprot.writeFieldEnd()
if self.field42 != None:
oprot.writeFieldBegin('field42', TType.I32, 42)
oprot.writeI32(self.field42)
oprot.writeFieldEnd()
if self.field43 != None:
oprot.writeFieldBegin('field43', TType.I32, 43)
oprot.writeI32(self.field43)
oprot.writeFieldEnd()
if self.field44 != None:
oprot.writeFieldBegin('field44', TType.I32, 44)
oprot.writeI32(self.field44)
oprot.writeFieldEnd()
if self.field45 != None:
oprot.writeFieldBegin('field45', TType.I32, 45)
oprot.writeI32(self.field45)
oprot.writeFieldEnd()
if self.field46 != None:
oprot.writeFieldBegin('field46', TType.I32, 46)
oprot.writeI32(self.field46)
oprot.writeFieldEnd()
if self.field47 != None:
oprot.writeFieldBegin('field47', TType.I32, 47)
oprot.writeI32(self.field47)
oprot.writeFieldEnd()
if self.field48 != None:
oprot.writeFieldBegin('field48', TType.I32, 48)
oprot.writeI32(self.field48)
oprot.writeFieldEnd()
if self.field49 != None:
oprot.writeFieldBegin('field49', TType.I32, 49)
oprot.writeI32(self.field49)
oprot.writeFieldEnd()
if self.field50 != None:
oprot.writeFieldBegin('field50', TType.I32, 50)
oprot.writeI32(self.field50)
oprot.writeFieldEnd()
if self.field51 != None:
oprot.writeFieldBegin('field51', TType.I32, 51)
oprot.writeI32(self.field51)
oprot.writeFieldEnd()
if self.field52 != None:
oprot.writeFieldBegin('field52', TType.I32, 52)
oprot.writeI32(self.field52)
oprot.writeFieldEnd()
if self.field53 != None:
oprot.writeFieldBegin('field53', TType.I32, 53)
oprot.writeI32(self.field53)
oprot.writeFieldEnd()
if self.field54 != None:
oprot.writeFieldBegin('field54', TType.I32, 54)
oprot.writeI32(self.field54)
oprot.writeFieldEnd()
if self.field55 != None:
oprot.writeFieldBegin('field55', TType.I32, 55)
oprot.writeI32(self.field55)
oprot.writeFieldEnd()
if self.field56 != None:
oprot.writeFieldBegin('field56', TType.I32, 56)
oprot.writeI32(self.field56)
oprot.writeFieldEnd()
if self.field57 != None:
oprot.writeFieldBegin('field57', TType.I32, 57)
oprot.writeI32(self.field57)
oprot.writeFieldEnd()
if self.field58 != None:
oprot.writeFieldBegin('field58', TType.I32, 58)
oprot.writeI32(self.field58)
oprot.writeFieldEnd()
if self.field59 != None:
oprot.writeFieldBegin('field59', TType.I32, 59)
oprot.writeI32(self.field59)
oprot.writeFieldEnd()
if self.field60 != None:
oprot.writeFieldBegin('field60', TType.I32, 60)
oprot.writeI32(self.field60)
oprot.writeFieldEnd()
if self.field61 != None:
oprot.writeFieldBegin('field61', TType.I32, 61)
oprot.writeI32(self.field61)
oprot.writeFieldEnd()
if self.field62 != None:
oprot.writeFieldBegin('field62', TType.I32, 62)
oprot.writeI32(self.field62)
oprot.writeFieldEnd()
if self.field63 != None:
oprot.writeFieldBegin('field63', TType.I32, 63)
oprot.writeI32(self.field63)
oprot.writeFieldEnd()
if self.field64 != None:
oprot.writeFieldBegin('field64', TType.I32, 64)
oprot.writeI32(self.field64)
oprot.writeFieldEnd()
if self.field65 != None:
oprot.writeFieldBegin('field65', TType.I32, 65)
oprot.writeI32(self.field65)
oprot.writeFieldEnd()
if self.field66 != None:
oprot.writeFieldBegin('field66', TType.I32, 66)
oprot.writeI32(self.field66)
oprot.writeFieldEnd()
if self.field67 != None:
oprot.writeFieldBegin('field67', TType.I32, 67)
oprot.writeI32(self.field67)
oprot.writeFieldEnd()
if self.field68 != None:
oprot.writeFieldBegin('field68', TType.I32, 68)
oprot.writeI32(self.field68)
oprot.writeFieldEnd()
if self.field69 != None:
oprot.writeFieldBegin('field69', TType.I32, 69)
oprot.writeI32(self.field69)
oprot.writeFieldEnd()
if self.field70 != None:
oprot.writeFieldBegin('field70', TType.I32, 70)
oprot.writeI32(self.field70)
oprot.writeFieldEnd()
if self.field71 != None:
oprot.writeFieldBegin('field71', TType.I32, 71)
oprot.writeI32(self.field71)
oprot.writeFieldEnd()
if self.field72 != None:
oprot.writeFieldBegin('field72', TType.I32, 72)
oprot.writeI32(self.field72)
oprot.writeFieldEnd()
if self.field73 != None:
oprot.writeFieldBegin('field73', TType.I32, 73)
oprot.writeI32(self.field73)
oprot.writeFieldEnd()
if self.field74 != None:
oprot.writeFieldBegin('field74', TType.I32, 74)
oprot.writeI32(self.field74)
oprot.writeFieldEnd()
if self.field75 != None:
oprot.writeFieldBegin('field75', TType.I32, 75)
oprot.writeI32(self.field75)
oprot.writeFieldEnd()
if self.field76 != None:
oprot.writeFieldBegin('field76', TType.I32, 76)
oprot.writeI32(self.field76)
oprot.writeFieldEnd()
if self.field77 != None:
oprot.writeFieldBegin('field77', TType.I32, 77)
oprot.writeI32(self.field77)
oprot.writeFieldEnd()
if self.field78 != None:
oprot.writeFieldBegin('field78', TType.I32, 78)
oprot.writeI32(self.field78)
oprot.writeFieldEnd()
if self.field79 != None:
oprot.writeFieldBegin('field79', TType.I32, 79)
oprot.writeI32(self.field79)
oprot.writeFieldEnd()
if self.field80 != None:
oprot.writeFieldBegin('field80', TType.I32, 80)
oprot.writeI32(self.field80)
oprot.writeFieldEnd()
if self.field81 != None:
oprot.writeFieldBegin('field81', TType.I32, 81)
oprot.writeI32(self.field81)
oprot.writeFieldEnd()
if self.field82 != None:
oprot.writeFieldBegin('field82', TType.I32, 82)
oprot.writeI32(self.field82)
oprot.writeFieldEnd()
if self.field83 != None:
oprot.writeFieldBegin('field83', TType.I32, 83)
oprot.writeI32(self.field83)
oprot.writeFieldEnd()
if self.field84 != None:
oprot.writeFieldBegin('field84', TType.I32, 84)
oprot.writeI32(self.field84)
oprot.writeFieldEnd()
if self.field85 != None:
oprot.writeFieldBegin('field85', TType.I32, 85)
oprot.writeI32(self.field85)
oprot.writeFieldEnd()
if self.field86 != None:
oprot.writeFieldBegin('field86', TType.I32, 86)
oprot.writeI32(self.field86)
oprot.writeFieldEnd()
if self.field87 != None:
oprot.writeFieldBegin('field87', TType.I32, 87)
oprot.writeI32(self.field87)
oprot.writeFieldEnd()
if self.field88 != None:
oprot.writeFieldBegin('field88', TType.I32, 88)
oprot.writeI32(self.field88)
oprot.writeFieldEnd()
if self.field89 != None:
oprot.writeFieldBegin('field89', TType.I32, 89)
oprot.writeI32(self.field89)
oprot.writeFieldEnd()
if self.field90 != None:
oprot.writeFieldBegin('field90', TType.I32, 90)
oprot.writeI32(self.field90)
oprot.writeFieldEnd()
if self.field91 != None:
oprot.writeFieldBegin('field91', TType.I32, 91)
oprot.writeI32(self.field91)
oprot.writeFieldEnd()
if self.field92 != None:
oprot.writeFieldBegin('field92', TType.I32, 92)
oprot.writeI32(self.field92)
oprot.writeFieldEnd()
if self.field93 != None:
oprot.writeFieldBegin('field93', TType.I32, 93)
oprot.writeI32(self.field93)
oprot.writeFieldEnd()
if self.field94 != None:
oprot.writeFieldBegin('field94', TType.I32, 94)
oprot.writeI32(self.field94)
oprot.writeFieldEnd()
if self.field95 != None:
oprot.writeFieldBegin('field95', TType.I32, 95)
oprot.writeI32(self.field95)
oprot.writeFieldEnd()
if self.field96 != None:
oprot.writeFieldBegin('field96', TType.I32, 96)
oprot.writeI32(self.field96)
oprot.writeFieldEnd()
if self.field97 != None:
oprot.writeFieldBegin('field97', TType.I32, 97)
oprot.writeI32(self.field97)
oprot.writeFieldEnd()
if self.field98 != None:
oprot.writeFieldBegin('field98', TType.I32, 98)
oprot.writeI32(self.field98)
oprot.writeFieldEnd()
if self.field99 != None:
oprot.writeFieldBegin('field99', TType.I32, 99)
oprot.writeI32(self.field99)
oprot.writeFieldEnd()
if self.field100 != None:
oprot.writeFieldBegin('field100', TType.I32, 100)
oprot.writeI32(self.field100)
oprot.writeFieldEnd()
if self.field101 != None:
oprot.writeFieldBegin('field101', TType.I32, 101)
oprot.writeI32(self.field101)
oprot.writeFieldEnd()
if self.field102 != None:
oprot.writeFieldBegin('field102', TType.I32, 102)
oprot.writeI32(self.field102)
oprot.writeFieldEnd()
if self.field103 != None:
oprot.writeFieldBegin('field103', TType.I32, 103)
oprot.writeI32(self.field103)
oprot.writeFieldEnd()
if self.field104 != None:
oprot.writeFieldBegin('field104', TType.I32, 104)
oprot.writeI32(self.field104)
oprot.writeFieldEnd()
if self.field105 != None:
oprot.writeFieldBegin('field105', TType.I32, 105)
oprot.writeI32(self.field105)
oprot.writeFieldEnd()
if self.field106 != None:
oprot.writeFieldBegin('field106', TType.I32, 106)
oprot.writeI32(self.field106)
oprot.writeFieldEnd()
if self.field107 != None:
oprot.writeFieldBegin('field107', TType.I32, 107)
oprot.writeI32(self.field107)
oprot.writeFieldEnd()
if self.field108 != None:
oprot.writeFieldBegin('field108', TType.I32, 108)
oprot.writeI32(self.field108)
oprot.writeFieldEnd()
if self.field109 != None:
oprot.writeFieldBegin('field109', TType.I32, 109)
oprot.writeI32(self.field109)
oprot.writeFieldEnd()
if self.field110 != None:
oprot.writeFieldBegin('field110', TType.I32, 110)
oprot.writeI32(self.field110)
oprot.writeFieldEnd()
if self.field111 != None:
oprot.writeFieldBegin('field111', TType.I32, 111)
oprot.writeI32(self.field111)
oprot.writeFieldEnd()
if self.field112 != None:
oprot.writeFieldBegin('field112', TType.I32, 112)
oprot.writeI32(self.field112)
oprot.writeFieldEnd()
if self.field113 != None:
oprot.writeFieldBegin('field113', TType.I32, 113)
oprot.writeI32(self.field113)
oprot.writeFieldEnd()
if self.field114 != None:
oprot.writeFieldBegin('field114', TType.I32, 114)
oprot.writeI32(self.field114)
oprot.writeFieldEnd()
if self.field115 != None:
oprot.writeFieldBegin('field115', TType.I32, 115)
oprot.writeI32(self.field115)
oprot.writeFieldEnd()
if self.field116 != None:
oprot.writeFieldBegin('field116', TType.I32, 116)
oprot.writeI32(self.field116)
oprot.writeFieldEnd()
if self.field117 != None:
oprot.writeFieldBegin('field117', TType.I32, 117)
oprot.writeI32(self.field117)
oprot.writeFieldEnd()
if self.field118 != None:
oprot.writeFieldBegin('field118', TType.I32, 118)
oprot.writeI32(self.field118)
oprot.writeFieldEnd()
if self.field119 != None:
oprot.writeFieldBegin('field119', TType.I32, 119)
oprot.writeI32(self.field119)
oprot.writeFieldEnd()
if self.field120 != None:
oprot.writeFieldBegin('field120', TType.I32, 120)
oprot.writeI32(self.field120)
oprot.writeFieldEnd()
if self.field121 != None:
oprot.writeFieldBegin('field121', TType.I32, 121)
oprot.writeI32(self.field121)
oprot.writeFieldEnd()
if self.field122 != None:
oprot.writeFieldBegin('field122', TType.I32, 122)
oprot.writeI32(self.field122)
oprot.writeFieldEnd()
if self.field123 != None:
oprot.writeFieldBegin('field123', TType.I32, 123)
oprot.writeI32(self.field123)
oprot.writeFieldEnd()
if self.field124 != None:
oprot.writeFieldBegin('field124', TType.I32, 124)
oprot.writeI32(self.field124)
oprot.writeFieldEnd()
if self.field125 != None:
oprot.writeFieldBegin('field125', TType.I32, 125)
oprot.writeI32(self.field125)
oprot.writeFieldEnd()
if self.field126 != None:
oprot.writeFieldBegin('field126', TType.I32, 126)
oprot.writeI32(self.field126)
oprot.writeFieldEnd()
    # Fields 127 through 256 are all optional i32 values with consecutive
    # field ids, so they are emitted data-driven here instead of as 130
    # copy-pasted four-line blocks. `is not None` replaces the original
    # non-idiomatic `!= None` check; behavior is identical for int/None.
    for idx in range(127, 257):
      value = getattr(self, 'field%d' % idx)
      if value is not None:
        oprot.writeFieldBegin('field%d' % idx, TType.I32, idx)
        oprot.writeI32(value)
        oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
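  # Pretty-printed, multi-line repr: only fields that are currently set
  # (non-None) are included, and pprint handles nested values.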
def __repr__(self):
L = []
padding = ' ' * 4
    # Every field follows the same four-line pretty-print pattern, so the
    # entries for field1..field256 are built in a loop rather than with one
    # copy-pasted block per field; output is unchanged.
    for idx in range(1, 257):
      field_value = getattr(self, 'field%d' % idx)
      if field_value is not None:
        value = pprint.pformat(field_value, indent=0)
        value = padding.join(value.splitlines(True))
        L.append('    field%d=%s' % (idx, value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
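  # Restrict dir() to the declared thrift fields so interactive inspection
  # shows the struct's schema rather than implementation attributes.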
def __dir__(self):
    # 256 sequentially numbered field names; generate them instead of
    # spelling out a 256-entry literal tuple.
    return tuple('field%d' % idx for idx in range(1, 257))
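  # Defining __eq__ would otherwise disable hashing; restore identity-based
  # hashing explicitly. hash() is therefore inconsistent with == for
  # distinct-but-equal instances, so avoid using Foo as a dict or set key.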
__hash__ = object.__hash__
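  # Conversion helpers between the thrift runtime flavors (thrift-python,
  # py3, and this deprecated py representation). Imports are deferred so the
  # newer runtimes are only required when a conversion is actually requested.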
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("module.thrift_types")
return thrift.python.converter.to_python_struct(python_types.Foo, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("module.types")
return thrift.py3.converter.to_py3_struct(py3_types.Foo, self)
def _to_py_deprecated(self):
return self
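# Illustrative use of the converters above (a sketch, not part of the
# generated module): _to_python and _to_py3 lazily import the
# corresponding generated targets, so they only work when
# "module.thrift_types" / "module.types" are importable.
#
# foo = Foo(field1=7)
# modern = foo._to_python() # thrift-python Foo, if available
# legacy = foo._to_py_deprecated() # returns self unchanged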
all_structs.append(Foo)
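# Layout of each thrift_spec entry below, as the code itself uses it
# (Foo.__init__ reads the default from index 4): (field id, wire type,
# field name, nested type spec, default value, trailing flag). The
# exact semantics of the trailing flag are an assumption (it appears
# to encode requiredness); only field42 carries a non-None default
# (1024).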
Foo.thrift_spec = (
None, # 0
(1, TType.I32, 'field1', None, None, 2, ), # 1
(2, TType.I32, 'field2', None, None, 2, ), # 2
(3, TType.I32, 'field3', None, None, 2, ), # 3
(4, TType.I32, 'field4', None, None, 2, ), # 4
(5, TType.I32, 'field5', None, None, 2, ), # 5
(6, TType.I32, 'field6', None, None, 2, ), # 6
(7, TType.I32, 'field7', None, None, 2, ), # 7
(8, TType.I32, 'field8', None, None, 2, ), # 8
(9, TType.I32, 'field9', None, None, 2, ), # 9
(10, TType.I32, 'field10', None, None, 2, ), # 10
(11, TType.I32, 'field11', None, None, 2, ), # 11
(12, TType.I32, 'field12', None, None, 2, ), # 12
(13, TType.I32, 'field13', None, None, 2, ), # 13
(14, TType.I32, 'field14', None, None, 2, ), # 14
(15, TType.I32, 'field15', None, None, 2, ), # 15
(16, TType.I32, 'field16', None, None, 2, ), # 16
(17, TType.I32, 'field17', None, None, 2, ), # 17
(18, TType.I32, 'field18', None, None, 2, ), # 18
(19, TType.I32, 'field19', None, None, 2, ), # 19
(20, TType.I32, 'field20', None, None, 2, ), # 20
(21, TType.I32, 'field21', None, None, 2, ), # 21
(22, TType.I32, 'field22', None, None, 2, ), # 22
(23, TType.I32, 'field23', None, None, 2, ), # 23
(24, TType.I32, 'field24', None, None, 2, ), # 24
(25, TType.I32, 'field25', None, None, 2, ), # 25
(26, TType.I32, 'field26', None, None, 2, ), # 26
(27, TType.I32, 'field27', None, None, 2, ), # 27
(28, TType.I32, 'field28', None, None, 2, ), # 28
(29, TType.I32, 'field29', None, None, 2, ), # 29
(30, TType.I32, 'field30', None, None, 2, ), # 30
(31, TType.I32, 'field31', None, None, 2, ), # 31
(32, TType.I32, 'field32', None, None, 2, ), # 32
(33, TType.I32, 'field33', None, None, 2, ), # 33
(34, TType.I32, 'field34', None, None, 2, ), # 34
(35, TType.I32, 'field35', None, None, 2, ), # 35
(36, TType.I32, 'field36', None, None, 2, ), # 36
(37, TType.I32, 'field37', None, None, 2, ), # 37
(38, TType.I32, 'field38', None, None, 2, ), # 38
(39, TType.I32, 'field39', None, None, 2, ), # 39
(40, TType.I32, 'field40', None, None, 2, ), # 40
(41, TType.I32, 'field41', None, None, 2, ), # 41
(42, TType.I32, 'field42', None, 1024, 2, ), # 42
(43, TType.I32, 'field43', None, None, 2, ), # 43
(44, TType.I32, 'field44', None, None, 2, ), # 44
(45, TType.I32, 'field45', None, None, 2, ), # 45
(46, TType.I32, 'field46', None, None, 2, ), # 46
(47, TType.I32, 'field47', None, None, 2, ), # 47
(48, TType.I32, 'field48', None, None, 2, ), # 48
(49, TType.I32, 'field49', None, None, 2, ), # 49
(50, TType.I32, 'field50', None, None, 2, ), # 50
(51, TType.I32, 'field51', None, None, 2, ), # 51
(52, TType.I32, 'field52', None, None, 2, ), # 52
(53, TType.I32, 'field53', None, None, 2, ), # 53
(54, TType.I32, 'field54', None, None, 2, ), # 54
(55, TType.I32, 'field55', None, None, 2, ), # 55
(56, TType.I32, 'field56', None, None, 2, ), # 56
(57, TType.I32, 'field57', None, None, 2, ), # 57
(58, TType.I32, 'field58', None, None, 2, ), # 58
(59, TType.I32, 'field59', None, None, 2, ), # 59
(60, TType.I32, 'field60', None, None, 2, ), # 60
(61, TType.I32, 'field61', None, None, 2, ), # 61
(62, TType.I32, 'field62', None, None, 2, ), # 62
(63, TType.I32, 'field63', None, None, 2, ), # 63
(64, TType.I32, 'field64', None, None, 2, ), # 64
(65, TType.I32, 'field65', None, None, 2, ), # 65
(66, TType.I32, 'field66', None, None, 2, ), # 66
(67, TType.I32, 'field67', None, None, 2, ), # 67
(68, TType.I32, 'field68', None, None, 2, ), # 68
(69, TType.I32, 'field69', None, None, 2, ), # 69
(70, TType.I32, 'field70', None, None, 2, ), # 70
(71, TType.I32, 'field71', None, None, 2, ), # 71
(72, TType.I32, 'field72', None, None, 2, ), # 72
(73, TType.I32, 'field73', None, None, 2, ), # 73
(74, TType.I32, 'field74', None, None, 2, ), # 74
(75, TType.I32, 'field75', None, None, 2, ), # 75
(76, TType.I32, 'field76', None, None, 2, ), # 76
(77, TType.I32, 'field77', None, None, 2, ), # 77
(78, TType.I32, 'field78', None, None, 2, ), # 78
(79, TType.I32, 'field79', None, None, 2, ), # 79
(80, TType.I32, 'field80', None, None, 2, ), # 80
(81, TType.I32, 'field81', None, None, 2, ), # 81
(82, TType.I32, 'field82', None, None, 2, ), # 82
(83, TType.I32, 'field83', None, None, 2, ), # 83
(84, TType.I32, 'field84', None, None, 2, ), # 84
(85, TType.I32, 'field85', None, None, 2, ), # 85
(86, TType.I32, 'field86', None, None, 2, ), # 86
(87, TType.I32, 'field87', None, None, 2, ), # 87
(88, TType.I32, 'field88', None, None, 2, ), # 88
(89, TType.I32, 'field89', None, None, 2, ), # 89
(90, TType.I32, 'field90', None, None, 2, ), # 90
(91, TType.I32, 'field91', None, None, 2, ), # 91
(92, TType.I32, 'field92', None, None, 2, ), # 92
(93, TType.I32, 'field93', None, None, 2, ), # 93
(94, TType.I32, 'field94', None, None, 2, ), # 94
(95, TType.I32, 'field95', None, None, 2, ), # 95
(96, TType.I32, 'field96', None, None, 2, ), # 96
(97, TType.I32, 'field97', None, None, 2, ), # 97
(98, TType.I32, 'field98', None, None, 2, ), # 98
(99, TType.I32, 'field99', None, None, 2, ), # 99
(100, TType.I32, 'field100', None, None, 2, ), # 100
(101, TType.I32, 'field101', None, None, 2, ), # 101
(102, TType.I32, 'field102', None, None, 2, ), # 102
(103, TType.I32, 'field103', None, None, 2, ), # 103
(104, TType.I32, 'field104', None, None, 2, ), # 104
(105, TType.I32, 'field105', None, None, 2, ), # 105
(106, TType.I32, 'field106', None, None, 2, ), # 106
(107, TType.I32, 'field107', None, None, 2, ), # 107
(108, TType.I32, 'field108', None, None, 2, ), # 108
(109, TType.I32, 'field109', None, None, 2, ), # 109
(110, TType.I32, 'field110', None, None, 2, ), # 110
(111, TType.I32, 'field111', None, None, 2, ), # 111
(112, TType.I32, 'field112', None, None, 2, ), # 112
(113, TType.I32, 'field113', None, None, 2, ), # 113
(114, TType.I32, 'field114', None, None, 2, ), # 114
(115, TType.I32, 'field115', None, None, 2, ), # 115
(116, TType.I32, 'field116', None, None, 2, ), # 116
(117, TType.I32, 'field117', None, None, 2, ), # 117
(118, TType.I32, 'field118', None, None, 2, ), # 118
(119, TType.I32, 'field119', None, None, 2, ), # 119
(120, TType.I32, 'field120', None, None, 2, ), # 120
(121, TType.I32, 'field121', None, None, 2, ), # 121
(122, TType.I32, 'field122', None, None, 2, ), # 122
(123, TType.I32, 'field123', None, None, 2, ), # 123
(124, TType.I32, 'field124', None, None, 2, ), # 124
(125, TType.I32, 'field125', None, None, 2, ), # 125
(126, TType.I32, 'field126', None, None, 2, ), # 126
(127, TType.I32, 'field127', None, None, 2, ), # 127
(128, TType.I32, 'field128', None, None, 2, ), # 128
(129, TType.I32, 'field129', None, None, 2, ), # 129
(130, TType.I32, 'field130', None, None, 2, ), # 130
(131, TType.I32, 'field131', None, None, 2, ), # 131
(132, TType.I32, 'field132', None, None, 2, ), # 132
(133, TType.I32, 'field133', None, None, 2, ), # 133
(134, TType.I32, 'field134', None, None, 2, ), # 134
(135, TType.I32, 'field135', None, None, 2, ), # 135
(136, TType.I32, 'field136', None, None, 2, ), # 136
(137, TType.I32, 'field137', None, None, 2, ), # 137
(138, TType.I32, 'field138', None, None, 2, ), # 138
(139, TType.I32, 'field139', None, None, 2, ), # 139
(140, TType.I32, 'field140', None, None, 2, ), # 140
(141, TType.I32, 'field141', None, None, 2, ), # 141
(142, TType.I32, 'field142', None, None, 2, ), # 142
(143, TType.I32, 'field143', None, None, 2, ), # 143
(144, TType.I32, 'field144', None, None, 2, ), # 144
(145, TType.I32, 'field145', None, None, 2, ), # 145
(146, TType.I32, 'field146', None, None, 2, ), # 146
(147, TType.I32, 'field147', None, None, 2, ), # 147
(148, TType.I32, 'field148', None, None, 2, ), # 148
(149, TType.I32, 'field149', None, None, 2, ), # 149
(150, TType.I32, 'field150', None, None, 2, ), # 150
(151, TType.I32, 'field151', None, None, 2, ), # 151
(152, TType.I32, 'field152', None, None, 2, ), # 152
(153, TType.I32, 'field153', None, None, 2, ), # 153
(154, TType.I32, 'field154', None, None, 2, ), # 154
(155, TType.I32, 'field155', None, None, 2, ), # 155
(156, TType.I32, 'field156', None, None, 2, ), # 156
(157, TType.I32, 'field157', None, None, 2, ), # 157
(158, TType.I32, 'field158', None, None, 2, ), # 158
(159, TType.I32, 'field159', None, None, 2, ), # 159
(160, TType.I32, 'field160', None, None, 2, ), # 160
(161, TType.I32, 'field161', None, None, 2, ), # 161
(162, TType.I32, 'field162', None, None, 2, ), # 162
(163, TType.I32, 'field163', None, None, 2, ), # 163
(164, TType.I32, 'field164', None, None, 2, ), # 164
(165, TType.I32, 'field165', None, None, 2, ), # 165
(166, TType.I32, 'field166', None, None, 2, ), # 166
(167, TType.I32, 'field167', None, None, 2, ), # 167
(168, TType.I32, 'field168', None, None, 2, ), # 168
(169, TType.I32, 'field169', None, None, 2, ), # 169
(170, TType.I32, 'field170', None, None, 2, ), # 170
(171, TType.I32, 'field171', None, None, 2, ), # 171
(172, TType.I32, 'field172', None, None, 2, ), # 172
(173, TType.I32, 'field173', None, None, 2, ), # 173
(174, TType.I32, 'field174', None, None, 2, ), # 174
(175, TType.I32, 'field175', None, None, 2, ), # 175
(176, TType.I32, 'field176', None, None, 2, ), # 176
(177, TType.I32, 'field177', None, None, 2, ), # 177
(178, TType.I32, 'field178', None, None, 2, ), # 178
(179, TType.I32, 'field179', None, None, 2, ), # 179
(180, TType.I32, 'field180', None, None, 2, ), # 180
(181, TType.I32, 'field181', None, None, 2, ), # 181
(182, TType.I32, 'field182', None, None, 2, ), # 182
(183, TType.I32, 'field183', None, None, 2, ), # 183
(184, TType.I32, 'field184', None, None, 2, ), # 184
(185, TType.I32, 'field185', None, None, 2, ), # 185
(186, TType.I32, 'field186', None, None, 2, ), # 186
(187, TType.I32, 'field187', None, None, 2, ), # 187
(188, TType.I32, 'field188', None, None, 2, ), # 188
(189, TType.I32, 'field189', None, None, 2, ), # 189
(190, TType.I32, 'field190', None, None, 2, ), # 190
(191, TType.I32, 'field191', None, None, 2, ), # 191
(192, TType.I32, 'field192', None, None, 2, ), # 192
(193, TType.I32, 'field193', None, None, 2, ), # 193
(194, TType.I32, 'field194', None, None, 2, ), # 194
(195, TType.I32, 'field195', None, None, 2, ), # 195
(196, TType.I32, 'field196', None, None, 2, ), # 196
(197, TType.I32, 'field197', None, None, 2, ), # 197
(198, TType.I32, 'field198', None, None, 2, ), # 198
(199, TType.I32, 'field199', None, None, 2, ), # 199
(200, TType.I32, 'field200', None, None, 2, ), # 200
(201, TType.I32, 'field201', None, None, 2, ), # 201
(202, TType.I32, 'field202', None, None, 2, ), # 202
(203, TType.I32, 'field203', None, None, 2, ), # 203
(204, TType.I32, 'field204', None, None, 2, ), # 204
(205, TType.I32, 'field205', None, None, 2, ), # 205
(206, TType.I32, 'field206', None, None, 2, ), # 206
(207, TType.I32, 'field207', None, None, 2, ), # 207
(208, TType.I32, 'field208', None, None, 2, ), # 208
(209, TType.I32, 'field209', None, None, 2, ), # 209
(210, TType.I32, 'field210', None, None, 2, ), # 210
(211, TType.I32, 'field211', None, None, 2, ), # 211
(212, TType.I32, 'field212', None, None, 2, ), # 212
(213, TType.I32, 'field213', None, None, 2, ), # 213
(214, TType.I32, 'field214', None, None, 2, ), # 214
(215, TType.I32, 'field215', None, None, 2, ), # 215
(216, TType.I32, 'field216', None, None, 2, ), # 216
(217, TType.I32, 'field217', None, None, 2, ), # 217
(218, TType.I32, 'field218', None, None, 2, ), # 218
(219, TType.I32, 'field219', None, None, 2, ), # 219
(220, TType.I32, 'field220', None, None, 2, ), # 220
(221, TType.I32, 'field221', None, None, 2, ), # 221
(222, TType.I32, 'field222', None, None, 2, ), # 222
(223, TType.I32, 'field223', None, None, 2, ), # 223
(224, TType.I32, 'field224', None, None, 2, ), # 224
(225, TType.I32, 'field225', None, None, 2, ), # 225
(226, TType.I32, 'field226', None, None, 2, ), # 226
(227, TType.I32, 'field227', None, None, 2, ), # 227
(228, TType.I32, 'field228', None, None, 2, ), # 228
(229, TType.I32, 'field229', None, None, 2, ), # 229
(230, TType.I32, 'field230', None, None, 2, ), # 230
(231, TType.I32, 'field231', None, None, 2, ), # 231
(232, TType.I32, 'field232', None, None, 2, ), # 232
(233, TType.I32, 'field233', None, None, 2, ), # 233
(234, TType.I32, 'field234', None, None, 2, ), # 234
(235, TType.I32, 'field235', None, None, 2, ), # 235
(236, TType.I32, 'field236', None, None, 2, ), # 236
(237, TType.I32, 'field237', None, None, 2, ), # 237
(238, TType.I32, 'field238', None, None, 2, ), # 238
(239, TType.I32, 'field239', None, None, 2, ), # 239
(240, TType.I32, 'field240', None, None, 2, ), # 240
(241, TType.I32, 'field241', None, None, 2, ), # 241
(242, TType.I32, 'field242', None, None, 2, ), # 242
(243, TType.I32, 'field243', None, None, 2, ), # 243
(244, TType.I32, 'field244', None, None, 2, ), # 244
(245, TType.I32, 'field245', None, None, 2, ), # 245
(246, TType.I32, 'field246', None, None, 2, ), # 246
(247, TType.I32, 'field247', None, None, 2, ), # 247
(248, TType.I32, 'field248', None, None, 2, ), # 248
(249, TType.I32, 'field249', None, None, 2, ), # 249
(250, TType.I32, 'field250', None, None, 2, ), # 250
(251, TType.I32, 'field251', None, None, 2, ), # 251
(252, TType.I32, 'field252', None, None, 2, ), # 252
(253, TType.I32, 'field253', None, None, 2, ), # 253
(254, TType.I32, 'field254', None, None, 2, ), # 254
(255, TType.I32, 'field255', None, None, 2, ), # 255
(256, TType.I32, 'field256', None, None, 2, ), # 256
)
Foo.thrift_struct_annotations = {
}
Foo.thrift_field_annotations = {
}
def Foo__init__(self, **kwargs):
field1 = kwargs.pop(
"field1",
None,
)
field2 = kwargs.pop(
"field2",
None,
)
field3 = kwargs.pop(
"field3",
None,
)
field4 = kwargs.pop(
"field4",
None,
)
field5 = kwargs.pop(
"field5",
None,
)
field6 = kwargs.pop(
"field6",
None,
)
field7 = kwargs.pop(
"field7",
None,
)
field8 = kwargs.pop(
"field8",
None,
)
field9 = kwargs.pop(
"field9",
None,
)
field10 = kwargs.pop(
"field10",
None,
)
field11 = kwargs.pop(
"field11",
None,
)
field12 = kwargs.pop(
"field12",
None,
)
field13 = kwargs.pop(
"field13",
None,
)
field14 = kwargs.pop(
"field14",
None,
)
field15 = kwargs.pop(
"field15",
None,
)
field16 = kwargs.pop(
"field16",
None,
)
field17 = kwargs.pop(
"field17",
None,
)
field18 = kwargs.pop(
"field18",
None,
)
field19 = kwargs.pop(
"field19",
None,
)
field20 = kwargs.pop(
"field20",
None,
)
field21 = kwargs.pop(
"field21",
None,
)
field22 = kwargs.pop(
"field22",
None,
)
field23 = kwargs.pop(
"field23",
None,
)
field24 = kwargs.pop(
"field24",
None,
)
field25 = kwargs.pop(
"field25",
None,
)
field26 = kwargs.pop(
"field26",
None,
)
field27 = kwargs.pop(
"field27",
None,
)
field28 = kwargs.pop(
"field28",
None,
)
field29 = kwargs.pop(
"field29",
None,
)
field30 = kwargs.pop(
"field30",
None,
)
field31 = kwargs.pop(
"field31",
None,
)
field32 = kwargs.pop(
"field32",
None,
)
field33 = kwargs.pop(
"field33",
None,
)
field34 = kwargs.pop(
"field34",
None,
)
field35 = kwargs.pop(
"field35",
None,
)
field36 = kwargs.pop(
"field36",
None,
)
field37 = kwargs.pop(
"field37",
None,
)
field38 = kwargs.pop(
"field38",
None,
)
field39 = kwargs.pop(
"field39",
None,
)
field40 = kwargs.pop(
"field40",
None,
)
field41 = kwargs.pop(
"field41",
None,
)
field42 = kwargs.pop(
"field42",
Foo.thrift_spec[42][4],
)
field43 = kwargs.pop(
"field43",
None,
)
field44 = kwargs.pop(
"field44",
None,
)
field45 = kwargs.pop(
"field45",
None,
)
field46 = kwargs.pop(
"field46",
None,
)
field47 = kwargs.pop(
"field47",
None,
)
field48 = kwargs.pop(
"field48",
None,
)
field49 = kwargs.pop(
"field49",
None,
)
field50 = kwargs.pop(
"field50",
None,
)
field51 = kwargs.pop(
"field51",
None,
)
field52 = kwargs.pop(
"field52",
None,
)
field53 = kwargs.pop(
"field53",
None,
)
field54 = kwargs.pop(
"field54",
None,
)
field55 = kwargs.pop(
"field55",
None,
)
field56 = kwargs.pop(
"field56",
None,
)
field57 = kwargs.pop(
"field57",
None,
)
field58 = kwargs.pop(
"field58",
None,
)
field59 = kwargs.pop(
"field59",
None,
)
field60 = kwargs.pop(
"field60",
None,
)
field61 = kwargs.pop(
"field61",
None,
)
field62 = kwargs.pop(
"field62",
None,
)
field63 = kwargs.pop(
"field63",
None,
)
field64 = kwargs.pop(
"field64",
None,
)
field65 = kwargs.pop(
"field65",
None,
)
field66 = kwargs.pop(
"field66",
None,
)
field67 = kwargs.pop(
"field67",
None,
)
field68 = kwargs.pop(
"field68",
None,
)
field69 = kwargs.pop(
"field69",
None,
)
field70 = kwargs.pop(
"field70",
None,
)
field71 = kwargs.pop(
"field71",
None,
)
field72 = kwargs.pop(
"field72",
None,
)
field73 = kwargs.pop(
"field73",
None,
)
field74 = kwargs.pop(
"field74",
None,
)
field75 = kwargs.pop(
"field75",
None,
)
field76 = kwargs.pop(
"field76",
None,
)
field77 = kwargs.pop(
"field77",
None,
)
field78 = kwargs.pop(
"field78",
None,
)
field79 = kwargs.pop(
"field79",
None,
)
field80 = kwargs.pop(
"field80",
None,
)
field81 = kwargs.pop(
"field81",
None,
)
field82 = kwargs.pop(
"field82",
None,
)
field83 = kwargs.pop(
"field83",
None,
)
field84 = kwargs.pop(
"field84",
None,
)
field85 = kwargs.pop(
"field85",
None,
)
field86 = kwargs.pop(
"field86",
None,
)
field87 = kwargs.pop(
"field87",
None,
)
field88 = kwargs.pop(
"field88",
None,
)
field89 = kwargs.pop(
"field89",
None,
)
field90 = kwargs.pop(
"field90",
None,
)
field91 = kwargs.pop(
"field91",
None,
)
field92 = kwargs.pop(
"field92",
None,
)
field93 = kwargs.pop(
"field93",
None,
)
field94 = kwargs.pop(
"field94",
None,
)
field95 = kwargs.pop(
"field95",
None,
)
field96 = kwargs.pop(
"field96",
None,
)
field97 = kwargs.pop(
"field97",
None,
)
field98 = kwargs.pop(
"field98",
None,
)
field99 = kwargs.pop(
"field99",
None,
)
field100 = kwargs.pop(
"field100",
None,
)
field101 = kwargs.pop(
"field101",
None,
)
field102 = kwargs.pop(
"field102",
None,
)
field103 = kwargs.pop(
"field103",
None,
)
field104 = kwargs.pop(
"field104",
None,
)
field105 = kwargs.pop(
"field105",
None,
)
field106 = kwargs.pop(
"field106",
None,
)
field107 = kwargs.pop(
"field107",
None,
)
field108 = kwargs.pop(
"field108",
None,
)
field109 = kwargs.pop(
"field109",
None,
)
field110 = kwargs.pop(
"field110",
None,
)
field111 = kwargs.pop(
"field111",
None,
)
field112 = kwargs.pop(
"field112",
None,
)
field113 = kwargs.pop(
"field113",
None,
)
field114 = kwargs.pop(
"field114",
None,
)
field115 = kwargs.pop(
"field115",
None,
)
field116 = kwargs.pop(
"field116",
None,
)
field117 = kwargs.pop(
"field117",
None,
)
field118 = kwargs.pop(
"field118",
None,
)
field119 = kwargs.pop(
"field119",
None,
)
field120 = kwargs.pop(
"field120",
None,
)
field121 = kwargs.pop(
"field121",
None,
)
field122 = kwargs.pop(
"field122",
None,
)
field123 = kwargs.pop(
"field123",
None,
)
field124 = kwargs.pop(
"field124",
None,
)
field125 = kwargs.pop(
"field125",
None,
)
field126 = kwargs.pop(
"field126",
None,
)
field127 = kwargs.pop(
"field127",
None,
)
field128 = kwargs.pop(
"field128",
None,
)
field129 = kwargs.pop(
"field129",
None,
)
field130 = kwargs.pop(
"field130",
None,
)
field131 = kwargs.pop(
"field131",
None,
)
field132 = kwargs.pop(
"field132",
None,
)
field133 = kwargs.pop(
"field133",
None,
)
field134 = kwargs.pop(
"field134",
None,
)
field135 = kwargs.pop(
"field135",
None,
)
field136 = kwargs.pop(
"field136",
None,
)
field137 = kwargs.pop(
"field137",
None,
)
field138 = kwargs.pop(
"field138",
None,
)
field139 = kwargs.pop(
"field139",
None,
)
field140 = kwargs.pop(
"field140",
None,
)
field141 = kwargs.pop(
"field141",
None,
)
field142 = kwargs.pop(
"field142",
None,
)
field143 = kwargs.pop(
"field143",
None,
)
field144 = kwargs.pop(
"field144",
None,
)
field145 = kwargs.pop(
"field145",
None,
)
field146 = kwargs.pop(
"field146",
None,
)
field147 = kwargs.pop(
"field147",
None,
)
field148 = kwargs.pop(
"field148",
None,
)
field149 = kwargs.pop(
"field149",
None,
)
field150 = kwargs.pop(
"field150",
None,
)
field151 = kwargs.pop(
"field151",
None,
)
field152 = kwargs.pop(
"field152",
None,
)
field153 = kwargs.pop(
"field153",
None,
)
field154 = kwargs.pop(
"field154",
None,
)
field155 = kwargs.pop(
"field155",
None,
)
field156 = kwargs.pop(
"field156",
None,
)
field157 = kwargs.pop(
"field157",
None,
)
field158 = kwargs.pop(
"field158",
None,
)
field159 = kwargs.pop(
"field159",
None,
)
field160 = kwargs.pop(
"field160",
None,
)
field161 = kwargs.pop(
"field161",
None,
)
field162 = kwargs.pop(
"field162",
None,
)
field163 = kwargs.pop(
"field163",
None,
)
field164 = kwargs.pop(
"field164",
None,
)
field165 = kwargs.pop(
"field165",
None,
)
field166 = kwargs.pop(
"field166",
None,
)
field167 = kwargs.pop(
"field167",
None,
)
field168 = kwargs.pop(
"field168",
None,
)
field169 = kwargs.pop(
"field169",
None,
)
field170 = kwargs.pop(
"field170",
None,
)
field171 = kwargs.pop(
"field171",
None,
)
field172 = kwargs.pop(
"field172",
None,
)
field173 = kwargs.pop(
"field173",
None,
)
field174 = kwargs.pop(
"field174",
None,
)
field175 = kwargs.pop(
"field175",
None,
)
field176 = kwargs.pop(
"field176",
None,
)
field177 = kwargs.pop(
"field177",
None,
)
field178 = kwargs.pop(
"field178",
None,
)
field179 = kwargs.pop(
"field179",
None,
)
field180 = kwargs.pop(
"field180",
None,
)
field181 = kwargs.pop(
"field181",
None,
)
field182 = kwargs.pop(
"field182",
None,
)
field183 = kwargs.pop(
"field183",
None,
)
field184 = kwargs.pop(
"field184",
None,
)
field185 = kwargs.pop(
"field185",
None,
)
field186 = kwargs.pop(
"field186",
None,
)
field187 = kwargs.pop(
"field187",
None,
)
field188 = kwargs.pop(
"field188",
None,
)
field189 = kwargs.pop(
"field189",
None,
)
field190 = kwargs.pop(
"field190",
None,
)
field191 = kwargs.pop(
"field191",
None,
)
field192 = kwargs.pop(
"field192",
None,
)
field193 = kwargs.pop(
"field193",
None,
)
field194 = kwargs.pop(
"field194",
None,
)
field195 = kwargs.pop(
"field195",
None,
)
field196 = kwargs.pop(
"field196",
None,
)
field197 = kwargs.pop(
"field197",
None,
)
field198 = kwargs.pop(
"field198",
None,
)
field199 = kwargs.pop(
"field199",
None,
)
field200 = kwargs.pop(
"field200",
None,
)
field201 = kwargs.pop(
"field201",
None,
)
field202 = kwargs.pop(
"field202",
None,
)
field203 = kwargs.pop(
"field203",
None,
)
field204 = kwargs.pop(
"field204",
None,
)
field205 = kwargs.pop(
"field205",
None,
)
field206 = kwargs.pop(
"field206",
None,
)
field207 = kwargs.pop(
"field207",
None,
)
field208 = kwargs.pop(
"field208",
None,
)
field209 = kwargs.pop(
"field209",
None,
)
field210 = kwargs.pop(
"field210",
None,
)
field211 = kwargs.pop(
"field211",
None,
)
field212 = kwargs.pop(
"field212",
None,
)
field213 = kwargs.pop(
"field213",
None,
)
field214 = kwargs.pop(
"field214",
None,
)
field215 = kwargs.pop(
"field215",
None,
)
field216 = kwargs.pop(
"field216",
None,
)
field217 = kwargs.pop(
"field217",
None,
)
field218 = kwargs.pop(
"field218",
None,
)
field219 = kwargs.pop(
"field219",
None,
)
field220 = kwargs.pop(
"field220",
None,
)
field221 = kwargs.pop(
"field221",
None,
)
field222 = kwargs.pop(
"field222",
None,
)
field223 = kwargs.pop(
"field223",
None,
)
field224 = kwargs.pop(
"field224",
None,
)
field225 = kwargs.pop(
"field225",
None,
)
field226 = kwargs.pop(
"field226",
None,
)
field227 = kwargs.pop(
"field227",
None,
)
field228 = kwargs.pop(
"field228",
None,
)
field229 = kwargs.pop(
"field229",
None,
)
field230 = kwargs.pop(
"field230",
None,
)
field231 = kwargs.pop(
"field231",
None,
)
field232 = kwargs.pop(
"field232",
None,
)
field233 = kwargs.pop(
"field233",
None,
)
field234 = kwargs.pop(
"field234",
None,
)
field235 = kwargs.pop(
"field235",
None,
)
field236 = kwargs.pop(
"field236",
None,
)
field237 = kwargs.pop(
"field237",
None,
)
field238 = kwargs.pop(
"field238",
None,
)
field239 = kwargs.pop(
"field239",
None,
)
field240 = kwargs.pop(
"field240",
None,
)
field241 = kwargs.pop(
"field241",
None,
)
field242 = kwargs.pop(
"field242",
None,
)
field243 = kwargs.pop(
"field243",
None,
)
field244 = kwargs.pop(
"field244",
None,
)
field245 = kwargs.pop(
"field245",
None,
)
field246 = kwargs.pop(
"field246",
None,
)
field247 = kwargs.pop(
"field247",
None,
)
field248 = kwargs.pop(
"field248",
None,
)
field249 = kwargs.pop(
"field249",
None,
)
field250 = kwargs.pop(
"field250",
None,
)
field251 = kwargs.pop(
"field251",
None,
)
field252 = kwargs.pop(
"field252",
None,
)
field253 = kwargs.pop(
"field253",
None,
)
field254 = kwargs.pop(
"field254",
None,
)
field255 = kwargs.pop(
"field255",
None,
)
field256 = kwargs.pop(
"field256",
None,
)
if kwargs:
key, _value = kwargs.popitem()
raise TypeError("{}() got an unexpected keyword argument '{}'".format("Foo__init__", key))
self.field1 = field1
self.field2 = field2
self.field3 = field3
self.field4 = field4
self.field5 = field5
self.field6 = field6
self.field7 = field7
self.field8 = field8
self.field9 = field9
self.field10 = field10
self.field11 = field11
self.field12 = field12
self.field13 = field13
self.field14 = field14
self.field15 = field15
self.field16 = field16
self.field17 = field17
self.field18 = field18
self.field19 = field19
self.field20 = field20
self.field21 = field21
self.field22 = field22
self.field23 = field23
self.field24 = field24
self.field25 = field25
self.field26 = field26
self.field27 = field27
self.field28 = field28
self.field29 = field29
self.field30 = field30
self.field31 = field31
self.field32 = field32
self.field33 = field33
self.field34 = field34
self.field35 = field35
self.field36 = field36
self.field37 = field37
self.field38 = field38
self.field39 = field39
self.field40 = field40
self.field41 = field41
self.field42 = field42
self.field43 = field43
self.field44 = field44
self.field45 = field45
self.field46 = field46
self.field47 = field47
self.field48 = field48
self.field49 = field49
self.field50 = field50
self.field51 = field51
self.field52 = field52
self.field53 = field53
self.field54 = field54
self.field55 = field55
self.field56 = field56
self.field57 = field57
self.field58 = field58
self.field59 = field59
self.field60 = field60
self.field61 = field61
self.field62 = field62
self.field63 = field63
self.field64 = field64
self.field65 = field65
self.field66 = field66
self.field67 = field67
self.field68 = field68
self.field69 = field69
self.field70 = field70
self.field71 = field71
self.field72 = field72
self.field73 = field73
self.field74 = field74
self.field75 = field75
self.field76 = field76
self.field77 = field77
self.field78 = field78
self.field79 = field79
self.field80 = field80
self.field81 = field81
self.field82 = field82
self.field83 = field83
self.field84 = field84
self.field85 = field85
self.field86 = field86
self.field87 = field87
self.field88 = field88
self.field89 = field89
self.field90 = field90
self.field91 = field91
self.field92 = field92
self.field93 = field93
self.field94 = field94
self.field95 = field95
self.field96 = field96
self.field97 = field97
self.field98 = field98
self.field99 = field99
self.field100 = field100
self.field101 = field101
self.field102 = field102
self.field103 = field103
self.field104 = field104
self.field105 = field105
self.field106 = field106
self.field107 = field107
self.field108 = field108
self.field109 = field109
self.field110 = field110
self.field111 = field111
self.field112 = field112
self.field113 = field113
self.field114 = field114
self.field115 = field115
self.field116 = field116
self.field117 = field117
self.field118 = field118
self.field119 = field119
self.field120 = field120
self.field121 = field121
self.field122 = field122
self.field123 = field123
self.field124 = field124
self.field125 = field125
self.field126 = field126
self.field127 = field127
self.field128 = field128
self.field129 = field129
self.field130 = field130
self.field131 = field131
self.field132 = field132
self.field133 = field133
self.field134 = field134
self.field135 = field135
self.field136 = field136
self.field137 = field137
self.field138 = field138
self.field139 = field139
self.field140 = field140
self.field141 = field141
self.field142 = field142
self.field143 = field143
self.field144 = field144
self.field145 = field145
self.field146 = field146
self.field147 = field147
self.field148 = field148
self.field149 = field149
self.field150 = field150
self.field151 = field151
self.field152 = field152
self.field153 = field153
self.field154 = field154
self.field155 = field155
self.field156 = field156
self.field157 = field157
self.field158 = field158
self.field159 = field159
self.field160 = field160
self.field161 = field161
self.field162 = field162
self.field163 = field163
self.field164 = field164
self.field165 = field165
self.field166 = field166
self.field167 = field167
self.field168 = field168
self.field169 = field169
self.field170 = field170
self.field171 = field171
self.field172 = field172
self.field173 = field173
self.field174 = field174
self.field175 = field175
self.field176 = field176
self.field177 = field177
self.field178 = field178
self.field179 = field179
self.field180 = field180
self.field181 = field181
self.field182 = field182
self.field183 = field183
self.field184 = field184
self.field185 = field185
self.field186 = field186
self.field187 = field187
self.field188 = field188
self.field189 = field189
self.field190 = field190
self.field191 = field191
self.field192 = field192
self.field193 = field193
self.field194 = field194
self.field195 = field195
self.field196 = field196
self.field197 = field197
self.field198 = field198
self.field199 = field199
self.field200 = field200
self.field201 = field201
self.field202 = field202
self.field203 = field203
self.field204 = field204
self.field205 = field205
self.field206 = field206
self.field207 = field207
self.field208 = field208
self.field209 = field209
self.field210 = field210
self.field211 = field211
self.field212 = field212
self.field213 = field213
self.field214 = field214
self.field215 = field215
self.field216 = field216
self.field217 = field217
self.field218 = field218
self.field219 = field219
self.field220 = field220
self.field221 = field221
self.field222 = field222
self.field223 = field223
self.field224 = field224
self.field225 = field225
self.field226 = field226
self.field227 = field227
self.field228 = field228
self.field229 = field229
self.field230 = field230
self.field231 = field231
self.field232 = field232
self.field233 = field233
self.field234 = field234
self.field235 = field235
self.field236 = field236
self.field237 = field237
self.field238 = field238
self.field239 = field239
self.field240 = field240
self.field241 = field241
self.field242 = field242
self.field243 = field243
self.field244 = field244
self.field245 = field245
self.field246 = field246
self.field247 = field247
self.field248 = field248
self.field249 = field249
self.field250 = field250
self.field251 = field251
self.field252 = field252
self.field253 = field253
self.field254 = field254
self.field255 = field255
self.field256 = field256
Foo.__init__ = Foo__init__
def Foo__setstate__(self, state):
state.setdefault('field1', None)
state.setdefault('field2', None)
state.setdefault('field3', None)
state.setdefault('field4', None)
state.setdefault('field5', None)
state.setdefault('field6', None)
state.setdefault('field7', None)
state.setdefault('field8', None)
state.setdefault('field9', None)
state.setdefault('field10', None)
state.setdefault('field11', None)
state.setdefault('field12', None)
state.setdefault('field13', None)
state.setdefault('field14', None)
state.setdefault('field15', None)
state.setdefault('field16', None)
state.setdefault('field17', None)
state.setdefault('field18', None)
state.setdefault('field19', None)
state.setdefault('field20', None)
state.setdefault('field21', None)
state.setdefault('field22', None)
state.setdefault('field23', None)
state.setdefault('field24', None)
state.setdefault('field25', None)
state.setdefault('field26', None)
state.setdefault('field27', None)
state.setdefault('field28', None)
state.setdefault('field29', None)
state.setdefault('field30', None)
state.setdefault('field31', None)
state.setdefault('field32', None)
state.setdefault('field33', None)
state.setdefault('field34', None)
state.setdefault('field35', None)
state.setdefault('field36', None)
state.setdefault('field37', None)
state.setdefault('field38', None)
state.setdefault('field39', None)
state.setdefault('field40', None)
state.setdefault('field41', None)
state.setdefault('field42', 1024)
state.setdefault('field43', None)
state.setdefault('field44', None)
state.setdefault('field45', None)
state.setdefault('field46', None)
state.setdefault('field47', None)
state.setdefault('field48', None)
state.setdefault('field49', None)
state.setdefault('field50', None)
state.setdefault('field51', None)
state.setdefault('field52', None)
state.setdefault('field53', None)
state.setdefault('field54', None)
state.setdefault('field55', None)
state.setdefault('field56', None)
state.setdefault('field57', None)
state.setdefault('field58', None)
state.setdefault('field59', None)
state.setdefault('field60', None)
state.setdefault('field61', None)
state.setdefault('field62', None)
state.setdefault('field63', None)
state.setdefault('field64', None)
state.setdefault('field65', None)
state.setdefault('field66', None)
state.setdefault('field67', None)
state.setdefault('field68', None)
state.setdefault('field69', None)
state.setdefault('field70', None)
state.setdefault('field71', None)
state.setdefault('field72', None)
state.setdefault('field73', None)
state.setdefault('field74', None)
state.setdefault('field75', None)
state.setdefault('field76', None)
state.setdefault('field77', None)
state.setdefault('field78', None)
state.setdefault('field79', None)
state.setdefault('field80', None)
state.setdefault('field81', None)
state.setdefault('field82', None)
state.setdefault('field83', None)
state.setdefault('field84', None)
state.setdefault('field85', None)
state.setdefault('field86', None)
state.setdefault('field87', None)
state.setdefault('field88', None)
state.setdefault('field89', None)
state.setdefault('field90', None)
state.setdefault('field91', None)
state.setdefault('field92', None)
state.setdefault('field93', None)
state.setdefault('field94', None)
state.setdefault('field95', None)
state.setdefault('field96', None)
state.setdefault('field97', None)
state.setdefault('field98', None)
state.setdefault('field99', None)
state.setdefault('field100', None)
state.setdefault('field101', None)
state.setdefault('field102', None)
state.setdefault('field103', None)
state.setdefault('field104', None)
state.setdefault('field105', None)
state.setdefault('field106', None)
state.setdefault('field107', None)
state.setdefault('field108', None)
state.setdefault('field109', None)
state.setdefault('field110', None)
state.setdefault('field111', None)
state.setdefault('field112', None)
state.setdefault('field113', None)
state.setdefault('field114', None)
state.setdefault('field115', None)
state.setdefault('field116', None)
state.setdefault('field117', None)
state.setdefault('field118', None)
state.setdefault('field119', None)
state.setdefault('field120', None)
state.setdefault('field121', None)
state.setdefault('field122', None)
state.setdefault('field123', None)
state.setdefault('field124', None)
state.setdefault('field125', None)
state.setdefault('field126', None)
state.setdefault('field127', None)
state.setdefault('field128', None)
state.setdefault('field129', None)
state.setdefault('field130', None)
state.setdefault('field131', None)
state.setdefault('field132', None)
state.setdefault('field133', None)
state.setdefault('field134', None)
state.setdefault('field135', None)
state.setdefault('field136', None)
state.setdefault('field137', None)
state.setdefault('field138', None)
state.setdefault('field139', None)
state.setdefault('field140', None)
state.setdefault('field141', None)
state.setdefault('field142', None)
state.setdefault('field143', None)
state.setdefault('field144', None)
state.setdefault('field145', None)
state.setdefault('field146', None)
state.setdefault('field147', None)
state.setdefault('field148', None)
state.setdefault('field149', None)
state.setdefault('field150', None)
state.setdefault('field151', None)
state.setdefault('field152', None)
state.setdefault('field153', None)
state.setdefault('field154', None)
state.setdefault('field155', None)
state.setdefault('field156', None)
state.setdefault('field157', None)
state.setdefault('field158', None)
state.setdefault('field159', None)
state.setdefault('field160', None)
state.setdefault('field161', None)
state.setdefault('field162', None)
state.setdefault('field163', None)
state.setdefault('field164', None)
state.setdefault('field165', None)
state.setdefault('field166', None)
state.setdefault('field167', None)
state.setdefault('field168', None)
state.setdefault('field169', None)
state.setdefault('field170', None)
state.setdefault('field171', None)
state.setdefault('field172', None)
state.setdefault('field173', None)
state.setdefault('field174', None)
state.setdefault('field175', None)
state.setdefault('field176', None)
state.setdefault('field177', None)
state.setdefault('field178', None)
state.setdefault('field179', None)
state.setdefault('field180', None)
state.setdefault('field181', None)
state.setdefault('field182', None)
state.setdefault('field183', None)
state.setdefault('field184', None)
state.setdefault('field185', None)
state.setdefault('field186', None)
state.setdefault('field187', None)
state.setdefault('field188', None)
state.setdefault('field189', None)
state.setdefault('field190', None)
state.setdefault('field191', None)
state.setdefault('field192', None)
state.setdefault('field193', None)
state.setdefault('field194', None)
state.setdefault('field195', None)
state.setdefault('field196', None)
state.setdefault('field197', None)
state.setdefault('field198', None)
state.setdefault('field199', None)
state.setdefault('field200', None)
state.setdefault('field201', None)
state.setdefault('field202', None)
state.setdefault('field203', None)
state.setdefault('field204', None)
state.setdefault('field205', None)
state.setdefault('field206', None)
state.setdefault('field207', None)
state.setdefault('field208', None)
state.setdefault('field209', None)
state.setdefault('field210', None)
state.setdefault('field211', None)
state.setdefault('field212', None)
state.setdefault('field213', None)
state.setdefault('field214', None)
state.setdefault('field215', None)
state.setdefault('field216', None)
state.setdefault('field217', None)
state.setdefault('field218', None)
state.setdefault('field219', None)
state.setdefault('field220', None)
state.setdefault('field221', None)
state.setdefault('field222', None)
state.setdefault('field223', None)
state.setdefault('field224', None)
state.setdefault('field225', None)
state.setdefault('field226', None)
state.setdefault('field227', None)
state.setdefault('field228', None)
state.setdefault('field229', None)
state.setdefault('field230', None)
state.setdefault('field231', None)
state.setdefault('field232', None)
state.setdefault('field233', None)
state.setdefault('field234', None)
state.setdefault('field235', None)
state.setdefault('field236', None)
state.setdefault('field237', None)
state.setdefault('field238', None)
state.setdefault('field239', None)
state.setdefault('field240', None)
state.setdefault('field241', None)
state.setdefault('field242', None)
state.setdefault('field243', None)
state.setdefault('field244', None)
state.setdefault('field245', None)
state.setdefault('field246', None)
state.setdefault('field247', None)
state.setdefault('field248', None)
state.setdefault('field249', None)
state.setdefault('field250', None)
state.setdefault('field251', None)
state.setdefault('field252', None)
state.setdefault('field253', None)
state.setdefault('field254', None)
state.setdefault('field255', None)
state.setdefault('field256', None)
self.__dict__ = state
Foo.__getstate__ = lambda self: self.__dict__.copy()
Foo.__setstate__ = Foo__setstate__
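# Illustrative pickling round-trip (a sketch, not part of the
# generated module): __getstate__/__setstate__ make Foo picklable,
# and __setstate__ backfills field42's declared default (1024) when a
# stored payload predates that field.
#
# import pickle
# foo = Foo(field1=7)
# restored = pickle.loads(pickle.dumps(foo))
# assert restored.field1 == 7 and restored.field42 == 1024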
fix_spec(all_structs)
del all_structs
|
9de3129279abeb713af43eff2aa0fbad1d3bfe1b
|
3196488df20871d5196e7a7224577c6bb345477e
|
/darwin/utils/__init__.py
|
fc7c082fbc9fa6c4f8a6632e316a63c3066a5cd2
|
[
"MIT"
] |
permissive
|
v7labs/darwin-py
|
60360d94c12fc5170643588a2fa890981aeab075
|
3cc2d5299fb48d48aeac10e01f79f49e856e6967
|
refs/heads/master
| 2023-08-31T10:06:55.334381
| 2023-08-31T09:51:01
| 2023-08-31T09:51:01
| 192,462,056
| 110
| 36
|
MIT
| 2023-09-13T17:25:24
| 2019-06-18T03:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
__init__.py
|
from .flatten_list import flatten_list
from .utils import *
|
e66e189d3001b1ea13b63a0def1c7f91ef8bbebc
|
61b95ee2aefbcfbd6c4abf9511d976d0b9d0e100
|
/faker/providers/ssn/en_CA/__init__.py
|
6df8f8bb253ff4adbe89dc33f34c6c4e7069b631
|
[
"MIT"
] |
permissive
|
joke2k/faker
|
fed7472580ced2bce326fe4ea0c3d1c810853d5e
|
33e36b1b6cc9c6f039fe387988853771bab60624
|
refs/heads/master
| 2023-09-04T00:43:33.599705
| 2023-08-31T16:15:04
| 2023-08-31T16:15:04
| 6,662,075
| 14,544
| 2,215
|
MIT
| 2023-09-11T16:06:14
| 2012-11-12T23:00:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,966
|
py
|
__init__.py
|
from .. import Provider as SsnProvider
def checksum(sin):
"""
Determine validity of a Canadian Social Insurance Number.
Validation is performed using a modified Luhn Algorithm. To check
the Every second digit of the SIN is doubled and the result is
summed. If the result is a multiple of ten, the Social Insurance
Number is considered valid.
https://en.wikipedia.org/wiki/Social_Insurance_Number
"""
# Remove spaces and create a list of digits.
checksumCollection = list(sin.replace(" ", ""))
checksumCollection = [int(i) for i in checksumCollection]
# Discard the last digit, we will be calculating it later.
checksumCollection[-1] = 0
# Iterate over the provided SIN and double every second digit.
# In the case that doubling that digit results in a two-digit
# number, then add the two digits together and keep that sum.
for i in range(1, len(checksumCollection), 2):
result = checksumCollection[i] * 2
if result < 10:
checksumCollection[i] = result
else:
checksumCollection[i] = result - 10 + 1
# The appropriate check digit is the value that, when summed with
# the other eight digits, yields a total divisible by 10.
check_digit = 10 - (sum(checksumCollection) % 10)
check_digit = 0 if check_digit == 10 else check_digit
return check_digit
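# Worked example (added for illustration; "046 454 286" is a valid
# sample SIN commonly used in documentation): zeroing the last digit
# and doubling every second one gives 0+8+6+8+5+8+2+7+0 = 44, so
# checksum("046 454 286") returns 10 - (44 % 10) = 6, matching the
# SIN's final digit.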
class Provider(SsnProvider):
# In order to create a valid SIN we need to provide a number that
# passes a simple modified Luhn Algorithm checksum.
#
# This function reverses the checksum steps to create a random
# valid nine-digit Canadian SIN (Social Insurance Number) in the
# format '### ### ###'.
def ssn(self) -> str:
# Draw eight distinct digits at random (note that sample() never
# repeats a value and the range here is 0-8).
digits = self.generator.random.sample(range(9), 8)
# The final step of the validation requires that all of the
# digits sum to a multiple of 10. First, sum the first 8 and
# set the 9th to the value that results in a multiple of 10.
check_digit = 10 - (sum(digits) % 10)
check_digit = 0 if check_digit == 10 else check_digit
digits.append(check_digit)
# digits now holds what the Luhn-style doubling (digit-wise
# multiplication by the pattern 1 2 1 2 1 2 1 2 1) would produce for
# the SIN we want. The next step reverses that doubling on every
# other element: doubling maps d to 2d when 2d < 10 and to 2d - 9
# otherwise, so even values invert as d // 2 and odd values as
# (d + 9) // 2.
for i in range(1, len(digits), 2):
if digits[i] % 2 == 0:
digits[i] = digits[i] // 2
else:
digits[i] = (digits[i] + 9) // 2
# Build the resulting SIN string.
sin = ""
for i in range(0, len(digits)):
sin += str(digits[i])
# Add a space to make it conform to Canadian formatting.
if i in (2, 5):
sin += " "
# Finally return our random but valid SIN.
return sin
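# Minimal usage sketch (for illustration; uses the standard Faker
# entry point):
#
# from faker import Faker
# fake = Faker("en_CA")
# fake.ssn() # e.g. "046 454 286" -- random but checksum-valid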
|
cac48c4b06fde9eacd4ea3cb3db9cd2874b2f080
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/tests/unit/bokeh/models/test_annotations.py
|
2a1ba9db5f0c38763b9db22a8a49487adf7f1374
|
[
"BSD-3-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 19,277
|
py
|
test_annotations.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from datetime import datetime
from unittest import mock
# Bokeh imports
from bokeh.core.properties import field, value
from bokeh.core.validation import check_integrity, process_validation_issues
from bokeh.models import (
Arrow,
ArrowHead,
Band,
BoxAnnotation,
ColorBar,
ColumnDataSource,
GlyphRenderer,
Label,
LabelSet,
Legend,
LegendItem,
LinearColorMapper,
PolyAnnotation,
Slope,
Span,
Title,
Whisker,
)
from bokeh.util.serialization import convert_datetime_type
from _util_models import (
ABOVE_FILL,
ABOVE_HATCH,
ANGLE,
BELOW_FILL,
BELOW_HATCH,
FILL,
HATCH,
HOVER_FILL,
HOVER_HATCH,
HOVER_LINE,
LINE,
TEXT,
check_fill_properties,
check_hatch_properties,
check_line_properties,
check_properties_existence,
check_text_properties,
prefix,
)
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ANNOTATION = [
"visible",
"level",
"coordinates",
"x_range_name",
"y_range_name",
"group",
"propagate_hover",
]
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
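# Each default-value test below follows the same pattern: construct
# the model, assert every documented default, run the
# check_*_properties helpers for the styled property groups, and
# finish with check_properties_existence so the test fails if
# properties are added or removed without updating it.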
def test_Legend() -> None:
legend = Legend()
assert legend.location == 'top_right'
assert legend.orientation == 'vertical'
assert legend.ncols == "auto"
assert legend.nrows == "auto"
assert legend.title is None
assert legend.title_location == "above"
assert legend.title_standoff == 5
assert legend.label_standoff == 5
assert legend.label_height == 20
assert legend.label_width == 20
assert legend.glyph_height == 20
assert legend.glyph_width == 20
assert legend.padding == 10
assert legend.spacing == 3
assert legend.margin == 10
assert legend.items == []
assert legend.click_policy == "none"
assert legend.item_background_policy == "none"
check_line_properties(legend, "border_", "#e5e5e5", 1.0, 0.5)
check_text_properties(legend, "label_", "13px", "middle", scalar=True)
check_fill_properties(legend, "background_", "#ffffff", 0.95)
check_fill_properties(legend, "item_background_", "#f1f1f1", 0.8)
check_properties_existence(legend, [
*ANNOTATION,
"location",
"orientation",
"ncols",
"nrows",
"title",
"title_location",
"title_standoff",
"label_standoff",
"label_height",
"label_width",
"glyph_height",
"glyph_width",
"margin",
"padding",
"spacing",
"items",
"click_policy",
"item_background_policy",
],
prefix('label_', TEXT),
prefix('title_', TEXT),
prefix('border_', LINE),
prefix('background_', FILL),
prefix('item_background_', FILL),
prefix('inactive_', FILL),
)
def test_LegendItem() -> None:
item = LegendItem()
assert item.index is None
assert item.label is None
assert item.name is None
assert item.renderers == []
assert item.visible is True
def test_ColorBar() -> None:
color_mapper = LinearColorMapper()
color_bar = ColorBar(color_mapper=color_mapper)
assert color_bar.location == 'top_right'
assert color_bar.orientation == 'auto'
assert color_bar.height == 'auto'
assert color_bar.width == 'auto'
assert color_bar.scale_alpha == 1.0
assert color_bar.title is None
assert color_bar.title_standoff == 2
assert color_bar.ticker == "auto"
assert color_bar.formatter == "auto"
assert color_bar.color_mapper == color_mapper
assert color_bar.margin == 30
assert color_bar.padding == 10
assert color_bar.label_standoff == 5
assert color_bar.major_tick_in == 5
assert color_bar.major_tick_out == 0
assert color_bar.minor_tick_in == 0
assert color_bar.minor_tick_out == 0
assert color_bar.display_low is None
assert color_bar.display_high is None
check_text_properties(color_bar, "title_", "13px", "bottom", "italic", scalar=True)
check_text_properties(color_bar, "major_label_", "11px", "bottom", "normal", "left", scalar=True)
check_line_properties(color_bar, "major_tick_", "#ffffff")
check_line_properties(color_bar, "minor_tick_", None)
check_line_properties(color_bar, "bar_", None)
check_line_properties(color_bar, "border_", None)
check_fill_properties(color_bar, "background_", "#ffffff", 0.95)
check_properties_existence(color_bar, [
*ANNOTATION,
"location",
"orientation",
"height",
"width",
"scale_alpha",
"title",
"title_standoff",
"ticker",
"formatter",
"color_mapper",
"margin",
"padding",
"label_standoff",
"major_tick_in",
"major_tick_out",
"minor_tick_in",
"minor_tick_out",
"major_label_overrides",
"major_label_policy",
"display_low",
"display_high",
],
prefix('title_', TEXT),
prefix('major_label_', TEXT),
prefix('major_tick_', LINE),
prefix('minor_tick_', LINE),
prefix('bar_', LINE),
prefix('border_', LINE),
prefix('background_', FILL),
)
def test_Arrow() -> None:
arrow = Arrow()
assert arrow.x_start == field("x_start")
assert arrow.y_start == field("y_start")
assert arrow.start_units == 'data'
assert arrow.start is None
assert arrow.x_end == field("x_end")
assert arrow.y_end == field("y_end")
assert arrow.end_units == 'data'
assert isinstance(arrow.end, ArrowHead)
assert isinstance(arrow.source, ColumnDataSource)
assert arrow.x_range_name == "default"
assert arrow.y_range_name == "default"
check_line_properties(arrow)
check_properties_existence(arrow, [
*ANNOTATION,
"x_start",
"y_start",
"start_units",
"start",
"x_end",
"y_end",
"end_units",
"end",
"source",
], LINE)
def test_BoxAnnotation() -> None:
box = BoxAnnotation()
assert box.left is None
assert box.left_units == "data"
assert box.right is None
assert box.right_units == "data"
assert box.bottom is None
assert box.bottom_units == "data"
assert box.top is None
assert box.top_units == "data"
assert box.x_range_name == "default"
assert box.y_range_name == "default"
assert box.level == "annotation"
assert box.editable is False
check_line_properties(box, "", "#cccccc", 1, 0.3)
check_fill_properties(box, "", "#fff9ba", 0.4)
check_hatch_properties(box)
check_line_properties(box, "hover_", None, 1, 0.3)
check_fill_properties(box, "hover_", None, 0.4)
check_hatch_properties(box, "hover_")
check_properties_existence(box, [
*ANNOTATION,
"left",
"left_units",
"right",
"right_units",
"bottom",
"bottom_units",
"top",
"top_units",
"border_radius",
"editable",
"resizable",
"movable",
"symmetric",
], LINE, FILL, HATCH, HOVER_LINE, HOVER_FILL, HOVER_HATCH)
def test_BoxAnnotation_accepts_datetime() -> None:
obj = BoxAnnotation(
left = datetime(2018,8,7,0,0),
right = datetime(2018,8,7,0,0),
top = datetime(2018,8,7,0,0),
bottom = datetime(2018,8,7,0,0),
)
assert isinstance(obj.left, datetime)
assert isinstance(obj.right, datetime)
assert isinstance(obj.top, datetime)
assert isinstance(obj.bottom, datetime)
assert convert_datetime_type(obj.left) == 1533600000000.0
assert convert_datetime_type(obj.right) == 1533600000000.0
assert convert_datetime_type(obj.top) == 1533600000000.0
assert convert_datetime_type(obj.bottom) == 1533600000000.0
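# Note (added for clarity): convert_datetime_type yields milliseconds
# since the Unix epoch, and 2018-08-07 00:00 (treated as UTC here) is
# exactly 17,750 days after 1970-01-01, i.e.
# 17750 * 86400000 = 1533600000000.0 ms.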
def test_Band() -> None:
band = Band()
assert band.level == 'annotation'
assert band.lower == field("lower")
assert band.lower_units == 'data'
assert band.upper == field("upper")
assert band.upper_units == 'data'
assert band.base == field("base")
assert band.dimension == 'height'
assert isinstance(band.source, ColumnDataSource)
assert band.x_range_name == 'default'
assert band.y_range_name == 'default'
check_line_properties(band, "", "#cccccc", 1.0, 0.3)
check_fill_properties(band, "", "#fff9ba", 0.4)
check_properties_existence(band, [
*ANNOTATION,
"lower",
"lower_units",
"upper",
"upper_units",
"base",
"base_units",
"dimension",
"source",
], LINE, FILL)
def test_Label() -> None:
label = Label(x=11, y=12)
assert label.level == 'annotation'
assert label.x == 11
assert label.y == 12
assert label.x_units == 'data'
assert label.y_units == 'data'
assert label.text == ""
assert label.angle == 0
assert label.angle_units == 'rad'
assert label.x_offset == 0
assert label.y_offset == 0
assert label.x_range_name == 'default'
assert label.y_range_name == 'default'
check_text_properties(label, scalar=True)
check_fill_properties(label, "background_", None, 1.0)
check_line_properties(label, "border_", None, 1.0, 1.0)
check_properties_existence(label, [
*ANNOTATION,
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
],
TEXT,
prefix('border_', LINE),
prefix('background_', FILL),
)
def test_Label_accepts_datetime_xy() -> None:
obj = Label(
x = datetime(2018,8,7,0,0),
y = datetime(2018,8,7,0,0),
)
assert isinstance(obj.x, datetime)
assert isinstance(obj.y, datetime)
assert convert_datetime_type(obj.x) == 1533600000000.0
assert convert_datetime_type(obj.y) == 1533600000000.0
def test_LabelSet() -> None:
label_set = LabelSet()
assert label_set.level == 'annotation'
assert label_set.x == field("x")
assert label_set.y == field("y")
assert label_set.x_units == 'data'
assert label_set.y_units == 'data'
assert label_set.text == field("text")
assert label_set.angle == 0
assert label_set.angle_units == 'rad'
assert label_set.x_offset == 0
assert label_set.y_offset == 0
assert label_set.x_range_name == 'default'
assert label_set.y_range_name == 'default'
assert isinstance(label_set.source, ColumnDataSource)
assert label_set.source.data == {}
check_text_properties(label_set)
check_fill_properties(label_set, "background_", None, 1.0)
check_line_properties(label_set, "border_", None, 1.0, 1.0)
check_properties_existence(label_set, [
*ANNOTATION,
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"source",
],
TEXT,
ANGLE,
prefix('border_', LINE),
prefix('background_', FILL),
)
def test_PolyAnnotation() -> None:
poly = PolyAnnotation()
assert poly.xs == []
assert poly.xs_units == "data"
assert poly.ys == []
assert poly.ys_units == "data"
assert poly.x_range_name == "default"
assert poly.y_range_name == "default"
assert poly.level == "annotation"
assert poly.editable is False
check_line_properties(poly, "", "#cccccc", 1, 0.3)
check_fill_properties(poly, "", "#fff9ba", 0.4)
check_hatch_properties(poly)
check_line_properties(poly, "hover_", None, 1, 0.3)
check_fill_properties(poly, "hover_", None, 0.4)
check_hatch_properties(poly, "hover_")
check_properties_existence(poly, [
*ANNOTATION,
"xs",
"xs_units",
"ys",
"ys_units",
"editable",
], LINE, FILL, HATCH, HOVER_LINE, HOVER_FILL, HOVER_HATCH)
def test_PolyAnnotation_accepts_datetime_xs_ys() -> None:
obj = PolyAnnotation(
xs = [datetime(2018,8,7,0,0),1],
ys = [datetime(2018,8,7,0,0),1],
)
assert isinstance(obj.xs[0], datetime)
assert isinstance(obj.xs[1], int)
assert isinstance(obj.ys[0], datetime)
assert isinstance(obj.ys[1], int)
assert convert_datetime_type(obj.xs[0]) == 1533600000000.0
assert convert_datetime_type(obj.ys[0]) == 1533600000000.0
def test_Slope() -> None:
slope = Slope()
assert slope.gradient is None
assert slope.y_intercept is None
assert slope.x_range_name == 'default'
assert slope.y_range_name == 'default'
assert slope.level == 'annotation'
check_line_properties(slope, "", 'black', 1.0)
check_properties_existence(slope, [
*ANNOTATION,
"gradient",
"y_intercept",
], LINE, ABOVE_FILL, ABOVE_HATCH, BELOW_FILL, BELOW_HATCH)
def test_Span() -> None:
line = Span()
assert line.location is None
assert line.location_units == 'data'
assert line.dimension == 'width'
assert line.x_range_name == 'default'
assert line.y_range_name == 'default'
assert line.level == 'annotation'
assert line.editable is False
check_line_properties(line, "", 'black', 1.0)
check_properties_existence(line, [
*ANNOTATION,
"location",
"location_units",
"dimension",
"editable",
], LINE, HOVER_LINE)
def test_Span_accepts_datetime_location() -> None:
obj = Span(
location = datetime(2018,8,7,0,0),
)
assert isinstance(obj.location, datetime)
assert convert_datetime_type(obj.location) == 1533600000000.0
def test_Title() -> None:
title = Title()
assert title.level == 'annotation'
assert title.text == ""
assert title.vertical_align == 'bottom'
assert title.align == 'left'
assert title.offset == 0
assert title.text_font == 'helvetica'
assert title.text_font_size == '13px'
assert title.text_font_style == 'bold'
assert title.text_color == '#444444'
assert title.text_alpha == 1.0
assert title.text_line_height == 1.0
check_fill_properties(title, "background_", None, 1.0)
check_line_properties(title, "border_", None, 1.0, 1.0)
check_properties_existence(title, [
*ANNOTATION,
"text",
"vertical_align",
"align",
"offset",
"standoff",
],
TEXT,
prefix("border_", LINE),
prefix("background_", FILL),
)
def test_Whisker() -> None:
whisker = Whisker()
assert whisker.level == 'underlay'
assert whisker.lower == field("lower")
assert whisker.lower_units == 'data'
assert isinstance(whisker.lower_head, ArrowHead)
assert whisker.lower_head.size == 10
assert whisker.upper == field("upper")
assert whisker.upper_units == 'data'
assert isinstance(whisker.upper_head, ArrowHead)
assert whisker.upper_head.size == 10
assert whisker.base == field("base")
assert whisker.dimension == 'height'
assert isinstance(whisker.source, ColumnDataSource)
assert whisker.x_range_name == 'default'
assert whisker.y_range_name == 'default'
check_line_properties(whisker, "")
check_properties_existence(whisker, [
*ANNOTATION,
"lower",
"lower_units",
"lower_head",
"upper",
"upper_units",
"upper_head",
"base",
"base_units",
"dimension",
"source",
], LINE)
def test_Whisker_and_Band_accept_negative_values() -> None:
whisker = Whisker(base=-1., lower=-1.5, upper=-0.5)
assert whisker.base == -1.
assert whisker.lower == -1.5
assert whisker.upper == -0.5
band = Band(base=-1., lower=-1.5, upper=-0.5)
assert band.base == -1.
assert band.lower == -1.5
assert band.upper == -0.5
def test_can_add_multiple_glyph_renderers_to_legend_item() -> None:
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
gr_2 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([legend_item])
process_validation_issues(issues)
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_and_different_data_sources_raises_a_validation_error() -> None:
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
gr_2 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
legend_item.label = field('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([legend_item])
process_validation_issues(issues)
assert mock_logger.error.call_count == 1
def test_legend_item_with_value_label_and_different_data_sources_does_not_raise_a_validation_error() -> None:
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
gr_2 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = value('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([legend_item])
process_validation_issues(issues)
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_raises_error_if_field_not_in_cds() -> None:
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = field('label')
legend_item.renderers = [gr_1]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
issues = check_integrity([legend_item])
process_validation_issues(issues)
assert mock_logger.error.call_count == 1
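# Illustrative sketch (not an original test) contrasting the two label kinds
# the validations above exercise: field("...") is looked up per row in each
# renderer's ColumnDataSource (hence the shared-column requirement), while
# value("...") is a literal string with no data-source constraint.
def test_legend_item_label_kinds_sketch() -> None:
    source = ColumnDataSource(data={"label": ["a", "b"]})
    per_row = LegendItem(label=field("label"), renderers=[GlyphRenderer(data_source=source)])
    literal = LegendItem(label=value("a literal"), renderers=[GlyphRenderer(data_source=ColumnDataSource())])
    assert per_row.label == field("label")
    assert literal.label == value("a literal")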
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
[file: /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/iam_credentials.py | repo: huaweicloud/huaweicloud-sdk-python-v3 | license: Apache-2.0 | lang: Python]
from huaweicloudsdkcore.signer.signer import Signer


class IamCredentials:
    """Token-based credentials: authenticates requests by sending a pre-issued
    IAM token in the ``X-Auth-Token`` header."""

    _X_AUTH_TOKEN = "X-Auth-Token"

    def __init__(self, auth_token=None):
        self.auth_token = auth_token

    def with_x_auth_token(self, auth_token):
        # Fluent setter: returns self so the credentials can be built in a chain.
        self.auth_token = auth_token
        return self

    def get_update_path_params(self):
        # A pre-issued token contributes no path parameters to substitute.
        return {}

    def process_auth_params(self, http_client, region_id):
        # Nothing to resolve for a pre-issued token; use the credentials as-is.
        return self

    def process_auth_request(self, request, http_client):
        # Attach the token header unless the caller already supplied one, then
        # normalize the request URI before the request is dispatched.
        if self.auth_token and self._X_AUTH_TOKEN not in request.header_params:
            request.header_params[self._X_AUTH_TOKEN] = self.auth_token
        Signer.process_request_uri(request)
        return http_client.executor.submit(lambda: request)
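# Minimal usage sketch (illustrative, not part of the SDK source): build the
# credentials with the fluent setter; the token string is a placeholder.
if __name__ == "__main__":
    credentials = IamCredentials().with_x_auth_token("<pre-issued-iam-token>")
    assert credentials.auth_token == "<pre-issued-iam-token>"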
[file: /FastSimulation/TrajectoryManager/python/TrackerSimHits_cfi.py | repo: cms-sw/cmssw | license: Apache-2.0 | lang: Python]
import FWCore.ParameterSet.Config as cms
TrackerSimHitsBlock = cms.PSet(
TrackerSimHits = cms.PSet(
# Smallest charged particle pT for which SimHit's are saved (GeV/c)
pTmin = cms.untracked.double(0.2),
# Save SimHit's only for the first loop
firstLoop = cms.untracked.bool(True)
)
)
#
# Modify for running in Run 2
#
from Configuration.Eras.Modifier_run2_common_cff import run2_common
run2_common.toModify( TrackerSimHitsBlock.TrackerSimHits, pTmin = 0.1 )
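# Illustrative readback (not part of the original config): cms parameter
# wrappers expose their plain Python value via .value(), so the defaults
# above can be inspected before any era modifier is applied.
if __name__ == "__main__":
    print(TrackerSimHitsBlock.TrackerSimHits.pTmin.value())      # 0.2
    print(TrackerSimHitsBlock.TrackerSimHits.firstLoop.value())  # True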
[file: /third_party/blink/web_tests/external/wpt/webdriver/tests/classic/new_session/response.py | repo: chromium/chromium | license: BSD-3-Clause (multiple licenses detected) | lang: Python]
import uuid
import pytest
from tests.support.asserts import assert_success
def test_sessionid(new_session, add_browser_capabilities):
response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
value = assert_success(response)
assert isinstance(value["sessionId"], str)
uuid.UUID(hex=value["sessionId"])
@pytest.mark.parametrize("capability, type", [
("browserName", str),
("browserVersion", str),
("platformName", str),
("acceptInsecureCerts", bool),
("pageLoadStrategy", str),
("proxy", dict),
("setWindowRect", bool),
("timeouts", dict),
("strictFileInteractability", bool),
("unhandledPromptBehavior", str),
])
def test_capability_type(session, capability, type):
assert isinstance(session.capabilities, dict)
assert capability in session.capabilities
assert isinstance(session.capabilities[capability], type)
@pytest.mark.parametrize("capability, default_value", [
("acceptInsecureCerts", False),
("pageLoadStrategy", "normal"),
("proxy", {}),
("setWindowRect", True),
("timeouts", {"implicit": 0, "pageLoad": 300000, "script": 30000}),
("strictFileInteractability", False),
("unhandledPromptBehavior", "dismiss and notify"),
])
def test_capability_default_value(session, capability, default_value):
assert isinstance(session.capabilities, dict)
assert capability in session.capabilities
assert session.capabilities[capability] == default_value
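# For context (illustrative, not an original WPT test): the wire-level payload
# these fixtures send, per the WebDriver "New Session" command. The host and
# port below are placeholders for wherever the remote end is listening.
def _new_session_request_sketch():
    import json
    import urllib.request
    body = json.dumps({"capabilities": {"alwaysMatch": {}}}).encode("utf-8")
    # A conforming remote end answers with:
    #   {"value": {"sessionId": "<uuid>", "capabilities": {...}}}
    return urllib.request.Request(
        "http://localhost:4444/session",
        data=body,
        headers={"Content-Type": "application/json"},
    )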
[file: /marge/single_merge_job.py | repo: smarkets/marge-bot | license: BSD-3-Clause | lang: Python]
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
import logging as log
import time
from datetime import datetime
from . import git, gitlab
from .commit import Commit
from .job import CannotMerge, GitLabRebaseResultMismatch, MergeJob, SkipMerge
class SingleMergeJob(MergeJob):
def __init__(self, *, api, user, project, repo, options, merge_request):
super().__init__(api=api, user=user, project=project, repo=repo, options=options)
self._merge_request = merge_request
self._options = options
def execute(self):
merge_request = self._merge_request
log.info('Processing !%s - %r', merge_request.iid, merge_request.title)
try:
approvals = merge_request.fetch_approvals()
self.update_merge_request_and_accept(approvals)
log.info('Successfully merged !%s.', merge_request.info['iid'])
except SkipMerge as err:
log.warning("Skipping MR !%s: %s", merge_request.info['iid'], err.reason)
except CannotMerge as err:
message = "I couldn't merge this branch: %s" % err.reason
log.warning(message)
self.unassign_from_mr(merge_request)
merge_request.comment(message)
except git.GitError:
log.exception('Unexpected Git error')
merge_request.comment('Something seems broken on my local git repo; check my logs!')
raise
except Exception:
log.exception('Unexpected Exception')
merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:")
self.unassign_from_mr(merge_request)
raise
def update_merge_request_and_accept(self, approvals):
api = self._api
merge_request = self._merge_request
updated_into_up_to_date_target_branch = False
while not updated_into_up_to_date_target_branch:
self.ensure_mergeable_mr(merge_request)
source_project, source_repo_url, _ = self.fetch_source_project(merge_request)
target_project = self.get_target_project(merge_request)
try:
# NB. this will be a no-op if there is nothing to update/rewrite
target_sha, _updated_sha, actual_sha = self.update_from_target_branch_and_push(
merge_request,
source_repo_url=source_repo_url,
)
except GitLabRebaseResultMismatch:
log.info("Gitlab rebase didn't give expected result")
merge_request.comment("Someone skipped the queue! Will have to try again...")
continue
if _updated_sha == actual_sha and self._options.guarantee_final_pipeline:
log.info('No commits on target branch to fuse, triggering pipeline...')
merge_request.comment("jenkins retry")
time.sleep(30)
log.info(
'Commit id to merge %r into: %r (updated sha: %r)',
actual_sha,
target_sha,
_updated_sha
)
time.sleep(5)
sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id
# Make sure no-one managed to race and push to the branch in the
# meantime, because we're about to impersonate the approvers, and
# we don't want to approve unreviewed commits
if sha_now != actual_sha:
raise CannotMerge('Someone pushed to branch while we were trying to merge')
self.maybe_reapprove(merge_request, approvals)
if target_project.only_allow_merge_if_pipeline_succeeds:
self.wait_for_ci_to_pass(merge_request, actual_sha)
time.sleep(2)
self.wait_for_merge_status_to_resolve(merge_request)
self.ensure_mergeable_mr(merge_request)
try:
ret = merge_request.accept(
remove_branch=merge_request.force_remove_source_branch,
sha=actual_sha,
merge_when_pipeline_succeeds=bool(target_project.only_allow_merge_if_pipeline_succeeds),
)
log.info('merge_request.accept result: %s', ret)
except gitlab.NotAcceptable as err:
new_target_sha = Commit.last_on_branch(self._project.id, merge_request.target_branch, api).id
# target_branch has moved under us since we updated, just try again
if new_target_sha != target_sha:
log.info('Someone was naughty and by-passed marge')
merge_request.comment(
"My job would be easier if people didn't jump the queue and push directly... *sigh*"
)
continue
# otherwise the source branch has been pushed to or something
# unexpected went wrong in either case, we expect the user to
# explicitly re-assign to marge (after resolving potential
# problems)
raise CannotMerge('Merge request was rejected by GitLab: %r' % err.error_message) from err
except gitlab.Unauthorized as err:
log.warning('Unauthorized!')
raise CannotMerge('My user cannot accept merge requests!') from err
except gitlab.NotFound as ex:
log.warning('Not Found!: %s', ex)
merge_request.refetch_info()
if merge_request.state == 'merged':
# someone must have hit "merge when build succeeds" and we lost the race,
# the branch is gone and we got a 404. Anyway, our job here is done.
# (see #33)
updated_into_up_to_date_target_branch = True
else:
log.warning('For the record, merge request state is %r', merge_request.state)
raise
except gitlab.MethodNotAllowed as ex:
log.warning('Not Allowed!: %s', ex)
merge_request.refetch_info()
if merge_request.work_in_progress:
raise CannotMerge(
'The request was marked as WIP as I was processing it (maybe a WIP commit?)'
) from ex
if merge_request.state == 'reopened':
raise CannotMerge(
'GitLab refused to merge this branch. I suspect that a Push Rule or a git-hook '
'is rejecting my commits; maybe my email needs to be white-listed?'
) from ex
if merge_request.state == 'closed':
raise CannotMerge(
'Someone closed the merge request while I was attempting to merge it.'
) from ex
if merge_request.state == 'merged':
# We are not covering any observed behaviour here, but if at this
# point the request is merged, our job is done, so no need to complain
log.info('Merge request is already merged, someone was faster!')
updated_into_up_to_date_target_branch = True
else:
raise CannotMerge(
"Gitlab refused to merge this request and I don't know why!" + (
" Maybe you have unresolved discussions?"
if self._project.only_allow_merge_if_all_discussions_are_resolved else ""
)
) from ex
except gitlab.ApiError as err:
log.exception('Unanticipated ApiError from GitLab on merge attempt')
raise CannotMerge('had some issue with GitLab, check my logs...') from err
else:
self.wait_for_branch_to_be_merged()
updated_into_up_to_date_target_branch = True
def wait_for_branch_to_be_merged(self):
merge_request = self._merge_request
time_0 = datetime.utcnow()
waiting_time_in_secs = 10
while datetime.utcnow() - time_0 < self._merge_timeout:
merge_request.refetch_info()
if merge_request.state == 'merged':
return # success!
if merge_request.state == 'closed':
raise CannotMerge('someone closed the merge request while merging!')
assert merge_request.state in ('opened', 'reopened', 'locked'), merge_request.state
log.info('Giving %s more secs for !%s to be merged...', waiting_time_in_secs, merge_request.iid)
time.sleep(waiting_time_in_secs)
raise CannotMerge('It is taking too long to see the request marked as merged!')
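# Distilled sketch (not marge-bot API) of the guard inside
# update_merge_request_and_accept above: merge only against the exact SHA that
# was validated and approved, so a racing push forces a retry instead of
# merging unreviewed commits.
def _merge_if_unchanged(get_head_sha, accept, validated_sha):
    if get_head_sha() != validated_sha:
        raise CannotMerge('Someone pushed to branch while we were trying to merge')
    return accept(sha=validated_sha)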
[file: /job_compassplus/tx_parse_xml/show_db_views.py | repo: gil9red/SimplePyScripts | license: CC-BY-4.0 | lang: Python]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import xml.etree.ElementTree as ET
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
@dataclass
class View:
id: str
name: str
db_name: str
def get_views(model_path: Path) -> list[View]:
ns = dict(
dds="http://schemas.radixware.org/ddsdef.xsd",
)
items = []
model = ET.fromstring(model_path.read_text(encoding="utf-8"))
for view in model.findall(".//dds:Views/dds:View", namespaces=ns):
items.append(
View(
id=view.attrib["Id"],
name=view.attrib["Name"],
db_name=view.attrib["DbName"],
)
)
return items
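# Hypothetical model.xml fragment showing the shape get_views() expects; the
# element and attribute names mirror the parser above, the values are invented.
_SAMPLE_MODEL_XML = """\
<Model xmlns:dds="http://schemas.radixware.org/ddsdef.xsd">
    <dds:Views>
        <dds:View Id="v1" Name="MyView" DbName="MY_VIEW"/>
    </dds:Views>
</Model>
"""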
def process(branch_dir: Path | str) -> dict[str, list[View]]:
    """Collect the DDS views of every layer/module under ``branch_dir``,
    keyed by "<layer>/<module>"."""
    if isinstance(branch_dir, str):
        branch_dir = Path(branch_dir)

    views_by_layer_module = defaultdict(list)
    for layer_dir in branch_dir.glob("*"):
        if not layer_dir.is_dir():
            continue

        layer_dds_dir = layer_dir / "dds"
        if layer_dds_dir.is_dir():
            layer = layer_dir.name
            for model_xml in layer_dds_dir.glob("*/model.xml"):
                module = model_xml.parent.name
                for view in get_views(model_xml):
                    key = f"{layer}/{module}"
                    views_by_layer_module[key].append(view)

    return views_by_layer_module
if __name__ == "__main__":
path = r"C:\DEV__OPTT\trunk_optt"
    views_by_layer_module = process(path)
    for key, views in views_by_layer_module.items():
print(f"{key} ({len(views)})")
for i, view in enumerate(views, 1):
print(f" {i}. {view.name} ({view.db_name}, {view.id})")
print()
[file: /python_modules/dagster/dagster_tests/core_tests/test_compute_only_job.py | repo: dagster-io/dagster | license: Apache-2.0 | lang: Python]
from typing import Dict, TypeVar
from dagster import job, op
T = TypeVar("T")
def _set_key_value(ddict: Dict[str, object], key: str, value: T) -> T:
ddict[key] = value
return value
def test_execute_op_with_dep_only_inputs_no_api():
did_run_dict = {}
@op
def step_one_op(_):
_set_key_value(did_run_dict, "step_one", True)
@op
def step_two_op(_, _in):
_set_key_value(did_run_dict, "step_two", True)
@job
def foo_job():
step_two_op(step_one_op())
result = foo_job.execute_in_process()
assert result.success
assert did_run_dict["step_one"] is True
assert did_run_dict["step_two"] is True
def test_execute_op_with_dep_only_inputs_with_api():
did_run_dict = {}
@op
def step_one_op(_):
_set_key_value(did_run_dict, "step_one", True)
@op
def step_two_op(_, _in):
_set_key_value(did_run_dict, "step_two", True)
@job
def foo_job():
step_two_op(step_one_op())
assert foo_job.execute_in_process().success
assert did_run_dict["step_one"] is True
assert did_run_dict["step_two"] is True