Dataset schema (observed value ranges per column):

| Column | Dtype | Min | Max |
|---|---|---|---|
| blob_id | string (length) | 40 | 40 |
| directory_id | string (length) | 40 | 40 |
| path | string (length) | 4 | 721 |
| content_id | string (length) | 40 | 40 |
| detected_licenses | list (length) | 0 | 57 |
| license_type | string (2 classes) | | |
| repo_name | string (length) | 5 | 91 |
| snapshot_id | string (length) | 40 | 40 |
| revision_id | string (length) | 40 | 40 |
| branch_name | string (321 classes) | | |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 | 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 | 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 | 2023-09-06 06:22:19 |
| github_id | int64 | 426 | 681M |
| star_events_count | int64 | 101 | 243k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (23 classes) | | |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 | 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 | 2023-08-10 11:14:58 |
| gha_language | string (147 classes) | | |
| src_encoding | string (26 classes) | | |
| language | string (2 classes) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 6 | 10.2M |
| extension | string (115 classes) | | |
| filename | string (length) | 3 | 113 |
| content | string (length) | 6 | 10.2M |
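Each row that follows pairs this metadata with the full file text in the `content` column. As a minimal sketch of how a split with this schema could be consumed (assuming it is exposed through the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the real repository name):

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset repository.
ds = load_dataset("org/source-code-dump", split="train", streaming=True)

# Keep only permissively licensed Python files smaller than 10 kB.
for row in ds:
    if (row["license_type"] == "permissive"
            and row["language"] == "Python"
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # stop after the first match; drop this to scan the whole split
```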
761524de157a99d41594474981fe05a28e60cd15
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/simtbx/diffBragg/refiners/crystal_systems/monoclinic.py
|
e40e6dcbe40c41554a5bfe0d0a9f917ed54e5b78
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
monoclinic.py
|
from __future__ import division
from simtbx.diffBragg.refiners.crystal_systems import CrystalSystemManager
import numpy as np
from scitbx.matrix import sqr
class MonoclinicManager(CrystalSystemManager):
def __init__(self, a=55, b=65, c=77, beta=100*np.pi/180.):
self.variables = [a, b, c, beta]
@property
def variables(self):
return self._variables
@variables.setter
def variables(self, val):
self._variables = val
@property
def derivative_matrices(self):
return [self._dB_da_real, self._dB_db_real,
self._dB_dc_real, self._dB_dbeta_real]
@property
def second_derivative_matrices(self):
return [self._d2B_da2_real, self._d2B_db2_real,
self._d2B_dc2_real, self._d2B_dbeta2_real]
@property
def a(self):
return self.variables[0]
@property
def b(self):
return self.variables[1]
@property
def c(self):
return self.variables[2]
@property
def al(self):
return np.pi/2
@property
def be(self):
return self.variables[3]
@property
def ga(self):
return np.pi/2
@property
def variable_names(self):
return [self._names[0], self._names[1],
self._names[2], self._names[4]]
@property
def _dB_da_real(self):
return sqr((1, 0, 0,
0, 0, 0,
0, 0, 0))
@property
def _d2B_da2_real(self):
return sqr((0, 0, 0,
0, 0, 0,
0, 0, 0))
@property
def _dB_db_real(self):
return sqr((0, 0, 0,
0, 1, 0,
0, 0, 0))
@property
def _d2B_db2_real(self):
return sqr((0, 0, 0,
0, 0, 0,
0, 0, 0))
@property
def _dB_dc_real(self):
return sqr((0, 0, self.cbe,
0, 0, 0,
0, 0, np.sqrt(1-self.cbe**2)))
@property
def _d2B_dc2_real(self):
return sqr((0, 0, 0,
0, 0, 0,
0, 0, 0))
@property
def _dB_dbeta_real(self):
return sqr((0, 0, -self.c*self.sbe,
0, 0, 0,
0, 0, self.c*self.cbe*self.sbe/np.sqrt(1-self.cbe**2)))
@property
def _d2B_dbeta2_real(self):
return sqr((0, 0, -self.c*self.cbe,
0, 0, 0,
0, 0, -self.c*np.sqrt(1-self.cbe**2)))
|
408316ab405bc0d60859bfac26df8afbee52f5bf
|
fd8ef75bb06383538cdb21ed2a0ef88e570179b7
|
/src/openfermion/circuits/primitives/optimal_givens_decomposition_test.py
|
fb3744acd81b91bcd2b06e278eccc9e13b2ddac0
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
quantumlib/OpenFermion
|
d1147383f99573d19005bd0f3e0120e9e9bed04c
|
788481753c798a72c5cb3aa9f2aa9da3ce3190b0
|
refs/heads/master
| 2023-09-04T11:00:32.124157
| 2023-08-24T21:54:30
| 2023-08-24T21:54:30
| 104,403,768
| 1,481
| 406
|
Apache-2.0
| 2023-08-24T21:54:31
| 2017-09-21T22:10:28
|
Python
|
UTF-8
|
Python
| false
| false
| 8,679
|
py
|
optimal_givens_decomposition_test.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import product
import numpy
import scipy
import cirq
from openfermion.linalg import (givens_matrix_elements, givens_rotate,
get_sparse_operator)
from openfermion.ops import QubitOperator, FermionOperator
from openfermion.transforms import jordan_wigner
from openfermion.circuits.primitives.optimal_givens_decomposition import \
optimal_givens_decomposition
def test_givens_inverse():
r"""
The Givens rotation in OpenFermion is defined as
$$
\begin{pmatrix}
\cos(\theta) & -e^{i \varphi} \sin(\theta) \\
\sin(\theta) & e^{i \varphi} \cos(\theta)
\end{pmatrix}.
$$
Confirm numerically that its Hermitian conjugate is its inverse.
"""
a = numpy.random.random() + 1j * numpy.random.random()
b = numpy.random.random() + 1j * numpy.random.random()
ab_rotation = givens_matrix_elements(a, b, which='right')
assert numpy.allclose(ab_rotation.dot(numpy.conj(ab_rotation).T),
numpy.eye(2))
assert numpy.allclose(
numpy.conj(ab_rotation).T.dot(ab_rotation), numpy.eye(2))
def test_row_eliminate():
"""
Test elimination of the element U[i, j] by rotating rows i-1 and i.
"""
dim = 3
u_generator = numpy.random.random((dim, dim)) + 1j * numpy.random.random(
(dim, dim))
u_generator = u_generator - numpy.conj(u_generator).T
# make sure the generator is actually antihermitian
assert numpy.allclose(-1 * u_generator, numpy.conj(u_generator).T)
unitary = scipy.linalg.expm(u_generator)
# eliminate U[2, 0] by rotating in 1, 2
gmat = givens_matrix_elements(unitary[1, 0], unitary[2, 0], which='right')
givens_rotate(unitary, gmat, 1, 2, which='row')
assert numpy.isclose(unitary[2, 0], 0.0)
# eliminate U[1, 0] by rotating in 0, 1
gmat = givens_matrix_elements(unitary[0, 0], unitary[1, 0], which='right')
givens_rotate(unitary, gmat, 0, 1, which='row')
assert numpy.isclose(unitary[1, 0], 0.0)
# eliminate U[2, 1] by rotating in 1, 2
gmat = givens_matrix_elements(unitary[1, 1], unitary[2, 1], which='right')
givens_rotate(unitary, gmat, 1, 2, which='row')
assert numpy.isclose(unitary[2, 1], 0.0)
def create_givens(givens_mat, i, j, dim):
"""
Create the givens matrix on the larger space
:param givens_mat: 2x2 matrix whose first column is real
:param i: row index i
:param j: row index i < j
:param dim: dimension
"""
gmat = numpy.eye(dim, dtype=complex)
gmat[i, i] = givens_mat[0, 0]
gmat[i, j] = givens_mat[0, 1]
gmat[j, i] = givens_mat[1, 0]
gmat[j, j] = givens_mat[1, 1]
return gmat
def test_col_eliminate():
"""
Test elimination by rotating in the column space. Left multiplication of
inverse givens
"""
dim = 3
u_generator = numpy.random.random((dim, dim)) + 1j * numpy.random.random(
(dim, dim))
u_generator = u_generator - numpy.conj(u_generator).T
# make sure the generator is actually antihermitian
assert numpy.allclose(-1 * u_generator, numpy.conj(u_generator).T)
unitary = scipy.linalg.expm(u_generator)
# eliminate U[1, 0] by rotation in rows [0, 1] and
# mixing U[1, 0] and U[0, 0]
unitary_original = unitary.copy()
gmat = givens_matrix_elements(unitary[0, 0], unitary[1, 0], which='right')
vec = numpy.array([[unitary[0, 0]], [unitary[1, 0]]])
fullgmat = create_givens(gmat, 0, 1, 3)
zeroed_unitary = fullgmat.dot(unitary)
givens_rotate(unitary, gmat, 0, 1)
assert numpy.isclose(unitary[1, 0], 0.0)
assert numpy.allclose(unitary.real, zeroed_unitary.real)
assert numpy.allclose(unitary.imag, zeroed_unitary.imag)
# eliminate U[2, 0] by rotating columns [0, 1] and
# mixing U[2, 0] and U[2, 1].
unitary = unitary_original.copy()
gmat = givens_matrix_elements(unitary[2, 0], unitary[2, 1], which='left')
vec = numpy.array([[unitary[2, 0]], [unitary[2, 1]]])
assert numpy.isclose((gmat.dot(vec))[0, 0], 0.0)
assert numpy.isclose((vec.T.dot(gmat.T))[0, 0], 0.0)
fullgmat = create_givens(gmat, 0, 1, 3)
zeroed_unitary = unitary.dot(fullgmat.T)
# because col takes g[0, 0] * col_i + g[0, 1].conj() * col_j -> col_i
# this is equivalent to left multiplication by gmat.T
givens_rotate(unitary, gmat.conj(), 0, 1, which='col')
assert numpy.isclose(zeroed_unitary[2, 0], 0.0)
assert numpy.allclose(unitary, zeroed_unitary)
def test_front_back_iteration():
"""
Code demonstrating how we iterate over the matrix
[[ 0. 0. 0. 0. 0. 0.]
[15. 0. 0. 0. 0. 0.]
[ 7. 14. 0. 0. 0. 0.]
[ 6. 8. 13. 0. 0. 0.]
[ 2. 5. 9. 12. 0. 0.]
[ 1. 3. 4. 10. 11. 0.]]
"""
N = 6
unitary = numpy.zeros((N, N))
unitary[-1, 0] = 1
unitary[-2, 0] = 2
unitary[-1, 1] = 3
unitary[-1, 2] = 4
unitary[-2, 1] = 5
unitary[-3, 0] = 6
unitary[-4, 0] = 7
unitary[-3, 1] = 8
unitary[-2, 2] = 9
unitary[-1, 3] = 10
unitary[-1, 4] = 11
unitary[-2, 3] = 12
unitary[-3, 2] = 13
unitary[-4, 1] = 14
unitary[-5, 0] = 15
counter = 1
for i in range(1, N):
if i % 2 == 1:
for j in range(0, i):
print((N - j, i - j), i - j, i - j + 1, "col rotation")
assert numpy.isclose(unitary[N - j - 1, i - j - 1], counter)
counter += 1
else:
for j in range(1, i + 1):
print((N + j - i, j), N + j - i - 1, N + j - i, "row rotation")
assert numpy.isclose(unitary[N + j - i - 1, j - 1], counter)
counter += 1
def test_circuit_generation_and_accuracy():
for dim in range(2, 10):
qubits = cirq.LineQubit.range(dim)
u_generator = numpy.random.random(
(dim, dim)) + 1j * numpy.random.random((dim, dim))
u_generator = u_generator - numpy.conj(u_generator).T
assert numpy.allclose(-1 * u_generator, numpy.conj(u_generator).T)
unitary = scipy.linalg.expm(u_generator)
circuit = cirq.Circuit()
circuit.append(optimal_givens_decomposition(qubits, unitary))
fermion_generator = QubitOperator(()) * 0.0
for i, j in product(range(dim), repeat=2):
fermion_generator += jordan_wigner(
FermionOperator(((i, 1), (j, 0)), u_generator[i, j]))
true_unitary = scipy.linalg.expm(
get_sparse_operator(fermion_generator).toarray())
assert numpy.allclose(true_unitary.conj().T.dot(true_unitary),
numpy.eye(2**dim, dtype=complex))
test_unitary = cirq.unitary(circuit)
assert numpy.isclose(
abs(numpy.trace(true_unitary.conj().T.dot(test_unitary))), 2**dim)
def test_circuit_generation_state():
"""
Determine if we rotate the Hartree-Fock state correctly
"""
simulator = cirq.Simulator()
circuit = cirq.Circuit()
qubits = cirq.LineQubit.range(4)
circuit.append([
cirq.X(qubits[0]),
cirq.X(qubits[1]),
cirq.X(qubits[1]),
cirq.X(qubits[2]),
cirq.X(qubits[3]),
cirq.X(qubits[3])
]) # alpha-spins are first then beta spins
wavefunction = numpy.zeros((2**4, 1), dtype=complex)
wavefunction[10, 0] = 1.0
dim = 2
u_generator = numpy.random.random((dim, dim)) + 1j * numpy.random.random(
(dim, dim))
u_generator = u_generator - numpy.conj(u_generator).T
unitary = scipy.linalg.expm(u_generator)
circuit.append(optimal_givens_decomposition(qubits[:2], unitary))
fermion_generator = QubitOperator(()) * 0.0
for i, j in product(range(dim), repeat=2):
fermion_generator += jordan_wigner(
FermionOperator(((i, 1), (j, 0)), u_generator[i, j]))
test_unitary = scipy.linalg.expm(
get_sparse_operator(fermion_generator, 4).toarray())
test_final_state = test_unitary.dot(wavefunction)
cirq_wf = simulator.simulate(circuit).final_state_vector
assert numpy.allclose(cirq_wf, test_final_state.flatten())
|
4da1800b67a2e75df319f9839811569dfce265ad
|
af41ca2086f7da6ca036921b2e2cec89e0e5d522
|
/examples/handshake/server.py
|
21d36ea9c0ff7f868d082aee6ba7b2904f29d154
|
[
"MIT"
] |
permissive
|
irmen/Pyro4
|
023830905bb0d8fc25aed8e990631268f7fbe52c
|
8ec0db055d76ae1512239710b1e30883ee6bd74b
|
refs/heads/master
| 2023-08-22T10:18:47.878310
| 2023-06-04T16:00:32
| 2023-06-04T16:00:32
| 11,037,154
| 667
| 105
|
MIT
| 2022-06-26T14:23:01
| 2013-06-28T20:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
server.py
|
from __future__ import print_function
import Pyro4
import Pyro4.constants
secret_code = "pancakes"
class CustomDaemon(Pyro4.Daemon):
def validateHandshake(self, conn, data):
print("Daemon received handshake request from:", conn.sock.getpeername())
print("Handshake data:", data)
# if needed, you can inspect Pyro4.current_context
if data == secret_code:
print("Secret code okay! Connection accepted.")
# return some custom handshake data:
return ["how", "are", "you", "doing"]
else:
print("Secret code wrong! Connection refused.")
raise ValueError("wrong secret code, connection refused")
def clientDisconnect(self, conn):
print("Daemon client disconnects:", conn.sock.getpeername())
with CustomDaemon() as daemon:
print("Server is ready. You can use the following URI to connect:")
print(daemon.uriFor(Pyro4.constants.DAEMON_NAME))
print("When asked, enter the following secret code: ", secret_code)
daemon.requestLoop()
|
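The server above refuses connections unless the client presents the secret code during the handshake. A minimal client-side sketch, assuming Pyro4's `_pyroHandshake` proxy attribute and `_pyroBind()` behave as in Pyro4's handshake support (the URI is whatever the server printed):

```python
import Pyro4

uri = input("Enter the URI printed by the server: ")
secret = input("Enter the secret code: ")

with Pyro4.Proxy(uri) as proxy:
    # Handshake data delivered to CustomDaemon.validateHandshake on connect.
    proxy._pyroHandshake = secret
    proxy._pyroBind()  # force the connection (and thus the handshake) now
    print("Connected; server accepted the handshake.")
```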
e8f93e0da1637bfbbae5b1ac1b79554c5d75654a
|
0121f2810d6081fee314d730b8f22081e0943faf
|
/python/examples-qt6/apireview/main.py
|
da097a9edbdee8917729266793694cf7d1dbb959
|
[
"MIT"
] |
permissive
|
KDAB/KDChart
|
329d5010d3ea93509f0f862cd6b9cbd36856cdfd
|
8ecbc4402b1efe0a21bdb5aa879dbf1acde0eb0b
|
refs/heads/kdchart-3.0
| 2023-09-01T13:52:24.166135
| 2023-08-15T10:46:24
| 2023-08-15T10:46:24
| 182,813,124
| 103
| 31
|
NOASSERTION
| 2023-08-23T01:54:56
| 2019-04-22T15:14:18
|
C++
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
main.py
|
#!/usr/bin/env python
#
# This file is part of the KD Chart library.
#
# SPDX-FileCopyrightText: 2019-2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
#
# SPDX-License-Identifier: MIT
#
''' Main entry point for the API Review Example '''
import sys
from PySide6.QtWidgets import QApplication
from mainwindow import MainWindow
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec())
|
733c6d5f70c5ad5f279ad9dec3bd1aa1729a1cfb
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/test/hummingbot/connector/exchange/bittrex/test_bittrex_exchange.py
|
394906f31e34bccaaecd2fc3256e1c7ff2952746
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 13,810
|
py
|
test_bittrex_exchange.py
|
import asyncio
import functools
import json
import re
import unittest
from decimal import Decimal
from typing import Awaitable, Callable, Dict, Optional
from unittest.mock import AsyncMock
from aioresponses import aioresponses
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.exchange.bittrex.bittrex_exchange import BittrexExchange
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.trade_fee import TokenAmount
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import MarketEvent, OrderFilledEvent
class BittrexExchangeTest(unittest.TestCase):
# the level is required to receive logs from the data source logger
level = 0
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.api_key = "someKey"
cls.secret_key = "someSecret"
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.symbol = f"{cls.base_asset}{cls.quote_asset}"
def setUp(self) -> None:
super().setUp()
self.ev_loop = asyncio.get_event_loop()
self.log_records = []
self.test_task: Optional[asyncio.Task] = None
self.resume_test_event = asyncio.Event()
self.client_config_map = ClientConfigAdapter(ClientConfigMap())
self.exchange = BittrexExchange(
client_config_map=self.client_config_map,
bittrex_api_key=self.api_key,
bittrex_secret_key=self.secret_key,
trading_pairs=[self.trading_pair])
self.exchange.logger().setLevel(1)
self.exchange.logger().addHandler(self)
self._initialize_event_loggers()
def tearDown(self) -> None:
self.test_task and self.test_task.cancel()
super().tearDown()
def _initialize_event_loggers(self):
self.buy_order_completed_logger = EventLogger()
self.sell_order_completed_logger = EventLogger()
self.order_filled_logger = EventLogger()
self.order_cancelled_logger = EventLogger()
events_and_loggers = [
(MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger),
(MarketEvent.SellOrderCompleted, self.sell_order_completed_logger),
(MarketEvent.OrderFilled, self.order_filled_logger),
(MarketEvent.OrderCancelled, self.order_cancelled_logger)]
for event, logger in events_and_loggers:
self.exchange.add_listener(event, logger)
def handle(self, record):
self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage() == message for record in self.log_records)
def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def _return_calculation_and_set_done_event(self, calculation: Callable, *args, **kwargs):
if self.resume_test_event.is_set():
raise asyncio.CancelledError
self.resume_test_event.set()
return calculation(*args, **kwargs)
def get_filled_response(self) -> Dict:
filled_resp = {
"id": "87076200-79bc-4f97-82b1-ad8fa3e630cf",
"marketSymbol": self.trading_pair,
"direction": "BUY",
"type": "LIMIT",
"quantity": "1",
"limit": "10",
"timeInForce": "POST_ONLY_GOOD_TIL_CANCELLED",
"fillQuantity": "1",
"commission": "0.11805420",
"proceeds": "23.61084196",
"status": "CLOSED",
"createdAt": "2021-09-08T10:00:34.83Z",
"updatedAt": "2021-09-08T10:00:35.05Z",
"closedAt": "2021-09-08T10:00:35.05Z",
}
return filled_resp
@aioresponses()
def test_execute_cancel(self, mocked_api):
url = f"{self.exchange.BITTREX_API_ENDPOINT}/orders/"
regex_url = re.compile(f"^{url}")
resp = {"status": "CLOSED"}
mocked_api.delete(regex_url, body=json.dumps(resp))
order_id = "someId"
self.exchange.start_tracking_order(
order_id=order_id,
exchange_order_id="someExchangeId",
trading_pair=self.trading_pair,
order_type=OrderType.LIMIT_MAKER,
trade_type=TradeType.BUY,
price=Decimal("10.0"),
amount=Decimal("1.0"),
)
self.async_run_with_timeout(coroutine=self.exchange.execute_cancel(self.trading_pair, order_id))
self.assertEqual(1, len(self.order_cancelled_logger.event_log))
event = self.order_cancelled_logger.event_log[0]
self.assertEqual(order_id, event.order_id)
self.assertTrue(order_id not in self.exchange.in_flight_orders)
@aioresponses()
def test_execute_cancel_already_filled(self, mocked_api):
url = f"{self.exchange.BITTREX_API_ENDPOINT}/orders/"
regex_url = re.compile(f"^{url}")
del_resp = {"code": "ORDER_NOT_OPEN"}
mocked_api.delete(regex_url, status=409, body=json.dumps(del_resp))
get_resp = self.get_filled_response()
mocked_api.get(regex_url, body=json.dumps(get_resp))
order_id = "someId"
self.exchange.start_tracking_order(
order_id=order_id,
exchange_order_id="someExchangeId",
trading_pair=self.trading_pair,
order_type=OrderType.LIMIT_MAKER,
trade_type=TradeType.BUY,
price=Decimal("10.0"),
amount=Decimal("1.0"),
)
self.async_run_with_timeout(coroutine=self.exchange.execute_cancel(self.trading_pair, order_id))
self.assertEqual(1, len(self.buy_order_completed_logger.event_log))
event = self.buy_order_completed_logger.event_log[0]
self.assertEqual(order_id, event.order_id)
self.assertTrue(order_id not in self.exchange.in_flight_orders)
def test_order_fill_event_takes_fee_from_update_event(self):
self.exchange.start_tracking_order(
order_id="OID1",
exchange_order_id="EOID1",
trading_pair=self.trading_pair,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("10000"),
amount=Decimal("1"),
)
order = self.exchange.in_flight_orders.get("OID1")
partial_fill = {
"accountId": "testAccount",
"sequence": "1001",
"deltas": [{
"id": "1",
"marketSymbol": f"{self.base_asset}{self.quote_asset}",
"executedAt": "12-03-2021 6:17:16",
"quantity": "0.1",
"rate": "10050",
"orderId": "EOID1",
"commission": "10",
"isTaker": False
}]
}
message = {
"event_type": "execution",
"content": partial_fill,
}
mock_user_stream = AsyncMock()
mock_user_stream.get.side_effect = functools.partial(self._return_calculation_and_set_done_event,
lambda: message)
self.exchange.user_stream_tracker._user_stream = mock_user_stream
self.test_task = asyncio.get_event_loop().create_task(self.exchange._user_stream_event_listener())
self.async_run_with_timeout(self.resume_test_event.wait())
self.assertEqual(Decimal("10"), order.fee_paid)
self.assertEqual(1, len(self.order_filled_logger.event_log))
fill_event: OrderFilledEvent = self.order_filled_logger.event_log[0]
self.assertEqual(Decimal("0"), fill_event.trade_fee.percent)
self.assertEqual([TokenAmount(order.quote_asset, Decimal(partial_fill["deltas"][0]["commission"]))],
fill_event.trade_fee.flat_fees)
self.assertTrue(self._is_logged(
"INFO",
f"Filled {Decimal(partial_fill['deltas'][0]['quantity'])} out of {order.amount} of the "
f"{order.order_type_description} order {order.client_order_id}. - ws"
))
self.assertEqual(0, len(self.buy_order_completed_logger.event_log))
complete_fill = {
"accountId": "testAccount",
"sequence": "1001",
"deltas": [{
"id": "2",
"marketSymbol": f"{self.base_asset}{self.quote_asset}",
"executedAt": "12-03-2021 6:17:16",
"quantity": "0.9",
"rate": "10060",
"orderId": "EOID1",
"commission": "30",
"isTaker": False
}]
}
message["content"] = complete_fill
self.resume_test_event = asyncio.Event()
mock_user_stream = AsyncMock()
mock_user_stream.get.side_effect = functools.partial(self._return_calculation_and_set_done_event,
lambda: message)
self.exchange.user_stream_tracker._user_stream = mock_user_stream
self.test_task = asyncio.get_event_loop().create_task(self.exchange._user_stream_event_listener())
self.async_run_with_timeout(self.resume_test_event.wait())
self.assertEqual(Decimal("40"), order.fee_paid)
self.assertEqual(2, len(self.order_filled_logger.event_log))
fill_event: OrderFilledEvent = self.order_filled_logger.event_log[1]
self.assertEqual(Decimal("0"), fill_event.trade_fee.percent)
self.assertEqual([TokenAmount(order.quote_asset, Decimal(complete_fill["deltas"][0]["commission"]))],
fill_event.trade_fee.flat_fees)
# The order should be marked as complete only when the "done" event arrives, not with the fill event
self.assertFalse(self._is_logged(
"INFO",
f"The market buy order {order.client_order_id} has completed according to Coinbase Pro user stream."
))
self.assertEqual(0, len(self.buy_order_completed_logger.event_log))
def test_order_fill_event_processed_before_order_complete_event(self):
self.exchange.start_tracking_order(
order_id="OID1",
exchange_order_id="EOID1",
trading_pair=self.trading_pair,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("10000"),
amount=Decimal("1"),
)
order = self.exchange.in_flight_orders.get("OID1")
complete_fill = {
"id": "1",
"marketSymbol": f"{self.base_asset}{self.quote_asset}",
"executedAt": "12-03-2021 6:17:16",
"quantity": "1",
"rate": "10050",
"orderId": "EOID1",
"commission": "10",
"isTaker": False
}
fill_message = {
"event_type": "execution",
"content": {
"accountId": "testAccount",
"sequence": "1001",
"deltas": [complete_fill]
}
}
update_data = {
"id": "EOID1",
"marketSymbol": f"{self.base_asset}{self.quote_asset}",
"direction": "BUY",
"type": "LIMIT",
"quantity": "1",
"limit": "10000",
"ceiling": "10000",
"timeInForce": "GOOD_TIL_CANCELLED",
"clientOrderId": "OID1",
"fillQuantity": "1",
"commission": "10",
"proceeds": "10050",
"status": "CLOSED",
"createdAt": "12-03-2021 6:17:16",
"updatedAt": "12-03-2021 6:17:16",
"closedAt": "12-03-2021 6:17:16",
"orderToCancel": {
"type": "LIMIT",
"id": "string (uuid)"
}
}
update_message = {
"event_type": "order",
"content": {
"accountId": "testAccount",
"sequence": "1001",
"delta": update_data
}
}
mock_user_stream = AsyncMock()
# We simulate the case when the order update arrives before the order fill
mock_user_stream.get.side_effect = [update_message, fill_message, asyncio.CancelledError()]
self.exchange.user_stream_tracker._user_stream = mock_user_stream
self.test_task = asyncio.get_event_loop().create_task(self.exchange._user_stream_event_listener())
try:
self.async_run_with_timeout(self.test_task)
except asyncio.CancelledError:
pass
self.async_run_with_timeout(order.wait_until_completely_filled())
self.assertEqual(Decimal("10"), order.fee_paid)
self.assertEqual(1, len(self.order_filled_logger.event_log))
fill_event: OrderFilledEvent = self.order_filled_logger.event_log[0]
self.assertEqual(Decimal("0"), fill_event.trade_fee.percent)
self.assertEqual(
[TokenAmount(order.quote_asset, Decimal(complete_fill["commission"]))], fill_event.trade_fee.flat_fees
)
self.assertTrue(self._is_logged(
"INFO",
f"Filled {Decimal(complete_fill['quantity'])} out of {order.amount} of the "
f"{order.order_type_description} order {order.client_order_id}. - ws"
))
self.assertTrue(self._is_logged(
"INFO",
f"The BUY order {order.client_order_id} has completed according to order delta websocket API."
))
self.assertEqual(1, len(self.buy_order_completed_logger.event_log))
|
b52ac3039b77f149cbdec9d023f496e15eb9d6b4
|
b832340b8fcce09a7ad8996eb0aed8abec4d4974
|
/fastflix/encoders/vp9/command_builder.py
|
78329c1e6fc1de7bbbfa3df95ba1ac2000fddd02
|
[
"MIT"
] |
permissive
|
cdgriffith/FastFlix
|
4819c276dd6bbb8ad0039788c68691371b525ac2
|
80cca411a9362c7345975606e9f5d936c00ed111
|
refs/heads/master
| 2023-08-31T14:53:02.515626
| 2023-08-26T17:59:47
| 2023-08-26T17:59:47
| 171,392,325
| 901
| 64
|
MIT
| 2023-08-26T17:59:48
| 2019-02-19T02:41:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
command_builder.py
|
# -*- coding: utf-8 -*-
import re
import secrets
from fastflix.encoders.common.helpers import Command, generate_all, generate_color_details, null
from fastflix.models.encode import VP9Settings
from fastflix.models.fastflix import FastFlix
def build(fastflix: FastFlix):
settings: VP9Settings = fastflix.current_video.video_settings.video_encoder_settings
beginning, ending, output_fps = generate_all(fastflix, "libvpx-vp9")
beginning += f'{"-row-mt 1" if settings.row_mt else ""} ' f"{generate_color_details(fastflix)} "
if not settings.single_pass:
pass_log_file = fastflix.current_video.work_path / f"pass_log_file_{secrets.token_hex(10)}"
beginning += f'-passlogfile "{pass_log_file}" '
# TODO color_range 1
# if not fastflix.current_video.video_settings.remove_hdr and settings.pix_fmt in ("yuv420p10le", "yuv420p12le"):
# if fastflix.current_video.color_space.startswith("bt2020"):
# beginning += "-color_primaries bt2020 -color_trc smpte2084 -colorspace bt2020nc -color_range 1"
details = f"-quality:v {settings.quality} -profile:v {settings.profile} -tile-columns:v {settings.tile_columns} -tile-rows:v {settings.tile_rows} "
if settings.bitrate:
if settings.quality == "realtime":
return [
Command(
command=f"{beginning} -speed:v {settings.speed} -b:v {settings.bitrate} {details} {settings.extra} {ending} ",
name="Single pass realtime bitrate",
exe="ffmpeg",
)
]
command_1 = f"{beginning} -speed:v {'4' if settings.fast_first_pass else settings.speed} -b:v {settings.bitrate} {details} -pass 1 {settings.extra if settings.extra_both_passes else ''} -an {output_fps} -f webm {null}"
command_2 = (
f"{beginning} -speed:v {settings.speed} -b:v {settings.bitrate} {details} -pass 2 {settings.extra} {ending}"
)
elif settings.crf:
command_1 = f"{beginning} -b:v 0 -crf:v {settings.crf} {details} -pass 1 {settings.extra if settings.extra_both_passes else ''} -an {output_fps} -f webm {null}"
command_2 = (
f"{beginning} -b:v 0 -crf:v {settings.crf} {details} "
f'{"-pass 2" if not settings.single_pass else ""} {settings.extra} {ending}'
)
else:
return []
if settings.crf and settings.single_pass:
return [Command(command=command_2, name="Single pass CRF", exe="ffmpeg")]
pass_type = "bitrate" if settings.bitrate else "CRF"
return [
Command(command=command_1, name=f"First pass {pass_type}", exe="ffmpeg"),
Command(command=command_2, name=f"Second pass {pass_type} ", exe="ffmpeg"),
]
|
a7f361fe11b882e9b20403c26eb3e26a6673f12d
|
5f607259244dc1bf49a4bf2d7995d29e31020697
|
/src/pytorch_tabular/feature_extractor.py
|
dfef7732d4ece97f96542d23d3bb749f634e9c9b
|
[
"MIT"
] |
permissive
|
manujosephv/pytorch_tabular
|
022af92ea42c42b948576054dc007b255bff786f
|
20499a473056ff75474cda1d8a9d3ac7a105ed20
|
refs/heads/main
| 2023-09-04T11:33:07.732511
| 2023-07-19T02:39:43
| 2023-07-19T02:39:43
| 321,584,367
| 1,027
| 113
|
MIT
| 2023-09-01T11:30:56
| 2020-12-15T07:17:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,208
|
py
|
feature_extractor.py
|
# Pytorch Tabular
# Author: Manu Joseph <manujoseph@gmail.com>
# For license information, see LICENSE.TXT
from collections import defaultdict
import pandas as pd
from rich.progress import track
from sklearn.base import BaseEstimator, TransformerMixin
from pytorch_tabular.models import NODEModel, TabNetModel
from pytorch_tabular.models.mixture_density import MDNModel
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
class DeepFeatureExtractor(BaseEstimator, TransformerMixin):
def __init__(self, tabular_model, extract_keys=["backbone_features"], drop_original=True):
"""Initializes the Transformer and extracts the neural features.
Args:
tabular_model (TabularModel): The trained TabularModel object
"""
assert not (
isinstance(tabular_model.model, NODEModel)
or isinstance(tabular_model.model, TabNetModel)
or isinstance(tabular_model.model, MDNModel)
), "FeatureExtractor doesn't work for Mixture Density Networks, NODE Model, & Tabnet Model"
self.tabular_model = tabular_model
self.extract_keys = extract_keys
self.drop_original = drop_original
def fit(self, X, y=None):
"""Just for compatibility.
Does not do anything
"""
return self
def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
"""Transforms the categorical columns specified to the trained neural features from the model.
Args:
X (pd.DataFrame): DataFrame of features, shape (n_samples, n_features). Must contain columns to encode.
y ([type], optional): Only for compatibility. Not used. Defaults to None.
Raises:
ValueError: [description]
Returns:
pd.DataFrame: The encoded dataframe
"""
X_encoded = X.copy(deep=True)
orig_features = X_encoded.columns
self.tabular_model.model.eval()
inference_dataloader = self.tabular_model.datamodule.prepare_inference_dataloader(X_encoded)
logits_predictions = defaultdict(list)
for batch in track(inference_dataloader, description="Generating Features..."):
for k, v in batch.items():
if isinstance(v, list) and (len(v) == 0):
# Skipping empty list
continue
batch[k] = v.to(self.tabular_model.model.device)
if self.tabular_model.config.task == "ssl":
ret_value = {"backbone_features": self.tabular_model.model.predict(batch, ret_model_output=True)}
else:
_, ret_value = self.tabular_model.model.predict(batch, ret_model_output=True)
for k in self.extract_keys:
if k in ret_value.keys():
logits_predictions[k].append(ret_value[k].detach().cpu())
for k, v in logits_predictions.items():
v = torch.cat(v, dim=0).numpy()
if v.ndim == 1:
v = v.reshape(-1, 1)
for i in range(v.shape[-1]):
if v.shape[-1] > 1:
X_encoded[f"{k}_{i}"] = v[:, i]
else:
X_encoded[f"{k}"] = v[:, i]
if self.drop_original:
X_encoded.drop(columns=orig_features, inplace=True)
return X_encoded
def fit_transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
"""Encode given columns of X based on the learned features.
Args:
X (pd.DataFrame): DataFrame of features, shape (n_samples, n_features). Must contain columns to encode.
y ([type], optional): Only for compatibility. Not used. Defaults to None.
Returns:
pd.DataFrame: The encoded dataframe
"""
self.fit(X, y)
return self.transform(X)
def save_as_object_file(self, path):
if not self._mapping:
raise ValueError("`fit` method must be called before `save_as_object_file`.")
pickle.dump(self.__dict__, open(path, "wb"))
def load_from_object_file(self, path):
for k, v in pickle.load(open(path, "rb")).items():
setattr(self, k, v)
|
a2b794cf708452a3edcffd0560a2748618baa63d
|
285ec77e4e5a914f882a5d42ce1604f94f081b06
|
/tests/test_parser.py
|
caa6f55d9e81a0bb749795c01221cdc59992d33f
|
[
"MIT"
] |
permissive
|
buildinspace/peru
|
c327c983dc001758f8f13028c3e7412aefbcebd9
|
bc8de020a2ee8e380ff0ff9130bd12e93430b819
|
refs/heads/master
| 2023-09-05T16:06:41.037281
| 2023-02-10T22:20:28
| 2023-04-05T05:27:14
| 13,880,738
| 649
| 54
|
MIT
| 2023-07-28T12:18:11
| 2013-10-26T09:14:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,932
|
py
|
test_parser.py
|
from textwrap import dedent
from peru import parser
from peru.parser import parse_string, ParserError
from peru.module import Module
from peru.rule import Rule
import shared
class ParserTest(shared.PeruTest):
def test_parse_empty_file(self):
scope, imports = parse_string('')
self.assertDictEqual(scope.modules, {})
self.assertDictEqual(scope.rules, {})
self.assertEqual(imports, {})
def test_parse_rule(self):
input = dedent("""\
rule foo:
export: out/
""")
scope, imports = parse_string(input)
self.assertIn("foo", scope.rules)
rule = scope.rules["foo"]
self.assertIsInstance(rule, Rule)
self.assertEqual(rule.name, "foo")
self.assertEqual(rule.export, "out/")
def test_parse_module(self):
input = dedent("""\
sometype module foo:
url: http://www.example.com/
rev: abcdefg
""")
scope, imports = parse_string(input)
self.assertIn("foo", scope.modules)
module = scope.modules["foo"]
self.assertIsInstance(module, Module)
self.assertEqual(module.name, "foo")
self.assertEqual(module.type, "sometype")
self.assertDictEqual(module.plugin_fields, {
"url": "http://www.example.com/",
"rev": "abcdefg"
})
def test_parse_module_default_rule(self):
input = dedent("""\
git module bar:
export: bar
""")
scope, imports = parse_string(input)
self.assertIn("bar", scope.modules)
module = scope.modules["bar"]
self.assertIsInstance(module, Module)
self.assertIsInstance(module.default_rule, Rule)
self.assertEqual(module.default_rule.export, "bar")
def test_parse_toplevel_imports(self):
input = dedent("""\
imports:
foo: bar/
""")
scope, imports = parse_string(input)
self.assertDictEqual(scope.modules, {})
self.assertDictEqual(scope.rules, {})
self.assertEqual(imports, {'foo': ('bar/', )})
def test_parse_multimap_imports(self):
input = dedent('''\
imports:
foo:
- bar/
''')
scope, imports = parse_string(input)
self.assertDictEqual(scope.modules, {})
self.assertDictEqual(scope.rules, {})
self.assertEqual(imports, {'foo': ('bar/', )})
def test_parse_empty_imports(self):
input = dedent('''\
imports:
''')
scope, imports = parse_string(input)
self.assertDictEqual(scope.modules, {})
self.assertDictEqual(scope.rules, {})
self.assertEqual(imports, {})
def test_parse_wrong_type_imports_throw(self):
with self.assertRaises(ParserError):
parse_string('imports: 5')
def test_bad_toplevel_field_throw(self):
with self.assertRaises(ParserError):
parse_string("foo: bar")
def test_bad_rule_field_throw(self):
with self.assertRaises(ParserError):
parse_string(
dedent("""\
rule foo:
bad_field: junk
"""))
def test_bad_rule_name_throw(self):
with self.assertRaises(ParserError):
parse_string("rule foo bar:")
def test_bad_module_name_throw(self):
with self.assertRaises(ParserError):
parse_string("git module abc def:")
with self.assertRaises(ParserError):
parse_string("git module:")
def test_duplicate_names_throw(self):
# Modules and rules should not conflict.
ok_input = dedent('''
rule foo:
git module foo:
''')
parse_string(ok_input)
# But duplicate modules should fail. (Duplicate rules are not
# currently possible, because their YAML keys would be exact
# duplicates.)
bad_input = dedent('''
git module foo:
hg module foo:
''')
with self.assertRaises(ParserError):
parse_string(bad_input)
def test_non_string_module_field_name(self):
input = dedent('''\
git module foo:
12345: bar
''')
try:
parse_string(input)
except ParserError as e:
assert '12345' in e.message
else:
assert False, 'expected ParserError'
def test_non_string_module_field_value(self):
input = dedent('''\
git module foo:
bar: 123
# These booleans should turn into "true" and "false".
baz: yes
bing: no
''')
scope, imports = parse_string(input)
foo = scope.modules['foo']
self.assertDictEqual(foo.plugin_fields, {
"bar": "123",
"baz": "true",
"bing": "false",
})
def test_build_field_deprecated_message(self):
input = dedent('''\
rule foo:
build: shell command
''')
try:
parse_string(input)
except ParserError as e:
assert 'The "build" field is no longer supported.' in e.message
else:
assert False, 'expected ParserError'
def test_name_prefix(self):
input = dedent('''\
git module foo:
url: fun stuff
rule bar:
export: more stuff
''')
scope, imports = parse_string(input, name_prefix='x')
# Lookup keys should be unaffected, but the names that modules and
# rules give for themselves should have the prefix.
assert scope.modules['foo'].name == 'xfoo'
assert scope.rules['bar'].name == 'xbar'
def test_forgotten_colon(self):
# There are many different permutations of this error, and this only
# tests the one mentioned in
# https://github.com/keybase/client/issues/242.
# TODO: A more general data validation library might help the parser do
# a better job of checking these things. See
# https://github.com/buildinspace/peru/issues/40.
input = dedent('''\
rule test:
pick bla
''')
with self.assertRaises(ParserError):
parse_string(input)
def test_duplicate_key_heuristic(self):
yaml = dedent('''\
a:
a: 1
b: 1
b:
a: 1
b: 1
a: 1
a : whitespace before colon
a: stuff
''')
duplicates = parser._get_duplicate_keys_approximate(yaml)
self.assertEqual([
('a', 5, 7),
('a', 1, 8),
('a', 8, 9),
], duplicates)
|
a0ff9db9b7fd40e78b1fac7f7082b3f31f1e85c7
|
7dfc958846a98c9dc90829ace237adb0f4e79057
|
/tests/assets/spec-with-functions-flat/my_tasks_flat/raw.py
|
657c84924f5fa91480348b39f868f0a14aab88ef
|
[
"Apache-2.0"
] |
permissive
|
ploomber/ploomber
|
b4fad542106ed85bb3a195f38ee547da111bd049
|
516b64e531b13eeda36b747a268506fa8dd4dc98
|
refs/heads/master
| 2023-08-08T07:18:12.419524
| 2023-08-03T04:40:19
| 2023-08-03T04:40:19
| 235,190,220
| 3,076
| 219
|
Apache-2.0
| 2023-08-29T03:56:08
| 2020-01-20T20:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
raw.py
|
from pathlib import Path
def function(product):
Path(str(product)).touch()
def function2(product):
Path(str(product)).touch()
|
8c25dee74367e50a77fb63e03972eb185fa78223
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/test/dialect/test_pyodbc.py
|
80f1e468abfabf9829f72d2e55370dd7fb290677
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
test_pyodbc.py
|
from sqlalchemy.connectors import pyodbc
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
class PyODBCTest(fixtures.TestBase):
def test_pyodbc_version(self):
connector = pyodbc.PyODBCConnector()
for vers, expected in [
("2.1.8", (2, 1, 8)),
("py3-3.0.1-beta4", (3, 0, 1, "beta4")),
("10.15.17", (10, 15, 17)),
("crap.crap.crap", ()),
]:
eq_(connector._parse_dbapi_version(vers), expected)
|
3692d47a8fadaabd045e8aab7223d6597f9038f9
|
b8cebcf2850834b9f386c8704541e1d5bc39d5e9
|
/vaas/vaas/settings/dev.py
|
3d222fddaf14e078fc2cf8302fd6030b593d79d5
|
[
"Apache-2.0"
] |
permissive
|
allegro/vaas
|
246590b1804aacf44c34821c64488b0b5c467fa9
|
a5cc9e04d49594de9e7a0dfc1051539256f38bfe
|
refs/heads/master
| 2023-09-01T10:49:37.778610
| 2023-05-22T07:17:34
| 2023-05-22T07:17:34
| 41,783,059
| 258
| 35
|
NOASSERTION
| 2023-09-06T18:11:47
| 2015-09-02T06:02:15
|
Python
|
UTF-8
|
Python
| false
| false
| 148
|
py
|
dev.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from .base import *
from .oauth import *
from .tracking import *
|
c67c4f2888bc0ddac1b3055859790d4de0f5d5d1
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/release/tune_tests/scalability_tests/workloads/test_result_throughput_cluster.py
|
0230ee23aa5355c4a377e5302c6c2b391e3f6a76
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
test_result_throughput_cluster.py
|
"""Result throughput on a cluster
In this run, we will start 1000 trials concurrently that report often
(1 result/2 seconds). We thus measure the amount of overhead incurred when
dealing with a large number of results from distributed trials.
Cluster: cluster_16x64.yaml
Test owner: krfricke
Acceptance criteria: Should run faster than 130 seconds.
Theoretical minimum time: 100 seconds
"""
import os
import ray
from ray.tune.utils.release_test_util import timed_tune_run
def main():
os.environ["TUNE_DISABLE_AUTO_CALLBACK_LOGGERS"] = "1" # Tweak
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "1000"
ray.init(address="auto")
num_samples = 1000
results_per_second = 0.5
trial_length_s = 100
max_runtime = 130
timed_tune_run(
name="result throughput cluster",
num_samples=num_samples,
results_per_second=results_per_second,
trial_length_s=trial_length_s,
max_runtime=max_runtime,
) # Tweak!
if __name__ == "__main__":
main()
|
e81b9ee32d729a0029cfeadf5b4297eaebc2dc5a
|
b2fd19abc6aef8725ebf06888f0d48ec386e2454
|
/auto_novel_imagenet.py
|
64c3ce25127818abc14535ccce415c11e3b6c05e
|
[] |
no_license
|
k-han/AutoNovel
|
58bb0d13c8f02d4983314cea795feaee3e523912
|
5eda7e45898cf3fbcde4c34b9c14c743082abd94
|
refs/heads/master
| 2022-04-05T11:47:23.962306
| 2020-02-13T13:15:34
| 2020-02-13T13:15:34
| 232,853,696
| 242
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,304
|
py
|
auto_novel_imagenet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, lr_scheduler
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
from sklearn.metrics import adjusted_rand_score as ari_score
from utils.util import BCE, PairEnum, cluster_acc, Identity, AverageMeter, seed_torch
from utils import ramps
from torchvision.models.resnet import BasicBlock
from data.imagenetloader import ImageNetLoader30, ImageNetLoader882_30Mix, ImageNetLoader882
from tqdm import tqdm
import numpy as np
import math
import os
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
class ResNet(nn.Module):
def __init__(self, block, layers, num_labeled_classes=10, num_unlabeled_classes=10):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.head1= nn.Linear(512 * block.expansion, num_labeled_classes)
self.head2= nn.Linear(512 * block.expansion, num_unlabeled_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
out1 = self.head1(x)
out2 = self.head2(x)
return out1, out2, x
def train(model, train_loader, labeled_eval_loader, unlabeled_eval_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
criterion1 = nn.CrossEntropyLoss()
criterion2 = BCE()
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
exp_lr_scheduler.step()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
x, x_bar, label = x.to(device), x_bar.to(device), label.to(device)
output1, output2, feat = model(x)
output1_bar, output2_bar, _ = model(x_bar)
prob1, prob1_bar, prob2, prob2_bar=F.softmax(output1, dim=1), F.softmax(output1_bar, dim=1), F.softmax(output2, dim=1), F.softmax(output2_bar, dim=1)
mask_lb = idx<train_loader.labeled_length
rank_feat = (feat[~mask_lb]).detach()
rank_idx = torch.argsort(rank_feat, dim=1, descending=True)
rank_idx1, rank_idx2= PairEnum(rank_idx)
rank_idx1, rank_idx2=rank_idx1[:, :args.topk], rank_idx2[:, :args.topk]
rank_idx1, _ = torch.sort(rank_idx1, dim=1)
rank_idx2, _ = torch.sort(rank_idx2, dim=1)
rank_diff = rank_idx1 - rank_idx2
rank_diff = torch.sum(torch.abs(rank_diff), dim=1)
target_ulb = torch.ones_like(rank_diff).float().to(device)
target_ulb[rank_diff>0] = -1
prob1_ulb, _= PairEnum(prob2[~mask_lb])
_, prob2_ulb = PairEnum(prob2_bar[~mask_lb])
loss_ce = criterion1(output1[mask_lb], label[mask_lb])
loss_bce = criterion2(prob1_ulb, prob2_ulb, target_ulb)
consistency_loss = (F.mse_loss(prob1, prob1_bar) + F.mse_loss(prob2, prob2_bar))
loss = loss_ce + loss_bce + w * consistency_loss
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
print('test on labeled classes')
args.head = 'head1'
test(model, labeled_eval_loader, args)
print('test on unlabeled classes')
args.head='head2'
test(model, unlabeled_eval_loader, args)
def test(model, test_loader, args):
model.eval()
preds=np.array([])
targets=np.array([])
for batch_idx, (x, label, _) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
output1, output2, _ = model(x)
if args.head=='head1':
output = output1
else:
output = output2
_, pred = output.max(1)
targets=np.append(targets, label.cpu().numpy())
preds=np.append(preds, pred.cpu().numpy())
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
def copy_param(model, pretrain_dir):
pre_dict = torch.load(pretrain_dir)
new=list(pre_dict.items())
dict_len = len(pre_dict.items())
model_kvpair=model.state_dict()
count=0
for key, value in model_kvpair.items():
if count < dict_len:
layer_name,weights=new[count]
model_kvpair[key]=weights
count+=1
else:
break
model.load_state_dict(model_kvpair)
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='cluster',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--device_ids', default=[0], type=int, nargs='+',
help='device ids assignment (e.g 0 1 2 3)')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--epochs', default=90, type=int)
parser.add_argument('--rampup_length', default=50, type=int)
parser.add_argument('--rampup_coefficient', type=float, default=10.0)
parser.add_argument('--step_size', default=30, type=int)
parser.add_argument('--batch_size', default=512, type=int)
parser.add_argument('--unlabeled_batch_size', default=128, type=int)
parser.add_argument('--num_labeled_classes', default=882, type=int)
parser.add_argument('--num_unlabeled_classes', default=30, type=int)
parser.add_argument('--dataset_root', type=str, default='./data/datasets/ImageNet/')
parser.add_argument('--exp_root', type=str, default='./data/experiments/')
parser.add_argument('--warmup_model_dir', type=str, default='./data/experiments/pretrained/resnet18_imagenet_classif_882_ICLR18.pth')
parser.add_argument('--topk', default=5, type=int)
parser.add_argument('--model_name', type=str, default='resnet')
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--unlabeled_subset', type=str, default='A')
parser.add_argument('--mode', type=str, default='train')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
seed_torch(args.seed)
runner_name = os.path.basename(__file__).split(".")[0]
model_dir= os.path.join(args.exp_root, runner_name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
args.model_dir = model_dir+'/'+'{}_{}.pth'.format(args.model_name, args.unlabeled_subset)
model = ResNet(BasicBlock, [2,2,2,2], args.num_labeled_classes, args.num_unlabeled_classes)
model = nn.DataParallel(model, args.device_ids).to(device)
model = copy_param(model, args.warmup_model_dir)
for name, param in model.named_parameters():
if 'head' not in name and 'layer4' not in name:
param.requires_grad = False
mix_train_loader = ImageNetLoader882_30Mix(args.batch_size, num_workers=8, path=args.dataset_root, unlabeled_subset=args.unlabeled_subset, aug='twice', shuffle=True, subfolder='train', unlabeled_batch_size=args.unlabeled_batch_size)
labeled_eval_loader = ImageNetLoader882(args.batch_size, num_workers=8, path=args.dataset_root, aug=None, shuffle=False, subfolder='val')
unlabeled_eval_loader = ImageNetLoader30(args.batch_size, num_workers=8, path=args.dataset_root, subset=args.unlabeled_subset, aug=None, shuffle=False, subfolder='train')
if args.mode == 'train':
train(model, mix_train_loader, labeled_eval_loader, unlabeled_eval_loader, args)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
else:
print("model loaded from {}.".format(args.model_dir))
model.load_state_dict(torch.load(args.model_dir))
print('test on labeled classes')
args.head = 'head1'
test(model, labeled_eval_loader, args)
print('test on unlabeled classes')
args.head = 'head2'
test(model, unlabeled_eval_loader, args)
|
cd5b0a1dd22efde1880345c83daa7499564b3be4
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/external/scripts/mpi4py/np_Gather.py
|
65e6b94d6006f8d61fba742429afce747b822c39
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 720
|
py
|
np_Gather.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring
from mpi4py import MPI
from numpy import zeros
# we need to declare these variables somehow,
# since we are calling mpi subroutines
size_ = -1
rank = -1
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size_ = comm.Get_size()
master = 1
nb_values = 8
block_length = nb_values // size_
# ...
values = zeros(block_length, 'int')
for i in range(0, block_length):
values[i] = 1000 + rank*nb_values + i
print('I, process ', rank, 'sent my values array : ', values)
# ...
# ...
data = zeros(nb_values, 'int')
comm.Gather(values, data, master)
# ...
if rank == master:
print('I, process ', rank, ', received ', data, ' of process ', master)
|
9bc2d56686dee7b83987f968dd5cac212ee7981e
|
748626778e870ce6cdfd0c1f3b46d7f8a096b5a8
|
/src/condor_tests/test_bogus_collector.py
|
2d64a65e8b1a6d7f0771f7ca3b0f5e73d44247cf
|
[
"Apache-2.0"
] |
permissive
|
htcondor/htcondor
|
1c8bab33379299f64e5274a7d525f3c64c64c47f
|
3b67625b2f4d97bcc28f534340a6fd7ac518dc75
|
refs/heads/main
| 2023-08-31T03:17:53.070364
| 2023-08-31T00:18:04
| 2023-08-31T00:18:04
| 5,808,515
| 249
| 165
|
Apache-2.0
| 2023-09-14T18:44:26
| 2012-09-14T11:35:15
|
C++
|
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
test_bogus_collector.py
|
#!/usr/bin/env pytest
# Test that a startd that reports to a collector whose name
# doesn't exist neither crashes nor hangs
import logging
import htcondor
from ornithology import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Setup the startd to also report to a bogus collector
@standup
def condor(test_dir):
with Condor(test_dir / "condor", config={"NUM_CPUS": "4", "COLLECTOR_HOST": "$(COLLECTOR_HOST), bogus.example.com"}) as condor:
yield condor
@action
def submit_sleep_job_cmd(test_dir, condor, path_to_sleep):
sub_description = """
executable = {exe}
arguments = 1
queue
""".format(
exe=path_to_sleep
)
submit_file = write_file(test_dir / "submit" / "job.sub", sub_description)
return condor.run_command(["condor_submit", submit_file])
@action
def finished_sleep_jobid(condor, submit_sleep_job_cmd):
clusterid, num_procs = parse_submit_result(submit_sleep_job_cmd)
jobid = JobID(clusterid, 0)
condor.job_queue.wait_for_events(
expected_events={jobid: [SetJobStatus(JobStatus.COMPLETED)]},
unexpected_events={jobid: {SetJobStatus(JobStatus.HELD)}},
)
return jobid
@action
def job_queue_events_for_sleep_job(condor, finished_sleep_jobid):
return condor.job_queue.by_jobid[finished_sleep_jobid]
class TestBogusCollector:
def test_submit_cmd_succeeded(self, submit_sleep_job_cmd):
assert submit_sleep_job_cmd.returncode == 0
def test_job_queue_events_in_correct_order(self, job_queue_events_for_sleep_job):
assert in_order(
job_queue_events_for_sleep_job,
[
SetJobStatus(JobStatus.IDLE),
SetJobStatus(JobStatus.RUNNING),
SetJobStatus(JobStatus.COMPLETED),
],
)
def test_job_executed_successfully(self, job_queue_events_for_sleep_job):
assert SetAttribute("ExitCode", "0") in job_queue_events_for_sleep_job
|
58bcd3d3804a83390aa974e2562c9da9ee4b2c75
|
12bdda6d7e048ec67d74261f9fcea4bab50571a9
|
/Labs/Local/aes_scripts/aes_mult.py
|
b4d5983999ded210f3365edc7963fb97d9ea9584
|
[] |
no_license
|
Yossioren/AttacksonImplementationsCourseBook
|
2aa64332780d9f6de2c5774201ce27582f95355c
|
32ef531273f7e9c21a6a3f868435e0471c709dc3
|
refs/heads/master
| 2023-05-11T03:32:35.669142
| 2023-05-09T16:08:13
| 2023-05-09T16:08:13
| 251,563,089
| 218
| 87
| null | 2023-03-12T08:24:45
| 2020-03-31T09:54:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
aes_mult.py
|
import numpy as np
from aes_scripts.aes_xtimes import aes_xtimes
# function [result] = aes_mult(input_data, constant)
# helper function for the AES Mixcolums transformation
#
# DESCRIPTION:
#
# aes_mult(input_data, constant)
#
# multiplies each byte of the matrix 'input_data' with the
# value 'constant'. This multiplication is performed in GF(2^8) modulo the irreducible
# polynomial used for the AES.
#
# PARAMETERS:
#
# - data: A matrix of bytes
# - constant: A scalar value
#
# RETURNVALUES:
#
# - result:
# A matrix of the size of 'input_data', where each byte is the product of 'constant' and
# the corresponding byte of 'input_data'.
#
# EXAMPLE:
#
# aes_mult([1, 2,3 ,4; 5, 6 ,7 ,8], 3)
# AUTHORS: Stefan Mangard, Mario Kirschbaum
#
# CREATION_DATE: 31 July 2001
# LAST_REVISION: 27 October 2008
def aes_mult(input_data, constant):
result = 0
mult_val = np.copy(input_data)
# maximum constant is 15
for i in range(4):
if (constant >> i) & 1:
result = np.bitwise_xor(result, mult_val)
mult_val = aes_xtimes(mult_val)
return result
|
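The header comment above describes multiplication in GF(2^8) modulo the AES polynomial, delegating the doubling step to the imported `aes_xtimes` helper. A self-contained sketch of the same idea on plain integers, with an explicit xtime step and checked against the FIPS-197 worked example {57}·{13} = {FE} (note the file's loop only covers constants up to 15, while this sketch iterates over all 8 bits):

```python
def xtime(b):
    # Multiply by x (i.e. 2) in GF(2^8) modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.
    b <<= 1
    if b & 0x100:
        b ^= 0x11B
    return b & 0xFF

def gf_mult(a, constant):
    # Accumulate a * constant by XOR-ing in xtime-shifted copies of a
    # for every bit set in constant.
    result = 0
    for i in range(8):
        if (constant >> i) & 1:
            result ^= a
        a = xtime(a)
    return result

assert gf_mult(0x57, 0x13) == 0xFE  # worked example from FIPS-197, section 4.2
```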
643c359293e8f7a700ac8d3b0faadc728f89e3cf
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/webdriver/pylib/selenium/webdriver/common/options.py
|
e940a4645a3b1e589e374697246cbfa2ea826fc2
|
[
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 9,081
|
py
|
options.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
from typing import NoReturn
from selenium.webdriver.common.proxy import Proxy
from selenium.common.exceptions import InvalidArgumentException
class BaseOptions(metaclass=ABCMeta):
"""
Base class for individual browser options
"""
def __init__(self):
super(BaseOptions, self).__init__()
self._caps = self.default_capabilities
self.set_capability("pageLoadStrategy", "normal")
self.mobile_options = None
@property
def capabilities(self):
return self._caps
def set_capability(self, name, value):
""" Sets a capability """
self._caps[name] = value
@property
def browser_version(self) -> str:
"""
:returns: the version of the browser if set, otherwise None.
"""
return self._caps["browserVersion"]
@browser_version.setter
def browser_version(self, version: str) -> NoReturn:
"""
Requires the major version of the browser to match provided value:
https://w3c.github.io/webdriver/#dfn-browser-version
:param version: The required version of the browser
"""
self.set_capability("browserVersion", version)
@property
def platform_name(self) -> str:
"""
:returns: The name of the platform
"""
return self._caps["platformName"]
@platform_name.setter
def platform_name(self, platform: str) -> NoReturn:
"""
Requires the platform to match the provided value: https://w3c.github.io/webdriver/#dfn-platform-name
:param platform: the required name of the platform
"""
self.set_capability("platformName", platform)
@property
def page_load_strategy(self) -> str:
"""
:returns: page load strategy if set, the default is "normal"
"""
return self._caps["pageLoadStrategy"]
@page_load_strategy.setter
def page_load_strategy(self, strategy: str) -> NoReturn:
"""
Determines the point at which a navigation command is returned:
https://w3c.github.io/webdriver/#dfn-table-of-page-load-strategies
:param strategy: the strategy corresponding to a document readiness state
"""
if strategy in ["normal", "eager", "none"]:
self.set_capability("pageLoadStrategy", strategy)
else:
raise ValueError("Strategy can only be one of the following: normal, eager, none")
@property
def unhandled_prompt_behavior(self) -> str:
"""
:returns: unhandled prompt behavior if set, the default is "dismiss and notify"
"""
return self._caps["unhandledPromptBehavior"]
@unhandled_prompt_behavior.setter
def unhandled_prompt_behavior(self, behavior: str) -> NoReturn:
"""
How the driver should respond when an alert is present and the command sent is not handling the alert:
https://w3c.github.io/webdriver/#dfn-table-of-page-load-strategies
:param behavior: behavior to use when an alert is encountered
"""
if behavior in ["dismiss", "accept", "dismiss and notify", "accept and notify", "ignore"]:
self.set_capability("unhandledPromptBehavior", behavior)
else:
raise ValueError("Behavior can only be one of the following: dismiss, accept, dismiss and notify, "
"accept and notify, ignore")
@property
def timeouts(self) -> dict:
"""
:returns: Values for implicit timeout, pageLoad timeout and script timeout if set (in milliseconds)
"""
return self._caps["timeouts"]
@timeouts.setter
def timeouts(self, timeouts: dict) -> NoReturn:
"""
How long the driver should wait for actions to complete before returning an error
https://w3c.github.io/webdriver/#timeouts
:param timeouts: values in milliseconds for implicit wait, page load and script timeout
"""
if all(x in timeouts.keys() for x in ["implicit", "pageLoad", "script"]):
self.set_capability("timeouts", timeouts)
else:
raise ValueError("Timeout keys can only be one of the following: implicit, pageLoad, script")
def enable_mobile(self, android_package: str = None, android_activity: str = None, device_serial: str = None):
"""
Enables mobile browser use for browsers that support it
:Args:
            android_package: The name of the android package to start
            android_activity: The name of the android activity to start (optional)
            device_serial: The serial of the device on which to run the browser (optional)
"""
if not android_package:
raise AttributeError("android_package must be passed in")
self.mobile_options = {
"androidPackage": android_package
}
if android_activity:
self.mobile_options["androidActivity"] = android_activity
if device_serial:
self.mobile_options["androidDeviceSerial"] = device_serial
@property
def accept_insecure_certs(self) -> bool:
"""
:returns: whether the session accepts insecure certificates
"""
return self._caps.get('acceptInsecureCerts')
@accept_insecure_certs.setter
def accept_insecure_certs(self, value: bool) -> NoReturn:
"""
Whether untrusted and self-signed TLS certificates are implicitly trusted:
https://w3c.github.io/webdriver/#dfn-insecure-tls-certificates
:param value: whether to accept insecure certificates
"""
self._caps['acceptInsecureCerts'] = value
@property
def strict_file_interactability(self) -> bool:
"""
:returns: whether session is strict about file interactability
"""
return self._caps.get('strictFileInteractability')
@strict_file_interactability.setter
def strict_file_interactability(self, value: bool):
"""
Whether interactability checks will be applied to file type input elements. The default is false.
:param value: whether file interactability is strict
"""
self._caps['strictFileInteractability'] = value
@property
def set_window_rect(self) -> bool:
"""
:returns: whether the remote end supports setting window size and position
"""
return self._caps.get('setWindowRect')
@set_window_rect.setter
def set_window_rect(self, value: bool):
"""
Whether the remote end supports all of the resizing and positioning commands. The default is false.
https://w3c.github.io/webdriver/#dfn-strict-file-interactability
:param value: whether remote end must support setting window resizing and repositioning
"""
self._caps['setWindowRect'] = value
@property
def proxy(self) -> Proxy:
"""
:Returns: Proxy if set, otherwise None.
"""
return self._proxy
@proxy.setter
def proxy(self, value: Proxy):
if not isinstance(value, Proxy):
raise InvalidArgumentException("Only Proxy objects can be passed in.")
self._proxy = value
@abstractmethod
def to_capabilities(self):
"""Convert options into capabilities dictionary."""
@property
@abstractmethod
def default_capabilities(self):
"""Return minimal capabilities necessary as a dictionary."""
class ArgOptions(BaseOptions):
def __init__(self):
super(ArgOptions, self).__init__()
self._arguments = []
self._ignore_local_proxy = False
@property
def arguments(self):
"""
:Returns: A list of arguments needed for the browser
"""
return self._arguments
def add_argument(self, argument):
"""
Adds an argument to the list
:Args:
         - argument: The argument to add to the list of arguments
"""
if argument:
self._arguments.append(argument)
else:
raise ValueError('argument can not be null')
def ignore_local_proxy_environment_variables(self):
"""
By calling this you will ignore HTTP_PROXY and HTTPS_PROXY from being picked up and used.
"""
self._ignore_local_proxy = True
def to_capabilities(self):
return self._caps
@property
def default_capabilities(self):
return {}
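# Hedged usage sketch (added for illustration; not part of the Selenium source).
# A concrete options class only needs to supply default_capabilities; argument
# handling and the capability setters are inherited from ArgOptions/BaseOptions.
# The browser name used here is hypothetical.
if __name__ == '__main__':
    class _DemoOptions(ArgOptions):
        @property
        def default_capabilities(self):
            return {'browserName': 'demo'}

    opts = _DemoOptions()
    opts.add_argument('--headless')
    opts.page_load_strategy = 'eager'
    print(opts.to_capabilities())  # {'browserName': 'demo', 'pageLoadStrategy': 'eager'}
    print(opts.arguments)          # ['--headless']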
|
8b850056933100be803e678017fbdebec8e3a134
|
b2eacba773b016a3619af364dce39a4d3eed424c
|
/custom_components/afvalbeheer/calendar.py
|
ca5a9ffc67ec132d1a601ded46918f7353f98a5b
|
[
"Apache-2.0"
] |
permissive
|
pippyn/Home-Assistant-Sensor-Afvalbeheer
|
fa63c0f84117ea6011496f18a197e9fd9b8bd9bd
|
21e4bb998fc3e73852f29e99e8c80fddeb071676
|
refs/heads/master
| 2023-08-31T11:03:14.784526
| 2023-08-22T09:30:03
| 2023-08-22T09:30:03
| 166,045,890
| 233
| 99
|
Apache-2.0
| 2023-05-23T12:55:35
| 2019-01-16T13:35:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,505
|
py
|
calendar.py
|
import logging
from datetime import datetime
from datetime import timedelta
from typing import Optional, List
from .API import WasteData
from homeassistant.const import CONF_RESOURCES
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
from homeassistant.core import HomeAssistant
from .const import DOMAIN, CONF_ID
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, async_add_entities, discovery_info=None):
if discovery_info and "config" in discovery_info:
conf = discovery_info["config"]
else:
conf = config
if not conf:
return
async_add_entities([AfvalbeheerCalendar(hass.data[DOMAIN][conf[CONF_ID]], conf)])
class AfvalbeheerCalendar(CalendarEntity):
"""Defines a Afvalbeheer calendar."""
_attr_icon = "mdi:delete-empty"
def __init__(
self,
WasteData: WasteData,
config,
) -> None:
"""Initialize the Afvalbeheer entity."""
self.WasteData = WasteData
self.config = config
self._attr_name = f"{DOMAIN.capitalize()} {WasteData.waste_collector}"
self._attr_unique_id = f"{DOMAIN}_{config[CONF_ID]}"
self._event = None
@property
def event(self) -> Optional[CalendarEvent]:
"""Return the next upcoming event."""
if len(self.WasteData.collections) > 0:
waste_item = self.WasteData.collections.get_sorted()[0]
return CalendarEvent(
summary=waste_item.waste_type,
start=waste_item.date.date(),
end=(waste_item.date + timedelta(days=1)).date(),
)
async def async_get_events(
self, hass: HomeAssistant, start_date: datetime, end_date: datetime
) -> List[CalendarEvent]:
"""Return calendar events within a datetime range."""
events: List[CalendarEvent] = []
for waste_items in self.WasteData.collections:
if start_date.date() <= waste_items.date.date() <= end_date.date():
                # The summary below defines the name of the event in the calendar
if waste_items.waste_type in self.config[CONF_RESOURCES]:
events.append(
CalendarEvent(
summary=waste_items.waste_type,
start=waste_items.date.date(),
end=waste_items.date.date() + timedelta(days=1),
)
)
return events
|
d4151366db7aa123fd8c80bf7ef250cdc83ed8d8
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/financial/price_plus_tax.py
|
43876d35e57c416a3d009659748e08723c0986cc
|
[
"MIT"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 413
|
py
|
price_plus_tax.py
|
"""
Calculate price plus tax of a good or service given its price and a tax rate.
"""
def price_plus_tax(price: float, tax_rate: float) -> float:
"""
>>> price_plus_tax(100, 0.25)
125.0
>>> price_plus_tax(125.50, 0.05)
131.775
"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
|
9d654c0ebd2a97d1050521a027a011786558df1d
|
2b7180b739df298195e35a71e20a4251f83b4813
|
/test/validation/test_request_list_rooms.py
|
9415861ae7cd9e5a8f3aa4964378081fd543113e
|
[
"Apache-2.0"
] |
permissive
|
thenetcircle/dino
|
625f752046502a04ab9ec42b0a8c437d7123bcbb
|
f1f68954191f64cdec4b3914caf154300ccbf519
|
refs/heads/master
| 2023-08-10T09:59:07.064141
| 2023-08-03T07:56:19
| 2023-08-03T07:56:19
| 69,937,941
| 153
| 21
|
Apache-2.0
| 2023-02-15T22:53:29
| 2016-10-04T05:40:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,432
|
py
|
test_request_list_rooms.py
|
from activitystreams import parse as as_parser
from dino.config import ApiActions
from dino.config import ErrorCodes
from dino.config import SessionKeys
from dino.validation import request
from test.base import BaseTest
class RequestListRoomsTest(BaseTest):
def test_list_rooms_status_code_true(self):
self.assert_in_room(False)
self.create_and_join_room()
self.assert_in_room(True)
response_data = request.on_list_rooms(as_parser(self.activity_for_list_rooms()))
self.assertEqual(True, response_data[0])
def test_list_rooms_no_actor_id_status_code_false(self):
self.assert_in_room(False)
self.create_and_join_room()
self.assert_in_room(True)
activity = self.activity_for_list_rooms()
del activity['actor']['id']
response_data = request.on_list_rooms(as_parser(activity))
self.assertEqual(True, response_data[0])
def test_list_rooms_not_allowed(self):
self.assert_in_room(False)
self.set_channel_acl({ApiActions.LIST: {'gender': 'm'}})
activity = self.activity_for_list_rooms()
is_valid, code, msg = request.on_list_rooms(as_parser(activity))
self.assertFalse(is_valid)
self.assertEqual(code, ErrorCodes.NOT_ALLOWED)
def test_list_rooms_spoken_country_none(self):
self._test_spoken_language(False, 'de', None)
def test_list_rooms_spoken_country_empty(self):
self._test_spoken_language(False, 'de', '')
def test_list_rooms_spoken_country_wrong_single(self):
self._test_spoken_language(False, 'de', 'en')
def test_list_rooms_spoken_country_wrong_multi(self):
self._test_spoken_language(False, 'de', 'en,es')
def test_list_rooms_spoken_country_allows_multi_user_none(self):
self._test_spoken_language(False, 'de,en', None)
def test_list_rooms_spoken_country_allows_multi_user_empty(self):
self._test_spoken_language(False, 'de,en', '')
def test_list_rooms_spoken_country_allows_multi_user_not_matching(self):
self._test_spoken_language(False, 'de,en', 'es')
def test_list_rooms_spoken_country_allows_multi_user_multi_none_matching(self):
self._test_spoken_language(False, 'de,en', 'es,sv')
def test_list_rooms_spoken_country_allows_multi_user_multi_none_matching_trailing(self):
self._test_spoken_language(False, 'de,en', 'es,sv,')
def test_list_rooms_spoken_country_same_single(self):
self._test_spoken_language(True, 'de', 'de')
def test_list_rooms_spoken_country_same_multi(self):
self._test_spoken_language(True, 'de', 'de,en')
def test_list_rooms_spoken_country_allows_multi_user_matching(self):
self._test_spoken_language(True, 'de,en', 'en')
def test_list_rooms_spoken_country_allows_multi_user_multi_matching_single(self):
self._test_spoken_language(True, 'de,en', 'es,en')
def test_list_rooms_spoken_country_allows_multi_user_multi_matching_single_reverse(self):
self._test_spoken_language(True, 'de,en', 'en,es')
def test_list_rooms_spoken_country_allows_multi_user_multi_matching_single_reverse_trailing(self):
self._test_spoken_language(True, 'de,en', 'en,es,')
def test_list_rooms_no_channel_id_status_code_false(self):
self.assert_in_room(False)
activity = self.activity_for_list_rooms()
del activity['object']['url']
is_valid, code, msg = request.on_list_rooms(as_parser(activity))
self.assertFalse(is_valid)
self.assertEqual(code, ErrorCodes.MISSING_OBJECT_URL)
def test_list_rooms_status_code_true_if_no_rooms(self):
self.assert_in_room(False)
response_data = request.on_list_rooms(as_parser(self.activity_for_list_rooms()))
self.assertEqual(True, response_data[0])
def _test_spoken_language(self, should_succeed: bool, channel_lang, user_lang):
self.assert_in_room(False)
self.set_channel_acl({ApiActions.LIST: {'spoken_language': channel_lang}})
self.set_session(SessionKeys.spoken_language.value, user_lang)
activity = self.activity_for_list_rooms()
is_valid, code, msg = request.on_list_rooms(as_parser(activity))
if should_succeed:
self.assertTrue(is_valid)
self.assertIsNone(code)
else:
self.assertFalse(is_valid)
self.assertEqual(code, ErrorCodes.NOT_ALLOWED)
|
745935b89efc4b62558cba622757ce93aea6caf7
|
a28d672c50faf9632983287d206e8691282cab51
|
/scripts/data_preparation/download_datasets.py
|
c97e2f4774caea4c758b8ce1fd9789d4bbf3be28
|
[
"Apache-2.0"
] |
permissive
|
XPixelGroup/BasicSR
|
42cf240fbc91bee10cfa12930ab86820969e854c
|
033cd6896d898fdd3dcda32e3102a792efa1b8f4
|
refs/heads/master
| 2023-06-07T15:16:21.940587
| 2023-02-02T07:07:47
| 2023-02-02T07:07:47
| 130,259,654
| 2,088
| 300
|
Apache-2.0
| 2023-09-14T00:50:17
| 2018-04-19T18:58:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
download_datasets.py
|
import argparse
import glob
import os
from os import path as osp
from basicsr.utils.download_util import download_file_from_google_drive
def download_dataset(dataset, file_ids):
save_path_root = './datasets/'
os.makedirs(save_path_root, exist_ok=True)
for file_name, file_id in file_ids.items():
save_path = osp.abspath(osp.join(save_path_root, file_name))
if osp.exists(save_path):
            user_response = input(f'{file_name} already exists. Do you want to overwrite it? Y/N\n')
            if user_response.lower() == 'y':
                print(f'Overwriting {file_name} at {save_path}')
download_file_from_google_drive(file_id, save_path)
elif user_response.lower() == 'n':
print(f'Skipping {file_name}')
else:
raise ValueError('Wrong input. Only accepts Y/N.')
else:
print(f'Downloading {file_name} to {save_path}')
download_file_from_google_drive(file_id, save_path)
# unzip
if save_path.endswith('.zip'):
extracted_path = save_path.replace('.zip', '')
print(f'Extract {save_path} to {extracted_path}')
import zipfile
with zipfile.ZipFile(save_path, 'r') as zip_ref:
zip_ref.extractall(extracted_path)
file_name = file_name.replace('.zip', '')
subfolder = osp.join(extracted_path, file_name)
if osp.isdir(subfolder):
print(f'Move {subfolder} to {extracted_path}')
import shutil
for path in glob.glob(osp.join(subfolder, '*')):
shutil.move(path, extracted_path)
shutil.rmtree(subfolder)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'dataset',
type=str,
help=("Options: 'Set5', 'Set14'. "
"Set to 'all' if you want to download all the dataset."))
args = parser.parse_args()
file_ids = {
'Set5': {
'Set5.zip': # file name
'1RtyIeUFTyW8u7oa4z7a0lSzT3T1FwZE9', # file id
},
'Set14': {
'Set14.zip': '1vsw07sV8wGrRQ8UARe2fO5jjgy9QJy_E',
}
}
if args.dataset == 'all':
for dataset in file_ids.keys():
download_dataset(dataset, file_ids[dataset])
else:
download_dataset(args.dataset, file_ids[args.dataset])
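# Hedged usage note (added comment, not part of the original script): run from the
# repository root so the relative './datasets/' directory is created in place, e.g.
#   python scripts/data_preparation/download_datasets.py Set5
#   python scripts/data_preparation/download_datasets.py all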
|
18e85e09445c57528a679c6687db58e03f03117c
|
4da937474505923797032a13bb19d61092ccfc47
|
/bertopic/backend/_use.py
|
142e06bfa153f006f9d9b7fcc5bfc6224927c252
|
[
"MIT"
] |
permissive
|
MaartenGr/BERTopic
|
8e4c533b2fb5524b4f10510b25f82f570c684f56
|
951b97645acdf55e184889c761a83d2e1d73812f
|
refs/heads/master
| 2023-09-01T20:46:35.921196
| 2023-08-30T09:16:41
| 2023-08-30T09:16:41
| 297,672,263
| 4,587
| 623
|
MIT
| 2023-09-12T10:14:14
| 2020-09-22T14:19:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
_use.py
|
import numpy as np
from tqdm import tqdm
from typing import List
from bertopic.backend import BaseEmbedder
class USEBackend(BaseEmbedder):
""" Universal Sentence Encoder
USE encodes text into high-dimensional vectors that
are used for semantic similarity in BERTopic.
Arguments:
embedding_model: An USE embedding model
Examples:
```python
import tensorflow_hub
from bertopic.backend import USEBackend
embedding_model = tensorflow_hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
use_embedder = USEBackend(embedding_model)
```
"""
def __init__(self, embedding_model):
super().__init__()
try:
embedding_model(["test sentence"])
self.embedding_model = embedding_model
except TypeError:
raise ValueError("Please select a correct USE model: \n"
"`import tensorflow_hub` \n"
"`embedding_model = tensorflow_hub.load(path_to_model)`")
def embed(self,
documents: List[str],
verbose: bool = False) -> np.ndarray:
""" Embed a list of n documents/words into an n-dimensional
matrix of embeddings
Arguments:
documents: A list of documents or words to be embedded
verbose: Controls the verbosity of the process
Returns:
Document/words embeddings with shape (n, m) with `n` documents/words
that each have an embeddings size of `m`
"""
embeddings = np.array(
[
self.embedding_model([doc]).cpu().numpy()[0]
for doc in tqdm(documents, disable=not verbose)
]
)
return embeddings
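# Hedged usage sketch (added for illustration; not part of the original backend).
# Continuing the class docstring example, embedding a few documents yields one
# row per document (512 columns for the USE v4 model assumed there):
#
#   docs = ["topic modeling", "universal sentence encoder"]
#   embeddings = use_embedder.embed(docs, verbose=True)
#   embeddings.shape  # (2, 512)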
|
2e1077d1d6ef04245c8c3ed4203f133cd265815f
|
44d1936bbc8e256534f3946f100bb0028e92fee5
|
/backend/src/hatchling/__about__.py
|
9eb549af38545e646fd8effb30b6579143ca3ac0
|
[
"MIT"
] |
permissive
|
pypa/hatch
|
aeb72e6a465a39073a020f63a931def16ce90ce8
|
7dac9856d2545393f7dd96d31fc8620dde0dc12d
|
refs/heads/master
| 2023-09-04T04:04:25.079348
| 2023-09-03T23:48:21
| 2023-09-03T23:48:21
| 92,997,800
| 1,869
| 125
|
MIT
| 2023-09-13T19:39:25
| 2017-05-31T23:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 23
|
py
|
__about__.py
|
__version__ = '1.18.0'
|
2dda3518a5a5d4a858997ac28f39d00ab7c02668
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/ResidualAttentionNet/src/dataset.py
|
15168a2746e3223c7769e9f9cc168a7476990fc9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 6,019
|
py
|
dataset.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
create train or eval dataset.
"""
import os
import mindspore
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.vision.py_transforms as py_transforms
import mindspore.dataset.transforms.py_transforms as py_transforms2
from mindspore.dataset.vision import Inter
from mindspore.communication.management import init, get_rank, get_group_size
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_num, get_rank_id
def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32,
target="Ascend", distribute=False):
"""
    create a training or evaluation cifar10 dataset
Args:
dataset_path(string): the path of dataset.
do_train(bool): whether dataset is used for train or eval.
repeat_num(int): the repeat times of dataset. Default: 1
batch_size(int): the batch size of dataset. Default: 32
target(str): the device target. Default: Ascend
        distribute(bool): whether the data is for distributed training. Default: False
Returns:
dataset
"""
if target == "Ascend":
device_num, rank_id = _get_rank_info()
else:
if distribute:
init()
rank_id = get_rank()
device_num = get_group_size()
else:
device_num = 1
ds.config.set_prefetch_size(64)
if do_train:
usage = "train"
transform = py_transforms2.Compose([py_transforms.ToPIL(),
py_transforms.RandomHorizontalFlip(),
py_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
py_transforms.ToTensor()])
else:
usage = "test"
transform = py_transforms2.Compose([py_transforms.ToPIL(),
py_transforms.ToTensor()])
if device_num == 1:
dataset = ds.Cifar10Dataset(dataset_path, usage=usage, num_parallel_workers=8, shuffle=True)
else:
dataset = ds.Cifar10Dataset(dataset_path, usage=usage, num_parallel_workers=8,
shuffle=True, num_shards=device_num, shard_id=rank_id)
type_cast_op = C2.TypeCast(mindspore.int32)
dataset = dataset.map(operations=transform, input_columns="image")
dataset = dataset.map(operations=type_cast_op, input_columns="label")
# apply batch operations
dataset = dataset.batch(batch_size, drop_remainder=True)
# apply dataset repeat operation
dataset = dataset.repeat(repeat_num)
return dataset
def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, train_image_size=224, eval_image_size=224,
target="Ascend", distribute=False, enable_cache=False, cache_session_id=None):
if target == "Ascend":
device_num, rank_id = _get_rank_info()
else:
if distribute:
init()
rank_id = get_rank()
device_num = get_group_size()
else:
device_num = 1
ds.config.set_prefetch_size(64)
if device_num == 1:
data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True)
else:
data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True,
num_shards=device_num, shard_id=rank_id)
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
# define map operations
if do_train:
trans = [
C.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), interpolation=Inter.BICUBIC),
C.RandomHorizontalFlip(prob=0.5),
C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
C.Normalize(mean=mean, std=std),
C.HWC2CHW()
]
else:
trans = [
C.Decode(),
C.Resize(256),
C.CenterCrop(eval_image_size),
C.Normalize(mean=mean, std=std),
C.HWC2CHW()
]
type_cast_op = C2.TypeCast(mstype.int32)
data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=True)
# apply dataset repeat operation
data_set = data_set.repeat(repeat_num)
return data_set
def _get_rank_info():
"""
get rank size and rank id
"""
rank_size = int(os.environ.get("RANK_SIZE", 1))
if config.device_target == "Ascend":
if rank_size > 1:
rank_size = get_device_num()
rank_id = get_rank_id()
else:
rank_size = 1
rank_id = 0
else:
if rank_size > 1:
rank_size = get_group_size()
rank_id = get_rank()
else:
rank_size = 1
rank_id = 0
return rank_size, rank_id
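# Hedged usage sketch (added for illustration; not part of the original module).
# Single-device CIFAR-10 training pipeline; the dataset path is hypothetical.
if __name__ == '__main__':
    train_dataset = create_dataset1('./cifar-10-batches-bin', do_train=True,
                                    repeat_num=1, batch_size=32,
                                    target='GPU', distribute=False)
    print('batches per epoch:', train_dataset.get_dataset_size())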
|
e3b7c67b516d28b49fbec27d9504355bb0327b69
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/migrations/test_migrations_plan/0003_third.py
|
7a5e341798923a39d8b6b456dac245e1c489ee38
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
0003_third.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
],
),
migrations.RunSQL(
["SELECT * FROM migrations_author"], ["SELECT * FROM migrations_book"]
),
]
|
c8ac9fc69cac10752e041286e2fad9e58eac95fd
|
3a61d74a32de232682867a1063944cad4daaa47f
|
/tests/sample_data.py
|
5b9be1cc3799eda5a77e0d603dd2766cedffcf09
|
[
"MIT"
] |
permissive
|
legrego/homeassistant-elasticsearch
|
c131eacf125bb8008fa85dfb477375ec6abb185c
|
fbe6f8c48c62c9b9b4b559d372d38716afe8ebf1
|
refs/heads/main
| 2023-08-22T12:27:49.162089
| 2023-08-13T23:43:52
| 2023-08-13T23:43:52
| 132,661,981
| 115
| 29
|
MIT
| 2023-09-07T18:20:09
| 2018-05-08T20:35:58
|
Python
|
UTF-8
|
Python
| false
| false
| 817
|
py
|
sample_data.py
|
""" Sample data for testing """
from datetime import datetime
from pytz import utc
def create_sample_state(**kwargs):
""" Creates a sample state object """
state = {
"state": "off",
"entity_id": "switch.sample_entity",
"domain": "switch",
"object_id": "sample_entity",
"name": "Sample Entity",
"last_updated": kwargs.get("last_updated", datetime.now().astimezone(utc)),
"last_changed": kwargs.get("last_changed", datetime.now().astimezone(utc)),
"attributes": kwargs.get(
"attributes", dict({"sample_attribute": "sample_attribute_value"})
),
}
return state
sample_state_change_event = {
"entity_id": "switch.sample_entity",
"old_state": create_sample_state(),
"new_state": create_sample_state(),
}
|
7b0ed2fa40a4895cb9e27eaa3290aeed10018782
|
f62cd59f7e5c8d22b4d9ad130eff0a70d0272341
|
/pyACA/ToolSimpleDtw.py
|
dfdbb6b38da584ce320e8181b7d175ee87d6f197
|
[
"MIT"
] |
permissive
|
alexanderlerch/pyACA
|
61ea3c1b9350562bc248fa45b7bba3246e98686d
|
3ee1dde02999cf0a0e512ee73e021ea618303a89
|
refs/heads/master
| 2023-05-22T18:43:09.506175
| 2022-09-12T18:37:24
| 2022-09-12T18:37:24
| 195,197,713
| 141
| 37
|
MIT
| 2022-01-06T15:19:40
| 2019-07-04T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
ToolSimpleDtw.py
|
# -*- coding: utf-8 -*-
import numpy as np
## helper function: dynamic time warping
#
# @param D: distance matrix
#
# @return p: path with matrix indices
# @return C: accumulated cost matrix
def ToolSimpleDtw(D):
# init directions for back-tracking [diag, vert, hori]
iDec = np.array([[-1, -1], [-1, 0], [0, -1]])
# cost initialization
C = np.zeros(D.shape)
C[0, :] = np.cumsum(D[0, :])
C[:, 0] = np.cumsum(D[:, 0])
# traceback initialization
DeltaP = np.zeros(D.shape, dtype=int)
DeltaP[0, :] = 2 # (0,-1)
DeltaP[:, 0] = 1 # (-1,0)
DeltaP[0, 0] = 0 # (-1,-1)
# recursion
for n_A in range(1, D.shape[0]):
for n_B in range(1, D.shape[1]):
# find preceding min (diag, column, row)
DeltaP[n_A, n_B] = int(np.argmin([C[n_A - 1, n_B - 1], C[n_A - 1, n_B], C[n_A, n_B - 1]]))
prevC_index = [n_A, n_B] + iDec[DeltaP[n_A, n_B], :]
C[n_A, n_B] = D[n_A, n_B] + C[prevC_index[0], prevC_index[1]]
# traceback init
p = np.asarray(D.shape, dtype=int) - 1 # start with the last element
n = p
while (n[0] >= 0) or (n[1] >= 0):
n = n + iDec[DeltaP[n[0], n[1]], :]
# update path
tmp = np.vstack([n, p])
p = tmp
return p[1:, :], C
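# Hedged usage sketch (added for illustration; not part of the original module):
# align two short sequences by warping over an absolute-difference cost matrix.
if __name__ == '__main__':
    seq_a = np.array([0.0, 1.0, 2.0, 1.0])
    seq_b = np.array([0.0, 1.0, 1.0, 2.0, 1.0])
    D = np.abs(seq_a[:, None] - seq_b[None, :])  # pairwise distance matrix
    p, C = ToolSimpleDtw(D)
    print(p)           # matrix indices of the optimal warping path
    print(C[-1, -1])   # total accumulated cost of that path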
|
5ead8e8164c2e0792c534b67be5f9d76ee1b508d
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/joinLines/FStringAndPlainLiteral-after.py
|
76353360971b41e5068be2d89cfbae5f5252b9a8
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 14
|
py
|
FStringAndPlainLiteral-after.py
|
(f'{42}' '{}')
|
267480b7c984e2c52f7549489699960072df07e0
|
3a7262c7184b82607bb3a109838c2b6e486e1e20
|
/tests/param_scheduler/test_scheduler_composite.py
|
730f61af035da27edad5e5c436695ff4681b9888
|
[
"Apache-2.0"
] |
permissive
|
facebookresearch/fvcore
|
a98bd7a8f88a2fe86e4818e617a4c00c73782d63
|
0f2b23b6f93e36041d9a74764ee824541cf0a0e5
|
refs/heads/main
| 2023-08-22T05:50:10.827997
| 2023-07-25T18:46:45
| 2023-07-25T18:46:45
| 210,951,915
| 1,706
| 245
|
Apache-2.0
| 2023-06-16T16:21:49
| 2019-09-25T22:27:59
|
Python
|
UTF-8
|
Python
| false
| false
| 7,240
|
py
|
test_scheduler_composite.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import unittest
from fvcore.common.param_scheduler import (
CompositeParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
StepParamScheduler,
)
class TestCompositeScheduler(unittest.TestCase):
_num_updates = 10
def _get_valid_long_config(self):
return {
"schedulers": [
ConstantParamScheduler(0.1),
ConstantParamScheduler(0.2),
ConstantParamScheduler(0.3),
ConstantParamScheduler(0.4),
],
"lengths": [0.2, 0.4, 0.1, 0.3],
"interval_scaling": ["rescaled"] * 4,
}
def _get_lengths_sum_less_one_config(self):
return {
"schedulers": [
ConstantParamScheduler(0.1),
ConstantParamScheduler(0.2),
],
"lengths": [0.7, 0.2999],
"interval_scaling": ["rescaled", "rescaled"],
}
def _get_valid_mixed_config(self):
return {
"schedulers": [
StepParamScheduler(values=[0.1, 0.2, 0.3, 0.4, 0.5], num_updates=10),
CosineParamScheduler(start_value=0.42, end_value=0.0001),
],
"lengths": [0.5, 0.5],
"interval_scaling": ["rescaled", "rescaled"],
}
def _get_valid_linear_config(self):
return {
"schedulers": [
LinearParamScheduler(start_value=0.0, end_value=0.5),
LinearParamScheduler(start_value=0.5, end_value=1.0),
],
"lengths": [0.5, 0.5],
"interval_scaling": ["rescaled", "rescaled"],
}
def test_invalid_config(self):
config = self._get_valid_mixed_config()
bad_config = copy.deepcopy(config)
# Size of schedulers and lengths doesn't match
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["schedulers"].append(bad_config["schedulers"][-1])
with self.assertRaises(ValueError):
CompositeParamScheduler(**bad_config)
# Sum of lengths < 1
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"][-1] -= 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler(**bad_config)
# Sum of lengths > 1
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["lengths"][-1] += 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler(**bad_config)
# Bad value for composition_mode
bad_config["interval_scaling"] = ["rescaled", "rescaleds"]
with self.assertRaises(ValueError):
CompositeParamScheduler(**bad_config)
# Wrong number composition modes
bad_config["interval_scaling"] = ["rescaled"]
with self.assertRaises(ValueError):
CompositeParamScheduler(**bad_config)
def test_long_scheduler(self):
config = self._get_valid_long_config()
scheduler = CompositeParamScheduler(**config)
schedule = [
scheduler(epoch_num / self._num_updates)
for epoch_num in range(self._num_updates)
]
expected_schedule = [0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.4, 0.4]
self.assertEqual(schedule, expected_schedule)
def test_scheduler_lengths_within_epsilon_of_one(self):
config = self._get_lengths_sum_less_one_config()
scheduler = CompositeParamScheduler(**config)
schedule = [
scheduler(epoch_num / self._num_updates)
for epoch_num in range(self._num_updates)
]
expected_schedule = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2]
self.assertEqual(schedule, expected_schedule)
def test_scheduler_with_mixed_types(self):
config = self._get_valid_mixed_config()
scheduler_0 = config["schedulers"][0]
scheduler_1 = config["schedulers"][1]
# Check scaled
config["interval_scaling"] = ["rescaled", "rescaled"]
scheduler = CompositeParamScheduler(**config)
scaled_schedule = [
round(scheduler(epoch_num / self._num_updates), 4)
for epoch_num in range(self._num_updates)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_updates), 4)
for epoch_num in range(0, self._num_updates, 2)
] + [
round(scheduler_1(epoch_num / self._num_updates), 4)
for epoch_num in range(0, self._num_updates, 2)
]
self.assertEqual(scaled_schedule, expected_schedule)
# Check fixed
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler(**config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_updates), 4)
for epoch_num in range(self._num_updates)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_updates), 4)
for epoch_num in range(0, int(self._num_updates / 2))
] + [
round(scheduler_1(epoch_num / self._num_updates), 4)
for epoch_num in range(int(self._num_updates / 2), self._num_updates)
]
self.assertEqual(fixed_schedule, expected_schedule)
# Check warmup of rescaled then fixed
config["interval_scaling"] = ["rescaled", "fixed"]
scheduler = CompositeParamScheduler(**config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_updates), 4)
for epoch_num in range(self._num_updates)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_updates), 4)
for epoch_num in range(0, int(self._num_updates), 2)
] + [
round(scheduler_1(epoch_num / self._num_updates), 4)
for epoch_num in range(int(self._num_updates / 2), self._num_updates)
]
self.assertEqual(fixed_schedule, expected_schedule)
def test_linear_scheduler_no_gaps(self):
config = self._get_valid_linear_config()
# Check rescaled
scheduler = CompositeParamScheduler(**config)
schedule = [
scheduler(epoch_num / self._num_updates)
for epoch_num in range(self._num_updates)
]
expected_schedule = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
self.assertEqual(expected_schedule, schedule)
# Check fixed composition gives same result as only 1 scheduler
config["schedulers"][1] = config["schedulers"][0]
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler(**config)
linear_scheduler = config["schedulers"][0]
schedule = [
scheduler(epoch_num / self._num_updates)
for epoch_num in range(self._num_updates)
]
expected_schedule = [
linear_scheduler(epoch_num / self._num_updates)
for epoch_num in range(self._num_updates)
]
self.assertEqual(expected_schedule, schedule)
|
704d86bf944d61bb19233d82ef620978153c46e9
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/cloudformation/checks/resource/aws/AmazonMQBrokerPublicAccess.py
|
938c1e385516c38c78e31f64641c83776443f0ef
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
AmazonMQBrokerPublicAccess.py
|
from typing import Any
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AmazonMQBrokerPublicAccess(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure Amazon MQ Broker should not have public access"
id = "CKV_AWS_69"
supported_resources = ('AWS::AmazonMQ::Broker',)
categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(
name=name,
id=id,
categories=categories,
supported_resources=supported_resources,
missing_block_result=CheckResult.FAILED,
)
def get_expected_value(self) -> Any:
return False
def get_inspected_key(self) -> str:
"""
validates Amazon MQ Broker should not have public access
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-amazonmq-broker.html
:return: <CheckResult>
"""
return 'Properties/PubliclyAccessible'
check = AmazonMQBrokerPublicAccess()
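# Hedged illustration (added comment, not part of the original check): the key
# inspected above corresponds to the following template fragment, which passes
# only when PubliclyAccessible is explicitly false (missing or true fails):
#
#   Resources:
#     Broker:
#       Type: AWS::AmazonMQ::Broker
#       Properties:
#         PubliclyAccessible: false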
|
96ba3dde78fee50a61ec2f1dcc0ace96923c6ef5
|
9e988f0dce0ee4b847808210bf5b703b1619fed5
|
/scripts/maf_gc_content.py
|
78cce22dec1002138fee9ae1b3adad1664428d04
|
[
"MIT"
] |
permissive
|
bxlab/bx-python
|
1c4aa875e77ee97dd3c30b89c28a6c4acd0b821b
|
7758bc4492626ffdbaa90c8fc5dd7620b1e2f3f8
|
refs/heads/main
| 2023-08-08T15:17:47.383099
| 2023-07-27T12:09:24
| 2023-07-27T12:09:24
| 58,659,170
| 141
| 55
|
MIT
| 2023-07-26T10:28:07
| 2016-05-12T16:39:43
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
maf_gc_content.py
|
#!/usr/bin/env python
"""
Read a MAF from standard input and print average GC content of each alignment
usage: %prog < maf > out
"""
import sys
from bx.align import maf
def __main__():
maf_reader = maf.Reader(sys.stdin)
for m in maf_reader:
gc = 0
bases = 0
for c in m.components:
gc += c.text.count("G")
gc += c.text.count("C")
gc += c.text.count("g")
gc += c.text.count("c")
bases += len(c.text) - c.text.count("-")
print(gc / bases)
if __name__ == "__main__":
__main__()
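# Hedged usage note (added comment, not part of the original script): the printed
# value per alignment block is (count of G + C) / (non-gap bases), pooled over all
# components of the block, e.g.
#   maf_gc_content.py < alignments.maf > gc_per_block.txt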
|
8c0c8e9bc8e894a4221b6692288be33fb25ffa17
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_bincount_op.py
|
504b4b51c77d510514611e442615bc93e63b4cf8
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 10,206
|
py
|
test_bincount_op.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
import paddle.inference as paddle_infer
from paddle import fluid
from paddle.fluid.framework import in_dygraph_mode
paddle.enable_static()
class TestBincountOpAPI(unittest.TestCase):
"""Test bincount api."""
def test_static_graph(self):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
inputs = paddle.static.data(name='input', dtype='int64', shape=[7])
weights = paddle.static.data(
name='weights', dtype='int64', shape=[7]
)
output = paddle.bincount(inputs, weights=weights)
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64)
w = np.array([0, 1, 1, 2, 2, 1, 0]).astype(np.int64)
res = exe.run(
train_program,
feed={'input': img, 'weights': w},
fetch_list=[output],
)
actual = np.array(res[0])
expected = np.bincount(img, weights=w)
self.assertTrue(
(actual == expected).all(),
msg='bincount output is wrong, out =' + str(actual),
)
def test_dygraph(self):
with fluid.dygraph.guard():
inputs_np = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64)
inputs = fluid.dygraph.to_variable(inputs_np)
actual = paddle.bincount(inputs)
expected = np.bincount(inputs)
self.assertTrue(
(actual.numpy() == expected).all(),
msg='bincount output is wrong, out =' + str(actual.numpy()),
)
class TestBincountOpError(unittest.TestCase):
"""Test bincount op error."""
def run_network(self, net_func):
with fluid.dygraph.guard():
net_func()
def test_input_value_error(self):
"""Test input tensor should be non-negative."""
def net_func():
input_value = paddle.to_tensor([1, 2, 3, 4, -5])
paddle.bincount(input_value)
with self.assertRaises(ValueError):
self.run_network(net_func)
def test_input_shape_error(self):
"""Test input tensor should be 1-D tansor."""
def net_func():
input_value = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
paddle.bincount(input_value)
with self.assertRaises(ValueError):
self.run_network(net_func)
def test_minlength_value_error(self):
"""Test minlength is non-negative ints."""
def net_func():
input_value = paddle.to_tensor([1, 2, 3, 4, 5])
paddle.bincount(input_value, minlength=-1)
with fluid.dygraph.guard():
if in_dygraph_mode():
# InvalidArgument for phi BincountKernel
with self.assertRaises(ValueError):
self.run_network(net_func)
else:
# OutOfRange for EqualGreaterThanChecker
with self.assertRaises(IndexError):
self.run_network(net_func)
def test_input_type_errors(self):
"""Test input tensor should only contain non-negative ints."""
def net_func():
input_value = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
paddle.bincount(input_value)
with self.assertRaises(TypeError):
self.run_network(net_func)
def test_weights_shape_error(self):
"""Test weights tensor should have the same shape as input tensor."""
def net_func():
input_value = paddle.to_tensor([1, 2, 3, 4, 5])
weights = paddle.to_tensor([1, 1, 1, 1, 1, 1])
paddle.bincount(input_value, weights=weights)
with self.assertRaises(ValueError):
self.run_network(net_func)
class TestBincountOp(OpTest):
# without weights
def setUp(self):
self.op_type = "bincount"
self.python_api = paddle.bincount
self.init_test_case()
self.inputs = {"X": self.np_input}
self.attrs = {"minlength": self.minlength}
self.outputs = {"Out": self.Out}
def init_test_case(self):
self.minlength = 0
self.np_input = np.random.randint(low=0, high=20, size=10)
self.Out = np.bincount(self.np_input, minlength=self.minlength)
def test_check_output(self):
self.check_output()
class TestCase1(TestBincountOp):
# with weights(FLOAT32)
def setUp(self):
self.op_type = "bincount"
self.python_api = paddle.bincount
self.init_test_case()
self.inputs = {"X": self.np_input, "Weights": self.np_weights}
self.attrs = {"minlength": self.minlength}
self.outputs = {"Out": self.Out}
def init_test_case(self):
self.minlength = 0
self.np_weights = np.random.randint(low=0, high=20, size=10).astype(
np.float32
)
self.np_input = np.random.randint(low=0, high=20, size=10)
self.Out = np.bincount(
self.np_input, weights=self.np_weights, minlength=self.minlength
).astype(np.float32)
class TestCase2(TestBincountOp):
# with weights(other)
def setUp(self):
self.op_type = "bincount"
self.python_api = paddle.bincount
self.init_test_case()
self.inputs = {"X": self.np_input, "Weights": self.np_weights}
self.attrs = {"minlength": self.minlength}
self.outputs = {"Out": self.Out}
def init_test_case(self):
self.minlength = 0
self.np_weights = np.random.randint(low=0, high=20, size=10)
self.np_input = np.random.randint(low=0, high=20, size=10)
self.Out = np.bincount(
self.np_input, weights=self.np_weights, minlength=self.minlength
)
class TestCase3(TestBincountOp):
# empty input
def init_test_case(self):
self.minlength = 0
self.np_input = np.array([], dtype=np.int64)
self.Out = np.bincount(self.np_input, minlength=self.minlength)
class TestCase4(TestBincountOp):
# with input(INT32)
def init_test_case(self):
self.minlength = 0
self.np_input = np.random.randint(low=0, high=20, size=10).astype(
np.int32
)
self.Out = np.bincount(self.np_input, minlength=self.minlength)
class TestCase5(TestBincountOp):
# with minlength greater than max(X)
def init_test_case(self):
self.minlength = 20
self.np_input = np.random.randint(low=0, high=10, size=10)
self.Out = np.bincount(self.np_input, minlength=self.minlength)
class TestTensorMinlength(unittest.TestCase):
def setUp(self):
paddle.disable_static()
paddle.seed(2022)
self.temp_dir = tempfile.TemporaryDirectory()
self.save_path = os.path.join(
self.temp_dir.name, 'tensor_minlength_bincount'
)
self.place = (
paddle.CUDAPlace(0)
if paddle.is_compiled_with_cuda()
else paddle.CPUPlace()
)
def test_dygraph(self):
paddle.disable_static()
x = np.random.randint(0, 10, [20])
minlength = 2
np_out = np.bincount(x, minlength=minlength)
pd_out = paddle.bincount(
paddle.to_tensor(x), minlength=paddle.to_tensor([2], dtype='int32')
)
np.testing.assert_allclose(np_out, pd_out.numpy())
def test_static_and_infer(self):
paddle.enable_static()
np_x = np.random.randn(100).astype('float32')
main_prog = paddle.static.Program()
starup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, starup_prog):
# run static
x = paddle.static.data(shape=np_x.shape, name='x', dtype=np_x.dtype)
linear = paddle.nn.Linear(np_x.shape[0], np_x.shape[0])
linear_out = linear(x)
relu_out = paddle.nn.functional.relu(linear_out)
minlength = paddle.full([1], 3, dtype='int32')
out = paddle.bincount(
paddle.cast(relu_out, 'int32'), minlength=minlength
)
exe = paddle.static.Executor(self.place)
exe.run(starup_prog)
static_out = exe.run(feed={'x': np_x}, fetch_list=[out])
# run infer
paddle.static.save_inference_model(self.save_path, [x], [out], exe)
config = paddle_infer.Config(
self.save_path + '.pdmodel', self.save_path + '.pdiparams'
)
if paddle.is_compiled_with_cuda():
config.enable_use_gpu(100, 0)
else:
config.disable_gpu()
predictor = paddle_infer.create_predictor(config)
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
fake_input = np_x
input_handle.reshape(np_x.shape)
input_handle.copy_from_cpu(fake_input)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
infer_out = output_handle.copy_to_cpu()
np.testing.assert_allclose(static_out[0], infer_out)
if __name__ == "__main__":
unittest.main()
|
55020567966e8cda56467f6eb8b8590d07cf74f3
|
efc378b30e64de28ecd13fd2a281ea5a309e84de
|
/tutorials/plot_04-MoreFOOOF.py
|
8b916fc48b452744aaed28b2143b6bba074b75d5
|
[
"Apache-2.0"
] |
permissive
|
fooof-tools/fooof
|
acb0069d3d17055bd7e6a85e062c1d648bf2a9bd
|
d63aae0bc9251677fbf158190e3802f29e47c53d
|
refs/heads/main
| 2023-08-31T04:34:24.796460
| 2023-08-27T20:33:49
| 2023-08-27T20:33:49
| 95,601,569
| 244
| 72
|
Apache-2.0
| 2023-09-13T12:42:49
| 2017-06-27T21:16:03
|
Python
|
UTF-8
|
Python
| false
| false
| 13,085
|
py
|
plot_04-MoreFOOOF.py
|
"""
04: Exploring the FOOOF Object
==============================
Further exploring the FOOOF object, including algorithm settings and available methods.
"""
###################################################################################################
# Import the FOOOF object
from fooof import FOOOF
# Import utility to download and load example data
from fooof.utils.download import load_fooof_data
###################################################################################################
# Initialize a FOOOF object
fm = FOOOF()
###################################################################################################
# Description of methods and attributes
# -------------------------------------
#
# The :class:`~fooof.FOOOF` object contents consist of 4 main components (groups of data / code):
#
# - 1) settings attributes, that control the algorithm fitting
# - 2) data attributes, that contain and describe the data
# - 3) result attributes, that contain the resulting parameters that describe the model fit
# - 4) methods (functions) that perform procedures for fitting models and associated utilities
#
# Each of these is described in more detail below.
#
# The FOOOF module follows the following Python conventions:
#
# - all user exposed settings, data, and methods are directly accessible through the object
# - 'hidden' (internal) settings and methods have a leading underscore
#
# - you *can* access these if you need to, but one should be cautious doing so
#
###################################################################################################
# 1) Settings (attributes)
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# There are a number of settings that control the fitting algorithm, that
# can be set by the user when initializing the :class:`~fooof.FOOOF` object.
#
# There are some internal settings that are not exposed at initialization.
# These settings are unlikely to need to be accessed by the user, but can be if desired -
# they are all defined and documented in the code in the object's \__init\__ (there are no
# other settings, or 'magic numbers' in any other parts of the code).
#
###################################################################################################
# Controlling Peak Fits
# ~~~~~~~~~~~~~~~~~~~~~
#
# **peak_width_limits (Hz)** default: [0.5, 12]
#
# Enforced limits on the minimum and maximum widths of extracted peaks, given as a list of
# [minimum bandwidth, maximum bandwidth]. We recommend that the minimum bandwidth be set to
# be at least twice the frequency resolution of the power spectrum, so that single points
# can not be fit as peaks.
#
###################################################################################################
# Peak Search Stopping Criteria
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# An iterative procedure searches for candidate peaks in the flattened spectrum. Candidate
# peaks are extracted in order of decreasing height, until some stopping criterion is met,
# which is controlled by the following settings:
#
# **max_n_peaks (int)** default: infinite
#
# The maximum number of peaks that can be extracted from a given power spectrum. The algorithm
# will halt searching for new peaks when this number is reached. Note that peaks are extracted
# iteratively by height (over and above the aperiodic component), and so this approach will
# extract (up to) the *n* largest peaks.
#
# **peak_threshold (relative threshold - standard deviation of power spectrum)** default: 2.0
#
# The threshold, in terms of standard deviation of the aperiodic-removed power
# spectrum, above which a data point must pass to be considered a candidate peak.
# Once a candidate peak drops below this threshold, the peak search is halted (without
# including the most recent candidate).
#
# **min_peak_height (absolute threshold - units of log power)** default: 0
#
# The minimum height, above the aperiodic fit, that a peak must have to be extracted
# in the initial fit stage. This threshold is defined in units of log power. Once a
# candidate peak drops below this threshold, the peak search is halted (without including
# the most recent candidate). Note that because this constraint is enforced during peak search,
# and prior to final peak fit, returned peaks are not guaranteed to surpass this value in height.
#
# There are two different height-related halting conditions for the peak searching.
# By default, the relative (standard-deviation based) threshold is defined, whereas the
# absolute threshold is set to zero (this default is because there is no general way to
# set this value without knowing the scale of the data). If both are defined, both are
# used and the peak search will halt when a candidate peak fails to pass either the absolute,
# or relative threshold.
#
# Aperiodic Mode
# ~~~~~~~~~~~~~~
#
# **aperiodic_mode (string)** default='fixed'
#
# The fitting approach to use for the aperiodic component.
#
# Options:
# - 'fixed' : fits without a knee parameter (with the knee parameter 'fixed' at 0)
# - 'knee' : fits the full exponential equation, including the 'knee' parameter
#
# Verbosity
# ~~~~~~~~~
#
# **verbose (boolean)** default='True'
#
# Whether to print out status updates and warnings.
#
###################################################################################################
# You can check all the user defined settings with print_settings
# The description parameter here is set to print out quick descriptions of the settings
fm.print_settings(description=True)
###################################################################################################
# Changing Settings
# ~~~~~~~~~~~~~~~~~
#
# Note that if you wish to change settings, then you should re-initialize
# a new :class:`~fooof.FOOOF` object with new settings.
#
# Simply changing the value of the relevant attribute may not appropriately propagate
# the value, and thus may lead to a failure, either creating an error, or not applying
# the settings properly during fit and returning erroneous results.
#
# Here we will re-initialize a new FOOOF object, with some new settings.
#
###################################################################################################
# Re-initialize a new FOOOF object, with some new specified settings
fm = FOOOF(peak_width_limits=[1, 8], max_n_peaks=6, min_peak_height=0.15)
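###################################################################################################
# Hedged illustration (added to this copy of the tutorial, not part of the original):
# the relative and absolute halting thresholds described in the peak search section
# above can be combined at initialization. This object is only created to show the
# call and is not used in the rest of the tutorial.

fm_demo = FOOOF(max_n_peaks=4, peak_threshold=2.5, min_peak_height=0.1)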
###################################################################################################
# 2) Data (attributes)
# ^^^^^^^^^^^^^^^^^^^^
#
# The :class:`~fooof.FOOOF` object stores the following data attributes:
#
# - ``freqs``: the frequency values of the power spectrum
# - ``power_spectrum``: the power values of the power spectrum
# - ``freq_range``: the frequency range of the data
# - ``freq_res``: the frequency resolution of the data
#
# During the fit procedure, interim (hidden) data variables are also created and used.
#
# There is also an indicator attribute, ``has_data`` which indicates
# if the current object has data loaded.
#
###################################################################################################
# Load example data files needed for this example
freqs = load_fooof_data('freqs_2.npy', folder='data')
spectrum = load_fooof_data('spectrum_2.npy', folder='data')
###################################################################################################
# Set a frequency range and add the data to the object
freq_range = [2, 40]
fm.add_data(freqs, spectrum, freq_range)
###################################################################################################
# Check if the object has data loaded
print('Has data loaded: ', fm.has_data)
###################################################################################################
# Check out the data attributes in the object
print('Frequency Range: \t', fm.freq_range)
print('Frequency Resolution: \t', fm.freq_res)
print('Frequency Values: \t', fm.freqs[0:5])
print('Power Values: \t\t', fm.power_spectrum[0:5])
###################################################################################################
#
# Now that we have picked our settings, and added the data, let's fit a power spectrum model.
#
###################################################################################################
# Fit a power spectrum model to the loaded data
fm.fit()
###################################################################################################
# 3) Results (attributes)
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# With our model fit, the results attributes should now hold values.
#
# Recall that by convention, any attributes that contain model results are
# indicated by a trailing underscore.
#
# The model results stored by the object are:
#
# - ``aperiodic_params_``: a list of aperiodic parameters, stored as [Offset, (Knee), Exponent]
# - ``peak_params_``: all periodic parameters, where each row is a peak, as [CF, PW, BW]
# - ``r_squared_``: the r-squared of the model, as compared to the original data
# - ``error_``: the error of the model, as compared to the original data
#
# Other attributes which store outputs from the model are:
#
# - ``fooofed_spectrum_``: the full model reconstruction
# - ``n_peaks_``: a helper attribute which indicates how many peaks were fit in the model
#
# The :class:`~fooof.FOOOF` object also has an indicator attribute, ``has_model``
# which indicates if the current object has model results available.
#
###################################################################################################
# Check if the object has model results
print('Has model results: ', fm.has_model)
###################################################################################################
# Print out model fit results
print('aperiodic params: \t', fm.aperiodic_params_)
print('peak params: \t', fm.peak_params_)
print('r-squared: \t', fm.r_squared_)
print('fit error: \t', fm.error_)
print('fooofed spectrum: \t', fm.fooofed_spectrum_[0:5])
###################################################################################################
# 4) Methods
# ^^^^^^^^^^
#
# The :class:`~fooof.FOOOF` object contains a number of methods that are used
# to fit models and access data, or that offer extra functionality.
#
# In addition to the exposed methods, there are some internal private methods,
# with a leading underscore in their name, that are called in the
# fitting procedure. These methods should not be called directly by the user,
# as they may depend on the internal state of the object set up by other methods,
# and so may not behave as expected when called in isolation.
#
###################################################################################################
# This piece of code is just a way to print out all the public methods with their description
[print(it + '\n\t' + eval('fm.' + it + '.__doc__').split('\n')[0]) \
for it in dir(fm) if it[0] != '_' and callable(eval('fm.' + it))];
###################################################################################################
# Saving Data & Results
# ~~~~~~~~~~~~~~~~~~~~~
#
# There is also functionality for saving out, and loading back in, data and results.
#
# You have the option to specify which data to save.
#
# - `results`: model fit results (same as is returned in FOOOFResult)
# - `settings`: all public settings (everything available at initialization)
# - `data`: freqs & power spectrum
#
# Selected items are saved out to JSON files. You can specify a file name to save
# or append to, or pass in a JSON file object.
#
###################################################################################################
# Save out results, settings, and data
fm.save('FOOOF_results', save_results=True, save_settings=True, save_data=True)
###################################################################################################
# Load back in the saved out information
nfm = FOOOF()
nfm.load('FOOOF_results')
###################################################################################################
# Plot loaded results
nfm.plot()
###################################################################################################
# Creating Reports
# ~~~~~~~~~~~~~~~~
#
# There is also functionality to save out a 'report' of a particular model fit.
#
# This generates and saves a PDF which contains the same output as
# :meth:`~fooof.FOOOF.print_results`,
# :meth:`~fooof.FOOOF.plot`, and
# :meth:`~fooof.FOOOF.print_settings`.
#
###################################################################################################
# Save out a report of the current model fit & results
fm.save_report('FOOOF_report')
###################################################################################################
# Conclusion
# ----------
#
# We have now fully explored the :class:`~fooof.FOOOF` object, and all it contains.
# Next, we will take a deeper dive into how to choose different modes for fitting
# the aperiodic component of power spectra.
#
|
316ecf6e60753673ff112410c62a7687057a3ba9
|
5e4913b3d7b6dfd9f35d9e5f24486bb6b6145125
|
/scripts/kdb/find-tools
|
1de3e3c7b1256f1c19266280b5d8a3551d059d87
|
[
"BSD-3-Clause"
] |
permissive
|
ElektraInitiative/libelektra
|
ff5d5cfc4bf91d704f58405b14ea694aad3a2edd
|
dbbe4ae4f669c322a8f95f59112d3f5fc370bbd9
|
refs/heads/master
| 2023-08-05T14:54:48.081359
| 2023-08-04T12:40:00
| 2023-08-04T12:40:00
| 21,063,580
| 215
| 170
|
BSD-3-Clause
| 2023-09-07T13:34:30
| 2014-06-21T08:01:04
|
C
|
UTF-8
|
Python
| false
| false
| 5,983
|
find-tools
|
#!/usr/bin/env python3
#
# @author Sebastian Bachmann, Kurt Micheli <kurt.micheli@libelektra.org>
# @tags org
# @brief This script is used to search for scripts
# @date 10.05.2016
metatags = ["date", "brief", "author", "tags"]
# If you create your own Tags, please document them here!
# (then they are listed with --alltags even if the script is not installed)
tags = { 'configure': "This script is used for the build configuration",
'convert': "This script is used to convert things",
'generator': "This script is a generator",
'creator': "This script creates things",
'validation': "This script is used to validate stuff",
'specification': "This script works with specifications",
'env': "This script does some env stuff",
'org': "This script organizes other scripts",
'build': "This script builds Elektra",
'release': "Scripts related to the release process",
'mount': "This script mounts things",
'reformat': "This script reformats things",
'benchmark': "This script runs benchmarks",
'debian': "Special script for debian system"}
# No changes below this line necessary for creating tags!
from itertools import islice
import os
from collections import defaultdict
import re
import argparse
def get_head (filename, n=10):
head = ""
with open (filename, "r") as myfile:
head = map (lambda x: x.replace ("\n", "").replace ("\r", ""), list (islice(myfile, n)))
return head
def get_scripts ():
scripts = defaultdict (dict)
scripts_path = os.path.dirname (os.path.realpath (__file__))
for f in os.listdir (scripts_path):
f = os.path.join (scripts_path, f)
if not os.path.isfile (f):
continue
# If file not readable, skip it
try:
head = list (get_head (f))
except UnicodeDecodeError:
continue
# Do not process txt and md, default is '#'
filesDict = {'.sh': '#', '.py': '#', ".txt": -1, ".md": -1}
comment = '#'
for key in filesDict.keys ():
if (f.endswith(key)):
comment = filesDict[key]
if (comment == -1):
continue
tag_match = re.compile ("^"+comment+"[ ]*@(%s) (.*)$" % "|".join (metatags))
s = os.stat (f)
fn = os.path.basename (f)
scripts[fn]["perm"] = (s.st_mode & 64)
scripts[fn]["mtime"] = s.st_mtime
if head[0].startswith ("#!"):
scripts[fn]["shebang"] = head[0]
else:
scripts[fn]["shebang"] = ""
# Look for a line that looks like a vim modeline:
scripts[fn]["modeline"] = ""
for line in head:
if re.match (r"^#[ ]*vim: .*$", line):
scripts[fn]["modeline"] = line
break
# just look at lines that have a metatag
scripts[fn]["tags"] = {k: v for k, v in map (lambda x: tag_match.match(x).groups(), filter(lambda x: tag_match.match(x), head))}
return scripts
def main():
parser = argparse.ArgumentParser (description="KDB Tool finder")
parser.add_argument ("--warnings", help="Print a list of all scripts that does not contain a shebang and or metatags", dest="warnings", action="store_true", default=False)
parser.add_argument ("--good", help="Print a list of all scripts that are compliant with the Meta System", dest="good", action="store_true", default=False)
parser.add_argument ("--alltags", help="Print a list of all known tags", dest="alltags", action="store_true", default=False)
so = parser.add_argument_group ('Search Options. If multiple are given, they will be concatenated with AND')
so.add_argument ("-n", "--name", help="Search by file name")
so.add_argument ("-a", "--author", help="Search by author")
so.add_argument ("-d", "--date", help="Search by date (format DD.MM.YYYY)")
so.add_argument ("-t", "--tags", help="Search by tags", nargs="+")
so.add_argument ("-b", "--brief", help="Search inside the brief text")
so.add_argument ("-e", "--execute", help="Search by file executable (shebang)")
args = parser.parse_args ()
scripts = get_scripts ()
if args.alltags:
stags = set(tags)
for s, d in scripts.items ():
if "tags" in d["tags"]:
for tag in d["tags"]["tags"].split(","):
stags.add (tag.strip ())
print("\n".join (sorted (stags)))
return None
if args.warnings:
for s, d in sorted (scripts.items ()):
message = []
if d["shebang"] == "":
message.append ("No Shebang")
if d["perm"] == 0:
message.append ("Not executable")
if d["tags"] == {}:
message.append ("No Metatags")
if d["modeline"] == "":
message.append ("No Modeline")
if message != []:
print ("%s%s%s" % (s, " "*(2 + max(map(len, scripts.keys())) - len(s)), ", ".join(message)))
elif args.good:
for s, d in sorted (scripts.items ()):
if d["shebang"] != "" and d["perm"] != 0 and d["tags"] != {}:
print("%s%s%s%s%s" % (s, " "*(2 + max(map(len, scripts.keys())) - len(s)), d["shebang"][2:], " "*(30 - len(d["shebang"])), d["tags"]["brief"] if "brief" in d["tags"].keys() else ""))
else:
# Search instead!
if args.name is not None:
scripts = {k: v for k, v in scripts.items() if args.name.lower() in k.lower()}
if args.author is not None:
scripts = {k: v for k, v in scripts.items() if "author" in v["tags"].keys() and args.author.lower() in v["tags"]["author"].lower()}
if args.date is not None:
scripts = {k: v for k, v in scripts.items() if "date" in v["tags"].keys() and args.date.lower() in v["tags"]["date"].lower()}
if args.tags is not None:
scripts = {k: v for k, v in scripts.items() if "tags" in v["tags"].keys() and len(set(args.tags) & set(map(str.strip, v["tags"]["tags"].split(",")))) > 0}
if args.brief is not None:
scripts = {k: v for k, v in scripts.items() if "brief" in v["tags"].keys() and args.brief.lower() in v["tags"]["brief"].lower()}
if args.execute is not None:
scripts = {k: v for k, v in scripts.items() if args.execute.lower() in v["shebang"].lower()}
for s, d in sorted (scripts.items ()):
print("%s%s%s%s%s" % (s, " "*(2 + max(map(len, scripts.keys())) - len(s)), d["shebang"][2:], " "*(30 - len(d["shebang"])), d["tags"]["brief"] if "brief" in d["tags"].keys() else ""))
if __name__ == "__main__":
main()
|
|
24060e921817f0645e09ed384c7441aaa7771ca7
|
902abf2c8a0ae6147975864802575b5e543ef1e4
|
/src/ZODB/scripts/fsrefs.py
|
12a2eef557d244b3b3bb7e74d7240884ed13da56
|
[
"ZPL-2.1"
] |
permissive
|
zopefoundation/ZODB
|
b4d37dfe232a60dccf226f82276d630148fe43db
|
0632974df76e90a3f00b45b995bdff0209dd2def
|
refs/heads/master
| 2023-09-04T05:26:06.885522
| 2023-08-01T17:16:24
| 2023-08-01T17:16:24
| 7,357,595
| 629
| 104
|
NOASSERTION
| 2023-08-01T17:16:26
| 2012-12-28T17:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,891
|
py
|
fsrefs.py
|
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Check FileStorage for dangling references.
usage: fsrefs.py [-v] data.fs
fsrefs.py checks object sanity by trying to load the current revision of
every object O in the database, and also verifies that every object
directly reachable from each such O exists in the database.
It's hard to explain exactly what it does because it relies on undocumented
features in Python's cPickle module: many of the crucial steps of loading
an object are taken, but application objects aren't actually created. This
saves a lot of time, and allows fsrefs to be run even if the code
implementing the object classes isn't available.
A read-only connection to the specified FileStorage is made, but it is not
recommended to run fsrefs against a live FileStorage. Because a live
FileStorage is mutating while fsrefs runs, it's not possible for fsrefs to
get a wholly consistent view of the database across the entire time fsrefs
is running; spurious error messages may result.
fsrefs doesn't normally produce any output. If an object fails to load, the
oid of the object is given in a message saying so, and if -v was specified
then the traceback corresponding to the load failure is also displayed
(this is the only effect of the -v flag).
Three other kinds of errors are also detected, when an object O loads OK,
and directly refers to a persistent object P but there's a problem with P:
- If P doesn't exist in the database, a message saying so is displayed.
The unsatisfiable reference to P is often called a "dangling
reference"; P is called "missing" in the error output.
- If the current state of the database is such that P's creation has
been undone, then P can't be loaded either. This is also a kind of
dangling reference, but is identified as "object creation was undone".
- If P can't be loaded (but does exist in the database), a message saying
that O refers to an object that can't be loaded is displayed.
fsrefs also (indirectly) checks that the .index file is sane, because
fsrefs uses the index to get its idea of what constitutes "all the objects
in the database".
Note these limitations: because fsrefs only looks at the current revision
of objects, it does not attempt to load objects in versions, or non-current
revisions of objects; therefore fsrefs cannot find problems in versions or
in non-current revisions.
"""
import traceback
from BTrees.QQBTree import QQBTree
from ZODB.FileStorage import FileStorage
from ZODB.POSException import POSKeyError
from ZODB.serialize import get_refs
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import get_pickle_metadata
from ZODB.utils import load_current
from ZODB.utils import oid_repr
from ZODB.utils import p64
from ZODB.utils import u64
# There's a problem with oid. 'data' is its pickle, and 'serial' its
# serial number. 'missing' is a list of (oid, class, reason) triples,
# explaining what the problem(s) is(are).
def report(oid, data, serial, missing):
from_mod, from_class = get_pickle_metadata(data)
if len(missing) > 1:
plural = "s"
else:
plural = ""
ts = TimeStamp(serial)
print("oid {} {}.{}".format(hex(u64(oid)), from_mod, from_class))
print("last updated: {}, tid={}".format(ts, hex(u64(serial))))
print("refers to invalid object%s:" % plural)
for oid, info, reason in missing:
if isinstance(info, tuple):
description = "%s.%s" % info
else:
description = str(info)
print("\toid {} {}: {!r}".format(oid_repr(oid), reason, description))
print()
def main(path=None):
verbose = 0
if path is None:
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
# build {pos -> oid} index that is reverse to {oid -> pos} fs._index
# we'll need this to iterate objects in order of ascending file position to
# optimize disk IO.
pos2oid = QQBTree() # pos -> u64(oid)
for oid, pos in fs._index.iteritems():
pos2oid[pos] = u64(oid)
# pass 1: load all objects listed in the index and remember those objects
# that are deleted or load with an error. Iterate objects in order of
# ascending file position to optimize disk IO.
for oid64 in pos2oid.itervalues():
oid = p64(oid64)
try:
data, serial = load_current(fs, oid)
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
except: # noqa: E722 do not use bare 'except'
if verbose:
traceback.print_exc()
noload[oid] = 1
# pass 2: go through all objects again and verify that their references do
# not point to problematic object set. Iterate objects in order of
# ascending file position to optimize disk IO.
inactive = noload.copy()
inactive.update(undone)
for oid64 in pos2oid.itervalues():
oid = p64(oid64)
if oid in inactive:
continue
data, serial = load_current(fs, oid)
refs = get_refs(data)
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in fs._index:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
if __name__ == "__main__":
main()
|
b2f0fe68c7836210c66b7411ba4a1e314569e15d
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayAccountExrateAdviceAcceptResponse.py
|
6b1746af99e57823a55d2f7729accd00f7efec1f
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,276
|
py
|
AlipayAccountExrateAdviceAcceptResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayAccountExrateAdviceAcceptResponse(AlipayResponse):
def __init__(self):
super(AlipayAccountExrateAdviceAcceptResponse, self).__init__()
self._client_id = None
self._contra_amount = None
self._contra_ccy = None
self._deal_ref = None
self._dealt_rate = None
self._duplicate = None
self._messag_id = None
self._msg_type = None
self._requested_rate_status = None
self._side = None
self._transaction_amount = None
self._transaction_ccy = None
self._value_date = None
@property
def client_id(self):
return self._client_id
@client_id.setter
def client_id(self, value):
self._client_id = value
@property
def contra_amount(self):
return self._contra_amount
@contra_amount.setter
def contra_amount(self, value):
self._contra_amount = value
@property
def contra_ccy(self):
return self._contra_ccy
@contra_ccy.setter
def contra_ccy(self, value):
self._contra_ccy = value
@property
def deal_ref(self):
return self._deal_ref
@deal_ref.setter
def deal_ref(self, value):
self._deal_ref = value
@property
def dealt_rate(self):
return self._dealt_rate
@dealt_rate.setter
def dealt_rate(self, value):
self._dealt_rate = value
@property
def duplicate(self):
return self._duplicate
@duplicate.setter
def duplicate(self, value):
self._duplicate = value
@property
def messag_id(self):
return self._messag_id
@messag_id.setter
def messag_id(self, value):
self._messag_id = value
@property
def msg_type(self):
return self._msg_type
@msg_type.setter
def msg_type(self, value):
self._msg_type = value
@property
def requested_rate_status(self):
return self._requested_rate_status
@requested_rate_status.setter
def requested_rate_status(self, value):
self._requested_rate_status = value
@property
def side(self):
return self._side
@side.setter
def side(self, value):
self._side = value
@property
def transaction_amount(self):
return self._transaction_amount
@transaction_amount.setter
def transaction_amount(self, value):
self._transaction_amount = value
@property
def transaction_ccy(self):
return self._transaction_ccy
@transaction_ccy.setter
def transaction_ccy(self, value):
self._transaction_ccy = value
@property
def value_date(self):
return self._value_date
@value_date.setter
def value_date(self, value):
self._value_date = value
def parse_response_content(self, response_content):
response = super(AlipayAccountExrateAdviceAcceptResponse, self).parse_response_content(response_content)
if 'client_id' in response:
self.client_id = response['client_id']
if 'contra_amount' in response:
self.contra_amount = response['contra_amount']
if 'contra_ccy' in response:
self.contra_ccy = response['contra_ccy']
if 'deal_ref' in response:
self.deal_ref = response['deal_ref']
if 'dealt_rate' in response:
self.dealt_rate = response['dealt_rate']
if 'duplicate' in response:
self.duplicate = response['duplicate']
if 'messag_id' in response:
self.messag_id = response['messag_id']
if 'msg_type' in response:
self.msg_type = response['msg_type']
if 'requested_rate_status' in response:
self.requested_rate_status = response['requested_rate_status']
if 'side' in response:
self.side = response['side']
if 'transaction_amount' in response:
self.transaction_amount = response['transaction_amount']
if 'transaction_ccy' in response:
self.transaction_ccy = response['transaction_ccy']
if 'value_date' in response:
self.value_date = response['value_date']
|
668e0a1f14dc2066642dd8dd381e2c279432936a
|
9734c93c86c982b1ce046340bac9e53645b261b8
|
/plaso/cli/helpers/filter_file.py
|
9d51e03063c7ded0ccc7b5d905f6d978b1c79dec
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/plaso
|
cd72dd407d6c5627506c14f58cb8f6a6926aa808
|
d6022f8cfebfddf2d08ab2d300a41b61f3349933
|
refs/heads/main
| 2023-09-02T08:43:48.241198
| 2023-08-19T07:28:12
| 2023-08-19T07:28:12
| 23,812,315
| 1,506
| 421
|
Apache-2.0
| 2023-09-04T08:24:53
| 2014-09-08T23:29:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
filter_file.py
|
# -*- coding: utf-8 -*-
"""The filter file CLI arguments helper."""
import os
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class FilterFileArgumentsHelper(interface.ArgumentsHelper):
"""Filter file CLI arguments helper."""
NAME = 'filter_file'
DESCRIPTION = 'Filter file command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'-f', '--filter-file', '--filter_file', '--file-filter',
'--file_filter', dest='file_filter', action='store', type=str,
default=None, help=(
'List of files to include for targeted collection of files to '
'parse, one line per file path, setup is /path|file - where each '
'element can contain either a variable set in the preprocessing '
'stage or a regular expression.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the collection file does not exist.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
filter_file = cls._ParseStringOption(options, 'file_filter')
# Search the data location for the filter file.
if filter_file and not os.path.isfile(filter_file):
if configuration_object.data_location:
filter_file_basename = os.path.basename(filter_file)
filter_file_path = os.path.join(
configuration_object.data_location, filter_file_basename)
if os.path.isfile(filter_file_path):
filter_file = filter_file_path
if filter_file and not os.path.isfile(filter_file):
raise errors.BadConfigOption(
f'No such collection filter file: {filter_file:s}')
setattr(configuration_object, '_filter_file', filter_file)
manager.ArgumentHelperManager.RegisterHelper(FilterFileArgumentsHelper)
|
15436faccc5508f5363e0e07ef8ce454b262c114
|
a1657a0c5c8f3f8b51b98074293e2f2e9b16e6f4
|
/eks/demo/.cache/kubeflow/kubeflow-9804feb9fc23fc30075632a857087f4b529294e2/scripts/gke/delete_role_bindings.py
|
1eb809e713fb6ba819d3129b8b7a84070f2e12f7
|
[
"Apache-2.0"
] |
permissive
|
PipelineAI/pipeline
|
e8067636f5844dea0653aef84bd894ca2e700fc6
|
0f26e3eaad727c1d10950f592fe1949ece8153aa
|
refs/heads/master
| 2023-01-07T15:27:33.741088
| 2022-10-25T23:01:51
| 2022-10-25T23:01:51
| 38,730,494
| 2,596
| 512
|
Apache-2.0
| 2020-01-30T23:00:08
| 2015-07-08T03:49:23
|
Jsonnet
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
delete_role_bindings.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A simple script to delete all role bindings for the service accounts created
# as part of a Kubeflow deployment. This is an effort to deal with:
# https://github.com/kubeflow/kubeflow/issues/953
import argparse
import logging
import json
import subprocess
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--project", default=None, type=str, help=("The project."))
parser.add_argument(
"--service_account", type=str, help=("The service account."))
args = parser.parse_args()
output = subprocess.check_output([
"gcloud",
"projects",
"get-iam-policy",
"--format=json",
args.project,
])
bindings = json.loads(output)
roles = []
entry = "serviceAccount:" + args.service_account
for b in bindings["bindings"]:
if entry in b["members"]:
roles.append(b["role"])
# TODO(jlewi): Can we issue a single gcloud command.
for r in roles:
command = [
"gcloud",
"projects",
"remove-iam-policy-binding",
args.project,
"--member",
entry,
"--role",
r,
]
print(" ".join(command))
subprocess.call(command)
|
253a5bbb91bac174d7b1b206939d0e45c45d0736
|
61004e474b7b2ad0071c16766f0f7874f04f9466
|
/examples/cloudml-churn-prediction/trainer/trainer/metadata.py
|
085a6aba2415fd3765bb263e82edf75b0706f9b2
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/professional-services
|
eb79751efae765a8c691a745e520f44f51bd715c
|
0f51121b945bd74c7f667e74e8861fceda87565c
|
refs/heads/main
| 2023-09-05T02:57:33.328973
| 2023-08-30T14:40:30
| 2023-08-30T14:40:30
| 91,730,359
| 2,626
| 1,381
|
Apache-2.0
| 2023-09-14T20:13:42
| 2017-05-18T19:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
metadata.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define metadata constants."""
LABEL_COLUMN = 'labelArray'
KEY_COLUMN = 'fullVisitorId'
# columns to omit from model features
NON_FEATURE_COLUMNS = [LABEL_COLUMN, KEY_COLUMN]
NUM_INTERVALS = 4 # number of bounded churn duration intervals
SEED = 123
|
1940e6b999a5f6c9adacf6d71db793f0801fcce5
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowTcpProxyStatistics/cli/equal/golden_output_expected.py
|
632c6d02b890ecca78a0f2a0bef65870c4747758
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,794
|
py
|
golden_output_expected.py
|
expected_output = {
"tcpproxy_statistics": {
"total_connections": 32420,
"max_concurrent_connections": 1466,
"flow_entries_created": 32432,
"flow_entries_deleted": 32432,
"current_flow_entries": 0,
"current_valid_flow_entries": 0,
"current_connections": 0,
"connections_in_progress": 0,
"failed_connections": 12,
"invalid_flow_entries": 0,
"syncache_added": 32432,
"syncache_not_added_nat_entry_null": 0,
"syncache_not_added_mrkd_for_cleanup": 0,
"syncache_not_added_flow_entry_null": 0,
"syncache_not_added_flow_invalid": 0,
"syncache_not_added_flow_is_in_use": 0,
"syn_purge_enqueued": 0,
"syn_purge_enqueue_failed": 0,
"other_cleanup_enqueued": 0,
"other_cleanup_enqueue_failed": 0,
"stack_cleanup_enqueued": 11787,
"stack_cleanup_enqueue_failed": 0,
"timer_expire_cleanup_enqueued": 0,
"timer_expire_cleanup_enqueue_failed": 0,
"proxy_cleanup_enqueued": 20645,
"proxy_cleanup_enqueue_failed": 0,
"cleanup_req_watcher_called": 118623,
"pre_tcp_flow_list_enq_failed": 0,
"pre_tcp_flow_list_deq_failed_timer": 0,
"pre_tcp_flow_list_deq_failed_accept": 0,
"pre_tcp_flow_list_enq_success": 32432,
"pre_tcp_flow_list_deq_cleanup": 0,
"pre_tcp_flow_list_deq_accept": 32432,
"pre_tcp_cleanup_timeout_update_count": 0,
"total_flow_entries_pending_cleanup_0": 0,
"total_cleanup_done": 32432,
"num_stack_cb_with_null_ctx": 0,
"vpath_cleanup_from_nmrx_thread": 0,
"vpath_cleanup_from_ev_thread": 32432,
"failed_conn_already_accepted_conn": 0,
"ssl_init_failure": 0,
"max_queue_length_work": 27,
"current_queue_length_work": 0,
"max_queue_length_ism": 0,
"current_queue_length_ism": 0,
"max_queue_length_sc": 0,
"current_queue_length_sc": 0,
"total_tx_enq_ign_due_to_conn_close": 15,
"current_rx_epoll": 0,
"current_tx_epoll": 0,
"paused_by_tcp_tx_full": 0,
"resumed_by_tcp_tx_below_threshold": 0,
"paused_by_tcp_buffer_consumed": 0,
"resumed_by_tcp_buffer_released": 0,
"ssl_pause_done": 0,
"ssl_resume_done": 0,
"snort_pause_done": 0,
"snort_resume_done": 0,
"dre_pause_done": 0,
"dre_resume_done": 0,
"dre_resume_msg_to_be_sent": 0,
"dre_resume_msg_sent": 0,
"dre_bypass_received_from_peer": 0,
"dre_bypass_hints_sent": 0,
"dre_smb_bypass_success_received": 0,
"dre_http_bypass_success_received": 0,
"ev_ssl_pause_process": 0,
"ev_snort_pause_process": 0,
"ev_dre_pause_process": 0,
"ev_ssl_snort_resume_process": 4728,
"socket_pause_done": 0,
"socket_resume_done": 4728,
"ssl_pause_called": 0,
"ssl_resume_called": 0,
"async_events_sent": 31822,
"async_events_processed": 31822,
"tx_async_events_sent": 416778,
"tx_async_events_recvd": 416777,
"tx_async_events_processed": 416777,
"failed_send": 0,
"tcp_ssl_reset_initiated": 0,
"tcp_snort_reset_initiated": 0,
"tcp_dre_close_initiated": 0,
"tcp_fin_received_from_clnt_svr": 44168,
"tcp_reset_received_from_clnt_svr": 24995,
"ssl_fin_received_sc": 0,
"ssl_reset_received_sc": 0,
"sc_fin_received_ssl": 0,
"sc_reset_received_ssl": 0,
"ssl_fin_received_tcp": 0,
"ssl_reset_received_tcp": 0,
"tcp_fin_processed": 44168,
"tcp_fin_ignored_fd_already_closed": 0,
"tcp_reset_processed": 20672,
"svc_reset_processed": 0,
"flow_cleaned_with_client_data": 0,
"flow_cleaned_with_server_data": 0,
"buffers_dropped_in_tx_socket_close": 1,
"buffers_dropped_in_tx_not_writable": 0,
"buffers_dropped_in_tx_socket_closed": 1,
"tcp_4k_allocated_buffers": 416778,
"tcp_16k_allocated_buffers": 0,
"tcp_32k_allocated_buffers": 0,
"tcp_128k_allocated_buffers": 0,
"tcp_freed_buffers": 449210,
"ssl_allocated_buffers": 0,
"ssl_freed_buffers": 0,
"tcp_received_buffers": 351938,
"tcp_to_ssl_enqueued_buffers": 0,
"ssl_to_svc_enqueued_buffers": 0,
"svc_to_ssl_enqueued_buffers": 0,
"ssl_to_tcp_enqueued_buffers": 0,
"tcp_buffers_sent": 351933,
"tcp_failed_buffers_allocations": 0,
"tcp_failed_16k_buffers_allocations": 0,
"tcp_failed_32k_buffers_allocations": 0,
"tcp_failed_128k_buffers_allocations": 0,
"ssl_failed_buffers_allocations": 0,
"rx_sock_bytes_read_512": 49486,
"rx_sock_bytes_read_1024": 3207,
"rx_sock_bytes_read_2048": 18568,
"rx_sock_bytes_read_4096": 280677,
"ssl_server_init": 0,
"flows_dropped_snort_gbl_health_yellow": 0,
"flows_dropped_snort_inst_health_yellow": 0,
"flows_dropped_wcapi_channel_health_yellow": 0,
"total_wcapi_snd_flow_create_svc_chain_failed": 0,
"total_wcapi_snd_flow_delete_svc_chain_failed": 0,
"total_wcapi_send_data_svc_chain_failed": 0,
"total_wcapi_send_close_svc_chain_failed": 0,
"total_tx_enqueue_failed": 0,
"total_cleanup_flow_msg_add_to_wk_q_failed": 0,
"total_cleanup_flow_msg_added_to_wk_q": 0,
"total_cleanup_flow_msg_rcvd_in_wk_q": 0,
"total_cleanup_flow_ignored_already_done": 0,
"total_cleanup_ssl_msg_add_to_wk_q_failed": 0,
"total_ssl_trigger_reset_msg_to_wk_q_failed": 0,
"total_uhi_mmap": 7793,
"total_uhi_munmap": 0,
"total_uhi_page_alloc": 0,
"total_uhi_page_alloc_retry": 0,
"total_uhi_page_alloc_failed": 0,
"total_uhi_page_alloc_failed_invalid_size": 0,
"total_uhi_page_free": 0,
"total_enable_rx_enqueued": 0,
"total_enable_rx_called": 0,
"total_enable_rx_process_done": 0,
"total_enable_rx_enqueue_failed": 0,
"total_enable_rx_process_failed": 0,
"total_enable_rx_socket_on_client_stack_close": 11228,
"total_enable_rx_socket_on_server_stack_close": 20594,
"unified_logging_msg_received": 0,
"unified_logging_drop_data_too_long": 0,
"unified_logging_enqueue_success": 0,
"unified_logging_dequeue_success": 0,
"unified_logging_deq_fail_not_enough_space": 0,
"flow_stats_add_failure_count": 0,
"flow_stats_delete_failure_count": 0,
"aoim_sync_started": 0,
"aoim_sync_completed": 0,
"aoim_sync_errored": 0,
}
}
|
cf56e8946acdece4b3be17c96872a7926dbb790d
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/_winreg/__init__.py
|
d865ca691e9b8488b0587459a0764807887eade2
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,394
|
py
|
__init__.py
|
from pypy.interpreter.mixedmodule import MixedModule
from rpython.rlib.rwinreg import constants
class Module(MixedModule):
"""This module provides access to the Windows registry API.
Functions:
CloseKey() - Closes a registry key.
ConnectRegistry() - Establishes a connection to a predefined registry handle
on another computer.
CreateKey() - Creates the specified key, or opens it if it already exists.
DeleteKey() - Deletes the specified key.
DeleteValue() - Removes a named value from the specified registry key.
EnumKey() - Enumerates subkeys of the specified open registry key.
EnumValue() - Enumerates values of the specified open registry key.
ExpandEnvironmentStrings() - Expand the env strings in a REG_EXPAND_SZ string.
FlushKey() - Writes all the attributes of the specified key to the registry.
LoadKey() - Creates a subkey under HKEY_USER or HKEY_LOCAL_MACHINE and stores
registration information from a specified file into that subkey.
OpenKey() - Alias for <om win32api.RegOpenKeyEx>
OpenKeyEx() - Opens the specified key.
QueryValue() - Retrieves the value associated with the unnamed value for a
specified key in the registry.
QueryValueEx() - Retrieves the type and data for a specified value name
associated with an open registry key.
QueryInfoKey() - Returns information about the specified key.
SaveKey() - Saves the specified key, and all its subkeys, to a file.
SetValue() - Associates a value with a specified key.
SetValueEx() - Stores data in the value field of an open registry key.
Special objects:
HKEYType -- type object for HKEY objects
error -- exception raised for Win32 errors
Integer constants:
Many constants are defined - see the documentation for each function
to see what constants are used, and where."""
appleveldefs = {
}
interpleveldefs = {
'error' : 'space.w_WindowsError',
'HKEYType' : 'interp_winreg.W_HKEY',
'SetValue' : 'interp_winreg.SetValue',
'SetValueEx' : 'interp_winreg.SetValueEx',
'QueryValue' : 'interp_winreg.QueryValue',
'QueryValueEx' : 'interp_winreg.QueryValueEx',
'CreateKey' : 'interp_winreg.CreateKey',
'CreateKeyEx' : 'interp_winreg.CreateKeyEx',
'DeleteKey' : 'interp_winreg.DeleteKey',
'DeleteValue' : 'interp_winreg.DeleteValue',
'OpenKey' : 'interp_winreg.OpenKey',
'OpenKeyEx' : 'interp_winreg.OpenKey',
'EnumValue' : 'interp_winreg.EnumValue',
'EnumKey' : 'interp_winreg.EnumKey',
'FlushKey' : 'interp_winreg.FlushKey',
'CloseKey' : 'interp_winreg.CloseKey',
'QueryInfoKey' : 'interp_winreg.QueryInfoKey',
'LoadKey' : 'interp_winreg.LoadKey',
'SaveKey' : 'interp_winreg.SaveKey',
'ConnectRegistry': 'interp_winreg.ConnectRegistry',
'ExpandEnvironmentStrings': 'interp_winreg.ExpandEnvironmentStrings',
'DisableReflectionKey': 'interp_winreg.DisableReflectionKey',
'EnableReflectionKey': 'interp_winreg.EnableReflectionKey',
'QueryReflectionKey': 'interp_winreg.QueryReflectionKey',
'DeleteKeyEx': 'interp_winreg.DeleteKeyEx',
}
for name, value in constants.iteritems():
interpleveldefs[name] = "space.wrap(%s)" % (value,)
|
19d374fc1eeb76facec0772316524d0c4df5967e
|
44ba493efd0fd7ae78880d3d93cc0d66166935e5
|
/src/briefcase/integrations/flatpak.py
|
f1c706402a11c6ac00c353b1d4180d678549dcc1
|
[
"BSD-3-Clause"
] |
permissive
|
beeware/briefcase
|
1b3eaebf0791728c68986809aa07abc436e422c6
|
cc2dae1ffc58f9700d0ca57461cb05909bc01bec
|
refs/heads/main
| 2023-09-01T19:24:15.424713
| 2023-09-01T04:35:53
| 2023-09-01T04:35:53
| 39,841,700
| 1,609
| 256
|
BSD-3-Clause
| 2023-09-11T10:04:34
| 2015-07-28T15:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 11,996
|
py
|
flatpak.py
|
from __future__ import annotations
import subprocess
from pathlib import Path
from briefcase.exceptions import BriefcaseCommandError
from briefcase.integrations.base import Tool, ToolCache
from briefcase.integrations.subprocess import SubprocessArgT
class Flatpak(Tool):
name = "flatpak"
full_name = "Flatpak"
supported_host_os = {"Linux"}
DEFAULT_REPO_ALIAS = "flathub"
DEFAULT_REPO_URL = "https://flathub.org/repo/flathub.flatpakrepo"
@classmethod
def verify_install(cls, tools: ToolCache, **kwargs) -> Flatpak:
"""Verify that the Flatpak toolchain is available.
:param tools: ToolCache of available tools
:returns: A wrapper for the Flatpak tools.
"""
# short circuit since already verified and available
if hasattr(tools, "flatpak"):
return tools.flatpak
flatpak = Flatpak(tools=tools)
try:
output = tools.subprocess.check_output(["flatpak", "--version"]).strip("\n")
parts = output.split(" ")
try:
if parts[0] == "Flatpak":
version = parts[1].split(".")
if int(version[0]) < 1:
raise BriefcaseCommandError(
"Briefcase requires Flatpak 1.0 or later."
)
else:
raise ValueError(f"Unexpected tool name {parts[0]}")
except (ValueError, IndexError):
tools.logger.warning(
"""\
*************************************************************************
** WARNING: Unable to determine the version of Flatpak **
*************************************************************************
Briefcase will proceed, assuming everything is OK. If you
experience problems, this is almost certainly the cause of those
problems.
Please report this as a bug at:
https://github.com/beeware/briefcase/issues/new
In your report, please include the output from running:
flatpak --version
from the command prompt.
*************************************************************************
"""
)
except OSError as e:
raise BriefcaseCommandError(
"""\
Briefcase requires the Flatpak toolchain, but it does not appear to be installed.
Instructions for installing the Flatpak toolchain can be found at:
https://flatpak.org/setup/
You must install both flatpak and flatpak-builder.
"""
) from e
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError("Unable to invoke flatpak.") from e
try:
output = tools.subprocess.check_output(
["flatpak-builder", "--version"]
).strip("\n")
parts = output.split(" ")
try:
if parts[0] == "flatpak-builder":
version = parts[1].split(".")
if int(version[0]) < 1:
raise BriefcaseCommandError(
"Briefcase requires flatpak-builder 1.0 or later."
)
else:
raise ValueError(f"Unexpected tool name {parts[0]}")
except (ValueError, IndexError):
tools.logger.warning(
"""\
*************************************************************************
** WARNING: Unable to determine the version of flatpak-builder **
*************************************************************************
Briefcase will proceed, assuming everything is OK. If you
experience problems, this is almost certainly the cause of those
problems.
Please report this as a bug at:
https://github.com/beeware/briefcase/issues/new
In your report, please include the output from running:
flatpak-builder --version
from the command prompt.
*************************************************************************
"""
)
except OSError as e:
raise BriefcaseCommandError(
"""\
Briefcase requires the full Flatpak development toolchain, but flatpak-builder
does not appear to be installed.
Instructions for installing the Flatpak toolchain can be found at:
https://flatpak.org/setup/
You must install both flatpak and flatpak-builder.
"""
) from e
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError("Unable to invoke flatpak-builder.") from e
tools.flatpak = flatpak
return flatpak
def verify_repo(self, repo_alias: str, url: str):
"""Verify that the Flatpak repository has been registered.
:param repo_alias: The alias to use when registering the repo.
:param url: The URL of the Flatpak repo.
"""
try:
self.tools.subprocess.run(
[
"flatpak",
"remote-add",
"--user",
"--if-not-exists",
repo_alias,
url,
]
+ (["--verbose"] if self.tools.logger.is_deep_debug else []),
check=True,
)
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError(
f"Unable to add Flatpak repo {url} with alias {repo_alias}."
) from e
def verify_runtime(
self,
repo_alias: str,
runtime: str,
runtime_version: str,
sdk: str,
):
"""Verify that a specific Flatpak runtime and SDK are available.
:param repo_alias: The alias of the repo where the runtime and SDK are stored.
:param runtime: The identifier of the Flatpak runtime
:param runtime_version: The version of the Flatpak runtime
:param sdk: The Flatpak SDK
"""
try:
self.tools.subprocess.run(
[
"flatpak",
"install",
"--assumeyes",
"--user",
repo_alias,
f"{runtime}/{self.tools.host_arch}/{runtime_version}",
f"{sdk}/{self.tools.host_arch}/{runtime_version}",
]
+ (["--verbose"] if self.tools.logger.is_deep_debug else []),
check=True,
# flatpak install uses many animations that cannot be disabled
stream_output=False,
)
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError(
f"Unable to install Flatpak runtime {runtime}/{self.tools.host_arch}/{runtime_version} "
f"and SDK {sdk}/{self.tools.host_arch}/{runtime_version} from repo {repo_alias}."
) from e
def build(self, bundle_identifier: str, app_name: str, path: Path):
"""Build a Flatpak manifest.
On success, the app is installed into the user's local Flatpak install, and a
shell script is created that can be used to start the app. The shell file isn't
really needed to start the app, but it serves as a marker for a successful build
that Briefcase can use.
:param bundle_identifier: The bundle identifier for the app being built.
:param app_name: The app name.
:param path: The path to the folder containing the app's Flatpak manifest file.
"""
try:
self.tools.subprocess.run(
[
"flatpak-builder",
"--force-clean",
# Archive into a local repository
"--repo",
"repo",
# Install the app into the user space
"--install",
"--user",
"build",
"manifest.yml",
]
+ (["--verbose"] if self.tools.logger.is_deep_debug else []),
check=True,
cwd=path,
)
# Create a marker file to indicate a build has completed.
# For bonus points, the marker file also is executable
# and is an alias for the command that would actually start
# the flatpak.
bin_path = path / bundle_identifier
with bin_path.open("w", encoding="utf-8") as f:
f.write(
f"""\
#!/bin/sh
# echo To run this flatpak, run:
flatpak run {bundle_identifier}
"""
)
self.tools.os.chmod(bin_path, 0o755)
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError(f"Error while building app {app_name}.") from e
def run(
self,
bundle_identifier: str,
args: list[SubprocessArgT] | None = None,
main_module: str | None = None,
) -> subprocess.Popen[str]:
"""Run a Flatpak in a way that allows for log streaming.
:param bundle_identifier: The bundle identifier for the app being built.
:param args: (Optional) The list of arguments to pass to the app
:param main_module: (Optional) The main module to run. Only required if you want
to override the default main module for the app.
:returns: A Popen object for the running app.
"""
if main_module:
# Set a BRIEFCASE_MAIN_MODULE environment variable
# to override the module at startup
kwargs = {
"env": {
"BRIEFCASE_MAIN_MODULE": main_module,
}
}
else:
kwargs = {}
flatpak_run_cmd = ["flatpak", "run", bundle_identifier]
flatpak_run_cmd.extend([] if args is None else args)
if self.tools.logger.is_deep_debug:
# Must come before bundle identifier; otherwise, it's passed as an arg to app
flatpak_run_cmd.insert(2, "--verbose")
return self.tools.subprocess.Popen(
flatpak_run_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
**kwargs,
)
def bundle(
self,
repo_url: str,
bundle_identifier: str,
app_name: str,
version: str,
build_path: Path,
output_path: Path,
):
"""Bundle a Flatpak for distribution.
Generates a standalone .flatpak file that can be installed into another user's
Flatpak repository.
:param repo_url: The URL of the repository that contains the runtime used by the
app.
:param bundle_identifier: The bundle identifier for the app being built.
:param app_name: The app name.
:param version: The version of the app being built.
:param build_path: The path where the flatpak was built. This path will contain
the repo where the built flatpak was exported.
:param output_path: The path of the output file to create as an export.
"""
try:
self.tools.subprocess.run(
[
"flatpak",
"build-bundle",
# Set the repo where the runtime can be found
"--runtime-repo",
repo_url,
# Sign the export
# "--gpg-sign", "..."
"repo",
output_path,
bundle_identifier,
version,
]
+ (["--verbose"] if self.tools.logger.is_deep_debug else []),
check=True,
cwd=build_path,
)
except subprocess.CalledProcessError as e:
raise BriefcaseCommandError(
f"Unable to build a Flatpak bundle for app {app_name}."
) from e
|
3b4e9c94397e132c0b09e8826490ba29beb15579
|
77c94e8d7297c009a57bee406c601dcc985bf5b9
|
/src/OFS/Uninstalled.py
|
c5e9f3132e2e9467fc84eb7c078cc3d9a5127c32
|
[
"ZPL-2.1"
] |
permissive
|
zopefoundation/Zope
|
94b71ce10e501e4c0e55d16d94f1bc94ff575212
|
c31b1c635e85a1766f2666cb0bd117337ae5fa67
|
refs/heads/master
| 2023-09-03T20:22:32.121746
| 2023-08-30T06:34:44
| 2023-08-30T06:34:44
| 8,436,350
| 335
| 115
|
NOASSERTION
| 2023-09-11T21:21:27
| 2013-02-26T16:13:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,899
|
py
|
Uninstalled.py
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Objects for packages that have been uninstalled.
"""
import html
from _thread import allocate_lock
from logging import getLogger
from Acquisition import Acquired
from Acquisition import Explicit
from App.special_dtml import DTMLFile
from OFS.SimpleItem import Item
from Persistence import Overridable
from ZODB.broken import Broken as ZODB_Broken
from ZODB.broken import persistentBroken
broken_klasses = {}
broken_klasses_lock = allocate_lock()
LOG = getLogger('OFS.Uninstalled')
class BrokenClass(ZODB_Broken, Explicit, Item, Overridable):
_p_changed = 0
meta_type = 'Broken Because Product is Gone'
product_name = 'unknown'
id = 'broken'
manage_page_header = Acquired
manage_page_footer = Acquired
def __getattr__(self, name):
if name[:3] == '_p_':
return BrokenClass.inheritedAttribute('__getattr__')(self, name)
raise AttributeError(html.escape(name, True))
manage = DTMLFile('dtml/brokenEdit', globals())
manage_main = DTMLFile('dtml/brokenEdit', globals())
manage_workspace = DTMLFile('dtml/brokenEdit', globals())
def Broken(self, oid, pair):
broken_klasses_lock.acquire()
try:
if pair in broken_klasses:
klass = broken_klasses[pair]
else:
module, klassname = pair
d = {'BrokenClass': BrokenClass}
exec("class %s(BrokenClass): ' '; __module__=%r" %
(klassname, module), d)
klass = broken_klasses[pair] = d[klassname]
module = module.split('.')
if len(module) > 2 and module[0] == 'Products':
klass.product_name = module[1]
klass.title = (
'This object from the %s product '
'is broken!' %
klass.product_name)
klass.info = (
'This object\'s class was %s in module %s.' %
(klass.__name__, klass.__module__))
klass = persistentBroken(klass)
LOG.warning(
'Could not import class %r '
'from module %r' % (klass.__name__, klass.__module__))
finally:
broken_klasses_lock.release()
if oid is None:
return klass
i = klass()
i._p_oid = oid
i._p_jar = self
return i
|
f7bd62527ebeb2f3668f061f8bd295efa68f3c6e
|
6e5cb5ac15aed033de4109a6f0849d0c920413b0
|
/tests/integration/test_git.py
|
b73006b2306a457f0a31289d91e158a71fe9f507
|
[
"MIT"
] |
permissive
|
saltstack/kitchen-salt
|
6ab86dddd58a3b4490acdec36a37adbc2dce7c4c
|
03a438aadf8c73b8e49820d21f44a468e0632901
|
refs/heads/master
| 2023-08-29T09:45:20.728163
| 2022-08-25T18:20:46
| 2022-08-25T18:20:46
| 15,855,060
| 107
| 79
|
MIT
| 2022-05-19T10:18:48
| 2014-01-13T01:39:21
|
Ruby
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
test_git.py
|
import pytest
import os
@pytest.mark.skipif('windows' in os.environ.get('KITCHEN_INSTANCE', ''), reason='Skip on windows images')
@pytest.mark.parametrize("pkgname", [
"git",
])
def test_pkg(host, pkgname):
pkg = host.package(pkgname)
assert pkg.is_installed is True
|
60165f0c937655ec3f726a0f60ecdf1d42e50628
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/mmdeploy/apis/rknn/__init__.py
|
5754e0a61a1e3267c9d9885e2c34e5ab2c385502
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.backend.rknn import is_available
__all__ = ['is_available']
if is_available():
from mmdeploy.backend.rknn.onnx2rknn import onnx2rknn as _onnx2rknn
from ..core import PIPELINE_MANAGER
onnx2rknn = PIPELINE_MANAGER.register_pipeline()(_onnx2rknn)
__all__ += ['onnx2rknn']
|
244c8d1de580eef57f373ee4d1940ac2d86f25b4
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/rolodex/migrations/0039_alter_projecttarget_note.py
|
28537a6f8fee76ecea0d440b717e00e6c3b8b96b
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 502
|
py
|
0039_alter_projecttarget_note.py
|
# Generated by Django 3.2.11 on 2022-12-16 21:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rolodex', '0038_auto_20221122_1938'),
]
operations = [
migrations.AlterField(
model_name='projecttarget',
name='note',
field=models.TextField(blank=True, help_text='Provide additional information about the target or its environment', null=True, verbose_name='Notes'),
),
]
|
d1c10c7e57f254aa61be649ab51932daaead359d
|
695180c14d136c1af33e50497c7957dc74835680
|
/models/official/retinanet/retinanet_segmentation_main.py
|
1d2c2a0cf9c28a2ca949c5f5b708f7e1cb88a4e1
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tpu
|
4517c43b42c3dd98b8abafbbd9457384dd9639b1
|
0f7adb97a93ec3e3485c261d030c507eb16b33e4
|
refs/heads/master
| 2023-09-02T23:33:31.299394
| 2023-09-01T00:43:03
| 2023-09-01T00:43:03
| 96,946,693
| 5,627
| 2,107
|
Apache-2.0
| 2023-09-13T17:50:43
| 2017-07-11T23:54:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,349
|
py
|
retinanet_segmentation_main.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for RetinaNet segmentation model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
import dataloader
import retinanet_segmentation_model
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the GCE zone from metadata.')
# Model specific parameters
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('resnet_checkpoint', None,
'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores)')
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 8, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 1449, 'The number of samples for '
'evaluation.')
flags.DEFINE_integer(
'iterations_per_loop', 100, 'Number of iterations per TPU training loop')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., Pascal VOC train set)')
flags.DEFINE_string(
'validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., Pascal VOC validation set)')
flags.DEFINE_integer('num_examples_per_epoch', 10582,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', 45, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train_and_eval',
'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
if FLAGS.use_tpu:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
if FLAGS.mode in ('train',
'train_and_eval') and FLAGS.training_file_pattern is None:
raise RuntimeError('You must specify --training_file_pattern for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('You must specify '
'--validation_file_pattern for evaluation.')
# Parse hparams
hparams = retinanet_segmentation_model.default_hparams()
hparams.parse(FLAGS.hparams)
params = dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
use_tpu=FLAGS.use_tpu,
resnet_checkpoint=FLAGS.resnet_checkpoint,
mode=FLAGS.mode,
)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master='',
model_dir=FLAGS.model_dir,
keep_checkpoint_max=3,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
FLAGS.iterations_per_loop,
FLAGS.num_shards,
per_host_input_for_training=(
contrib_tpu.InputPipelineConfig.PER_HOST_V2)))
model_fn = retinanet_segmentation_model.segmentation_model_fn
# TPU Estimator
eval_params = dict(
params,
use_tpu=FLAGS.use_tpu,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
)
if FLAGS.mode == 'train':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size),
)
if FLAGS.eval_after_training:
# Run evaluation on CPU after training finishes.
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
elif FLAGS.mode == 'eval':
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
def terminate_eval():
tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
FLAGS.eval_timeout)
return True
# Run evaluation when there's a new checkpoint
for ckpt in contrib_training.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout,
timeout_fn=terminate_eval):
tf.logging.info('Starting to evaluate.')
try:
# Note that if eval_samples is not evenly divisible by eval_batch_size,
# the remainder will be dropped, resulting in different evaluation
# performance than validating on the full set.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
total_step = int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size)
if current_step >= total_step:
tf.logging.info('Evaluation finished after training step %d' %
current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info('Checkpoint %s no longer exists, skipping checkpoint' %
ckpt)
elif FLAGS.mode == 'train_and_eval':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
for cycle in range(0, FLAGS.num_epochs):
tf.logging.info('Starting training cycle, epoch: %d.' % cycle)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
steps=int(FLAGS.num_examples_per_epoch / FLAGS.train_batch_size))
tf.logging.info('Starting evaluation cycle, epoch: {:d}.'.format(
cycle + 1))
# Run evaluation after training finishes.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Mode not found.')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
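# Example invocation (illustrative only; the TPU name, bucket, and paths below
# are hypothetical and must be adapted to your setup):
#
# python retinanet_segmentation_main.py \
# --tpu=my-tpu \
# --model_dir=gs://my-bucket/retinanet-seg-model \
# --resnet_checkpoint=gs://my-bucket/resnet50-checkpoint \
# --training_file_pattern='gs://my-bucket/voc-train-*' \
# --validation_file_pattern='gs://my-bucket/voc-val-*' \
# --mode=train_and_eval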
|
3b892fdaafdeece489ad6707a913f2669a1e83d3
|
c85ec637dd7202eccbab3623f0e12608f2c58c73
|
/redditdownloader/interfaces/__init__.py
|
1b2ae9b1eb2650a9f0de91c13e8984707aef8291
|
[] |
no_license
|
shadowmoose/RedditDownloader
|
2d7b8d68d3be7cd63614c5019e2935e25d8548f8
|
ebcb791f78e5d761efcca28b5ebd5b7e1b61df85
|
refs/heads/master
| 2023-07-13T09:55:21.700858
| 2023-07-02T05:02:18
| 2023-07-02T05:02:18
| 93,103,288
| 1,134
| 120
| null | 2023-05-04T05:57:26
| 2017-06-01T22:03:41
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
__init__.py
|
from abc import ABC, abstractmethod
class UserInterface(ABC):
def __init__(self, ui_id):
self.ui_id = ui_id
@abstractmethod
def display(self):
"""
The primary method to implement; it is called when this UI should take over rendering.
Calling this does not *require* that RMD start downloading at once;
however, this UI should facilitate that functionality.
"""
pass
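# A minimal sketch of a concrete implementation (illustrative only; the class
# name and behavior below are hypothetical and not part of RMD):
class ExampleTerminalUI(UserInterface):
    def __init__(self):
        super().__init__(ui_id='example-terminal')

    def display(self):
        # Take over rendering here; a real UI would also let the user start
        # and monitor downloading from this point.
        print('Example UI (id=%s) is now in control.' % self.ui_id)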
|
81460aabebe103bb8c0481a7431fc41c55859381
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoJets/JetPlusTracks/python/JetPlusTrackCorrections_cff.py
|
5223888f23e3edf4e0e3703551f3c98ad49c384d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
JetPlusTrackCorrections_cff.py
|
import FWCore.ParameterSet.Config as cms
# ---------- Add assigned jet-track association
from RecoJets.JetAssociationProducers.ak4JTA_cff import *
ak4JetTracksAssociatorAtVertexJPT = ak4JetTracksAssociatorAtVertex.clone(
useAssigned = True,
pvSrc = "offlinePrimaryVertices"
)
# ---------- Tight Electron ID
from RecoEgamma.ElectronIdentification.electronIdSequence_cff import eidTight
JPTeidTight = eidTight.clone()
# ---------- Seeds from TrackJets
from RecoJets.JetPlusTracks.jetPlusTrackAddonSeedProducer_cfi import *
JetPlusTrackAddonSeedReco = jetPlusTrackAddonSeedProducer.clone()
# ---------- Module definition
from RecoJets.JetPlusTracks.JetPlusTrackCorrections_cfi import *
JetPlusTrackZSPCorJetAntiKt4 = cms.EDProducer(
"JetPlusTrackProducer",
cms.PSet(JPTZSPCorrectorAntiKt4),
src = cms.InputTag("ak4CaloJets"),
srcTrackJets = cms.InputTag("ak4TrackJets"),
srcAddCaloJets = cms.InputTag('JetPlusTrackAddonSeedReco'),
extrapolations = cms.InputTag("trackExtrapolator"),
tagName = cms.vstring('ZSP_CMSSW390_Akt_05_PU0'),
tagNameOffset = cms.vstring(),
PU = cms.int32(-1),
FixedPU = cms.int32(0),
UseZSP = cms.bool(False),
srcPVs = cms.InputTag('offlinePrimaryVertices'),
alias = cms.untracked.string('JetPlusTrackZSPCorJetAntiKt4'),
ptCUT = cms.double(15.),
dRcone = cms.double(0.4)
)
JetPlusTrackZSPCorJetAntiKt4.JetTracksAssociationAtVertex = "ak4JetTracksAssociatorAtVertexJPT"
JetPlusTrackZSPCorJetAntiKt4.JetTracksAssociationAtCaloFace = "ak4JetTracksAssociatorAtCaloFace"
JetPlusTrackZSPCorJetAntiKt4.JetSplitMerge = 2
### ---------- Sequences
# Anti-Kt
JetPlusTrackCorrectionsAntiKt4Task = cms.Task(
JPTeidTight,
JetPlusTrackAddonSeedReco,
ak4JetTracksAssociatorAtVertexJPT,
ak4JetTracksAssociatorAtCaloFace,
JetPlusTrackZSPCorJetAntiKt4
)
JetPlusTrackCorrectionsAntiKt4 = cms.Sequence(JetPlusTrackCorrectionsAntiKt4Task)
# For backward-compatibility (but to be deprecated!)
JetPlusTrackCorrections = cms.Sequence(JetPlusTrackCorrectionsAntiKt4)
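# For illustration only: a sketch of how this fragment might be scheduled in a
# top-level configuration (the process object and path name are hypothetical):
#
# process.load("RecoJets.JetPlusTracks.JetPlusTrackCorrections_cff")
# process.jptCorrections = cms.Path(process.JetPlusTrackCorrectionsAntiKt4)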
|
19e643350d6ebd61d9275f90442ea33f564ef9f3
|
f1c2e4b3147af77e23306f841610aafd6db1c6b0
|
/dev-support/examples/mnist-tensorflow/MultiWorkerMirroredStrategy/mnist_keras_distributed.py
|
791bffc8e116d015688865d8de3317c80d079753
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"MIT",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
apache/submarine
|
a2927f5f4f7f5faff4701139f2f0f88a98195e7f
|
0c10613f39b707d5e446c515c12fa28295c8052e
|
refs/heads/master
| 2023-08-30T14:35:43.145942
| 2023-08-20T00:19:54
| 2023-08-24T23:50:49
| 209,459,144
| 663
| 269
|
Apache-2.0
| 2023-09-03T09:05:06
| 2019-09-19T04:00:17
|
Java
|
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
mnist_keras_distributed.py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
# References
# https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras
# https://reurl.cc/no9Zk8
import json
import os
import tensorflow as tf
import tensorflow_datasets as tfds
import submarine
BUFFER_SIZE = 10000
BATCH_SIZE = 32
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
def make_datasets_unbatched():
# Scaling MNIST data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
datasets, info = tfds.load(name="mnist", with_info=True, as_supervised=True)
return (
datasets["train"]
.map(scale, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.cache()
.shuffle(BUFFER_SIZE)
)
def build_and_compile_cnn_model():
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation="relu"),
tf.keras.layers.Dense(10),
]
)
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
metrics=["accuracy"],
)
return model
tf_config = json.loads(os.environ["TF_CONFIG"])
NUM_WORKERS = len(tf_config["cluster"]["worker"])
# Here the batch size scales up by the number of workers since
# `tf.data.Dataset.batch` expects the global batch size. With a per-worker
# batch size of 64 and two workers, this becomes 128.
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
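# For reference, a minimal sketch of the TF_CONFIG this script expects for a
# two-worker job (hostnames and ports are hypothetical; each worker sets its
# own "task" index). It is shown only for documentation and is not applied:
_example_tf_config = {
    "cluster": {"worker": ["host1.example.com:2222", "host2.example.com:2222"]},
    "task": {"type": "worker", "index": 0},
}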
# The dataset needs to be created after the MultiWorkerMirroredStrategy object
# is instantiated.
train_datasets = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
# The next three lines are the key to working around the dataset auto-sharding issue.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.DATA
) # AutoShardPolicy.OFF can work too.
train_datasets_no_auto_shard = train_datasets.with_options(options)
with strategy.scope():
# Model building/compiling need to be within `strategy.scope()`.
multi_worker_model = build_and_compile_cnn_model()
# Keras' `model.fit()` trains the model for the specified number of epochs and
# number of steps per epoch. Note that the numbers here are for demonstration
# purposes only and may not be sufficient to produce a model of good quality.
# Note: pass x=train_datasets_no_auto_shard, not x=train_datasets.
if __name__ == "__main__":
EPOCHS = 5
hist = multi_worker_model.fit(x=train_datasets_no_auto_shard, epochs=EPOCHS, steps_per_epoch=5)
for i in range(EPOCHS):
submarine.log_metric("val_loss", hist.history["loss"][i], i)
submarine.log_metric("Val_accuracy", hist.history["accuracy"][i], i)
|
e0deb5ad8dd59f1f057d4d5a84496f91f8efa6e5
|
ac35c35de85d9b8763d51a0e8e91274f20064cb5
|
/kb_python/count.py
|
f15bfff161c2158c07649027aace79681d00ee29
|
[
"BSD-2-Clause"
] |
permissive
|
pachterlab/kb_python
|
b1b184ce01cca50b9986a3bf9be8c42faddafd55
|
14a18ef36a943160378ccbdf12a0fd6b6b84e449
|
refs/heads/master
| 2023-09-04T12:35:24.023580
| 2022-09-19T16:11:40
| 2022-09-19T16:11:40
| 215,165,758
| 122
| 24
|
BSD-2-Clause
| 2023-07-25T22:42:54
| 2019-10-14T23:50:12
|
Python
|
UTF-8
|
Python
| false
| false
| 75,350
|
py
|
count.py
|
import os
import re
from typing import Dict, List, Optional, Union
from urllib.parse import urlparse
import scipy.io
from typing_extensions import Literal
from .config import get_bustools_binary_path, get_kallisto_binary_path
from .constants import (
ABUNDANCE_FILENAME,
ABUNDANCE_GENE_FILENAME,
ABUNDANCE_GENE_TPM_FILENAME,
ABUNDANCE_TPM_FILENAME,
ADATA_PREFIX,
BUS_CDNA_PREFIX,
BUS_FILENAME,
BUS_INTRON_PREFIX,
CAPTURE_FILENAME,
CELLRANGER_BARCODES,
CELLRANGER_DIR,
CELLRANGER_GENES,
CELLRANGER_MATRIX,
CORRECT_CODE,
COUNTS_PREFIX,
ECMAP_FILENAME,
FEATURE_NAME,
FEATURE_PREFIX,
FILTER_WHITELIST_FILENAME,
FILTERED_CODE,
FILTERED_COUNTS_DIR,
FLD_FILENAME,
FLENS_FILENAME,
GENE_NAME,
GENES_FILENAME,
INSPECT_FILENAME,
INSPECT_INTERNAL_FILENAME,
INSPECT_UMI_FILENAME,
INTERNAL_SUFFIX,
KALLISTO_INFO_FILENAME,
KB_INFO_FILENAME,
PROJECT_CODE,
REPORT_HTML_FILENAME,
REPORT_NOTEBOOK_FILENAME,
SAVED_INDEX_FILENAME,
SORT_CODE,
TCC_PREFIX,
TRANSCRIPT_NAME,
TXNAMES_FILENAME,
UMI_SUFFIX,
UNFILTERED_CODE,
UNFILTERED_COUNTS_DIR,
UNFILTERED_QUANT_DIR,
WHITELIST_FILENAME,
)
from .dry import dryable
from .dry import count as dry_count
from .logging import logger
from .report import render_report
from .utils import (
copy_whitelist,
create_10x_feature_barcode_map,
get_temporary_filename,
import_matrix_as_anndata,
import_tcc_matrix_as_anndata,
make_directory,
open_as_text,
overlay_anndatas,
read_t2g,
remove_directory,
run_executable,
stream_file,
sum_anndatas,
update_filename,
whitelist_provided,
)
from .stats import STATS
from .validate import validate_files
INSPECT_PARSER = re.compile(r'^.*?(?P<count>[0-9]+)')
@validate_files()
def kallisto_bus(
fastqs: Union[List[str], str],
index_path: str,
technology: str,
out_dir: str,
threads: int = 8,
n: bool = False,
k: bool = False,
paired: bool = False,
strand: Optional[Literal['unstranded', 'forward', 'reverse']] = None,
) -> Dict[str, str]:
"""Runs `kallisto bus`.
Args:
fastqs: List of FASTQ file paths, or a single path to a batch file
index_path: Path to kallisto index
technology: Single-cell technology used
out_dir: Path to output directory
threads: Number of threads to use, defaults to `8`
n: Include number of reads in flag column (used when splitting indices),
defaults to `False`
k: Alignment is done per k-mer (used when splitting indices),
defaults to `False`
paired: Whether or not to supply the `--paired` flag, only used for
bulk and smartseq2 samples, defaults to `False`
strand: Strandedness, defaults to `None`
Returns:
Dictionary containing paths to generated files
"""
logger.info(
f'Using index {index_path} to generate BUS file to {out_dir} from'
)
results = {
'bus': os.path.join(out_dir, BUS_FILENAME),
'ecmap': os.path.join(out_dir, ECMAP_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
'info': os.path.join(out_dir, KALLISTO_INFO_FILENAME)
}
is_batch = isinstance(fastqs, str)
for fastq in [fastqs] if is_batch else fastqs:
logger.info((' ' * 8) + fastq)
command = [get_kallisto_binary_path(), 'bus']
command += ['-i', index_path]
command += ['-o', out_dir]
if not is_batch:
command += ['-x', technology]
command += ['-t', threads]
if n:
command += ['--num']
if k:
command += ['--kmer']
if paired:
command += ['--paired']
results['flens'] = os.path.join(out_dir, FLENS_FILENAME)
if strand == 'unstranded':
command += ['--unstranded']
elif strand == 'forward':
command += ['--fr-stranded']
elif strand == 'reverse':
command += ['--rf-stranded']
if is_batch:
command += ['--batch', fastqs]
else:
command += fastqs
run_executable(command)
if technology.upper() in ('BULK', 'SMARTSEQ3'):
results['saved_index'] = os.path.join(out_dir, SAVED_INDEX_FILENAME)
return results
@validate_files(pre=False)
def kallisto_quant_tcc(
mtx_path: str,
saved_index_path: str,
ecmap_path: str,
t2g_path: str,
out_dir: str,
flens_path: Optional[str] = None,
l: Optional[int] = None,
s: Optional[int] = None,
threads: int = 8,
) -> Dict[str, str]:
"""Runs `kallisto quant-tcc`.
Args:
mtx_path: Path to counts matrix
saved_index_path: Path to index.saved
ecmap_path: Path to ecmap
t2g_path: Path to T2G
out_dir: Output directory path
flens_path: Path to flens.txt, defaults to `None`
l: Mean fragment length, defaults to `None`
s: Standard deviation of fragment length, defaults to `None`
threads: Number of threads to use, defaults to `8`
Returns:
Dictionary containing path to output files
"""
logger.info(
f'Quantifying transcript abundances to {out_dir} from mtx file {mtx_path}'
)
command = [get_kallisto_binary_path(), 'quant-tcc']
command += ['-o', out_dir]
command += ['-i', saved_index_path]
command += ['-e', ecmap_path]
command += ['-g', t2g_path]
command += ['-t', threads]
if flens_path:
command += ['-f', flens_path]
if l:
command += ['-l', l]
if s:
command += ['-s', s]
command += [mtx_path]
run_executable(command)
return {
'genes': os.path.join(out_dir, GENES_FILENAME),
'gene_mtx': os.path.join(out_dir, ABUNDANCE_GENE_FILENAME),
'gene_tpm_mtx': os.path.join(out_dir, ABUNDANCE_GENE_TPM_FILENAME),
'mtx': os.path.join(out_dir, ABUNDANCE_FILENAME),
'tpm_mtx': os.path.join(out_dir, ABUNDANCE_TPM_FILENAME),
'fld': os.path.join(out_dir, FLD_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
}
@validate_files(pre=False)
def bustools_project(
bus_path: str, out_path: str, map_path: str, ecmap_path: str,
txnames_path: str
) -> Dict[str, str]:
"""Runs `bustools project`.
Args:
bus_path: Path to BUS file to project
out_path: Path to output BUS file
map_path: Path to file containing source-to-destination mapping
ecmap_path: Path to ecmap file, as generated by `kallisto bus`
txnames_path: Path to transcript names file, as generated by `kallisto bus`
Returns:
Dictionary containing path to generated BUS file
"""
logger.info('Projecting BUS file {} with map {}'.format(bus_path, map_path))
command = [get_bustools_binary_path(), 'project']
command += ['-o', out_path]
command += ['-m', map_path]
command += ['-e', ecmap_path]
command += ['-t', txnames_path]
command += ['--barcode']
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_sort(
bus_path: str,
out_path: str,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
flags: bool = False,
) -> Dict[str, str]:
"""Runs `bustools sort`.
Args:
bus_path: Path to BUS file to sort
out_path: Path to output BUS file
temp_dir: Path to temporary directory, defaults to `tmp`
threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
flags: Whether to supply the `--flags` argument to sort, defaults to
`False`
Returns:
Dictionary containing path to sorted BUS file
"""
logger.info('Sorting BUS file {} to {}'.format(bus_path, out_path))
command = [get_bustools_binary_path(), 'sort']
command += ['-o', out_path]
command += ['-T', temp_dir]
command += ['-t', threads]
command += ['-m', memory]
if flags:
command += ['--flags']
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_inspect(
bus_path: str,
out_path: str,
whitelist_path: Optional[str] = None,
ecmap_path: Optional[str] = None,
) -> Dict[str, str]:
"""Runs `bustools inspect`.
Args:
bus_path: Path to BUS file to inspect
out_path: Path to output inspect JSON file
whitelist_path: Path to whitelist
ecmap_path: Path to ecmap file, as generated by `kallisto bus`
Returns:
Dictionary containing path to generated inspect JSON file
"""
logger.info('Inspecting BUS file {}'.format(bus_path))
command = [get_bustools_binary_path(), 'inspect']
command += ['-o', out_path]
if whitelist_path:
command += ['-w', whitelist_path]
if ecmap_path:
command += ['-e', ecmap_path]
command += [bus_path]
run_executable(command)
return {'inspect': out_path}
@validate_files(pre=False)
def bustools_correct(bus_path: str, out_path: str,
whitelist_path: str) -> Dict[str, str]:
"""Runs `bustools correct`.
Args:
bus_path: Path to BUS file to correct
out_path: Path to output corrected BUS file
whitelist_path: Path to whitelist
Returns:
Dictionary containing path to corrected BUS file
"""
logger.info(
'Correcting BUS records in {} to {} with whitelist {}'.format(
bus_path, out_path, whitelist_path
)
)
command = [get_bustools_binary_path(), 'correct']
command += ['-o', out_path]
command += ['-w', whitelist_path]
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_count(
bus_path: str,
out_prefix: str,
t2g_path: str,
ecmap_path: str,
txnames_path: str,
tcc: bool = False,
mm: bool = False,
cm: bool = False,
umi_gene: bool = False,
em: bool = False,
) -> Dict[str, str]:
"""Runs `bustools count`.
Args:
bus_path: Path to BUS file to generate the count matrix from
out_prefix: Prefix of the output files to generate
t2g_path: Path to transcript-to-gene mapping
ecmap_path: Path to ecmap file, as generated by `kallisto bus`
txnames_path: Path to transcript names file, as generated by `kallisto bus`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
cm: Count multiplicities instead of UMIs. Used for chemistries
without UMIs, such as bulk and Smartseq2, defaults to `False`
umi_gene: Whether to use genes to deduplicate UMIs, defaults to `False`
em: Whether to estimate gene abundances using the EM algorithm, defaults
to `False`
Returns:
Dictionary containing paths to generated count matrix files
"""
logger.info(
f'Generating count matrix {out_prefix} from BUS file {bus_path}'
)
command = [get_bustools_binary_path(), 'count']
command += ['-o', out_prefix]
command += ['-g', t2g_path]
command += ['-e', ecmap_path]
command += ['-t', txnames_path]
if not tcc:
command += ['--genecounts']
if mm:
command += ['--multimapping']
if cm:
command += ['--cm']
if umi_gene:
command += ['--umi-gene']
if em:
command += ['--em']
command += [bus_path]
# There is currently a bug where, if a directory with the same path as
# `out_prefix` exists, the matrix is named incorrectly. To work around this,
# manually detect and remove such a directory should it exist.
if os.path.isdir(out_prefix):
remove_directory(out_prefix)
run_executable(command)
return {
'mtx':
f'{out_prefix}.mtx',
'ec' if tcc else 'genes':
f'{out_prefix}.ec.txt' if tcc else f'{out_prefix}.genes.txt',
'barcodes':
f'{out_prefix}.barcodes.txt',
}
@validate_files(pre=False)
def bustools_capture(
bus_path: str,
out_path: str,
capture_path: str,
ecmap_path: Optional[str] = None,
txnames_path: Optional[str] = None,
capture_type: Literal['transcripts', 'umis', 'barcode'] = 'transcripts',
complement: bool = True,
) -> Dict[str, str]:
"""Runs `bustools capture`.
Args:
bus_path: Path to BUS file to capture
out_path: Path to BUS file to generate
capture_path: Path to transcripts-to-capture list
ecmap_path: Path to ecmap file, as generated by `kallisto bus`
txnames_path: Path to transcript names file, as generated by `kallisto bus`
capture_type: The type of information in the capture list. Can be one of
`transcripts`, `umis`, `barcode`.
complement: Whether or not to complement, defaults to `True`
Returns:
Dictionary containing path to generated BUS file
"""
logger.info(
f'Capturing records from BUS file {bus_path} to {out_path} with capture list {capture_path}'
)
command = [get_bustools_binary_path(), 'capture']
command += ['-o', out_path]
command += ['-c', capture_path]
if ecmap_path:
command += ['-e', ecmap_path]
if txnames_path:
command += ['-t', txnames_path]
if complement:
command += ['--complement']
command += ['--{}'.format(capture_type)]
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_whitelist(
bus_path: str,
out_path: str,
threshold: Optional[int] = None
) -> Dict[str, str]:
"""Runs `bustools whitelist`.
Args:
bus_path: Path to BUS file to generate the whitelist from
out_path: Path to output whitelist
threshold: Barcode threshold to be included in whitelist
Returns:
Dictionary containing path to generated whitelist
"""
logger.info(
'Generating whitelist {} from BUS file {}'.format(out_path, bus_path)
)
command = [get_bustools_binary_path(), 'whitelist']
command += ['-o', out_path]
if threshold:
command += ['--threshold', threshold]
command += [bus_path]
run_executable(command)
return {'whitelist': out_path}
def matrix_to_cellranger(
matrix_path: str, barcodes_path: str, genes_path: str, t2g_path: str,
out_dir: str
) -> Dict[str, str]:
"""Convert bustools count matrix to cellranger-format matrix.
Args:
matrix_path: Path to matrix
barcodes_path: Path to barcodes.txt
genes_path: Path to genes.txt
t2g_path: Path to transcript-to-gene mapping
out_dir: Path to output matrix
Returns:
Dictionary of matrix files
"""
make_directory(out_dir)
logger.info(f'Writing matrix in cellranger format to {out_dir}')
cr_matrix_path = os.path.join(out_dir, CELLRANGER_MATRIX)
cr_barcodes_path = os.path.join(out_dir, CELLRANGER_BARCODES)
cr_genes_path = os.path.join(out_dir, CELLRANGER_GENES)
# Cellranger outputs genes x cells matrix
mtx = scipy.io.mmread(matrix_path)
scipy.io.mmwrite(cr_matrix_path, mtx.T, field='integer')
with open(barcodes_path, 'r') as f, open(cr_barcodes_path, 'w') as out:
for line in f:
if line.isspace():
continue
out.write(f'{line.strip()}-1\n')
# Get all (available) gene names
gene_to_name = {}
with open(t2g_path, 'r') as f:
for line in f:
if line.isspace():
continue
split = line.strip().split('\t')
if len(split) > 2:
gene_to_name[split[1]] = split[2]
with open(genes_path, 'r') as f, open(cr_genes_path, 'w') as out:
for line in f:
if line.isspace():
continue
gene = line.strip()
gene_name = gene_to_name.get(gene, gene)
out.write(f'{gene}\t{gene_name}\n')
return {
'mtx': cr_matrix_path,
'barcodes': cr_barcodes_path,
'genes': cr_genes_path
}
def convert_matrix(
counts_dir: str,
matrix_path: str,
barcodes_path: str,
genes_path: Optional[str] = None,
ec_path: Optional[str] = None,
t2g_path: Optional[str] = None,
txnames_path: Optional[str] = None,
name: str = 'gene',
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
tcc: bool = False,
threads: int = 8,
) -> Dict[str, str]:
"""Convert a gene count or TCC matrix to loom or h5ad.
Args:
counts_dir: Path to counts directory
matrix_path: Path to matrix
barcodes_path: Path to barcodes.txt
genes_path: Path to genes.txt, defaults to `None`
ec_path: Path to ec.txt, defaults to `None`
t2g_path: Path to transcript-to-gene mapping. If this is provided,
the third column of the mapping is appended to the anndata var,
defaults to `None`
txnames_path: Path to transcripts.txt, defaults to `None`
name: Name of the columns, defaults to "gene"
loom: Whether to generate loom file, defaults to `False`
h5ad: Whether to generate h5ad file, defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
tcc: Whether the matrix is a TCC matrix, defaults to `False`
threads: Number of threads to use, defaults to `8`
Returns:
Dictionary of generated files
"""
results = {}
logger.info(f'Reading matrix {matrix_path}')
adata = import_tcc_matrix_as_anndata(
matrix_path, barcodes_path, ec_path, txnames_path, threads=threads
) if tcc else import_matrix_as_anndata(
matrix_path,
barcodes_path,
genes_path,
t2g_path=t2g_path,
name=name,
by_name=by_name
)
if loom:
loom_path = os.path.join(counts_dir, f'{ADATA_PREFIX}.loom')
logger.info(f'Writing matrix to loom {loom_path}')
adata.write_loom(loom_path)
results.update({'loom': loom_path})
if h5ad:
h5ad_path = os.path.join(counts_dir, f'{ADATA_PREFIX}.h5ad')
logger.info(f'Writing matrix to h5ad {h5ad_path}')
adata.write(h5ad_path)
results.update({'h5ad': h5ad_path})
return results
def convert_matrices(
counts_dir: str,
matrix_paths: List[str],
barcodes_paths: List[str],
genes_paths: Optional[List[str]] = None,
ec_paths: Optional[List[str]] = None,
t2g_path: Optional[str] = None,
txnames_path: Optional[str] = None,
name: str = 'gene',
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
nucleus: bool = False,
tcc: bool = False,
threads: int = 8,
) -> Dict[str, str]:
"""Convert a gene count or TCC matrix to loom or h5ad.
Args:
counts_dir: Path to counts directory
matrix_paths: List of paths to matrices
barcodes_paths: List of paths to barcodes.txt
genes_paths: List of paths to genes.txt, defaults to `None`
ec_paths: List of path to ec.txt, defaults to `None`
t2g_path: Path to transcript-to-gene mapping. If this is provided,
the third column of the mapping is appended to the anndata var,
defaults to `None`
txnames_path: Path to transcripts.txt, defaults to `None`
name: Name of the columns, defaults to "gene"
loom: Whether to generate loom file, defaults to `False`
h5ad: Whether to generate h5ad file, defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
nucleus: Whether the matrices contain single nucleus counts, defaults to `False`
tcc: Whether the matrix is a TCC matrix, defaults to `False`
threads: Number of threads to use, defaults to `8`
Returns:
Dictionary of generated files
"""
results = {}
adatas = []
matrix_paths = matrix_paths or []
barcodes_paths = barcodes_paths or []
genes_paths = genes_paths or []
ec_paths = ec_paths or []
for matrix_path, barcodes_path, genes_ec_path in zip(
matrix_paths, barcodes_paths, ec_paths
if not genes_paths or None in genes_paths else genes_paths):
logger.info(f'Reading matrix {matrix_path}')
adatas.append(
import_tcc_matrix_as_anndata(
matrix_path,
barcodes_path,
genes_ec_path,
txnames_path,
threads=threads
) if tcc else import_matrix_as_anndata(
matrix_path,
barcodes_path,
genes_ec_path,
t2g_path=t2g_path,
name=name,
by_name=by_name
)
)
logger.info('Combining matrices')
adata = sum_anndatas(*adatas) if nucleus else overlay_anndatas(*adatas)
if loom:
loom_path = os.path.join(counts_dir, f'{ADATA_PREFIX}.loom')
logger.info(f'Writing matrices to loom {loom_path}')
adata.write_loom(loom_path)
results.update({'loom': loom_path})
if h5ad:
h5ad_path = os.path.join(counts_dir, f'{ADATA_PREFIX}.h5ad')
logger.info(f'Writing matrices to h5ad {h5ad_path}')
adata.write(h5ad_path)
results.update({'h5ad': h5ad_path})
return results
def filter_with_bustools(
bus_path: str,
ecmap_path: str,
txnames_path: str,
t2g_path: str,
whitelist_path: str,
filtered_bus_path: str,
filter_threshold: Optional[int] = None,
counts_prefix: Optional[str] = None,
tcc: bool = False,
mm: bool = False,
kite: bool = False,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
count: bool = True,
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
cellranger: bool = False,
umi_gene: bool = False,
em: bool = False,
) -> Dict[str, str]:
"""Generate filtered count matrices with bustools.
Args:
bus_path: Path to sorted, corrected, sorted BUS file
ecmap_path: Path to matrix ec file
txnames_path: Path to list of transcripts
t2g_path: Path to transcript-to-gene mapping
whitelist_path: Path to filter whitelist to generate
filtered_bus_path: Path to filtered BUS file to generate
filter_threshold: Barcode filter threshold for bustools, defaults
to `None`
counts_prefix: Prefix of count matrix, defaults to `None`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
kite: Whether this is a KITE workflow
temp_dir: Path to temporary directory, defaults to `tmp`
threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
count: Whether to run `bustools count`, defaults to `True`
loom: Whether to convert the final count matrix into a loom file,
defaults to `False`
h5ad: Whether to convert the final count matrix into a h5ad file,
defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
cellranger: Whether to convert the final count matrix into a
cellranger-compatible matrix, defaults to `False`
umi_gene: Whether to perform gene-level UMI collapsing, defaults to
`False`
em: Whether to estimate gene abundances using EM algorithm, defaults to
`False`
Returns:
Dictionary of generated files
"""
logger.info('Filtering with bustools')
results = {}
whitelist_result = bustools_whitelist(
bus_path, whitelist_path, threshold=filter_threshold
)
results.update(whitelist_result)
correct_result = bustools_correct(
bus_path,
os.path.join(
temp_dir, update_filename(os.path.basename(bus_path), CORRECT_CODE)
),
whitelist_result['whitelist'],
)
sort_result = bustools_sort(
correct_result['bus'],
filtered_bus_path,
temp_dir=temp_dir,
threads=threads,
memory=memory,
)
results.update({'bus_scs': sort_result['bus']})
if count:
counts_dir = os.path.dirname(counts_prefix)
make_directory(counts_dir)
count_result = bustools_count(
sort_result['bus'],
counts_prefix,
t2g_path,
ecmap_path,
txnames_path,
tcc=tcc,
mm=mm,
umi_gene=umi_gene,
em=em,
)
results.update(count_result)
if loom or h5ad:
results.update(
convert_matrix(
counts_dir,
count_result['mtx'],
count_result['barcodes'],
genes_path=count_result.get('genes'),
t2g_path=t2g_path,
ec_path=count_result.get('ec'),
txnames_path=txnames_path,
name=FEATURE_NAME if kite else GENE_NAME,
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=tcc,
threads=threads
)
)
if cellranger:
if not tcc:
cr_result = matrix_to_cellranger(
count_result['mtx'], count_result['barcodes'],
count_result['genes'], t2g_path,
os.path.join(counts_dir, CELLRANGER_DIR)
)
results.update({'cellranger': cr_result})
else:
logger.warning(
'TCC matrices can not be converted to cellranger-compatible format.'
)
return results
def stream_fastqs(fastqs: List[str], temp_dir: str = 'tmp') -> List[str]:
"""Given a list of fastqs (that may be local or remote paths), stream any
remote files. Internally, calls `stream_file` from utils for each remote path.
Args:
fastqs: List of (remote or local) fastq paths
temp_dir: Temporary directory
Returns:
All remote paths substituted with a local path
"""
return [
stream_file(fastq, os.path.join(temp_dir, os.path.basename(fastq)))
if urlparse(fastq).scheme in ('http', 'https', 'ftp', 'ftps') else fastq
for fastq in fastqs
]
@dryable(dry_count.stream_batch)
def stream_batch(batch_path: str, temp_dir: str = 'tmp') -> str:
"""Given a path to a batch file, produce a new batch file where all the
remote FASTQs are being streamed.
Args:
batch_path: Path to a batch file listing (remote or local) fastq paths
temp_dir: Temporary directory
Returns:
New batch file with all remote paths substituted with a local path
"""
new_batch_path = get_temporary_filename(temp_dir)
with open(batch_path, 'r') as f_in, open(new_batch_path, 'w') as f_out:
for line in f_in:
if line.isspace() or line.startswith('#'):
continue
sep = '\t' if '\t' in line else ' '
split = line.strip().split(sep)
name = split[0]
fastqs = stream_fastqs(split[1:])
f_out.write(f'{name}\t' + '\t'.join(fastqs) + '\n')
return new_batch_path
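# For illustration, a batch file line handled above is expected to look like
# the following (tab- or space-separated; the sample name and paths are
# hypothetical):
#
# sample1    /data/sample1_R1.fastq.gz    /data/sample1_R2.fastq.gz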
def copy_or_create_whitelist(
technology: str, bus_path: str, out_dir: str
) -> str:
"""Copies a pre-packaged whitelist if it is provided. Otherwise, runs
`bustools whitelist` to generate a whitelist.
Args:
technology: Single-cell technology used
bus_path: Path to BUS file to generate the whitelist from
out_dir: Path to output directory
Returns:
Path to copied or generated whitelist
"""
if whitelist_provided(technology):
logger.info(
'Copying pre-packaged {} whitelist to {}'.format(
technology.upper(), out_dir
)
)
return copy_whitelist(technology, out_dir)
else:
return bustools_whitelist(
bus_path, os.path.join(out_dir, WHITELIST_FILENAME)
)['whitelist']
def convert_transcripts_to_genes(
txnames_path: str, t2g_path: str, genes_path: str
) -> str:
"""Convert a textfile containing transcript IDs to another textfile containing
gene IDs, given a transcript-to-gene mapping.
Args:
txnames_path: Path to transcripts.txt
t2g_path: Path to transcript-to-genes mapping
genes_path: Path to output genes.txt
Returns:
Path to written genes.txt
"""
t2g = read_t2g(t2g_path)
with open_as_text(txnames_path, 'r') as f, open_as_text(genes_path,
'w') as out:
for line in f:
if line.isspace():
continue
transcript = line.strip()
if transcript not in t2g:
logger.warning(
f'Transcript {transcript} was found in {txnames_path} but not in {t2g_path}. '
'This transcript will not be converted to a gene.'
)
attributes = t2g.get(transcript)
if attributes:
out.write(f'{attributes[0]}\n')
else:
out.write(f'{transcript}\n')
return genes_path
@dryable(dry_count.write_smartseq3_capture)
def write_smartseq3_capture(capture_path: str) -> str:
"""Write the capture sequence for smartseq3.
Args:
capture_path: Path to write the capture sequence
Returns:
Path to written file
"""
with open(capture_path, 'w') as f:
f.write(('T' * 32) + '\n')
return capture_path
@logger.namespaced('count')
def count(
index_path: str,
t2g_path: str,
technology: str,
out_dir: str,
fastqs: List[str],
whitelist_path: Optional[str] = None,
tcc: bool = False,
mm: bool = False,
filter: Optional[Literal['bustools']] = None,
filter_threshold: Optional[int] = None,
kite: bool = False,
FB: bool = False,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
overwrite: bool = False,
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
cellranger: bool = False,
inspect: bool = True,
report: bool = False,
fragment_l: Optional[int] = None,
fragment_s: Optional[int] = None,
paired: bool = False,
strand: Optional[Literal['unstranded', 'forward', 'reverse']] = None,
umi_gene: bool = False,
em: bool = False,
) -> Dict[str, Union[str, Dict[str, str]]]:
"""Generates count matrices for single-cell RNA seq.
Args:
index_path: Path to kallisto index
t2g_path: Path to transcript-to-gene mapping
technology: Single-cell technology used
out_dir: Path to output directory
fastqs: List of FASTQ file paths or a single batch definition file
whitelist_path: Path to whitelist, defaults to `None`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
filter: Filter to use to generate a filtered count matrix,
defaults to `None`
filter_threshold: Barcode filter threshold for bustools, defaults
to `None`
kite: Whether this is a KITE workflow
FB: Whether 10x Genomics Feature Barcoding technology was used,
defaults to `False`
temp_dir: Path to temporary directory, defaults to `tmp`
threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
overwrite: Overwrite an existing index file, defaults to `False`
loom: Whether to convert the final count matrix into a loom file,
defaults to `False`
h5ad: Whether to convert the final count matrix into a h5ad file,
defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
cellranger: Whether to convert the final count matrix into a
cellranger-compatible matrix, defaults to `False`
inspect: Whether or not to inspect the output BUS file and generate
the inspect.json
report: Generate an HTMl report, defaults to `False`
fragment_l: Mean length of fragments, defaults to `None`
fragment_s: Standard deviation of fragment lengths, defaults to `None`
paired: Whether the fastqs are paired. Has no effect when a single
batch file is provided. Defaults to `False`
strand: Strandedness, defaults to `None`
umi_gene: Whether to perform gene-level UMI collapsing, defaults to
`False`
em: Whether to estimate gene abundances using EM algorithm,
defaults to `False`
Returns:
Dictionary containing paths to generated files
"""
STATS.start()
is_batch = isinstance(fastqs, str)
results = {}
make_directory(out_dir)
unfiltered_results = results.setdefault('unfiltered', {})
bus_result = {
'bus': os.path.join(out_dir, BUS_FILENAME),
'ecmap': os.path.join(out_dir, ECMAP_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
'info': os.path.join(out_dir, KALLISTO_INFO_FILENAME)
}
if paired:
bus_result['flens'] = os.path.join(out_dir, FLENS_FILENAME)
if technology.upper() in ('BULK', 'SMARTSEQ2'):
bus_result['saved_index'] = os.path.join(out_dir, SAVED_INDEX_FILENAME)
if any(not os.path.exists(path)
for name, path in bus_result.items()) or overwrite:
_technology = 'BULK' if technology.upper(
) == 'SMARTSEQ2' else technology
# Pipe any remote files.
fastqs = stream_batch(
fastqs, temp_dir=temp_dir
) if is_batch else stream_fastqs(
fastqs, temp_dir=temp_dir
)
bus_result = kallisto_bus(
fastqs,
index_path,
_technology,
out_dir,
threads=threads,
paired=paired,
strand=strand,
)
else:
logger.info(
'Skipping kallisto bus because output files already exist. Use the --overwrite flag to overwrite.'
)
unfiltered_results.update(bus_result)
sort_result = bustools_sort(
bus_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(bus_result['bus']), SORT_CODE)
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
if not whitelist_path and not is_batch:
logger.info('Whitelist not provided')
whitelist_path = copy_or_create_whitelist(
technology if not FB else '10xFB', sort_result['bus'], out_dir
)
unfiltered_results.update({'whitelist': whitelist_path})
prev_result = sort_result
if inspect:
inspect_result = bustools_inspect(
prev_result['bus'],
os.path.join(out_dir, INSPECT_FILENAME),
whitelist_path=whitelist_path,
)
unfiltered_results.update(inspect_result)
if not is_batch:
prev_result = bustools_correct(
prev_result['bus'],
os.path.join(
temp_dir,
update_filename(
os.path.basename(prev_result['bus']), CORRECT_CODE
)
), whitelist_path
)
prev_result = bustools_sort(
prev_result['bus'],
os.path.join(out_dir, f'output.{UNFILTERED_CODE}.bus')
if not FB else os.path.join(
temp_dir,
update_filename(
os.path.basename(prev_result['bus']), SORT_CODE
)
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
if FB:
logger.info(
f'Creating {technology} feature-to-barcode map at {out_dir}'
)
map_path = create_10x_feature_barcode_map(
os.path.join(out_dir, '10x_feature_barcode_map.txt')
)
prev_result = bustools_project(
prev_result['bus'],
os.path.join(
temp_dir,
update_filename(
os.path.basename(prev_result['bus']), PROJECT_CODE
)
), map_path, bus_result['ecmap'], bus_result['txnames']
)
prev_result = bustools_sort(
prev_result['bus'],
os.path.join(out_dir, f'output.{UNFILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
unfiltered_results.update({'bus_scs': prev_result['bus']})
counts_dir = os.path.join(out_dir, UNFILTERED_COUNTS_DIR)
make_directory(counts_dir)
counts_prefix = os.path.join(
counts_dir,
TCC_PREFIX if tcc else FEATURE_PREFIX if kite else COUNTS_PREFIX
)
cm = technology.upper() in ('BULK', 'SMARTSEQ2')
quant = cm and tcc
count_result = bustools_count(
prev_result['bus'],
counts_prefix,
t2g_path,
bus_result['ecmap'],
bus_result['txnames'],
tcc=tcc,
mm=mm or tcc,
cm=cm,
umi_gene=umi_gene,
em=em,
)
unfiltered_results.update(count_result)
if quant:
quant_dir = os.path.join(out_dir, UNFILTERED_QUANT_DIR)
make_directory(quant_dir)
quant_result = kallisto_quant_tcc(
count_result['mtx'],
bus_result['saved_index'],
bus_result['ecmap'],
t2g_path,
quant_dir,
flens_path=bus_result.get('flens'),
l=fragment_l,
s=fragment_s,
threads=threads,
)
unfiltered_results.update(quant_result)
# Convert outputs.
final_result = quant_result if quant else count_result
if loom or h5ad:
name = GENE_NAME
if kite:
name = FEATURE_NAME
elif quant:
name = TRANSCRIPT_NAME
unfiltered_results.update(
convert_matrix(
quant_dir if quant else counts_dir,
final_result['mtx'],
count_result['barcodes'],
genes_path=final_result['txnames']
if quant else final_result.get('genes'),
t2g_path=t2g_path,
ec_path=count_result.get('ec'),
txnames_path=bus_result['txnames'],
name=name,
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=tcc and not quant,
threads=threads,
)
)
if cellranger:
cr_result = matrix_to_cellranger(
count_result['mtx'], count_result['barcodes'],
count_result['genes'], t2g_path,
os.path.join(counts_dir, CELLRANGER_DIR)
)
unfiltered_results.update({'cellranger': cr_result})
# NOTE: bulk/smartseq2 does not support filtering, so everything here
# assumes technology is not bulk/smartseq2
if filter == 'bustools':
filtered_counts_prefix = os.path.join(
out_dir, FILTERED_COUNTS_DIR,
TCC_PREFIX if tcc else FEATURE_PREFIX if kite else COUNTS_PREFIX
)
filtered_whitelist_path = os.path.join(
out_dir, FILTER_WHITELIST_FILENAME
)
filtered_bus_path = os.path.join(out_dir, f'output.{FILTERED_CODE}.bus')
results['filtered'] = filter_with_bustools(
prev_result['bus'],
bus_result['ecmap'],
bus_result['txnames'],
t2g_path,
filtered_whitelist_path,
filtered_bus_path,
filter_threshold=filter_threshold,
counts_prefix=filtered_counts_prefix,
kite=kite,
tcc=tcc,
temp_dir=temp_dir,
threads=threads,
memory=memory,
loom=loom,
h5ad=h5ad,
by_name=by_name,
umi_gene=umi_gene,
em=em,
)
# Generate report.
STATS.end()
stats_path = STATS.save(os.path.join(out_dir, KB_INFO_FILENAME))
results.update({'stats': stats_path})
if report:
nb_path = os.path.join(out_dir, REPORT_NOTEBOOK_FILENAME)
html_path = os.path.join(out_dir, REPORT_HTML_FILENAME)
logger.info(
f'Writing report Jupyter notebook at {nb_path} and rendering it to {html_path}'
)
report_result = render_report(
stats_path,
bus_result['info'],
inspect_result['inspect'],
nb_path,
html_path,
count_result['mtx'],
count_result.get('barcodes'),
count_result.get('genes'),
t2g_path,
temp_dir=temp_dir
)
unfiltered_results.update(report_result)
return results
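# A minimal usage sketch of the count() function above (illustrative only; the
# paths and the technology string are hypothetical and must be adapted):
#
# from kb_python.count import count
# results = count(
#     index_path='index.idx',
#     t2g_path='t2g.txt',
#     technology='10xv3',
#     out_dir='out',
#     fastqs=['R1.fastq.gz', 'R2.fastq.gz'],
#     threads=8,
# )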
@logger.namespaced('count_smartseq3')
def count_smartseq3(
index_path: str,
t2g_path: str,
out_dir: str,
fastqs: List[str],
whitelist_path: Optional[str] = None,
tcc: bool = False,
mm: bool = False,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
overwrite: bool = False,
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
inspect: bool = True,
strand: Optional[Literal['unstranded', 'forward', 'reverse']] = None,
) -> Dict[str, Union[str, Dict[str, str]]]:
"""Generates count matrices for Smartseq3.
Args:
index_path: Path to kallisto index
t2g_path: Path to transcript-to-gene mapping
out_dir: Path to output directory
fastqs: List of FASTQ file paths
whitelist_path: Path to whitelist, defaults to `None`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
temp_dir: Path to temporary directory, defaults to `tmp`
threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
overwrite: Overwrite an existing index file, defaults to `False`
loom: Whether to convert the final count matrix into a loom file,
defaults to `False`
h5ad: Whether to convert the final count matrix into a h5ad file,
defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
inspect: Whether or not to inspect the output BUS file and generate
the inspect.json
strand: Strandedness, defaults to `None`
Returns:
Dictionary containing paths to generated files
"""
STATS.start()
is_batch = isinstance(fastqs, str)
results = {}
make_directory(out_dir)
unfiltered_results = results.setdefault('unfiltered', {})
bus_result = {
'bus': os.path.join(out_dir, BUS_FILENAME),
'ecmap': os.path.join(out_dir, ECMAP_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
'info': os.path.join(out_dir, KALLISTO_INFO_FILENAME),
'flens': os.path.join(out_dir, FLENS_FILENAME),
'saved_index': os.path.join(out_dir, SAVED_INDEX_FILENAME)
}
if any(not os.path.exists(path)
for name, path in bus_result.items()) or overwrite:
# Pipe any remote files.
fastqs = stream_batch(
fastqs, temp_dir=temp_dir
) if is_batch else stream_fastqs(
fastqs, temp_dir=temp_dir
)
bus_result = kallisto_bus(
fastqs,
index_path,
'SMARTSEQ3',
out_dir,
threads=threads,
paired=True,
strand=strand,
)
else:
logger.info(
'Skipping kallisto bus because output files already exist. Use the --overwrite flag to overwrite.'
)
unfiltered_results.update(bus_result)
sort_result = bustools_sort(
bus_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(bus_result['bus']), SORT_CODE)
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
if not whitelist_path:
logger.info('Whitelist not provided')
whitelist_path = copy_or_create_whitelist(
'SMARTSEQ3', sort_result['bus'], out_dir
)
unfiltered_results.update({'whitelist': whitelist_path})
prev_result = sort_result
if inspect:
inspect_result = bustools_inspect(
prev_result['bus'],
os.path.join(out_dir, INSPECT_FILENAME),
whitelist_path=whitelist_path,
)
unfiltered_results.update(inspect_result)
prev_result = bustools_correct(
prev_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(prev_result['bus']), CORRECT_CODE)
), whitelist_path
)
prev_result = bustools_sort(
prev_result['bus'],
os.path.join(out_dir, f'output.{UNFILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
unfiltered_results.update({'bus_scs': prev_result['bus']})
# Helper function to update results with suffix
def update_results_with_suffix(current_results, new_results, suffix):
current_results.update({
f'{key}{suffix}': value
for key, value in new_results.items()
})
# Write capture file & capture internal/umi records.
capture_path = write_smartseq3_capture(
os.path.join(out_dir, CAPTURE_FILENAME)
)
suffix_to_inspect_filename = {
INTERNAL_SUFFIX: INSPECT_INTERNAL_FILENAME,
UMI_SUFFIX: INSPECT_UMI_FILENAME,
}
for suffix, inspect_filename in suffix_to_inspect_filename.items():
capture_result = bustools_capture(
prev_result['bus'],
os.path.join(out_dir, f'output{suffix}.bus'),
capture_path,
capture_type='umis',
complement=suffix == UMI_SUFFIX
)
update_results_with_suffix(unfiltered_results, capture_result, suffix)
if inspect:
inspect_result = bustools_inspect(
capture_result['bus'],
os.path.join(out_dir, inspect_filename),
whitelist_path=whitelist_path,
)
update_results_with_suffix(
unfiltered_results, inspect_result, suffix
)
counts_dir = os.path.join(out_dir, f'{UNFILTERED_COUNTS_DIR}{suffix}')
make_directory(counts_dir)
counts_prefix = os.path.join(
counts_dir, TCC_PREFIX if tcc else COUNTS_PREFIX
)
count_result = bustools_count(
capture_result['bus'],
counts_prefix,
t2g_path,
bus_result['ecmap'],
bus_result['txnames'],
tcc=tcc,
mm=mm or tcc,
cm=suffix == INTERNAL_SUFFIX,
umi_gene=suffix == UMI_SUFFIX
)
update_results_with_suffix(unfiltered_results, count_result, suffix)
if tcc:
quant_dir = os.path.join(out_dir, f'{UNFILTERED_QUANT_DIR}{suffix}')
make_directory(quant_dir)
quant_result = kallisto_quant_tcc(
count_result['mtx'],
bus_result['saved_index'],
bus_result['ecmap'],
t2g_path,
quant_dir,
flens_path=bus_result['flens'],
threads=threads,
)
update_results_with_suffix(unfiltered_results, quant_result, suffix)
if loom or h5ad:
name = GENE_NAME
if tcc:
name = TRANSCRIPT_NAME
result = quant_result if tcc else count_result
convert_result = convert_matrix(
quant_dir if tcc else counts_dir,
result['mtx'],
count_result['barcodes'],
genes_path=result['txnames'] if tcc else result.get('genes'),
t2g_path=t2g_path,
ec_path=count_result.get('ec'),
txnames_path=bus_result['txnames'],
name=name,
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=False,
threads=threads
)
update_results_with_suffix(
unfiltered_results, convert_result, suffix
)
STATS.end()
stats_path = STATS.save(os.path.join(out_dir, KB_INFO_FILENAME))
results.update({'stats': stats_path})
return results
@logger.namespaced('count_lamanno')
def count_velocity(
index_path: str,
t2g_path: str,
cdna_t2c_path: str,
intron_t2c_path: str,
technology: str,
out_dir: str,
fastqs: List[str],
whitelist_path: Optional[str] = None,
tcc: bool = False,
mm: bool = False,
filter: Optional[Literal['bustools']] = None,
filter_threshold: Optional[int] = None,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
overwrite: bool = False,
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
cellranger: bool = False,
inspect: bool = True,
report: bool = False,
nucleus: bool = False,
fragment_l: Optional[int] = None,
fragment_s: Optional[int] = None,
paired: bool = False,
strand: Optional[Literal['unstranded', 'forward', 'reverse']] = None,
umi_gene: bool = False,
em: bool = False,
) -> Dict[str, Union[Dict[str, str], str]]:
"""Generates RNA velocity matrices for single-cell RNA seq.
Args:
index_path: Path to kallisto index
t2g_path: Path to transcript-to-gene mapping
cdna_t2c_path: Path to cDNA transcripts-to-capture file
intron_t2c_path: Path to intron transcripts-to-capture file
technology: Single-cell technology used
out_dir: Path to output directory
fastqs: List of FASTQ file paths or a single batch definition file
whitelist_path: Path to whitelist, defaults to `None`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
filter: Filter to use to generate a filtered count matrix,
defaults to `None`
filter_threshold: Barcode filter threshold for bustools, defaults
to `None`
temp_dir: Path to temporary directory, defaults to `tmp`
threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
overwrite: Overwrite an existing index file, defaults to `False`
loom: Whether to convert the final count matrix into a loom file,
defaults to `False`
h5ad: Whether to convert the final count matrix into a h5ad file,
defaults to `False`
by_name: Aggregate counts by name instead of ID. Only has an effect when
`tcc=False`.
cellranger: Whether to convert the final count matrix into a
cellranger-compatible matrix, defaults to `False`
inspect: Whether or not to inspect the output BUS file and generate
the inspect.json
report: Generate HTML reports, defaults to `False`
nucleus: Whether this is a single-nucleus experiment. If `True`, the
spliced and unspliced count matrices will be summed, defaults to
`False`
fragment_l: Mean length of fragments, defaults to `None`
fragment_s: Standard deviation of fragment lengths, defaults to `None`
paired: Whether the fastqs are paired. Has no effect when a single
batch file is provided. Defaults to `False`
strand: Strandedness, defaults to `None`
umi_gene: Whether to perform gene-level UMI collapsing, defaults to
`False`
em: Whether to estimate gene abundances using EM algorithm, defaults to
`False`
Returns:
Dictionary containing paths to generated files
"""
STATS.start()
is_batch = isinstance(fastqs, str)
results = {}
make_directory(out_dir)
unfiltered_results = results.setdefault('unfiltered', {})
bus_result = {
'bus': os.path.join(out_dir, BUS_FILENAME),
'ecmap': os.path.join(out_dir, ECMAP_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
'info': os.path.join(out_dir, KALLISTO_INFO_FILENAME)
}
if technology.upper() in ('BULK', 'SMARTSEQ2'):
bus_result['saved_index'] = os.path.join(out_dir, SAVED_INDEX_FILENAME)
if any(not os.path.exists(path)
for name, path in bus_result.items()) or overwrite:
_technology = 'BULK' if technology.upper(
) == 'SMARTSEQ2' else technology
# Pipe any remote files.
fastqs = stream_batch(
fastqs, temp_dir=temp_dir
) if is_batch else stream_fastqs(
fastqs, temp_dir=temp_dir
)
bus_result = kallisto_bus(
fastqs,
index_path,
_technology,
out_dir,
threads=threads,
paired=paired,
strand=strand
)
else:
logger.info(
'Skipping kallisto bus because output files already exist. Use the --overwrite flag to overwrite.'
)
unfiltered_results.update(bus_result)
sort_result = bustools_sort(
bus_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(bus_result['bus']), SORT_CODE)
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
if not whitelist_path and not is_batch:
logger.info('Whitelist not provided')
whitelist_path = copy_or_create_whitelist(
technology, sort_result['bus'], out_dir
)
unfiltered_results.update({'whitelist': whitelist_path})
if inspect:
inspect_result = bustools_inspect(
sort_result['bus'],
os.path.join(out_dir, INSPECT_FILENAME),
whitelist_path=whitelist_path,
)
unfiltered_results.update(inspect_result)
prev_result = sort_result
if not is_batch:
prev_result = bustools_correct(
prev_result['bus'],
os.path.join(
temp_dir,
update_filename(
os.path.basename(sort_result['bus']), CORRECT_CODE
)
), whitelist_path
)
prev_result = bustools_sort(
prev_result['bus'],
os.path.join(out_dir, f'output.{UNFILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
unfiltered_results.update({'bus_scs': prev_result['bus']})
prefixes = [BUS_CDNA_PREFIX, BUS_INTRON_PREFIX]
# The prefix and t2cs are swapped because we call bustools capture with
# the --complement flag.
prefix_to_t2c = {
BUS_CDNA_PREFIX: intron_t2c_path,
BUS_INTRON_PREFIX: cdna_t2c_path,
}
counts_dir = os.path.join(out_dir, UNFILTERED_COUNTS_DIR)
make_directory(counts_dir)
cm = technology.upper() in ('BULK', 'SMARTSEQ2')
quant = cm and tcc
if quant:
quant_dir = os.path.join(out_dir, UNFILTERED_QUANT_DIR)
make_directory(quant_dir)
for prefix, t2c_path in prefix_to_t2c.items():
capture_result = bustools_capture(
prev_result['bus'], os.path.join(temp_dir, '{}.bus'.format(prefix)),
t2c_path, bus_result['ecmap'], bus_result['txnames']
)
sort_result = bustools_sort(
capture_result['bus'],
os.path.join(out_dir, f'{prefix}.{UNFILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
if prefix not in unfiltered_results:
unfiltered_results[prefix] = {}
unfiltered_results[prefix].update(sort_result)
if inspect:
inspect_result = bustools_inspect(
sort_result['bus'],
os.path.join(
out_dir, update_filename(INSPECT_FILENAME, prefix)
),
whitelist_path=whitelist_path,
)
unfiltered_results[prefix].update(inspect_result)
counts_prefix = os.path.join(counts_dir, prefix)
count_result = bustools_count(
sort_result['bus'],
counts_prefix,
t2g_path,
bus_result['ecmap'],
bus_result['txnames'],
tcc=tcc,
mm=mm or tcc,
cm=cm,
umi_gene=umi_gene,
em=em,
)
unfiltered_results[prefix].update(count_result)
if quant:
quant_result = kallisto_quant_tcc(
count_result['mtx'],
bus_result['saved_index'],
bus_result['ecmap'],
t2g_path,
quant_dir,
flens_path=bus_result.get('flens'),
l=fragment_l,
s=fragment_s,
threads=threads,
)
unfiltered_results.update(quant_result)
if cellranger:
cr_result = matrix_to_cellranger(
count_result['mtx'], count_result['barcodes'],
count_result['genes'], t2g_path,
os.path.join(counts_dir, f'{CELLRANGER_DIR}_{prefix}')
)
unfiltered_results[prefix].update({'cellranger': cr_result})
if loom or h5ad:
name = GENE_NAME
if quant:
name = TRANSCRIPT_NAME
unfiltered_results.update(
convert_matrices(
quant_dir if quant else counts_dir,
[unfiltered_results[prefix]['mtx'] for prefix in prefixes],
[unfiltered_results[prefix]['barcodes'] for prefix in prefixes],
genes_paths=[
unfiltered_results[prefix]['txnames']
if quant else unfiltered_results[prefix].get('genes')
for prefix in prefixes
],
t2g_path=t2g_path,
ec_paths=[
unfiltered_results[prefix].get('ec') for prefix in prefixes
],
txnames_path=bus_result['txnames'],
name=name,
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=tcc,
nucleus=nucleus,
threads=threads,
)
)
# NOTE: bulk/smartseq2 does not support filtering, so everything here
# assumes technology is not bulk/smartseq2
if filter:
filtered_results = results.setdefault('filtered', {})
if filter == 'bustools':
filtered_results.update(
filter_with_bustools(
prev_result['bus'],
bus_result['ecmap'],
bus_result['txnames'],
t2g_path,
os.path.join(out_dir, FILTER_WHITELIST_FILENAME),
os.path.join(out_dir, f'output.{FILTERED_CODE}.bus'),
filter_threshold=filter_threshold,
temp_dir=temp_dir,
memory=memory,
count=False,
umi_gene=umi_gene,
em=em,
)
)
for prefix, t2c_path in prefix_to_t2c.items():
filtered_capture_result = bustools_capture(
filtered_results['bus_scs'],
os.path.join(temp_dir, '{}.bus'.format(prefix)), t2c_path,
bus_result['ecmap'], bus_result['txnames']
)
filtered_sort_result = bustools_sort(
filtered_capture_result['bus'],
os.path.join(out_dir, f'{prefix}.{FILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
filtered_results.setdefault(prefix,
{}).update(filtered_sort_result)
filtered_counts_dir = os.path.join(out_dir, FILTERED_COUNTS_DIR)
make_directory(filtered_counts_dir)
filtered_counts_prefix = os.path.join(
filtered_counts_dir, prefix
)
count_result = bustools_count(
filtered_sort_result['bus'],
filtered_counts_prefix,
t2g_path,
bus_result['ecmap'],
bus_result['txnames'],
tcc=tcc,
mm=mm or tcc,
umi_gene=umi_gene,
em=em,
)
filtered_results[prefix].update(count_result)
if cellranger:
if not tcc:
cr_result = matrix_to_cellranger(
count_result['mtx'], count_result['barcodes'],
count_result['genes'], t2g_path,
os.path.join(
filtered_counts_dir,
f'{CELLRANGER_DIR}_{prefix}'
)
)
unfiltered_results[prefix].update({
'cellranger': cr_result
})
else:
logger.warning(
'TCC matrices can not be converted to cellranger-compatible format.'
)
if loom or h5ad:
filtered_results.update(
convert_matrices(
filtered_counts_dir,
[filtered_results[prefix]['mtx'] for prefix in prefixes],
[
filtered_results[prefix]['barcodes']
for prefix in prefixes
],
genes_paths=[
filtered_results[prefix].get('genes')
for prefix in prefixes
],
t2g_path=t2g_path,
ec_paths=[
filtered_results[prefix].get('ec')
for prefix in prefixes
],
txnames_path=bus_result['txnames'],
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=tcc,
nucleus=nucleus,
threads=threads,
)
)
STATS.end()
stats_path = STATS.save(os.path.join(out_dir, KB_INFO_FILENAME))
results.update({'stats': stats_path})
# Reports
nb_path = os.path.join(out_dir, REPORT_NOTEBOOK_FILENAME)
html_path = os.path.join(out_dir, REPORT_HTML_FILENAME)
if report:
logger.info(
f'Writing report Jupyter notebook at {nb_path} and rendering it to {html_path}'
)
report_result = render_report(
stats_path,
bus_result['info'],
unfiltered_results['inspect'],
nb_path,
html_path,
temp_dir=temp_dir
)
unfiltered_results.update(report_result)
for prefix in prefix_to_t2c:
nb_path = os.path.join(
out_dir, update_filename(REPORT_NOTEBOOK_FILENAME, prefix)
)
html_path = os.path.join(
out_dir, update_filename(REPORT_HTML_FILENAME, prefix)
)
logger.info(
f'Writing report Jupyter notebook at {nb_path} and rendering it to {html_path}'
)
report_result = render_report(
stats_path,
bus_result['info'],
unfiltered_results[prefix]['inspect'],
nb_path,
html_path,
unfiltered_results[prefix]['mtx'],
unfiltered_results[prefix].get('barcodes'),
unfiltered_results[prefix].get('genes'),
t2g_path,
temp_dir=temp_dir
)
unfiltered_results[prefix].update(report_result)
if tcc:
logger.warning(
'Plots for TCC matrices have not yet been implemented. The HTML report will not contain any plots.'
)
return results
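# Illustrative usage sketch only -- not part of kb-python. All paths below are
# hypothetical placeholders; the index, t2g and t2c files would normally come
# from a velocity-style `kb ref` run, and the technology string must match the
# assay that produced the FASTQs.
def _example_count_velocity_usage():  # pragma: no cover
    return count_velocity(
        index_path='index.idx',
        t2g_path='t2g.txt',
        cdna_t2c_path='cdna_t2c.txt',
        intron_t2c_path='intron_t2c.txt',
        technology='10xv3',
        out_dir='velocity_out',
        fastqs=['R1.fastq.gz', 'R2.fastq.gz'],
        threads=4,
        loom=True,
    )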
@logger.namespaced('count_velocity_smartseq3')
def count_velocity_smartseq3(
index_path: str,
t2g_path: str,
cdna_t2c_path: str,
intron_t2c_path: str,
out_dir: str,
fastqs: List[str],
whitelist_path: Optional[str] = None,
tcc: bool = False,
mm: bool = False,
temp_dir: str = 'tmp',
threads: int = 8,
memory: str = '4G',
overwrite: bool = False,
loom: bool = False,
h5ad: bool = False,
by_name: bool = False,
inspect: bool = True,
strand: Optional[Literal['unstranded', 'forward', 'reverse']] = None,
) -> Dict[str, Union[str, Dict[str, str]]]:
"""Generates count matrices for Smartseq3.
Args:
index_path: Path to kallisto index
t2g_path: Path to transcript-to-gene mapping
out_dir: Path to output directory
fastqs: List of FASTQ file paths
whitelist_path: Path to whitelist, defaults to `None`
tcc: Whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
mm: Whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
temp_dir: Path to temporary directory, defaults to `tmp`
        threads: Number of threads to use, defaults to `8`
memory: Amount of memory to use, defaults to `4G`
overwrite: Overwrite an existing index file, defaults to `False`
loom: Whether to convert the final count matrix into a loom file,
defaults to `False`
h5ad: Whether to convert the final count matrix into a h5ad file,
defaults to `False`
        by_name: Aggregate counts by name instead of ID. Only has an effect
            when `tcc=False`.
        inspect: Whether or not to inspect the output BUS file and generate
            the inspect.json, defaults to `True`
strand: Strandedness, defaults to `None`
Returns:
Dictionary containing paths to generated files
"""
STATS.start()
is_batch = isinstance(fastqs, str)
results = {}
make_directory(out_dir)
unfiltered_results = results.setdefault('unfiltered', {})
bus_result = {
'bus': os.path.join(out_dir, BUS_FILENAME),
'ecmap': os.path.join(out_dir, ECMAP_FILENAME),
'txnames': os.path.join(out_dir, TXNAMES_FILENAME),
'info': os.path.join(out_dir, KALLISTO_INFO_FILENAME),
'flens': os.path.join(out_dir, FLENS_FILENAME),
'saved_index': os.path.join(out_dir, SAVED_INDEX_FILENAME)
}
if any(not os.path.exists(path)
for name, path in bus_result.items()) or overwrite:
# Pipe any remote files.
fastqs = stream_batch(
fastqs, temp_dir=temp_dir
) if is_batch else stream_fastqs(
fastqs, temp_dir=temp_dir
)
bus_result = kallisto_bus(
fastqs,
index_path,
'SMARTSEQ3',
out_dir,
threads=threads,
paired=True,
strand=strand,
)
else:
logger.info(
'Skipping kallisto bus because output files already exist. Use the --overwrite flag to overwrite.'
)
unfiltered_results.update(bus_result)
sort_result = bustools_sort(
bus_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(bus_result['bus']), SORT_CODE)
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
logger.info('Whitelist not provided')
whitelist_path = copy_or_create_whitelist(
'SMARTSEQ3', sort_result['bus'], out_dir
)
unfiltered_results.update({'whitelist': whitelist_path})
prev_result = sort_result
if inspect:
inspect_result = bustools_inspect(
prev_result['bus'],
os.path.join(out_dir, INSPECT_FILENAME),
whitelist_path=whitelist_path,
)
unfiltered_results.update(inspect_result)
prev_result = bustools_correct(
prev_result['bus'],
os.path.join(
temp_dir,
update_filename(os.path.basename(prev_result['bus']), CORRECT_CODE)
), whitelist_path
)
prev_result = bustools_sort(
prev_result['bus'],
os.path.join(out_dir, f'output.{UNFILTERED_CODE}.bus'),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
unfiltered_results.update({'bus_scs': prev_result['bus']})
# Helper function to update results with suffix
def update_results_with_suffix(current_results, new_results, suffix):
current_results.update({
f'{key}{suffix}': value
for key, value in new_results.items()
})
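    # Illustrative note (the real INTERNAL_SUFFIX / UMI_SUFFIX values are
    # defined elsewhere in this module): with suffix '_internal', an entry
    # {'bus': 'x.bus'} is merged into the results dict under the key 'bus_internal'.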
# Write capture file & capture internal/umi records.
capture_path = write_smartseq3_capture(
os.path.join(out_dir, CAPTURE_FILENAME)
)
prefixes = [BUS_CDNA_PREFIX, BUS_INTRON_PREFIX]
# The prefix and t2cs are swapped because we call bustools capture with
# the --complement flag.
prefix_to_t2c = {
BUS_CDNA_PREFIX: intron_t2c_path,
BUS_INTRON_PREFIX: cdna_t2c_path,
}
suffix_to_inspect_filename = {
INTERNAL_SUFFIX: INSPECT_INTERNAL_FILENAME,
UMI_SUFFIX: INSPECT_UMI_FILENAME,
}
for suffix, inspect_filename in suffix_to_inspect_filename.items():
capture_result = bustools_capture(
prev_result['bus'],
os.path.join(out_dir, f'output{suffix}.bus'),
capture_path,
capture_type='umis',
complement=suffix == UMI_SUFFIX
)
update_results_with_suffix(unfiltered_results, capture_result, suffix)
if inspect:
inspect_result = bustools_inspect(
capture_result['bus'],
os.path.join(out_dir, inspect_filename),
whitelist_path=whitelist_path,
)
update_results_with_suffix(
unfiltered_results, inspect_result, suffix
)
counts_dir = os.path.join(out_dir, f'{UNFILTERED_COUNTS_DIR}{suffix}')
make_directory(counts_dir)
if tcc:
quant_dir = os.path.join(out_dir, f'{UNFILTERED_QUANT_DIR}{suffix}')
make_directory(quant_dir)
for prefix, t2c_path in prefix_to_t2c.items():
prefix_capture_result = bustools_capture(
capture_result['bus'],
os.path.join(temp_dir, f'{prefix}{suffix}.bus'), t2c_path,
bus_result['ecmap'], bus_result['txnames']
)
sort_result = bustools_sort(
prefix_capture_result['bus'],
os.path.join(
out_dir, f'{prefix}{suffix}.{UNFILTERED_CODE}.bus'
),
temp_dir=temp_dir,
threads=threads,
memory=memory
)
prefix_results = unfiltered_results.setdefault(prefix, {})
update_results_with_suffix(prefix_results, sort_result, suffix)
if inspect:
                inspect_result = bustools_inspect(
sort_result['bus'],
os.path.join(
out_dir, update_filename(inspect_filename, prefix)
),
)
update_results_with_suffix(
                    prefix_results, inspect_result, suffix
)
counts_prefix = os.path.join(counts_dir, prefix)
count_result = bustools_count(
sort_result['bus'],
counts_prefix,
t2g_path,
bus_result['ecmap'],
bus_result['txnames'],
tcc=tcc,
mm=mm or tcc,
cm=suffix == INTERNAL_SUFFIX,
umi_gene=suffix == UMI_SUFFIX,
)
update_results_with_suffix(prefix_results, count_result, suffix)
if tcc:
quant_result = kallisto_quant_tcc(
count_result['mtx'],
bus_result['saved_index'],
bus_result['ecmap'],
t2g_path,
quant_dir,
flens_path=bus_result['flens'],
threads=threads,
)
update_results_with_suffix(prefix_results, quant_result, suffix)
# After internal/UMI is done, create anndata separately for each
if loom or h5ad:
name = GENE_NAME
if tcc:
name = TRANSCRIPT_NAME
convert_result = convert_matrices(
quant_dir if tcc else counts_dir,
[
unfiltered_results[prefix][f'mtx{suffix}']
for prefix in prefixes
],
[
unfiltered_results[prefix][f'barcodes{suffix}']
for prefix in prefixes
],
genes_paths=[
unfiltered_results[prefix][f'txnames{suffix}'] if tcc else
unfiltered_results[prefix].get(f'genes{suffix}')
for prefix in prefixes
],
t2g_path=t2g_path,
ec_paths=[
unfiltered_results[prefix].get(f'ec{suffix}')
for prefix in prefixes
],
txnames_path=bus_result['txnames'],
name=name,
loom=loom,
h5ad=h5ad,
by_name=by_name,
tcc=False,
threads=threads,
)
update_results_with_suffix(
unfiltered_results, convert_result, suffix
)
STATS.end()
stats_path = STATS.save(os.path.join(out_dir, KB_INFO_FILENAME))
results.update({'stats': stats_path})
return results
|
5fc58bc30c1ea0b9c00d5c3285d8af6938dcbf4c
|
831c1e735a1b7d1bc6aa779bba88d3d3efe42565
|
/tests/test_http_responder.py
|
a0f9cf5a62cc9d62150abb4351ae95ed49046e1d
|
[
"Apache-2.0"
] |
permissive
|
pyGrowler/Growler
|
a46db1909d1877d332c103194dd1dabb1f8f3b15
|
5492466d8828115bb04c665917d6aeb4f4323f44
|
refs/heads/dev
| 2023-03-08T01:10:20.093556
| 2020-03-08T07:51:41
| 2020-03-08T07:51:41
| 25,556,740
| 814
| 38
| null | 2016-05-16T05:22:07
| 2014-10-22T02:11:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,705
|
py
|
test_http_responder.py
|
#
# tests/test_http_responder.py
#
import growler
from growler.http.responder import GrowlerHTTPResponder
from growler.http.methods import HTTPMethod
from growler.http.errors import HTTPErrorBadRequest
from growler.aio.http_protocol import GrowlerHTTPProtocol
import asyncio
import pytest
from unittest import mock
from mocks import (
mock_event_loop,
)
from test_http_protocol import (
mock_app,
mock_req_factory,
mock_res_factory,
mock_req,
mock_res,
mock_parser,
mock_parser_factory,
)
from mock_classes import (
responder,
request_uri,
)
GET = HTTPMethod.GET
POST = HTTPMethod.POST
PUT = HTTPMethod.PUT
DELETE = HTTPMethod.DELETE
@pytest.fixture
def mock_protocol(mock_app, mock_event_loop):
protocol = mock.Mock(spec=GrowlerHTTPProtocol)
protocol.socket.getpeername = mock.MagicMock()
protocol.http_application = mock_app
protocol.loop = mock_event_loop
protocol.client_headers = None
return protocol
@pytest.fixture
def responder(mock_protocol,
mock_parser_factory,
mock_req_factory,
mock_res_factory):
resp = GrowlerHTTPResponder(mock_protocol,
parser_factory=mock_parser_factory,
request_factory=mock_req_factory,
response_factory=mock_res_factory
)
return resp
def test_responder_constructor(mock_protocol):
r = GrowlerHTTPResponder(mock_protocol)
assert r.loop is mock_protocol.loop
assert r.headers == {}
@pytest.mark.parametrize("data", [
# b'',
b'GET /',
b'GET / HTTP/1.1\n',
b'GET / HTTP/1.1\n\nblahh',
])
def test_on_data_no_headers(responder, mock_parser, data):
mock_parser.consume.return_value = None
responder.on_data(data)
assert responder.headers == {}
mock_parser.consume.assert_called_with(data)
@pytest.mark.parametrize("data", [
b'1234567',
# b'GET /',
# b'GET / HTTP/1.1\n',
# b'GET / HTTP/1.1\n\nblahh',
])
def test_on_data_post_headers(responder,
mock_parser,
mock_req,
mock_res,
mock_app,
data,
):
# mock_req.body = mock.Mock(spec=asyncio.Future)
def on_consume(d):
mock_parser.method = POST
mock_parser.parsed_url = '/'
mock_parser.version = 'HTTP/1.1'
responder.parser.headers = {
'CONTENT-LENGTH': '%d' % len(data)
}
return data
mock_parser.consume.side_effect = on_consume
mock_parser.headers = dict()
responder.on_data(data)
assert responder.req is mock_req
assert responder.res is mock_res
# assert responder.loop.create_task.called
# responder.app.handle_client_request.assert_called_with(mock_req, mock_res)
@pytest.mark.parametrize("method", [
(POST),
(PUT),
])
def test_missing_thing(responder, method):
with pytest.raises(HTTPErrorBadRequest):
responder.init_body_buffer(method, {})
@pytest.mark.parametrize("method", [
(GET),
(DELETE),
])
def test_missing_thang(responder, method):
with pytest.raises(HTTPErrorBadRequest):
responder.init_body_buffer(method, {'CONTENT-LENGTH': 100})
@pytest.mark.parametrize("header", [
])
def test_content_length_wrong_method(responder, header):
print('')
@pytest.mark.parametrize("data, length", [
# (b' ' * 10, 100),
(b'_' * 15, 10),
])
def test_bad_content_length(responder, mock_parser, data, length):
headers = {'CONTENT-LENGTH': length}
responder.init_body_buffer(POST, headers)
with pytest.raises(HTTPErrorBadRequest) as e:
responder.validate_and_store_body_data(data)
assert e.value.phrase == "Unexpected body data sent"
@pytest.mark.parametrize("method, request_uri", [
(GET, '/'),
(POST, '/foo'),
(PUT, '/'),
(DELETE, '/')
])
def test_set_request_line_content_length(responder, method, request_uri):
responder.set_request_line(method, request_uri, "HTTP/1.1")
assert responder.parsed_request == (method, request_uri, "HTTP/1.1")
assert responder.request['method'] == method
assert responder.request['url'] == request_uri
assert responder.request['version'] == "HTTP/1.1"
def test_build_req_and_res(responder, mock_req, mock_res):
req, res = responder.build_req_and_res()
assert req is mock_req
assert res is mock_res
def test_set_request_line(responder, mock_protocol):
responder.set_request_line('GET', '/', 'HTTP/1.1')
assert responder.request['method'] == 'GET'
assert responder.request['url'] == '/'
assert responder.request['version'] == 'HTTP/1.1'
def test_property_method(responder, mock_parser):
assert responder.method is mock_parser.method
def test_property_method_str(responder, mock_parser):
assert responder.method_str is mock_parser.method
def test_property_parsed_query(responder, mock_parser):
assert responder.parsed_query is mock_parser.query
def test_property_headers(responder, mock_parser):
assert responder.headers is mock_parser.headers
def test_property_loop(responder, mock_protocol, mock_event_loop):
assert responder.loop is mock_protocol.loop
assert responder.loop is mock_event_loop
def test_property_app(responder, mock_protocol, mock_app):
assert responder.app is mock_protocol.http_application
assert responder.app is mock_app
def test_property_ip(responder, mock_protocol):
ip = '0.0.0.0'
mock_protocol.socket.getpeername.return_value = (ip, None)
assert responder.ip is ip
|
92519e15f36824655e09ec8dbe118089127fd26e
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/forecast/__init__.py
|
eecdd34ed008f86e2f7c9a355506d2f8c04e56df
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
__init__.py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Forecasting Module."""
from .forecast import AWSForecast # noqa: F401
from .forecast import AzureForecast # noqa: F401
from .forecast import Forecast # noqa: F401
from .forecast import GCPForecast # noqa: F401
from .forecast import OCIForecast # noqa: F401
from .forecast import OCPAllForecast # noqa: F401
from .forecast import OCPAWSForecast # noqa: F401
from .forecast import OCPAzureForecast # noqa: F401
from .forecast import OCPForecast # noqa: F401
from .forecast import OCPGCPForecast # noqa: F401
|
bbf8766a589eefdad55e36c0c42b4ba52a1e1296
|
8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
|
/projectq/setups/decompositions/_gates_test.py
|
12c05cdc89b4356334d444ce634960b3e22e23dd
|
[
"Apache-2.0"
] |
permissive
|
ProjectQ-Framework/ProjectQ
|
2e342da0622d4b5d513c15504556e95d3d0e2aea
|
67c660ca18725d23ab0b261a45e34873b6a58d03
|
refs/heads/develop
| 2023-09-04T02:18:25.581119
| 2023-03-09T16:03:57
| 2023-03-09T16:03:57
| 77,520,796
| 886
| 335
|
Apache-2.0
| 2023-07-24T07:07:15
| 2016-12-28T09:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
_gates_test.py
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for decompositions rules (using the Simulator).
"""
import pytest
from projectq.backends import Simulator
from projectq.cengines import (
AutoReplacer,
DecompositionRuleSet,
DummyEngine,
InstructionFilter,
MainEngine,
)
from projectq.meta import Control
from projectq.ops import (
All,
ClassicalInstructionGate,
CRz,
Entangle,
H,
Measure,
Ph,
R,
Rz,
T,
Tdag,
Toffoli,
X,
)
from projectq.setups.decompositions import (
crz2cxandrz,
entangle,
globalphase,
ph2r,
r2rzandph,
toffoli2cnotandtgate,
)
def low_level_gates(eng, cmd):
g = cmd.gate
if isinstance(g, ClassicalInstructionGate):
return True
if len(cmd.control_qubits) == 0:
if g == T or g == Tdag or g == H or isinstance(g, Rz) or isinstance(g, Ph):
return True
else:
if len(cmd.control_qubits) == 1 and cmd.gate == X:
return True
return False
def test_entangle():
rule_set = DecompositionRuleSet(modules=[entangle])
sim = Simulator()
eng = MainEngine(sim, [AutoReplacer(rule_set), InstructionFilter(low_level_gates)])
qureg = eng.allocate_qureg(4)
Entangle | qureg
assert 0.5 == pytest.approx(abs(sim.cheat()[1][0]) ** 2)
assert 0.5 == pytest.approx(abs(sim.cheat()[1][-1]) ** 2)
All(Measure) | qureg
def low_level_gates_noglobalphase(eng, cmd):
return low_level_gates(eng, cmd) and not isinstance(cmd.gate, Ph) and not isinstance(cmd.gate, R)
def test_globalphase():
rule_set = DecompositionRuleSet(modules=[globalphase, r2rzandph])
dummy = DummyEngine(save_commands=True)
eng = MainEngine(
dummy,
[AutoReplacer(rule_set), InstructionFilter(low_level_gates_noglobalphase)],
)
qubit = eng.allocate_qubit()
R(1.2) | qubit
rz_count = 0
for cmd in dummy.received_commands:
assert not isinstance(cmd.gate, R)
if isinstance(cmd.gate, Rz):
rz_count += 1
assert cmd.gate == Rz(1.2)
assert rz_count == 1
def run_circuit(eng):
qureg = eng.allocate_qureg(4)
All(H) | qureg
CRz(3.0) | (qureg[0], qureg[1])
Toffoli | (qureg[1], qureg[2], qureg[3])
with Control(eng, qureg[0:2]):
Ph(1.43) | qureg[2]
return qureg
def test_gate_decompositions():
sim = Simulator()
eng = MainEngine(sim, [])
rule_set = DecompositionRuleSet(modules=[r2rzandph, crz2cxandrz, toffoli2cnotandtgate, ph2r])
qureg = run_circuit(eng)
sim2 = Simulator()
eng_lowlevel = MainEngine(sim2, [AutoReplacer(rule_set), InstructionFilter(low_level_gates)])
qureg2 = run_circuit(eng_lowlevel)
for i in range(len(sim.cheat()[1])):
assert sim.cheat()[1][i] == pytest.approx(sim2.cheat()[1][i])
All(Measure) | qureg
All(Measure) | qureg2
|
af8b66a523fd15792a9d1e9e66a3e7e1e0acc1be
|
4eaf223758fdc7d5bd56b0276e834ff2fc453672
|
/ibm_db_tests/test_081_ConnWrongUser.py
|
c1374412d1bbb4ca775cfbc91b5488e1db30c47e
|
[
"Apache-2.0"
] |
permissive
|
ibmdb/python-ibmdb
|
76b19632d16d9365729480df3888cd89bc3744b4
|
58551a59a4ec40c879fcfc9d173ab8a3e327fa43
|
refs/heads/master
| 2023-09-04T03:59:35.020750
| 2023-09-01T12:56:17
| 2023-09-01T12:56:17
| 32,306,469
| 321
| 260
|
Apache-2.0
| 2023-09-14T11:36:16
| 2015-03-16T06:47:14
|
Python
|
UTF-8
|
Python
| false
| false
| 856
|
py
|
test_081_ConnWrongUser.py
|
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
from __future__ import print_function
import sys
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_081_ConnWrongUser(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_081)
def run_test_081(self):
try:
conn = ibm_db.connect(config.database, "y", config.password)
print("??? No way.")
except:
print(ibm_db.conn_error())
#if conn:
# print "??? No way."
#else:
# err = ibm_db.conn_error
# print err
#__END__
#__LUW_EXPECTED__
#08001
#__ZOS_EXPECTED__
#08001
#__SYSTEMI_EXPECTED__
#08001
#__IDS_EXPECTED__
#08001
#__ZOS_ODBC_EXPECTED__
#??? No way.
|
6cc3c00460e4293c05c4f72ba35da45c49602e9d
|
bc6e87f8e9a3f6c35f8080718ac409801dab3b24
|
/server/workers/persistence/src/apis/persistence.py
|
c97795a4ee1e53e176313cd66fad3b734bf10d30
|
[
"MIT"
] |
permissive
|
OpenKnowledgeMaps/Headstart
|
b7f56d8562d044e8d96a08f9f7ae0bc6de1076cd
|
94dcc248e1892de7b603d5a4dad175f5d8a128db
|
refs/heads/master
| 2023-08-31T20:06:34.485558
| 2023-08-25T17:34:03
| 2023-08-25T17:34:03
| 15,936,466
| 132
| 36
|
MIT
| 2023-08-25T17:34:05
| 2014-01-15T13:52:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 11,940
|
py
|
persistence.py
|
import os
from hashlib import md5
from datetime import datetime
import json
from collections import OrderedDict
from flask import Blueprint, request, make_response, jsonify, abort
from flask_restx import Namespace, Resource, fields
from models import Revisions, Visualizations
from database import sessions
persistence_ns = Namespace("persistence", description="OKMaps persistence operations")
def select_session(Session=None):
"""Select session according to database,
else select session for default database."""
if Session is not None:
return Session()
else:
return sessions.get(os.getenv("DEFAULT_DATABASE"))()
def create_vis_id(params, param_types):
# create map id
ordered_params = OrderedDict()
for k in param_types:
v = params[k]
v = [str(e) for e in v] if isinstance(v, list) else str(v)
ordered_params[k] = v
string_to_hash = json.dumps(ordered_params, separators=(',', ':'))
string_to_hash = " ".join([params["q"].replace('"', '\\"'), string_to_hash])
vis_id = md5(string_to_hash.encode('utf-8')).hexdigest()
return vis_id
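# Minimal sketch (illustrative only, not part of the service): create_vis_id is
# deterministic, so identical query parameters always hash to the same map id,
# which is what lets existsVisualization detect an already-created map. The
# parameter names used here are hypothetical.
def _example_create_vis_id():
    params = {"q": "climate change", "sorting": "most-relevant", "from": "2020-01-01"}
    param_types = ["sorting", "from"]
    vis_id_a = create_vis_id(params, param_types)
    vis_id_b = create_vis_id(params, param_types)
    return vis_id_a == vis_id_b  # always True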
def write_revision(database, vis_id, data, rev_id=None):
session = select_session(sessions.get(database))
vis = session.query(Visualizations).filter_by(vis_id=vis_id).first()
if rev_id is None:
if vis.vis_latest is None:
rev_id = 1
else:
rev_id = vis.vis_latest + 1
query = vis.vis_clean_query
new_rev = Revisions(
rev_id=rev_id,
rev_vis=vis_id,
rev_user="System",
rev_timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
rev_comment="Visualization created",
rev_data=data,
vis_query=query)
session.add(new_rev)
vis.vis_latest = rev_id
session.commit()
session.close()
def create_visualization(database,
vis_id, vis_title, data,
vis_clean_query=None, vis_query=None,
vis_params=None):
if not exists_visualization(database, vis_id):
session = select_session(sessions.get(database))
new_vis = Visualizations(
vis_id=vis_id,
vis_clean_query=vis_clean_query,
vis_query=vis_query,
vis_title=vis_title,
vis_params=vis_params)
session.add(new_vis)
session.commit()
write_revision(database, vis_id, data, 1)
session.close()
def exists_visualization(database, vis_id):
session = select_session(sessions.get(database))
vis = session.query(Visualizations).filter_by(vis_id=vis_id).first()
exists = True if vis else False
session.close()
return exists
def get_last_version(database, vis_id, details=False, context=False):
return get_revision(database, vis_id, None, details, context)
def get_revision(database, vis_id, rev_id, details=False, context=False):
try:
session = select_session(sessions.get(database))
if rev_id is None:
vis, rev = (session
.query(Visualizations, Revisions)
.select_from(Visualizations, Revisions)
.filter(Visualizations.vis_id == vis_id)
.filter(Revisions.rev_vis == vis_id)
.filter(Revisions.rev_id == Visualizations.vis_latest)
).first()
else:
vis, rev = (session
.query(Visualizations, Revisions)
.select_from(Visualizations, Revisions)
.filter(Visualizations.vis_id == vis_id)
.filter(Revisions.rev_vis == vis_id)
.filter(Revisions.rev_id == rev_id)
).first()
session.close()
if context is True:
res = {
"rev_vis": rev.rev_vis,
"vis_query": rev.vis_query,
"vis_title": vis.vis_title,
"rev_timestamp": rev.rev_timestamp,
"vis_params": vis.vis_params,
"rev_data": rev.rev_data
}
return res
else:
if details is True:
return rev.as_dict()
else:
return rev.rev_data
except TypeError:
persistence_ns.logger.info("Vis ID not found: %s in database %s" % (vis_id, database))
return None
def get_context(database, vis_id, revision_context=False):
session = select_session(sessions.get(database))
vis, rev = (session
.query(Visualizations, Revisions)
.select_from(Visualizations, Revisions)
.filter(Visualizations.vis_id == vis_id)
.filter(Revisions.rev_vis == vis_id)
.filter(Revisions.rev_id == Visualizations.vis_latest)
).first()
res = {
"rev_vis": rev.rev_vis,
"vis_query": rev.vis_query,
"vis_title": vis.vis_title,
"rev_timestamp": rev.rev_timestamp,
"vis_params": vis.vis_params
}
if revision_context == 'true':
data = json.loads(rev.rev_data)
res["additional_context"] = data.get("additional_context", {})
session.close()
return res
@persistence_ns.route('/existsVisualization/<database>')
class existsVisualization(Resource):
def post(self, database):
payload = request.get_json()
vis_id = payload.get("vis_id")
persistence_ns.logger.debug("existsVisualization: %s" % vis_id)
exists = exists_visualization(database, vis_id)
# create response
headers = {}
result = {"exists": exists}
headers["Content-Type"] = "application/json"
return make_response(result, 200, headers)
@persistence_ns.route('/createVisualization/<database>')
class createVisualization(Resource):
def post(self, database):
try:
payload = request.get_json()
persistence_ns.logger.debug("createVisualization")
persistence_ns.logger.debug(payload.keys())
vis_id = payload.get('vis_id')
vis_title = payload.get('vis_title')
data = payload.get('data')
vis_clean_query = payload.get('vis_clean_query')
vis_query = payload.get('vis_query')
vis_params = payload.get('vis_params')
persistence_ns.logger.debug(vis_id)
persistence_ns.logger.debug(vis_title)
persistence_ns.logger.debug(vis_clean_query)
persistence_ns.logger.debug(vis_query)
persistence_ns.logger.debug(vis_params)
create_visualization(database,
vis_id, vis_title, data,
vis_clean_query, vis_query, vis_params)
result = {'success': True}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
200,
headers)
except Exception as e:
result = {'success': False, 'reason': str(e)}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
500,
headers)
@persistence_ns.route('/writeRevision/<database>')
class writeRevision(Resource):
@persistence_ns.produces(["application/json"])
def post(self, database):
try:
payload = request.get_json()
persistence_ns.logger.debug("writeRevision")
vis_id = payload.get("vis_id")
data = payload.get("data")
# persistence_ns.logger.debug(data)
write_revision(database, vis_id, data)
result = {'success': True}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result), 200, headers)
except Exception as e:
result = {'success': False, 'reason': str(e)}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result), 500, headers)
@persistence_ns.route('/getLastVersion/<database>')
class getLastVersion(Resource):
"""
    Returns the latest revision of a visualization; equivalent to getRevision
    with the most recent revision id.
    params: vis_id, details (default: false), context (default: false)
"""
def post(self, database):
try:
payload = request.get_json()
persistence_ns.logger.debug("getLastVersion")
persistence_ns.logger.debug(payload)
vis_id = payload.get('vis_id')
details = payload.get('details')
context = payload.get('context')
result = get_last_version(database, vis_id, details, context)
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
200,
headers)
except Exception as e:
result = {'success': False, 'reason': str(e)}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
500,
headers)
@persistence_ns.route('/getRevision/<database>')
class getRevision(Resource):
@persistence_ns.produces(["application/json"])
def post(self, database):
# create response
headers = {}
result = {}
headers["Content-Type"] = "application/json"
return make_response(result, 200, headers)
@persistence_ns.route('/getContext/<database>')
class getContext(Resource):
@persistence_ns.produces(["application/json"])
def post(self, database):
try:
payload = request.get_json()
persistence_ns.logger.debug("getContext")
persistence_ns.logger.debug(payload)
vis_id = payload.get('vis_id')
revision_context = payload.get('revision_context', False)
result = get_context(database, vis_id, revision_context)
persistence_ns.logger.debug(result)
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
200,
headers)
except Exception as e:
result = {'success': False, 'reason': str(e)}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result),
500,
headers)
@persistence_ns.route('/createID/<database>')
class createID(Resource):
@persistence_ns.produces(["application/json"])
def post(self, database):
try:
persistence_ns.logger.debug("createID")
payload = request.get_json()
params = payload.get("params")
param_types = payload.get("param_types")
vis_id = create_vis_id(params, param_types)
persistence_ns.logger.debug(params)
persistence_ns.logger.debug(param_types)
persistence_ns.logger.debug(vis_id)
# create response
headers = {}
result = {"unique_id": vis_id}
headers["Content-Type"] = "application/json"
return make_response(jsonify(result), 200, headers)
except Exception as e:
result = {'success': False, 'reason': str(e)}
headers = {'ContentType': 'application/json'}
return make_response(jsonify(result), 500, headers)
@persistence_ns.route('/service_version')
class ServiceVersion(Resource):
def get(self):
result = {"service_version": os.getenv("SERVICE_VERSION")}
return make_response(result, 200, {"Content-Type": "application/json"})
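# Example request flow (illustrative only; the URL prefix depends on how this
# namespace is mounted on the Flask app, and 'okmaps' is a hypothetical
# database name):
#   POST /persistence/createID/okmaps            {"params": {...}, "param_types": [...]}
#   POST /persistence/createVisualization/okmaps {"vis_id": ..., "vis_title": ..., "data": ...}
#   POST /persistence/getLastVersion/okmaps      {"vis_id": ..., "details": false, "context": false}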
|
1f8df2bce1fa1751decb732a0bb3b537fe3b83e6
|
afbeee6a3a83946449e5fccf7c74457461ed921f
|
/docs/source/reference/plots/factory/mip_render_mhd_plot.py
|
710534bf8fbc7eb2408040e51faa136aed72fdf3
|
[
"MIT"
] |
permissive
|
K3D-tools/K3D-jupyter
|
d69e541de90835415be5516d3e6758b1fcd530d2
|
5973d30947f6bc80b2a50ba260f198bec57ddfc1
|
refs/heads/main
| 2023-09-01T20:41:01.159202
| 2023-08-26T20:45:56
| 2023-08-26T20:45:56
| 44,377,817
| 859
| 134
|
MIT
| 2023-08-26T20:33:59
| 2015-10-16T10:14:20
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
mip_render_mhd_plot.py
|
import k3d
import numpy as np
import os
import SimpleITK as sitk
def generate():
filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'../../assets/factory/heart.mhd')
im_sitk = sitk.ReadImage(filepath)
img = sitk.GetArrayFromImage(im_sitk)
plt_mip = k3d.mip(img.astype(np.float32))
plot = k3d.plot()
plot += plt_mip
plot.snapshot_type = 'inline'
return plot.get_snapshot()
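# Illustrative only: get_snapshot() returns a self-contained HTML string, so the
# rendering could be saved and opened in a browser, e.g.:
#   with open('mip_heart.html', 'w') as f:
#       f.write(generate())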
|
994fea31ae20c5ddd819bca583e8362b58028011
|
a902290fb3b911676358ae4d93f83061a6c2bd0f
|
/InvenTree/plugin/base/barcodes/mixins.py
|
5ad4794ebd819a2fe1c4c1478942cff76fa3219f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
inventree/InvenTree
|
a15e54182c9bfafdf5348cc9a66da1004e23e760
|
e88a8e99a5f0b201c67a95cba097c729f090d5e2
|
refs/heads/master
| 2023-09-03T19:32:35.438375
| 2023-08-30T00:25:40
| 2023-08-30T00:25:40
| 85,894,461
| 3,077
| 549
|
MIT
| 2023-09-14T14:21:01
| 2017-03-23T01:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
mixins.py
|
"""Plugin mixin classes for barcode plugin."""
class BarcodeMixin:
"""Mixin that enables barcode handling.
Custom barcode plugins should use and extend this mixin as necessary.
"""
ACTION_NAME = ""
class MixinMeta:
"""Meta options for this mixin."""
MIXIN_NAME = 'Barcode'
def __init__(self):
"""Register mixin."""
super().__init__()
self.add_mixin('barcode', 'has_barcode', __class__)
@property
def has_barcode(self):
"""Does this plugin have everything needed to process a barcode."""
return True
def scan(self, barcode_data):
"""Scan a barcode against this plugin.
        This method is explicitly called from the /scan/ API endpoint,
        and thus a plugin which matches the provided barcode is expected to return a result.
If this plugin finds a match against the provided barcode, it should return a dict object
with the intended result.
Default return value is None
"""
return None
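# Minimal sketch of a custom plugin built on this mixin (illustrative only; a
# real InvenTree barcode plugin would normally also inherit from the base
# plugin class, and the 'ACME-' prefix is a hypothetical scheme):
class ExampleBarcodePlugin(BarcodeMixin):
    """Match barcodes of the form 'ACME-<something>' and return a simple result."""
    def scan(self, barcode_data):
        if isinstance(barcode_data, str) and barcode_data.startswith('ACME-'):
            return {'success': 'Matched ACME barcode', 'barcode_data': barcode_data}
        # No match: return None so other plugins can try this barcode
        return None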
|
ebce34ba70dfe6bd56c149526c670cf5c66b81d3
|
d01680fe164d915bb3ffd6b10dea1d7cac503630
|
/python-package/lets_plot/plot/theme_set.py
|
8ee1aff803f9e100fe42b0a05608167ded7dd893
|
[
"MIT",
"Apache-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LGPL-3.0-only"
] |
permissive
|
JetBrains/lets-plot
|
4ba8edd8910967d5e15d8d0ea1a9cd7a9c50432f
|
af4f6554eb9cc250259a6a6757b5c8d920dde8c4
|
refs/heads/master
| 2023-09-01T04:15:04.414149
| 2023-08-31T16:48:57
| 2023-08-31T16:48:57
| 176,771,727
| 1,264
| 59
|
MIT
| 2023-09-07T12:42:01
| 2019-03-20T16:13:03
|
Kotlin
|
UTF-8
|
Python
| false
| false
| 9,103
|
py
|
theme_set.py
|
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
from .core import FeatureSpec
__all__ = [
'theme_grey',
'theme_light',
'theme_classic',
'theme_minimal',
'theme_minimal2',
'theme_none',
'theme_bw',
'theme_void',
'flavor_darcula',
'flavor_solarized_light',
'flavor_solarized_dark',
'flavor_high_contrast_light',
'flavor_high_contrast_dark'
]
def theme_grey():
"""
Grey background and white gridlines.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_grey()
"""
return FeatureSpec('theme', name="grey")
def theme_light():
"""
Light grey lines of various widths on white background.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_light()
"""
return FeatureSpec('theme', name="light")
def theme_classic():
"""
Dark grey axes and no gridlines.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_classic()
"""
return FeatureSpec('theme', name="classic")
def theme_minimal():
"""
A minimalistic theme without axes lines.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_minimal()
"""
return FeatureSpec('theme', name="minimal")
def theme_minimal2():
"""
Default theme similar to `theme_minimal()` but with x axis line and only major grid lines.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_minimal2()
"""
return FeatureSpec('theme', name="minimal2")
def theme_none():
"""
Basic settings are applied.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_none()
"""
return FeatureSpec('theme', name="none")
def theme_bw():
"""
Grey lines on white background with dark grey plot border.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_bw()
"""
return FeatureSpec('theme', name="bw")
def theme_void():
"""
A completely blank (or "void") background theme: no borders, axes, or gridlines.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme_void()
"""
blank_elems = {'line': 'blank', 'axis': 'blank'}
return theme_classic() + FeatureSpec('theme', name=None, **blank_elems)
def flavor_darcula():
"""
Darcula color scheme.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
data = {'name': ['pen', 'brush', 'paper'],
'slice': [1, 3, 3]}
ggplot(data) + \\
geom_pie(aes(fill='name', slice='slice'),
stat='identity', color='pen',
tooltips='none', labels=layer_labels().line('@name')) + \\
scale_fill_manual(['pen', 'brush', 'paper']) + \\
flavor_darcula()
"""
return FeatureSpec('theme', name=None, flavor="darcula")
def flavor_solarized_light():
"""
Solarized light color scheme.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
data = {'name': ['pen', 'brush', 'paper'],
'slice': [1, 3, 3]}
ggplot(data) + \\
geom_pie(aes(fill='name', slice='slice'),
stat='identity', color='pen',
tooltips='none', labels=layer_labels().line('@name')) + \\
scale_fill_manual(['pen', 'brush', 'paper']) + \\
flavor_solarized_light()
"""
return FeatureSpec('theme', name=None, flavor="solarized_light")
def flavor_solarized_dark():
"""
Solarized dark color scheme.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
data = {'name': ['pen', 'brush', 'paper'],
'slice': [1, 3, 3]}
ggplot(data) + \\
geom_pie(aes(fill='name', slice='slice'),
stat='identity', color='pen',
tooltips='none', labels=layer_labels().line('@name')) + \\
scale_fill_manual(['pen', 'brush', 'paper']) + \\
flavor_solarized_dark()
"""
return FeatureSpec('theme', name=None, flavor="solarized_dark")
def flavor_high_contrast_light():
"""
High contrast light color scheme.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
data = {'name': ['pen', 'brush', 'paper'],
'slice': [1, 3, 3]}
ggplot(data) + \\
geom_pie(aes(fill='name', slice='slice'),
stat='identity', color='pen',
tooltips='none', labels=layer_labels().line('@name')) + \\
scale_fill_manual(['pen', 'brush', 'paper']) + \\
flavor_high_contrast_light()
"""
return FeatureSpec('theme', name=None, flavor="high_contrast_light")
def flavor_high_contrast_dark():
"""
High contrast dark color scheme.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
data = {'name': ['pen', 'brush', 'paper'],
'slice': [1, 3, 3]}
ggplot(data) + \\
geom_pie(aes(fill='name', slice='slice'),
stat='identity', color='pen',
tooltips='none', labels=layer_labels().line('@name')) + \\
scale_fill_manual(['pen', 'brush', 'paper']) + \\
flavor_high_contrast_dark()
"""
return FeatureSpec('theme', name=None, flavor="high_contrast_dark")
|
59d5c42720e2daf7bb6c48e8c2d520e54b5ca999
|
3d8b20612a709cc08ae06719324761c1d4094883
|
/selfdrive/car/ford/values.py
|
b8890107fcc52b39d82bd2b780dc0e385219b593
|
[
"MIT"
] |
permissive
|
commaai/chffrplus
|
fee9178a96a5a15857f26d5fa62fd1f19d3a24c3
|
12527e1a686753cfc6215461f474bda9211878a0
|
refs/heads/release2
| 2021-10-24T01:24:44.271176
| 2018-12-18T02:49:54
| 2018-12-18T02:49:54
| 107,477,295
| 138
| 65
|
MIT
| 2021-01-27T13:04:17
| 2017-10-19T00:14:01
|
C
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
values.py
|
from selfdrive.car import dbc_dict
class CAR:
FUSION = "FORD FUSION 2018"
FINGERPRINTS = {
CAR.FUSION: [{
71: 8, 74: 8, 75: 8, 76: 8, 90: 8, 92: 8, 93: 8, 118: 8, 119: 8, 120: 8, 125: 8, 129: 8, 130: 8, 131: 8, 132: 8, 133: 8, 145: 8, 146: 8, 357: 8, 359: 8, 360: 8, 361: 8, 376: 8, 390: 8, 391: 8, 392: 8, 394: 8, 512: 8, 514: 8, 516: 8, 531: 8, 532: 8, 534: 8, 535: 8, 560: 8, 578: 8, 604: 8, 613: 8, 673: 8, 827: 8, 848: 8, 934: 8, 935: 8, 936: 8, 947: 8, 963: 8, 970: 8, 972: 8, 973: 8, 984: 8, 992: 8, 994: 8, 997: 8, 998: 8, 1003: 8, 1034: 8, 1045: 8, 1046: 8, 1053: 8, 1054: 8, 1058: 8, 1059: 8, 1068: 8, 1072: 8, 1073: 8, 1082: 8, 1107: 8, 1108: 8, 1109: 8, 1110: 8, 1200: 8, 1427: 8, 1430: 8, 1438: 8, 1459: 8
}],
}
DBC = {
CAR.FUSION: dbc_dict('ford_fusion_2018_pt', 'ford_fusion_2018_adas'),
}
|
ee86d83fd185faf948cfdeaba364d11e2593d3ab
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/tdw/FBOutput/AvatarTransformMatrices.py
|
91fc7631a56d70d4cfd0ab41ccc3378cbd066826
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,893
|
py
|
AvatarTransformMatrices.py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
class AvatarTransformMatrices(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsAvatarTransformMatrices(cls, buf, offset):
n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
x = AvatarTransformMatrices()
x.Init(buf, n + offset)
return x
# AvatarTransformMatrices
def Init(self, buf, pos):
self._tab = tdw.flatbuffers.table.Table(buf, pos)
# AvatarTransformMatrices
def AvatarIds(self, j):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + tdw.flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# AvatarTransformMatrices
def AvatarIdsLength(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# AvatarTransformMatrices
def AvatarMatrices(self, j):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(tdw.flatbuffers.number_types.Float32Flags, a + tdw.flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# AvatarTransformMatrices
def AvatarMatricesAsNumpy(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(tdw.flatbuffers.number_types.Float32Flags, o)
return 0
# AvatarTransformMatrices
def AvatarMatricesLength(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# AvatarTransformMatrices
def SensorContainerMatrices(self, j):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(tdw.flatbuffers.number_types.Float32Flags, a + tdw.flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# AvatarTransformMatrices
def SensorContainerMatricesAsNumpy(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(tdw.flatbuffers.number_types.Float32Flags, o)
return 0
# AvatarTransformMatrices
def SensorContainerMatricesLength(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
def AvatarTransformMatricesStart(builder): builder.StartObject(3)
def AvatarTransformMatricesAddAvatarIds(builder, avatarIds): builder.PrependUOffsetTRelativeSlot(0, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(avatarIds), 0)
def AvatarTransformMatricesStartAvatarIdsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def AvatarTransformMatricesAddAvatarMatrices(builder, avatarMatrices): builder.PrependUOffsetTRelativeSlot(1, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(avatarMatrices), 0)
def AvatarTransformMatricesStartAvatarMatricesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def AvatarTransformMatricesAddSensorContainerMatrices(builder, sensorContainerMatrices): builder.PrependUOffsetTRelativeSlot(2, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(sensorContainerMatrices), 0)
def AvatarTransformMatricesStartSensorContainerMatricesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def AvatarTransformMatricesEnd(builder): return builder.EndObject()
|
5a50f05c71c772b621237f3db82a23e2c7359255
|
6dfc23ef65e5943712340ef2b4b648cc25ea1fad
|
/2018/03/15/How to Use Django REST Framework Permissions/api_example/api_example/languages/models.py
|
72a92fff417761b2ed0ff888f755334b498a3b37
|
[
"Unlicense"
] |
permissive
|
PrettyPrinted/youtube_video_code
|
6d265c910de18d780cdb99f7ea11b8b963929dc2
|
5654e5feba854d3b41b8dd75218e0221408e7831
|
refs/heads/master
| 2023-09-04T21:28:57.386174
| 2023-08-11T07:07:45
| 2023-08-11T07:07:45
| 186,743,986
| 698
| 2,347
|
Unlicense
| 2022-10-06T04:06:56
| 2019-05-15T03:40:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
models.py
|
from django.db import models
class Paradigm(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Language(models.Model):
name = models.CharField(max_length=50)
paradigm = models.ForeignKey(Paradigm, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Programmer(models.Model):
name = models.CharField(max_length=50)
languages = models.ManyToManyField(Language)
def __str__(self):
return self.name
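# Illustrative only (assumes migrations have been applied); shows how the
# foreign-key and many-to-many relations defined above are used:
def _example_usage():
    functional = Paradigm.objects.create(name='functional')
    haskell = Language.objects.create(name='Haskell', paradigm=functional)
    dev = Programmer.objects.create(name='Ada')
    dev.languages.add(haskell)
    return dev.languages.count()  # -> 1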
|
a8dbe3ea38de6ad45d4687322666b730ab37c5d4
|
54922b19f91e172ae7f2f0de7eb8baa4607dd64c
|
/pylatexenc/latexencode/_unicode_to_latex_encoder.py
|
d11da68a6a76b4986c4f0e8bd0c587b7c5dcee7e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
phfaist/pylatexenc
|
3c75afa0d31e676276f61821cd5ba51822eaced0
|
1d36361c58db305e3e3edf2f9ed17344047fdcca
|
refs/heads/main
| 2023-09-04T04:48:40.626633
| 2023-08-29T14:54:36
| 2023-08-29T14:54:36
| 20,764,371
| 227
| 36
|
MIT
| 2023-07-27T10:27:41
| 2014-06-12T10:57:49
|
Python
|
UTF-8
|
Python
| false
| false
| 20,894
|
py
|
_unicode_to_latex_encoder.py
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2021 Philippe Faist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function, absolute_import, unicode_literals
import logging
import functools
import itertools
import unicodedata
# "defaults" e.g. for Transcrypt:
def unicode_str(s=None):
if s is None:
return ''
return str(s)
basestring_cls = str
#__pragma__('ecom')
#__pragma__('skip')
import sys
if sys.version_info.major > 2:
unicode_str = str # need to support unicode() w/ no arguments
basestring_cls = str
    # use MappingProxyType for keeping the exposed dicts read-only
from types import MappingProxyType as _MappingProxyType
else:
unicode_str = unicode
basestring_cls = basestring
_MappingProxyType = dict
#__pragma__('noskip')
logger = logging.getLogger(__name__)
### BEGINPATCH_LATEXENCODE_CALLABLE_ACCEPTS_U2LOBJ_ARG
import sys
if sys.version_info.major > 2:
from inspect import getfullargspec
else:
from inspect import getargspec as getfullargspec
def _callable_accepts_u2lobj_arg(fn):
return ('u2lobj' in getfullargspec(fn)[0])
### ENDPATCH_LATEXENCODE_CALLABLE_ACCEPTS_U2LOBJ_ARG
# Apparently, Transcrypt does not support hexadecimal formatting, whether
# through a string format like '{:x}'.format(...) or f'{...:x}', or via
# hex(...) ... so we provide our own JS implementation.
# Feed some raw JS to transcrypt directly
#__pragma__('ecom')
"""?
__pragma__('js', 'var HexstrN = (v, N=4) => (+v).toString(16).toUpperCase().padStart(N, "0")');
?"""
#__pragma__('skip')
def HexstrN(value, N=4):
return ('%X'%(value)).zfill(N)
#__pragma__('noskip')
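# e.g. HexstrN(255) == '00FF' and HexstrN(0x2D, 2) == '2D'; both the Python and
# the JS implementation pad with leading zeros up to N hex digits.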
## Transcrypt currently does not provide an implementation of m.expand() or
## rx.match(..., pos=) ... :/
#__pragma__('skip')
def re_match_expand(m, repl):
return m.expand(repl)
def regex_match_pos(rx, s, pos):
# CAREFUL !! m.start() and m.end() will differ on Transcrypt because we
# slice the string. Only rely on the difference m.start()-m.end() !!
return rx.match(s, pos)
#__pragma__('noskip')
"""?
__pragma__('js', '''
var re_match_expand = (m, repl) => repl.replace(/\\\\((\\d)|g<(\\w+)>)/g, (esc, x, digit, gname) => m.group((digit != null) ? parseInt(digit) : gname)) ;
var regex_match_pos = (rx, s, pos) => rx.match(s.slice(pos)) ;
''');
?"""
### BEGIN_PYLATEXENC_GET_DEFAULT_SPECS_FN
from .get_builtin_rules import (
get_builtin_uni2latex_dict, get_builtin_conversion_rules
)
### END_PYLATEXENC_GET_DEFAULT_SPECS_FN
from ._rule import (
RULE_DICT,
RULE_REGEX,
RULE_CALLABLE,
UnicodeToLatexConversionRule,
)
class UnicodeToLatexEncoder(object):
r"""
Encode a string with unicode characters into a LaTeX snippet.
The following general attributes can be specified as keyword arguments to
the constructor. Note: These attributes must be specified to the
constructor and may NOT be subsequently modified. This is because in the
constructor we pre-compile some rules and flags to optimize calls to
    :py:meth:`unicode_to_latex()`.
.. py:attribute:: non_ascii_only
Whether we should convert only non-ascii characters into LaTeX sequences,
or also all known ascii characters with special LaTeX meaning such as
'\\\\', '$', '&', etc.
If `non_ascii_only` is set to `True` (the default is `False`), then
conversion rules are not applied at positions in the string where an
ASCII character is encountered.
.. py:attribute:: conversion_rules
The conversion rules, specified as a list of
:py:class:`UnicodeToLatexConversionRule` objects. For each position in
the string, the rules will be applied in the given sequence until a
replacement string is found.
Instead of a :py:class:`UnicodeToLatexConversionRule` object you may also
specify a string specifying a built-in rule (e.g., 'defaults'), which
will be expanded to the corresponding rules according to
:py:func:`get_builtin_conversion_rules()`.
    If you specify your own list of rules using this argument, you will
    probably want to include the element 'defaults', presumably at the end of
    your list, so that all built-in default conversion rules still apply. To
    override built-in rules, simply add your custom rules earlier in the list.
Example::
conversion_rules = [
# our custom rules
UnicodeToLatexConversionRule(RULE_REGEX, [
# double \\ needed, see UnicodeToLatexConversionRule
( re.compile(r'...'), r'\\ldots' ),
( re.compile(r'î'), r'\\^i' ),
]),
# plus all the default rules
'defaults'
]
u = UnicodeToLatexEncoder(conversion_rules=conversion_rules)
.. py:attribute:: replacement_latex_protection
How to "protect" LaTeX replacement text that looks like it could be
interpreted differently if concatenated to arbitrary strings before and
after.
Currently in the default scheme only one situation is recognized: if the
replacement string ends with a latex macro invocation with a non-symbol
macro name, e.g. ``\textemdash`` or ``\^\i``. Indeed, if we naively
replace these texts in an arbitrary string (like ``maître``), we might
    get an invalid macro invocation (like ``ma\^\itre``, which yields the
    unknown macro name ``\itre``).
Possible protection schemes are:
- 'braces' (the default): Any suspicious replacement text (that
might look fragile) is placed in curly braces ``{...}``.
- 'braces-all': All replacement latex escapes are surrounded in
protective curly braces ``{...}``, regardless of whether or not they
might be deemed "fragile" or "unsafe".
- 'braces-almost-all': Almost all replacement latex escapes are
surrounded in protective curly braces ``{...}``. This option
emulates closely the behavior of `brackets=True` of the function
`utf8tolatex()` in `pylatexenc 1.x`, though I'm not sure it is really
useful. [Specifically, all those replacement strings that start with
a backslash are surrounded by curly braces].
- 'braces-after-macro': In the situation where the replacement latex
code ends with a string-named macro, then a pair of empty braces is
added at the end of the replacement text to protect the macro.
- 'none': No protection is applied, even in "unsafe" cases. This is
not recommended, as this will likely result in invalid LaTeX
code. (Note this is the string 'none', not Python's built-in `None`.)
- any callable object: The callable should take a single argument, the
replacement latex string associated with a piece of the input (maybe
a special character) that has been encoded; it should return the
actual string to append to the output string.
.. versionadded:: 2.10
You can specify a callable object to `replacement_latex_protection`
since `pylatexenc 2.10`.
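    For instance, a minimal sketch of a callable that behaves like the
    'braces-all' scheme above (purely illustrative, not a built-in scheme)::

        u = UnicodeToLatexEncoder(
            replacement_latex_protection=lambda repl: '{' + repl + '}'
        )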
.. py:attribute:: unknown_char_policy
What to do when a non-ascii character is encountered without any known
substitution macro. The attribute `unknown_char_policy` can be set to one of:
- 'keep': keep the character as is;
- 'replace': replace the character by a boldface question mark;
- 'ignore': ignore the character from the input entirely and don't
output anything for it;
- 'fail': raise a `ValueError` exception;
- 'unihex': output the unicode hexadecimal code (U+XXXX) of the
character in typewriter font;
- a Python callable --- will be called with argument the character that
could not be encoded. (If the callable accepts a second argument
called 'u2lobj', then the `UnicodeToLatexEncoder` instance is
provided to that argument.) The return value of the callable is used
as LaTeX replacement code.
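    For instance, a minimal sketch of a callable policy (purely illustrative)
    that renders the unknown character's code point in typewriter font::

        def my_unknown_char_policy(ch):
            # return the LaTeX replacement code for the character `ch`
            return r'\texttt{U+%04X}' % (ord(ch),)

        u = UnicodeToLatexEncoder(unknown_char_policy=my_unknown_char_policy)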
.. py:attribute:: unknown_char_warning
In addition to the `unknown_char_policy`, this attribute indicates
whether or not (`True` or `False`) one should generate a warning when a
nonascii character without any known latex representation is
encountered. (Default: True)
.. py:attribute:: latex_string_class
The return type of :py:meth:`unicode_to_latex()`. Normally this is a
simple unicode string (`str` on `Python 3` or `unicode` on `Python 2`).
But you can specify your custom string type via the `latex_string_class`
argument. The `latex_string_class` will be invoked with no arguments to
construct an empty object (so `latex_string_class` can be either an
object that can be constructed with no arguments or it can be a function
    with no arguments that returns a fresh object instance). The object must
support the operation "+=", i.e., you should overload the ``__iadd__()``
method.
For instance, you can record the chunks that would have been appended
into a single string as follows::
class LatexChunkList:
def __init__(self):
self.chunks = []
def __iadd__(self, s):
self.chunks.append(s)
return self
u = UnicodeToLatexEncoder(latex_string_class=LatexChunkList,
replacement_latex_protection='none')
result = u.unicode_to_latex("é → α")
# result.chunks == [ r"\'e", ' ', r'\textrightarrow', ' ',
# r'\ensuremath{\alpha}' ]
.. warning::
None of the above attributes should be modified after constructing the
object. The values specified to the class constructor are final and
cannot be changed. [Indeed, the class constructor "compiles" these
attribute values into a data structure that makes
    :py:meth:`unicode_to_latex()` slightly more efficient.]
.. versionadded:: 2.0
This class was introduced in `pylatexenc 2.0`.
"""
def __init__(self, **kwargs):
self.non_ascii_only = kwargs.pop('non_ascii_only', False)
self.conversion_rules = kwargs.pop('conversion_rules', ['defaults'])
self.replacement_latex_protection = kwargs.pop('replacement_latex_protection', 'braces')
self.unknown_char_policy = kwargs.pop('unknown_char_policy', 'keep')
self.unknown_char_warning = kwargs.pop('unknown_char_warning', True)
self.latex_string_class = kwargs.pop('latex_string_class', unicode_str)
if len(kwargs):
logger.warning("Ignoring unknown keyword arguments: %s", ",".join(kwargs.keys()))
super(UnicodeToLatexEncoder, self).__init__(**kwargs)
# build generator that expands built-in conversion rules
expanded_conversion_rules = []
# = itertools.chain.from_iterable([
# (get_builtin_conversion_rules(r) if isinstance(r, basestring_cls) else [ r ])
# for r in self.conversion_rules
# ])
for r in self.conversion_rules:
### BEGIN_PYLATEXENC_GET_DEFAULT_SPECS_FN
if isinstance(r, basestring_cls):
expanded_conversion_rules.extend( get_builtin_conversion_rules(r) )
continue
### END_PYLATEXENC_GET_DEFAULT_SPECS_FN
if isinstance(r, basestring_cls):
logger.warning(
"The pylatexenc.latexencode module was preprocessed and/or transpiled "
"without support for implicit/string-specified default rules. Please "
"import the pylatexenc.latexencode.get_builtin_rules module and "
"specify the relevant rules manually."
)
continue
expanded_conversion_rules.append( r )
#
# now "pre-compile" some stuff so that calls to unicode_to_latex() can
# hopefully execute faster
#
# "pre-compile" rules and check rule types:
self._compiled_rules = []
for rule in expanded_conversion_rules:
if rule.rule_type == RULE_DICT:
self._compiled_rules.append(
functools.partial(self._apply_rule_dict, rule.rule, rule)
)
elif rule.rule_type == RULE_REGEX:
self._compiled_rules.append(
functools.partial(self._apply_rule_regex, rule.rule, rule)
)
elif rule.rule_type == RULE_CALLABLE:
thecallable = rule.rule
if _callable_accepts_u2lobj_arg(thecallable):
#if 'u2lobj' in getfullargspec(thecallable)[0]:
thecallable = functools.partial(rule.rule, u2lobj=self)
self._compiled_rules.append(
functools.partial(self._apply_rule_callable, thecallable, rule)
)
else:
raise TypeError("Invalid rule type: {}".format(rule.rule_type))
# bad char policy:
if isinstance(self.unknown_char_policy, basestring_cls):
self._do_unknown_char = self._get_method_fn(
'do_unknown_char',
self.unknown_char_policy,
what='unknown_char_policy'
)
elif callable(self.unknown_char_policy):
fn = self.unknown_char_policy
if _callable_accepts_u2lobj_arg(fn):
#if 'u2lobj' in getfullargspec(fn)[0]:
self._do_unknown_char = functools.partial(self.unknown_char_policy, u2lobj=self)
else:
self._do_unknown_char = self.unknown_char_policy
else:
raise TypeError("Invalid argument for unknown_char_policy: {}"
.format(repr(self.unknown_char_policy)))
# bad char warning:
if not self.unknown_char_warning:
self._do_warn_unknown_char = lambda ch: None # replace method by no-op
else:
self._do_warn_unknown_char = self._do_warn_unknown_char_defaultimpl
# set a method that will skip ascii characters if required:
if self.non_ascii_only:
self._maybe_skip_ascii = self._check_do_skip_ascii
else:
self._maybe_skip_ascii = lambda s, p: False
# set a method to protect replacement latex code, if necessary:
self._apply_protection = self._get_replacement_latex_fn(
self.replacement_latex_protection
)
def _get_method_fn(self, base, name, what):
selfmethname = '_' + base + '_' + name.replace('-', '_')
if not hasattr(self, selfmethname):
raise ValueError("Invalid {}: {}".format(what, name))
return getattr(self, selfmethname)
def _get_replacement_latex_fn(self, replacement_latex_protection):
if callable(replacement_latex_protection):
return replacement_latex_protection
return self._get_method_fn(
'apply_protection',
replacement_latex_protection,
what='replacement_latex_protection'
)
def unicode_to_latex(self, s):
"""
Convert unicode characters in the string `s` into latex escape sequences,
according to the rules and options given to the constructor.
"""
s = unicode_str(s) # make sure s is unicode
s = unicodedata.normalize('NFC', s)
class _NS: pass
p = _NS()
p.latex = self.latex_string_class()
p.pos = 0
while p.pos < len(s):
if self._maybe_skip_ascii(s, p):
continue
for compiledrule in self._compiled_rules:
if compiledrule(s, p):
break
else:
# for-else, see
# https://docs.python.org/2/tutorial/controlflow.html\
# #break-and-continue-statements-and-else-clauses-on-loops
ch = s[p.pos]
o = ord(ch)
if (o >= 32 and o <= 127) or (ch in "\n\r\t"):
p.latex += ch
p.pos += 1
else:
self._do_warn_unknown_char(ch)
p.latex += self._do_unknown_char(ch)
p.pos += 1
return p.latex
def _check_do_skip_ascii(self, s, p):
if ord(s[p.pos]) < 127:
# skip, we only want to convert non-ascii chars
p.latex += s[p.pos]
p.pos += 1
return True
return False
def _apply_rule_dict(self, ruledict, rule, s, p):
o = ord(s[p.pos])
if o in ruledict:
self._apply_replacement(p, ruledict[o], 1, rule)
return True
return None
def _apply_rule_regex(self, ruleregexes, rule, s, p):
for regex, repl in ruleregexes:
m = regex_match_pos(regex, s, p.pos)
if m is not None:
if callable(repl):
replstr = repl(m)
else:
replstr = re_match_expand(m, repl)
self._apply_replacement(p, replstr, m.end() - m.start(), rule)
return True
return None
def _apply_rule_callable(self, rulecallable, rule, s, p):
res = rulecallable(s, p.pos)
if res is None:
return None
(consumed, repl) = res
self._apply_replacement(p, repl, consumed, rule)
return True
def _apply_replacement(self, p, repl, numchars, ruleobj):
# check for possible replacement latex protection, like braces.
protect_fn = self._apply_protection
# maybe the rule object has overridden the replacement_latex_protection to use.
if ruleobj.replacement_latex_protection is not None:
protect_fn = self._get_replacement_latex_fn(
ruleobj.replacement_latex_protection
)
repl = protect_fn(repl)
p.latex += repl
p.pos += numchars
def _apply_protection_none(self, repl):
# no protection
return repl
def _apply_protection_braces(self, repl):
k = repl.rfind('\\')
if k >= 0 and repl[k+1:].isalpha():
# has dangling named macro, apply protection.
return '{' + repl + '}'
return repl
def _apply_protection_braces_almost_all(self, repl):
if repl[0:1] == '\\':
return '{' + repl + '}'
return repl
def _apply_protection_braces_all(self, repl):
return '{' + repl + '}'
def _apply_protection_braces_after_macro(self, repl):
k = repl.rfind('\\')
if k >= 0 and repl[k+1:].isalpha():
# has dangling named macro, apply protection.
return repl + '{}'
return repl
# policies for "bad chars":
def _do_unknown_char_keep(self, ch):
return ch
def _do_unknown_char_replace(self, ch):
return r'{\bfseries ?}'
def _do_unknown_char_ignore(self, ch):
return ''
def _do_unknown_char_fail(self, ch):
raise ValueError(
"No known latex representation for character: U+{} - ‘{}’"
.format(HexstrN(ord(ch), 4), ch)
)
def _do_unknown_char_unihex(self, ch):
return (
r'\ensuremath{\langle}\texttt{U+' + HexstrN(ord(ch), 4)
+ r'}\ensuremath{\rangle}'
)
def _do_warn_unknown_char_defaultimpl(self, ch):
logger.warning(
"No known latex representation for character: U+{} - ‘{}’"
.format(HexstrN(ord(ch), 4), ch)
)
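# A minimal usage sketch (illustrative only), based on the options documented
# in the class docstring above:
#
#     u = UnicodeToLatexEncoder(unknown_char_policy='replace')
#     latex_snippet = u.unicode_to_latex("À votre santé!")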
|
4ef0eb2ef68e5454c5b17c108af865d4580e3508
|
38fff7bdefd8d62a740d51329b50d0e1e49258bb
|
/projects/aiohttp/fuzz_payload_url.py
|
8d4fd3d9f95fcf8c1df76d747690a2a6f787b585
|
[
"Apache-2.0"
] |
permissive
|
google/oss-fuzz
|
026384c2ada61ef68b147548e830f60730c5e738
|
f0275421f84b8f80ee767fb9230134ac97cb687b
|
refs/heads/master
| 2023-08-31T23:30:28.157702
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 63,809,205
| 9,438
| 2,315
|
Apache-2.0
| 2023-09-14T20:32:19
| 2016-07-20T19:39:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
fuzz_payload_url.py
|
#!/usr/bin/python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import atheris
# aiohttp specific
with atheris.instrument_imports():
from aiohttp import http_exceptions, payload
from yarl import URL
@atheris.instrument_func
def TestOneInput(data):
fdp = atheris.FuzzedDataProvider(data)
original = fdp.ConsumeString(sys.maxsize)
try:
p = payload.StringPayload(original)
except UnicodeEncodeError:
        pass
try:
u = URL(original)
except ValueError:
        pass
def main():
atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
atheris.Fuzz()
if __name__ == "__main__":
main()
|
82a7f78ace99895da24efa5bf2a5618770dee4ec
|
88c4d5b462998a9c0411a0243ab95ee05ffee60a
|
/src/graphql/type/assert_name.py
|
d1fe8dd6e02aeacbba7be1d7aab95b5e8e5d3ce5
|
[
"MIT"
] |
permissive
|
graphql-python/graphql-core
|
606f3f3d479d576a4bdcd7d7995c0fddc486282f
|
0c93b8452eed38d4f800c7e71cf6f3f3758cd1c6
|
refs/heads/main
| 2023-09-04T09:22:45.162575
| 2023-06-09T22:13:10
| 2023-06-09T22:13:10
| 143,207,933
| 259
| 101
|
MIT
| 2023-06-09T22:13:11
| 2018-08-01T20:57:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
assert_name.py
|
from ..error import GraphQLError
from ..language.character_classes import is_name_continue, is_name_start
__all__ = ["assert_name", "assert_enum_value_name"]
def assert_name(name: str) -> str:
"""Uphold the spec rules about naming."""
if name is None:
raise TypeError("Must provide name.")
if not isinstance(name, str):
raise TypeError("Expected name to be a string.")
if not name:
raise GraphQLError("Expected name to be a non-empty string.")
if not all(is_name_continue(char) for char in name[1:]):
raise GraphQLError(
f"Names must only contain [_a-zA-Z0-9] but {name!r} does not."
)
if not is_name_start(name[0]):
raise GraphQLError(f"Names must start with [_a-zA-Z] but {name!r} does not.")
return name
def assert_enum_value_name(name: str) -> str:
"""Uphold the spec rules about naming enum values."""
assert_name(name)
if name in {"true", "false", "null"}:
raise GraphQLError(f"Enum values cannot be named: {name}.")
return name
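# A minimal usage sketch (illustrative only):
#
#     assert_name("validName")         # returns "validName"
#     assert_enum_value_name("RED")    # returns "RED"
#     assert_enum_value_name("true")   # raises GraphQLError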
|
87361e21d758da4a085f82dac219bbc13ff6fb89
|
e22fd36933c9114a9df1694e7a6274bf059de2a6
|
/selfdrive/debug/read_dtc_status.py
|
9ad5563975f70ef5488edacbae29bdb657758bb6
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
commaai/openpilot
|
66dfb7f31290bc8f58c9ead95d56697a52b45afb
|
a0b49d54222c52ff0112c402bc0e0d9262e77a66
|
refs/heads/master
| 2023-09-05T21:34:14.076796
| 2023-09-05T21:15:18
| 2023-09-05T21:15:18
| 74,627,617
| 46,071
| 9,878
|
MIT
| 2023-09-14T21:51:23
| 2016-11-24T01:33:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
read_dtc_status.py
|
#!/usr/bin/env python3
import sys
import argparse
from subprocess import check_output, CalledProcessError
from panda import Panda
from panda.python.uds import UdsClient, SESSION_TYPE, DTC_REPORT_TYPE, DTC_STATUS_MASK_TYPE
from panda.python.uds import get_dtc_num_as_str, get_dtc_status_names
parser = argparse.ArgumentParser(description="read DTC status")
parser.add_argument("addr", type=lambda x: int(x,0))
parser.add_argument("--bus", type=int, default=0)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
try:
check_output(["pidof", "boardd"])
print("boardd is running, please kill openpilot before running this script! (aborted)")
sys.exit(1)
except CalledProcessError as e:
if e.returncode != 1: # 1 == no process found (boardd not running)
raise e
panda = Panda()
panda.set_safety_mode(Panda.SAFETY_ELM327)
uds_client = UdsClient(panda, args.addr, bus=args.bus, debug=args.debug)
print("extended diagnostic session ...")
uds_client.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC)
print("read diagnostic codes ...")
data = uds_client.read_dtc_information(DTC_REPORT_TYPE.DTC_BY_STATUS_MASK, DTC_STATUS_MASK_TYPE.ALL)
print("status availability:", " ".join(get_dtc_status_names(data[0])))
print("DTC status:")
for i in range(1, len(data), 4):
dtc_num = get_dtc_num_as_str(data[i:i+3])
dtc_status = " ".join(get_dtc_status_names(data[i+3]))
print(dtc_num, dtc_status)
|
5a14460ee9751d80b2d0723ef223cf8e46e2bcf6
|
967968e56ec17a2ee641af84cfca669c1d16a6f1
|
/tests/ad/ldap_configuration/test_ldap_configuration_schema.py
|
27e0f84bebc3cbcf8600ef207a01664827b8af53
|
[
"MIT"
] |
permissive
|
tenable/pyTenable
|
72108c2564682e65cba181ded6ef6a9c990ef004
|
4e31049891f55016168b14ae30d332a965523640
|
refs/heads/master
| 2023-08-30T23:26:33.161062
| 2023-08-08T04:39:04
| 2023-08-08T04:39:04
| 114,689,090
| 300
| 211
|
MIT
| 2023-08-08T04:39:05
| 2017-12-18T21:23:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
test_ldap_configuration_schema.py
|
'''
Testing the ldap configuration schemas
'''
import pytest
from tenable.ad.ldap_configuration.schema import LDAPConfigurationSchema
@pytest.fixture()
def ldap_configuration_schema():
return {
'enabled': True,
'url': 'customer.tenable.ad',
'search_user_dn': 'customer.tenable.ad',
'search_user_password': None,
'user_search_base': 'tenable.ad',
'user_search_filter': '.ad',
'allowed_groups': [{
'name': 'groups name',
'default_role_ids': [1, 2],
'default_profile_id': 1
}]
}
def test_ldap_configuration_schema(ldap_configuration_schema):
'''
test ldap configuration schema
'''
test_resp = {
'enabled': True,
'url': 'customer.tenable.ad',
'searchUserDN': 'customer.tenable.ad',
'searchUserPassword': None,
'userSearchBase': 'tenable.ad',
'userSearchFilter': '.ad',
'allowedGroups': [{
'name': 'groups name',
'defaultRoleIds': [1, 2],
'defaultProfileId': 1
}]
}
schema = LDAPConfigurationSchema()
req = schema.dump(schema.load(ldap_configuration_schema))
assert test_resp['enabled'] == req['enabled']
assert test_resp['url'] == req['url']
assert test_resp['searchUserDN'] == req['searchUserDN']
assert test_resp['searchUserPassword'] == req['searchUserPassword']
assert test_resp['userSearchBase'] == req['userSearchBase']
assert test_resp['userSearchFilter'] == req['userSearchFilter']
assert test_resp['allowedGroups'][0]['name'] == \
req['allowedGroups'][0]['name']
assert test_resp['allowedGroups'][0]['defaultRoleIds'] == \
req['allowedGroups'][0]['defaultRoleIds']
assert test_resp['allowedGroups'][0]['defaultProfileId'] == \
req['allowedGroups'][0]['defaultProfileId']
|
08b7935f4bdd20dc266e219d3fbb302985601ec4
|
21be7833b4935fb4a8f39b816fe868d6cda78b07
|
/termius/core/utils.py
|
53c202de0977d774cebedf8a3bd88508e6bdb532
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
termius/termius-cli
|
dd45295dada12ee5dbd7a864e6fcf634dfbd02a3
|
2664d0c70d3d682ad931b885b4965447b156c280
|
refs/heads/master
| 2023-09-05T18:53:04.812354
| 2021-04-05T04:10:56
| 2021-04-05T04:10:56
| 10,905,793
| 262
| 41
|
NOASSERTION
| 2023-03-30T21:40:42
| 2013-06-24T11:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
utils.py
|
# -*- coding: utf-8 -*-
"""Miscellaneous extra functions."""
from six import PY2, PY3
if PY2:
p_input = raw_input
p_map = map
def to_bytes(s):
if isinstance(s, str):
return s
if isinstance(s, unicode):
return s.encode('utf-8')
to_str = to_bytes
def bchr(s):
return chr(s)
def bord(s):
return ord(s)
elif PY3:
p_input = input
p_map = lambda f, it: list(map(f, it))
def to_bytes(s):
if isinstance(s, bytes):
return s
if isinstance(s, str):
return s.encode('utf-8')
def to_str(s):
if isinstance(s, bytes):
return s.decode('utf-8')
if isinstance(s, str):
return s
def bchr(s):
return bytes([s])
def bord(s):
return s
|
60e7810594ddffce4a1ffaf2ab57af80a5dce42b
|
7c3bace625eb6ece1b06326940b8e89ba3fdc68f
|
/airtest/core/device.py
|
56245c873c82515eebdda317fe1aae3f6af5f35a
|
[
"Apache-2.0"
] |
permissive
|
AirtestProject/Airtest
|
64c218a54e6a28cba42af0ffe8a81d30703ffcca
|
bf49dfad0be05125df75c64ea47a282132bc03d5
|
refs/heads/master
| 2023-08-31T05:31:32.059552
| 2023-08-22T06:23:36
| 2023-08-22T06:23:36
| 118,709,540
| 7,580
| 1,256
|
Apache-2.0
| 2023-08-22T06:23:37
| 2018-01-24T04:00:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
device.py
|
# encoding=utf-8
from six import with_metaclass
class MetaDevice(type):
REGISTRY = {}
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
meta.REGISTRY[name] = cls
return cls
class Device(with_metaclass(MetaDevice, object)):
"""base class for test device"""
def __init__(self):
super(Device, self).__init__()
@property
def uuid(self):
self._raise_not_implemented_error()
def shell(self, *args, **kwargs):
self._raise_not_implemented_error()
def snapshot(self, *args, **kwargs):
self._raise_not_implemented_error()
def touch(self, target, **kwargs):
self._raise_not_implemented_error()
def double_click(self, target):
raise NotImplementedError
def swipe(self, t1, t2, **kwargs):
self._raise_not_implemented_error()
def keyevent(self, key, **kwargs):
self._raise_not_implemented_error()
def text(self, text, enter=True):
self._raise_not_implemented_error()
def start_app(self, package, **kwargs):
self._raise_not_implemented_error()
def stop_app(self, package):
self._raise_not_implemented_error()
def clear_app(self, package):
self._raise_not_implemented_error()
def list_app(self, **kwargs):
self._raise_not_implemented_error()
def install_app(self, uri, **kwargs):
self._raise_not_implemented_error()
def uninstall_app(self, package):
self._raise_not_implemented_error()
def get_current_resolution(self):
self._raise_not_implemented_error()
def get_render_resolution(self):
self._raise_not_implemented_error()
def get_ip_address(self):
self._raise_not_implemented_error()
def _raise_not_implemented_error(self):
platform = self.__class__.__name__
raise NotImplementedError("Method not implemented on %s" % platform)
def disconnect(self):
pass
|
386b699634e3ececb1623ccf0170ef583d3dcfd4
|
58cc3f8a833ca18a7bec1048d00d38f17fa72cd8
|
/research/frombase2.py
|
b81cb9b55aaf947fb5dfa4e11e47ad1aa70ef056
|
[
"BSD-3-Clause"
] |
permissive
|
dropbox/rust-brotli
|
b3e46c619f9e4394416ad465e7219e0ff33515d6
|
e216a8a8ec15b875ffe76c06e0a3bf562af67c20
|
refs/heads/master
| 2023-09-02T01:21:50.846998
| 2022-10-28T06:37:01
| 2023-01-16T00:36:19
| 56,421,556
| 765
| 93
|
BSD-3-Clause
| 2023-08-27T19:11:54
| 2016-04-17T06:01:45
|
Rust
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
frombase2.py
|
import sys
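# Decode a stream of '0'/'1' characters from stdin into bytes: each group of
# 8 bits is packed least-significant bit first; any other character ends the
# input, and a trailing partial byte is still emitted.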
result = []
cur_count = 0
cur_val = 0
for byte in sys.stdin.read():
if byte == '1':
cur_val |= (1<<cur_count)
elif byte != '0':
break
cur_count += 1
if cur_count == 8:
result.append(chr(cur_val))
cur_val = 0
cur_count = 0
if cur_count != 0:
result.append(chr(cur_val))
sys.stdout.write(''.join(result))
|
92bbc5a684ae9b87f4642502d2ab859125e95887
|
9258c2915fafe684c9bb6fb09e27bca9e8a5ffcd
|
/tests/test_strrange.py
|
eab8ca5465d16861149932fc4a6a8f7aeacdeff1
|
[
"MIT"
] |
permissive
|
runfalk/spans
|
0b876c9c29ddb8b7580192e32fa21f537f4d6aa4
|
1584ec3d997eca924e37c282cba9c67cb7e0c9df
|
refs/heads/master
| 2022-12-24T17:53:14.411842
| 2022-06-19T10:56:08
| 2022-06-19T11:01:32
| 12,484,739
| 127
| 12
|
MIT
| 2022-12-09T06:52:51
| 2013-08-30T11:16:12
|
Python
|
UTF-8
|
Python
| false
| false
| 616
|
py
|
test_strrange.py
|
import sys
import pytest
from spans import strrange
@pytest.mark.parametrize(
"span, last",
[
(strrange("a", "c"), "b"),
(strrange("aa", "cc"), "cb"),
],
)
def test_last(span, last):
assert span.last == last
@pytest.mark.parametrize(
"a, b",
[
("", ""),
("b", "a"),
(chr(0), chr(sys.maxunicode)),
],
)
def test_prev(a, b):
assert strrange.prev(a) == b
@pytest.mark.parametrize(
"a, b",
[
("", ""),
("a", "b"),
(chr(sys.maxunicode), chr(0)),
],
)
def test_next(a, b):
assert strrange.next(a) == b
|
84b9bcce248cdcf6200b576700c71ffd09f869b0
|
e7727fbb00c73818de931a584af58657eeb69513
|
/dymos/examples/racecar/accelerationODE.py
|
1aeda9a9bc635c44f182bd35bbe541d601c5e87f
|
[
"Apache-2.0"
] |
permissive
|
OpenMDAO/dymos
|
f3bb20ebccf5f9408295403c72cb1b25c8e48019
|
1aca42a4fedcca6e493c5516fadc2d78e4709797
|
refs/heads/master
| 2023-08-18T15:30:20.664129
| 2023-08-09T12:33:12
| 2023-08-09T12:33:12
| 121,387,262
| 165
| 56
|
Apache-2.0
| 2023-09-14T17:51:14
| 2018-02-13T13:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,523
|
py
|
accelerationODE.py
|
import openmdao.api as om
import numpy as np
class AccelerationODE(om.ExplicitComponent):
def initialize(self):
self.options.declare('num_nodes', types=int)
def setup(self):
nn = self.options['num_nodes']
# constants
self.add_input('tau_y', val=0.2, desc='lateral load transfer time constant', units='s')
self.add_input('tau_x', val=0.2, desc='longitudinal load transfer time constant', units='s')
# states
self.add_input('V', val=np.zeros(nn), desc='speed', units='m/s')
self.add_input('lambda', val=np.zeros(nn), desc='body slip angle', units='rad')
self.add_input('omega', val=np.zeros(nn), desc='yaw rate', units='rad/s')
self.add_input('Vdot', val=np.zeros(nn), desc='speed', units='m/s**2')
self.add_input('lambdadot', val=np.zeros(nn), desc='body slip angle', units='rad/s')
self.add_input('ax', val=np.zeros(nn), desc='longitudinal acceleration', units='m/s**2')
self.add_input('ay', val=np.zeros(nn), desc='lateral acceleration', units='m/s**2')
# outputs
self.add_output('axdot', val=np.zeros(nn), desc='longitudinal jerk', units='m/s**3')
self.add_output('aydot', val=np.zeros(nn), desc='lateral jerk', units='m/s**3')
# Setup partials
arange = np.arange(self.options['num_nodes'], dtype=int)
# partials
self.declare_partials(of='axdot', wrt='ax', rows=arange, cols=arange)
self.declare_partials(of='axdot', wrt='Vdot', rows=arange, cols=arange)
self.declare_partials(of='axdot', wrt='omega', rows=arange, cols=arange)
self.declare_partials(of='axdot', wrt='V', rows=arange, cols=arange)
self.declare_partials(of='axdot', wrt='lambda', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='ay', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='omega', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='V', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='Vdot', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='lambda', rows=arange, cols=arange)
self.declare_partials(of='aydot', wrt='lambdadot', rows=arange, cols=arange)
def compute(self, inputs, outputs):
tau_y = inputs['tau_y']
tau_x = inputs['tau_x']
V = inputs['V']
lamb = inputs['lambda']
omega = inputs['omega']
Vdot = inputs['Vdot']
lambdadot = inputs['lambdadot']
ax = inputs['ax']
ay = inputs['ay']
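        # First-order lag dynamics for the accelerations: axdot and aydot drive
        # ax and ay toward the instantaneous accelerations implied by V, Vdot,
        # lambda, lambdadot and omega, with load-transfer time constants tau_x
        # and tau_y.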
outputs['axdot'] = (Vdot+omega*V*lamb-ax)/tau_x
outputs['aydot'] = (omega*V-(V*lambdadot+Vdot*lamb)-ay)/tau_y
def compute_partials(self, inputs, jacobian):
tau_y = inputs['tau_y']
tau_x = inputs['tau_x']
V = inputs['V']
lamb = inputs['lambda']
omega = inputs['omega']
Vdot = inputs['Vdot']
lambdadot = inputs['lambdadot']
jacobian['axdot', 'ax'] = -1/tau_x
jacobian['axdot', 'Vdot'] = 1/tau_x
jacobian['axdot', 'omega'] = (V*lamb)/tau_x
jacobian['axdot', 'lambda'] = (omega*V)/tau_x
jacobian['axdot', 'V'] = (omega*lamb)/tau_x
jacobian['aydot', 'ay'] = -1/tau_y
jacobian['aydot', 'omega'] = V/tau_y
jacobian['aydot', 'V'] = (omega-lambdadot)/tau_y
jacobian['aydot', 'lambda'] = -Vdot/tau_y
jacobian['aydot', 'lambdadot'] = -V/tau_y
jacobian['aydot', 'Vdot'] = -lamb/tau_y
|
2bfc76e78c1294b6cc7b35adf38d7d307cd16c27
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/tutorials/cpu/cpu_best_practises.py
|
92be58bf28fb521c5e7e175ea4e10660407b7d66
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,965
|
py
|
cpu_best_practises.py
|
"""
CPU Best Practices
=====================================================
This chapter focuses on providing best practices for environment setup
to get the best performance during training and inference on the CPU.
Intel
`````````````````````````````
Hyper-threading
---------------------------
For specific workloads such as GNNs, the suggested default setting for best performance
is to turn off hyper-threading.
Turning off the hyper-threading feature can be done at the BIOS [#f1]_ or operating system level [#f2]_ [#f3]_.
Alternative memory allocators
------------------------------
Alternative memory allocators, such as *tcmalloc*, might provide significant performance improvements by more efficient memory usage, reducing overhead on unnecessary memory allocations or deallocations. *tcmalloc* uses thread-local caches to reduce overhead on thread synchronization, locks contention by using spinlocks and per-thread arenas respectively and categorizes memory allocations by sizes to reduce overhead on memory fragmentation.
To take advantage of the optimizations *tcmalloc* provides, install it on your system (on Ubuntu *tcmalloc* is included in the libgoogle-perftools4 package) and add the shared library to the LD_PRELOAD environment variable:
.. code-block:: shell
export LD_PRELOAD=/lib/x86_64-linux-gnu/libtcmalloc.so.4:$LD_PRELOAD
OpenMP settings
---------------------------
During training on CPU, the training and dataloading parts need to run simultaneously.
The best OpenMP parallelization performance
can be achieved by setting up the optimal number of working threads and dataloading workers.
Nodes with a high number of CPU cores may benefit from a higher number of dataloading workers.
A good starting point could be setting num_threads=4 in the Dataloader constructor for nodes with 32 cores or more.
If the number of cores is rather small, the best performance might be achieved with just one
dataloader worker, or even with dataloader num_threads=0, so that dataloading and training are performed
in the same process (see the sketch below).
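A minimal sketch of the idea (illustrative only: ``graph``, ``train_nids`` and ``sampler``
are placeholders, and the dataloading-worker argument shown here is the PyTorch-style
``num_workers`` argument of the DGL DataLoader):

.. code:: python

    dataloader = dgl.dataloading.DataLoader(
        graph, train_nids, sampler,
        batch_size=1024,
        shuffle=True,
        num_workers=4)  # use 0 to keep dataloading in the main process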
**Dataloader CPU affinity**
If the number of dataloader workers is more than 0, please consider using the **use_cpu_affinity()** method
of the DGL Dataloader class; it will generally result in a significant performance improvement for training.
*use_cpu_affinity* will set the proper OpenMP thread count (equal to the number of CPU cores allocated for the main process),
affinitize dataloader workers to separate CPU cores, and restrict the main process to the remaining cores.
In multi-NUMA-node setups, *use_cpu_affinity* will only use cores of NUMA node 0 by default,
under the assumption that the workload scales poorly across multiple NUMA nodes. If you believe
your workload will perform better utilizing more than one NUMA node, you can pass
the lists of cores to use for dataloading (loader_cores) and for compute (compute_cores).
The loader_cores and compute_cores arguments (lists of CPU cores) can be passed to *enable_cpu_affinity* for more
control over which cores should be used, e.g. in case a workload scales well across multiple NUMA nodes.
Usage:
.. code:: python
dataloader = dgl.dataloading.DataLoader(...)
...
with dataloader.enable_cpu_affinity():
<training loop or inferencing>
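For example, a minimal sketch restricting dataloading and compute to explicit core lists
(the core ids are illustrative and depend on your machine's topology):

.. code:: python

    dataloader = dgl.dataloading.DataLoader(...)
    ...
    with dataloader.enable_cpu_affinity(loader_cores=[0, 1, 2, 3],
                                        compute_cores=list(range(4, 32))):
        <training loop or inferencing>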
**Manual control**
For advanced and more fine-grained control over OpenMP settings, please refer to the Maximize Performance of Intel® Optimization for PyTorch* on CPU [#f4]_ article.
.. rubric:: Footnotes
.. [#f1] https://www.intel.com/content/www/us/en/support/articles/000007645/boards-and-kits/desktop-boards.html
.. [#f2] https://aws.amazon.com/blogs/compute/disabling-intel-hyper-threading-technology-on-amazon-linux/
.. [#f3] https://aws.amazon.com/blogs/compute/disabling-intel-hyper-threading-technology-on-amazon-ec2-windows-instances/
.. [#f4] https://software.intel.com/content/www/us/en/develop/articles/how-to-get-better-performance-on-pytorchcaffe2-with-intel-acceleration.html
"""
|
29f9618c7e899f2e34ee0e678412cb0141d1e8be
|
9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b
|
/docs/examples/widgets/sparkline_colors.py
|
d6a4549a6e014003e24421c406cfd1ecb95bb8ec
|
[
"MIT"
] |
permissive
|
Textualize/textual
|
b8cf4b5d18069fccc7623b3116436f479e1ef446
|
b74ac1e47fdd16133ca567390c99ea19de278c5a
|
refs/heads/main
| 2023-08-30T21:40:21.563823
| 2023-08-30T10:18:27
| 2023-08-30T10:18:27
| 355,959,597
| 14,818
| 588
|
MIT
| 2023-09-14T20:22:02
| 2021-04-08T15:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 979
|
py
|
sparkline_colors.py
|
from math import sin
from textual.app import App, ComposeResult
from textual.widgets import Sparkline
class SparklineColorsApp(App[None]):
CSS_PATH = "sparkline_colors.tcss"
def compose(self) -> ComposeResult:
nums = [abs(sin(x / 3.14)) for x in range(0, 360 * 6, 20)]
yield Sparkline(nums, summary_function=max, id="fst")
yield Sparkline(nums, summary_function=max, id="snd")
yield Sparkline(nums, summary_function=max, id="trd")
yield Sparkline(nums, summary_function=max, id="frt")
yield Sparkline(nums, summary_function=max, id="fft")
yield Sparkline(nums, summary_function=max, id="sxt")
yield Sparkline(nums, summary_function=max, id="svt")
yield Sparkline(nums, summary_function=max, id="egt")
yield Sparkline(nums, summary_function=max, id="nnt")
yield Sparkline(nums, summary_function=max, id="tnt")
app = SparklineColorsApp()
if __name__ == "__main__":
app.run()
|
6edabd3206f6d1edab2d824cd8d4d1e537a52052
|
bb021c074c95c4fb684cd543b288bc0b976df188
|
/intake/source/tests/plugin_searchpath/intake_foo/__init__.py
|
362973e5b9f6042aab1863ea6b909a424be5a359
|
[
"BSD-2-Clause"
] |
permissive
|
intake/intake
|
6c96d4bf32f125fbd5df322377ae2a98ac76be99
|
81b1567a2030adfb22b856b4f63cefe35de68983
|
refs/heads/master
| 2023-08-25T14:07:08.855001
| 2023-08-24T19:49:13
| 2023-08-24T19:49:13
| 100,307,970
| 774
| 116
|
BSD-2-Clause
| 2023-09-11T13:51:16
| 2017-08-14T20:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
__init__.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
from intake.source.base import DataSource
class FooPlugin(DataSource):
name = "foo"
version = "0.1"
container = "dataframe"
partition_access = False
def __init__(self, **kwargs):
pass
|
1ce930df5556099704a4cdfb7c769bb43f8360e8
|
360328d098a74581d0822fba489dd15e0d4e7ab3
|
/src/richie/plugins/nesteditem/factories.py
|
749c14dfc5c55b4161141b8132df306aa0f397a4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
openfun/richie
|
0cef545486267bfb40e75e5fb2ce2a74f85a53ff
|
f2d46fc46b271eb3b4d565039a29c15ba15f027c
|
refs/heads/master
| 2023-08-31T23:51:37.714179
| 2023-08-29T15:25:04
| 2023-08-29T15:48:39
| 111,388,461
| 238
| 96
|
MIT
| 2023-09-13T12:48:53
| 2017-11-20T09:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
factories.py
|
"""
NestedItem CMS plugin factories
"""
import factory
from .defaults import NESTEDITEM_VARIANTS
from .models import NestedItem
class NestedItemFactory(factory.django.DjangoModelFactory):
"""
Factory to create random instances of NestedItem for testing.
"""
class Meta:
model = NestedItem
variant = NESTEDITEM_VARIANTS[0][0]
content = factory.Faker("text", max_nb_chars=84)
|
6fb5716491c0dd69402517487d2a5eaa435fdd38
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/event_message_response.py
|
1e70fd28e9e64b5a1f2040d828023b03299ae8b3
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,635
|
py
|
event_message_response.py
|
from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .event_message import EventMessage
from .response_type import ResponseType
from .time_slot import TimeSlot
from .event_message import EventMessage
@dataclass
class EventMessageResponse(EventMessage):
# The OdataType property
odata_type: Optional[str] = "#microsoft.graph.eventMessageResponse"
# The proposedNewTime property
proposed_new_time: Optional[TimeSlot] = None
# The responseType property
response_type: Optional[ResponseType] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> EventMessageResponse:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: EventMessageResponse
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return EventMessageResponse()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
        from .event_message import EventMessage
        from .response_type import ResponseType
        from .time_slot import TimeSlot
fields: Dict[str, Callable[[Any], None]] = {
"proposedNewTime": lambda n : setattr(self, 'proposed_new_time', n.get_object_value(TimeSlot)),
"responseType": lambda n : setattr(self, 'response_type', n.get_enum_value(ResponseType)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_object_value("proposedNewTime", self.proposed_new_time)
writer.write_enum_value("responseType", self.response_type)
|
7b67405fcaedad8d6ce0b665cfac4c4fa7471544
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/win32_event_log/datadog_checks/win32_event_log/utils.py
|
dfc548fed7259903627dc4b0841aaeace2371a12
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
utils.py
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import win32api # no cov
def get_last_error_message(): # no cov
"""
Helper function to get the error message from the calling thread's most recently failed operation.
It appears that in most cases pywin32 catches such failures and raises Python exceptions.
"""
# https://docs.microsoft.com/en-us/windows/win32/api/errhandlingapi/nf-errhandlingapi-getlasterror
# https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-formatmessage
# https://mhammond.github.io/pywin32/win32api__FormatMessage_meth.html
return win32api.FormatMessage(0)
|
e6973a715135c9b47cc035bbf08367a5ea4d05ba
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/modules/boto_asg.py
|
c52c794679100de2b97e60fbd707cb4c573c9c8c
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 36,550
|
py
|
boto_asg.py
|
"""
Connection module for Amazon Autoscale Groups
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit autoscale credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
asg.keyid: GKTADJGHEIQSXMKKRBJ08H
asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
asg.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
:depends: boto3
"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
import datetime
import email.mime.multipart
import logging
import sys
import time
import salt.utils.compat
import salt.utils.json
import salt.utils.odict as odict
import salt.utils.versions
log = logging.getLogger(__name__)
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
try:
import boto
import boto.ec2
import boto.ec2.autoscale as autoscale
import boto.ec2.blockdevicemapping as blockdevicemapping
import boto.ec2.instance
logging.getLogger("boto").setLevel(logging.CRITICAL)
import boto3 # pylint: disable=unused-import
from botocore.exceptions import ClientError
logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
"""
Only load if boto libraries exist.
"""
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__["boto.assign_funcs"](
__name__, "asg", module="ec2.autoscale", pack=__salt__
)
setattr(
sys.modules[__name__],
"_get_ec2_conn",
__utils__["boto.get_connection_func"]("ec2"),
)
return has_boto_reqs
def __init__(opts):
if HAS_BOTO:
__utils__["boto3.assign_funcs"](
__name__, "autoscaling", get_conn_funcname="_get_conn_autoscaling_boto3"
)
def exists(name, region=None, key=None, keyid=None, profile=None):
"""
Check to see if an autoscale group exists.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.exists myasg region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
_conn = conn.get_all_groups(names=[name])
if _conn:
return True
else:
msg = "The autoscale group does not exist in region {}".format(region)
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return False
def get_config(name, region=None, key=None, keyid=None, profile=None):
"""
Get the configuration for an autoscale group.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.get_config myasg region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
asg = conn.get_all_groups(names=[name])
if asg:
asg = asg[0]
else:
return {}
ret = odict.OrderedDict()
attrs = [
"name",
"availability_zones",
"default_cooldown",
"desired_capacity",
"health_check_period",
"health_check_type",
"launch_config_name",
"load_balancers",
"max_size",
"min_size",
"placement_group",
"vpc_zone_identifier",
"tags",
"termination_policies",
"suspended_processes",
]
for attr in attrs:
# Tags are objects, so we need to turn them into dicts.
if attr == "tags":
_tags = []
for tag in asg.tags:
_tag = odict.OrderedDict()
_tag["key"] = tag.key
_tag["value"] = tag.value
_tag["propagate_at_launch"] = tag.propagate_at_launch
_tags.append(_tag)
ret["tags"] = _tags
# Boto accepts a string or list as input for vpc_zone_identifier,
# but always returns a comma separated list. We require lists in
# states.
elif attr == "vpc_zone_identifier":
ret[attr] = getattr(asg, attr).split(",")
# convert SuspendedProcess objects to names
elif attr == "suspended_processes":
suspended_processes = getattr(asg, attr)
ret[attr] = sorted(x.process_name for x in suspended_processes)
else:
ret[attr] = getattr(asg, attr)
# scaling policies
policies = conn.get_all_policies(as_group=name)
ret["scaling_policies"] = []
for policy in policies:
ret["scaling_policies"].append(
dict(
[
("name", policy.name),
("adjustment_type", policy.adjustment_type),
("scaling_adjustment", policy.scaling_adjustment),
("min_adjustment_step", policy.min_adjustment_step),
("cooldown", policy.cooldown),
]
)
)
# scheduled actions
actions = conn.get_all_scheduled_actions(as_group=name)
ret["scheduled_actions"] = {}
for action in actions:
end_time = None
if action.end_time:
end_time = action.end_time.isoformat()
ret["scheduled_actions"][action.name] = dict(
[
("min_size", action.min_size),
("max_size", action.max_size),
# AWS bug
("desired_capacity", int(action.desired_capacity)),
("start_time", action.start_time.isoformat()),
("end_time", end_time),
("recurrence", action.recurrence),
]
)
return ret
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return {}
def create(
name,
launch_config_name,
availability_zones,
min_size,
max_size,
desired_capacity=None,
load_balancers=None,
default_cooldown=None,
health_check_type=None,
health_check_period=None,
placement_group=None,
vpc_zone_identifier=None,
tags=None,
termination_policies=None,
suspended_processes=None,
scaling_policies=None,
scheduled_actions=None,
region=None,
notification_arn=None,
notification_types=None,
key=None,
keyid=None,
profile=None,
):
"""
Create an autoscale group.
CLI Example:
.. code-block:: bash
        salt myminion boto_asg.create myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, str):
availability_zones = salt.utils.json.loads(availability_zones)
if isinstance(load_balancers, str):
load_balancers = salt.utils.json.loads(load_balancers)
if isinstance(vpc_zone_identifier, str):
vpc_zone_identifier = salt.utils.json.loads(vpc_zone_identifier)
if isinstance(tags, str):
tags = salt.utils.json.loads(tags)
# Make a list of tag objects from the dict.
_tags = []
if tags:
for tag in tags:
try:
key = tag.get("key")
except KeyError:
log.error("Tag missing key.")
return False
try:
value = tag.get("value")
except KeyError:
log.error("Tag missing value.")
return False
propagate_at_launch = tag.get("propagate_at_launch", False)
_tag = autoscale.Tag(
key=key,
value=value,
resource_id=name,
propagate_at_launch=propagate_at_launch,
)
_tags.append(_tag)
if isinstance(termination_policies, str):
termination_policies = salt.utils.json.loads(termination_policies)
if isinstance(suspended_processes, str):
suspended_processes = salt.utils.json.loads(suspended_processes)
if isinstance(scheduled_actions, str):
scheduled_actions = salt.utils.json.loads(scheduled_actions)
retries = 30
while True:
try:
_asg = autoscale.AutoScalingGroup(
name=name,
launch_config=launch_config_name,
availability_zones=availability_zones,
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
placement_group=placement_group,
tags=_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies,
suspended_processes=suspended_processes,
)
conn.create_auto_scaling_group(_asg)
# create scaling policies
_create_scaling_policies(conn, name, scaling_policies)
# create scheduled actions
_create_scheduled_actions(conn, name, scheduled_actions)
# create notifications
if notification_arn and notification_types:
conn.put_notification_configuration(
_asg, notification_arn, notification_types
)
log.info("Created ASG %s", name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
msg = "Failed to create ASG %s", name
log.error(msg)
return False
def update(
name,
launch_config_name,
availability_zones,
min_size,
max_size,
desired_capacity=None,
load_balancers=None,
default_cooldown=None,
health_check_type=None,
health_check_period=None,
placement_group=None,
vpc_zone_identifier=None,
tags=None,
termination_policies=None,
suspended_processes=None,
scaling_policies=None,
scheduled_actions=None,
notification_arn=None,
notification_types=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Update an autoscale group.
CLI Example:
.. code-block:: bash
        salt myminion boto_asg.update myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn3 = _get_conn_autoscaling_boto3(
region=region, key=key, keyid=keyid, profile=profile
)
if not conn:
return False, "failed to connect to AWS"
if isinstance(availability_zones, str):
availability_zones = salt.utils.json.loads(availability_zones)
if isinstance(load_balancers, str):
load_balancers = salt.utils.json.loads(load_balancers)
if isinstance(vpc_zone_identifier, str):
vpc_zone_identifier = salt.utils.json.loads(vpc_zone_identifier)
if isinstance(tags, str):
tags = salt.utils.json.loads(tags)
if isinstance(termination_policies, str):
termination_policies = salt.utils.json.loads(termination_policies)
if isinstance(suspended_processes, str):
suspended_processes = salt.utils.json.loads(suspended_processes)
if isinstance(scheduled_actions, str):
scheduled_actions = salt.utils.json.loads(scheduled_actions)
# Massage our tagset into add / remove lists
    # Use a boto3 call here b/c the boto2 call doesn't implement filters
current_tags = conn3.describe_tags(
Filters=[{"Name": "auto-scaling-group", "Values": [name]}]
).get("Tags", [])
current_tags = [
{
"key": t["Key"],
"value": t["Value"],
"resource_id": t["ResourceId"],
"propagate_at_launch": t.get("PropagateAtLaunch", False),
}
for t in current_tags
]
add_tags = []
desired_tags = []
if tags:
tags = __utils__["boto3.ordered"](tags)
for tag in tags:
try:
key = tag.get("key")
except KeyError:
log.error("Tag missing key.")
return False, "Tag {} missing key".format(tag)
try:
value = tag.get("value")
except KeyError:
log.error("Tag missing value.")
return False, "Tag {} missing value".format(tag)
propagate_at_launch = tag.get("propagate_at_launch", False)
_tag = {
"key": key,
"value": value,
"resource_id": name,
"propagate_at_launch": propagate_at_launch,
}
if _tag not in current_tags:
add_tags.append(_tag)
desired_tags.append(_tag)
delete_tags = [t for t in current_tags if t not in desired_tags]
retries = 30
while True:
try:
_asg = autoscale.AutoScalingGroup(
connection=conn,
name=name,
launch_config=launch_config_name,
availability_zones=availability_zones,
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
placement_group=placement_group,
tags=add_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies,
)
if notification_arn and notification_types:
conn.put_notification_configuration(
_asg, notification_arn, notification_types
)
_asg.update()
# Seems the update call doesn't handle tags, so we'll need to update
# that separately.
if add_tags:
log.debug("Adding/updating tags from ASG: %s", add_tags)
conn.create_or_update_tags([autoscale.Tag(**t) for t in add_tags])
if delete_tags:
log.debug("Deleting tags from ASG: %s", delete_tags)
conn.delete_tags([autoscale.Tag(**t) for t in delete_tags])
# update doesn't handle suspended_processes either
# Resume all processes
_asg.resume_processes()
# suspend any that are specified. Note that the boto default of empty
# list suspends all; don't do that.
if suspended_processes:
_asg.suspend_processes(suspended_processes)
log.info("Updated ASG %s", name)
# ### scaling policies
# delete all policies, then recreate them
for policy in conn.get_all_policies(as_group=name):
conn.delete_policy(policy.name, autoscale_group=name)
_create_scaling_policies(conn, name, scaling_policies)
# ### scheduled actions
# delete all scheduled actions, then recreate them
for scheduled_action in conn.get_all_scheduled_actions(as_group=name):
conn.delete_scheduled_action(
scheduled_action.name, autoscale_group=name
)
_create_scheduled_actions(conn, name, scheduled_actions)
return True, ""
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
msg = "Failed to update ASG {}".format(name)
log.error(msg)
return False, str(e)
def _create_scaling_policies(conn, as_name, scaling_policies):
"helper function to create scaling policies"
if scaling_policies:
for policy in scaling_policies:
policy = autoscale.policy.ScalingPolicy(
name=policy["name"],
as_name=as_name,
adjustment_type=policy["adjustment_type"],
scaling_adjustment=policy["scaling_adjustment"],
min_adjustment_step=policy.get("min_adjustment_step", None),
cooldown=policy["cooldown"],
)
conn.create_scaling_policy(policy)
def _create_scheduled_actions(conn, as_name, scheduled_actions):
"""
Helper function to create scheduled actions
"""
if scheduled_actions:
for name, action in scheduled_actions.items():
if "start_time" in action and isinstance(action["start_time"], str):
action["start_time"] = datetime.datetime.strptime(
action["start_time"], DATE_FORMAT
)
if "end_time" in action and isinstance(action["end_time"], str):
action["end_time"] = datetime.datetime.strptime(
action["end_time"], DATE_FORMAT
)
conn.create_scheduled_group_action(
as_name,
name,
desired_capacity=action.get("desired_capacity"),
min_size=action.get("min_size"),
max_size=action.get("max_size"),
start_time=action.get("start_time"),
end_time=action.get("end_time"),
recurrence=action.get("recurrence"),
)
def delete(name, force=False, region=None, key=None, keyid=None, profile=None):
"""
Delete an autoscale group.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.delete myasg region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
conn.delete_auto_scaling_group(name, force)
msg = "Deleted autoscale group {}.".format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
msg = "Failed to delete autoscale group {}".format(name)
log.error(msg)
return False
def get_cloud_init_mime(cloud_init):
"""
Get a mime multipart encoded string from a cloud-init dict. Currently
supports boothooks, scripts and cloud-config.
CLI Example:
.. code-block:: bash
salt myminion boto.get_cloud_init_mime <cloud init>
"""
if isinstance(cloud_init, str):
cloud_init = salt.utils.json.loads(cloud_init)
_cloud_init = email.mime.multipart.MIMEMultipart()
if "boothooks" in cloud_init:
for script_name, script in cloud_init["boothooks"].items():
_script = email.mime.text.MIMEText(script, "cloud-boothook")
_cloud_init.attach(_script)
if "scripts" in cloud_init:
for script_name, script in cloud_init["scripts"].items():
_script = email.mime.text.MIMEText(script, "x-shellscript")
_cloud_init.attach(_script)
if "cloud-config" in cloud_init:
cloud_config = cloud_init["cloud-config"]
_cloud_config = email.mime.text.MIMEText(
salt.utils.yaml.safe_dump(cloud_config, default_flow_style=False),
"cloud-config",
)
_cloud_init.attach(_cloud_config)
return _cloud_init.as_string()
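# Illustrative sketch (added for clarity, not part of the original module): a
# minimal cloud_init dict accepted by get_cloud_init_mime above. The section
# names mirror the keys handled in the function; the script bodies are hypothetical.
_EXAMPLE_CLOUD_INIT = {
    "boothooks": {"set-hostname": "#!/bin/sh\nhostname myhost"},
    "scripts": {"install-tools": "#!/bin/bash\nyum -y install htop"},
    "cloud-config": {"packages": ["htop"]},
}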
def launch_configuration_exists(name, region=None, key=None, keyid=None, profile=None):
"""
Check for a launch configuration's existence.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.launch_configuration_exists mylc
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
lc = conn.get_all_launch_configurations(names=[name])
if lc:
return True
else:
msg = "The launch configuration does not exist in region {}".format(
region
)
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return False
def get_all_launch_configurations(region=None, key=None, keyid=None, profile=None):
"""
    Fetch and return all Launch Configurations with details.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.get_all_launch_configurations
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
return conn.get_all_launch_configurations()
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return []
def list_launch_configurations(region=None, key=None, keyid=None, profile=None):
"""
List all Launch Configurations.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.list_launch_configurations
"""
ret = get_all_launch_configurations(region, key, keyid, profile)
return [r.name for r in ret]
def describe_launch_configuration(
name, region=None, key=None, keyid=None, profile=None
):
"""
Dump details of a given launch configuration.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.describe_launch_configuration mylc
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
lc = conn.get_all_launch_configurations(names=[name])
if lc:
return lc[0]
else:
msg = "The launch configuration does not exist in region {}".format(
region
)
log.debug(msg)
return None
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return None
def create_launch_configuration(
name,
image_id,
key_name=None,
vpc_id=None,
vpc_name=None,
security_groups=None,
user_data=None,
instance_type="m1.small",
kernel_id=None,
ramdisk_id=None,
block_device_mappings=None,
instance_monitoring=False,
spot_price=None,
instance_profile_name=None,
ebs_optimized=False,
associate_public_ip_address=None,
volume_type=None,
delete_on_termination=True,
iops=None,
use_block_device_types=False,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Create a launch configuration.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.create_launch_configuration mylc image_id=ami-0b9c9f62 key_name='mykey' security_groups='["mygroup"]' instance_type='c3.2xlarge'
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, str):
security_groups = salt.utils.json.loads(security_groups)
if isinstance(block_device_mappings, str):
block_device_mappings = salt.utils.json.loads(block_device_mappings)
_bdms = []
if block_device_mappings:
# Boto requires objects for the mappings and the devices.
_block_device_map = blockdevicemapping.BlockDeviceMapping()
for block_device_dict in block_device_mappings:
for block_device, attributes in block_device_dict.items():
_block_device = blockdevicemapping.EBSBlockDeviceType()
for attribute, value in attributes.items():
setattr(_block_device, attribute, value)
_block_device_map[block_device] = _block_device
_bdms = [_block_device_map]
# If a VPC is specified, then determine the secgroup id's within that VPC, not
# within the default VPC. If a security group id is already part of the list,
# convert_to_group_ids leaves that entry without attempting a lookup on it.
if security_groups and (vpc_id or vpc_name):
security_groups = __salt__["boto_secgroup.convert_to_group_ids"](
security_groups,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
lc = autoscale.LaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
security_groups=security_groups,
user_data=user_data,
instance_type=instance_type,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
block_device_mappings=_bdms,
instance_monitoring=instance_monitoring,
spot_price=spot_price,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
volume_type=volume_type,
delete_on_termination=delete_on_termination,
iops=iops,
use_block_device_types=use_block_device_types,
)
retries = 30
while True:
try:
conn.create_launch_configuration(lc)
log.info("Created LC %s", name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
msg = "Failed to create LC {}".format(name)
log.error(msg)
return False
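# Illustrative sketch (added for clarity, not part of the original module): the
# nested structure expected for ``block_device_mappings`` in
# create_launch_configuration above. Each inner dict maps a device name to
# attributes set on an EBSBlockDeviceType; the device name and values are hypothetical.
_EXAMPLE_BLOCK_DEVICE_MAPPINGS = [
    {"/dev/sdb": {"size": 100, "volume_type": "gp2", "delete_on_termination": True}},
]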
def delete_launch_configuration(name, region=None, key=None, keyid=None, profile=None):
"""
Delete a launch configuration.
CLI Example:
.. code-block:: bash
salt myminion boto_asg.delete_launch_configuration mylc
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
conn.delete_launch_configuration(name)
log.info("Deleted LC %s", name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
msg = "Failed to delete LC {}".format(name)
log.error(msg)
return False
def get_scaling_policy_arn(
as_group, scaling_policy_name, region=None, key=None, keyid=None, profile=None
):
"""
Return the arn for a scaling policy in a specific autoscale group or None
if not found. Mainly used as a helper method for boto_cloudwatch_alarm, for
linking alarms to scaling policies.
CLI Example:
.. code-block:: bash
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while retries > 0:
retries -= 1
try:
policies = conn.get_all_policies(as_group=as_group)
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
log.error("Could not convert: %s", as_group)
return None
except boto.exception.BotoServerError as e:
if e.error_code != "Throttling":
raise
log.debug("Throttled by API, will retry in 5 seconds")
time.sleep(5)
log.error("Maximum number of retries exceeded")
return None
def get_all_groups(region=None, key=None, keyid=None, profile=None):
"""
Return all AutoScale Groups visible in the account
(as a list of boto.ec2.autoscale.group.AutoScalingGroup).
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt-call boto_asg.get_all_groups region=us-east-1 --output yaml
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
next_token = ""
asgs = []
while next_token is not None:
ret = conn.get_all_groups(next_token=next_token)
asgs += [a for a in ret]
next_token = ret.next_token
return asgs
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return []
def list_groups(region=None, key=None, keyid=None, profile=None):
"""
Return all AutoScale Groups visible in the account
(as a list of names).
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt-call boto_asg.list_groups region=us-east-1
"""
return [
a.name
for a in get_all_groups(region=region, key=key, keyid=keyid, profile=profile)
]
def get_instances(
name,
lifecycle_state="InService",
health_status="Healthy",
attribute="private_ip_address",
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
    Return the requested attribute of all instances in the named autoscale group.
CLI Example:
.. code-block:: bash
salt-call boto_asg.get_instances my_autoscale_group_name
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ec2_conn = _get_ec2_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
asgs = conn.get_all_groups(names=[name])
break
except boto.exception.BotoServerError as e:
if retries and e.code == "Throttling":
log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return False
if len(asgs) != 1:
        log.debug(
            "name '%s' returned %s ASGs (expected exactly one): %s",
            name, len(asgs), [asg.name for asg in asgs]
        )
return False
asg = asgs[0]
instance_ids = []
# match lifecycle_state and health_status
for i in asg.instances:
if lifecycle_state is not None and i.lifecycle_state != lifecycle_state:
continue
if health_status is not None and i.health_status != health_status:
continue
instance_ids.append(i.instance_id)
# get full instance info, so that we can return the attribute
instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
if attributes:
return [
[_convert_attribute(instance, attr) for attr in attributes]
for instance in instances
]
else:
# properly handle case when not all instances have the requested attribute
return [
_convert_attribute(instance, attribute)
for instance in instances
if getattr(instance, attribute)
]
def _convert_attribute(instance, attribute):
if attribute == "tags":
tags = dict(getattr(instance, attribute))
return {
key.encode("utf-8"): value.encode("utf-8") for key, value in tags.items()
}
return getattr(instance, attribute).encode("ascii")
def enter_standby(
name,
instance_ids,
should_decrement_desired_capacity=False,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Switch desired instances to StandBy mode
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt-call boto_asg.enter_standby my_autoscale_group_name '["i-xxxxxx"]'
"""
conn = _get_conn_autoscaling_boto3(
region=region, key=key, keyid=keyid, profile=profile
)
try:
response = conn.enter_standby(
InstanceIds=instance_ids,
AutoScalingGroupName=name,
ShouldDecrementDesiredCapacity=should_decrement_desired_capacity,
)
except ClientError as e:
err = __utils__["boto3.get_error"](e)
if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
return {"exists": False}
return {"error": err}
return all(
activity["StatusCode"] != "Failed" for activity in response["Activities"]
)
def exit_standby(
name,
instance_ids,
should_decrement_desired_capacity=False,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]'
"""
conn = _get_conn_autoscaling_boto3(
region=region, key=key, keyid=keyid, profile=profile
)
try:
response = conn.exit_standby(
InstanceIds=instance_ids, AutoScalingGroupName=name
)
except ClientError as e:
err = __utils__["boto3.get_error"](e)
if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
return {"exists": False}
return {"error": err}
return all(
activity["StatusCode"] != "Failed" for activity in response["Activities"]
)
|
blob_id: 4cce01fd0311ac217cab4596214f31df2b5dd6d2
directory_id: 182bbadb0ee7f59f1abd154d06484e555a30c6d8
path: /api/tests/integration/common/util.py
content_id: 2584d4b2eab19bc13cca74618cfdd9907233d7bd
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: epam/Indigo
snapshot_id: 08559861adf474122366b6e2e499ed3aa56272d1
revision_id: 8e473e69f393c3a57ff75b7728999c5fb4cbf1a3
branch_name: refs/heads/master
visit_date: 2023-09-02T10:14:46.843829
revision_date: 2023-08-25T08:39:24
committer_date: 2023-08-25T08:39:24
github_id: 37,536,320
star_events_count: 265
fork_events_count: 106
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T17:34:00
gha_created_at: 2015-06-16T14:45:56
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,575
extension: py
filename: util.py
|
import os
import platform
import re
import sys
REPO_ROOT = os.path.normpath(
os.path.join(
os.path.abspath(os.path.dirname(__file__)), "..", "..", "..", ".."
)
)
system_name = None
def overridePlatform(platform):
global system_name
system_name = platform
def isIronPython():
return sys.platform == "cli" or (
"implementation" in dir(sys)
and sys.implementation.name == "ironpython"
)
def isJython():
return os.name == "java"
def getIndigoVersion():
version = ""
cur_dir = os.path.split(__file__)[0]
if not os.path.exists(
os.path.join(cur_dir, "../../../indigo/api/indigo-version.cmake")
):
return version
for line in open(
os.path.join(cur_dir, "../../../indigo/api/indigo-version.cmake")
):
        m = re.search(r'SET\(INDIGO_VERSION "(.*)"', line)
if m:
version = m.group(1)
return version
def getCpuCount():
if os.name == "java":
from java.lang import Runtime
runtime = Runtime.getRuntime()
cpu_count = runtime.availableProcessors()
else:
import multiprocessing
cpu_count = multiprocessing.cpu_count()
return cpu_count
def getPlatform():
global system_name
if not system_name:
if isJython():
import java.lang.System
osname = java.lang.System.getProperty("os.name")
if osname.startswith("Windows"):
system_name = "win"
elif osname == "Mac OS X":
system_name = "mac"
elif osname == "Linux":
system_name = "linux"
else:
raise EnvironmentError(
"Unsupported operating system %s" % osname
)
else:
if os.name == "nt":
if "GCC" in platform.python_compiler():
system_name = "mingw"
else:
system_name = "win"
elif os.name == "posix":
if platform.mac_ver()[0]:
system_name = "mac"
else:
system_name = "linux"
else:
raise EnvironmentError(
"Unsupported operating system %s" % os.name
)
return system_name
def get_indigo_java_version():
import xml.etree.cElementTree as ElementTree
pom_path = os.path.join(REPO_ROOT, "api", "java", "pom.xml")
ElementTree.register_namespace("", "http://maven.apache.org/POM/4.0.0")
tree = ElementTree.parse(pom_path)
namespace = r"{http://maven.apache.org/POM/4.0.0}"
indigo_version = None
jna_version = None
for l1_child in tree.getroot():
if l1_child.tag == "{}properties".format(namespace):
for l2_child in l1_child:
if l2_child.tag == "{}revision".format(namespace):
indigo_version = l2_child.text
if l1_child.tag == "{}dependencies".format(namespace):
for l2_child in l1_child:
if l2_child.tag == "{}dependency".format(namespace):
jna_found = False
for l3_child in l2_child:
if (
l3_child.tag == "{}artifactId".format(namespace)
and l3_child.text == "jna"
):
jna_found = True
break
if jna_found:
for l3_child in l2_child:
if l3_child.tag == "{}version".format(namespace):
jna_version = l3_child.text
if not indigo_version:
raise ValueError(
"Could not find Indigo version in {}".format(pom_path)
)
if not jna_version:
raise ValueError("Could not find JNA version in {}".format(pom_path))
return indigo_version, jna_version
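# Illustrative sketch (added for clarity, not part of the original helper; the
# version numbers are hypothetical): the pom.xml fragments that
# get_indigo_java_version() above reads the two versions from.
#
#   <properties>
#     <revision>1.11.0</revision>
#   </properties>
#   <dependencies>
#     <dependency>
#       <artifactId>jna</artifactId>
#       <version>5.12.1</version>
#     </dependency>
#   </dependencies>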
def file_sha1(path):
import hashlib
sha1sum = hashlib.sha1()
with open(path, "rb") as source:
block = source.read(2**16)
while len(block) != 0:
sha1sum.update(block)
block = source.read(2**16)
return sha1sum.hexdigest()
def download_jna(jna_version, path):
import urllib
def check_jna_sha1():
jna_sha1_url = "{}.sha1".format(jna_url)
jna_ref_sha1 = urllib.urlopen(jna_sha1_url).read()
jna_file_sha1 = file_sha1(output_path)
if jna_ref_sha1 != jna_file_sha1:
print(
"Checked JNA at {}, sha1 {} is not equal to reference {}".format(
output_path, jna_file_sha1, jna_ref_sha1
)
)
return False
print(
"Checked JNA at {}, sha1 {} verified".format(
output_path, jna_file_sha1
)
)
return True
output_path = os.path.join(path, "jna-{}.jar".format(jna_version))
jna_url = "https://search.maven.org/remotecontent?filepath=net/java/dev/jna/jna/{0}/jna-{0}.jar".format(
jna_version
)
if os.path.exists(output_path) and check_jna_sha1():
return
try:
import urllib
urllib.urlretrieve(jna_url, output_path)
if check_jna_sha1():
print("Successfully downloaded JNA to {0}".format(output_path))
else:
raise RuntimeError(
"Could not download and/or verify JNA from {}".format(jna_url)
)
except Exception as e:
os.remove(output_path)
raise e
|
blob_id: 2d2556ddabc409ea0139c6bfb8fdb0906cc5d444
directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
path: /PhysicsTools/PatAlgos/python/tools/trigTools.py
content_id: 40b39ca45b3d6f8caae3530f8558951d0da0ffee
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: cms-sw/cmssw
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58
revision_id: 19c178740257eb48367778593da55dcad08b7a4f
branch_name: refs/heads/master
visit_date: 2023-08-23T21:57:42.491143
revision_date: 2023-08-22T20:22:40
committer_date: 2023-08-22T20:22:40
github_id: 10,969,551
star_events_count: 1,006
fork_events_count: 3,696
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:14:28
gha_created_at: 2013-06-26T14:09:07
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 42,361
extension: py
filename: trigTools.py
|
from __future__ import print_function
from builtins import range
from PhysicsTools.PatAlgos.tools.ConfigToolBase import *
from PhysicsTools.PatAlgos.tools.helpers import *
from PhysicsTools.PatAlgos.patEventContent_cff import patTriggerL1RefsEventContent
from PhysicsTools.PatAlgos.triggerLayer1.triggerMatcherExamples_cfi import _exampleTriggerMatchers
_defaultTriggerProducer = 'patTrigger'
_defaultTriggerEventProducer = 'patTriggerEvent'
_defaultPath = ''
_defaultHltProcess = 'HLT'
_defaultOutputModule = 'out'
_defaultPostfix = ''
_defaultTriggerMatchersComment = "Trigger matcher modules' labels, default: ..."
_defaultTriggerProducerComment = "PATTriggerProducer module label, default: %s"%( _defaultTriggerProducer )
_defaultTriggerEventProducerComment = "PATTriggerEventProducer module label, default: %s"%( _defaultTriggerEventProducer )
_defaultPathComment = "Name of path to use, default: %s"%( _defaultPath )
_defaultHltProcessComment = "HLT process name, default: %s"%( _defaultHltProcess )
_defaultOutputModuleComment = "Output module label, empty label indicates no output, default: %s"%( _defaultOutputModule )
_defaultPostfixComment = "Postfix to apply to PAT module labels, default: %s"%( _defaultPostfix )
_longLine = '---------------------------------------------------------------------'
def _modulesInPath( process, pathLabel ):
return [ m.label() for m in listModules( getattr( process, pathLabel ) ) ]
def _addEventContent( outputCommands, eventContent ):
# add new entry to event content
for content in eventContent:
if content not in outputCommands:
outputCommands += [ content ]
# check for obsolete entries
listToRemove = []
for i in range( len( outputCommands ) ):
if i in listToRemove:
continue
command = outputCommands[ i ]
if command[ : 4 ] == 'keep':
dropCommand = command.replace( 'keep ', 'drop ' )
for j in range( i + 1, len( outputCommands ) ):
testCommand = outputCommands[ j ]
if testCommand == command:
listToRemove += [ j ]
elif testCommand == dropCommand:
listToRemove += [ i, j ]
break
# copy entries excl. obsolete ones
newOutputCommands = cms.untracked.vstring()
for i in range( len( outputCommands ) ):
if i not in listToRemove:
newOutputCommands += [ outputCommands[ i ] ]
# return result
return newOutputCommands
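# Illustrative sketch (added for clarity, not part of the original tools; the
# collection tags are hypothetical): given existing output commands
#   [ 'keep *_patTrigger_*_*' ]
# and new event content
#   [ 'keep patTriggerPaths_patTrigger_*_RECO', 'drop *_patTrigger_*_*' ],
# _addEventContent() appends the new 'keep' statement once and removes the
# keep/drop pair that cancels out, so only the new 'keep' statement survives.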
class SwitchOnTrigger( ConfigToolBase ):
""" Enables trigger information in PAT
SwitchOnTrigger( [cms.Process], triggerProducer = 'patTrigger', triggerEventProducer = 'patTriggerEvent', path = '', hltProcess = 'HLT', outputModule = 'out' )
- [cms.Process] : the 'cms.Process'
- triggerProducer : PATTriggerProducer module label;
optional, default: 'patTrigger'
- triggerEventProducer: PATTriggerEventProducer module label;
optional, default: 'patTriggerEvent'
- path : name of path to use;
optional, default: ''
- hltProcess : HLT process name;
optional, default: 'HLT'
- outputModule : output module label;
empty label indicates no output;
optional, default: 'out'
Using None as any argument restores its default value.
"""
_label = 'switchOnTrigger'
_defaultParameters = dicttypes.SortedKeysDict()
def __init__( self ):
ConfigToolBase.__init__( self )
self.addParameter( self._defaultParameters, 'triggerProducer' , _defaultTriggerProducer , _defaultTriggerProducerComment )
self.addParameter( self._defaultParameters, 'triggerEventProducer', _defaultTriggerEventProducer, _defaultTriggerEventProducerComment )
self.addParameter( self._defaultParameters, 'path' , _defaultPath , _defaultPathComment )
self.addParameter( self._defaultParameters, 'hltProcess' , _defaultHltProcess , _defaultHltProcessComment )
self.addParameter( self._defaultParameters, 'outputModule' , _defaultOutputModule , _defaultOutputModuleComment )
self._parameters = copy.deepcopy( self._defaultParameters )
self._comment = ""
def getDefaultParameters( self ):
return self._defaultParameters
def __call__( self, process
, triggerProducer = None
, triggerEventProducer = None
, path = None
, hltProcess = None
, outputModule = None
):
if triggerProducer is None:
triggerProducer = self._defaultParameters[ 'triggerProducer' ].value
if triggerEventProducer is None:
triggerEventProducer = self._defaultParameters[ 'triggerEventProducer' ].value
if path is None:
path = self._defaultParameters[ 'path' ].value
if hltProcess is None:
hltProcess = self._defaultParameters[ 'hltProcess' ].value
if outputModule is None:
outputModule = self._defaultParameters[ 'outputModule' ].value
self.setParameter( 'triggerProducer' , triggerProducer )
self.setParameter( 'triggerEventProducer', triggerEventProducer )
self.setParameter( 'path' , path )
self.setParameter( 'hltProcess' , hltProcess )
self.setParameter( 'outputModule' , outputModule )
self.apply( process )
def toolCode( self, process ):
triggerProducer = self._parameters[ 'triggerProducer' ].value
triggerEventProducer = self._parameters[ 'triggerEventProducer' ].value
path = self._parameters[ 'path' ].value
hltProcess = self._parameters[ 'hltProcess' ].value
outputModule = self._parameters[ 'outputModule' ].value
task = getPatAlgosToolsTask(process)
# Load default producers from existing config files, if needed
if not hasattr( process, triggerProducer ):
from PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cfi import patTrigger
addToProcessAndTask(triggerProducer, patTrigger.clone(), process, task)
else:
print('%s():'%( self._label ))
print(' PATTriggerProducer module \'%s\' exists already in process'%( triggerProducer ))
print(' ==> entry re-used')
print(_longLine)
if not hasattr( process, triggerEventProducer ):
from PhysicsTools.PatAlgos.triggerLayer1.triggerEventProducer_cfi import patTriggerEvent
addToProcessAndTask(triggerEventProducer, patTriggerEvent.clone(), process, task)
else:
print('%s():'%( self._label ))
print(' PATTriggerEventProducer module \'%s\' exists already in process'%( triggerEventProducer ))
print(' ==> entry re-used')
print(_longLine)
# Maintain configurations
trigProdMod = getattr( process, triggerProducer )
trigProdMod.processName = hltProcess
if trigProdMod.onlyStandAlone.value() is True:
trigProdMod.onlyStandAlone = False
print(' configuration parameter automatically changed')
print(' PATTriggerProducer %s.onlyStandAlone --> %s'%( triggerProducer, trigProdMod.onlyStandAlone ))
print(_longLine)
trigEvtProdMod = getattr( process, triggerEventProducer )
trigEvtProdMod.processName = hltProcess
trigEvtProdMod.patTriggerProducer = cms.InputTag( triggerProducer )
if path != '':
if not hasattr( process, path ):
prodPath = cms.Path( trigProdMod + trigEvtProdMod )
setattr( process, path, prodPath )
print('%s():'%( self._label ))
print(' Path \'%s\' does not exist in process'%( path ))
print(' ==> created')
print(_longLine)
# Try to get the order right, but cannot deal with all possible cases.
# Simply rely on the exclusive usage of these tools without manual intervention.
else:
if not triggerProducer in _modulesInPath( process, path ):
prodPath = getattr( process, path )
prodPath += trigProdMod
if not triggerEventProducer in _modulesInPath( process, path ):
prodPath = getattr( process, path )
prodPath += trigEvtProdMod
# Add event content
if outputModule != '':
patTriggerEventContent = [ 'keep patTriggerObjects_%s_*_%s'%( triggerProducer, process.name_() )
, 'keep patTriggerFilters_%s_*_%s'%( triggerProducer, process.name_() )
, 'keep patTriggerPaths_%s_*_%s'%( triggerProducer, process.name_() )
, 'keep patTriggerEvent_%s_*_%s'%( triggerEventProducer, process.name_() )
]
if ( hasattr( trigProdMod, 'addL1Algos' ) and trigProdMod.addL1Algos.value() is True ):
patTriggerEventContent += [ 'keep patTriggerConditions_%s_*_%s'%( triggerProducer, process.name_() )
, 'keep patTriggerAlgorithms_%s_*_%s'%( triggerProducer, process.name_() )
]
if ( hasattr( trigProdMod, 'saveL1Refs' ) and trigProdMod.saveL1Refs.value() is True ):
patTriggerEventContent += patTriggerL1RefsEventContent
getattr( process, outputModule ).outputCommands = _addEventContent( getattr( process, outputModule ).outputCommands, patTriggerEventContent )
switchOnTrigger = SwitchOnTrigger()
class SwitchOnTriggerStandAlone( ConfigToolBase ):
""" Enables trigger information in PAT, limited to stand-alone trigger objects
SwitchOnTriggerStandAlone( [cms.Process], triggerProducer = 'patTrigger', path = '', hltProcess = 'HLT', outputModule = 'out' )
- [cms.Process] : the 'cms.Process'
- triggerProducer : PATTriggerProducer module label;
optional, default: 'patTrigger'
- path : name of path to use;
optional, default: ''
- hltProcess : HLT process name;
optional, default: 'HLT'
- outputModule : output module label;
empty label indicates no output;
optional, default: 'out'
Using None as any argument restores its default value.
"""
_label = 'switchOnTriggerStandAlone'
_defaultParameters = dicttypes.SortedKeysDict()
def __init__( self ):
ConfigToolBase.__init__( self )
self.addParameter( self._defaultParameters, 'triggerProducer', _defaultTriggerProducer, _defaultTriggerProducerComment )
self.addParameter( self._defaultParameters, 'path' , _defaultPath , _defaultPathComment )
self.addParameter( self._defaultParameters, 'hltProcess' , _defaultHltProcess , _defaultHltProcessComment )
self.addParameter( self._defaultParameters, 'outputModule' , _defaultOutputModule , _defaultOutputModuleComment )
self._parameters = copy.deepcopy( self._defaultParameters )
self._comment = ""
def getDefaultParameters( self ):
return self._defaultParameters
def __call__( self, process
, triggerProducer = None
, path = None
, hltProcess = None
, outputModule = None
):
if triggerProducer is None:
triggerProducer = self._defaultParameters[ 'triggerProducer' ].value
if path is None:
path = self._defaultParameters[ 'path' ].value
if hltProcess is None:
hltProcess = self._defaultParameters[ 'hltProcess' ].value
if outputModule is None:
outputModule = self._defaultParameters[ 'outputModule' ].value
self.setParameter( 'triggerProducer', triggerProducer )
self.setParameter( 'path' , path )
self.setParameter( 'hltProcess' , hltProcess )
self.setParameter( 'outputModule' , outputModule )
self.apply( process )
def toolCode( self, process ):
task = getPatAlgosToolsTask(process)
triggerProducer = self._parameters[ 'triggerProducer' ].value
path = self._parameters[ 'path' ].value
hltProcess = self._parameters[ 'hltProcess' ].value
outputModule = self._parameters[ 'outputModule' ].value
# Load default producer from existing config file, if needed
if not hasattr( process, triggerProducer ):
from PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cfi import patTrigger
addToProcessAndTask(triggerProducer, patTrigger.clone( onlyStandAlone = True ), process, task)
else:
print('%s():'%( self._label ))
print(' PATTriggerProducer module \'%s\' exists already in process'%( triggerProducer ))
print(' ==> entry re-used')
print(_longLine)
# Maintain configuration
trigProdMod = getattr( process, triggerProducer )
trigProdMod.processName = hltProcess
if path != '':
if not hasattr( process, path ):
prodPath = cms.Path( trigProdMod )
setattr( process, path, prodPath )
print('%s():'%( self._label ))
print(' Path \'%s\' does not exist in process'%( path ))
print(' ==> created')
print(_longLine)
elif not triggerProducer in _modulesInPath( process, path ):
prodPath = getattr( process, path )
prodPath += trigProdMod
# Add event content
if outputModule != '':
patTriggerEventContent = [ 'keep patTriggerObjectStandAlones_%s_*_%s'%( triggerProducer, process.name_() )
]
if ( hasattr( trigProdMod, 'saveL1Refs' ) and trigProdMod.saveL1Refs.value() is True ):
patTriggerEventContent += patTriggerL1RefsEventContent
getattr( process, outputModule ).outputCommands = _addEventContent( getattr( process, outputModule ).outputCommands, patTriggerEventContent )
switchOnTriggerStandAlone = SwitchOnTriggerStandAlone()
class SwitchOnTriggerMatching( ConfigToolBase ):
""" Enables trigger matching in PAT
SwitchOnTriggerMatching( [cms.Process], triggerMatchers = [default list], triggerProducer = 'patTrigger', triggerEventProducer = 'patTriggerEvent', path = '', hltProcess = 'HLT', outputModule = 'out', postfix = '' )
- [cms.Process] : the 'cms.Process'
- triggerMatchers : PAT trigger matcher module labels (list)
optional; default: defined in '_exampleTriggerMatchers'
(s. PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py)
- triggerProducer : PATTriggerProducer module label;
optional, default: 'patTrigger'
- triggerEventProducer: PATTriggerEventProducer module label;
optional, default: 'patTriggerEvent'
- path : name of path to use;
                            optional, default: ''
- hltProcess : HLT process name;
optional, default: 'HLT'
- outputModule : output module label;
empty label indicates no output;
optional, default: 'out'
- postfix : postfix to apply to PAT module labels;
optional, default: ''
Using None as any argument restores its default value.
"""
_label = 'switchOnTriggerMatching'
_defaultParameters = dicttypes.SortedKeysDict()
def __init__( self ):
ConfigToolBase.__init__( self )
self.addParameter( self._defaultParameters, 'triggerMatchers' , _exampleTriggerMatchers , _defaultTriggerMatchersComment )
self.addParameter( self._defaultParameters, 'exampleMatchers' , False , '' )
self.addParameter( self._defaultParameters, 'triggerProducer' , _defaultTriggerProducer , _defaultTriggerProducerComment )
self.addParameter( self._defaultParameters, 'triggerEventProducer', _defaultTriggerEventProducer, _defaultTriggerEventProducerComment )
self.addParameter( self._defaultParameters, 'path' , _defaultPath , _defaultPathComment )
self.addParameter( self._defaultParameters, 'hltProcess' , _defaultHltProcess , _defaultHltProcessComment )
self.addParameter( self._defaultParameters, 'outputModule' , _defaultOutputModule , _defaultOutputModuleComment )
self.addParameter( self._defaultParameters, 'postfix' , _defaultPostfix , _defaultPostfixComment )
self._parameters = copy.deepcopy( self._defaultParameters )
self._comment = ""
def getDefaultParameters( self ):
return self._defaultParameters
def __call__( self, process
, triggerMatchers = None
, triggerProducer = None
, triggerEventProducer = None
, path = None
, hltProcess = None
, outputModule = None
, postfix = None
):
if triggerMatchers is None:
triggerMatchers = self._defaultParameters[ 'triggerMatchers' ].value
self.setParameter( 'exampleMatchers', True )
if triggerProducer is None:
triggerProducer = self._defaultParameters[ 'triggerProducer' ].value
if triggerEventProducer is None:
triggerEventProducer = self._defaultParameters[ 'triggerEventProducer' ].value
if path is None:
path = self._defaultParameters[ 'path' ].value
if hltProcess is None:
hltProcess = self._defaultParameters[ 'hltProcess' ].value
if outputModule is None:
outputModule = self._defaultParameters[ 'outputModule' ].value
if postfix is None:
postfix = self._defaultParameters[ 'postfix' ].value
self.setParameter( 'triggerMatchers' , triggerMatchers )
self.setParameter( 'triggerProducer' , triggerProducer )
self.setParameter( 'triggerEventProducer', triggerEventProducer )
self.setParameter( 'path' , path )
self.setParameter( 'hltProcess' , hltProcess )
self.setParameter( 'outputModule' , outputModule )
self.setParameter( 'postfix' , postfix )
self.apply( process )
def toolCode( self, process ):
triggerMatchers = self._parameters[ 'triggerMatchers' ].value
exampleMatchers = self._parameters[ 'exampleMatchers' ].value
triggerProducer = self._parameters[ 'triggerProducer' ].value
triggerEventProducer = self._parameters[ 'triggerEventProducer' ].value
path = self._parameters[ 'path' ].value
hltProcess = self._parameters[ 'hltProcess' ].value
outputModule = self._parameters[ 'outputModule' ].value
postfix = self._parameters[ 'postfix' ].value
# Load default producers from existing config file, if needed
task = getPatAlgosToolsTask(process)
if exampleMatchers:
process.load( "PhysicsTools.PatAlgos.triggerLayer1.triggerMatcherExamples_cfi" )
task.add(process.triggerMatcherExamplesTask)
# Switch on PAT trigger information if needed
if not hasattr( process, triggerEventProducer ):
print('%s():'%( self._label ))
print(' PAT trigger production switched on automatically using')
print(' switchOnTrigger( process, \'%s\', \'%s\', \'%s\', \'%s\', \'%s\' )'%( hltProcess, triggerProducer, triggerEventProducer, path, outputModule ))
print(_longLine)
switchOnTrigger( process, triggerProducer, triggerEventProducer, path, hltProcess, outputModule )
# Maintain configurations
trigEvtProdMod = getattr( process, triggerEventProducer )
triggerMatchersKnown = []
for matcher in triggerMatchers:
if not hasattr( process, matcher ):
print('%s():'%( self._label ))
print(' Matcher \'%s\' not known to process'%( matcher ))
print(' ==> skipped')
print(_longLine)
continue
triggerMatchersKnown.append( matcher )
trigMchMod = getattr( process, matcher )
trigMchMod.src = cms.InputTag( trigMchMod.src.getModuleLabel() + postfix )
trigMchMod.matched = triggerProducer
matchers = getattr( trigEvtProdMod, 'patTriggerMatches' )
if len( matchers ) > 0:
print('%s():'%( self._label ))
print(' PAT trigger matchers already attached to existing PATTriggerEventProducer \'%s\''%( triggerEventProducer ))
print(' configuration parameters automatically changed')
for matcher in matchers:
trigMchMod = getattr( process, matcher )
                if trigMchMod.matched.value() != triggerProducer:
trigMchMod.matched = triggerProducer
print(' PAT trigger matcher %s.matched --> %s'%( matcher, trigMchMod.matched ))
print(_longLine)
else:
trigEvtProdMod.patTriggerMatches = cms.VInputTag()
for matcher in triggerMatchersKnown:
trigEvtProdMod.patTriggerMatches.append( cms.InputTag( matcher ) )
# Add event content
if outputModule != '':
patTriggerEventContent = []
for matcher in triggerMatchersKnown:
patTriggerEventContent += [ 'keep patTriggerObjectsedmAssociation_%s_%s_%s'%( triggerEventProducer, matcher, process.name_() )
, 'keep *_%s_*_*'%( getattr( process, matcher ).src.value() )
]
getattr( process, outputModule ).outputCommands = _addEventContent( getattr( process, outputModule ).outputCommands, patTriggerEventContent )
switchOnTriggerMatching = SwitchOnTriggerMatching()
class SwitchOnTriggerMatchingStandAlone( ConfigToolBase ):
""" Enables trigger matching in PAT
SwitchOnTriggerMatchingStandAlone( [cms.Process], triggerMatchers = [default list], triggerProducer = 'patTrigger', path = '', hltProcess = 'HLT', outputModule = 'out', postfix = '' )
- [cms.Process] : the 'cms.Process'
- triggerMatchers: PAT trigger matcher module labels (list)
                       optional; default: defined in '_exampleTriggerMatchers'
(s. PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py)
- triggerProducer: PATTriggerProducer module label;
optional, default: 'patTrigger'
- path : name of path to use;
optional, default: ''
- hltProcess : HLT process name;
optional, default: 'HLT'
- outputModule : output module label;
empty label indicates no output;
optional, default: 'out'
- postfix : postfix to apply to PAT module labels;
optional, default: ''
Using None as any argument restores its default value.
"""
_label = 'switchOnTriggerMatchingStandAlone'
_defaultParameters = dicttypes.SortedKeysDict()
def __init__( self ):
ConfigToolBase.__init__( self )
self.addParameter( self._defaultParameters, 'triggerMatchers', _exampleTriggerMatchers, _defaultTriggerMatchersComment )
self.addParameter( self._defaultParameters, 'exampleMatchers', False , '' )
self.addParameter( self._defaultParameters, 'triggerProducer', _defaultTriggerProducer, _defaultTriggerProducerComment )
self.addParameter( self._defaultParameters, 'path' , _defaultPath , _defaultPathComment )
self.addParameter( self._defaultParameters, 'hltProcess' , _defaultHltProcess , _defaultHltProcessComment )
self.addParameter( self._defaultParameters, 'outputModule' , _defaultOutputModule , _defaultOutputModuleComment )
self.addParameter( self._defaultParameters, 'postfix' , _defaultPostfix , _defaultPostfixComment )
self._parameters = copy.deepcopy( self._defaultParameters )
self._comment = ""
def getDefaultParameters( self ):
return self._defaultParameters
def __call__( self, process
, triggerMatchers = None
, triggerProducer = None
, path = None
, hltProcess = None
, outputModule = None
, postfix = None
):
if triggerMatchers is None:
triggerMatchers = self._defaultParameters[ 'triggerMatchers' ].value
self.setParameter( 'exampleMatchers', True )
if triggerProducer is None:
triggerProducer = self._defaultParameters[ 'triggerProducer' ].value
if path is None:
path = self._defaultParameters[ 'path' ].value
if hltProcess is None:
hltProcess = self._defaultParameters[ 'hltProcess' ].value
if outputModule is None:
outputModule = self._defaultParameters[ 'outputModule' ].value
if postfix is None:
postfix = self._defaultParameters[ 'postfix' ].value
self.setParameter( 'triggerMatchers', triggerMatchers )
self.setParameter( 'triggerProducer', triggerProducer )
self.setParameter( 'path' , path )
self.setParameter( 'hltProcess' , hltProcess )
self.setParameter( 'outputModule' , outputModule )
self.setParameter( 'postfix' , postfix )
self.apply( process )
def toolCode( self, process ):
triggerMatchers = self._parameters[ 'triggerMatchers' ].value
exampleMatchers = self._parameters[ 'exampleMatchers' ].value
triggerProducer = self._parameters[ 'triggerProducer' ].value
path = self._parameters[ 'path' ].value
hltProcess = self._parameters[ 'hltProcess' ].value
outputModule = self._parameters[ 'outputModule' ].value
postfix = self._parameters[ 'postfix' ].value
# Load default producers from existing config file, if needed
task = getPatAlgosToolsTask(process)
if exampleMatchers:
process.load( "PhysicsTools.PatAlgos.triggerLayer1.triggerMatcherExamples_cfi" )
task.add(process.triggerMatcherExamplesTask)
# Switch on PAT trigger information if needed
if not hasattr( process, triggerProducer ):
print('%s():'%( self._label ))
print(' PAT trigger production switched on automatically using')
print(' switchOnTriggerStandAlone( process, \'%s\', \'%s\', \'%s\', \'%s\' )'%( hltProcess, triggerProducer, path, outputModule ))
print(_longLine)
switchOnTriggerStandAlone( process, triggerProducer, path, hltProcess, outputModule )
# Maintain configurations
triggerMatchersKnown = []
for matcher in triggerMatchers:
if not hasattr( process, matcher ):
print('%s():'%( self._label ))
print(' Matcher \'%s\' not known to process'%( matcher ))
print(' ==> skipped')
print(_longLine)
continue
triggerMatchersKnown.append( matcher )
trigMchMod = getattr( process, matcher )
trigMchMod.src = cms.InputTag( trigMchMod.src.getModuleLabel() + postfix )
trigMchMod.matched = triggerProducer
# Add event content
if outputModule != '':
patTriggerEventContent = []
for matcher in triggerMatchersKnown:
patTriggerEventContent += [ 'keep patTriggerObjectStandAlonesedmAssociation_%s_*_%s'%( matcher, process.name_() )
, 'keep *_%s_*_*'%( getattr( process, matcher ).src.value() )
]
getattr( process, outputModule ).outputCommands = _addEventContent( getattr( process, outputModule ).outputCommands, patTriggerEventContent )
switchOnTriggerMatchingStandAlone = SwitchOnTriggerMatchingStandAlone()
class SwitchOnTriggerMatchEmbedding( ConfigToolBase ):
""" Enables embedding of trigger matches into PAT objects
SwitchOnTriggerMatchEmbedding( [cms.Process], triggerMatchers = [default list], triggerProducer = 'patTrigger', path = '', hltProcess = 'HLT', outputModule = 'out', postfix = '' )
- [cms.Process] : the 'cms.Process'
- triggerMatchers: PAT trigger matcher module labels (list)
                       optional; default: defined in '_exampleTriggerMatchers'
(s. PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py)
- triggerProducer: PATTriggerProducer module label;
optional, default: 'patTrigger'
- path : name of path to use;
optional, default: ''
- hltProcess : HLT process name;
optional, default: 'HLT'
- outputModule : output module label;
empty label indicates no output;
optional, default: 'out'
- postfix : postfix to apply to PAT module labels;
optional, default: ''
Using None as any argument restores its default value.
"""
_label = 'switchOnTriggerMatchEmbedding'
_defaultParameters = dicttypes.SortedKeysDict()
def __init__( self ):
ConfigToolBase.__init__( self )
self.addParameter( self._defaultParameters, 'triggerMatchers', _exampleTriggerMatchers, _defaultTriggerMatchersComment )
self.addParameter( self._defaultParameters, 'exampleMatchers', False , '' )
self.addParameter( self._defaultParameters, 'triggerProducer', _defaultTriggerProducer, _defaultTriggerProducerComment )
self.addParameter( self._defaultParameters, 'path' , _defaultPath , _defaultPathComment )
self.addParameter( self._defaultParameters, 'hltProcess' , _defaultHltProcess , _defaultHltProcessComment )
self.addParameter( self._defaultParameters, 'outputModule' , _defaultOutputModule , _defaultOutputModuleComment )
self.addParameter( self._defaultParameters, 'postfix' , _defaultPostfix , _defaultPostfixComment )
self._parameters = copy.deepcopy( self._defaultParameters )
self._comment = ""
def getDefaultParameters( self ):
return self._defaultParameters
def __call__( self, process
, triggerMatchers = None
, triggerProducer = None
, path = None
, hltProcess = None
, outputModule = None
, postfix = None
):
if triggerMatchers is None:
triggerMatchers = self._defaultParameters[ 'triggerMatchers' ].value
self.setParameter( 'exampleMatchers', True )
if triggerProducer is None:
triggerProducer = self._defaultParameters[ 'triggerProducer' ].value
if path is None:
path = self._defaultParameters[ 'path' ].value
if hltProcess is None:
hltProcess = self._defaultParameters[ 'hltProcess' ].value
if outputModule is None:
outputModule = self._defaultParameters[ 'outputModule' ].value
if postfix is None:
postfix = self._defaultParameters[ 'postfix' ].value
self.setParameter( 'triggerMatchers', triggerMatchers )
self.setParameter( 'triggerProducer', triggerProducer )
self.setParameter( 'path' , path )
self.setParameter( 'hltProcess' , hltProcess )
self.setParameter( 'outputModule' , outputModule )
self.setParameter( 'postfix' , postfix )
self.apply( process )
def toolCode( self, process ):
triggerMatchers = self._parameters[ 'triggerMatchers' ].value
exampleMatchers = self._parameters[ 'exampleMatchers' ].value
triggerProducer = self._parameters[ 'triggerProducer' ].value
path = self._parameters[ 'path' ].value
hltProcess = self._parameters[ 'hltProcess' ].value
outputModule = self._parameters[ 'outputModule' ].value
postfix = self._parameters[ 'postfix' ].value
# Load default producers from existing config file, if needed
task = getPatAlgosToolsTask(process)
if exampleMatchers:
process.load( "PhysicsTools.PatAlgos.triggerLayer1.triggerMatcherExamples_cfi" )
task.add(process.triggerMatcherExamplesTask)
# Build dictionary of allowed input producers
dictPatProducers = { 'PATPhotonCleaner' : 'PATTriggerMatchPhotonEmbedder'
, 'PATElectronCleaner': 'PATTriggerMatchElectronEmbedder'
, 'PATMuonCleaner' : 'PATTriggerMatchMuonEmbedder'
, 'PATTauCleaner' : 'PATTriggerMatchTauEmbedder'
, 'PATJetCleaner' : 'PATTriggerMatchJetEmbedder'
, 'PATMETCleaner' : 'PATTriggerMatchMETEmbedder'
# , 'PATGenericParticleCleaner' : ''
# , 'PATPFParticleCleaner' : ''
, 'PATPhotonSelector' : 'PATTriggerMatchPhotonEmbedder'
, 'PATElectronSelector': 'PATTriggerMatchElectronEmbedder'
, 'PATMuonSelector' : 'PATTriggerMatchMuonEmbedder'
, 'PATTauSelector' : 'PATTriggerMatchTauEmbedder'
, 'PATJetSelector' : 'PATTriggerMatchJetEmbedder'
, 'PATMETSelector' : 'PATTriggerMatchMETEmbedder'
# , 'PATGenericParticleSelector' : ''
# , 'PATPFParticleSelector' : ''
# , 'PATCompositeCandidateSelector' : ''
, 'PATPhotonRefSelector' : 'PATTriggerMatchPhotonEmbedder'
, 'PATElectronRefSelector': 'PATTriggerMatchElectronEmbedder'
, 'PATMuonRefSelector' : 'PATTriggerMatchMuonEmbedder'
, 'PATTauRefSelector' : 'PATTriggerMatchTauEmbedder'
, 'PATJetRefSelector' : 'PATTriggerMatchJetEmbedder'
, 'PATMETRefSelector' : 'PATTriggerMatchMETEmbedder'
# , 'PATGenericParticleRefSelector' : ''
# , 'PATPFParticleRefSelector' : ''
# , 'PATCompositeCandidateRefSelector' : ''
, 'PATPhotonProducer' : 'PATTriggerMatchPhotonEmbedder'
, 'PATElectronProducer': 'PATTriggerMatchElectronEmbedder'
, 'PATMuonProducer' : 'PATTriggerMatchMuonEmbedder'
, 'PATTauProducer' : 'PATTriggerMatchTauEmbedder'
, 'PATJetProducer' : 'PATTriggerMatchJetEmbedder'
, 'PATMETProducer' : 'PATTriggerMatchMETEmbedder'
# , 'PATGenericParticleProducer' : ''
# , 'PATPFParticleProducer' : ''
# , 'PATCompositeCandidateProducer' : ''
, 'MuonSelectorVertex': 'PATTriggerMatchMuonEmbedder'
}
# Switch on PAT trigger matching if needed
dictConfig = {}
if not hasattr( process, triggerProducer ):
if exampleMatchers:
print('%s():'%( self._label ))
print(' PAT trigger matching switched on automatically using')
print(' switchOnTriggerMatchingStandAlone( process, \'%s\', None, \'%s\', \'%s\', \'%s\', \'%s\' )'%( hltProcess, triggerProducer, path, outputModule, postfix ))
print(_longLine)
switchOnTriggerMatchingStandAlone( process, None, triggerProducer, path, hltProcess, '', postfix ) # Do not store intermediate output collections.
else:
print('%s():'%( self._label ))
print(' PAT trigger matching switched on automatically using')
print(' switchOnTriggerMatchingStandAlone( process, \'%s\', %s, \'%s\', \'%s\', \'%s\', \'%s\' )'%( hltProcess, triggerMatchers, triggerProducer, path, outputModule, postfix ))
print(_longLine)
switchOnTriggerMatchingStandAlone( process, triggerMatchers, triggerProducer, path, hltProcess, '', postfix ) # Do not store intermediate output collections.
elif exampleMatchers:
process.load( "PhysicsTools.PatAlgos.triggerLayer1.triggerMatcherExamples_cfi" )
task.add(process.triggerMatcherExamplesTask)
# Build dictionary of matchers
for matcher in triggerMatchers:
if not hasattr( process, matcher ):
print('%s():'%( self._label ))
print(' PAT trigger matcher \'%s\' not known to process'%( matcher ))
print(' ==> skipped')
print(_longLine)
continue
trigMchMod = getattr( process, matcher )
patObjProd = getattr( process, trigMchMod.src.value() + postfix )
if trigMchMod.src.value() in dictConfig:
dictConfig[ patObjProd.type_() ] += [ matcher ]
else:
dictConfig[ patObjProd.type_() ] = [ matcher ]
# Maintain configurations
patTriggerEventContent = []
for patObjProdType in dictConfig.keys():
if patObjProdType in dictPatProducers:
for matcher in dictConfig[ patObjProdType ]:
trigMchMod = getattr( process, matcher )
patObjProd = getattr( process, trigMchMod.src.value() + postfix )
# Configure embedder module
label = patObjProd.label_() + 'TriggerMatch' # hardcoded default
if hasattr( process, label ):
print('%s():'%( self._label ))
print(' PAT trigger match embedder \'%s\' exists already in process'%( label ))
print(' ==> entry re-used')
print(_longLine)
module = getattr( process, label )
                        if module.type_() != dictPatProducers[ patObjProdType ]:
print('%s():'%( self._label ))
print(' Configuration conflict for PAT trigger match embedder \'%s\''%( label ))
print(' - exists as %s'%( module.type_() ))
print(' - requested as %s by \'%s\''%( dictPatProducers[ patObjProdType ], matcher ))
print(' ==> skipped')
print(_longLine)
continue
                        if module.src.value() != trigMchMod.src.value() + postfix:
print('%s():'%( self._label ))
print(' Configuration conflict for PAT trigger match embedder \'%s\''%( label ))
print(' - exists for input %s'%( module.src.value() ))
print(' - requested for input %s by \'%s\''%( trigMchMod.src.value() + postfix, matcher ))
print(' ==> skipped')
print(_longLine)
continue
module.matches.append( cms.InputTag( matcher ) )
else:
module = cms.EDProducer( dictPatProducers[ patObjProdType ] )
module.src = cms.InputTag( patObjProd.label_() )
module.matches = cms.VInputTag( matcher )
addToProcessAndTask(label, module, process, task)
# Add event content
patTriggerEventContent += [ 'drop *_%s_*_*'%( patObjProd.label_() )
, 'keep *_%s_*_%s'%( label, process.name_() )
]
else:
print('%s():'%( self._label ))
print(' Invalid input source for trigger match embedding')
print(' ==> %s with matchers \'%s\' is skipped'%( patObjProdType, dictConfig[ patObjProdType ] ))
print(_longLine)
if outputModule != '':
getattr( process, outputModule ).outputCommands = _addEventContent( getattr( process, outputModule ).outputCommands, patTriggerEventContent )
switchOnTriggerMatchEmbedding = SwitchOnTriggerMatchEmbedding()
|
blob_id: f077b31d0a60a40e66afae1213124bdf15b30f22
directory_id: 833ef1cc5cbd5cf76da144d10d393e30976d9185
path: /froide/document/migrations/0029_documentcollection_foirequests.py
content_id: 4c1a6ecde402bbacf66b1674091e2395efef2c48
detected_licenses: ["MIT"]
license_type: permissive
repo_name: okfde/froide
snapshot_id: d022407ec30bf018e6ca587ae9df0b73a8625edf
revision_id: 16e3c69b333fc82cb1e52378fc003ddf071152a7
branch_name: refs/heads/main
visit_date: 2023-08-31T08:02:23.343743
revision_date: 2023-08-29T07:01:03
committer_date: 2023-08-29T07:01:03
github_id: 1,700,944
star_events_count: 230
fork_events_count: 48
gha_license_id: MIT
gha_event_created_at: 2023-09-13T09:10:40
gha_created_at: 2011-05-04T12:20:51
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 492
extension: py
filename: 0029_documentcollection_foirequests.py
|
# Generated by Django 3.2.14 on 2022-07-07 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("foirequest", "0055_foiattachment_is_moderated"),
("document", "0028_auto_20220404_1608"),
]
operations = [
migrations.AddField(
model_name="documentcollection",
name="foirequests",
field=models.ManyToManyField(blank=True, to="foirequest.FoiRequest"),
),
]
|
blob_id: cad60ea8661319df037c118e75a9abd5297b071b
directory_id: 94c1805df5a09c39159d502f420d19ad54b567fc
path: /runtime/deps/gyp/test/win/vs-macros/targetpath.gyp
content_id: a8699ffb25341970cfd37e644f5c5859d79f9e70
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"]
license_type: permissive
repo_name: tmikov/jscomp
snapshot_id: 9805a5a4d06520549c57380f0df4a1c0aa0dab56
revision_id: 83828441cb38ec96603a6a60be06977d4852940a
branch_name: refs/heads/develop
visit_date: 2021-01-19T02:56:35.102659
revision_date: 2016-04-12T06:19:30
committer_date: 2016-04-12T06:19:30
github_id: 36,981,674
star_events_count: 237
fork_events_count: 13
gha_license_id: Apache-2.0
gha_event_created_at: 2018-10-14T09:48:12
gha_created_at: 2015-06-06T13:49:26
gha_language: C
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,425
extension: gyp
filename: targetpath.gyp
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test_targetpath_executable',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'OutputFile': '$(TargetPath)',
},
},
},
{
'target_name': 'test_targetpath_loadable_module',
'type': 'loadable_module',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'OutputFile': '$(TargetPath)',
},
},
},
{
'target_name': 'test_targetpath_shared_library',
'type': 'loadable_module',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'OutputFile': '$(TargetPath)',
},
},
},
{
'target_name': 'test_targetpath_static_library',
'type': 'static_library',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLibrarianTool': {
'OutputFile': '$(TargetPath)',
},
},
},
{
'target_name': 'test_targetpath_product_extension',
'type': 'executable',
'sources': ['hello.cc'],
'product_extension': 'foo',
'msvs_settings': {
'VCLinkerTool': {
'OutputFile': '$(TargetPath)',
},
},
},
]
}
|
blob_id: 356879d104eed5054cd14a027a868bfaff291f18
directory_id: a675b8eb42745a8e08bc09153705cca2a3dc44f7
path: /SibeliaZ-LCB/maf_to_gfa1.py
content_id: 33f2321b6c14c70a5ff0ea731e81a7a5b6a1ebef
detected_licenses: ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: medvedevgroup/SibeliaZ
snapshot_id: 72363086d54e814ed8f34594ec1e6f4b4864759e
revision_id: 2a902eec247895ceeb810f7936857929431e2dc5
branch_name: refs/heads/master
visit_date: 2022-10-02T03:52:09.675803
revision_date: 2022-09-29T04:16:45
committer_date: 2022-09-29T04:16:45
github_id: 78,565,121
star_events_count: 136
fork_events_count: 20
gha_license_id: NOASSERTION
gha_event_created_at: 2022-09-29T03:35:49
gha_created_at: 2017-01-10T19:12:59
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,619
extension: py
filename: maf_to_gfa1.py
|
import sys
import numpy
import argparse
import collections
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
MafRecord = collections.namedtuple(
'MafRecord', ['seq_name', 'start', 'size', 'strand', 'seq_size', 'body'])
def pos_start(record):
if record.strand == '+':
return record.start
return record.seq_size - (record.start + record.size)
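# Worked example for pos_start() above (added for clarity, values hypothetical):
# MAF counts a '-' strand start from the end of the sequence, so for
# seq_size=10, start=2, size=3 the forward-strand start is 10 - (2 + 3) = 5.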
def parse_maf(file_name):
handle = open(file_name)
group = []
for line in handle:
line = line.strip()
if line:
if line[0] == 'a':
if len(group) > 0:
yield group
group = []
elif line[0] == 's':
line = line.split()
record = MafRecord(seq_name=line[1], start=int(line[2]), size=int(line[3]),
strand=line[4], seq_size=int(line[5]), body=line[6])
group.append(record)
handle.close()
yield group
def print_maf(group, handle):
for line in group:
print >> handle, '\t'.join(str(x) for x in (
's', line.seq_name, line.start, line.size, line.strand, line.seq_size, line.body))
def print_all_maf(handle, alignment):
print >> handle, '##maf version=1'
for idx, group in enumerate(alignment):
print >> handle, ''
print >> handle, 'a'
print_maf(group, handle)
def profile(group, column):
return [group[i].body[column] == '-' for i in xrange(len(group))]
def slice(group, column_start, column_end, pos, shift):
return [MafRecord(seq_name=group[i].seq_name, start=pos[i] + shift, size=column_end - column_start, strand=group[i].strand, seq_size=group[i].seq_size, body=group[i].body[column_start:column_end]) for i in xrange(len(group)) if group[i].body[column_start] != '-']
def is_homogeneous(group, column):
ch = set()
for record in group:
ch.add(record.body[column])
return len(ch) == 1 or (len(ch) == 2 and '-' in ch)
def decompose_column(group, blocks):
group.sort(key=lambda x: x.body[0])
i = 0
while i < len(group):
j = i
while j < len(group) and group[i].body[0] == group[j].body[0]:
j += 1
blocks.append(group[i:j])
i = j
# Split a MAF subrange with the same gap pattern into subblocks with identical strings
def split_range(group, column_start, column_end, pos, blocks):
origin = column_start
for i in xrange(column_start, column_end):
if not is_homogeneous(group, i):
if column_start < i:
blocks.append(slice(group, column_start, i,
pos, column_start - origin))
bad_column = slice(group, i, i + 1, pos, i - origin)
decompose_column(bad_column, blocks)
column_start = i + 1
if column_start < column_end:
blocks.append(slice(group, column_start, column_end,
pos, column_start - origin))
# Split MAF blocks into subblocks where aligned strings are identical
# It works by first cutting MAF into subranges where the pattern of gaps is identical
# For example:
# AC--T
# AGTCT
# ACTCT
# Will produce ranges (0, 1), (1, 2), (2, 4), (4, 5)
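# (Added walk-through of the example above): identical gap patterns first group
# the columns into subranges (0, 2), (2, 4) and (4, 5); split_range() then cuts
# (0, 2) at the non-homogeneous column 1, and decompose_column() separates the
# rows of that column by character, producing the pieces listed above.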
def split_maf_blocks(maf_file):
blocks = []
sequence = dict()
for maf in parse_maf(maf_file):
for record in maf:
if record.seq_name not in sequence:
sequence[record.seq_name] = []
pos = [record.start for record in maf]
prev_profile = profile(maf, 0)
prev_column = 0
while prev_column < len(maf[0].body):
next_column = prev_column
pos_inc = [0 for _ in xrange(len(pos))]
while next_column < len(maf[0].body):
next_profile = profile(maf, next_column)
for i in xrange(len(pos)):
pos_inc[i] += 0 if maf[i].body[next_column] == '-' else 1
if next_profile == prev_profile:
next_column += 1
else:
prev_profile = next_profile
break
split_range(maf, prev_column, next_column, pos, blocks)
for i in xrange(len(pos)):
pos[i] += pos_inc[i] - (0 if next_column < len(maf[0].body)
and maf[i].body[next_column] == '-' else 1)
prev_column = next_column
return (blocks, sequence)
# Generate blocks from the sequences uncovered by MAF
def get_uncovered_blocks(fasta, blocks, sequence):
covered = dict()
sequence_record = dict()
for fasta_file in fasta:
for record in SeqIO.parse(fasta_file, "fasta"):
sequence_record[record.id] = record
covered[record.id] = [False for _ in xrange(len(record.seq))]
if record.id not in sequence:
sequence[record.id] = []
for b in xrange(len(blocks)):
for record in blocks[b]:
sequence[record.seq_name].append((pos_start(record), b, record))
for i in xrange(pos_start(record), pos_start(record) + record.size):
covered[record.seq_name][i] = True
for seq_id, cov in covered.items():
i = 0
while i < len(cov):
if cov[i] == False:
j = i
while j < len(cov) and cov[j] == False:
j += 1
blocks.append([MafRecord(seq_name=seq_id, start=i, size=j - i, strand='+',
seq_size=len(cov), body=sequence_record[seq_id].seq[i:j])])
sequence[seq_id].append((i, len(blocks) - 1, blocks[-1][0]))
i = j
else:
i += 1
def blocks_debug_output(blocks):
maf_out = open("out.maf", "w")
print_all_maf(maf_out, blocks)
maf_out.close()
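# GFA1 records emitted below (block indices are 1-based):
#   S <id> <sequence>             one segment per block
#   L <id1> <+/-> <id2> <+/-> *   a link between blocks adjacent on a genome
#   P <name> <id><strand>,...     a path spelling an input sequence as blocks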
def output_block(b, remember_block):
if b not in remember_block:
print "S\t" + str(b + 1) + "\t" + blocks[b][0].body
remember_block.add(b)
def output_link(a, b, remember_block, remember_link):
start1, block1, record1 = a
start2, block2, record2 = b
output_block(block1, remember_block)
output_block(block2, remember_block)
if start1 + record1.size == start2:
link = ','.join((str(block1), record1.strand,
str(block2), record2.strand))
if link not in remember_link:
id = len(remember_link)
remember_link[link] = id
print "\t".join(("L", str(block1 + 1), record1.strand,
str(block2 + 1), record2.strand, "*"))
id = remember_link[link]
else:
print "FAIL", start1, record1.size, start2
parser = argparse.ArgumentParser(description='A helper script for converting MAF produced by SibeliaZ to GFA1.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('maf', help='MAF output by SibeliaZ')
parser.add_argument('fasta', nargs='+', help='Input genomes')
args = parser.parse_args()
blocks, sequence = split_maf_blocks(args.maf)
get_uncovered_blocks(args.fasta, blocks, sequence)
# blocks_debug_output(blocks)
remember_block = set()
remember_link = dict()
print "H\tVN:Z:1.0"
for header, blocks_seq in sequence.items():
blocks_seq.sort()
for i in xrange(0, len(blocks_seq) - 1):
output_link(blocks_seq[i], blocks_seq[i + 1],
remember_block, remember_link)
print "P\t" + header + "\t" + \
','.join((str(block[1] + 1) + block[2].strand for block in blocks_seq))
|
9ae9a73554cfc55c0cc7f21784c8a0f47f733a85
|
2bf27eb2faef64cef8987a9366e322d2ddcac7c9
|
/meta_blocks/samplers/uniform.py
|
bc1d288096bf1187b5ef1c87505e53c1eb0a3f22
|
[] |
permissive
|
alshedivat/meta-blocks
|
5bf4193111f22f6538d4fa52929bdbc656692503
|
6f6d93dfaab75766e8afdf9eb2fad17dc79218f2
|
refs/heads/master
| 2023-04-27T20:34:36.410434
| 2020-06-22T01:15:34
| 2020-06-22T01:37:27
| 242,857,817
| 130
| 8
|
BSD-3-Clause
| 2020-05-12T15:32:19
| 2020-02-24T22:29:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
uniform.py
|
"""Uniform sampling in NumPy."""
from typing import Optional, Tuple
import numpy as np
from meta_blocks.samplers import base
from meta_blocks.tasks.base import TaskDistribution
__all__ = ["UniformSampler"]
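# The stratified mode of UniformSampler below assumes each task's support pool
# is stored contiguously class by class (query shots excluded), so the
# per-class offsets computed in select_labeled address disjoint slices.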
class UniformSampler(base.Sampler):
"""Samples instances uniformly at random."""
stateful = False
def __init__(self, stratified=False, name: Optional[str] = None, **_unused_kwargs):
super(UniformSampler, self).__init__(name=(name or self.__class__.__name__))
self.stratified = stratified
# Random state must be set globally.
self._rng = np.random
# Internal.
self.tasks = None
# --- Methods. ---
def _build(self, *, task_dist: TaskDistribution, **_unused_kwargs):
"""Builds a tuple of selected indices tensors."""
self.tasks = task_dist.task_batch
    def select_labeled(self, size: int, **_unused_kwargs) -> Tuple[np.ndarray, ...]:
        """Return indices of the data points selected for labeling in each task."""
# Build selected indices tensors.
selected_indices = []
for i, task in enumerate(self.tasks):
data_size = task.dataset.size
num_classes = task.dataset.num_classes
num_query_shots = task.num_query_shots
# Select indices of the elements to be labeled.
if self.stratified:
# TODO: better handle edge cases + add tests.
assert size % num_classes == 0
assert data_size % num_classes == 0
data_size_per_class = data_size // num_classes
support_data_per_class = data_size_per_class - num_query_shots
sample_size_per_class = size // num_classes
# Select support elements uniformly stratified by class.
task_indices = []
for c in range(num_classes):
id_offset = c * support_data_per_class
c_ids = id_offset + self._rng.choice(
support_data_per_class,
size=sample_size_per_class,
replace=False,
)
task_indices.append(c_ids)
task_indices = np.concatenate(task_indices)
else:
# Select support elements uniformly at random.
support_data_size = data_size - (num_classes * num_query_shots)
task_indices = self._rng.choice(
support_data_size, size=size, replace=False
)
selected_indices.append(task_indices)
return tuple(selected_indices)
|
2f3d7ac21d196959cfb0495a92f9e517e12ecc10
|
c1b8b6080f29c8037100080298b897618a826475
|
/gammapy/maps/tests/test_axes.py
|
27c0d6698b465f124c81396728e98bf11c594450
|
[
"BSD-3-Clause"
] |
permissive
|
gammapy/gammapy
|
a5d7acbdde848e92e124fefbce9716faa296f572
|
60f03adb8fc7851b9f3ca039512c03a669e3fe10
|
refs/heads/main
| 2023-08-16T21:19:06.624561
| 2023-08-04T12:13:08
| 2023-08-04T12:13:08
| 10,073,640
| 204
| 184
|
BSD-3-Clause
| 2023-09-14T15:26:05
| 2013-05-15T07:50:40
|
Python
|
UTF-8
|
Python
| false
| false
| 29,152
|
py
|
test_axes.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from astropy.time import Time
from astropy.visualization import quantity_support
import matplotlib.pyplot as plt
from gammapy.data import GTI
from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis
from gammapy.utils.scripts import make_path
from gammapy.utils.testing import assert_time_allclose, mpl_plot_check, requires_data
from gammapy.utils.time import time_ref_to_dict
MAP_AXIS_INTERP = [
(np.array([0.25, 0.75, 1.0, 2.0]), "lin"),
(np.array([0.25, 0.75, 1.0, 2.0]), "log"),
(np.array([0.25, 0.75, 1.0, 2.0]), "sqrt"),
]
MAP_AXIS_NODE_TYPES = [
([0.25, 0.75, 1.0, 2.0], "lin", "edges"),
([0.25, 0.75, 1.0, 2.0], "log", "edges"),
([0.25, 0.75, 1.0, 2.0], "sqrt", "edges"),
([0.25, 0.75, 1.0, 2.0], "lin", "center"),
([0.25, 0.75, 1.0, 2.0], "log", "center"),
([0.25, 0.75, 1.0, 2.0], "sqrt", "center"),
]
nodes_array = np.array([0.25, 0.75, 1.0, 2.0])
MAP_AXIS_NODE_TYPE_UNIT = [
(nodes_array, "lin", "edges", "s", "TEST", True),
(nodes_array, "log", "edges", "s", "test", False),
(nodes_array, "lin", "edges", "TeV", "TEST", False),
(nodes_array, "sqrt", "edges", "s", "test", False),
(nodes_array, "lin", "center", "s", "test", False),
(nodes_array + 1e-9, "lin", "edges", "s", "test", True),
(nodes_array + 1e-3, "lin", "edges", "s", "test", False),
(nodes_array / 3600.0, "lin", "edges", "hr", "TEST", True),
]
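# Each tuple: axis nodes, interpolation, node type, unit, name and whether the
# resulting axis should compare equal to the reference axis in
# test_mapaxis_equal.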
@pytest.fixture
def time_intervals():
t0 = Time("2020-03-19")
t_min = np.linspace(0, 10, 20) * u.d
t_max = t_min + 1 * u.h
return {"t_min": t_min, "t_max": t_max, "t_ref": t0}
@pytest.fixture
def time_interval():
t0 = Time("2020-03-19")
t_min = 1 * u.d
t_max = 11 * u.d
return {"t_min": t_min, "t_max": t_max, "t_ref": t0}
@pytest.fixture(scope="session")
def energy_axis_ref():
edges = np.arange(1, 11) * u.TeV
return MapAxis.from_edges(edges, name="energy")
def test_mapaxis_repr():
axis = MapAxis([1, 2, 3], name="test")
assert "MapAxis" in repr(axis)
def test_mapaxis_invalid_name():
with pytest.raises(TypeError):
MapAxis([1, 2, 3], name=1)
@pytest.mark.parametrize(
("nodes", "interp", "node_type", "unit", "name", "result"),
MAP_AXIS_NODE_TYPE_UNIT,
)
def test_mapaxis_equal(nodes, interp, node_type, unit, name, result):
axis1 = MapAxis(
nodes=[0.25, 0.75, 1.0, 2.0],
name="test",
unit="s",
interp="lin",
node_type="edges",
)
axis2 = MapAxis(nodes, name=name, unit=unit, interp=interp, node_type=node_type)
assert (axis1 == axis2) is result
assert (axis1 != axis2) is not result
def test_squash():
axis = MapAxis(
nodes=[0, 1, 2, 3], unit="TeV", name="energy", node_type="edges", interp="lin"
)
ax_sq = axis.squash()
assert_allclose(ax_sq.nbin, 1)
assert_allclose(axis.edges[0], ax_sq.edges[0])
assert_allclose(axis.edges[-1], ax_sq.edges[1])
assert_allclose(ax_sq.center, 1.5 * u.TeV)
def test_upsample():
axis = MapAxis(
nodes=[0, 1, 2, 3], unit="TeV", name="energy", node_type="edges", interp="lin"
)
axis_up = axis.upsample(10)
assert_allclose(axis_up.nbin, 10 * axis.nbin)
assert_allclose(axis_up.edges[0], axis.edges[0])
assert_allclose(axis_up.edges[-1], axis.edges[-1])
assert axis_up.node_type == axis.node_type
def test_downsample():
axis = MapAxis(
nodes=[0, 1, 2, 3, 4, 5, 6, 7, 8],
unit="TeV",
name="energy",
node_type="edges",
interp="lin",
)
axis_down = axis.downsample(2)
assert_allclose(axis_down.nbin, 0.5 * axis.nbin)
assert_allclose(axis_down.edges[0], axis.edges[0])
assert_allclose(axis_down.edges[-1], axis.edges[-1])
assert axis_down.node_type == axis.node_type
def test_upsample_non_regular():
axis = MapAxis.from_edges([0, 1, 3, 7], name="test", interp="lin")
axis_up = axis.upsample(2)
assert_allclose(axis_up.nbin, 2 * axis.nbin)
assert_allclose(axis_up.edges[0], axis.edges[0])
assert_allclose(axis_up.edges[-1], axis.edges[-1])
assert axis_up.node_type == axis.node_type
def test_upsample_non_regular_nodes():
axis = MapAxis.from_nodes([0, 1, 3, 7], name="test", interp="lin")
axis_up = axis.upsample(2)
assert_allclose(axis_up.nbin, 2 * axis.nbin - 1)
assert_allclose(axis_up.center[0], axis.center[0])
assert_allclose(axis_up.center[-1], axis.center[-1])
assert axis_up.node_type == axis.node_type
def test_downsample_non_regular():
axis = MapAxis.from_edges([0, 1, 3, 7, 13], name="test", interp="lin")
axis_down = axis.downsample(2)
assert_allclose(axis_down.nbin, 0.5 * axis.nbin)
assert_allclose(axis_down.edges[0], axis.edges[0])
assert_allclose(axis_down.edges[-1], axis.edges[-1])
assert axis_down.node_type == axis.node_type
def test_downsample_non_regular_nodes():
axis = MapAxis.from_edges([0, 1, 3, 7, 9], name="test", interp="lin")
axis_down = axis.downsample(2)
assert_allclose(axis_down.nbin, 0.5 * axis.nbin)
assert_allclose(axis_down.edges[0], axis.edges[0])
assert_allclose(axis_down.edges[-1], axis.edges[-1])
assert axis_down.node_type == axis.node_type
@pytest.mark.parametrize("factor", [1, 3, 5, 7, 11])
def test_up_downsample_consistency(factor):
axis = MapAxis.from_edges([0, 1, 3, 7, 13], name="test", interp="lin")
axis_new = axis.upsample(factor).downsample(factor)
assert_allclose(axis.edges, axis_new.edges)
def test_one_bin_nodes():
axis = MapAxis.from_nodes([1], name="test", unit="deg")
assert_allclose(axis.center, 1 * u.deg)
assert_allclose(axis.coord_to_pix(1 * u.deg), 0)
assert_allclose(axis.coord_to_pix(2 * u.deg), 0)
assert_allclose(axis.pix_to_coord(0), 1 * u.deg)
def test_group_table_basic(energy_axis_ref):
energy_edges = [1, 2, 10] * u.TeV
groups = energy_axis_ref.group_table(energy_edges)
assert_allclose(groups["group_idx"], [0, 1])
assert_allclose(groups["idx_min"], [0, 1])
assert_allclose(groups["idx_max"], [0, 8])
assert_allclose(groups["energy_min"], [1, 2])
assert_allclose(groups["energy_max"], [2, 10])
bin_type = [_.strip() for _ in groups["bin_type"]]
assert_equal(bin_type, ["normal", "normal"])
@pytest.mark.parametrize(
"energy_edges",
[[1.8, 4.8, 7.2] * u.TeV, [2, 5, 7] * u.TeV, [2000, 5000, 7000] * u.GeV],
)
def test_group_table_energy_edges(energy_axis_ref, energy_edges):
groups = energy_axis_ref.group_table(energy_edges)
assert_allclose(groups["group_idx"], [0, 1, 2, 3])
assert_allclose(groups["idx_min"], [0, 1, 4, 6])
assert_allclose(groups["idx_max"], [0, 3, 5, 8])
assert_allclose(groups["energy_min"].quantity.to_value("TeV"), [1, 2, 5, 7])
assert_allclose(groups["energy_max"].quantity.to_value("TeV"), [2, 5, 7, 10])
bin_type = [_.strip() for _ in groups["bin_type"]]
assert_equal(bin_type, ["underflow", "normal", "normal", "overflow"])
def test_group_table_below_range(energy_axis_ref):
energy_edges = [0.7, 0.8, 1, 4] * u.TeV
groups = energy_axis_ref.group_table(energy_edges)
assert_allclose(groups["group_idx"], [0, 1])
assert_allclose(groups["idx_min"], [0, 3])
assert_allclose(groups["idx_max"], [2, 8])
assert_allclose(groups["energy_min"], [1, 4])
assert_allclose(groups["energy_max"], [4, 10])
bin_type = [_.strip() for _ in groups["bin_type"]]
assert_equal(bin_type, ["normal", "overflow"])
def test_group_table_above_range(energy_axis_ref):
energy_edges = [5, 7, 11, 13] * u.TeV
groups = energy_axis_ref.group_table(energy_edges)
assert_allclose(groups["group_idx"], [0, 1, 2])
assert_allclose(groups["idx_min"], [0, 4, 6])
assert_allclose(groups["idx_max"], [3, 5, 8])
assert_allclose(groups["energy_min"], [1, 5, 7])
assert_allclose(groups["energy_max"], [5, 7, 10])
bin_type = [_.strip() for _ in groups["bin_type"]]
assert_equal(bin_type, ["underflow", "normal", "normal"])
def test_group_table_outside_range(energy_axis_ref):
energy_edges = [20, 30, 40] * u.TeV
with pytest.raises(ValueError):
energy_axis_ref.group_table(energy_edges)
def test_map_axis_aligned():
ax1 = MapAxis([1, 2, 3], interp="lin", node_type="edges")
ax2 = MapAxis([1.5, 2.5], interp="log", node_type="center")
assert not ax1.is_aligned(ax2)
def test_map_axis_pad():
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=1)
padded = axis.pad(pad_width=(0, 1))
assert_allclose(padded.edges, [1, 10, 100] * u.TeV)
padded = axis.pad(pad_width=(1, 0))
assert_allclose(padded.edges, [0.1, 1, 10] * u.TeV)
padded = axis.pad(pad_width=1)
assert_allclose(padded.edges, [0.1, 1, 10, 100] * u.TeV)
def test_map_axes_pad():
axis_1 = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=1)
axis_2 = MapAxis.from_bounds(0, 1, nbin=2, unit="deg", name="rad")
axes = MapAxes([axis_1, axis_2])
axes = axes.pad(axis_name="energy", pad_width=1)
assert_allclose(axes["energy"].edges, [0.1, 1, 10, 100] * u.TeV)
def test_rename():
axis_1 = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=1)
axis = axis_1.rename("energy_true")
assert axis_1.name == "energy"
assert axis.name == "energy_true"
axis_2 = MapAxis.from_bounds(0, 1, nbin=2, unit="deg", name="rad")
axes = MapAxes([axis_1, axis_2])
axes = axes.rename_axes(["energy", "rad"], ["energy_true", "angle"])
assert axes.names == ["energy_true", "angle"]
@pytest.mark.parametrize(("edges", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_init_from_edges(edges, interp):
axis = MapAxis(edges, interp=interp)
assert_allclose(axis.edges, edges)
assert_allclose(axis.nbin, len(edges) - 1)
with pytest.raises(ValueError):
MapAxis.from_edges([1])
MapAxis.from_edges([0, 1, 1, 2])
MapAxis.from_edges([0, 1, 3, 2])
@pytest.mark.parametrize(("nodes", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_from_nodes(nodes, interp):
axis = MapAxis.from_nodes(nodes, interp=interp)
assert_allclose(axis.center, nodes)
assert_allclose(axis.nbin, len(nodes))
with pytest.raises(ValueError):
MapAxis.from_nodes([])
MapAxis.from_nodes([0, 1, 1, 2])
MapAxis.from_nodes([0, 1, 3, 2])
@pytest.mark.parametrize(("nodes", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_from_bounds(nodes, interp):
axis = MapAxis.from_bounds(nodes[0], nodes[-1], 3, interp=interp)
assert_allclose(axis.edges[0], nodes[0])
assert_allclose(axis.edges[-1], nodes[-1])
assert_allclose(axis.nbin, 3)
with pytest.raises(ValueError):
MapAxis.from_bounds(1, 1, 1)
def test_map_axis_from_energy_units():
with pytest.raises(ValueError):
_ = MapAxis.from_energy_bounds(0.1, 10, 2, unit="deg")
with pytest.raises(ValueError):
_ = MapAxis.from_energy_edges([0.1, 1, 10] * u.deg)
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_pix_to_coord(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(axis.center, axis.pix_to_coord(np.arange(axis.nbin, dtype=float)))
assert_allclose(
np.arange(axis.nbin + 1, dtype=float) - 0.5, axis.coord_to_pix(axis.edges)
)
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_coord_to_idx(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(np.arange(axis.nbin, dtype=int), axis.coord_to_idx(axis.center))
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_slice(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, 3))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(1, 3)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, None))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(1, None)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, 2))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(None, 2)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, -1))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(None, -1)])
def test_map_axis_plot_helpers():
axis = MapAxis.from_nodes([0, 1, 2], unit="deg", name="offset")
labels = axis.as_plot_labels
assert labels[0] == "0.00e+00 deg"
assert_allclose(axis.center, axis.as_plot_center)
assert_allclose(axis.edges, axis.as_plot_edges)
def test_map_axis_concatenate():
axis_1 = MapAxis.from_bounds(0, 10, 10, name="axis")
axis_2 = MapAxis.from_bounds(10, 20, 10, name="axis")
axis_2_other_name = MapAxis.from_bounds(10, 20, 10, name="other_axis")
axis_12 = axis_1.concatenate(axis_2)
assert_equal(axis_12.edges, np.linspace(0, 20, 21))
with pytest.raises(ValueError):
axis_1.concatenate(axis_2_other_name)
def test_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
axis_copy = axis.copy()
assert axis.nbin == 20
assert axis.name == "time"
assert axis.node_type == "intervals"
assert_allclose(axis.time_delta.to_value("min"), 60)
assert_allclose(axis.time_mid[0].mjd, 58927.020833333336)
assert "time" in axis.__str__()
assert "20" in axis.__str__()
with pytest.raises(ValueError):
axis.assert_name("bad")
assert axis_copy == axis
assert not axis.is_contiguous
ax_cont = axis.to_contiguous()
assert_allclose(ax_cont.nbin, 39)
def test_single_interval_time_axis(time_interval):
axis = TimeMapAxis(
edges_min=time_interval["t_min"],
edges_max=time_interval["t_max"],
reference_time=time_interval["t_ref"],
)
coord = Time(58933, format="mjd") + u.Quantity([1.5, 3.5, 10], unit="d")
pix = axis.coord_to_pix(coord)
assert axis.nbin == 1
assert_allclose(axis.time_delta.to_value("d"), 10)
assert_allclose(axis.time_mid[0].mjd, 58933)
pix_min = axis.coord_to_pix(time_interval["t_min"] + 0.001 * u.s)
assert_allclose(pix_min, -0.5)
pix_max = axis.coord_to_pix(time_interval["t_max"] - 0.001 * u.s)
assert_allclose(pix_max, 0.5)
assert_allclose(pix, [0.15, 0.35, np.nan])
def test_slice_squash_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
axis_squash = axis.squash()
axis_slice = axis.slice(slice(1, 5))
assert axis_squash.nbin == 1
assert_allclose(axis_squash.time_min[0].mjd, 58927)
assert_allclose(axis_squash.time_delta.to_value("d"), 10.04166666)
assert axis_slice.nbin == 4
assert_allclose(axis_slice.time_delta.to_value("d")[0], 0.04166666666)
assert axis_squash != axis_slice
def test_from_time_edges_time_axis():
t0 = Time("2020-03-19")
t_min = t0 + np.linspace(0, 10, 20) * u.d
t_max = t_min + 1 * u.h
axis = TimeMapAxis.from_time_edges(t_min, t_max)
axis_h = TimeMapAxis.from_time_edges(t_min, t_max, unit="h")
assert axis.nbin == 20
assert axis.name == "time"
assert_time_allclose(axis.reference_time, t0)
assert_allclose(axis.time_delta.to_value("min"), 60)
assert_allclose(axis.time_mid[0].mjd, 58927.020833333336)
assert_allclose(axis_h.time_delta.to_value("h"), 1)
assert_allclose(axis_h.time_mid[0].mjd, 58927.020833333336)
assert axis == axis_h
def test_incorrect_time_axis():
tmin = np.linspace(0, 10, 20) * u.h
tmax = np.linspace(1, 11, 20) * u.h
# incorrect reference time
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax, reference_time=51000 * u.d, name="time")
# overlapping time intervals
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax, reference_time=Time.now(), name="time")
def test_bad_length_sort_time_axis(time_intervals):
tref = time_intervals["t_ref"]
tmin = time_intervals["t_min"]
tmax_reverse = time_intervals["t_max"][::-1]
tmax_short = time_intervals["t_max"][:-1]
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax_reverse, tref, name="time")
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax_short, tref, name="time")
def test_coord_to_idx_time_axis(time_intervals):
tmin = time_intervals["t_min"]
tmax = time_intervals["t_max"]
tref = time_intervals["t_ref"]
axis = TimeMapAxis(tmin, tmax, tref, name="time")
time = Time(58927.020833333336, format="mjd")
times = axis.time_mid
times[::2] += 1 * u.h
times = times.insert(0, tref - [1, 2] * u.yr)
idx = axis.coord_to_idx(time)
indices = axis.coord_to_idx(times)
pix = axis.coord_to_pix(time)
pixels = axis.coord_to_pix(times)
assert idx == 0
assert_allclose(indices[1::2], [-1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
assert_allclose(indices[::2], -1)
assert_allclose(pix, 0, atol=1e-10)
assert_allclose(pixels[1::2], [np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
def test_pix_to_coord_time_axis(time_intervals):
tmin = time_intervals["t_min"]
tmax = time_intervals["t_max"]
tref = time_intervals["t_ref"]
axis = TimeMapAxis(tmin, tmax, tref, name="time")
pixels = [1.3, 3.2, 5.4, 7, 15.33, 17.21, 19.11]
coords = axis.pix_to_coord(pixels)
assert_allclose(
coords[0:3].mjd, [58927.0125, 58927.534649, 58928.069298], rtol=1e-5
)
# test with nan indices
pixels.append(np.nan)
coords = axis.pix_to_coord(pixels)
assert_allclose(
coords[-3:].mjd,
[58929.64032894737, 58930.162478070175, -3.725000e-04],
rtol=1e-5,
)
# assert with invalid pixels & multidim array
coords = axis.pix_to_coord([[-1.2, 0.6], [1.5, 24.7]])
assert_allclose(
coords.mjd,
[[-3.725000e-04, 58927.551315789475], [58928.07346491228, -3.725000e-04]],
rtol=1e-5,
)
# test with one value
coords = axis.pix_to_coord(3)
assert_allclose(coords.mjd, 58927.0, rtol=1e-5)
def test_slice_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
new_axis = axis.slice([2, 6, 9])
squashed = axis.squash()
assert new_axis.nbin == 3
assert_allclose(squashed.time_max[0].mjd, 58937.041667)
assert squashed.nbin == 1
assert_allclose(squashed.time_max[0].mjd, 58937.041667)
def test_time_map_axis_from_time_bounds():
t_min = Time("2006-02-12", scale="utc")
t_max = t_min + 12 * u.h
axis = TimeMapAxis.from_time_bounds(time_min=t_min, time_max=t_max, nbin=3)
assert_allclose(axis.center, [0.083333, 0.25, 0.416667] * u.d, rtol=1e-5)
def test_from_table_time_axis():
t0 = Time("2006-02-12", scale="utc")
t_min = np.linspace(0, 10, 10) * u.d
t_max = t_min + 12 * u.h
table = Table()
table["TIME_MIN"] = t_min
table["TIME_MAX"] = t_max
table.meta.update(time_ref_to_dict(t0))
table.meta["AXCOLS1"] = "TIME_MIN,TIME_MAX"
axis = TimeMapAxis.from_table(table, format="gadf")
assert axis.nbin == 10
assert_allclose(axis.time_mid[0].mjd, 53778.25)
def test_from_table_time_axis_lightcurve_format():
t0 = Time("2006-02-12", scale="tt")
t_min = np.linspace(0, 10, 10) * u.d
t_max = t_min + 12 * u.h
table = Table()
table["time_min"] = t_min.to_value("h")
table["time_max"] = t_max.to_value("h")
table.meta.update(time_ref_to_dict(t0))
table.meta["TIMEUNIT"] = "h"
axis = TimeMapAxis.from_table(table, format="lightcurve")
assert axis.nbin == 10
assert_allclose(axis.time_mid[0].mjd, 53778.25)
assert axis.time_mid.scale == "tt"
t0.format = "mjd"
assert_time_allclose(axis.reference_time, t0)
@requires_data()
def test_from_gti_time_axis():
filename = "$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz"
filename = make_path(filename)
gti = GTI.read(filename)
axis = TimeMapAxis.from_gti(gti)
expected = Time(53090.123451203704, format="mjd", scale="tt")
assert_time_allclose(axis.time_min[0], expected)
assert axis.nbin == 1
def test_from_gti_bounds():
start = u.Quantity([1, 2], "min")
stop = u.Quantity([1.5, 2.5], "min")
time_ref = Time("2010-01-01 00:00:00.0")
gti = GTI.create(start, stop, time_ref)
axis = TimeMapAxis.from_gti_bounds(
gti=gti,
t_delta=10 * u.s,
)
assert axis.nbin == 8
expected = Time("2010-01-01 00:01:00.0")
# GTI.create() changes the reference time format
expected.format = "mjd"
assert_time_allclose(axis.time_min[0], expected)
expected = Time("2010-01-01 00:02:30.0")
expected.format = "mjd"
assert_time_allclose(axis.time_max[-1], expected)
def test_map_with_time_axis(time_intervals):
time_axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
energy_axis = MapAxis.from_energy_bounds(0.1, 10, 2, unit="TeV")
region_map = RegionNDMap.create(
region="fk5; circle(0,0,0.1)", axes=[energy_axis, time_axis]
)
assert region_map.geom.data_shape == (20, 2, 1, 1)
def test_time_axis_plot_helpers():
time_ref = Time("1999-01-01T00:00:00.123456789")
time_axis = TimeMapAxis(
edges_min=[0, 1, 3] * u.d,
edges_max=[0.8, 1.9, 5.4] * u.d,
reference_time=time_ref,
)
labels = time_axis.as_plot_labels
assert labels[0] == "1999-01-01 00:00:00.123 - 1999-01-01 19:12:00.123"
center = time_axis.as_plot_center
assert center[0].year == 1999
edges = time_axis.to_contiguous().as_plot_edges
assert edges[0].year == 1999
def test_axes_basics():
energy_axis = MapAxis.from_energy_edges([1, 3] * u.TeV)
time_ref = Time("1999-01-01T00:00:00.123456789")
time_axis = TimeMapAxis(
edges_min=[0, 1, 3] * u.d,
edges_max=[0.8, 1.9, 5.4] * u.d,
reference_time=time_ref,
)
axes = MapAxes([energy_axis, time_axis])
assert axes.shape == (1, 3)
assert axes.is_unidimensional
assert not axes.is_flat
assert axes.primary_axis.name == "time"
new_axes = axes.copy()
assert new_axes[0] == new_axes[0]
assert new_axes[1] == new_axes[1]
assert new_axes == axes
energy_axis = MapAxis.from_energy_edges([1, 4] * u.TeV)
new_axes = MapAxes([energy_axis, time_axis.copy()])
assert new_axes != axes
def test_axes_getitem():
axis1 = MapAxis.from_bounds(1, 4, 3, name="a1")
axis2 = axis1.copy(name="a2")
axis3 = axis1.copy(name="a3")
axes = MapAxes([axis1, axis2, axis3])
assert isinstance(axes[0], MapAxis)
assert axes[-1].name == "a3"
assert isinstance(axes[1:], MapAxes)
assert len(axes[1:]) == 2
assert isinstance(axes[0:1], MapAxes)
assert len(axes[0:1]) == 1
assert isinstance(axes[["a3", "a1"]], MapAxes)
assert axes[["a3", "a1"]][0].name == "a3"
def test_label_map_axis_basics():
axis = LabelMapAxis(labels=["label-1", "label-2"], name="label-axis")
axis_str = str(axis)
assert "node type" in axis_str
assert "labels" in axis_str
assert "label-2" in axis_str
with pytest.raises(ValueError):
axis.assert_name("time")
assert axis.nbin == 2
assert axis.node_type == "label"
assert_allclose(axis.bin_width, 1)
assert axis.name == "label-axis"
with pytest.raises(ValueError):
axis.edges
axis_copy = axis.copy()
assert axis_copy.name == "label-axis"
def test_label_map_axis_coord_to_idx():
axis = LabelMapAxis(labels=["label-1", "label-2", "label-3"], name="label-axis")
labels = "label-1"
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, 0)
labels = ["label-1", "label-3"]
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, [0, 2])
labels = [["label-1"], ["label-2"]]
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, [[0], [1]])
with pytest.raises(ValueError):
labels = [["bad-label"], ["label-2"]]
_ = axis.coord_to_idx(coord=labels)
def test_mixed_axes():
label_axis = LabelMapAxis(labels=["label-1", "label-2", "label-3"], name="label")
time_axis = TimeMapAxis(
edges_min=[1, 10] * u.day,
edges_max=[2, 13] * u.day,
reference_time=Time("2020-03-19"),
)
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=4)
axes = MapAxes(axes=[energy_axis, time_axis, label_axis])
coords = axes.get_coord()
assert coords["label"].shape == (1, 1, 3)
assert coords["energy"].shape == (4, 1, 1)
assert coords["time"].shape == (1, 2, 1)
idx = axes.coord_to_idx(coords)
assert_allclose(idx[0], np.arange(4).reshape((4, 1, 1)))
assert_allclose(idx[1], np.arange(2).reshape((1, 2, 1)))
assert_allclose(idx[2], np.arange(3).reshape((1, 1, 3)))
hdu = axes.to_table_hdu(format="gadf")
table = Table.read(hdu)
assert table["LABEL"].dtype == np.dtype("U7")
assert len(table) == 24
def test_map_axis_format_plot_xaxis():
axis = MapAxis.from_energy_bounds(
"0.03 TeV", "300 TeV", nbin=20, per_decade=True, name="energy_true"
)
with mpl_plot_check():
ax = plt.gca()
with quantity_support():
ax.plot(axis.center, np.ones_like(axis.center))
ax1 = axis.format_plot_xaxis(ax=ax)
assert ax1.xaxis.units == u.Unit("TeV")
assert " ".join(ax1.axes.axes.get_xlabel().split()[:2]) == "True Energy"
def test_time_format(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"],
time_intervals["t_max"],
time_intervals["t_ref"],
name="time",
)
with pytest.raises(ValueError):
axis.time_format = "null"
def test_time_map_axis_format_plot_xaxis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"],
time_intervals["t_max"],
time_intervals["t_ref"],
name="time",
)
with mpl_plot_check():
ax = plt.gca()
with quantity_support():
ax.plot(axis.as_plot_center, np.ones_like(axis.center))
ax1 = axis.format_plot_xaxis(ax=ax)
assert ax1.axes.axes.get_xlabel().split()[0] == "Time"
assert ax1.axes.axes.get_xlabel().split()[1] == "[iso]"
axis.time_format = "mjd"
with mpl_plot_check():
ax = plt.gca()
with quantity_support():
ax.plot(axis.as_plot_center, np.ones_like(axis.center))
ax2 = axis.format_plot_xaxis(ax=ax)
assert ax2.axes.axes.get_xlabel().split()[1] == "[mjd]"
def test_single_valued_axis():
# this will be interpreted as a scalar value
# that is against the specifications, but we allow it nevertheless
theta_values = np.array([0.5]) * u.deg
table = Table(data=[theta_values, theta_values], names=["THETA_LO", "THETA_HI"])
_ = MapAxis.from_table(table, format="gadf-dl3", column_prefix="THETA")
# this is a proper array-like axis with just a single value
theta_values = np.array([[0.5]]) * u.deg
table = Table(data=[theta_values, theta_values], names=["THETA_LO", "THETA_HI"])
_ = MapAxis.from_table(table, format="gadf-dl3", column_prefix="THETA")
def test_label_map_axis_concatenate():
label1 = LabelMapAxis(["aa", "bb"], name="letters")
label2 = LabelMapAxis(["cc", "dd"], name="letters")
label3 = LabelMapAxis(["ee", "ff"], name="other_letters")
label_append12 = label1.concatenate(label2)
assert_equal(label_append12.center, np.array(["aa", "bb", "cc", "dd"], dtype="<U2"))
assert label_append12.name == "letters"
with pytest.raises(ValueError):
label2.concatenate(label3)
def test_label_map_axis_from_stack():
label1 = LabelMapAxis(["a", "b", "c"], name="letters")
label2 = LabelMapAxis(["d", "e"], name="letters")
label3 = LabelMapAxis(["f"], name="letters")
label_stack = LabelMapAxis.from_stack([label1, label2, label3])
assert_equal(label_stack.center, np.array(["a", "b", "c", "d", "e", "f"]))
assert label_stack.name == "letters"
def test_label_map_axis_squash():
label = LabelMapAxis(["a", "b", "c"], name="Letters")
squash_label = label.squash()
assert squash_label.nbin == 1
assert_equal(squash_label.center, np.array(["a...c"]))
def test_energy_bin_per_decade_not_strict_bounds():
nbin = 5
axis = MapAxis.from_energy_bounds(
"0.03 TeV", "333 TeV", nbin=nbin, per_decade=True, strict_bounds=False
)
assert_allclose(axis.edges[0:-nbin] * 10.0, axis.edges[nbin:], rtol=1e-5, atol=0)
|
c535804def451776f5adbbe8594e755dc198f176
|
20f1a157f45e95027d3e40536b26b6867f902d21
|
/openwisp_users/admin.py
|
07af694b68398554f81bdb713f12694aabfe1f74
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
openwisp/openwisp-users
|
544a3d8f99cb24fdc18bda7948811c0b422e3336
|
8403e5a242b2a4605868592729b18c2b520f2ec8
|
refs/heads/master
| 2023-08-24T12:28:10.604419
| 2023-08-17T17:29:23
| 2023-08-17T17:29:23
| 84,102,610
| 189
| 77
|
BSD-3-Clause
| 2023-08-17T17:29:25
| 2017-03-06T17:41:34
|
Python
|
UTF-8
|
Python
| false
| false
| 25,518
|
py
|
admin.py
|
import logging
from copy import deepcopy
from allauth import app_settings as allauth_settings
from allauth.account.models import EmailAddress
from django import forms
from django.apps import apps
from django.conf import settings
from django.contrib import admin, messages
from django.contrib.admin.actions import delete_selected
from django.contrib.admin.utils import model_ngettext
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from django.core.exceptions import ValidationError
from django.forms.models import BaseInlineFormSet
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from openwisp_utils.admin import UUIDAdmin
from organizations.base_admin import (
BaseOrganizationAdmin,
BaseOrganizationOwnerAdmin,
BaseOrganizationUserAdmin,
)
from organizations.exceptions import OwnershipRequired
from phonenumber_field.formfields import PhoneNumberField
from swapper import load_model
from . import settings as app_settings
from .multitenancy import MultitenantAdminMixin, MultitenantOrgFilter
from .utils import BaseAdmin
Group = load_model('openwisp_users', 'Group')
Organization = load_model('openwisp_users', 'Organization')
OrganizationOwner = load_model('openwisp_users', 'OrganizationOwner')
OrganizationUser = load_model('openwisp_users', 'OrganizationUser')
User = get_user_model()
logger = logging.getLogger(__name__)
class EmailAddressInline(admin.StackedInline):
model = EmailAddress
extra = 0
readonly_fields = ['email']
def has_add_permission(self, *args, **kwargs):
"""
Do not let admins add new email objects via inlines
in order to not mess the coherence of the database.
Admins can still change the main email field of the User model,
that will automatically add a new email address object and
send a confirmation email, see ``UserAdmin.save_model``
"""
return False
def has_change_permission(self, request, obj=None):
if user_not_allowed_to_change_owner(request.user, obj):
self.can_delete = False
return False
return super().has_change_permission(request, obj)
class RequiredInlineFormSet(BaseInlineFormSet):
"""
Generates an inline formset that is required
"""
def _construct_form(self, i, **kwargs):
"""
Override the method to change the form attribute empty_permitted
"""
form = super()._construct_form(i, **kwargs)
# only super users can be created without organization
form.empty_permitted = self.instance.is_superuser
return form
class OrganizationOwnerInline(admin.StackedInline):
model = OrganizationOwner
extra = 0
if app_settings.ORGANIZATION_USER_ADMIN and app_settings.ORGANIZATION_OWNER_ADMIN:
autocomplete_fields = ('organization_user',)
def has_change_permission(self, request, obj=None):
if obj and not request.user.is_superuser and not request.user.is_owner(obj):
return False
return super().has_change_permission(request, obj)
class OrganizationUserInline(admin.StackedInline):
model = OrganizationUser
formset = RequiredInlineFormSet
view_on_site = False
autocomplete_fields = ('organization',)
def get_formset(self, request, obj=None, **kwargs):
"""
In form dropdowns, display only organizations
in which operator `is_admin` and for superusers
display all organizations
"""
formset = super().get_formset(request, obj=obj, **kwargs)
if request.user.is_superuser:
return formset
if not request.user.is_superuser:
formset.form.base_fields[
'organization'
].queryset = Organization.objects.filter(
pk__in=request.user.organizations_managed
)
return formset
def get_extra(self, request, obj=None, **kwargs):
if not obj:
return 1
return 0
class OrganizationUserInlineReadOnly(OrganizationUserInline):
can_delete = False
def get_readonly_fields(self, request, obj=None):
if obj and not request.user.is_superuser:
self.readonly_fields = ['is_admin']
return self.readonly_fields
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
if user_not_allowed_to_change_owner(request.user, obj):
return False
return super().has_change_permission(request, obj)
class UserFormMixin(forms.ModelForm):
email = forms.EmailField(label=_('Email'), max_length=254, required=True)
def validate_user_groups(self, data):
is_staff = data.get('is_staff')
is_superuser = data.get('is_superuser')
groups = data.get('groups')
if is_staff and not is_superuser and not groups:
raise ValidationError(
{'groups': _('A staff user must belong to a group, please select one.')}
)
def clean(self):
cleaned_data = super().clean()
self.validate_user_groups(cleaned_data)
return cleaned_data
class UserCreationForm(UserFormMixin, BaseUserCreationForm):
phone_number = PhoneNumberField(widget=forms.TextInput(), required=False)
class Meta(BaseUserCreationForm.Meta):
fields = ['username', 'email', 'password1', 'password2']
personal_fields = ['first_name', 'last_name', 'phone_number', 'birth_date']
fieldsets = (
(None, {'classes': ('wide',), 'fields': fields}),
('Personal Info', {'classes': ('wide',), 'fields': personal_fields}),
(
'Permissions',
{'classes': ('wide',), 'fields': ['is_active', 'is_staff', 'groups']},
),
)
fieldsets_superuser = (
(None, {'classes': ('wide',), 'fields': fields}),
('Personal Info', {'classes': ('wide',), 'fields': personal_fields}),
(
'Permissions',
{
'classes': ('wide',),
'fields': ['is_active', 'is_staff', 'is_superuser', 'groups'],
},
),
)
class Media:
js = ('admin/js/jquery.init.js', 'openwisp-users/js/addform.js')
class UserChangeForm(UserFormMixin, BaseUserChangeForm):
pass
class UserAdmin(MultitenantAdminMixin, BaseUserAdmin, BaseAdmin):
add_form = UserCreationForm
form = UserChangeForm
ordering = ['-date_joined']
readonly_fields = ['last_login', 'date_joined']
list_display = [
'username',
'email',
'is_active',
'is_staff',
'is_superuser',
'date_joined',
'last_login',
]
inlines = [EmailAddressInline, OrganizationUserInline]
save_on_top = True
actions = ['delete_selected_overridden', 'make_inactive', 'make_active']
fieldsets = list(BaseUserAdmin.fieldsets)
# To ensure extended apps use this template.
change_form_template = 'admin/openwisp_users/user/change_form.html'
def require_confirmation(func):
"""
Decorator to lead to a confirmation page.
This has been used rather than simply adding the same lines
in action functions inorder to avoid repetition of the same
lines in the two added actions and more actions
incase they are added in future.
"""
def wrapper(modeladmin, request, queryset):
opts = modeladmin.model._meta
if request.POST.get('confirmation') is None:
request.current_app = modeladmin.admin_site.name
context = {
**modeladmin.admin_site.each_context(request),
'title': _('Are you sure?'),
'action': request.POST['action'],
'queryset': queryset,
'opts': opts,
}
return TemplateResponse(
request, 'admin/action_confirmation.html', context
)
return func(modeladmin, request, queryset)
wrapper.__name__ = func.__name__
return wrapper
@require_confirmation
def make_inactive(self, request, queryset):
queryset.update(is_active=False)
count = queryset.count()
if count:
self.message_user(
request,
_(
f'Successfully made {count} '
f'{model_ngettext(self.opts, count)} inactive.'
),
messages.SUCCESS,
)
make_inactive.short_description = _('Flag selected users as inactive')
@require_confirmation
def make_active(self, request, queryset):
queryset.update(is_active=True)
count = queryset.count()
if count:
self.message_user(
request,
_(
f'Successfully made {count} '
f'{model_ngettext(self.opts, count)} active.'
),
messages.SUCCESS,
)
make_active.short_description = _('Flag selected users as active')
def get_list_display(self, request):
"""
        Hide the `is_superuser` column from operators
"""
default_list_display = super().get_list_display(request)
if not request.user.is_superuser and 'is_superuser' in default_list_display:
# avoid editing the default_list_display
operators_list_display = default_list_display[:]
operators_list_display.remove('is_superuser')
return operators_list_display
return default_list_display
def get_list_filter(self, request):
filters = super().get_list_filter(request)
if not request.user.is_superuser and 'is_superuser' in filters:
# hide is_superuser filter for non-superusers
operators_filter_list = list(filters)
operators_filter_list.remove('is_superuser')
return tuple(operators_filter_list)
return filters
def get_fieldsets(self, request, obj=None):
# add form fields for staff users
if not obj and not request.user.is_superuser:
return self.add_form.Meta.fieldsets
# add form fields for superusers
if not obj and request.user.is_superuser:
return self.add_form.Meta.fieldsets_superuser
# return fieldsets according to user
fieldsets = super().get_fieldsets(request, obj)
if not request.user.is_superuser:
# edit this tuple to add / remove permission items
# visible to non-superusers
user_permissions = ('is_active', 'is_staff', 'groups', 'user_permissions')
# copy to avoid modifying reference
non_superuser_fieldsets = deepcopy(fieldsets)
non_superuser_fieldsets[2][1]['fields'] = user_permissions
return non_superuser_fieldsets
return fieldsets
def get_readonly_fields(self, request, obj=None):
# retrieve readonly fields
fields = super().get_readonly_fields(request, obj)
# do not allow operators to escalate their privileges
if not request.user.is_superuser:
# copy to avoid modifying reference
fields = fields[:] + ['user_permissions', 'is_superuser']
return fields
def has_change_permission(self, request, obj=None):
if user_not_allowed_to_change_owner(request.user, obj):
return False
# do not allow operators to edit details of superusers
# returns 403 if trying to access the change form of a superuser
if (
obj and obj.is_superuser and not request.user.is_superuser
): # pragma: no cover
return False
return super().has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
if user_not_allowed_to_change_owner(request.user, obj):
return False
return super().has_delete_permission(request, obj)
def get_actions(self, request):
actions = super().get_actions(request)
if not request.POST.get('post') and 'delete_selected' in actions:
del actions['delete_selected']
return actions
def delete_selected_overridden(self, request, queryset):
if not request.user.is_superuser:
users_pk = queryset.values_list('pk', flat=True)
owners_list = list(
OrganizationOwner.objects.filter(organization_user__user__in=users_pk)
.select_related('organization_user__user')
.values_list('organization_user__user__username', flat=True)
)
owners = ', '.join(owners_list)
excluded_owners_qs = queryset.exclude(username__in=owners_list)
# if trying to delete any owner, show an error message
count = len(owners_list)
if count:
self.message_user(
request,
ngettext(
f"Can't delete %d organization owner: {owners}",
f"Can't delete %d organization owners: {owners}",
count,
)
% count,
messages.ERROR,
)
# if trying to delete only owners, stop here
if queryset.exists() and not excluded_owners_qs.exists():
redirect_url = reverse(
f'admin:{self.model._meta.app_label}_user_changelist'
)
return HttpResponseRedirect(redirect_url)
# otherwise proceed but remove owners from the delete queryset
else:
queryset = excluded_owners_qs
return delete_selected(self, request, queryset)
delete_selected_overridden.short_description = delete_selected.short_description
def get_inline_instances(self, request, obj=None):
"""
1. Avoid displaying inline objects when adding a new user
        2. Make OrganizationUserInline readonly for non-superusers
"""
if obj:
inlines = super().get_inline_instances(request, obj).copy()
if not request.user.is_superuser:
for inline in inlines:
if isinstance(inline, OrganizationUserInline):
orguser_index = inlines.index(inline)
inlines.remove(inline)
orguser_readonly = OrganizationUserInlineReadOnly(
self.model, self.admin_site
)
inlines.insert(orguser_index, orguser_readonly)
break
return inlines
inline = OrganizationUserInline(self.model, self.admin_site)
if request:
if hasattr(inline, '_has_add_permission'):
has_add_perm = inline._has_add_permission(request, obj)
else:
has_add_perm = inline.has_add_permission(request, obj)
if has_add_perm:
return [inline]
return []
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
obj = self.get_object(request, object_id)
if obj is not None and user_not_allowed_to_change_owner(request.user, obj):
show_owner_warning = True
extra_context.update({'show_owner_warning': show_owner_warning})
return super().change_view(request, object_id, form_url, extra_context)
def save_model(self, request, obj, form, change):
"""
Automatically creates email addresses for users
added/changed via the django-admin interface
"""
super().save_model(request, obj, form, change)
if obj.email:
try:
EmailAddress.objects.add_email(
request, user=obj, email=obj.email, confirm=True, signup=True
)
except Exception as e:
logger.exception(
'Got exception {} while sending '
'verification email to user {}, email {}'.format(
type(e), obj.username, obj.email
)
)
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
not_deleted = 0
for obj in formset.deleted_objects:
try:
obj.delete()
except OwnershipRequired:
not_deleted += 1
if not_deleted:
single_msg = (
f"Can't delete {not_deleted} organization user because it "
'belongs to an organization owner.'
)
multiple_msg = (
f"Can't delete {not_deleted} organization users because they "
'belong to some organization owners.'
)
self.message_user(
request, ngettext(single_msg, multiple_msg, not_deleted), messages.ERROR
)
for instance in instances:
instance.save()
class OrganizationUserFilter(MultitenantOrgFilter):
"""
Allows filtering users by the organization they're related to
"""
field_name = f'{Organization._meta.app_label}_organization'
def queryset(self, request, queryset):
if self.value():
queryset = queryset.filter(
openwisp_users_organizationuser__organization=self.value()
)
return queryset
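# Extend the stock UserAdmin declared above at module level: expose the extra
# profile fields on the change form, add an "Internal" notes section, keep the
# add form down to the essential credential fields, and allow searching by
# phone number and filtering by organization.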
base_fields = list(UserAdmin.fieldsets[1][1]['fields'])
additional_fields = ['bio', 'url', 'company', 'location', 'phone_number', 'birth_date']
UserAdmin.fieldsets[1][1]['fields'] = base_fields + additional_fields
UserAdmin.fieldsets.insert(3, ('Internal', {'fields': ('notes',)}))
UserAdmin.add_fieldsets[0][1]['fields'] = (
'username',
'email',
'password1',
'password2',
)
UserAdmin.search_fields += ('phone_number',)
UserAdmin.list_filter = (OrganizationUserFilter,) + UserAdmin.list_filter
class GroupAdmin(BaseGroupAdmin, BaseAdmin):
if 'reversion' in settings.INSTALLED_APPS:
# Correctly register the proxy model
def reversion_register(self, model, **kwargs):
return super().reversion_register(model, for_concrete_model=False, **kwargs)
class OrganizationAdmin(
MultitenantAdminMixin, BaseOrganizationAdmin, BaseAdmin, UUIDAdmin
):
view_on_site = False
inlines = [OrganizationOwnerInline]
readonly_fields = ['uuid', 'created', 'modified']
ordering = ['name']
list_display = ['name', 'is_active', 'created', 'modified']
def get_inline_instances(self, request, obj=None):
"""
Remove OrganizationOwnerInline from organization add form
"""
inlines = super().get_inline_instances(request, obj).copy()
if not obj:
for inline in inlines:
if isinstance(inline, OrganizationOwnerInline):
inlines.remove(inline)
break
return inlines
def has_change_permission(self, request, obj=None):
"""
        Allow only managers and superusers to change the organization
"""
if obj and not request.user.is_superuser and not request.user.is_manager(obj):
return False
return super().has_change_permission(request, obj)
class Media(UUIDAdmin.Media):
css = {'all': ('openwisp-users/css/admin.css',)}
class OrganizationUserAdmin(
MultitenantAdminMixin, BaseOrganizationUserAdmin, BaseAdmin
):
view_on_site = False
actions = ['delete_selected_overridden']
search_fields = ['user__username', 'organization__name']
def get_readonly_fields(self, request, obj=None):
# retrieve readonly fields
fields = super().get_readonly_fields(request, obj)
# do not allow operators to escalate their privileges
if not request.user.is_superuser:
# copy to avoid modifying reference
fields = ['is_admin']
return fields
def has_delete_permission(self, request, obj=None):
"""
        Operators should not delete organization users of organizations
        where they are not admins.
"""
if request.user.is_superuser:
return True
if obj and not request.user.is_manager(obj.organization):
return False
return super().has_delete_permission(request, obj)
def delete_view(self, request, object_id, extra_context=None):
try:
return super().delete_view(request, object_id, extra_context)
except OwnershipRequired:
self.message_user(
request,
_(
"Can't delete this organization user because "
'it belongs to an organization owner.'
),
messages.ERROR,
)
redirect_url = reverse(
f'admin:{self.model._meta.app_label}_organizationuser_change',
args=[object_id],
)
return HttpResponseRedirect(redirect_url)
def get_actions(self, request):
actions = super().get_actions(request)
if not request.POST.get('post') and 'delete_selected' in actions:
del actions['delete_selected']
return actions
def delete_selected_overridden(self, request, queryset):
count = 0
pks = []
for obj in queryset:
if obj.user.is_owner(obj.organization_id):
pks.append(obj.pk)
count += 1
# if trying to delete only org users which belong to owners, stop here
if count and count == queryset.count():
self.message_user(
request,
_("Can't delete organization users which belong to owners."),
messages.ERROR,
)
redirect_url = reverse(
f'admin:{self.model._meta.app_label}_organizationuser_changelist'
)
return HttpResponseRedirect(redirect_url)
# if some org owners' org users were selected
if count and count != queryset.count():
queryset = queryset.exclude(pk__in=pks)
single_msg = (
f"Can't delete {count} organization user because it "
'belongs to an organization owner.'
)
multiple_msg = (
f"Can't delete {count} organization users because they "
'belong to some organization owners.'
)
self.message_user(
request, ngettext(single_msg, multiple_msg, count), messages.ERROR
)
# otherwise proceed but remove org users from the delete queryset
return delete_selected(self, request, queryset)
delete_selected_overridden.short_description = delete_selected.short_description
class OrganizationOwnerAdmin(
MultitenantAdminMixin, BaseOrganizationOwnerAdmin, BaseAdmin
):
list_display = ('get_user', 'organization')
if app_settings.ORGANIZATION_USER_ADMIN and app_settings.ORGANIZATION_OWNER_ADMIN:
autocomplete_fields = ['organization_user', 'organization']
def get_user(self, obj):
return obj.organization_user.user
admin.site.register(User, UserAdmin)
admin.site.register(Organization, OrganizationAdmin)
# OrganizationUser items can be managed on the user page
if app_settings.ORGANIZATION_USER_ADMIN:
admin.site.register(OrganizationUser, OrganizationUserAdmin)
# this item is not being used right now
if app_settings.ORGANIZATION_OWNER_ADMIN:
admin.site.register(OrganizationOwner, OrganizationOwnerAdmin)
# unregister auth.Group
base_group_model = apps.get_model('auth', 'Group')
admin.site.unregister(base_group_model)
# register openwisp_users.Group proxy model
admin.site.register(Group, GroupAdmin)
# unregister some admin components to keep the admin interface simple
# we can re-enable these models later when they will be really needed
EmailAddress = apps.get_model('account', 'EmailAddress')
if admin.site.is_registered(EmailAddress):
admin.site.unregister(EmailAddress)
if allauth_settings.SOCIALACCOUNT_ENABLED:
for model in [
('socialaccount', 'SocialApp'),
('socialaccount', 'SocialToken'),
('socialaccount', 'SocialAccount'),
]:
model_class = apps.get_model(*model)
if admin.site.is_registered(model_class):
admin.site.unregister(model_class)
if 'rest_framework.authtoken' in settings.INSTALLED_APPS: # pragma: no cover
TokenProxy = apps.get_model('authtoken', 'TokenProxy')
if admin.site.is_registered(TokenProxy):
admin.site.unregister(TokenProxy)
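# Helper used by the admin classes above: a non-superuser may not change or
# delete another user who owns any organization.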
def user_not_allowed_to_change_owner(user, obj):
return (
obj
and not user.is_superuser
and user.pk != obj.pk
and obj.is_owner_of_any_organization
)
|
dff3d74fdb25220ba402095c5345c67344025160
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tests/test_mu.py
|
020bee024ae5379d1565a6ef3e390fd5322acbba
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 52,693
|
py
|
test_mu.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import importlib
import json
import logging
import os
import platform
import py_compile
import shutil
import site
import sys
import tempfile
import time
import unittest
from unittest import mock
from unittest.mock import patch
import zipfile
from c7n.config import Config
from c7n.mu import (
custodian_archive,
generate_requirements,
get_exec_options,
BucketLambdaNotification,
LambdaFunction,
LambdaManager,
PolicyLambda,
PythonPackageArchive,
SNSSubscription,
SQSSubscription,
CloudWatchEventSource,
CloudWatchLogSubscription
)
from .common import (
BaseTest, event_data, functional, Bag, ACCOUNT_ID)
from .data import helloworld
ROLE = "arn:aws:iam::644160558196:role/custodian-mu"
def test_get_exec_options():
assert get_exec_options(Config().empty()) == {'tracer': 'default'}
assert get_exec_options(Config().empty(output_dir='/tmp/xyz')) == {
'tracer': 'default'}
assert get_exec_options(
Config().empty(log_group='gcp', output_dir='gs://mybucket/myprefix')) == {
'tracer': 'default',
'output_dir': 'gs://mybucket/myprefix',
'log_group': 'gcp'}
def test_generate_requirements():
lines = generate_requirements(
'boto3', ignore=('docutils', 's3transfer', 'six'), exclude=['urllib3'])
packages = []
for l in lines.split('\n'):
pkg_name, version = l.split('==')
packages.append(pkg_name)
assert set(packages) == {'botocore', 'jmespath', 'python-dateutil'}
class Publish(BaseTest):
def make_func(self, **kw):
func_data = dict(
name="test-foo-bar",
handler="index.handler",
memory_size=128,
timeout=3,
role='custodian-mu',
runtime="python2.7",
description="test",
)
func_data.update(kw)
archive = PythonPackageArchive()
archive.add_contents(
"index.py", """def handler(*a, **kw):\n print("Greetings, program!")"""
)
archive.close()
self.addCleanup(archive.remove)
return LambdaFunction(func_data, archive)
def test_publishes_a_lambda(self):
session_factory = self.replay_flight_data("test_publishes_a_lambda")
mgr = LambdaManager(session_factory)
func = self.make_func()
self.addCleanup(mgr.remove, func)
result = mgr.publish(func)
self.assertEqual(result["CodeSize"], 169)
def test_publish_a_lambda_with_layer_and_concurrency(self):
factory = self.replay_flight_data('test_lambda_layer_concurrent_publish')
mgr = LambdaManager(factory)
layers = ['arn:aws:lambda:us-east-1:644160558196:layer:CustodianLayer:2']
func = self.make_func(
concurrency=5,
layers=layers)
self.addCleanup(mgr.remove, func)
result = mgr.publish(func)
self.assertEqual(result['Layers'][0]['Arn'], layers[0])
state = mgr.get(func.name)
self.assertEqual(state['Concurrency']['ReservedConcurrentExecutions'], 5)
func = self.make_func(layers=layers)
output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
result = mgr.publish(func)
self.assertEqual(result['Layers'][0]['Arn'], layers[0])
lines = output.getvalue().strip().split("\n")
self.assertFalse('Updating function: test-foo-bar config Layers' in lines)
self.assertTrue('Removing function: test-foo-bar concurrency' in lines)
def test_can_switch_runtimes(self):
session_factory = self.replay_flight_data("test_can_switch_runtimes")
func = self.make_func()
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, func)
result = mgr.publish(func)
self.assertEqual(result["Runtime"], "python2.7")
func.func_data["runtime"] = "python3.6"
result = mgr.publish(func)
self.assertEqual(result["Runtime"], "python3.6")
class PolicyLambdaProvision(Publish):
role = "arn:aws:iam::644160558196:role/custodian-mu"
def assert_items(self, result, expected):
for k, v in expected.items():
self.assertEqual(v, result[k])
def test_config_rule_provision(self):
session_factory = self.replay_flight_data("test_config_rule")
p = self.load_policy(
{
"resource": "security-group",
"name": "sg-modified",
"mode": {"type": "config-rule"},
},
session_factory=session_factory
)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assertEqual(result["FunctionName"], "custodian-sg-modified")
self.addCleanup(mgr.remove, pl)
def test_published_lambda_architecture(self):
session_factory = self.replay_flight_data("test_published_lambda_architecture")
with patch('platform.machine', return_value="arm64"):
p = self.load_policy({
'name': 'ec2-foo-bar',
'resource': 'aws.ec2',
'mode': {
'type': 'cloudtrail',
'role': 'arn:aws:iam::644160558196:role/custodian-mu',
'events': ['RunInstances']}})
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl)
self.assertEqual(result["Architectures"], ["arm64"])
def test_deferred_interpolation(self):
p = self.load_policy({
'name': 'ec2-foo-bar',
'resource': 'aws.ec2',
'mode': {
'type': 'cloudtrail',
'role': 'arn:aws:iam::644160558196:role/custodian-mu',
'events': ['RunInstances']},
'actions': [{
'type': 'tag', 'key': 'LastMatch', 'value': '{now}'
}]})
p.expand_variables(p.get_variables())
pl = PolicyLambda(p)
pl.get_archive()
def test_updated_lambda_architecture(self):
session_factory = self.replay_flight_data("test_updated_lambda_architecture")
lambda_client = session_factory().client("lambda")
initial_config = lambda_client.get_function(FunctionName="custodian-ec2-foo-bar")
self.assertEqual(initial_config.get('Configuration').get('Architectures'), ["arm64"])
with patch('platform.machine', return_value="x86_64"):
p1 = self.load_policy({
'name': 'ec2-foo-bar',
'resource': 'aws.ec2',
'mode': {
'type': 'cloudtrail',
'role': 'arn:aws:iam::644160558196:role/custodian-mu',
'events': ['RunInstances']}})
pl1 = PolicyLambda(p1)
mgr = LambdaManager(session_factory)
mgr.publish(pl1)
if self.recording:
time.sleep(30)
updated_config = lambda_client.get_function_configuration(
FunctionName="custodian-ec2-foo-bar")
self.assertEqual(updated_config.get('Architectures'), ["x86_64"])
self.addCleanup(mgr.remove, pl1)
def test_config_poll_rule_evaluation(self):
session_factory = self.record_flight_data("test_config_poll_rule_provision")
# Config added support for Kinesis streams after this test was written;
# disable that support so the original behavior check on config-poll mode
# can still be verified
from c7n.resources.kinesis import KinesisStream
self.patch(
KinesisStream.resource_type, 'config_type', None)
p = self.load_policy({
'name': 'configx',
'resource': 'aws.kinesis',
'mode': {
'schedule': 'Three_Hours',
'type': 'config-poll-rule'}})
mu_policy = PolicyLambda(p)
mu_policy.arn = "arn:aws:lambda:us-east-1:644160558196:function:CloudCustodian"
events = mu_policy.get_events(session_factory)
self.assertEqual(len(events), 1)
config_rule = events.pop()
self.assertEqual(
config_rule.get_rule_params(mu_policy),
{'ConfigRuleName': 'custodian-configx',
'Description': 'cloud-custodian lambda policy',
'MaximumExecutionFrequency': 'Three_Hours',
'Scope': {'ComplianceResourceTypes': ['AWS::Kinesis::Stream']},
'Source': {
'Owner': 'CUSTOM_LAMBDA',
'SourceDetails': [{'EventSource': 'aws.config',
'MessageType': 'ScheduledNotification'}],
'SourceIdentifier': 'arn:aws:lambda:us-east-1:644160558196:function:CloudCustodian'} # noqa
})
def test_config_rule_evaluation(self):
session_factory = self.replay_flight_data("test_config_rule_evaluate")
p = self.load_policy(
{
"resource": "ec2",
"name": "ec2-modified",
"mode": {"type": "config-rule"},
"filters": [{"InstanceId": "i-094bc87c84d56c589"}],
},
session_factory=session_factory,
)
mode = p.get_execution_mode()
event = event_data("event-config-rule-instance.json")
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
def test_phd_account_mode(self):
factory = self.replay_flight_data('test_phd_event_mode')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'account',
'mode': {
'categories': ['scheduledChange'],
'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
'type': 'phd'}}, session_factory=factory)
mode = p.get_execution_mode()
event = event_data('event-phd-ec2-retire.json')
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
self.assertTrue('c7n:HealthEvent' in resources[0])
def test_phd_mode_sans_details(self):
factory = self.replay_flight_data('test_phd_event_mode')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'account',
'mode': {'type': 'phd'}}, session_factory=factory)
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(factory)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'source': ['aws.health']}
)
def test_phd_mode(self):
factory = self.replay_flight_data('test_phd_event_mode')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'ec2',
'mode': {
'categories': ['scheduledChange'],
'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
'type': 'phd'}}, session_factory=factory)
mode = p.get_execution_mode()
event = event_data('event-phd-ec2-retire.json')
resources = mode.run(event, None)
self.assertEqual(len(resources), 1)
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(factory)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'detail': {
'eventTypeCategory': ['scheduledChange'],
'eventTypeCode': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED']},
'source': ['aws.health']}
)
def test_phd_mode_account(self):
factory = self.replay_flight_data('test_phd_event_account')
p = self.load_policy(
{'name': 'ec2-retire',
'resource': 'account',
'mode': {
'categories': ['issue', 'scheduledChange'],
'statuses': ['open', 'upcoming'],
'type': 'phd'}}, session_factory=factory)
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(factory)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'detail': {
'eventTypeCategory': ['issue', 'scheduledChange']},
'source': ['aws.health']}
)
def test_cloudtrail_delay(self):
p = self.load_policy({
'name': 'aws-account',
'resource': 'aws.account',
'mode': {
'type': 'cloudtrail',
'delay': 32,
'role': 'CustodianRole',
'events': ['RunInstances']}})
from c7n import policy
class time:
invokes = []
@classmethod
def sleep(cls, duration):
cls.invokes.append(duration)
self.patch(policy, 'time', time)
trail_mode = p.get_execution_mode()
results = trail_mode.run({
'detail': {
'eventSource': 'ec2.amazonaws.com',
'eventName': 'RunInstances'}},
None)
self.assertEqual(len(results), 0)
self.assertEqual(time.invokes, [32])
def test_user_pattern_merge(self):
p = self.load_policy({
'name': 'ec2-retire',
'resource': 'ec2',
'mode': {
'type': 'cloudtrail',
'pattern': {
'detail': {
'userIdentity': {
'userName': [{'anything-but': 'deputy'}]}}},
'events': [{
'ids': 'responseElements.subnet.subnetId',
'source': 'ec2.amazonaws.com',
'event': 'CreateSubnet'}]}})
p_lambda = PolicyLambda(p)
events = p_lambda.get_events(None)
self.assertEqual(
json.loads(events[0].render_event_pattern()),
{'detail': {'eventName': ['CreateSubnet'],
'eventSource': ['ec2.amazonaws.com'],
'userIdentity': {'userName': [{'anything-but': 'deputy'}]}},
'detail-type': ['AWS API Call via CloudTrail']})
@functional
def test_sqs_subscriber(self):
session_factory = self.replay_flight_data('test_mu_sqs_subscriber')
func_name = 'c7n-hello-sqs'
queue_name = "my-dev-test-3"
# Setup Queues
session = session_factory()
client = session.client('sqs')
queue_url = client.create_queue(QueueName=queue_name).get('QueueUrl')
queue_arn = client.get_queue_attributes(
QueueUrl=queue_url,
AttributeNames=['QueueArn'])['Attributes']['QueueArn']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
# Setup Function
params = dict(
session_factory=session_factory,
name=func_name,
role="arn:aws:iam::644160558196:role/custodian-mu",
events=[SQSSubscription(session_factory, [queue_arn])])
func = helloworld.get_function(**params)
manager = LambdaManager(session_factory)
manager.publish(func)
self.addCleanup(manager.remove, func)
# Send and Receive Check
client.send_message(
QueueUrl=queue_url, MessageBody=json.dumps({'jurassic': 'block'}))
if self.recording:
time.sleep(60)
# log_events = list(manager.logs(func, "1970-1-1 UTC", "2037-1-1"))
# messages = [
# e["message"] for e in log_events if e["message"].startswith('{"Records')
# ]
self.addCleanup(
session.client("logs").delete_log_group,
logGroupName="/aws/lambda/%s" % func_name)
# self.assertIn(
# 'jurassic',
# json.loads(messages[0])["Records"][0]["body"])
@functional
def test_sns_subscriber_and_ipaddress(self):
self.patch(SNSSubscription, "iam_delay", 0.01)
session_factory = self.replay_flight_data("test_sns_subscriber_and_ipaddress")
session = session_factory()
client = session.client("sns")
# create an sns topic
tname = "custodian-test-sns-sub"
topic_arn = client.create_topic(Name=tname)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
# provision a lambda via mu
params = dict(
session_factory=session_factory,
name="c7n-hello-world",
role="arn:aws:iam::644160558196:role/custodian-mu",
events=[SNSSubscription(session_factory, [topic_arn])],
)
func = helloworld.get_function(**params)
manager = LambdaManager(session_factory)
manager.publish(func)
self.addCleanup(manager.remove, func)
# now publish to the topic and look for lambda log output
client.publish(TopicArn=topic_arn, Message="Greetings, program!")
if self.recording:
time.sleep(30)
# log_events = manager.logs(func, "1970-1-1 UTC", "2037-1-1")
# messages = [
# e["message"] for e in log_events if e["message"].startswith('{"Records')
# ]
# self.addCleanup(
# session.client("logs").delete_log_group,
# logGroupName="/aws/lambda/c7n-hello-world",
# )
# self.assertEqual(
# json.loads(messages[0])["Records"][0]["Sns"]["Message"],
# "Greetings, program!",
# )
def test_cwe_update_config_and_code(self):
# Originally this was testing the no-update case, but that is tricky to
# record: any change to the code causes checksum mismatches, which imply
# updating the function code, which in turn invalidates the recorded data
# and the focus of the test.
session_factory = self.replay_flight_data("test_cwe_update", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {"type": "cloudtrail",
"events": ["CreateBucket"], 'runtime': 'python2.7'},
"filters": [
{"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"]},
],
"actions": ["no-op"],
})
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, "Dev", role=ROLE)
self.addCleanup(mgr.remove, pl)
p = self.load_policy(
{
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {
"type": "cloudtrail",
"memory": 256,
'runtime': 'python2.7',
"events": [
"CreateBucket",
{
"event": "PutBucketPolicy",
"ids": "requestParameters.bucketName",
"source": "s3.amazonaws.com",
},
],
},
"filters": [
{
"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"],
}
],
"actions": ["no-op"],
},
)
output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
result2 = mgr.publish(PolicyLambda(p), "Dev", role=ROLE)
lines = output.getvalue().strip().split("\n")
self.assertTrue("Updating function custodian-s3-bucket-policy code" in lines)
self.assertTrue(
"Updating function: custodian-s3-bucket-policy config MemorySize" in lines)
self.assertEqual(result["FunctionName"], result2["FunctionName"])
# drive by coverage
functions = [
i
for i in mgr.list_functions()
if i["FunctionName"] == "custodian-s3-bucket-policy"
]
self.assertTrue(len(functions), 1)
def test_cwe_trail(self):
session_factory = self.replay_flight_data("test_cwe_trail", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "s3-bucket-policy",
"mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
"filters": [
{
"type": "missing-policy-statement",
"statement_ids": ["RequireEncryptedPutObject"],
}
],
"actions": ["no-op"]},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
events = pl.get_events(session_factory)
self.assertEqual(len(events), 1)
event = events.pop()
self.assertEqual(
json.loads(event.render_event_pattern()),
{
u"detail": {
u"eventName": [u"CreateBucket"],
u"eventSource": [u"s3.amazonaws.com"],
},
u"detail-type": ["AWS API Call via CloudTrail"],
},
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-s3-bucket-policy",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
def test_cwe_instance(self):
session_factory = self.replay_flight_data("test_cwe_instance", zdata=True)
p = self.load_policy({
"resource": "s3",
"name": "ec2-encrypted-vol",
"mode": {"type": "ec2-instance-state", "events": ["pending"]}},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-ec2-encrypted-vol",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
self.assert_items(
result["Rules"][0],
{"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"},
)
self.assertEqual(
json.loads(result["Rules"][0]["EventPattern"]),
{
"source": ["aws.ec2"],
"detail": {"state": ["pending"]},
"detail-type": ["EC2 Instance State-change Notification"],
},
)
def test_cwe_asg_instance(self):
session_factory = self.replay_flight_data("test_cwe_asg", zdata=True)
p = self.load_policy(
{
"resource": "asg",
"name": "asg-spin-detector",
"mode": {"type": "asg-instance-state", "events": ["launch-failure"]},
}, session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"FunctionName": "custodian-asg-spin-detector",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
self.assert_items(
result["Rules"][0],
{"State": "ENABLED", "Name": "custodian-asg-spin-detector"},
)
self.assertEqual(
json.loads(result["Rules"][0]["EventPattern"]),
{
"source": ["aws.autoscaling"],
"detail-type": ["EC2 Instance Launch Unsuccessful"],
},
)
def test_cwe_security_hub_action(self):
factory = self.replay_flight_data('test_mu_cwe_sechub_action')
p = self.load_policy({
'name': 'sechub',
'resource': 'account',
'mode': {
'type': 'hub-action'}},
session_factory=factory,
config={'account_id': ACCOUNT_ID})
mu_policy = PolicyLambda(p)
events = mu_policy.get_events(factory)
self.assertEqual(len(events), 1)
hub_action = events.pop()
self.assertEqual(
json.loads(hub_action.cwe.render_event_pattern()),
{'resources': [
'arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub'],
'source': ['aws.securityhub'],
'detail-type': [
'Security Hub Findings - Custom Action', 'Security Hub Insight Results'
]})
hub_action.cwe = cwe = mock.Mock(CloudWatchEventSource)
cwe.get.return_value = False
cwe.update.return_value = True
cwe.add.return_value = True
self.assertEqual(repr(hub_action), "<SecurityHub Action sechub>")
self.assertEqual(
hub_action._get_arn(),
"arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub")
self.assertEqual(
hub_action.get(mu_policy.name), {'event': False, 'action': None})
hub_action.add(mu_policy)
self.assertEqual(
{'event': False,
'action': {
'ActionTargetArn': ('arn:aws:securityhub:us-east-1:'
'644160558196:action/custom/sechub'),
'Name': 'Account sechub', 'Description': 'sechub'}},
hub_action.get(mu_policy.name))
hub_action.update(mu_policy)
hub_action.remove(mu_policy, func_deleted=True)
self.assertEqual(
hub_action.get(mu_policy.name),
{'event': False, 'action': None})
def test_cwe_schedule(self):
session_factory = self.replay_flight_data("test_cwe_schedule", zdata=True)
p = self.load_policy(
{
"resource": "ec2",
"name": "periodic-ec2-checker",
"mode": {"type": "periodic", "schedule": "rate(1 day)"},
}, session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl)
result = mgr.publish(pl, "Dev", role=ROLE)
self.assert_items(
result,
{
"FunctionName": "custodian-periodic-ec2-checker",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
},
)
events = session_factory().client("events")
result = events.list_rules(NamePrefix="custodian-periodic-ec2-checker")
self.assert_items(
result["Rules"][0],
{
"State": "ENABLED",
"ScheduleExpression": "rate(1 day)",
"Name": "custodian-periodic-ec2-checker",
},
)
key_arn = "arn:aws:kms:us-west-2:644160558196:key/" "44d25a5c-7efa-44ed-8436-b9511ea921b3"
sns_arn = "arn:aws:sns:us-west-2:644160558196:config-topic"
def create_a_lambda(self, flight, **extra):
session_factory = self.replay_flight_data(flight, zdata=True)
mode = {
"type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
}
mode.update(extra)
p = self.load_policy({
"resource": "s3",
"name": "hello-world",
"actions": ["no-op"],
"mode": mode},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
def cleanup():
mgr.remove(pl)
if self.recording:
time.sleep(60)
self.addCleanup(cleanup)
return mgr, mgr.publish(pl)
def create_a_lambda_with_lots_of_config(self, flight):
extra = {
"environment": {"Variables": {"FOO": "bar"}},
"kms_key_arn": self.key_arn,
"dead_letter_config": {"TargetArn": self.sns_arn},
"tracing_config": {"Mode": "Active"},
"tags": {"Foo": "Bar"},
}
return self.create_a_lambda(flight, **extra)
def update_a_lambda(self, mgr, **config):
mode = {
"type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
}
mode.update(config)
p = self.load_policy({
"resource": "s3",
"name": "hello-world",
"actions": ["no-op"],
"mode": mode,
})
pl = PolicyLambda(p)
return mgr.publish(pl)
def test_config_coverage_for_lambda_creation(self):
mgr, result = self.create_a_lambda_with_lots_of_config(
"test_config_coverage_for_lambda_creation"
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "bar"}},
"KMSKeyArn": self.key_arn,
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Bar"})
def test_config_coverage_for_lambda_update_from_plain(self):
mgr, result = self.create_a_lambda(
"test_config_coverage_for_lambda_update_from_plain"
)
result = self.update_a_lambda(
mgr,
**{
"environment": {"Variables": {"FOO": "bloo"}},
"kms_key_arn": self.key_arn,
"dead_letter_config": {"TargetArn": self.sns_arn},
"tracing_config": {"Mode": "Active"},
"tags": {"Foo": "Bloo"},
}
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python2.7",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "bloo"}},
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Bloo"})
def test_config_coverage_for_lambda_update_from_complex(self):
mgr, result = self.create_a_lambda_with_lots_of_config(
"test_config_coverage_for_lambda_update_from_complex"
)
result = self.update_a_lambda(
mgr,
**{
"runtime": "python3.6",
"environment": {"Variables": {"FOO": "baz"}},
"kms_key_arn": "",
"dead_letter_config": {},
"tracing_config": {},
"tags": {"Foo": "Baz", "Bah": "Bug"},
}
)
self.assert_items(
result,
{
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello-world",
"Handler": "custodian_policy.run",
"MemorySize": 512,
"Runtime": "python3.6",
"Timeout": 60,
"DeadLetterConfig": {"TargetArn": self.sns_arn},
"Environment": {"Variables": {"FOO": "baz"}},
"TracingConfig": {"Mode": "Active"},
},
)
tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
self.assert_items(tags, {"Foo": "Baz", "Bah": "Bug"})
def test_optional_packages(self):
data = {
"name": "s3-lambda-extra",
"resource": "s3",
"mode": {
"type": "cloudtrail",
"packages": ["boto3"],
"events": ["CreateBucket"],
},
}
p = self.load_policy(data)
pl = PolicyLambda(p)
pl.archive.close()
self.assertTrue("boto3/utils.py" in pl.archive.get_filenames())
def test_delta_config_diff(self):
delta = LambdaManager.delta_function
self.assertFalse(
delta(
{
"VpcConfig": {
"SubnetIds": ["s-1", "s-2"],
"SecurityGroupIds": ["sg-1", "sg-2"],
}
},
{
"VpcConfig": {
"SubnetIds": ["s-2", "s-1"],
"SecurityGroupIds": ["sg-2", "sg-1"],
}
},
)
)
self.assertTrue(
delta(
{
"VpcConfig": {
"SubnetIds": ["s-1", "s-2"],
"SecurityGroupIds": ["sg-1", "sg-2"],
}
},
{
"VpcConfig": {
"SubnetIds": ["s-2", "s-1"],
"SecurityGroupIds": ["sg-3", "sg-1"],
}
},
)
)
self.assertFalse(delta({}, {"DeadLetterConfig": {}}))
self.assertTrue(delta({}, {"DeadLetterConfig": {"TargetArn": "arn"}}))
self.assertFalse(delta({}, {"Environment": {"Variables": {}}}))
self.assertTrue(delta({}, {"Environment": {"Variables": {"k": "v"}}}))
self.assertFalse(delta({}, {"KMSKeyArn": ""}))
self.assertFalse(
delta({}, {"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}})
)
def test_different_lambda_handler(self):
p = PolicyLambda(Bag({"name": "hello", "data": {"mode": {"handler": "custom.handler"}}}))
self.assertEqual(
p.get_config()["Handler"],
"custom.handler"
)
def test_config_defaults(self):
p = PolicyLambda(Bag({"name": "hello", "data": {"mode": {}}}))
self.maxDiff = None
default_arch = platform.machine()
if default_arch in ('aarch64', 'arm64'):
default_arch = 'arm64'
else:
default_arch = 'x86_64'
self.assertEqual(
p.get_config(),
{
"DeadLetterConfig": {},
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello",
"Handler": "custodian_policy.run",
"KMSKeyArn": "",
"MemorySize": 512,
"Role": "",
"Runtime": "python3.9",
"Architectures": [default_arch],
"Tags": {},
"Timeout": 900,
"TracingConfig": {"Mode": "PassThrough"},
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
},
)
def test_lambda_architecture(self):
p = PolicyLambda(Bag({"name": "hello", "data": {"mode": {}}}))
with patch('platform.machine', return_value='arm64'):
self.assertEqual(
p.get_config(),
{
"DeadLetterConfig": {},
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello",
"Handler": "custodian_policy.run",
"KMSKeyArn": "",
"MemorySize": 512,
"Role": "",
"Runtime": "python3.9",
"Architectures": ["arm64"],
"Tags": {},
"Timeout": 900,
"TracingConfig": {"Mode": "PassThrough"},
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
},
)
with patch('platform.machine', return_value='x86_64'):
self.assertEqual(
p.get_config(),
{
"DeadLetterConfig": {},
"Description": "cloud-custodian lambda policy",
"FunctionName": "custodian-hello",
"Handler": "custodian_policy.run",
"KMSKeyArn": "",
"MemorySize": 512,
"Role": "",
"Runtime": "python3.9",
"Architectures": ["x86_64"],
"Tags": {},
"Timeout": 900,
"TracingConfig": {"Mode": "PassThrough"},
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
},
)
def test_remove_permissions_from_event_cloudtrail(self):
session_factory = self.replay_flight_data("test_remove_permissions_event")
p = self.load_policy({
"resource": "ec2",
"name": "test",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]}},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl, True)
mgr.publish(pl, "Dev", role=ROLE)
events = pl.get_events(session_factory)
lambda_client = session_factory().client("lambda")
policy = lambda_client.get_policy(FunctionName="custodian-test")
self.assertTrue(policy)
self.assertTrue(len(events) > 0)
for e in events:
e.remove(pl, func_deleted=False)
with self.assertRaises(lambda_client.exceptions.ResourceNotFoundException):
lambda_client.get_policy(FunctionName="custodian-test")
# we should be able to call remove again even though it's already gone
for e in events:
e.remove(pl, func_deleted=False)
# we should also be able to explicitly remove_permissions even though it's
# already gone
for e in events:
e.remove_permissions(pl, remove_permission=True)
def test_pause_resume_policy(self):
session_factory = self.replay_flight_data("test_pause_resume_policy")
p = self.load_policy({
"resource": "ec2",
"name": "test",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]}},
session_factory=session_factory)
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, pl, True)
mgr.publish(pl, "Dev", role=ROLE)
events = pl.get_events(session_factory)
self.assertEqual(len(events), 1)
cw_client = session_factory().client('events')
events[0].pause(pl)
rule = cw_client.describe_rule(Name=pl.event_name)
self.assertEqual(rule["State"], "DISABLED")
# subsequent calls to pause an already paused rule should be a no-op
events[0].pause(pl)
events[0].resume(pl)
rule = cw_client.describe_rule(Name=pl.event_name)
self.assertEqual(rule["State"], "ENABLED")
# subsequent calls to resume an already enabled rule should be a no-op
events[0].resume(pl)
def test_cloudwatch_log_subscription(self):
session_factory = self.replay_flight_data("test_cloudwatch_log_subscription")
func = self.make_func(role=ROLE)
LambdaManager(session_factory).publish(func)
cwls = CloudWatchLogSubscription(
session_factory,
[
{
"logGroupName": "/aws/lambda/test",
"arn": "arn:aws:logs:us-east-1:644160558196:log-group:/aws/lambda/test:*",
}
],
"foo"
)
cwls.add(func)
lambda_client = session_factory().client("lambda")
policy = lambda_client.get_policy(FunctionName="test-foo-bar")
self.assertTrue(policy)
cwls.remove(func, func_deleted=False)
with self.assertRaises(lambda_client.exceptions.ResourceNotFoundException):
lambda_client.get_policy(FunctionName="test-foo-bar")
def test_sns_subscription_remove_permission_idempotent(self):
session_factory = self.replay_flight_data(
"test_sns_subscription_remove_permission_idempotent"
)
func = self.make_func(role=ROLE, runtime="python3.9")
mgr = LambdaManager(session_factory)
mgr.publish(func)
sns_sub = SNSSubscription(
session_factory,
topic_arns=["arn:aws:sns:us-east-1:644160558196:test-topic"]
)
# this shouldn't raise an exception even though we never added it
sns_sub.remove(func, func_deleted=False)
# verify the permissions are not there
lambda_client = session_factory().client("lambda")
found_function = lambda_client.get_function(FunctionName="test-foo-bar")
self.assertTrue(found_function)
with self.assertRaises(lambda_client.exceptions.ResourceNotFoundException):
lambda_client.get_policy(FunctionName="test-foo-bar")
def test_s3_bucket_lambda_notification_remove_idempotent(self):
session_factory = self.replay_flight_data(
"test_s3_bucket_lambda_notification_remove_idempotent"
)
func = self.make_func(role=ROLE, runtime="python3.9")
mgr = LambdaManager(session_factory)
self.addCleanup(mgr.remove, func, True)
mgr.publish(func)
bln = BucketLambdaNotification(
data={},
session_factory=session_factory,
bucket={"Name": "c7n-ci20210930214353595400000001"}
)
bln.add(func)
bln.remove(func, func_deleted=False)
# we should be able to do idempotent removal
bln.remove(func, func_deleted=False)
# verify the permissions are gone
lambda_client = session_factory().client("lambda")
found_function = lambda_client.get_function(FunctionName="test-foo-bar")
self.assertTrue(found_function)
with self.assertRaises(lambda_client.exceptions.ResourceNotFoundException):
lambda_client.get_policy(FunctionName="test-foo-bar")
class PythonArchiveTest(unittest.TestCase):
def make_archive(self, modules=(), cache_file=None):
archive = self.make_open_archive(modules, cache_file=cache_file)
archive.close()
return archive
def make_open_archive(self, modules=(), cache_file=None):
archive = PythonPackageArchive(modules=modules, cache_file=cache_file)
self.addCleanup(archive.remove)
return archive
def get_filenames(self, modules=()):
return self.make_archive(modules).get_filenames()
def test_handles_stdlib_modules(self):
filenames = self.get_filenames(["webbrowser"])
self.assertTrue("webbrowser.py" in filenames)
def test_handles_third_party_modules(self):
filenames = self.get_filenames(["botocore"])
self.assertTrue("botocore/__init__.py" in filenames)
def test_handles_packages(self):
filenames = self.get_filenames(["c7n"])
self.assertTrue("c7n/__init__.py" in filenames)
self.assertTrue("c7n/resources/s3.py" in filenames)
self.assertTrue("c7n/ufuncs/s3crypt.py" in filenames)
def _install_namespace_package(self, tmp_sitedir):
# Install our test namespace package in such a way that both py27 and
# py36 can find it.
from setuptools import namespaces
installer = namespaces.Installer()
class Distribution:
namespace_packages = ["namespace_package"]
installer.distribution = Distribution()
installer.target = os.path.join(tmp_sitedir, "namespace_package.pth")
installer.outputs = []
installer.dry_run = False
installer.install_namespaces()
site.addsitedir(tmp_sitedir, known_paths=site._init_pathinfo())
def test_handles_namespace_packages(self):
bench = tempfile.mkdtemp()
def cleanup():
while bench in sys.path:
sys.path.remove(bench)
shutil.rmtree(bench)
self.addCleanup(cleanup)
subpackage = os.path.join(bench, "namespace_package", "subpackage")
os.makedirs(subpackage)
open(os.path.join(subpackage, "__init__.py"), "w+").write("foo = 42\n")
def _():
from namespace_package.subpackage import foo
assert foo # dodge linter
self.assertRaises(ImportError, _)
self._install_namespace_package(bench)
from namespace_package.subpackage import foo
self.assertEqual(foo, 42)
filenames = self.get_filenames(["namespace_package"])
self.assertTrue("namespace_package/__init__.py" not in filenames)
self.assertTrue("namespace_package/subpackage/__init__.py" in filenames)
self.assertTrue(filenames[-1].endswith("-nspkg.pth"))
def test_excludes_non_py_files(self):
filenames = self.get_filenames(["ctypes"])
self.assertTrue("README.ctypes" not in filenames)
def test_cant_get_bytes_when_open(self):
archive = self.make_open_archive()
self.assertRaises(AssertionError, archive.get_bytes)
def test_cant_add_files_when_closed(self):
archive = self.make_archive()
self.assertRaises(AssertionError, archive.add_file, __file__)
def test_cant_add_contents_when_closed(self):
archive = self.make_archive()
self.assertRaises(AssertionError, archive.add_contents, "foo", "bar")
def test_can_add_additional_files_while_open(self):
archive = self.make_open_archive()
archive.add_file(__file__)
archive.close()
filenames = archive.get_filenames()
self.assertTrue(os.path.basename(__file__) in filenames)
def test_can_set_path_when_adding_files(self):
archive = self.make_open_archive()
archive.add_file(__file__, "cheese/is/yummy.txt")
archive.close()
filenames = archive.get_filenames()
self.assertTrue(os.path.basename(__file__) not in filenames)
self.assertTrue("cheese/is/yummy.txt" in filenames)
def test_can_add_a_file_with_contents_from_a_string(self):
archive = self.make_open_archive()
archive.add_contents("cheese.txt", "So yummy!")
archive.close()
self.assertTrue("cheese.txt" in archive.get_filenames())
with archive.get_reader() as reader:
self.assertEqual(b"So yummy!", reader.read("cheese.txt"))
def test_custodian_archive_creates_a_custodian_archive(self):
archive = custodian_archive()
self.addCleanup(archive.remove)
archive.close()
filenames = archive.get_filenames()
self.assertTrue("c7n/__init__.py" in filenames)
def make_file(self):
bench = tempfile.mkdtemp()
path = os.path.join(bench, "foo.txt")
open(path, "w+").write("Foo.")
self.addCleanup(lambda: shutil.rmtree(bench))
return path
def check_world_readable(self, archive):
world_readable = 0o004 << 16
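# (Editor's note: ZipInfo.external_attr stores the Unix file mode in its high
# 16 bits, so 0o004 << 16 isolates the world-readable permission bit.)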
for info in zipfile.ZipFile(archive.path).filelist:
self.assertEqual(info.external_attr & world_readable, world_readable)
def test_files_are_all_readable(self):
self.check_world_readable(self.make_archive(["c7n"]))
def test_even_unreadable_files_become_readable(self):
path = self.make_file()
os.chmod(path, 0o600)
archive = self.make_open_archive()
archive.add_file(path)
archive.close()
self.check_world_readable(archive)
def test_unless_you_make_your_own_zipinfo(self):
info = zipfile.ZipInfo(self.make_file())
archive = self.make_open_archive()
archive.add_contents(info, "foo.txt")
archive.close()
self.assertRaises(AssertionError, self.check_world_readable, archive)
def test_cache_zip_file(self):
archive = self.make_archive(cache_file=os.path.join(os.path.dirname(__file__),
"data",
"test.zip"))
self.assertTrue("cheese.txt" in archive.get_filenames())
self.assertTrue("cheese/is/yummy.txt" in archive.get_filenames())
with archive.get_reader() as reader:
self.assertEqual(b"So yummy!", reader.read("cheese.txt"))
self.assertEqual(b"True!", reader.read("cheese/is/yummy.txt"))
class PycCase(unittest.TestCase):
def setUp(self):
self.bench = tempfile.mkdtemp()
sys.path.insert(0, self.bench)
def tearDown(self):
sys.path.remove(self.bench)
shutil.rmtree(self.bench)
def py_with_pyc(self, name):
path = os.path.join(self.bench, name)
with open(path, "w+") as fp:
fp.write("42")
py_compile.compile(path)
return path
class Constructor(PycCase):
def test_class_constructor_only_accepts_py_modules_not_pyc(self):
# Create a module with both *.py and *.pyc.
self.py_with_pyc("foo.py")
# Create another with a *.pyc but no *.py behind it.
os.unlink(self.py_with_pyc("bar.py"))
# Now: *.py takes precedence over *.pyc ...
def get(name):
return os.path.basename(importlib.import_module(name).__file__)
self.assertTrue(get("foo"), "foo.py")
try:
# ... and while *.pyc is importable ...
self.assertTrue(get("bar"), "bar.pyc")
except ImportError:
try:
# (except on PyPy)
# http://doc.pypy.org/en/latest/config/objspace.lonepycfiles.html
self.assertEqual(platform.python_implementation(), "PyPy")
except AssertionError:
# (... aaaaaand Python 3)
self.assertEqual(platform.python_version_tuple()[0], "3")
else:
# ... we refuse it.
with self.assertRaises(ValueError) as raised:
PythonPackageArchive(modules=["bar"])
msg = raised.exception.args[0]
self.assertTrue(msg.startswith("Could not find a *.py source file"))
self.assertTrue(msg.endswith("bar.pyc"))
# We readily ignore a *.pyc if a *.py exists.
archive = PythonPackageArchive(modules=["foo"])
archive.close()
self.assertEqual(archive.get_filenames(), ["foo.py"])
with archive.get_reader() as reader:
self.assertEqual(b"42", reader.read("foo.py"))
class AddPyFile(PycCase):
def test_can_add_py_file(self):
archive = PythonPackageArchive()
archive.add_py_file(self.py_with_pyc("foo.py"))
archive.close()
self.assertEqual(archive.get_filenames(), ["foo.py"])
def test_reverts_to_py_if_available(self):
archive = PythonPackageArchive()
py = self.py_with_pyc("foo.py")
archive.add_py_file(py + "c")
archive.close()
self.assertEqual(archive.get_filenames(), ["foo.py"])
def test_fails_if_py_not_available(self):
archive = PythonPackageArchive()
py = self.py_with_pyc("foo.py")
os.unlink(py)
self.assertRaises(IOError, archive.add_py_file, py + "c")
class DiffTags(unittest.TestCase):
def test_empty(self):
assert LambdaManager.diff_tags({}, {}) == ({}, [])
def test_removal(self):
assert LambdaManager.diff_tags({"Foo": "Bar"}, {}) == ({}, ["Foo"])
def test_addition(self):
assert LambdaManager.diff_tags({}, {"Foo": "Bar"}) == ({"Foo": "Bar"}, [])
def test_update(self):
assert LambdaManager.diff_tags(
{"Foo": "Bar"}, {"Foo": "Baz"}) == ({"Foo": "Baz"}, [])
|
ad72f7152b54d9b57a06f892a282a64cf7d9d1cb
|
110044654f706e920380dad2779bb32a77f1f26f
|
/test/Glob/glob-libpath.py
|
b688b7ef37015e6b1dac2fdf9203374ac297dd4a
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SCons/scons
|
89327bb9635cee6e7cc59249edca9cd859d7d1ff
|
b2a7d7066a2b854460a334a5fe737ea389655e6e
|
refs/heads/master
| 2023-09-01T19:37:03.603772
| 2023-08-28T04:32:42
| 2023-08-28T04:32:42
| 104,670,160
| 1,827
| 342
|
MIT
| 2023-09-14T15:13:21
| 2017-09-24T19:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,795
|
py
|
glob-libpath.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Verify that Glob() in a subdir doesn't corrupt LIBPATH.
See bug #2184, "Glob pollutes LIBPATH" from Ian P. Cardenas.
Test output should not contain -Lsrc/util.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
test.subdir('src', ['src', 'util'])
test.write('SConstruct', """\
base_env = Environment()
Export('base_env')
swat = base_env.SConscript('src/SConscript', variant_dir='build')
Default(swat)
""")
test.write(['src', 'SConscript'], """Import('base_env')
libutil = base_env.SConscript('util/SConscript')
env = base_env.Clone()
env.AppendUnique( LIBPATH = 'util')
env.AppendUnique( LIBS = libutil )
swat = env.Program( 'main', 'main.cpp' )
Return('swat')
""")
test.write(['src', 'main.cpp'], """int main(void) { return 0; }
""")
test.write(['src', 'util', 'SConscript'], """Import('base_env')
libutil = base_env.Library('util', Glob('*.cpp'))
Return('libutil')
""")
test.write(['src', 'util', 'util.cpp'], """int i=0;
""")
test.run(arguments = '-Q .')
if not test.match_re_dotall(test.stdout(), r".*(-L|/LIBPATH:)build[/\\]util.*"):
print(repr(test.stdout())+" should contain -Lbuild/util or /LIBPATH:build\\util")
test.fail_test()
if test.match_re_dotall(test.stdout(), r".*(-L|/LIBPATH:)src[/\\]util.*"):
print(repr(test.stdout())+" should not contain -Lsrc/util or /LIBPATH:src\\util")
test.fail_test()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
3d6ecd3c1af34d70536ebab52f3d37ca9b4fe4f6
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/20_杂题/滴滴历届编程题真题/5_宅男的生活-记忆化dfs.py
|
6de248afeb3e7bf460ad8269544811b10c307316
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
5_宅男的生活-记忆化dfs.py
|
# 2 <= n <= 64
# He wants to treat the two activities evenly, spending exactly n/2 days on each
# He does not want to do the same activity for d consecutive days (except when
# one activity has already used up its n/2 days and only the other one remains)
# The choice on the first day must differ from the choice on the last day
# He wants to know how many ways there are to arrange his time
from functools import lru_cache
def solve():
n, d = map(int, input().split())
# Arguments: current day index, previous choice, length of the current run of that choice, remaining days for each of the two choices, and the first day's choice
@lru_cache(None)
def dfs(index: int, pre: int, preLen: int, remain1: int, remain2: int, first: int) -> int:
if index == n:
return int(pre != first)
# Only one activity can still be done
if not remain1:
return int(first == 0)
if not remain2:
return int(first == 1)
res = 0
# Can we continue with the previous choice?
if preLen < d:
if pre == 0:
res += dfs(index + 1, 0, preLen + 1, remain1 - 1, remain2, first)
else:
res += dfs(index + 1, 1, preLen + 1, remain1, remain2 - 1, first)
# Choose the other activity
if pre == 0:
res += dfs(index + 1, 1, 1, remain1, remain2 - 1, first)
else:
res += dfs(index + 1, 0, 1, remain1 - 1, remain2, first)
return res
res = dfs(1, 0, 1, n // 2 - 1, n // 2, 0) + dfs(1, 1, 1, n // 2, n // 2 - 1, 1)
print(res)
if __name__ == "__main__":
for _ in range(int(input())):
solve()
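# Editor's note (illustrative hand check, not part of the original solution):
# for n=4, d=2 the valid schedules are AABB, ABAB, BABA and BBAA, so the
# expected answer is 4; the recursion above reproduces this via
# dfs(1, 0, 1, 1, 2, 0) + dfs(1, 1, 1, 2, 1, 1) = 2 + 2.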
|
22e8520fafaf0d66b17d9d71f0c39bde28a14ff6
|
5e255ad1360c90478393744586663741a9569c21
|
/tests/async_api/test_get_profile.py
|
1b93f1d546110526e8fa7281dd2383dbad774a1a
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
test_get_profile.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aiohttp import web
from linebot import (
AsyncLineBotApi
)
from linebot.aiohttp_async_http_client import AiohttpAsyncHttpClient
async def test_async_profile(aiohttp_client, loop):
expect = {
'displayName': 'test',
'userId': 'test',
'language': 'en',
'pictureUrl': 'https://obs.line-apps.com/...',
'statusMessage': 'Hello, LINE!'
}
async def profile(request):
return web.json_response(expect)
app = web.Application()
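# (Editor's note, an assumption rather than documented behavior: the double
# slash in the route below seems to arise because the bot is created with
# endpoint='/', which gets prefixed to the SDK's '/v2/bot/...' paths; that is
# presumably why the test server is started with skip_url_asserts=True.)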
app.router.add_get('//v2/bot/profile/test', profile)
aiohttp = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
async_client = AiohttpAsyncHttpClient(session=aiohttp)
bot = AsyncLineBotApi('TOKENTOKEN', async_client, endpoint='/')
profile_response = await bot.get_profile('test')
assert profile_response.user_id == expect['userId']
assert profile_response.display_name == expect['displayName']
|
6791d5fa3d70fe69469a4424a5d833de2d194266
|
c3542b98289c1ba85f62d08b5edbe1a3c18f3c80
|
/BoardGame-CLI/uno.py
|
4f36372a5f89d29a6a784f9724f4ce88e7f32c9c
|
[
"LicenseRef-scancode-unknown",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
geekcomputers/Python
|
16674289843f89f6cc287097f033b928f4181d84
|
bc55e2a2c5a98f4c7597e901a04457dfb9d5df0c
|
refs/heads/master
| 2023-08-18T21:04:18.163283
| 2023-08-17T17:38:16
| 2023-08-17T17:38:16
| 2,881,789
| 32,418
| 15,024
|
MIT
| 2023-09-02T18:40:33
| 2011-11-30T09:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 5,559
|
py
|
uno.py
|
# uno game #
import random
"""
Generate the UNO deck of 108 cards.
Parameters: None
Return values: deck=>list
"""
def buildDeck():
deck = []
# example cards: Red 7, Green 8, Blue Skip
colours = ["Red", "Green", "Yellow", "Blue"]
values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "Draw Two", "Skip", "Reverse"]
wilds = ["Wild", "Wild Draw Four"]
for colour in colours:
for value in values:
cardVal = "{} {}".format(colour, value)
deck.append(cardVal)
if value != 0:
deck.append(cardVal)
for i in range(4):
deck.append(wilds[0])
deck.append(wilds[1])
print(deck)
return deck
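# Editor's note (illustrative arithmetic, not part of the original game): the
# loops above yield 4 colours * (one 0 + two each of the other 12 values) =
# 4 * 25 = 100 coloured cards, plus 4 "Wild" and 4 "Wild Draw Four", i.e. the
# 108 cards mentioned in the docstring. A quick sanity check would be:
# assert len(buildDeck()) == 108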
"""
Shuffles a list of items passed into it
Parameters: deck=>list
Return values: deck=>list
"""
def shuffleDeck(deck):
for cardPos in range(len(deck)):
randPos = random.randint(0, 107)
deck[cardPos], deck[randPos] = deck[randPos], deck[cardPos]
return deck
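# Editor's note: the hard-coded bound 107 above assumes the full 108-card deck
# from buildDeck(). A deck-size-agnostic sketch (an editor's alternative, not
# the original author's code) would be:
# def shuffleDeck(deck):
#     for cardPos in range(len(deck)):
#         randPos = random.randint(0, len(deck) - 1)
#         deck[cardPos], deck[randPos] = deck[randPos], deck[cardPos]
#     return deck
# (or simply call random.shuffle(deck))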
"""Draw card function that draws a specified number of cards off the top of the deck
Parameters: numCards -> integer
Return: cardsDrawn -> list
"""
def drawCards(numCards):
cardsDrawn = []
for x in range(numCards):
cardsDrawn.append(unoDeck.pop(0))
return cardsDrawn
"""
Print formatted list of player's hand
Parameter: player->integer , playerHand->list
Return: None
"""
def showHand(player, playerHand):
print("Player {}'s Turn".format(players_name[player]))
print("Your Hand")
print("------------------")
y = 1
for card in playerHand:
print("{}) {}".format(y, card))
y += 1
print("")
"""
Check whether a player is able to play a card, or not
Parameters: colour->string, value->string, playerHand->list
Return: boolean
"""
def canPlay(colour, value, playerHand):
for card in playerHand:
if "Wild" in card:
return True
elif colour in card or value in card:
return True
return False
unoDeck = buildDeck()
unoDeck = shuffleDeck(unoDeck)
unoDeck = shuffleDeck(unoDeck)
discards = []
players_name = []
players = []
colours = ["Red", "Green", "Yellow", "Blue"]
numPlayers = int(input("How many players?"))
while numPlayers < 2 or numPlayers > 4:
numPlayers = int(
input("Invalid. Please enter a number between 2-4.\nHow many players?"))
for player in range(numPlayers):
players_name.append(input("Enter player {} name: ".format(player+1)))
players.append(drawCards(5))
playerTurn = 0
playDirection = 1
playing = True
discards.append(unoDeck.pop(0))
splitCard = discards[0].split(" ", 1)
currentColour = splitCard[0]
if currentColour != "Wild":
cardVal = splitCard[1]
else:
cardVal = "Any"
while playing:
showHand(playerTurn, players[playerTurn])
print("Card on top of discard pile: {}".format(discards[-1]))
if canPlay(currentColour, cardVal, players[playerTurn]):
cardChosen = int(input("Which card do you want to play?"))
while not canPlay(currentColour, cardVal, [players[playerTurn][cardChosen-1]]):
cardChosen = int(
input("Not a valid card. Which card do you want to play?"))
print("You played {}".format(players[playerTurn][cardChosen-1]))
discards.append(players[playerTurn].pop(cardChosen-1))
# check if the player won
if len(players[playerTurn]) == 0:
playing = False
# winner = "Player {}".format(playerTurn+1)
winner = players_name[playerTurn]
else:
# check for special cards
splitCard = discards[-1].split(" ", 1)
currentColour = splitCard[0]
if len(splitCard) == 1:
cardVal = "Any"
else:
cardVal = splitCard[1]
if currentColour == "Wild":
for x in range(len(colours)):
print("{}) {}".format(x+1, colours[x]))
newColour = int(
input("What colour would you like to choose? "))
while newColour < 1 or newColour > 4:
newColour = int(
input("Invalid option. What colour would you like to choose"))
currentColour = colours[newColour-1]
if cardVal == "Reverse":
playDirection = playDirection * -1
elif cardVal == "Skip":
playerTurn += playDirection
if playerTurn >= numPlayers:
playerTurn = 0
elif playerTurn < 0:
playerTurn = numPlayers-1
elif cardVal == "Draw Two":
playerDraw = playerTurn+playDirection
if playerDraw == numPlayers:
playerDraw = 0
elif playerDraw < 0:
playerDraw = numPlayers-1
players[playerDraw].extend(drawCards(2))
elif cardVal == "Draw Four":
playerDraw = playerTurn+playDirection
if playerDraw == numPlayers:
playerDraw = 0
elif playerDraw < 0:
playerDraw = numPlayers-1
players[playerDraw].extend(drawCards(4))
print("")
else:
print("You can't play. You have to draw a card.")
players[playerTurn].extend(drawCards(1))
playerTurn += playDirection
if playerTurn >= numPlayers:
playerTurn = 0
elif playerTurn < 0:
playerTurn = numPlayers-1
print("Game Over")
print("{} is the Winner!".format(winner))
|
c2f2e30ec1d5285f41562c1a5e4f4c61b8b31c73
|
867364dc92d3236f5b42aa4fe82ee69d008d09e5
|
/insomniac/action_runners/core.py
|
5373fabc3aaf66ddb6a0a1886fe5e45b87db63bf
|
[
"MIT"
] |
permissive
|
alexal1/Insomniac
|
6acde5a6e4b4d50e4e0d4fb233fb2e0f98d52314
|
03e25aeaae5b38a0e47a4dfd705a3140ff2e8086
|
refs/heads/master
| 2023-09-03T16:56:23.546483
| 2022-09-03T14:21:08
| 2022-09-03T14:21:08
| 268,484,843
| 666
| 194
|
MIT
| 2022-03-01T23:12:28
| 2020-06-01T09:55:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
core.py
|
from abc import ABC
from enum import unique, Enum
@unique
class ActionState(Enum):
PRE_RUN = 0
RUNNING = 1
DONE = 2
SOURCE_LIMIT_REACHED = 3
SESSION_LIMIT_REACHED = 4
class ActionStatus(object):
def __init__(self, state):
self.state = state
self.limit_state = None
def set(self, state):
self.state = state
def get(self):
return self.state
def set_limit(self, limit_state):
self.limit_state = limit_state
def get_limit(self):
return self.limit_state
class ActionsRunner(ABC):
"""An interface for actions-runner object"""
ACTION_ID = "OVERRIDE"
ACTION_ARGS = {"OVERRIDE": "OVERRIDE"}
action_status = None
def is_action_selected(self, args):
raise NotImplementedError()
def set_params(self, args):
raise NotImplementedError()
def reset_params(self):
raise NotImplementedError()
class InsomniacActionsRunner(ActionsRunner, ABC):
"""An interface for extra-actions-runner object"""
def run(self, device_wrapper, storage, session_state, on_action, is_limit_reached, is_passed_filters=None):
raise NotImplementedError()
class CoreActionsRunner(InsomniacActionsRunner, ABC):
"""An interface for core-actions-runner object"""
|
f07a7cda6f19702c30793784f9539823cba8d69b
|
100bfa827dacb23637d3dd2d1396a830c7d9a4b2
|
/mode/examples/Basics/Control/Conditionals1/Conditionals1.pyde
|
88836a541c38a0f8b09d9105b4cb197b87c58327
|
[
"Apache-2.0"
] |
permissive
|
jdf/processing.py
|
82b37e5b1f4ce68825b5fe919205362ecdc16993
|
f38544c70892c7534f059e8acc1c9a492e2b7c86
|
refs/heads/master
| 2023-08-26T01:42:50.442853
| 2023-02-15T21:33:12
| 2023-02-15T21:33:12
| 833,574
| 1,399
| 246
|
Apache-2.0
| 2023-02-21T12:28:09
| 2010-08-12T14:29:22
|
Python
|
UTF-8
|
Python
| false
| false
| 653
|
pyde
|
Conditionals1.pyde
|
"""
Conditionals 1.
Conditions are like questions.
They allow a program to decide to take one action if
the answer to a question is true or to do another action
if the answer to the question is false.
The questions asked within a program are always logical
or relational statements. For example, if the variable 'i' is
equal to zero then draw a line.
"""
size(640, 360)
background(0)
for i in range(10, width, 10):
# If 'i' divides by 20 with no remainder draw the first line
# else draw the second line
if i % 20 == 0:
stroke(255)
line(i, 80, i, height / 2)
else:
stroke(153)
line(i, 20, i, 180)
|
25404bbc6a20fed79f5ba2b14b704e9d532ac6ca
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/domain/app_feedback_report_domain_test.py
|
67c4106245516a24c717144851bcb1597bb2cefe
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 80,999
|
py
|
app_feedback_report_domain_test.py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for app feedback reporting domain objects."""
from __future__ import annotations
import datetime
import enum
from core import feconf
from core import utils
from core.domain import app_feedback_report_constants
from core.domain import app_feedback_report_domain
from core.platform import models
from core.tests import test_utils
from typing import List
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import app_feedback_report_models
(app_feedback_report_models,) = models.Registry.import_models(
[models.Names.APP_FEEDBACK_REPORT])
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
# The timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC.
REPORT_SUBMITTED_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836)
# The timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC.
TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836)
TICKET_CREATION_TIMESTAMP_MSEC = utils.get_time_in_millisecs(
TICKET_CREATION_TIMESTAMP)
PLATFORM_ANDROID = app_feedback_report_constants.PLATFORM_CHOICE_ANDROID
PLATFORM_WEB = app_feedback_report_constants.PLATFORM_CHOICE_WEB
TICKET_NAME = 'ticket name'
TICKET_ID = '%s.%s.%s' % (
'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC), '16CharString1234')
REPORT_TYPE_SUGGESTION = app_feedback_report_constants.ReportType.SUGGESTION
REPORT_TYPE_ISSUE = app_feedback_report_constants.ReportType.ISSUE
CATEGORY_SUGGESTION_OTHER = (
app_feedback_report_constants.Category.OTHER_SUGGESTION)
CATEGORY_ISSUE_TOPICS = app_feedback_report_constants.Category.TOPICS_ISSUE
ANDROID_PLATFORM_VERSION = '0.1-alpha-abcdef1234'
COUNTRY_LOCALE_CODE_INDIA = 'in'
ANDROID_DEVICE_MODEL = 'Pixel 4a'
ANDROID_SDK_VERSION = 28
ENTRY_POINT_CRASH = 'crash'
ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer'
LANGUAGE_LOCALE_CODE_ENGLISH = 'en'
ANDROID_PACKAGE_VERSION_CODE = 1
NETWORK_WIFI = app_feedback_report_constants.AndroidNetworkType.WIFI
ANDROID_TEXT_SIZE = (
app_feedback_report_constants.AndroidTextSize.MEDIUM_TEXT_SIZE)
ANDROID_BUILD_FINGERPRINT = 'example_fingerprint_id'
EVENT_LOGS = ['event1', 'event2']
LOGCAT_LOGS = ['logcat1', 'logcat2']
USER_SELECTED_ITEMS: List[str] = []
USER_TEXT_INPUT = 'add and admin'
ANDROID_REPORT_INFO: app_feedback_report_models.ReportInfoDict = {
'user_feedback_selected_items': USER_SELECTED_ITEMS,
'user_feedback_other_text_input': USER_TEXT_INPUT,
'event_logs': ['event1', 'event2'],
'logcat_logs': ['logcat1', 'logcat2'],
'package_version_code': ANDROID_PACKAGE_VERSION_CODE,
'build_fingerprint': ANDROID_BUILD_FINGERPRINT,
'network_type': NETWORK_WIFI.value,
'android_device_language_locale_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'language_locale_code': 'en',
'entry_point_info': {
'entry_point_name': ENTRY_POINT_NAVIGATION_DRAWER,
},
'text_size': ANDROID_TEXT_SIZE.value,
'only_allows_wifi_download_and_update': True,
'automatically_update_topics': False,
'is_curriculum_admin': False,
'account_is_profile_admin': False
}
WEB_REPORT_INFO = {
'user_feedback_selected_items': [],
'user_feedback_other_text_input': USER_TEXT_INPUT,
}
ANDROID_REPORT_INFO_SCHEMA_VERSION = 1
WEB_PLATFORM_VERSION = '3.0.8'
class AppFeedbackReportDomainTests(test_utils.GenericTestBase):
def setUp(self) -> None:
super().setUp()
self.android_report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP))
android_user_supplied_feedback = (
app_feedback_report_domain.UserSuppliedFeedback(
REPORT_TYPE_SUGGESTION, CATEGORY_SUGGESTION_OTHER,
USER_SELECTED_ITEMS, USER_TEXT_INPUT))
android_device_system_context = (
app_feedback_report_domain.AndroidDeviceSystemContext(
ANDROID_PLATFORM_VERSION, ANDROID_PACKAGE_VERSION_CODE,
COUNTRY_LOCALE_CODE_INDIA, LANGUAGE_LOCALE_CODE_ENGLISH,
ANDROID_DEVICE_MODEL, ANDROID_SDK_VERSION,
ANDROID_BUILD_FINGERPRINT, NETWORK_WIFI))
navigation_drawer_entry_point = (
app_feedback_report_domain.NavigationDrawerEntryPoint())
android_app_context = (
app_feedback_report_domain.AndroidAppContext(
navigation_drawer_entry_point, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH, ANDROID_TEXT_SIZE, True,
False, False, EVENT_LOGS, LOGCAT_LOGS))
self.android_report_obj = app_feedback_report_domain.AppFeedbackReport(
self.android_report_id, ANDROID_REPORT_INFO_SCHEMA_VERSION,
PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP, 0, TICKET_ID, None,
android_user_supplied_feedback, android_device_system_context,
android_app_context)
self.web_report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
PLATFORM_WEB, REPORT_SUBMITTED_TIMESTAMP))
web_user_supplied_feedback = (
app_feedback_report_domain.UserSuppliedFeedback(
REPORT_TYPE_SUGGESTION, CATEGORY_SUGGESTION_OTHER,
USER_SELECTED_ITEMS, USER_TEXT_INPUT))
device_system_context = (
app_feedback_report_domain.DeviceSystemContext(
WEB_PLATFORM_VERSION, LANGUAGE_LOCALE_CODE_ENGLISH))
crash_entry_point = app_feedback_report_domain.CrashEntryPoint()
app_context = (
app_feedback_report_domain.AppContext(
crash_entry_point, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH))
self.web_report_obj = app_feedback_report_domain.AppFeedbackReport(
self.web_report_id, ANDROID_REPORT_INFO_SCHEMA_VERSION,
PLATFORM_WEB, REPORT_SUBMITTED_TIMESTAMP, 0, TICKET_ID, None,
web_user_supplied_feedback, device_system_context, app_context)
def test_to_dict_android_report(self) -> None:
expected_report_id = self.android_report_id
expected_dict = {
'report_id': expected_report_id,
'schema_version': ANDROID_REPORT_INFO_SCHEMA_VERSION,
'platform': PLATFORM_ANDROID,
'submitted_on_timestamp': utils.get_human_readable_time_string(
utils.get_time_in_millisecs(REPORT_SUBMITTED_TIMESTAMP)),
'local_timezone_offset_hrs': 0,
'ticket_id': TICKET_ID,
'scrubbed_by': None,
'user_supplied_feedback': {
'report_type': REPORT_TYPE_SUGGESTION.value,
'category': CATEGORY_SUGGESTION_OTHER.value,
'user_feedback_selected_items': USER_SELECTED_ITEMS,
'user_feedback_other_text_input': USER_TEXT_INPUT
},
'device_system_context': {
'version_name': ANDROID_PLATFORM_VERSION,
'package_version_code': ANDROID_PACKAGE_VERSION_CODE,
'device_country_locale_code': COUNTRY_LOCALE_CODE_INDIA,
'device_language_locale_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'device_model': ANDROID_DEVICE_MODEL,
'sdk_version': ANDROID_SDK_VERSION,
'build_fingerprint': ANDROID_BUILD_FINGERPRINT,
'network_type': NETWORK_WIFI.value
},
'app_context': {
'entry_point': {
'entry_point_name': ENTRY_POINT_NAVIGATION_DRAWER
},
'text_language_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'audio_language_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'text_size': ANDROID_TEXT_SIZE.value,
'only_allows_wifi_download_and_update': True,
'automatically_update_topics': False,
'account_is_profile_admin': False,
'event_logs': EVENT_LOGS,
'logcat_logs': LOGCAT_LOGS
}
}
self.assertDictEqual(expected_dict, self.android_report_obj.to_dict())
def test_report_web_platform_validation_fails(self) -> None:
with self.assertRaisesRegex(
NotImplementedError,
'Domain objects for web reports have not been implemented yet.'):
self.web_report_obj.validate()
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_report_android_schema_version_not_an_int_validation_fails(
self
) -> None:
self.android_report_obj.schema_version = 'bad_schema_version' # type: ignore[assignment]
self._assert_validation_error(
self.android_report_obj,
'The report schema version %r is invalid, expected an integer' % (
self.android_report_obj.schema_version))
def test_report_android_schema_version_less_than_minimum_validation_fails(
self) -> None:
        # The current minimum is 1, so a version below the minimum triggers
        # the error for a non-positive integer.
self.android_report_obj.schema_version = (
feconf.MINIMUM_ANDROID_REPORT_SCHEMA_VERSION - 1)
self._assert_validation_error(
self.android_report_obj,
'The report schema version %r is invalid, expected an integer' % (
self.android_report_obj.schema_version))
def test_report_android_schema_version_greater_than_max_validation_fails(
self) -> None:
self.android_report_obj.schema_version = (
feconf.CURRENT_ANDROID_REPORT_SCHEMA_VERSION + 1)
self._assert_validation_error(
self.android_report_obj,
'The supported report schema versions for %s reports are' % (
PLATFORM_ANDROID))
def test_report_platform_is_invalid_validation_fails(self) -> None:
self.android_report_obj.platform = 'invalid_platform'
self._assert_validation_error(
self.android_report_obj, 'Report platform should be one of ')
def test_report_scrubber_id_is_invalid_validation_fails(self) -> None:
self.android_report_obj.scrubbed_by = 'invalid_user'
self._assert_validation_error(
self.android_report_obj,
'The scrubbed_by user id \'%s\' is invalid.' % (
self.android_report_obj.scrubbed_by))
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_report_scrubber_id_is_not_string_validation_fails(self) -> None:
self.android_report_obj.scrubbed_by = 123 # type: ignore[assignment]
self._assert_validation_error(
self.android_report_obj,
'The scrubbed_by user must be a string')
def test_report_timezone_offset_is_invalid_validation_fails(self) -> None:
self.android_report_obj.local_timezone_offset_hrs = (
app_feedback_report_constants.TIMEZONE_MINIMUM_OFFSET - 1)
self._assert_validation_error(
self.android_report_obj,
'Expected local timezone offset to be in')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_android_report_system_context_invalid_type_validation_fails(
self
) -> None:
self.android_report_obj.device_system_context = {} # type: ignore[assignment]
self._assert_validation_error(
self.android_report_obj,
'Expected device and system context to be of type '
'AndroidDeviceSystemContext')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_report_platform_is_none_fails_validation(self) -> None:
self.android_report_obj.platform = None # type: ignore[assignment]
self._assert_validation_error(
self.android_report_obj, 'No platform supplied.')
def test_get_report_type_from_string_returns_expected_report_type(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
for report_type in app_feedback_report_constants.ReportType:
self.assertEqual(
feedback_report.get_report_type_from_string(
report_type.value), report_type)
def test_get_report_type_from_string_with_invalid_string_raises_error(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
invalid_report_type = 'invalid_report_type'
with self.assertRaisesRegex(
utils.InvalidInputException,
'The given report type %s is invalid.' % invalid_report_type):
feedback_report.get_report_type_from_string(
invalid_report_type)
def test_get_category_from_string_returns_expected_category(self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
for category in app_feedback_report_constants.ALLOWED_CATEGORIES:
self.assertEqual(
feedback_report.get_category_from_string(
category.value), category)
def test_get_category_from_string_with_invalid_string_raises_error(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
invalid_category = 'invalid_category'
with self.assertRaisesRegex(
utils.InvalidInputException,
'The given category %s is invalid.' % invalid_category):
feedback_report.get_category_from_string(
invalid_category)
def test_get_android_text_size_from_string_returns_expected_text_size(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
for text_size in (
app_feedback_report_constants.ALLOWED_ANDROID_TEXT_SIZES):
self.assertEqual(
feedback_report.get_android_text_size_from_string(
text_size.value), text_size)
def test_get_android_text_size_from_string_with_invalid_string_raises_error(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
invalid_text_size = 'invalid_text_size'
with self.assertRaisesRegex(
utils.InvalidInputException,
'The given Android app text size %s is invalid.' % (
invalid_text_size)):
feedback_report.get_android_text_size_from_string(
invalid_text_size)
def test_get_entry_point_from_json_returns_expected_entry_point_obj(
self
) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
entry_point_json: app_feedback_report_domain.EntryPointDict = {
'entry_point_name': '',
'entry_point_topic_id': 'topic_id',
'entry_point_story_id': 'story_id',
'entry_point_exploration_id': 'exploration_id',
'entry_point_subtopic_id': 'subtopic_id'
}
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER.value)
navigation_drawer_obj = (
feedback_report.get_entry_point_from_json(
entry_point_json))
self.assertTrue(
isinstance(
navigation_drawer_obj,
app_feedback_report_domain.NavigationDrawerEntryPoint))
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.LESSON_PLAYER.value)
lesson_player_obj = (
feedback_report.get_entry_point_from_json(
entry_point_json))
self.assertTrue(
isinstance(
lesson_player_obj,
app_feedback_report_domain.LessonPlayerEntryPoint))
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.REVISION_CARD.value)
revision_card_obj = (
feedback_report.get_entry_point_from_json(
entry_point_json))
self.assertTrue(
isinstance(
revision_card_obj,
app_feedback_report_domain.RevisionCardEntryPoint))
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.CRASH.value)
crash_obj = (
feedback_report.get_entry_point_from_json(
entry_point_json))
self.assertTrue(
isinstance(
crash_obj, app_feedback_report_domain.CrashEntryPoint))
def test_raises_error_with_invalid_entry_point_during_entry_point_from_json(
self
) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
entry_point_json: app_feedback_report_domain.EntryPointDict = {
'entry_point_name': '',
'entry_point_topic_id': 'topic_id',
'entry_point_story_id': 'story_id',
'entry_point_exploration_id': 'exploration_id',
'entry_point_subtopic_id': 'subtopic_id'
}
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.LESSON_PLAYER.value)
with self.assertRaisesRegex(
Exception, 'No story_id provided for LessonPlayerEntryPoint.'
):
entry_point_json['entry_point_story_id'] = None
feedback_report.get_entry_point_from_json(entry_point_json)
with self.assertRaisesRegex(
Exception, 'No topic_id provided for LessonPlayerEntryPoint.'
):
entry_point_json['entry_point_story_id'] = 'story_id'
entry_point_json['entry_point_topic_id'] = None
feedback_report.get_entry_point_from_json(entry_point_json)
with self.assertRaisesRegex(
Exception,
'No exploration_id provided for LessonPlayerEntryPoint.'
):
entry_point_json['entry_point_topic_id'] = 'topic_id'
entry_point_json['entry_point_exploration_id'] = None
feedback_report.get_entry_point_from_json(entry_point_json)
entry_point_json['entry_point_name'] = (
app_feedback_report_constants.EntryPoint.REVISION_CARD.value)
with self.assertRaisesRegex(
Exception,
'No topic_id provided for RevisionCardEntryPoint.'
):
entry_point_json['entry_point_topic_id'] = None
feedback_report.get_entry_point_from_json(entry_point_json)
with self.assertRaisesRegex(
Exception,
'No subtopic_id provided for RevisionCardEntryPoint.'
):
entry_point_json['entry_point_topic_id'] = 'topic_id'
entry_point_json['entry_point_subtopic_id'] = None
feedback_report.get_entry_point_from_json(entry_point_json)
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_get_entry_point_from_json_with_invalid_json_raises_error(
self
) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
invalid_json = {
'entry_point_name': 'invalid_entry_point_name'
}
with self.assertRaisesRegex(
utils.InvalidInputException,
'The given entry point %s is invalid.' % (
'invalid_entry_point_name')):
feedback_report.get_entry_point_from_json(
invalid_json) # type: ignore[arg-type]
def test_get_android_network_type_from_string_returns_expected_network_type(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
for network_type in app_feedback_report_constants.AndroidNetworkType:
self.assertEqual(
feedback_report.get_android_network_type_from_string(
network_type.value), network_type)
def test_get_android_network_type_from_string_invalid_string_raises_error(
self) -> None:
feedback_report = app_feedback_report_domain.AppFeedbackReport
        invalid_network_type = 'invalid_network_type'
with self.assertRaisesRegex(
utils.InvalidInputException,
'The given Android network type %s is invalid.' % (
invalid_network_type)):
feedback_report.get_android_network_type_from_string(
invalid_network_type)
def _assert_validation_error(
self,
report_obj: app_feedback_report_domain.AppFeedbackReport,
expected_error_substring: str
) -> None:
"""Checks that the feedback report passes validation.
Args:
report_obj: AppFeedbackReport. The domain object to validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
report_obj.validate()
def _assert_not_implemented_error(
self,
report_obj: app_feedback_report_domain.AppFeedbackReport,
expected_error_substring: str
) -> None:
"""Checks that the feedback report passes validation.
Args:
report_obj: AppFeedbackReport. The domain object to validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
NotImplementedError, expected_error_substring):
report_obj.validate()
class UserSuppliedFeedbackDomainTests(test_utils.GenericTestBase):
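    """Tests for the UserSuppliedFeedback domain object."""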
def setUp(self) -> None:
super().setUp()
self.user_supplied_feedback = (
app_feedback_report_domain.UserSuppliedFeedback(
REPORT_TYPE_SUGGESTION, CATEGORY_SUGGESTION_OTHER,
USER_SELECTED_ITEMS, USER_TEXT_INPUT))
def test_to_dict(self) -> None:
expected_dict = {
'report_type': REPORT_TYPE_SUGGESTION.value,
'category': CATEGORY_SUGGESTION_OTHER.value,
'user_feedback_selected_items': USER_SELECTED_ITEMS,
'user_feedback_other_text_input': USER_TEXT_INPUT
}
self.assertDictEqual(
expected_dict, self.user_supplied_feedback.to_dict())
def test_validation_invalid_report_type_fails(self) -> None:
# Here we use MyPy ignore because here we assign type string to
# type class ReportType. This is done to test the validation of the
# report_type.
self.user_supplied_feedback.report_type = 'invalid_report_type' # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback, 'Invalid report type ')
def test_validation_invalid_report_category_fails(self) -> None:
self.user_supplied_feedback.report_type = REPORT_TYPE_ISSUE
# Here we use MyPy ignore because here we assign type string to
# type class category. This is done to test the validation of the
# category.
self.user_supplied_feedback.category = 'invalid_category' # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback,
'Invalid category invalid_category,')
def test_validation_has_selected_items_for_invalid_category_fails(
self) -> None:
self.user_supplied_feedback.user_feedback_selected_items = (
['invalid', 'list'])
self._assert_validation_error(
self.user_supplied_feedback,
'Report cannot have selection options for category ')
# Here we use MyPy ignore because here we assign type None to
# type List[str]. This is done to test the validation of the
# UserSuppliedFeedback.
def test_validation_selected_items_is_none_fails(self) -> None:
self.user_supplied_feedback.user_feedback_selected_items = None # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback,
'No user_feedback_selected_items supplied')
# Here we use MyPy ignore because here we assign type None to
    # type str. This is done to test the validation of the
# UserSuppliedFeedback.
def test_validation_text_input_is_none_fails(self) -> None:
self.user_supplied_feedback.report_type = REPORT_TYPE_SUGGESTION
self.user_supplied_feedback.category = CATEGORY_SUGGESTION_OTHER
self.user_supplied_feedback.user_feedback_selected_items = []
self.user_supplied_feedback.user_feedback_other_text_input = None # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback,
'No user_feedback_selected_items supplied')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_invalid_selected_item_list_fails(self) -> None:
self.user_supplied_feedback.report_type = REPORT_TYPE_ISSUE
self.user_supplied_feedback.category = CATEGORY_ISSUE_TOPICS
self.user_supplied_feedback.user_feedback_selected_items = (
[123]) # type: ignore[list-item]
self.user_supplied_feedback.user_feedback_other_text_input = ''
self._assert_validation_error(
self.user_supplied_feedback,
'Invalid option 123 selected by user.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_invalid_text_input_with_only_text_input_allowed_fails(
self) -> None:
self.user_supplied_feedback.report_type = REPORT_TYPE_SUGGESTION
self.user_supplied_feedback.category = CATEGORY_SUGGESTION_OTHER
self.user_supplied_feedback.user_feedback_selected_items = []
self.user_supplied_feedback.user_feedback_other_text_input = 123 # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback,
'Invalid input text, must be a string')
def test_report_type_is_none_fails_validation(self) -> None:
# Here we use MyPy ignore because here we assign type None to
# type class ReportType. This is done to test the validation of
# the UserSuppliedFeedback.
self.user_supplied_feedback.report_type = None # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback, 'No report_type supplied.')
def test_report_category_is_none_fails_validation(self) -> None:
# Here we use MyPy ignore because here we assign type None to
# type class category. This is done to test the validation of the
# category.
self.user_supplied_feedback.category = None # type: ignore[assignment]
self._assert_validation_error(
self.user_supplied_feedback, 'No category supplied.')
def _assert_validation_error(
self,
feedback_obj: app_feedback_report_domain.UserSuppliedFeedback,
expected_error_substring: str
) -> None:
"""Checks that the user supplied feeedback passes validation.
Args:
feedback_obj: UserSuppliedFeedback. The domain object to validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
feedback_obj.validate()
class DeviceSystemContextDomainTests(test_utils.GenericTestBase):
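    """Tests for the DeviceSystemContext domain object."""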
def setUp(self) -> None:
super().setUp()
self.device_system_context = (
app_feedback_report_domain.DeviceSystemContext(
WEB_PLATFORM_VERSION, COUNTRY_LOCALE_CODE_INDIA))
def test_to_dict(self) -> None:
expected_dict = {
'version_name': WEB_PLATFORM_VERSION,
'device_country_locale_code': COUNTRY_LOCALE_CODE_INDIA
}
self.assertDictEqual(
expected_dict, self.device_system_context.to_dict())
def test_validation_raises_not_implemented_error(self) -> None:
with self.assertRaisesRegex(
NotImplementedError,
'Subclasses of DeviceSystemContext should implement domain '
'validation.'):
self.device_system_context.validate()
class AndroidDeviceSystemContextTests(test_utils.GenericTestBase):
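    """Tests for the AndroidDeviceSystemContext domain object."""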
def setUp(self) -> None:
super().setUp()
self.device_system_context = (
app_feedback_report_domain.AndroidDeviceSystemContext(
ANDROID_PLATFORM_VERSION, ANDROID_PACKAGE_VERSION_CODE,
COUNTRY_LOCALE_CODE_INDIA, LANGUAGE_LOCALE_CODE_ENGLISH,
ANDROID_DEVICE_MODEL, ANDROID_SDK_VERSION,
ANDROID_BUILD_FINGERPRINT, NETWORK_WIFI))
def test_to_dict(self) -> None:
expected_dict = {
'version_name': ANDROID_PLATFORM_VERSION,
'package_version_code': ANDROID_PACKAGE_VERSION_CODE,
'device_country_locale_code': COUNTRY_LOCALE_CODE_INDIA,
'device_language_locale_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'device_model': ANDROID_DEVICE_MODEL,
'sdk_version': ANDROID_SDK_VERSION,
'build_fingerprint': ANDROID_BUILD_FINGERPRINT,
'network_type': NETWORK_WIFI.value
}
self.assertDictEqual(
expected_dict, self.device_system_context.to_dict())
# Here we use MyPy ignore because here we assign type None
    # to type str. This is done to test the validation of
# AndroidDeviceSystemContext.
def test_validation_version_name_is_none_fails(self) -> None:
self.device_system_context.version_name = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'No version name supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_version_name_is_not_a_string_fails(self) -> None:
self.device_system_context.version_name = 1 # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'Version name must be a string')
def test_validation_invalid_version_name_fails(self) -> None:
self.device_system_context.version_name = 'invalid_version_name'
self._assert_validation_error(
self.device_system_context,
'The version name is not a valid string format')
# Here we use MyPy ignore because here we assign type None
# to type int. This is done to test the validation of the
# AndroidDeviceSystemContext.
def test_validation_package_version_code_is_none_fails(self) -> None:
self.device_system_context.package_version_code = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'No package version code supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_package_version_code_is_not_an_int_fails(self) -> None:
self.device_system_context.package_version_code = 'invalid_code' # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'Package version code must be an int')
def test_validation_package_version_code_less_than_minimum_fails(
self) -> None:
self.device_system_context.package_version_code = (
feconf.MINIMUM_ANDROID_PACKAGE_VERSION_CODE - 1)
self._assert_validation_error(
self.device_system_context,
'The package version code is not a valid int. The minimum '
'supported version is %d' % (
feconf.MINIMUM_ANDROID_PACKAGE_VERSION_CODE))
# Here we use MyPy ignore because here we assign type None
# to type str. This is done to test the validation of the
# AndroidDeviceSystemContext.
def test_validation_country_locale_code_is_none_fails(self) -> None:
self.device_system_context.device_country_locale_code = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'No device country locale code supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_country_locale_code_not_a_string_fails(self) -> None:
self.device_system_context.device_country_locale_code = 123 # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'device\'s country locale code must be an string,')
def test_validation_invalid_country_locale_code_fails(self) -> None:
self.device_system_context.device_country_locale_code = 'not a code 123'
self._assert_validation_error(
self.device_system_context,
'device\'s country locale code is not a valid string')
# Here we use MyPy ignore because here we assign type None
# to type str. This is done to test the validation of the
# AndroidDeviceSystemContext.
def test_validation_language_locale_code_is_none_fails(self) -> None:
self.device_system_context.device_language_locale_code = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'No device language locale code supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_language_locale_code_not_a_string_fails(self) -> None:
self.device_system_context.device_language_locale_code = 123 # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'device\'s language locale code must be an string,')
def test_validation_invalid_language_locale_code_fails(self) -> None:
self.device_system_context.device_language_locale_code = 'not a code 12'
self._assert_validation_error(
self.device_system_context,
'device\'s language locale code is not a valid string')
# Here we use MyPy ignore because here we assign type None
# to type str. This is done to test the validation of the
# AndroidDeviceSystemContext.
def test_validation_device_model_is_none_fails(self) -> None:
self.device_system_context.device_model = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'No device model supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_device_model_not_a_string_fails(self) -> None:
self.device_system_context.device_model = 123 # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context,
'Android device model must be an string')
# Here we use MyPy ignore because here we assign type None
# to type int. This is done to test the validation of the
# AndroidDeviceSystemContext.
def test_validation_sdk_version_is_none_fails(self) -> None:
self.device_system_context.sdk_version = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'No SDK version supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_sdk_version_not_an_int_fails(self) -> None:
self.device_system_context.sdk_version = 'invalid_sdk_code' # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'SDK version must be an int')
def test_validation_sdk_version_lower_than_minimum_fails(self) -> None:
self.device_system_context.sdk_version = (
app_feedback_report_constants.MINIMUM_ANDROID_SDK_VERSION - 1)
self._assert_validation_error(
self.device_system_context, 'Invalid SDK version')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
    def test_validation_build_fingerprint_not_a_string_fails(self) -> None:
self.device_system_context.build_fingerprint = 123 # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'Build fingerprint must be a string')
    # Here we use MyPy ignore because here we assign type None
# to type str. This is done to test the validation of the
# AndroidDeviceSystemContext.
    def test_validation_build_fingerprint_is_none_fails(self) -> None:
self.device_system_context.build_fingerprint = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'No build fingerprint supplied.')
    # Here we use MyPy ignore because here we assign type None
# to type AndroidNetworkType Enum. This is done to test the
# validation of the AndroidDeviceSystemContext.
def test_validation_network_type_is_none_fails(self) -> None:
self.device_system_context.network_type = None # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'No network type supplied.')
    # Here we use MyPy ignore because here we assign type str
# to type AndroidNetworkType Enum. This is done to test the
# validation of the AndroidDeviceSystemContext.
def test_validation_invalid_network_type_fails(self) -> None:
        self.device_system_context.network_type = 'invalid_network_type'  # type: ignore[assignment]
self._assert_validation_error(
self.device_system_context, 'Invalid network type,')
def _assert_validation_error(
self,
context_obj: app_feedback_report_domain.AndroidDeviceSystemContext,
expected_error_substring: str
) -> None:
"""Checks that the Android device system context passes validation.
Args:
context_obj: AndroidDeviceSystemContext. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
context_obj.validate()
class EntryPointDomainTests(test_utils.GenericTestBase):
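    """Tests for the EntryPoint domain object."""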
def setUp(self) -> None:
super().setUp()
self.entry_point = (
app_feedback_report_domain.EntryPoint(
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER,
'topic_id', 'story_id', 'exploration_id', 'subtopic_id'))
def test_to_dict_raises_exception(self) -> None:
with self.assertRaisesRegex(
NotImplementedError,
'Subclasses of EntryPoint should implement their own dict'):
self.entry_point.to_dict()
def test_validation_raises_exception(self) -> None:
with self.assertRaisesRegex(
NotImplementedError,
'Subclasses of EntryPoint should implement their own validation'):
self.entry_point.validate()
class NavigationDrawerEntryPointDomainTests(test_utils.GenericTestBase):
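    """Tests for the NavigationDrawerEntryPoint domain object."""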
def setUp(self) -> None:
super().setUp()
self.entry_point = (
app_feedback_report_domain.NavigationDrawerEntryPoint())
def test_to_dict(self) -> None:
expected_dict = {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER.value) # pylint: disable=line-too-long
}
self.assertDictEqual(
expected_dict, self.entry_point.to_dict())
def test_validation_name_is_none_fails(self) -> None:
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
self.entry_point.entry_point_name = None # type: ignore[assignment]
with self.assertRaisesRegex(
utils.ValidationError,
'No entry point name supplied.'):
self.entry_point.validate()
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_name_not_a_string_fails(self) -> None:
self.entry_point.entry_point_name = 123 # type: ignore[assignment]
with self.assertRaisesRegex(
utils.ValidationError,
'Entry point name must be a string,'):
self.entry_point.validate()
def test_validation_name_is_invalid_fails(self) -> None:
self.entry_point.entry_point_name = 'invalid_entry_point_name'
with self.assertRaisesRegex(
utils.ValidationError,
'Expected entry point name %s' % (
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER.value)): # pylint: disable=line-too-long
self.entry_point.validate()
class LessonPlayerEntryPointDomainTests(test_utils.GenericTestBase):
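    """Tests for the LessonPlayerEntryPoint domain object."""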
def setUp(self) -> None:
super().setUp()
self.entry_point = (
app_feedback_report_domain.LessonPlayerEntryPoint(
'topic_id', 'story_id', 'exploration_id'))
def test_to_dict(self) -> None:
expected_dict = {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.LESSON_PLAYER.value),
'topic_id': 'topic_id',
'story_id': 'story_id',
'exploration_id': 'exploration_id'
}
self.assertDictEqual(
expected_dict, self.entry_point.to_dict())
# Here we use MyPy ignore because we assign type None to type str.
# This is done to test the validation of LessonPlayerEntryPoint.
def test_validation_name_is_none_fails(self) -> None:
self.entry_point.entry_point_name = None # type: ignore[assignment]
self._assert_validation_error(
self.entry_point,
'No entry point name supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_name_not_a_string_fails(self) -> None:
self.entry_point.entry_point_name = 123 # type: ignore[assignment]
self._assert_validation_error(
self.entry_point,
'Entry point name must be a string,')
def test_validation_name_is_invalid_fails(self) -> None:
self.entry_point.entry_point_name = 'invalid_entry_point_name'
self._assert_validation_error(
self.entry_point,
'Expected entry point name %s' % (
app_feedback_report_constants.EntryPoint.LESSON_PLAYER.value))
def test_validation_invalid_topic_id_fails(self) -> None:
self.entry_point.topic_id = 'invalid_topic_id'
self._assert_validation_error(
self.entry_point,
'Topic id %s is invalid' % 'invalid_topic_id')
def test_validation_invalid_story_id_fails(self) -> None:
self.entry_point.topic_id = 'valid_topic1'
self.entry_point.story_id = 'invalid_story_id'
self._assert_validation_error(
self.entry_point, 'Invalid story id')
def test_validation_invalid_exploration_id_fails(self) -> None:
self.entry_point.topic_id = 'valid_topic1'
self.entry_point.story_id = 'valid_story1'
self.entry_point.exploration_id = 'invalid_exploration'
self._assert_validation_error(
self.entry_point,
'Exploration with id invalid_exploration is not part of story '
'with id')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
    def test_validation_exploration_id_not_a_string_fails(self) -> None:
self.entry_point.topic_id = 'valid_topic1'
self.entry_point.story_id = 'valid_story1'
self.entry_point.exploration_id = 123 # type: ignore[assignment]
self._assert_validation_error(
self.entry_point,
'Exploration id should be a string')
def test_validation_story_id_fails_on_none_value(self) -> None:
self.entry_point.topic_id = 'valid_topic1'
self.entry_point.story_id = None
self._assert_validation_error(
self.entry_point,
'The story_id must be a string value, received None')
def _assert_validation_error(
self,
entry_point_obj: app_feedback_report_domain.LessonPlayerEntryPoint,
expected_error_substring: str
) -> None:
"""Checks that the entry point passes validation.
Args:
entry_point_obj: LessonPlayerEntryPoint. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
entry_point_obj.validate()
class RevisionCardEntryPointDomainTests(test_utils.GenericTestBase):
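    """Tests for the RevisionCardEntryPoint domain object."""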
def setUp(self) -> None:
super().setUp()
self.entry_point = (
app_feedback_report_domain.RevisionCardEntryPoint(
'topic_id', 'subtopic_id'))
def test_to_dict(self) -> None:
expected_dict = {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.REVISION_CARD.value),
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id'
}
self.assertDictEqual(
expected_dict, self.entry_point.to_dict())
def test_validation_name_is_none_fails(self) -> None:
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
self.entry_point.entry_point_name = None # type: ignore[assignment]
self._assert_validation_error(
self.entry_point,
'No entry point name supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_name_not_a_string_fails(self) -> None:
self.entry_point.entry_point_name = 123 # type: ignore[assignment]
self._assert_validation_error(
self.entry_point,
'Entry point name must be a string,')
def test_validation_name_is_invalid_fails(self) -> None:
self.entry_point.entry_point_name = 'invalid_entry_point_name'
self._assert_validation_error(
self.entry_point,
'Expected entry point name %s' % (
app_feedback_report_constants.EntryPoint.REVISION_CARD.value))
def test_validation_invalid_topic_id_fails(self) -> None:
self.entry_point.topic_id = 'invalid_topic_id'
self._assert_validation_error(
self.entry_point,
'Topic id %s is invalid' % 'invalid_topic_id')
def test_validation_invalid_subtopic_id_fails(self) -> None:
self.entry_point.topic_id = 'valid_topic1'
self.entry_point.subtopic_id = 'invalid_subtopic_id'
self._assert_validation_error(
self.entry_point, 'Expected subtopic id to be an int')
def _assert_validation_error(
self,
entry_point_obj: app_feedback_report_domain.RevisionCardEntryPoint,
expected_error_substring: str
) -> None:
"""Checks that the entry point passes validation.
Args:
entry_point_obj: RevisionCardEntryPoint. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
entry_point_obj.validate()
class CrashEntryPointDomainTests(test_utils.GenericTestBase):
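    """Tests for the CrashEntryPoint domain object."""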
def setUp(self) -> None:
super().setUp()
self.entry_point = (
app_feedback_report_domain.CrashEntryPoint())
def test_to_dict(self) -> None:
expected_dict = {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.CRASH.value)
}
self.assertDictEqual(
expected_dict, self.entry_point.to_dict())
def test_validation_name_is_none_fails(self) -> None:
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
self.entry_point.entry_point_name = None # type: ignore[assignment]
with self.assertRaisesRegex(
utils.ValidationError,
'No entry point name supplied.'):
self.entry_point.validate()
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_name_not_a_string_fails(self) -> None:
self.entry_point.entry_point_name = 123 # type: ignore[assignment]
with self.assertRaisesRegex(
utils.ValidationError,
'Entry point name must be a string,'):
self.entry_point.validate()
def test_validation_name_is_invalid_fails(self) -> None:
self.entry_point.entry_point_name = 'invalid_entry_point_name'
with self.assertRaisesRegex(
utils.ValidationError,
'Expected entry point name %s' % (
app_feedback_report_constants.EntryPoint.CRASH.value)):
self.entry_point.validate()
class AppContextDomainTests(test_utils.GenericTestBase):
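    """Tests for the AppContext domain object."""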
def setUp(self) -> None:
super().setUp()
entry_point = (
app_feedback_report_domain.NavigationDrawerEntryPoint())
self.app_context = (
app_feedback_report_domain.AppContext(
entry_point, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH))
def test_to_dict(self) -> None:
expected_dict = {
'entry_point': {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER.value), # pylint: disable=line-too-long
},
'text_language_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'audio_language_code': LANGUAGE_LOCALE_CODE_ENGLISH
}
self.assertDictEqual(
expected_dict, self.app_context.to_dict())
def test_validation_raises_exception(self) -> None:
with self.assertRaisesRegex(
NotImplementedError,
'Subclasses of AppContext should implement their own validation'):
self.app_context.validate()
class AndroidAppContextDomainTests(test_utils.GenericTestBase):
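    """Tests for the AndroidAppContext domain object."""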
def setUp(self) -> None:
super().setUp()
entry_point = (
app_feedback_report_domain.NavigationDrawerEntryPoint())
self.app_context = (
app_feedback_report_domain.AndroidAppContext(
entry_point, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH, ANDROID_TEXT_SIZE, True, False,
False, EVENT_LOGS, LOGCAT_LOGS))
def test_to_dict(self) -> None:
expected_dict = {
'entry_point': {
'entry_point_name': (
app_feedback_report_constants.EntryPoint.NAVIGATION_DRAWER.value), # pylint: disable=line-too-long
},
'text_language_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'audio_language_code': LANGUAGE_LOCALE_CODE_ENGLISH,
'text_size': ANDROID_TEXT_SIZE.value,
'only_allows_wifi_download_and_update': True,
'automatically_update_topics': False,
'account_is_profile_admin': False,
'event_logs': EVENT_LOGS,
'logcat_logs': LOGCAT_LOGS
}
self.assertDictEqual(
expected_dict, self.app_context.to_dict())
# Here we use MyPy ignore because we assign type None to
# type AndroidTextSize Enum. This is done to test that the
# validation fails.
def test_validation_text_size_is_none_fails(self) -> None:
self.app_context.text_size = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'No text size supplied.')
# Here we use MyPy ignore because we assign type string to
# type AndroidTextSize Enum. This is done to test that the
# validation fails.
def test_validation_text_size_is_invalid_fails(self) -> None:
self.app_context.text_size = 'invalid_text_size' # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'App text size should be one of')
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
def test_validation_text_language_code_is_none_fails(self) -> None:
self.app_context.text_language_code = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'No app text language code supplied.')
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
def test_validation_audio_language_code_is_none_fails(self) -> None:
self.app_context.audio_language_code = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'No app audio language code supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_text_language_code_is_not_a_string_fails(self) -> None:
self.app_context.text_language_code = 123 # type: ignore[assignment]
self._assert_validation_error(
self.app_context,
'Expected the app\'s text language code to be a string')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_audio_language_code_is_not_a_string_fails(self) -> None:
self.app_context.audio_language_code = 123 # type: ignore[assignment]
self._assert_validation_error(
self.app_context,
'Expected the app\'s audio language code to be a string')
def test_validation_text_language_code_does_not_match_fails(self) -> None:
self.app_context.text_language_code = 'invalid string regex'
self._assert_validation_error(
self.app_context,
'The app\'s text language code is not a valid string')
def test_validation_audio_language_code_does_not_match_fails(self) -> None:
self.app_context.audio_language_code = 'invalid string regex'
self._assert_validation_error(
self.app_context,
'The app\'s audio language code is not a valid string')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_only_allow_wifi_downloads_is_none_fails(self) -> None:
self.app_context.only_allows_wifi_download_and_update = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context,
'only_allows_wifi_download_and_update field should be a boolean')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_automatically_update_topics_is_none_fails(self) -> None:
self.app_context.automatically_update_topics = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context,
'automatically_update_topics field should be a boolean')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_account_is_profile_admin_is_none_fails(self) -> None:
self.app_context.account_is_profile_admin = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context,
'account_is_profile_admin field should be a boolean')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_event_logs_is_none_fails(self) -> None:
self.app_context.event_logs = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'Should have an event log list')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_logcat_logs_is_none_fails(self) -> None:
self.app_context.logcat_logs = None # type: ignore[assignment]
self._assert_validation_error(
self.app_context, 'Should have a logcat log list')
def _assert_validation_error(
self,
app_context_obj: app_feedback_report_domain.AndroidAppContext,
expected_error_substring: str
) -> None:
"""Checks that the app context passes validation.
Args:
app_context_obj: AndroidAppContext. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
app_context_obj.validate()
class AppFeedbackReportTicketDomainTests(test_utils.GenericTestBase):
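    """Tests for the AppFeedbackReportTicket domain object."""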
def setUp(self) -> None:
super().setUp()
self.ticket_id = (
app_feedback_report_models.AppFeedbackReportTicketModel.generate_id(
TICKET_NAME))
self.android_report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP))
android_user_supplied_feedback = (
app_feedback_report_domain.UserSuppliedFeedback(
REPORT_TYPE_SUGGESTION, CATEGORY_SUGGESTION_OTHER,
USER_SELECTED_ITEMS, USER_TEXT_INPUT))
android_device_system_context = (
app_feedback_report_domain.AndroidDeviceSystemContext(
ANDROID_PLATFORM_VERSION, ANDROID_PACKAGE_VERSION_CODE,
COUNTRY_LOCALE_CODE_INDIA, LANGUAGE_LOCALE_CODE_ENGLISH,
ANDROID_DEVICE_MODEL, ANDROID_SDK_VERSION,
ANDROID_BUILD_FINGERPRINT, NETWORK_WIFI))
navigation_drawer_entry_point = (
app_feedback_report_domain.NavigationDrawerEntryPoint())
android_app_context = (
app_feedback_report_domain.AndroidAppContext(
navigation_drawer_entry_point, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH, ANDROID_TEXT_SIZE, True,
False, False, EVENT_LOGS, LOGCAT_LOGS))
self.android_report_obj = app_feedback_report_domain.AppFeedbackReport(
self.android_report_id, ANDROID_REPORT_INFO_SCHEMA_VERSION,
PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP, 0, self.ticket_id,
None, android_user_supplied_feedback, android_device_system_context,
android_app_context)
self.ticket_obj = app_feedback_report_domain.AppFeedbackReportTicket(
self.ticket_id, TICKET_NAME, PLATFORM_ANDROID, None, None, False,
REPORT_SUBMITTED_TIMESTAMP, [self.android_report_id])
def test_to_dict(self) -> None:
expected_dict = {
'ticket_id': self.ticket_id,
'ticket_name': TICKET_NAME,
'platform': PLATFORM_ANDROID,
'github_issue_repo_name': None,
'github_issue_number': None,
'archived': False,
'newest_report_creation_timestamp_isoformat': (
REPORT_SUBMITTED_TIMESTAMP.isoformat()),
'reports': [self.android_report_id]
}
self.assertDictEqual(
expected_dict, self.ticket_obj.to_dict())
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_ticket_id_not_a_string_fails(self) -> None:
self.ticket_obj.ticket_id = 123 # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'The ticket id should be a string')
def test_validation_invalid_ticket_id_fails(self) -> None:
self.ticket_obj.ticket_id = 'invalid_ticket_id'
self._assert_validation_error(
self.ticket_obj,
'The ticket id %s is invalid' % 'invalid_ticket_id')
# Here we use MyPy ignore because we assign type None to
# type str. This is done to test that the validation fails.
def test_validation_ticket_name_is_none_fails(self) -> None:
self.ticket_obj.ticket_name = None # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'No ticket name supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_ticket_name_is_not_a_string_fails(self) -> None:
self.ticket_obj.ticket_name = 123 # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'The ticket name should be a string')
def test_validation_ticket_name_too_long_fails(self) -> None:
long_name = 'too long' + 'x' * (
app_feedback_report_constants.MAXIMUM_TICKET_NAME_LENGTH)
self.ticket_obj.ticket_name = long_name
self._assert_validation_error(
self.ticket_obj,
'The ticket name is too long, has %d characters' % len(long_name))
# Here we use MyPy ignore because we assign type None to
# type List[str]. This is done to test that the validation fails.
def test_validation_report_ids_is_none_fails(self) -> None:
self.ticket_obj.reports = None # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'No reports list supplied.')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_report_ids_not_a_list_fails(self) -> None:
self.ticket_obj.reports = 123 # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'The reports list should be a list')
def test_validation_invalid_report_ids_fails(self) -> None:
self.ticket_obj.reports = ['invalid_report_id']
self._assert_validation_error(
self.ticket_obj,
'The report with id %s is invalid.' % 'invalid_report_id')
def test_validation_invalid_github_issue_number_fails(self) -> None:
self.ticket_obj.github_issue_number = -1
self._assert_validation_error(
self.ticket_obj,
'The Github issue number name must be a positive integer')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_github_repo_name_not_a_string_fails(self) -> None:
self.ticket_obj.github_issue_repo_name = 123 # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'The Github repo name should be a string')
def test_validation_invalid_github_repo_name_fails(self) -> None:
self.ticket_obj.github_issue_repo_name = 'invalid_repo_name'
self._assert_validation_error(
self.ticket_obj,
'The Github repo %s is invalid' % 'invalid_repo_name')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_archived_is_not_boolean_fails(self) -> None:
self.ticket_obj.archived = 123 # type: ignore[assignment]
self._assert_validation_error(
self.ticket_obj,
'The ticket archived status must be a boolean')
def _assert_validation_error(
self,
ticket_obj: app_feedback_report_domain.AppFeedbackReportTicket,
expected_error_substring: str
) -> None:
"""Checks that the ticket passes validation.
Args:
ticket_obj: AppFeedbackReportTicket. The domain object to validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
ticket_obj.validate()
class AppFeedbackReportDailyStatsDomainTests(test_utils.GenericTestBase):
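    """Tests for the AppFeedbackReportDailyStats domain object."""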
def setUp(self) -> None:
super().setUp()
self.ticket_id = (
app_feedback_report_models.AppFeedbackReportTicketModel.generate_id(
TICKET_NAME))
self.android_report_id = (
app_feedback_report_models.AppFeedbackReportModel.generate_id(
PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP))
self.ticket_obj = app_feedback_report_domain.AppFeedbackReportTicket(
self.ticket_id, TICKET_NAME, PLATFORM_ANDROID, None, None, False,
REPORT_SUBMITTED_TIMESTAMP, [self.android_report_id])
app_feedback_report_models.AppFeedbackReportModel.create(
self.android_report_id, PLATFORM_ANDROID,
REPORT_SUBMITTED_TIMESTAMP, 0, REPORT_TYPE_SUGGESTION.value,
CATEGORY_SUGGESTION_OTHER.value, ANDROID_PLATFORM_VERSION,
COUNTRY_LOCALE_CODE_INDIA, ANDROID_SDK_VERSION,
ANDROID_DEVICE_MODEL, ENTRY_POINT_NAVIGATION_DRAWER, None, None,
None, None, LANGUAGE_LOCALE_CODE_ENGLISH,
LANGUAGE_LOCALE_CODE_ENGLISH, ANDROID_REPORT_INFO, None)
param_stats = {
'platform': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
PLATFORM_ANDROID: 1})
),
'report_type': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
REPORT_TYPE_SUGGESTION.value: 1})
),
'country_locale_code': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
COUNTRY_LOCALE_CODE_INDIA: 1})
),
'entry_point_name': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
ENTRY_POINT_NAVIGATION_DRAWER: 1})
),
'text_language_code': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
LANGUAGE_LOCALE_CODE_ENGLISH: 1})
),
'audio_language_code': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
LANGUAGE_LOCALE_CODE_ENGLISH: 1})
),
'android_sdk_version': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
str(ANDROID_SDK_VERSION): 1})
),
'version_name': (
app_feedback_report_domain.ReportStatsParameterValueCounts({
ANDROID_PLATFORM_VERSION: 1})
)
}
self.stats_id = (
app_feedback_report_models.AppFeedbackReportStatsModel.calculate_id(
PLATFORM_ANDROID, self.ticket_id,
REPORT_SUBMITTED_TIMESTAMP.date()))
self.stats_obj = app_feedback_report_domain.AppFeedbackReportDailyStats(
self.stats_id, self.ticket_obj, PLATFORM_ANDROID,
REPORT_SUBMITTED_TIMESTAMP, 1, param_stats)
def test_to_dict(self) -> None:
expected_dict = {
'stats_id': self.stats_id,
'ticket': self.ticket_obj.to_dict(),
'platform': PLATFORM_ANDROID,
'stats_tracking_date': REPORT_SUBMITTED_TIMESTAMP.isoformat(),
'total_reports_submitted': 1,
'daily_param_stats': {
'platform': {PLATFORM_ANDROID: 1},
'report_type': {REPORT_TYPE_SUGGESTION.value: 1},
'country_locale_code': {COUNTRY_LOCALE_CODE_INDIA: 1},
'entry_point_name': {ENTRY_POINT_NAVIGATION_DRAWER: 1},
'text_language_code': {LANGUAGE_LOCALE_CODE_ENGLISH: 1},
'audio_language_code': {LANGUAGE_LOCALE_CODE_ENGLISH: 1},
'android_sdk_version': {str(ANDROID_SDK_VERSION): 1},
'version_name': {ANDROID_PLATFORM_VERSION: 1}
}
}
self.assertDictEqual(expected_dict, self.stats_obj.to_dict())
def test_validation_on_valid_stats_does_not_fail(self) -> None:
self.stats_obj.validate()
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_stats_id_is_not_a_string_fails(self) -> None:
self.stats_obj.stats_id = 123 # type: ignore[assignment]
self._assert_validation_error(
self.stats_obj, 'The stats id should be a string')
def test_validation_invalid_id_fails(self) -> None:
self.stats_obj.stats_id = 'invalid_stats_id'
self._assert_validation_error(
self.stats_obj, 'The stats id %s is invalid' % 'invalid_stats_id')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_total_reports_submitted_is_not_an_int_fails(
self
) -> None:
self.stats_obj.total_reports_submitted = 'wrong type' # type: ignore[assignment]
self._assert_validation_error(
self.stats_obj,
'The total number of submitted reports should be an int')
def test_validation_total_reports_submitted_is_less_than_0_fails(
self) -> None:
self.stats_obj.total_reports_submitted = -1
self._assert_validation_error(
self.stats_obj,
'The total number of submitted reports should be a non-negative '
'int')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_daily_param_stats_is_not_a_dict_fails(self) -> None:
self.stats_obj.daily_param_stats = 123 # type: ignore[assignment]
self._assert_validation_error(
self.stats_obj, 'The parameter stats should be a dict')
def test_validation_invalid_daily_param_stats_fails(self) -> None:
self.stats_obj.daily_param_stats = {
'invalid_stat_name':
app_feedback_report_domain.ReportStatsParameterValueCounts(
{'invalid_stats': 0}),
}
self._assert_validation_error(
self.stats_obj,
'The parameter %s is not a valid parameter to aggregate stats '
'on' % 'invalid_stat_name')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_parameter_value_counts_objects_are_invalid_fails(
self) -> None:
self.stats_obj.daily_param_stats = {
'report_type': (
app_feedback_report_domain.ReportStatsParameterValueCounts(
{
123: 1 # type: ignore[dict-item]
}
)
)
}
self._assert_validation_error(
self.stats_obj, 'The parameter value should be a string')
def _assert_validation_error(
self,
stats_obj: app_feedback_report_domain.AppFeedbackReportDailyStats,
expected_error_substring: str
    ) -> None:
        """Checks that validating the stats object raises the expected error.
Args:
stats_obj: AppFeedbackReportStats. The domain object to validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
stats_obj.validate()
class ReportStatsParameterValueCountsDomainTests(test_utils.GenericTestBase):
def test_to_dict(self) -> None:
counts_obj = app_feedback_report_domain.ReportStatsParameterValueCounts(
{
PLATFORM_ANDROID: 1,
PLATFORM_WEB: 1
})
expected_dict = {
'android': 1,
'web': 1
}
self.assertDictEqual(
expected_dict, counts_obj.to_dict())
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_with_invalid_parameter_value_fails(self) -> None:
counts_obj = app_feedback_report_domain.ReportStatsParameterValueCounts(
{
1: 1, # type: ignore[dict-item]
# Here we use MyPy ignore because this wrong type is provided
# only for type checking.
2: 1 # type: ignore[dict-item]
})
self._assert_validation_error(
counts_obj, 'The parameter value should be a string')
def test_validation_with_invalid_parameter_counts_fails(self) -> None:
counts_obj = app_feedback_report_domain.ReportStatsParameterValueCounts(
{
'value_1': -1,
})
self._assert_validation_error(
counts_obj, 'The parameter value count should be a non-negative '
'int')
def _assert_validation_error(
self,
counts_obj: (
app_feedback_report_domain.ReportStatsParameterValueCounts),
expected_error_substring: str
    ) -> None:
        """Checks that validating the parameter counts raises the expected error.
Args:
counts_obj: ReportStatsParameterValueCounts. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
counts_obj.validate()
class AppFeedbackReportFilterDomainTests(test_utils.GenericTestBase):
def setUp(self) -> None:
super().setUp()
self.filter = app_feedback_report_domain.AppFeedbackReportFilter(
app_feedback_report_constants.FilterFieldNames.PLATFORM,
['web', 'android'])
def test_to_dict(self) -> None:
app_feedback_report_constants.PLATFORM_CHOICES.sort()
expected_dict = {
'filter_field': 'PLATFORM',
'filter_options': app_feedback_report_constants.PLATFORM_CHOICES
}
self.assertDictEqual(
expected_dict, self.filter.to_dict())
def test_validation_with_invalid_filter_field_fails(self) -> None:
class InvalidFieldName(enum.Enum):
"""Invalid field name."""
INVALID_FILTER_FIELD = 'invalid_filter_field'
# Here we use MyPy ignore because we assign type
# "InvalidFieldName" to the class filter field name.
# This is done to make sure that the type checker does
# not complain about the type of the filter field name.
self.filter.filter_field = InvalidFieldName.INVALID_FILTER_FIELD # type: ignore[assignment]
self._assert_validation_error(
self.filter,
'The filter field should be one of ')
# TODO(#13059): Here we use MyPy ignore because after we fully type the
# codebase we plan to get rid of the tests that intentionally test wrong
# inputs that we can normally catch by typing.
def test_validation_filter_values_list_is_none_fails(self) -> None:
self.filter.filter_options = None # type: ignore[assignment]
self._assert_validation_error(
self.filter,
'The filter options should be a list')
def _assert_validation_error(
self,
filter_obj: app_feedback_report_domain.AppFeedbackReportFilter,
expected_error_substring: str
    ) -> None:
        """Checks that validating the filter object raises the expected error.
Args:
filter_obj: AppFeedbackReportFilter. The domain object to
validate.
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegex(
utils.ValidationError, expected_error_substring):
filter_obj.validate()
|
91c2f7a2a4670d5b7456138472abe972ac481420
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/modules/test_win_path.py
|
7dad024a8783dda3f40683abbc15dfdf77cbdb34
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 8,959
|
py
|
test_win_path.py
|
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
import os
import pytest
import salt.modules.win_path as win_path
import salt.utils.stringutils
import salt.utils.win_reg as reg_util
from tests.support.mock import MagicMock, patch
pytestmark = [pytest.mark.windows_whitelisted, pytest.mark.skip_unless_on_windows]
"""
Test cases for salt.modules.win_path.
"""
@pytest.fixture()
def pathsep():
return ";"
@pytest.fixture
def configure_loader_modules():
return {
win_path: {
"__opts__": {"test": False},
"__salt__": {},
"__utils__": {"reg.read_value": reg_util.read_value},
},
}
def test_get_path():
"""
Test to return the system path
"""
mock = MagicMock(return_value={"vdata": "C:\\Salt"})
with patch.dict(win_path.__utils__, {"reg.read_value": mock}):
assert win_path.get_path() == ["C:\\Salt"]
def test_exists():
"""
Test to check if the directory is configured
"""
mock = MagicMock(return_value=["C:\\Foo", "C:\\Bar"])
with patch.object(win_path, "get_path", mock):
# Ensure case insensitivity respected
assert (win_path.exists("C:\\FOO")) is True
assert (win_path.exists("c:\\foo")) is True
assert (win_path.exists("c:\\mystuff")) is False
def test_util_reg():
"""
Test to check if registry comes back clean when get_path is called
"""
mock = MagicMock(return_value={"vdata": ""})
with patch.dict(win_path.__utils__, {"reg.read_value": mock}):
assert win_path.get_path() == []
def test_add(pathsep):
"""
Test to add the directory to the SYSTEM path
"""
orig_path = ("C:\\Foo", "C:\\Bar")
# Helper function to make the env var easier to reuse
def _env(path):
return {"PATH": salt.utils.stringutils.to_str(pathsep.join(path))}
# Helper function to make the run call easier to reuse
def _run(name, index=None, retval=True, path=None):
if path is None:
path = orig_path
env = _env(path)
# Mock getters and setters
mock_get = MagicMock(return_value=list(path))
mock_set = MagicMock(return_value=retval)
# Mock individual calls that would occur during normal usage
patch_sep = patch.object(win_path, "PATHSEP", pathsep)
patch_path = patch.object(win_path, "get_path", mock_get)
patch_env = patch.object(os, "environ", env)
patch_dict = patch.dict(win_path.__utils__, {"reg.set_value": mock_set})
patch_rehash = patch.object(win_path, "rehash", MagicMock(return_value=True))
with patch_sep, patch_path, patch_env, patch_dict, patch_rehash:
return win_path.add(name, index), env, mock_set
def _path_matches(path):
return salt.utils.stringutils.to_str(pathsep.join(path))
# Test an empty reg update
ret, env, mock_set = _run("")
assert ret is False
# Test a successful reg update
ret, env, mock_set = _run("c:\\salt", retval=True)
new_path = ("C:\\Foo", "C:\\Bar", "c:\\salt")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test an unsuccessful reg update
ret, env, mock_set = _run("c:\\salt", retval=False)
new_path = ("C:\\Foo", "C:\\Bar", "c:\\salt")
assert ret is False
assert env["PATH"] == _path_matches(new_path)
# Test adding with a custom index
ret, env, mock_set = _run("c:\\salt", index=1, retval=True)
new_path = ("C:\\Foo", "c:\\salt", "C:\\Bar")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test adding with a custom index of 0
ret, env, mock_set = _run("c:\\salt", index=0, retval=True)
new_path = ("c:\\salt", "C:\\Foo", "C:\\Bar")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test adding path with a case-insensitive match already present, and
# no index provided. The path should remain unchanged and we should not
# update the registry.
ret, env, mock_set = _run("c:\\foo", retval=True)
assert ret is True
assert env["PATH"] == _path_matches(orig_path)
# Test adding path with a case-insensitive match already present, and a
# negative index provided which does not match the current index. The
# match should be removed, and the path should be added to the end of
# the list.
ret, env, mock_set = _run("c:\\foo", index=-1, retval=True)
new_path = ("C:\\Bar", "c:\\foo")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test adding path with a case-insensitive match already present, and a
# negative index provided which matches the current index. No changes
# should be made.
ret, env, mock_set = _run("c:\\foo", index=-2, retval=True)
assert ret is True
assert env["PATH"] == _path_matches(orig_path)
# Test adding path with a case-insensitive match already present, and a
# negative index provided which is larger than the size of the list. No
# changes should be made, since in these cases we assume an index of 0,
# and the case-insensitive match is also at index 0.
ret, env, mock_set = _run("c:\\foo", index=-5, retval=True)
assert ret is True
assert env["PATH"] == _path_matches(orig_path)
# Test adding path with a case-insensitive match already present, and a
# negative index provided which is larger than the size of the list.
# The match should be removed from its current location and inserted at
# the beginning, since when a negative index is larger than the list,
# we put it at the beginning of the list.
ret, env, mock_set = _run("c:\\bar", index=-5, retval=True)
new_path = ("c:\\bar", "C:\\Foo")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test adding path with a case-insensitive match already present, and a
# negative index provided which matches the current index. The path
# should remain unchanged and we should not update the registry.
ret, env, mock_set = _run("c:\\bar", index=-1, retval=True)
assert ret is True
assert env["PATH"] == _path_matches(orig_path)
# Test adding path with a case-insensitive match already present, and
# an index provided which does not match the current index, and is also
# larger than the size of the PATH list. The match should be removed,
# and the path should be added to the end of the list.
ret, env, mock_set = _run("c:\\foo", index=5, retval=True)
new_path = ("C:\\Bar", "c:\\foo")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
def test_remove(pathsep):
"""
Test win_path.remove
"""
orig_path = ("C:\\Foo", "C:\\Bar", "C:\\Baz")
# Helper function to make the env var easier to reuse
def _env(path):
return {"PATH": salt.utils.stringutils.to_str(pathsep.join(path))}
def _run(name="c:\\salt", retval=True, path=None):
if path is None:
path = orig_path
env = _env(path)
# Mock getters and setters
mock_get = MagicMock(return_value=list(path))
mock_set = MagicMock(return_value=retval)
patch_path_sep = patch.object(win_path, "PATHSEP", pathsep)
patch_path = patch.object(win_path, "get_path", mock_get)
patch_env = patch.object(os, "environ", env)
patch_dict = patch.dict(win_path.__utils__, {"reg.set_value": mock_set})
patch_rehash = patch.object(win_path, "rehash", MagicMock(return_value=True))
with patch_path_sep, patch_path, patch_env, patch_dict, patch_rehash:
return win_path.remove(name), env, mock_set
def _path_matches(path):
return salt.utils.stringutils.to_str(pathsep.join(path))
# Test a successful reg update
ret, env, mock_set = _run("C:\\Bar", retval=True)
new_path = ("C:\\Foo", "C:\\Baz")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test a successful reg update with a case-insensitive match
ret, env, mock_set = _run("c:\\bar", retval=True)
new_path = ("C:\\Foo", "C:\\Baz")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test a successful reg update with multiple case-insensitive matches.
# All matches should be removed.
    old_path = orig_path + ("C:\\BAR",)
    ret, env, mock_set = _run("c:\\bar", retval=True, path=old_path)
new_path = ("C:\\Foo", "C:\\Baz")
assert ret is True
assert env["PATH"] == _path_matches(new_path)
# Test an unsuccessful reg update
ret, env, mock_set = _run("c:\\bar", retval=False)
new_path = ("C:\\Foo", "C:\\Baz")
assert ret is False
# The local path should still have been modified even
# though reg.set_value failed.
assert env["PATH"] == _path_matches(new_path)
# Test when no match found
ret, env, mock_set = _run("C:\\NotThere", retval=True)
assert ret is True
assert env["PATH"] == _path_matches(orig_path)
|
074c1e517ec371d54fcdc8c0a9a7af36313fde77
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/Decorators__examples/thread.py
|
bbb34bd2e36a02f4f9ce3799031dc7c1d4dd7cb9
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
thread.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import time
from threading import Thread
def thread(my_func):
def wrapper(*args, **kwargs):
my_thread = Thread(target=my_func, args=args, kwargs=kwargs)
my_thread.start()
return wrapper
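# A minimal sketch (editor's addition, not part of the original script): a variant
# decorator that returns the started Thread object so the caller can join() it or
# check is_alive(); the plain `thread` decorator above discards that handle.
def thread_with_handle(my_func):
    def wrapper(*args, **kwargs):
        my_thread = Thread(target=my_func, args=args, kwargs=kwargs)
        my_thread.start()
        return my_thread  # hand the thread back to the caller
    return wrapper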
if __name__ == "__main__":
@thread
def _print_and_sleep(timeout=2):
print("start. print_and_sleep")
time.sleep(timeout)
print("finish. print_and_sleep")
@thread
def _print_loop(name, max_num=10):
print("start. _print_loop")
i = 0
while True:
print(name, i)
time.sleep(1)
i += 1
if i == max_num:
break
print("finish. _print_loop")
_print_and_sleep()
_print_and_sleep(4)
_print_and_sleep()
_print_loop(" loop")
|
09576aa348035d8f1e17a44b150ffa3fe8b353f6
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/google/cloud/aiplatform/metadata/metadata.py
|
8947f3ea75e70c342bb9c31a67baa2baa5a4b9e6
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 38,620
|
py
|
metadata.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import os
from typing import Dict, Union, Optional, Any, List
from google.api_core import exceptions
from google.auth import credentials as auth_credentials
from google.protobuf import timestamp_pb2
from google.cloud.aiplatform import base
from google.cloud.aiplatform import pipeline_jobs
from google.cloud.aiplatform.compat.types import execution as gca_execution
from google.cloud.aiplatform.metadata import constants
from google.cloud.aiplatform.metadata import context
from google.cloud.aiplatform.metadata import execution
from google.cloud.aiplatform.metadata import experiment_resources
from google.cloud.aiplatform.metadata import experiment_run_resource
from google.cloud.aiplatform.metadata.schema.google import (
artifact_schema as google_artifact_schema,
)
from google.cloud.aiplatform.tensorboard import tensorboard_resource
from google.cloud.aiplatform.utils import autologging_utils
from google.cloud.aiplatform_v1.types import execution as execution_v1
_LOGGER = base.Logger(__name__)
class _MLFlowLogFilter(logging.Filter):
"""Log filter to only show MLFlow logs for unsupported framework versions."""
def filter(self, record) -> bool:
if record.msg.startswith("You are using an unsupported version"):
return True
else:
return False
def _get_experiment_schema_version() -> str:
"""Helper method to get experiment schema version
Returns:
str: schema version of the currently set experiment tracking version
"""
return constants.SCHEMA_VERSIONS[constants.SYSTEM_EXPERIMENT]
def _get_or_create_default_tensorboard() -> tensorboard_resource.Tensorboard:
"""Helper method to get the default TensorBoard instance if already exists, or create a default TensorBoard instance.
Returns:
tensorboard_resource.Tensorboard: the default TensorBoard instance.
"""
tensorboards = tensorboard_resource.Tensorboard.list(filter="is_default=true")
if tensorboards:
return tensorboards[0]
else:
default_tensorboard = tensorboard_resource.Tensorboard.create(
display_name="Default Tensorboard "
+ datetime.datetime.now().isoformat(sep=" "),
is_default=True,
)
return default_tensorboard
# Legacy Experiment tracking
# Maintaining creation APIs for backwards compatibility testing
class _LegacyExperimentService:
"""Contains the exposed APIs to interact with the Managed Metadata Service."""
@staticmethod
def get_pipeline_df(pipeline: str) -> "pd.DataFrame": # noqa: F821
"""Returns a Pandas DataFrame of the parameters and metrics associated with one pipeline.
Args:
pipeline: Name of the Pipeline to filter results.
Returns:
Pandas Dataframe of Pipeline with metrics and parameters.
"""
source = "pipeline"
pipeline_resource_name = (
_LegacyExperimentService._get_experiment_or_pipeline_resource_name(
name=pipeline, source=source, expected_schema=constants.SYSTEM_PIPELINE
)
)
return _LegacyExperimentService._query_runs_to_data_frame(
context_id=pipeline,
context_resource_name=pipeline_resource_name,
source=source,
)
@staticmethod
def _get_experiment_or_pipeline_resource_name(
name: str, source: str, expected_schema: str
) -> str:
"""Get the full resource name of the Context representing an Experiment or Pipeline.
Args:
name (str):
Name of the Experiment or Pipeline.
source (str):
                Identifies whether this is an Experiment or a Pipeline.
expected_schema (str):
expected_schema identifies the expected schema used for Experiment or Pipeline.
Returns:
The full resource name of the Experiment or Pipeline Context.
Raise:
NotFound exception if experiment or pipeline does not exist.
"""
this_context = context.Context(resource_name=name)
if this_context.schema_title != expected_schema:
raise ValueError(
f"Please provide a valid {source} name. {name} is not a {source}."
)
return this_context.resource_name
@staticmethod
def _query_runs_to_data_frame(
context_id: str, context_resource_name: str, source: str
) -> "pd.DataFrame": # noqa: F821
"""Get metrics and parameters associated with a given Context into a Dataframe.
Args:
context_id (str):
Name of the Experiment or Pipeline.
context_resource_name (str):
Full resource name of the Context associated with an Experiment or Pipeline.
source (str):
                Identifies whether this is an Experiment or a Pipeline.
Returns:
            A Pandas DataFrame of the runs in the given Context, with their metrics and parameters.
"""
try:
import pandas as pd
except ImportError:
raise ImportError(
"Pandas is not installed and is required to get dataframe as the return format. "
'Please install the SDK using "pip install google-cloud-aiplatform[metadata]"'
)
filter = f'schema_title="{constants.SYSTEM_RUN}" AND in_context("{context_resource_name}")'
run_executions = execution.Execution.list(filter=filter)
context_summary = []
for run_execution in run_executions:
run_dict = {
f"{source}_name": context_id,
"run_name": run_execution.display_name,
}
run_dict.update(
_LegacyExperimentService._execution_to_column_named_metadata(
"param", run_execution.metadata
)
)
for metric_artifact in run_execution.get_output_artifacts():
run_dict.update(
_LegacyExperimentService._execution_to_column_named_metadata(
"metric", metric_artifact.metadata
)
)
context_summary.append(run_dict)
return pd.DataFrame(context_summary)
@staticmethod
def _execution_to_column_named_metadata(
metadata_type: str, metadata: Dict, filter_prefix: Optional[str] = None
) -> Dict[str, Union[int, float, str]]:
"""Returns a dict of the Execution/Artifact metadata with column names.
Args:
metadata_type: The type of this execution properties (param, metric).
metadata: Either an Execution or Artifact metadata field.
filter_prefix:
Remove this prefix from the key of metadata field. Mainly used for removing
"input:" from PipelineJob parameter keys
Returns:
Dict of custom properties with keys mapped to column names
"""
column_key_to_value = {}
for key, value in metadata.items():
if filter_prefix and key.startswith(filter_prefix):
key = key[len(filter_prefix) :]
column_key_to_value[".".join([metadata_type, key])] = value
return column_key_to_value
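    # Illustrative note (editor's addition, not part of the library): with
    # filter_prefix="input:" this helper strips the prefix and namespaces the key
    # by metadata type, which is how the run DataFrames get their "param." and
    # "metric." column names:
    #
    #     _LegacyExperimentService._execution_to_column_named_metadata(
    #         "param", {"input:learning_rate": 0.1}, filter_prefix="input:")
    #     # -> {"param.learning_rate": 0.1}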
class _ExperimentTracker:
    """Tracks Experiments and Experiment Runs with high-level APIs."""
def __init__(self):
self._experiment: Optional[experiment_resources.Experiment] = None
self._experiment_run: Optional[experiment_run_resource.ExperimentRun] = None
self._global_tensorboard: Optional[tensorboard_resource.Tensorboard] = None
self._existing_tracking_uri: Optional[str] = None
def reset(self):
"""Resets this experiment tracker, clearing the current experiment and run."""
self._experiment = None
self._experiment_run = None
@property
    def experiment_name(self) -> Optional[str]:
        """Returns the currently set experiment name, or None if no experiment is set."""
if self.experiment:
return self.experiment.name
return None
@property
def experiment(self) -> Optional[experiment_resources.Experiment]:
"""Returns the currently set Experiment or Experiment set via env variable AIP_EXPERIMENT_NAME."""
if self._experiment:
return self._experiment
if os.getenv(constants.ENV_EXPERIMENT_KEY):
self._experiment = experiment_resources.Experiment.get(
os.getenv(constants.ENV_EXPERIMENT_KEY)
)
return self._experiment
return None
@property
def experiment_run(self) -> Optional[experiment_run_resource.ExperimentRun]:
"""Returns the currently set experiment run or experiment run set via env variable AIP_EXPERIMENT_RUN_NAME."""
if self._experiment_run:
return self._experiment_run
if os.getenv(constants.ENV_EXPERIMENT_RUN_KEY):
self._experiment_run = experiment_run_resource.ExperimentRun.get(
os.getenv(constants.ENV_EXPERIMENT_RUN_KEY),
experiment=self.experiment,
)
return self._experiment_run
return None
def set_experiment(
self,
experiment: str,
*,
description: Optional[str] = None,
backing_tensorboard: Optional[
Union[str, tensorboard_resource.Tensorboard]
] = None,
):
"""Set the experiment. Will retrieve the Experiment if it exists or create one with the provided name.
Args:
experiment (str):
Required. Name of the experiment to set.
description (str):
Optional. Description of an experiment.
backing_tensorboard Union[str, aiplatform.Tensorboard]:
Optional. If provided, assigns tensorboard as backing tensorboard to support time series metrics
logging.
"""
self.reset()
experiment = experiment_resources.Experiment.get_or_create(
experiment_name=experiment, description=description
)
backing_tb = (
backing_tensorboard
or self._global_tensorboard
or _get_or_create_default_tensorboard()
)
current_backing_tb = experiment.backing_tensorboard_resource_name
if not current_backing_tb and backing_tb:
experiment.assign_backing_tensorboard(tensorboard=backing_tb)
self._experiment = experiment
def set_tensorboard(
self,
tensorboard: Union[
tensorboard_resource.Tensorboard,
str,
],
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
):
"""Sets the global Tensorboard resource for this session.
Args:
tensorboard (Union[str, aiplatform.Tensorboard]):
Required. The Tensorboard resource to set as the global Tensorboard.
project (str):
Optional. Project associated with this Tensorboard resource.
location (str):
Optional. Location associated with this Tensorboard resource.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to set this Tensorboard resource.
"""
if isinstance(tensorboard, str):
tensorboard = tensorboard_resource.Tensorboard(
tensorboard,
project=project,
location=location,
credentials=credentials,
)
self._global_tensorboard = tensorboard
def _initialize_mlflow_plugin():
"""Invokes the Vertex MLFlow plugin.
Adding our log filter to MLFlow before calling mlflow.autolog() with
silent=False will only surface warning logs when the installed ML
framework version used for autologging is not supported by MLFlow.
"""
import mlflow
from mlflow.tracking._tracking_service import utils as mlflow_tracking_utils
from google.cloud.aiplatform._mlflow_plugin._vertex_mlflow_tracking import (
_VertexMlflowTracking,
)
# Only show MLFlow warning logs for ML framework version mismatches
logging.getLogger("mlflow").setLevel(logging.WARNING)
logging.getLogger("mlflow.tracking.fluent").disabled = True
logging.getLogger("mlflow.utils.autologging_utils").addFilter(
_MLFlowLogFilter()
)
mlflow_tracking_utils._tracking_store_registry.register(
"vertex-mlflow-plugin", _VertexMlflowTracking
)
mlflow.set_tracking_uri("vertex-mlflow-plugin://")
mlflow.autolog(
log_input_examples=False,
log_model_signatures=False,
log_models=False,
silent=False, # using False to show unsupported framework version warnings with _MLFlowLogFilter
)
def start_run(
self,
run: str,
*,
tensorboard: Union[tensorboard_resource.Tensorboard, str, None] = None,
resume=False,
) -> experiment_run_resource.ExperimentRun:
"""Start a run to current session.
```
aiplatform.init(experiment='my-experiment')
aiplatform.start_run('my-run')
aiplatform.log_params({'learning_rate':0.1})
```
Use as context manager. Run will be ended on context exit:
```
aiplatform.init(experiment='my-experiment')
with aiplatform.start_run('my-run') as my_run:
my_run.log_params({'learning_rate':0.1})
```
Resume a previously started run:
```
aiplatform.init(experiment='my-experiment')
with aiplatform.start_run('my-run', resume=True) as my_run:
my_run.log_params({'learning_rate':0.1})
```
Args:
run(str):
Required. Name of the run to assign current session with.
tensorboard Union[str, tensorboard_resource.Tensorboard]:
Optional. Backing Tensorboard Resource to enable and store time series metrics
logged to this Experiment Run using `log_time_series_metrics`.
                If not provided, the default backing tensorboard of the currently
                set experiment will be used.
resume (bool):
Whether to resume this run. If False a new run will be created.
Raises:
ValueError:
if experiment is not set. Or if run execution or metrics artifact is already created
but with a different schema.
"""
if not self.experiment:
raise ValueError(
"No experiment set for this run. Make sure to call aiplatform.init(experiment='my-experiment') "
"before invoking start_run. "
)
if self.experiment_run:
self.end_run()
if resume:
self._experiment_run = experiment_run_resource.ExperimentRun(
run_name=run, experiment=self.experiment
)
if tensorboard:
self._experiment_run.assign_backing_tensorboard(tensorboard=tensorboard)
self._experiment_run.update_state(
state=execution_v1.Execution.State.RUNNING
)
else:
self._experiment_run = experiment_run_resource.ExperimentRun.create(
run_name=run, experiment=self.experiment, tensorboard=tensorboard
)
return self._experiment_run
def end_run(
self,
state: execution_v1.Execution.State = execution_v1.Execution.State.COMPLETE,
    ):
        """Ends the current experiment run.
```
aiplatform.start_run('my-run')
...
aiplatform.end_run()
```
"""
self._validate_experiment_and_run(method_name="end_run")
try:
self.experiment_run.end_run(state=state)
except exceptions.NotFound:
_LOGGER.warning(
f"Experiment run {self.experiment_run.name} was not found."
"It may have been deleted"
)
finally:
self._experiment_run = None
def autolog(self, disable=False):
"""Enables autologging of parameters and metrics to Vertex Experiments.
After calling `aiplatform.autolog()`, any metrics and parameters from
model training calls with supported ML frameworks will be automatically
logged to Vertex Experiments.
Using autologging requires setting an experiment and experiment_tensorboard.
Args:
disable (bool):
Optional. Whether to disable autologging. Defaults to False.
If set to True, this resets the MLFlow tracking URI to its
previous state before autologging was called and remove logging
filters.
Raises:
ImportError:
If MLFlow is not installed. MLFlow is required to use
autologging in Vertex.
ValueError:
If experiment or experiment_tensorboard is not set.
                If `disable` is passed and autologging hasn't been enabled.
"""
try:
import mlflow
except ImportError:
raise ImportError(
"MLFlow is not installed. Please install MLFlow using pip install google-cloud-aiplatform[autologging] to use autologging in the Vertex SDK."
)
if disable:
if not autologging_utils._is_autologging_enabled():
raise ValueError(
"Autologging is not enabled. Enable autologging by calling aiplatform.autolog()."
)
if self._existing_tracking_uri:
mlflow.set_tracking_uri(self._existing_tracking_uri)
mlflow.autolog(disable=True)
# Remove the log filters we applied in the plugin
logging.getLogger("mlflow").setLevel(logging.INFO)
logging.getLogger("mlflow.tracking.fluent").disabled = False
logging.getLogger("mlflow.utils.autologging_utils").removeFilter(
_MLFlowLogFilter()
)
elif not self.experiment:
raise ValueError(
"No experiment set. Make sure to call aiplatform.init(experiment='my-experiment') "
"before calling aiplatform.autolog()."
)
elif not self.experiment._metadata_context.metadata.get(
constants._BACKING_TENSORBOARD_RESOURCE_KEY
):
raise ValueError(
"Setting an experiment tensorboard is required to use autologging. "
"Please set a backing tensorboard resource by calling "
"aiplatform.init(experiment_tensorboard=aiplatform.Tensorboard(...))."
)
else:
self._existing_tracking_uri = mlflow.get_tracking_uri()
_ExperimentTracker._initialize_mlflow_plugin()
def log_params(self, params: Dict[str, Union[float, int, str]]):
"""Log single or multiple parameters with specified key and value pairs.
Parameters with the same key will be overwritten.
```
aiplatform.start_run('my-run')
aiplatform.log_params({'learning_rate': 0.1, 'dropout_rate': 0.2})
```
Args:
params (Dict[str, Union[float, int, str]]):
Required. Parameter key/value pairs.
"""
self._validate_experiment_and_run(method_name="log_params")
# query the latest run execution resource before logging.
self.experiment_run.log_params(params=params)
def log_metrics(self, metrics: Dict[str, Union[float, int, str]]):
"""Log single or multiple Metrics with specified key and value pairs.
Metrics with the same key will be overwritten.
```
aiplatform.start_run('my-run', experiment='my-experiment')
aiplatform.log_metrics({'accuracy': 0.9, 'recall': 0.8})
```
Args:
metrics (Dict[str, Union[float, int, str]]):
Required. Metrics key/value pairs.
"""
self._validate_experiment_and_run(method_name="log_metrics")
# query the latest metrics artifact resource before logging.
self.experiment_run.log_metrics(metrics=metrics)
def log_classification_metrics(
self,
*,
labels: Optional[List[str]] = None,
matrix: Optional[List[List[int]]] = None,
fpr: Optional[List[float]] = None,
tpr: Optional[List[float]] = None,
threshold: Optional[List[float]] = None,
display_name: Optional[str] = None,
) -> google_artifact_schema.ClassificationMetrics:
"""Create an artifact for classification metrics and log to ExperimentRun. Currently support confusion matrix and ROC curve.
```
my_run = aiplatform.ExperimentRun('my-run', experiment='my-experiment')
classification_metrics = my_run.log_classification_metrics(
display_name='my-classification-metrics',
labels=['cat', 'dog'],
matrix=[[9, 1], [1, 9]],
fpr=[0.1, 0.5, 0.9],
tpr=[0.1, 0.7, 0.9],
threshold=[0.9, 0.5, 0.1],
)
```
Args:
labels (List[str]):
Optional. List of label names for the confusion matrix. Must be set if 'matrix' is set.
matrix (List[List[int]):
Optional. Values for the confusion matrix. Must be set if 'labels' is set.
fpr (List[float]):
Optional. List of false positive rates for the ROC curve. Must be set if 'tpr' or 'thresholds' is set.
tpr (List[float]):
Optional. List of true positive rates for the ROC curve. Must be set if 'fpr' or 'thresholds' is set.
threshold (List[float]):
Optional. List of thresholds for the ROC curve. Must be set if 'fpr' or 'tpr' is set.
display_name (str):
Optional. The user-defined name for the classification metric artifact.
Raises:
ValueError: if 'labels' and 'matrix' are not set together
or if 'labels' and 'matrix' are not in the same length
or if 'fpr' and 'tpr' and 'threshold' are not set together
or if 'fpr' and 'tpr' and 'threshold' are not in the same length
"""
self._validate_experiment_and_run(method_name="log_classification_metrics")
# query the latest metrics artifact resource before logging.
return self.experiment_run.log_classification_metrics(
display_name=display_name,
labels=labels,
matrix=matrix,
fpr=fpr,
tpr=tpr,
threshold=threshold,
)
def log_model(
self,
model: Union[
"sklearn.base.BaseEstimator", "xgb.Booster", "tf.Module" # noqa: F821
],
artifact_id: Optional[str] = None,
*,
uri: Optional[str] = None,
input_example: Union[
list, dict, "pd.DataFrame", "np.ndarray" # noqa: F821
] = None,
display_name: Optional[str] = None,
metadata_store_id: Optional[str] = "default",
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
    ) -> google_artifact_schema.ExperimentModel:
        """Saves an ML model into an MLMD artifact and logs it to this ExperimentRun.
Supported model frameworks: sklearn, xgboost, tensorflow.
Example usage:
model = LinearRegression()
model.fit(X, y)
aiplatform.init(
project="my-project",
location="my-location",
staging_bucket="gs://my-bucket",
experiment="my-exp"
)
with aiplatform.start_run("my-run"):
aiplatform.log_model(model, "my-sklearn-model")
Args:
model (Union["sklearn.base.BaseEstimator", "xgb.Booster", "tf.Module"]):
Required. A machine learning model.
artifact_id (str):
Optional. The resource id of the artifact. This id must be globally unique
in a metadataStore. It may be up to 63 characters, and valid characters
are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
uri (str):
Optional. A gcs directory to save the model file. If not provided,
`gs://default-bucket/timestamp-uuid-frameworkName-model` will be used.
If default staging bucket is not set, a new bucket will be created.
input_example (Union[list, dict, pd.DataFrame, np.ndarray]):
Optional. An example of a valid model input. Will be stored as a yaml file
in the gcs uri. Accepts list, dict, pd.DataFrame, and np.ndarray
The value inside a list must be a scalar or list. The value inside
a dict must be a scalar, list, or np.ndarray.
display_name (str):
Optional. The display name of the artifact.
metadata_store_id (str):
Optional. The <metadata_store_id> portion of the resource name with
the format:
projects/123/locations/us-central1/metadataStores/<metadata_store_id>/artifacts/<resource_id>
If not provided, the MetadataStore's ID will be set to "default".
project (str):
Optional. Project used to create this Artifact. Overrides project set in
aiplatform.init.
location (str):
Optional. Location used to create this Artifact. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to create this Artifact. Overrides
credentials set in aiplatform.init.
Returns:
An ExperimentModel instance.
Raises:
ValueError: if model type is not supported.
"""
self._validate_experiment_and_run(method_name="log_model")
self.experiment_run.log_model(
model=model,
artifact_id=artifact_id,
uri=uri,
input_example=input_example,
display_name=display_name,
metadata_store_id=metadata_store_id,
project=project,
location=location,
credentials=credentials,
)
def _validate_experiment_and_run(self, method_name: str):
"""Validates Experiment and Run are set and raises informative error message.
Args:
            method_name: The name of the method to raise from.
Raises:
ValueError: If Experiment or Run are not set.
"""
if not self.experiment:
raise ValueError(
f"No experiment set. Make sure to call aiplatform.init(experiment='my-experiment') "
f"before trying to {method_name}. "
)
if not self.experiment_run:
raise ValueError(
f"No run set. Make sure to call aiplatform.start_run('my-run') before trying to {method_name}. "
)
def get_experiment_df(
self, experiment: Optional[str] = None
) -> "pd.DataFrame": # noqa: F821
"""Returns a Pandas DataFrame of the parameters and metrics associated with one experiment.
Example:
aiplatform.init(experiment='exp-1')
aiplatform.start_run(run='run-1')
aiplatform.log_params({'learning_rate': 0.1})
aiplatform.log_metrics({'accuracy': 0.9})
aiplatform.start_run(run='run-2')
aiplatform.log_params({'learning_rate': 0.2})
aiplatform.log_metrics({'accuracy': 0.95})
            aiplatform.get_experiment_df()
Will result in the following DataFrame
___________________________________________________________________________
| experiment_name | run_name | param.learning_rate | metric.accuracy |
---------------------------------------------------------------------------
| exp-1 | run-1 | 0.1 | 0.9 |
| exp-1 | run-2 | 0.2 | 0.95 |
---------------------------------------------------------------------------
Args:
experiment (str):
Name of the Experiment to filter results. If not set, return results of current active experiment.
Returns:
Pandas Dataframe of Experiment with metrics and parameters.
Raise:
NotFound exception if experiment does not exist.
            ValueError if the given experiment is associated with a wrong schema.
"""
if not experiment:
experiment = self.experiment
else:
experiment = experiment_resources.Experiment(experiment)
return experiment.get_data_frame()
def log(
self,
*,
pipeline_job: Optional[pipeline_jobs.PipelineJob] = None,
):
"""Log Vertex AI Resources to the current experiment run.
```
aiplatform.start_run('my-run')
my_job = aiplatform.PipelineJob(...)
my_job.submit()
        aiplatform.log(pipeline_job=my_job)
```
Args:
pipeline_job (pipeline_jobs.PipelineJob):
Optional. Vertex PipelineJob to associate to this Experiment Run.
"""
self._validate_experiment_and_run(method_name="log")
self.experiment_run.log(pipeline_job=pipeline_job)
def log_time_series_metrics(
self,
metrics: Dict[str, Union[float]],
step: Optional[int] = None,
wall_time: Optional[timestamp_pb2.Timestamp] = None,
    ):
        """Logs time series metrics to this Experiment Run.
Requires the experiment or experiment run has a backing Vertex Tensorboard resource.
```
my_tensorboard = aiplatform.Tensorboard(...)
aiplatform.init(experiment='my-experiment', experiment_tensorboard=my_tensorboard)
aiplatform.start_run('my-run')
# increments steps as logged
for i in range(10):
aiplatform.log_time_series_metrics({'loss': loss})
# explicitly log steps
for i in range(10):
aiplatform.log_time_series_metrics({'loss': loss}, step=i)
```
Args:
metrics (Dict[str, Union[str, float]]):
                Required. Dictionary where keys are metric names and values are metric values.
step (int):
Optional. Step index of this data point within the run.
If not provided, the latest
step amongst all time series metrics already logged will be used.
wall_time (timestamp_pb2.Timestamp):
Optional. Wall clock timestamp when this data point is
generated by the end user.
If not provided, this will be generated based on the value from time.time()
Raises:
RuntimeError: If current experiment run doesn't have a backing Tensorboard resource.
"""
self._validate_experiment_and_run(method_name="log_time_series_metrics")
self.experiment_run.log_time_series_metrics(
metrics=metrics, step=step, wall_time=wall_time
)
def start_execution(
self,
*,
schema_title: Optional[str] = None,
display_name: Optional[str] = None,
resource_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
schema_version: Optional[str] = None,
description: Optional[str] = None,
resume: bool = False,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> execution.Execution:
"""
Create and starts a new Metadata Execution or resumes a previously created Execution.
To start a new execution:
```
        with aiplatform.start_execution(schema_title='system.ContainerExecution', display_name='trainer') as exc:
exc.assign_input_artifacts([my_artifact])
model = aiplatform.Artifact.create(uri='gs://my-uri', schema_title='system.Model')
exc.assign_output_artifacts([model])
```
To continue a previously created execution:
```
with aiplatform.start_execution(resource_id='my-exc', resume=True) as exc:
...
```
Args:
schema_title (str):
Optional. schema_title identifies the schema title used by the Execution. Required if starting
a new Execution.
resource_id (str):
Optional. The <resource_id> portion of the Execution name with
the format. This is globally unique in a metadataStore:
projects/123/locations/us-central1/metadataStores/<metadata_store_id>/executions/<resource_id>.
display_name (str):
Optional. The user-defined name of the Execution.
schema_version (str):
Optional. schema_version specifies the version used by the Execution.
If not set, defaults to use the latest version.
metadata (Dict):
Optional. Contains the metadata information that will be stored in the Execution.
description (str):
Optional. Describes the purpose of the Execution to be created.
metadata_store_id (str):
Optional. The <metadata_store_id> portion of the resource name with
the format:
projects/123/locations/us-central1/metadataStores/<metadata_store_id>/artifacts/<resource_id>
If not provided, the MetadataStore's ID will be set to "default".
project (str):
Optional. Project used to create this Execution. Overrides project set in
aiplatform.init.
location (str):
Optional. Location used to create this Execution. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials used to create this Execution. Overrides
credentials set in aiplatform.init.
Returns:
Execution: Instantiated representation of the managed Metadata Execution.
Raises:
ValueError: If experiment run is set and project or location do not match experiment run.
ValueError: If resume set to `True` and resource_id is not provided.
            ValueError: If creating a new execution and schema_title is not provided.
"""
if self.experiment_run and not self.experiment_run._is_legacy_experiment_run():
if project and project != self.experiment_run.project:
                raise ValueError(
                    f"Currently set Experiment run project {self.experiment_run.project} must "
                    f"match provided project {project}"
)
if location and location != self.experiment_run.location:
                raise ValueError(
                    f"Currently set Experiment run location {self.experiment_run.location} must "
                    f"match provided location {location}"
)
if resume:
if not resource_id:
raise ValueError("resource_id is required when resume=True")
run_execution = execution.Execution(
execution_name=resource_id,
project=project,
location=location,
credentials=credentials,
)
# TODO(handle updates if resuming)
run_execution.update(state=gca_execution.Execution.State.RUNNING)
else:
if not schema_title:
raise ValueError(
"schema_title must be provided when starting a new Execution"
)
run_execution = execution.Execution.create(
display_name=display_name,
schema_title=schema_title,
schema_version=schema_version,
metadata=metadata,
description=description,
resource_id=resource_id,
project=project,
location=location,
credentials=credentials,
)
if self.experiment_run:
if self.experiment_run._is_legacy_experiment_run():
                _LOGGER.warning(
                    f"{self.experiment_run._run_name} is an Experiment run created in Vertex Experiment Preview"
                    " and does not support tracking Executions."
                    " Please create a new Experiment run to track executions against an Experiment run.",
)
else:
self.experiment_run.associate_execution(run_execution)
run_execution.assign_input_artifacts = (
self.experiment_run._association_wrapper(
run_execution.assign_input_artifacts
)
)
run_execution.assign_output_artifacts = (
self.experiment_run._association_wrapper(
run_execution.assign_output_artifacts
)
)
return run_execution
_experiment_tracker = _ExperimentTracker()
|
d79d09be2ea87265c29478bab3ea715992098b2c
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/test_client_regress/views.py
|
91b8bdfefc4a44d39ce19849e76a211744d36267
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,211
|
py
|
views.py
|
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
class CustomTestException(Exception):
pass
def no_template_view(request):
    "A simple view that expects a GET request and returns a plain HttpResponse without using a template"
return HttpResponse(
"No template used. Sample content: twice once twice. Content ends."
)
def staff_only_view(request):
    "A view that can only be visited by staff. Non-staff members get an exception"
if request.user.is_staff:
return HttpResponse()
else:
raise CustomTestException()
@login_required
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
def request_data(request, template="base.html", data="sausage"):
"A simple view that returns the request data in the context"
return render(
request,
template,
{
"get-foo": request.GET.get("foo"),
"get-bar": request.GET.get("bar"),
"post-foo": request.POST.get("foo"),
"post-bar": request.POST.get("bar"),
"data": data,
},
)
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == "Arthur Dent":
return HttpResponse("Hi, Arthur")
else:
return HttpResponse("Howdy, %s" % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
c = Client()
c.get("/no_template_view/")
return render(request, "base.html", {"nested": "yes"})
@login_required
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect("/get_view/")
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
    # Append "0" to the counter value so each redirect targets a new, distinct
    # query string instead of looping back to an identical URL.
    query["counter"] += "0"
return HttpResponseRedirect(
"/redirect_to_self_with_changing_query_view/?%s" % urlencode(query)
)
def set_session_view(request):
"A view that sets a session variable"
request.session["session_var"] = "YES"
return HttpResponse("set_session")
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get("session_var", "NO"))
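# Editor's sketch (not part of Django's test suite): how the two session views
# above are typically exercised from the test client. The URL paths are assumed
# for illustration and must match the test project's urlconf.
#
#     client = Client()
#     client.get("/set_session/")               # stores session_var = "YES"
#     response = client.get("/check_session/")  # reads it back
#     assert response.content == b"YES"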
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse("request method: %s" % request.method)
def return_unicode(request):
return render(request, "unicode.html")
def return_undecodable_binary(request):
return HttpResponse(
b"%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document "
b"http://www.reportlab.com"
)
def return_json_response(request):
content_type = request.GET.get("content_type")
kwargs = {"content_type": content_type} if content_type else {}
return JsonResponse({"key": "value"}, **kwargs)
def return_json_response_latin1(request):
return HttpResponse(
b'{"a":"\xc5"}', content_type="application/json; charset=latin1"
)
def return_text_file(request):
"A view that parses and returns text as a file."
match = CONTENT_TYPE_RE.match(request.META["CONTENT_TYPE"])
if match:
charset = match[1]
else:
charset = settings.DEFAULT_CHARSET
return HttpResponse(
request.body, status=200, content_type="text/plain; charset=%s" % charset
)
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse(
"HTTP_X_ARG_CHECK: %s" % request.META.get("HTTP_X_ARG_CHECK", "Undefined")
)
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
    "A view that is requested with GET and accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
    "A view that is requested with GET and accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render(request, "request_context.html")
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(render_to_string("base.html") + render_to_string("base.html"))
def redirect_based_on_extra_headers_1_view(request):
if "HTTP_REDIRECT" in request.META:
return HttpResponseRedirect("/redirect_based_on_extra_headers_2/")
return HttpResponse()
def redirect_based_on_extra_headers_2_view(request):
if "HTTP_REDIRECT" in request.META:
return HttpResponseRedirect("/redirects/further/more/")
return HttpResponse()
|
b2d7c65032d5b70e975ab7c8b01fee073b56895b
|
7c91ff850f81bf8759b055971d592a71ef025732
|
/pyglet/media/drivers/silent/__init__.py
|
ebf66602590f5bffb4058c9e644d2de67d03db68
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
pyglet/pyglet
|
d9da2cccd52a6bc5c09548536876602f6e1412f0
|
094c638f0529fecab4e74556487b92453a78753c
|
refs/heads/master
| 2023-08-17T15:08:09.192350
| 2023-08-17T01:51:50
| 2023-08-17T01:51:50
| 191,043,601
| 1,687
| 427
|
BSD-3-Clause
| 2023-09-14T08:51:31
| 2019-06-09T18:55:00
|
Python
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
__init__.py
|
from . import adaptation
def create_audio_driver():
return adaptation.SilentDriver()
__all__ = ["create_audio_driver"]
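# Editor's note (illustrative, not part of pyglet): the silent driver is a no-op
# fallback used when no working audio output is available; it can also be
# requested explicitly before any media playback, e.g.:
#
#     import pyglet
#     pyglet.options["audio"] = ("silent",)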
|