blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 4 to 721 chars) | content_id (string, 40 chars) | detected_licenses (list, 0 to 57 items) | license_type (string, 2 classes) | repo_name (string, 5 to 91 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 321 classes) | visit_date (timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07) | revision_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | committer_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | github_id (int64, 426 to 681M) | star_events_count (int64, 101 to 243k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable) | gha_created_at (timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable) | gha_language (string, 147 classes) | src_encoding (string, 26 classes) | language (string, 2 classes) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 6 to 10.2M) | extension (string, 115 classes) | filename (string, 3 to 113 chars) | content (string, 6 to 10.2M chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90f1ae6deea54b3090a45b70722f36daafde01fd
|
96e98f67a75959fe3e899960f604f0d8de5cb13b
|
/vcap/vcap/loading/import_hacks.py
|
31ab131b766baf87e72ba2ae79d1abb00803ecd5
|
[
"BSD-3-Clause"
] |
permissive
|
opencv/open_vision_capsules
|
6c47aece3584021a805068c2acdd8e03bc0b99eb
|
7412902fed8f91c9c82bd42b0180e07673c38bf1
|
refs/heads/master
| 2023-06-10T13:00:49.350387
| 2023-04-12T05:01:05
| 2023-04-12T05:01:05
| 234,445,543
| 124
| 50
|
NOASSERTION
| 2022-08-26T17:40:09
| 2020-01-17T01:29:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,599
|
py
|
import_hacks.py
|
"""Code which changes the way Python processes imports. Allows for capsules to
import Python modules and packages within the capsule itself.
"""
from importlib.abc import Loader, MetaPathFinder
from importlib.machinery import ModuleSpec
import os
from pathlib import Path
from zipfile import ZipFile
class ZipFinder(MetaPathFinder):
"""An import finder that allows modules and packages inside a zip file to be
imported.
"""
def __init__(self, zip_file: ZipFile,
capsule_dir_path: Path,
root_package_name: str):
"""
:param zip_file: The ZipFile loaded in memory
:param capsule_dir_path:
The path to the directory where the development version of the
capsule is stored. For example, if the capsule being loaded is in
../capsules/my_capsule_name.py
Then capsule_dir_path would be
../capsules/my_capsule_name/
The reason for this is so that debugging can work within capsules.
When a capsule is executed, it is "compiled" to this path, so that
debug information still works (along with breakpoints!)
:param root_package_name: The name of the root package that this zip
file provides
"""
self._zip_file = zip_file
self._capsule_dir_path = capsule_dir_path
self._capsule_dir_name = self._capsule_dir_path.name
self._root_package_name = root_package_name
def find_spec(self, fullname, _path=None, _target=None):
if not self._in_capsule(fullname):
# If the root package name is not the first component of the fullname,
# it's not our job to import it
return None
if fullname == self._root_package_name:
# If the capsule root directory is being loaded, return a
# modulespec
return ModuleSpec(
name=fullname,
loader=None,
is_package=True)
# Get rid of the capsule name prefix
pruned_fullname = _remove_capsule_name(self._capsule_dir_name,
fullname)
package_path = _package_fullname_to_path(pruned_fullname)
module_path = _module_fullname_to_path(pruned_fullname)
if package_path in self._zip_file.namelist():
# If a directory exists with a name that matches the import, we
# assume it is a package import.
return ModuleSpec(name=fullname,
loader=None,
is_package=True)
elif module_path in self._zip_file.namelist():
# If a .py file exists with a name that matches the import, we
# assume it is a module import
module_file_path = self._capsule_dir_path / module_path
loader = ZipModuleLoader(zip_file=self._zip_file,
module_file_path=module_file_path,
capsule_dir_name=self._capsule_dir_name)
return ModuleSpec(name=fullname,
loader=loader)
raise ImportError(f"Problem while importing {fullname}")
def _in_capsule(self, fullname):
parts = fullname.split(".")
if parts[0] != self._root_package_name:
return False
return True
class ZipModuleLoader(Loader):
"""Loads modules from a zip file."""
def __init__(self, zip_file: ZipFile,
module_file_path: Path,
capsule_dir_name: str):
"""
:param zip_file: The ZipFile loaded in memory
:param module_file_path: The path to where the python file would be
in the filesystem if it was being run directly as opposed to in a
*.cap zip.
:param capsule_dir_name: The name of the capsule directory, used to strip
the capsule-name prefix from module fullnames
"""
self._zip_file = zip_file
self._module_file_path = module_file_path
self._capsule_dir_name = capsule_dir_name
def create_module(self, spec):
return None
def exec_module(self, module):
pruned_name = _remove_capsule_name(
capsule_name=self._capsule_dir_name,
fullname=module.__name__)
zip_path = _module_fullname_to_path(pruned_name)
# Extract the code from the zip
code = self._zip_file.read(zip_path)
# Compile code with the file path set first, so that debugging and
# tracebacks work in development (they reference a file)
# also breakpoints work in IDE's, which is quite helpful.
compiled = compile(code, self._module_file_path, "exec")
exec(compiled, module.__dict__)
return module
def _package_fullname_to_path(fullname):
"""Converts a package's fullname to a file path that should be the package's
directory.
:param fullname: The fullname of a package, like package_a.package_b
:return: A derived filepath, like package_a/package_b
"""
return fullname.replace(".", os.sep) + os.sep
def _module_fullname_to_path(fullname):
"""Converts a module's fullname to a file path that should be the module's
Python file.
:param fullname: The fullname of a module, like package_a.my_module
:return: A derived filepath, like package_a/my_module.py
"""
return fullname.replace(".", os.sep) + ".py"
def _remove_capsule_name(capsule_name, fullname):
"""Remove "capsule_name" from capsule_name.some_module.some_module2
Since the files in the zip file won't have "capsule_name" in the paths
"""
parts = fullname.split(".")
return ".".join(parts[1:])
|
e0ad9625f56d556fa0f5d4f0915c11d7ec36a398
|
b26c41926fa3a7c2c061132d80e91a2750f2f468
|
/tensorflow_probability/python/experimental/distribute/diagonal_mass_matrix_adaptation_test.py
|
a91e2f98b0e3968f463e80a001d1a2fc2313db3a
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/probability
|
22e679a4a883e408f8ef237cda56e3e3dfa42b17
|
42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5
|
refs/heads/main
| 2023-09-04T02:06:08.174935
| 2023-08-31T20:30:00
| 2023-08-31T20:31:33
| 108,053,674
| 4,055
| 1,269
|
Apache-2.0
| 2023-09-13T21:49:49
| 2017-10-23T23:50:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,910
|
py
|
diagonal_mass_matrix_adaptation_test.py
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.experimental.distribute.joint_distribution."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import sample
from tensorflow_probability.python.experimental.distribute import sharded
from tensorflow_probability.python.experimental.mcmc import diagonal_mass_matrix_adaptation as dmma
from tensorflow_probability.python.experimental.mcmc import preconditioned_hmc as phmc
from tensorflow_probability.python.experimental.stats import sample_stats
from tensorflow_probability.python.internal import distribute_test_lib as test_lib
from tensorflow_probability.python.internal import loop_util
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
class EchoKernel(phmc.PreconditionedHamiltonianMonteCarlo):
def __init__(self, *args, **kwargs):
super().__init__(
target_log_prob_fn=lambda x: -x * x,
step_size=0.1,
num_leapfrog_steps=2,
)
def one_step(self, current_state, previous_kernel_results, seed=None):
_, nkr = super().one_step(current_state, previous_kernel_results, seed=seed)
return current_state, nkr
@test_util.test_all_tf_execution_regimes
class DiagonalAdaptationTest(test_lib.DistributedTest):
def test_diagonal_mass_matrix_no_distribute(self):
"""Nothing distributed. Make sure EchoKernel works."""
kernel = dmma.DiagonalMassMatrixAdaptation(
EchoKernel(),
sample_stats.RunningVariance.from_stats(
num_samples=10., mean=tf.zeros(3), variance=tf.ones(3)))
state = tf.zeros(3)
pkr = kernel.bootstrap_results(state)
draws = np.random.randn(10, 3).astype(np.float32)
def body(pkr_seed, draw):
pkr, seed = pkr_seed
seed, kernel_seed = samplers.split_seed(seed)
_, pkr = kernel.one_step(draw, pkr, seed=kernel_seed)
return (pkr, seed)
(pkr, _), _ = loop_util.trace_scan(body,
(pkr, samplers.sanitize_seed(self.key)),
draws, lambda _: ())
running_variance = pkr.running_variance[0]
emp_mean = draws.sum(axis=0) / 20.
emp_squared_residuals = (np.sum((draws - emp_mean) ** 2, axis=0) +
10 * emp_mean ** 2 +
10)
self.assertAllClose(emp_mean, running_variance.mean)
self.assertAllClose(emp_squared_residuals,
running_variance.sum_squared_residuals)
def test_diagonal_mass_matrix_independent(self):
@tf.function(autograph=False)
def run(seed):
dist_seed, *seeds = samplers.split_seed(seed, 11)
dist = sharded.Sharded(
independent.Independent(normal.Normal(tf.zeros(3), tf.ones(3)), 1),
shard_axis_name=self.axis_name)
state = dist.sample(seed=dist_seed)
kernel = dmma.DiagonalMassMatrixAdaptation(
EchoKernel(),
sample_stats.RunningVariance.from_stats(
num_samples=10., mean=tf.zeros(3), variance=tf.ones(3)))
pkr = kernel.bootstrap_results(state)
def body(draw_pkr, i):
seed = tf.gather(seeds, i)
_, pkr = draw_pkr
draw_seed, step_seed = samplers.split_seed(seed)
draw = dist.sample(seed=draw_seed)
_, pkr = kernel.one_step(draw, pkr, seed=step_seed)
return draw, pkr
(_, pkr), draws = loop_util.trace_scan(body,
(tf.zeros(dist.event_shape), pkr),
tf.range(len(seeds)),
lambda v: v[0])
return draws, pkr
draws, pkr = self.strategy_run(run, (self.key,), in_axes=None)
running_variance = self.per_replica_to_composite_tensor(
pkr.running_variance[0])
draws = self.per_replica_to_tensor(draws, axis=1)
mean, sum_squared_residuals, draws = self.evaluate(
(running_variance.mean, running_variance.sum_squared_residuals, draws))
emp_mean = tf.reduce_sum(draws, axis=0) / 20.
emp_squared_residuals = (
tf.reduce_sum((draws - emp_mean)**2, axis=0) + 10 * emp_mean**2 + 10)
self.assertAllClose(emp_mean, mean)
self.assertAllClose(emp_squared_residuals, sum_squared_residuals)
def test_diagonal_mass_matrix_sample(self):
@tf.function(autograph=False)
def run(seed):
dist_seed, *seeds = samplers.split_seed(seed, 11)
dist = sharded.Sharded(
sample.Sample(normal.Normal(0., 1.), 3),
shard_axis_name=self.axis_name)
state = dist.sample(seed=dist_seed)
kernel = dmma.DiagonalMassMatrixAdaptation(
EchoKernel(),
sample_stats.RunningVariance.from_stats(
num_samples=10., mean=tf.zeros(3), variance=tf.ones(3)))
pkr = kernel.bootstrap_results(state)
def body(draw_pkr, i):
seed = tf.gather(seeds, i)
_, pkr = draw_pkr
draw_seed, step_seed = samplers.split_seed(seed)
draw = dist.sample(seed=draw_seed)
_, pkr = kernel.one_step(draw, pkr, seed=step_seed)
return draw, pkr
(_, pkr), draws = loop_util.trace_scan(body,
(tf.zeros(dist.event_shape), pkr),
tf.range(len(seeds)),
lambda v: v[0])
return draws, pkr
draws, pkr = self.strategy_run(run, (self.key,), in_axes=None)
running_variance = self.per_replica_to_composite_tensor(
pkr.running_variance[0])
draws = self.per_replica_to_tensor(draws, axis=1)
mean, sum_squared_residuals, draws = self.evaluate(
(running_variance.mean, running_variance.sum_squared_residuals, draws))
emp_mean = tf.reduce_sum(draws, axis=0) / 20.
emp_squared_residuals = tf.reduce_sum(
(draws - emp_mean[None, ...])**2, axis=0) + 10 * emp_mean**2 + 10
self.assertAllClose(emp_mean, mean)
self.assertAllClose(emp_squared_residuals, sum_squared_residuals)
if __name__ == '__main__':
test_util.main()
|
d279d3799494c406876c5d1602d48d799c536dae
|
80a3d98eae1d755d6914b5cbde63fd10f5cc2046
|
/autox/autox_competition/feature_selection/grn_feature_selection.py
|
ba9d0eeeb5483653f51e843e6015f8e44d7511b8
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/AutoX
|
efda57b51b586209e1d58e1dab7d0797083aadc5
|
7eab9f4744329a225ff01bb5ec360c4662e1e52e
|
refs/heads/master
| 2023-05-24T00:53:37.109036
| 2023-02-14T14:21:50
| 2023-02-14T14:21:50
| 388,068,949
| 752
| 162
|
Apache-2.0
| 2022-07-12T08:28:09
| 2021-07-21T09:45:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 19,378
|
py
|
grn_feature_selection.py
|
import warnings
warnings.simplefilter('default')
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import sklearn.preprocessing
from tqdm import tqdm
class GatedLinearUnit(nn.Module):
"""**The unit of gating operation that maps the input to the range of 0-1 and multiple original input through the
sigmoid function.**
"""
def __init__(self, input_size,
hidden_layer_size,
dropout_rate,
activation=None):
"""
:param input_size: Number of features
:param hidden_layer_size: The size of nn.Linear layer, global default is 160
:param dropout_rate: The rate of linear layer parameters randomly discarded during training
:param activation: activation function used to activate raw input, default is None
"""
super(GatedLinearUnit, self).__init__()
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.dropout_rate = dropout_rate
self.activation_name = activation
if self.dropout_rate:
self.dropout = nn.Dropout(p=self.dropout_rate)
self.W4 = torch.nn.Linear(self.input_size, self.hidden_layer_size)
self.W5 = torch.nn.Linear(self.input_size, self.hidden_layer_size)
if self.activation_name:
self.activation = getattr(nn, self.activation_name)()
self.sigmoid = nn.Sigmoid()
self.init_weights()
def init_weights(self):
for n, p in self.named_parameters():
if 'bias' not in n:
torch.nn.init.xavier_uniform_(p)
elif 'bias' in n:
torch.nn.init.zeros_(p)
def forward(self, x):
if self.dropout_rate:
x = self.dropout(x)
if self.activation_name:
output = self.sigmoid(self.W4(x)) * self.activation(self.W5(x))
else:
output = self.sigmoid(self.W4(x)) * self.W5(x)
return output
class GateAddNormNetwork(nn.Module):
"""**Units that adding gating output to skip connection improves generalization.**"""
def __init__(self, input_size,
hidden_layer_size,
dropout_rate,
activation=None):
"""
:param input_size: Number of features
:param hidden_layer_size: The size of nn.Linear layer, global default is 160
:param dropout_rate: The rate of linear layer parameters randomly discarded during training
:param activation: activation function used to activate raw input, default is None
"""
super(GateAddNormNetwork, self).__init__()
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.dropout_rate = dropout_rate
self.activation_name = activation
self.GLU = GatedLinearUnit(self.input_size,
self.hidden_layer_size,
self.dropout_rate,
activation=self.activation_name)
self.LayerNorm = nn.LayerNorm(self.hidden_layer_size)
def forward(self, x, skip):
output = self.LayerNorm(self.GLU(x) + skip)
return output
class GatedResidualNetwork(nn.Module):
"""**GRN main module, which divides all inputs into two ways, calculates the gating one way for linear mapping twice and
passes the original input to GateAddNormNetwork together. ** """
def __init__(self,
hidden_layer_size,
input_size=None,
output_size=None,
dropout_rate=None):
"""
:param hidden_layer_size: The size of nn.Linear layer, global default is 160
:param input_size: Number of features
:param output_size: Number of features
:param dropout_rate: The rate of linear layer parameters randomly discarded during training
"""
super(GatedResidualNetwork, self).__init__()
self.hidden_layer_size = hidden_layer_size
self.input_size = input_size if input_size else self.hidden_layer_size
self.output_size = output_size
self.dropout_rate = dropout_rate
self.W1 = torch.nn.Linear(self.hidden_layer_size, self.hidden_layer_size)
self.W2 = torch.nn.Linear(self.input_size, self.hidden_layer_size)
if self.output_size:
self.skip_linear = torch.nn.Linear(self.input_size, self.output_size)
self.glu_add_norm = GateAddNormNetwork(self.hidden_layer_size,
self.output_size,
self.dropout_rate)
else:
self.glu_add_norm = GateAddNormNetwork(self.hidden_layer_size,
self.hidden_layer_size,
self.dropout_rate)
self.init_weights()
def init_weights(self):
for name, p in self.named_parameters():
if ('W2' in name or 'W3' in name) and 'bias' not in name:
torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu')
elif ('skip_linear' in name or 'W1' in name) and 'bias' not in name:
torch.nn.init.xavier_uniform_(p)
elif 'bias' in name:
torch.nn.init.zeros_(p)
def forward(self, x):
n2 = F.elu(self.W2(x))
n1 = self.W1(n2)
if self.output_size:
output = self.glu_add_norm(n1, self.skip_linear(x))
else:
output = self.glu_add_norm(n1, x)
return output
class VariableSelectionNetwork(nn.Module):
"""**Feature selection module, which inputs a vector stitched into all features, takes the weights of each
feature and multiply with the original input as output. ** """
def __init__(self, hidden_layer_size,
dropout_rate,
output_size,
input_size):
"""
:param hidden_layer_size: The size of nn.Linear layer, global default is 160
:param dropout_rate: The rate of linear layer parameters randomly discarded during training
:param output_size: Number of features
:param input_size: Number of features
"""
super(VariableSelectionNetwork, self).__init__()
self.hidden_layer_size = hidden_layer_size
self.input_size = input_size
self.output_size = output_size
self.dropout_rate = dropout_rate
self.flattened_grn = GatedResidualNetwork(self.hidden_layer_size,
input_size=self.input_size,
output_size=self.output_size,
dropout_rate=self.dropout_rate, )
def forward(self, x):
embedding = x
flatten = torch.flatten(embedding, start_dim=1)
mlp_outputs = self.flattened_grn(flatten)
sparse_weights = F.softmax(mlp_outputs, dim=-1).mean(-2)
combined = sparse_weights * flatten
return combined, sparse_weights
def swish(x):
return x * torch.sigmoid(x)
class SimpleMLP(nn.Module):
"""**The module where the main model is defined. The model consists of GRN and a single layer neural network. The
input discrete features are embedding and real valued features to the GRN module, and then obtains the feature
weight and multiply the output to the single output through the single layer neural network, and then the loss is
calculated with target. ** """
def __init__(self, cat_num_classes, real_num):
"""
:param cat_num_classes: Number of category features
:param real_num: Number of real valued features
"""
super().__init__()
self.categorical_var_embeddings = None
self.cat_num_classes = cat_num_classes
self.cat_size = len(cat_num_classes)
self.input_size = self.cat_size + real_num
self.bn1 = nn.BatchNorm1d(real_num)
self.output = nn.Linear(self.input_size, 1)
self.lin_drop = nn.Dropout(0.25)
self.sparse_weight = None
self.temporal_vsn = VariableSelectionNetwork(hidden_layer_size=160,
input_size=self.input_size,
output_size=self.input_size,
dropout_rate=0.1)
if cat_num_classes:
self.build_cat_embeddings()
def build_cat_embeddings(self):
self.categorical_var_embeddings = nn.ModuleList(
[nn.Embedding(self.cat_num_classes[i], 1) for i in range(len(self.cat_num_classes))])
def forward(self, inputs):
cat_embeddings = []
for i in range(self.cat_size):
e = self.categorical_var_embeddings[i](inputs['cat'][:, i])
cat_embeddings.append(e)
cat_embeddings = torch.cat(cat_embeddings, 1)
real_embeddings = self.bn1(inputs['num'])
x = torch.cat([cat_embeddings, real_embeddings], 1)
x, self.sparse_weight = self.temporal_vsn(x)
x = self.output(swish(x))
return x
class GRN_DATASET(Dataset):
"""**According to the definition of the feature column data type, the corresponding columns are extracted from
the dataset and processed into the format of the model input. ** """
def __init__(self, df_data, _column_definition, mode='train'):
"""
:param df_data: Input data stored in a DataFrame
:param _column_definition: Data type definitions for different feature columns
:param mode: Processing mode; data processed in 'train' mode is used for training, and data processed in 'test' mode for testing
"""
self.mode = mode
self._column_definition = _column_definition
if _column_definition['cat']:
self.ids = np.array(df_data.loc[:, _column_definition['cat']].values.tolist(), dtype=np.int64)
if self._column_definition['num']:
self.vals = np.array(df_data.loc[:, _column_definition['num']].values.tolist(), dtype=np.float32)
if self.mode != 'test':
self.targets = np.array(df_data.loc[:, ['target']].values, dtype=np.float64)
self.len = df_data.shape[0]
def __len__(self):
return self.len
def __getitem__(self, index):
data_map = {}
# print(index)
if self._column_definition['cat']:
data_map['cat'] = self.ids[index]
if self._column_definition['num']:
# print(self.vals[index])
data_map['num'] = self.vals[index]
if self.mode != 'test':
targets_out = self.targets[index]
return data_map, targets_out
else:
return data_map
def train_fn(dataloaders, device, cat_num_classes, real_num):
"""**Training function**"""
model = SimpleMLP(cat_num_classes, real_num).to(device)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(),
lr=1e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=0.5,
patience=0,
mode='min')
epochs = 10
num_train_examples = len(dataloaders['train'])
num_valid_examples = len(dataloaders['valid'])
losses = []
best_loss = np.inf
weights = None
for e in range(epochs):
# train
model.train()
train_loss = 0
num = 0
for i, (maps, targets) in enumerate(dataloaders['train']):
for k, v in maps.items():
maps[k] = v.to(device)
targets = targets.to(device, dtype=torch.float)
yhat = model(maps)
loss = loss_fn(yhat, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
num += len(targets)
train_epoch_loss = train_loss / num_train_examples
# valid
model.eval()
valid_preds = list()
valid_loss = 0
with torch.no_grad():
for i, (maps, targets) in enumerate(dataloaders['valid']):
for k, v in maps.items():
maps[k] = v.to(device)
targets = targets.to(device, dtype=torch.float)
yhat = model(maps)
val_loss = loss_fn(yhat, targets)
valid_loss += val_loss.item()
valid_preds.extend(yhat.detach().cpu().numpy().flatten())
valid_epoch_loss = valid_loss / num_valid_examples
# change lr
scheduler.step(valid_epoch_loss)
losses.append((train_epoch_loss, valid_epoch_loss))
# save model
if best_loss > valid_epoch_loss:
weights = model.sparse_weight.detach().cpu().numpy()
best_loss = valid_epoch_loss
return weights
class GRN_feature_selection():
"""**Each feature weight is output according to the feature column definition.**
Example::
`GRN_FeatureSelection_AutoX <https://www.kaggle.com/code/hengwdai/grn-featureselection-autox>`_
"""
def __init__(self):
self.feature2weight = None
self.new_columns = []
self._real_scaler = None
self._cat_scalers = None
self.weights = None
self.selected_df = None
self._column_definition = None
self._num_classes_per_cat_input = None
def fit(self, df, y, column_definition):
"""
:param df: Input data stored in a DataFrame
:param y: Input single column target stored in a DataFrame
:param column_definition: Data type definitions for different feature columns
"""
self._column_definition = column_definition
# Check that the feature column definitions match the dataframe and convert the corresponding data types; the defined feature columns are extracted into a new df, and all subsequent operations are performed on that new df
df = self.check_column_definition(df, y)
# The scaler and transform_inputs must be used together; they currently take a fairly long time, so a switch for selective use could be added later
self.set_scalers(df)
df = self.transform_inputs(df)
# Feature weights are taken at the best validation loss within each fold, and the weights from the 5 folds are averaged
print('Training weights\n')
kf = KFold(n_splits=5)
for fold_id, (trn_idx, val_idx) in tqdm(enumerate(kf.split(df)), total=5):
df_train = df.iloc[trn_idx]
df_valid = df.iloc[val_idx]
train_set = GRN_DATASET(df_train, self._column_definition, mode='train')
valid_set = GRN_DATASET(df_valid, self._column_definition, mode='valid')
dataloaders = {
'train': DataLoader(train_set, batch_size=1024, num_workers=4, pin_memory=True, shuffle=True),
'valid': DataLoader(valid_set, batch_size=1024, num_workers=4, pin_memory=True, shuffle=False)
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if fold_id != 0:
self.weights += (np.array(
train_fn(dataloaders, device, self._num_classes_per_cat_input,
len(self._column_definition['num'])))) / 5
else:
self.weights = (np.array(
train_fn(dataloaders, device, self._num_classes_per_cat_input,
len(self._column_definition['num'])))) / 5
print('Training has ended and the weights for each feature are as follows; please use "transform(data, top_k=K)" to '
'get the top K most important features\n')
self.feature2weight = pd.DataFrame(self.new_columns[:len(self.weights)], columns=['feature'])
self.feature2weight['weight'] = self.weights
print(self.feature2weight)
def transform(self, df, top_k=10):
"""
:param df: Input data stored in a DataFrame
:param top_k: Number of features used as output which have higher weights
:return: New dataframe contains top_k features which have higher weights
"""
ind = list(np.argpartition(self.weights, -top_k)[-top_k:])
names = list(np.array(self.new_columns)[ind])
self.selected_df = df.loc[:, names]
return self.selected_df
def check_column_definition(self, df, y):
print("""Checking columns' definition\n""")
# Check that the column definitions exist
assert 'cat' in self._column_definition and 'num' in self._column_definition, \
'Lack of established columns of "num" or "cat"'
# Check that the column-name lists are not empty
assert (self._column_definition['cat'] or self._column_definition['num']), \
'A list with the column names cannot be empty'
# Check that each named column exists in the dataframe; if it does, add it to the list of selected column names
for data_type in ['cat', 'num']:
if self._column_definition[data_type]:
for col in self._column_definition[data_type]:
assert col in df, f'The {data_type} column "{col}" not in dataframe'
self.new_columns.append(col)
# Try to convert each column to its corresponding data type; target is assumed to be continuous for now. Consider adding try/except?
y = y.astype(float)
if self._column_definition['num']:
df.loc[:, self._column_definition['num']] = df.loc[:, self._column_definition['num']].apply(
lambda row: row.astype(float))
df = df.loc[:, self.new_columns]
df['target'] = y
# Return the sub-dataset
return df
def set_scalers(self, new_df):
# Use MinMaxScaler for now; other scalers can be added later as needed
print('Setting scalers\n')
self._real_scaler = MinMaxScaler(feature_range=(-1, 1))
if "cat" in self._column_definition:
categorical_scalers = {}
num_classes = []
for col in self._column_definition["cat"]:
# Set all to str so that we don't have mixed integer/string columns
srs = new_df[col].apply(str)
categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(
srs.values)
num_classes.append(srs.nunique())
self._cat_scalers = categorical_scalers
self._num_classes_per_cat_input = num_classes
def transform_inputs(self, new_df):
print('Scaling inputs\n')
if "num" in self._column_definition:
new_df[self._column_definition["num"]] = self._real_scaler.fit_transform(
new_df[self._column_definition["num"]])
# Format categorical inputs
if "cat" in self._column_definition:
for col in self._column_definition["cat"]:
string_df = new_df[col].apply(str)
new_df[col] = self._cat_scalers[col].transform(string_df)
return new_df
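# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The column names and synthetic data below are hypothetical; the
# column_definition format ({'cat': [...], 'num': [...]}) mirrors what
# check_column_definition() above expects.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo_df = pd.DataFrame({
        'color': rng.choice(['red', 'green', 'blue'], size=256),
        'f0': rng.normal(size=256),
        'f1': rng.normal(size=256),
    })
    demo_y = pd.Series(2.0 * demo_df['f0'] + 0.1 * rng.normal(size=256), name='target')
    selector = GRN_feature_selection()
    selector.fit(demo_df, demo_y,
                 column_definition={'cat': ['color'], 'num': ['f0', 'f1']})
    # Keep only the two highest-weighted features
    print(selector.transform(demo_df, top_k=2).head())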
|
3387608f4849ff225141adeb801b9605a781f3be
|
bba02b96608e53bed25eae8fcc30334f238b6a6b
|
/gdown/exceptions.py
|
12104ee37b689123656239965cf38137bbb82ed5
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
wkentaro/gdown
|
de1a3ce1058b3457ed4a3087b70cf620d85e9c5a
|
5c7507f02718048899b85d4010a6ed93316cbf27
|
refs/heads/main
| 2023-07-25T11:25:03.088818
| 2023-04-19T19:39:39
| 2023-04-22T06:02:17
| 44,421,756
| 3,266
| 319
|
MIT
| 2023-09-12T20:53:04
| 2015-10-17T03:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 110
|
py
|
exceptions.py
|
class FileURLRetrievalError(Exception):
pass
class FolderContentsMaximumLimitError(Exception):
pass
|
cf2b247b5bf9eb63ec2f4d995bb18bcb9a6b37f8
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/tensor/statistics/quantile.py
|
6a1f0d6bcf4cd40944daf52c234879b85d7fa213
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 17,579
|
py
|
quantile.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Iterable
import numpy as np
from ... import opcodes as OperandDef
from ...core import ENTITY_TYPE, recursive_tile
from ...core.context import get_context
from ...serialization.serializables import KeyField, AnyField, StringField, BoolField
from ...utils import has_unknown_shape
from ..datasource import tensor as astensor
from ..base import moveaxis, where
from ..indexing import take
from ..arithmetic import isnan, add
from ..reduction import any as tensor_any
from ..operands import TensorOperand, TensorOperandMixin
from ..core import TENSOR_TYPE, TENSOR_CHUNK_TYPE, TensorOrder
from ..utils import check_out_param
from ..array_utils import as_same_device, device
from .core import _ureduce
def _quantile_is_valid(q):
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if q[i] < 0.0 or q[i] > 1.0:
return False
else:
# faster than any()
if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0):
return False
return True
def _quantile_ureduce_func(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
):
a = astensor(a)
out = astensor(out) if out is not None else None
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == "lower":
indices = np.floor(indices).astype(np.intp)
elif interpolation == "higher":
indices = np.ceil(indices).astype(np.intp)
elif interpolation == "midpoint":
indices = 0.5 * (np.floor(indices) + np.ceil(indices))
elif interpolation == "nearest":
indices = np.around(indices).astype(np.intp)
else:
assert interpolation == "linear"
# keep index as fraction and interpolate
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == np.intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = np.concatenate((indices, [-1]))
ap.partition(indices, axis=axis, need_align=True)
# ensure axis with q-th is first
ap = moveaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = np.floor(indices).astype(np.intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = np.concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1 - weights_above
weights_shape = [1] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(
np.concatenate((indices_below, indices_above)), axis=axis, need_align=True
)
# ensure axis with q-th is first
ap = moveaxis(ap, axis, 0)
weights_below = np.moveaxis(weights_below, axis, 0)
weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with q-th is first
x1 = moveaxis(x1, axis, 0)
x2 = moveaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if isinstance(n, TENSOR_TYPE):
if zerod:
if ap.ndim == 1:
r.data = where(tensor_any(n), a.dtype.type(np.nan), r).data
if out is not None:
out.data = r.data
else:
r[:] = where(
tensor_any(n), where(n.squeeze(0), a.dtype.type(np.nan), r), r
)
else:
if r.ndim == 1:
r[:] = where(tensor_any(n), np.full(r.shape, a.dtype.type(np.nan)), r)
else:
r[:] = where(
tensor_any(n),
where(n.repeat(q.size, 0), a.dtype.type(np.nan), r),
r,
)
return r
q_error_msg = "Quantiles must be in the range [0, 1]"
class TensorQuantile(TensorOperand, TensorOperandMixin):
__slots__ = ("q_error_msg",)
_op_type_ = OperandDef.QUANTILE
_a = KeyField("a")
_q = AnyField("q")
_axis = AnyField("axis")
_out = KeyField("out")
_overwrite_input = BoolField("overwrite_input")
_interpolation = StringField("interpolation")
_keepdims = BoolField("keepdims")
def __init__(
self,
q=None,
axis=None,
out=None,
overwrite_input=None,
interpolation=None,
keepdims=None,
**kw,
):
self.q_error_msg = kw.pop("q_error_msg", q_error_msg)
super().__init__(
_q=q,
_axis=axis,
_interpolation=interpolation,
_out=out,
_overwrite_input=overwrite_input,
_keepdims=keepdims,
**kw,
)
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._a = self._inputs[0]
if isinstance(self._q, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._q = self._inputs[1]
if isinstance(self._out, (TENSOR_TYPE, TENSOR_CHUNK_TYPE)):
self._out = self._inputs[-1]
@property
def a(self):
return self._a
@property
def q(self):
return self._q
@property
def axis(self):
return self._axis
@property
def out(self):
return self._out
@property
def overwrite_input(self):
return self._overwrite_input
@property
def interpolation(self):
return self._interpolation
@property
def keepdims(self):
return self._keepdims
def __call__(self, a, q=None, out=None):
shape = [self._q.size] if self._q.ndim > 0 else []
if self._axis is None:
exclude_axes = set(range(a.ndim))
elif isinstance(self._axis, tuple):
exclude_axes = set(self._axis)
else:
exclude_axes = {self._axis}
for ax, s in enumerate(a.shape):
if ax not in exclude_axes:
shape.append(s)
elif self._keepdims:
shape.append(1)
inputs = [a] if q is None else [a, q]
order = TensorOrder.C_ORDER
if out is not None:
inputs.append(out)
order = out.order
shape = out.shape
t = self.new_tensor(inputs, shape=tuple(shape), order=order)
if out is not None:
check_out_param(out, t, "same_kind")
out.data = t.data
return out
else:
return t
@classmethod
def _tile(cls, op, q):
r, k = _ureduce(
op.a,
func=_quantile_ureduce_func,
q=q,
axis=op.axis,
out=op.out,
overwrite_input=op.overwrite_input,
interpolation=op.interpolation,
)
if op.keepdims:
return r.reshape(q.shape + k)
else:
return r
@classmethod
def _tile_one_chunk(cls, op, q):
in_tensor = op.inputs[0]
out_tensor = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_op._q = q
chunk_inputs = [in_tensor.chunks[0]]
if op.out is not None:
chunk_inputs.append(op.out.chunks[0])
chunk = chunk_op.new_chunk(
chunk_inputs,
shape=out_tensor.shape,
index=(0,) * out_tensor.ndim,
order=out_tensor.order,
)
op = op.copy()
return op.new_tensors(
op.inputs,
shape=out_tensor.shape,
order=out_tensor.order,
nsplits=tuple((s,) for s in out_tensor.shape),
chunks=[chunk],
)
@classmethod
def tile(cls, op):
if isinstance(op.q, TENSOR_TYPE):
ctx = get_context()
# get q's data
q_chunk_keys = [c.key for c in op.q.chunks]
try:
q_data = ctx.get_chunks_result(q_chunk_keys)
except KeyError:
# trigger execution of `q`
yield op.q.chunks
q_data = ctx.get_chunks_result(q_chunk_keys)
op._q = q = np.concatenate(q_data)
if not _quantile_is_valid(q):
raise ValueError(op.q_error_msg)
else:
if has_unknown_shape(*op.inputs):
yield
q = np.asarray(op.q)
if len(op.a.chunks) == 1 and (op.out is None or len(op.out.chunks) == 1):
return cls._tile_one_chunk(op, q)
else:
tiled = yield from recursive_tile(cls._tile(op, q))
return [tiled]
@classmethod
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True
)
a = inputs[0]
out = inputs[-1].copy() if op.out is not None else None
with device(device_id):
ctx[op.outputs[0].key] = xp.quantile(
a,
q=op.q,
axis=op.axis,
out=out,
interpolation=op.interpolation,
keepdims=op.keepdims,
)
INTERPOLATION_TYPES = {"linear", "lower", "higher", "midpoint", "nearest"}
def _quantile_unchecked(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
q_error_msg=None,
handle_non_numeric=None,
):
a = astensor(a)
raw_dtype = a.dtype
need_view_back = False
if handle_non_numeric and not np.issubdtype(a.dtype, np.number):
# enable handle_non_numeric is often used
# to handle the datetime-like dtype
a = a.astype("i8")
need_view_back = True
if isinstance(q, ENTITY_TYPE):
q = astensor(q)
# do check in tile
q_input = q
else:
q_input = None
if isinstance(axis, Iterable):
axis = tuple(axis)
if q.ndim > 1:
raise ValueError("`q` should be a scalar or array of float")
if out is not None and not isinstance(out, TENSOR_TYPE):
raise TypeError(f"`out` should be a tensor, got {type(out)}")
if interpolation not in INTERPOLATION_TYPES:
raise ValueError(
"interpolation can only be 'linear', 'lower' "
"'higher', 'midpoint', or 'nearest'"
)
# infer dtype
q_tiny = np.random.rand(2 if q.size % 2 == 0 else 1).astype(q.dtype)
if handle_non_numeric and not np.issubdtype(a.dtype, np.number):
dtype = a.dtype
else:
dtype = np.quantile(
np.empty(1, dtype=a.dtype), q_tiny, interpolation=interpolation
).dtype
op = TensorQuantile(
q=q,
axis=axis,
out=out,
overwrite_input=overwrite_input,
interpolation=interpolation,
keepdims=keepdims,
handle_non_numeric=handle_non_numeric,
q_error_msg=q_error_msg,
dtype=dtype,
gpu=a.op.gpu,
)
ret = op(a, q=q_input, out=out)
if need_view_back:
ret = ret.astype(raw_dtype)
return ret
def quantile(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
**kw,
):
"""
Compute the q-th quantile of the data along the specified axis.
Parameters
----------
a : array_like
Input tensor or object that can be converted to a tensor.
q : array_like of float
Quantile or sequence of quantiles to compute, which must be between
0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The
default is to compute the quantile(s) along a flattened
version of the tensor.
out : Tensor, optional
Alternative output tensor in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
Present only for compatibility with NumPy; it has no effect.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
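For example (an illustrative calculation): for sorted data ``[1, 2, 3, 4]``
and ``q = 0.4`` the fractional index is ``0.4 * 3 = 1.2``, lying between
``i = 2`` and ``j = 3``, which gives 2.2 (linear), 2 (lower), 3 (higher),
2 (nearest) and 2.5 (midpoint).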
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original tensor `a`.
Returns
-------
quantile : scalar or Tensor
If `q` is a single quantile and `axis=None`, then the result
is a scalar. If multiple quantiles are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that tensor is
returned instead.
See Also
--------
mean
percentile : equivalent to quantile, but with q in the range [0, 100].
median : equivalent to ``quantile(..., 0.5)``
nanquantile
Notes
-----
Given a vector ``V`` of length ``N``, the q-th quantile of
``V`` is the value ``q`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the quantile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
same as the maximum if ``q=1.0``.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[10, 7, 4], [3, 2, 1]])
>>> a.execute()
array([[10, 7, 4],
[ 3, 2, 1]])
>>> mt.quantile(a, 0.5).execute()
3.5
>>> mt.quantile(a, 0.5, axis=0).execute()
array([6.5, 4.5, 2.5])
>>> mt.quantile(a, 0.5, axis=1).execute()
array([7., 2.])
>>> mt.quantile(a, 0.5, axis=1, keepdims=True).execute()
array([[7.],
[2.]])
>>> m = mt.quantile(a, 0.5, axis=0)
>>> out = mt.zeros_like(m)
>>> mt.quantile(a, 0.5, axis=0, out=out).execute()
array([6.5, 4.5, 2.5])
>>> m.execute()
array([6.5, 4.5, 2.5])
"""
handle_non_numeric = kw.pop("handle_non_numeric", None)
if len(kw) > 0: # pragma: no cover
raise TypeError(
f"quantile() got an unexpected keyword argument '{next(iter(kw))}'"
)
if not isinstance(q, ENTITY_TYPE):
q = np.asanyarray(q)
# do check instantly if q is not a tensor
if not _quantile_is_valid(q):
raise ValueError(q_error_msg)
return _quantile_unchecked(
a,
q,
axis=axis,
out=out,
overwrite_input=overwrite_input,
interpolation=interpolation,
keepdims=keepdims,
handle_non_numeric=handle_non_numeric,
)
|
a7780c1a3d58814f6ae10744c67788f9065fea77
|
40195e6f86bf8620850f0c56e98eae5693e88277
|
/coremltools/test/blob/test_weights.py
|
72bb061d1e2c460f259746dc16273bf6ca52cbc0
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
apple/coremltools
|
009dfa7154d34cab8edcafa618e689e407521f50
|
feed174188f7773631a3d574e1ff9889a135c986
|
refs/heads/main
| 2023-09-01T23:26:13.491955
| 2023-08-31T18:44:31
| 2023-08-31T18:44:31
| 95,862,535
| 3,742
| 705
|
BSD-3-Clause
| 2023-09-14T17:33:58
| 2017-06-30T07:39:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
test_weights.py
|
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import shutil
import tempfile
import unittest
import numpy as np
from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader
from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter
class WeightTest(unittest.TestCase):
def setUp(self):
self.working_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.working_dir):
shutil.rmtree(self.working_dir)
def test_weight_blob_int8(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([-5, -2, 0, 2, 5], dtype=np.int8)
offset = writer.write_int8_data(input_arr)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr = reader.read_int8_data(offset)
np.testing.assert_equal(input_arr, output_arr)
def test_weight_blob_uint8(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([1, 2, 3, 4, 5], dtype=np.uint8)
offset = writer.write_uint8_data(input_arr)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr = reader.read_uint8_data(offset)
np.testing.assert_almost_equal(input_arr, output_arr)
def test_weight_blob_int16(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([-5, -2, 0, 2, 5], dtype=np.int16)
offset = writer.write_int16_data(input_arr)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr = reader.read_int16_data(offset)
np.testing.assert_equal(input_arr, output_arr)
def test_weight_blob_uint16(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([1, 2, 3, 4, 5], dtype=np.uint16)
offset = writer.write_uint16_data(input_arr)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr = reader.read_uint16_data(offset)
np.testing.assert_almost_equal(input_arr, output_arr)
def test_weight_blob_fp16(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([2.3, 4.6, 7.9], dtype=np.float16)
input_arr_to_bytes_uint16 = np.frombuffer(input_arr.tobytes(), np.uint16)
offset = writer.write_fp16_data(input_arr_to_bytes_uint16)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr_uint16 = reader.read_fp16_data(offset)
output_arr = np.frombuffer(output_arr_uint16.tobytes(), np.float16)
np.testing.assert_almost_equal(input_arr, output_arr)
def test_weight_blob_fp32(self):
writer = BlobWriter(self.working_dir + "/net.wt")
input_arr = np.array([1.0, 2.4, 3.9, -4.8, 5.2], dtype=np.float32)
offset = writer.write_float_data(input_arr)
writer = None
reader = BlobReader(self.working_dir + "/net.wt")
output_arr = reader.read_float_data(offset)
np.testing.assert_almost_equal(input_arr, output_arr)
if __name__ == "__main__":
unittest.main()
|
d4a60caa27aac25ce29f565863f7a205aa594428
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Sklearn_x86/source/setuptools/command/__init__.py
|
b966dcea57a2072f98b96dbba75ceb26bd26d2dd
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
__init__.py
|
from distutils.command.bdist import bdist
import sys
if 'egg' not in bdist.format_commands:
bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
bdist.format_commands.append('egg')
del bdist, sys
|
ee9ff4bc3f717017e7254986f6b08c3dca8fd34a
|
e18f61d2655035bd0da8f839bf9378d8d1869eb4
|
/pyshacl/extras/js/js_executable.py
|
84960f12bd102d7246822df93682fc9991dac90e
|
[
"Apache-2.0"
] |
permissive
|
RDFLib/pySHACL
|
0cdbc89fa44c348bfcc6a787355e8e9fcb5a60b8
|
a0f5647a0f84f9bd6a57b627071d6dab2a1144e2
|
refs/heads/master
| 2023-08-28T02:31:15.979762
| 2023-08-14T23:16:40
| 2023-08-14T23:16:40
| 147,505,799
| 225
| 65
|
Apache-2.0
| 2023-09-05T23:45:01
| 2018-09-05T11:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,125
|
py
|
js_executable.py
|
#
#
import typing
from typing import Dict
from rdflib import Literal
from pyshacl.consts import SH, SH_jsFunctionName, SH_jsLibrary
from pyshacl.errors import ConstraintLoadError
from .context import SHACLJSContext
if typing.TYPE_CHECKING:
from pyshacl.shapes_graph import ShapesGraph
SH_jsLibraryURL = SH.jsLibraryURL
class JSExecutable(object):
__slots__ = ("sg", "node", "fn_name", "libraries")
def __new__(cls, shapes_graph: 'ShapesGraph', node):
return super(JSExecutable, cls).__new__(cls)
def __init__(self, shapes_graph: 'ShapesGraph', node):
self.node = node
self.sg = shapes_graph
fn_names = set(shapes_graph.objects(node, SH_jsFunctionName))
if len(fn_names) < 1:
raise ConstraintLoadError(
"At least one sh:jsFunctionName must be present on a JS Executable.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
elif len(fn_names) > 1:
raise ConstraintLoadError(
"At most one sh:jsFunctionName can be present on a JS Executable.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
fn_name = next(iter(fn_names))
if not isinstance(fn_name, Literal):
raise ConstraintLoadError(
"sh:jsFunctionName must be an RDF Literal with type xsd:string.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
else:
fn_name = str(fn_name)
self.fn_name = fn_name
library_defs = shapes_graph.objects(node, SH_jsLibrary)
seen_library_defs = []
libraries: Dict = {}
for libn in library_defs:
# Library defs can only do two levels deep for now.
# TODO: Make this recursive somehow to some further depth
if libn in seen_library_defs:
continue
if isinstance(libn, Literal):
raise ConstraintLoadError(
"sh:jsLibrary must not have a value that is a Literal.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
seen_library_defs.append(libn)
jsLibraryURLs = list(shapes_graph.objects(libn, SH_jsLibraryURL))
if len(jsLibraryURLs) > 0:
libraries[libn] = libraries.get(libn, [])
for u in jsLibraryURLs:
if not isinstance(u, Literal):
raise ConstraintLoadError(
"sh:jsLibraryURL must have a value that is a Literal.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
libraries[libn].append(str(u))
library_defs2 = shapes_graph.objects(libn, SH_jsLibrary)
for libn2 in library_defs2:
if libn2 in seen_library_defs:
continue
if isinstance(libn2, Literal):
raise ConstraintLoadError(
"sh:jsLibrary must not have a value that is a Literal.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
seen_library_defs.append(libn2)
jsLibraryURLs2 = list(shapes_graph.objects(libn2, SH_jsLibraryURL))
if len(jsLibraryURLs2) > 0:
libraries[libn2] = libraries.get(libn2, [])
for u2 in jsLibraryURLs2:
if not isinstance(u2, Literal):
raise ConstraintLoadError(
"sh:jsLibraryURL must have a value that is a Literal.",
"https://www.w3.org/TR/shacl-js/#dfn-javascript-executables",
)
libraries[libn2].append(str(u2))
self.libraries = libraries
def execute(self, data_graph, args_map, *args, mode=None, return_type=None, **kwargs):
"""
:param data_graph:
:param args_map:
:param args:
:param mode:
:param return_type:
:param kwargs:
:return:
:rtype: dict
"""
if mode == "function":
ctx = SHACLJSContext(data_graph, shapes_graph=None, **kwargs)
else:
ctx = SHACLJSContext(data_graph, shapes_graph=self.sg, **kwargs)
for lib_node, lib_urls in self.libraries.items():
for lib_url in lib_urls:
ctx.load_js_library(lib_url)
fn_args = ctx.get_fn_args(self.fn_name, args_map)
rvals = ctx.run_js_function(self.fn_name, fn_args)
res = rvals['_result']
if mode == "function":
rvals['_result'] = ctx.build_results_as_shacl_function(res, return_type)
elif mode == "construct":
rvals['_result'] = ctx.build_results_as_construct(res)
elif mode == 'target':
rvals['_result'] = ctx.build_results_as_target(res)
else:
rvals['_result'] = ctx.build_results_as_constraint(res)
return rvals
|
599019f70555d2956418a7442116ec69c239e5e5
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/src/programy/storage/entities/store.py
|
16c348cc4fb4325297d2d0f2c2d68415c4d90b8f
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 6,803
|
py
|
store.py
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import os.path
from programy.storage.utils.processors import TextFile
from programy.storage.utils.processors import CSVFileReader
from programy.utils.logging.ylogger import YLogger
class Store:
TEXT_FORMAT = "text"
CSV_FORMAT = "csv"
XML_FORMAT = "xml"
BINARY_FORMAT = "bin"
YAML_FORMAT = "yaml"
def __init__(self):
pass # pragma: no cover
def empty(self):
return # pragma: no cover
def empty_named(self, name):
del name # pragma: no cover
return # pragma: no cover
def commit(self, commit=True):
return # pragma: no cover
def rollback(self, commit=True):
return # pragma: no cover
def get_split_char(self):
return ","
def split_into_fields(self, line):
return line.split(",")
def process_line(self, name, fields, verbose=False):
del name # pragma: no cover
del fields # pragma: no cover
del verbose # pragma: no cover
return False
def upload_from_text(self, name, text, commit=True):
try:
lines = text.split('\n')
for line in lines:
line = line.strip()
if line and len(line)>0:
fields = self.split_into_fields(line)
self.process_line(name, fields)
self.commit(commit)
except Exception as e:
YLogger.exception_nostack(self, "Error loading from text", e)
self.rollback(commit)
@staticmethod
def get_file_processor(fileformat, filename):
if fileformat == Store.TEXT_FORMAT:
return TextFile(filename)
elif fileformat == Store.CSV_FORMAT:
return CSVFileReader(filename)
else:
raise Exception("Unknown file format [%s]" % fileformat)
@staticmethod
def get_just_filename_from_filepath(filepath):
if os.sep in filepath:
pathsplits = filepath.split(os.sep)
filename_ext = pathsplits[-1]
else:
filename_ext = filepath
if "." in filename_ext:
filesplits = filename_ext.split(".")
filename = filesplits[0]
else:
filename = filename_ext
return filename.upper()
def upload_from_directory(self, directory, fileformat=TEXT_FORMAT, extension=None, subdir=True, commit=True,
verbose=False):
final_count = 0
final_success = 0
try:
if subdir is False:
paths = os.listdir(directory)
for filename in paths:
fullpath = os.path.join(directory, filename)
if os.path.isdir(fullpath) is False:
if extension is not None:
if filename.endswith(extension):
count, success = self.upload_from_file(fullpath, fileformat=fileformat, commit=commit,
verbose=verbose)
final_count += count
final_success += success
else:
count, success = self.upload_from_file(fullpath, fileformat=fileformat, commit=commit,
verbose=verbose)
final_count += count
final_success += success
else:
for dirpath, _, filenames in os.walk(directory):
for filename in filenames:
if extension is not None:
if filename.endswith(extension):
count, success = self.upload_from_file(os.path.join(dirpath, filename),
fileformat=fileformat, commit=commit,
verbose=verbose)
final_count += count
final_success += success
else:
count, success = self.upload_from_file(os.path.join(dirpath, filename),
fileformat=fileformat, commit=commit,
verbose=verbose)
final_count += count
final_success += success
self.commit(commit)
except Exception as e:
YLogger.exception_nostack(self, "Error loading from directory", e)
self.rollback(commit)
return final_count, final_success
def upload_from_file(self, filename, fileformat=TEXT_FORMAT, commit=True, verbose=False):
file_processor = None
final_count = 0
final_success = 0
try:
name = Store.get_just_filename_from_filepath(filename)
file_processor = Store.get_file_processor(fileformat, filename)
count, success = file_processor.process_lines(name, self, verbose=verbose)
final_count += count
final_success += success
self.commit(commit)
except Exception as e:
YLogger.exception_nostack(self, "Error uploading from file", e)
self.rollback(commit=commit)
finally:
if file_processor is not None:
file_processor.close()
return final_count, final_success
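# --- Illustrative sketch (added for this dump, not part of the original module) ---
# A minimal Store subclass, assuming only the base class shown above: it keeps the
# parsed fields in memory so the upload_from_text() flow can be exercised directly.
class InMemoryStore(Store):
    def __init__(self):
        Store.__init__(self)
        self.rows = []
    def process_line(self, name, fields, verbose=False):
        self.rows.append((name, fields))
        return True
# Example:
#   store = InMemoryStore()
#   store.upload_from_text("GREETINGS", "hello, world\nbye, world")
#   store.rows == [("GREETINGS", ["hello", " world"]), ("GREETINGS", ["bye", " world"])]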
|
cc920b7da65040bb148974b11f90e311ca7fd34b
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/前端笔记/牛客/牛客网前端笔试题/算法题/美团2016研发工程师编程题/字符编码-哈夫曼树建树.py
|
0fa1c7043d4f4ac25cf651d2c35fc009a4e80a2b
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
字符编码-哈夫曼树建树.py
|
# Design an algorithm that encodes a string in binary such that the encoded string is as short as possible.
# Output the shortest encoded length on a single line.
# n <= 1000
# Solution 1: directly simulate building the Huffman tree
from collections import Counter
from heapq import heapify, heappop, heappush
class Node:
def __init__(self, weight: int, value: str = None, left: 'Node' = None, right: 'Node' = None):
self.weight = weight
self.value = value
self.left = left
self.right = right
def __lt__(self, other: 'Node') -> bool:
return self.weight < other.weight
def __eq__(self, other: 'Node') -> bool:
return self.weight == other.weight
def main1(string: str) -> None:
def dfs(root: Node, depth: int) -> None:
nonlocal res
if not root:
return
if root.value is not None:
res += root.weight * depth
root.left and dfs(root.left, depth + 1)
root.right and dfs(root.right, depth + 1)
chars = list(string)
counter = Counter(chars)
pq = []
for value, weight in counter.items():
pq.append((weight, Node(weight, value)))
heapify(pq)
while len(pq) >= 2:
_, left = heappop(pq)
_, right = heappop(pq)
parent = Node(left.weight + right.weight, None, left, right)
heappush(pq, (parent.weight, parent))
root = pq[0][1]
res = 0
dfs(root, 0)
print(res)
# It is enough to push the character counts; there is no need to actually build the tree
def main2(string: str) -> None:
chars = list(string)
counter = Counter(chars)
pq = []
for _, weight in counter.items():
        pq.append(weight)
heapify(pq)
res = 0
while len(pq) >= 2:
left = heappop(pq)
right = heappop(pq)
res += left + right
heappush(pq, left + right)
print(res)
while True:
try:
string = input()
main2(string)
except EOFError:
break
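# --- Worked example (added for illustration, not part of the original solution) ---
# For the input "aabbbcc" the character counts are {a: 2, b: 3, c: 2}.
# main2 pushes [2, 3, 2] onto the heap and repeatedly merges the two smallest counts:
#   pop 2 and 2 -> res = 4, push 4      (heap becomes [3, 4])
#   pop 3 and 4 -> res = 4 + 7 = 11
# The minimal encoded length is therefore 11, matching the Huffman tree built by
# main1 (code depths a = 2, c = 2, b = 1, so 2*2 + 2*2 + 3*1 = 11).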
|
ad0885c2a29f8ea7327f3de8902216bbba9549ca
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/1007-Minimum-Domino-Rotations-For-Equal-Row/1007.py
|
ca9fbcf078105450033ba968d7e75e03585e988d
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772
| 2023-05-24T11:57:20
| 2023-05-24T11:57:20
| 132,753,892
| 1,575
| 569
| null | 2023-05-24T11:57:22
| 2018-05-09T12:30:59
|
C++
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
1007.py
|
from typing import List
class Solution:
def minDominoRotations(self, A: List[int], B: List[int]) -> int:
for i in range(1,7):
if all(i == a or i == b for a,b in zip(A,B)):
return min(len(A)-A.count(i),len(B)-B.count(i))
return -1
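# --- Illustrative check (added for illustration, not part of the original submission) ---
# With A = [2, 1, 2, 4, 2, 2] and B = [5, 2, 6, 2, 3, 2] every column contains a 2,
# so the answer is min(6 - A.count(2), 6 - B.count(2)) = min(2, 3) = 2:
#   Solution().minDominoRotations([2, 1, 2, 4, 2, 2], [5, 2, 6, 2, 3, 2])  # -> 2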
|
c43b8754119ab9e2f5eceaac5c6312174d96f260
|
30674cc03db1e93c0d5a6ff213b528d8ea70bb6a
|
/tests/test_dummy_device.py
|
428e3c354fef219d42a1e5d3d6f8b2f713fcf90b
|
[
"MIT"
] |
permissive
|
rm-hull/luma.core
|
6df4db6f6886a562dca9eec82e3cc42fe4dd5f98
|
d871d66644288b788641af0b3a20d3a97583dd70
|
refs/heads/master
| 2023-03-04T01:37:36.636573
| 2023-02-21T07:52:03
| 2023-02-21T07:52:03
| 78,548,891
| 134
| 61
|
MIT
| 2023-09-01T20:59:12
| 2017-01-10T15:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
test_dummy_device.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:class:`luma.core.device.dummy` class.
"""
from PIL import Image
from luma.core.render import canvas
from luma.core.device import dummy
import baseline_data
from helpers import get_reference_image, assert_identical_image
def test_capture_noops():
device = dummy()
# All these should have no effect
device.hide()
device.show()
device.cleanup()
device.contrast(123)
device.command(1, 2, 4, 4)
device.data([1, 2, 4, 4])
def test_portrait():
img_path = get_reference_image('portrait.png')
with open(img_path, 'rb') as p:
reference = Image.open(p)
device = dummy(rotate=1)
# Use the same drawing primitives as the demo
with canvas(device) as draw:
baseline_data.primitives(device, draw)
assert_identical_image(reference, device.image, img_path)
def test_dither():
img_path = get_reference_image('dither.png')
with open(img_path, 'rb') as p:
reference = Image.open(p)
device = dummy(mode="1")
with canvas(device, dither=True) as draw:
draw.rectangle((0, 0, 64, 32), fill="red")
draw.rectangle((64, 0, 128, 32), fill="yellow")
draw.rectangle((0, 32, 64, 64), fill="blue")
draw.rectangle((64, 32, 128, 64), fill="white")
assert_identical_image(reference, device.image, img_path)
|
30c891718d616d75608c360da43d927cf3eefc4e
|
1b364500b756c5096d94358d3ad745e248c20dc4
|
/wradlib/io/gdal.py
|
02b81ff49dc99939c10e834167d88c5b0b843b0a
|
[
"MIT"
] |
permissive
|
wradlib/wradlib
|
fdf3b3670aa8b2ea6ddf4bb6083321992eb361a9
|
17f876c2c6257171888d6e04f5cbb86f0ac46f90
|
refs/heads/main
| 2023-08-26T09:07:45.866267
| 2023-05-31T06:12:50
| 2023-05-31T06:12:50
| 52,089,638
| 228
| 89
|
MIT
| 2023-09-11T23:29:48
| 2016-02-19T13:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 22,966
|
py
|
gdal.py
|
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
GDAL Raster/Vector Data I/O
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"open_vector",
"open_raster",
"read_safnwc",
"gdal_create_dataset",
"write_raster_dataset",
"VectorSource",
]
__doc__ = __doc__.format("\n ".join(__all__))
import os
import tempfile
import warnings
import numpy as np
from wradlib import georef
from wradlib.util import import_optional
osr = import_optional("osgeo.osr")
ogr = import_optional("osgeo.ogr")
gdal = import_optional("osgeo.gdal")
# check windows
isWindows = os.name == "nt"
def open_vector(filename, driver=None, layer=0):
"""Open vector file, return gdal.Dataset and OGR.Layer
.. warning:: dataset and layer have to live in the same context,
if dataset is deleted all layer references will get lost
Parameters
----------
filename : str
vector file name
driver : str
gdal driver string
layer : int or str
Returns
-------
dataset : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.Dataset
layer : :py:class:`gdal:osgeo.ogr.Layer`
ogr.Layer
"""
dataset = gdal.OpenEx(filename)
if driver:
gdal.GetDriverByName(driver)
layer = dataset.GetLayer(layer)
return dataset, layer
def open_raster(filename, driver=None):
"""Open raster file, return gdal.Dataset
Parameters
----------
filename : str
raster file name
driver : str
gdal driver string
Returns
-------
dataset : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.Dataset
"""
dataset = gdal.OpenEx(filename)
if driver:
gdal.GetDriverByName(driver)
return dataset
def read_safnwc(filename):
"""Read MSG SAFNWC hdf5 file into a gdal georeferenced object
Parameters
----------
filename : str
satellite file name
Returns
-------
ds : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.DataSet with satellite data
"""
root = gdal.Open(filename)
ds1 = gdal.Open("HDF5:" + filename + "://CT")
ds = gdal.GetDriverByName("MEM").CreateCopy("out", ds1, 0)
try:
proj = osr.SpatialReference()
proj.ImportFromProj4(ds.GetMetadata()["PROJECTION"])
except KeyError:
raise KeyError("WRADLIB: Projection is missing for satellite file {filename}")
geotransform = root.GetMetadata()["GEOTRANSFORM_GDAL_TABLE"].split(",")
geotransform[0] = root.GetMetadata()["XGEO_UP_LEFT"]
geotransform[3] = root.GetMetadata()["YGEO_UP_LEFT"]
ds.SetProjection(proj.ExportToWkt())
ds.SetGeoTransform([float(x) for x in geotransform])
return ds
def gdal_create_dataset(
drv, name, cols=0, rows=0, bands=0, gdal_type=None, remove=False
):
"""Creates GDAL.DataSet object.
Parameters
----------
drv : str
GDAL driver string
name : str
path to filename
cols : int
# of columns
rows : int
# of rows
bands : int
# of raster bands
gdal_type : :py:class:`gdal:osgeo.ogr.DataType`
raster data type eg. gdal.GDT_Float32
remove : bool
if True, existing gdal.Dataset will be
removed before creation
Returns
-------
out : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.Dataset
"""
if gdal_type is None:
gdal_type = gdal.GDT_Unknown
driver = gdal.GetDriverByName(drv)
metadata = driver.GetMetadata()
if not metadata.get("DCAP_CREATE", False):
raise TypeError(f"WRADLIB: Driver {drv} doesn't support Create() method.")
if remove:
if os.path.exists(name):
driver.Delete(name)
ds = driver.Create(name, cols, rows, bands, gdal_type)
return ds
def write_raster_dataset(fpath, dataset, rformat, options=None, remove=False):
"""Write raster dataset to file format
Parameters
----------
fpath : str
A file path - should have file extension corresponding to format.
dataset : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.Dataset gdal raster dataset
rformat : str
gdal raster format string
options : list
List of option strings for the corresponding format.
remove : bool
if True, existing gdal.Dataset will be
removed before creation
Note
----
For format and options refer to
`formats_list <https://gdal.org/formats_list.html>`_.
Examples
--------
See :ref:`/notebooks/fileio/wradlib_gis_export_example.ipynb`.
"""
# check for option list
if options is None:
options = []
driver = gdal.GetDriverByName(rformat)
metadata = driver.GetMetadata()
# check driver capability
if not ("DCAP_CREATECOPY" in metadata and metadata["DCAP_CREATECOPY"] == "YES"):
raise TypeError(
f"WRADLIB: Raster Driver {rformat} doesn't support CreateCopy() method."
)
if remove:
if os.path.exists(fpath):
driver.Delete(fpath)
target = driver.CreateCopy(fpath, dataset, 0, options)
del target
class VectorSource:
"""DataSource class for handling ogr/gdal vector data
    DataSource creates an in-memory (vector) OGR DataSource object with
    one layer for point or polygon geometries.
Parameters
----------
data : sequence or str
sequence of source points (shape Nx2) or polygons (shape NxMx2) or
Vector File (GDAL/OGR) filename containing source points/polygons
srs : :py:class:`gdal:osgeo.osr.SpatialReference`
SRS describing projection source data should be projected to
Keyword Arguments
-----------------
name : str
Layer Name, defaults to "layer".
source : int
Number of layer to load, if multiple layers in source shape file.
mode : str
Return type of class access functions/properties.
Can be either of "numpy", "geo" and "ogr", defaults to "numpy".
projection_source : :py:class:`gdal:osgeo.osr.SpatialReference`
SRS describing projection source in which data is provided in.
Warning
-------
    Writing shapefiles with the wrong locale settings can have an impact on the
    decimal separator. If problems arise, use ``LC_NUMERIC=C`` in your environment.
Examples
--------
See :ref:`/notebooks/fileio/wradlib_vector_data.ipynb`.
"""
def __init__(self, data=None, srs=None, name="layer", source=0, **kwargs):
self._srs = srs
self._name = name
self._geo = None
self._mode = kwargs.get("mode", "numpy")
self._src_srs = kwargs.get("projection_source", None)
if data is not None:
if isinstance(data, (np.ndarray, list)):
self._ds = self._check_src(data)
else:
self.load_vector(data, source=source)
self._create_spatial_index()
else:
self._ds = None
def close(self):
if self._geo is not None:
self._geo = None
if self.ds is not None:
fname = self.ds.GetDescription()
driver = self.ds.GetDriver()
self.ds = None
driver.Delete(fname)
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __iter__(self):
"""Return Layer Feature Iterator."""
if self._mode == "ogr":
lyr = self.ds.GetLayer()
return iter(lyr)
elif self._mode == "geo":
return self.geo.iterrows()
else:
lyr = self.ds.GetLayer()
def _get_geom(feat):
return georef.ogr_to_numpy(feat.GetGeometryRef())
return iter(map(_get_geom, lyr))
def __len__(self):
lyr = self.ds.GetLayer()
return lyr.GetFeatureCount()
def __repr__(self):
lyr = self.ds.GetLayer()
summary = [f"<wradlib.{type(self).__name__}>"]
geom_type = f"Type: {ogr.GeometryTypeToName(lyr.GetGeomType())}"
summary.append(geom_type)
geoms = f"Geometries: {len(self)}"
summary.append(geoms)
return "\n".join(summary)
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def ds(self):
"""Returns VectorSource"""
self._check_ds()
return self._ds
@ds.setter
def ds(self, value):
self._ds = value
def _check_ds(self):
"""Raise ValueError if empty VectorSource"""
if self._ds is None:
raise ValueError("Trying to access empty VectorSource.")
@property
def extent(self):
return self.ds.GetLayer().GetExtent()
@property
def crs(self):
return self.ds.GetLayer().GetSpatialRef()
@property
def data(self):
"""Returns VectorSource geometries as numpy arrays
Note
----
This may be slow, because it extracts all source polygons
"""
lyr = self.ds.GetLayer()
lyr.ResetReading()
lyr.SetSpatialFilter(None)
lyr.SetAttributeFilter(None)
return self._get_data()
@property
def geo(self):
"""Returns VectorSource geometries as GeoPandas Dataframe"""
self._check_ds()
if self._geo is None:
geopandas = import_optional("geopandas")
self._geo = geopandas.read_file(self.ds.GetDescription())
return self._geo
def _get_data(self, mode=None):
"""Returns DataSource geometries
Keyword Arguments
-----------------
mode : str
return type ("numpy", "geo", "ogr"), defaults to "numpy"
"""
if mode is None:
mode = self._mode
lyr = self.ds.GetLayer()
sources = []
for feature in lyr:
geom = feature.GetGeometryRef()
if mode == "numpy":
poly = georef.vector.ogr_to_numpy(geom)
sources.append(poly)
else:
poly = geom
sources.append(poly)
return np.array(sources, dtype=object)
def get_data_by_idx(self, idx, mode=None):
"""Returns DataSource geometries from given index
Parameters
----------
idx : sequence
sequence of int indices
Keyword Arguments
-----------------
mode : str
return type ("numpy", "geo", "ogr"), defaults to "numpy"
"""
if mode is None:
mode = self._mode
if mode == "geo":
if isinstance(idx, (list, slice)):
return self.geo.loc[idx]
elif np.isscalar(idx):
return self.geo.iloc[idx]
else:
return self.geo.loc[idx]
lyr = self.ds.GetLayer()
lyr.ResetReading()
lyr.SetSpatialFilter(None)
lyr.SetAttributeFilter(None)
sources = []
for i in idx:
feature = lyr.GetFeature(i)
geom = feature.GetGeometryRef()
poly = georef.vector.ogr_to_numpy(geom)
# need to recreate the geometry because access is lost if layer gets out of scope
if mode == "ogr":
poly = georef.vector.numpy_to_ogr(
poly, geom.GetGeometryName().capitalize()
)
sources.append(poly)
return np.array(sources, dtype=object)
def get_data_by_att(self, attr=None, value=None, mode=None):
"""Returns DataSource geometries filtered by given attribute/value
Keyword Arguments
-----------------
attr : str
attribute name
value : str
attribute value
mode : str
return type ("numpy", "geo", "ogr"), defaults to "numpy"
"""
if mode is None:
mode = self._mode
if np.isscalar(value):
sql = f"{attr}={value}"
else:
sql = f"{attr} in {tuple(value)}"
if mode == "geo":
return self.geo.query(sql)
lyr = self.ds.GetLayer()
lyr.ResetReading()
lyr.SetSpatialFilter(None)
lyr.SetAttributeFilter(sql)
return self._get_data(mode=mode)
def get_data_by_geom(self, geom=None, mode=None):
"""Returns DataSource geometries filtered by given geometry
Keyword Arguments
-----------------
geom : :py:class:`gdal:osgeo.ogr.Geometry` | :py:class:`geopandas.GeoDataFrame`
OGR.Geometry object or geopandas.GeoDataFrame containing the Geometry
mode : str
return type ("numpy", "geo", "ogr"), defaults to "numpy"
"""
if mode is None:
mode = self._mode
if mode == "geo":
return self.geo[self.geo.within(geom)]
lyr = self.ds.GetLayer()
lyr.ResetReading()
lyr.SetAttributeFilter(None)
lyr.SetSpatialFilter(geom)
return self._get_data(mode=mode)
def _create_spatial_index(self):
"""Creates spatial index file .qix"""
sql1 = f"DROP SPATIAL INDEX ON {self._name}"
sql2 = f"CREATE SPATIAL INDEX ON {self._name}"
self.ds.ExecuteSQL(sql1)
self.ds.ExecuteSQL(sql2)
def _create_table_index(self, col):
"""Creates attribute index files"""
sql1 = f"DROP INDEX ON {self._name}"
sql2 = f"CREATE INDEX ON {self._name} USING {col}"
self.ds.ExecuteSQL(sql1)
self.ds.ExecuteSQL(sql2)
def _check_src(self, src):
"""Basic check of source elements (sequence of points or polygons).
- array cast of source elements
- create ogr_src datasource/layer holding src points/polygons
- transforming source grid points/polygons to ogr.geometries
on ogr.layer
"""
tmpfile = tempfile.NamedTemporaryFile(mode="w+b").name
ogr_src = gdal_create_dataset(
"ESRI Shapefile", os.path.join("/vsimem", tmpfile), gdal_type=gdal.OF_VECTOR
)
src = np.array(src)
if self._src_srs is not None:
src = georef.reproject(
src, projection_source=self._src_srs, projection_target=self._srs
)
# create memory datasource, layer and create features
if src.ndim == 2:
geom_type = ogr.wkbPoint
# no Polygons, just Points
else:
geom_type = ogr.wkbPolygon
fields = [("index", ogr.OFTInteger)]
georef.vector.ogr_create_layer(
ogr_src, self._name, srs=self._srs, geom_type=geom_type, fields=fields
)
georef.vector.ogr_add_feature(ogr_src, src, name=self._name)
return ogr_src
def dump_vector(self, filename, driver="ESRI Shapefile", remove=True):
"""Output layer to OGR Vector File
Parameters
----------
filename : str
path to shape-filename
Keyword Arguments
-----------------
driver : str
driver string
remove : bool
if True removes existing output file
"""
ds_out = gdal_create_dataset(
driver, filename, gdal_type=gdal.OF_VECTOR, remove=remove
)
georef.vector.ogr_copy_layer(self.ds, 0, ds_out)
# flush everything
del ds_out
def load_vector(self, filename, source=0, driver="ESRI Shapefile"):
"""Read Layer from OGR Vector File
Parameters
----------
filename : str
path to shape-filename
Keyword Arguments
-----------------
source : int or str
number or name of wanted layer, defaults to 0
driver : str
driver string
"""
tmpfile = tempfile.NamedTemporaryFile(mode="w+b").name
self.ds = gdal_create_dataset(
"ESRI Shapefile", os.path.join("/vsimem", tmpfile), gdal_type=gdal.OF_VECTOR
)
# get input file handles
ds_in, tmp_lyr = open_vector(filename, driver=driver, layer=source)
# get spatial reference object
srs = tmp_lyr.GetSpatialRef()
# fall back to given projection
if srs is None:
srs = self._src_srs
# raise error as we can't do anything about it
if self._srs is None and srs is None:
raise ValueError(
f"Spatial reference missing from source file {filename}. "
f"Please provide a fitting spatial reference object"
)
# this will be combined with the above the future to raise unconditionally
if srs is None:
warnings.warn(
f"Spatial reference missing from source file {filename}. "
f"This will raise an error from wradlib version 2.0",
FutureWarning,
)
# reproject layer if necessary
if self._srs is not None and srs is not None and srs != self._srs:
ogr_src_lyr = self.ds.CreateLayer(
self._name, self._srs, geom_type=ogr.wkbPolygon
)
georef.vector.ogr_reproject_layer(
tmp_lyr, ogr_src_lyr, self._srs, src_srs=srs
)
else:
# copy layer
ogr_src_lyr = self.ds.CopyLayer(tmp_lyr, self._name)
if self._srs is None:
self._srs = srs
# flush everything
del ds_in
def dump_raster(
self, filename, driver="GTiff", attr=None, pixel_size=1.0, remove=True, **kwargs
):
"""Output layer to GDAL Rasterfile
Parameters
----------
filename : str
path to shape-filename
Keyword Arguments
-----------------
driver : str
GDAL Raster Driver
attr : str
attribute to burn into raster
pixel_size : float
pixel Size in source units
remove : bool
if True removes existing output file
silent : bool
If True no ProgressBar is shown. Defaults to False.
"""
silent = kwargs.pop("silent", False)
progress = None if (silent or isWindows) else gdal.TermProgress
layer = self.ds.GetLayer()
layer.ResetReading()
x_min, x_max, y_min, y_max = layer.GetExtent()
cols = int((x_max - x_min) / pixel_size)
rows = int((y_max - y_min) / pixel_size)
# Todo: at the moment, always writing floats
ds_out = gdal_create_dataset(
"MEM", "", cols, rows, 1, gdal_type=gdal.GDT_Float32
)
ds_out.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
proj = layer.GetSpatialRef()
if proj is None:
proj = self._srs
ds_out.SetProjection(proj.ExportToWkt())
band = ds_out.GetRasterBand(1)
band.FlushCache()
if attr is not None:
gdal.RasterizeLayer(
ds_out,
[1],
layer,
burn_values=[0],
options=[f"ATTRIBUTE={attr}", "ALL_TOUCHED=TRUE"],
callback=progress,
)
else:
gdal.RasterizeLayer(
ds_out,
[1],
layer,
burn_values=[1],
options=["ALL_TOUCHED=TRUE"],
callback=progress,
)
write_raster_dataset(filename, ds_out, driver, remove=remove)
del ds_out
def set_attribute(self, name, values, reset_filter=False):
"""Add/Set given Attribute with given values
Parameters
----------
name : str
Attribute Name
values : :class:`numpy:numpy.ndarray`
Values to fill in attributes
Keyword Arguments
-----------------
reset_filter : bool
reset any layer filter (spatial/attribute), defaults to False
"""
lyr = self.ds.GetLayerByIndex(0)
if reset_filter:
lyr.SetAttributeFilter(None)
lyr.SetSpatialFilter(None)
lyr.ResetReading()
# todo: automatically check for value type
defn = lyr.GetLayerDefn()
if defn.GetFieldIndex(name) == -1:
lyr.CreateField(ogr.FieldDefn(name, ogr.OFTReal))
for i, item in enumerate(lyr):
item.SetField(name, values[i])
lyr.SetFeature(item)
lyr.SyncToDisk()
self._geo = None
def get_attributes(self, attrs, filt=None):
"""Return attributes
Parameters
----------
attrs : list
Attribute Names to retrieve
Keyword Arguments
-----------------
filt : tuple
(attname, value) for Attribute Filter
"""
lyr = self.ds.GetLayer()
lyr.ResetReading()
lyr.SetAttributeFilter(None)
lyr.SetSpatialFilter(None)
if filt is not None:
lyr.SetAttributeFilter(f"{filt[0]}={filt[1]}")
ret = [[] for _ in attrs]
for ogr_src in lyr:
for i, att in enumerate(attrs):
ret[i].append(ogr_src.GetField(att))
return ret
def get_geom_properties(self, props, filt=None):
"""Return geometry properties
Parameters
----------
props : list
Property Names to retrieve
Keyword Arguments
-----------------
filt : tuple
(attname, value) for Attribute Filter
"""
lyr = self.ds.GetLayer()
lyr.ResetReading()
if filt is not None:
lyr.SetAttributeFilter(f"{filt[0]}={filt[1]}")
ret = [[] for _ in props]
for ogr_src in lyr:
for i, prop in enumerate(props):
ret[i].append(getattr(ogr_src.GetGeometryRef(), prop)())
return ret
def get_attrs_and_props(self, attrs=None, props=None, filt=None):
"""Return properties and attributes
Keyword Arguments
-----------------
attrs : list
Attribute Names to retrieve
props : list
Property Names to retrieve
filt : tuple
(attname, value) for Attribute Filter
"""
lyr = self.ds.GetLayer()
lyr.ResetReading()
if filt is not None:
lyr.SetAttributeFilter(f"{filt[0]}={filt[1]}")
ret_props = [[] for _ in props]
ret_attrs = [[] for _ in attrs]
for ogr_src in lyr:
for i, att in enumerate(attrs):
ret_attrs[i].append(ogr_src.GetField(att))
for i, prop in enumerate(props):
ret_props[i].append(getattr(ogr_src.GetGeometryRef(), prop)())
return ret_attrs, ret_props
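# --- Illustrative sketch (added for this dump, not part of the original module) ---
# A minimal VectorSource round trip, assuming GDAL/OGR and numpy are installed;
# the EPSG code is only an example of a projected spatial reference.
#
#   from osgeo import osr
#   import numpy as np
#
#   srs = osr.SpatialReference()
#   srs.ImportFromEPSG(31466)
#   points = np.array([[2.5, 5.5], [3.5, 6.5], [4.5, 7.5]])
#   src = VectorSource(data=points, srs=srs, name="points")
#   print(len(src))                    # 3 point features in the in-memory layer
#   src.dump_vector("points.shp")      # write the layer out as an ESRI Shapefile
#   src.close()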
|
d7d8ef1780391466dad844faa09a0d2866f21cc7
|
25a08bdeb17a0dc032ddf6c11b71693ca4c1a6cd
|
/python/gtirb/auxdata.py
|
94032fd4068e467871dcb8509f1f8618876aa388
|
[
"MIT"
] |
permissive
|
GrammaTech/gtirb
|
20b05dc6170d93af8cc29fd65f7094bd59a9fb4d
|
f4301401a0d98a783e3b6f40e390fe9b1b1d386d
|
refs/heads/master
| 2023-08-18T12:17:45.797733
| 2023-08-14T23:28:22
| 2023-08-15T12:02:48
| 136,977,182
| 277
| 40
|
MIT
| 2019-01-08T01:43:01
| 2018-06-11T20:25:41
|
C++
|
UTF-8
|
Python
| false
| false
| 8,213
|
py
|
auxdata.py
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Optional
from uuid import UUID
from google.protobuf.internal.containers import MessageMap
from .node import Node
from .proto import AuxData_pb2
from .serialization import Serialization
from .util import DictLike
if TYPE_CHECKING: # pragma: no cover
# Ignore flake8 "imported but unused" errors.
from .ir import IR # noqa: F401
class _LazyDataContainer:
"""
Container that holds the raw byte stream until it is read, then releases
it. If it is never read, then serialization skips re-encoding (and
deserializing) the data.
"""
def __init__(
self,
raw_data: bytes,
type_name: str,
get_by_uuid: Callable[[UUID], Optional[Node]],
):
self.raw_data: Optional[bytes] = raw_data
self.type_name = type_name
self.get_by_uuid = get_by_uuid
def get_data(self) -> object:
"""
Get any pending still-serialized data, or return the passed data
instead (the default).
"""
assert self.raw_data is not None
rv = AuxData.serializer.decode(
self.raw_data, self.type_name, self.get_by_uuid
)
self.raw_data = None
return rv
def get_raw_data(self) -> bytes:
""" """
assert self.raw_data is not None
return self.raw_data
class AuxData:
"""AuxData objects can be attached to the :class:`gtirb.IR` or individual
:class:`gtirb.Module` s to store additional client-specific data in a
portable way.
AuxData represents a portable, language-independent manner of encoding
rich data. To do this, all data is stored on disk as a series of bytes
with a string describing the format of the data, called a *type name*. See
:mod:`gtirb.serialization` for the list of all default types. Types may
also be parameterized; for example, ``mapping<string,UUID>`` is a ``dict``
from ``str`` objects to ``UUID`` objects. All ``AuxData`` requires
a valid type name in order to be serialized.
:ivar ~.data: The value stored in this AuxData.
:ivar ~.type_name: A string describing the type of ``data``.
Used to determine the proper codec for serializing this AuxData.
"""
serializer: ClassVar[Serialization] = Serialization()
"""This is a :class:`gtirb.Serialization` instance, used to
encode and decode ``data`` fields of all ``AuxData``. See
:mod:`gtirb.serialization` for details.
"""
def __init__(
self,
data: object,
type_name: str,
lazy_container: Optional[_LazyDataContainer] = None,
):
"""
:param data: The value stored in this AuxData.
:param type_name: A string describing the type of ``data``.
Used to determine the proper codec for serializing this AuxData.
:param lazy_container: An object that will lazily deserialize the
auxdata table backing this object, or None.
"""
self._lazy_container = lazy_container
        # _data has type Any to avoid disrupting clients who want to type check
# their use of gtirb. If _data had type object, they would have to
# verify the element types of potentially large containers, or else
# just subvert the type system by casting anyway.
self._data: Any = data # type: ignore[misc]
self.type_name = type_name
@property
def data(self) -> Any: # type: ignore[misc]
if self._lazy_container is not None:
self._data = self._lazy_container.get_data()
self._lazy_container = None
return self._data
@data.setter
def data(self, value: object) -> None:
self._data = value
self._lazy_container = None
@classmethod
def _from_protobuf(
cls,
aux_data: AuxData_pb2.AuxData,
ir: Optional["IR"],
) -> "AuxData":
"""Deserialize AuxData from Protobuf. Lazy, will not perform
deserialization until .data is accessed.
:param aux_data: The Protobuf AuxData object.
"""
# Defer deserialization until someone accesses .data
assert ir
lazy_container = _LazyDataContainer(
aux_data.data, aux_data.type_name, ir.get_by_uuid
)
return cls(
data=None,
type_name=aux_data.type_name,
lazy_container=lazy_container,
)
def _to_protobuf(self) -> AuxData_pb2.AuxData:
"""Get a Protobuf representation of the AuxData."""
proto_auxdata = AuxData_pb2.AuxData()
proto_auxdata.type_name = self.type_name
# If we are serializing the same data, and the way that data is encoded
# has not changed, then just use the already serialized copy.
if self._lazy_container is not None and (
self.type_name == self._lazy_container.type_name
):
proto_auxdata.data = self._lazy_container.get_raw_data()
else:
data_stream = BytesIO()
AuxData.serializer.encode(data_stream, self.data, self.type_name)
proto_auxdata.data = data_stream.getvalue()
return proto_auxdata
def __repr__(self) -> str:
return (
"AuxData("
"type_name={type_name!r}, "
"data={data!r}, "
")".format(type_name=self.type_name, data=self.data)
)
class AuxDataContainer(Node):
"""The base class for anything that holds AuxData tables; that is,
:class:`gtirb.IR` and :class:`gtirb.Module`.
:ivar ~.aux_data: The auxiliary data associated
with the object, as a mapping from names to
:class:`gtirb.AuxData`.
"""
def __init__(
self,
aux_data: DictLike[str, AuxData] = {},
uuid: Optional[UUID] = None,
):
"""
:param aux_data: The initial auxiliary data to be associated
with the object, as a mapping from names to
:class:`gtirb.AuxData`. Defaults to an empty :class:`dict`.
:param uuid: the UUID of this ``AuxDataContainer``,
            or None if a new UUID needs to be generated via :func:`uuid.uuid4`.
Defaults to None.
"""
super().__init__(uuid)
self.aux_data: Dict[str, AuxData] = dict(aux_data)
@classmethod
def _read_protobuf_aux_data(
cls,
proto_container: "MessageMap[str, AuxData_pb2.AuxData]",
ir: Optional["IR"],
) -> Dict[str, AuxData]:
"""
        Unlike the overridden _decode_protobuf, this method requires the
Protobuf message to read from. AuxDataContainers need to call this
method in their own _decode_protobuf overrides.
:param proto_container: A Protobuf message with a field called
``aux_data``.
"""
return {
key: AuxData._from_protobuf(val, ir)
for key, val in proto_container.items()
}
def _write_protobuf_aux_data(
self, proto_container: "MessageMap[str, AuxData_pb2.AuxData]"
) -> None:
"""
        Unlike the overridden _to_protobuf, this method requires the
Protobuf message to write into. AuxDataContainers need to call this
method in their own _to_protobuf overrides.
:param proto_container: A Protobuf message with a field called
``aux_data``.
"""
for k, v in self.aux_data.items():
proto_container[k].CopyFrom(v._to_protobuf())
def deep_eq(self, other: object) -> bool:
"""This overrides :func:`gtirb.Node.deep_eq` to check for
AuxData equality.
Because the values stored by AuxData are not necessarily
amenable to deep checking, the auxiliary data dictionaries
stored for ``self`` and ``other`` are not deeply checked. Instead,
they are considered to be equal if their sets of keys are equal.
"""
if not isinstance(other, AuxDataContainer):
return False
if (
self.uuid != other.uuid
or self.aux_data.keys() != other.aux_data.keys()
):
return False
return True
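# --- Illustrative sketch (added for this dump, not part of the original module) ---
# Attaching an AuxData table to a container, assuming the default serializer knows
# the parameterized type name used below (see gtirb.serialization):
#
#   import gtirb
#
#   ir = gtirb.IR()
#   ir.aux_data["comments"] = AuxData(
#       data={"note": "written by an example"},
#       type_name="mapping<string,string>",
#   )
#   # Tables loaded lazily from Protobuf keep their original bytes until .data is
#   # read, so untouched tables are not re-encoded on write (see _LazyDataContainer).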
|
e188da7ccc5ef84a7705bfc65ad967dbdae9d2dd
|
269ffc022565c7982017a866ec0e515c90b48940
|
/miditok/tokenizations/mumidi.py
|
54a8d1ccdcbb75fb31fbd96f589a254459422d3d
|
[
"MIT"
] |
permissive
|
Natooz/MidiTok
|
36efda7fef567f4f2fc81053609568e0c6cbc678
|
a1543cd0e0a9a3ee1de6fb77abcdfcffc274b9f1
|
refs/heads/main
| 2023-09-01T14:11:00.854742
| 2023-08-31T13:44:40
| 2023-08-31T13:44:40
| 394,933,651
| 410
| 54
|
MIT
| 2023-09-07T08:18:00
| 2021-08-11T09:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 21,372
|
py
|
mumidi.py
|
from math import ceil
from pathlib import Path
from typing import List, Dict, Optional, Union, Any
import numpy as np
from miditoolkit import MidiFile, Instrument, Note, TempoChange
from ..midi_tokenizer import MIDITokenizer, _in_as_seq, _out_as_complete_seq
from ..classes import TokSequence, Event
from ..utils import detect_chords
from ..constants import (
TIME_DIVISION,
TEMPO,
MIDI_INSTRUMENTS,
DRUM_PITCH_RANGE,
)
class MuMIDI(MIDITokenizer):
r"""Introduced with `PopMAG (Ren et al.) <https://arxiv.org/abs/2008.07703>`_,
this tokenization made for multitrack tasks and uses embedding pooling. Time is
represented with *Bar* and *Position* tokens. The key idea of MuMIDI is to represent
all tracks in a single token sequence. At each time step, *Track* tokens preceding
note tokens indicate their track. MuMIDI also include a "built-in" and learned
positional encoding. As in the original paper, the pitches of drums are distinct
from those of all other instruments.
Each pooled token will be a list of the form (index: Token type):
* 0: Pitch / DrumPitch / Position / Bar / Program / (Chord) / (Rest)
* 1: BarPosEnc
* 2: PositionPosEnc
* (-3 / 3: Tempo)
* -2: Velocity
* -1: Duration
The output hidden states of the model will then be fed to several output layers
    (one per token type). This means that training requires adding multiple losses.
    For generation, decoding implies sampling from several distributions, which can be
very delicate. Hence, we do not recommend this tokenization for generation with small models.
**Add a `drum_pitch_range` entry in the config, mapping to a tuple of values to restrict the range of drum pitches
to use.**
**Notes:**
* Tokens are first sorted by time, then track, then pitch values.
* Tracks with the same *Program* will be merged.
"""
def _tweak_config_before_creating_voc(self):
self.config.use_rests = False
self.config.use_time_signatures = False
self.config.use_sustain_pedals = False
self.config.use_pitch_bends = False
self.config.use_programs = True
self.config.one_token_stream_for_programs = True
if "drum_pitch_range" not in self.config.additional_params:
self.config.additional_params["drum_pitch_range"] = DRUM_PITCH_RANGE
if "max_bar_embedding" not in self.config.additional_params:
# this attribute might increase over tokenizations, if the tokenizer encounter longer MIDIs
self.config.additional_params["max_bar_embedding"] = 60
self.vocab_types_idx = {
"Pitch": 0,
"DrumPitch": 0,
"Position": 0,
"Bar": 0,
"Program": 0,
"BarPosEnc": 1,
"PositionPosEnc": 2,
"Velocity": -2,
"Duration": -1,
}
if self.config.use_chords:
self.vocab_types_idx["Chord"] = 0
if self.config.use_rests:
self.vocab_types_idx["Rest"] = 0
if self.config.use_tempos:
self.vocab_types_idx["Tempo"] = -3
@_out_as_complete_seq
def _midi_to_tokens(self, midi: MidiFile, *args, **kwargs) -> TokSequence:
r"""Tokenize a MIDI file.
Each pooled token will be a list of the form (index: Token type):
* 0: Pitch / DrumPitch / Position / Bar / Program / (Chord) / (Rest)
* 1: BarPosEnc
* 2: PositionPosEnc
* (-3 / 3: Tempo)
* -2: Velocity
* -1: Duration
:param midi: the MIDI object to convert
:return: sequences of tokens
"""
# Check bar embedding limit, update if needed
nb_bars = ceil(midi.max_tick / (midi.ticks_per_beat * 4))
if self.config.additional_params["max_bar_embedding"] < nb_bars:
for i in range(self.config.additional_params["max_bar_embedding"], nb_bars):
self.add_to_vocab(f"BarPosEnc_{i}", 1)
self.config.additional_params["max_bar_embedding"] = nb_bars
# Convert each track to tokens (except first pos to track time)
note_tokens = []
for track in midi.instruments:
if track.program in self.config.programs:
note_tokens += self._track_to_tokens(track)
note_tokens.sort(
key=lambda x: (x[0].time, x[0].desc)
) # Sort by time then track
ticks_per_sample = midi.ticks_per_beat / max(self.config.beat_res.values())
ticks_per_bar = midi.ticks_per_beat * 4
tokens = []
current_tick = -1
current_bar = -1
current_pos = -1
current_track = -2 # because -2 doesn't exist
current_tempo_idx = 0
current_tempo = self._current_midi_metadata["tempo_changes"][
current_tempo_idx
].tempo
for note_token in note_tokens:
# (Tempo) update tempo values current_tempo
if self.config.use_tempos:
# If the current tempo is not the last one
if current_tempo_idx + 1 < len(
self._current_midi_metadata["tempo_changes"]
):
# Will loop over incoming tempo changes
for tempo_change in self._current_midi_metadata["tempo_changes"][
current_tempo_idx + 1 :
]:
# If this tempo change happened before the current moment
if tempo_change.time <= note_token[0].time:
current_tempo = tempo_change.tempo
current_tempo_idx += (
1 # update tempo value (might not change) and index
)
elif tempo_change.time > note_token[0].time:
break # this tempo change is beyond the current time step, we break the loop
# Positions and bars pos enc
if note_token[0].time != current_tick:
pos_index = int((note_token[0].time % ticks_per_bar) / ticks_per_sample)
current_tick = note_token[0].time
current_pos = pos_index
current_track = -2 # reset
# (New bar)
if current_bar < current_tick // ticks_per_bar:
nb_new_bars = current_tick // ticks_per_bar - current_bar
for i in range(nb_new_bars):
bar_token = [
"Bar_None",
f"BarPosEnc_{current_bar + i + 1}",
"PositionPosEnc_None",
]
if self.config.use_tempos:
bar_token.append(f"Tempo_{current_tempo}")
tokens.append(bar_token)
current_bar += nb_new_bars
# Position
pos_token = [
f"Position_{current_pos}",
f"BarPosEnc_{current_bar}",
f"PositionPosEnc_{current_pos}",
]
if self.config.use_tempos:
pos_token.append(f"Tempo_{current_tempo}")
tokens.append(pos_token)
# Program (track)
if note_token[0].desc != current_track:
current_track = note_token[0].desc
track_token = [
f"Program_{current_track}",
f"BarPosEnc_{current_bar}",
f"PositionPosEnc_{current_pos}",
]
if self.config.use_tempos:
track_token.append(f"Tempo_{current_tempo}")
tokens.append(track_token)
# Adding bar and position tokens to notes for positional encoding
note_token[0] = str(note_token[0])
note_token.insert(1, f"BarPosEnc_{current_bar}")
note_token.insert(2, f"PositionPosEnc_{current_pos}")
if self.config.use_tempos:
note_token.insert(3, f"Tempo_{current_tempo}")
tokens.append(note_token)
return TokSequence(tokens=tokens)
def _track_to_tokens(self, track: Instrument) -> List[List[Union[Event, str]]]:
r"""Converts a track (miditoolkit.Instrument object) into a sequence of tokens (:class:`miditok.TokSequence`).
For each note, it creates a time step as a list of tokens where (list index: token type):
* 0: Pitch (as an Event object for sorting purpose afterwards)
* 1: Velocity
* 2: Duration
:param track: track object to convert.
:return: sequence of corresponding tokens.
"""
# Make sure the notes are sorted first by their onset (start) times, second by pitch
# notes.sort(key=lambda x: (x.start, x.pitch)) # done in midi_to_tokens
dur_bins = self._durations_ticks[self._current_midi_metadata["time_division"]]
tokens = []
for note in track.notes:
# Note
duration = note.end - note.start
dur_idx = np.argmin(np.abs(dur_bins - duration))
if not track.is_drum:
tokens.append(
[
Event(
type="Pitch",
value=note.pitch,
time=note.start,
desc=track.program,
),
f"Velocity_{note.velocity}",
f'Duration_{".".join(map(str, self.durations[dur_idx]))}',
]
)
else:
tokens.append(
[
Event(
type="DrumPitch",
value=note.pitch,
time=note.start,
desc=-1,
),
f"Velocity_{note.velocity}",
f'Duration_{".".join(map(str, self.durations[dur_idx]))}',
]
)
# Adds chord tokens if specified
if self.config.use_chords and not track.is_drum:
chords = detect_chords(
track.notes,
self._current_midi_metadata["time_division"],
chord_maps=self.config.chord_maps,
specify_root_note=self.config.chord_tokens_with_root_note,
beat_res=self._first_beat_res,
unknown_chords_nb_notes_range=self.config.chord_unknown,
)
unsqueezed = []
for c in range(len(chords)):
chords[c].desc = track.program
unsqueezed.append([chords[c]])
tokens = (
unsqueezed + tokens
) # chords at the beginning to keep the good order during sorting
return tokens
@_in_as_seq()
def tokens_to_midi(
self,
tokens: Union[TokSequence, List, np.ndarray, Any],
_=None,
output_path: Optional[str] = None,
time_division: Optional[int] = TIME_DIVISION,
) -> MidiFile:
r"""Override the parent class method
Convert multiple sequences of tokens into a multitrack MIDI and save it.
The tokens will be converted to event objects and then to a miditoolkit.MidiFile object.
A time step is a list of tokens where (list index: token type):
* 0: Pitch / DrumPitch / Position / Bar / Program / (Chord) / (Rest)
* 1: BarPosEnc
* 2: PositionPosEnc
* (-3 / 3: Tempo)
* -2: Velocity
* -1: Duration
:param tokens: tokens to convert. Can be either a Tensor (PyTorch and Tensorflow are supported),
a numpy array, a Python list or a TokSequence.
:param tokens: list of lists of tokens to convert, each list inside the
first list corresponds to a track
:param _: unused, to match parent method signature
:param output_path: path to save the file (with its name, e.g. music.mid),
leave None to not save the file
:param time_division: MIDI time division / resolution, in ticks/beat (of the MIDI to create)
:return: the midi object (miditoolkit.MidiFile)
"""
assert (
time_division % max(self.config.beat_res.values()) == 0
), f"Invalid time division, please give one divisible by {max(self.config.beat_res.values())}"
midi = MidiFile(ticks_per_beat=time_division)
# Tempos
if self.config.use_tempos:
first_tempo = float(tokens.tokens[0][3].split("_")[1])
else:
first_tempo = TEMPO
midi.tempo_changes.append(TempoChange(first_tempo, 0))
ticks_per_sample = time_division // max(self.config.beat_res.values())
tracks = {}
current_tick = 0
current_bar = -1
current_track = 0 # default set to piano
for time_step in tokens.tokens:
tok_type, tok_val = time_step[0].split("_")
if tok_type == "Bar":
current_bar += 1
current_tick = current_bar * time_division * 4
elif tok_type == "Position":
if current_bar == -1:
current_bar = (
0 # as this Position token occurs before any Bar token
)
current_tick = (
current_bar * time_division * 4 + int(tok_val) * ticks_per_sample
)
elif tok_type == "Program":
current_track = tok_val
try:
_ = tracks[current_track]
except KeyError:
tracks[current_track] = []
elif tok_type == "Pitch" or tok_type == "DrumPitch":
vel, duration = (time_step[i].split("_")[1] for i in (-2, -1))
if any(val == "None" for val in (vel, duration)):
continue
pitch = int(tok_val)
vel = int(vel)
duration = self._token_duration_to_ticks(duration, time_division)
tracks[current_track].append(
Note(vel, pitch, current_tick, current_tick + duration)
)
# Decode tempo if required
if self.config.use_tempos:
tempo_val = float(time_step[3].split("_")[1])
if tempo_val != midi.tempo_changes[-1].tempo:
midi.tempo_changes.append(TempoChange(tempo_val, current_tick))
# Appends created notes to MIDI object
for program, notes in tracks.items():
if int(program) == -1:
midi.instruments.append(Instrument(0, True, "Drums"))
else:
midi.instruments.append(
Instrument(
int(program), False, MIDI_INSTRUMENTS[int(program)]["name"]
)
)
midi.instruments[-1].notes = notes
# Write MIDI file
if output_path:
Path(output_path).mkdir(parents=True, exist_ok=True)
midi.dump(output_path)
return midi
def _create_base_vocabulary(self) -> List[List[str]]:
r"""Creates the vocabulary, as a list of string tokens.
Each token as to be given as the form of "Type_Value", separated with an underscore.
Example: Pitch_58
The :class:`miditok.MIDITokenizer` main class will then create the "real" vocabulary as
a dictionary.
Special tokens have to be given when creating the tokenizer, and
will be added to the vocabulary by :class:`miditok.MIDITokenizer`.
For MUMIDI, token index 0 is used as a padding index for training.
* 0: Pitch / DrumPitch / Position / Bar / Program / (Chord) / (Rest)
* 1: BarPosEnc
* 2: PositionPosEnc
* (-3 / 3: Tempo)
* -2: Velocity
* -1: Duration
:return: the vocabulary as a list of string.
"""
vocab = [[] for _ in range(3)]
# PITCH & DRUM PITCHES & BAR & POSITIONS & PROGRAM
vocab[0] += [f"Pitch_{i}" for i in range(*self.config.pitch_range)]
vocab[0] += [
f"DrumPitch_{i}"
for i in range(*self.config.additional_params["drum_pitch_range"])
]
vocab[0] += ["Bar_None"] # new bar token
max_nb_beats = max(
map(lambda ts: ceil(4 * ts[0] / ts[1]), self.time_signatures)
)
nb_positions = max(self.config.beat_res.values()) * max_nb_beats
vocab[0] += [f"Position_{i}" for i in range(nb_positions)]
vocab[0] += [f"Program_{program}" for program in self.config.programs]
# BAR POS ENC
vocab[1] += [
f"BarPosEnc_{i}"
for i in range(self.config.additional_params["max_bar_embedding"])
]
# POSITION POS ENC
vocab[2] += [
"PositionPosEnc_None"
] # special embedding used with 'Bar_None' tokens
vocab[2] += [f"PositionPosEnc_{i}" for i in range(nb_positions)] # pos enc
# CHORD
if self.config.use_chords:
vocab[0] += self._create_chords_tokens()
# REST
if self.config.use_rests:
vocab[0] += [f'Rest_{".".join(map(str, rest))}' for rest in self.rests]
# TEMPO
if self.config.use_tempos:
vocab.append([f"Tempo_{i}" for i in self.tempos])
# Velocity and Duration in last position
# VELOCITY
vocab.append([f"Velocity_{i}" for i in self.velocities])
# DURATION
vocab.append(
[f'Duration_{".".join(map(str, duration))}' for duration in self.durations]
)
return vocab
def _create_token_types_graph(self) -> Dict[str, List[str]]:
r"""Returns a graph (as a dictionary) of the possible token
types successions.
Here the combination of Pitch, Velocity and Duration tokens is represented by
"Pitch" in the graph.
:return: the token types transitions dictionary
"""
dic = dict()
dic["Bar"] = ["Bar", "Position"]
dic["Position"] = ["Program"]
dic["Program"] = ["Pitch", "DrumPitch"]
dic["Pitch"] = ["Pitch", "Program", "Bar", "Position"]
dic["DrumPitch"] = ["DrumPitch", "Program", "Bar", "Position"]
if self.config.use_chords:
dic["Program"] += ["Chord"]
dic["Chord"] = ["Pitch"]
return dic
@_in_as_seq()
def tokens_errors(self, tokens: Union[TokSequence, List, np.ndarray, Any]) -> float:
r"""Checks if a sequence of tokens is made of good token types
successions and returns the error ratio (lower is better).
The Pitch and Position values are also analyzed:
- a bar token value cannot be < to the current bar (it would go back in time)
- same for positions
- a pitch token should not be present if the same pitch is already played at the current position
:param tokens: sequence of tokens to check
:return: the error ratio (lower is better)
"""
tokens = tokens.tokens
err = 0
previous_type = tokens[0][0].split("_")[0]
current_pitches = []
current_bar = int(tokens[0][1].split("_")[1])
current_pos = tokens[0][2].split("_")[1]
current_pos = int(current_pos) if current_pos != "None" else -1
for token in tokens[1:]:
# debug = {j: self.tokens_to_events([tokens[1:][j]])[0] for j in range(i - 4, min(i + 4, len(tokens[1:])))}
bar_value = int(token[1].split("_")[1])
pos_value = token[2].split("_")[1]
pos_value = int(pos_value) if pos_value != "None" else -1
token_type, token_value = token[0].split("_")
if any(tok.split("_")[0] in ["PAD", "MASK"] for i, tok in enumerate(token)):
err += 1
continue
# Good token type
if token_type in self.tokens_types_graph[previous_type]:
if token_type == "Bar": # reset
current_bar += 1
current_pos = -1
current_pitches = []
elif token_type == "Pitch":
if int(token_value) in current_pitches:
err += 1 # pitch already played at current position
else:
current_pitches.append(int(token_value))
elif token_type == "Position":
if int(token_value) <= current_pos or int(token_value) != pos_value:
err += 1 # token position value <= to the current position
else:
current_pos = int(token_value)
current_pitches = []
elif token_type == "Program":
current_pitches = []
if pos_value < current_pos or bar_value < current_bar:
err += 1
# Bad token type
else:
err += 1
previous_type = token_type
return err / len(tokens)
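# --- Illustrative sketch (added for this dump, not part of the original module) ---
# A typical round trip, assuming the public midi_to_tokens / tokens_to_midi entry
# points of the MIDITokenizer base class:
#
#   from miditoolkit import MidiFile
#
#   tokenizer = MuMIDI()
#   tokens = tokenizer.midi_to_tokens(MidiFile("song.mid"))  # TokSequence of pooled tokens
#   midi = tokenizer.tokens_to_midi(tokens)                  # decode back to a MIDI object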
|
eb335ff467a59ef95f1a901386f25372672c61e2
|
a2b20597759990445081057d35d113434cfcf970
|
/client/configuration/shared_memory.py
|
f78d2916da07268082a8bab6d85ec9cca49a1097
|
[
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
shared_memory.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module specifies how one can tune the backend's shared memory settings
in a .pyre_configuration file. This is needed because for large projects we
might want a larger heap and fatter hash table than the defaults.
"""
import dataclasses
from typing import Dict, Optional
from .. import dataclasses_merge
@dataclasses_merge.dataclass_merge
@dataclasses.dataclass(frozen=True)
class SharedMemory:
heap_size: Optional[int] = None
dependency_table_power: Optional[int] = None
hash_table_power: Optional[int] = None
def to_json(self) -> Dict[str, int]:
heap_size = self.heap_size
dependency_table_power = self.dependency_table_power
hash_table_power = self.hash_table_power
return {
**({"heap_size": heap_size} if heap_size is not None else {}),
**(
{"dependency_table_power": dependency_table_power}
if dependency_table_power is not None
else {}
),
**(
{"hash_table_power": hash_table_power}
if hash_table_power is not None
else {}
),
}
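# --- Illustrative sketch (added for this dump, not part of the original module) ---
# Only the fields that were actually set end up in the JSON payload:
#
#   SharedMemory(heap_size=17179869184, hash_table_power=26).to_json()
#   # -> {"heap_size": 17179869184, "hash_table_power": 26}
#   SharedMemory().to_json()
#   # -> {}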
|
f1ff381c029e9aea0a42871cee581bdf3a931f83
|
c7f92e01853d106dfef77248c00e585ba1e6e8da
|
/plugins/tts/pico-tts/pico.py
|
fdb031ca37ec9030ec4f2b75d3ab3d33fab1ce38
|
[
"MIT"
] |
permissive
|
NaomiProject/Naomi
|
ff6eb740e9ec0bb29aee136b9a654fccceb18eeb
|
e65086865ffac0e905e09adff51be520938eaa38
|
refs/heads/naomi-dev
| 2023-07-07T07:36:01.524236
| 2023-07-03T00:45:26
| 2023-07-03T00:45:26
| 65,221,460
| 226
| 73
|
MIT
| 2023-07-03T00:45:27
| 2016-08-08T16:41:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
pico.py
|
import logging
import os
import pipes
import re
import subprocess
import tempfile
import unittest
from naomi import diagnose
from naomi import plugin
from naomi import profile
EXECUTABLE = 'pico2wave'
if not diagnose.check_executable(EXECUTABLE):
raise unittest.SkipTest("Skipping Pico, executable '%s' not found!" % EXECUTABLE)
raise ImportError("Executable '%s' not found!" % EXECUTABLE)
class PicoTTSPlugin(plugin.TTSPlugin):
"""
Uses the svox-pico-tts speech synthesizer
Requires pico2wave to be available
"""
def __init__(self, *args, **kwargs):
plugin.TTSPlugin.__init__(self, *args, **kwargs)
language = profile.get(['language'], 'en-US')
available_languages = self.get_languages()
if language not in available_languages:
raise ValueError("Language '%s' not supported" % language)
self._language = language
def get_languages(self):
cmd = [EXECUTABLE, '-l', 'NULL',
'-w', os.devnull,
'NULL']
with tempfile.SpooledTemporaryFile() as f:
subprocess.call(cmd, stderr=f)
f.seek(0)
output = f.read().decode('utf-8')
pattern = re.compile(r'Unknown language: NULL\nValid languages:\n' +
r'((?:[a-z]{2}-[A-Z]{2}\n)+)')
matchobj = pattern.match(output)
if not matchobj:
raise RuntimeError("%s: valid languages not detected" % EXECUTABLE)
langs = matchobj.group(1).split()
return langs
def say(self, phrase):
logger = logging.getLogger(__name__)
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
fname = f.name
cmd = [EXECUTABLE, '-w', fname,
'-l', self._language,
phrase]
logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
logger.debug("Output was: '%s'", output)
with open(fname, 'rb') as f:
data = f.read()
os.remove(fname)
return data
|
1a2e1139f9e83668e385a802845d24311dd6f080
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/msteams/notify.py
|
7a729897e76a5c906c9892edb2205bf7de4880cc
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
notify.py
|
"""Microsoft Teams platform for notify component."""
from __future__ import annotations
import logging
import pymsteams
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTR_FILE_URL = "image_url"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_URL): cv.url})
def get_service(
hass: HomeAssistant,
config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> MSTeamsNotificationService | None:
"""Get the Microsoft Teams notification service."""
webhook_url = config.get(CONF_URL)
try:
return MSTeamsNotificationService(webhook_url)
except RuntimeError as err:
_LOGGER.exception("Error in creating a new Microsoft Teams message: %s", err)
return None
class MSTeamsNotificationService(BaseNotificationService):
"""Implement the notification service for Microsoft Teams."""
def __init__(self, webhook_url):
"""Initialize the service."""
self._webhook_url = webhook_url
def send_message(self, message=None, **kwargs):
"""Send a message to the webhook."""
teams_message = pymsteams.connectorcard(self._webhook_url)
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA)
teams_message.title(title)
teams_message.text(message)
if data is not None and (file_url := data.get(ATTR_FILE_URL)) is not None:
if not file_url.startswith("http"):
_LOGGER.error("URL should start with http or https")
return
message_section = pymsteams.cardsection()
message_section.addImage(file_url)
teams_message.addSection(message_section)
try:
teams_message.send()
except RuntimeError as err:
_LOGGER.error("Could not send notification. Error: %s", err)
|
5731c7a393aab6b0697433a6b3d7d1ebe5850841
|
5a361fd402b330189792b592465c1fc48c6a5fb4
|
/application/flicket/views/create.py
|
faf3cdc687624fddc7e2c627d1b3551f25bb7742
|
[
"MIT"
] |
permissive
|
evereux/flicket
|
a807e4a3cabce45b42cf02acceee1deb023b0487
|
ef1330dca9bcc75cedd9fb68a0ecb4de67f2e6cc
|
refs/heads/master
| 2023-05-10T17:14:05.116769
| 2023-05-01T11:20:23
| 2023-05-01T11:20:23
| 74,390,774
| 120
| 64
|
MIT
| 2023-05-02T00:13:00
| 2016-11-21T17:58:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
create.py
|
#! usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
from flask import (flash,
redirect,
url_for,
request,
session,
render_template,
g)
from flask_babel import gettext
from flask_login import login_required
from . import flicket_bp
from application import app
from application.flicket.forms.flicket_forms import CreateTicketForm
from application.flicket.models.flicket_models_ext import FlicketTicketExt
# create ticket
@flicket_bp.route(app.config['FLICKET'] + 'ticket_create/', methods=['GET', 'POST'])
@login_required
def ticket_create():
# default category based on last submit (get from session)
# using session, as information about last created ticket can be sensitive
# in future it can be stored in extended user model instead
last_category = session.get('ticket_create_last_category')
form = CreateTicketForm(category=last_category)
if form.validate_on_submit():
new_ticket = FlicketTicketExt.create_ticket(title=form.title.data,
user=g.user,
content=form.content.data,
category=form.category.data,
priority=form.priority.data,
hours=form.hours.data,
files=request.files.getlist("file"))
flash(gettext('New Ticket created.'), category='success')
session['ticket_create_last_category'] = form.category.data
return redirect(url_for('flicket_bp.ticket_view', ticket_id=new_ticket.id))
title = gettext('Create Ticket')
return render_template('flicket_create.html', title=title, form=form)
|
66af7d882941de36d1e18475eba1e84a24c54d0b
|
65078b8087c2040cf0188e2550ea298d20518f62
|
/tests/unit/_internal/utils/test_analytics.py
|
01fc271845769c8fc53e48c2c73b7976250848c4
|
[
"Apache-2.0"
] |
permissive
|
bentoml/BentoML
|
20ab6f8351b1c5cd116d6d60a28098246a1581b3
|
4a14f073d8a3e700aff29483b17ea053058c0c63
|
refs/heads/main
| 2023-09-05T16:03:08.909692
| 2023-09-04T18:54:33
| 2023-09-04T18:54:33
| 178,976,529
| 5,712
| 732
|
Apache-2.0
| 2023-09-14T20:07:54
| 2019-04-02T01:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 12,132
|
py
|
test_analytics.py
|
# pylint: disable=unused-argument
from __future__ import annotations
import logging
import typing as t
from typing import TYPE_CHECKING
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from prometheus_client.parser import text_string_to_metric_families  # type: ignore (no prometheus types)
from schema import And
from schema import Or
from schema import Schema
import bentoml
from bentoml._internal.utils import analytics
if TYPE_CHECKING:
from unittest.mock import MagicMock
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from prometheus_client.metrics_core import Metric
from bentoml import Service
SCHEMA = Schema(
{
"common_properties": {
"timestamp": str,
"bentoml_version": str,
"client": {"creation_timestamp": str, "id": str},
"memory_usage_percent": Or(int, float),
"platform": str,
"python_version": str,
"total_memory_in_mb": int,
"yatai_user_email": Or(str, None),
"yatai_version": Or(str, None),
"yatai_org_uid": Or(str, None),
"yatai_cluster_uid": Or(str, None),
"yatai_deployment_uid": Or(str, None),
"is_interactive": bool,
"in_notebook": bool,
},
"event_properties": {
"module": str,
"model_size_in_kb": Or(float, int),
},
"session_id": str,
"event_type": And(str, str.islower),
}
)
@pytest.fixture(scope="function", name="event_properties")
def fixture_event_properties() -> analytics.schemas.ModelSaveEvent:
return analytics.schemas.ModelSaveEvent(module="test", model_size_in_kb=123123123)
def test_get_payload(event_properties: analytics.schemas.ModelSaveEvent):
payload = analytics.usage_stats.get_payload(
event_properties=event_properties, session_id="random_session_id"
)
assert SCHEMA.validate(payload)
@patch("bentoml._internal.utils.analytics.usage_stats.requests.post")
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
@patch("bentoml._internal.utils.analytics.usage_stats._usage_event_debugging")
def test_send_usage(
mock_usage_event_debugging: MagicMock,
mock_do_not_track: MagicMock,
mock_post: MagicMock,
event_properties: analytics.schemas.ModelSaveEvent,
caplog: LogCaptureFixture,
):
mock_usage_event_debugging.return_value = False
mock_do_not_track.return_value = False
analytics.track(event_properties)
assert mock_do_not_track.called
assert mock_post.called
mock_usage_event_debugging.return_value = True
with caplog.at_level(logging.INFO):
analytics.track(event_properties)
assert "Tracking Payload" in caplog.text
@patch("bentoml._internal.utils.analytics.usage_stats.requests.post")
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
def test_do_not_track(
mock_do_not_track: MagicMock,
mock_post: MagicMock,
event_properties: analytics.schemas.ModelSaveEvent,
):
mock_do_not_track.return_value = True
analytics.track(event_properties)
assert mock_do_not_track.called
assert not mock_post.called
@patch("bentoml._internal.utils.analytics.usage_stats.logger")
@patch("bentoml._internal.utils.analytics.usage_stats.requests.post")
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
def test_send_usage_failure(
mock_do_not_track: MagicMock,
mock_post: MagicMock,
mock_logger: MagicMock,
event_properties: analytics.schemas.ModelSaveEvent,
):
mock_do_not_track.return_value = False
mock_post.side_effect = AssertionError("something went wrong")
# nothing should happen
analytics.track(event_properties)
assert mock_do_not_track.called
assert mock_post.called
mock_logger.debug.assert_called_with("Tracking Error: %s", mock_post.side_effect)
@patch("bentoml._internal.utils.analytics.usage_stats.requests.post")
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
@patch("bentoml._internal.utils.analytics.usage_stats._usage_event_debugging")
@pytest.mark.parametrize("production", [False, True])
@pytest.mark.usefixtures("propagate_logs")
def test_track_serve_init(
mock_usage_event_debugging: MagicMock,
mock_do_not_track: MagicMock,
mock_post: MagicMock,
simple_service: Service,
production: bool,
caplog: LogCaptureFixture,
):
mock_do_not_track.return_value = False
mock_usage_event_debugging.return_value = False
mock_response = Mock()
mock_post.return_value = mock_response
mock_response.text = "sent"
analytics.usage_stats._track_serve_init( # type: ignore (private warning)
simple_service,
production=production,
serve_info=analytics.usage_stats.get_serve_info(),
serve_kind="http",
from_server_api=False,
)
assert mock_do_not_track.called
assert mock_post.called
mock_usage_event_debugging.return_value = True
with caplog.at_level(logging.INFO):
analytics.usage_stats._track_serve_init( # type: ignore (private warning)
simple_service,
production=production,
serve_info=analytics.usage_stats.get_serve_info(),
serve_kind="http",
from_server_api=False,
)
assert "model_types" in caplog.text
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
@patch("bentoml._internal.utils.analytics.usage_stats._usage_event_debugging")
def test_track_serve_init_no_bento(
mock_usage_event_debugging: MagicMock,
mock_do_not_track: MagicMock,
caplog: LogCaptureFixture,
):
logger = logging.getLogger("bentoml")
logger.propagate = False
mock_do_not_track.return_value = False
mock_usage_event_debugging.return_value = True
caplog.clear()
with caplog.at_level(logging.INFO):
analytics.usage_stats._track_serve_init( # type: ignore (private warning)
bentoml.Service("test"),
production=False,
serve_info=analytics.usage_stats.get_serve_info(),
serve_kind="http",
from_server_api=False,
)
assert "model_types" not in caplog.text
@patch("bentoml._internal.server.metrics.prometheus.PrometheusClient")
@pytest.mark.parametrize(
"mock_output,expected",
[
(b"", []),
(
b"""# HELP BENTOML_noop_request_total Multiprocess metric""",
[],
),
],
)
@pytest.mark.parametrize("serve_kind", ["grpc", "http"])
def test_filter_metrics_report(
mock_prometheus_client: MagicMock,
mock_output: bytes,
expected: tuple[list[t.Any], bool | None],
serve_kind: str,
):
mock_prometheus_client.multiproc.return_value = False
mock_prometheus_client.generate_latest.return_value = mock_output
assert (
analytics.usage_stats.get_metrics_report(
mock_prometheus_client, serve_kind=serve_kind
)
== expected
)
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
def test_track_serve_do_not_track(
mock_do_not_track: MagicMock, simple_service: Service
):
mock_do_not_track.return_value = True
with analytics.track_serve(
simple_service,
production=False,
serve_info=analytics.usage_stats.get_serve_info(),
) as output:
pass
assert not output
assert mock_do_not_track.called
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
@patch("bentoml._internal.server.metrics.prometheus.PrometheusClient")
def test_legacy_get_metrics_report(
mock_prometheus_client: MagicMock,
mock_do_not_track: MagicMock,
simple_service: Service,
):
mock_do_not_track.return_value = True
mock_prometheus_client.multiproc.return_value = False
mock_prometheus_client.text_string_to_metric_families.return_value = text_string_to_metric_families(
b"""\
# HELP BENTOML_simple_service_request_in_progress Multiprocess metric
# TYPE BENTOML_simple_service_request_in_progress gauge
BENTOML_simple_service_request_in_progress{endpoint="/predict",service_version="not available"} 0.0
# HELP BENTOML_simple_service_request_total Multiprocess metric
# TYPE BENTOML_simple_service_request_total counter
BENTOML_simple_service_request_total{endpoint="/predict",http_response_code="200",service_version="not available"} 8.0
""".decode(
"utf-8"
)
)
output = analytics.usage_stats.get_metrics_report(
mock_prometheus_client, serve_kind="http"
)
assert {
"endpoint": "/predict",
"http_response_code": "200",
"service_version": "not available",
"value": 8.0,
} in output
endpoints = [filtered["endpoint"] for filtered in output]
assert not any(x in endpoints for x in analytics.usage_stats.EXCLUDE_PATHS)
@patch("bentoml._internal.server.metrics.prometheus.PrometheusClient")
@pytest.mark.parametrize(
"serve_kind,expected",
[
(
"grpc",
{
"api_name": "pred_json",
"http_response_code": "200",
"service_name": "simple_service",
"service_version": "not available",
"value": 15.0,
},
),
("http", None),
],
)
@pytest.mark.parametrize(
"generated_metrics",
[
text_string_to_metric_families(
b"""\
# HELP bentoml_api_server_request_total Multiprocess metric
# TYPE bentoml_api_server_request_total counter
bentoml_api_server_request_total{api_name="pred_json",http_response_code="200",service_name="simple_service",service_version="not available"} 15.0
# HELP bentoml_api_server_request_in_progress Multiprocess metric
# TYPE bentoml_api_server_request_in_progress gauge
bentoml_api_server_request_in_progress{api_name="pred_json",service_name="simple_service",service_version="not available"} 0.0
""".decode(
"utf-8"
)
)
],
)
def test_get_metrics_report(
mock_prometheus_client: MagicMock,
simple_service: Service,
serve_kind: str,
expected: dict[str, str | float] | None,
generated_metrics: t.Generator[Metric, None, None],
):
mock_prometheus_client.multiproc.return_value = False
mock_prometheus_client.text_string_to_metric_families.return_value = (
generated_metrics
)
output = analytics.usage_stats.get_metrics_report(
mock_prometheus_client, serve_kind=serve_kind
)
if expected:
assert expected in output
@patch("bentoml._internal.utils.analytics.usage_stats.do_not_track")
@patch("bentoml._internal.utils.analytics.usage_stats.requests.post")
@patch("bentoml._internal.utils.analytics.usage_stats._track_serve_init")
@patch("bentoml._internal.utils.analytics.usage_stats._usage_event_debugging")
@patch("bentoml._internal.server.metrics.prometheus.PrometheusClient")
@pytest.mark.usefixtures("propagate_logs")
def test_track_serve(
mock_prometheus_client: MagicMock,
mock_usage_event_debugging: MagicMock,
mock_track_serve_init: MagicMock,
mock_post: MagicMock,
mock_do_not_track: MagicMock,
simple_service: Service,
monkeypatch: MonkeyPatch,
caplog: LogCaptureFixture,
):
mock_prometheus_client.multiproc.return_value = False
mock_do_not_track.return_value = False
mock_usage_event_debugging.return_value = True
monkeypatch.setenv("__BENTOML_DEBUG_USAGE", "True")
analytics.usage_stats.SERVE_USAGE_TRACKING_INTERVAL_SECONDS = 1
with caplog.at_level(logging.INFO):
with analytics.track_serve(
simple_service,
production=False,
metrics_client=mock_prometheus_client,
serve_info=analytics.usage_stats.get_serve_info(),
):
import time
time.sleep(2)
assert not mock_post.called
assert mock_do_not_track.called
assert mock_track_serve_init.called
|
afb1cdf1739535fe65c0a49658a9c0fae7c671a1
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/nonparametric/tests/test_asymmetric.py
|
25a545da2ed19c193d8fd3c1e2f7c735788776ea
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,682
|
py
|
test_asymmetric.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 16:18:21 2021
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from numpy.testing import assert_allclose, assert_array_less
from scipy import stats
import pytest
import statsmodels.nonparametric.kernels_asymmetric as kern
kernels_rplus = [("gamma", 0.1),
("gamma2", 0.1),
("invgamma", 0.02),
("invgauss", 0.01),
("recipinvgauss", 0.1),
("bs", 0.1),
("lognorm", 0.01),
("weibull", 0.1),
]
kernels_unit = [("beta", 0.005),
("beta2", 0.005),
]
class CheckKernels:
def test_kernels(self, case):
name, bw = case
rvs = self.rvs
x_plot = self.x_plot
kde = []
kce = []
for xi in x_plot:
kde.append(kern.pdf_kernel_asym(xi, rvs, bw, name))
kce.append(kern.cdf_kernel_asym(xi, rvs, bw, name))
kde = np.asarray(kde)
kce = np.asarray(kce)
# average mean squared error
amse = ((kde - self.pdf_dgp)**2).mean()
assert_array_less(amse, self.amse_pdf)
amse = ((kce - self.cdf_dgp)**2).mean()
assert_array_less(amse, self.amse_cdf)
def test_kernels_vectorized(self, case):
name, bw = case
rvs = self.rvs
x_plot = self.x_plot
kde = []
kce = []
for xi in x_plot:
kde.append(kern.pdf_kernel_asym(xi, rvs, bw, name))
kce.append(kern.cdf_kernel_asym(xi, rvs, bw, name))
kde = np.asarray(kde)
kce = np.asarray(kce)
kde1 = kern.pdf_kernel_asym(x_plot, rvs, bw, name)
kce1 = kern.cdf_kernel_asym(x_plot, rvs, bw, name)
assert_allclose(kde1, kde, rtol=1e-12)
assert_allclose(kce1, kce, rtol=1e-12)
def test_kernels_weights(self, case):
name, bw = case
rvs = self.rvs
x = self.x_plot
kde2 = kern.pdf_kernel_asym(x, rvs, bw, name)
kce2 = kern.cdf_kernel_asym(x, rvs, bw, name)
n = len(rvs)
w = np.ones(n) / n
kde1 = kern.pdf_kernel_asym(x, rvs, bw, name, weights=w)
kce1 = kern.cdf_kernel_asym(x, rvs, bw, name, weights=w)
assert_allclose(kde1, kde2, rtol=1e-12)
assert_allclose(kce1, kce2, rtol=1e-12)
        # weights that do not sum to 1 are valid, but do not produce a pdf or cdf
n = len(rvs)
w = np.ones(n) / n * 2
kde1 = kern.pdf_kernel_asym(x, rvs, bw, name, weights=w)
kce1 = kern.cdf_kernel_asym(x, rvs, bw, name, weights=w)
assert_allclose(kde1, kde2 * 2, rtol=1e-12)
assert_allclose(kce1, kce2 * 2, rtol=1e-12)
class TestKernelsRplus(CheckKernels):
@classmethod
def setup_class(cls):
b = 2
scale = 1.5
np.random.seed(1)
nobs = 1000
distr0 = stats.gamma(b, scale=scale)
rvs = distr0.rvs(size=nobs)
x_plot = np.linspace(0.5, 16, 51) + 1e-13
cls.rvs = rvs
cls.x_plot = x_plot
cls.pdf_dgp = distr0.pdf(x_plot)
cls.cdf_dgp = distr0.cdf(x_plot)
cls.amse_pdf = 1e-4 # tol for average mean squared error
cls.amse_cdf = 5e-4
@pytest.mark.parametrize('case', kernels_rplus)
def test_kernels(self, case):
super(TestKernelsRplus, self).test_kernels(case)
@pytest.mark.parametrize('case', kernels_rplus)
def test_kernels_vectorized(self, case):
super(TestKernelsRplus, self).test_kernels_vectorized(case)
@pytest.mark.parametrize('case', kernels_rplus)
def test_kernels_weights(self, case):
super(TestKernelsRplus, self).test_kernels_weights(case)
class TestKernelsUnit(CheckKernels):
@classmethod
def setup_class(cls):
np.random.seed(987456)
nobs = 1000
distr0 = stats.beta(2, 3)
rvs = distr0.rvs(size=nobs)
# Runtime warning if x_plot includes 0
x_plot = np.linspace(1e-10, 1, 51)
cls.rvs = rvs
cls.x_plot = x_plot
cls.pdf_dgp = distr0.pdf(x_plot)
cls.cdf_dgp = distr0.cdf(x_plot)
cls.amse_pdf = 0.01
cls.amse_cdf = 5e-3
@pytest.mark.parametrize('case', kernels_unit)
def test_kernels(self, case):
super(TestKernelsUnit, self).test_kernels(case)
@pytest.mark.parametrize('case', kernels_unit)
def test_kernels_vectorized(self, case):
super(TestKernelsUnit, self).test_kernels_vectorized(case)
@pytest.mark.parametrize('case', kernels_unit)
def test_kernels_weights(self, case):
super(TestKernelsUnit, self).test_kernels_weights(case)
|
b09e50d2ea7ee20b1538d8935090463f0e3e7b5c
|
ebd9c249d446d809abc9a0f3e4593f34922a1b93
|
/topic/hash/python/__init__.py
|
3a01267e7b9b28a616293bde2ab9b5e27b4f8cef
|
[] |
no_license
|
jaychsu/algorithm
|
ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d
|
91892fd64281d96b8a9d5c0d57b938c314ae71be
|
refs/heads/master
| 2023-05-11T00:40:39.237813
| 2022-09-14T07:43:12
| 2022-09-14T07:43:12
| 106,277,156
| 143
| 39
| null | 2022-09-14T07:43:13
| 2017-10-09T11:51:48
|
Python
|
UTF-8
|
Python
| false
| false
| 84
|
py
|
__init__.py
|
from hash.python.hashtable import HashTable
from hash.python.geohash import GeoHash
|
87592727841e5c40333a327cf10d93745b21a4dd
|
ae1c7877f0d624ab0c9bae65ce8b0de0f9750683
|
/25-DouYin/douyin.py
|
4b7af005fec90c16f06fb91bec5ce96598930bf7
|
[] |
no_license
|
Northxw/Python3_WebSpider
|
a38b723be440707cab40e50e7a3d7486254433b4
|
87cbae60f7a5b033851b0056dff741a3d5980d06
|
refs/heads/master
| 2022-05-01T04:02:13.123747
| 2022-04-08T03:43:00
| 2022-04-08T03:43:00
| 152,024,239
| 545
| 302
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,572
|
py
|
douyin.py
|
# -*- coding:utf-8 -*-
import requests
import re
from lxml import etree
from font import get_mapping_table
import os
def get_share_info(shareid):
"""个人主页信息"""
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
}
url = "https://www.iesdouyin.com/share/user/%s" % shareid
r = requests.get(url, headers=headers)
html = etree.HTML(r.text)
user_info = {}
    # nickname
user_info['nickname'] = re.findall('class="nickname">(.*?)<', r.text, re.S)[0]
    # Douyin ID
id_length = len(html.xpath('//p[@class="shortid"]/i'))
regex_id = 'class="shortid">' + '.*?<i\sclass="icon iconfont ">\s(.*?);\s</i>' * id_length
id_str = html.xpath('//p[@class="shortid"]/text()')[0].replace("抖音ID:",'').strip()
id_code_nums = re.search(regex_id, r.text, re.S).groups()
user_info['id'] = id_str + ''.join([get_mapping_table(num) for num in id_code_nums])
    # account type (verification info)
try:
user_info['job'] = html.xpath('//div[@class="verify-info"]/span/text()')[0].strip()
except:
user_info['job'] = ''
    # signature (bio)
user_info['signature'] = re.findall('class="signature">(.*?)<', r.text, re.S)[0]
    # avatar
user_info['avatar'] = html.xpath('//img[@class="avatar"]/@src')[0]
    # following count
focus_num_length = len(html.xpath('//span[contains(@class, "focus")]/span'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//span[contains(@class, "focus")]/span/text()'))))
regex_focus = 'class="focus block">.*?<span\sclass="num">' + '.*?<i\sclass="icon iconfont follow-num">\s(.*?);\s</i>' * focus_num_length
focus_code_nums = re.search(regex_focus, r.text, re.S).groups()
focus = ''.join([get_mapping_table(num) for num in focus_code_nums])
user_info['focus'] = focus if len(focus) < 5 and not num_unit else str(eval(focus) / 10) + num_unit
    # follower (fan) count
fans_num_length = len(html.xpath('//span[contains(@class, "follower")]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//span[contains(@class, "follower")]/span/text()'))))
regex_fans = 'class="follower block">.*?<span\sclass="num">' + '.*?<i\sclass="icon iconfont follow-num">\s(.*?);\s</i>' * fans_num_length
fans_code_nums = re.search(regex_fans, r.text, re.S).groups()
fans = ''.join([get_mapping_table(num) for num in fans_code_nums])
user_info['fans'] = fans if len(fans) < 5 and not num_unit else str(eval(fans) / 10) + num_unit
    # total likes received
like_num_length = len(html.xpath('//span[contains(@class, "liked-num")]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//span[contains(@class, "liked-num")]/span/text()'))))
regex_likes = 'class="liked-num block">.*?<span\sclass="num">' + '.*?<i\sclass="icon iconfont follow-num">\s(.*?);\s</i>' * like_num_length
like_code_nums = re.search(regex_likes, r.text, re.S).groups()
like_num = ''.join([get_mapping_table(num) for num in like_code_nums])
user_info['liked_num'] = like_num if len(like_num) < 5 and not num_unit else str(eval(like_num) / 10) + num_unit
tab_nums = len(html.xpath("//div[@class='tab-wrap']/div"))
if tab_nums > 2:
        # posted works count
user_tab_num_length = len(html.xpath('//div[@class="tab-wrap"]/div[2]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//div[@class="tab-wrap"]/div[2]/span/text()'))))
regex_tabs = 'class="user-tab tab get-list" data-type="post">.*?<span class="num">' + '.*?<i\sclass="icon iconfont tab-num">\s(.*?);\s</i>' * user_tab_num_length
        # liked works count
like_tab_num_length = len(html.xpath('//div[@class="tab-wrap"]/div[3]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//div[@class="tab-wrap"]/div[3]/span/text()'))))
else:
        # posted works count
user_tab_num_length = len(html.xpath('//div[@class="tab-wrap"]/div[1]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//div[@class="tab-wrap"]/div[1]/span/text()'))))
regex_tabs = 'class="user-tab active tab get-list" data-type="post">.*?<span class="num">' + '.*?<i\sclass="icon iconfont tab-num">\s(.*?);\s</i>' * user_tab_num_length
        # liked works count
like_tab_num_length = len(html.xpath('//div[@class="tab-wrap"]/div[2]/span/i'))
num_unit = ''.join(re.findall(r'[a-zA-Z]', ''.join(html.xpath('//div[@class="tab-wrap"]/div[2]/span/text()'))))
    # posted works count
tab_code_nums = re.search(regex_tabs, r.text, re.S).groups()
tab_num = ''.join([get_mapping_table(num) for num in tab_code_nums])
user_info['tab_num'] = tab_num if len(tab_num) < 5 and not num_unit else str(eval(tab_num) / 10) + num_unit
    # liked works count
regex_like_tabs = 'class="like-tab tab get-list" data-type="like">.*?<span class="num">' + '.*?<i\sclass="icon iconfont tab-num">\s(.*?);\s</i>' * like_tab_num_length
like_tab_code_nums = re.search(regex_like_tabs, r.text, re.S).groups()
like_tab_num = ''.join([get_mapping_table(num) for num in like_tab_code_nums])
user_info['like_tab_num'] = like_tab_num if len(like_tab_num) < 5 and not num_unit else str(eval(like_tab_num) / 10) + num_unit
return user_info
if __name__ == '__main__':
# print(get_share_info('76055758243'))
shareid_path = os.path.dirname(os.path.realpath(__file__)) + "\\shareid.txt"
with open(shareid_path) as f:
shareid_list = f.readlines()
for shareid in shareid_list:
print(get_share_info(shareid.strip()))
|
9a56e9883ea98e32acb83710d8d0a0e2e4e6ff42
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/speech-synthesis/TTS/fastspeech2/utils/text/text.py
|
0337452313d8d150366ada90d6948614788b549b
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
text.py
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
from string import punctuation
from g2p_en import G2p
from .wdict import ___SYMBOL_TO_ID___, __ABBREVIATIONS__
def cleaner(text):
r"""Clean text.
Args:
text (str): Input text.
Returns:
str: Text after being cleaned.
"""
# remove accents
text = ''.join(ch for ch in unicodedata.normalize('NFD', text)
if unicodedata.category(ch) != 'Mn')
# to lowercase
text = text.lower()
# expand abbreviation
for abbreviation, replacement in __ABBREVIATIONS__:
regex = re.compile('\\b%s\\.' % abbreviation)
text = re.sub(regex, replacement, text)
# collapse white space
text = re.sub(re.compile(r'\s+'), ' ', text)
# remove trailing characters
text = text.strip().rstrip(punctuation)
return text
def text_to_phonemes(text):
r"""Clean text and convert it to phonemes.
Args:
text (str): Input text.
Returns:
List[str]: List of phonemes.
"""
g2p = G2p() # phonemes converter
phones = list()
text = cleaner(text) # cleaning text
words = re.split(r"([,;.\-\?\!\s+])", text)
for w in words:
phones += list(filter(lambda p: p != " ", g2p(w)))
phones = "{" + "}{".join(phones) + "}"
phones = re.sub(r"\{[^\w\s]?\}", "{sp}", phones)
phones = phones.replace("}{", " ")
phones = phones[1:-1].split()
return phones
def phonemes_to_ids(phonemes):
r"""Converting a list of phonemes to a list of IDs corresponding to the
symbols in the text.
Args:
phonemes (List[str]): A list of phonemes.
Returns:
List[int]: List of IDs.
"""
return [___SYMBOL_TO_ID___[p] for p in phonemes if p in ___SYMBOL_TO_ID___]
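# --- Illustrative usage (editorial sketch, not part of the original file) ---
# Typical pipeline: clean the raw text, convert it to phonemes, then map the
# phonemes onto the IDs defined in `wdict`. The phoneme strings shown below are
# indicative only; the actual output depends on g2p_en and on the abbreviations
# and symbols that `wdict` defines.
#
# phonemes = text_to_phonemes("Hello, world")  # e.g. ['HH', 'AH0', 'L', 'OW1', 'sp', 'W', 'ER1', 'L', 'D']
# ids = phonemes_to_ids(phonemes)              # IDs for symbols present in ___SYMBOL_TO_ID___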
|
c4f71678cfe914ff1c713b0084b6e8b63158d6ad
|
d6aa13cb1021773d88e2ef780bc4450b38455644
|
/apex/amp/opt.py
|
baf311684de179e3026f15bae99d7aae9e266ce6
|
[
"BSD-3-Clause"
] |
permissive
|
NVIDIA/apex
|
f54a9ced5d8b1c14f777e6bb53f11b3dc3ff2d6b
|
7995de18677295c5edeeab082179edbfdb6ee16a
|
refs/heads/master
| 2023-08-21T13:25:44.408616
| 2023-08-19T04:36:48
| 2023-08-19T04:36:48
| 130,725,814
| 7,932
| 1,381
|
BSD-3-Clause
| 2023-09-13T16:09:42
| 2018-04-23T16:28:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
opt.py
|
import contextlib
import warnings
from .scaler import LossScaler, master_params
from ._amp_state import maybe_print
import numpy as np
class OptimWrapper(object):
def __init__(self, optimizer, amp_handle, num_loss):
self._optimizer = optimizer
self._amp_handle = amp_handle
self._num_loss = num_loss
self._loss_idx = 0
self._skip_next = [False] * num_loss
self._loss_scaler = [LossScaler('dynamic') for _ in range(num_loss)]
@contextlib.contextmanager
def scale_loss(self, loss):
if not self._amp_handle.is_active():
yield loss
return
        # When there are multiple losses per optimizer, we need
        # to save out the current grad accumulation, since we won't be
        # able to unscale this particular loss once the grads are
        # all mixed together.
cached_grads = []
if self._loss_idx > 0:
for p in master_params(self._optimizer):
if p.grad is not None:
cached_grads.append(p.grad.data.detach().clone())
else:
cached_grads.append(None)
self._optimizer.zero_grad()
loss_scale = self._cur_loss_scaler().loss_scale()
yield loss * loss_scale
self._cur_loss_scaler().clear_overflow_state()
self._cur_loss_scaler().unscale(
master_params(self._optimizer),
master_params(self._optimizer),
loss_scale)
self._skip_next[self._loss_idx] = self._cur_loss_scaler().update_scale()
self._loss_idx += 1
if len(cached_grads) > 0:
for p, cached_grad in zip(master_params(self._optimizer),
cached_grads):
if cached_grad is not None:
p.grad.data.add_(cached_grad)
cached_grads = []
def _cur_loss_scaler(self):
assert 0 <= self._loss_idx < self._num_loss
return self._loss_scaler[self._loss_idx]
def step(self, closure=None):
if not self._amp_handle.is_active():
return self._optimizer.step(closure=closure)
self._loss_idx = 0
for group in self._optimizer.param_groups:
for p in group['params']:
self._amp_handle.remove_cache(p)
if closure is not None:
raise NotImplementedError(
'The `closure` argument is unsupported by the amp ' +
'optimizer wrapper.')
if any(self._skip_next):
maybe_print('Gradient overflow, skipping update')
self._skip_next = [False] * self._num_loss
else:
return self._optimizer.step(closure=closure)
# Forward any attribute lookups
def __getattr__(self, attr):
return getattr(self._optimizer, attr)
# Forward all torch.optim.Optimizer methods
def __getstate__(self):
return self._optimizer.__getstate__()
    def __setstate__(self, state):
        return self._optimizer.__setstate__(state)
def __repr__(self):
return self._optimizer.__repr__()
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict):
return self._optimizer.load_state_dict(state_dict)
def zero_grad(self):
return self._optimizer.zero_grad()
def add_param_group(self, param_group):
return self._optimizer.add_param_group(param_group)
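# --- Illustrative usage (editorial sketch, not part of the original file) ---
# A minimal sketch of how `scale_loss` is typically driven when two losses
# share one optimizer; `amp_handle`, `optimizer`, `loss_a` and `loss_b` are
# assumed to exist in the caller's training loop.
#
# wrapper = OptimWrapper(optimizer, amp_handle, num_loss=2)
# for loss in (loss_a, loss_b):
#     with wrapper.scale_loss(loss) as scaled_loss:
#         scaled_loss.backward()
# wrapper.step()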
|
517c21e2c5a69256d6b0bb897ac9649ef8924b7d
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/keras_cv/layers/object_detection_3d/centernet_label_encoder.py
|
5e27a7477ca58e5cedf5731173920e7a4d9c6690
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 16,743
|
py
|
centernet_label_encoder.py
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.backend import scope
from keras_cv.backend.scope import tf_data
from keras_cv.layers.object_detection_3d import voxel_utils
# Infinite voxel size.
INF_VOXEL_SIZE = 100
def _meshgrid(
max_radius_in_voxels: Sequence[int], voxel_size: Sequence[float]
) -> np.ndarray:
"""Computes the mesh grid given number of points in each dimension.
NOTE: this is a pure numpy function.
Args:
max_radius_in_voxels: max radius in each dimension in units of voxels.
voxel_size: voxel size of each dimension.
Returns:
point tensor of shape [-1, len(voxel_size)].
"""
m = max_radius_in_voxels
dim = len(m)
assert dim == 2 or dim == 3
if dim == 2:
mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1]
else:
mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1, -m[2] : m[2] + 1]
mesh = np.concatenate(mesh[..., np.newaxis], axis=-1)
mesh = np.reshape(mesh, [-1, dim])
return mesh * voxel_size
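# --- Worked example (editorial note, not part of the original file) ---
# For max_radius_in_voxels=[1, 1] and voxel_size=[0.5, 0.5] the function
# returns the 9 offsets of a 3x3 neighbourhood, scaled by the voxel size,
# as an array of shape [9, 2]:
# [[-0.5, -0.5], [-0.5, 0.0], [-0.5, 0.5],
#  [ 0.0, -0.5], [ 0.0, 0.0], [ 0.0, 0.5],
#  [ 0.5, -0.5], [ 0.5, 0.0], [ 0.5, 0.5]]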
@tf_data
def compute_heatmap(
box_3d: tf.Tensor,
box_mask: tf.Tensor,
voxel_size: Sequence[float],
max_radius: Sequence[float],
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Compute heatmap for boxes.
Args:
box_3d: 3d boxes in xyz format, vehicle frame, [B, boxes, 7].
box_mask: box masking, [B, boxes]
voxel_size: the size on each voxel dimension (xyz)
max_radius: the maximum radius on each voxel dimension (xyz)
Returns:
point_xyz: the point location w.r.t. vehicle frame, [B, boxes,
max_voxels_per_box, 3]
mask: point mask, [B, boxes, max_voxels_per_box]
heatmap: the returned heatmap w.r.t box frame, [B, boxes,
max_voxels_per_box]
box_id: the box id each point belongs to, [B, boxes, max_voxels_per_box]
"""
# convert radius from point unit to voxel unit.
max_radius_in_voxels = [
math.ceil(mr / vs) for mr, vs in zip(max_radius, voxel_size)
]
# get the mesh grid based on max radius w.r.t each box
# [max_num_voxels_per_box, 3]
points_numpy = _meshgrid(max_radius_in_voxels, voxel_size=voxel_size)
box_center = box_3d[:, :, :3]
# voxelize and de-voxelize point_xyz
# This ensures that we are computing heatmap for each voxel with these
# quantized x,y,z.
# [B, N, max_num_voxels_per_box, 3]
point_xyz = (
box_center[:, :, tf.newaxis, :]
+ tf.constant(points_numpy, dtype=tf.float32)[
tf.newaxis, tf.newaxis, :, :
]
)
# [B, N, max_num_voxels_per_box, 3]
point_xyz = voxel_utils.point_to_voxel_coord(
point_xyz, voxel_size, dtype=tf.int32
)
# Map voxel back to xyz to get quantized version.
# [B, N, max_num_voxels_per_box, 3]
point_xyz = voxel_utils.voxel_coord_to_point(
point_xyz, voxel_size, dtype=tf.float32
)
# Transforms these points to the box frame from vehicle frame.
heading = box_3d[:, :, -1]
# [B, N, 3, 3]
rot = voxel_utils.get_yaw_rotation(heading)
# [B, N, max_num_voxels_per_box, 3]
point_xyz_rot = tf.linalg.matmul(point_xyz, rot)
# convert from box frame to vehicle frame.
# [B, N, max_num_voxels_per_box, 3]
point_xyz_transform = (
point_xyz_rot
+ voxel_utils.inv_loc(rot, box_center)[:, :, tf.newaxis, :]
)
# Due to the transform above, z=0 can be transformed to a non-zero value.
# For 2d heatmap, we do not want to use z.
if voxel_size[2] > INF_VOXEL_SIZE:
point_xyz_transform = tf.concat(
[
point_xyz_transform[..., :2],
tf.zeros_like(point_xyz_transform[..., :1]),
],
axis=-1,
)
# The Gaussian radius is set as the dimension of the boxes
# [B, N, 3]
radius = box_3d[:, :, 3:6]
# [B, N, 1, 3]
radius = radius[:, :, tf.newaxis, :]
# The Gaussian standard deviation is set as 1.
# [B, N, 1, 3]
sigma = tf.ones_like(radius, dtype=radius.dtype)
# Compute point mask. Anything outside the radius is invalid.
# [B, N, max_num_voxels_per_box, 3]
mask = tf.math.less_equal(tf.math.abs(point_xyz_transform), radius)
# [B, N, max_num_voxels_per_box]
mask = tf.math.reduce_all(mask, axis=-1)
# [B, N, max_num_voxels_per_box]
mask = tf.logical_and(box_mask[:, :, tf.newaxis], mask)
# [B, N, max_num_voxels_per_box]
# Gaussian kernel
p2 = point_xyz_transform * point_xyz_transform
p2_sigma = p2 * (-0.5 / (sigma * sigma))
# in box frame.
heatmap = tf.exp(tf.reduce_sum(p2_sigma, axis=-1))
(
batch_size,
num_box,
max_num_voxels_per_box,
_,
) = ops.shape(point_xyz)
box_id = tf.range(num_box, dtype=tf.int32)
box_id = tf.tile(
box_id[tf.newaxis, :, tf.newaxis],
[batch_size, 1, max_num_voxels_per_box],
)
point_xyz = tf.reshape(
point_xyz, [batch_size, num_box * max_num_voxels_per_box, 3]
)
heatmap = tf.reshape(
heatmap, [batch_size, num_box * max_num_voxels_per_box]
)
box_id = tf.reshape(box_id, [batch_size, num_box * max_num_voxels_per_box])
mask = tf.reshape(mask, [batch_size, num_box * max_num_voxels_per_box])
return point_xyz, mask, heatmap, box_id
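# --- Editorial note (not part of the original file) ---
# Shape summary for the values returned above: with B batches and N boxes,
# each box contributes max_voxels_per_box = prod(2 * ceil(max_radius / voxel_size) + 1)
# candidate voxels (product over the three dimensions), and all four outputs
# are flattened to [B, N * max_voxels_per_box, ...] so that
# scatter_to_dense_heatmap below can scatter them onto the dense voxel grid.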
def scatter_to_dense_heatmap(
point_xyz: tf.Tensor,
point_mask: tf.Tensor,
point_box_id: tf.Tensor,
heatmap: tf.Tensor,
voxel_size: Sequence[float],
spatial_size: Sequence[float],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Scatter the heatmap to a dense grid.
N = num_boxes * max_voxels_per_box
Args:
point_xyz: [B, N, 3] 3d points, point coordinate in vehicle frame.
point_mask: [B, N] valid point mask.
point_box_id: [B, N] box id of each point. The ID indexes into the input
box tensors. See compute_heatmap for more details.
heatmap: [B, N] heatmap value of each point.
voxel_size: voxel size.
spatial_size: the spatial size.
Returns:
dense_heatmap: [B, H, W] heatmap value.
dense_box_id: [B, H, W] box id associated with each feature map pixel.
Only pixels with positive heatmap value have valid box id set. Other
locations have random values.
"""
# [B, N, 3]
# convert to voxel units.
point_voxel_xyz = voxel_utils.point_to_voxel_coord(
point_xyz, voxel_size, dtype=tf.int32
)
# [3]
voxel_origin = voxel_utils.compute_voxel_origin(spatial_size, voxel_size)
# [B, N, 3]
# shift point voxel coordinates to positive voxel index.
point_voxel_xyz = point_voxel_xyz - voxel_origin[tf.newaxis, tf.newaxis, :]
voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, voxel_size
)
# [B, N]
point_voxel_valid_mask = tf.math.reduce_all(
tf.math.logical_and(
point_voxel_xyz >= 0, point_voxel_xyz < voxel_spatial_size
),
axis=-1,
)
# [B, N]
point_voxel_valid_mask = tf.math.logical_and(
point_voxel_valid_mask, point_mask
)
# [B, N]
point_voxel_xyz = point_voxel_xyz * tf.cast(
point_voxel_valid_mask[..., tf.newaxis], dtype=point_voxel_xyz.dtype
)
# [B, N]
# filtered heatmap with out of range voxels.
heatmap = heatmap * tf.cast(point_voxel_valid_mask, dtype=heatmap.dtype)
# TODO(tanzheny): consider a batched implementation.
def fn(args):
"""Calls scatter update."""
point_voxel_xyz_i, mask_i, heatmap_i, point_box_id_i = args
mask_index = tf.where(mask_i)
point_voxel_xyz_i = tf.cast(
tf.gather_nd(point_voxel_xyz_i, mask_index), tf.int32
)
heatmap_i = tf.gather_nd(heatmap_i, mask_index)
point_box_id_i = tf.gather_nd(point_box_id_i, mask_index)
# scatter from local heatmap to global heatmap based on point_xyz voxel
# units
dense_heatmap_i = tf.tensor_scatter_nd_update(
tf.zeros(voxel_spatial_size, dtype=heatmap_i.dtype),
point_voxel_xyz_i,
heatmap_i,
)
dense_box_id_i = tf.tensor_scatter_nd_update(
tf.zeros(voxel_spatial_size, dtype=tf.int32),
point_voxel_xyz_i,
point_box_id_i,
)
return dense_heatmap_i, dense_box_id_i
dense_heatmap, dense_box_id = tf.map_fn(
fn,
elems=[point_voxel_xyz, point_voxel_valid_mask, heatmap, point_box_id],
fn_output_signature=(heatmap.dtype, point_box_id.dtype),
)
return dense_heatmap, dense_box_id
def decode_tensor(
t: tf.Tensor, dims: Sequence[Union[tf.Tensor, int]]
) -> tf.Tensor:
"""
Args:
t: int32 or int64 tensor of shape [shape], [B, k]
dims: list of ints., [H, W, Z]
Returns:
t_decoded: int32 or int64 decoded tensor of shape [shape, len(dims)],
[B, k, 3]
"""
with tf.name_scope("decode_tensor"):
multipliers = []
multiplier = 1
assert dims
for d in reversed(dims):
multipliers.append(multiplier)
multiplier = multiplier * d
multipliers = list(reversed(multipliers))
t_decoded_list = []
remainder = t
for m in multipliers:
t_decoded_list.append(tf.math.floordiv(remainder, m))
remainder = tf.math.floormod(remainder, m)
return tf.stack(t_decoded_list, axis=-1)
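# --- Worked example (editorial note, not part of the original file) ---
# With dims = [4, 5, 6] the row-major multipliers are [30, 6, 1], so the flat
# index 79 decodes to [2, 3, 1] because 2*30 + 3*6 + 1*1 = 79. Applied to a
# [B, k] tensor of top-k indices this yields [B, k, 3] voxel coordinates.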
@tf_data
def compute_top_k_heatmap_idx(heatmap: tf.Tensor, k: int) -> tf.Tensor:
"""Computes the top_k heatmap indices.
Args:
heatmap: [B, H, W] for 2 dimension or [B, H, W, Z] for 3 dimensions
k: integer, represent top_k
Returns:
top_k_index: [B, k, 2] for 2 dimensions or [B, k, 3] for 3 dimensions
"""
shape = ops.shape(heatmap)
# [B, H*W*Z]
heatmap_reshape = tf.reshape(heatmap, [shape[0], -1])
# [B, k]
# each index in the range of [0, H*W*Z)
_, indices = tf.math.top_k(heatmap_reshape, k=k, sorted=False)
# [B, k, 2] or [B, k, 3]
# shape[1:] = [H, W, Z], convert the indices from 1 dimension to 3
# dimensions in the range of [0, H), [0, W), [0, Z)
res = decode_tensor(indices, shape[1:])
return res
@keras_cv_export("keras_cv.layers.CenterNetLabelEncoder")
class CenterNetLabelEncoder(keras.layers.Layer):
"""Transforms the raw sparse labels into class specific dense training
labels.
This layer takes the box locations, box classes and box masks, voxelizes
and compute the Gaussian radius for each box, then computes class specific
heatmap for classification and class specific box offset w.r.t to feature
map for regression.
Args:
voxel_size: the x, y, z dimension (in meters) of each voxel.
max_radius: maximum Gaussian radius in each dimension in meters.
spatial_size: the x, y, z boundary of voxels
num_classes: number of object classes.
top_k_heatmap: A sequence of integers, top k for each class. Can be None.
"""
def __init__(
self,
voxel_size: Sequence[float],
max_radius: Sequence[float],
spatial_size: Sequence[float],
num_classes: int,
top_k_heatmap: Sequence[int],
**kwargs,
):
super().__init__(**kwargs)
self._voxel_size = voxel_size
self._max_radius = max_radius
self._spatial_size = spatial_size
self._num_classes = num_classes
self._top_k_heatmap = top_k_heatmap
def call(self, inputs):
"""
Args:
inputs: dictionary of Tensors representing a batch of data. Must
contain 3D box targets under the key "3d_boxes".
Returns:
A dictionary of Tensors with all of the original inputs, plus, for
each class, a new key with encoded CenterNet targets in the format:
```
"class_{class_index}": {
"heatmap": float Tensor [B, H, W, Z] or [B, H, W]
"boxes": float Tensor [B, H, W, Z, 7] or [B, H, W, 7]
"tok_k_index": int Tensor [B, k, 3] or [B, k, 2]
}
```
where:
H: number of voxels in y dimension
W: number of voxels in x dimension
Z: number of voxels in z dimension
k: `top_k_heatmap` slice
"""
with scope.TFDataScope():
box_3d = inputs["3d_boxes"]["boxes"]
box_mask = inputs["3d_boxes"]["mask"]
box_classes = inputs["3d_boxes"]["classes"]
# point_xyz - [B, num_boxes * max_num_voxels_per_box, 3]
# heatmap - [B, num_boxes * max_num_voxels_per_box]
# compute localized heatmap around its radius.
point_xyz, point_mask, heatmap, box_id = compute_heatmap(
box_3d,
box_mask,
self._voxel_size,
self._max_radius,
)
# heatmap - [B, H, W, Z]
# scatter the localized heatmap to global heatmap in vehicle frame.
dense_heatmap, dense_box_id = scatter_to_dense_heatmap(
point_xyz,
point_mask,
box_id,
heatmap,
self._voxel_size,
self._spatial_size,
)
b, h, w, z = ops.shape(dense_box_id)
# [B, H * W * Z]
dense_box_id = tf.reshape(dense_box_id, [b, h * w * z])
# mask out invalid boxes to 0, which represents background
box_classes = box_classes * tf.cast(box_mask, box_classes.dtype)
# [B, H, W, Z]
dense_box_classes = tf.reshape(
tf.gather(box_classes, dense_box_id, batch_dims=1), [b, h, w, z]
)
# [B, H, W, Z, 7] in vehicle frame.
dense_box_3d = tf.reshape(
tf.gather(box_3d, dense_box_id, batch_dims=1), [b, h, w, z, -1]
)
global_xyz = tf.zeros([b, 3], dtype=point_xyz.dtype)
# [B, H, W, Z, 3]
feature_map_ref_xyz = tf.constant(
voxel_utils.compute_feature_map_ref_xyz(
self._voxel_size, self._spatial_size, global_xyz
),
)
# convert from global box point xyz to offset w.r.t center of
# feature map.
# [B, H, W, Z, 3]
dense_box_3d_center = dense_box_3d[..., :3] - tf.cast(
feature_map_ref_xyz, dense_box_3d.dtype
)
# [B, H, W, Z, 7]
dense_box_3d = tf.concat(
[dense_box_3d_center, dense_box_3d[..., 3:]], axis=-1
)
centernet_targets = {}
for i in range(self._num_classes):
# Object class is 1-indexed (0 is background).
dense_box_classes_i = tf.cast(
tf.math.equal(dense_box_classes, i + 1),
dtype=dense_heatmap.dtype,
)
dense_heatmap_i = dense_heatmap * dense_box_classes_i
dense_box_3d_i = (
dense_box_3d * dense_box_classes_i[..., tf.newaxis]
)
# Remove z-dimension if this is 2D setup.
if self._voxel_size[2] > INF_VOXEL_SIZE:
dense_heatmap_i = tf.squeeze(dense_heatmap_i, axis=-1)
dense_box_3d_i = tf.squeeze(dense_box_3d_i, axis=-2)
top_k_heatmap_feature_idx_i = None
if self._top_k_heatmap[i] > 0:
top_k_heatmap_feature_idx_i = compute_top_k_heatmap_idx(
dense_heatmap_i, self._top_k_heatmap[i]
)
centernet_targets[f"class_{i+1}"] = {
"heatmap": dense_heatmap_i,
"boxes": dense_box_3d_i,
"top_k_index": top_k_heatmap_feature_idx_i,
}
inputs.update(centernet_targets)
return inputs
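# --- Illustrative construction (editorial sketch, not part of the original file) ---
# A hedged example of how this layer might be instantiated for a 2D
# (bird's-eye-view) setup; the numbers below are placeholders, not recommended
# defaults. A z voxel size larger than INF_VOXEL_SIZE collapses the z dimension,
# as handled in `call` above.
#
# encoder = CenterNetLabelEncoder(
#     voxel_size=[0.32, 0.32, 1000.0],   # z > INF_VOXEL_SIZE -> 2D heatmaps
#     max_radius=[8.0, 8.0, 0.0],
#     spatial_size=[-80.0, 80.0, -80.0, 80.0, -5.0, 5.0],
#     num_classes=2,
#     top_k_heatmap=[100, 100],
# )
# outputs = encoder(inputs)  # `inputs` must contain the "3d_boxes" dictionary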
|
3e9e02f64c9c3b7fd9d35a6a494d99f44f142d2c
|
c641636e184c0ec1dcc7b851bad678c898cdd05d
|
/legacy/examples/stgcn/data_loader/data_utils.py
|
c7e20864c77d275b21270ab0ed7c27af23a47471
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PGL
|
d8f0a82854a141bee1afdddd9a77bdd723c83ed8
|
7a55649d46d7ad93de31eb9b3ebf71b82d1fcffb
|
refs/heads/main
| 2023-08-17T10:33:02.425526
| 2023-08-04T02:52:06
| 2023-08-04T02:52:06
| 191,286,408
| 1,719
| 341
|
Apache-2.0
| 2023-08-04T02:52:07
| 2019-06-11T03:23:28
|
Python
|
UTF-8
|
Python
| false
| false
| 8,052
|
py
|
data_utils.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""data processing
"""
import numpy as np
import pandas as pd
from utils.math_utils import z_score
class Dataset(object):
"""Dataset
"""
def __init__(self, data, stats):
self.__data = data
self.mean = stats['mean']
self.std = stats['std']
def get_data(self, type): # type: train, val or test
return self.__data[type]
def get_stats(self):
return {'mean': self.mean, 'std': self.std}
def get_len(self, type):
return len(self.__data[type])
def z_inverse(self, type):
return self.__data[type] * self.std + self.mean
def seq_gen(len_seq, data_seq, offset, n_frame, n_route, day_slot, C_0=1):
"""Generate data in the form of standard sequence unit."""
n_slot = day_slot - n_frame + 1
tmp_seq = np.zeros((len_seq * n_slot, n_frame, n_route, C_0))
for i in range(len_seq):
for j in range(n_slot):
sta = (i + offset) * day_slot + j
end = sta + n_frame
tmp_seq[i * n_slot + j, :, :, :] = np.reshape(
data_seq[sta:end, :], [n_frame, n_route, C_0])
return tmp_seq
def adj_matrx_gen_custom(input_file, city_file):
"""genenrate Adjacency Matrix from file
"""
print("generate adj_matrix data (take long time)...")
# data
df = pd.read_csv(
input_file,
sep='\t',
names=['date', '迁出省份', '迁出城市', '迁入省份', '迁入城市', '人数'])
# 只需要2020年的数据
df['date'] = pd.to_datetime(df['date'], format="%Y%m%d")
df = df.set_index('date')
df = df['2020']
city_df = pd.read_csv(city_file)
    # remove Wuhan
city_df = city_df.drop(0)
num = len(city_df)
matrix = np.zeros([num, num])
for i in city_df['city']:
for j in city_df['city']:
if (i == j):
continue
            # select the daily migration counts from city i to city j
cut = df[df['迁出城市'].str.contains(i)]
cut = cut[cut['迁入城市'].str.contains(j)]
            # take the mean as the edge weight
average = cut['人数'].mean()
            # assign the weight to the matrix
i_index = int(city_df[city_df['city'] == i]['num']) - 1
j_index = int(city_df[city_df['city'] == j]['num']) - 1
matrix[i_index, j_index] = average
np.savetxt("dataset/W_74.csv", matrix, delimiter=",")
def data_gen_custom(input_file, output_file, city_file, n, n_his, n_pred,
n_config):
"""data_gen_custom"""
print("generate training data...")
# data
df = pd.read_csv(
input_file,
sep='\t',
names=['date', '迁出省份', '迁出城市', '迁入省份', '迁入城市', '人数'])
    # only the 2020 data is needed
df['date'] = pd.to_datetime(df['date'], format="%Y%m%d")
df = df.set_index('date')
df = df['2020']
city_df = pd.read_csv(city_file)
input_df = pd.DataFrame()
out_df_wuhan = df[df['迁出城市'].str.contains('武汉')]
for i in city_df['city']:
        # filter by destination city
in_df_i = out_df_wuhan[out_df_wuhan['迁入城市'].str.contains(i)]
        # make sure rows are in ascending time order
# in_df_i.sort_values("date",inplace=True)
        # insert by time
in_df_i.reset_index(drop=True, inplace=True)
input_df[i] = in_df_i['人数']
    # replace NaN values
input_df = input_df.replace(np.nan, 0)
x = input_df
y = pd.read_csv(output_file)
    # drop the unnamed index column
x.drop(
x.columns[x.columns.str.contains(
'unnamed', case=False)],
axis=1,
inplace=True)
y = y.drop(columns=['date'])
    # remove migration into Wuhan
x = x.drop(columns=['武汉'])
y = y.drop(columns=['武汉'])
# param
n_val, n_test = n_config
n_train = len(y) - n_val - n_test - 2
# (?,26,74,1)
df = pd.DataFrame(columns=x.columns)
for i in range(len(y) - n_pred + 1):
df = df.append(x[i:i + n_his])
df = df.append(y[i:i + n_pred])
data = df.values.reshape(-1, n_his + n_pred, n,
1) # n == num_nodes == city num
x_stats = {'mean': np.mean(data), 'std': np.std(data)}
x_train = data[:n_train]
x_val = data[n_train:n_train + n_val]
x_test = data[n_train + n_val:]
x_data = {'train': x_train, 'val': x_val, 'test': x_test}
dataset = Dataset(x_data, x_stats)
print("generate successfully!")
return dataset
def data_gen_mydata(input_file, label_file, n, n_his, n_pred, n_config):
"""data processing
"""
# data
x = pd.read_csv(input_file)
y = pd.read_csv(label_file)
x = x.drop(columns=['date'])
y = y.drop(columns=['date'])
# param
n_val, n_test = n_config
n_train = len(y) - n_val - n_test - 2
# (?,26,74,1)
df = pd.DataFrame(columns=x.columns)
for i in range(len(y) - n_pred + 1):
df = df.append(x[i:i + n_his])
df = df.append(y[i:i + n_pred])
data = df.values.reshape(-1, n_his + n_pred, n, 1)
x_stats = {'mean': np.mean(data), 'std': np.std(data)}
x_train = data[:n_train]
x_val = data[n_train:n_train + n_val]
x_test = data[n_train + n_val:]
x_data = {'train': x_train, 'val': x_val, 'test': x_test}
dataset = Dataset(x_data, x_stats)
return dataset
def data_gen(file_path, data_config, n_route, n_frame=21, day_slot=288):
"""Source file load and dataset generation."""
n_train, n_val, n_test = data_config
# generate training, validation and test data
try:
data_seq = pd.read_csv(file_path, header=None).values
    except FileNotFoundError:
        print(f'ERROR: input file was not found in {file_path}.')
        raise
seq_train = seq_gen(n_train, data_seq, 0, n_frame, n_route, day_slot)
seq_val = seq_gen(n_val, data_seq, n_train, n_frame, n_route, day_slot)
seq_test = seq_gen(n_test, data_seq, n_train + n_val, n_frame, n_route,
day_slot)
# x_stats: dict, the stats for the train dataset, including the value of mean and standard deviation.
x_stats = {'mean': np.mean(seq_train), 'std': np.std(seq_train)}
# x_train, x_val, x_test: np.array, [sample_size, n_frame, n_route, channel_size].
x_train = z_score(seq_train, x_stats['mean'], x_stats['std'])
x_val = z_score(seq_val, x_stats['mean'], x_stats['std'])
x_test = z_score(seq_test, x_stats['mean'], x_stats['std'])
x_data = {'train': x_train, 'val': x_val, 'test': x_test}
dataset = Dataset(x_data, x_stats)
return dataset
def gen_batch(inputs, batch_size, dynamic_batch=False, shuffle=False):
"""Data iterator in batch.
Args:
inputs: np.ndarray, [len_seq, n_frame, n_route, C_0], standard sequence units.
batch_size: int, size of batch.
dynamic_batch: bool, whether changes the batch size in the last batch
if its length is less than the default.
shuffle: bool, whether shuffle the batches.
"""
len_inputs = len(inputs)
if shuffle:
idx = np.arange(len_inputs)
np.random.shuffle(idx)
for start_idx in range(0, len_inputs, batch_size):
end_idx = start_idx + batch_size
if end_idx > len_inputs:
if dynamic_batch:
end_idx = len_inputs
else:
break
if shuffle:
slide = idx[start_idx:end_idx]
else:
slide = slice(start_idx, end_idx)
yield inputs[slide]
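# --- Illustrative usage (editorial sketch, not part of the original file) ---
# A typical loop over a dataset built by `data_gen`; the file name and the
# (n_train, n_val, n_test) split below are placeholders.
#
# dataset = data_gen('dataset/V_228.csv', (34, 5, 5), n_route=228)
# for batch in gen_batch(dataset.get_data('train'), batch_size=50, shuffle=True):
#     ...  # batch has shape [batch_size, n_frame, n_route, 1]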
|
afffc0a2cd2216232811bca024b8d0b691d72662
|
37ba914737fba69b3a656ab0a9fcdbc45d113f70
|
/library/panos_check.py
|
61ecbff468a54f43ac7b3c3f11988ffd91140337
|
[
"Apache-2.0"
] |
permissive
|
PaloAltoNetworks/ansible-pan
|
1a30def492393cb540d346b04dc5e7e0f217791c
|
dce32f6d1837dafb59f431e32d954c108c291a9a
|
refs/heads/develop
| 2023-06-23T13:30:26.444014
| 2023-06-14T13:13:14
| 2023-06-14T13:13:14
| 29,692,434
| 236
| 202
|
NOASSERTION
| 2023-06-14T13:13:10
| 2015-01-22T18:07:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
panos_check.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: panos_check
short_description: check if PAN-OS device is ready for configuration
description:
- NOTE: The modules in this role are deprecated in favour of the modules in the collection U(https://paloaltonetworks.github.io/pan-os-ansible)
- Check if PAN-OS device is ready for being configured (no pending jobs).
- The check could be done once or multiple times until the device is ready.
author:
- Luigi Mori (@jtschichold)
- Ivan Bojer (@ivanbojer)
- Garfield Lee Freeman (@shinmog)
version_added: "2.3"
requirements:
- pan-python
- pandevice
notes:
- Panorama is supported.
- Checkmode is not supported.
extends_documentation_fragment:
- panos.transitional_provider
options:
initial_delay:
description:
- Length of time (in seconds) to wait before doing any readiness checks.
default: 0
type: int
timeout:
description:
- Length of time (in seconds) to wait for jobs to finish.
default: 60
type: int
interval:
description:
- Length of time (in seconds) to wait between checks.
default: 0
type: int
'''
EXAMPLES = '''
# Single check.
- name: check if ready
panos_check:
provider: '{{ provider }}'
timeout: 0
# Wait 2 minutes, then check every 5 seconds for 10 minutes.
- name: wait for reboot
panos_check:
provider: '{{ provider }}'
initial_delay: 120
interval: 5
timeout: 600
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.errors import PanDeviceError
except ImportError:
pass
def check_jobs(jobs):
for j in jobs:
status = j.find('.//status')
if status is None or status.text != 'FIN':
return False
return True
def main():
helper = get_connection(
with_classic_provider_spec=True,
argument_spec=dict(
initial_delay=dict(default=0, type='int'),
timeout=dict(default=60, type='int'),
interval=dict(default=0, type='int')
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of,
)
# Optional delay before performing readiness checks.
if module.params['initial_delay']:
time.sleep(module.params['initial_delay'])
timeout = module.params['timeout']
interval = module.params['interval']
end_time = time.time() + timeout
parent = helper.get_pandevice_parent(module, timeout)
# TODO(gfreeman) - consider param for "show chassis-ready".
while True:
try:
ans = parent.op(cmd="show jobs all")
except PanDeviceError:
pass
else:
jobs = ans.findall('.//job')
if check_jobs(jobs):
break
if time.time() > end_time:
module.fail_json(msg='Timeout')
time.sleep(interval)
module.exit_json(changed=True, msg="done")
if __name__ == '__main__':
main()
|
216ad373f281e257118824ff0339f75332b1c1a5
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/demos/multi_camera_multi_target_tracking_demo/python/multi_camera_multi_target_tracking_demo.py
|
4e72fdddb1a59ee204f642e735450e3a1e11d4be
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 11,354
|
py
|
multi_camera_multi_target_tracking_demo.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2019-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import time
import queue
from threading import Thread
import json
import logging as log
import os
from pathlib import Path
import random
import sys
import cv2 as cv
from utils.network_wrappers import Detector, VectorCNN, MaskRCNN, DetectionsFromFileReader
from mc_tracker.mct import MultiCameraTracker
from utils.analyzer import save_embeddings
from utils.misc import read_py_config, check_pressed_keys
from utils.video import MulticamCapture, NormalizerCLAHE
from utils.visualization import visualize_multicam_detections, get_target_size
from openvino.runtime import Core, get_version
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python/openvino/model_zoo'))
import monitors
from model_api.performance_metrics import PerformanceMetrics
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)
def check_detectors(args):
detectors = {
'--m_detector': args.m_detector,
'--m_segmentation': args.m_segmentation,
'--detections': args.detections
}
non_empty_detectors = [(det, value) for det, value in detectors.items() if value]
det_number = len(non_empty_detectors)
if det_number == 0:
log.error('No detector specified, please specify one of the following parameters: '
'\'--m_detector\', \'--m_segmentation\' or \'--detections\'')
elif det_number > 1:
det_string = ''.join('\n\t{}={}'.format(det[0], det[1]) for det in non_empty_detectors)
log.error('Only one detector expected but got {}, please specify one of them:{}'
.format(len(non_empty_detectors), det_string))
return det_number
def update_detections(output, detections, frame_number):
for i, detection in enumerate(detections):
entry = {'frame_id': frame_number, 'scores': [], 'boxes': []}
for det in detection:
entry['boxes'].append(det[0])
entry['scores'].append(float(det[1]))
output[i].append(entry)
def save_json_file(save_path, data, description=''):
save_dir = os.path.dirname(save_path)
if save_dir and not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(save_path, 'w') as outfile:
json.dump(data, outfile)
if description:
log.debug('{} saved to {}'.format(description, save_path))
class FramesThreadBody:
def __init__(self, capture, max_queue_length=2):
self.process = True
self.frames_queue = queue.Queue()
self.capture = capture
self.max_queue_length = max_queue_length
def __call__(self):
while self.process:
if self.frames_queue.qsize() > self.max_queue_length:
time.sleep(0.1)
continue
has_frames, frames = self.capture.get_frames()
if not has_frames and self.frames_queue.empty():
self.process = False
break
if has_frames:
self.frames_queue.put(frames)
def run(params, config, capture, detector, reid):
win_name = 'Multi camera tracking'
frame_number = 0
output_detections = [[] for _ in range(capture.get_num_sources())]
key = -1
if config.normalizer_config.enabled:
capture.add_transform(
NormalizerCLAHE(
config.normalizer_config.clip_limit,
config.normalizer_config.tile_size,
)
)
tracker = MultiCameraTracker(capture.get_num_sources(), reid, config.sct_config,
**vars(config.mct_config), visual_analyze=config.analyzer)
thread_body = FramesThreadBody(capture, max_queue_length=len(capture.captures) * 2)
frames_thread = Thread(target=thread_body)
frames_thread.start()
frames_read = False
set_output_params = False
prev_frames = thread_body.frames_queue.get()
detector.run_async(prev_frames, frame_number)
metrics = PerformanceMetrics()
presenter = monitors.Presenter(params.utilization_monitors, 0)
while thread_body.process:
if not params.no_show:
key = check_pressed_keys(key)
if key == 27:
break
presenter.handleKey(key)
start_time = time.perf_counter()
try:
frames = thread_body.frames_queue.get_nowait()
frames_read = True
except queue.Empty:
frames = None
if frames is None:
continue
all_detections = detector.wait_and_grab()
if params.save_detections:
update_detections(output_detections, all_detections, frame_number)
frame_number += 1
detector.run_async(frames, frame_number)
all_masks = [[] for _ in range(len(all_detections))]
for i, detections in enumerate(all_detections):
all_detections[i] = [det[0] for det in detections]
all_masks[i] = [det[2] for det in detections if len(det) == 3]
tracker.process(prev_frames, all_detections, all_masks)
tracked_objects = tracker.get_tracked_objects()
vis = visualize_multicam_detections(prev_frames, tracked_objects,
**vars(config.visualization_config))
metrics.update(start_time, vis)
presenter.drawGraphs(vis)
if not params.no_show:
cv.imshow(win_name, vis)
if frames_read and not set_output_params:
set_output_params = True
if len(params.output_video):
frame_size = [frame.shape[1::-1] for frame in frames]
fps = capture.get_fps()
target_width, target_height = get_target_size(
frame_size, None, **vars(config.visualization_config))
video_output_size = (target_width, target_height)
fourcc = cv.VideoWriter_fourcc(*'XVID')
output_video = cv.VideoWriter(params.output_video, fourcc, min(fps), video_output_size)
else:
output_video = None
if set_output_params and output_video:
output_video.write(cv.resize(vis, video_output_size))
prev_frames, frames = frames, prev_frames
metrics.log_total()
for rep in presenter.reportMeans():
log.info(rep)
thread_body.process = False
frames_thread.join()
if len(params.history_file):
save_json_file(params.history_file, tracker.get_all_tracks_history(), description='History file')
if len(params.save_detections):
save_json_file(params.save_detections, output_detections, description='Detections')
if len(config.embeddings.save_path):
save_embeddings(tracker.scts, **vars(config.embeddings))
detector.infer_queue.wait_all()
def main():
    """Prepares data for the object tracking demo"""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser(description='Multi camera multi object '
                                                 'tracking live demo script')
parser.add_argument('-i', '--input', required=True, nargs='+',
help='Required. Input sources (indexes of cameras or paths to video files)')
parser.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop')
parser.add_argument('--config', type=str, default=os.path.join(current_dir, 'configs/person.py'), required=False,
help='Configuration file')
parser.add_argument('--detections', type=str, help='JSON file with bounding boxes')
parser.add_argument('-m', '--m_detector', type=str, required=False,
help='Path to the object detection model')
parser.add_argument('--t_detector', type=float, default=0.6,
help='Threshold for the object detection model')
parser.add_argument('--m_segmentation', type=str, required=False,
help='Path to the object instance segmentation model')
parser.add_argument('--t_segmentation', type=float, default=0.6,
help='Threshold for object instance segmentation model')
parser.add_argument('--m_reid', type=str, required=True,
help='Required. Path to the object re-identification model')
parser.add_argument('--output_video', type=str, default='', required=False,
help='Optional. Path to output video')
parser.add_argument('--history_file', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save results of the demo')
parser.add_argument('--save_detections', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save bounding boxes')
parser.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU')
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
args = parser.parse_args()
if check_detectors(args) != 1:
sys.exit(1)
if len(args.config):
log.debug('Reading config from {}'.format(args.config))
config = read_py_config(args.config)
else:
log.error('No configuration file specified. Please specify parameter \'--config\'')
sys.exit(1)
random.seed(config.random_seed)
capture = MulticamCapture(args.input, args.loop)
log.info('OpenVINO Runtime')
log.info('\tbuild: {}'.format(get_version()))
core = Core()
if args.detections:
object_detector = DetectionsFromFileReader(args.detections, args.t_detector)
elif args.m_segmentation:
object_detector = MaskRCNN(core, args.m_segmentation,
config.obj_segm.trg_classes,
args.t_segmentation,
args.device,
capture.get_num_sources())
else:
object_detector = Detector(core, args.m_detector,
config.obj_det.trg_classes,
args.t_detector,
args.device,
capture.get_num_sources())
if args.m_reid:
object_recognizer = VectorCNN(core, args.m_reid, args.device)
else:
object_recognizer = None
run(args, config, capture, object_detector, object_recognizer)
if __name__ == '__main__':
sys.exit(main() or 0)
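# Example invocation (illustrative; the script name, camera indexes and model
# paths are placeholders, not files shipped with the demo):
#
#   python this_demo.py -i 0 1 \
#       -m models/person-detection.xml \
#       --m_reid models/person-reidentification.xml \
#       --config configs/person.py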
----- /kafka_utils/util/validation.py | repo: Yelp/kafka-utils | license: Apache-2.0 | Python, 11,068 bytes -----
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""Provide functions to validate and generate a Kafka assignment"""
from __future__ import annotations
import logging
from collections import Counter
from typing_extensions import TypedDict
_log = logging.getLogger(__name__)
class PartitionDict(TypedDict):
topic: str
partition: int
replicas: list[int]
class PlanDict(TypedDict):
version: int
partitions: list[PartitionDict]
def plan_to_assignment(plan: PlanDict) -> dict[tuple[str, int], list[int]]:
"""Convert the plan to the format used by cluster-topology."""
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment
def assignment_to_plan(assignment: dict[tuple[str, int], list[int]]) -> PlanDict:
"""Convert an assignment to the format used by Kafka to
describe a reassignment plan.
"""
return {
'version': 1,
'partitions':
[{'topic': t_p[0],
'partition': t_p[1],
'replicas': replica
} for t_p, replica in assignment.items()]
}
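# Illustrative round trip between the two representations (not part of the
# original module): a one-partition plan and its assignment form.
#
#   plan = {'version': 1,
#           'partitions': [{'topic': 't1', 'partition': 0, 'replicas': [1, 2, 3]}]}
#   plan_to_assignment(plan)                    # -> {('t1', 0): [1, 2, 3]}
#   assignment_to_plan({('t1', 0): [1, 2, 3]})  # -> the plan above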
def validate_plan(
new_plan: PlanDict,
base_plan: PlanDict | None = None,
is_partition_subset: bool = True,
allow_rf_change: bool = False,
allow_rf_mismatch: bool = False,
) -> bool:
"""Verify that the new plan is valid for execution.
Given kafka-reassignment plan should affirm with following rules:
- Plan should have at least one partition for re-assignment
- Partition-name list should be subset of base-plan partition-list
- Replication-factor for each partition of same topic is same
- Replication-factor for each partition remains unchanged
- No duplicate broker-ids in each replicas
"""
if not _validate_plan(new_plan, allow_rf_mismatch=allow_rf_mismatch):
_log.error('Invalid proposed-plan.')
return False
# Validate given plan in reference to base-plan
if base_plan:
if not _validate_plan(base_plan, allow_rf_mismatch=allow_rf_mismatch):
_log.error('Invalid assignment from cluster.')
return False
if not _validate_plan_base(
new_plan,
base_plan,
is_partition_subset,
allow_rf_change,
):
return False
# Plan validation successful
return True
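# Illustrative check (not part of the original module): a proposed plan that
# changes a partition's replication-factor fails validation against the base
# plan unless allow_rf_change=True.
#
#   base = {'version': 1,
#           'partitions': [{'topic': 't1', 'partition': 0, 'replicas': [1, 2, 3]}]}
#   new = {'version': 1,
#          'partitions': [{'topic': 't1', 'partition': 0, 'replicas': [1, 2]}]}
#   validate_plan(new, base)                        # -> False (RF changed)
#   validate_plan(new, base, allow_rf_change=True)  # -> True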
def _validate_plan_base(
new_plan: PlanDict,
base_plan: PlanDict,
is_partition_subset: bool = True,
allow_rf_change: bool = False,
) -> bool:
"""Validate if given plan is valid comparing with given base-plan.
Validate following assertions:
- Partition-check: New partition-set should be subset of base-partition set
- Replica-count check: Replication-factor for each partition remains same
- Broker-check: New broker-set should be subset of base broker-set
"""
# Verify that partitions in plan are subset of base plan.
new_partitions = {
(p_data['topic'], p_data['partition'])
for p_data in new_plan['partitions']
}
base_partitions = {
(p_data['topic'], p_data['partition'])
for p_data in base_plan['partitions']
}
if is_partition_subset:
invalid_partitions = list(new_partitions - base_partitions)
else:
# partition set should be equal
invalid_partitions = list(
new_partitions.union(base_partitions) -
new_partitions.intersection(base_partitions),
)
if invalid_partitions:
_log.error(
'Invalid partition(s) found: {p_list}'.format(
p_list=invalid_partitions,
)
)
return False
# Verify replication-factor remains consistent
base_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in base_plan['partitions']
}
new_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in new_plan['partitions']
}
if not allow_rf_change:
invalid_replication_factor = False
for new_partition, replicas in new_partition_replicas.items():
base_replica_cnt = len(base_partition_replicas[new_partition])
if len(replicas) != base_replica_cnt:
invalid_replication_factor = True
_log.error(
'Replication-factor Mismatch: Partition: {partition}: '
'Base-replicas: {expected}, Proposed-replicas: {actual}'
.format(
partition=new_partition,
expected=base_partition_replicas[new_partition],
actual=replicas,
),
)
if invalid_replication_factor:
return False
# Validation successful
return True
def _validate_format(plan: PlanDict) -> bool:
"""Validate if the format of the plan as expected.
Validate format of plan on following rules:
a) Verify if it ONLY and MUST have keys and value, 'version' and 'partitions'
b) Verify if each value of 'partitions' ONLY and MUST have keys 'replicas',
'partition', 'topic'
c) Verify desired type of each value
d) Verify non-empty partitions and replicas
Sample-plan format:
{
"version": 1,
"partitions": [
{"partition":0, "topic":'t1', "replicas":[0,1,2]},
{"partition":0, "topic":'t2', "replicas":[1,2]},
...
]}
"""
# Verify presence of required keys
if set(plan.keys()) != {'version', 'partitions'}:
_log.error(
'Invalid or incomplete keys in given plan. Expected: "version", '
'"partitions". Found:{keys}'
.format(keys=', '.join(list(plan.keys()))),
)
return False
# Invalid version
if plan['version'] != 1:
_log.error(
'Invalid version of plan {version}'
.format(version=plan['version']),
)
return False
# Empty partitions
if not plan['partitions']:
_log.error('"partitions" list found empty"')
return False
# Invalid partitions type
if not isinstance(plan['partitions'], list):
_log.error('"partitions" of type list expected.') # type: ignore[unreachable]
return False
# Invalid partition-data
for p_data in plan['partitions']:
if set(p_data.keys()) != {'topic', 'partition', 'replicas'}:
_log.error(
'Invalid keys in partition-data {keys}'
.format(keys=', '.join(list(p_data.keys()))),
)
return False
# Check types
if not isinstance(p_data['topic'], str):
_log.error( # type: ignore[unreachable]
'"topic" of type unicode expected {p_data}, found {t_type}'
.format(p_data=p_data, t_type=type(p_data['topic'])),
)
return False
if not isinstance(p_data['partition'], int):
_log.error( # type: ignore[unreachable]
'"partition" of type int expected {p_data}, found {p_type}'
.format(p_data=p_data, p_type=type(p_data['partition'])),
)
return False
if not isinstance(p_data['replicas'], list):
_log.error( # type: ignore[unreachable]
'"replicas" of type list expected {p_data}, found {r_type}'
.format(p_data=p_data, r_type=type(p_data['replicas'])),
)
return False
if not p_data['replicas']:
_log.error(
'Non-empty "replicas" expected: {p_data}'
.format(p_data=p_data),
)
return False
# Invalid broker-type
for broker in p_data['replicas']:
if not isinstance(broker, int):
_log.error( # type: ignore[unreachable]
'"replicas" of type integer list expected {p_data}'
.format(p_data=p_data),
)
return False
return True
def _validate_plan(plan: PlanDict, allow_rf_mismatch: bool = False) -> bool:
"""Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
"""
# Validate format of plan
if not _validate_format(plan):
return False
# Verify no duplicate partitions
partition_names = [
(p_data['topic'], p_data['partition'])
for p_data in plan['partitions']
]
duplicate_partitions = [
partition for partition, count in Counter(partition_names).items()
if count > 1
]
if duplicate_partitions:
_log.error(
'Duplicate partitions in plan {p_list}'
.format(p_list=duplicate_partitions),
)
return False
# Verify no duplicate brokers in partition-replicas
dup_replica_brokers = []
for p_data in plan['partitions']:
dup_replica_brokers = [
broker
for broker, count in Counter(p_data['replicas']).items()
if count > 1
]
if dup_replica_brokers:
_log.error(
'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
.format(
topic=p_data['topic'],
p_id=p_data['partition'],
replicas=p_data['replicas'],
)
)
return False
# Verify same replication-factor for partitions in the same topic
if not allow_rf_mismatch:
topic_replication_factor: dict[str, int] = {}
for partition_info in plan['partitions']:
topic = partition_info['topic']
replication_factor = len(partition_info['replicas'])
if topic in list(topic_replication_factor.keys()):
if topic_replication_factor[topic] != replication_factor:
_log.error(
'Mismatch in replication-factor of partitions for topic '
'{topic}'.format(topic=topic),
)
return False
else:
topic_replication_factor[topic] = replication_factor
return True
----- /tests/ipython/factories/test_basic_heap_object_factory.py | repo: lgpage/nbtutor | licenses: BSD-3-Clause, BSD-2-Clause | Python, 2,211 bytes -----
import json
from nbtutor.ipython.factories.basic_heap_object_factory import BasicHeapObjectFactory
class TestBasicHeapObjectFactory(object):
def test_create_bool_returns_expected_result(self):
py_obj = True
factory = BasicHeapObjectFactory(py_obj)
heap_obj = factory.create()
assert heap_obj.id == "{0}".format(id(py_obj))
assert heap_obj.type == 'bool'
assert heap_obj.value == 'True'
assert heap_obj.render_type == 'basic'
assert heap_obj.references is None
assert heap_obj.render_options is None
assert heap_obj.immutable
assert json.dumps(heap_obj.to_dict()) # can serialize
def test_create_int_returns_expected_result(self):
py_obj = 12
heap_obj = BasicHeapObjectFactory(py_obj).create()
assert heap_obj.id == "{0}".format(id(py_obj))
assert heap_obj.type == 'int'
assert heap_obj.value == '12'
assert heap_obj.render_type == 'basic'
assert heap_obj.references is None
assert heap_obj.render_options is None
assert heap_obj.immutable
assert json.dumps(heap_obj.to_dict()) # can serialize
def test_create_float_returns_expected_result(self):
py_obj = 12.123
heap_obj = BasicHeapObjectFactory(py_obj).create()
assert heap_obj.id == "{0}".format(id(py_obj))
assert heap_obj.type == 'float'
assert heap_obj.value == '12.123'
assert heap_obj.render_type == 'basic'
assert heap_obj.references is None
assert heap_obj.render_options is None
assert heap_obj.immutable
assert json.dumps(heap_obj.to_dict()) # can serialize
def test_create_str_returns_expected_result(self):
py_obj = "hello world"
heap_obj = BasicHeapObjectFactory(py_obj).create()
assert heap_obj.id == "{0}".format(id(py_obj))
assert heap_obj.type == 'str'
assert heap_obj.value == 'hello world'
assert heap_obj.render_type == 'basic'
assert heap_obj.references is None
assert heap_obj.render_options is None
assert heap_obj.immutable
assert json.dumps(heap_obj.to_dict()) # can serialize
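# The four tests above share one pattern; a parametrized variant (illustrative
# only, not part of the original suite) could express them as a single test:
#
#   import pytest
#
#   @pytest.mark.parametrize("py_obj, type_name, value", [
#       (True, 'bool', 'True'),
#       (12, 'int', '12'),
#       (12.123, 'float', '12.123'),
#       ("hello world", 'str', 'hello world'),
#   ])
#   def test_create_returns_expected_result(py_obj, type_name, value):
#       heap_obj = BasicHeapObjectFactory(py_obj).create()
#       assert heap_obj.type == type_name
#       assert heap_obj.value == value
#       assert heap_obj.immutable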
----- /mtools/mloginfo/sections/rs_info_section.py | repo: rueckstiess/mtools | license: Apache-2.0 | Python, 1,916 bytes -----
from .base_section import BaseSection
from mtools.util.logformat import LogFormat
class RsInfoSection(BaseSection):
"""
RsInfoSection class.
    This section determines if there is any Replica Set information like the
replset name in the log file and prints the available information.
"""
name = "rsinfo"
def __init__(self, mloginfo):
BaseSection.__init__(self, mloginfo)
# add --rsinfo flag to argparser
helptext = 'outputs replica set config information'
self.mloginfo.argparser_sectiongroup.add_argument('--rsinfo',
action='store_true',
help=helptext)
@property
def active(self):
"""Return boolean if this section is active."""
return self.mloginfo.args['rsinfo']
def run(self):
"""Run this section and print out information."""
if self.mloginfo.logfile.logformat == LogFormat.PROFILE:
print("\nERROR: mloginfo --restarts does not support "
"system.profile collections\n")
return
if self.mloginfo.logfile.repl_set:
print(" rs name: %s" % self.mloginfo.logfile.repl_set)
print(" rs members: %s"
% (self.mloginfo.logfile.repl_set_members
if self.mloginfo.logfile.repl_set_members
else "unknown"))
print(" rs version: %s"
% (self.mloginfo.logfile.repl_set_version
if self.mloginfo.logfile.repl_set_version
else "unknown"))
print("rs protocol: %s"
% (self.mloginfo.logfile.repl_set_protocol
if self.mloginfo.logfile.repl_set_protocol
else "unknown"))
else:
print(" no rs info changes found")
----- /examples/hello.py | repo: thadeusb/flask-cache | license: BSD-3-Clause | Python, 1,099 bytes -----
import random
from datetime import datetime
from flask import Flask, jsonify
from flask.ext.cache import Cache
app = Flask(__name__)
app.config.from_pyfile('hello.cfg')
cache = Cache(app)
#: This is an example of a cached view
@app.route('/api/now')
@cache.cached(50)
def current_time():
return str(datetime.now())
#: This is an example of a cached function
@cache.cached(key_prefix='binary')
def random_binary():
return [random.randrange(0, 2) for i in range(500)]
@app.route('/api/get/binary')
def get_binary():
return jsonify({'data': random_binary()})
#: This is an example of a memoized function
@cache.memoize(60)
def _add(a, b):
return a + b + random.randrange(0, 1000)
@cache.memoize(60)
def _sub(a, b):
return a - b - random.randrange(0, 1000)
@app.route('/api/add/<int:a>/<int:b>')
def add(a, b):
return str(_add(a, b))
@app.route('/api/sub/<int:a>/<int:b>')
def sub(a, b):
return str(_sub(a, b))
@app.route('/api/cache/delete')
def delete_cache():
cache.delete_memoized('_add', '_sub')
return 'OK'
if __name__ == '__main__':
app.run()
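# A minimal hello.cfg for this example might contain (assumption; any backend
# supported by Flask-Cache works here):
#
#   CACHE_TYPE = 'simple'
#   DEBUG = True
#
# With the app running, the cached endpoints can be exercised with e.g.:
#
#   curl http://127.0.0.1:5000/api/now          # same value for ~50 seconds
#   curl http://127.0.0.1:5000/api/add/2/3      # memoized for 60 seconds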
----- /components/dash-table/tests/selenium/test_sort.py | repo: plotly/dash | license: MIT | Python, 928 bytes -----
import dash
from utils import basic_modes, get_props
from dash.dash_table import DataTable
import pytest
def get_app(props=dict()):
app = dash.Dash(__name__)
baseProps = get_props()
baseProps.update(dict(sort_action="native"))
baseProps.update(props)
app.layout = DataTable(**baseProps)
return app
@pytest.mark.parametrize("props", basic_modes)
def test_sort001_can_sort(test, props):
test.start_server(get_app(props))
target = test.table("table")
    assert target.cell(0, "bbb-readonly").get_text() == "label Wet"
    assert target.cell(1, "bbb-readonly").get_text() == "label Snowy"
    assert target.cell(2, "bbb-readonly").get_text() == "label Tropical Beaches"
    assert target.cell(3, "bbb-readonly").get_text() == "label Humid"
    target.column("bbb-readonly").sort(2)
    for i in range(4):
        assert target.cell(i, "bbb-readonly").get_text() == "label Humid"
assert test.get_log_errors() == []
----- /pylot/debug/visualizer_operator.py | repo: erdos-project/pylot | license: Apache-2.0 | Python, 19,943 bytes -----
"""This module implements an operator for visualizing the state of
the different pipeline components (e.g., detections, tracked obstacles,
planning waypoints)."""
from collections import deque
from functools import partial
import erdos
import numpy as np
import pygame
from pygame.locals import K_n
import pylot.utils
from pylot.drivers.sensor_setup import RGBCameraSetup
from pylot.perception.camera_frame import CameraFrame
from pylot.planning.world import World
DEFAULT_VIS_TIME = 30000.0
class VisualizerOperator(erdos.Operator):
""" The `VisualizerOperator` allows developers to see the current state
of the entire pipeline by visualizing it on a pygame instance.
This receives input data from almost the entire pipeline and renders the
results of the operator currently chosen by the developer on the screen.
"""
def __init__(self, pose_stream, rgb_camera_stream, tl_camera_stream,
prediction_camera_stream, depth_camera_stream,
point_cloud_stream, segmentation_stream, imu_stream,
obstacles_stream, traffic_lights_stream,
tracked_obstacles_stream, lane_detection_stream,
prediction_stream, waypoints_stream, control_stream,
display_control_stream, pygame_display, flags):
visualize_streams = []
self._pose_msgs = deque()
pose_stream.add_callback(
partial(self.save, msg_type="Pose", queue=self._pose_msgs))
visualize_streams.append(pose_stream)
self._bgr_msgs = deque()
rgb_camera_stream.add_callback(
partial(self.save, msg_type="RGB", queue=self._bgr_msgs))
visualize_streams.append(rgb_camera_stream)
self._imu_msgs = deque()
imu_stream.add_callback(
partial(self.save, msg_type="IMU", queue=self._imu_msgs))
visualize_streams.append(imu_stream)
self._obstacle_msgs = deque()
obstacles_stream.add_callback(
partial(self.save, msg_type="Obstacle", queue=self._obstacle_msgs))
visualize_streams.append(obstacles_stream)
self._tracked_obstacle_msgs = deque()
tracked_obstacles_stream.add_callback(
partial(self.save,
msg_type="TrackedObstacle",
queue=self._tracked_obstacle_msgs))
visualize_streams.append(tracked_obstacles_stream)
self._tl_camera_msgs = deque()
tl_camera_stream.add_callback(
partial(self.save, msg_type="TLCamera",
queue=self._tl_camera_msgs))
visualize_streams.append(tl_camera_stream)
self._traffic_light_msgs = deque()
traffic_lights_stream.add_callback(
partial(self.save,
msg_type="TrafficLight",
queue=self._traffic_light_msgs))
visualize_streams.append(traffic_lights_stream)
self._waypoint_msgs = deque()
waypoints_stream.add_callback(
partial(self.save, msg_type="Waypoint", queue=self._waypoint_msgs))
visualize_streams.append(waypoints_stream)
self._prediction_camera_msgs = deque()
prediction_camera_stream.add_callback(
partial(self.save,
msg_type="PredictionCamera",
queue=self._prediction_camera_msgs))
visualize_streams.append(prediction_camera_stream)
self._prediction_msgs = deque()
prediction_stream.add_callback(
partial(self.save,
msg_type="Prediction",
queue=self._prediction_msgs))
visualize_streams.append(prediction_stream)
self._point_cloud_msgs = deque()
point_cloud_stream.add_callback(
partial(self.save,
msg_type="PointCloud",
queue=self._point_cloud_msgs))
visualize_streams.append(point_cloud_stream)
self._lane_detection_msgs = deque()
lane_detection_stream.add_callback(
partial(self.save,
msg_type="Lanes",
queue=self._lane_detection_msgs))
visualize_streams.append(lane_detection_stream)
self._depth_msgs = deque()
depth_camera_stream.add_callback(
partial(self.save, msg_type="Depth", queue=self._depth_msgs))
visualize_streams.append(depth_camera_stream)
self._segmentation_msgs = deque()
segmentation_stream.add_callback(
partial(self.save,
msg_type="Segmentation",
queue=self._segmentation_msgs))
visualize_streams.append(segmentation_stream)
self._control_msgs = deque()
control_stream.add_callback(
partial(self.save, msg_type="Control", queue=self._control_msgs))
visualize_streams.append(control_stream)
# Register a watermark callback on all the streams to be visualized.
erdos.add_watermark_callback(visualize_streams, [], self.on_watermark)
# Add a callback on a control stream to figure out what to display.
display_control_stream.add_callback(self.change_display)
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self.display = pygame_display
# Set the font.
fonts = [x for x in pygame.font.get_fonts() if 'mono' in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self.font = pygame.font.Font(mono, 14)
# Array of keys to figure out which message to display.
self.current_display = 0
self.display_array = []
self.window_titles = []
if flags.visualize_rgb_camera:
self.display_array.append("RGB")
self.window_titles.append("RGB Camera")
if flags.visualize_detected_obstacles:
self.display_array.append("Obstacle")
self.window_titles.append("Detected obstacles")
if flags.visualize_tracked_obstacles:
self.display_array.append("TrackedObstacle")
self.window_titles.append("Obstacle tracking")
if flags.visualize_detected_traffic_lights:
self.display_array.append("TLCamera")
self.window_titles.append("Detected traffic lights")
if flags.visualize_waypoints:
self.display_array.append("Waypoint")
self.window_titles.append("Planning")
if flags.visualize_prediction:
self.display_array.append("PredictionCamera")
self.window_titles.append("Prediction")
if flags.visualize_lidar:
self.display_array.append("PointCloud")
self.window_titles.append("LiDAR")
if flags.visualize_detected_lanes:
self.display_array.append("Lanes")
self.window_titles.append("Detected lanes")
if flags.visualize_depth_camera:
self.display_array.append("Depth")
self.window_titles.append("Depth Camera")
if flags.visualize_segmentation:
self.display_array.append("Segmentation")
self.window_titles.append("Segmentation")
if flags.visualize_world:
self._planning_world = World(flags, self._logger)
top_down_transform = pylot.utils.get_top_down_transform(
pylot.utils.Transform(pylot.utils.Location(),
pylot.utils.Rotation()),
flags.top_down_camera_altitude)
self._bird_eye_camera_setup = RGBCameraSetup(
'bird_eye_camera', flags.camera_image_width,
flags.camera_image_height, top_down_transform, 90)
self.display_array.append("PlanningWorld")
self.window_titles.append("Planning world")
else:
self._planning_world = None
assert len(self.display_array) == len(self.window_titles), \
"The display and titles differ."
# Save the flags.
self._flags = flags
@staticmethod
def connect(pose_stream, rgb_camera_stream, tl_camera_stream,
prediction_camera_stream, depth_stream, point_cloud_stream,
segmentation_stream, imu_stream, obstacles_stream,
traffic_lights_stream, tracked_obstacles_stream,
lane_detection_stream, prediction_stream, waypoints_stream,
control_stream, display_control_stream):
return []
def destroy(self):
self._logger.warn('destroying {}'.format(self.config.name))
def save(self, msg, msg_type, queue):
self._logger.debug("@{}: Received {} message.".format(
msg.timestamp, msg_type))
queue.append(msg)
def change_display(self, display_message):
if display_message.data == K_n:
self.current_display = (self.current_display + 1) % len(
self.display_array)
self._logger.debug("@{}: Visualizer changed to {}".format(
display_message.timestamp, self.current_display))
def get_message(self, queue, timestamp, name):
msg = None
if queue:
while len(queue) > 0:
retrieved_msg = queue.popleft()
if retrieved_msg.timestamp == timestamp:
msg = retrieved_msg
break
if not msg:
self._logger.warning(
"@{}: message for {} was not found".format(
timestamp, name))
return msg
def render_text(self, pose, control, timestamp):
# Generate the text to be shown on the box.
info_text = [
"Display : {}".format(self.window_titles[self.current_display]),
"Timestamp: {}".format(timestamp.coordinates[0]),
]
# Add information from the pose.
if pose:
info_text += [
"Location : {:.1f}, {:.1f}, {:.1f}".format(
*tuple(pose.transform.location.as_numpy_array())),
"Rotation : {:.1f}, {:.1f}, {:.1f}".format(
*tuple(pose.transform.rotation.as_numpy_array())),
"Speed : {:.2f} m/s".format(pose.forward_speed),
]
# Add information from the control message
if control:
info_text += [
"Throttle : {:.2f}".format(control.throttle),
"Steer : {:.2f}".format(control.steer),
"Brake : {:.2f}".format(control.brake),
"Reverse : {:.2f}".format(control.reverse),
]
# Display the information box.
info_surface = pygame.Surface(
(220, self._flags.camera_image_height // 3))
info_surface.set_alpha(100)
self.display.blit(info_surface, (0, 0))
# Render the text.
v_offset = 10
for line in info_text:
if v_offset + 18 > self._flags.camera_image_height:
break
surface = self.font.render(line, True, (255, 255, 255))
self.display.blit(surface, (8, v_offset))
v_offset += 18
pygame.display.flip()
def on_watermark(self, timestamp):
self._logger.debug("@{}: received watermark.".format(timestamp))
if timestamp.is_top:
return
pose_msg = self.get_message(self._pose_msgs, timestamp, "Pose")
bgr_msg = self.get_message(self._bgr_msgs, timestamp, "BGR")
tl_camera_msg = self.get_message(self._tl_camera_msgs, timestamp,
"TLCamera")
depth_msg = self.get_message(self._depth_msgs, timestamp, "Depth")
point_cloud_msg = self.get_message(self._point_cloud_msgs, timestamp,
"PointCloud")
segmentation_msg = self.get_message(self._segmentation_msgs, timestamp,
"Segmentation")
imu_msg = self.get_message(self._imu_msgs, timestamp, "IMU")
obstacle_msg = self.get_message(self._obstacle_msgs, timestamp,
"Obstacle")
traffic_light_msg = self.get_message(self._traffic_light_msgs,
timestamp, "TrafficLight")
tracked_obstacle_msg = self.get_message(self._tracked_obstacle_msgs,
timestamp, "TrackedObstacle")
lane_detection_msg = self.get_message(self._lane_detection_msgs,
timestamp, "Lanes")
prediction_camera_msg = self.get_message(self._prediction_camera_msgs,
timestamp, "PredictionCamera")
prediction_msg = self.get_message(self._prediction_msgs, timestamp,
"Prediction")
waypoint_msg = self.get_message(self._waypoint_msgs, timestamp,
"Waypoint")
control_msg = self.get_message(self._control_msgs, timestamp,
"Control")
if pose_msg:
ego_transform = pose_msg.data.transform
else:
ego_transform = None
# Add the visualizations on world.
if self._flags.visualize_pose:
self._visualize_pose(ego_transform)
if self._flags.visualize_imu:
self._visualize_imu(imu_msg)
sensor_to_display = self.display_array[self.current_display]
if sensor_to_display == "RGB" and bgr_msg:
bgr_msg.frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "Obstacle" and bgr_msg and obstacle_msg:
bgr_msg.frame.annotate_with_bounding_boxes(timestamp,
obstacle_msg.obstacles,
ego_transform)
bgr_msg.frame.visualize(self.display, timestamp=timestamp)
elif (sensor_to_display == "TLCamera" and tl_camera_msg
and traffic_light_msg):
tl_camera_msg.frame.annotate_with_bounding_boxes(
timestamp, traffic_light_msg.obstacles)
tl_camera_msg.frame.visualize(self.display, timestamp=timestamp)
elif (sensor_to_display == "TrackedObstacle" and bgr_msg
and tracked_obstacle_msg):
bgr_msg.frame.annotate_with_bounding_boxes(
timestamp, tracked_obstacle_msg.obstacle_trajectories,
ego_transform)
bgr_msg.frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "Waypoint" and (bgr_msg and pose_msg
and waypoint_msg):
bgr_frame = bgr_msg.frame
if self._flags.draw_waypoints_on_camera_frames:
bgr_frame.camera_setup.set_transform(
pose_msg.data.transform * bgr_frame.camera_setup.transform)
waypoint_msg.waypoints.draw_on_frame(bgr_frame)
if self._flags.draw_waypoints_on_world:
waypoint_msg.waypoints.draw_on_world(self._world)
bgr_frame.visualize(self.display, timestamp=timestamp)
elif (sensor_to_display == "PredictionCamera" and prediction_camera_msg
and prediction_msg):
frame = prediction_camera_msg.frame
frame.transform_to_cityscapes()
for obstacle_prediction in prediction_msg.predictions:
obstacle_prediction.draw_trajectory_on_frame(frame)
frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "PointCloud" and point_cloud_msg:
point_cloud_msg.point_cloud.visualize(self.display,
timestamp=timestamp)
elif (sensor_to_display == "Lanes" and bgr_msg and lane_detection_msg):
for lane in lane_detection_msg.data:
lane.draw_on_frame(bgr_msg.frame)
bgr_msg.frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "Depth" and depth_msg:
depth_msg.frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "Segmentation" and segmentation_msg:
segmentation_msg.frame.visualize(self.display, timestamp=timestamp)
elif sensor_to_display == "PlanningWorld":
if prediction_camera_msg is None:
# Top-down prediction is not available. Show planning
# world on a black image.
black_img = np.zeros((self._bird_eye_camera_setup.height,
self._bird_eye_camera_setup.width, 3),
dtype=np.dtype("uint8"))
frame = CameraFrame(black_img, 'RGB',
self._bird_eye_camera_setup)
else:
frame = prediction_camera_msg.frame
frame.transform_to_cityscapes()
if lane_detection_msg:
lanes = lane_detection_msg.data
else:
lanes = None
self._planning_world.update(timestamp,
pose_msg.data,
prediction_msg.predictions,
traffic_light_msg.obstacles,
None,
lanes=lanes)
self._planning_world.update_waypoints(None, waypoint_msg.waypoints)
self._planning_world.draw_on_frame(frame)
frame.visualize(self.display, timestamp=timestamp)
self.render_text(pose_msg.data, control_msg, timestamp)
def run(self):
# Run method is invoked after all operators finished initializing.
# Thus, we're sure the world is up-to-date here.
if (self._flags.visualize_pose or self._flags.visualize_imu
or (self._flags.visualize_waypoints
and self._flags.draw_waypoints_on_world)):
from pylot.simulation.utils import get_world
_, self._world = get_world(self._flags.simulator_host,
self._flags.simulator_port,
self._flags.simulator_timeout)
def _visualize_pose(self, ego_transform):
# Draw position. We add 0.5 to z to ensure that the point is above
# the road surface.
loc = (ego_transform.location +
pylot.utils.Location(0, 0, 0.5)).as_simulator_location()
self._world.debug.draw_point(loc, size=0.2, life_time=DEFAULT_VIS_TIME)
def _visualize_imu(self, msg):
transform = msg.transform
# Acceleration measured in ego frame, not global
# z acceleration not useful for visualization so set to 0
rotation_transform = pylot.utils.Transform(
location=pylot.utils.Location(0, 0, 0),
rotation=transform.rotation)
rotated_acceleration = rotation_transform.transform_locations(
[pylot.utils.Location(msg.acceleration.x, msg.acceleration.y,
0)])[0]
# Construct arrow.
begin_acc = transform.location + pylot.utils.Location(z=0.5)
end_acc = begin_acc + pylot.utils.Location(rotated_acceleration.x,
rotated_acceleration.y, 0)
# draw arrow
self._logger.debug("Acc: {}".format(rotated_acceleration))
self._world.debug.draw_arrow(begin_acc.as_simulator_location(),
end_acc.as_simulator_location(),
arrow_size=0.1,
life_time=0.1)
----- /tests/file_io/data_range_io.py | repo: log2timeline/dfvfs | license: Apache-2.0 | Python, 2,793 bytes -----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the data range file-like object."""
import unittest
from dfvfs.file_io import data_range_io
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests.file_io import test_lib
class DataRangeTest(test_lib.SylogTestCase):
"""Tests for the data range file-like object."""
# pylint: disable=protected-access
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['syslog'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._data_range_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_DATA_RANGE, parent=test_os_path_spec,
range_offset=167, range_size=1080)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 1080)
def testSetRange(self):
"""Test the _SetRange function."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
self.assertEqual(file_object._range_offset, -1)
self.assertEqual(file_object._range_size, -1)
file_object._SetRange(167, 1080)
self.assertEqual(file_object._range_offset, 167)
self.assertEqual(file_object._range_size, 1080)
with self.assertRaises(ValueError):
file_object._SetRange(-1, 1080)
with self.assertRaises(ValueError):
file_object._SetRange(167, -1)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 1080)
def testSeek(self):
"""Test the seek functionality."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self._TestSeekFileObject(file_object, base_offset=0)
def testRead(self):
"""Test the read functionality."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self._TestReadFileObject(file_object, base_offset=0)
if __name__ == '__main__':
unittest.main()
----- /homeassistant/components/fritzbox_callmonitor/base.py | repo: home-assistant/core | license: Apache-2.0 | Python, 2,606 bytes -----
"""Base class for fritzbox_callmonitor entities."""
from __future__ import annotations
from contextlib import suppress
from datetime import timedelta
import logging
import re
from fritzconnection.lib.fritzphonebook import FritzPhonebook
from homeassistant.util import Throttle
from .const import REGEX_NUMBER, UNKNOWN_NAME
_LOGGER = logging.getLogger(__name__)
# Return cached results if the phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = timedelta(hours=6)
class FritzBoxPhonebook:
"""Connects to a FritzBox router and downloads its phone book."""
fph: FritzPhonebook
phonebook_dict: dict[str, list[str]]
number_dict: dict[str, str]
def __init__(
self,
host: str,
username: str,
password: str,
phonebook_id: int | None = None,
prefixes: list[str] | None = None,
) -> None:
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.phonebook_id = phonebook_id
self.prefixes = prefixes
def init_phonebook(self) -> None:
"""Establish a connection to the FRITZ!Box and check if phonebook_id is valid."""
self.fph = FritzPhonebook(
address=self.host,
user=self.username,
password=self.password,
)
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self) -> None:
"""Update the phone book dictionary."""
if self.phonebook_id is None:
return
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {
re.sub(REGEX_NUMBER, "", nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs
}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_phonebook_ids(self) -> list[int]:
"""Return list of phonebook ids."""
return self.fph.phonebook_ids # type: ignore[no-any-return]
def get_name(self, number: str) -> str:
"""Return a name for a given phone number."""
number = re.sub(REGEX_NUMBER, "", str(number))
with suppress(KeyError):
return self.number_dict[number]
if not self.prefixes:
return UNKNOWN_NAME
for prefix in self.prefixes:
with suppress(KeyError):
return self.number_dict[prefix + number]
with suppress(KeyError):
return self.number_dict[prefix + number.lstrip("0")]
return UNKNOWN_NAME
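# Illustrative usage sketch (host, credentials and numbers are placeholders,
# not defaults from the integration):
#
#   phonebook = FritzBoxPhonebook(
#       host="169.254.1.1",
#       username="admin",
#       password="secret",
#       phonebook_id=0,
#       prefixes=["+49", "0049"],
#   )
#   phonebook.init_phonebook()
#   phonebook.get_name("01234567890")   # -> contact name or UNKNOWN_NAME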
----- /2018/finals/re-drm/challenge/encode_str.py | repo: google/google-ctf | licenses: LicenseRef-scancode-generic-cla, Apache-2.0 | Python, 1,014 bytes -----
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Ian Eldred Pudney"
import sys
data = sys.stdin.read()
print "#include \"xorstr.h\""
print "#include \"string.h\""
print "#include <string>"
split_data = [data[i:i+57] for i in range(0, len(data), 57)]
print "std::string " + sys.argv[1] + "() {"
print " std::string ret;"
for line in split_data:
print " ret += xorstr(R\"EOF(" + line + ")EOF\").crypt_get();"
print " return ret;"
print "}"
----- /samples/ReadRange.py | repo: JoelBender/bacpypes | license: MIT | Python, 6,457 bytes -----
#!/usr/bin/env python
"""
This application presents a 'console' prompt to the user asking for readrange
commands which create ReadRangeRequest PDUs, then lines up the corresponding
ReadRangeACK and prints the value.
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address
from bacpypes.object import get_datatype
from bacpypes.apdu import (
ReadRangeRequest,
Range,
RangeByPosition,
RangeBySequenceNumber,
RangeByTime,
ReadRangeACK,
)
from bacpypes.app import BIPSimpleApplication
from bacpypes.primitivedata import Date, Time, ObjectIdentifier
from bacpypes.constructeddata import Array, List
from bacpypes.basetypes import DateTime
from bacpypes.local.device import LocalDeviceObject
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
this_application = None
#
# ReadRangeConsoleCmd
#
@bacpypes_debugging
class ReadRangeConsoleCmd(ConsoleCmd):
def do_readrange(self, args):
"""readrange <addr> <objid> <prop> [ <indx> ]
[ p <indx> <count> ]
[ s <seq> <count> ]
[ t <date> <time> <count> ]
"""
args = args.split()
if _debug:
ReadRangeConsoleCmd._debug("do_readrange %r", args)
try:
addr = Address(args.pop(0))
obj_id = ObjectIdentifier(args.pop(0)).value
prop_id = args.pop(0)
if prop_id.isdigit():
prop_id = int(prop_id)
datatype = get_datatype(obj_id[0], prop_id)
if not datatype:
raise ValueError("invalid property for object type")
# build a request
request = ReadRangeRequest(
destination=addr, objectIdentifier=obj_id, propertyIdentifier=prop_id
)
# index is optional
if args:
if args[0].isdigit():
if not issubclass(datatype, Array):
raise ValueError("property is not an array")
request.propertyArrayIndex = int(args.pop(0))
datatype = datatype.subtype
if not issubclass(datatype, List):
raise ValueError("property is not a list")
# range is optional
if args:
range_type = args.pop(0)
if range_type == "p":
rbp = RangeByPosition(
referenceIndex=int(args[0]), count=int(args[1])
)
request.range = Range(byPosition=rbp)
elif range_type == "s":
rbs = RangeBySequenceNumber(
referenceSequenceNumber=int(args[0]), count=int(args[1])
)
request.range = Range(bySequenceNumber=rbs)
elif range_type == "t":
rbt = RangeByTime(
referenceTime=DateTime(
date=Date(args[0]).value, time=Time(args[1]).value
),
count=int(args[2]),
)
request.range = Range(byTime=rbt)
elif range_type == "x":
# should be missing required parameter
request.range = Range()
else:
raise ValueError("unknown range type: %r" % (range_type,))
if _debug:
ReadRangeConsoleCmd._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
if _debug:
ReadRangeConsoleCmd._debug(" - iocb: %r", iocb)
# give it to the application
deferred(this_application.request_io, iocb)
# wait for it to complete
iocb.wait()
# do something for success
if iocb.ioResponse:
apdu = iocb.ioResponse
if _debug:
ReadRangeConsoleCmd._debug(" - apdu: %r", apdu)
# should be an ack
if not isinstance(apdu, ReadRangeACK):
if _debug:
ReadRangeConsoleCmd._debug(" - not an ack")
return
# find the datatype
datatype = get_datatype(
apdu.objectIdentifier[0], apdu.propertyIdentifier
)
if _debug:
ReadRangeConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise TypeError("unknown datatype")
sys.stdout.write(
"firstSequenceNumber: %s\n" % (apdu.firstSequenceNumber,)
)
sys.stdout.write("resultFlags: %s\n" % (apdu.resultFlags,))
# cast out the data into a list
value = apdu.itemData.cast_out(datatype)
# dump it out
for i, item in enumerate(value):
sys.stdout.write("[%d]\n" % (i,))
item.debug_contents(file=sys.stdout, indent=2)
sys.stdout.flush()
# do something for error/reject/abort
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + "\n")
except Exception as error:
ReadRangeConsoleCmd._exception("exception: %r", error)
#
# __main__
#
def main():
global this_application
# parse the command line arguments
args = ConfigArgumentParser(description=__doc__).parse_args()
if _debug:
_log.debug("initialization")
if _debug:
_log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug:
_log.debug(" - this_device: %r", this_device)
# make a simple application
this_application = BIPSimpleApplication(this_device, args.ini.address)
# make a console
this_console = ReadRangeConsoleCmd()
if _debug:
_log.debug(" - this_console: %r", this_console)
# enable sleeping will help with threads
enable_sleeping()
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
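# Illustrative console session (device address, object and property are
# placeholders; the property read must be a List, e.g. a Trend Log's
# logBuffer):
#
#   > readrange 10.0.1.31 trendLog:1 logBuffer p 1 5
#
# reads five items starting at position 1 and dumps each returned record.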
----- /tensorly/metrics/tests/test_regression.py | repo: tensorly/tensorly | license: BSD-3-Clause | Python, 1,361 bytes -----
import numpy as np
import tensorly as tl
from ..regression import MSE, RMSE, R2_score, correlation
from ...testing import assert_array_almost_equal
def test_MSE():
"""Test for MSE"""
y_true = tl.tensor([1, 0, 2, -2])
y_pred = tl.tensor([1, -1, 1, -1])
true_mse = 0.75
assert_array_almost_equal(MSE(y_true, y_pred), true_mse)
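# In test_MSE above, the element-wise errors are [0, 1, 1, -1], their squares
# are [0, 1, 1, 1], and the mean of the squares is 3/4 = 0.75.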
def test_RMSE():
"""Test for RMSE"""
y_true = tl.tensor([1, 0, 2, -2])
y_pred = tl.tensor([0, -1, 1, -1])
true_mse = 1
assert_array_almost_equal(RMSE(y_true, y_pred), true_mse)
def test_R2_score():
"""Test for RMSE"""
X_original = tl.randn((5, 4, 3))
assert R2_score(X_original, X_original) == 1.0
assert R2_score(X_original, X_original * 2) == 0.0
assert R2_score(X_original, tl.zeros_like(X_original)) == 0.0
def test_correlation():
"""Test for correlation"""
a = tl.tensor(np.random.random(10))
b = tl.tensor(np.random.random(10))
assert_array_almost_equal(correlation(a, a * 2 + 1), 1)
assert_array_almost_equal(correlation(a, -a * 2 + 1), -1)
a = tl.tensor([1, 2, 3, 2, 1])
b = tl.tensor([1, 2, 3, 4, 5])
assert_array_almost_equal(correlation(a, b), 0)
a = tl.tensor([[1, 2, 3, 2, 1]] * 3)
b = tl.tensor([[1, 2, 3, 4, 5]] * 3)
res = tl.tensor([0, 0, 0])
assert_array_almost_equal(correlation(a, b, axis=1), res)
----- /tests/test_cli.py | repo: intel/dffml | licenses: MIT, LicenseRef-scancode-generic-export-compliance | Python, 16,653 bytes -----
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
import os
import io
import json
import shutil
import random
import tempfile
import contextlib
from pathlib import Path
from unittest.mock import patch
from typing import AsyncIterator, List
from dffml.record import Record
from dffml.feature import Feature, Features
from dffml.source.source import Sources, SourcesContext
from dffml.source.file import FileSourceConfig
from dffml.source.json import JSONSource
from dffml.source.csv import CSVSource, CSVSourceConfig
from dffml.model.model import ModelContext, Model
from dffml.util.entrypoint import entrypoint
from dffml.util.asynctestcase import (
    AsyncTestCase,
    non_existant_tempfile,
)
from dffml.base import config
from dffml.cli.cli import Merge
from dffml.cli.ml import Predict, Train
from dffml.cli.list import List
from dffml.cli.dataflow import Dataflow
from .test_df import OPERATIONS, OPIMPS
class RecordsTestCase(AsyncTestCase):
async def setUp(self):
await super().setUp()
self.records = [
Record(str(random.random()), data={"features": {"fake": 1}},)
for _ in range(0, 10)
]
self.temp_filename = self.mktempfile()
self.sconfig = FileSourceConfig(
filename=self.temp_filename, readwrite=True, allowempty=True
)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
contents = json.loads(Path(self.sconfig.filename).read_text())
# Ensure there are records in the file
self.assertEqual(
len(contents.get(self.sconfig.tag)),
len(self.records),
"RecordsTestCase JSON file erroneously initialized as empty",
)
# TODO(p3) For some reason patching Model.load doesn't work
self._stack.enter_context(
patch("dffml.model.model.Model.load", new=model_load)
)
self._stack.enter_context(
patch("dffml.df.base.OperationImplementation.load", new=opimp_load)
)
self._stack.enter_context(
patch("dffml.df.types.Operation.load", new=op_load)
)
@config
class FakeConfig:
features: Features
predict: Feature
location: str = os.path.join(
os.path.expanduser("~"), ".cache", "dffml", "test_cli", "fake"
)
class FakeModelContext(ModelContext):
async def train(self, sources: Sources):
pass
async def predict(self, sources: SourcesContext) -> AsyncIterator[Record]:
target = self.parent.config.predict.name
async for record in sources.with_features(
self.parent.config.features.names()
):
record.predicted(target, random.random(), float(record.key))
yield record
@entrypoint("fake")
class FakeModel(Model):
CONTEXT = FakeModelContext
CONFIG = FakeConfig
def model_load(loading):
if loading == "fake":
return FakeModel
return [FakeModel]
def op_load(loading):
return list(filter(lambda op: loading == op.name, OPERATIONS))[0]
def opimp_load(loading=None):
if loading is not None:
return list(filter(lambda imp: loading == imp.op.name, OPIMPS))[0]
return OPIMPS
class TestMerge(RecordsTestCase):
async def test_json_tag(self):
await Merge.cli(
"src=json",
"dest=json",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
self.temp_filename,
"-source-dest-tag",
"sometag",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
# Check the untagged source
with self.subTest(tagged=None):
async with JSONSource(
FileSourceConfig(filename=self.temp_filename)
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
# Check the tagged source
with self.subTest(tagged="sometag"):
async with JSONSource(
FileSourceConfig(filename=self.temp_filename, tag="sometag")
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
async def test_json_to_csv(self):
with non_existant_tempfile() as csv_tempfile:
await Merge.cli(
"src=json",
"dest=csv",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
csv_tempfile,
"-source-dest-key",
"key",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
contents = Path(csv_tempfile).read_text()
self.assertEqual(
contents,
"key,tag,fake\n"
+ "\n".join(
[f"{record.key},untagged,1" for record in self.records]
)
+ "\n",
"Incorrect data in csv file",
)
async def test_csv_tag(self):
with non_existant_tempfile() as csv_tempfile:
# Move the pre-populated json data to a csv source
with self.subTest(json_to_csv=True):
await Merge.cli(
"src=json",
"dest=csv",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
csv_tempfile,
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
# Merge one tag to another within the same file
with self.subTest(merge_same_file=True):
await Merge.cli(
"src=csv",
"dest=csv",
"-source-src-filename",
csv_tempfile,
"-source-dest-filename",
csv_tempfile,
"-source-dest-tag",
"sometag",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
contents = Path(csv_tempfile).read_text()
self.assertIn("untagged", contents)
self.assertIn("sometag", contents)
# Check the untagged source
with self.subTest(tagged=None):
async with CSVSource(
CSVSourceConfig(filename=csv_tempfile)
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
contents = Path(csv_tempfile).read_text()
self.assertIn("sometag", contents)
self.assertIn("untagged", contents)
# Check the tagged source
with self.subTest(tagged="sometag"):
async with CSVSource(
CSVSourceConfig(filename=csv_tempfile, tag="sometag")
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
contents = Path(csv_tempfile).read_text()
self.assertIn("sometag", contents)
self.assertIn("untagged", contents)
class TestListRecords(RecordsTestCase):
async def test_run(self):
result = await List.cli(
"records",
"-sources",
"primary=json",
"-source-primary-filename",
self.temp_filename,
"-source-primary-readwrite",
"true",
)
result = list(map(lambda r: r.export(), result))
result = dict(map(lambda r: (r["key"], r), result))
for record in self.records:
self.assertIn(record.key, result)
class TestDataflowRunAllRecords(RecordsTestCase):
async def test_run(self):
self.record_keys = {"add 40 and 2": 42, "multiply 42 and 10": 420}
self.records = list(map(Record, self.record_keys.keys()))
os.unlink(self.temp_filename)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"records",
"all",
"-dataflow",
dataflow_file.name,
"primary=json",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-record-def",
"calc_string",
"-inputs",
'["result"]=get_single_spec',
)
results = {
result.key: result.feature("result") for result in results
}
for record in self.records:
self.assertIn(record.key, results)
self.assertEqual(
self.record_keys[record.key], results[record.key]
)
shutil.rmtree(tmpdir)
class TestDataflowRunRecordSet(RecordsTestCase):
async def test_run(self):
test_key = "multiply 42 and 10"
self.record_keys = {"add 40 and 2": 42, "multiply 42 and 10": 420}
self.records = list(map(Record, self.record_keys.keys()))
os.unlink(self.temp_filename)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"records",
"set",
"-keys",
test_key,
"-dataflow",
dataflow_file.name,
"primary=json",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-record-def",
"calc_string",
"-inputs",
'["result"]=get_single_spec',
)
self.assertEqual(len(results), 1)
self.assertEqual(
self.record_keys[test_key], results[0].feature("result")
)
shutil.rmtree(tmpdir)
class TestDataflowRunSingle(AsyncTestCase):
async def test_run(self):
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"single",
"-dataflow",
dataflow_file.name,
"-inputs",
'["result"]=get_single_spec',
"add 40 and 2=calc_string",
)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], {"result": 42})
shutil.rmtree(tmpdir)
class TestDataflowRunContexts(AsyncTestCase):
async def test_run(self):
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
test_contexts = {"add 40 and 2": 42, "multiply 42 and 10": 420}
results = await Dataflow.cli(
"run",
"contexts",
"-dataflow",
dataflow_file.name,
"-context-def",
"calc_string",
"-contexts",
*test_contexts.keys(),
"-input",
'["result"]=get_single_spec',
)
self.assertCountEqual(
results,
[
{ctx_string: {"result": result}}
for ctx_string, result in test_contexts.items()
],
)
shutil.rmtree(tmpdir)
class TestTrain(RecordsTestCase):
async def test_run(self):
await Train.cli(
"-model",
"fake",
"-model-features",
"fake",
"-model-predict",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
)
class TestPredict(RecordsTestCase):
async def test_all(self):
results = await Predict.cli(
"all",
"-model",
"fake",
"-model-features",
"fake:float:1",
"-model-predict",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
)
results = {
record.key: record.prediction("fake").confidence
for record in results
}
for record in self.records:
self.assertEqual(float(record.key), results[record.key])
async def test_record(self):
subset = self.records[: (int(len(self.records) / 2))]
subset_urls = list(map(lambda record: record.key, subset))
results = await Predict.cli(
"record",
"-model",
"fake",
"-model-predict",
"fake",
"-model-features",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-keys",
*subset_urls,
)
self.assertEqual(len(results), len(subset))
results = {
record.key: record.prediction("fake").confidence
for record in results
}
for record in subset:
self.assertEqual(float(record.key), results[record.key])
|
b2ada36828af1498edffe1424e901801ea8769f2
|
8e90a7759ec7143427823547e0fbff58e0343aaa
|
/training_api/domain/services/contract/abstract_configuration_service.py
|
1ca350562a2d960b5fa2cc8a4ead8bcec204bdba
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
BMW-InnovationLab/BMW-TensorFlow-Training-GUI
|
646a6f86f26887e94351b4c572b7fe7f0842f75c
|
06531dae14365986c86baf735fd149317f4bb67a
|
refs/heads/master
| 2023-07-20T01:48:27.299962
| 2023-07-12T15:22:22
| 2023-07-12T15:22:22
| 227,429,492
| 1,030
| 198
|
Apache-2.0
| 2023-05-22T17:40:23
| 2019-12-11T18:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
abstract_configuration_service.py
|
from abc import ABC, abstractmethod, ABCMeta
from domain.models.network_information import NetworkInformation
class AbstractConfigurationService(ABC):
__metaclass__ = ABCMeta
@abstractmethod
def get_configurations(self, network_inf: NetworkInformation) -> str: raise NotImplementedError
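# Minimal sketch (not part of the original file) of one way the contract above
# could be satisfied; the JSON payload and the "name" attribute lookup are
# assumptions, not part of the real NetworkInformation model.
import json
class DummyConfigurationService(AbstractConfigurationService):
    def get_configurations(self, network_inf: NetworkInformation) -> str:
        # Serialize whichever fields the caller needs into a string.
        return json.dumps({"network": getattr(network_inf, "name", None)})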
|
34c4f562135c2cde68d69fc278f337727244e1ce
|
def993d87717cd42a9090a17d9c1df5648e924ce
|
/test/IECore/IndexedIO.py
|
06e0c58194133c28df8e3092ace353f1bf386a9c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
ImageEngine/cortex
|
688388296aad2b36dd0bfb7da7b25dcbdc7bd856
|
6eec66f5dccfd50dda247b04453bce65abc595eb
|
refs/heads/main
| 2023-09-05T07:01:13.679207
| 2023-08-17T23:14:41
| 2023-08-17T23:14:41
| 10,654,465
| 439
| 104
|
NOASSERTION
| 2023-09-14T11:30:41
| 2013-06-12T23:12:28
|
C++
|
UTF-8
|
Python
| false
| false
| 24,380
|
py
|
IndexedIO.py
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"""Unit test for IndexedIO binding"""
import os
import unittest
import math
import random
import IECore
class TestIndexedIO(unittest.TestCase):
def testConstructors(self):
"""Test IndexedIO constuctors"""
self.assertRaises(RuntimeError, IECore.IndexedIO )
def testCreate(self):
"""Test IndexedIO create"""
io2 = IECore.IndexedIO.create( os.path.join( "test", "myFile.fio" ), [], IECore.IndexedIO.OpenMode.Write )
io2 = IECore.IndexedIO.create( os.path.join( "test", "myFile.fio" ), IECore.IndexedIO.OpenMode.Write )
self.assertRaises(RuntimeError, IECore.IndexedIO.create, "myFileWith.invalidExtension", [], IECore.IndexedIO.OpenMode.Write )
def testSupportedExtensions( self ) :
e = IECore.IndexedIO.supportedExtensions()
self.assertTrue( "fio" in e )
def testOpenMode( self ) :
for f in [ os.path.join( "test", "myFile.fio" ) ] :
io = IECore.IndexedIO.create( f, [], IECore.IndexedIO.OpenMode.Write | IECore.IndexedIO.OpenMode.Exclusive )
self.assertEqual( io.openMode(), IECore.IndexedIO.OpenMode.Write | IECore.IndexedIO.OpenMode.Exclusive )
del io
io = IECore.IndexedIO.create( f, [], IECore.IndexedIO.OpenMode.Read | IECore.IndexedIO.OpenMode.Exclusive )
self.assertEqual( io.openMode(), IECore.IndexedIO.OpenMode.Read | IECore.IndexedIO.OpenMode.Exclusive )
del io
def testEntryConstructor( self ) :
e = IECore.IndexedIO.Entry( "n", IECore.IndexedIO.EntryType.Directory, IECore.IndexedIO.DataType.Invalid, 0 )
self.assertEqual( e.id(), "n" )
self.assertEqual( e.entryType(), IECore.IndexedIO.EntryType.Directory )
def tearDown(self):
if os.path.isfile(os.path.join( "test", "myFile.fio" )):
os.remove(os.path.join( "test", "myFile.fio" ))
class TestMemoryIndexedIO(unittest.TestCase):
def testSaveWriteObjects(self):
"""Test MemoryIndexedIO read/write operations."""
f = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Write)
self.assertEqual( f.path() , [] )
self.assertEqual( f.currentEntryId() , "/" )
txt = IECore.StringData("test1")
txt.save( f, "obj1" )
size1 = len( f.buffer() )
self.assertTrue( size1 > 0 )
txt.save( f, "obj2" )
size2 = len( f.buffer() )
self.assertTrue( size2 > size1 )
buf = f.buffer()
f2 = IECore.MemoryIndexedIO( buf, [], IECore.IndexedIO.OpenMode.Read)
self.assertEqual( txt, IECore.Object.load( f2, "obj1" ) )
self.assertEqual( txt, IECore.Object.load( f2, "obj2" ) )
@unittest.skipUnless( os.environ.get("CORTEX_PERFORMANCE_TEST", False), "'CORTEX_PERFORMANCE_TEST' env var not set" )
def testRmStress(self) :
"""Test MemoryIndexedIO rm (stress test)"""
random.seed( 19 )
dataPresent = set()
f = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("data", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
buf = f.buffer() # Fails under gcc 3.3.4 when no data has been written, as a result of the way in which std::ostringstream::seekp(0) works when the stream is currently empty. Fixed in gcc 3.4.x and later.
f = None
f = IECore.MemoryIndexedIO(buf, [], IECore.IndexedIO.OpenMode.Append)
f = f.subdirectory( "data" )
numLoops = 500
maxSize = 1000
for i in range( 0, numLoops ) :
for i in range( 0, maxSize ) :
index = int( random.random() * maxSize )
if not index in dataPresent :
f.write( "data"+str(index), i )
dataPresent.add( index )
else :
f.remove( "data"+str(index) )
dataPresent.remove( index )
# Reopen the file every now and then, to exercise the index reading/writing
if random.random() > 0.8 :
buf = f.buffer()
f = None
f = IECore.MemoryIndexedIO(buf, ["data"], IECore.IndexedIO.OpenMode.Append)
entryNames = f.entryIds()
for i in range( 0, maxSize ) :
dataName = "data"+str(i)
if dataName in entryNames :
self.assertTrue( i in dataPresent )
else :
self.assertFalse( i in dataPresent )
self.assertEqual( len(entryNames), len(dataPresent) )
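# Illustrative sketch (not part of the original suite): the buffer round-trip
# pattern exercised by TestMemoryIndexedIO above, pulled out as a helper.
def _memoryRoundTrip( obj ) :
	w = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Write )
	obj.save( w, "obj" )
	r = IECore.MemoryIndexedIO( w.buffer(), [], IECore.IndexedIO.OpenMode.Read )
	return IECore.Object.load( r, "obj" )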
class TestFileIndexedIO(unittest.TestCase):
badNames = ['*', '!', '&', '^', '@', '#', '$', '(', ')', '<', '+',
		'>', '?', ',', '\\', '\'', ';', '{', '}', '[',
']', '=', '`' ]
def testConstructors(self):
"""Test FileIndexedIO constuctors"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
self.assertEqual( f.fileName() , os.path.join( ".", "test", "FileIndexedIO.fio" ) )
self.assertEqual( f.path() , [] )
self.assertEqual( f.currentEntryId() , "/" )
self.assertRaises( RuntimeError, IECore.FileIndexedIO, os.path.join( ".", "test", "FileIndexedIO.fio" ), ["nonexistantentrypoint"], IECore.IndexedIO.OpenMode.Read)
f = None
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Read)
self.assertEqual( f.path() , [] )
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), IECore.IndexedIO.OpenMode.Read)
self.assertEqual( f.path() , [] )
def testEmptyWrite(self):
"""Test FileIndexedIO empty file writing"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
self.assertEqual( f.path() , [] )
f = None
self.assertTrue( os.path.exists( os.path.join( ".", "test", "FileIndexedIO.fio" ) ) )
def testSaveWriteObjects(self):
"""Test FileIndexedIO read/write operations."""
f = IECore.FileIndexedIO( os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
self.assertEqual( f.path() , [] )
self.assertEqual( f.currentEntryId() , "/" )
txt = IECore.StringData("test1")
txt.save( f, "obj1" )
txt.save( f, "obj2" )
del f
f2 = IECore.FileIndexedIO( os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Read)
self.assertEqual( txt, IECore.Object.load( f2, "obj1" ) )
self.assertEqual( txt, IECore.Object.load( f2, "obj2" ) )
def testResetRoot(self):
"""Test FileIndexedIO resetRoot"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.subdirectory("sub2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
e = g.entryIds()
self.assertEqual( len(e), 1 )
self.assertEqual( e, ["sub2"])
self.assertTrue( g.entry('sub2').entryType() == IECore.IndexedIO.EntryType.Directory )
def testMkdir(self):
"""Test FileIndexedIO mkdir"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( f.path() , [] )
self.assertEqual( f.currentEntryId() , "/" )
self.assertEqual( g.path() , [ 'sub1' ] )
self.assertEqual( g.currentEntryId() , "sub1" )
g = f.createSubdirectory("sub2" )
self.assertEqual( f.path() , [] )
self.assertEqual( g.path() , [ 'sub2' ] )
self.assertEqual( g.currentEntryId() , "sub2" )
self.assertRaises( RuntimeError, f.createSubdirectory, "sub2" )
# test directory
h = f.directory(["sub2"], IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( h.path() , ["sub2"] )
i = f.directory(["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( i.path() , ["sub2","sub2.1"] )
j = h.directory(["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( j.path() , ["sub2","sub2.1"] )
k = j.directory(["sub3"], IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( k.path() , ["sub3"] )
l = f.directory(["sub4","sub4.1"], IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( l.path() , ["sub4","sub4.1"] )
def testChdir(self):
"""Test FileIndexedIO chdir"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
f.subdirectory("sub2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g = f.subdirectory("sub1")
self.assertEqual( g.path(), ["sub1"] )
self.assertEqual( g.currentEntryId() , "sub1" )
self.assertEqual( f.path(), [] )
g = f.subdirectory("sub2")
self.assertEqual( g.path(), ["sub2"] )
self.assertEqual( g.currentEntryId() , "sub2" )
e = g.subdirectory("sub2.1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( e.path(), ["sub2","sub2.1"] )
self.assertEqual( e.currentEntryId() , "sub2.1" )
g = e.parentDirectory()
self.assertEqual( g.path(), ["sub2"] )
self.assertEqual( g.currentEntryId() , "sub2" )
f = g.parentDirectory()
self.assertEqual( f.path(), [] )
self.assertEqual( f.currentEntryId() , "/" )
g = f.subdirectory("sub2")
self.assertEqual( g.path(), ["sub2"] )
self.assertEqual( g.currentEntryId() , "sub2" )
e = g.subdirectory("sub2.1")
self.assertEqual( e.path(), ["sub2","sub2.1"] )
self.assertEqual( e.currentEntryId() , "sub2.1" )
# test directory function
h = f.directory( ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
self.assertEqual( h.path(), ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
h = g.directory( ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
self.assertEqual( h.path(), ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
h = e.directory( ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
self.assertEqual( h.path(), ["sub2","sub2.1"], IECore.IndexedIO.MissingBehaviour.ThrowIfMissing )
# missingBehaviour should default to throw if missing
h = e.directory( ["sub2","sub2.1"] )
self.assertEqual( h.path(), ["sub2","sub2.1"] )
self.assertRaises( RuntimeError, e.directory, [ "i", "dont", "exist" ] )
self.assertRaises( RuntimeError, e.subdirectory, "idontexist" )
self.assertEqual( None, e.directory( [ "i", "dont", "exist" ], IECore.IndexedIO.MissingBehaviour.NullIfMissing ) )
self.assertEqual( None, e.subdirectory( "idontexist", IECore.IndexedIO.MissingBehaviour.NullIfMissing ) )
def testLs(self):
"""Test FileIndexedIO ls"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f.subdirectory("sub2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
f = f.subdirectory("sub2")
f.subdirectory("sub2.1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
# Filter for files
e = f.entryIds( IECore.IndexedIO.EntryType.File )
self.assertEqual( len(e), 0 )
# Filter for directories
e = f.entryIds( IECore.IndexedIO.EntryType.Directory )
self.assertEqual( len(e), 1 )
self.assertEqual( e[0], "sub2.1")
self.assertTrue( f.entry(e[0]).entryType() == IECore.IndexedIO.EntryType.Directory)
def testRm(self):
"""Test FileIndexedIO rm"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
g = f.subdirectory("sub2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.subdirectory("sub2.1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.subdirectory("sub2.2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.subdirectory("sub2.3", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
e = g.entryIds()
self.assertEqual( len(e), 3 )
g.remove("sub2.1")
e = g.entryIds()
self.assertEqual( len(e), 2 )
f.subdirectory('sub2').remove("sub2.2")
e = g.entryIds()
self.assertEqual( len(e), 1 )
g.remove("sub2.3")
e = g.entryIds()
self.assertEqual( len(e), 0 )
f.remove("sub2")
@unittest.skipUnless( os.environ.get("CORTEX_PERFORMANCE_TEST", False), "'CORTEX_PERFORMANCE_TEST' env var not set" )
def testRmStress(self) :
"""Test FileIndexedIO rm (stress test)"""
random.seed( 19 )
dataPresent = set()
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("data", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
f = None
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Append)
f = f.subdirectory( "data" )
numLoops = 500
maxSize = 1000
for i in range( 0, numLoops ) :
for i in range( 0, maxSize ) :
index = int( random.random() * maxSize )
if not index in dataPresent :
f.write( "data"+str(index), i )
dataPresent.add( index )
else :
f.remove( "data"+str(index) )
dataPresent.remove( index )
# Reopen the file every now and then, to exercise the index reading/writing
if random.random() > 0.8 :
f = None
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), ["data"], IECore.IndexedIO.OpenMode.Append)
entryNames = f.entryIds()
for i in range( 0, maxSize ) :
dataName = "data"+str(i)
if dataName in entryNames :
self.assertTrue( i in dataPresent )
else :
self.assertFalse( i in dataPresent )
self.assertEqual( len(entryNames), len(dataPresent) )
def testReadWrite(self):
"""Test FileIndexedIO read/write(generic)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
self.assertRaises( RuntimeError, f.read, "DOESNOTEXIST")
# Name check
for n in self.badNames:
self.assertRaises(RuntimeError, f.read, n)
def testReadWriteFloatVector(self):
"""Test FileIndexedIO read/write(FloatVector)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.FloatVectorData()
for n in range(0, 1000):
fv.append(n* n * math.sin(n))
name = "myFloatVector"
f.write(name, fv)
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(len(fv), len(gv))
for n in range(0, 1000):
self.assertEqual(fv[n], gv[n])
def testReadWriteDoubleVector(self):
"""Test FileIndexedIO read/write(DoubleVector)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.DoubleVectorData()
for n in range(0, 1000):
fv.append(n* n * math.sin(n))
name = "myDoubleVector"
f.write(name, fv)
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(len(fv), len(gv))
for n in range(0, 1000):
self.assertEqual(fv[n], gv[n])
def testReadWriteIntVector(self):
"""Test FileIndexedIO read/write(IntVector)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.IntVectorData()
for n in range(0, 1000):
fv.append(n * n)
name = "myIntVector"
f.write(name, fv)
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(len(fv), len(gv))
for n in range(0, 1000):
self.assertEqual(fv[n], gv[n])
def testReadWriteStringVector(self):
"""Test FileIndexedIO read/write(StringVector)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.StringVectorData()
for n in range(0, 1000):
fv.append(str(n))
name = "myStringVector"
f.write(name, fv)
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(len(fv), len(gv))
for n in range(0, 1000):
self.assertEqual(str(fv[n]), str(gv[n]))
	def testReadWriteInternedStringVector(self):
"""Test FileIndexedIO read/write(InternedStringVector)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.InternedStringVectorData()
for n in range(0, 1000):
fv.append(str(n))
name = "myInternedStringVector"
f.write(name, fv)
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(len(fv), len(gv))
for n in range(0, 1000):
self.assertEqual(str(fv[n]), str(gv[n]))
def testReadWriteFloat(self):
"""Test FileIndexedIO read/write(Float/Double)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = 2.0
name = "myFloat"
f.write(name, fv)
gv = f.read(name).value
self.assertFalse(fv is gv)
self.assertEqual(fv, gv)
def testReadWriteInt(self):
"""Test FileIndexedIO read/write(Int/Long)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = 200
name = "myInt"
f.write(name, fv)
gv = f.read(name).value
self.assertEqual(fv, gv)
def testReadWriteString(self):
"""Test FileIndexedIO read/write(String)"""
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
f = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = "StringLiteral"
name = "myString"
f.write(name, fv)
gv = f.read(name).value
self.assertFalse(fv is gv)
self.assertEqual(fv, gv)
def testReadWriteSymbolicLink(self):
"""Test FileIndexedIO read/write(SymbolicLink)"""
# There isn't actually an explicit symbolic link capability in IndexedIO,
# but it's pretty straightforward to emulate it by writing paths
# into a file.
f = IECore.FileIndexedIO(os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write)
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
h = g.subdirectory("sub2", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
fv = IECore.InternedStringVectorData( h.path() )
name = "myLink"
f.write( name, fv )
gv = f.read(name)
self.assertFalse(fv is gv)
self.assertEqual(fv, gv)
def testIncreasingCompressionLevelResultsInSmallerFile( self ):
previousSize = None
filePath = os.path.join( ".", "test", "FileIndexedIO.fio" )
for level in range(9):
options = IECore.CompoundData( { "compressor" : "lz4", "compressionLevel" : level } )
f = IECore.IndexedIO.create( filePath, [], IECore.IndexedIO.OpenMode.Write, options = options )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
for b in range( 512 ):
g.write( "foo_" + str( b ), IECore.FloatVectorData( [random.random() for i in range( 4096 )] ) )
del g, f
size = os.path.getsize( filePath )
if previousSize:
self.assertTrue( previousSize > size )
previousSize = size
def testCanWriteBlockGreaterThanCompressedBlockSize( self ):
filePath = os.path.join( ".", "test", "FileIndexedIO.fio" )
# set the compressedBlockSize to 1MB to ensure we're creating multiple blocks
options = IECore.CompoundData( { "compressor" : "lz4", "compressionLevel" : 9, "maxCompressedBlockSize" : IECore.UIntData( 1024 * 1024 ) } )
f = IECore.IndexedIO.create( filePath, [], IECore.IndexedIO.OpenMode.Write, options = options )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
# write 80 MB of integers
d = IECore.IntVectorData( 20 * 1024 * 1024 )
for i in range( 20 * 1024 * 1024 ):
d[i] = i
g.write( "foo", d )
del g, f
f = IECore.IndexedIO.create( filePath, [], IECore.IndexedIO.OpenMode.Read )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.ThrowIfMissing)
d2 = g.read ( "foo" )
size = os.path.getsize( filePath )
# harsh test as this is 1/16 of the data size but ensures we've compressed the data
self.assertTrue( size < 5 * 1024 * 1024 )
self.assertEqual( d, d2 )
def testCompressionParametersAndVersionStoredInMetaData( self ):
options = IECore.CompoundData( { "compressor" : "zlib", "compressionLevel" : 3 } )
f = IECore.IndexedIO.create( os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write, options = options )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.write( "foo", IECore.IntVectorData( range( 4096 ) ) )
del g, f
f = IECore.IndexedIO.create( os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Read )
m = f.metadata()
self.assertEqual(m["compressor"], IECore.StringData("zlib") )
self.assertEqual(m["compressionLevel"], IECore.IntData(3) )
self.assertEqual(m["version"], IECore.IntData(7) )
def testInvalidCompressionParametersRevertsToSensibleDefaults( self ):
options = IECore.CompoundData( { "compressor" : "foobar", "compressionLevel" : 12, "compressionThreadCount" : 100, "decompressionThreadCount" : -10 } )
f = IECore.IndexedIO.create( os.path.join( ".", "test", "FileIndexedIO.fio" ), [], IECore.IndexedIO.OpenMode.Write, options = options )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
self.assertEqual( f.metadata(),
IECore.CompoundData( { "compressor" : "lz4", "compressionLevel" : 9, 'version': IECore.IntData( 7 ), "compressionThreadCount" : 32, "decompressionThreadCount" : 1 } ) )
def testDefaultCompressionIsOff( self ):
filePath = os.path.join( ".", "test", "FileIndexedIO.fio" )
f = IECore.IndexedIO.create( filePath, [], IECore.IndexedIO.OpenMode.Write )
g = f.subdirectory("sub1", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
g.write( "foo", IECore.IntVectorData( range( 1024 ) ) )
del g, f
size = os.path.getsize( filePath )
self.assertTrue( ( size > (1024 * 4 ) ) )
self.assertTrue( ( size < (1024 * 4 + 512 ) ) )
f = IECore.IndexedIO.create( filePath, [], IECore.IndexedIO.OpenMode.Read )
self.assertEqual( f.metadata(),
IECore.CompoundData( { "compressor" : "lz4", "compressionLevel" : 0, 'version': IECore.IntData( 7 ), "compressionThreadCount" : 1, "decompressionThreadCount" : 1 } ) )
def setUp( self ):
if os.path.isfile(os.path.join( ".", "test", "FileIndexedIO.fio" )) :
os.remove(os.path.join( ".", "test", "FileIndexedIO.fio" ))
def tearDown(self):
# cleanup
if os.path.isfile(os.path.join( ".", "test", "FileIndexedIO.fio" )) :
os.remove(os.path.join( ".", "test", "FileIndexedIO.fio" ))
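# Illustrative sketch (not part of the original suite): the CompoundData options
# form used by the compression tests above, as a stand-alone helper; the path
# argument and the chosen compression level are assumptions.
def _writeCompressedExample( path ) :
	options = IECore.CompoundData( { "compressor" : "lz4", "compressionLevel" : 5 } )
	f = IECore.IndexedIO.create( path, [], IECore.IndexedIO.OpenMode.Write, options = options )
	g = f.subdirectory( "data", IECore.IndexedIO.MissingBehaviour.CreateIfMissing )
	g.write( "values", IECore.FloatVectorData( [ 0.0, 1.0, 2.0 ] ) )
	del g, f	# data and index are flushed when the handles are released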
if __name__ == "__main__":
unittest.main()
|
648612a48f9c316f97b395cc19779f89d014b7c6
|
7607429663127ad7429d0815006dbb6aba2f3fd0
|
/veros/veros.py
|
c8858265e7234f230ad98b9dbf5927b425ccf900
|
[
"MIT"
] |
permissive
|
team-ocean/veros
|
df0d0e467579a09a973f45bc1b267aca9609e93a
|
416dbdac43b8ce80ce09171a3bd3fdb4814abae0
|
refs/heads/main
| 2023-08-05T03:39:31.640695
| 2023-08-03T08:00:37
| 2023-08-03T08:00:37
| 87,419,383
| 168
| 31
|
MIT
| 2023-09-14T04:50:21
| 2017-04-06T10:59:21
|
Python
|
UTF-8
|
Python
| false
| false
| 15,946
|
py
|
veros.py
|
import abc
# do not import veros.core here!
from veros import settings, time, signals, distributed, progress, runtime_settings as rs, logger
from veros.state import get_default_state
from veros.plugins import load_plugin
from veros.routines import veros_routine, is_veros_routine
from veros.timer import timer_context
class VerosSetup(metaclass=abc.ABCMeta):
"""Main class for Veros, used for building a model and running it.
Note:
This class is meant to be subclassed. Subclasses need to implement the
methods :meth:`set_parameter`, :meth:`set_topography`, :meth:`set_grid`,
:meth:`set_coriolis`, :meth:`set_initial_conditions`, :meth:`set_forcing`,
:meth:`set_diagnostics`, and :meth:`after_timestep`.
Example:
>>> import matplotlib.pyplot as plt
>>> from veros import VerosSetup
>>>
>>> class MyModel(VerosSetup):
>>> ...
>>>
>>> simulation = MyModel()
>>> simulation.run()
>>> plt.imshow(simulation.state.variables.psi[..., 0])
>>> plt.show()
"""
__veros_plugins__ = tuple()
def __init__(self, override=None):
self.override_settings = override or {}
# this should be the first time the core routines are imported
import veros.core # noqa: F401
self._plugin_interfaces = tuple(load_plugin(p) for p in self.__veros_plugins__)
self._setup_done = False
self.state = get_default_state(plugin_interfaces=self._plugin_interfaces)
@abc.abstractmethod
def set_parameter(self, state):
"""To be implemented by subclass.
First function to be called during setup.
Use this to modify the model settings.
Example:
>>> def set_parameter(self, state):
>>> settings = state.settings
>>> settings.nx, settings.ny, settings.nz = (360, 120, 50)
>>> settings.coord_degree = True
>>> settings.enable_cyclic = True
"""
pass
@abc.abstractmethod
def set_initial_conditions(self, state):
"""To be implemented by subclass.
May be used to set initial conditions.
Example:
>>> @veros_method
>>> def set_initial_conditions(self, state):
>>> vs = state.variables
>>> vs.u = update(vs.u, at[:, :, :, vs.tau], npx.random.rand(vs.u.shape[:-1]))
"""
pass
@abc.abstractmethod
def set_grid(self, state):
"""To be implemented by subclass.
Has to set the grid spacings :attr:`dxt`, :attr:`dyt`, and :attr:`dzt`,
along with the coordinates of the grid origin, :attr:`x_origin` and
:attr:`y_origin`.
Example:
>>> @veros_method
>>> def set_grid(self, state):
>>> vs = state.variables
>>> vs.x_origin, vs.y_origin = 0, 0
>>> vs.dxt = [0.1, 0.05, 0.025, 0.025, 0.05, 0.1]
>>> vs.dyt = 1.
>>> vs.dzt = [10, 10, 20, 50, 100, 200]
"""
pass
@abc.abstractmethod
def set_coriolis(self, state):
"""To be implemented by subclass.
Has to set the Coriolis parameter :attr:`coriolis_t` at T grid cells.
Example:
>>> @veros_method
>>> def set_coriolis(self, state):
>>> vs = state.variables
>>> vs.coriolis_t = 2 * vs.omega * npx.sin(vs.yt[npx.newaxis, :] / 180. * vs.pi)
"""
pass
@abc.abstractmethod
def set_topography(self, state):
"""To be implemented by subclass.
Must specify the model topography by setting :attr:`kbot`.
Example:
>>> @veros_method
>>> def set_topography(self, state):
>>> vs = state.variables
>>> vs.kbot = update(vs.kbot, at[...], 10)
>>> # add a rectangular island somewhere inside the domain
>>> vs.kbot = update(vs.kbot, at[10:20, 10:20], 0)
"""
pass
@abc.abstractmethod
def set_forcing(self, state):
"""To be implemented by subclass.
Called before every time step to update the external forcing, e.g. through
:attr:`forc_temp_surface`, :attr:`forc_salt_surface`, :attr:`surface_taux`,
:attr:`surface_tauy`, :attr:`forc_tke_surface`, :attr:`temp_source`, or
:attr:`salt_source`. Use this method to implement time-dependent forcing.
Example:
>>> @veros_method
>>> def set_forcing(self, state):
>>> vs = state.variables
>>> current_month = (vs.time / (31 * 24 * 60 * 60)) % 12
>>> vs.surface_taux = vs._windstress_data[:, :, current_month]
"""
pass
@abc.abstractmethod
def set_diagnostics(self, state):
"""To be implemented by subclass.
Called before setting up the :ref:`diagnostics <diagnostics>`. Use this method e.g. to
mark additional :ref:`variables <variables>` for output.
Example:
>>> @veros_method
>>> def set_diagnostics(self, state):
>>> state.diagnostics['snapshot'].output_variables += ['drho', 'dsalt', 'dtemp']
"""
pass
@abc.abstractmethod
def after_timestep(self, state):
"""Called at the end of each time step. Can be used to define custom, setup-specific
events.
"""
pass
def _ensure_setup_done(self):
if not self._setup_done:
raise RuntimeError("setup() method has to be called before running the model")
def setup(self):
from veros import diagnostics, restart
from veros.core import numerics, external, isoneutral
setup_funcs = (
self.set_parameter,
self.set_grid,
self.set_coriolis,
self.set_topography,
self.set_initial_conditions,
self.set_diagnostics,
self.set_forcing,
self.after_timestep,
)
for f in setup_funcs:
if not is_veros_routine(f):
raise RuntimeError(
f"{f.__name__} method is not a Veros routine. Please make sure to decorate it "
"with @veros_routine and try again."
)
logger.info("Running model setup")
with self.state.timers["setup"]:
with self.state.settings.unlock():
self.set_parameter(self.state)
for setting, value in self.override_settings.items():
setattr(self.state.settings, setting, value)
settings.check_setting_conflicts(self.state.settings)
distributed.validate_decomposition(self.state.dimensions)
self.state.initialize_variables()
self.state.diagnostics.update(diagnostics.create_default_diagnostics(self.state))
for plugin in self._plugin_interfaces:
for diagnostic in plugin.diagnostics:
self.state.diagnostics[diagnostic.name] = diagnostic()
self.set_grid(self.state)
numerics.calc_grid(self.state)
self.set_coriolis(self.state)
numerics.calc_beta(self.state)
self.set_topography(self.state)
numerics.calc_topo(self.state)
self.set_initial_conditions(self.state)
numerics.calc_initial_conditions(self.state)
if self.state.settings.enable_streamfunction:
external.streamfunction_init(self.state)
for plugin in self._plugin_interfaces:
plugin.setup_entrypoint(self.state)
self.set_diagnostics(self.state)
diagnostics.initialize(self.state)
restart.read_restart(self.state)
self.set_forcing(self.state)
isoneutral.check_isoneutral_slope_crit(self.state)
self._setup_done = True
@veros_routine
def step(self, state):
from veros import diagnostics, restart
from veros.core import idemix, eke, tke, momentum, thermodynamics, advection, utilities, isoneutral, numerics
self._ensure_setup_done()
vs = state.variables
settings = state.settings
with state.timers["diagnostics"]:
restart.write_restart(state)
with state.timers["main"]:
with state.timers["forcing"]:
self.set_forcing(state)
if state.settings.enable_idemix:
with state.timers["idemix"]:
idemix.set_idemix_parameter(state)
with state.timers["eke"]:
eke.set_eke_diffusivities(state)
with state.timers["tke"]:
tke.set_tke_diffusivities(state)
with state.timers["momentum"]:
momentum.momentum(state)
with state.timers["thermodynamics"]:
thermodynamics.thermodynamics(state)
if settings.enable_eke or settings.enable_tke or settings.enable_idemix:
with state.timers["advection"]:
advection.calculate_velocity_on_wgrid(state)
with state.timers["eke"]:
if state.settings.enable_eke:
eke.integrate_eke(state)
with state.timers["idemix"]:
if state.settings.enable_idemix:
idemix.integrate_idemix(state)
with state.timers["tke"]:
if state.settings.enable_tke:
tke.integrate_tke(state)
with state.timers["boundary_exchange"]:
vs.u = utilities.enforce_boundaries(vs.u, settings.enable_cyclic_x)
vs.v = utilities.enforce_boundaries(vs.v, settings.enable_cyclic_x)
if settings.enable_tke:
vs.tke = utilities.enforce_boundaries(vs.tke, settings.enable_cyclic_x)
if settings.enable_eke:
vs.eke = utilities.enforce_boundaries(vs.eke, settings.enable_cyclic_x)
if settings.enable_idemix:
vs.E_iw = utilities.enforce_boundaries(vs.E_iw, settings.enable_cyclic_x)
with state.timers["momentum"]:
momentum.vertical_velocity(state)
with state.timers["plugins"]:
for plugin in self._plugin_interfaces:
with state.timers[plugin.name]:
plugin.run_entrypoint(state)
vs.itt = vs.itt + 1
vs.time = vs.time + settings.dt_tracer
self.after_timestep(state)
with state.timers["diagnostics"]:
if not numerics.sanity_check(state):
raise RuntimeError(f"solution diverged at iteration {vs.itt}")
isoneutral.isoneutral_diag_streamfunction(state)
diagnostics.diagnose(state)
diagnostics.output(state)
# NOTE: benchmarks parse this, do not change / remove
logger.debug(" Time step took {:.2f}s", state.timers["main"].last_time)
        # permute time indices
vs.taum1, vs.tau, vs.taup1 = vs.tau, vs.taup1, vs.taum1
def run(self, show_progress_bar=None):
"""Main routine of the simulation.
Note:
Make sure to call :meth:`setup` prior to this function.
Arguments:
            show_progress_bar (:obj:`bool`, optional): Whether to show a fancy progress bar via tqdm.
By default, only show if stdout is a terminal and Veros is running on a single process.
"""
from veros import restart
self._ensure_setup_done()
vs = self.state.variables
settings = self.state.settings
time_length, time_unit = time.format_time(settings.runlen)
logger.info(f"\nStarting integration for {time_length:.1f} {time_unit}")
start_time = vs.time
# disable timers for first iteration
timer_context.active = False
pbar = progress.get_progress_bar(self.state, use_tqdm=show_progress_bar)
try:
with signals.signals_to_exception(), pbar:
while vs.time - start_time < settings.runlen:
self.step(self.state)
if not timer_context.active:
timer_context.active = True
pbar.advance_time(settings.dt_tracer)
except: # noqa: E722
logger.critical(f"Stopping integration at iteration {vs.itt}")
raise
else:
logger.success("Integration done\n")
finally:
restart.write_restart(self.state, force=True)
self._timing_summary()
def _timing_summary(self):
timing_summary = []
timing_summary.extend(
[
"",
"Timing summary:",
"(excluding first iteration)",
"---",
" setup time = {:.2f}s".format(self.state.timers["setup"].total_time),
" main loop time = {:.2f}s".format(self.state.timers["main"].total_time),
" forcing = {:.2f}s".format(self.state.timers["forcing"].total_time),
" momentum = {:.2f}s".format(self.state.timers["momentum"].total_time),
" pressure = {:.2f}s".format(self.state.timers["pressure"].total_time),
" friction = {:.2f}s".format(self.state.timers["friction"].total_time),
" thermodynamics = {:.2f}s".format(self.state.timers["thermodynamics"].total_time),
]
)
if rs.profile_mode:
timing_summary.extend(
[
" lateral mixing = {:.2f}s".format(self.state.timers["isoneutral"].total_time),
" vertical mixing = {:.2f}s".format(self.state.timers["vmix"].total_time),
" equation of state = {:.2f}s".format(self.state.timers["eq_of_state"].total_time),
]
)
timing_summary.extend(
[
" advection = {:.2f}s".format(self.state.timers["advection"].total_time),
" EKE = {:.2f}s".format(self.state.timers["eke"].total_time),
" IDEMIX = {:.2f}s".format(self.state.timers["idemix"].total_time),
" TKE = {:.2f}s".format(self.state.timers["tke"].total_time),
" boundary exchange = {:.2f}s".format(self.state.timers["boundary_exchange"].total_time),
" diagnostics and I/O = {:.2f}s".format(self.state.timers["diagnostics"].total_time),
" plugins = {:.2f}s".format(self.state.timers["plugins"].total_time),
]
)
timing_summary.extend(
[
" {:<22} = {:.2f}s".format(plugin.name, self.state.timers[plugin.name].total_time)
for plugin in self._plugin_interfaces
]
)
logger.debug("\n".join(timing_summary))
if rs.profile_mode:
print_profile_summary(self.state.profile_timers, self.state.timers["main"].total_time)
def print_profile_summary(profile_timers, main_loop_time):
profile_timings = ["", "Profile timings:", "[total time spent (% of main loop)]", "---"]
maxwidth = max(len(k) for k in profile_timers.keys())
profile_format_string = "{{:<{}}} = {{:.2f}}s ({{:.2f}}%)".format(maxwidth)
main_loop_time = max(main_loop_time, 1e-8) # prevent division by 0
for name, timer in profile_timers.items():
this_time = timer.total_time
if this_time == 0:
continue
profile_timings.append(profile_format_string.format(name, this_time, 100 * this_time / main_loop_time))
logger.diagnostic("\n".join(profile_timings))
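# Minimal sketch (not part of this module) of the subclassing pattern described
# in the VerosSetup docstring: every hook must be a @veros_routine, which
# setup() checks before running. The empty bodies below are placeholders only,
# not a physically meaningful configuration.
class _SkeletonSetup(VerosSetup):
    @veros_routine
    def set_parameter(self, state):
        pass
    @veros_routine
    def set_grid(self, state):
        pass
    @veros_routine
    def set_coriolis(self, state):
        pass
    @veros_routine
    def set_topography(self, state):
        pass
    @veros_routine
    def set_initial_conditions(self, state):
        pass
    @veros_routine
    def set_forcing(self, state):
        pass
    @veros_routine
    def set_diagnostics(self, state):
        pass
    @veros_routine
    def after_timestep(self, state):
        pass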
|
bcd6ef50ec9f4d64eb41e6231d397396a78ee372
|
abe6c00f9790df7e6ef20dc02d0b1b225b5020cb
|
/src/prefect/server/database/migrations/versions/postgresql/2022_10_19_165110_8ea825da948d_track_retries_restarts.py
|
2d3db8e0f662b99e588a94946537725e46211c33
|
[
"Apache-2.0"
] |
permissive
|
PrefectHQ/prefect
|
000e6c5f7df80f76a181f0a30f8661c96417c8bd
|
2c50d2b64c811c364cbc5faa2b5c80a742572090
|
refs/heads/main
| 2023-09-05T20:25:42.965208
| 2023-09-05T18:58:06
| 2023-09-05T18:58:06
| 139,199,684
| 12,917
| 1,539
|
Apache-2.0
| 2023-09-14T20:25:45
| 2018-06-29T21:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 762
|
py
|
2022_10_19_165110_8ea825da948d_track_retries_restarts.py
|
"""Add retry and restart metadata
Revision ID: 8ea825da948d
Revises: ad4b1b4d1e9d
Create Date: 2022-10-19 16:51:10.239643
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "8ea825da948d"
down_revision = "3ced59d8806b"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"task_run",
sa.Column(
"flow_run_run_count", sa.Integer(), server_default="0", nullable=False
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("task_run", "flow_run_run_count")
# ### end Alembic commands ###
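# Hypothetical sketch (not part of this migration): driving the revision above
# through Alembic's Python API. The "alembic.ini" path is an assumption; in
# practice Prefect's own database tooling normally runs these migrations.
def _upgrade_to_head(config_path="alembic.ini"):
    from alembic import command
    from alembic.config import Config
    command.upgrade(Config(config_path), "head")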
|
6621f8febb637eddcfa74bfa8b3fccbcec9553bc
|
4674b8088ffdf55905d44995f08a0792a3e4cd5c
|
/tests/hwsim/netlink.py
|
eef79090ce10fa30afb3b1b276744afd80cd03a3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
vanhoefm/krackattacks-scripts
|
41daca791638a92aa4cfa68a582e46119037560e
|
4b78669686f74efe664c6543b1b5b1616b22f902
|
refs/heads/research
| 2022-10-29T20:21:11.512335
| 2022-10-16T18:44:41
| 2022-10-16T18:44:41
| 107,408,514
| 2,184
| 577
|
NOASSERTION
| 2021-07-06T12:43:49
| 2017-10-18T12:58:08
|
C
|
UTF-8
|
Python
| false
| false
| 6,982
|
py
|
netlink.py
|
#
# (Generic) Netlink message generation/parsing
# Copyright (c) 2007 Johannes Berg <johannes@sipsolutions.net>
# Copyright (c) 2014 Intel Corporation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import struct, socket
# flags
NLM_F_REQUEST = 1
NLM_F_MULTI = 2
NLM_F_ACK = 4
NLM_F_ECHO = 8
# types
NLMSG_NOOP = 1
NLMSG_ERROR = 2
NLMSG_DONE = 3
NLMSG_OVERRUN = 4
NLMSG_MIN_TYPE = 0x10
class Attr(object):
def __init__(self, attr_type, data, *values):
self._type = attr_type
if len(values):
self._data = struct.pack(data, *values)
else:
self._data = data
def _dump(self):
hdr = struct.pack("HH", len(self._data) + 4, self._type)
length = len(self._data)
pad = ((length + 4 - 1) & ~3 ) - length
return hdr + self._data + '\0' * pad
def __repr__(self):
return '<Attr type %d, data "%s">' % (self._type, repr(self._data))
def u16(self):
return struct.unpack('H', self._data)[0]
def s16(self):
return struct.unpack('h', self._data)[0]
def u32(self):
return struct.unpack('I', self._data)[0]
def s32(self):
return struct.unpack('i', self._data)[0]
def str(self):
return self._data
def nulstr(self):
return self._data.split('\0')[0]
def nested(self):
return parse_attributes(self._data)
class StrAttr(Attr):
def __init__(self, attr_type, data):
Attr.__init__(self, attr_type, "%ds" % len(data), data)
class NulStrAttr(Attr):
def __init__(self, attr_type, data):
Attr.__init__(self, attr_type, "%dsB" % len(data), data, 0)
class U32Attr(Attr):
def __init__(self, attr_type, val):
Attr.__init__(self, attr_type, "I", val)
class U8Attr(Attr):
def __init__(self, attr_type, val):
Attr.__init__(self, attr_type, "B", val)
class FlagAttr(Attr):
def __init__(self, attr_type):
Attr.__init__(self, attr_type, "")
class Nested(Attr):
def __init__(self, attr_type, attrs):
self.attrs = attrs
self.type = attr_type
def _dump(self):
contents = []
for attr in self.attrs:
contents.append(attr._dump())
contents = ''.join(contents)
length = len(contents)
hdr = struct.pack("HH", length+4, self.type)
return hdr + contents
NETLINK_ROUTE = 0
NETLINK_UNUSED = 1
NETLINK_USERSOCK = 2
NETLINK_FIREWALL = 3
NETLINK_INET_DIAG = 4
NETLINK_NFLOG = 5
NETLINK_XFRM = 6
NETLINK_SELINUX = 7
NETLINK_ISCSI = 8
NETLINK_AUDIT = 9
NETLINK_FIB_LOOKUP = 10
NETLINK_CONNECTOR = 11
NETLINK_NETFILTER = 12
NETLINK_IP6_FW = 13
NETLINK_DNRTMSG = 14
NETLINK_KOBJECT_UEVENT = 15
NETLINK_GENERIC = 16
class Message(object):
def __init__(self, msg_type, flags=0, seq=-1, payload=None):
self.type = msg_type
self.flags = flags
self.seq = seq
self.pid = -1
payload = payload or []
if isinstance(payload, list):
contents = []
for attr in payload:
contents.append(attr._dump())
self.payload = ''.join(contents)
else:
self.payload = payload
def send(self, conn):
if self.seq == -1:
self.seq = conn.seq()
self.pid = conn.pid
length = len(self.payload)
hdr = struct.pack("IHHII", length + 4*4, self.type,
self.flags, self.seq, self.pid)
conn.send(hdr + self.payload)
def __repr__(self):
return '<netlink.Message type=%d, pid=%d, seq=%d, flags=0x%x "%s">' % (
self.type, self.pid, self.seq, self.flags, repr(self.payload))
@property
def ret(self):
assert self.type == NLMSG_ERROR
return struct.unpack("i", self.payload[:4])[0]
def send_and_recv(self, conn):
self.send(conn)
while True:
m = conn.recv()
if m.seq == self.seq:
return m
class Connection(object):
def __init__(self, nltype, groups=0, unexpected_msg_handler=None):
self.descriptor = socket.socket(socket.AF_NETLINK,
socket.SOCK_RAW, nltype)
self.descriptor.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65536)
self.descriptor.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536)
self.descriptor.bind((0, groups))
self.pid, self.groups = self.descriptor.getsockname()
self._seq = 0
self.unexpected = unexpected_msg_handler
def send(self, msg):
self.descriptor.send(msg)
def recv(self):
contents = self.descriptor.recv(16384)
# XXX: python doesn't give us message flags, check
# len(contents) vs. msglen for TRUNC
msglen, msg_type, flags, seq, pid = struct.unpack("IHHII",
contents[:16])
msg = Message(msg_type, flags, seq, contents[16:])
msg.pid = pid
if msg.type == NLMSG_ERROR:
import os
errno = msg.ret
if errno < 0:
err = OSError("Netlink error: %s (%d)" % (
os.strerror(-errno), -errno))
err.errno = -errno
raise err
return msg
def seq(self):
self._seq += 1
return self._seq
def parse_attributes(data):
attrs = {}
while len(data):
attr_len, attr_type = struct.unpack("HH", data[:4])
attrs[attr_type] = Attr(attr_type, data[4:attr_len])
attr_len = ((attr_len + 4 - 1) & ~3 )
data = data[attr_len:]
return attrs
CTRL_CMD_UNSPEC = 0
CTRL_CMD_NEWFAMILY = 1
CTRL_CMD_DELFAMILY = 2
CTRL_CMD_GETFAMILY = 3
CTRL_CMD_NEWOPS = 4
CTRL_CMD_DELOPS = 5
CTRL_CMD_GETOPS = 6
CTRL_ATTR_UNSPEC = 0
CTRL_ATTR_FAMILY_ID = 1
CTRL_ATTR_FAMILY_NAME = 2
CTRL_ATTR_VERSION = 3
CTRL_ATTR_HDRSIZE = 4
CTRL_ATTR_MAXATTR = 5
CTRL_ATTR_OPS = 6
class GenlHdr(object):
def __init__(self, cmd, version = 0):
self.cmd = cmd
self.version = version
def _dump(self):
return struct.pack("BBxx", self.cmd, self.version)
def _genl_hdr_parse(data):
return GenlHdr(*struct.unpack("BBxx", data))
GENL_ID_CTRL = NLMSG_MIN_TYPE
class GenlMessage(Message):
def __init__(self, family, cmd, attrs=[], flags=0):
Message.__init__(self, family, flags=flags, payload=[GenlHdr(cmd)] + attrs)
class GenlController(object):
def __init__(self, conn):
self.conn = conn
def get_family_id(self, family):
a = NulStrAttr(CTRL_ATTR_FAMILY_NAME, family)
m = GenlMessage(GENL_ID_CTRL, CTRL_CMD_GETFAMILY, flags=NLM_F_REQUEST, attrs=[a])
m.send(self.conn)
m = self.conn.recv()
gh = _genl_hdr_parse(m.payload[:4])
attrs = parse_attributes(m.payload[4:])
return attrs[CTRL_ATTR_FAMILY_ID].u16()
genl_controller = GenlController(Connection(NETLINK_GENERIC))
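# Illustrative sketch (not part of the original module): resolving a generic
# netlink family id through the controller created above. The family name
# "nl80211" is only an example; any registered genl family name would do.
def _lookup_family_example():
    return genl_controller.get_family_id("nl80211")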
|
f0c538e00b286b47a9134f53d7681c2d3a143d37
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow_nightly/source2.7/tensorflow/contrib/gan/python/estimator/python/head_impl.py
|
a21358c50bbdb4a1a929b0c5bc322cec4c9923b5
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,120
|
py
|
head_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TFGAN-backed GAN Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head
from tensorflow.python.framework import ops
__all__ = [
'GANHead',
'gan_head',
]
def gan_head(generator_loss_fn, discriminator_loss_fn, generator_optimizer,
discriminator_optimizer, use_loss_summaries=True,
get_hooks_fn=tfgan_train.get_sequential_train_hooks(),
name=None):
"""Creates a `GANHead`.
Args:
generator_loss_fn: A TFGAN loss function for the generator. Takes a
`GANModel` and returns a scalar.
discriminator_loss_fn: Same as `generator_loss_fn`, but for the
discriminator.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
get_hooks_fn: A function that takes a GANTrainOps tuple and returns a list
of hooks.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`.
Returns:
An instance of `GANHead`.
"""
return GANHead(generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
use_loss_summaries=use_loss_summaries,
get_hooks_fn=get_hooks_fn,
name=name)
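# Hedged sketch (not part of this module): one way a caller might build a head
# with the helper above, assuming a TF 1.x runtime where tf.contrib.gan is
# available. The modified-GAN losses and the learning rate are arbitrary choices.
def _example_gan_head():
  import tensorflow as tf
  return gan_head(
      generator_loss_fn=tf.contrib.gan.losses.modified_generator_loss,
      discriminator_loss_fn=tf.contrib.gan.losses.modified_discriminator_loss,
      generator_optimizer=tf.train.GradientDescentOptimizer(0.1),
      discriminator_optimizer=tf.train.GradientDescentOptimizer(0.1))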
class GANHead(head._Head): # pylint: disable=protected-access
"""`Head` for a GAN."""
def __init__(self, generator_loss_fn, discriminator_loss_fn,
generator_optimizer, discriminator_optimizer,
use_loss_summaries=True,
get_hooks_fn=None,
name=None):
"""`Head` for GAN training.
Args:
generator_loss_fn: A TFGAN loss function for the generator. Takes a
`GANModel` and returns a scalar.
discriminator_loss_fn: Same as `generator_loss_fn`, but for the
discriminator.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
get_hooks_fn: A function that takes a GANTrainOps tuple and returns a list
of hooks. Defaults to `train.get_sequential_train_hooks()`
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`.
"""
if get_hooks_fn is None:
get_hooks_fn = tfgan_train.get_sequential_train_hooks()
# TODO(joelshor): Validate inputs.
if use_loss_summaries in [True, False]:
generator_loss_fn = functools.partial(
generator_loss_fn, add_summaries=use_loss_summaries)
discriminator_loss_fn = functools.partial(
discriminator_loss_fn, add_summaries=use_loss_summaries)
self._generator_loss_fn = generator_loss_fn
self._discriminator_loss_fn = discriminator_loss_fn
self._generator_optimizer = generator_optimizer
self._discriminator_optimizer = discriminator_optimizer
self._get_hooks_fn = get_hooks_fn
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return None
def create_loss(self, features, mode, logits, labels):
"""Returns a GANLoss tuple from the provided GANModel.
See `Head` for more details.
Args:
features: Input `dict` of `Tensor` objects. Unused.
mode: Estimator's `ModeKeys`.
logits: A GANModel tuple.
labels: Must be `None`.
Returns:
A GANLoss tuple.
"""
_validate_logits_and_labels(logits, labels)
del mode, labels, features # unused for this head.
gan_model = logits # rename variable for clarity
return tfgan_tuples.GANLoss(
generator_loss=self._generator_loss_fn(gan_model),
discriminator_loss=self._discriminator_loss_fn(gan_model))
def create_estimator_spec(
self, features, mode, logits, labels=None,
train_op_fn=tfgan_train.gan_train_ops):
"""Returns `EstimatorSpec` that a model_fn can return.
See `Head` for more details.
Args:
features: Must be `None`.
mode: Estimator's `ModeKeys`.
logits: A GANModel tuple.
labels: Must be `None`.
train_op_fn: Function that takes a GANModel, GANLoss, generator optimizer,
and discriminator optimizer, and returns a `GANTrainOps` tuple. For
example, this function can come from TFGAN's `train.py` library, or can
be custom.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If `features` isn't `None`.
ValueError: If `train_op_fn` isn't provided in train mode.
"""
_validate_logits_and_labels(logits, labels)
if features is not None:
raise ValueError('`features` should be `None`. Instead, found: %s' %
features)
gan_model = logits # rename variable for clarity
with ops.name_scope('GANHead'):
if mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.PREDICT,
predictions=gan_model.generated_data)
elif mode == model_fn_lib.ModeKeys.EVAL:
gan_loss = self.create_loss(
features=None, mode=mode, logits=gan_model, labels=None)
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL,
predictions=gan_model.generated_data,
loss=scalar_loss,
# TODO(joelshor): Add metrics. If head name provided, append it to
# metric keys.
eval_metric_ops={})
elif mode == model_fn_lib.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
gan_loss = self.create_loss(None, mode, gan_model, None)
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
train_ops = train_op_fn(gan_model, gan_loss, self._generator_optimizer,
self._discriminator_optimizer)
training_hooks = self._get_hooks_fn(train_ops)
return model_fn_lib.EstimatorSpec(
loss=scalar_loss,
mode=model_fn_lib.ModeKeys.TRAIN,
train_op=train_ops.global_step_inc_op,
training_hooks=training_hooks)
else:
raise ValueError('Mode not recognized: %s' % mode)
def _validate_logits_and_labels(logits, labels):
if labels is not None:
raise ValueError('`GANHead`\'s `create_estimator_spec` input `labels` must '
'be `None`. Instead, found: %s' % labels)
if not isinstance(logits, tfgan_tuples.GANModel):
raise ValueError('`GANHead`\'s `create_estimator_spec` input `logits` must '
                     'be an instance of a `GANModel`. Instead, found: %s' %
logits)
|
91395b53765c624a9ee80814facccd5a6dc736a9
|
517c4d617819aa2090094e92f1817e593354409d
|
/spectral/tests/run.py
|
38f144829f44d12da71be464bcbd2f8e0f282af5
|
[
"MIT"
] |
permissive
|
spectralpython/spectral
|
2080a7f5d304017827dded890c7617f57335f61b
|
0659ee71614455d99a80ffd4f5f5edd8d032608c
|
refs/heads/master
| 2023-09-04T07:34:08.699690
| 2022-11-13T16:54:18
| 2022-11-13T16:54:18
| 16,567,502
| 527
| 144
|
MIT
| 2023-09-07T11:33:02
| 2014-02-06T03:36:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
run.py
|
'''
Runs a set of unit tests for the spectral package.
To run all unit tests, type the following from the system command line:
# python -m spectral.tests.run
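To continue with the remaining tests after a failed test (instead of aborting on the
first failure), pass the -c/--continue option:
# python -m spectral.tests.run -c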
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from optparse import OptionParser
import spectral.tests
def parse_args():
parser = OptionParser()
parser.add_option('-c', '--continue', dest='continue_tests',
action='store_true', default=False,
help='Continue with remaining tests after a '
'failed test.')
(options, args) = parser.parse_args()
spectral.tests.abort_on_fail = not options.continue_tests
def reset_stats():
spectral.tests._num_tests_run = 0
spectral.tests._num_tests_failed = 0
def print_summary():
if spectral.tests._num_tests_failed > 0:
msg = '%d of %d tests FAILED.' % (spectral.tests._num_tests_failed,
spectral.tests._num_tests_run)
else:
msg = 'All %d tests PASSED!' % spectral.tests._num_tests_run
print('\n' + '-' * 72)
print(msg)
print('-' * 72)
if __name__ == '__main__':
logging.getLogger('spectral').setLevel(logging.ERROR)
parse_args()
reset_stats()
for test in spectral.tests.all_tests:
test.run()
print_summary()
|
63fd8552af9bcf7394e42bc729018c0c103b6d03
|
3bba998ece193dd4594059025d0ebdc86a982e18
|
/lib/rucio/core/oidc.py
|
df75ce0794868362324a7a0e6cfd81364209bce8
|
[
"Apache-2.0"
] |
permissive
|
rucio/rucio
|
d3320db046866be616f534baecdfdb2b28c8d0f1
|
7f0d229ac0b3bc7dec12c6e158bea2b82d414a3b
|
refs/heads/master
| 2023-09-02T15:17:54.376456
| 2023-08-14T12:54:06
| 2023-08-21T12:42:21
| 109,819,364
| 232
| 378
|
Apache-2.0
| 2023-09-14T12:52:36
| 2017-11-07T10:15:12
|
Python
|
UTF-8
|
Python
| false
| false
| 65,207
|
py
|
oidc.py
|
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import subprocess
import traceback
from datetime import datetime, timedelta
from typing import Any, TYPE_CHECKING, Optional
from urllib.parse import urlparse, parse_qs
from jwkest.jws import JWS
from jwkest.jwt import JWT
from math import floor
from oic import rndstr
from oic.oauth2.message import CCAccessTokenRequest
from oic.oic import Client, Grant, Token, REQUEST2ENDPOINT
from oic.oic.message import (AccessTokenResponse, AuthorizationResponse,
Message, RegistrationResponse)
from oic.utils import time_util
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from sqlalchemy import and_
from sqlalchemy.sql.expression import true
from rucio.common import types
from rucio.common.config import config_get, config_get_int
from rucio.common.exception import (CannotAuthenticate, CannotAuthorize,
RucioException)
from rucio.common.stopwatch import Stopwatch
from rucio.common.utils import all_oidc_req_claims_present, build_url, val_to_space_sep_str
from rucio.core.account import account_exists
from rucio.core.identity import exist_identity_account, get_default_account
from rucio.core.monitor import MetricManager
from rucio.db.sqla import filter_thread_work
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
if TYPE_CHECKING:
from sqlalchemy.orm import Session
METRICS = MetricManager(module=__name__)
# workaround for a bug in pyoidc (as of Dec 2019)
REQUEST2ENDPOINT['CCAccessTokenRequest'] = 'token_endpoint'
# private/protected file containing Rucio Client secrets known to the Identity Provider as well
IDPSECRETS = config_get('oidc', 'idpsecrets', False)
ADMIN_ISSUER_ID = config_get('oidc', 'admin_issuer', False)
EXPECTED_OIDC_AUDIENCE = config_get('oidc', 'expected_audience', False, 'rucio')
EXPECTED_OIDC_SCOPE = config_get('oidc', 'expected_scope', False, 'openid profile')
EXCHANGE_GRANT_TYPE = config_get('oidc', 'exchange_grant_type', False, 'urn:ietf:params:oauth:grant-type:token-exchange')
REFRESH_LIFETIME_H = config_get_int('oidc', 'default_jwt_refresh_lifetime', False, 96)
# Allow 2 mins of leeway in case Rucio and IdP server clocks are not perfectly synchronized
# this affects the token issued time (a token could be issued in the future if IdP clock is ahead)
LEEWAY_SECS = 120
# TO-DO permission layer: if scope == 'wlcg.groups'
# --> check 'profile' info (requested profile scope)
def __get_rucio_oidc_clients(keytimeout: int = 43200) -> tuple[dict, dict]:
"""
    Creates a Rucio OIDC Client instance per Identity Provider (IdP)
according to etc/idpsecrets.json configuration file.
Clients have to be pre-registered with the respective IdP with the appropriate settings:
allowed to request refresh tokens which have lifetime set in their unverified header,
    allowed to request token exchange, and immediate refresh token expiration after first use.
:returns: Dictionary of {'https://issuer_1/': <Rucio OIDC Client_1 instance>,
'https://issuer_2/': <Rucio OIDC Client_2 instance>,}.
In case of trouble, Exception is raised.
"""
clients = {}
admin_clients = {}
try:
with open(IDPSECRETS) as client_secret_file:
client_secrets = json.load(client_secret_file)
except:
return (clients, admin_clients)
for iss in client_secrets:
try:
client_secret = client_secrets[iss]
issuer = client_secret["issuer"]
client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
# general parameter discovery about the Identity Provider via issuers URL
client.provider_config(issuer)
# storing client specific parameters into the client itself
client_reg = RegistrationResponse(**client_secret)
client.store_registration_info(client_reg)
# setting public_key cache timeout to 'keytimeout' seconds
keybundles = client.keyjar.issuer_keys[client.issuer]
for keybundle in keybundles:
keybundle.cache_time = keytimeout
clients[issuer] = client
# doing the same to store a Rucio Admin client
# which has client credential flow allowed
client_secret = client_secrets[iss]["SCIM"]
client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
client.provider_config(issuer)
client_reg = RegistrationResponse(**client_secret)
client.store_registration_info(client_reg)
admin_clients[issuer] = client
except Exception as error:
raise RucioException(error.args) from error
return (clients, admin_clients)
# global variables to represent the IdP clients
OIDC_CLIENTS = {}
OIDC_ADMIN_CLIENTS = {}
def __initialize_oidc_clients() -> None:
"""
Initialising Rucio OIDC Clients
"""
try:
ALL_OIDC_CLIENTS = __get_rucio_oidc_clients()
global OIDC_CLIENTS
global OIDC_ADMIN_CLIENTS
OIDC_CLIENTS = ALL_OIDC_CLIENTS[0]
OIDC_ADMIN_CLIENTS = ALL_OIDC_CLIENTS[1]
except Exception as error:
logging.debug("OIDC clients not properly loaded: %s", error)
pass
# try loading OIDC clients upon module import
__initialize_oidc_clients()
def __get_init_oidc_client(token_object: models.Token = None, token_type: str = None, **kwargs) -> dict[Any, Any]:
"""
Get an OIDC client object, (re-)initialised with parameters corresponding
to authorization flows used to get a token. For special cases - token refresh,
token exchange - these parameters are being mocked as pyoidc library
    has yet to develop these areas. Initialisation can be made either via kwargs
    (e.g. for an authorization code flow) or via a token object (for token exchange or token refresh).
:param session_state: state value of the first authorization request
:param token_object: DB token token to be included in a Grant for
the token exchange or token refresh mechanisms
:param token_type: e.g. "subject_token" for token exchange or "refresh_token"
:param kwargs: optional strings which contain expected oauth session parameters:
issuer_id/issuer, redirect_uri, redirect_to, state, nonce, code,
scope, audience,
    :returns: if first_init == True: dict {'redirect': <redirect url>, 'auth_url': <authorization url>};
              otherwise: dict {'client': <oidc client object>, 'state': <session state>}.
              If anything goes wrong, an exception is thrown.
"""
if not OIDC_CLIENTS:
# retry once loading OIDC clients
__initialize_oidc_clients()
if not OIDC_CLIENTS:
raise CannotAuthenticate(traceback.format_exc())
try:
auth_args = {"grant_types": ["authorization_code"],
"response_type": "code",
"state": kwargs.get('state', rndstr()),
"nonce": kwargs.get('nonce', rndstr())}
auth_args["scope"] = token_object.oidc_scope if token_object else kwargs.get('scope', " ")
auth_args["audience"] = token_object.audience if token_object else kwargs.get('audience', " ")
if token_object:
issuer = token_object.identity.split(", ")[1].split("=")[1]
oidc_client = OIDC_CLIENTS[issuer]
auth_args["client_id"] = oidc_client.client_id
token = ''
if not token_type:
token_type = kwargs.get('token_type', None)
if token_type == 'subject_token':
token = token_object.token
# do not remove - even though None, oic expects this key to exist
auth_args["redirect_uri"] = None
if token_type == 'refresh_token':
token = token_object.refresh_token
# do not remove - even though None, oic expects this key to exist
auth_args["redirect_uri"] = None
if token_type and token:
oidc_client.grant[auth_args['state']] = Grant()
oidc_client.grant[auth_args['state']].grant_expiration_time = time_util.utc_time_sans_frac() + 300
resp = AccessTokenResponse()
resp[token_type] = token
oidc_client.grant[auth_args['state']].tokens.append(Token(resp))
else:
secrets, client_secret = {}, {}
try:
with open(IDPSECRETS) as client_secret_file:
secrets = json.load(client_secret_file)
except Exception as error:
raise CannotAuthenticate("Rucio server is missing information from the idpsecrets.json file.") from error
if 'issuer_id' in kwargs:
client_secret = secrets[kwargs.get('issuer_id', ADMIN_ISSUER_ID)]
elif 'issuer' in kwargs:
client_secret = next((secrets[i] for i in secrets if 'issuer' in secrets[i] and # NOQA: W504
kwargs.get('issuer') in secrets[i]['issuer']), None)
redirect_url = kwargs.get('redirect_uri', None)
if not redirect_url:
redirect_to = kwargs.get("redirect_to", "auth/oidc_token")
redirect_urls = [u for u in client_secret["redirect_uris"] if redirect_to in u]
redirect_url = random.choice(redirect_urls)
if not redirect_url:
raise CannotAuthenticate("Could not pick any redirect URL(s) from the ones defined "
+ "in Rucio OIDC Client configuration file.") # NOQA: W503
auth_args["redirect_uri"] = redirect_url
oidc_client = OIDC_CLIENTS[client_secret["issuer"]]
auth_args["client_id"] = oidc_client.client_id
if kwargs.get('first_init', False):
auth_url = build_url(oidc_client.authorization_endpoint, params=auth_args)
return {'redirect': redirect_url, 'auth_url': auth_url}
oidc_client.construct_AuthorizationRequest(request_args=auth_args)
# parsing the authorization query string by the Rucio OIDC Client (creates a Grant)
oidc_client.parse_response(AuthorizationResponse,
info='code=' + kwargs.get('code', rndstr()) + '&state=' + auth_args['state'],
sformat="urlencoded")
return {'client': oidc_client, 'state': auth_args['state']}
except Exception as error:
raise CannotAuthenticate(traceback.format_exc()) from error
@transactional_session
def get_auth_oidc(account: str, *, session: "Session", **kwargs) -> str:
"""
Assembles the authorization request of the Rucio Client tailored to the Rucio user
& Identity Provider. Saves authentication session parameters in the oauth_requests
    DB table (for later use-cases). This information is saved for the lifetime
    of the token to allow token exchange and refresh.
Returns authorization URL as a string or a redirection url to
be used in user's browser for authentication.
:param account: Rucio Account identifier as a string.
:param auth_scope: space separated list of scope names. Scope parameter
defines which user's info the user allows to provide
to the Rucio Client.
:param audience: audience for which tokens are requested (EXPECTED_OIDC_AUDIENCE is the default)
:param auto: If True, the function will return authorization URL to the Rucio Client
which will log-in with user's IdP credentials automatically.
Also it will instruct the IdP to return an AuthZ code to another Rucio REST
endpoint /oidc_token. If False, the function will return a URL
to be used by the user in the browser in order to authenticate via IdP
(which will then return with AuthZ code to /oidc_code REST endpoint).
:param polling: If True, '_polling' string will be appended to the access_msg
in the DB oauth_requests table to inform the authorization stage
that the Rucio Client is polling the server for a token
(and no fetchcode needs to be returned at the end).
:param refresh_lifetime: specifies how long the OAuth daemon should
be refreshing this token. Default is 96 hours.
:param ip: IP address of the client as a string.
:param session: The database session in use.
:returns: User & Rucio OIDC Client specific Authorization or Redirection URL as a string
OR a redirection url to be used in user's browser for authentication.
"""
# TO-DO - implement a check if that account already has a valid
    # token with the required scope and audience and return such token !
auth_scope = kwargs.get('auth_scope', EXPECTED_OIDC_SCOPE)
if not auth_scope:
auth_scope = EXPECTED_OIDC_SCOPE
audience = kwargs.get('audience', EXPECTED_OIDC_AUDIENCE)
if not audience:
audience = EXPECTED_OIDC_AUDIENCE
# checking that minimal audience and scope requirements (required by Rucio) are satisfied !
if not all_oidc_req_claims_present(auth_scope, audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE):
raise CannotAuthenticate("Requirements of scope and audience do not satisfy minimal requirements of the Rucio server.")
issuer_id = kwargs.get('issuer', ADMIN_ISSUER_ID)
if not issuer_id:
issuer_id = ADMIN_ISSUER_ID
auto = kwargs.get('auto', False)
polling = kwargs.get('polling', False)
refresh_lifetime = kwargs.get('refresh_lifetime', REFRESH_LIFETIME_H)
ip = kwargs.get('ip', None)
webhome = kwargs.get('webhome', None)
# For webui a mock account will be used here and default account
# will be assigned to the identity during get_token_oidc
if account.external == 'webui':
pass
else:
# Make sure the account exists
if not account_exists(account, session=session):
logging.debug("Account %s does not exist.", account)
return None
try:
stopwatch = Stopwatch()
# redirect_url needs to be specified & one of those defined
# in the Rucio OIDC Client configuration
redirect_to = "auth/oidc_code"
if auto:
redirect_to = "auth/oidc_token"
# random strings in order to keep track of responses to outstanding requests (state)
# and to associate a client session with an ID Token and to mitigate replay attacks (nonce).
state, nonce = rndstr(50), rndstr(50)
# in the following statement we retrieve the authorization endpoint
# from the client of the issuer and build url
oidc_dict = __get_init_oidc_client(issuer_id=issuer_id, redirect_to=redirect_to,
state=state, nonce=nonce,
scope=auth_scope, audience=audience, first_init=True)
auth_url = oidc_dict['auth_url']
redirect_url = oidc_dict['redirect']
# redirect code is put in access_msg and returned to the user (if auto=False)
access_msg = None
if not auto:
access_msg = rndstr(23)
if polling:
access_msg += '_polling'
if auto and webhome:
access_msg = str(webhome)
# Making sure refresh_lifetime is an integer or None.
if refresh_lifetime:
refresh_lifetime = int(refresh_lifetime)
# Specifying temporarily 5 min lifetime for the authentication session.
expired_at = datetime.utcnow() + timedelta(seconds=300)
# saving session parameters into the Rucio DB
oauth_session_params = models.OAuthRequest(account=account,
state=state,
nonce=nonce,
access_msg=access_msg,
redirect_msg=auth_url,
expired_at=expired_at,
refresh_lifetime=refresh_lifetime,
ip=ip)
oauth_session_params.save(session=session)
# If user selected authentication via web browser, a redirection
# URL is returned instead of the direct URL pointing to the IdP.
if not auto:
# the following takes into account deployments where the base url of the rucio server is
# not equivalent to the network location, e.g. if the server is proxied
auth_server = urlparse(redirect_url)
auth_url = build_url('https://' + auth_server.netloc, path='{}auth/oidc_redirect'.format(
auth_server.path.split('auth/')[0].lstrip('/')), params=access_msg)
METRICS.timer('IdP_authentication.request').observe(stopwatch.elapsed)
return auth_url
except Exception as error:
raise CannotAuthenticate(traceback.format_exc()) from error
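# Illustrative call sketch; `internal_account` and the chosen kwarg values are
# hypothetical, but the keyword names mirror the ones read above:
#
#   auth_url = get_auth_oidc(internal_account,
#                            auth_scope='openid profile',
#                            audience='rucio',
#                            auto=False,
#                            polling=False,
#                            ip='127.0.0.1')
#   # auth_url is then opened by the user in a browser to authenticate with the IdP.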
@transactional_session
def get_token_oidc(auth_query_string: str, ip: str = None, *, session: "Session"):
"""
    Called after the Rucio user got redirected to the Rucio /auth/oidc_token (or /auth/oidc_code)
    REST endpoint with the authz code and session state encoded within the URL.
    These parameters are used to eventually get the user's info and tokens from the IdP.
:param auth_query_string: IdP redirection URL query string (AuthZ code & user session state).
:param ip: IP address of the client as a string.
:param session: The database session in use.
:returns: One of the following tuples: ("fetchcode", <code>); ("token", <token>);
("polling", True); The result depends on the authentication strategy being used
(no auto, auto, polling).
"""
try:
stopwatch = Stopwatch()
parsed_authquery = parse_qs(auth_query_string)
state = parsed_authquery["state"][0]
code = parsed_authquery["code"][0]
# getting oauth request params from the oauth_requests DB Table
oauth_req_params = session.query(models.OAuthRequest).filter_by(state=state).first()
if oauth_req_params is None:
raise CannotAuthenticate("User related Rucio OIDC session could not keep "
+ "track of responses from outstanding requests.") # NOQA: W503
req_url = urlparse(oauth_req_params.redirect_msg or '')
issuer = req_url.scheme + "://" + req_url.netloc
req_params = parse_qs(req_url.query)
client_params = {}
for key in list(req_params):
client_params[key] = val_to_space_sep_str(req_params[key])
oidc_client = __get_init_oidc_client(issuer=issuer, code=code, **client_params)['client']
METRICS.counter(name='IdP_authentication.code_granted').inc()
        # exchange the access code for an access token
oidc_tokens = oidc_client.do_access_token_request(state=state,
request_args={"code": code},
authn_method="client_secret_basic",
skew=LEEWAY_SECS)
if 'error' in oidc_tokens:
raise CannotAuthorize(oidc_tokens['error'])
# mitigate replay attacks
nonce = oauth_req_params.nonce
if oidc_tokens['id_token']['nonce'] != nonce:
raise CannotAuthenticate("ID token could not be associated with the Rucio OIDC Client"
+ " session. This points to possible replay attack !") # NOQA: W503
# starting to fill dictionary with parameters for token DB row
jwt_row_dict, extra_dict = {}, {}
jwt_row_dict['identity'] = oidc_identity_string(oidc_tokens['id_token']['sub'],
oidc_tokens['id_token']['iss'])
jwt_row_dict['account'] = oauth_req_params.account
if jwt_row_dict['account'].external == 'webui':
try:
jwt_row_dict['account'] = get_default_account(jwt_row_dict['identity'], IdentityType.OIDC, True, session=session)
except Exception:
return {'webhome': None, 'token': None}
# check if given account has the identity registered
if not exist_identity_account(jwt_row_dict['identity'], IdentityType.OIDC, jwt_row_dict['account'], session=session):
raise CannotAuthenticate("OIDC identity '%s' of the '%s' account is unknown to Rucio."
% (jwt_row_dict['identity'], str(jwt_row_dict['account'])))
METRICS.counter(name='success').inc()
# get access token expiry timestamp
jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in'])
# get audience and scope info from the token
if 'scope' in oidc_tokens and 'audience' in oidc_tokens:
jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope'])
jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience'])
elif 'access_token' in oidc_tokens:
try:
values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud'])
jwt_row_dict['authz_scope'] = values['scope']
jwt_row_dict['audience'] = values['aud']
except Exception:
# we assume the Identity Provider did not do the right job here
jwt_row_dict['authz_scope'] = None
jwt_row_dict['audience'] = None
# groups = oidc_tokens['id_token']['groups']
# nothing done with group info for the moment - TO-DO !
# collect extra token DB row parameters
extra_dict = {}
extra_dict['ip'] = ip
extra_dict['state'] = state
# In case user requested to grant Rucio a refresh token,
# this token will get saved in the DB and an automatic refresh
# for a specified period of time will be initiated (done by the Rucio daemon).
if 'refresh_token' in oidc_tokens:
extra_dict['refresh_token'] = oidc_tokens['refresh_token']
extra_dict['refresh'] = True
extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H
try:
if oauth_req_params.refresh_lifetime is not None:
extra_dict['refresh_lifetime'] = int(oauth_req_params.refresh_lifetime)
except Exception:
pass
try:
values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp'])
exp = values['exp']
extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(exp))
except Exception:
# 4 day expiry period by default
extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H)
new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session)
METRICS.counter(name='IdP_authorization.access_token.saved').inc()
if 'refresh_token' in oidc_tokens:
METRICS.counter(name='IdP_authorization.refresh_token.saved').inc()
# In case authentication via browser was requested,
# we save the token in the oauth_requests table
if oauth_req_params.access_msg:
# If Rucio Client waits for a fetchcode, we save the token under this code in the DB.
if 'http' not in oauth_req_params.access_msg:
if '_polling' not in oauth_req_params.access_msg:
fetchcode = rndstr(50)
session.query(models.OAuthRequest).filter(models.OAuthRequest.state == state)\
.update({models.OAuthRequest.access_msg: fetchcode,
models.OAuthRequest.redirect_msg: new_token['token']})
# If Rucio Client was requested to poll the Rucio Auth server
# for a token automatically, we save the token under a access_msg.
else:
session.query(models.OAuthRequest).filter(models.OAuthRequest.state == state)\
.update({models.OAuthRequest.access_msg: oauth_req_params.access_msg,
models.OAuthRequest.redirect_msg: new_token['token']})
session.commit()
METRICS.timer('IdP_authorization').observe(stopwatch.elapsed)
if '_polling' in oauth_req_params.access_msg:
return {'polling': True}
elif 'http' in oauth_req_params.access_msg:
return {'webhome': oauth_req_params.access_msg, 'token': new_token}
else:
return {'fetchcode': fetchcode}
else:
METRICS.timer('IdP_authorization').observe(stopwatch.elapsed)
return {'token': new_token}
except Exception:
# TO-DO catch different exceptions - InvalidGrant etc. ...
METRICS.counter(name='IdP_authorization.access_token.exception').inc()
logging.debug(traceback.format_exc())
return None
# raise CannotAuthenticate(traceback.format_exc())
@transactional_session
def __get_admin_token_oidc(account: types.InternalAccount, req_scope, req_audience, issuer, *, session: "Session"):
"""
Get a token for Rucio application to act on behalf of itself.
client_credential flow is used for this purpose.
No refresh token is expected to be used.
:param account: the Rucio Admin account name to be used (InternalAccount object expected)
    :param req_scope: the scope requested for the Rucio client's token
    :param req_audience: the audience requested for the Rucio client's token
:param issuer: the Identity Provider nickname or the Rucio instance in use
:param session: The database session in use.
:returns: A dict with token and expires_at entries.
"""
if not OIDC_ADMIN_CLIENTS:
# retry once loading OIDC clients
__initialize_oidc_clients()
if not OIDC_ADMIN_CLIENTS:
raise CannotAuthenticate(traceback.format_exc())
try:
oidc_client = OIDC_ADMIN_CLIENTS[issuer]
args = {"client_id": oidc_client.client_id,
"client_secret": oidc_client.client_secret,
"grant_type": "client_credentials",
"scope": req_scope,
"audience": req_audience}
# in the future should use oauth2 pyoidc client (base) instead
oidc_tokens = oidc_client.do_any(request=CCAccessTokenRequest,
request_args=args,
response=AccessTokenResponse)
if 'error' in oidc_tokens:
raise CannotAuthorize(oidc_tokens['error'])
METRICS.counter(name='IdP_authentication.rucio_admin_token_granted').inc()
# save the access token in the Rucio DB
if 'access_token' in oidc_tokens:
validate_dict = __get_rucio_jwt_dict(oidc_tokens['access_token'], account=account, session=session)
if validate_dict:
METRICS.counter(name='IdP_authentication.success').inc()
new_token = __save_validated_token(oidc_tokens['access_token'], validate_dict, extra_dict={}, session=session)
METRICS.counter(name='IdP_authentication.access_token.saved').inc()
return new_token
else:
logging.debug("Rucio could not get a valid admin token from the Identity Provider.")
return None
else:
logging.debug("Rucio could not get its admin access token from the Identity Provider.")
return None
except Exception:
# TO-DO catch different exceptions - InvalidGrant etc. ...
METRICS.counter(name='IdP_authorization.access_token.exception').inc()
logging.debug(traceback.format_exc())
return None
# raise CannotAuthenticate(traceback.format_exc())
@read_session
def __get_admin_account_for_issuer(*, session: "Session"):
""" Gets admin account for the IdP issuer
:returns : dictionary { 'issuer_1': (account, identity), ... }
"""
if not OIDC_ADMIN_CLIENTS:
# retry once loading OIDC clients
__initialize_oidc_clients()
if not OIDC_ADMIN_CLIENTS:
raise CannotAuthenticate(traceback.format_exc())
issuer_account_dict = {}
for issuer in OIDC_ADMIN_CLIENTS:
admin_identity = oidc_identity_string(OIDC_ADMIN_CLIENTS[issuer].client_id, issuer)
admin_account = session.query(models.IdentityAccountAssociation)\
.filter_by(identity_type=IdentityType.OIDC, identity=admin_identity).first()
issuer_account_dict[issuer] = (admin_account.account, admin_identity)
return issuer_account_dict
@transactional_session
def get_token_for_account_operation(account: str, req_audience: str = None, req_scope: str = None, admin: bool = False, *, session: "Session"):
"""
Looks-up a JWT token with the required scope and audience claims with the account OIDC issuer.
If tokens are found, and none contains the requested audience and scope a new token is requested
(via token exchange or client credential grants in case admin = True)
:param account: Rucio account name in order to lookup the issuer and corresponding valid tokens
:param req_audience: audience required to be present in the token (e.g. 'fts:atlas')
:param req_scope: scope requested to be present in the token (e.g. fts:submit-transfer)
:param admin: If True tokens will be requested for the Rucio admin root account,
preferably with the same issuer as the requesting account OIDC identity
:param session: DB session in use
:return: token dictionary or None, throws an exception in case of problems
"""
try:
if not req_scope:
req_scope = EXPECTED_OIDC_SCOPE
if not req_audience:
req_audience = EXPECTED_OIDC_AUDIENCE
# get all identities for the corresponding account
identities_list = session.query(models.IdentityAccountAssociation.identity) \
.filter(models.IdentityAccountAssociation.identity_type == IdentityType.OIDC,
models.IdentityAccountAssociation.account == account).all()
identities = []
for identity in identities_list:
identities.append(identity[0])
# get all active/valid OIDC tokens
account_tokens = session.query(models.Token).filter(models.Token.identity.in_(identities),
models.Token.account == account,
models.Token.expired_at > datetime.utcnow()).with_for_update(skip_locked=True).all()
# for Rucio Admin account we ask IdP for a token via client_credential grant
# for each user account OIDC identity there is an OIDC issuer that must be, by construction,
# supported by Rucio server (have OIDC admin client registered as well)
# that is why we take the issuer of the account identity that has an active/valid token
# and look for admin account identity which has this issuer assigned
# requestor should always have at least one active subject token unless it is root
# this is why we first discover if the requestor is root or not
get_token_for_adminacc = False
admin_identity = None
admin_issuer = None
admin_iss_acc_idt_dict = __get_admin_account_for_issuer(session=session)
# check if preferred issuer exists - if multiple present last one is taken
preferred_issuer = None
for token in account_tokens:
preferred_issuer = token.identity.split(", ")[1].split("=")[1]
        # loop through all OIDC identities registered for the account of the requestor
for identity in identities:
issuer = identity.split(", ")[1].split("=")[1]
# compare the account of the requestor with the account of the admin
if account == admin_iss_acc_idt_dict[issuer][0]:
# take first matching case which means root is requesting OIDC authentication
admin_identity = admin_iss_acc_idt_dict[issuer][1]
if preferred_issuer and preferred_issuer != issuer:
continue
else:
admin_issuer = issuer
get_token_for_adminacc = True
break
# Rucio admin account requesting OIDC token
if get_token_for_adminacc:
# openid scope is not supported for client_credentials auth flow - removing it if being asked for
if 'openid' in req_scope:
req_scope = req_scope.replace("openid", "").strip()
# checking if there is not already a token to use
admin_account_tokens = session.query(models.Token).filter(models.Token.account == account,
models.Token.expired_at > datetime.utcnow()).all()
for admin_token in admin_account_tokens:
if hasattr(admin_token, 'audience') and hasattr(admin_token, 'oidc_scope') and\
all_oidc_req_claims_present(admin_token.oidc_scope, admin_token.audience, req_scope, req_audience):
return token_dictionary(admin_token)
# if not found request a new one
new_admin_token = __get_admin_token_oidc(account, req_scope, req_audience, admin_issuer, session=session)
return new_admin_token
# Rucio server requests Rucio user to be represented by Rucio admin OIDC identity
if admin and not get_token_for_adminacc:
# we require any other account than admin to have valid OIDC token in the Rucio DB
if not account_tokens:
logging.debug("No valid token exists for account %s.", account)
return None
# we also require that these tokens at least one has the Rucio scopes and audiences
valid_subject_token_exists = False
for account_token in account_tokens:
if all_oidc_req_claims_present(account_token.oidc_scope, account_token.audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE):
valid_subject_token_exists = True
if not valid_subject_token_exists:
logging.debug("No valid audience/scope exists for account %s token.", account)
return None
# openid scope is not supported for client_credentials auth flow - removing it if being asked for
if 'openid' in req_scope:
req_scope = req_scope.replace("openid", "").strip()
admin_account = None
for account_token in account_tokens:
# for each valid account token in the DB we need to check if a valid root token does not exist with the required
# scope and audience
admin_issuer = account_token.identity.split(", ")[1].split("=")[1]
# assuming the requesting account is using Rucio supported IdPs, we check if any token of this admin identity
# has already a token with the requested scopes and audiences
admin_acc_idt_tuple = admin_iss_acc_idt_dict[admin_issuer]
admin_account = admin_acc_idt_tuple[0]
admin_identity = admin_acc_idt_tuple[1]
admin_account_tokens = session.query(models.Token).filter(models.Token.identity == admin_identity,
models.Token.account == admin_account,
models.Token.expired_at > datetime.utcnow()).all()
for admin_token in admin_account_tokens:
if hasattr(admin_token, 'audience') and hasattr(admin_token, 'oidc_scope') and\
all_oidc_req_claims_present(admin_token.oidc_scope, admin_token.audience, req_scope, req_audience):
return token_dictionary(admin_token)
# if no admin token existing was found for the issuer of the valid user token
# we request a new one
new_admin_token = __get_admin_token_oidc(admin_account, req_scope, req_audience, admin_issuer, session=session)
return new_admin_token
# Rucio server requests exchange token for a Rucio user
if not admin and not get_token_for_adminacc:
# we require any other account than admin to have valid OIDC token in the Rucio DB
if not account_tokens:
logging.debug("No valid token exists for account %s.", account)
return None
# we also require that these tokens at least one has the Rucio scopes and audiences
valid_subject_token_exists = False
for account_token in account_tokens:
if all_oidc_req_claims_present(account_token.oidc_scope, account_token.audience, EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE):
valid_subject_token_exists = True
if not valid_subject_token_exists:
logging.debug("No valid audience/scope exists for account %s token.", account)
return None
subject_token = None
for token in account_tokens:
if hasattr(token, 'audience') and hasattr(token, 'oidc_scope'):
if all_oidc_req_claims_present(token.oidc_scope, token.audience, req_scope, req_audience):
return token_dictionary(token)
                # from the available tokens preferentially select one which is being refreshed
if hasattr(token, 'oidc_scope') and ('offline_access' in str(token['oidc_scope'])):
subject_token = token
# if not proceed with token exchange
if not subject_token:
subject_token = random.choice(account_tokens)
exchanged_token = __exchange_token_oidc(subject_token,
scope=req_scope,
audience=req_audience,
identity=subject_token.identity,
refresh_lifetime=subject_token.refresh_lifetime,
account=account,
session=session)
return exchanged_token
logging.debug("No token could be returned for account operation for account %s.", account)
return None
except Exception:
# raise CannotAuthorize(traceback.format_exc(), type(account), account)
logging.debug(traceback.format_exc())
return None
@METRICS.time_it
@transactional_session
def __exchange_token_oidc(subject_token_object: models.Token, *, session: "Session", **kwargs):
"""
    Exchanges an access_token for a new one with a different scope and/or audience,
    provided that the scope specified is registered with the IdP for the Rucio OIDC Client
and the Rucio user has this scope linked to the subject token presented
for the token exchange.
:param subject_token_object: DB subject token to be exchanged
:param kwargs: 'scope', 'audience', 'grant_type', 'ip' and 'account' doing the exchange
:param session: The database session in use.
:returns: A dict with token and expires_at entries.
"""
grant_type = kwargs.get('grant_type', EXCHANGE_GRANT_TYPE)
jwt_row_dict, extra_dict = {}, {}
jwt_row_dict['account'] = kwargs.get('account', '')
jwt_row_dict['authz_scope'] = kwargs.get('scope', '')
jwt_row_dict['audience'] = kwargs.get('audience', '')
jwt_row_dict['identity'] = kwargs.get('identity', '')
extra_dict['ip'] = kwargs.get('ip', None)
# if subject token has offline access scope but *no* refresh token in the DB
    # (happens when the user presents a subject token acquired from sources other than the Rucio CLI mechanism),
# add offline_access scope to the token exchange request !
if 'offline_access' in str(subject_token_object.oidc_scope) and not subject_token_object.refresh_token:
jwt_row_dict['authz_scope'] += ' offline_access'
if not grant_type:
grant_type = EXCHANGE_GRANT_TYPE
try:
oidc_dict = __get_init_oidc_client(token_object=subject_token_object, token_type="subject_token")
oidc_client = oidc_dict['client']
args = {"subject_token": subject_token_object.token,
"scope": jwt_row_dict['authz_scope'],
"audience": jwt_row_dict['audience'],
"grant_type": grant_type}
        # exchange the access token for a new one
oidc_token_response = oidc_dict['client'].do_any(Message,
endpoint=oidc_client.provider_info["token_endpoint"],
state=oidc_dict['state'],
request_args=args,
authn_method="client_secret_basic")
oidc_tokens = oidc_token_response.json()
if 'error' in oidc_tokens:
raise CannotAuthorize(oidc_tokens['error'])
# get audience and scope information
if 'scope' in oidc_tokens and 'audience' in oidc_tokens:
jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope'])
jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience'])
elif 'access_token' in oidc_tokens:
values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud'])
jwt_row_dict['authz_scope'] = values['scope']
jwt_row_dict['audience'] = values['aud']
jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in'])
if 'refresh_token' in oidc_tokens:
extra_dict['refresh_token'] = oidc_tokens['refresh_token']
extra_dict['refresh'] = True
extra_dict['refresh_lifetime'] = kwargs.get('refresh_lifetime', REFRESH_LIFETIME_H)
if extra_dict['refresh_lifetime'] is None:
extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H
try:
values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp'])
extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(values['exp']))
except Exception:
# 4 day expiry period by default
extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H)
new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session)
METRICS.counter(name='IdP_authorization.access_token.saved').inc()
if 'refresh_token' in oidc_tokens:
METRICS.counter(name='IdP_authorization.refresh_token.saved').inc()
return new_token
except Exception:
# raise CannotAuthorize(traceback.format_exc())
logging.debug(traceback.format_exc())
return None
@transactional_session
def __change_refresh_state(token: str, refresh: bool = False, *, session: "Session"):
"""
Changes token refresh state to True/False.
:param token: the access token for which the refresh value should be changed.
"""
try:
if refresh:
# update refresh column for a token to True
session.query(models.Token).filter(models.Token.token == token)\
.update({models.Token.refresh: True})
else:
session.query(models.Token).filter(models.Token.token == token)\
.update({models.Token.refresh: False,
models.Token.refresh_expired_at: datetime.utcnow()})
session.commit()
except Exception as error:
raise RucioException(error.args) from error
@transactional_session
def refresh_cli_auth_token(token_string: str, account: str, *, session: "Session"):
"""
Checks if there is active refresh token and if so returns
either active token with expiration timestamp or requests a new
refresh and returns new access token.
:param token_string: token string
:param account: Rucio account for which token refresh should be considered
    :return: tuple of (access token, expiration epoch), None otherwise
"""
# only validated tokens are in the DB, check presence of token_string
account_token = session.query(models.Token) \
.filter(models.Token.token == token_string,
models.Token.account == account,
models.Token.expired_at > datetime.utcnow()) \
.with_for_update(skip_locked=True).first()
# if token does not exist in the DB, return None
if account_token is None:
logging.debug("No valid token exists for account %s.", account)
return None
# protection (!) no further action should be made
# for token_string without refresh_token in the DB !
if account_token.refresh_token is None:
logging.debug("No refresh token exists for account %s.", account)
return None
# if the token exists, check if it was refreshed already, if not, refresh it
if account_token.refresh:
# protection (!) returning the same token if the token_string
# is a result of a refresh which happened in the last 5 min
datetime_min_ago = datetime.utcnow() - timedelta(seconds=300)
if account_token.updated_at > datetime_min_ago:
epoch_exp = int(floor((account_token.expired_at - datetime(1970, 1, 1)).total_seconds()))
new_token_string = account_token.token
return new_token_string, epoch_exp
# asking for a refresh of this token
new_token = __refresh_token_oidc(account_token, session=session)
new_token_string = new_token['token']
epoch_exp = int(floor((new_token['expires_at'] - datetime(1970, 1, 1)).total_seconds()))
return new_token_string, epoch_exp
else:
# find account token with the same scope,
# audience and has a valid refresh token
new_token = session.query(models.Token) \
.filter(models.Token.refresh == true(),
models.Token.refresh_expired_at > datetime.utcnow(),
models.Token.account == account,
models.Token.expired_at > datetime.utcnow()) \
.with_for_update(skip_locked=True).first()
if new_token is None:
return None
# if the new_token has same audience and scopes as the original
# account_token --> return this token and exp timestamp to the user
if all_oidc_req_claims_present(new_token.oidc_scope, new_token.audience,
account_token.oidc_scope, account_token.audience):
epoch_exp = int(floor((new_token.expired_at - datetime(1970, 1, 1)).total_seconds()))
new_token_string = new_token.token
return new_token_string, epoch_exp
# if scopes and audience are not the same, return None
logging.debug("No token could be returned for refresh operation for account %s.", account)
return None
@transactional_session
def refresh_jwt_tokens(total_workers: int, worker_number: int, refreshrate: int = 3600, limit: int = 1000, *, session: "Session"):
"""
Refreshes tokens which expired or will expire before (now + refreshrate)
next run of this function and which have valid refresh token.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of tokens to refresh per call.
:param session: Database session in use.
    :return: number of tokens refreshed
"""
nrefreshed = 0
try:
# get tokens for refresh that expire in the next <refreshrate> seconds
expiration_future = datetime.utcnow() + timedelta(seconds=refreshrate)
query = session.query(models.Token.token) \
.filter(and_(models.Token.refresh == true(),
models.Token.refresh_expired_at > datetime.utcnow(),
models.Token.expired_at < expiration_future))\
.order_by(models.Token.expired_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='token')
# limiting the number of tokens for refresh
query = query.limit(limit)
filtered_tokens = []
for items in session.execute(query).partitions(10):
tokens = tuple(map(lambda row: row.token, items))
filtered_tokens += session.query(models.Token) \
.filter(models.Token.token.in_(tokens)) \
.with_for_update(skip_locked=True) \
.all()
# refreshing these tokens
for token in filtered_tokens:
new_token = __refresh_token_oidc(token, session=session)
if new_token:
nrefreshed += 1
except Exception as error:
raise RucioException(error.args) from error
return nrefreshed
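# Illustrative daemon-side call sketch; a single worker refreshing at most 1000 tokens
# that expire within the next hour (the defaults above):
#
#   nrefreshed = refresh_jwt_tokens(total_workers=1, worker_number=0,
#                                   refreshrate=3600, limit=1000)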
@METRICS.time_it
@transactional_session
def __refresh_token_oidc(token_object: models.Token, *, session: "Session"):
"""
Requests new access and refresh tokens from the Identity Provider.
Assumption: The Identity Provider issues refresh tokens for one time use only and
with a limited lifetime. The refresh tokens are invalidated no matter which of these
situations happens first.
:param token_object: Rucio models.Token DB row object
:returns: A dict with token and expires_at entries if all went OK, None if
refresh was not possible due to token invalidity or refresh lifetime
              constraints. Otherwise, throws an Exception.
"""
try:
jwt_row_dict, extra_dict = {}, {}
jwt_row_dict['account'] = token_object.account
jwt_row_dict['identity'] = token_object.identity
extra_dict['refresh_start'] = datetime.utcnow()
# check if refresh token started in the past already
if hasattr(token_object, 'refresh_start'):
if token_object.refresh_start:
extra_dict['refresh_start'] = token_object.refresh_start
# check if refresh lifetime is set for the token
extra_dict['refresh_lifetime'] = REFRESH_LIFETIME_H
if token_object.refresh_lifetime:
extra_dict['refresh_lifetime'] = token_object.refresh_lifetime
# if the token has been refreshed for time exceeding
# the refresh_lifetime, the attempt will be aborted and refresh stopped
if datetime.utcnow() - extra_dict['refresh_start'] > timedelta(hours=extra_dict['refresh_lifetime']):
__change_refresh_state(token_object.token, refresh=False, session=session)
return None
oidc_dict = __get_init_oidc_client(token_object=token_object, token_type="refresh_token")
oidc_client = oidc_dict['client']
# getting a new refreshed set of tokens
state = oidc_dict['state']
oidc_tokens = oidc_client.do_access_token_refresh(state=state, skew=LEEWAY_SECS)
if 'error' in oidc_tokens:
raise CannotAuthorize(oidc_tokens['error'])
METRICS.counter(name='IdP_authorization.refresh_token.refreshed').inc()
# get audience and scope information
if 'scope' in oidc_tokens and 'audience' in oidc_tokens:
jwt_row_dict['authz_scope'] = val_to_space_sep_str(oidc_tokens['scope'])
jwt_row_dict['audience'] = val_to_space_sep_str(oidc_tokens['audience'])
elif 'access_token' in oidc_tokens:
values = __get_keyvalues_from_claims(oidc_tokens['access_token'], ['scope', 'aud'])
jwt_row_dict['authz_scope'] = values['scope']
jwt_row_dict['audience'] = values['aud']
# save new access and refresh tokens in the DB
if 'refresh_token' in oidc_tokens and 'access_token' in oidc_tokens:
# aborting refresh of the original token
# (keeping it in place until it expires)
__change_refresh_state(token_object.token, refresh=False, session=session)
# get access token expiry timestamp
jwt_row_dict['lifetime'] = datetime.utcnow() + timedelta(seconds=oidc_tokens['expires_in'])
extra_dict['refresh'] = True
extra_dict['refresh_token'] = oidc_tokens['refresh_token']
try:
values = __get_keyvalues_from_claims(oidc_tokens['refresh_token'], ['exp'])
extra_dict['refresh_expired_at'] = datetime.utcfromtimestamp(float(values['exp']))
except Exception:
# 4 day expiry period by default
extra_dict['refresh_expired_at'] = datetime.utcnow() + timedelta(hours=REFRESH_LIFETIME_H)
new_token = __save_validated_token(oidc_tokens['access_token'], jwt_row_dict, extra_dict=extra_dict, session=session)
METRICS.counter(name='IdP_authorization.access_token.saved').inc()
METRICS.counter(name='IdP_authorization.refresh_token.saved').inc()
else:
raise CannotAuthorize("OIDC identity '%s' of the '%s' account is did not " % (token_object.identity, token_object.account)
+ "succeed requesting a new access and refresh tokens.") # NOQA: W503
return new_token
except Exception as error:
METRICS.counter(name='IdP_authorization.refresh_token.exception').inc()
raise CannotAuthorize(traceback.format_exc()) from error
@transactional_session
def delete_expired_oauthrequests(total_workers: int, worker_number: int, limit: int = 1000, *, session: "Session"):
"""
Delete expired OAuth request parameters.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of oauth request session parameters to delete.
:param session: Database session in use.
:returns: number of deleted rows
"""
try:
# get expired OAuth request parameters
query = session.query(models.OAuthRequest.state).filter(models.OAuthRequest.expired_at < datetime.utcnow())\
.order_by(models.OAuthRequest.expired_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='state')
# limiting the number of oauth requests deleted at once
query = query.limit(limit)
ndeleted = 0
for items in session.execute(query).partitions(10):
states = tuple(map(lambda row: row.state, items))
ndeleted += session.query(models.OAuthRequest) \
.filter(models.OAuthRequest.state.in_(states)) \
.with_for_update(skip_locked=True) \
.delete(synchronize_session='fetch')
return ndeleted
except Exception as error:
raise RucioException(error.args) from error
def __get_keyvalues_from_claims(token: str, keys=None):
"""
Extracting claims from token, e.g. scope and audience.
:param token: the JWT to be unpacked
    :param keys: list of key names to extract from the token claims
:returns: The list of unicode values under the key, throws an exception otherwise.
"""
resdict = {}
try:
claims = JWT().unpack(token).payload()
if not keys:
keys = claims.keys()
for key in keys:
value = ''
if key in claims:
value = val_to_space_sep_str(claims[key])
resdict[key] = value
return resdict
except Exception as error:
raise CannotAuthenticate(traceback.format_exc()) from error
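# Illustrative sketch; the token string and claim values are hypothetical. Claim values
# come back as space-separated strings via `val_to_space_sep_str`:
#
#   __get_keyvalues_from_claims(jwt_string, keys=['scope', 'aud'])
#   # -> {'scope': 'openid profile', 'aud': 'rucio'}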
@read_session
def __get_rucio_jwt_dict(jwt: str, account=None, *, session: "Session"):
"""
Get a Rucio token dictionary from token claims.
Check token expiration and find default Rucio
account for token identity.
:param jwt: JSON Web Token to be inspected
:param session: DB session in use
:returns: Rucio token dictionary, None otherwise
"""
try:
        # getting token payload
token_payload = __get_keyvalues_from_claims(jwt)
identity_string = oidc_identity_string(token_payload['sub'], token_payload['iss'])
expiry_date = datetime.utcfromtimestamp(float(token_payload['exp']))
if expiry_date < datetime.utcnow(): # check if expired
logging.debug("Token has already expired since: %s", str(expiry_date))
return None
scope = None
audience = None
if 'scope' in token_payload:
scope = val_to_space_sep_str(token_payload['scope'])
if 'aud' in token_payload:
audience = val_to_space_sep_str(token_payload['aud'])
if not account:
            # this assumes the token has been looked up in the DB beforehand,
            # to be sure that the right account is not already known to the DB !
account = get_default_account(identity_string, IdentityType.OIDC, True, session=session)
else:
if not exist_identity_account(identity_string, IdentityType.OIDC, account, session=session):
logging.debug("No OIDC identity exists for account: %s", str(account))
return None
value = {'account': account,
'identity': identity_string,
'lifetime': expiry_date,
'audience': audience,
'authz_scope': scope}
return value
except Exception:
logging.debug(traceback.format_exc())
return None
@transactional_session
def __save_validated_token(token, valid_dict, extra_dict=None, *, session: "Session"):
"""
Save JWT token to the Rucio DB.
:param token: Authentication token as a variable-length string.
:param valid_dict: Validation Rucio dictionary as the output
of the __get_rucio_jwt_dict function
:raises RucioException: on any error
:returns: A dict with token and expires_at entries.
"""
try:
if not extra_dict:
extra_dict = {}
new_token = models.Token(account=valid_dict.get('account', None),
token=token,
oidc_scope=valid_dict.get('authz_scope', None),
expired_at=valid_dict.get('lifetime', None),
audience=valid_dict.get('audience', None),
identity=valid_dict.get('identity', None),
refresh=extra_dict.get('refresh', False),
refresh_token=extra_dict.get('refresh_token', None),
refresh_expired_at=extra_dict.get('refresh_expired_at', None),
refresh_lifetime=extra_dict.get('refresh_lifetime', None),
refresh_start=extra_dict.get('refresh_start', None),
ip=extra_dict.get('ip', None))
new_token.save(session=session)
return token_dictionary(new_token)
except Exception as error:
raise RucioException(error.args) from error
@transactional_session
def validate_jwt(json_web_token: str, *, session: "Session") -> dict[str, Any]:
"""
Verifies signature and validity of a JSON Web Token.
Gets the issuer public keys from the oidc_client
and verifies the validity of the token.
Used only for external tokens, not known to Rucio DB.
:param json_web_token: the JWT string to verify
:returns: dictionary { account: <account name>,
identity: <identity>,
lifetime: <token lifetime>,
audience: <audience>,
authz_scope: <authz_scope> }
if successful.
:raises: CannotAuthenticate if unsuccessful
"""
if not OIDC_CLIENTS:
# retry once loading OIDC clients
__initialize_oidc_clients()
if not OIDC_CLIENTS:
raise CannotAuthenticate(traceback.format_exc())
try:
# getting issuer from the token payload
token_dict: Optional[dict[str, Any]] = __get_rucio_jwt_dict(json_web_token, session=session)
if not token_dict:
raise CannotAuthenticate(traceback.format_exc())
issuer = token_dict['identity'].split(", ")[1].split("=")[1]
oidc_client = OIDC_CLIENTS[issuer]
issuer_keys = oidc_client.keyjar.get_issuer_keys(issuer)
JWS().verify_compact(json_web_token, issuer_keys)
# if there is no audience and scope information,
# try to get it from IdP introspection endpoint
# TO-BE-REMOVED - once all IdPs support scope and audience in token claims !!!
if not token_dict['authz_scope'] or not token_dict['audience']:
clprocess = subprocess.Popen(['curl', '-s', '-L', '-u', '%s:%s'
% (oidc_client.client_id, oidc_client.client_secret),
'-d', 'token=%s' % (json_web_token),
oidc_client.introspection_endpoint],
shell=False, stdout=subprocess.PIPE)
inspect_claims = json.loads(clprocess.communicate()[0])
try:
token_dict['audience'] = inspect_claims['aud']
token_dict['authz_scope'] = inspect_claims['scope']
except:
pass
METRICS.counter(name='JSONWebToken.valid').inc()
# if token is valid and coming from known issuer --> check aud and scope and save it if unknown
if token_dict['authz_scope'] and token_dict['audience']:
if all_oidc_req_claims_present(token_dict['authz_scope'], token_dict['audience'], EXPECTED_OIDC_SCOPE, EXPECTED_OIDC_AUDIENCE):
# save the token in Rucio DB giving the permission to use it for Rucio operations
__save_validated_token(json_web_token, token_dict, session=session)
else:
logging.debug("Token audience [%s] or scope [%s] verification failed.", token_dict['audience'], token_dict['authz_scope'])
raise CannotAuthenticate(traceback.format_exc())
else:
logging.debug("Token audience or scope not present.")
raise CannotAuthenticate(traceback.format_exc())
METRICS.counter(name='JSONWebToken.saved').inc()
return token_dict
except Exception:
METRICS.counter(name='JSONWebToken.invalid').inc()
logging.debug(traceback.format_exc())
raise CannotAuthenticate(traceback.format_exc())
def oidc_identity_string(sub: str, iss: str):
"""
    Transforms the IdP sub claim and issuer url into the user's identity string.
    :param sub: user's SUB claim from the Identity Provider
    :param iss: issuer (IdP) https url
:returns: OIDC identity string "SUB=<usersid>, ISS=https://iam-test.ch/"
"""
return 'SUB=' + str(sub) + ', ISS=' + str(iss)
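# Example (values are hypothetical):
#   oidc_identity_string('b3127dc7', 'https://iam-test.ch/')
#   # -> 'SUB=b3127dc7, ISS=https://iam-test.ch/'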
def token_dictionary(token: models.Token):
return {'token': token.token, 'expires_at': token.expired_at}
| blob_id: 3b0d9a0c4b9411b001a1849c58fdd3e4840fc5a0 | directory_id: afc3558e47ea4c82cb70190743472274eae7aeb1 | content_id: b9c4c2682ebd988258badcef364b6317309a827c
| snapshot_id: 86a77fb77ca80cede9c41a9a22080eeeaf364002 | revision_id: 9551af6e5a2482e72a2af1e3b8597fd54b999d69 | branch_name: refs/heads/main
| repo_name: open-mmlab/mmocr | path: /tests/test_datasets/test_dataset_wrapper.py | filename: test_dataset_wrapper.py | extension: py | length_bytes: 4,324
| detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
| visit_date: 2023-08-03T14:06:11.075037 | revision_date: 2023-07-26T02:32:14 | committer_date: 2023-07-26T02:32:14 | gha_event_created_at: 2023-09-12T03:17:12 | gha_created_at: 2021-04-07T13:40:21
| github_id: 355,559,187 | star_events_count: 3,734 | fork_events_count: 801 | gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from copy import deepcopy
from unittest import TestCase
from unittest.mock import MagicMock
from mmengine.registry import init_default_scope
from mmocr.datasets import ConcatDataset, OCRDataset
from mmocr.registry import TRANSFORMS
class TestConcatDataset(TestCase):
@TRANSFORMS.register_module()
class MockTransform:
def __init__(self, return_value):
self.return_value = return_value
def __call__(self, *args, **kwargs):
return self.return_value
def setUp(self):
init_default_scope('mmocr')
dataset = OCRDataset
# create dataset_a
data_info = dict(filename='img_1.jpg', height=720, width=1280)
dataset.parse_data_info = MagicMock(return_value=data_info)
self.dataset_a = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
self.dataset_a_with_pipeline = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json',
pipeline=[dict(type='MockTransform', return_value=1)])
# create dataset_b
data_info = dict(filename='img_2.jpg', height=720, width=1280)
dataset.parse_data_info = MagicMock(return_value=data_info)
self.dataset_b = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
self.dataset_b_with_pipeline = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json',
pipeline=[dict(type='MockTransform', return_value=2)])
def test_init(self):
with self.assertRaises(TypeError):
ConcatDataset(datasets=[0])
with self.assertRaises(ValueError):
ConcatDataset(
datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b)
],
pipeline=[dict(type='MockTransform', return_value=3)])
with self.assertRaises(ValueError):
ConcatDataset(
datasets=[
deepcopy(self.dataset_a),
deepcopy(self.dataset_b_with_pipeline)
],
pipeline=[dict(type='MockTransform', return_value=3)])
with self.assertRaises(ValueError):
dataset_a = deepcopy(self.dataset_a)
dataset_b = OCRDataset(
metainfo=dict(dummy='dummy'),
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
ConcatDataset(datasets=[dataset_a, dataset_b])
# test lazy init
ConcatDataset(
datasets=[deepcopy(self.dataset_a),
deepcopy(self.dataset_b)],
pipeline=[dict(type='MockTransform', return_value=3)],
lazy_init=True)
def test_getitem(self):
cat_datasets = ConcatDataset(
datasets=[deepcopy(self.dataset_a),
deepcopy(self.dataset_b)],
pipeline=[dict(type='MockTransform', return_value=3)])
for datum in cat_datasets:
self.assertEqual(datum, 3)
cat_datasets = ConcatDataset(
datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b)
],
pipeline=[dict(type='MockTransform', return_value=3)],
force_apply=True)
for datum in cat_datasets:
self.assertEqual(datum, 3)
cat_datasets = ConcatDataset(datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b_with_pipeline)
])
self.assertEqual(cat_datasets[0], 1)
self.assertEqual(cat_datasets[-1], 2)
| blob_id: fe395a36a22014e8fafdcd9faf792295cd16b7ec | directory_id: f576f0ea3725d54bd2551883901b25b863fe6688 | content_id: b7caa688c63d2672e55f85a1858802a330b76620
| snapshot_id: 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | revision_id: c2ca191e736bb06bfbbbc9493e8325763ba990bb | branch_name: refs/heads/main
| repo_name: Azure/azure-sdk-for-python | path: /sdk/monitor/azure-monitor-opentelemetry-exporter/samples/metrics/sample_views.py | filename: sample_views.py | extension: py | length_bytes: 1,408
| detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later"] | license_type: permissive | gha_license_id: MIT
| visit_date: 2023-09-06T09:30:13.135012 | revision_date: 2023-09-06T01:08:06 | committer_date: 2023-09-06T01:08:06 | gha_event_created_at: 2023-09-14T21:48:49 | gha_created_at: 2012-04-24T16:46:12
| github_id: 4,127,088 | star_events_count: 4,046 | fork_events_count: 2,755 | gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
This example shows how to customize the metrics that are output by the SDK using Views. Metrics created
and recorded using the SDK are tracked, and telemetry is exported to Application Insights with the
AzureMonitorMetricExporter.
"""
import os
from opentelemetry import metrics
from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.metrics.view import View
from azure.monitor.opentelemetry.exporter import AzureMonitorMetricExporter
exporter = AzureMonitorMetricExporter.from_connection_string(
os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"]
)
# Create a view matching the counter instrument `my.counter`
# and configure the new name `my.counter.total` for the result metrics stream
change_metric_name_view = View(
instrument_type=Counter,
instrument_name="my.counter",
name="my.counter.total",
)
# Metrics are reported every 1 minute
reader = PeriodicExportingMetricReader(exporter)
provider = MeterProvider(
metric_readers=[
reader,
],
views=[
change_metric_name_view,
],
)
metrics.set_meter_provider(provider)
meter = metrics.get_meter_provider().get_meter("view-name-change")
my_counter = meter.create_counter("my.counter")
my_counter.add(100)
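# --- Optional extension (illustrative sketch, not part of the original sample) ---
# Views can also drop or re-aggregate instruments. Assuming the same SDK, a second
# view such as the hypothetical one below could be passed to MeterProvider(views=[...])
# alongside change_metric_name_view to silence an instrument named "noisy.counter":
#
#   from opentelemetry.sdk.metrics.view import DropAggregation
#
#   drop_noisy_counter_view = View(
#       instrument_name="noisy.counter",
#       aggregation=DropAggregation(),
#   )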
| blob_id: bcffa46053829e3ac4dd01b3222e05b0d4ffa5db | directory_id: e3ed3f09396bbce16a5ec571ea0ec26a4193ad2e | content_id: 5ca3fd92f19eacb07403462571e772f029bbdfff
| snapshot_id: ea8a3e05f6bf1249b5a027e3a06fe1dbbde271d4 | revision_id: 84d3b1daf0de363cc823d99f978e2861ed400b5b | branch_name: refs/heads/master
| repo_name: QUVA-Lab/artemis | path: /artemis/ml/predictors/i_predictor.py | filename: i_predictor.py | extension: py | length_bytes: 1,623
| detected_licenses: ["BSD-2-Clause-Views"] | license_type: permissive | gha_license_id: NOASSERTION
| visit_date: 2023-01-29T01:57:00.504531 | revision_date: 2018-10-02T16:32:10 | committer_date: 2018-10-02T16:32:10 | gha_event_created_at: 2019-04-17T04:25:08 | gha_created_at: 2016-04-29T09:58:50
| github_id: 57,375,178 | star_events_count: 241 | fork_events_count: 33 | gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
from abc import abstractmethod
import numpy as np
from artemis.ml.tools.processors import OneHotEncoding
__author__ = 'peter'
class IPredictor(object):
@abstractmethod
def train(self, input_data, target_data):
"""
        :param input_data: The input data to train on
        :param target_data: The corresponding target data to fit
:return:
"""
@abstractmethod
def predict(self, input_data):
"""
:return: The output given the input data
"""
class CategoricalPredictor(IPredictor):
"""
A wrapper that transforms a predictor that outputs a vector into
a predictor that outputs an integer "category" label.
"""
def __init__(self, predictor, n_categories = None, argmax_outputs = True):
self._predictor = predictor
self._n_categories = n_categories
self._encoder = None if n_categories is None else OneHotEncoding(n_categories)
self._argmax_outputs = argmax_outputs
def train(self, input_data, target_data):
if self._encoder is None:
raise Exception('If you call train before predict, you must provide the number of categories.')
new_target_data = self._encoder(target_data)
return self._predictor.train(input_data, new_target_data)
def predict(self, input_data):
out = self._predictor.predict(input_data)
if self._argmax_outputs:
if self._encoder is None:
assert out.ndim==2
self._n_categories = out.shape[1]
self._encoder = OneHotEncoding(self._n_categories)
return np.argmax(out, axis = 1)
else:
return out
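# --- Usage sketch (hypothetical, not part of the original module) ---
# Assuming a concrete IPredictor whose predict() returns an (n_samples, n_categories)
# score array, CategoricalPredictor turns those scores back into integer labels:
#
#   base = SomeVectorPredictor()                      # hypothetical IPredictor subclass
#   clf = CategoricalPredictor(base, n_categories=3)
#   clf.train(x_train, y_train)                       # y_train holds integer labels 0..2
#   labels = clf.predict(x_test)                      # argmax over the 3 output columns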
| blob_id: 5372b368ad0a4e44203e542233b81a757cd10d3b | directory_id: 52245910f830dbfb2b1432ad2a967df7321ee6de | content_id: b0d423e073cfafa5da1ebd354d88dddb978ed205
| snapshot_id: 92c19f979353d456512abbce5a027dff6ddb3a5c | revision_id: 2c6e165e2bba96c0cb97947aa072d4429133cf7a | branch_name: refs/heads/main
| repo_name: holoviz/panel | path: /panel/links.py | filename: links.py | extension: py | length_bytes: 29,213
| detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
| visit_date: 2023-08-17T11:28:06.581979 | revision_date: 2023-08-17T11:23:09 | committer_date: 2023-08-17T11:23:09 | gha_event_created_at: 2023-09-14T17:13:31 | gha_created_at: 2018-08-23T12:14:24
| github_id: 145,848,899 | star_events_count: 2,544 | fork_events_count: 373 | gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
"""
Defines Links which allow declaring links between bokeh properties.
"""
from __future__ import annotations
import difflib
import sys
import weakref
from typing import (
TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type,
Union,
)
import param
from bokeh.models import CustomJS, LayoutDOM, Model as BkModel
from .io.datamodel import create_linked_datamodel
from .io.loading import LOADING_INDICATOR_CSS_CLASS
from .models import ReactiveHTML
from .reactive import Reactive
from .util.warnings import warn
from .viewable import Viewable
if TYPE_CHECKING:
from bokeh.model import Model
try:
from holoviews.core.dimension import Dimensioned
JSLinkTarget = Union[Reactive, BkModel, 'Dimensioned']
except Exception:
JSLinkTarget = Union[Reactive, BkModel] # type: ignore
SourceModelSpec = Tuple[Optional[str], str]
TargetModelSpec = Tuple[Optional[str], Optional[str]]
def assert_source_syncable(source: 'Reactive', properties: Iterable[str]) -> None:
for prop in properties:
if prop.startswith('event:'):
continue
elif hasattr(source, 'object') and isinstance(source.object, LayoutDOM):
current = source.object
for attr in prop.split('.'):
if hasattr(current, attr):
current = getattr(current, attr)
continue
raise ValueError(
f"Could not resolve {prop} on {source.object} model. "
"Ensure you jslink an attribute that exists on the "
"bokeh model."
)
elif (prop not in source.param and prop not in list(source._rename.values())):
matches = difflib.get_close_matches(prop, list(source.param))
if matches:
matches_repr = f' Similar parameters include: {matches!r}'
else:
matches_repr = ''
raise ValueError(
f"Could not jslink {prop!r} parameter (or property) "
f"on {type(source).__name__} object because it was not "
f"found. Similar parameters include: {matches_repr}."
)
elif (source._source_transforms.get(prop, False) is None or
source._rename.get(prop, False) is None):
raise ValueError(
f"Cannot jslink {prop!r} parameter on {type(source).__name__} "
"object, the parameter requires a live Python kernel "
"to have an effect."
)
def assert_target_syncable(
source: 'Reactive', target: 'JSLinkTarget', properties: Dict[str, str]
) -> None:
for k, p in properties.items():
if k.startswith('event:'):
continue
elif p not in target.param and p not in list(target._rename.values()):
matches = difflib.get_close_matches(p, list(target.param))
if matches:
matches_repr = ' Similar parameters include: %r' % matches
else:
matches_repr = ''
raise ValueError(
f"Could not jslink {p!r} parameter (or property) "
f"on {type(source).__name__} object because it was not "
f"found. Similar parameters include: {matches_repr}"
)
elif (target._source_transforms.get(p, False) is None or
target._rename.get(p, False) is None):
raise ValueError(
f"Cannot jslink {k!r} parameter on {type(source).__name__} "
f"object to {p!r} parameter on {type(target).__name__}. "
"It requires a live Python kernel to have an effect."
)
class Callback(param.Parameterized):
"""
A Callback defines some callback to be triggered when a property
changes on the source object. A Callback can execute arbitrary
Javascript code and will make all objects referenced in the args
available in the JS namespace.
"""
args = param.Dict(default={}, allow_None=True, doc="""
A mapping of names to Python objects. These objects are made
available to the callback's code snippet as the values of
named parameters to the callback.""")
code = param.Dict(default=None, doc="""
A dictionary mapping from a source specification to a JS code
snippet to be executed if the source property changes.""")
# Mapping from a source id to a Link instance
registry: weakref.WeakKeyDictionary[Reactive | BkModel, List['Callback']] = weakref.WeakKeyDictionary()
# Mapping to define callbacks by backend and Link type.
# e.g. Callback._callbacks[Link] = Callback
_callbacks: Dict[Type['Callback'], Type['CallbackGenerator']] = {}
# Whether the link requires a target
_requires_target: bool = False
def __init__(
self, source: 'Reactive', target: 'JSLinkTarget' = None,
args: Dict[str, Any] = None, code: Dict[str, str] = None,
**params
):
"""
A Callback defines some callback to be triggered when a
property changes on the source object. A Callback can execute
arbitrary Javascript code and will make all objects referenced
in the args available in the JS namespace.
Arguments
---------
source (Reactive):
The source object the callback is attached to.
target (Reactive | Model, optional):
An optional target to trigger some action on when the source
property changes.
args (Dict[str, Any], optional):
Additional args to make available in the Javascript namespace
indexed by name.
code (Dict[str, str], optional):
A dictionary mapping from the changed source property to
a JS code snippet to execute.
"""
if source is None:
raise ValueError('%s must define a source' % type(self).__name__)
# Source is stored as a weakref to allow it to be garbage collected
self._source = None if source is None else weakref.ref(source)
if not args:
args={}
super().__init__(args=args, code=code, **params)
self.init()
def init(self) -> None:
"""
Registers the Callback
"""
if Callback._process_callbacks not in Viewable._preprocessing_hooks:
Viewable._preprocessing_hooks.append(Callback._process_callbacks)
source = self.source
if source is None:
return
if source in self.registry:
links = self.registry[source]
params = {
k: v for k, v in self.param.values().items() if k != 'name'
}
for link in links:
link_params = {
k: v for k, v in link.param.values().items() if k != 'name'
}
if not hasattr(link, 'target'):
pass
elif (type(link) is type(self) and link.source is source
and link.target is self.target and params == link_params):
return
self.registry[source].append(self)
else:
self.registry[source] = [self]
@classmethod
def register_callback(cls, callback: Type['CallbackGenerator']) -> None:
"""
Register a LinkCallback providing the implementation for
the Link for a particular backend.
"""
cls._callbacks[cls] = callback
@property
def source(self) -> Reactive | None:
return self._source() if self._source else None
@classmethod
def _process_callbacks(
cls, root_view: Viewable, root_model: BkModel, changed: Viewable | None = None, old_models=None
):
if not root_model:
return
ref = root_model.ref['id']
if changed is not None:
inspect = root_view.select(Viewable)
if ref in changed._models:
inspect += changed._models[ref][0].select({'type' : BkModel})
targets = [link.target for links in cls.registry.values() for link in links if isinstance(link, Link)]
if not any(m in cls.registry or m in targets for m in inspect):
return
if root_view is changed:
linkable = inspect
else:
linkable = (
root_view.select(Viewable) + list(root_model.select({'type' : BkModel})) # type: ignore
)
if not linkable:
return
found = [
(link, src, getattr(link, 'target', None)) for src in linkable
for link in cls.registry.get(src, [])
if not link._requires_target or link.target in linkable
or isinstance(link.target, param.Parameterized)
]
arg_overrides: Dict[int, Dict[str, Any]] = {}
if 'holoviews' in sys.modules:
from holoviews.core.dimension import Dimensioned
from .pane.holoviews import HoloViews, generate_panel_bokeh_map
found = [
(link, src, tgt) for (link, src, tgt) in found
if not (isinstance(src, Dimensioned) or isinstance(tgt, Dimensioned))
]
hv_views = root_view.select(HoloViews)
map_hve_bk = generate_panel_bokeh_map(root_model, hv_views)
for src in linkable:
for link in cls.registry.get(src, []):
if hasattr(link, 'target'):
for tgt in map_hve_bk.get(link.target, []):
found.append((link, src, tgt))
arg_overrides[id(link)] = {}
for k, v in link.args.items():
# Not all args are hashable
try:
hv_objs = map_hve_bk.get(v, [])
except Exception:
continue
for tgt in hv_objs:
arg_overrides[id(link)][k] = tgt
for (link, src, tgt) in found:
cb = cls._callbacks[type(link)]
if ((src is None or ref not in getattr(src, '_models', [ref])) or
(getattr(link, '_requires_target', False) and tgt is None) or
(tgt is not None and ref not in getattr(tgt, '_models', [ref]))):
continue
overrides = arg_overrides.get(id(link), {})
cb(root_model, link, src, tgt, arg_overrides=overrides)
class Link(Callback):
"""
A Link defines some connection between a source and target model.
It allows defining callbacks in response to some change or event
    on the source object. Unlike a plain Callback, a Link directly causes some
    action to occur on the target; for JS based backends this usually means
    that a corresponding JS callback will effect some change on the
    target in response to a change on the source.
    A Link must define a source object, which is what triggers events,
    and must also define a target. It is also possible to define bi-
directional links between the source and target object.
"""
bidirectional = param.Boolean(default=False, doc="""
Whether to link source and target in both directions.""")
properties = param.Dict(default={}, doc="""
A dictionary mapping between source specification to target
specification.""")
# Whether the link requires a target
_requires_target = True
def __init__(self, source: 'Reactive', target: Optional['JSLinkTarget'] = None, **params):
if self._requires_target and target is None:
raise ValueError('%s must define a target.' % type(self).__name__)
# Source is stored as a weakref to allow it to be garbage collected
self._target = None if target is None else weakref.ref(target)
super().__init__(source, **params)
@property
def target(self) -> 'JSLinkTarget' | None:
return self._target() if self._target else None
def link(self) -> None:
"""
Registers the Link
"""
self.init()
source = self.source
if source is None:
return
if source in self.registry:
links = self.registry[source]
params = {
k: v for k, v in self.param.values().items() if k != 'name'
}
for link in links:
link_params = {
k: v for k, v in link.param.values().items() if k != 'name'
}
if (type(link) is type(self) and link.source is source
and link.target is self.target and params == link_params):
return
self.registry[source].append(self)
else:
self.registry[source] = [self]
def unlink(self) -> None:
"""
Unregisters the Link
"""
source = self.source
if source is None:
return
links = self.registry.get(source, [])
if self in links:
links.remove(self)
class CallbackGenerator(object):
error = True
def __init__(
self, root_model: 'Model', link: 'Link', source: 'Reactive',
target: Optional['JSLinkTarget'] = None, arg_overrides: Dict[str, Any] = {}
):
self.root_model = root_model
self.link = link
self.source = source
self.target = target
self.arg_overrides = arg_overrides
self.validate()
specs = self._get_specs(link, source, target)
for src_spec, tgt_spec, code in specs:
if src_spec[1] and target is not None and src_spec[1].startswith('event:') and not tgt_spec[1]:
continue
try:
self._init_callback(root_model, link, source, src_spec, target, tgt_spec, code)
except Exception:
if self.error:
raise
else:
pass
@classmethod
def _resolve_model(
cls, root_model: 'Model', obj: 'JSLinkTarget', model_spec: str | None
) -> 'Model' | None:
"""
Resolves a model given the supplied object and a model_spec.
Arguments
----------
root_model: bokeh.model.Model
The root bokeh model often used to index models
obj: holoviews.plotting.ElementPlot or bokeh.model.Model or panel.Viewable
The object to look the model up on
model_spec: string
A string defining how to look up the model, can be a single
string defining the handle in a HoloViews plot or a path
split by periods (.) to indicate a multi-level lookup.
Returns
-------
model: bokeh.model.Model
The resolved bokeh model
"""
from .pane.holoviews import is_bokeh_element_plot
model = None
if 'holoviews' in sys.modules and is_bokeh_element_plot(obj):
if model_spec is None:
return obj.state
else:
model_specs = model_spec.split('.')
handle_spec = model_specs[0]
if len(model_specs) > 1:
model_spec = '.'.join(model_specs[1:])
else:
model_spec = None
model = obj.handles[handle_spec]
elif isinstance(obj, Viewable):
model, _ = obj._models.get(root_model.ref['id'], (None, None))
elif isinstance(obj, BkModel):
model = obj
elif isinstance(obj, param.Parameterized):
model = create_linked_datamodel(obj, root_model)
if model_spec is not None:
for spec in model_spec.split('.'):
model = getattr(model, spec)
return model
def _init_callback(
self, root_model: 'Model', link: 'Link', source: 'Reactive',
src_spec: 'SourceModelSpec', target: 'JSLinkTarget' | None,
tgt_spec: 'TargetModelSpec', code: Optional[str]
) -> None:
references = {k: v for k, v in link.param.values().items()
if k not in ('source', 'target', 'name', 'code', 'args')}
src_model = self._resolve_model(root_model, source, src_spec[0])
if src_model is None:
return
ref = root_model.ref['id']
link_id = (id(link), src_spec, tgt_spec)
callbacks = (
list(src_model.js_property_callbacks.values()) + # type: ignore
list(src_model.js_event_callbacks.values()) # type: ignore
)
# Skip registering callback if already registered
if any(link_id in cb.tags for cbs in callbacks for cb in cbs):
return
references['source'] = src_model
tgt_model = None
if link._requires_target:
tgt_model = self._resolve_model(root_model, target, tgt_spec[0])
if tgt_model is not None:
references['target'] = tgt_model
for k, v in dict(link.args, **self.arg_overrides).items():
arg_model = self._resolve_model(root_model, v, None)
if arg_model is not None:
references[k] = arg_model
elif not isinstance(v, param.Parameterized):
references[k] = v
if 'holoviews' in sys.modules:
from .pane.holoviews import HoloViews, is_bokeh_element_plot
if isinstance(source, HoloViews):
src = source._plots[ref][0]
else:
src = source
prefix = 'source_' if hasattr(link, 'target') else ''
if is_bokeh_element_plot(src):
for k, v in src.handles.items():
k = prefix + k
if isinstance(v, BkModel) and k not in references:
references[k] = v
if isinstance(target, HoloViews) and ref in target._plots:
tgt = target._plots[ref][0]
else:
tgt = target
if is_bokeh_element_plot(tgt):
for k, v in tgt.handles.items():
k = 'target_' + k
if isinstance(v, BkModel) and k not in references:
references[k] = v
# Handle links with ReactiveHTML DataModel
if isinstance(src_model, ReactiveHTML):
if src_spec[1] in src_model.data.properties(): # type: ignore
references['source'] = src_model = src_model.data # type: ignore
if isinstance(tgt_model, ReactiveHTML):
if tgt_spec[1] in tgt_model.data.properties(): # type: ignore
references['target'] = tgt_model = tgt_model.data # type: ignore
self._initialize_models(link, source, src_model, src_spec[1], target, tgt_model, tgt_spec[1])
self._process_references(references)
if code is None:
code = self._get_code(link, source, src_spec[1], target, tgt_spec[1])
else:
code = "try {{ {code} }} catch(err) {{ console.log(err) }}".format(code=code)
src_cb = CustomJS(args=references, code=code, tags=[link_id])
changes, events = self._get_triggers(link, src_spec)
for ch in changes:
src_model.js_on_change(ch, src_cb)
for ev in events:
src_model.js_on_event(ev, src_cb)
tgt_prop = tgt_spec[1]
if not getattr(link, 'bidirectional', False) or tgt_model is None or tgt_prop is None:
return
code = self._get_code(link, target, tgt_prop, source, src_spec[1])
reverse_references = dict(references)
reverse_references['source'] = tgt_model
reverse_references['target'] = src_model
tgt_cb = CustomJS(args=reverse_references, code=code, tags=[link_id])
changes, events = self._get_triggers(link, (None, tgt_prop))
properties = tgt_model.properties()
for ch in changes:
if ch not in properties:
msg = f"Could not link non-existent property '{ch}' on {tgt_model} model"
if self.error:
raise ValueError(msg)
else:
warn(msg)
tgt_model.js_on_change(ch, tgt_cb)
for ev in events:
tgt_model.js_on_event(ev, tgt_cb)
def _process_references(self, references):
"""
Method to process references in place.
"""
def _get_specs(
self, link: 'Link', source: 'Reactive', target: 'JSLinkTarget'
) -> Sequence[Tuple['SourceModelSpec', 'TargetModelSpec', str | None]]:
"""
Return a list of spec tuples that define source and target
models.
"""
return []
def _get_code(
self, link: 'Link', source: 'JSLinkTarget', src_spec: str,
target: 'JSLinkTarget' | None, tgt_spec: str | None
) -> str:
"""
Returns the code to be executed.
"""
return ''
def _get_triggers(
self, link: 'Link', src_spec: 'SourceModelSpec'
) -> Tuple[List[str], List[str]]:
"""
Returns the changes and events that trigger the callback.
"""
return [], []
def _initialize_models(
self, link, source: 'Reactive', src_model: 'Model', src_spec: str,
target: 'JSLinkTarget' | None, tgt_model: 'Model' | None, tgt_spec: str | None
) -> None:
"""
Applies any necessary initialization to the source and target
models.
"""
pass
def validate(self) -> None:
pass
class JSCallbackGenerator(CallbackGenerator):
def _get_triggers(
self, link: 'Link', src_spec: 'SourceModelSpec'
) -> Tuple[List[str], List[str]]:
if src_spec[1].startswith('event:'):
return [], [src_spec[1].split(':')[1]]
return [src_spec[1]], []
def _get_specs(
self, link: 'Link', source: 'Reactive', target: 'JSLinkTarget'
) -> Sequence[Tuple['SourceModelSpec', 'TargetModelSpec', str | None]]:
for src_spec, code in link.code.items():
src_specs = src_spec.split('.')
if src_spec.startswith('event:'):
src_spec = (None, src_spec)
elif len(src_specs) > 1:
src_spec = ('.'.join(src_specs[:-1]), src_specs[-1])
else:
src_prop = src_specs[0]
if isinstance(source, Reactive):
src_prop = source._rename.get(src_prop, src_prop)
src_spec = (None, src_prop)
return [(src_spec, (None, None), code)]
class JSLinkCallbackGenerator(JSCallbackGenerator):
_link_template = """
var value = source['{src_attr}'];
value = {src_transform};
value = {tgt_transform};
try {{
var property = target.properties['{tgt_attr}'];
if (property !== undefined) {{ property.validate(value); }}
}} catch(err) {{
console.log('WARNING: Could not set {tgt_attr} on target, raised error: ' + err);
return;
}}
try {{
target['{tgt_attr}'] = value;
}} catch(err) {{
console.log(err)
}}
"""
_event_link_template = """
var value = true
try {{
var property = target.properties['{tgt_attr}'];
if (property !== undefined) {{ property.validate(value); }}
}} catch(err) {{
console.log('WARNING: Could not set {tgt_attr} on target, raised error: ' + err);
return;
}}
try {{
target['{tgt_attr}'] = value;
}} catch(err) {{
console.log(err)
}}
"""
_loading_link_template = """
if ('{src_attr}'.startsWith('event:')) {{
var value = true
}} else {{
var value = source['{src_attr}'];
value = {src_transform};
}}
if (typeof value !== 'boolean' || source.labels !== ['Loading']) {{
value = true
}}
var css_classes = target.css_classes.slice()
var loading_css = ['{loading_css_class}', 'pn-{loading_spinner}']
if (value) {{
for (var css of loading_css) {{
if (!(css in css_classes)) {{
css_classes.push(css)
}}
}}
}} else {{
for (var css of loading_css) {{
var index = css_classes.indexOf(css)
if (index > -1) {{
css_classes.splice(index, 1)
}}
}}
}}
target['css_classes'] = css_classes
"""
def _get_specs(
self, link: 'Link', source: 'Reactive', target: 'JSLinkTarget'
) -> Sequence[Tuple['SourceModelSpec', 'TargetModelSpec', str | None]]:
if link.code:
return super()._get_specs(link, source, target)
specs = []
for src_spec, tgt_spec in link.properties.items():
src_specs = src_spec.split('.')
if len(src_specs) > 1:
src_spec = ('.'.join(src_specs[:-1]), src_specs[-1])
else:
src_prop = src_specs[0]
if isinstance(source, Reactive):
src_prop = source._rename.get(src_prop, src_prop)
src_spec = (None, src_prop)
tgt_specs = tgt_spec.split('.')
if len(tgt_specs) > 1:
tgt_spec = ('.'.join(tgt_specs[:-1]), tgt_specs[-1])
else:
tgt_prop = tgt_specs[0]
if isinstance(target, Reactive):
tgt_prop = target._rename.get(tgt_prop, tgt_prop)
tgt_spec = (None, tgt_prop)
specs.append((src_spec, tgt_spec, None))
return specs
def _initialize_models(
self, link, source: 'Reactive', src_model: 'Model', src_spec: str,
target: 'JSLinkTarget' | None, tgt_model: 'Model' | None, tgt_spec: str | None
) -> None:
if tgt_model is not None and src_spec and tgt_spec:
src_reverse = {v: k for k, v in getattr(source, '_rename', {}).items()}
src_param = src_reverse.get(src_spec, src_spec)
if src_spec.startswith('event:'):
return
if isinstance(source, Reactive) and src_param in source.param and isinstance(target, Reactive):
tgt_reverse = {v: k for k, v in target._rename.items()}
tgt_param = tgt_reverse.get(tgt_spec, tgt_spec)
value = getattr(source, src_param)
try:
msg = target._process_param_change({tgt_param: value})
except Exception:
msg = {}
if tgt_spec in msg:
value = msg[tgt_spec]
else:
value = getattr(src_model, src_spec)
if value and hasattr(tgt_model, tgt_spec):
if tgt_spec != 'value_throttled':
setattr(tgt_model, tgt_spec, value)
if tgt_model is None and not link.code:
raise ValueError('Model could not be resolved on target '
'%s and no custom code was specified.' %
type(self.target).__name__)
def _process_references(self, references: Dict[str, str]) -> None:
"""
Strips target_ prefix from references.
"""
for k in list(references):
if k == 'target' or not k.startswith('target_') or k[7:] in references:
continue
references[k[7:]] = references.pop(k)
def _get_code(
self, link: 'Link', source: 'JSLinkTarget', src_spec: str,
target: 'JSLinkTarget' | None, tgt_spec: str | None
) -> str:
if isinstance(source, Reactive):
src_reverse = {v: k for k, v in source._rename.items()}
src_param = src_reverse.get(src_spec, src_spec)
src_transform = source._source_transforms.get(src_param)
if src_transform is None:
src_transform = 'value'
else:
src_transform = 'value'
if isinstance(target, Reactive):
tgt_reverse = {v: k for k, v in target._rename.items()}
tgt_param = tgt_reverse.get(tgt_spec, tgt_spec)
if tgt_param is None:
tgt_transform = 'value'
else:
tgt_transform = target._target_transforms.get(tgt_param, 'value')
else:
tgt_transform = 'value'
if tgt_spec == 'loading':
from .config import config
return self._loading_link_template.format(
src_attr=src_spec, src_transform=src_transform,
loading_spinner=config.loading_spinner,
loading_css_class=LOADING_INDICATOR_CSS_CLASS
)
else:
if src_spec and src_spec.startswith('event:'):
template = self._event_link_template
else:
template = self._link_template
return template.format(
src_attr=src_spec,
tgt_attr=tgt_spec,
src_transform=src_transform,
tgt_transform=tgt_transform
)
Callback.register_callback(callback=JSCallbackGenerator)
Link.register_callback(callback=JSLinkCallbackGenerator)
__all__ = (
"Callback",
"CallbackGenerator",
"JSCallbackGenerator",
"JSLinkCallbackGenerator",
"Link"
)
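# --- Usage sketch (assumption, not part of this module) ---
# The Link/CallbackGenerator machinery above is what backs ``Reactive.jslink``.
# A typical call, assuming standard Panel widgets, looks roughly like:
#
#   import panel as pn
#   slider = pn.widgets.FloatSlider(start=0, end=10)
#   text = pn.widgets.StaticText()
#   slider.jslink(text, value='value')  # registers a Link and emits a CustomJS callback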
| blob_id: 3e14f45a0891fdbc44f6e944a5ae406964db3e48 | directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | content_id: cd1224f6ac94351db1ce4a6db956f9b0b72dbde0
| snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | branch_name: refs/heads/master
| repo_name: burakbayramli/books | path: /An_Introduction_to_Computational_Fluid_Dynamics_FVM_Versteeg/CFD_books_codes-master/Versteeg/Chapter7/Example_7_5.py | filename: Example_7_5.py | extension: py | length_bytes: 3,316
| detected_licenses: [] | license_type: no_license | gha_license_id: null
| visit_date: 2023-08-17T05:31:08.885134 | revision_date: 2023-08-14T10:05:37 | committer_date: 2023-08-14T10:05:37 | gha_event_created_at: 2022-10-24T12:15:06 | gha_created_at: 2016-10-31T17:24:00
| github_id: 72,460,321 | star_events_count: 223 | fork_events_count: 174 | gha_language: Jupyter Notebook | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
# Consider solving a one-dimensional conduction equation for an
# insulated metal rod which has internal heat generation. The
# dimensions and other data are as follows: length of the rod is 1 m,
# cross-sectional area of the rod is 0.01 m2, thermal conductivity k =
# 5 W/mK, heat generation g = 20 kW/m3, and the ends are held at 100 C and 500 C
import numpy as np
from matplotlib import pyplot as plt
# Define the domain
x_len = 1.0
x_points = 20
del_x = x_len/float(x_points-1)
x_gr = np.linspace(0, x_len, x_points, dtype=float)
# Define the parameters
k = 5.0
g = 20.0 * 1000
A = 0.01
T_0 = 100
T_L = 500
# Generate the equations
coeff_mat = np.zeros((x_points,x_points))
sol_mat = np.zeros(x_points)
for i in range(1, x_points-1):
aW = k * A / float(del_x)
aE = k * A / float(del_x)
Sp = 0
Su = g*A*del_x
aP = aW + aE - Sp
coeff_mat[i,i] = aP
coeff_mat[i, i+1] = -1.0*aE
coeff_mat[i,i-1] = -1.0*aW
sol_mat[i] = Su
## Boundary 0
aW = 0
aE = k * A / float(del_x)
Sp = -1.0*k*A/float(del_x/float(2))
Su = g*A*del_x + T_0*k*A/float(del_x/float(2))
aP = aW + aE - Sp
coeff_mat[0,0] = aP
coeff_mat[0,1] = -1.0*aE
sol_mat[0] = Su
## Boundary -1
aE = 0
aW = k * A / float(del_x)
Sp = -1.0*k*A/float(del_x/float(2))
Su = g*A*del_x + T_L*k*A/float(del_x/float(2))
aP = aW + aE - Sp
coeff_mat[-1,-1] = aP
coeff_mat[-1,-2] = -1.0*aW
sol_mat[-1] = Su
# Solution using numpy (built-in function)
T_np = np.linalg.solve(coeff_mat,sol_mat)
print ("\n The solution vector using numpy is: \n" + str(T_np))
# Solution using TDMA (Tri diagonal matrix algorithm)
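# The tri-diagonal system aP*T_i = aW*T_(i-1) + aE*T_(i+1) + Su is first swept forward to
# build the recurrence coefficients A[i] and C_dash[i] (so that T_i = A[i]*T_(i+1) + C_dash[i]),
# and then back-substituted from the last node to recover the temperatures.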
T_tdma = np.ones(x_points)
A = np.zeros(x_points)
C_dash = np.zeros(x_points)
A[0] = coeff_mat[0,1]/float(coeff_mat[0,0])
C_dash[0] = sol_mat[0]/float(coeff_mat[0,0])
for i in range(1, x_points-1):
A[i] = -1.0 * coeff_mat[i, i + 1] / float(coeff_mat[i, i] - -1.0 * coeff_mat[i, i-1] * A[i - 1])
C_dash[i] = (sol_mat[i] + -1.0 * coeff_mat[i, i-1] * C_dash[i - 1]) / float(coeff_mat[i, i] - -1.0 * coeff_mat[i, i-1] * A[i - 1])
C_dash[-1] = (sol_mat[-1] + -1.0 * coeff_mat[-1, -2] * C_dash[-2]) / float(coeff_mat[-1,-1] - -1.0 * coeff_mat[-1,-2] * A[-2])
A[-1] = 0
T_tdma[-1] = C_dash[-1]
for i in range(x_points-2, -1, -1):
T_tdma[i] = A[i]*T_tdma[i+1] + C_dash[i]
print ("\n The solution vector using TDMA is: \n" + str(T_tdma))
# Solution using Gauss-Seidel iteration
T_guess = 100*np.random.rand(1) # Starting guess temperature value - chosen randomly
print ("\n Guess temperature for iteration is : \t" + str(T_guess) + "\n")
T_gs = np.ones(x_points)*T_guess
num_itrs = 100 # Number of iterations in Gauss-Seidel method
for it in range(num_itrs):
for i in range(0, x_points):
sum = 0
for j in range(0, x_points):
if i != j:
sum = sum + (coeff_mat[i, j] * T_gs[j])
T_gs[i] = (sol_mat[i] - sum) / coeff_mat[i, i]
print ("\n The solution vector using Gauss Siedel is: \n" + str(T_gs))
# Plotting the solution vectors
plt.plot(x_gr, T_np, label = "numpy")
plt.plot(x_gr, T_tdma, label = "TDMA")
plt.plot(x_gr, T_gs, label = "Gauss-Seidel")
plt.xlabel("Length")
plt.ylabel("Temperature")
plt.legend()
plt.show()
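# --- Optional check (sketch, not part of the original example) ---
# For constant k and uniform generation g, the exact solution of k*d2T/dx2 + g = 0 with
# T(0) = 100 and T(L) = 500 is T(x) = T_0 + (T_L - T_0)*x/L + g/(2*k)*x*(L - x), so the
# numerical profiles above can be compared against it, e.g.:
#
#   T_exact = T_0 + (T_L - T_0)*x_gr/x_len + g/(2.0*k)*x_gr*(x_len - x_gr)
#   print("Max deviation of numpy solution from exact solution:", np.max(np.abs(T_np - T_exact)))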
| blob_id: 37c00f07e714fedfb93ede4ccade2e7dd2a9034a | directory_id: 1dc70cc7479b0efe56954c08b0b7366a823f22c0 | content_id: 1ef8f4de991aaeb3e65552516776b8290605226e
| snapshot_id: b77de3054606296bce68a74cb2ef0e5d4f2d6e9e | revision_id: b791359311f6e484fbc5de37b89afaefce5dd2a4 | branch_name: refs/heads/master
| repo_name: Esri/arcade-expressions | path: /HelperTools/AttributeRulesTools.pyt | filename: AttributeRulesTools.pyt | extension: pyt | length_bytes: 15,654
| detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
| visit_date: 2023-07-08T23:42:38.213084 | revision_date: 2023-04-04T19:34:46 | committer_date: 2023-04-04T19:34:46 | gha_event_created_at: 2023-08-22T00:08:21 | gha_created_at: 2018-01-25T22:36:17
| github_id: 118,975,834 | star_events_count: 201 | fork_events_count: 90 | gha_language: JavaScript | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
# -*- coding: utf-8 -*-
import os
from pathlib import Path
import pandas as pd
import re
from collections import defaultdict
import arcpy
class Toolbox(object):
def __init__(self):
"""Define the toolbox (the name of the toolbox is the name of the
.pyt file)."""
self.label = "Attribute Rules Tools"
self.alias = "artools"
# List of tool classes associated with this toolbox
self.tools = [ApplyIndustryRulesGP]
class ApplyIndustryRulesGP(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Apply Industry Rules"
self.description = ""
self.canRunInBackground = False
def getParameterInfo(self):
"""Define parameter definitions"""
industry = arcpy.Parameter(name='industry',
displayName='Industry',
direction='Input',
datatype='GPString',
parameterType='Required')
workspace = arcpy.Parameter(name='gdb',
displayName='Geodatabase',
direction='Input',
datatype='DEWorkspace',
parameterType='Required')
industry_folder = (Path(__file__).parents[1]) / 'Industry'
industry.filter.list = [p.name for p in industry_folder.glob('*')]
return [industry, workspace]
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""The source code of the tool."""
params = [p.valueAsText for p in parameters]
industry, workspace = params
industry_folder = str(Path(__file__).parents[1] / 'Industry' / industry)
applyIndustry = ApplyIndustryRules(workspace, industry_folder)
applyIndustry.main()
class ApplyIndustryRules:
"""
Apply all Attribute Rules expressions found in a directory to a workspace
"""
comments_to_parameter = {
'Assigned To': "in_table",
'Name': "name",
'Type': 'type',
'Description': "description",
'Subtypes': "subtype",
'Field': "field",
'Trigger': "triggering_events",
'Exclude From Client': "exclude_from_client_evaluation",
'Error Number': "error_number",
'Error Message': "error_message",
'Is Editable': "is_editable",
'Disable': "is_enabled"
}
req_args = ['in_table', 'type', 'name']
def __init__(self, workspace: str, industry: str):
self.workspace = workspace
self.industry = Path(industry)
self.is_un = True
def build_all_args(self) -> list:
""" build list of all arguments for each attribute rule and sequence. build list of feature classes. """
fcs = set()
all_args = []
all_seq = []
pat = re.compile(r"(?:NextSequenceValue\(')(.*?)'", re.I)
for path in self.industry.rglob('*.js'):
if path.parent.name.lower() not in ['calculation', 'constraint', 'validation']:
continue
f = open(str(path), "r")
kwargs = {}
while True:
text_line = f.readline()
if not text_line.startswith('//'):
break
param, details = text_line.split(':', 1)
param = param.strip('/ ')
details = details.strip()
if param not in self.comments_to_parameter:
arcpy.AddMessage(f"{param} not defined in lookup")
continue
if param == 'Assigned To':
if self.is_un:
kwargs[self.comments_to_parameter[param]] = os.path.join(self.workspace, details)
if arcpy.Exists(os.path.join(self.workspace, details)) == False:
arcpy.AddMessage(f"{details} does not exist")
continue
fcs.add(os.path.join(self.workspace, details))
else:
kwargs[self.comments_to_parameter[param]] = details
elif param == 'Type':
kwargs[self.comments_to_parameter[param]] = details.upper()
elif param == 'Subtypes':
if self.is_un:
kwargs[self.comments_to_parameter[param]] = 'ALL' if details == 'All' else details
else:
kwargs[self.comments_to_parameter[param]] = None if details == 'All' else details
elif param == 'Trigger':
trigger_events = [det.strip().upper() for det in details.split(',')]
if self.is_un:
kwargs[self.comments_to_parameter[param]] = trigger_events
else:
kwargs['triggering_insert'] = 1 if 'INSERT' in trigger_events else 0
kwargs['triggering_delete'] = 1 if 'DELETE' in trigger_events else 0
kwargs['triggering_update'] = 1 if 'UPDATE' in trigger_events else 0
elif param == 'Exclude From Client':
details = details.lower() == 'true'
if self.is_un:
kwargs[self.comments_to_parameter[param]] = details
else:
kwargs[self.comments_to_parameter[param]] = 1 if details else 0
elif param == 'Disable':
details = details.lower() == 'false'
if self.is_un:
kwargs[self.comments_to_parameter[param]] = details
else:
kwargs[self.comments_to_parameter[param]] = 1 if details else 0
elif param in ('Description', 'Name', 'Error Number', 'Error Message', 'Field'):
kwargs[self.comments_to_parameter[param]] = details
f.seek(0, 0)
script_expression = f.read()
kwargs['script_expression'] = script_expression
if 'NextSequenceValue' in script_expression:
seq_names = pat.findall(script_expression) or []
if not seq_names:
arcpy.AddWarning(
                            f'***** Could not parse sequences, make sure to use the dict format for the definition - {os.path.basename(path)}')
else:
all_seq.extend([dict(seq_name=seq_name, seq_start_id=1, seq_inc_value=1) for seq_name in seq_names])
missing_args = [req_arg for req_arg in self.req_args if req_arg not in kwargs]
if missing_args:
arcpy.AddMessage(f'***** The args {missing_args} are missing from {path}')
all_args.append(kwargs)
return all_args, fcs, all_seq
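    # The parser above expects every .js rule file to start with a comment header whose
    # keys match comments_to_parameter. A hypothetical example header (field values are
    # made up for illustration only):
    #
    #   // Assigned To: ElectricDevice
    #   // Type: Calculation
    #   // Name: Device - Generate ID
    #   // Description: Generates an asset ID from a database sequence
    #   // Subtypes: All
    #   // Field: assetid
    #   // Trigger: Insert
    #   // Exclude From Client: True
    #   // Disable: False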
def recreate_un_seq(self, all_seq):
""" Recreate database sequences in UN """
if not all_seq:
return
sequences = arcpy.da.ListDatabaseSequences(self.workspace)
existing_seq = {seq.name for seq in sequences}
seq_to_remove = set({seq['seq_name'] for seq in all_seq}).intersection(existing_seq)
for seq in seq_to_remove:
arcpy.AddMessage(f"Deleting seq {seq}")
arcpy.DeleteDatabaseSequence_management(self.workspace, seq)
for seq in all_seq:
arcpy.AddMessage(f"Creating seq {seq}")
arcpy.CreateDatabaseSequence_management(self.workspace, **seq)
def recreate_un_ar(self, fcs, all_args):
""" Create and/or replace all attribute rules from Industry folder in UN """
for fc in fcs:
att_rules = arcpy.Describe(fc).attributeRules
ar_names = [ar.name for ar in att_rules]
if ar_names:
arcpy.AddMessage(f"Deleting all rules on {fc}:")
arcpy.AddMessage(f"\t\t{str(ar_names)}")
arcpy.management.DeleteAttributeRule(fc, ar_names, '')
for kwargs in all_args:
arcpy.AddMessage(f"Creating {kwargs['name']} on {kwargs['in_table']}")
arcpy.AddAttributeRule_management(**kwargs)
def recreate_ap_seq(self, all_seq, seq_df):
""" Recreate database sequences in Asset Package """
arcpy.AddMessage(f"Removing all sequences in the Asset Package")
arcpy.TruncateTable_management(os.path.join(self.workspace, 'B_DatabaseSequence'))
if not all_seq:
return
seq_df = seq_df[~seq_df['seq_name'].isin([seq['seq_name'] for seq in all_seq])]
seq_df = seq_df.append(all_seq, ignore_index=True)
seq_df.loc[seq_df['current_value'].isnull(), 'current_value'] = 1
arcpy.AddMessage("Adding Seqs...")
arcpy.AddMessage(str(seq_df.seq_name.to_list()))
with arcpy.da.InsertCursor(os.path.join(self.workspace, 'B_DatabaseSequence'), list(seq_df)) as cursor:
df_to_cursor(seq_df, cursor)
def recreate_ap_ar(self, all_args, rules_df):
""" Create and/or replace all attribute rules from Industry folder in Asset Package B_AttributeRules table """
arcpy.AddMessage(f"Removing all existing Attribute Rules in the Asset Package")
arcpy.TruncateTable_management(os.path.join(self.workspace, 'B_AttributeRules'))
ar_names = [ar['name'] for ar in all_args]
rules_df = rules_df[~rules_df['name'].isin(ar_names)]
rules_df = rules_df.append(all_args, ignore_index=True)
rules_df.loc[((rules_df['is_editable'].isnull()) & (rules_df['type'] == 'CALCULATION')), 'is_editable'] = 1
arcpy.AddMessage("Adding ARs...")
arcpy.AddMessage(str(rules_df.name.to_list()))
with arcpy.da.InsertCursor(os.path.join(self.workspace, 'B_AttributeRules'), list(rules_df)) as cursor:
df_to_cursor(rules_df, cursor)
@staticmethod
def build_disable_lookup(all_args) -> defaultdict:
""" build default dict of Table Name: [list of attribute rule names] """
disable_lookup = defaultdict(lambda: [])
for args in all_args:
if 'is_enabled' not in args:
continue
# make sure to remove invalid param
is_enabled = args.pop('is_enabled')
if not is_enabled:
disable_lookup[args['in_table']].append(args['name'])
return disable_lookup
def main(self):
# check if asset package
if self.workspace.lower().endswith('.gdb') and arcpy.Exists(os.path.join(self.workspace, 'B_AttributeRules')):
# TODO: Should we preserve rules that are not in the github folder.
# rules_df = cursor_to_df(arcpy.da.SearchCursor(os.path.join(self.workspace, 'B_AttributeRules'), ['*']))
# seq_df = cursor_to_df(arcpy.da.SearchCursor(os.path.join(self.workspace, 'B_DatabaseSequence'), ['*']))
rules_df = cursor_to_df(
arcpy.da.SearchCursor(os.path.join(self.workspace, 'B_AttributeRules'), ['*'], '1=0'))
seq_df = cursor_to_df(
arcpy.da.SearchCursor(os.path.join(self.workspace, 'B_DatabaseSequence'), ['*'], '1=0'))
self.is_un = False
# build args, list of feature classes, and sequences
all_args, fcs, all_seq = self.build_all_args()
# if not asset package, build disable lookup and recreate attribute rules
if self.is_un:
disable_lookup = self.build_disable_lookup(all_args)
self.recreate_un_seq(all_seq)
self.recreate_un_ar(fcs, all_args)
else:
self.recreate_ap_ar(all_args, rules_df)
self.recreate_ap_seq(all_seq, seq_df)
# disable rules in un
if self.is_un and disable_lookup:
for tbl, rules in disable_lookup.items():
arcpy.AddMessage(f"Disabling {str(rules)} on {tbl}")
arcpy.management.DisableAttributeRules(tbl, rules)
def df_to_cursor(data_frame: pd.DataFrame, cursor, progressor_message: str = None):
"""Inserts rows from data_frame to cursor
Args:
data_frame (pandas.DataFrame): A DataFrame. Only the subset of fields used by the cursor will be inserted.
cursor (arcpy.da.InsertCursor): An opened insert cursor.
progressor_message (str): If not None, create a step progressor with this message and update progress.
"""
cursor_fields = [f.lower() for f in cursor.fields]
data_frame = data_frame.rename(columns={c: c.lower() for c in data_frame.columns})
# If there are fields in the cursor that aren't present in the DF, they need to be added.
for field in cursor_fields:
if field not in data_frame.columns:
data_frame[field] = None
# Keep only those fields that are present in the cursor.
data_frame = data_frame[cursor_fields]
records = len(data_frame)
if progressor_message and records > 1000:
arcpy.SetProgressorLabel(progressor_message)
arcpy.SetProgressor(type='STEP', message=progressor_message, min_range=0, max_range=records)
chunk = round(records / 100)
for i, row in enumerate(data_frame.itertuples(index=False, name=None)):
if not i % chunk:
arcpy.SetProgressorPosition(i)
cursor.insertRow(row)
arcpy.ResetProgressor()
return
for row in data_frame.itertuples(index=False, name=None):
cursor.insertRow(row)
def cursor_to_df(cursor, header=None, has_blob=False):
"""Converts a cursor object to pandas DataFrame
Args:
cursor (``arcpy.da.SearchCursor``): A cursor to iterate over.
header (list): The list of field names to use as header. Defaults to ``None`` which uses the field names as
reported by the cursor object.
        has_blob (bool): If the cursor contains blob fields, set this to True. Rows will be processed one by one instead of
            loading directly from the generator.
Returns:
pandas.DataFrame: DataFrame representation of the table.
Raises:
ValueError: If the number of fields does not match the record length.
Examples:
>>> cursor = arcpy.da.SearchCursor('data', ['OID@', 'SHAPE@X'])
>>> cursor_to_df(cursor, ['ID', 'X'])
ID X
0 1 5000
1 2 1500
"""
if header is None:
header = cursor.fields
if len(header) != len(cursor.fields):
raise ValueError('The length of header does not match the cursor.')
# Blob fields are special because they return memoryviews. They need to be cast to bytes otherwise the memoryviews
# all reference the most recent row. Because of this, the inner loop has to be a list comprehension.
if has_blob:
cursor = ([value.tobytes()
if isinstance(value, memoryview)
else value
for value in row]
for row in cursor)
return pd.DataFrame.from_records(cursor, columns=header)
| blob_id: 163415e9f360ef705aa7a986b7ff58d3a22fcec9 | directory_id: 744c3b66611b08782fcdd9d66261c4d55b00d426 | content_id: c716d367b42b73bb32750393a8a4eeb9e78edcb0
| snapshot_id: 4ff9e0aa64b641c65b57b26f415dd69dbfb12256 | revision_id: 6d181d78a5c7be8714c74055cddcf63d5ccef70a | branch_name: refs/heads/master
| repo_name: erwincoumans/bullet3 | path: /examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/multiagent_mobility_gym_env.py | filename: multiagent_mobility_gym_env.py | extension: py | length_bytes: 18,188
| detected_licenses: ["Zlib"] | license_type: permissive | gha_license_id: NOASSERTION
| visit_date: 2023-03-10T14:58:18.072562 | revision_date: 2023-02-24T18:32:53 | committer_date: 2023-02-24T18:32:53 | gha_event_created_at: 2019-02-25T17:31:00 | gha_created_at: 2015-03-03T21:15:54
| github_id: 31,621,748 | star_events_count: 103 | fork_events_count: 29 | gha_language: C++ | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
| content:
"""This file implements the locomotion gym env."""
# pylint: disable=dangerous-default-value
import atexit
import collections
import time
import gin
from gym import spaces
import numpy as np
from pybullet_utils import bullet_client
from pybullet_envs.minitaur.envs import minitaur_logging
from pybullet_envs.minitaur.envs import minitaur_logging_pb2
from pybullet_envs.minitaur.envs_v2 import locomotion_gym_env
from pybullet_envs.minitaur.envs_v2.scenes import scene_base
from pybullet_envs.minitaur.envs_v2.sensors import sensor
from pybullet_envs.minitaur.envs_v2.sensors import space_utils
import pybullet
_ACTION_EPS = 0.01
_NUM_SIMULATION_ITERATION_STEPS = 300
_LOG_BUFFER_LENGTH = 5000
@gin.configurable
class MultiagentMobilityGymEnv(locomotion_gym_env.LocomotionGymEnv):
"""The gym environment for the locomotion tasks."""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 100
}
def __init__(self,
gym_config,
robot_classes,
scene: scene_base.SceneBase = scene_base.SceneBase(),
sensors=None,
tasks=[],
global_task=None,
single_reward=False,
env_randomizers=None):
"""Initializes the locomotion gym environment.
Args:
gym_config: An instance of LocomotionGymConfig.
robot_classes: A list of robot classes. We provide a class rather than an
instance due to hard_reset functionality. Parameters are expected to be
configured with gin.
scene: An object for managing the robot's surroundings.
sensors: A list of environmental sensors for observation. This does not
include on-robot sensors.
tasks: A list of callable function/class to calculate the reward and
termination condition. Takes the gym env as the argument when calling.
global_task: A callable function/class to calculate the reward and
termination condition for all robots. Takes the gym env as the argument
when calling.
single_reward: Whether the environment returns a single reward for all
agents or a dictionary.
env_randomizers: A list of EnvRandomizer(s). An EnvRandomizer may
        randomize the physical property of minitaur, change the terrain during
reset(), or add perturbation forces during step().
Raises:
      ValueError: If the num_action_repeat is less than 1, or if the number of
        unique robot names does not match the number of robot classes.
"""
# TODO(sehoonha) split observation and full-state sensors (b/129858214)
# Makes sure that close() is always called to flush out the logs to the
# disk.
atexit.register(self.close)
self.seed()
self._gym_config = gym_config
self._robot_classes = robot_classes
# Checking uniqueness of names and number of names
self._scene = scene
# TODO(sehoonha) change the data structure to dictionary
# TODO(b/144521291) make sure sensors have their own robot names
self._sensors = sensors if sensors is not None else list()
self._log_path = gym_config.log_path
self._logging = minitaur_logging.MinitaurLogging(self._log_path)
self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
self._data_dir = gym_config.data_dir
# A dictionary containing the objects in the world other than the robot.
self._tasks = tasks
self._global_task = global_task
self._single_reward = single_reward
self._env_randomizers = env_randomizers if env_randomizers else []
# This is a workaround due to the issue in b/130128505#comment5
for task in self._tasks:
if isinstance(task, sensor.Sensor):
self._sensors.append(task)
if global_task and isinstance(global_task, sensor.Sensor):
self._sensors.append(global_task)
# Simulation related parameters.
self._num_action_repeat = gym_config.simulation_parameters.num_action_repeat
self._on_rack = gym_config.simulation_parameters.robot_on_rack
if self._num_action_repeat < 1:
raise ValueError('number of action repeats should be at least 1.')
self._sim_time_step = gym_config.simulation_parameters.sim_time_step_s
self._env_time_step = self._num_action_repeat * self._sim_time_step
self._env_step_counter = 0
# TODO(b/73829334): Fix the value of self._num_bullet_solver_iterations.
self._num_bullet_solver_iterations = int(_NUM_SIMULATION_ITERATION_STEPS /
self._num_action_repeat)
self._is_render = gym_config.simulation_parameters.enable_rendering
# The wall-clock time at which the last frame is rendered.
self._last_frame_time = 0.0
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(
connection_mode=pybullet.GUI)
else:
self._pybullet_client = bullet_client.BulletClient()
if gym_config.simulation_parameters.egl_rendering:
self._pybullet_client.loadPlugin('eglRendererPlugin')
self._pybullet_client.enable_cns()
# If enabled, save the performance profile to profiling_path
# Use Google Chrome about://tracing to open the file
if gym_config.profiling_path is not None:
self._profiling_slot = self._pybullet_client.startStateLogging(
self._pybullet_client.STATE_LOGGING_PROFILE_TIMINGS,
gym_config.profiling_path)
self._profiling_counter = 10
else:
self._profiling_slot = -1
# Build the action space. The action space must be compatible with the
# robot configuration.
# The action list contains the name of all actions.
# TODO(b/144479707): Allow robots to set the action space automatically.
action_space = collections.OrderedDict()
for robot_name, action in gym_config.actions.items():
action_lower_bound = []
action_upper_bound = []
for action_scalar in action:
action_upper_bound.append(action_scalar.upper_bound)
action_lower_bound.append(action_scalar.lower_bound)
action_space[robot_name] = spaces.Box(
np.asarray(action_lower_bound),
np.asarray(action_upper_bound),
dtype=np.float32)
self.action_space = spaces.Dict(action_space)
# Set the default render options.
self._camera_dist = gym_config.simulation_parameters.camera_distance
self._camera_yaw = gym_config.simulation_parameters.camera_yaw
self._camera_pitch = gym_config.simulation_parameters.camera_pitch
self._render_width = gym_config.simulation_parameters.render_width
self._render_height = gym_config.simulation_parameters.render_height
self._hard_reset = True
self.reset()
self._hard_reset = gym_config.simulation_parameters.enable_hard_reset
# Construct the observation space from the list of sensors. Note that we
# will reconstruct the observation_space after the robot is created.
self.observation_space = (
space_utils.convert_sensors_to_gym_space_dictionary(self.all_sensors()))
def close(self):
if self._log_path is not None:
self._logging.save_episode(self._episode_proto)
for robot in self._robots:
robot.Terminate()
def all_sensors(self):
"""Returns all robot and environmental sensors."""
robot_sensors = []
for robot in self._robots:
robot_sensors += robot.GetAllSensors()
return robot_sensors + self._sensors
@gin.configurable('multiagent_mobility_gym_env.MultiagentMobilityGymEnv.reset'
)
def reset(self,
initial_motor_angles=None,
reset_duration=1.0,
reset_visualization_camera=True):
"""Resets the robot's position in the world or rebuild the sim world.
The simulation world will be rebuilt if self._hard_reset is True.
Args:
initial_motor_angles: A list of Floats. The desired joint angles after
reset. If None, the robot will use its built-in value.
reset_duration: Float. The time (in seconds) needed to rotate all motors
to the desired initial values.
reset_visualization_camera: Whether to reset debug visualization camera on
reset.
Returns:
A numpy array contains the initial observation after reset.
"""
if self._is_render:
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_RENDERING, 0)
# Clear the simulation world and rebuild the robot interface.
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=self._num_bullet_solver_iterations)
self._pybullet_client.setTimeStep(self._sim_time_step)
self._pybullet_client.setGravity(0, 0, -10)
# Rebuild the world.
self._scene.build_scene(self._pybullet_client)
# Rebuild the robots
# TODO(b/144545080): Make this scale to more than two agents
# Have multiple robot classes as a list.
self._robots = []
for robot_class in self._robot_classes:
self._robots.append(
robot_class(
pybullet_client=self._pybullet_client,
              # TODO(rosewang): Remove on rack in multiagent case
on_rack=self._on_rack))
# Reset the pose of the robot.
for robot in self._robots:
robot.Reset(
reload_urdf=False,
default_motor_angles=initial_motor_angles,
reset_time=reset_duration)
self._env_step_counter = 0
self._pybullet_client.resetDebugVisualizerCamera(self._camera_dist,
self._camera_yaw,
self._camera_pitch,
[0, 0, 0])
# Flush the logs to disc and reinitialize the logging system.
if self._log_path is not None:
self._logging.save_episode(self._episode_proto)
self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
minitaur_logging.preallocate_episode_proto(self._episode_proto,
_LOG_BUFFER_LENGTH,
self._robots[0])
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self._env_step_counter = 0
if reset_visualization_camera:
self._pybullet_client.resetDebugVisualizerCamera(self._camera_dist,
self._camera_yaw,
self._camera_pitch,
[0, 0, 0])
self._last_action = {
robot_name: np.zeros(space.shape)
for robot_name, space in self.action_space.spaces.items()
}
if self._is_render:
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_RENDERING, 1)
for s in self.all_sensors():
# set name
if any([r.name in s.get_name() for r in self.robots]):
robot = [r for r in self.robots if r.name in s.get_name()][0]
s.set_robot(robot)
for task in self._tasks:
if hasattr(task, 'reset'):
task.reset(self)
if self._global_task and hasattr(self._global_task, 'reset'):
self._global_task.reset(self)
# Loop over all env randomizers.
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_env(self)
for s in self.all_sensors():
s.on_reset(self)
return self._get_observation()
def get_robot(self, name):
for robot in self.robots:
if robot.name == name:
return robot
def _reward(self):
"""Returns a list of rewards.
Returns:
A list of rewards corresponding to each robot and their task.
"""
global_reward = 0
if self._global_task:
global_reward = self._global_task(self)
if self._single_reward: # Needed for tfagents compatibility.
if self._tasks:
return min([task(self) + global_reward for task in self._tasks])
return 0
else:
if self._tasks:
return [task(self) + global_reward for task in self._tasks]
return [0 for _ in self.robots]
def step(self, actions):
"""Step forward the simulation, given the actions.
Args:
actions: A dictionary of actions for all robots. The action for each robot
can be joint angles for legged platforms like Laikago, and base
velocity/steering for kinematic robots such like Fetch.
Returns:
observations: The observation dictionary. The keys are the sensor names
and the values are the sensor readings.
reward: The reward for the current state-action pair.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
self._last_base_position = [
robot.GetBasePosition() for robot in self._robots
]
self._last_action = actions
if self._is_render:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self._env_time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
camera_target = np.mean(
[robot.GetBasePosition() for robot in self._robots], axis=0)
# Also keep the previous orientation of the camera set by the user.
[yaw, pitch,
dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch,
camera_target)
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_step(self)
    # Stepping broken down into its parts
for robot in self._robots:
robot.PreStepPerStep(actions)
for _ in range(self._num_action_repeat):
for robot in self._robots:
robot.PreStepPerActionRepeat(actions)
self._pybullet_client.stepSimulation()
for robot in self._robots:
robot.PostStepPerActionRepeat()
for robot in self._robots:
robot.PostStepPerStep()
if self._profiling_slot >= 0:
self._profiling_counter -= 1
if self._profiling_counter == 0:
self._pybullet_client.stopStateLogging(self._profiling_slot)
if self._log_path is not None:
minitaur_logging.update_episode_proto(self._episode_proto,
self._robots[0], actions,
self._env_step_counter)
reward = self._reward()
for s in self.all_sensors():
s.on_step(self)
for task in self._tasks:
if hasattr(task, 'update'):
task.update(self)
if self._global_task and hasattr(self._global_task, 'update'):
self._global_task.update(self)
done = self._termination()
self._env_step_counter += 1
if done:
for robot in self._robots:
robot.Terminate()
return self._get_observation(), reward, done, {}
def render(self, mode='rgb_array'):
if mode != 'rgb_array':
      raise ValueError('Unsupported render mode: {}'.format(mode))
base_pos = np.mean([robot.GetBasePosition() for robot in self._robots],
axis=0)
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._camera_dist,
yaw=self._camera_yaw,
pitch=self._camera_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
fov=60,
aspect=float(self._render_width) / self._render_height,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=self._render_width,
height=self._render_height,
renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
if not all([robot.is_safe for robot in self._robots]):
return True
if self._tasks:
return (self._global_task and self._global_task.done(self)) or any(
[task.done(self) for task in self._tasks])
for s in self.all_sensors():
s.on_terminate(self)
return False
def set_time_step(self, num_action_repeat, sim_step=0.001):
"""Sets the time step of the environment.
Args:
num_action_repeat: The number of simulation steps/action repeats to be
executed when calling env.step().
sim_step: The simulation time step in PyBullet. By default, the simulation
step is 0.001s, which is a good trade-off between simulation speed and
accuracy.
Raises:
ValueError: If the num_action_repeat is less than 1.
"""
if num_action_repeat < 1:
raise ValueError('number of action repeats should be at least 1.')
self._sim_time_step = sim_step
self._num_action_repeat = num_action_repeat
self._env_time_step = sim_step * num_action_repeat
self._num_bullet_solver_iterations = (
_NUM_SIMULATION_ITERATION_STEPS / self._num_action_repeat)
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=self._num_bullet_solver_iterations)
self._pybullet_client.setTimeStep(self._sim_time_step)
for robot in self._robots:
robot.SetTimeSteps(self._num_action_repeat, self._sim_time_step)
def get_time_since_reset(self):
"""Get the time passed (in seconds) since the last reset.
Returns:
      Time in seconds since the last reset, taken from the first robot.
"""
return self._robots[0].GetTimeSinceReset()
@property
def tasks(self):
return self._tasks
@property
def robots(self):
return self._robots
@property
def num_robots(self):
return len(self._robots)
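# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): driving an already
# constructed MultiagentMobilityGymEnv. Only the reset()/step() contract and
# the dict-style action space built in __init__ are assumed; creating `env`
# itself (robot classes, scene, gin bindings) is out of scope here.
# ---------------------------------------------------------------------------
def _example_random_rollout(env, num_steps=10):
  """Steps `env` with random per-robot actions and returns the last observation."""
  observations = env.reset()
  for _ in range(num_steps):
    # One action entry per robot, sampled from the corresponding gym space.
    actions = {
        name: space.sample()
        for name, space in env.action_space.spaces.items()
    }
    observations, _, done, _ = env.step(actions)
    if done:
      observations = env.reset()
  return observations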
|
2ff5b3a43dca8bd59b1e5a023e2531a9d0a37f81
|
c5ee0fa77a98683fff730d359ba205ca89514638
|
/examples/create_bids_folder.py
|
ba3c53220e46e23e821cda67e93d5cbf65d94daa
|
[
"BSD-3-Clause"
] |
permissive
|
mne-tools/mne-bids
|
70f70d6068ebdf1826d7340cd7573e41b813873a
|
d27cb465709f6a883276cde09a548245f20b0436
|
refs/heads/main
| 2023-08-31T14:41:59.882835
| 2023-08-26T07:18:23
| 2023-08-26T07:18:23
| 89,170,358
| 111
| 91
|
BSD-3-Clause
| 2023-09-11T06:03:27
| 2017-04-23T20:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
create_bids_folder.py
|
"""
=======================================================
11. Creating BIDS-compatible folder names and filenames
=======================================================
The Brain Imaging Data Structure (BIDS) has standard conventions for file
names and folder hierarchy. MNE-BIDS comes with convenience functions if you
wish to create these files/folders on your own.
.. note::
You may automatically convert Raw objects to BIDS-compatible files with
``write_raw_bids``. This example is for manually creating files/folders.
"""
# Authors: Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD-3-Clause
# %%
# First we will import the relevant functions
from mne_bids import BIDSPath
# %%
# Creating file names for BIDS
# ----------------------------
#
# BIDS requires a specific ordering and structure for metadata fields in
# file paths. The class `BIDSPath` allows you to specify many such
# pieces of metadata, ensuring that they are in the correct order in the
# final file path. Omitted keys will not be included in the file path.
bids_path = BIDSPath(
subject="test", session="two", task="mytask", suffix="events", extension=".tsv"
)
print(bids_path)
# %%
# You may also omit the suffix, which will result in *only* a prefix for a
# file name. This could then be prepended to many more files.
bids_path = BIDSPath(subject="test", task="mytask")
print(bids_path)
# %%
# Creating folders
# ----------------
#
# You can also use MNE-BIDS to create folder hierarchies.
bids_path = BIDSPath(
subject="01", session="mysession", datatype="meg", root="path/to/project"
).mkdir()
print(bids_path.directory)
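# %%
# Updating an existing path
# -------------------------
#
# (Illustrative addition, not part of the original example.) Rather than
# rebuilding a ``BIDSPath`` from scratch, you can modify it with ``update``;
# entities left as ``None`` are simply omitted from the resulting name.
bids_path = bids_path.update(task="mytask", run="01")
print(bids_path)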
|
917d814a3ca88cf4f59ad65c99286f70e261eba6
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/frame-interpolation/zooming-slow-mo/args.py
|
99f33704f8c2b7724cbc7f72b3cfe0dc56508cd2
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
args.py
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from utils import read_yaml
def get_config():
"""
Get command line arguments.
    Values from the YAML config file (--cfg) are loaded first and then refined
    by the command line arguments above.
"""
parser = argparse.ArgumentParser(
description='ZoomingSloMo or only Slo-Mo training argument parser')
parser.add_argument('--cfg', default="./config.yaml")
args, _ = parser.parse_known_args()
conf = read_yaml(args.cfg)
parser.add_argument('--lmdb-data-gt', type=str, default="datasets/",
help='Path to HR frames lmdb for training')
parser.add_argument('--lmdb-data-lq', type=str, default="datasets/",
help='Path to LR frames lmdb for training')
parser.add_argument('--output-dir', type=str, default="models/",
help='Path to store trained models')
    parser.add_argument('--batch-size', type=int, default=12,
                        help='Batch size for training')
parser.add_argument('--gt-size', type=int, default=128,
help='Ground truth frame size')
parser.add_argument('--only-slomo', action='store_true', default=False,
help='If True, network will train for Slo-Mo only (No Zooming)')
args = parser.parse_args()
# Refine config file variables
conf.data.lmdb_data_gt = args.lmdb_data_gt
conf.data.lmdb_data_lq = args.lmdb_data_lq
conf.data.output_dir = args.output_dir
conf.train.batch_size = args.batch_size
conf.train.only_slomo = args.only_slomo
conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4
conf.data.lr_size = args.gt_size // 4
return conf
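if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: resolve the merged
    # configuration and show a few derived values. This assumes ./config.yaml
    # exists and that utils.read_yaml returns an attribute-style config
    # object, exactly as get_config() above already relies on.
    config = get_config()
    print("batch size:", config.train.batch_size)
    print("GT / LR patch size:", config.data.gt_size, "/", config.data.lr_size)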
|
2b3bda4c8b499391329a679bf3b114c519f977a2
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/usb_gadget/usb_descriptors_test.py
|
3e8067f53509b868ad9ecc773799134ab9973c42
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 6,799
|
py
|
usb_descriptors_test.py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import hid_constants
import usb_descriptors
class DescriptorWithField(usb_descriptors.Descriptor):
pass
DescriptorWithField.AddField('bField', 'B')
class DescriptorWithDefault(usb_descriptors.Descriptor):
pass
DescriptorWithDefault.AddField('bDefault', 'B', default=42)
class DescriptorWithFixed(usb_descriptors.Descriptor):
pass
DescriptorWithFixed.AddFixedField('bFixed', 'B', 42)
class DescriptorWithComputed(usb_descriptors.Descriptor):
@property
def foo(self):
return 42
DescriptorWithComputed.AddComputedField('bComputed', 'B', 'foo')
class DescriptorWithDescriptors(usb_descriptors.DescriptorContainer):
pass
DescriptorWithDescriptors.AddField('bType', 'B')
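# Illustrative sketch (not part of the original test file): how the toy
# descriptor classes above fit together. DescriptorWithField packs a single
# unsigned byte, and DescriptorWithDescriptors prepends its own bType byte to
# the encoding of every child added to it (see DescriptorTest.test_container).
def _example_encode():
  parent = DescriptorWithDescriptors(bType=0x01)
  parent.Add(DescriptorWithField(bField=0x02))
  return parent.Encode()  # '\x01\x02'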
class DescriptorTest(unittest.TestCase):
def test_default(self):
obj = DescriptorWithDefault()
self.assertEquals(obj.bDefault, 42)
def test_change_default(self):
obj = DescriptorWithDefault()
obj.bDefault = 1
self.assertEquals(obj.bDefault, 1)
def test_override_default(self):
obj = DescriptorWithDefault(bDefault=56)
self.assertEquals(obj.bDefault, 56)
def test_fixed(self):
obj = DescriptorWithFixed()
self.assertEquals(obj.bFixed, 42)
def test_set_fixed(self):
with self.assertRaises(RuntimeError):
DescriptorWithFixed(bFixed=1)
def test_modify_fixed(self):
obj = DescriptorWithFixed()
with self.assertRaises(RuntimeError):
obj.bFixed = 1
def test_computed(self):
obj = DescriptorWithComputed()
self.assertEquals(obj.bComputed, 42)
def test_set_computed(self):
with self.assertRaises(RuntimeError):
DescriptorWithComputed(bComputed=1)
def test_modify_computed(self):
obj = DescriptorWithComputed()
with self.assertRaises(RuntimeError):
obj.bComputed = 1
def test_unexpected(self):
with self.assertRaisesRegexp(TypeError, 'Unexpected'):
DescriptorWithField(bUnexpected=1)
def test_missing(self):
with self.assertRaisesRegexp(TypeError, 'Missing'):
DescriptorWithField()
def test_size(self):
obj = DescriptorWithField(bField=42)
self.assertEquals(obj.struct_size, 1)
self.assertEquals(obj.total_size, 1)
def test_encode(self):
obj = DescriptorWithField(bField=0xff)
self.assertEquals(obj.Encode(), '\xff')
def test_string(self):
obj = DescriptorWithField(bField=42)
string = str(obj)
self.assertIn('bField', string)
self.assertIn('42', string)
def test_container(self):
parent = DescriptorWithDescriptors(bType=0)
child1 = DescriptorWithField(bField=1)
parent.Add(child1)
child2 = DescriptorWithField(bField=2)
parent.Add(child2)
self.assertEquals(parent.total_size, 3)
self.assertEquals(parent.Encode(), '\x00\x01\x02')
string = str(parent)
self.assertIn('bType', string)
self.assertIn('bField', string)
class TestUsbDescriptors(unittest.TestCase):
def test_device_descriptor(self):
device_desc = usb_descriptors.DeviceDescriptor(
idVendor=0xDEAD,
idProduct=0xBEEF,
bcdDevice=0x0100,
bNumConfigurations=1)
self.assertEquals(
device_desc.Encode(),
'\x12\x01\x00\x02\x00\x00\x00\x40\xAD\xDE\xEF\xBE\x00\x01\x00\x00\x00'
'\x01')
def test_unique_interfaces(self):
interface_desc1 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc2 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1,
bAlternateSetting=1)
interface_desc3 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
configuration_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=100)
configuration_desc.AddInterface(interface_desc1)
configuration_desc.AddInterface(interface_desc2)
with self.assertRaisesRegexp(RuntimeError, r'Interface 1 \(alternate 0\)'):
configuration_desc.AddInterface(interface_desc3)
def test_unique_endpoints(self):
endpoint_desc1 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
endpoint_desc2 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
endpoint_desc3 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x01,
wMaxPacketSize=32,
bInterval=10)
interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc.AddEndpoint(endpoint_desc1)
interface_desc.AddEndpoint(endpoint_desc2)
with self.assertRaisesRegexp(RuntimeError, 'Endpoint 0x01 already defined'):
interface_desc.AddEndpoint(endpoint_desc3)
def test_configuration_descriptor(self):
endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
encoded_endpoint = '\x07\x05\x01\x02\x40\x00\x01'
self.assertEquals(endpoint_desc.Encode(), encoded_endpoint)
interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc.AddEndpoint(endpoint_desc)
self.assertEquals([endpoint_desc], interface_desc.GetEndpoints())
encoded_interface = ('\x09\x04\x01\x00\x01\xFF\xFF\xFF\x00' +
encoded_endpoint)
self.assertEquals(interface_desc.Encode(), encoded_interface)
configuration_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=100)
configuration_desc.AddInterface(interface_desc)
self.assertEquals([interface_desc], configuration_desc.GetInterfaces())
encoded_configuration = ('\x09\x02\x19\x00\x01\x01\x00\xC0\x64' +
encoded_interface)
self.assertEquals(configuration_desc.Encode(), encoded_configuration)
def test_encode_hid_descriptor(self):
hid_desc = usb_descriptors.HidDescriptor()
hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
encoded_desc = '\x0C\x21\x11\x01\x00\x02\x22\x80\x00\x23\x60\x00'
self.assertEquals(hid_desc.Encode(), encoded_desc)
def test_print_hid_descriptor(self):
hid_desc = usb_descriptors.HidDescriptor()
hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
string = str(hid_desc)
self.assertIn('0x22', string)
self.assertIn('0x23', string)
if __name__ == '__main__':
unittest.main()
|
2107be3dcb502f9de2a275f0a86aee40891c5519
|
99d79ada2d3b7746573f071823ec61f5f853d7a3
|
/tests/update_golds.py
|
749dc9159b829abb30d26a06f143fffa8c779d95
|
[
"MIT"
] |
permissive
|
phanrahan/magma
|
d8062c6163e2c2c2cedef82317dc8cc40038220a
|
b05fe5303ed17e668c6ec2ec3558cd5a52eff787
|
refs/heads/master
| 2023-08-23T18:08:22.494869
| 2023-08-08T18:53:05
| 2023-08-17T16:16:44
| 84,332,281
| 227
| 21
|
NOASSERTION
| 2023-09-14T21:32:19
| 2017-03-08T14:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
update_golds.py
|
"""
Expected to be run from repo root
"""
import shutil
import os
def copy_golds(dir_path):
for f in os.listdir(os.path.join(dir_path, "gold")):
try:
shutil.copy(
os.path.join(dir_path, "build", f),
os.path.join(dir_path, "gold", f)
)
        except FileNotFoundError:
            # The corresponding build output has a different name or the gold
            # file is extra; skip it.
            pass
copy_golds("tests")
for name in os.listdir("tests"):
if not os.path.isdir(os.path.join("tests", name)):
continue
if "gold" in os.listdir(os.path.join("tests", name)):
copy_golds(os.path.join("tests", name))
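# Illustrative note (not part of the original script): run this from the repo
# root after a test run has populated the per-directory build/ outputs, e.g.
#
#     python tests/update_golds.py
#
# so that every gold/ file is refreshed from its build/ counterpart.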
|
3871226a0a9e134eedd1640ff2697db3091b2812
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-tempo/source_tempo/__init__.py
|
46ea9332ff05f280bf2cc16dafac48e37b54469a
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
__init__.py
|
from .source import SourceTempo
__all__ = ["SourceTempo"]
|
fda0958faa0b20df943ed591064023f27b2eda53
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/Utility/WaveForm/WaveForm.py
|
6e4d0a93ad3013c7b7d03255aa96bbc13ad7f1db
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 12,839
|
py
|
WaveForm.py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named WaveFormExt.py
# See http://natron.readthedocs.org/en/master/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from WaveFormExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.WaveForm"
def getLabel():
return "WaveForm"
def getVersion():
return 1
def getIconPath():
return "WaveForm.png"
def getGrouping():
return "Community/Utility"
def getPluginDescription():
return "Display a Luminance Waveform of the input.\nYou can also display RGB channels separately.\n\nAs Natron works in linear colorspace , there is an option to specify you display gamma. "
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
# Create the user parameters
lastNode.Controls = lastNode.createPageParam("Controls", "Controls")
param = lastNode.createColorParam("Gamma1value", "Display Gamma", True)
param.setMinimum(0, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(4, 0)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
param.setMinimum(0, 1)
param.setDisplayMinimum(0, 1)
param.setDisplayMaximum(4, 1)
param.setDefaultValue(1, 1)
param.restoreDefaultValue(1)
param.setMinimum(0, 2)
param.setDisplayMinimum(0, 2)
param.setDisplayMaximum(4, 2)
param.setDefaultValue(1, 2)
param.restoreDefaultValue(2)
param.setMinimum(0, 3)
param.setDisplayMinimum(0, 3)
param.setDisplayMaximum(4, 3)
param.setDefaultValue(1, 3)
param.restoreDefaultValue(3)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
param.setValue(2.2, 0)
param.setValue(2.2, 1)
param.setValue(2.2, 2)
param.setValue(2.2, 3)
lastNode.Gamma1value = param
del param
param = lastNode.createIntParam("accel", "Acceleration")
param.setMinimum(1, 0)
param.setMaximum(300, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(10, 0)
param.setDefaultValue(2, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("Skips some pixels to give faster result, but less accurate.")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
param.setValue(5, 0)
lastNode.accel = param
del param
param = lastNode.createBooleanParam("rgb", "Show separate RGB waves")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("Display separate channels instead of Luminance")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.rgb = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Controls', 'Node'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "LumaWave"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("LumaWave")
lastNode.setLabel("LumaWave")
lastNode.setPosition(1234, 559)
lastNode.setSize(80, 43)
lastNode.setColor(0.3, 0.5, 0.2)
groupLumaWave = lastNode
param = lastNode.getParam("paramValueInt0")
if param is not None:
param.setValue(5, 0)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("// Luma waveform for Natron (shadertoy OFX ) by sozap\n// you may use it and distribute it as you like ...\n\nuniform int step = 2 ;\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n\t//vec3 img = texture2D(iChannel0, fragCoord.xy / iResolution.xy).rgb;\n\n vec2 uv = fragCoord.xy / iResolution.xy;\n vec2 ds = fragCoord.xy / iResolution.xy;\n\n\tfloat range=1.1260; // used to reduce the waveform\n\n\tfloat col=0.0;\n\tfloat aprox=0.005;\n\tfloat lum=0.0 ;\n\t//int step=10;\n\n\tfor (int i=0; i < iResolution.y ; i+=step){\n\t\tds.y=i/iResolution.y;\n\n\t\tvec3 tmp = texture2D(iChannel0, ds.xy).rgb;\n\n\t\t//rgb to luminance\n\t\tlum = (tmp.x*0.3)+(tmp.y*0.59)+(tmp.z*0.11) ;\n\n\t\tif (lum < (uv.y*range)) {if (lum+aprox > (uv.y*range)) col+=0.02 ;};\n\n\t}\n\n\tvec3 img=vec3((col*step)/7.0,col*step,(col*step)/7.0) ;\n\n\tif (mod(uv.y,.445)<=3./iResolution.y) img.x =0.5 ;\n\n fragColor = vec4(img,1.0);\n}\n")
del param
param = lastNode.getParam("mipmap0")
if param is not None:
param.set("Linear")
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("int")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("step")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("step")
del param
param = lastNode.getParam("paramDefaultInt0")
if param is not None:
param.setValue(2, 0)
del param
del lastNode
# End of node "LumaWave"
# Start of node "Gamma1"
lastNode = app.createNode("net.sf.openfx.GammaPlugin", 2, group)
lastNode.setScriptName("Gamma1")
lastNode.setLabel("Gamma1")
lastNode.setPosition(1222, 458)
lastNode.setSize(104, 43)
lastNode.setColor(0.48, 0.66, 1)
groupGamma1 = lastNode
param = lastNode.getParam("NatronOfxParamProcessA")
if param is not None:
param.setValue(True)
del param
param = lastNode.getParam("value")
if param is not None:
param.setValue(2.2, 0)
param.setValue(2.2, 1)
param.setValue(2.2, 2)
param.setValue(2.2, 3)
del param
param = lastNode.getParam("invert")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Gamma1"
# Start of node "RGBWave"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("RGBWave")
lastNode.setLabel("RGBWave")
lastNode.setPosition(1367, 562)
lastNode.setSize(80, 43)
lastNode.setColor(0.3, 0.5, 0.2)
groupRGBWave = lastNode
param = lastNode.getParam("paramValueInt0")
if param is not None:
param.setValue(5, 0)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("// RGB waveform for Natron (shadertoy OFX ) by sozap\n// you may use it and distribute it as you like ...\n\nuniform int step = 2 ;\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n vec2 uv = fragCoord.xy / iResolution.xy;\n vec2 ds = fragCoord.xy / iResolution.xy;\n\n\tfloat range=1.1260; // used to reduce the waveform\n\n \tfloat col=0.0;\n\tfloat aprox=0.005;\n\tfloat lum=0.0 ;\n\n\tfloat xstep=(iResolution.x/3);\n\n\t\tfor (int i=0; i < iResolution.y ; i+=step){\n\t\t\tds.y=i/iResolution.y;\n\t\t\tds.x=fragCoord.x / (iResolution.x/3);\n\t\t\tvec3 tmp = texture2D(iChannel0, ds.xy).rgb;\n\n\t\t\tif (fragCoord.x < xstep) lum=tmp.x ;\n\t\t\tif (fragCoord.x > xstep) lum=tmp.y ;\n\t\t\tif (fragCoord.x > (xstep*2)) lum=tmp.z ;\n\n\t\t\tif (lum < (uv.y*range)) {if (lum+aprox > (uv.y*range)) col+=0.02 ;};\n\n\t\t}\n\n\tcol=col*step;\n\tvec3 img=vec3(0.0,0.0,0.0) ;\n\n\t//R\n\tif (fragCoord.x < xstep) img=vec3((col)/7.0,0.01*col,0.01*col) ;\n\n\t//G\n\tif (fragCoord.x > xstep) img=vec3(0.0,(col)/7.0,0.0) ;\n\n\t//B\n\tif (fragCoord.x > (xstep*2)) img=vec3(0.01*col,0.01*col,(col)/7.0) ;\n\n\n\tif (mod(uv.y,.445)<=3./iResolution.y) img.x =0.5 ;\n\n fragColor = vec4(img,1.0);\n}\n")
del param
param = lastNode.getParam("mipmap0")
if param is not None:
param.set("Linear")
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("int")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("step")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("step")
del param
param = lastNode.getParam("paramDefaultInt0")
if param is not None:
param.setValue(1, 0)
del param
del lastNode
# End of node "RGBWave"
# Start of node "Gamma2"
lastNode = app.createNode("net.sf.openfx.GammaPlugin", 2, group)
lastNode.setScriptName("Gamma2")
lastNode.setLabel("Gamma2")
lastNode.setPosition(1355, 665)
lastNode.setSize(104, 43)
lastNode.setColor(0.48, 0.66, 1)
groupGamma2 = lastNode
param = lastNode.getParam("value")
if param is not None:
param.setValue(1.4, 0)
param.setValue(1.4, 1)
param.setValue(1.4, 2)
param.setValue(1.4, 3)
del param
param = lastNode.getParam("premult")
if param is not None:
param.setValue(True)
del param
del lastNode
# End of node "Gamma2"
# Start of node "Switch1"
lastNode = app.createNode("net.sf.openfx.switchPlugin", 1, group)
lastNode.setScriptName("Switch1")
lastNode.setLabel("Switch1")
lastNode.setPosition(1222, 826)
lastNode.setSize(104, 43)
lastNode.setColor(0.3, 0.37, 0.776)
groupSwitch1 = lastNode
param = lastNode.getParam("which")
if param is not None:
param.setValue(0, 0)
del param
del lastNode
# End of node "Switch1"
# Start of node "Input"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("Input")
lastNode.setLabel("Input")
lastNode.setPosition(1220, 266)
lastNode.setSize(104, 43)
lastNode.setColor(0.3, 0.5, 0.2)
groupInput = lastNode
del lastNode
# End of node "Input"
# Start of node "Output1"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output1")
lastNode.setPosition(1222, 1013)
lastNode.setSize(104, 30)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput1 = lastNode
del lastNode
# End of node "Output1"
# Now that all nodes are created we can connect them together, restore expressions
groupLumaWave.connectInput(0, groupGamma1)
groupGamma1.connectInput(0, groupInput)
groupRGBWave.connectInput(0, groupGamma1)
groupGamma2.connectInput(0, groupRGBWave)
groupSwitch1.connectInput(0, groupLumaWave)
groupSwitch1.connectInput(1, groupGamma2)
groupOutput1.connectInput(0, groupSwitch1)
param = groupLumaWave.getParam("paramValueInt0")
param.setExpression("thisGroup.accel.get()", False, 0)
del param
param = groupGamma1.getParam("value")
group.getParam("Gamma1value").setAsAlias(param)
del param
param = groupRGBWave.getParam("paramValueInt0")
param.setExpression("thisGroup.accel.get()", False, 0)
del param
param = groupSwitch1.getParam("which")
param.setExpression("thisGroup.rgb.get()", False, 0)
del param
try:
extModule = sys.modules["WaveFormExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
|
c63cb5ee6c88757f99046e35bea84a5373ecc517
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/third_party/google-endpoints/apitools/base/protorpclite/descriptor_test.py
|
fc27ec4171552d57525ad5685c269938be9fa74f
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 17,140
|
py
|
descriptor_test.py
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apitools.base.protorpclite.descriptor."""
import platform
import types
import six
import unittest2
from apitools.base.protorpclite import descriptor
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import test_util
RUSSIA = u'\u0420\u043e\u0441\u0441\u0438\u044f'
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = descriptor
class DescribeEnumValueTest(test_util.TestCase):
def testDescribe(self):
class MyEnum(messages.Enum):
MY_NAME = 10
expected = descriptor.EnumValueDescriptor()
expected.name = 'MY_NAME'
expected.number = 10
described = descriptor.describe_enum_value(MyEnum.MY_NAME)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeEnumTest(test_util.TestCase):
def testEmptyEnum(self):
class EmptyEnum(messages.Enum):
pass
expected = descriptor.EnumDescriptor()
expected.name = 'EmptyEnum'
described = descriptor.describe_enum(EmptyEnum)
described.check_initialized()
self.assertEquals(expected, described)
def testNestedEnum(self):
class MyScope(messages.Message):
class NestedEnum(messages.Enum):
pass
expected = descriptor.EnumDescriptor()
expected.name = 'NestedEnum'
described = descriptor.describe_enum(MyScope.NestedEnum)
described.check_initialized()
self.assertEquals(expected, described)
@unittest2.skipIf('PyPy' in platform.python_implementation(),
'todo: reenable this')
def testEnumWithItems(self):
class EnumWithItems(messages.Enum):
A = 3
B = 1
C = 2
expected = descriptor.EnumDescriptor()
expected.name = 'EnumWithItems'
a = descriptor.EnumValueDescriptor()
a.name = 'A'
a.number = 3
b = descriptor.EnumValueDescriptor()
b.name = 'B'
b.number = 1
c = descriptor.EnumValueDescriptor()
c.name = 'C'
c.number = 2
expected.values = [b, c, a]
described = descriptor.describe_enum(EnumWithItems)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeFieldTest(test_util.TestCase):
def testLabel(self):
for repeated, required, expected_label in (
(True, False, descriptor.FieldDescriptor.Label.REPEATED),
(False, True, descriptor.FieldDescriptor.Label.REQUIRED),
(False, False, descriptor.FieldDescriptor.Label.OPTIONAL)):
field = messages.IntegerField(
10, required=required, repeated=repeated)
field.name = 'a_field'
expected = descriptor.FieldDescriptor()
expected.name = 'a_field'
expected.number = 10
expected.label = expected_label
expected.variant = descriptor.FieldDescriptor.Variant.INT64
described = descriptor.describe_field(field)
described.check_initialized()
self.assertEquals(expected, described)
def testDefault(self):
test_cases = (
(messages.IntegerField, 200, '200'),
(messages.FloatField, 1.5, '1.5'),
(messages.FloatField, 1e6, '1000000.0'),
(messages.BooleanField, True, 'true'),
(messages.BooleanField, False, 'false'),
(messages.BytesField,
b''.join([six.int2byte(x) for x in (31, 32, 33)]),
b'\\x1f !'),
(messages.StringField, RUSSIA, RUSSIA),
)
for field_class, default, expected_default in test_cases:
field = field_class(10, default=default)
field.name = u'a_field'
expected = descriptor.FieldDescriptor()
expected.name = u'a_field'
expected.number = 10
expected.label = descriptor.FieldDescriptor.Label.OPTIONAL
expected.variant = field_class.DEFAULT_VARIANT
expected.default_value = expected_default
described = descriptor.describe_field(field)
described.check_initialized()
self.assertEquals(expected, described)
def testDefault_EnumField(self):
class MyEnum(messages.Enum):
VAL = 1
module_name = test_util.get_module_name(MyEnum)
field = messages.EnumField(MyEnum, 10, default=MyEnum.VAL)
field.name = 'a_field'
expected = descriptor.FieldDescriptor()
expected.name = 'a_field'
expected.number = 10
expected.label = descriptor.FieldDescriptor.Label.OPTIONAL
expected.variant = messages.EnumField.DEFAULT_VARIANT
expected.type_name = '%s.MyEnum' % module_name
expected.default_value = '1'
described = descriptor.describe_field(field)
self.assertEquals(expected, described)
def testMessageField(self):
field = messages.MessageField(descriptor.FieldDescriptor, 10)
field.name = 'a_field'
expected = descriptor.FieldDescriptor()
expected.name = 'a_field'
expected.number = 10
expected.label = descriptor.FieldDescriptor.Label.OPTIONAL
expected.variant = messages.MessageField.DEFAULT_VARIANT
expected.type_name = (
'apitools.base.protorpclite.descriptor.FieldDescriptor')
described = descriptor.describe_field(field)
described.check_initialized()
self.assertEquals(expected, described)
def testDateTimeField(self):
field = message_types.DateTimeField(20)
field.name = 'a_timestamp'
expected = descriptor.FieldDescriptor()
expected.name = 'a_timestamp'
expected.number = 20
expected.label = descriptor.FieldDescriptor.Label.OPTIONAL
expected.variant = messages.MessageField.DEFAULT_VARIANT
expected.type_name = (
'apitools.base.protorpclite.message_types.DateTimeMessage')
described = descriptor.describe_field(field)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeMessageTest(test_util.TestCase):
def testEmptyDefinition(self):
class MyMessage(messages.Message):
pass
expected = descriptor.MessageDescriptor()
expected.name = 'MyMessage'
described = descriptor.describe_message(MyMessage)
described.check_initialized()
self.assertEquals(expected, described)
def testDefinitionWithFields(self):
class MessageWithFields(messages.Message):
field1 = messages.IntegerField(10)
field2 = messages.StringField(30)
field3 = messages.IntegerField(20)
expected = descriptor.MessageDescriptor()
expected.name = 'MessageWithFields'
expected.fields = [
descriptor.describe_field(
MessageWithFields.field_by_name('field1')),
descriptor.describe_field(
MessageWithFields.field_by_name('field3')),
descriptor.describe_field(
MessageWithFields.field_by_name('field2')),
]
described = descriptor.describe_message(MessageWithFields)
described.check_initialized()
self.assertEquals(expected, described)
def testNestedEnum(self):
class MessageWithEnum(messages.Message):
class Mood(messages.Enum):
GOOD = 1
BAD = 2
UGLY = 3
class Music(messages.Enum):
CLASSIC = 1
JAZZ = 2
BLUES = 3
expected = descriptor.MessageDescriptor()
expected.name = 'MessageWithEnum'
expected.enum_types = [descriptor.describe_enum(MessageWithEnum.Mood),
descriptor.describe_enum(MessageWithEnum.Music)]
described = descriptor.describe_message(MessageWithEnum)
described.check_initialized()
self.assertEquals(expected, described)
def testNestedMessage(self):
class MessageWithMessage(messages.Message):
class Nesty(messages.Message):
pass
expected = descriptor.MessageDescriptor()
expected.name = 'MessageWithMessage'
expected.message_types = [
descriptor.describe_message(MessageWithMessage.Nesty)]
described = descriptor.describe_message(MessageWithMessage)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeFileTest(test_util.TestCase):
"""Test describing modules."""
def LoadModule(self, module_name, source):
result = {
'__name__': module_name,
'messages': messages,
}
exec(source, result)
module = types.ModuleType(module_name)
for name, value in result.items():
setattr(module, name, value)
return module
def testEmptyModule(self):
"""Test describing an empty file."""
module = types.ModuleType('my.package.name')
expected = descriptor.FileDescriptor()
expected.package = 'my.package.name'
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
def testNoPackageName(self):
"""Test describing a module with no module name."""
module = types.ModuleType('')
expected = descriptor.FileDescriptor()
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
def testPackageName(self):
"""Test using the 'package' module attribute."""
module = types.ModuleType('my.module.name')
module.package = 'my.package.name'
expected = descriptor.FileDescriptor()
expected.package = 'my.package.name'
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
def testMain(self):
"""Test using the 'package' module attribute."""
module = types.ModuleType('__main__')
module.__file__ = '/blim/blam/bloom/my_package.py'
expected = descriptor.FileDescriptor()
expected.package = 'my_package'
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
def testMessages(self):
"""Test that messages are described."""
module = self.LoadModule('my.package',
'class Message1(messages.Message): pass\n'
'class Message2(messages.Message): pass\n')
message1 = descriptor.MessageDescriptor()
message1.name = 'Message1'
message2 = descriptor.MessageDescriptor()
message2.name = 'Message2'
expected = descriptor.FileDescriptor()
expected.package = 'my.package'
expected.message_types = [message1, message2]
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
def testEnums(self):
"""Test that enums are described."""
module = self.LoadModule('my.package',
'class Enum1(messages.Enum): pass\n'
'class Enum2(messages.Enum): pass\n')
enum1 = descriptor.EnumDescriptor()
enum1.name = 'Enum1'
enum2 = descriptor.EnumDescriptor()
enum2.name = 'Enum2'
expected = descriptor.FileDescriptor()
expected.package = 'my.package'
expected.enum_types = [enum1, enum2]
described = descriptor.describe_file(module)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeFileSetTest(test_util.TestCase):
"""Test describing multiple modules."""
def testNoModules(self):
"""Test what happens when no modules provided."""
described = descriptor.describe_file_set([])
described.check_initialized()
# The described FileSet.files will be None.
self.assertEquals(descriptor.FileSet(), described)
def testWithModules(self):
"""Test what happens when no modules provided."""
modules = [types.ModuleType('package1'), types.ModuleType('package1')]
file1 = descriptor.FileDescriptor()
file1.package = 'package1'
file2 = descriptor.FileDescriptor()
file2.package = 'package2'
expected = descriptor.FileSet()
expected.files = [file1, file1]
described = descriptor.describe_file_set(modules)
described.check_initialized()
self.assertEquals(expected, described)
class DescribeTest(test_util.TestCase):
def testModule(self):
self.assertEquals(descriptor.describe_file(test_util),
descriptor.describe(test_util))
def testField(self):
self.assertEquals(
descriptor.describe_field(test_util.NestedMessage.a_value),
descriptor.describe(test_util.NestedMessage.a_value))
def testEnumValue(self):
self.assertEquals(
descriptor.describe_enum_value(
test_util.OptionalMessage.SimpleEnum.VAL1),
descriptor.describe(test_util.OptionalMessage.SimpleEnum.VAL1))
def testMessage(self):
self.assertEquals(descriptor.describe_message(test_util.NestedMessage),
descriptor.describe(test_util.NestedMessage))
def testEnum(self):
self.assertEquals(
descriptor.describe_enum(test_util.OptionalMessage.SimpleEnum),
descriptor.describe(test_util.OptionalMessage.SimpleEnum))
def testUndescribable(self):
class NonService(object):
def fn(self):
pass
for value in (NonService,
NonService.fn,
1,
'string',
1.2,
None):
self.assertEquals(None, descriptor.describe(value))
class ModuleFinderTest(test_util.TestCase):
def testFindMessage(self):
self.assertEquals(
descriptor.describe_message(descriptor.FileSet),
descriptor.import_descriptor_loader(
'apitools.base.protorpclite.descriptor.FileSet'))
def testFindField(self):
self.assertEquals(
descriptor.describe_field(descriptor.FileSet.files),
descriptor.import_descriptor_loader(
'apitools.base.protorpclite.descriptor.FileSet.files'))
def testFindEnumValue(self):
self.assertEquals(
descriptor.describe_enum_value(
test_util.OptionalMessage.SimpleEnum.VAL1),
descriptor.import_descriptor_loader(
'apitools.base.protorpclite.test_util.'
'OptionalMessage.SimpleEnum.VAL1'))
class DescriptorLibraryTest(test_util.TestCase):
def setUp(self):
self.packageless = descriptor.MessageDescriptor()
self.packageless.name = 'Packageless'
self.library = descriptor.DescriptorLibrary(
descriptors={
'not.real.Packageless': self.packageless,
'Packageless': self.packageless,
})
def testLookupPackage(self):
self.assertEquals('csv', self.library.lookup_package('csv'))
self.assertEquals(
'apitools.base.protorpclite',
self.library.lookup_package('apitools.base.protorpclite'))
def testLookupNonPackages(self):
lib = 'apitools.base.protorpclite.descriptor.DescriptorLibrary'
for name in ('', 'a', lib):
self.assertRaisesWithRegexpMatch(
messages.DefinitionNotFoundError,
'Could not find definition for %s' % name,
self.library.lookup_package, name)
def testNoPackage(self):
self.assertRaisesWithRegexpMatch(
messages.DefinitionNotFoundError,
'Could not find definition for not.real',
self.library.lookup_package, 'not.real.Packageless')
self.assertEquals(None, self.library.lookup_package('Packageless'))
if __name__ == '__main__':
unittest2.main()
|
be95059d16202913753f619b854da87b17da9b30
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/test/onnx/internal/test_beartype.py
|
a2bef90510d7400957e2c977b0e2ab175452397d
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
test_beartype.py
|
# Owner(s): ["module: onnx"]
"""Unit tests for the internal beartype wrapper module."""
import unittest
from torch.onnx._internal import _beartype
from torch.testing._internal import common_utils
def beartype_installed():
try:
import beartype # noqa: F401
except ImportError:
return False
return True
def skip_if_beartype_not_installed(test_case):
return unittest.skipIf(not beartype_installed(), "beartype is not installed")(
test_case
)
def func_with_type_hint(x: int) -> int:
return x
def func_with_incorrect_type_hint(x: int) -> str:
return x # type: ignore[return-value]
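# Illustrative sketch (not part of the original test file): the wrapper under
# test produces an ordinary decorator whose strictness depends on the runtime
# type-check state, e.g.
#
#     check = _beartype._create_beartype_decorator(
#         _beartype.RuntimeTypeCheckState.WARNINGS)
#
#     @check
#     def add(x: int, y: int) -> int:
#         return x + y
#
# With WARNINGS, a call violating the hints emits CallHintViolationWarning;
# with ERRORS it raises; with DISABLED the decorator is a no-op.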
@common_utils.instantiate_parametrized_tests
class TestBeartype(common_utils.TestCase):
def test_create_beartype_decorator_returns_no_op_decorator_when_disabled(self):
decorator = _beartype._create_beartype_decorator(
_beartype.RuntimeTypeCheckState.DISABLED,
)
decorated = decorator(func_with_incorrect_type_hint)
decorated("string_input") # type: ignore[arg-type]
@skip_if_beartype_not_installed
def test_create_beartype_decorator_warns_when_warnings(self):
decorator = _beartype._create_beartype_decorator(
_beartype.RuntimeTypeCheckState.WARNINGS,
)
decorated = decorator(func_with_incorrect_type_hint)
with self.assertWarns(_beartype.CallHintViolationWarning):
decorated("string_input") # type: ignore[arg-type]
@common_utils.parametrize("arg", [1, "string_input"])
@skip_if_beartype_not_installed
def test_create_beartype_decorator_errors_when_errors(self, arg):
import beartype
decorator = _beartype._create_beartype_decorator(
_beartype.RuntimeTypeCheckState.ERRORS,
)
decorated = decorator(func_with_incorrect_type_hint)
with self.assertRaises(beartype.roar.BeartypeCallHintViolation):
decorated(arg)
@skip_if_beartype_not_installed
def test_create_beartype_decorator_warning_calls_function_once(self):
call_count = 0
def func_with_incorrect_type_hint_and_side_effect(x: int) -> str:
nonlocal call_count
call_count += 1
return x # type: ignore[return-value]
decorator = _beartype._create_beartype_decorator(
_beartype.RuntimeTypeCheckState.WARNINGS,
)
decorated = decorator(func_with_incorrect_type_hint_and_side_effect)
decorated("string_input") # type: ignore[arg-type]
self.assertEqual(call_count, 1)
decorated(1)
# The return value violates the type hint, but the function is called
# only once.
self.assertEqual(call_count, 2)
if __name__ == "__main__":
common_utils.run_tests()
|
281332ad10199cd735348c869372239ecedc40fc
|
d571d407cfda435fcab8b7ccadb1be812c7047c7
|
/guild/flag_util.py
|
ec50b6a2bf8f48fa03c949684eac515f507ad629
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
guildai/guildai
|
2d8661a2a6bf0d1ced6334095c8bf5a8e391d8af
|
149055da49f57eaf4aec418f2e339c8905c1f02f
|
refs/heads/main
| 2023-08-25T10:09:58.560059
| 2023-08-12T20:19:05
| 2023-08-12T20:19:05
| 105,057,392
| 833
| 86
|
Apache-2.0
| 2023-08-07T19:34:27
| 2017-09-27T18:57:50
|
Python
|
UTF-8
|
Python
| false
| false
| 13,026
|
py
|
flag_util.py
|
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import yaml
from guild import util
from guild import yaml_util
log = logging.getLogger("guild")
NAMED_FUNCTION_P = re.compile(r"([a-zA-Z0-9_\-\.]+)\[(.*)\]")
LIST_CONCAT_P = re.compile(r"(\[.*\])\s*\*\s*([0-9]+)$")
FUNCTION_ARG_DELIM = ":"
SEQUENCE_FLAG_FUNCTIONS = ("range", "linspace", "geomspace", "logspace")
DEFAULT_FLOAT_TRUNC_LEN = 5
DEFAULT_SHORTENED_PATH_LEN = 20
def encode_flag_val(val):
if val is True:
return "yes"
if val is False:
return "no"
if val is None:
return "null"
if isinstance(val, list):
return _encode_list(val)
if isinstance(val, dict):
return _encode_dict(val)
return yaml_util.encode_yaml(val, default_flow_style=True)
def _encode_list(val_list):
joined = ", ".join([_encode_list_item(val) for val in val_list])
return f"[{joined}]"
def _encode_list_item(val):
encoded = encode_flag_val(val)
if isinstance(val, str) and "," in encoded:
return repr(encoded)
return encoded
def _encode_dict(d):
encoded_kv = [
(encode_flag_val(k), encode_flag_val(v)) for k, v in sorted(d.items())
]
dict_body = ", ".join([f"{key}: {val}" for key, val in encoded_kv])
return f"{{{dict_body}}}"
def decode_flag_val(s, flag_type=None):
decoded = _decode_flag_val(s, flag_type)
return _fix_surprising_number(decoded, s)
def _decode_flag_val(s, flag_type=None):
if s == "":
return s
decoders = _flag_decoders_for_type(flag_type)
for f, e_type in decoders:
try:
return f(s)
except e_type:
pass
except Exception as e:
log.warning("error decoding %r: %s", s, e)
return s
def _flag_decoders_for_type(flag_type):
return _base_decoders_for_type(flag_type) + _default_flag_decoders()
def _base_decoders_for_type(flag_type):
"""Return a list of base decoders for a flag type.
With an explicit flag type, we can provide decoders applicable for
the type without having to test decoding.
"""
if flag_type in (None, "auto"):
return []
if flag_type in ("string", "path", "existing-path"):
return [(_string_type, ValueError)]
if flag_type == "int":
return [(int, ValueError)]
if flag_type == "float":
return [(float, ValueError)]
if flag_type == "number":
return _number_decoders()
if flag_type == "boolean":
return [(_boolean_type, (ValueError, yaml.YAMLError))]
log.warning("uknown flag type %s, assuming 'auto'", flag_type)
return []
def _string_type(s):
# Special handling for strings that look like they're formatted as
# YAML.
if s[:1] in ("[", "'", "\"", "{"):
raise ValueError()
return str(s)
def _boolean_type(s):
val = yaml_util.decode_yaml(s)
if isinstance(val, (bool, int, float)):
return bool(val)
return val
def _default_flag_decoders():
return _number_decoders() + [
(_flag_function_or_expanded_sequence, ValueError),
(_concatenated_list, ValueError),
(yaml_util.decode_yaml, (ValueError, yaml.YAMLError)),
]
def _number_decoders():
# Order matters - try int first as float succeeds with ints.
return [(int, ValueError), (float, ValueError)]
def _flag_function_or_expanded_sequence(s):
"""Returns a flag function spec or expanded function if applicable.
If s can be decoded as a flag function it is returned as a string
value to prevent it from being decoded by YAML, which can
accidentally decode it as a list (see below for details).
If s can be expanded to a sequence, the return value is a list
containing the expanded items.
Guild treats "[1:2]" as an anonymous flag function, which is
passed along as a string to be decoded downstream. However, YAML
treats "[1:2]" as a list containing a time value (one minute, two
seconds), and therefore returns the value `[62]`. This function
ensures that anonymous flag functions are returned as decoded
string values. Provided this function is used prior to YAML
decoding, the value is decoded correctly.
Raises ValueError if s is not a special flag function.
Raises TypeError if the arguments provided to flag function are
invalid.
"""
name, args = decode_flag_function(s)
if _is_anonymous_flag_function(name, args):
return s
if _is_sequence_flag_function(name):
return _expand_sequence(name, args)
raise ValueError(s)
def _is_anonymous_flag_function(name, args):
return name is None and len(args) >= 2
def _is_sequence_flag_function(name):
return name in SEQUENCE_FLAG_FUNCTIONS
def _expand_sequence(name, args):
if name == "range":
return _expand_range(*args)
if name == "linspace":
return _expand_linspace(*args)
if name == "logspace":
return _expand_logspace(*args)
assert False, name
def _expand_range(*args):
import numpy as np
start, end, step = _expand_range_args(*args)
return [_np_seq_val(x) for x in np.arange(start, end, step)]
def _expand_range_args(start=None, end=None, step=1, *rest):
if rest:
log.warning("unsupported arguments for range function: %s - ignoring", rest)
_assert_required_function_args(start)
_assert_numeric_function_args(start, step)
if end is not None:
_assert_numeric_function_args(end)
end = end + min(step, 1)
return start, end, step
def _assert_required_function_args(*args):
for arg in args:
if arg is None:
raise TypeError(f"function requires at least {len(args)} arg(s)")
def _assert_numeric_function_args(*args):
for arg in args:
if not isinstance(arg, (int, float)):
raise TypeError(f"invalid arg {arg!r}: expected a number")
def _np_seq_val(x):
x = x.item()
if isinstance(x, float) and x > 1e-8:
return round(x, 8)
return x
def _expand_linspace(*args):
import numpy as np
start, end, count = _expand_linspace_args(*args)
return [_np_seq_val(x) for x in np.linspace(start, end, count)]
def _expand_linspace_args(start=None, end=None, count=5, *rest):
if rest:
log.warning("unsupported arguments for linspace function: %s - ignoring", rest)
_assert_required_function_args(start, end)
_assert_numeric_function_args(start, end, count)
return start, end, count
def _expand_logspace(*args):
import numpy as np
start, end, count, base = _expand_logspace_args(*args)
return [_np_seq_val(x) for x in np.logspace(start, end, count, base=base)]
def _expand_logspace_args(start=None, end=None, count=5, base=10, *rest):
if rest:
log.warning("unsupported arguments for logspace function: %s - ignoring", rest)
_assert_required_function_args(start, end)
_assert_numeric_function_args(start, end, count, base)
return start, end, count, base
def _concatenated_list(s):
"""Expands list concatantion syntax to a list of repeating values.
For example, the value "[1]*3" is expanded to `[1,1,1]`, the value
"[1,2]*2" to `[1,2,1,2]` and so on.
"""
m = LIST_CONCAT_P.match(s.strip())
if not m:
raise ValueError(s)
maybe_list = _decode_flag_val(m.group(1))
if isinstance(maybe_list, list):
return maybe_list * int(m.group(2))
return s
def _fix_surprising_number(val, s):
"""Returns s in cases where val is a surprising result given s.
Surprising results are numeric values for strings that contain '_'
and ':' chars. Later versions of Python for example treat '1_2_3'
    as the integer 123. YAML treats '1:12' as a time value.
"""
if (
isinstance(val, (int, float)) and "!!" not in s
and _contains_non_numeric_chars(s)
):
return s
return val
def _contains_non_numeric_chars(s):
for char in s:
if char in ("_", ":"):
return True
return False
def decode_flag_function(s):
if not isinstance(s, str):
raise ValueError("requires string")
name, args_raw = _split_flag_function(s)
args_encoded = args_raw.split(FUNCTION_ARG_DELIM) if args_raw else []
args = [decode_flag_val(encoded.strip()) for encoded in args_encoded]
return name, tuple(args)
def _split_flag_function(s):
result = util.find_apply([_split_named_function, _split_anonymous_function], s)
if result is None:
raise ValueError("not a function")
return result
def _split_named_function(s):
m = NAMED_FUNCTION_P.match(s)
if not m:
return None
return m.group(1), m.group(2).strip()
def _split_anonymous_function(s):
# Regex is not sufficient to handle differences between anonymous
# functions and YAML encoded lists. We perform a sniff test to see
# if it might be an anonymous function and then confirm by
# decoding it as YAML and testing the result.
if s[:1] == "[" and s[-1:] == "]" and ":" in s:
try:
l = yaml_util.decode_yaml(s)
except Exception:
return None, s[1:-1]
else:
if len(l) == 1 and isinstance(l[0], (str, int)):
return None, s[1:-1]
return None
def is_flag_function(val):
if not isinstance(val, str):
return False
try:
_split_flag_function(val)
except ValueError:
return False
else:
return True
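# Editor's note: a hedged sketch of the flag-function syntax handled above. The
# exact grammar depends on NAMED_FUNCTION_P and FUNCTION_ARG_DELIM, which are
# defined earlier in this module; it is assumed here that they accept values
# such as "range[1:4]" (a name followed by ':'-delimited args in brackets):
#
#     >>> decode_flag_function("range[1:4]")   # assumed syntax
#     ('range', (1, 4))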
def flag_assigns(flags, truncate_floats=False, shorten_paths=False):
"""Returns a list of formatted flags for a map of flags.
Formatted flags are sorted by flag name and have the form
NAME=FORMATTED_VALUE.
"""
return [
flag_assign(name, val, truncate_floats, shorten_paths)
for name, val in sorted(flags.items())
]
def flag_assign(name, val, truncate_floats=False, shorten_paths=False):
formatted_val = format_flag(val, truncate_floats, shorten_paths)
return f"{name}={formatted_val}"
def format_flag(val, truncate_floats=False, shorten_paths=False):
encoded = encode_flag_val(val)
if truncate_floats and isinstance(val, float):
trunc_len = _trunc_len(truncate_floats)
encoded = _truncate_formatted_float(encoded, trunc_len)
if shorten_paths and _is_path(val):
path_len = _path_len(shorten_paths)
encoded = util.shorten_path(val, path_len)
return _quote_encoded(encoded, val)
def _trunc_len(truncate_floats):
if truncate_floats is True:
return DEFAULT_FLOAT_TRUNC_LEN
if not isinstance(truncate_floats, int):
raise ValueError(
f"invalid value for truncate_floats: {truncate_floats!r} (expected int)"
)
return truncate_floats
def _is_path(val):
return isinstance(val, str) and os.path.sep in val and os.path.exists(val)
def _path_len(shorten_paths):
if shorten_paths is True:
return DEFAULT_SHORTENED_PATH_LEN
if not isinstance(shorten_paths, int):
raise ValueError(
f"invalid value for shorten_paths: {shorten_paths!r} (expected int)"
)
return shorten_paths
def _quote_encoded(encoded, val):
if _needs_quote(encoded, val):
return _quote(encoded)
return encoded
def _needs_quote(encoded, val):
return isinstance(val, str) and encoded[0] not in ("'", "\"") and " " in encoded
def _quote(s):
return repr(s)
def _truncate_formatted_float(s, trunc_len):
parts = re.split(r"(\.[0-9]+)", s)
return "".join([_maybe_truncate_dec_part(part, trunc_len) for part in parts])
def _maybe_truncate_dec_part(part, trunc_len):
if part[:1] != ".":
return part
if len(part) <= trunc_len: # lte to include leading '.'
return part
return part[:trunc_len + 1]
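# Editor's note: a small, hedged illustration of the float-truncation helpers
# above (trunc_len counts decimal digits; the leading '.' is kept):
#
#     >>> _truncate_formatted_float("0.123456789", 3)
#     '0.123'
#     >>> _truncate_formatted_float("lr=0.000012 x=1.25", 4)
#     'lr=0.0000 x=1.25'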
def split_encoded_flag_val(encoded, split_spec):
if split_spec is True or split_spec == "shlex":
return util.shlex_split(encoded)
return _string_split(encoded, split_spec)
def _string_split(encoded, sep):
return [part for part in encoded.split(str(sep)) if part]
def join_splittable_flag_vals(vals, split_spec=None):
encoded_vals = [encode_flag_val(val) for val in vals]
if split_spec in (None, True, "shlex"):
return " ".join([util.shlex_quote(x) for x in encoded_vals])
if isinstance(split_spec, str):
return split_spec.join(encoded_vals)
raise ValueError(f"split_spec must be None, True, or a string: {split_spec!r}")
|
d813a6e62d6cb32edddfa963aa0525ebe0a24b58
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/contrib/sqlalchemy/test_sqlite.py
|
be4a6711b89777cf994307f5d4783899aa01e757
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,922
|
py
|
test_sqlite.py
|
import pytest
from sqlalchemy.exc import OperationalError
from ddtrace.constants import ERROR_MSG
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from .mixins import SQLAlchemyTestMixin
class SQLiteTestCase(SQLAlchemyTestMixin, TracerTestCase):
"""TestCase for the SQLite engine"""
VENDOR = "sqlite"
SQL_DB = ":memory:"
SERVICE = "sqlite"
ENGINE_ARGS = {"url": "sqlite:///:memory:"}
def setUp(self):
super(SQLiteTestCase, self).setUp()
def tearDown(self):
super(SQLiteTestCase, self).tearDown()
def test_engine_execute_errors(self):
# ensures that SQL errors are reported
with pytest.raises(OperationalError):
with self.connection() as conn:
conn.execute("SELECT * FROM a_wrong_table").fetchall()
traces = self.pop_traces()
# trace composition
self.assertEqual(len(traces), 1)
self.assertEqual(len(traces[0]), 1)
span = traces[0][0]
# span fields
assert_is_measured(span)
self.assertEqual(span.name, "{}.query".format(self.VENDOR))
self.assertEqual(span.service, self.SERVICE)
self.assertEqual(span.resource, "SELECT * FROM a_wrong_table")
self.assertEqual(span.get_tag("sql.db"), self.SQL_DB)
self.assertIsNone(span.get_metric("db.row_count"))
self.assertEqual(span.get_tag("component"), "sqlalchemy")
self.assertEqual(span.get_tag("span.kind"), "client")
self.assertEqual(span.span_type, "sql")
self.assertTrue(span.duration > 0)
# check the error
self.assertEqual(span.error, 1)
self.assertEqual(span.get_tag(ERROR_MSG), "no such table: a_wrong_table")
self.assertTrue("OperationalError" in span.get_tag("error.type"))
self.assertTrue("OperationalError: no such table: a_wrong_table" in span.get_tag("error.stack"))
|
9470cd583d73a980e7599337ee8552f6f4a6c1ad
|
1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d
|
/src/cpu/testers/directedtest/RubyDirectedTester.py
|
b9297b058b7266b7353f296e359080fba9ca10c6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
gem5/gem5
|
9ec715ae036c2e08807b5919f114e1d38d189bce
|
48a40cf2f5182a82de360b7efa497d82e06b1631
|
refs/heads/stable
| 2023-09-03T15:56:25.819189
| 2023-08-31T05:53:03
| 2023-08-31T05:53:03
| 27,425,638
| 1,185
| 1,177
|
BSD-3-Clause
| 2023-09-14T08:29:31
| 2014-12-02T09:46:00
|
C++
|
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
RubyDirectedTester.py
|
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from m5.objects.ClockedObject import ClockedObject
class DirectedGenerator(SimObject):
type = "DirectedGenerator"
abstract = True
cxx_header = "cpu/testers/directedtest/DirectedGenerator.hh"
cxx_class = "gem5::DirectedGenerator"
num_cpus = Param.Int("num of cpus")
system = Param.System(Parent.any, "System we belong to")
class SeriesRequestGenerator(DirectedGenerator):
type = "SeriesRequestGenerator"
cxx_header = "cpu/testers/directedtest/SeriesRequestGenerator.hh"
cxx_class = "gem5::SeriesRequestGenerator"
addr_increment_size = Param.Int(64, "address increment size")
num_series = Param.UInt32(
1, "number of different address streams to generate"
)
percent_writes = Param.Percent(50, "percent of access that are writes")
class InvalidateGenerator(DirectedGenerator):
type = "InvalidateGenerator"
cxx_header = "cpu/testers/directedtest/InvalidateGenerator.hh"
cxx_class = "gem5::InvalidateGenerator"
addr_increment_size = Param.Int(64, "address increment size")
class RubyDirectedTester(ClockedObject):
type = "RubyDirectedTester"
cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh"
cxx_class = "gem5::RubyDirectedTester"
cpuPort = VectorRequestPort("the cpu ports")
requests_to_complete = Param.Int("checks to complete")
generator = Param.DirectedGenerator("the request generator")
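# Editor's note: a hedged configuration sketch (not part of the original file).
# A gem5 config script would typically instantiate the tester with one of the
# generators declared above, using only the parameters declared in this file:
#
#     tester = RubyDirectedTester(
#         requests_to_complete=100,
#         generator=SeriesRequestGenerator(num_cpus=2, percent_writes=50),
#     )
#
# Port connections (cpuPort) and the enclosing system are omitted here.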
|
d0757147625278d8ad6ad6a39b3b125d09c443c3
|
187414dcb264fb49d82507a099fd5fdca6e55e38
|
/python/pyspark/pandas/config.py
|
79cb859faa2fca7ac559a3da710356cd5c1f9fcf
|
[
"BSD-3-Clause",
"CC0-1.0",
"CDDL-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/spark
|
8aeba2d80465a262acc95781ede105a5b5886f6d
|
60d8fc49bec5dae1b8cf39a0670cb640b430f520
|
refs/heads/master
| 2023-09-04T04:33:36.058199
| 2023-09-04T03:48:52
| 2023-09-04T03:48:52
| 17,165,658
| 39,983
| 32,449
|
Apache-2.0
| 2023-09-14T19:46:24
| 2014-02-25T08:00:08
|
Scala
|
UTF-8
|
Python
| false
| false
| 18,493
|
py
|
config.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Infrastructure of options for pandas-on-Spark.
"""
from contextlib import contextmanager
import json
from typing import Any, Callable, Dict, Iterator, List, Tuple, Union
from pyspark._globals import _NoValue, _NoValueType
from pyspark.pandas.utils import default_session
__all__ = ["get_option", "set_option", "reset_option", "options", "option_context"]
class Option:
"""
Option class that defines an option with related properties.
This class holds all information relevant to the one option. Also,
    its instance can validate whether a given value is acceptable.
It is currently for internal usage only.
Parameters
----------
key: str, keyword-only argument
the option name to use.
doc: str, keyword-only argument
the documentation for the current option.
default: Any, keyword-only argument
default value for this option.
types: Union[Tuple[type, ...], type], keyword-only argument
default is str. It defines the expected types for this option. It is
used with `isinstance` to validate the given value to this option.
check_func: Tuple[Callable[[Any], bool], str], keyword-only argument
default is a function that always returns `True` with an empty string.
It defines:
- a function to check the given value to this option
- the error message to show when this check is failed
    When a new value is set for this option, this function is called to check
if the given value is valid.
Examples
--------
>>> option = Option(
... key='option.name',
... doc="this is a test option",
... default="default",
... types=(float, int),
... check_func=(lambda v: v > 0, "should be a positive float"))
>>> option.validate('abc') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: The value for option 'option.name' was <class 'str'>;
however, expected types are [(<class 'float'>, <class 'int'>)].
>>> option.validate(-1.1)
Traceback (most recent call last):
...
ValueError: should be a positive float
>>> option.validate(1.1)
"""
def __init__(
self,
*,
key: str,
doc: str,
default: Any,
types: Union[Tuple[type, ...], type] = str,
check_func: Tuple[Callable[[Any], bool], str] = (lambda v: True, ""),
):
self.key = key
self.doc = doc
self.default = default
self.types = types
self.check_func = check_func
def validate(self, v: Any) -> None:
"""
Validate the given value and throw an exception with related information such as key.
"""
if not isinstance(v, self.types):
raise TypeError(
"The value for option '%s' was %s; however, expected types are "
"[%s]." % (self.key, type(v), str(self.types))
)
if not self.check_func[0](v):
raise ValueError(self.check_func[1])
# Available options.
#
# NOTE: if you are fixing or adding an option here, make sure you execute `show_options()` and
# copy & paste the results into
# 'docs/source/user_guide/pandas_on_spark/options.rst' as well.
# See the examples below:
# >>> from pyspark.pandas.config import show_options
# >>> show_options()
_options: List[Option] = [
Option(
key="display.max_rows",
doc=(
"This sets the maximum number of rows pandas-on-Spark should output when printing out "
"various output. For example, this value determines the number of rows to be "
"shown at the repr() in a dataframe. Set `None` to unlimit the input length. "
"Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'display.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.max_rows",
doc=(
"'compute.max_rows' sets the limit of the current pandas-on-Spark DataFrame. "
"Set `None` to unlimit the input length. When the limit is set, it is executed "
"by the shortcut by collecting the data into the driver, and then using the pandas "
"API. If the limit is unset, the operation is executed by PySpark. Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'compute.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.shortcut_limit",
doc=(
"'compute.shortcut_limit' sets the limit for a shortcut. "
"It computes the specified number of rows and uses its schema. When the dataframe "
"length is larger than this limit, pandas-on-Spark uses PySpark to compute."
),
default=1000,
types=int,
check_func=(
lambda v: v >= 0,
"'compute.shortcut_limit' should be greater than or equal to 0.",
),
),
Option(
key="compute.ops_on_diff_frames",
doc=(
"This determines whether or not to operate between two different dataframes. "
"For example, 'combine_frames' function internally performs a join operation which "
"can be expensive in general. So, if `compute.ops_on_diff_frames` variable is not "
"True, that method throws an exception."
),
default=False,
types=bool,
),
Option(
key="compute.default_index_type",
doc=("This sets the default index type: sequence, distributed and distributed-sequence."),
default="distributed-sequence",
types=str,
check_func=(
lambda v: v in ("sequence", "distributed", "distributed-sequence"),
"Index type should be one of 'sequence', 'distributed', 'distributed-sequence'.",
),
),
Option(
key="compute.default_index_cache",
doc=(
"This sets the default storage level for temporary RDDs cached in "
"distributed-sequence indexing: 'NONE', 'DISK_ONLY', 'DISK_ONLY_2', "
"'DISK_ONLY_3', 'MEMORY_ONLY', 'MEMORY_ONLY_2', 'MEMORY_ONLY_SER', "
"'MEMORY_ONLY_SER_2', 'MEMORY_AND_DISK', 'MEMORY_AND_DISK_2', "
"'MEMORY_AND_DISK_SER', 'MEMORY_AND_DISK_SER_2', 'OFF_HEAP', "
"'LOCAL_CHECKPOINT'."
),
default="MEMORY_AND_DISK_SER",
types=str,
check_func=(
lambda v: v
in (
"NONE",
"DISK_ONLY",
"DISK_ONLY_2",
"DISK_ONLY_3",
"MEMORY_ONLY",
"MEMORY_ONLY_2",
"MEMORY_ONLY_SER",
"MEMORY_ONLY_SER_2",
"MEMORY_AND_DISK",
"MEMORY_AND_DISK_2",
"MEMORY_AND_DISK_SER",
"MEMORY_AND_DISK_SER_2",
"OFF_HEAP",
"LOCAL_CHECKPOINT",
),
"Index type should be one of 'NONE', 'DISK_ONLY', 'DISK_ONLY_2', "
"'DISK_ONLY_3', 'MEMORY_ONLY', 'MEMORY_ONLY_2', 'MEMORY_ONLY_SER', "
"'MEMORY_ONLY_SER_2', 'MEMORY_AND_DISK', 'MEMORY_AND_DISK_2', "
"'MEMORY_AND_DISK_SER', 'MEMORY_AND_DISK_SER_2', 'OFF_HEAP', "
"'LOCAL_CHECKPOINT'.",
),
),
Option(
key="compute.ordered_head",
doc=(
"'compute.ordered_head' sets whether or not to operate head with natural ordering. "
"pandas-on-Spark does not guarantee the row ordering so `head` could return some "
"rows from distributed partitions. If 'compute.ordered_head' is set to True, "
"pandas-on-Spark performs natural ordering beforehand, but it will cause a "
"performance overhead."
),
default=False,
types=bool,
),
Option(
key="compute.eager_check",
doc=(
"'compute.eager_check' sets whether or not to launch some Spark jobs just for the sake "
"of validation. If 'compute.eager_check' is set to True, pandas-on-Spark performs the "
"validation beforehand, but it will cause a performance overhead. Otherwise, "
"pandas-on-Spark skip the validation and will be slightly different from pandas. "
"Affected APIs: `Series.dot`, `Series.asof`, `Series.compare`, "
"`FractionalExtensionOps.astype`, `IntegralExtensionOps.astype`, "
"`FractionalOps.astype`, `DecimalOps.astype`, `skipna of statistical functions`."
),
default=True,
types=bool,
),
Option(
key="compute.isin_limit",
doc=(
"'compute.isin_limit' sets the limit for filtering by 'Column.isin(list)'. "
"If the length of the ‘list’ is above the limit, broadcast join is used instead "
"for better performance."
),
default=80,
types=int,
check_func=(
lambda v: v >= 0,
"'compute.isin_limit' should be greater than or equal to 0.",
),
),
Option(
key="plotting.max_rows",
doc=(
"'plotting.max_rows' sets the visual limit on top-n-based plots such as `plot.bar` "
"and `plot.pie`. If it is set to 1000, the first 1000 data points will be used "
"for plotting. Default is 1000."
),
default=1000,
types=int,
check_func=(
lambda v: v >= 0,
"'plotting.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="plotting.sample_ratio",
doc=(
"'plotting.sample_ratio' sets the proportion of data that will be plotted for sample-"
"based plots such as `plot.line` and `plot.area`. "
"This option defaults to 'plotting.max_rows' option."
),
default=None,
types=(float, type(None)),
check_func=(
lambda v: v is None or 1 >= v >= 0,
"'plotting.sample_ratio' should be 1.0 >= value >= 0.0.",
),
),
Option(
key="plotting.backend",
doc=(
"Backend to use for plotting. Default is plotly. "
"Supports any package that has a top-level `.plot` method. "
"Known options are: [matplotlib, plotly]."
),
default="plotly",
types=str,
),
]
_options_dict: Dict[str, Option] = dict(zip((option.key for option in _options), _options))
_key_format = "pandas_on_Spark.{}".format
class OptionError(AttributeError, KeyError):
pass
def show_options() -> None:
"""
Make a pretty table that can be copied and pasted into public documentation.
This is currently for an internal purpose.
Examples
--------
>>> show_options() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
================... =======... =====================...
Option Default Description
================... =======... =====================...
display.max_rows 1000 This sets the maximum...
...
================... =======... =====================...
"""
import textwrap
header = ["Option", "Default", "Description"]
row_format = "{:<31} {:<23} {:<53}"
print(row_format.format("=" * 31, "=" * 23, "=" * 53))
print(row_format.format(*header))
print(row_format.format("=" * 31, "=" * 23, "=" * 53))
for option in _options:
doc = textwrap.fill(option.doc, 53)
formatted = "".join([line + "\n" + (" " * 56) for line in doc.split("\n")]).rstrip()
print(row_format.format(option.key, repr(option.default), formatted))
print(row_format.format("=" * 31, "=" * 23, "=" * 53))
def get_option(key: str, default: Union[Any, _NoValueType] = _NoValue) -> Any:
"""
Retrieves the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
default : object
The default value if the option is not set yet. The value should be JSON serializable.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists and the default is not provided
"""
_check_option(key)
if default is _NoValue:
default = _options_dict[key].default
_options_dict[key].validate(default)
spark_session = default_session()
return json.loads(spark_session.conf.get(_key_format(key), default=json.dumps(default)))
def set_option(key: str, value: Any) -> None:
"""
Sets the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
value : object
New value of option. The value should be JSON serializable.
Returns
-------
None
"""
_check_option(key)
_options_dict[key].validate(value)
spark_session = default_session()
spark_session.conf.set(_key_format(key), json.dumps(value))
def reset_option(key: str) -> None:
"""
    Reset one option to its default value.
Pass "all" as an argument to reset all options.
Parameters
----------
key : str
If specified only option will be reset.
Returns
-------
None
"""
_check_option(key)
default_session().conf.unset(_key_format(key))
@contextmanager
def option_context(*args: Any) -> Iterator[None]:
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'compute.max_rows', 5):
... print(get_option('display.max_rows'), get_option('compute.max_rows'))
10 5
>>> print(get_option('display.max_rows'), get_option('compute.max_rows'))
1000 1000
"""
if len(args) == 0 or len(args) % 2 != 0:
raise ValueError("Need to invoke as option_context(pat, val, [(pat, val), ...]).")
opts = dict(zip(args[::2], args[1::2]))
orig_opts = {key: get_option(key) for key in opts}
try:
for key, value in opts.items():
set_option(key, value)
yield
finally:
for key, value in orig_opts.items():
set_option(key, value)
def _check_option(key: str) -> None:
if key not in _options_dict:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
class DictWrapper:
"""provide attribute-style access to a nested dict"""
def __init__(self, d: Dict[str, Option], prefix: str = ""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key: str, val: Any) -> None:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
set_option(canonical_key, val)
else:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
def __getattr__(self, key: str) -> Union["DictWrapper", Any]:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return get_option(canonical_key)
elif len(candidates) == 0:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
else:
return DictWrapper(d, canonical_key)
def __dir__(self) -> List[str]:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix == "":
candidates = d.keys()
offset = 0
else:
candidates = [k for k in d.keys() if all(x in k.split(".") for x in prefix.split("."))]
offset = len(prefix) + 1 # prefix (e.g. "compute.") to trim.
return [c[offset:] for c in candidates]
options = DictWrapper(_options_dict)
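# Editor's note: a hedged usage sketch of the attribute-style access provided
# by `options` (requires an active Spark session, since values are stored in
# the session conf); reads and writes route through get_option/set_option:
#
#     >>> from pyspark.pandas import config
#     >>> config.options.display.max_rows = 10   # same as set_option("display.max_rows", 10)
#     >>> config.options.display.max_rows        # same as get_option("display.max_rows")
#     10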
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.config
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.config.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.config tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.config,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
19fd9b8b5d114084a0ec408bb3526b63cc61bb1b
|
9302b341ccdc78745a97ee3d2cd3c0e8ba3f80c9
|
/tests/fields/test_migrations_encrypted_default/0002_integerencryptedmodel_field_2.py
|
2bec6bf229b6a09a82bf42868cacb39780453a72
|
[] |
permissive
|
georgemarshall/django-cryptography
|
04188b94c9ed8d8a61c82c8382427510d85ff5b9
|
e2a42e1132fa28526002983063febaabbecdd660
|
refs/heads/master
| 2023-04-29T14:48:52.475078
| 2023-04-19T03:52:52
| 2023-04-19T03:52:52
| 52,944,543
| 359
| 70
|
BSD-3-Clause
| 2023-09-11T05:09:52
| 2016-03-02T07:55:01
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
0002_integerencryptedmodel_field_2.py
|
from django.db import migrations, models
import django_cryptography.fields
class Migration(migrations.Migration):
dependencies = [
("fields", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="integerencrypteddefaultmodel",
name="field_2",
field=django_cryptography.fields.encrypt(
models.IntegerField(max_length=50, blank=True)
),
preserve_default=False,
),
]
|
3ab9babcaa4a669aab947bf1b0369645bd4e3b30
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/ChainedComparison5.py
|
7bd80dd74807791c627737357c09d3538f0b64e0
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
ChainedComparison5.py
|
mapsize = 35
def test(x, y):
if <weak_warning descr="Simplify chained comparison">0 <= x < <caret>mapsize and y >= 0 and y < mapsize</weak_warning>:
return 1
|
c977a1ed27694d5ad8d668a6a1a791f1cd18d673
|
267ec08897c53cab1c3781ba0b6d061cb4410e84
|
/hypertools/tools/format_data.py
|
bdd7b4f626d2d1e18508570b431fba32f989f1d9
|
[
"MIT"
] |
permissive
|
ContextLab/hypertools
|
dd0fa798296b7b9b9d5f17a37086465ade6ca6a0
|
564c1d43da447da68ce3d76f51306725291630e0
|
refs/heads/master
| 2023-08-25T07:28:00.791443
| 2022-02-12T02:32:06
| 2022-02-12T02:32:06
| 69,400,415
| 1,768
| 189
|
MIT
| 2023-08-06T22:25:57
| 2016-09-27T21:31:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,299
|
py
|
format_data.py
|
import warnings
import numpy as np
from .._externals.ppca import PPCA
from .._shared.helpers import get_type
def format_data(x, vectorizer='CountVectorizer',
semantic='LatentDirichletAllocation', corpus='wiki', ppca=True, text_align='hyper'):
"""
Formats data into a list of numpy arrays
This function is useful to identify rows of your array that contain missing
data or nans. The returned indices can be used to remove the rows with
missing data, or label the missing data points that are interpolated
using PPCA.
Parameters
----------
x : numpy array, dataframe, string or (mixed) list
The data to convert
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
ppca : bool
Performs PPCA to fill in missing values (default: True)
text_align : str
Alignment algorithm to use when both text and numerical data are passed.
If numerical arrays have the same shape, and the text data contains the
same number of samples, the text and numerical data are automatically
aligned to a common space. Example use case: an array of movie frames
(frames by pixels) and text descriptions of the frame. In this case,
the movie and text will be automatically aligned to the same space
(default: hyperalignment).
Returns
----------
data : list of numpy arrays
A list of formatted arrays
"""
    # Not sure why the import needs to happen here, but it's the only way I could get it to work
from .df2mat import df2mat
from .text2mat import text2mat
from ..datageometry import DataGeometry
# if x is not a list, make it one
if type(x) is not list:
x = [x]
if all([isinstance(xi, str) for xi in x]):
x = [x]
# check data type for each element in list
dtypes = list(map(get_type, x))
# handle text data:
if any(map(lambda x: x in ['list_str', 'str', 'arr_str'], dtypes)):
# default text args
text_args = {
'vectorizer' : vectorizer,
'semantic' : semantic,
'corpus' : corpus
}
# filter text data
text_data = []
for i,j in zip(x, dtypes):
if j in ['list_str', 'str', 'arr_str']:
text_data.append(np.array(i).reshape(-1, 1))
# convert text to numerical matrices
text_data = text2mat(text_data, **text_args)
# replace the text data with transformed data
processed_x = []
textidx=0
for i, dtype in enumerate(dtypes):
if dtype in ['list_str', 'str', 'arr_str']:
processed_x.append(text_data[textidx])
textidx+=1
elif dtype == 'df':
processed_x.append(df2mat(x[i]))
elif dtype == 'geo':
text_args = {
'vectorizer' : vectorizer,
'semantic' : semantic,
'corpus' : corpus
}
for j in format_data(x[i].get_data(), **text_args):
processed_x.append(j)
else:
processed_x.append(x[i])
# reshape anything that is 1d
if any([i.ndim<=1 for i in processed_x]):
processed_x = [np.reshape(i,(i.shape[0],1)) if i.ndim==1 else i for i in processed_x]
contains_text = any([dtype in ['list_str', 'str', 'arr_str'] for dtype in dtypes])
contains_num = any([dtype in ['list_num', 'array', 'df', 'arr_num'] for dtype in dtypes])
# if there are any nans in any of the lists, use ppca
if ppca is True:
if contains_num:
num_data = []
for i,j in zip(processed_x, dtypes):
if j in ['list_num', 'array', 'df', 'arr_num']:
num_data.append(i)
if np.isnan(np.vstack(num_data)).any():
warnings.warn('Missing data: Inexact solution computed with PPCA (see https://github.com/allentran/pca-magic for details)')
num_data = fill_missing(num_data)
x_temp = []
for dtype in dtypes:
if dtype in ['list_str', 'str', 'arr_str']:
x_temp.append(text_data.pop(0))
elif dtype in ['list_num', 'array', 'df', 'arr_num']:
x_temp.append(num_data.pop(0))
processed_x = x_temp
# if input data contains both text and numerical data
if contains_num and contains_text:
# and if they have the same number of samples
if np.unique(np.array([i.shape[0] for i, j in zip(processed_x, dtypes)])).shape[0] == 1:
from .align import align as aligner
# align the data
warnings.warn('Numerical and text data with same number of '
'samples detected. Aligning data to a common space.')
processed_x = aligner(processed_x, align=text_align, format_data=False)
return processed_x
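# Editor's note: a hedged usage sketch of format_data for purely numerical
# input (no text, no missing values); per the docstring above, the return
# value is a list of formatted numpy arrays:
#
#     >>> import numpy as np
#     >>> out = format_data(np.random.rand(10, 3))
#     >>> isinstance(out, list), out[0].shape
#     (True, (10, 3))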
def fill_missing(x):
# ppca if missing data
m = PPCA()
m.fit(data=np.vstack(x))
x_pca = m.transform()
# if the whole row is missing, return nans
    all_missing = [idx for idx, a in enumerate(np.vstack(x)) if np.isnan(a).all()]
if len(all_missing)>0:
for i in all_missing:
x_pca[i, :] = np.nan
# get the original lists back
if len(x)>1:
x_split = np.cumsum([i.shape[0] for i in x][:-1])
return list(np.split(x_pca, x_split, axis=0))
else:
return [x_pca]
|
80123be9221de26d1b01c1591c021276f20240cd
|
6a468c1650b3c083f102f19ace0b0d6e4d0686f7
|
/sympy/physics/quantum/matrixutils.py
|
081db881e56789c6a802c3a599a5d3f5f1bd465f
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
sympy/sympy
|
a5f8accaa7686c59d9b5c94212fef60d746dac4b
|
69f98fb2b0d845e76874067a381dba37b577e8c5
|
refs/heads/master
| 2023-09-01T15:51:37.886107
| 2023-08-31T20:54:33
| 2023-08-31T20:54:33
| 640,534
| 10,928
| 5,362
|
NOASSERTION
| 2023-09-14T17:29:13
| 2010-04-30T20:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,213
|
py
|
matrixutils.py
|
"""Utilities to deal with sympy.Matrix, numpy and scipy.sparse."""
from sympy.core.expr import Expr
from sympy.core.numbers import I
from sympy.core.singleton import S
from sympy.matrices.matrices import MatrixBase
from sympy.matrices import eye, zeros
from sympy.external import import_module
__all__ = [
'numpy_ndarray',
'scipy_sparse_matrix',
'sympy_to_numpy',
'sympy_to_scipy_sparse',
'numpy_to_sympy',
'scipy_sparse_to_sympy',
'flatten_scalar',
'matrix_dagger',
'to_sympy',
'to_numpy',
'to_scipy_sparse',
'matrix_tensor_product',
'matrix_zeros'
]
# Conditionally define the base classes for numpy and scipy.sparse arrays
# for use in isinstance tests.
np = import_module('numpy')
if not np:
class numpy_ndarray:
pass
else:
numpy_ndarray = np.ndarray # type: ignore
scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']})
if not scipy:
class scipy_sparse_matrix:
pass
sparse = None
else:
sparse = scipy.sparse
scipy_sparse_matrix = sparse.spmatrix # type: ignore
def sympy_to_numpy(m, **options):
"""Convert a SymPy Matrix/complex number to a numpy matrix or scalar."""
if not np:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, MatrixBase):
return np.array(m.tolist(), dtype=dtype)
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected MatrixBase or complex scalar, got: %r' % m)
def sympy_to_scipy_sparse(m, **options):
"""Convert a SymPy Matrix/complex number to a numpy matrix or scalar."""
if not np or not sparse:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, MatrixBase):
return sparse.csr_matrix(np.array(m.tolist(), dtype=dtype))
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected MatrixBase or complex scalar, got: %r' % m)
def scipy_sparse_to_sympy(m, **options):
"""Convert a scipy.sparse matrix to a SymPy matrix."""
return MatrixBase(m.todense())
def numpy_to_sympy(m, **options):
"""Convert a numpy matrix to a SymPy matrix."""
return MatrixBase(m)
def to_sympy(m, **options):
"""Convert a numpy/scipy.sparse matrix to a SymPy matrix."""
if isinstance(m, MatrixBase):
return m
elif isinstance(m, numpy_ndarray):
return numpy_to_sympy(m)
elif isinstance(m, scipy_sparse_matrix):
return scipy_sparse_to_sympy(m)
elif isinstance(m, Expr):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_numpy(m, **options):
"""Convert a sympy/scipy.sparse matrix to a numpy matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (MatrixBase, Expr)):
return sympy_to_numpy(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
return m
elif isinstance(m, scipy_sparse_matrix):
return m.todense()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_scipy_sparse(m, **options):
"""Convert a sympy/numpy matrix to a scipy.sparse matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (MatrixBase, Expr)):
return sympy_to_scipy_sparse(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
if not sparse:
raise ImportError
return sparse.csr_matrix(m)
elif isinstance(m, scipy_sparse_matrix):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def flatten_scalar(e):
"""Flatten a 1x1 matrix to a scalar, return larger matrices unchanged."""
if isinstance(e, MatrixBase):
if e.shape == (1, 1):
e = e[0]
if isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
if e.shape == (1, 1):
e = complex(e[0, 0])
return e
def matrix_dagger(e):
"""Return the dagger of a sympy/numpy/scipy.sparse matrix."""
if isinstance(e, MatrixBase):
return e.H
elif isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
return e.conjugate().transpose()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % e)
# TODO: Move this into sympy.matricies.
def _sympy_tensor_product(*matrices):
"""Compute the kronecker product of a sequence of SymPy Matrices.
"""
from sympy.matrices.expressions.kronecker import matrix_kronecker_product
return matrix_kronecker_product(*matrices)
def _numpy_tensor_product(*product):
"""numpy version of tensor product of multiple arguments."""
if not np:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = np.kron(answer, item)
return answer
def _scipy_sparse_tensor_product(*product):
"""scipy.sparse version of tensor product of multiple arguments."""
if not sparse:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = sparse.kron(answer, item)
# The final matrices will just be multiplied, so csr is a good final
# sparse format.
return sparse.csr_matrix(answer)
def matrix_tensor_product(*product):
"""Compute the matrix tensor product of sympy/numpy/scipy.sparse matrices."""
if isinstance(product[0], MatrixBase):
return _sympy_tensor_product(*product)
elif isinstance(product[0], numpy_ndarray):
return _numpy_tensor_product(*product)
elif isinstance(product[0], scipy_sparse_matrix):
return _scipy_sparse_tensor_product(*product)
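# Editor's note: a hedged illustration of matrix_tensor_product on the SymPy
# branch (uses `eye` imported above); the Kronecker product of two 2x2
# identity matrices is the 4x4 identity:
#
#     >>> matrix_tensor_product(eye(2), eye(2)) == eye(4)
#     True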
def _numpy_eye(n):
"""numpy version of complex eye."""
if not np:
raise ImportError
return np.array(np.eye(n, dtype='complex'))
def _scipy_sparse_eye(n):
"""scipy.sparse version of complex eye."""
if not sparse:
raise ImportError
return sparse.eye(n, n, dtype='complex')
def matrix_eye(n, **options):
"""Get the version of eye and tensor_product for a given format."""
format = options.get('format', 'sympy')
if format == 'sympy':
return eye(n)
elif format == 'numpy':
return _numpy_eye(n)
elif format == 'scipy.sparse':
return _scipy_sparse_eye(n)
raise NotImplementedError('Invalid format: %r' % format)
def _numpy_zeros(m, n, **options):
"""numpy version of zeros."""
dtype = options.get('dtype', 'float64')
if not np:
raise ImportError
return np.zeros((m, n), dtype=dtype)
def _scipy_sparse_zeros(m, n, **options):
"""scipy.sparse version of zeros."""
spmatrix = options.get('spmatrix', 'csr')
dtype = options.get('dtype', 'float64')
if not sparse:
raise ImportError
if spmatrix == 'lil':
return sparse.lil_matrix((m, n), dtype=dtype)
elif spmatrix == 'csr':
return sparse.csr_matrix((m, n), dtype=dtype)
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
    raise NotImplementedError('Invalid format: %r' % format)
def _numpy_matrix_to_zero(e):
"""Convert a numpy zero matrix to the zero scalar."""
if not np:
raise ImportError
test = np.zeros_like(e)
if np.allclose(e, test):
return 0.0
else:
return e
def _scipy_sparse_matrix_to_zero(e):
"""Convert a scipy.sparse zero matrix to the zero scalar."""
if not np:
raise ImportError
edense = e.todense()
test = np.zeros_like(edense)
if np.allclose(edense, test):
return 0.0
else:
return e
def matrix_to_zero(e):
"""Convert a zero matrix to the scalar zero."""
if isinstance(e, MatrixBase):
if zeros(*e.shape) == e:
e = S.Zero
elif isinstance(e, numpy_ndarray):
e = _numpy_matrix_to_zero(e)
elif isinstance(e, scipy_sparse_matrix):
e = _scipy_sparse_matrix_to_zero(e)
return e
|
b25d847d29acf65738fef2d9dabdf7491fbfcf48
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/numpy/lib/tests/test_utils.py
|
bceff04d518516c0c3852f285fe11c12b2077a20
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
test_utils.py
|
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_raises_regex
import numpy.lib._utils_impl as _utils_impl
from io import StringIO
class TestByteBounds:
def test_byte_bounds(self):
# pointer difference matches size * itemsize
# due to contiguity
a = np.arange(12).reshape(3, 4)
low, high = np.byte_bounds(a)
assert_equal(high - low, a.size * a.itemsize)
def test_unusual_order_positive_stride(self):
a = np.arange(12).reshape(3, 4)
b = a.T
low, high = np.byte_bounds(b)
assert_equal(high - low, b.size * b.itemsize)
def test_unusual_order_negative_stride(self):
a = np.arange(12).reshape(3, 4)
b = a.T[::-1]
low, high = np.byte_bounds(b)
assert_equal(high - low, b.size * b.itemsize)
def test_strided(self):
a = np.arange(12)
b = a[::2]
low, high = np.byte_bounds(b)
# the largest pointer address is lost (even numbers only in the
# stride), and compensate addresses for striding by 2
assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
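        # (editor's note) concretely, assuming 8-byte integers: b has 6
        # elements strided by 2, so the bounds span 11 slots of 8 bytes,
        # i.e. high - low == 6*2*8 - 8 == 88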
def test_assert_raises_regex_context_manager():
with assert_raises_regex(ValueError, 'no deprecation warning'):
raise ValueError('no deprecation warning')
def test_info_method_heading():
# info(class) should only print "Methods:" heading if methods exist
class NoPublicMethods:
pass
class WithPublicMethods:
def first_method():
pass
def _has_method_heading(cls):
out = StringIO()
np.info(cls, output=out)
return 'Methods:' in out.getvalue()
assert _has_method_heading(WithPublicMethods)
assert not _has_method_heading(NoPublicMethods)
def test_drop_metadata():
def _compare_dtypes(dt1, dt2):
return np.can_cast(dt1, dt2, casting='no')
# structured dtype
dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],
metadata={'msg': 'titi'})
dt_m = _utils_impl.drop_metadata(dt)
assert _compare_dtypes(dt, dt_m) is True
assert dt_m.metadata is None
assert dt_m['l1'].metadata is None
assert dt_m['l1']['l2'].metadata is None
# alignment
dt = np.dtype([('x', '<f8'), ('y', '<i4')],
align=True,
metadata={'msg': 'toto'})
dt_m = _utils_impl.drop_metadata(dt)
assert _compare_dtypes(dt, dt_m) is True
assert dt_m.metadata is None
# subdtype
dt = np.dtype('8f',
metadata={'msg': 'toto'})
dt_m = _utils_impl.drop_metadata(dt)
assert _compare_dtypes(dt, dt_m) is True
assert dt_m.metadata is None
# scalar
dt = np.dtype('uint32',
metadata={'msg': 'toto'})
dt_m = _utils_impl.drop_metadata(dt)
assert _compare_dtypes(dt, dt_m) is True
assert dt_m.metadata is None
@pytest.mark.parametrize("dtype",
[np.dtype("i,i,i,i")[["f1", "f3"]],
np.dtype("f8"),
np.dtype("10i")])
def test_drop_metadata_identity_and_copy(dtype):
# If there is no metadata, the identity is preserved:
assert _utils_impl.drop_metadata(dtype) is dtype
# If there is any, it is dropped (subforms are checked above)
dtype = np.dtype(dtype, metadata={1: 2})
assert _utils_impl.drop_metadata(dtype).metadata is None
|
9230911ea271d4eb8b5de679928a924b7d44f20e
|
dbb120cceaed09027f250bedbb6f5a8c5d4c71f5
|
/netket/vqs/mc/mc_mixed_state/expect_chunked.py
|
1631035b8933f01571f99480fa311163f6cdfde1
|
[
"Apache-2.0"
] |
permissive
|
netket/netket
|
b0ec4dc6e0ed5493299a38b8dbfd06e9f946e3b3
|
f4f2844739302fd7e044b722eae8a93d0bfc59ec
|
refs/heads/master
| 2023-08-29T12:03:29.446789
| 2023-08-20T10:21:41
| 2023-08-20T10:21:41
| 130,741,783
| 467
| 181
|
Apache-2.0
| 2023-09-14T20:40:47
| 2018-04-23T18:48:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
expect_chunked.py
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netket.utils.dispatch import dispatch
from netket.operator import (
AbstractSuperOperator,
DiscreteOperator,
Squared,
)
from netket.vqs.mc import kernels, get_local_kernel
from .state import MCMixedState
# Dispatches to select what expect-kernel to use
@dispatch
def get_local_kernel( # noqa: F811
vstate: MCMixedState, Ô: Squared[AbstractSuperOperator], chunk_size: int
):
return kernels.local_value_squared_kernel_chunked
@dispatch
def get_local_kernel( # noqa: F811
vstate: MCMixedState, Ô: DiscreteOperator, chunk_size: int
):
return kernels.local_value_op_op_cost_chunked
|
4e96246e4047cd51ea1d795778c07e7ea18864e4
|
8d1c7fba7cd15f8a1e33fd27d11eefd1c67d579f
|
/examples/py_native/fail.py
|
98e35f4ee7fc093b1230ebaef82c1b11ffa8b1a2
|
[
"Apache-2.0"
] |
permissive
|
bazelbuild/bazel
|
5896162455f032efc899b8de60aa39b9d2cad4a6
|
171aae3f9c57b41089e25ec61fc84c35baa3079d
|
refs/heads/master
| 2023-08-22T22:52:48.714735
| 2023-08-22T18:01:53
| 2023-08-22T18:01:53
| 20,773,773
| 20,294
| 4,383
|
Apache-2.0
| 2023-09-14T18:38:44
| 2014-06-12T16:00:38
|
Java
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
fail.py
|
"""A tiny example binary for the native Python rules of Bazel."""
import unittest
from examples.py_native.lib import GetNumber
class TestGetNumber(unittest.TestCase):
def test_fail(self):
    self.assertEqual(GetNumber(), 0)
if __name__ == '__main__':
unittest.main()
|
a56e1d1f8a492a53bbe2140715bfcf9390546af4
|
568a2667a1b6ec33a0dec9ac01844ef74e11ab2b
|
/landlab/graph/voronoi/dual_voronoi.py
|
a412ae608dc69efde575f1754fe451b6505e58b9
|
[
"MIT"
] |
permissive
|
landlab/landlab
|
0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728
|
1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b
|
refs/heads/master
| 2023-08-31T07:24:21.545523
| 2023-08-29T18:51:06
| 2023-08-29T18:51:06
| 19,599,383
| 326
| 313
|
MIT
| 2023-09-14T19:12:23
| 2014-05-09T04:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,475
|
py
|
dual_voronoi.py
|
import numpy as np
from ..dual import DualGraph
from ..graph import Graph
from .voronoi import DelaunayGraph
from .voronoi_to_graph import VoronoiDelaunayToGraph
class DualVoronoiGraph(DualGraph, DelaunayGraph):
def __init__(
self, node_y_and_x, max_node_spacing=None, sort=False, perimeter_links=None
):
"""Create a voronoi grid.
Parameters
----------
nodes : tuple of array_like
Coordinates of every node. First *y*, then *x*.
Examples
--------
>>> from landlab.graph import DualVoronoiGraph
>>> node_x = [0, 1, 2, 3,
... 0.2, 1.2, 2.2, 3.2,
... 0.4, 1.4, 2.4, 3.4]
>>> node_y = [0, 0, 0, 0,
... 1, 1, 1, 1,
... 2, 2, 2, 2]
>>> graph = DualVoronoiGraph((node_y, node_x), sort=True)
>>> graph.x_of_corner
array([ 0.5, 1.5, 2.5, 0.7, 1.7, 2.7, 0.7, 1.7, 2.7, 0.9, 1.9,
2.9])
>>> graph.y_of_corner # doctest: +NORMALIZE_WHITESPACE
array([ 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 1.42, 1.42, 1.42,
1.58, 1.58, 1.58])
>>> graph.corners_at_face # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 3], [ 3, 1], [ 1, 4], [ 4, 2], [ 2, 5],
[ 3, 6], [ 4, 7], [ 5, 8],
[ 6, 9], [ 9, 7], [ 7, 10], [10, 8], [ 8, 11]])
>>> graph.faces_at_corner # doctest: +NORMALIZE_WHITESPACE
array([[ 0, -1, -1], [ 2, 1, -1], [ 4, 3, -1],
[ 5, 0, 1], [ 6, 2, 3], [ 7, 4, -1],
[ 8, 5, -1], [10, 9, 6], [12, 11, 7],
[ 8, 9, -1], [10, 11, -1], [12, -1, -1]])
>>> graph.node_at_cell
array([5, 6])
"""
mesh = VoronoiDelaunayToGraph(
np.vstack((node_y_and_x[1], node_y_and_x[0])).T,
perimeter_links=perimeter_links,
)
Graph.__init__(
self,
node_y_and_x,
links=mesh.nodes_at_link,
patches=mesh.links_at_patch,
sort=False,
)
dual_graph = Graph(
(mesh.y_of_corner, mesh.x_of_corner),
links=mesh.corners_at_face,
patches=mesh.faces_at_cell,
sort=False,
)
self.merge(
dual_graph, node_at_cell=mesh.node_at_cell, nodes_at_face=mesh.nodes_at_face
)
if sort:
self.sort()
|
6740e921a0b962ca4b2c9c0799628085a7833cc0
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/protobuf/python/google/protobuf/internal/message_factory_test.py
|
97ef3aab52ab1b0752b7ee0f513bf083528fe9a5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-protobuf",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 10,237
|
py
|
message_factory_test.py
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.message_factory."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf.internal import testing_refleaks
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
@testing_refleaks.TestCase
class MessageFactoryTest(unittest.TestCase):
def setUp(self):
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
def _ExerciseDynamicClass(self, cls):
msg = cls()
msg.mandatory = 42
msg.nested_factory_2_enum = 0
msg.nested_factory_2_message.value = 'nested message value'
msg.factory_1_message.factory_1_enum = 1
msg.factory_1_message.nested_factory_1_enum = 0
msg.factory_1_message.nested_factory_1_message.value = (
'nested message value')
msg.factory_1_message.scalar_value = 22
msg.factory_1_message.list_value.extend([u'one', u'two', u'three'])
msg.factory_1_message.list_value.append(u'four')
msg.factory_1_enum = 1
msg.nested_factory_1_enum = 0
msg.nested_factory_1_message.value = 'nested message value'
msg.circular_message.mandatory = 1
msg.circular_message.circular_message.mandatory = 2
msg.circular_message.scalar_value = 'one deep'
msg.scalar_value = 'zero deep'
msg.list_value.extend([u'four', u'three', u'two'])
msg.list_value.append(u'one')
msg.grouped.add()
msg.grouped[0].part_1 = 'hello'
msg.grouped[0].part_2 = 'world'
msg.grouped.add(part_1='testing', part_2='123')
msg.loop.loop.mandatory = 2
msg.loop.loop.loop.loop.mandatory = 4
serialized = msg.SerializeToString()
converted = factory_test2_pb2.Factory2Message.FromString(serialized)
reserialized = converted.SerializeToString()
self.assertEqual(serialized, reserialized)
result = cls.FromString(reserialized)
self.assertEqual(msg, result)
def testGetPrototype(self):
db = descriptor_database.DescriptorDatabase()
pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
factory = message_factory.MessageFactory()
cls = factory.GetPrototype(pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message'))
self.assertFalse(cls is factory_test2_pb2.Factory2Message)
self._ExerciseDynamicClass(cls)
cls2 = factory.GetPrototype(pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message'))
self.assertTrue(cls is cls2)
def testCreatePrototypeOverride(self):
class MyMessageFactory(message_factory.MessageFactory):
def CreatePrototype(self, descriptor):
cls = super(MyMessageFactory, self).CreatePrototype(descriptor)
cls.additional_field = 'Some value'
return cls
db = descriptor_database.DescriptorDatabase()
pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
factory = MyMessageFactory()
cls = factory.GetPrototype(pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message'))
self.assertTrue(hasattr(cls, 'additional_field'))
def testGetMessages(self):
# performed twice because multiple calls with the same input must be allowed
for _ in range(2):
      # GetMessages should work regardless of the order in which the
      # FileDescriptorProtos are provided. In particular, the function should
      # succeed when the files are not in topological order of dependencies.
# Assuming factory_test2_fd depends on factory_test1_fd.
self.assertIn(self.factory_test1_fd.name,
self.factory_test2_fd.dependency)
# Get messages should work when a file comes before its dependencies:
# factory_test2_fd comes before factory_test1_fd.
messages = message_factory.GetMessages([self.factory_test2_fd,
self.factory_test1_fd])
self.assertTrue(
set(['google.protobuf.python.internal.Factory2Message',
'google.protobuf.python.internal.Factory1Message'],
).issubset(set(messages.keys())))
self._ExerciseDynamicClass(
messages['google.protobuf.python.internal.Factory2Message'])
factory_msg1 = messages['google.protobuf.python.internal.Factory1Message']
self.assertTrue(set(
['google.protobuf.python.internal.Factory2Message.one_more_field',
'google.protobuf.python.internal.another_field'],).issubset(set(
ext.full_name
for ext in factory_msg1.DESCRIPTOR.file.pool.FindAllExtensions(
factory_msg1.DESCRIPTOR))))
msg1 = messages['google.protobuf.python.internal.Factory1Message']()
ext1 = msg1.Extensions._FindExtensionByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
ext2 = msg1.Extensions._FindExtensionByName(
'google.protobuf.python.internal.another_field')
self.assertEqual(0, len(msg1.Extensions))
msg1.Extensions[ext1] = 'test1'
msg1.Extensions[ext2] = 'test2'
self.assertEqual('test1', msg1.Extensions[ext1])
self.assertEqual('test2', msg1.Extensions[ext2])
self.assertEqual(None,
msg1.Extensions._FindExtensionByNumber(12321))
self.assertEqual(2, len(msg1.Extensions))
if api_implementation.Type() == 'cpp':
self.assertRaises(TypeError,
msg1.Extensions._FindExtensionByName, 0)
self.assertRaises(TypeError,
msg1.Extensions._FindExtensionByNumber, '')
else:
self.assertEqual(None,
msg1.Extensions._FindExtensionByName(0))
self.assertEqual(None,
msg1.Extensions._FindExtensionByNumber(''))
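# Illustrative-only sketch (added note, not part of the original test suite):
# outside of tests, the same module-level API can build message classes at
# runtime from FileDescriptorProto objects. The descriptor variables and the
# message name below are hypothetical.
#
#   messages = message_factory.GetMessages([file_proto_a, file_proto_b])
#   MsgClass = messages['my.package.MyMessage']
#   instance = MsgClass()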
def testDuplicateExtensionNumber(self):
pool = descriptor_pool.DescriptorPool()
factory = message_factory.MessageFactory(pool=pool)
# Add Container message.
f = descriptor_pb2.FileDescriptorProto()
f.name = 'google/protobuf/internal/container.proto'
f.package = 'google.protobuf.python.internal'
msg = f.message_type.add()
msg.name = 'Container'
rng = msg.extension_range.add()
rng.start = 1
rng.end = 10
pool.Add(f)
msgs = factory.GetMessages([f.name])
self.assertIn('google.protobuf.python.internal.Container', msgs)
# Extend container.
f = descriptor_pb2.FileDescriptorProto()
f.name = 'google/protobuf/internal/extension.proto'
f.package = 'google.protobuf.python.internal'
f.dependency.append('google/protobuf/internal/container.proto')
msg = f.message_type.add()
msg.name = 'Extension'
ext = msg.extension.add()
ext.name = 'extension_field'
ext.number = 2
ext.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
ext.type_name = 'Extension'
ext.extendee = 'Container'
pool.Add(f)
msgs = factory.GetMessages([f.name])
self.assertIn('google.protobuf.python.internal.Extension', msgs)
# Add Duplicate extending the same field number.
f = descriptor_pb2.FileDescriptorProto()
f.name = 'google/protobuf/internal/duplicate.proto'
f.package = 'google.protobuf.python.internal'
f.dependency.append('google/protobuf/internal/container.proto')
msg = f.message_type.add()
msg.name = 'Duplicate'
ext = msg.extension.add()
ext.name = 'extension_field'
ext.number = 2
ext.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
ext.type_name = 'Duplicate'
ext.extendee = 'Container'
pool.Add(f)
with self.assertRaises(Exception) as cm:
factory.GetMessages([f.name])
self.assertIn(str(cm.exception),
['Extensions '
'"google.protobuf.python.internal.Duplicate.extension_field" and'
' "google.protobuf.python.internal.Extension.extension_field"'
' both try to extend message type'
' "google.protobuf.python.internal.Container"'
' with field number 2.',
'Double registration of Extensions'])
if __name__ == '__main__':
unittest.main()
| /research/cv/ICNet/infer/sdk/main_icnet_mindspore.py | mindspore-ai/models | Apache-2.0 | Python |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""the main sdk infer file"""
import argparse
import os
from PIL import Image
import numpy as np
from StreamManagerApi import MxDataInput, StringVector, StreamManagerApi
import MxpiDataType_pb2 as MxpiDataType
PIPELINE_PATH = "./config/icnet_mindspore.pipeline"
INFER_RESULT_DIR = "./result"
def _parse_args():
parser = argparse.ArgumentParser('mindspore icnet eval')
parser.add_argument('--data_root', type=str, default='',
help='root path of val data')
parser.add_argument('--num_classes', type=int, default=19,
help='number of classes')
args, _ = parser.parse_known_args()
return args
def _get_val_pairs(folder):
"""get val img_mask_path_pairs"""
split = 'val'
image_folder = os.path.join(folder, 'leftImg8bit/' + split)
mask_folder = os.path.join(folder, 'gtFine/' + split)
img_paths = []
mask_paths = []
for root, _, files in os.walk(image_folder):
for filename in files:
if filename.endswith('.png'):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), image_folder))
return img_paths, mask_paths
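# Illustrative example (added note; the city and frame numbers are hypothetical):
# with the Cityscapes layout assumed above, a matched image/mask pair looks like
#   leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png
#   gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelIds.png
# i.e. the mask path is derived by swapping the top-level folder and the
# filename suffix, exactly as done in the loop above.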
def _do_infer(stream_manager_api, data_input):
"""
send images into stream to do infer
Returns:
infer result, numpy array
"""
stream_name = b'segmentation'
unique_id = stream_manager_api.SendData(
stream_name, 0, data_input)
if unique_id < 0:
raise RuntimeError("Failed to send data to stream.")
keys = [b"mxpi_tensorinfer0"]
keyVec = StringVector()
for key in keys:
keyVec.push_back(key)
infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)
print(infer_result)
if infer_result.size() == 0:
print("infer_result is null")
exit()
TensorList = MxpiDataType.MxpiTensorPackageList()
TensorList.ParseFromString(infer_result[0].messageBuf)
data = np.frombuffer(
TensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
data = data.reshape(1, 19, 1024, 2048)
return data
def _class_to_index(mask):
"""assert the value"""
values = np.unique(mask)
_key = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1, 5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18])
_mapping = np.array(range(-1, len(_key) - 1)).astype('int32')
for value in values:
assert value in _mapping
index = np.digitize(mask.ravel(), _mapping, right=True)
return _key[index].reshape(mask.shape)
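# Worked example (added note): with the _key/_mapping tables above, raw
# Cityscapes label ids are remapped to train ids, e.g. 7 (road) -> 0,
# 24 (person) -> 11, 26 (car) -> 13, while void labels such as 0-6 map to -1.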
def batch_intersection_union(output, target, nclass):
"""mIoU"""
predict = np.argmax(output, axis=1) + 1
target = target.astype(float) + 1
predict = predict.astype(float) * np.array(target > 0).astype(float)
intersection = predict * np.array(predict == target).astype(float)
area_inter, _ = np.histogram(intersection, bins=nclass, range=(1, nclass+1))
area_pred, _ = np.histogram(predict, bins=nclass, range=(1, nclass+1))
area_lab, _ = np.histogram(target, bins=nclass, range=(1, nclass+1))
area_all = area_pred + area_lab
area_union = area_all - area_inter
return area_inter, area_union
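# Illustrative sanity check (added note, not executed by this script): with
# nclass=2, predicted classes [0, 1, 0, 1] and ground truth [0, 1, 1, -1]
# (-1 = ignored pixel), the shifted arrays become predict=[1, 2, 1, 0] and
# target=[1, 2, 2, 0], giving area_inter=[1, 1] and area_union=[2, 2],
# i.e. per-class IoU=[0.5, 0.5] and mIoU=0.5.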
cityspallete = [
128, 64, 128,
244, 35, 232,
70, 70, 70,
102, 102, 156,
190, 153, 153,
153, 153, 153,
250, 170, 30,
220, 220, 0,
107, 142, 35,
152, 251, 152,
0, 130, 180,
220, 20, 60,
255, 0, 0,
0, 0, 142,
0, 0, 70,
0, 60, 100,
0, 80, 100,
0, 0, 230,
119, 11, 32,
]
def main():
args = _parse_args()
stream_manager_api = StreamManagerApi()
ret = stream_manager_api.InitManager()
if ret != 0:
print("Failed to init Stream manager, ret=%s" % str(ret))
exit()
with open(PIPELINE_PATH, 'rb') as f:
pipelineStr = f.read()
ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
if ret != 0:
print("Failed to create Stream, ret=%s" % str(ret))
exit()
os.makedirs(INFER_RESULT_DIR, exist_ok=True)
data_input = MxDataInput()
total_inter = np.zeros(args.num_classes, dtype=np.float32)
total_union = np.zeros(args.num_classes, dtype=np.float32)
image_paths, mask_paths = _get_val_pairs(args.data_root)
for i in range(len(image_paths)):
print("img_path:", image_paths[i])
mask = Image.open(mask_paths[i])
mask = _class_to_index(np.array(mask).astype('int32'))
with open(image_paths[i], 'rb') as f:
data_input.data = f.read()
each_array = _do_infer(stream_manager_api, data_input)
mask = np.expand_dims(mask, axis=0)
pred = np.argmax(each_array, axis=1)
pred = pred.squeeze(0)
out_img = Image.fromarray(pred.astype('uint8'))
out_img.putpalette(cityspallete)
result_path = os.path.join(
INFER_RESULT_DIR,
f"{image_paths[i].split('/')[-1].split('.')[0]}sdk_infer.png")
out_img.save(result_path)
inter, union = batch_intersection_union(each_array, mask, args.num_classes)
total_inter = inter + total_inter
total_union = union + total_union
Iou = np.true_divide(total_inter, (2.220446049250313e-16 + total_union))
print("mean IoU", np.mean(Iou))
stream_manager_api.DestroyAllStreams()
if __name__ == '__main__':
main()
| /hoomd/data/local_access.py | glotzerlab/hoomd-blue | BSD-3-Clause | Python |
# Copyright (c) 2009-2023 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Access simulation state data directly."""
from abc import ABC, abstractmethod
from hoomd import Box
from hoomd import _hoomd
class _LocalAccess(ABC):
__slots__ = ('_entered', '_accessed_fields', '_cpp_obj')
_global_fields = {'rtag': 'getRTags'}
@property
@abstractmethod
def _fields(self):
pass
@property
@abstractmethod
def _array_cls(self):
pass
def __init__(self):
self._entered = False
self._accessed_fields = dict()
def __getattr__(self, attr):
if attr in self._accessed_fields:
return self._accessed_fields[attr]
elif attr in self._global_fields:
buff = getattr(self._cpp_obj, self._global_fields[attr])()
else:
raw_attr, flag = self._get_raw_attr_and_flag(attr)
if raw_attr in self._fields:
buff = getattr(self._cpp_obj, self._fields[raw_attr])(flag)
else:
raise AttributeError("{} object has no attribute {}".format(
type(self), attr))
self._accessed_fields[attr] = arr = self._array_cls(
buff, lambda: self._entered)
return arr
def _get_raw_attr_and_flag(self, attr):
ghosts_only = attr.startswith("ghost_")
with_ghosts = attr.endswith("_with_ghost")
raw_attr = attr.replace("_with_ghost", "").replace("ghost_", "")
if ghosts_only and with_ghosts:
raise ValueError("Attribute cannot be both prefixed with ghost_ "
"and suffixed with _with_ghost")
elif ghosts_only:
return raw_attr, _hoomd.GhostDataFlag.ghost
elif with_ghosts:
return raw_attr, _hoomd.GhostDataFlag.both
else:
return raw_attr, _hoomd.GhostDataFlag.standard
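# Illustrative examples of the naming scheme handled above (added note; `snap`
# is an assumed local-snapshot variable, not defined in this module):
#   snap.particles.position            -> local particles only (standard flag)
#   snap.particles.ghost_position      -> ghost particles only
#   snap.particles.position_with_ghost -> local particles followed by ghosts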
def __setattr__(self, attr, value):
if attr in self.__slots__:
super().__setattr__(attr, value)
return
try:
arr = getattr(self, attr)
except AttributeError:
raise AttributeError("{} object has no attribute {}.".format(
self.__class__, attr))
else:
if arr.read_only:
raise RuntimeError("Attribute {} is not settable.".format(attr))
arr[:] = value
def _enter(self):
self._cpp_obj.enter()
self._entered = True
def _exit(self):
self._cpp_obj.exit()
self._entered = False
self._accessed_fields = dict()
class ParticleLocalAccessBase(_LocalAccess):
"""Class for directly accessing HOOMD-blue particle data.
Attributes:
typeid ((N_particles) `hoomd.data.array` object of ``float``):
The integer type of a particle.
tag ((N_particles) `hoomd.data.array` object of ``int``):
The particle tags. Spatial sorting and MPI domain migration
reorder particles in memory. The particle tag identifies each
particle in the order it existed in the initial configuration.
rtag ((N_particles_global) `hoomd.data.array` object of ``int``):
The particle reverse tags. For a given particle tag ``tag``,
``i = particles.rtag[tag]`` is the array index holding that
particle.
position ((N_particles, 3) `hoomd.data.array` object of ``float``):
Particle positions :math:`[\\mathrm{length}]`.
image ((N_particles, 3) `hoomd.data.array` object of ``int``):
A count of how many times each particle crosses the periodic box
boundaries.
velocity ((N_particles, 3) `hoomd.data.array` object of ``float``):
Particle velocities :math:`[\\mathrm{velocity}]`.
acceleration ((N_particles, 3) `hoomd.data.array` object of ``float``):
Particle accelerations
:math:`[\\mathrm{velocity} \\cdot \\mathrm{time}^{-1}]`.
mass ((N_particles) `hoomd.data.array` object of ``float``):
Particle masses :math:`[\\mathrm{mass}]`.
orientation ((N_particles, 4) `hoomd.data.array` object of ``float``):
Particle orientations expressed as quaternions.
angmom ((N_particles, 4) `hoomd.data.array` object of \
``float``):
Particle angular momenta expressed as quaternions
:math:`[\\mathrm{mass} \\cdot \\mathrm{velocity} \\cdot
\\mathrm{length}]`.
moment_inertia ((N_particles, 3) `hoomd.data.array` object of \
``float``):
Particle principal moments of inertia
:math:`[\\mathrm{mass} \\cdot \\mathrm{length}^2]`.
charge ((N_particles) `hoomd.data.array` object of ``float``):
Particle electrical charges :math:`[\\mathrm{charge}]`.
diameter ((N_particles) `hoomd.data.array` object of ``float``):
Particle diameters :math:`[\\mathrm{length}]`.
body ((N_particles) `hoomd.data.array` object of ``int``):
The id of the rigid body the particle is in.
net_force ((N_particles, 3) `hoomd.data.array` object of ``float``):
Net force on particle :math:`[\\mathrm{force}]`.
net_torque ((N_particles, 3) `hoomd.data.array` object of ``float``):
Net torque on particle
:math:`[\\mathrm{force} \\cdot \\mathrm{length}]`.
net_virial ((N_particles, 6) `hoomd.data.array` object of ``float``):
Net virial on particle :math:`[\\mathrm{energy}]`.
net_energy ((N_particles,) `hoomd.data.array` object of ``float``):
Net energy of a particle :math:`[\\mathrm{energy}]`.
Note:
Changing some attributes (such as ``velocity`` and ``acceleration``)
may not alter the trajectory of the system as you would expect.
The `md.Integrator` is responsible for integrating the equations of
motion and manages the values in these arrays.
See Also:
`hoomd.State`
"""
@property
@abstractmethod
def _cpp_cls(self):
pass
_fields = {
'position': 'getPosition',
'typeid': 'getTypes',
'velocity': 'getVelocities',
'mass': 'getMasses',
'acceleration': 'getAcceleration',
'orientation': 'getOrientation',
'angmom': 'getAngularMomentum',
'moment_inertia': 'getMomentsOfInertia',
'charge': 'getCharge',
'diameter': 'getDiameter',
'image': 'getImages',
'tag': 'getTags',
'rtag': 'getRTags',
'body': 'getBodies',
'net_force': 'getNetForce',
'net_torque': 'getNetTorque',
'net_virial': 'getNetVirial',
'net_energy': 'getNetEnergy'
}
def __init__(self, state):
super().__init__()
self._cpp_obj = self._cpp_cls(state._cpp_sys_def.getParticleData())
class _GroupLocalAccess(_LocalAccess):
@property
@abstractmethod
def _cpp_cls(self):
pass
@property
@abstractmethod
def _cpp_get_data_method_name(self):
pass
_fields = {
'typeid': 'getTypeVal',
'group': 'getMembers',
'tag': 'getTags',
'rtag': 'getRTags'
}
def __init__(self, state):
super().__init__()
self._cpp_obj = self._cpp_cls(
getattr(state._cpp_sys_def, self._cpp_get_data_method_name)())
class BondLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue bond data.
Attributes:
typeid ((N_bonds) `hoomd.data.array` object of ``int``):
The integer type of a bond.
members ((N_bonds, 2) `hoomd.data.array` object of ``int``):
The tags of particles in a bond.
tag ((N_bonds) `hoomd.data.array` object of ``int``):
The bond tags. MPI domain migration reorders bonds in memory. The
bond tag identifies each bond in the order it existed in the initial
configuration.
rtag ((N_bonds_global) `hoomd.data.array` object of ``int``):
The bond reverse tags. For a given bond tag ``tag``,
``i = bonds.rtag[tag]`` is the array index holding that
bond.
See Also:
`hoomd.State`
"""
_cpp_get_data_method_name = "getBondData"
class AngleLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue angle data.
Attributes:
typeid ((N_angles) `hoomd.data.array` object of ``int``):
The integer type of an angle.
members ((N_angles, 3) `hoomd.data.array` object of ``int``):
The tags of particles in an angle.
tag ((N_angles) `hoomd.data.array` object of ``int``):
The angle tags. MPI domain migration reorders angles in memory.
The angle tag identifies each angle in the order it existed in the
initial configuration.
rtag ((N_angles_global) `hoomd.data.array` object of ``int``):
The angle reverse tags. For a given angle tag ``tag``, ``i =
angles.rtag[tag]`` is the array index holding that angle.
See Also:
`hoomd.State`
"""
_cpp_get_data_method_name = "getAngleData"
class DihedralLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue dihedral data.
Attributes:
typeid ((N_dihedrals) `hoomd.data.array` object of ``int``): The integer
type of a dihedral.
members ((N_dihedrals, 4) `hoomd.data.array` object of ``int``): the
tags of particles in a dihedral.
tag ((N_dihedrals) `hoomd.data.array` object of ``int``):
The dihedral tags. MPI domain migration reorders dihedrals in
memory. The dihedral tag identifies each dihedral in the order it
existed in the initial configuration.
rtag ((N_dihedrals_global) `hoomd.data.array` object of ``int``):
The dihedral reverse tags. For a given dihedral tag ``tag``, ``i
= dihedrals.rtag[tag]`` is the array index holding that dihedral.
See Also:
`hoomd.State`
"""
_cpp_get_data_method_name = "getDihedralData"
class ImproperLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue improper data.
Attributes:
typeid ((N_impropers) `hoomd.data.array` object of ``int``):
The integer type of an improper.
members ((N_impropers, 4) `hoomd.data.array` object of ``int``):
The tags of particles in an improper.
tag ((N_impropers) `hoomd.data.array` object of ``int``):
The improper tags. MPI domain migration reorders impropers in
memory. The improper tag identifies each improper in the order it
existed in the initial configuration.
rtag ((N_impropers_global) `hoomd.data.array` object of ``int``):
The improper reverse tags. For a given improper tag ``tag``, ``i
= impropers.rtag[tag]`` is the array index holding that improper.
See Also:
`hoomd.State`
"""
_cpp_get_data_method_name = "getImproperData"
class ConstraintLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue constraint data.
Attributes:
value ((N_constraints) `hoomd.data.array` object of ``float``): The
constraint value.
members ((N_constraints, 2) `hoomd.data.array` object of ``int``): the
tags of particles in a constraint.
tag ((N_constraints) `hoomd.data.array` object of ``int``):
The constraint tags. MPI domain migration reorders constraints in
memory. The constraint tag identifies each constraint in the order
it existed in the initial configuration.
rtag ((N_constraints_global) `hoomd.data.array` object of ``int``):
The constraint reverse tags. For a given constraint tag ``tag``,
``i = constraints.rtag[tag]`` is the array index holding that
constraint.
See Also:
`hoomd.State`
"""
_fields = {
'value': 'getTypeVal',
'group': 'getMembers',
'tag': 'getTags',
'rtag': 'getRTags'
}
_cpp_get_data_method_name = "getConstraintData"
class PairLocalAccessBase(_GroupLocalAccess):
"""Class for directly accessing HOOMD-blue special pair data.
Attributes:
typeid ((N_pairs) `hoomd.data.array` object of ``float``): The type of
special pair.
members ((N_pairs, 2) `hoomd.data.array` object of ``int``): the tags of
particles in a special pair.
tag ((N_special_pairs) `hoomd.data.array` object of ``int``):
The special pair tags. MPI domain migration reorders special
pairs in memory. The special pair tag identifies each special pair
in the order it existed in the initial configuration.
rtag ((N_special_pairs_global) `hoomd.data.array` object of ``int``):
The special pair reverse tags. For a given special pair tag
``tag``, ``i = pairs.rtag[tag]`` is the array index holding that
special pair.
See Also:
`hoomd.State`
"""
_cpp_get_data_method_name = "getPairData"
class _LocalSnapshot:
def __init__(self, state):
self._state = state
self._box = state.box
self._local_box = state._cpp_sys_def.getParticleData().getBox()
@property
def global_box(self):
"""hoomd.Box: The global simulation box."""
return Box.from_box(self._box)
@property
def local_box(self):
"""hoomd.Box: The local box according to the domain decomposition."""
return Box.from_box(Box._from_cpp(self._local_box))
@property
def particles(self):
"""hoomd.data.ParticleLocalAccessBase: Local particle data."""
return self._particles
@property
def bonds(self):
"""hoomd.data.BondLocalAccessBase: Local bond data."""
return self._bonds
@property
def angles(self):
"""hoomd.data.AngleLocalAccessBase: Local angle data."""
return self._angles
@property
def dihedrals(self):
"""hoomd.data.DihedralLocalAccessBase: Local dihedral data."""
return self._dihedrals
@property
def impropers(self):
"""hoomd.data.ImproperLocalAccessBase: Local improper data."""
return self._impropers
@property
def constraints(self):
"""hoomd.data.ConstraintLocalAccessBase: Local constraint data."""
return self._constraints
@property
def pairs(self):
"""hoomd.data.PairLocalAccessBase: Local special pair data."""
return self._pairs
def __enter__(self):
self._state._in_context_manager = True
self._particles._enter()
self._bonds._enter()
self._angles._enter()
self._dihedrals._enter()
self._impropers._enter()
self._constraints._enter()
self._pairs._enter()
return self
def __exit__(self, type, value, traceback):
self._state._in_context_manager = False
self._particles._exit()
self._bonds._exit()
self._angles._exit()
self._dihedrals._exit()
self._impropers._exit()
self._constraints._exit()
self._pairs._exit()
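# Minimal usage sketch (added note; `sim` is an assumed hoomd.Simulation and
# `numpy` an assumed import). The arrays are only valid inside the context
# manager, which drives the _enter()/_exit() calls above:
#
#   with sim.state.cpu_local_snapshot as snap:
#       snap.particles.velocity[:] = 0.0            # write into the local buffers
#       tags = numpy.array(snap.particles.tag)      # copy out anything needed later
#   # accessing snap.particles.* here would fail, the buffers were released on exit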
| /pycatia/mec_mod_interfaces/vertex.py | evereux/pycatia | MIT | Python |
#! /usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.mec_mod_interfaces.boundary import Boundary
class Vertex(Boundary):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| InfInterfaces.Reference
| MecModInterfaces.Boundary
| Vertex
|
| 0-D boundary.
| Role: This Boundary object may be, for example, the corner of a Pad resulting
| from the extrusion of a square.
| You will create a Vertex object using the Shapes.GetBoundary ,
| HybridShapes.GetBoundary , Sketches.GetBoundary or Selection.SelectElement2
| method. Then, you pass it to the operator (such as
| HybridShapeFactory.AddNewLinePtPt ).
| The lifetime of a Vertex object is limited, see Boundary.
| See also:
| TriDimFeatVertexOrBiDimFeatVertex , NotWireBoundaryMonoDimFeatVertex ,
| ZeroDimFeatVertexOrWireBoundaryMonoDimFeatVertex .
|
| Example:
| This example asks the end user to select successively two vertices. Then,
| it creates a line between these two vertices.
|
| Dim InputObjectType(0)
| Set Document = CATIA.ActiveDocument
| Set Selection = Document.Selection
| Set HybridBodies = Document.Part.HybridBodies
| Set HybridBody = HybridBodies.Item("Geometrical Set.1")
| 'We propose to the user that he select the first vertex
| InputObjectType(0)="Vertex"
| Status=Selection.SelectElement2(InputObjectType,"Select the first
| vertex",true)
| if (Status = "cancel") then Exit Sub
| Set FirstVertex = Selection.Item(1).Value
| Selection.Clear
| 'We propose to the user that he select the second vertex
| InputObjectType(0)="Vertex"
| Status=Selection.SelectElement2(InputObjectType,"Select the second
| vertex",true)
| if (Status = "cancel") then Exit Sub
| Set SecondVertex = Selection.Item(1).Value
| Set hybridShapeLinePtPt = HybridShapeFactory.AddNewLinePtPt(FirstVertex,SecondVertex)
| HybridBody.AppendHybridShape hybridShapeLinePtPt
| Document.Part.InWorkObject = hybridShapeLinePtPt
| Document.Part.Update
"""
def __init__(self, com_object):
super().__init__(com_object)
self.vertex = com_object
def __repr__(self):
return f'Vertex(name="{self.name}")'
| /InvenTree/stock/api.py | inventree/InvenTree | MIT | Python |
"""JSON API for the Stock app."""
from collections import OrderedDict
from datetime import datetime, timedelta
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import transaction
from django.db.models import F, Q
from django.http import JsonResponse
from django.urls import include, path, re_path
from django.utils.translation import gettext_lazy as _
from django_filters import rest_framework as rest_filters
from rest_framework import status
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
import common.models
import common.settings
import stock.serializers as StockSerializers
from build.models import Build
from build.serializers import BuildSerializer
from company.models import Company, SupplierPart
from company.serializers import CompanySerializer
from generic.states.api import StatusView
from InvenTree.api import (APIDownloadMixin, AttachmentMixin,
ListCreateDestroyAPIView, MetadataView)
from InvenTree.filters import (ORDER_FILTER, SEARCH_ORDER_FILTER,
SEARCH_ORDER_FILTER_ALIAS)
from InvenTree.helpers import (DownloadFile, extract_serial_numbers, isNull,
str2bool, str2int)
from InvenTree.mixins import (CreateAPI, CustomRetrieveUpdateDestroyAPI,
ListAPI, ListCreateAPI, RetrieveAPI,
RetrieveUpdateDestroyAPI)
from InvenTree.status_codes import StockHistoryCode, StockStatus
from order.models import (PurchaseOrder, ReturnOrder, SalesOrder,
SalesOrderAllocation)
from order.serializers import (PurchaseOrderSerializer, ReturnOrderSerializer,
SalesOrderSerializer)
from part.models import BomItem, Part, PartCategory
from part.serializers import PartBriefSerializer
from stock.admin import LocationResource, StockItemResource
from stock.models import (StockItem, StockItemAttachment, StockItemTestResult,
StockItemTracking, StockLocation)
class StockDetail(RetrieveUpdateDestroyAPI):
"""API detail endpoint for Stock object.
get:
Return a single StockItem object
post:
Update a StockItem
delete:
Remove a StockItem
"""
queryset = StockItem.objects.all()
serializer_class = StockSerializers.StockItemSerializer
def get_queryset(self, *args, **kwargs):
"""Annotate queryset."""
queryset = super().get_queryset(*args, **kwargs)
queryset = StockSerializers.StockItemSerializer.annotate_queryset(queryset)
return queryset
def get_serializer_context(self):
"""Extend serializer context."""
ctx = super().get_serializer_context()
ctx['user'] = getattr(self.request, 'user', None)
return ctx
def get_serializer(self, *args, **kwargs):
"""Set context before returning serializer."""
kwargs['part_detail'] = True
kwargs['location_detail'] = True
kwargs['supplier_part_detail'] = True
kwargs['context'] = self.get_serializer_context()
return self.serializer_class(*args, **kwargs)
class StockItemContextMixin:
"""Mixin class for adding StockItem object to serializer context."""
queryset = StockItem.objects.none()
def get_serializer_context(self):
"""Extend serializer context."""
context = super().get_serializer_context()
context['request'] = self.request
try:
context['item'] = StockItem.objects.get(pk=self.kwargs.get('pk', None))
except Exception:
pass
return context
class StockItemSerialize(StockItemContextMixin, CreateAPI):
"""API endpoint for serializing a stock item."""
serializer_class = StockSerializers.SerializeStockItemSerializer
class StockItemInstall(StockItemContextMixin, CreateAPI):
"""API endpoint for installing a particular stock item into this stock item.
- stock_item.part must be in the BOM for this part
- stock_item must currently be "in stock"
- stock_item must be serialized (and not belong to another item)
"""
serializer_class = StockSerializers.InstallStockItemSerializer
class StockItemUninstall(StockItemContextMixin, CreateAPI):
"""API endpoint for removing (uninstalling) items from this item."""
serializer_class = StockSerializers.UninstallStockItemSerializer
class StockItemConvert(StockItemContextMixin, CreateAPI):
"""API endpoint for converting a stock item to a variant part"""
serializer_class = StockSerializers.ConvertStockItemSerializer
class StockItemReturn(StockItemContextMixin, CreateAPI):
"""API endpoint for returning a stock item from a customer"""
serializer_class = StockSerializers.ReturnStockItemSerializer
class StockAdjustView(CreateAPI):
"""A generic class for handling stocktake actions.
Subclasses exist for:
- StockCount: count stock items
- StockAdd: add stock items
- StockRemove: remove stock items
- StockTransfer: transfer stock items
"""
queryset = StockItem.objects.none()
def get_serializer_context(self):
"""Extend serializer context."""
context = super().get_serializer_context()
context['request'] = self.request
return context
class StockChangeStatus(StockAdjustView):
"""API endpoint to change the status code of multiple StockItem objects."""
serializer_class = StockSerializers.StockChangeStatusSerializer
class StockCount(StockAdjustView):
"""Endpoint for counting stock (performing a stocktake)."""
serializer_class = StockSerializers.StockCountSerializer
class StockAdd(StockAdjustView):
"""Endpoint for adding a quantity of stock to an existing StockItem."""
serializer_class = StockSerializers.StockAddSerializer
class StockRemove(StockAdjustView):
"""Endpoint for removing a quantity of stock from an existing StockItem."""
serializer_class = StockSerializers.StockRemoveSerializer
class StockTransfer(StockAdjustView):
"""API endpoint for performing stock movements."""
serializer_class = StockSerializers.StockTransferSerializer
class StockAssign(CreateAPI):
"""API endpoint for assigning stock to a particular customer."""
queryset = StockItem.objects.all()
serializer_class = StockSerializers.StockAssignmentSerializer
def get_serializer_context(self):
"""Extend serializer context."""
ctx = super().get_serializer_context()
ctx['request'] = self.request
return ctx
class StockMerge(CreateAPI):
"""API endpoint for merging multiple stock items."""
queryset = StockItem.objects.none()
serializer_class = StockSerializers.StockMergeSerializer
def get_serializer_context(self):
"""Extend serializer context."""
ctx = super().get_serializer_context()
ctx['request'] = self.request
return ctx
class StockLocationList(APIDownloadMixin, ListCreateAPI):
"""API endpoint for list view of StockLocation objects.
- GET: Return list of StockLocation objects
- POST: Create a new StockLocation
"""
queryset = StockLocation.objects.all().prefetch_related(
'tags',
)
serializer_class = StockSerializers.LocationSerializer
def download_queryset(self, queryset, export_format):
"""Download the filtered queryset as a data file"""
dataset = LocationResource().export(queryset=queryset)
filedata = dataset.export(export_format)
filename = f"InvenTree_Locations.{export_format}"
return DownloadFile(filedata, filename)
def get_queryset(self, *args, **kwargs):
"""Return annotated queryset for the StockLocationList endpoint"""
queryset = super().get_queryset(*args, **kwargs)
queryset = StockSerializers.LocationSerializer.annotate_queryset(queryset)
return queryset
def filter_queryset(self, queryset):
"""Custom filtering: - Allow filtering by "null" parent to retrieve top-level stock locations."""
queryset = super().filter_queryset(queryset)
params = self.request.query_params
loc_id = params.get('parent', None)
cascade = str2bool(params.get('cascade', False))
depth = str2int(params.get('depth', None))
# Do not filter by location
if loc_id is None:
pass
# Look for top-level locations
elif isNull(loc_id):
# If we allow "cascade" at the top-level, this essentially means *all* locations
if not cascade:
queryset = queryset.filter(parent=None)
if cascade and depth is not None:
queryset = queryset.filter(level__lte=depth)
else:
try:
location = StockLocation.objects.get(pk=loc_id)
# All sub-locations to be returned too?
if cascade:
parents = location.get_descendants(include_self=True)
if depth is not None:
parents = parents.filter(level__lte=location.level + depth)
parent_ids = [p.id for p in parents]
queryset = queryset.filter(parent__in=parent_ids)
else:
queryset = queryset.filter(parent=location)
except (ValueError, StockLocation.DoesNotExist):
pass
# Exclude StockLocation tree
exclude_tree = params.get('exclude_tree', None)
if exclude_tree is not None:
try:
loc = StockLocation.objects.get(pk=exclude_tree)
queryset = queryset.exclude(
pk__in=[subloc.pk for subloc in loc.get_descendants(include_self=True)]
)
except (ValueError, StockLocation.DoesNotExist):
pass
return queryset
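# Illustrative query parameters for this endpoint (added note; the URL prefix
# is assumed to be the standard API mount point):
#   GET /api/stock/location/?parent=null              -> top-level locations only
#   GET /api/stock/location/?parent=null&cascade=true -> all locations
#   GET /api/stock/location/?parent=5                 -> direct children of location 5
#   GET /api/stock/location/?parent=5&cascade=true    -> all descendants of location 5
#   GET /api/stock/location/?exclude_tree=7           -> everything outside location 7's subtree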
filter_backends = SEARCH_ORDER_FILTER
filterset_fields = [
'name',
'structural',
'external',
'tags__name',
'tags__slug',
]
search_fields = [
'name',
'description',
'tags__name',
'tags__slug',
]
ordering_fields = [
'name',
'pathstring',
'items',
'level',
'tree_id',
'lft',
]
ordering = [
'tree_id',
'lft',
'name',
]
class StockLocationTree(ListAPI):
"""API endpoint for accessing a list of StockLocation objects, ready for rendering as a tree."""
queryset = StockLocation.objects.all()
serializer_class = StockSerializers.LocationTreeSerializer
filter_backends = ORDER_FILTER
# Order by tree level (top levels first) and then name
ordering = ['level', 'name']
class StockFilter(rest_filters.FilterSet):
"""FilterSet for StockItem LIST API."""
class Meta:
"""Metaclass options for this filterset"""
model = StockItem
# Simple filter filters
fields = [
'supplier_part',
'belongs_to',
'build',
'customer',
'consumed_by',
'sales_order',
'purchase_order',
'tags__name',
'tags__slug',
]
# Relationship filters
manufacturer = rest_filters.ModelChoiceFilter(label='Manufacturer', queryset=Company.objects.filter(is_manufacturer=True), field_name='manufacturer_part__manufacturer')
supplier = rest_filters.ModelChoiceFilter(label='Supplier', queryset=Company.objects.filter(is_supplier=True), field_name='supplier_part__supplier')
# Part name filters
name = rest_filters.CharFilter(label='Part name (case insensitive)', field_name='part__name', lookup_expr='iexact')
name_contains = rest_filters.CharFilter(label='Part name contains (case insensitive)', field_name='part__name', lookup_expr='icontains')
name_regex = rest_filters.CharFilter(label='Part name (regex)', field_name='part__name', lookup_expr='iregex')
# Part IPN filters
IPN = rest_filters.CharFilter(label='Part IPN (case insensitive)', field_name='part__IPN', lookup_expr='iexact')
IPN_contains = rest_filters.CharFilter(label='Part IPN contains (case insensitive)', field_name='part__IPN', lookup_expr='icontains')
IPN_regex = rest_filters.CharFilter(label='Part IPN (regex)', field_name='part__IPN', lookup_expr='iregex')
# Part attribute filters
assembly = rest_filters.BooleanFilter(label="Assembly", field_name='part__assembly')
active = rest_filters.BooleanFilter(label="Active", field_name='part__active')
salable = rest_filters.BooleanFilter(label="Salable", field_name='part__salable')
min_stock = rest_filters.NumberFilter(label='Minimum stock', field_name='quantity', lookup_expr='gte')
max_stock = rest_filters.NumberFilter(label='Maximum stock', field_name='quantity', lookup_expr='lte')
status = rest_filters.NumberFilter(label='Status Code', method='filter_status')
def filter_status(self, queryset, name, value):
"""Filter by integer status code"""
return queryset.filter(status=value)
allocated = rest_filters.BooleanFilter(label='Is Allocated', method='filter_allocated')
def filter_allocated(self, queryset, name, value):
"""Filter by whether or not the stock item is 'allocated'"""
if str2bool(value):
# Filter StockItem with either build allocations or sales order allocations
return queryset.filter(Q(sales_order_allocations__isnull=False) | Q(allocations__isnull=False))
else:
# Filter StockItem without build allocations or sales order allocations
return queryset.filter(Q(sales_order_allocations__isnull=True) & Q(allocations__isnull=True))
expired = rest_filters.BooleanFilter(label='Expired', method='filter_expired')
def filter_expired(self, queryset, name, value):
"""Filter by whether or not the stock item has expired"""
if not common.settings.stock_expiry_enabled():
return queryset
if str2bool(value):
return queryset.filter(StockItem.EXPIRED_FILTER)
else:
return queryset.exclude(StockItem.EXPIRED_FILTER)
external = rest_filters.BooleanFilter(label=_('External Location'), method='filter_external')
def filter_external(self, queryset, name, value):
"""Filter by whether or not the stock item is located in an external location"""
if str2bool(value):
return queryset.filter(location__external=True)
else:
return queryset.exclude(location__external=True)
in_stock = rest_filters.BooleanFilter(label='In Stock', method='filter_in_stock')
def filter_in_stock(self, queryset, name, value):
"""Filter by if item is in stock."""
if str2bool(value):
return queryset.filter(StockItem.IN_STOCK_FILTER)
else:
return queryset.exclude(StockItem.IN_STOCK_FILTER)
available = rest_filters.BooleanFilter(label='Available', method='filter_available')
def filter_available(self, queryset, name, value):
"""Filter by whether the StockItem is "available" or not.
Here, "available" means that the allocated quantity is less than the total quantity
"""
if str2bool(value):
# The 'quantity' field is greater than the calculated 'allocated' field
# Note that the item must also be "in stock"
return queryset.filter(StockItem.IN_STOCK_FILTER).filter(Q(quantity__gt=F('allocated')))
else:
# The 'quantity' field is less than (or equal to) the calculated 'allocated' field
return queryset.filter(Q(quantity__lte=F('allocated')))
batch = rest_filters.CharFilter(label="Batch code filter (case insensitive)", lookup_expr='iexact')
batch_regex = rest_filters.CharFilter(label="Batch code filter (regex)", field_name='batch', lookup_expr='iregex')
is_building = rest_filters.BooleanFilter(label="In production")
# Serial number filtering
serial_gte = rest_filters.NumberFilter(label='Serial number GTE', field_name='serial_int', lookup_expr='gte')
serial_lte = rest_filters.NumberFilter(label='Serial number LTE', field_name='serial_int', lookup_expr='lte')
serial = rest_filters.CharFilter(label='Serial number', field_name='serial', lookup_expr='exact')
serialized = rest_filters.BooleanFilter(label='Has serial number', method='filter_serialized')
def filter_serialized(self, queryset, name, value):
"""Filter by whether the StockItem has a serial number (or not)."""
q = Q(serial=None) | Q(serial='')
if str2bool(value):
return queryset.exclude(q)
else:
return queryset.filter(q)
has_batch = rest_filters.BooleanFilter(label='Has batch code', method='filter_has_batch')
def filter_has_batch(self, queryset, name, value):
"""Filter by whether the StockItem has a batch code (or not)."""
q = Q(batch=None) | Q(batch='')
if str2bool(value):
return queryset.exclude(q)
else:
return queryset.filter(q)
tracked = rest_filters.BooleanFilter(label='Tracked', method='filter_tracked')
def filter_tracked(self, queryset, name, value):
"""Filter by whether this stock item is *tracked*.
Meaning either:
- It has a serial number
- It has a batch code
"""
q_batch = Q(batch=None) | Q(batch='')
q_serial = Q(serial=None) | Q(serial='')
if str2bool(value):
return queryset.exclude(q_batch & q_serial)
else:
return queryset.filter(q_batch & q_serial)
installed = rest_filters.BooleanFilter(label='Installed in other stock item', method='filter_installed')
def filter_installed(self, queryset, name, value):
"""Filter stock items by "belongs_to" field being empty."""
if str2bool(value):
return queryset.exclude(belongs_to=None)
else:
return queryset.filter(belongs_to=None)
has_installed_items = rest_filters.BooleanFilter(label='Has installed items', method='filter_has_installed')
def filter_has_installed(self, queryset, name, value):
"""Filter stock items by "belongs_to" field being empty."""
if str2bool(value):
return queryset.filter(installed_items__gt=0)
else:
return queryset.filter(installed_items=0)
sent_to_customer = rest_filters.BooleanFilter(label='Sent to customer', method='filter_sent_to_customer')
def filter_sent_to_customer(self, queryset, name, value):
"""Filter by sent to customer."""
if str2bool(value):
return queryset.exclude(customer=None)
else:
return queryset.filter(customer=None)
depleted = rest_filters.BooleanFilter(label='Depleted', method='filter_depleted')
def filter_depleted(self, queryset, name, value):
"""Filter by depleted items."""
if str2bool(value):
return queryset.filter(quantity__lte=0)
else:
return queryset.exclude(quantity__lte=0)
has_purchase_price = rest_filters.BooleanFilter(label='Has purchase price', method='filter_has_purchase_price')
def filter_has_purchase_price(self, queryset, name, value):
"""Filter by having a purchase price."""
if str2bool(value):
return queryset.exclude(purchase_price=None)
else:
return queryset.filter(purchase_price=None)
ancestor = rest_filters.ModelChoiceFilter(
label='Ancestor',
queryset=StockItem.objects.all(),
method='filter_ancestor'
)
def filter_ancestor(self, queryset, name, ancestor):
"""Filter based on ancestor stock item"""
return queryset.filter(
parent__in=ancestor.get_descendants(include_self=True)
)
# Update date filters
updated_before = rest_filters.DateFilter(label='Updated before', field_name='updated', lookup_expr='lte')
updated_after = rest_filters.DateFilter(label='Updated after', field_name='updated', lookup_expr='gte')
class StockList(APIDownloadMixin, ListCreateDestroyAPIView):
"""API endpoint for list view of Stock objects.
- GET: Return a list of all StockItem objects (with optional query filters)
- POST: Create a new StockItem
"""
serializer_class = StockSerializers.StockItemSerializer
queryset = StockItem.objects.all()
filterset_class = StockFilter
def get_serializer(self, *args, **kwargs):
"""Set context before returning serializer.
Extra detail may be provided to the serializer via query parameters:
- part_detail: Include detail about the StockItem's part
- location_detail: Include detail about the StockItem's location
- supplier_part_detail: Include detail about the StockItem's supplier_part
- tests: Include detail about the StockItem's test results
"""
try:
params = self.request.query_params
for key in ['part_detail', 'location_detail', 'supplier_part_detail', 'tests']:
kwargs[key] = str2bool(params.get(key, False))
except AttributeError:
pass
kwargs['context'] = self.get_serializer_context()
return self.serializer_class(*args, **kwargs)
def get_serializer_context(self):
"""Extend serializer context."""
ctx = super().get_serializer_context()
ctx['user'] = getattr(self.request, 'user', None)
return ctx
def create(self, request, *args, **kwargs):
"""Create a new StockItem object via the API.
We override the default 'create' implementation.
If a location is *not* specified, but the linked *part* has a default location,
we can pre-fill the location automatically.
"""
user = request.user
# Copy the request data, to side-step "mutability" issues
data = OrderedDict()
# Update with cleaned input data
data.update(self.clean_data(request.data))
quantity = data.get('quantity', None)
if quantity is None:
raise ValidationError({
'quantity': _('Quantity is required'),
})
try:
part = Part.objects.get(pk=data.get('part', None))
except (ValueError, Part.DoesNotExist):
raise ValidationError({
'part': _('Valid part must be supplied'),
})
# Set default location (if not provided)
if 'location' not in data:
location = part.get_default_location()
if location:
data['location'] = location.pk
expiry_date = data.get('expiry_date', None)
# An expiry date was *not* specified - try to infer it!
if expiry_date is None and part.default_expiry > 0:
data['expiry_date'] = datetime.now().date() + timedelta(days=part.default_expiry)
# Attempt to extract serial numbers from submitted data
serials = None
# Check if a set of serial numbers was provided
serial_numbers = data.get('serial_numbers', '')
# Check if the supplier_part has a package size defined, which is not 1
if 'supplier_part' in data and data['supplier_part'] is not None:
try:
supplier_part = SupplierPart.objects.get(pk=data.get('supplier_part', None))
except (ValueError, SupplierPart.DoesNotExist):
raise ValidationError({
'supplier_part': _('The given supplier part does not exist'),
})
if supplier_part.base_quantity() != 1:
# Skip this check if pack size is 1 - makes no difference
# use_pack_size = True -> Multiply quantity by pack size
# use_pack_size = False -> Use quantity as is
if 'use_pack_size' not in data:
raise ValidationError({
'use_pack_size': _('The supplier part has a pack size defined, but flag use_pack_size not set'),
})
else:
if bool(data.get('use_pack_size')):
quantity = data['quantity'] = supplier_part.base_quantity(quantity)
# Divide purchase price by pack size, to save correct price per stock item
if data['purchase_price'] and supplier_part.pack_quantity_native:
try:
data['purchase_price'] = float(data['purchase_price']) / float(supplier_part.pack_quantity_native)
except ValueError:
pass
# Now remove the flag from data, so that it doesn't interfere with saving
# Do this regardless of results above
if 'use_pack_size' in data:
data.pop('use_pack_size')
# Assign serial numbers for a trackable part
if serial_numbers:
if not part.trackable:
raise ValidationError({
'serial_numbers': [_("Serial numbers cannot be supplied for a non-trackable part")]
})
# If serial numbers are specified, check that they match!
try:
serials = extract_serial_numbers(
serial_numbers,
quantity,
part.get_latest_serial_number()
)
# Determine if any of the specified serial numbers are invalid
# Note "invalid" means either they already exist, or do not pass custom rules
invalid = []
errors = []
for serial in serials:
try:
part.validate_serial_number(serial, raise_error=True)
except DjangoValidationError as exc:
# Catch raised error to extract specific error information
invalid.append(serial)
if exc.message not in errors:
errors.append(exc.message)
if len(errors) > 0:
msg = _("The following serial numbers already exist or are invalid")
msg += " : "
msg += ",".join([str(e) for e in invalid])
raise ValidationError({
'serial_numbers': errors + [msg]
})
except DjangoValidationError as e:
raise ValidationError({
'quantity': e.messages,
'serial_numbers': e.messages,
})
if serials is not None:
"""If the stock item is going to be serialized, set the quantity to 1."""
data['quantity'] = 1
# De-serialize the provided data
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
with transaction.atomic():
# Create an initial StockItem object
item = serializer.save()
if serials:
# Assign the first serial number to the "master" item
item.serial = serials[0]
# Save the item (with user information)
item.save(user=user)
if serials:
for serial in serials[1:]:
# Create a duplicate stock item with the next serial number
item.pk = None
item.serial = serial
item.save(user=user)
response_data = {
'quantity': quantity,
'serial_numbers': serials,
}
else:
response_data = serializer.data
return Response(response_data, status=status.HTTP_201_CREATED, headers=self.get_success_headers(serializer.data))
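# Illustrative request sketch (added note; endpoint path and field values are
# assumptions, not taken from this file):
#
#   POST /api/stock/
#   {
#       "part": 101,                 # a trackable part
#       "quantity": 3,
#       "serial_numbers": "100-102",
#       "location": 7                # optional; falls back to the part's default location
#   }
#
# For a trackable part this creates three serialized stock items, and the
# response reports the quantity and the assigned serial numbers.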
def download_queryset(self, queryset, export_format):
"""Download this queryset as a file.
Uses the APIDownloadMixin mixin class
"""
dataset = StockItemResource().export(queryset=queryset)
filedata = dataset.export(export_format)
filename = 'InvenTree_StockItems_{date}.{fmt}'.format(
date=datetime.now().strftime("%d-%b-%Y"),
fmt=export_format
)
return DownloadFile(filedata, filename)
def list(self, request, *args, **kwargs):
"""Override the 'list' method, as the StockLocation objects are very expensive to serialize.
So, we fetch and serialize the required StockLocation objects only as required.
"""
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
else:
serializer = self.get_serializer(queryset, many=True)
data = serializer.data
"""
Determine the response type based on the request.
a) For HTTP requests (e.g. via the browsable API) return a DRF response
b) For AJAX requests, simply return a JSON rendered response.
Note: b) is about 100x quicker than a), because the DRF framework adds a lot of cruft
"""
if page is not None:
return self.get_paginated_response(data)
elif request.is_ajax():
return JsonResponse(data, safe=False)
else:
return Response(data)
def get_queryset(self, *args, **kwargs):
"""Annotate queryset before returning."""
queryset = super().get_queryset(*args, **kwargs)
queryset = StockSerializers.StockItemSerializer.annotate_queryset(queryset)
return queryset
def filter_queryset(self, queryset):
"""Custom filtering for the StockItem queryset."""
params = self.request.query_params
queryset = super().filter_queryset(queryset)
if common.settings.stock_expiry_enabled():
# Filter by 'expiry date'
expired_date_lte = params.get('expiry_date_lte', None)
if expired_date_lte is not None:
try:
date_lte = datetime.fromisoformat(expired_date_lte)
queryset = queryset.filter(expiry_date__lte=date_lte)
except (ValueError, TypeError):
pass
expiry_date_gte = params.get('expiry_date_gte', None)
if expiry_date_gte is not None:
try:
date_gte = datetime.fromisoformat(expiry_date_gte)
queryset = queryset.filter(expiry_date__gte=date_gte)
except (ValueError, TypeError):
pass
# Filter by 'stale' status
stale = params.get('stale', None)
if stale is not None:
stale = str2bool(stale)
# How many days to account for "staleness"?
stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')
if stale_days > 0:
stale_date = datetime.now().date() + timedelta(days=stale_days)
stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)
if stale:
queryset = queryset.filter(stale_filter)
else:
queryset = queryset.exclude(stale_filter)
# Exclude stock item tree
exclude_tree = params.get('exclude_tree', None)
if exclude_tree is not None:
try:
item = StockItem.objects.get(pk=exclude_tree)
queryset = queryset.exclude(
pk__in=[it.pk for it in item.get_descendants(include_self=True)]
)
except (ValueError, StockItem.DoesNotExist):
pass
# Filter by "part tree" - only allow parts within a given variant tree
part_tree = params.get('part_tree', None)
if part_tree is not None:
try:
part = Part.objects.get(pk=part_tree)
if part.tree_id is not None:
queryset = queryset.filter(part__tree_id=part.tree_id)
except Exception:
pass
# Exclude StockItems which are already allocated to a particular SalesOrder
exclude_so_allocation = params.get('exclude_so_allocation', None)
if exclude_so_allocation is not None:
try:
order = SalesOrder.objects.get(pk=exclude_so_allocation)
# Grab all the active SalesOrderAllocations for this order
allocations = SalesOrderAllocation.objects.filter(
line__pk__in=[
line.pk for line in order.lines.all()
]
)
# Exclude any stock item which is already allocated to the sales order
queryset = queryset.exclude(
pk__in=[
a.item.pk for a in allocations
]
)
except (ValueError, SalesOrder.DoesNotExist):
pass
# Does the client wish to filter by the Part ID?
part_id = params.get('part', None)
if part_id:
try:
part = Part.objects.get(pk=part_id)
# Do we wish to filter *just* for this part, or also for parts *under* this one?
include_variants = str2bool(params.get('include_variants', True))
if include_variants:
# Filter by any parts "under" the given part
parts = part.get_descendants(include_self=True)
queryset = queryset.filter(part__in=parts)
else:
queryset = queryset.filter(part=part)
except (ValueError, Part.DoesNotExist):
raise ValidationError({"part": "Invalid Part ID specified"})
# Does the client wish to filter by stock location?
loc_id = params.get('location', None)
cascade = str2bool(params.get('cascade', True))
if loc_id is not None:
# Filter by 'null' location (i.e. top-level items)
if isNull(loc_id):
if not cascade:
queryset = queryset.filter(location=None)
else:
try:
# If '?cascade=true' then include items which exist in sub-locations
if cascade:
location = StockLocation.objects.get(pk=loc_id)
queryset = queryset.filter(location__in=location.getUniqueChildren())
else:
queryset = queryset.filter(location=loc_id)
except (ValueError, StockLocation.DoesNotExist):
pass
# Does the client wish to filter by part category?
cat_id = params.get('category', None)
if cat_id:
try:
category = PartCategory.objects.get(pk=cat_id)
queryset = queryset.filter(part__category__in=category.getUniqueChildren())
except (ValueError, PartCategory.DoesNotExist):
raise ValidationError({"category": "Invalid category id specified"})
# Does the client wish to filter by BomItem
bom_item_id = params.get('bom_item', None)
if bom_item_id is not None:
try:
bom_item = BomItem.objects.get(pk=bom_item_id)
queryset = queryset.filter(bom_item.get_stock_filter())
except (ValueError, BomItem.DoesNotExist):
pass
# Filter by company (either manufacturer or supplier)
company = params.get('company', None)
if company is not None:
queryset = queryset.filter(Q(supplier_part__supplier=company) | Q(supplier_part__manufacturer_part__manufacturer=company))
return queryset
filter_backends = SEARCH_ORDER_FILTER_ALIAS
ordering_field_aliases = {
'location': 'location__pathstring',
'SKU': 'supplier_part__SKU',
'stock': ['quantity', 'serial_int', 'serial'],
}
ordering_fields = [
'batch',
'location',
'part__name',
'part__IPN',
'updated',
'stocktake_date',
'expiry_date',
'quantity',
'stock',
'status',
'SKU',
]
ordering = [
'part__name',
'quantity',
'location',
]
search_fields = [
'serial',
'batch',
'part__name',
'part__IPN',
'part__description',
'location__name',
'tags__name',
'tags__slug',
]
class StockAttachmentList(AttachmentMixin, ListCreateDestroyAPIView):
"""API endpoint for listing (and creating) a StockItemAttachment (file upload)."""
queryset = StockItemAttachment.objects.all()
serializer_class = StockSerializers.StockItemAttachmentSerializer
filter_backends = SEARCH_ORDER_FILTER
filterset_fields = [
'stock_item',
]
class StockAttachmentDetail(AttachmentMixin, RetrieveUpdateDestroyAPI):
"""Detail endpoint for StockItemAttachment."""
queryset = StockItemAttachment.objects.all()
serializer_class = StockSerializers.StockItemAttachmentSerializer
class StockItemTestResultDetail(RetrieveUpdateDestroyAPI):
"""Detail endpoint for StockItemTestResult."""
queryset = StockItemTestResult.objects.all()
serializer_class = StockSerializers.StockItemTestResultSerializer
class StockItemTestResultList(ListCreateDestroyAPIView):
"""API endpoint for listing (and creating) a StockItemTestResult object."""
queryset = StockItemTestResult.objects.all()
serializer_class = StockSerializers.StockItemTestResultSerializer
filter_backends = SEARCH_ORDER_FILTER
filterset_fields = [
'test',
'user',
'result',
'value',
]
ordering = 'date'
def filter_queryset(self, queryset):
"""Filter by build or stock_item."""
params = self.request.query_params
queryset = super().filter_queryset(queryset)
# Filter by 'build'
build = params.get('build', None)
if build is not None:
try:
build = Build.objects.get(pk=build)
queryset = queryset.filter(stock_item__build=build)
except (ValueError, Build.DoesNotExist):
pass
# Filter by stock item
item = params.get('stock_item', None)
if item is not None:
try:
item = StockItem.objects.get(pk=item)
items = [item]
# Do we wish to also include test results for 'installed' items?
include_installed = str2bool(params.get('include_installed', False))
if include_installed:
# Include items which are installed "underneath" this item
# Note that this function is recursive!
installed_items = item.get_installed_items(cascade=True)
items += list(installed_items)
queryset = queryset.filter(stock_item__in=items)
except (ValueError, StockItem.DoesNotExist):
pass
return queryset
def get_serializer(self, *args, **kwargs):
"""Set context before returning serializer."""
try:
kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))
except Exception:
pass
kwargs['context'] = self.get_serializer_context()
return self.serializer_class(*args, **kwargs)
def perform_create(self, serializer):
"""Create a new test result object.
Also, check if an attachment was uploaded alongside the test result,
and save it to the database if one was provided.
"""
# Capture the user information
test_result = serializer.save()
test_result.user = self.request.user
test_result.save()
class StockTrackingDetail(RetrieveAPI):
"""Detail API endpoint for StockItemTracking model."""
queryset = StockItemTracking.objects.all()
serializer_class = StockSerializers.StockTrackingSerializer
class StockTrackingList(ListAPI):
"""API endpoint for list view of StockItemTracking objects.
StockItemTracking objects are read-only
(they are created by internal model functionality)
- GET: Return list of StockItemTracking objects
"""
queryset = StockItemTracking.objects.all()
serializer_class = StockSerializers.StockTrackingSerializer
def get_serializer(self, *args, **kwargs):
"""Set context before returning serializer."""
try:
kwargs['item_detail'] = str2bool(self.request.query_params.get('item_detail', False))
except Exception:
pass
try:
kwargs['user_detail'] = str2bool(self.request.query_params.get('user_detail', False))
except Exception:
pass
kwargs['context'] = self.get_serializer_context()
return self.serializer_class(*args, **kwargs)
def list(self, request, *args, **kwargs):
"""List all stock tracking entries."""
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
else:
serializer = self.get_serializer(queryset, many=True)
data = serializer.data
# Attempt to add extra context information to the historical data
for item in data:
deltas = item['deltas']
if not deltas:
deltas = {}
# Add part detail
if 'part' in deltas:
try:
part = Part.objects.get(pk=deltas['part'])
serializer = PartBriefSerializer(part)
deltas['part_detail'] = serializer.data
except Exception:
pass
# Add location detail
if 'location' in deltas:
try:
location = StockLocation.objects.get(pk=deltas['location'])
serializer = StockSerializers.LocationSerializer(location)
deltas['location_detail'] = serializer.data
except Exception:
pass
# Add stockitem detail
if 'stockitem' in deltas:
try:
stockitem = StockItem.objects.get(pk=deltas['stockitem'])
serializer = StockSerializers.StockItemSerializer(stockitem)
deltas['stockitem_detail'] = serializer.data
except Exception:
pass
# Add customer detail
if 'customer' in deltas:
try:
customer = Company.objects.get(pk=deltas['customer'])
serializer = CompanySerializer(customer)
deltas['customer_detail'] = serializer.data
except Exception:
pass
# Add PurchaseOrder detail
if 'purchaseorder' in deltas:
try:
order = PurchaseOrder.objects.get(pk=deltas['purchaseorder'])
serializer = PurchaseOrderSerializer(order)
deltas['purchaseorder_detail'] = serializer.data
except Exception:
pass
# Add SalesOrder detail
if 'salesorder' in deltas:
try:
order = SalesOrder.objects.get(pk=deltas['salesorder'])
serializer = SalesOrderSerializer(order)
deltas['salesorder_detail'] = serializer.data
except Exception:
pass
# Add ReturnOrder detail
if 'returnorder' in deltas:
try:
order = ReturnOrder.objects.get(pk=deltas['returnorder'])
serializer = ReturnOrderSerializer(order)
deltas['returnorder_detail'] = serializer.data
except Exception:
pass
# Add BuildOrder detail
if 'buildorder' in deltas:
try:
order = Build.objects.get(pk=deltas['buildorder'])
serializer = BuildSerializer(order)
deltas['buildorder_detail'] = serializer.data
except Exception:
pass
if page is not None:
return self.get_paginated_response(data)
if request.is_ajax():
return JsonResponse(data, safe=False)
else:
return Response(data)
def create(self, request, *args, **kwargs):
"""Create a new StockItemTracking object.
Here we override the default 'create' implementation,
to save the user information associated with the request object.
"""
# Clean up input data
data = self.clean_data(request.data)
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
# Record the user who created this tracking entry
item = serializer.save()
item.user = request.user
item.system = False
# quantity field cannot be explicitly adjusted here
item.quantity = item.item.quantity
item.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
filter_backends = SEARCH_ORDER_FILTER
filterset_fields = [
'item',
'user',
]
ordering = '-date'
ordering_fields = [
'date',
]
search_fields = [
'title',
'notes',
]
class LocationDetail(CustomRetrieveUpdateDestroyAPI):
"""API endpoint for detail view of StockLocation object.
- GET: Return a single StockLocation object
- PATCH: Update a StockLocation object
- DELETE: Remove a StockLocation object
"""
queryset = StockLocation.objects.all()
serializer_class = StockSerializers.LocationSerializer
def get_queryset(self, *args, **kwargs):
"""Return annotated queryset for the StockLocationList endpoint"""
queryset = super().get_queryset(*args, **kwargs)
queryset = StockSerializers.LocationSerializer.annotate_queryset(queryset)
return queryset
def destroy(self, request, *args, **kwargs):
"""Delete a Stock location instance via the API"""
delete_stock_items = 'delete_stock_items' in request.data and request.data['delete_stock_items'] == '1'
delete_sub_locations = 'delete_sub_locations' in request.data and request.data['delete_sub_locations'] == '1'
return super().destroy(request,
*args,
**dict(kwargs,
delete_sub_locations=delete_sub_locations,
delete_stock_items=delete_stock_items))
stock_api_urls = [
re_path(r'^location/', include([
re_path(r'^tree/', StockLocationTree.as_view(), name='api-location-tree'),
# Stock location detail endpoints
path(r'<int:pk>/', include([
re_path(r'^metadata/', MetadataView.as_view(), {'model': StockLocation}, name='api-location-metadata'),
re_path(r'^.*$', LocationDetail.as_view(), name='api-location-detail'),
])),
re_path(r'^.*$', StockLocationList.as_view(), name='api-location-list'),
])),
# Endpoints for bulk stock adjustment actions
re_path(r'^count/', StockCount.as_view(), name='api-stock-count'),
re_path(r'^add/', StockAdd.as_view(), name='api-stock-add'),
re_path(r'^remove/', StockRemove.as_view(), name='api-stock-remove'),
re_path(r'^transfer/', StockTransfer.as_view(), name='api-stock-transfer'),
re_path(r'^assign/', StockAssign.as_view(), name='api-stock-assign'),
re_path(r'^merge/', StockMerge.as_view(), name='api-stock-merge'),
re_path(r'^change_status/', StockChangeStatus.as_view(), name='api-stock-change-status'),
# StockItemAttachment API endpoints
re_path(r'^attachment/', include([
path(r'<int:pk>/', StockAttachmentDetail.as_view(), name='api-stock-attachment-detail'),
path('', StockAttachmentList.as_view(), name='api-stock-attachment-list'),
])),
# StockItemTestResult API endpoints
re_path(r'^test/', include([
path(r'<int:pk>/', include([
re_path(r'^metadata/', MetadataView.as_view(), {'model': StockItemTestResult}, name='api-stock-test-result-metadata'),
re_path(r'^.*$', StockItemTestResultDetail.as_view(), name='api-stock-test-result-detail'),
])),
re_path(r'^.*$', StockItemTestResultList.as_view(), name='api-stock-test-result-list'),
])),
# StockItemTracking API endpoints
re_path(r'^track/', include([
path(r'<int:pk>/', StockTrackingDetail.as_view(), name='api-stock-tracking-detail'),
# Stock tracking status code information
re_path(r'status/', StatusView.as_view(), {StatusView.MODEL_REF: StockHistoryCode}, name='api-stock-tracking-status-codes'),
re_path(r'^.*$', StockTrackingList.as_view(), name='api-stock-tracking-list'),
])),
# Detail views for a single stock item
path(r'<int:pk>/', include([
re_path(r'^convert/', StockItemConvert.as_view(), name='api-stock-item-convert'),
re_path(r'^install/', StockItemInstall.as_view(), name='api-stock-item-install'),
re_path(r'^metadata/', MetadataView.as_view(), {'model': StockItem}, name='api-stock-item-metadata'),
re_path(r'^return/', StockItemReturn.as_view(), name='api-stock-item-return'),
re_path(r'^serialize/', StockItemSerialize.as_view(), name='api-stock-item-serialize'),
re_path(r'^uninstall/', StockItemUninstall.as_view(), name='api-stock-item-uninstall'),
re_path(r'^.*$', StockDetail.as_view(), name='api-stock-detail'),
])),
# Stock item status code information
re_path(r'status/', StatusView.as_view(), {StatusView.MODEL_REF: StockStatus}, name='api-stock-status-codes'),
# Anything else
re_path(r'^.*$', StockList.as_view(), name='api-stock-list'),
]
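# Illustrative sketch, not part of the original module: in a configured InvenTree
# environment the named routes above are normally built with Django's reverse(),
# which is how tests and internal code refer to them. The pk value is an assumption.
def _example_stock_api_urls():
    """Resolve a few of the stock API routes defined above for a hypothetical pk=1."""
    from django.urls import reverse
    return {
        "list": reverse("api-stock-list"),
        "detail": reverse("api-stock-detail", kwargs={"pk": 1}),
        "tracking": reverse("api-stock-tracking-list"),
    }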
|
2fd97b4db52b2429656f48f75313b216ba8aeec8 | d125dc644ecb37c014c771c02549c1ebf35eca4d | /experiment/scheduler.py | 0d9da0b22302ed86be7b12749917d370676a3a39 | ["Apache-2.0"] | permissive | google/fuzzbench | 316d28b2eff2015fe2f479668151b3259bfa2579 | ff8ef0c6da62268521061a432c5b9e228c2f53dc | refs/heads/master | 2023-09-04T11:04:20.324945 | 2023-08-29T17:37:10 | 2023-08-29T17:37:10 | 238,105,619 | 1,005 | 402 | Apache-2.0 | 2023-09-13T15:07:49 | 2020-02-04T02:22:18 | Python | UTF-8 | Python | false | false | 35,428 | py | scheduler.py |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for starting and ending trials."""
import datetime
import math
import multiprocessing
import os
import sys
import random
import time
from typing import List, Dict
import jinja2
from common import benchmark_utils
from common import experiment_utils
from common import gcloud
from common import gce
from common import logs
from common import retry
from common import utils
from common import yaml_utils
from database import models
from database import utils as db_utils
# Give the trial runner a little extra time to shut down and account for how
# long it can take to actually start running once an instance is started. 5
# minutes is an arbitrary amount of time.
GRACE_TIME_SECONDS = 5 * 60
FAIL_WAIT_SECONDS = 10 * 60
logger = logs.Logger() # pylint: disable=invalid-name
RESOURCES_DIR = os.path.join(utils.ROOT_DIR, 'experiment', 'resources')
JINJA_ENV = jinja2.Environment(
undefined=jinja2.StrictUndefined,
loader=jinja2.FileSystemLoader(RESOURCES_DIR),
)
STARTED_TRIALS_FILTER = models.Trial.time_started.isnot(None)
NUM_RETRIES = 3
RETRY_WAIT_SECONDS = 3
def datetime_now() -> datetime.datetime:
"""Return datetime.datetime.utcnow(). This function is needed for
mocking."""
return datetime.datetime.now(
datetime.timezone.utc).replace(tzinfo=datetime.timezone.utc)
# TODO(metzman): Figure out what are the best practices for the functions which
# must return sqlalchemy.orm.Query. Importing it just for annotation might be
# confusing to readers. There may also be weird situations where it is
# acceptable to use a list or query (because of duck typing) but type hints
# prevent us unless handled intelligently.
def get_nonpreempted_trials(experiment: str):
"""Returns a query of trials in |experiment|."""
not_preempted_filter = models.Trial.preempted == False # pylint: disable=singleton-comparison
return get_experiment_trials(experiment).filter(not_preempted_filter)
def get_pending_trials(experiment: str):
"""Returns trial entities from |experiment| that have not run yet."""
return get_nonpreempted_trials(experiment).filter(~STARTED_TRIALS_FILTER)
def get_running_trials(experiment: str):
"""Returns trial entities from |experiment| that have been marked started
but not marked ended."""
return get_nonpreempted_trials(experiment).filter(
models.Trial.time_ended.is_(None), STARTED_TRIALS_FILTER)
def get_expired_trials(experiment: str, max_total_time: int):
"""Returns trial entities from |experiment| that have not ended and were
started more than |max_total_time| + |GRACE_TIME_SECONDS| ago."""
earliest_nonexpired_dt = datetime_now() - datetime.timedelta(
seconds=max_total_time + GRACE_TIME_SECONDS)
return get_nonpreempted_trials(experiment).filter(
models.Trial.time_started <= earliest_nonexpired_dt).filter(
models.Trial.time_ended.is_(None))
def all_trials_ended(experiment: str) -> bool:
"""Return a bool if there are any trials in |experiment| that have not
started."""
try:
return not get_experiment_trials(experiment).filter(
models.Trial.time_ended.is_(None)).all()
except RuntimeError:
logger.error('Failed to check whether all trials ended.')
return False
def delete_instances(instances, experiment_config):
"""Deletes |instances|."""
cloud_project = experiment_config['cloud_project']
cloud_compute_zone = experiment_config['cloud_compute_zone']
instances_to_delete = [
i for i in gce.get_instances(cloud_project, cloud_compute_zone)
if i in instances
]
return gcloud.delete_instances(instances_to_delete,
experiment_config['cloud_compute_zone'])
def end_expired_trials(experiment_config: dict, core_allocation: dict):
"""Get all expired trials, end them and return them."""
trials_past_expiry = get_expired_trials(experiment_config['experiment'],
experiment_config['max_total_time'])
expired_instances = []
expired_trial_ids = []
current_dt = datetime_now()
for trial in trials_past_expiry:
trial_id = trial.id
expired_instances.append(
experiment_utils.get_trial_instance_name(
experiment_config['experiment'], trial_id))
expired_trial_ids.append(trial_id)
trial.time_ended = current_dt
# Bail out here using expired_instances: trials_past_expiry is a query object, so it is truthy whether or not it matched anything.
if not expired_instances:
return
if core_allocation is not None:
for cpuset, trial_id in core_allocation.items():
if trial_id in expired_trial_ids:
core_allocation[cpuset] = None
if not experiment_utils.is_local_experiment() and not delete_instances(
expired_instances, experiment_config):
# If we failed to delete some instances, then don't update the status
# of expired trials in database as we don't know which instances were
# successfully deleted. Wait for next iteration of end_expired_trials.
logger.error('Failed to delete instances after trial expiry.')
return
db_utils.bulk_save(trials_past_expiry)
def get_experiment_trials(experiment: str):
"""Returns a query for trials in |experiment| ordered by id."""
with db_utils.session_scope() as session:
return session.query(models.Trial).filter(
models.Trial.experiment == experiment).order_by(models.Trial.id)
def get_started_trials(experiment: str):
"""Returns a query for trials in |experiment| that have been started."""
return get_experiment_trials(experiment).filter(STARTED_TRIALS_FILTER)
def get_last_trial_time_started(experiment: str):
"""Returns the time_started of the last trial that was started in
|experiment|. This function cannot be called if there are any unstarted
(i.e. pending) trials. It will raise an assertion failure if there are any
pending trials because it does not make sense to call this function before
that time."""
assert get_pending_trials(experiment).first() is None
# Don't use get_experiment_trials because it already orders the results by
# id.
with db_utils.session_scope() as session:
last_trial = session.query(models.Trial).filter(
models.Trial.experiment == experiment,
STARTED_TRIALS_FILTER).order_by(
models.Trial.time_started.desc()).first()
return last_trial.time_started
def any_pending_trials(experiment):
"""Returns True if there are any pending trials in |experiment|."""
return bool(get_pending_trials(experiment).first())
def any_running_trials(experiment):
"""Returns True if there are any running trials in |experiment|."""
return bool(get_running_trials(experiment).first())
class TrialInstanceManager: # pylint: disable=too-many-instance-attributes
"""Manager for trial instances.
Public methods of this are safe to call in preemptible and nonpreemptible
experiments alike though the main purpose of this class is to manage
preempted trials.
This class object should be created at the start of scheduling and the
handle_preempted_trials method should be called in the scheduling loop.
See the docstring for handle_preempted_trials for how it works.
"""
# Hard limit on the number of nonpreemptibles we will use. This bounds
# costs.
MAX_NONPREEMPTIBLES = 500
# The maximum fraction of total trials in the experiment that can be done
# using nonpreemptibles. This helps bound the cost in unexpected situations.
NONPREEMPTIBLES_FRACTION = 1 / 10
# How long can we keep trying preemptibles before we have to switch to a
# nonpreemptibles or stopping the experiment.
PREEMPTIBLE_WINDOW_MULTIPLIER = 1
def __init__(self, num_trials, experiment_config):
self.experiment_config = experiment_config
self.num_trials = num_trials
self.num_preemptible_restarts = 0
self.num_preemptible_omits = 0
# Bound for the number of nonpreemptibles we can start if the experiment
# specified preemptible_runners.
self.max_nonpreemptibles = min(
math.ceil(self.num_trials * self.NONPREEMPTIBLES_FRACTION),
self.MAX_NONPREEMPTIBLES)
logger.info('Max nonpreemptibles: %d.', self.max_nonpreemptibles)
# Attributes for preemptible retry window. The preemptible retry window
# is a time period that starts when the last initial trial is started.
# It determines how long we can retry preempted trials using
# preemptibles. This bounds the length of time an experiment lasts.
self.preemptible_window = (experiment_config['max_total_time'] *
self.PREEMPTIBLE_WINDOW_MULTIPLIER)
self._initial_trials = list(
get_experiment_trials(experiment_config['experiment']))
self._max_time_started = None
self.preempted_trials = {}
self.preemptible_starts_futile = False
# Filter operations happening before the experiment started.
with db_utils.session_scope() as session:
self.last_preemptible_query = (session.query(
models.Experiment).filter(
models.Experiment.name == experiment_config['experiment']
).one().time_created.replace(tzinfo=datetime.timezone.utc))
def _get_max_time_started(self):
"""Returns the last time_started of the self._initial_trials. Returns
None if any initial trials haven't been started yet. This is needed so
that the preemptible retry window starts from the end of the last
initial trial to be started."""
if self._max_time_started is not None:
return self._max_time_started
max_time_started = None
for trial in self._initial_trials:
time_started = trial.time_started
if time_started is None:
# An initial trial has never been started. Therefore the max
# time started doesn't exist and the window hasn't started.
return None
if max_time_started is None:
max_time_started = time_started
continue
max_time_started = max(time_started, max_time_started)
assert max_time_started is not None
max_time_started = max_time_started.replace(
tzinfo=datetime.timezone.utc)
self._max_time_started = max_time_started
return max_time_started
def preemptible_window_passed(self) -> bool:
"""Returns True if the preemptible window has passed."""
max_time_started = self._get_max_time_started()
if max_time_started is None:
return False
preemptible_window_end_time = max_time_started + datetime.timedelta(
seconds=self.preemptible_window)
return datetime_now() > preemptible_window_end_time
def can_start_preemptible(self) -> bool:
"""Returns True if we can start a preemptible trial.
|preemptible_starts| is the number of preemptibles we've already
started."""
if not self.experiment_config.get('preemptible_runners'):
# This code shouldn't be executed in a nonpreemptible experiment.
# But just in case it is, it's not OK to create a preemptible trial
# in a non-preemptible experiment.
return False
if self.preemptible_window_passed():
# Don't keep creating preemptible instances forever. Don't create
# them if the experiment has already taken a certain amount of time
# longer than the equivalent nonpreemptible experiment.
# *NOTE*: preemptible_window_passed is slightly broken. When
# the measurer uses this method it may produce slightly different
# results than the scheduler because the initial trials may be
# different. This is unlikely to happen in the real world. It is
# probably benign as well because the measurer may think the window
# end is slightly later than the scheduler. The effect of this will
# simply be that the measurer may measure for slightly longer than
# needed.
return False
# Otherwise, it's fine to create a preemptible instance.
return True
def can_start_nonpreemptible(self, nonpreemptible_starts: int) -> bool:
"""Returns True if we can start a nonpreemptible trial."""
if not self.experiment_config.get('preemptible_runners'):
# This code shouldn't be executed in a nonpreemptible experiment.
# But just in case it is, it's always OK to start a non-preemptible
# trial in a non-preemptible experiment.
return True
if nonpreemptible_starts >= self.max_nonpreemptibles:
# Don't exceed the maximum number of nonpreemptibles.
return False
# Otherwise, supplement with nonpreemptibles, on the assumption that the
# experiment results are not already so incomplete that doing so is futile.
return True
def get_nonpreemptible_starts(self) -> int:
"""Returns the count of nonpreemptible trials that have been started."""
return get_started_trials(self.experiment_config['experiment']).filter(
models.Trial.preemptible.is_(False)).count()
def _format_count_info(self, trial: models.Trial, count: int) -> str:
"""Formats a trial's count and information for logging."""
return (f'Trial ID: {trial.id}. '
f'Benchmark-Fuzzer pair: {trial.benchmark}-{trial.fuzzer}. '
f'Accumulating to {count/self.num_trials*100:3.2f}% '
f'({count} / {self.num_trials}) of all trials.')
def _log_restart(self, preemptible: bool, trial: models.Trial,
count: int) -> None:
"""Logs the count of restarting trials."""
logs.info('Restarting a preemptible trial as a %s one: %s',
'preemptible' if preemptible else 'nonpreemptible',
self._format_count_info(trial, count))
def _get_preempted_replacements(self,
preempted_trials) -> List[models.Trial]:
"""Returns a list containing a replacement trial for each trial that can
be replaced in |preempted_trials|."""
replacements = []
nonpreemptible_starts = self.get_nonpreemptible_starts()
# The time_ended won't be 100% accurate but that doesn't matter.
time_ended = datetime_now()
for trial in preempted_trials:
# Update the preempted trial.
trial.preempted = True
trial.time_ended = time_ended
# We try to start each replacement trial as a preemptible before
# trying nonpreemptible to minimize cost.
if self.can_start_preemptible():
# See if we can replace with a preemptible.
self.num_preemptible_restarts += 1
replacements.append(replace_trial(trial, preemptible=True))
self._log_restart(True, trial, self.num_preemptible_restarts)
continue
if self.can_start_nonpreemptible(nonpreemptible_starts):
# If a trial can't be replaced with a preemptible see if we can
# replace it with a nonpreemptible.
nonpreemptible_starts += 1
replacements.append(replace_trial(trial, preemptible=False))
self._log_restart(False, trial, nonpreemptible_starts)
continue
self.num_preemptible_omits += 1
logs.warning(
'Omitting a trial to cap cost: %s',
self._format_count_info(trial, self.num_preemptible_omits))
return replacements
def _get_started_unfinished_instances(self) -> Dict[str, models.Trial]:
"""Returns a dictionary of instance names to trials for trials were
started but not finished according to the database."""
experiment = self.experiment_config['experiment']
running_trials = get_running_trials(experiment)
return {
experiment_utils.get_trial_instance_name(experiment, trial.id):
trial for trial in running_trials
}
def get_preempted_trials(self) -> List[models.Trial]:
"""Returns a list of trials that were preempted."""
if not self.experiment_config.get('preemptible_runners'):
# No preempted trials in a nonpreemptible experiment.
assert not self.preempted_trials
return []
started_instances = self._get_started_unfinished_instances()
query_time = datetime_now()
preempted_instances = self._get_preempted_instances_with_retries()
trials = []
for instance in preempted_instances:
trial = started_instances.get(instance)
if trial is None:
# Preemption for this trial was probably handled already.
logs.warning('Instance: %s is preempted but is not running.',
instance)
continue
if trial.id in self.preempted_trials:
# We already know this instance was preempted.
continue
self.preempted_trials[trial.id] = trial
trials.append(trial)
# Update this now that we know we have succeeded in processing the
# query. It's far worse if we update the query too early than if we
# don't update the query at this point (which will only result in
# redundant work).
self.last_preemptible_query = query_time
# Return all preempted instances, those we knew from beforehand and
# those we discovered in the query.
return trials
@retry.wrap(NUM_RETRIES, RETRY_WAIT_SECONDS,
'experiment.scheduler.TrialInstanceManager.'
'_get_preempted_instances_with_retries')
def _get_preempted_instances_with_retries(self):
project = self.experiment_config['cloud_project']
zone = self.experiment_config['cloud_compute_zone']
return list(gce.get_preempted_instances(project, zone))
def handle_preempted_trials(self):
"""Handle preempted trials by marking them as preempted and creating
replacement trials when appropriate.
This is the algorithm used by handle_preempted_trials:
1. Query the GCE API to find trials that were preempted since our last
query (or the start of the experiment on our first query).
2. For every preempted trial, ensure that it was not handled before and
if it wasn't then mark the trials as finished and preempted and create
replacement trials if appropriate.
This is how it is determined whether a preempted trial should be
replaced and what it should be replaced with:
1. First we see if we can replace it with a preemptible instance. We
will replace it with a preemptible instance if:
a. We haven't created more than double the number of preemptible trial
instances as the number of trials this experiment would take if it
were using non-preemptibles ("target_trials"). This bounds the cost
of our preemptible usage to <2X the cost of using preemptibles naively.
If preemptibles are 20% of the cost of non-preemptibles, that is <40% the
cost of a non-preemptible experiment.
b. We haven't spent longer than 3X the duration of time the
experiment would take if using nonpreemptibles. This bounds the
duration of the experiment to 4X the length of the nonpreemptible
experiment.
2. If we can't create a preemptible replacement, we replace it with a
nonpreemptible if:
a. We haven't created more than target_trials/20 nonpreemptibles
already. This bounds the cost of the nonpreemptibles to 5% of the cost
of a 100% nonpreemptible experiment.
b. (TODO): Using nonpreemptibles will actually help the results of this
experiment. If we can't create any preemptible instances but we need
to replace target_trials number of instances, replacing only the tiny
fraction of them with nonpreemptibles will give you a 5% complete
experiment. This is a hard issue to solve, because we restart
trials as they are preempted so we may not determine it is futile to
use nonpreemptibles until the last nonpreemptible above our limit is
reached.
3. TODO: There are other cases where we probably shouldn't replace
trials that we haven't implemented, but would like to such as:
a. If a trial is preempted very close to the end of its budgeted time.
In that case it's probably fine if the comparison on the benchmark
happens at 22:45 instead of 23:00.
b. If a trial is the only trial for the fuzzer-benchmark that was
preempted. In that case, not replacing the trial will save time and
not hurt results much.
The impact of this algorithm is that:
1. The cost of a preemptible experiment, in the worst case scenario is
45% of a nonpreemptible experiment. On average we find they will be
~30% the cost of a nonpreemptible experiment.
2. Time of an experiment will be 4X the length of a nonpreemptible
experiment in the worst case scenario. This is fine however because most
of the experiment will finish earlier, only a few trials that won't
change results very much will trickle in at the end.
3. Experiments are guaranteed to terminate but results won't necessarily
be complete if the preemption rate is pathologically high. This is
acceptable because a human should intervene in these edge cases.
"""
logger.info('Handling preempted.')
if not self.experiment_config.get('preemptible_runners'):
# Nothing to do here if not a preemptible experiment.
return []
preempted_trials = self.get_preempted_trials()
if not preempted_trials:
logs.info('No preempteds to handle.')
return []
replacements = self._get_preempted_replacements(preempted_trials)
experiment = self.experiment_config['experiment']
instances = [
experiment_utils.get_trial_instance_name(experiment, trial.id)
for trial in preempted_trials
]
logs.info('Deleting preempted instances: %s', instances)
if not delete_instances(instances, self.experiment_config):
logs.error('Could not delete preempted instances: %s', instances)
db_utils.add_all(preempted_trials + replacements)
logger.info('Done handling preempted.')
return replacements
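# Illustrative sketch, not part of the original module: the worst-case cost bound
# described in the handle_preempted_trials docstring, assuming a preemptible
# instance costs roughly 20% of a nonpreemptible one. The figures mirror the
# docstring; they are not measured data.
def _example_worst_case_cost_fraction(preemptible_cost_ratio=0.2):
    """Return the worst-case cost relative to an all-nonpreemptible experiment."""
    # Up to 2x target_trials preemptible instances may be created...
    preemptible_part = 2 * preemptible_cost_ratio
    # ...plus up to target_trials / 20 nonpreemptible replacements.
    nonpreemptible_part = 1 / 20
    return preemptible_part + nonpreemptible_part  # 0.45 with the default ratio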
def replace_trial(trial, preemptible):
"""Returns a new trial to replace |trial|. The trial is preemptible if
|preemptible|. Sets trial.replacement to the replacement trial."""
replacement = models.Trial(fuzzer=trial.fuzzer,
benchmark=trial.benchmark,
experiment=trial.experiment,
preemptible=preemptible)
trial.replacement = replacement.id
return replacement
def schedule(experiment_config: dict, pool, core_allocation=None):
"""Gets all pending trials for the current experiment and then schedules
those that are possible."""
logger.info('Finding trials to schedule.')
# End expired trials
end_expired_trials(experiment_config, core_allocation)
# Start pending trials.
pending_trials = list(get_pending_trials(experiment_config['experiment']))
started_trials = start_trials(pending_trials, experiment_config, pool,
core_allocation)
return started_trials
def schedule_loop(experiment_config: dict):
"""Continuously run the scheduler until there is nothing left to schedule.
Note that this should not be called unless
multiprocessing.set_start_method('spawn') was called first. Otherwise it
will use fork to create the Pool which breaks logging."""
# Create the process pool once and reuse it to avoid leaking processes and
# other issues.
logger.info('Starting scheduler.')
num_trials = len(
get_experiment_trials(experiment_config['experiment']).all())
local_experiment = experiment_utils.is_local_experiment()
pool_args = ()
core_allocation = None
runners_cpus = experiment_config['runners_cpus']
if runners_cpus is not None:
if local_experiment:
runner_num_cpu_cores = experiment_config['runner_num_cpu_cores']
processes = runners_cpus // runner_num_cpu_cores
logger.info('Scheduling runners from core 0 to %d.',
runner_num_cpu_cores * processes - 1)
core_allocation = {}
for cpu in range(0, runner_num_cpu_cores * processes,
runner_num_cpu_cores):
core_allocation[
f'{cpu}-{cpu + runner_num_cpu_cores - 1}'] = None
pool_args = (processes,)
else:
pool_args = (runners_cpus,)
if not local_experiment:
gce.initialize()
trial_instance_manager = TrialInstanceManager(num_trials,
experiment_config)
experiment = experiment_config['experiment']
with multiprocessing.Pool(*pool_args) as pool:
handle_preempted = False
while not all_trials_ended(experiment):
try:
if (not local_experiment and not handle_preempted and
not any_pending_trials(experiment)):
# This ensures that:
# 1. handle_preempted will not become True when running
# locally.
# 2. Only start handling preempted instances once every
# initial trial was started.
handle_preempted = True
schedule(experiment_config, pool, core_allocation)
if handle_preempted:
trial_instance_manager.handle_preempted_trials()
except Exception: # pylint: disable=broad-except
logger.error('Error occurred during scheduling.')
# Either
# - We had an unexpected exception OR
# - We have not been able to start trials and still have some
# remaining. This can happen when we run out of instance quota.
# In these cases, sleep before retrying again.
time.sleep(FAIL_WAIT_SECONDS)
logger.info('Finished scheduling.')
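# Illustrative sketch, not part of the original module: the core-allocation map
# that schedule_loop builds for a local experiment. The runners_cpus and
# runner_num_cpu_cores values are example numbers, not defaults.
def _example_core_allocation(runners_cpus=8, runner_num_cpu_cores=2):
    """Return the initial cpuset -> trial-id map with every slot free (None)."""
    processes = runners_cpus // runner_num_cpu_cores
    return {
        f'{cpu}-{cpu + runner_num_cpu_cores - 1}': None
        for cpu in range(0, runner_num_cpu_cores * processes, runner_num_cpu_cores)
    }
# _example_core_allocation() -> {'0-1': None, '2-3': None, '4-5': None, '6-7': None}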
def update_started_trials(trial_proxies, trial_id_mapping, core_allocation):
"""Update started trials in |trial_id_mapping| with results from
|trial_proxies| and save the updated trials."""
# Map proxies back to trials and mark trials as started when proxies were
# marked as such.
started_trials = []
for proxy in trial_proxies:
if not proxy:
continue
trial = trial_id_mapping[proxy.id]
trial.time_started = proxy.time_started
if core_allocation is not None:
core_allocation[proxy.cpuset] = proxy.id
started_trials.append(trial)
if started_trials:
db_utils.add_all(started_trials)
return started_trials
def start_trials(trials, experiment_config: dict, pool, core_allocation=None):
"""Start all |trials| that are possible to start. Marks the ones that were
started as started."""
logger.info('Starting trials.')
trial_id_mapping = {trial.id: trial for trial in trials}
# Shuffle trials so that we don't create trials for the same fuzzer
# benchmark close to one another. This *may* make the preemption rate more
# evenly distributed across fuzzer benchmarks which will help if we don't
# end up completing the target number of trials. A more rigorous approach
# where we increase the distance in between trials for the same
# fuzzer-benchmark might be useful.
shuffled_trials = list(trial_id_mapping.values())
random.shuffle(shuffled_trials)
free_cpusets = [
cpuset for cpuset, trial_id in core_allocation.items()
if trial_id is None
] if core_allocation is not None else None
start_trial_args = []
for index, trial in enumerate(shuffled_trials):
if free_cpusets is not None and index >= len(free_cpusets):
break
start_trial_args += [
(TrialProxy(trial), experiment_config,
free_cpusets[index] if free_cpusets is not None else None)
]
started_trial_proxies = pool.starmap(_start_trial, start_trial_args)
started_trials = update_started_trials(started_trial_proxies,
trial_id_mapping, core_allocation)
logger.info(f'Started {len(started_trials)} trials.')
return started_trials
class TrialProxy:
"""A proxy object for a model.Trial. TrialProxy's allow these fields to be
set and retreived without making any database calls."""
def __init__(self, trial):
self.id = trial.id # pylint: disable=invalid-name
self.fuzzer = trial.fuzzer
self.benchmark = trial.benchmark
self.time_started = trial.time_started
self.time_ended = trial.time_ended
self.preemptible = trial.preemptible
self.cpuset = None
def _initialize_logs(experiment):
"""Initialize logs. This must be called on process start."""
logs.initialize(
default_extras={
'experiment': experiment,
'component': 'dispatcher',
'subcomponent': 'scheduler'
})
# Restarting preemptibles gives us up to another 24h; it resets the counter.
# https://cloud.google.com/compute/docs/instances/preemptible#preemption_selection
def _start_trial(trial: TrialProxy, experiment_config: dict, cpuset=None):
"""Start a trial if possible. Mark the trial as started if it was and then
return the Trial. Otherwise return None."""
# TODO(metzman): Add support for early exit (trial_creation_failed) that was
# removed when this started using multiprocessing.
# Also, support batched saves of trials (with a queue, like measurer uses)
# so that measuring a schedule doesn't require waiting until the map call
# that calls this function completely terminates.
_initialize_logs(experiment_config['experiment'])
logger.info('Start trial %d.', trial.id)
started = create_trial_instance(trial.fuzzer, trial.benchmark, trial.id,
experiment_config, trial.preemptible,
cpuset)
if started:
trial.time_started = datetime_now()
trial.cpuset = cpuset
return trial
logger.info('Trial: %d not started.', trial.id)
return None
def render_startup_script_template( # pylint: disable=too-many-arguments
instance_name: str,
fuzzer: str,
benchmark: str,
trial_id: int,
experiment_config: dict,
cpuset=None):
"""Render the startup script using the template and the parameters
provided and return the result."""
experiment = experiment_config['experiment']
docker_image_url = benchmark_utils.get_runner_image_url(
experiment, benchmark, fuzzer, experiment_config['docker_registry'])
fuzz_target = benchmark_utils.get_fuzz_target(benchmark)
local_experiment = experiment_utils.is_local_experiment()
template = JINJA_ENV.get_template('runner-startup-script-template.sh')
kwargs = {
'instance_name': instance_name,
'benchmark': benchmark,
'experiment': experiment,
'fuzzer': fuzzer,
'trial_id': trial_id,
'max_total_time': experiment_config['max_total_time'],
'snapshot_period': experiment_config['snapshot_period'],
'experiment_filestore': experiment_config['experiment_filestore'],
'report_filestore': experiment_config['report_filestore'],
'fuzz_target': fuzz_target,
'docker_image_url': docker_image_url,
'docker_registry': experiment_config['docker_registry'],
'local_experiment': local_experiment,
'no_seeds': experiment_config['no_seeds'],
'no_dictionaries': experiment_config['no_dictionaries'],
'oss_fuzz_corpus': experiment_config['oss_fuzz_corpus'],
'num_cpu_cores': experiment_config['runner_num_cpu_cores'],
'private': experiment_config['private'],
'cpuset': cpuset,
'custom_seed_corpus_dir': experiment_config['custom_seed_corpus_dir'],
}
if not local_experiment:
kwargs['cloud_compute_zone'] = experiment_config['cloud_compute_zone']
kwargs['cloud_project'] = experiment_config['cloud_project']
return template.render(**kwargs)
def create_trial_instance( # pylint: disable=too-many-arguments
fuzzer: str,
benchmark: str,
trial_id: int,
experiment_config: dict,
preemptible: bool,
cpuset=None) -> bool:
"""Create or start a trial instance for a specific
trial_id,fuzzer,benchmark."""
instance_name = experiment_utils.get_trial_instance_name(
experiment_config['experiment'], trial_id)
startup_script = render_startup_script_template(instance_name, fuzzer,
benchmark, trial_id,
experiment_config, cpuset)
startup_script_path = f'/tmp/{instance_name}-start-docker.sh'
with open(startup_script_path, 'w', encoding='utf-8') as file_handle:
file_handle.write(startup_script)
return gcloud.create_instance(instance_name,
gcloud.InstanceType.RUNNER,
experiment_config,
startup_script=startup_script_path,
preemptible=preemptible)
def main():
"""Main function for running scheduler independently."""
logs.initialize(default_extras={
'component': 'dispatcher',
'subcomponent': 'scheduler'
})
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} <experiment_config.yaml>')
return 1
experiment_config = yaml_utils.read(sys.argv[1])
schedule_loop(experiment_config)
return 0
if __name__ == '__main__':
sys.exit(main())
|
039267fe91689df239e27d159d0e4734ac90df04 | 57c77e6ca4867fe03f733c4b0778491c7042154b | /rlcard/agents/dmc_agent/__init__.py | ee835ad15988cec4e2692c978d7e222d0c116138 | ["MIT"] | permissive | datamllab/rlcard | 60754423478a8854c0d3af6e3766c1c6f6ffb855 | 7fc56edebe9a2e39c94f872edd8dbe325c61b806 | refs/heads/master | 2023-08-17T06:01:08.615989 | 2023-07-11T22:19:45 | 2023-07-11T22:19:45 | 206,562,316 | 2,447 | 639 | MIT | 2023-07-11T22:19:46 | 2019-09-05T12:48:01 | Python | UTF-8 | Python | false | false | 32 | py | __init__.py |
from .trainer import DMCTrainer
|
509aa14ddc61d419cb4ac2f230b5da435098a7d9 | 814023cb1f3f8d12f9093179ae23ac8e4a495a6d | /ch10/code/Ch10-04_drapeOrtho.py | f3201714a588e89914b3b1607e73554814e86a51 | ["MIT"] | permissive | mdiener21/python-geospatial-analysis-cookbook | 45904a8b17f40f44a2de826fa27f71f2b942438e | 0c10a32df428dab719d1a0333854fccdbab3309b | refs/heads/master | 2023-03-30T05:14:30.194194 | 2023-03-17T15:10:39 | 2023-03-17T15:10:39 | 23,821,605 | 124 | 51 | MIT | 2023-03-17T15:10:40 | 2014-09-09T06:12:11 | JavaScript | UTF-8 | Python | false | false | 1,137 | py | Ch10-04_drapeOrtho.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
from PIL import Image
from jinja2 import Environment, FileSystemLoader
# convert from Canada UTM http://epsg.io/3157/map to 3857
subprocess.call("gdalwarp -s_srs EPSG:3157 -t_srs EPSG:3857 -overwrite "
"-te -13664479.091 6446253.250 -13636616.770 6489702.670"
" ../geodata/canimage_092j02_tif/092j02_1_1.tif ../geodata/whistler_ortho.tif")
subprocess.call("gdal_translate -outsize 200 200 "
"../geodata/whistler_ortho.tif "
"../geodata/whistler_ortho_f.tif")
infile = '../geodata/whistler_ortho_f.tif'
drape_texture = '../../geodata/whistler_ortho_f.jpg'
Image.open(infile).save(drape_texture)
env = Environment(loader=FileSystemLoader(["../www/templates"]))
template = env.get_template( "base-3d-map-drape.html")
dem_3d = "../../geodata/whistler2.bin"
out_html = "../www/html/ch10-04_dem3d_map_drape.html"
result = template.render(title="Threejs DEM Drape Viewer", dem_file=dem_3d,
texture_map=drape_texture)
with open(out_html,mode="w") as file:
file.write(result)
|
6eb0ea083ea864949b10caaf79d168ccc0449ce8 | 66bc394ad0aeb94298c9b0a0d16812d5408332e9 | /torchgeo/trainers/segmentation.py | e0497de1b9c2a0bdab7bdb95c3fbd64628cb9dd8 | ["MIT"] | permissive | microsoft/torchgeo | e058a74ef51ba29aefd6ba8f0eb5e6070db310a7 | 29985861614b3b93f9ef5389469ebb98570de7dd | refs/heads/main | 2023-08-20T01:11:41.549866 | 2023-08-18T22:58:31 | 2023-08-18T22:58:31 | 369,428,935 | 1,724 | 225 | MIT | 2023-09-14T20:33:24 | 2021-05-21T05:58:56 | Python | UTF-8 | Python | false | false | 12,820 | py | segmentation.py |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Segmentation tasks."""
import os
import warnings
from typing import Any, cast
import matplotlib.pyplot as plt
import segmentation_models_pytorch as smp
import torch
import torch.nn as nn
from lightning.pytorch import LightningModule
from torch import Tensor
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex
from torchvision.models._api import WeightsEnum
from ..datasets.utils import unbind_samples
from ..models import FCN, get_weight
from . import utils
class SemanticSegmentationTask(LightningModule):
"""LightningModule for semantic segmentation of images.
Supports `Segmentation Models Pytorch
<https://github.com/qubvel/segmentation_models.pytorch>`_
as an architecture choice in combination with any of these
`TIMM backbones <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_.
"""
def config_task(self) -> None:
"""Configures the task based on kwargs parameters passed to the constructor."""
weights = self.hyperparams["weights"]
if self.hyperparams["model"] == "unet":
self.model = smp.Unet(
encoder_name=self.hyperparams["backbone"],
encoder_weights="imagenet" if weights is True else None,
in_channels=self.hyperparams["in_channels"],
classes=self.hyperparams["num_classes"],
)
elif self.hyperparams["model"] == "deeplabv3+":
self.model = smp.DeepLabV3Plus(
encoder_name=self.hyperparams["backbone"],
encoder_weights="imagenet" if weights is True else None,
in_channels=self.hyperparams["in_channels"],
classes=self.hyperparams["num_classes"],
)
elif self.hyperparams["model"] == "fcn":
self.model = FCN(
in_channels=self.hyperparams["in_channels"],
classes=self.hyperparams["num_classes"],
num_filters=self.hyperparams["num_filters"],
)
else:
raise ValueError(
f"Model type '{self.hyperparams['model']}' is not valid. "
f"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'."
)
if self.hyperparams["loss"] == "ce":
ignore_value = -1000 if self.ignore_index is None else self.ignore_index
class_weights = None
if isinstance(self.class_weights, torch.Tensor):
class_weights = self.class_weights.to(dtype=torch.float32)
elif hasattr(self.class_weights, "__array__") or self.class_weights:
class_weights = torch.tensor(self.class_weights, dtype=torch.float32)
self.loss = nn.CrossEntropyLoss(
ignore_index=ignore_value, weight=class_weights
)
elif self.hyperparams["loss"] == "jaccard":
self.loss = smp.losses.JaccardLoss(
mode="multiclass", classes=self.hyperparams["num_classes"]
)
elif self.hyperparams["loss"] == "focal":
self.loss = smp.losses.FocalLoss(
"multiclass", ignore_index=self.ignore_index, normalized=True
)
else:
raise ValueError(
f"Loss type '{self.hyperparams['loss']}' is not valid. "
f"Currently, supports 'ce', 'jaccard' or 'focal' loss."
)
if self.hyperparams["model"] != "fcn":
if weights and weights is not True:
if isinstance(weights, WeightsEnum):
state_dict = weights.get_state_dict(progress=True)
elif os.path.exists(weights):
_, state_dict = utils.extract_backbone(weights)
else:
state_dict = get_weight(weights).get_state_dict(progress=True)
self.model.encoder.load_state_dict(state_dict)
# Freeze backbone
if self.hyperparams.get("freeze_backbone", False) and self.hyperparams[
"model"
] in ["unet", "deeplabv3+"]:
for param in self.model.encoder.parameters():
param.requires_grad = False
# Freeze decoder
if self.hyperparams.get("freeze_decoder", False) and self.hyperparams[
"model"
] in ["unet", "deeplabv3+"]:
for param in self.model.decoder.parameters():
param.requires_grad = False
def __init__(self, **kwargs: Any) -> None:
"""Initialize the LightningModule with a model and loss function.
Keyword Args:
model: Name of the segmentation model type to use
backbone: Name of the timm backbone to use
weights: Either a weight enum, the string representation of a weight enum,
True for ImageNet weights, False or None for random weights,
or the path to a saved model state dict. FCN model does not support
pretrained weights. Pretrained ViT weight enums are not supported yet.
in_channels: Number of channels in input image
num_classes: Number of semantic classes to predict
loss: Name of the loss function, currently supports
'ce', 'jaccard' or 'focal' loss
class_weights: Optional rescaling weight given to each
class and used with 'ce' loss
ignore_index: Optional integer class index to ignore in the loss and metrics
learning_rate: Learning rate for optimizer
learning_rate_schedule_patience: Patience for learning rate scheduler
freeze_backbone: Freeze the backbone network to fine-tune the
decoder and segmentation head
freeze_decoder: Freeze the decoder network to linear probe
the segmentation head
Raises:
ValueError: if kwargs arguments are invalid
.. versionchanged:: 0.3
The *ignore_zeros* parameter was renamed to *ignore_index*.
.. versionchanged:: 0.4
The *segmentation_model* parameter was renamed to *model*,
*encoder_name* renamed to *backbone*, and
*encoder_weights* to *weights*.
.. versionadded:: 0.5
The *class_weights*, *freeze_backbone*,
and *freeze_decoder* parameters.
.. versionchanged:: 0.5
The *weights* parameter now supports WeightEnums and checkpoint paths.
"""
super().__init__()
# Creates `self.hparams` from kwargs
self.save_hyperparameters()
self.hyperparams = cast(dict[str, Any], self.hparams)
if not isinstance(kwargs["ignore_index"], (int, type(None))):
raise ValueError("ignore_index must be an int or None")
if (kwargs["ignore_index"] is not None) and (kwargs["loss"] == "jaccard"):
warnings.warn(
"ignore_index has no effect on training when loss='jaccard'",
UserWarning,
)
self.ignore_index = kwargs["ignore_index"]
self.class_weights = kwargs.get("class_weights", None)
self.config_task()
self.train_metrics = MetricCollection(
[
MulticlassAccuracy(
num_classes=self.hyperparams["num_classes"],
ignore_index=self.ignore_index,
multidim_average="global",
average="micro",
),
MulticlassJaccardIndex(
num_classes=self.hyperparams["num_classes"],
ignore_index=self.ignore_index,
average="micro",
),
],
prefix="train_",
)
self.val_metrics = self.train_metrics.clone(prefix="val_")
self.test_metrics = self.train_metrics.clone(prefix="test_")
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Forward pass of the model.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
return self.model(*args, **kwargs)
def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
"""Compute and return the training loss.
Args:
batch: the output of your DataLoader
Returns:
training loss
"""
batch = args[0]
x = batch["image"]
y = batch["mask"]
y_hat = self(x)
y_hat_hard = y_hat.argmax(dim=1)
loss = self.loss(y_hat, y)
# by default, the train step logs every `log_every_n_steps` steps where
# `log_every_n_steps` is a parameter to the `Trainer` object
self.log("train_loss", loss, on_step=True, on_epoch=False)
self.train_metrics(y_hat_hard, y)
return cast(Tensor, loss)
def on_train_epoch_end(self) -> None:
"""Logs epoch level training metrics."""
self.log_dict(self.train_metrics.compute())
self.train_metrics.reset()
def validation_step(self, *args: Any, **kwargs: Any) -> None:
"""Compute validation loss and log example predictions.
Args:
batch: the output of your DataLoader
batch_idx: the index of this batch
"""
batch = args[0]
batch_idx = args[1]
x = batch["image"]
y = batch["mask"]
y_hat = self(x)
y_hat_hard = y_hat.argmax(dim=1)
loss = self.loss(y_hat, y)
self.log("val_loss", loss, on_step=False, on_epoch=True)
self.val_metrics(y_hat_hard, y)
if (
batch_idx < 10
and hasattr(self.trainer, "datamodule")
and self.logger
and hasattr(self.logger, "experiment")
and hasattr(self.logger.experiment, "add_figure")
):
try:
datamodule = self.trainer.datamodule
batch["prediction"] = y_hat_hard
for key in ["image", "mask", "prediction"]:
batch[key] = batch[key].cpu()
sample = unbind_samples(batch)[0]
fig = datamodule.plot(sample)
summary_writer = self.logger.experiment
summary_writer.add_figure(
f"image/{batch_idx}", fig, global_step=self.global_step
)
plt.close()
except ValueError:
pass
def on_validation_epoch_end(self) -> None:
"""Logs epoch level validation metrics."""
self.log_dict(self.val_metrics.compute())
self.val_metrics.reset()
def test_step(self, *args: Any, **kwargs: Any) -> None:
"""Compute test loss.
Args:
batch: the output of your DataLoader
"""
batch = args[0]
x = batch["image"]
y = batch["mask"]
y_hat = self(x)
y_hat_hard = y_hat.argmax(dim=1)
loss = self.loss(y_hat, y)
# by default, the test and validation steps only log per *epoch*
self.log("test_loss", loss, on_step=False, on_epoch=True)
self.test_metrics(y_hat_hard, y)
def on_test_epoch_end(self) -> None:
"""Logs epoch level test metrics."""
self.log_dict(self.test_metrics.compute())
self.test_metrics.reset()
def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:
"""Compute and return the predictions.
By default, this will loop over images in a dataloader and aggregate
predictions into a list. This may not be desirable if you have many images
or large images which could cause out of memory errors. In this case
it's recommended to override this with a custom predict_step.
Args:
batch: the output of your DataLoader
Returns:
predicted softmax probabilities
"""
batch = args[0]
x = batch["image"]
y_hat: Tensor = self(x).softmax(dim=1)
return y_hat
def configure_optimizers(self) -> dict[str, Any]:
"""Initialize the optimizer and learning rate scheduler.
Returns:
learning rate dictionary
"""
optimizer = torch.optim.Adam(
self.model.parameters(), lr=self.hyperparams["learning_rate"]
)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(
optimizer,
patience=self.hyperparams["learning_rate_schedule_patience"],
),
"monitor": "val_loss",
},
}
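# Illustrative sketch, not part of the original module: constructing the task with
# a typical set of keyword arguments. The specific values (backbone, channel and
# class counts, learning rate) are assumptions for the example, not defaults.
def _example_task() -> "SemanticSegmentationTask":
    return SemanticSegmentationTask(
        model="unet",
        backbone="resnet50",
        weights=True,  # ImageNet-pretrained encoder weights
        in_channels=3,
        num_classes=2,
        loss="ce",
        ignore_index=None,
        learning_rate=1e-3,
        learning_rate_schedule_patience=5,
    )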
|
ba0171e978fb5a0d528328e8ba964f4e88794603 | c159bf835977682178e399b855eba40306b30c5c | /models/model.py | 07647af8528b3cc71127f5dde5af59189a003412 | ["MIT"] | permissive | nicolasbonnici/cryptobot | 0a4b2351f274d629397638eb1f8bfce740dc1cda | 78bdf1e31916eb7f2b4899e398712a53b009c744 | refs/heads/develop | 2023-07-07T08:46:30.003275 | 2021-08-10T18:00:42 | 2021-08-10T18:00:42 | 336,254,511 | 210 | 96 | MIT | 2021-08-10T18:00:43 | 2021-02-05T11:38:45 | Python | UTF-8 | Python | false | false | 192 | py | model.py |
from api.rest import Rest
class AbstractModel(Rest):
resource_name = ''
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
|
2deb68ce5bea0b30a3d64a32c8f903d81502341b | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Python_Scripting_for_Computational_Science_Third_Edition/py/regex/realre.py | dd564b3ba7d3ae0ae1bc3477153fd78d9c4601b7 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,521 | py | realre.py |
#!/usr/bin/env python
"""
This file exemplifies regular expressions for real numbers
"""
from __future__ import print_function  # lets the print() calls below run under Python 2 as well
import re
# real number in integer notation:
real_in = r'-?\d+'
# real number in scientific notation:
real_sn = r'-?[0-9](\.[0-9]+|)[Ee][+\-][0-9][0-9]?'
# or
real_sn = r'-?\d(\.\d+|)[Ee][+\-]\d\d?'
# or with embedded comments:
real_sn_x = r"""
-? # optional minus
\d(\.\d+|) # a number like 1 or 1.4098
[Ee][+\-]\d\d? # exponent, E-03, e-3, E+12
"""
# real number in decimal notation:
real_dn = r'-?(\d+\.\d*|\d*\.\d+)'
# regex for integer or real_sn or real_dn,
# with optional whitespace:
real = r'\s*(' + real_sn + '|' + real_dn + '|' + real_in + r')\s*'
# (always match the most complicated pattern first)
# (note: this one tests for int first and will, when used with
# findall, interpret 1.34 as 1 and .34)
s = 'some text, a=2.54E-05, inside a string' # test string
real_wrong = r'\s*('+real_in+'|'+real_dn+'|'+real_sn+r')\s*'
m = re.search(real_wrong, s)
print('\nerror: wrong number', m.group(0),
"is extracted from '%s'" % s, '\nall groups:', m.groups())
real_wrong = r'\s*('+real_dn+'|'+real_sn+'|'+real_in+r')\s*'
m = re.search(real_wrong, s)
print('error: wrong number', m.group(0),
"is extracted from '%s'" % s, '\nall groups:', m.groups())
# shortened regex for real numbers:
real_short = r'-?(\d+(\.\d*)?|\d*\.\d+)([eE][+\-]?\d+)?'
m = re.search(real_short, s)
print "\ncorrect number", m.group(0),\
"is extracted from '%s'" % s, "\nall groups:", m.groups()
|
01680083a1dfe73faed550d6295e8f1ab56dc864 | 0ba2e5061577f6286ff9265ef1df9aca96769445 | /math/prime_sieve/python/prime_sieve.py | ff8d0b31881e11e7d4ae87f48679eb26dec1f076 | ["CC0-1.0"] | permissive | ZoranPandovski/al-go-rithms | 68d5d02f80a61de9baf8e50a81a52e7d0b3983a0 | 4ae6ba54e90af14af236e03e435eb0402dcac787 | refs/heads/master | 2023-09-04T16:04:04.321676 | 2023-06-06T15:22:16 | 2023-06-06T15:22:16 | 93,438,176 | 1,421 | 2,445 | CC0-1.0 | 2023-06-15T14:24:28 | 2017-06-05T19:20:20 | Jupyter Notebook | UTF-8 | Python | false | false | 194 | py | prime_sieve.py |
from __future__ import print_function
limit = int(input("enter limit: "))
for prime in range(2, limit + 1):
if all(prime % i != 0 for i in range(2, prime)):
print(prime)
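# Illustrative addition, not in the original file: despite the file name, the loop
# above is trial division (roughly O(n^2) work overall). A sketch of an actual
# Sieve of Eratosthenes for comparison:
def sieve_of_eratosthenes(limit):
    """Return a list of all primes up to and including limit."""
    is_prime = [True] * (limit + 1)
    is_prime[0:2] = [False, False]
    for n in range(2, int(limit ** 0.5) + 1):
        if is_prime[n]:
            for multiple in range(n * n, limit + 1, n):
                is_prime[multiple] = False
    return [n for n in range(2, limit + 1) if is_prime[n]]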
|
8a1640b7feba30b3ef221677a8d01abcc0319a47 | cc8416a20b3aa9832dabf29112e52b5dfb367157 | /stable_nalu/abstract/__init__.py | f8655f4f69fd08e98fc547b82a36043bafc46e5b | ["MIT"] | permissive | AndreasMadsen/stable-nalu | ff877592ec965dca49a48bf94b38e343ba407411 | b3296ace137ffa4854edeef3759f1578b7650210 | refs/heads/master | 2023-05-22T04:53:17.495712 | 2021-08-19T18:15:14 | 2021-08-19T18:23:45 | 177,330,156 | 149 | 19 | MIT | 2020-01-15T08:06:12 | 2019-03-23T19:13:34 | HTML | UTF-8 | Python | false | false | 56 | py | __init__.py |
from ._extended_torch_module import ExtendedTorchModule
|
9349884f3c926d101cb48ed9ce06501a43b62438 | 7366f1c45b27eb5bb314fe65f53a474f22dcfb94 | /haystack/panels.py | 778954f18f62827cef9cc6174f347fcba38f2c3f | ["BSD-3-Clause", "MIT"] | permissive | django-haystack/django-haystack | 6cbb4e35ad13894bbf19c16dbf024bd818eda4f7 | 7fabd6215f9267e9911f0867a2d50537b6a1dbc5 | refs/heads/master | 2023-08-25T01:07:28.096704 | 2023-08-22T09:02:14 | 2023-08-22T09:02:14 | 117,549 | 2,249 | 934 | NOASSERTION | 2023-09-12T11:35:43 | 2009-01-29T20:23:57 | Python | UTF-8 | Python | false | false | 2,823 | py | panels.py |
from debug_toolbar.panels import DebugPanel
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from haystack import connections
class HaystackDebugPanel(DebugPanel):
"""
Panel that displays information about the Haystack queries run while
processing the request.
"""
name = "Haystack"
has_content = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._offset = {
alias: len(connections[alias].queries)
for alias in connections.connections_info.keys()
}
self._search_time = 0
self._queries = []
self._backends = {}
def nav_title(self):
return _("Haystack")
def nav_subtitle(self):
self._queries = []
self._backends = {}
for alias in connections.connections_info.keys():
search_queries = connections[alias].queries[self._offset[alias] :]
self._backends[alias] = {
"time_spent": sum(float(q["time"]) for q in search_queries),
"queries": len(search_queries),
}
self._queries.extend([(alias, q) for q in search_queries])
self._queries.sort(key=lambda x: x[1]["start"])
self._search_time = sum([d["time_spent"] for d in self._backends.values()])
num_queries = len(self._queries)
return "%d %s in %.2fms" % (
num_queries,
(num_queries == 1) and "query" or "queries",
self._search_time,
)
def title(self):
return _("Search Queries")
def url(self):
return ""
def content(self):
width_ratio_tally = 0
for alias, query in self._queries:
query["alias"] = alias
query["query"] = query["query_string"]
if query.get("additional_kwargs"):
if query["additional_kwargs"].get("result_class"):
query["additional_kwargs"]["result_class"] = str(
query["additional_kwargs"]["result_class"]
)
try:
query["width_ratio"] = (float(query["time"]) / self._search_time) * 100
except ZeroDivisionError:
query["width_ratio"] = 0
query["start_offset"] = width_ratio_tally
width_ratio_tally += query["width_ratio"]
context = self.context.copy()
context.update(
{
"backends": sorted(
self._backends.items(), key=lambda x: -x[1]["time_spent"]
),
"queries": [q for a, q in self._queries],
"sql_time": self._search_time,
}
)
return render_to_string("panels/haystack.html", context)
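# --- hedged editorial addition, not part of the original module ---
# Sketch of how a panel like this is typically enabled in a project that uses
# django-debug-toolbar: add its dotted path to DEBUG_TOOLBAR_PANELS in the
# Django settings module. The surrounding panel list is illustrative only and
# depends on the debug_toolbar version installed.
#
# DEBUG_TOOLBAR_PANELS = [
#     "debug_toolbar.panels.timer.TimerPanel",
#     "haystack.panels.HaystackDebugPanel",
# ]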
|
12e41d5f3ab2d4d7339f8c1a84877f747934a5d2
|
69b82fb05d1507426f6101fda29e3c0137ed313b
|
/tests/countries/test_romania.py
|
1712e6e9c0edd2f1788460b6c3fdcb7c583085cf
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
dr-prodigy/python-holidays
|
e2313f71a08717decd70a71c1ac55beeb5605100
|
f8c90952bf409703d0af5d89a202e21a90e2317f
|
refs/heads/master
| 2023-08-24T21:25:55.852737
| 2023-08-21T18:48:00
| 2023-08-21T18:48:00
| 23,284,211
| 919
| 426
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,368
|
py
|
test_romania.py
|
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.romania import Romania, RO, ROU
from tests.common import TestCase
class TestRomania(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass(Romania)
def test_country_aliases(self):
self.assertCountryAliases(Romania, RO, ROU)
def test_from_2024(self):
self.assertHoliday("2024-01-06", "2024-01-07")
self.assertNoHoliday("2023-01-06", "2023-01-07")
self.assertNoHolidayName("Bobotează", Romania(years=2023))
self.assertNoHolidayName("Sfântul Ion", Romania(years=2023))
def test_unification_day(self):
self.assertHoliday("2016-01-24")
self.assertNoHoliday("2015-01-24")
self.assertNoHolidayName("Ziua Unirii Principatelor Române", Romania(years=2015))
def test_easter(self):
self.assertHoliday(
"2017-04-16",
"2017-04-17",
"2018-04-06",
"2018-04-08",
"2018-04-09",
)
self.assertNoHoliday("2016-04-29", "2017-04-14")
def test_childrens_day(self):
self.assertHoliday("2017-06-01")
self.assertNoHoliday("2016-06-01")
self.assertNoHolidayName("Ziua Copilului", Romania(years=2016))
def test_assumption_day(self):
self.assertHoliday("2009-08-15")
self.assertNoHoliday("2008-08-15")
self.assertNoHolidayName("Adormirea Maicii Domnului", Romania(years=2008))
def test_saint_andrews_day(self):
self.assertHoliday("2012-11-30")
self.assertNoHoliday("2011-11-30")
self.assertNoHolidayName("Sfantul Apostol Andrei cel Intai chemat", Romania(years=2011))
def test_2020(self):
# https://publicholidays.ro/2020-dates/
self.assertHolidayDates(
Romania(years=2020),
"2020-01-01",
"2020-01-02",
"2020-01-24",
"2020-04-17",
"2020-04-19",
"2020-04-20",
"2020-05-01",
"2020-06-01",
"2020-06-07",
"2020-06-08",
"2020-08-15",
"2020-11-30",
"2020-12-01",
"2020-12-25",
"2020-12-26",
)
def test_2022(self):
# https://publicholidays.ro/2022-dates/
self.assertHolidayDates(
Romania(years=2022),
"2022-01-01",
"2022-01-02",
"2022-01-24",
"2022-04-22",
"2022-04-24",
"2022-04-25",
"2022-05-01",
"2022-06-01",
"2022-06-12",
"2022-06-13",
"2022-08-15",
"2022-11-30",
"2022-12-01",
"2022-12-25",
"2022-12-26",
)
def test_2023(self):
# https://publicholidays.ro/2023-dates/
self.assertHolidayDates(
Romania(years=2023),
"2023-01-01",
"2023-01-02",
"2023-01-24",
"2023-04-14",
"2023-04-16",
"2023-04-17",
"2023-05-01",
"2023-06-01",
"2023-06-04",
"2023-06-05",
"2023-08-15",
"2023-11-30",
"2023-12-01",
"2023-12-25",
"2023-12-26",
)
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2018-01-01", "Anul Nou"),
("2018-01-02", "Anul Nou"),
("2018-01-24", "Ziua Unirii Principatelor Române"),
("2018-04-06", "Paștele"),
("2018-04-08", "Paștele"),
("2018-04-09", "Paștele"),
("2018-05-01", "Ziua Muncii"),
("2018-05-27", "Rusaliile"),
("2018-05-28", "Rusaliile"),
("2018-06-01", "Ziua Copilului"),
("2018-08-15", "Adormirea Maicii Domnului"),
("2018-11-30", "Sfantul Apostol Andrei cel Intai chemat"),
("2018-12-01", "Ziua Națională a României"),
("2018-12-25", "Crăciunul"),
("2018-12-26", "Crăciunul"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2018-01-01", "New Year's Day"),
("2018-01-02", "New Year's Day"),
("2018-01-24", "Unification of the Romanian Principalities Day"),
("2018-04-06", "Easter"),
("2018-04-08", "Easter"),
("2018-04-09", "Easter"),
("2018-05-01", "Labour Day"),
("2018-05-27", "Pentecost"),
("2018-05-28", "Pentecost"),
("2018-06-01", "Children's Day"),
("2018-08-15", "Dormition of the Mother of God"),
("2018-11-30", "Saint Andrew's Day"),
("2018-12-01", "National Day"),
("2018-12-25", "Christmas Day"),
("2018-12-26", "Christmas Day"),
)
def test_l10n_uk(self):
self.assertLocalizedHolidays(
"uk",
("2018-01-01", "Новий рік"),
("2018-01-02", "Новий рік"),
("2018-01-24", "День обʼєднання Дунайських князівств"),
("2018-04-06", "Великдень"),
("2018-04-08", "Великдень"),
("2018-04-09", "Великдень"),
("2018-05-01", "День праці"),
("2018-05-27", "Трійця"),
("2018-05-28", "Трійця"),
("2018-06-01", "День захисту дітей"),
("2018-08-15", "Успіння Пресвятої Богородиці"),
("2018-11-30", "День святого Андрія Первозваного"),
("2018-12-01", "Національний день Румунії"),
("2018-12-25", "Різдво Христове"),
("2018-12-26", "Різдво Христове"),
)
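# --- hedged editorial addition, not part of the original test module ---
# Minimal usage sketch of the class under test; Romania (and the RO/ROU
# aliases imported above) behaves like a dict keyed by date. Kept as a
# comment so nothing executes at collection time.
#
# ro = Romania(years=2023)
# assert "2023-12-25" in ro  # Crăciunul
# assert ro.get("2023-01-24") == "Ziua Unirii Principatelor Române"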
|
518582469cca5ce525b14b3e5ad7ae655355e20f
|
dfd9b83c93395e75d92be8d9ff5175174dafdd82
|
/Server/integrations/deduce/PasswordlessAuthenticationWithDeduceImpossTravel.py
|
3ed30499264e027f1d4647c4ffeb6ac19124d388
|
[
"MIT"
] |
permissive
|
GluuFederation/oxAuth
|
1ebc5775da8762db1e59a7cbb0769b6fc3cf21fc
|
0933f5ef56fc78d65cbec6e4a4310ffa3d175cec
|
refs/heads/master
| 2023-08-23T16:09:14.123800
| 2023-08-01T07:39:01
| 2023-08-01T07:39:01
| 18,150,497
| 425
| 187
|
MIT
| 2023-09-07T11:04:16
| 2014-03-26T19:14:35
|
Java
|
UTF-8
|
Python
| false
| false
| 24,980
|
py
|
PasswordlessAuthenticationWithDeduceImpossTravel.py
|
# Author: Jose Gonzalez
# Author: Madhumita Subramaniam
from java.lang import System
from java.net import URLDecoder, URLEncoder
from java.util import Arrays, ArrayList, Collections, HashMap
from org.gluu.oxauth.service import ClientService
from org.gluu.service.net import NetworkService
from org.gluu.oxauth.service.net import HttpService
from javax.faces.application import FacesMessage
from javax.servlet.http import Cookie
from javax.faces.context import FacesContext
from org.oxauth.persistence.model.configuration import GluuConfiguration
from org.gluu.oxauth.security import Identity
from org.gluu.oxauth.util import ServerUtil
from org.gluu.service import CacheService
from org.gluu.oxauth.service import AuthenticationService, UserService
from org.gluu.oxauth.service.custom import CustomScriptService
from org.gluu.model.custom.script import CustomScriptType
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.model import SimpleCustomProperty
from org.gluu.persist import PersistenceEntryManager
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.util import StringHelper
from java.time import LocalDateTime, Duration
from org.gluu.jsf2.message import FacesMessages
try:
import json
except ImportError:
import simplejson as json
import sys
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
self.ACR_SG = "super_gluu"
self.PREV_LOGIN_SETTING = "prevLoginsCookieSettings"
self.modulePrefix = "pwdless-external_"
# expiration time in seconds - adds up to 1 year
self.lockExpirationTime = 31536000
def init(self, customScript, configurationAttributes):
print "Deduce Passwordless. init called"
if not configurationAttributes.containsKey("DEDUCE_ENDPOINT"):
print "Deduce Passwordless. Initialization. Property DEDUCE_ENDPOINT is mandatory"
return False
self.DEDUCE_ENDPOINT = configurationAttributes.get("DEDUCE_ENDPOINT").getValue2()
if not configurationAttributes.containsKey("DEDUCE_SITE"):
print "Deduce Passwordless. Initialization. Property DEDUCE_SITE is mandatory"
return False
self.DEDUCE_SITE = configurationAttributes.get("DEDUCE_SITE").getValue2()
if not configurationAttributes.containsKey("DEDUCE_API_KEY"):
print "Deduce Passwordless. Initialization. Property DEDUCE_API_KEY is mandatory"
return False
self.DEDUCE_API_KEY = configurationAttributes.get("DEDUCE_API_KEY").getValue2()
self.authenticators = {}
self.uid_attr = self.getLocalPrimaryKey()
self.prevLoginsSettings = self.computePrevLoginsSettings(configurationAttributes.get(self.PREV_LOGIN_SETTING))
custScriptService = CdiUtil.bean(CustomScriptService)
self.scriptsList = custScriptService.findCustomScripts(Collections.singletonList(CustomScriptType.PERSON_AUTHENTICATION), "oxConfigurationProperty", "displayName", "oxEnabled")
dynamicMethods = self.computeMethods(configurationAttributes.get("snd_step_methods"), self.scriptsList)
if len(dynamicMethods) > 0:
print "Deduce Passwordless. init. Loading scripts for dynamic modules: %s" % dynamicMethods
for acr in dynamicMethods:
moduleName = self.modulePrefix + acr
try:
external = __import__(moduleName, globals(), locals(), ["PersonAuthentication"], -1)
module = external.PersonAuthentication(self.currentTimeMillis)
print "Deduce Passwordless. init. Got dynamic module for acr %s" % acr
configAttrs = self.getConfigurationAttributes(acr, self.scriptsList)
if acr == self.ACR_SG:
application_id = configurationAttributes.get("supergluu_app_id").getValue2()
configAttrs.put("application_id", SimpleCustomProperty("application_id", application_id))
if module.init(None, configAttrs):
module.configAttrs = configAttrs
self.authenticators[acr] = module
else:
print "Deduce Passwordless. init. Call to init in module '%s' returned False" % moduleName
except:
print "Deduce Passwordless. init. Failed to load module %s" % moduleName
print "Exception: ", sys.exc_info()[1]
else:
print "Deduce Passwordless. init. Not enough custom scripts enabled. Check config property 'snd_step_methods'"
return False
print "Deduce Passwordless. init. Initialized successfully"
return True
def destroy(self, configurationAttributes):
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, configurationAttributes):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
print "Deduce Passwordless. authenticate for step %d" % step
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
if step == 1:
user_name = identity.getCredentials().getUsername()
if StringHelper.isNotEmptyString(user_name):
foundUser = userService.getUserByAttribute(self.uid_attr, user_name)
if foundUser == None:
print "Deduce Passwordless. Unknown username '%s'" % user_name
elif authenticationService.authenticate(user_name):
availMethods = self.getAvailMethodsUser(foundUser)
# deduce - impossible travel feature
identity = CdiUtil.bean(Identity)
#session_attributes = identity.getSessionId().getSessionAttributes()
ip = str( ServerUtil.getFirstValue(requestParameters, "loginForm:clientIP")).encode('utf-8')
email = str(foundUser.getAttribute("mail")).encode('utf-8')
print "platform %s" %str( ServerUtil.getFirstValue(requestParameters, "loginForm:platform"))
platform = json.loads(str( ServerUtil.getFirstValue(requestParameters, "loginForm:platform")).encode('utf-8'))
action = "auth.success"
impossibleTravel = self.sendInsightsRequest(ip, email, platform["name"], action)
if (impossibleTravel is True):
self.lockUser(user_name)
else:
if availMethods.size() > 0:
acr = availMethods.get(0)
print "Deduce Passwordless. Method to try in 2nd step will be: %s" % acr
module = self.authenticators[acr]
logged_in = module.authenticate(module.configAttrs, requestParameters, step)
if logged_in:
identity.setWorkingParameter("ACR", acr)
print "Deduce Passwordless. Authentication passed for step %d" % step
return True
else:
self.setError("Cannot proceed. You don't have suitable credentials for passwordless login")
else:
self.setError("Wrong username or password")
else:
print "authenticate step 2"
user = authenticationService.getAuthenticatedUser()
if user == None:
print "Deduce Passwordless. authenticate for step 2. Cannot retrieve logged user"
return False
#see alternative.xhtml
identity = CdiUtil.bean(Identity)
session_attributes = identity.getSessionId().getSessionAttributes()
alter = session_attributes.get("alternativeMethod")
if alter != None:
#bypass the rest of this step if an alternative method was provided. Current step will be retried (see getNextStep)
self.simulateFirstStep(requestParameters, alter)
return True
session_attributes = identity.getSessionId().getSessionAttributes()
acr = session_attributes.get("ACR")
#this working parameter is used in alternative.xhtml
identity.setWorkingParameter("methods", self.getAvailMethodsUser(user, acr))
success = False
if acr in self.authenticators:
module = self.authenticators[acr]
success = module.authenticate(module.configAttrs, requestParameters, step)
if success:
print "Deduce Passwordless. authenticate. 2FA authentication was successful"
if self.prevLoginsSettings != None:
self.persistCookie(user)
else:
print "Deduce Passwordless. authenticate. 2FA authentication failed"
return success
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
print "Deduce Passwordless. prepareForStep %d" % step
identity = CdiUtil.bean(Identity)
session_attributes = identity.getSessionId().getSessionAttributes()
if step == 1:
try:
loginHint = session_attributes.get("login_hint")
print "Deduce Passwordless. prepareForStep. Login hint is %s" % loginHint
isLoginHint = loginHint != None
if self.prevLoginsSettings == None:
if isLoginHint:
identity.setWorkingParameter("loginHint", loginHint)
else:
users = self.getCookieValue()
if isLoginHint:
idx = self.findUid(loginHint, users)
if idx >= 0:
u = users.pop(idx)
users.insert(0, u)
else:
identity.setWorkingParameter("loginHint", loginHint)
if len(users) > 0:
identity.setWorkingParameter("users", json.dumps(users, separators=(',',':')))
# In login.xhtml both loginHint and users are used to properly display the login form
except:
print "Deduce Passwordless. prepareForStep. Error!", sys.exc_info()[1]
return True
else:
user = CdiUtil.bean(AuthenticationService).getAuthenticatedUser()
if user == None:
print "Deduce Passwordless. prepareForStep. Cannot retrieve logged user"
return False
acr = session_attributes.get("ACR")
print "Deduce Passwordless. prepareForStep. ACR = %s" % acr
identity.setWorkingParameter("methods", ArrayList(self.getAvailMethodsUser(user, acr)))
if acr in self.authenticators:
module = self.authenticators[acr]
return module.prepareForStep(module.configAttrs, requestParameters, step)
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
print "Deduce Passwordless. getExtraParametersForStep %d" % step
list = ArrayList()
if step > 1:
acr = CdiUtil.bean(Identity).getWorkingParameter("ACR")
if acr in self.authenticators:
module = self.authenticators[acr]
params = module.getExtraParametersForStep(module.configAttrs, step)
if params != None:
list.addAll(params)
list.addAll(Arrays.asList("ACR", "methods"))
print "extras are %s" % list
return list
def getCountAuthenticationSteps(self, configurationAttributes):
return 2
def getPageForStep(self, configurationAttributes, step):
if step > 1:
identity = CdiUtil.bean(Identity)
acr = CdiUtil.bean(Identity).getWorkingParameter("ACR")
if acr in self.authenticators:
module = self.authenticators[acr]
page = module.getPageForStep(module.configAttrs, step)
print "Deduce Passwordless. getPageForStep %d is %s" % (step, page)
return page
return "/auth/deduce/loginD.xhtml"
def getNextStep(self, configurationAttributes, requestParameters, step):
print "Deduce Passwordless. getNextStep called %d" % step
identity = CdiUtil.bean(Identity)
if step > 1:
session_attributes = identity.getSessionId().getSessionAttributes()
acr = session_attributes.get("alternativeMethod")
if acr != None:
print "Deduce Passwordless. getNextStep. Use alternative method %s" % acr
CdiUtil.bean(Identity).setWorkingParameter("ACR", acr)
#retry step with different acr
return 2
return -1
def logout(self, configurationAttributes, requestParameters):
return True
    # Miscellaneous
def getLocalPrimaryKey(self):
entryManager = CdiUtil.bean(PersistenceEntryManager)
config = GluuConfiguration()
config = entryManager.find(config.getClass(), "ou=configuration,o=gluu")
#Pick (one) attribute where user id is stored (e.g. uid/mail)
uid_attr = config.getOxIDPAuthentication().get(0).getConfig().getPrimaryKey()
print "Deduce Passwordless. init. uid attribute is '%s'" % uid_attr
return uid_attr
def setError(self, msg):
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
facesMessages.clear()
facesMessages.add(FacesMessage.SEVERITY_ERROR, msg)
def computeMethods(self, sndStepMethods, scriptsList):
snd_step_methods = [] if sndStepMethods == None else StringHelper.split(sndStepMethods.getValue2(), ",")
methods = []
for m in snd_step_methods:
for customScript in scriptsList:
if customScript.getName() == m and customScript.isEnabled():
methods.append(m)
print "Deduce Passwordless. computeMethods. %s" % methods
return methods
def getConfigurationAttributes(self, acr, scriptsList):
configMap = HashMap()
for customScript in scriptsList:
if customScript.getName() == acr:
for prop in customScript.getConfigurationProperties():
configMap.put(prop.getValue1(), SimpleCustomProperty(prop.getValue1(), prop.getValue2()))
print "Deduce Passwordless. getConfigurationAttributes. %d configuration properties were found for %s" % (configMap.size(), acr)
return configMap
def getAvailMethodsUser(self, user, skip=None):
methods = ArrayList()
for method in self.authenticators:
try:
module = self.authenticators[method]
if module.hasEnrollments(module.configAttrs, user) and (skip == None or skip != method):
methods.add(method)
except:
print "Deduce Passwordless. getAvailMethodsUser. hasEnrollments call could not be issued for %s module" % method
print "Exception: ", sys.exc_info()[1]
print "Deduce Passwordless. getAvailMethodsUser %s" % methods.toString()
return methods
def simulateFirstStep(self, requestParameters, acr):
#To simulate 1st step, there is no need to call:
        # getPageForStep (no need as user/pwd won't be shown again)
# isValidAuthenticationMethod (by restriction, it returns True)
# prepareForStep (by restriction, it returns True)
# getExtraParametersForStep (by restriction, it returns None)
print "Deduce Passwordless. simulateFirstStep. Calling authenticate (step 1) for %s module" % acr
if acr in self.authenticators:
module = self.authenticators[acr]
auth = module.authenticate(module.configAttrs, requestParameters, 1)
print "Deduce Passwordless. simulateFirstStep. returned value was %s" % auth
def computePrevLoginsSettings(self, customProperty):
settings = None
if customProperty == None:
print "Deduce Passwordless. Previous logins feature is not configured. Set config property '%s' if desired" % self.PREV_LOGIN_SETTING
else:
try:
settings = json.loads(customProperty.getValue2())
if settings['enabled']:
print "Deduce Passwordless. PrevLoginsSettings are %s" % settings
else:
settings = None
print "Deduce Passwordless. Previous logins feature is disabled"
except:
print "Deduce Passwordless. Unparsable config property '%s'" % self.PREV_LOGIN_SETTING
return settings
def getCookieValue(self):
ulist = []
coo = None
httpRequest = ServerUtil.getRequestOrNull()
if httpRequest != None:
for cookie in httpRequest.getCookies():
if cookie.getName() == self.prevLoginsSettings['cookieName']:
coo = cookie
if coo == None:
print "Deduce Passwordless. getCookie. No cookie found"
else:
print "Deduce Passwordless. getCookie. Found cookie"
forgetMs = self.prevLoginsSettings['forgetEntriesAfterMinutes'] * 60 * 1000
try:
now = System.currentTimeMillis()
value = URLDecoder.decode(coo.getValue(), "utf-8")
# value is an array of objects with properties: uid, displayName, lastLogon
value = json.loads(value)
for v in value:
if now - v['lastLogon'] < forgetMs:
ulist.append(v)
# print "==========", ulist
except:
print "Deduce Passwordless. getCookie. Unparsable value, dropping cookie..."
return ulist
def findUid(self, uid, users):
i = 0
idx = -1
for user in users:
if user['uid'] == uid:
idx = i
break
i+=1
return idx
def persistCookie(self, user):
try:
now = System.currentTimeMillis()
uid = user.getUserId()
dname = user.getAttribute("displayName")
users = self.getCookieValue()
idx = self.findUid(uid, users)
if idx >= 0:
u = users.pop(idx)
else:
u = { 'uid': uid, 'displayName': '' if dname == None else dname }
u['lastLogon'] = now
# The most recent goes first :)
users.insert(0, u)
excess = len(users) - self.prevLoginsSettings['maxListSize']
if excess > 0:
print "Deduce Passwordless. persistCookie. Shortening list..."
users = users[:self.prevLoginsSettings['maxListSize']]
value = json.dumps(users, separators=(',',':'))
value = URLEncoder.encode(value, "utf-8")
coo = Cookie(self.prevLoginsSettings['cookieName'], value)
coo.setSecure(True)
coo.setHttpOnly(True)
# One week
coo.setMaxAge(7 * 24 * 60 * 60)
response = self.getHttpResponse()
if response != None:
print "Deduce Passwordless. persistCookie. Adding cookie to response"
response.addCookie(coo)
except:
print "Deduce Passwordless. persistCookie. Exception: ", sys.exc_info()[1]
def getHttpResponse(self):
try:
return FacesContext.getCurrentInstance().getExternalContext().getResponse()
except:
print "Deduce Passwordless. Error accessing HTTP response object: ", sys.exc_info()[1]
return None
def getSessionAttribute(self, attribute_name):
identity = CdiUtil.bean(Identity)
# Try to get attribute value from Seam event context
if identity.isSetWorkingParameter(attribute_name):
return identity.getWorkingParameter(attribute_name)
# Try to get attribute from persistent session
session_id = identity.getSessionId()
if session_id == None:
return None
session_attributes = session_id.getSessionAttributes()
if session_attributes == None:
return None
if session_attributes.containsKey(attribute_name):
return session_attributes.get(attribute_name)
return None
def sendInsightsRequest(self, ip, email, platform, action):
httpService = CdiUtil.bean(HttpService)
http_client = httpService.getHttpsClient()
http_client_params = http_client.getParams()
data = { "site":self.DEDUCE_SITE,"apikey":self.DEDUCE_API_KEY,"ip":ip, "email":email,"user_agent":platform, "action": action }
payload = json.dumps(data)
headers = { "Accept" : "application/json" }
print "Deduce Passwordless. payload %s " % payload
try:
http_service_response = httpService.executePost(http_client, self.DEDUCE_ENDPOINT, None, headers, payload)
http_response = http_service_response.getHttpResponse()
print "http_response %s" % http_response
except:
print "Deduce Passwordless. Exception: ", sys.exc_info()[1]
return False
try:
if not httpService.isResponseStastusCodeOk(http_response):
print "Deduce Passwordless. Got invalid response from validation server: ", str(http_response.getStatusLine().getStatusCode())
httpService.consume(http_response)
return False
response_bytes = httpService.getResponseContent(http_response)
response_string = httpService.convertEntityToString(response_bytes)
print "response_string %s" %response_string
httpService.consume(http_response)
if ("IMPOSSIBLE_TRAVEL" in response_string):
return True
finally:
http_service_response.closeConnection()
if response_string is None:
print "Deduce Passwordless. Got empty response from validation server"
return False
return False
def lockUser(self, user_name):
if StringHelper.isEmpty(user_name):
return None
userService = CdiUtil.bean(UserService)
cacheService= CdiUtil.bean(CacheService)
find_user_by_uid = userService.getUser(user_name)
if (find_user_by_uid == None):
return None
status_attribute_value = userService.getCustomAttribute(find_user_by_uid, "gluuStatus")
if status_attribute_value != None:
user_status = status_attribute_value.getValue()
if StringHelper.equals(user_status, "inactive"):
print "Deduce Passwordless. (lock account). Lock user. User '%s' locked already" % user_name
return
userService.setCustomAttribute(find_user_by_uid, "gluuStatus", "inactive")
updated_user = userService.updateUser(find_user_by_uid)
object_to_store = json.dumps({'locked': True, 'created': LocalDateTime.now().toString()}, separators=(',',':'))
cacheService.put(StringHelper.toString(self.lockExpirationTime), "lock_user_"+user_name, object_to_store);
self.setError( "Impossible travel detected. Your account has been locked. Please contact the administrator")
print "Deduce Passwordless. (lock account). Lock user. User '%s' locked" % user_name
|
8070fb642d8a66f7a8492ca4f48cbbcf0c2d99a9
|
e2f9f79e4fcbb398be6dab049197ad1fb71cb9cf
|
/python-threatexchange/threatexchange/cli/label_cmd.py
|
7efb1301e3b75aee0b1ef1af1653308602a322eb
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebook/ThreatExchange
|
4fbaca7fb4713ca84a64c11379ce44f6bde90f63
|
45dff06ba21f1afdb43a434c9ba0fc49e07b48d8
|
refs/heads/main
| 2023-09-05T23:08:36.580229
| 2023-09-01T10:21:37
| 2023-09-01T10:21:37
| 30,729,678
| 1,146
| 341
|
NOASSERTION
| 2023-09-14T18:08:08
| 2015-02-12T22:53:08
|
C++
|
UTF-8
|
Python
| false
| false
| 5,094
|
py
|
label_cmd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from argparse import ArgumentParser, ArgumentTypeError
import argparse
import pathlib
import typing as t
from threatexchange.cli.helpers import FlexFilesInputAction
from threatexchange.exchanges.fetch_state import SignalOpinion, SignalOpinionCategory
from threatexchange.signal_type.signal_base import MatchesStr, SignalType, TextHasher
from threatexchange import common
from threatexchange.cli.cli_config import CLISettings
from threatexchange.content_type.content_base import ContentType
from threatexchange.exchanges.collab_config import CollaborationConfigBase
from threatexchange.cli import command_base
class LabelCommand(command_base.Command):
"""
[WIP] Label signals and content for sharing.
Warning! This command is still under construction, and is not yet stable.
Please open an issue at https://github.com/facebook/ThreatExchange/issues
    if you need the development team to prioritize stabilizing it.
There are three main types of labeling:
1. Seen
Marking that you've observed a match, which can help others prioritize
review, or track cross-platform spread.
2. True Positive / False Positive
After you've confirmed the results of a match, contributing what that
       result is can help others prioritize signals with more precision.
3. Upload
If you have your own curated signals, sharing them with others can
help them find matches, and give them opportunities to label your
signals.
Examples:
```
$ threatexchange label "Sample Collab" text -l example,foo -- Some text I'm labeling
```
"""
@classmethod
def init_argparse(cls, settings: CLISettings, ap: ArgumentParser) -> None:
ap.add_argument(
"--labels",
"-l",
type=lambda s: set(s.strip().split(",")),
metavar="CSV",
default=set(),
help="labels to apply to item",
)
signal_group = ap.add_mutually_exclusive_group()
signal_group.add_argument(
"--only-signals",
"-S",
nargs="+",
type=common.argparse_choices_pre_type(
[s.get_name() for s in settings.get_all_signal_types()],
settings.get_signal_type,
),
default=[],
help="limit to this signal type",
)
signal_group.add_argument(
"--as-hash",
"-H",
metavar="SIGNAL_TYPE",
type=common.argparse_choices_pre_type(
[s.get_name() for s in settings.get_all_signal_types()],
settings.get_signal_type,
),
help="interpret input as a hash of this type",
)
ap.add_argument(
"collab",
type=lambda n: _collab_type(n, settings),
help="The name of the collaboration",
)
ap.add_argument(
"content_type",
type=common.argparse_choices_pre_type(
[c.get_name() for c in settings.get_all_content_types()],
settings.get_content_type,
),
help="the type of what you are labeling",
)
ap.add_argument(
"files",
nargs=argparse.REMAINDER,
action=FlexFilesInputAction,
help="list of files or -- to interpret remainder as a string",
)
def __init__(
self,
content_type: t.Type[ContentType],
files: t.List[pathlib.Path],
as_hash: t.Optional[t.Type[SignalType]],
collab: CollaborationConfigBase,
only_signals: t.List[t.Type[SignalType]],
labels: t.Set[str],
) -> None:
self.collab = collab
self.content_type = content_type
self.files = files
self.labels = labels
self.only_signals = only_signals
self.as_hash = as_hash
if self.collab is None:
raise ArgumentTypeError("No such collaboration!")
def execute(self, settings: CLISettings) -> None:
self.stderr("This command is not implemented yet, and most actions won't work")
api = settings.apis.get_instance_for_collab(self.collab)
# signal_types = self.only_signals or settings.get_signal_types_for_content(
# self.content_type
# )
if self.as_hash is not None:
for f in self.files:
signal_type = self.as_hash
hash_val = signal_type.validate_signal_str(f.read_text())
api.report_opinion(
signal_type,
hash_val,
SignalOpinion(
True, SignalOpinionCategory.POSITIVE_CLASS, self.labels
),
)
return
raise NotImplementedError
def _collab_type(name: str, settings: CLISettings) -> CollaborationConfigBase:
ret = settings.get_collab(name)
if ret is None:
raise ArgumentTypeError(f"No such collab '{name}'!")
return ret
|
f9dfc4a97b3f7549450b6077ad4c82829eea466c
|
abe6c00f9790df7e6ef20dc02d0b1b225b5020cb
|
/tests/server/models/test_block_schemas.py
|
db6cb32d5bf76b03b33c7c9d2962d5b0215a88ca
|
[
"Apache-2.0"
] |
permissive
|
PrefectHQ/prefect
|
000e6c5f7df80f76a181f0a30f8661c96417c8bd
|
2c50d2b64c811c364cbc5faa2b5c80a742572090
|
refs/heads/main
| 2023-09-05T20:25:42.965208
| 2023-09-05T18:58:06
| 2023-09-05T18:58:06
| 139,199,684
| 12,917
| 1,539
|
Apache-2.0
| 2023-09-14T20:25:45
| 2018-06-29T21:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 35,476
|
py
|
test_block_schemas.py
|
import warnings
from typing import List, Union
import pytest
import sqlalchemy as sa
from pydantic import BaseModel
from prefect.blocks.core import Block
from prefect.server import models, schemas
from prefect.server.models.block_schemas import read_block_schema_by_checksum
from prefect.server.schemas.filters import BlockSchemaFilter
from prefect.utilities.collections import AutoEnum
EMPTY_OBJECT_CHECKSUM = Block._calculate_schema_checksum({})
class TestCreateBlockSchema:
async def test_create_block_schema(self, session, block_type_x):
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={
"title": "x",
"type": "object",
"properties": {
"access_key_id": {"title": "Access Key Id", "type": "string"},
"secret_access_key": {
"title": "Secret Access Key",
"type": "string",
},
"session_token": {"title": "Session Token", "type": "string"},
},
"block_type_slug": "x",
"block_schema_references": {},
},
block_type_id=block_type_x.id,
capabilities=["this block can test"],
),
)
assert block_schema.fields == {
"title": "x",
"type": "object",
"properties": {
"access_key_id": {"title": "Access Key Id", "type": "string"},
"secret_access_key": {"title": "Secret Access Key", "type": "string"},
"session_token": {"title": "Session Token", "type": "string"},
},
"block_type_slug": "x",
"block_schema_references": {},
}
assert (
block_schema.checksum
== "sha256:4448d5cf2ddb989f7fde8b2c36ec89527ca30e0e8ef041eed8bd15c11fe6cfee"
)
assert block_schema.block_type_id == block_type_x.id
db_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert db_block_schema.checksum == block_schema.checksum
assert db_block_schema.fields == block_schema.fields
assert db_block_schema.block_type_id == block_schema.block_type_id
assert db_block_schema.capabilities == ["this block can test"]
async def test_create_nested_block_schema(self, session, block_type_x):
class Y(Block):
a: str
b: str
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y: Y
z: str
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session, block_schema=X._to_block_schema()
)
nested_block_schema = await read_block_schema_by_checksum(
session=session,
checksum=Y._calculate_schema_checksum(),
)
assert nested_block_schema is not None
assert nested_block_schema.fields == {
"block_schema_references": {},
"block_type_slug": "y",
"properties": {
"a": {"title": "A", "type": "string"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": "Y",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema.fields == Y.schema()
async def test_create_multiply_nested_block_schema(self, session, block_type_x):
class A(Block):
d: str
e: str
class Z(Block):
a: A
c: int
class Y(Block):
b: str
c: int
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y: Y
z: Z
await models.block_types.create_block_type(
session=session, block_type=A._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Z._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session, block_schema=X._to_block_schema()
)
block_schemas = await models.block_schemas.read_block_schemas(session=session)
assert len(block_schemas) == 4
nested_block_schema = await read_block_schema_by_checksum(
session=session,
checksum=A._calculate_schema_checksum(),
)
assert nested_block_schema is not None
assert nested_block_schema.fields == {
"block_schema_references": {},
"block_type_slug": "a",
"properties": {
"d": {"title": "D", "type": "string"},
"e": {"title": "E", "type": "string"},
},
"required": ["d", "e"],
"title": "A",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema.fields == A.schema()
async def test_create_nested_block_schema_with_multiply_used_blocks(self, session):
warnings.filterwarnings("ignore", category=UserWarning)
class A(Block):
d: str
e: str
class Z(Block):
a: A
c: int
class Y(Block):
a: A
b: str
c: int
class X(Block):
y: Y
z: Z
await models.block_types.create_block_type(
session=session, block_type=A._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Z._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
block_type_x = await models.block_types.create_block_type(
session=session, block_type=X._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(block_type_id=block_type_x.id),
)
block_schemas = await models.block_schemas.read_block_schemas(session=session)
assert len(block_schemas) == 4
nested_block_schema_a = await read_block_schema_by_checksum(
session=session,
checksum=A._calculate_schema_checksum(),
)
assert nested_block_schema_a is not None
assert nested_block_schema_a.fields == {
"block_schema_references": {},
"block_type_slug": "a",
"properties": {
"d": {"title": "D", "type": "string"},
"e": {"title": "E", "type": "string"},
},
"required": ["d", "e"],
"title": "A",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema_a.fields == A.schema()
nested_block_schema_z = (
await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=Z._calculate_schema_checksum()
)
)
assert nested_block_schema_z is not None
assert nested_block_schema_z.fields == Z.schema()
assert (
Z.schema()["block_schema_references"]["a"]["block_schema_checksum"]
== A._calculate_schema_checksum()
)
nested_block_schema_y = (
await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=Y._calculate_schema_checksum()
)
)
assert nested_block_schema_y is not None
assert nested_block_schema_y.fields == Y.schema()
assert (
Y.schema()["block_schema_references"]["a"]["block_schema_checksum"]
== A._calculate_schema_checksum()
)
async def test_create_block_schema_with_union(
self, session, block_type_x, block_type_y, block_type_z
):
class Z(Block):
_block_type_id = block_type_z.id
_block_type_name = block_type_z.name
b: str
class Y(Block):
_block_type_id = block_type_y.id
_block_type_name = block_type_y.name
a: str
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y_or_z: Union[Y, Z]
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(),
)
assert block_schema.checksum == X._calculate_schema_checksum()
assert block_schema.fields == X.schema()
async def test_create_block_schema_is_idempotent(self, session, block_type_x):
first_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
# Should not raise
second_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
# Should not raise
third_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
assert (
first_create_response.id
== second_create_response.id
== third_create_response.id
)
async def test_create_block_schema_is_idempotent_for_nested_blocks(self, session):
class Child(Block):
age: int
class Parent(Block):
child: Child
parent_block_type = await models.block_types.create_block_type(
session=session, block_type=Parent._to_block_type()
)
child_block_type = await models.block_types.create_block_type(
session=session, block_type=Child._to_block_type()
)
parent_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=Parent._to_block_schema(block_type_id=parent_block_type.id),
)
# Should not raise
child_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=Child._to_block_schema(block_type_id=child_block_type.id),
)
assert (
parent_create_response.fields["block_schema_references"]["child"][
"block_schema_checksum"
]
== child_create_response.checksum
)
class TestReadBlockSchemas:
@pytest.fixture
async def block_schemas_with_capabilities(self, session):
class CanRun(Block):
_block_schema_capabilities = ["run"]
def run(self):
pass
class CanFly(Block):
_block_schema_capabilities = ["fly"]
def fly(self):
pass
class CanSwim(Block):
_block_schema_capabilities = ["swim"]
def swim(self):
pass
class Duck(CanSwim, CanFly, Block):
a: str
class Bird(CanFly, Block):
b: str
class Cat(CanRun, Block):
c: str
block_type_duck = await models.block_types.create_block_type(
session=session, block_type=Duck._to_block_type()
)
block_schema_duck = await models.block_schemas.create_block_schema(
session=session,
block_schema=Duck._to_block_schema(block_type_id=block_type_duck.id),
)
block_type_bird = await models.block_types.create_block_type(
session=session, block_type=Bird._to_block_type()
)
block_schema_bird = await models.block_schemas.create_block_schema(
session=session,
block_schema=Bird._to_block_schema(block_type_id=block_type_bird.id),
)
block_type_cat = await models.block_types.create_block_type(
session=session, block_type=Cat._to_block_type()
)
block_schema_cat = await models.block_schemas.create_block_schema(
session=session,
block_schema=Cat._to_block_schema(block_type_id=block_type_cat.id),
)
await session.commit()
return block_schema_duck, block_schema_bird, block_schema_cat
async def test_read_block_schema(self, session, nested_block_schema):
db_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=nested_block_schema.id
)
assert db_block_schema.id == nested_block_schema.id
assert db_block_schema.checksum == nested_block_schema.checksum
assert db_block_schema.fields == nested_block_schema.fields
assert db_block_schema.block_type_id == nested_block_schema.block_type_id
async def test_read_block_schema_by_checksum(self, session, block_type_x):
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
db_block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=EMPTY_OBJECT_CHECKSUM
)
assert db_block_schema.id == block_schema.id
assert db_block_schema.checksum == block_schema.checksum
assert db_block_schema.fields == block_schema.fields
assert db_block_schema.block_type_id == block_schema.block_type_id
async def test_read_block_schema_by_checksum_with_version(
self, session, client, block_type_x
):
# Create two block schemas with the same checksum, but different versions
block_schema_0 = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={}, block_type_id=block_type_x.id, version="1.0.1"
),
)
block_schema_1 = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={}, block_type_id=block_type_x.id, version="1.1.0"
),
)
assert block_schema_0.checksum == block_schema_1.checksum
# Read first version with version query parameter
read_block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=block_schema_0.checksum, version="1.0.1"
)
assert read_block_schema.id == block_schema_0.id
# Read second version with version
read_block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=block_schema_1.checksum, version="1.1.0"
)
assert read_block_schema.id == block_schema_1.id
# Read without version. Should return most recently created block schema.
read_block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=block_schema_0.checksum
)
assert read_block_schema.id == block_schema_1.id
async def test_read_block_schema_does_not_hardcode_references(
self, session, db, block_type_x
):
block_schema = await models.block_schemas.create_block_schema(
override=True,
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={
"title": "JSON",
"description": "A block that represents JSON",
"type": "object",
"properties": {
"value": {
"title": "Value",
"description": "A JSON-compatible value",
}
},
"required": ["value"],
"block_type_slug": "json",
"secret_fields": [],
"block_schema_references": {},
},
block_type_id=block_type_x.id,
),
)
before_read = (
await session.execute(
sa.select(db.BlockSchema).where(db.BlockSchema.id == block_schema.id)
)
).scalar()
assert before_read.fields.get("block_schema_references") is None
await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
await session.commit()
after_read = (
await session.execute(
sa.select(db.BlockSchema).where(db.BlockSchema.id == block_schema.id)
)
).scalar()
assert after_read.fields.get("block_schema_references") is None
@pytest.fixture
async def nested_schemas(self, session):
# Ignore warnings caused by Block reuse in fixture
warnings.filterwarnings("ignore", category=UserWarning)
class A(Block):
d: str
e: str
class Z(Block):
a: A
c: int
class Y(Block):
b: str
c: int
class X(Block):
_block_schema_version = "1.1.0"
y: Y
z: Z
await models.block_types.create_block_type(
session=session, block_type=A._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Z._to_block_type()
)
block_type_y = await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
block_type_x = await models.block_types.create_block_type(
session=session, block_type=X._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(
block_type_id=block_type_x.id,
),
)
await session.commit()
return (A, X, Y, Z, block_type_x, block_type_y)
async def test_read_all_block_schemas(self, session, nested_schemas):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session
)
assert len(db_block_schemas) == 4
assert db_block_schemas[0].checksum == A._calculate_schema_checksum()
assert db_block_schemas[1].checksum == Z._calculate_schema_checksum()
assert db_block_schemas[2].checksum == Y._calculate_schema_checksum()
assert db_block_schemas[3].checksum == X._calculate_schema_checksum()
assert db_block_schemas[0].fields == A.schema()
assert db_block_schemas[1].fields == Z.schema()
assert db_block_schemas[2].fields == Y.schema()
assert db_block_schemas[3].fields == X.schema()
async def test_read_all_block_schemas_with_limit(self, session, nested_schemas):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session, limit=2
)
assert len(db_block_schemas) == 2
assert db_block_schemas[0].checksum == A._calculate_schema_checksum()
assert db_block_schemas[1].checksum == Z._calculate_schema_checksum()
assert db_block_schemas[0].fields == A.schema()
assert db_block_schemas[1].fields == Z.schema()
async def test_read_all_block_schemas_with_limit_and_offset(
self, session, nested_schemas
):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session, limit=2, offset=2
)
assert len(db_block_schemas) == 2
assert db_block_schemas[0].checksum == Y._calculate_schema_checksum()
assert db_block_schemas[1].checksum == X._calculate_schema_checksum()
assert db_block_schemas[0].fields == Y.schema()
assert db_block_schemas[1].fields == X.schema()
async def test_read_all_block_schemas_with_filters(self, session, nested_schemas):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=schemas.filters.BlockSchemaFilter(
block_type_id=dict(any_=[block_type_x.id])
),
)
assert len(db_block_schemas) == 1
assert db_block_schemas[0].block_type_id == block_type_x.id
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=schemas.filters.BlockSchemaFilter(
block_type_id=dict(any_=[block_type_x.id, block_type_y.id])
),
)
assert len(db_block_schemas) == 2
assert db_block_schemas[0].block_type_id == block_type_y.id
assert db_block_schemas[1].block_type_id == block_type_x.id
async def test_read_all_block_schemas_with_block_type_and_version_filters(
self, session, nested_schemas
):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=schemas.filters.BlockSchemaFilter(
block_type_id=dict(any_=[block_type_x.id]),
version=dict(any_=[X._block_schema_version]),
),
)
assert len(db_block_schemas) == 1
assert db_block_schemas[0].block_type_id == block_type_x.id
db_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=schemas.filters.BlockSchemaFilter(
block_type_id=dict(any_=[block_type_x.id]), version=dict(any_=["1.1.1"])
),
)
assert len(db_block_schemas) == 0
async def test_read_block_schemas_with_capabilities_filter(
self, session, block_schemas_with_capabilities
):
fly_and_swim_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["fly", "swim"])
),
)
assert len(fly_and_swim_block_schemas) == 1
assert [b.id for b in fly_and_swim_block_schemas] == [
block_schemas_with_capabilities[0].id
]
fly_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["fly"])
),
)
assert len(fly_block_schemas) == 2
assert [b.id for b in fly_block_schemas] == [
block_schemas_with_capabilities[1].id,
block_schemas_with_capabilities[0].id,
]
swim_block_schemas = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["swim"])
),
)
assert len(swim_block_schemas) == 1
assert [b.id for b in swim_block_schemas] == [
block_schemas_with_capabilities[0].id
]
@pytest.mark.flaky(
max_runs=3
) # Order of block schema references sometimes doesn't match
async def test_read_block_schema_with_union(
self, session, block_type_x, block_type_y, block_type_z
):
class Z(Block):
_block_type_id = block_type_z.id
_block_type_name = block_type_z.name
b: str
class Y(Block):
_block_type_id = block_type_y.id
_block_type_name = block_type_y.name
a: str
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y_or_z: Union[Y, Z]
await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(),
)
block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=X._calculate_schema_checksum()
)
assert block_schema.fields == X.schema()
async def test_read_block_schemas_with_id_list(self, session, nested_schemas):
A, X, Y, Z, block_type_x, block_type_y = nested_schemas
a_id = (
await read_block_schema_by_checksum(
session=session, checksum=A._calculate_schema_checksum()
)
).id
y_id = (
await read_block_schema_by_checksum(
session=session, checksum=Y._calculate_schema_checksum()
)
).id
result = await models.block_schemas.read_block_schemas(
session=session,
block_schema_filter=BlockSchemaFilter(id=dict(any_=[a_id, y_id])),
)
assert len(result) == 2
assert [a_id, y_id] == [b.id for b in result]
async def test_read_block_with_non_block_object_attributes(self, session):
class NotABlock(BaseModel):
alias: str
class AlsoNotABlock(BaseModel):
pseudonym: str
child: NotABlock
class IsABlock(Block):
size: int
contents: AlsoNotABlock
block_type = await models.block_types.create_block_type(
session=session, block_type=IsABlock._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=IsABlock._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == IsABlock.schema()
assert block_schema.checksum == IsABlock._calculate_schema_checksum()
read_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_block_schema.fields == IsABlock.schema()
async def test_read_block_with_enum_attribute(self, session):
class Fruit(AutoEnum):
APPLE = AutoEnum.auto()
BANANA = AutoEnum.auto()
ORANGE = AutoEnum.auto()
class IsABlock(Block):
size: int
contents: Fruit
block_type = await models.block_types.create_block_type(
session=session, block_type=IsABlock._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=IsABlock._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == IsABlock.schema()
assert block_schema.checksum == IsABlock._calculate_schema_checksum()
read_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_block_schema.fields == IsABlock.schema()
async def test_read_block_with_non_block_union_attribute(self, session):
class NotABlock(BaseModel):
alias: str
class AlsoNotABlock(BaseModel):
pseudonym: str
class IsABlock(Block):
size: int
contents: Union[NotABlock, AlsoNotABlock]
block_type = await models.block_types.create_block_type(
session=session, block_type=IsABlock._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=IsABlock._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == IsABlock.schema()
assert block_schema.checksum == IsABlock._calculate_schema_checksum()
read_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_block_schema.fields == IsABlock.schema()
async def test_read_block_with_non_block_list_attribute(self, session):
class NotABlock(BaseModel):
alias: str
class IsABlock(Block):
size: int
contents: List[NotABlock]
block_type = await models.block_types.create_block_type(
session=session, block_type=IsABlock._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=IsABlock._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == IsABlock.schema()
assert block_schema.checksum == IsABlock._calculate_schema_checksum()
read_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_block_schema.fields == IsABlock.schema()
async def test_read_block_with_both_block_and_non_block_attributes(self, session):
class NotABlock(BaseModel):
alias: str
class IsABlock(Block):
size: int
class IsAlsoABlock(Block):
size: float
contents: IsABlock
config: NotABlock
await models.block_types.create_block_type(
session=session, block_type=IsABlock._to_block_type()
)
block_type = await models.block_types.create_block_type(
session=session, block_type=IsAlsoABlock._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=IsAlsoABlock._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == IsAlsoABlock.schema()
assert block_schema.checksum == IsAlsoABlock._calculate_schema_checksum()
read_parent_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_parent_block_schema.fields == IsAlsoABlock.schema()
read_child_block_schema = (
await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=IsABlock._calculate_schema_checksum()
)
)
assert read_child_block_schema.fields == IsABlock.schema()
async def test_read_block_schema_with_list_block_attribute(self, session):
class Child(Block):
age: float
class Parent(Block):
age: int
children: List[Child]
await models.block_types.create_block_type(
session=session, block_type=Child._to_block_type()
)
block_type = await models.block_types.create_block_type(
session=session, block_type=Parent._to_block_type()
)
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=Parent._to_block_schema(block_type_id=block_type.id),
)
assert block_schema.fields == Parent.schema()
assert block_schema.checksum == Parent._calculate_schema_checksum()
assert block_schema.fields["block_schema_references"] == {
"children": {
"block_type_slug": "child",
"block_schema_checksum": Child._calculate_schema_checksum(),
}
}
read_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert read_block_schema.fields == Parent.schema()
class TestDeleteBlockSchema:
async def test_delete_block_schema(self, session, block_schema):
block_schema_id = block_schema.id
assert await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
assert not await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema_id
        )

    async def test_delete_block_schema_fails_gracefully(self, session, block_schema):
block_schema_id = block_schema.id
assert await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
assert not await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
        )


@pytest.fixture
async def block_schemas_with_capabilities(session):
class CanRun(Block):
_block_schema_capabilities = ["run"]
def run(self):
pass
class CanFly(Block):
_block_schema_capabilities = ["fly"]
def fly(self):
pass
class CanSwim(Block):
_block_schema_capabilities = ["swim"]
def swim(self):
pass
class Duck(CanSwim, CanFly, Block):
a: str
class Bird(CanFly, Block):
b: str
class Cat(CanRun, Block):
c: str
block_type_a = await models.block_types.create_block_type(
session=session, block_type=Duck._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Duck._to_block_schema(block_type_id=block_type_a.id),
)
block_type_b = await models.block_types.create_block_type(
session=session, block_type=Bird._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Bird._to_block_schema(block_type_id=block_type_b.id),
)
block_type_c = await models.block_types.create_block_type(
session=session, block_type=Cat._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Cat._to_block_schema(block_type_id=block_type_c.id),
    )


class TestListAvailableBlockCapabilities:
async def test_list_available_block_capabilities(
self, session, block_schemas_with_capabilities
):
assert sorted(
await models.block_schemas.read_available_block_capabilities(
session=session
)
        ) == sorted(["run", "fly", "swim"])

    async def test_list_available_block_capabilities_with_no_schemas(self, session):
assert (
await models.block_schemas.read_available_block_capabilities(
session=session
)
== []
)
|
e35ed9d2dfc11f959757e020b614be4e19627b6f
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/test/hummingbot/strategy/__init__.py
|
8daf8fe12da5f6df0cbfaed5ca3984479ffaa49a
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 223
|
py
|
__init__.py
|
from typing import Dict

from hummingbot.client.config.config_var import ConfigVar


def assign_config_default(config_map: Dict[str, ConfigVar]):
    for key, value in config_map.items():
        value.value = value.default
|
293a38ba2689ebfd8fc399bb33eecd0a8884d9a0
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/admin_widgets/widgetadmin.py
|
deb07adeccd6410c25cb5d596d0a896ac6fe0c47
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
widgetadmin.py
|
from django.contrib import admin

from .models import (
    Advisor,
    Album,
    Band,
    Bee,
    Car,
    CarTire,
    Event,
    Inventory,
    Member,
    Profile,
    ReleaseEvent,
    School,
    Student,
    User,
    VideoStream,
)


class WidgetAdmin(admin.AdminSite):
    pass


class CarAdmin(admin.ModelAdmin):
    list_display = ["make", "model", "owner"]
    list_editable = ["owner"]


class CarTireAdmin(admin.ModelAdmin):
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == "car":
            kwargs["queryset"] = Car.objects.filter(owner=request.user)
            return db_field.formfield(**kwargs)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)


class EventAdmin(admin.ModelAdmin):
    raw_id_fields = ["main_band", "supporting_bands"]


class AlbumAdmin(admin.ModelAdmin):
    fields = (
        "name",
        "cover_art",
    )
    readonly_fields = ("cover_art",)


class SchoolAdmin(admin.ModelAdmin):
    filter_vertical = ("students",)
    filter_horizontal = ("alumni",)


site = WidgetAdmin(name="widget-admin")

site.register(User)
site.register(Car, CarAdmin)
site.register(CarTire, CarTireAdmin)
site.register(Member)
site.register(Band)
site.register(Event, EventAdmin)
site.register(Album, AlbumAdmin)
site.register(ReleaseEvent, search_fields=["name"])
site.register(VideoStream, autocomplete_fields=["release_event"])
site.register(Inventory)
site.register(Bee)
site.register(Advisor)
site.register(School, SchoolAdmin)
site.register(Student)
site.register(Profile)
|
0fbb8b331d90e9fa5a749458fbb54d4ff1c85fb6
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayBossProdAntlegalchainNdalistQueryResponse.py
|
ee29d51f974944a3d750de1345da3b32695ed98a
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
AlipayBossProdAntlegalchainNdalistQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AntlcNdaProtocolSignRecordExtDO import AntlcNdaProtocolSignRecordExtDO


class AlipayBossProdAntlegalchainNdalistQueryResponse(AlipayResponse):

    def __init__(self):
        super(AlipayBossProdAntlegalchainNdalistQueryResponse, self).__init__()
        self._items_per_page = None
        self._page = None
        self._page_list = None
        self._total_items = None
        self._total_pages = None

    @property
    def items_per_page(self):
        return self._items_per_page

    @items_per_page.setter
    def items_per_page(self, value):
        self._items_per_page = value

    @property
    def page(self):
        return self._page

    @page.setter
    def page(self, value):
        self._page = value

    @property
    def page_list(self):
        return self._page_list

    @page_list.setter
    def page_list(self, value):
        if isinstance(value, list):
            self._page_list = list()
            for i in value:
                if isinstance(i, AntlcNdaProtocolSignRecordExtDO):
                    self._page_list.append(i)
                else:
                    self._page_list.append(AntlcNdaProtocolSignRecordExtDO.from_alipay_dict(i))

    @property
    def total_items(self):
        return self._total_items

    @total_items.setter
    def total_items(self, value):
        self._total_items = value

    @property
    def total_pages(self):
        return self._total_pages

    @total_pages.setter
    def total_pages(self, value):
        self._total_pages = value

    def parse_response_content(self, response_content):
        response = super(AlipayBossProdAntlegalchainNdalistQueryResponse, self).parse_response_content(response_content)
        if 'items_per_page' in response:
            self.items_per_page = response['items_per_page']
        if 'page' in response:
            self.page = response['page']
        if 'page_list' in response:
            self.page_list = response['page_list']
        if 'total_items' in response:
            self.total_items = response['total_items']
        if 'total_pages' in response:
            self.total_pages = response['total_pages']
|
d24dba4c73600d07d14c55e7281d720c34f8123c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/EcalAlgo/python/EcalEndcapGeometryDBWriter_cfi.py
|
13efc7afb3bc307f4537d5ff98d26c7197cc1b79
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 871
|
py
|
EcalEndcapGeometryDBWriter_cfi.py
|
import FWCore.ParameterSet.Config as cms

EcalEndcapGeometryEP = cms.ESProducer("EcalEndcapGeometryEP",
                                      applyAlignment = cms.bool(False),
                                      appendToDataLabel = cms.string("_master")
                                      )

_EcalEndcapGeometryEP_dd4hep = cms.ESProducer("EcalEndcapGeometryEPdd4hep",
                                              applyAlignment = cms.bool(False),
                                              appendToDataLabel = cms.string("_master")
                                              )

from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toReplaceWith(EcalEndcapGeometryEP, _EcalEndcapGeometryEP_dd4hep)

EcalEndcapGeometryToDBEP = cms.ESProducer("EcalEndcapGeometryToDBEP",
                                          applyAlignment = cms.bool(False),
                                          appendToDataLabel = cms.string("_toDB")
                                          )
|
0f9e0b068044f61eff9afa6ca4242920e95f2d9a
|
7895cbaced82455230c2bc4d42be5b3e4c67cab8
|
/examples/plots/plot_axis_angle_from_two_vectors.py
|
7c7e4a01cdc69cd36b3fcd8d2359cbf66069ac6f
|
[
"BSD-3-Clause"
] |
permissive
|
dfki-ric/pytransform3d
|
0ee7e37c92e7bd328f31813610a9797d296e3cd3
|
cc923cd91417a41ab08b32278eeabc2e31ab6a93
|
refs/heads/main
| 2023-08-18T07:22:09.271385
| 2023-08-10T08:41:15
| 2023-08-10T08:45:10
| 91,809,394
| 168
| 25
|
NOASSERTION
| 2023-08-22T20:09:27
| 2017-05-19T13:35:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
plot_axis_angle_from_two_vectors.py
|
"""
====================================================
Axis-Angle Representation from Two Direction Vectors
====================================================
This example shows how we can compute the axis-angle representation of a
rotation that transforms a direction given by a vector 'a' to a direction
given by a vector 'b'. We show both vectors, the rotation about the rotation
axis and the initial and resulting coordinate frame, where the vector 'b'
and its corresponding frame after the rotation are represented by shorter
lines.
"""
import numpy as np
import matplotlib.pyplot as plt

from pytransform3d.rotations import (
    axis_angle_from_two_directions, matrix_from_axis_angle, plot_axis_angle,
    plot_basis)
from pytransform3d.plot_utils import make_3d_axis, plot_vector


a = np.array([1.0, 0.0, 0.0])
b = np.array([0.76958075, -0.49039301, -0.40897453])
aa = axis_angle_from_two_directions(a, b)

ax = make_3d_axis(ax_s=1)
plot_vector(ax, start=np.zeros(3), direction=a, s=1.0)
plot_vector(ax, start=np.zeros(3), direction=b, s=0.5)
plot_axis_angle(ax, aa)
plot_basis(ax)
plot_basis(ax, R=matrix_from_axis_angle(aa), s=0.5)
plt.show()
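
# Added sketch (not part of the original example): `axis_angle_from_two_directions`
# should agree with the textbook construction below -- the rotation axis is the
# normalized cross product of the two directions, and the angle comes from the
# normalized dot product. This is illustrative only.
axis = np.cross(a, b)
axis = axis / np.linalg.norm(axis)
cos_angle = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
angle = np.arccos(np.clip(cos_angle, -1.0, 1.0))
manual_aa = np.hstack((axis, [angle]))  # compact (x, y, z, angle) representation
print("library:", aa)
print("manual: ", manual_aa)  # expected to match `aa` up to floating-point error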
|
865d3229794069e9ea85aaa300bcae4d4879006b
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/modelscope/modelscope/preprocessors/nlp/space_T_en/fields/process_dataset.py
|
88059351526f1e306fb17c60483de1ce84dd4542
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
process_dataset.py
|
# Copyright (c) rhythmcao modified from https://github.com/rhythmcao/text2sql-lgesql.
import os
import pickle
import sys

from text2sql_lgesql.asdl.asdl import ASDLGrammar
from text2sql_lgesql.asdl.transition_system import TransitionSystem

sys.path.append(os.path.dirname(os.path.dirname(__file__)))


def process_example(processor, entry, db, trans, verbose=False):
    # preprocess raw tokens, schema linking and subgraph extraction
    entry = processor.pipeline(entry, db, verbose=verbose)
    # generate target output actions
    entry['ast'] = []
    entry['actions'] = []
    return entry


def process_tables(processor, tables_list, output_path=None, verbose=False):
    tables = {}
    for each in tables_list:
        if verbose:
            print('*************** Processing database %s **************' %
                  (each['db_id']))
        tables[each['db_id']] = processor.preprocess_database(
            each, verbose=verbose)
    print('In total, processed %d databases.' % (len(tables)))
    if output_path is not None:
        pickle.dump(tables, open(output_path, 'wb'))
    return tables


def process_dataset(model_dir,
                    processor,
                    dataset,
                    tables,
                    output_path=None,
                    skip_large=False,
                    verbose=False):
    grammar = ASDLGrammar.from_filepath(
        os.path.join(model_dir, 'sql_asdl_v2.txt'))
    trans = TransitionSystem.get_class_by_lang('sql')(grammar)
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100:
            continue
        if verbose:
            print('*************** Processing %d-th sample **************' %
                  (idx))
        entry = process_example(
            processor, entry, tables[entry['db_id']], trans, verbose=verbose)
        processed_dataset.append(entry)
    if output_path is not None:
        # serialize preprocessed dataset
        pickle.dump(processed_dataset, open(output_path, 'wb'))
    return processed_dataset
|