Column schema (name, dtype, and observed range or number of distinct values, as reported by the dataset viewer):

| column | dtype | observed range / values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 4 to 721 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 91 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | lengths 3 to 113 |
| content | string | lengths 6 to 10.2M |
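The rows below are easier to consume programmatically than to read as a table. A minimal sketch of streaming this split and filtering on the columns above, assuming the data is published on the Hugging Face Hub; "org/dataset-name" is a placeholder identifier, not the dataset's real name.

```python
# Hedged sketch, not the dataset's documented API: assumes a Hub-hosted split and
# a placeholder identifier. Column names are taken from the schema above.
from datasets import load_dataset

rows = load_dataset("org/dataset-name", split="train", streaming=True)

for row in rows:
    # `content` holds the raw file text; the remaining columns are metadata.
    if row["language"] == "Python" and not row["is_vendor"] and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break
```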
Row 1

blob_id: 65f19364567ad1b3f441f85e6cba2b993cf71d25
directory_id: 9803232b04daa00eb4038be338b833907fd1625f
path: /blender_bindings/material_loader/shaders/source2_shaders/sky.py
content_id: d4ba8d8297c1dad708e42d8f3ccbb32cb909fa45
detected_licenses: ["MIT"]
license_type: permissive
repo_name: REDxEYE/SourceIO
snapshot_id: a0ff3cff37504afdb906e4ee20c1077a8daf2912
revision_id: 85661fe057cef1ad2a779a9d48e810ea214f4f07
branch_name: refs/heads/master
visit_date: 2023-08-08T18:35:28.771447
revision_date: 2023-08-07T22:26:59
committer_date: 2023-08-07T22:26:59
github_id: 170,197,673
star_events_count: 409
fork_events_count: 53
gha_license_id: MIT
gha_event_created_at: 2023-08-23T18:40:38
gha_created_at: 2019-02-11T20:33:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,661
extension: py
filename: sky.py
content:
from pathlib import Path
import bpy
import numpy as np
from .....library.shared.content_providers.content_manager import \
ContentManager
from .....library.source2 import CompiledTextureResource
from .....library.source2.data_types.blocks.texture_data import VTexFormat
from .....library.utils.thirdparty.equilib.cube2equi_numpy import \
run as convert_to_eq
from .....logger import SLoggingManager
from ...shader_base import Nodes
from ..source2_shader_base import Source2ShaderBase
log_manager = SLoggingManager()
class Skybox(Source2ShaderBase):
SHADER: str = 'sky.vfx'
def __init__(self, source2_material):
super().__init__(source2_material)
self.logger = log_manager.get_logger(f'Shaders::{self.SHADER}')
self.do_arrange = True
@property
def sky_texture(self):
texture_path = self._material_resource.get_texture_property('g_tSkyTexture', None)
if texture_path:
texture_resource = self._material_resource.get_child_resource(texture_path, ContentManager(),
CompiledTextureResource)
(width, height) = texture_resource.get_resolution(0)
faces = {}
for i, k in enumerate("FBLRUD"):
data, _ = texture_resource.get_cubemap_face(i, 0)
side = data.reshape((width, height, 4))
if k == 'B':
side = np.rot90(side, 2)
if k == 'L':
side = np.rot90(side, 3)
if k == 'R':
side = np.rot90(side, 1)
faces[k] = side.T
pixel_data = convert_to_eq(faces, "dict", 2048, 1024, 'default', 'bilinear').T
pixel_data = np.rot90(pixel_data, 1)
# pixel_data = np.flipud(pixel_data)
name = Path(texture_path).stem
image = bpy.data.images.new(
name + '.tga',
width=2048,
height=1024,
alpha=True
)
image.alpha_mode = 'CHANNEL_PACKED'
if pixel_data.shape[0] == 0:
return None
pixel_format = texture_resource.get_texture_format()
if pixel_format in (VTexFormat.RGBA16161616F, VTexFormat.BC6H):
image.use_generated_float = True
image.file_format = 'HDR'
image.pixels.foreach_set(pixel_data.astype(np.float32).ravel())
else:
image.file_format = 'PNG'
image.pixels.foreach_set(pixel_data.ravel())
image.pack()
return image
return None
def create_nodes(self, material_name):
self.logger.info(f'Creating material {repr(material_name)}')
self.bpy_material = bpy.data.worlds.get(material_name, False) or bpy.data.worlds.new(material_name)
if self.bpy_material is None:
self.logger.error('Failed to get or create material')
return 'UNKNOWN'
if self.bpy_material.get('source_loaded'):
return 'LOADED'
self.bpy_material.use_nodes = True
self.clean_nodes()
self.bpy_material['source_loaded'] = True
material_output = self.create_node(Nodes.ShaderNodeOutputWorld)
shader = self.create_node(Nodes.ShaderNodeBackground, self.SHADER)
self.connect_nodes(shader.outputs['Background'], material_output.inputs['Surface'])
texture = self.create_node(Nodes.ShaderNodeTexEnvironment)
texture.image = self.sky_texture
self.connect_nodes(texture.outputs['Color'], shader.inputs['Color'])
Row 2

blob_id: 7f8ae6042590c28a0e6eebfddf708a77cd4fb890
directory_id: b1acd84bdd4cfb952081080bf6cfdf1dfdaadfbb
path: /tests/test_onnxfx.py
content_id: 23eb92c538a55b034af17a3dbd936ab93a0969cb
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later"]
license_type: permissive
repo_name: microsoft/onnxconverter-common
snapshot_id: b5b03bccae1c6c40cdcfd8d2e9b18db7e3f0d61b
revision_id: 8d2f85f0a7039a0b0defd6c7509cb905914a4d2f
branch_name: refs/heads/master
visit_date: 2023-08-31T14:19:50.159510
revision_date: 2023-08-22T04:06:15
committer_date: 2023-08-22T04:06:15
github_id: 181,770,857
star_events_count: 182
fork_events_count: 59
gha_license_id: MIT
gha_event_created_at: 2023-08-22T04:23:04
gha_created_at: 2019-04-16T21:35:19
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,333
extension: py
filename: test_onnxfx.py
content:
import unittest
import numpy as np
import onnxruntime as _ort
import packaging.version as pv
from onnxconverter_common.onnx_fx import Graph, OnnxOperatorBuilderX
from onnxconverter_common.onnx_fx import GraphFunctionType as _Ty
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from onnxconverter_common.optimizer import optimize_onnx_model
def _ort_inference(mdl, inputs):
sess = _ort.InferenceSession(mdl.SerializeToString())
return sess.run(None, inputs)
Graph.inference_runtime = _ort_inference
Graph.opset = 9
onnx_function = Graph.trace
@unittest.skipIf(get_maximum_opset_supported() < 9, "onnx_fx only supports ONNX opset 9 and greater")
class ONNXFunctionTest(unittest.TestCase):
# this works, and the exported graph is usable:
def test_core(self):
@onnx_function
def f(x, y):
return x + y
@onnx_function
def g(x, y):
return x.ox.abs(f(x, y) + 1.0)
self.assertTrue(
np.allclose(g([2.0], [-5.0]), np.array([2.0])))
def test_loop(self):
@onnx_function(outputs=['y1', 'y2', 'y3', 'y4'],
input_types=[_Ty.I([1])],
output_types=[_Ty.F([None]), _Ty.F([None]), _Ty.F([None, 1]), _Ty.F([None, 1])])
def loop_test(len):
ox = len.ox
s_len = ox.squeeze(len, axes=[0])
is_true = ox.constant(value=True)
@onnx_function(outputs=['c_o', 'i_o', 'j_o', 'all_i', 'all_j'],
output_types=[_Ty.b, _Ty.f, _Ty.f, _Ty.f, _Ty.f],
input_types=[_Ty.I([1]), _Ty.b, _Ty.F([1]), _Ty.F([1])])
def range_body(iter_n, cond, i, j):
return (is_true,
i + i.ox.constant(value=1.0), j + 2.0, i, j)
one_c = ox.constant(value=-1.0)
y1, y2, y3, y4 = ox.loop(s_len, is_true, range_body, inputs=[one_c, one_c],
outputs=['y1_o', 'y2_o', 'y3_o', 'y4_o'])
return y1, y2, y3, y4
self.assertEqual(
loop_test(np.array([16], dtype=np.int64))[2][4], 3.0)
@unittest.skipIf(pv.Version(_ort.__version__.split('-')[0]) < pv.Version("1.4.0"),
"onnxruntime fixed the issue in matmul since 1.4.0")
def test_matmul_opt(self):
@onnx_function(outputs=['z'],
input_types=(_Ty.F([1, 1, 6, 1])),
output_types=[_Ty.f])
def transpose_n_matmul(x):
ox = x.ox # type: OnnxOperatorBuilderX
wm = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).astype(np.float32).reshape([2, 6])
b = ox.constant(value=wm)
a = ox.transpose(x, perm=[0, 1, 3, 2])
c = ox.transpose(b, perm=[1, 0])
return ox.matmul([a, c])
m1 = np.array([[2, 3], [4, 5], [6, 7]]).astype(np.float32).reshape([1, 1, 6, 1])
expected = transpose_n_matmul(m1)
opted = optimize_onnx_model(transpose_n_matmul.to_model())
actual = _ort_inference(opted, {'x': m1})
self.assertTrue(np.allclose(expected, actual), "The result mismatch")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ONNXFunctionTest)
# suite.debug()
unittest.TextTestRunner().run(suite)
Row 3

blob_id: 37fc88659d001aad4ef61194e434291406a22f86
directory_id: 6ffd23679939f59f0a09c9507a126ba056b239d7
path: /imperative/python/megengine/data/dataset/__init__.py
content_id: ea80e8be3c80a105597c66b604d72ec847fdd39d
detected_licenses: ["LicenseRef-scancode-generic-cla", "Apache-2.0"]
license_type: permissive
repo_name: MegEngine/MegEngine
snapshot_id: 74c1c9b6022c858962caf7f27e6f65220739999f
revision_id: 66b79160d35b2710c00befede0c3fd729109e474
branch_name: refs/heads/master
visit_date: 2023-08-23T20:01:32.476848
revision_date: 2023-08-01T07:12:01
committer_date: 2023-08-11T06:04:12
github_id: 248,175,118
star_events_count: 5,697
fork_events_count: 585
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-19T05:11:07
gha_created_at: 2020-03-18T08:21:58
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 124
extension: py
filename: __init__.py
content:
# -*- coding: utf-8 -*-
from .meta_dataset import ArrayDataset, ConcatDataset, Dataset, StreamDataset
from .vision import *
Row 4

blob_id: fb092d0af95dd53c7a3e61a63392c6b7467bc35c
directory_id: 4bcc9806152542ab43fc2cf47c499424f200896c
path: /tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
content_id: 2e12b0eb1ee8b491cb6fd3906c9cf5637b60055c
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-generic-cla", "BSD-2-Clause"]
license_type: permissive
repo_name: tensorflow/tensorflow
snapshot_id: 906276dbafcc70a941026aa5dc50425ef71ee282
revision_id: a7f3934a67900720af3d3b15389551483bee50b8
branch_name: refs/heads/master
visit_date: 2023-08-25T04:24:41.611870
revision_date: 2023-08-25T04:06:24
committer_date: 2023-08-25T04:14:08
github_id: 45,717,250
star_events_count: 208,740
fork_events_count: 109,943
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:55:50
gha_created_at: 2015-11-07T01:19:20
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,645
extension: py
filename: mnist_train.py
content:
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST model float training script with TensorFlow graph execution."""
import os
from absl import flags
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.compiler.mlir.tfr.examples.mnist import gen_mnist_ops
from tensorflow.compiler.mlir.tfr.examples.mnist import ops_defs # pylint: disable=unused-import
from tensorflow.python.framework import load_library
flags.DEFINE_integer('train_steps', 200, 'Number of steps in training.')
_lib_dir = os.path.dirname(gen_mnist_ops.__file__)
_lib_name = os.path.basename(gen_mnist_ops.__file__)[4:].replace('.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))
# MNIST dataset parameters.
num_classes = 10 # total classes (0-9 digits).
num_features = 784 # data features (img shape: 28*28).
num_channels = 1
# Training parameters.
learning_rate = 0.001
display_step = 10
batch_size = 32
# Network parameters.
n_hidden_1 = 32 # 1st conv layer number of neurons.
n_hidden_2 = 64 # 2nd conv layer number of neurons.
n_hidden_3 = 64 # 1st fully connected layer of neurons.
flatten_size = num_features // 16 * n_hidden_2
seed = 66478
class FloatModel(tf.Module):
"""Float inference for mnist model."""
def __init__(self):
self.weights = {
'f1':
tf.Variable(
tf.random.truncated_normal([5, 5, num_channels, n_hidden_1],
stddev=0.1,
seed=seed)),
'f2':
tf.Variable(
tf.random.truncated_normal([5, 5, n_hidden_1, n_hidden_2],
stddev=0.1,
seed=seed)),
'f3':
tf.Variable(
tf.random.truncated_normal([n_hidden_3, flatten_size],
stddev=0.1,
seed=seed)),
'f4':
tf.Variable(
tf.random.truncated_normal([num_classes, n_hidden_3],
stddev=0.1,
seed=seed)),
}
self.biases = {
'b1': tf.Variable(tf.zeros([n_hidden_1])),
'b2': tf.Variable(tf.zeros([n_hidden_2])),
'b3': tf.Variable(tf.zeros([n_hidden_3])),
'b4': tf.Variable(tf.zeros([num_classes])),
}
@tf.function
def __call__(self, data):
"""The Model definition."""
x = tf.reshape(data, [-1, 28, 28, 1])
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input).
# NOTE: The data/x/input is always specified in floating point precision.
# output shape: [-1, 28, 28, 32]
conv1 = gen_mnist_ops.new_conv2d(x, self.weights['f1'], self.biases['b1'],
1, 1, 1, 1, 'SAME', 'RELU')
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
# output shape: [-1, 14, 14, 32]
max_pool1 = gen_mnist_ops.new_max_pool(conv1, 2, 2, 2, 2, 'SAME')
# output shape: [-1, 14, 14, 64]
conv2 = gen_mnist_ops.new_conv2d(max_pool1, self.weights['f2'],
self.biases['b2'], 1, 1, 1, 1, 'SAME',
'RELU')
# output shape: [-1, 7, 7, 64]
max_pool2 = gen_mnist_ops.new_max_pool(conv2, 2, 2, 2, 2, 'SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
# output shape: [-1, 7*7*64]
reshape = tf.reshape(max_pool2, [-1, flatten_size])
# output shape: [-1, 1024]
fc1 = gen_mnist_ops.new_fully_connected(reshape, self.weights['f3'],
self.biases['b3'], 'RELU')
# output shape: [-1, 10]
return gen_mnist_ops.new_fully_connected(fc1, self.weights['f4'],
self.biases['b4'])
def main(strategy):
"""Trains an MNIST model using the given tf.distribute.Strategy."""
# TODO(fengliuai): put this in some automatically generated code.
os.environ[
'TF_MLIR_TFR_LIB_DIR'] = 'tensorflow/compiler/mlir/tfr/examples/mnist'
ds_train = tfds.load('mnist', split='train', shuffle_files=True)
ds_train = ds_train.shuffle(1024).batch(batch_size).prefetch(64)
ds_train = strategy.experimental_distribute_dataset(ds_train)
with strategy.scope():
# Create an mnist float model with the specified float state.
model = FloatModel()
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
def train_step(features):
inputs = tf.image.convert_image_dtype(
features['image'], dtype=tf.float32, saturate=False)
labels = tf.one_hot(features['label'], num_classes)
with tf.GradientTape() as tape:
logits = model(inputs)
loss_value = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels, logits))
grads = tape.gradient(loss_value, model.trainable_variables)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return accuracy, loss_value
@tf.function
def distributed_train_step(dist_inputs):
per_replica_accuracy, per_replica_losses = strategy.run(
train_step, args=(dist_inputs,))
accuracy = strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_accuracy, axis=None)
loss_value = strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
return accuracy, loss_value
iterator = iter(ds_train)
accuracy = 0.0
for step in range(flags.FLAGS.train_steps):
accuracy, loss_value = distributed_train_step(next(iterator))
if step % display_step == 0:
tf.print('Step %d:' % step)
tf.print(' Loss = %f' % loss_value)
tf.print(' Batch accuracy = %f' % accuracy)
return accuracy
Row 5

blob_id: 07985c28a5330514f5c73b1b089c7fcd460ee56a
directory_id: 7d571b303508d302bf22e4fba3e19044af309fb8
path: /batchflow/models/torch/blocks/transformer_blocks.py
content_id: 04b92965a58d45b784a5cd24fbd0bedc3293d438
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: analysiscenter/batchflow
snapshot_id: 0a62943836ff41c24274118c410af763750b490b
revision_id: bcc2c723976cb5780d7b2876f2c2df74c186d343
branch_name: refs/heads/master
visit_date: 2023-09-01T08:29:06.906776
revision_date: 2023-08-23T14:11:03
committer_date: 2023-08-23T14:11:03
github_id: 84,835,419
star_events_count: 110
fork_events_count: 42
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-08T10:25:34
gha_created_at: 2017-03-13T14:22:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,035
extension: py
filename: transformer_blocks.py
content:
""" Transformer blocks from various NN architectures. """
from .core import Block
from ..utils import get_num_channels
class SegFormerBlock(Block):
""" SegFormer block: semantic segmentation block, based on transformer architectures.
Essentially, a sequence of efficient self attention and MLP block.
Enze Xie et al. "`SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers
<https://arxiv.org/abs/2105.15203>`_"
Key difference to other transformer-based networks is the absence of positional encoding.
Pairing this block, authors propose to use simple MLP decoder: it is available as :class:`~.MLPDecoderModule`.
Parameters
----------
ratio : int
Spatial reduction ratio for self attention.
num_heads : int
Number of parallel attention heads. Must be a divisor of input number of channels.
mlp_expansion : int
Expansion ratio for channels in MLP block.
"""
def __init__(self, inputs=None, layout='RnS! Rnccac!', ratio=4, num_heads=8,
mlp_expansion=4, drop_path=0.0, layer_scale=1, **kwargs):
in_channels = get_num_channels(inputs)
kwargs = {
'attention': 'emha',
'self_attention': {'ratio': ratio, 'num_heads': num_heads},
'channels': [in_channels, in_channels*mlp_expansion, in_channels],
'kernel_size': [1, 3, 1],
'groups': [1, in_channels, 1],
'stride': 1,
'bias': True,
'activation': 'GELU',
'branch_end': {'drop_path': drop_path, 'layer_scale': layer_scale},
**kwargs
}
super().__init__(inputs=inputs, layout=layout, **kwargs)
class MOATBlock(Block):
""" MOAT block: combination of Mobile Conv blocks (inverted residuals) and transformer-like self attention.
Yang et al. "`MOAT: Alternating Mobile Convolution and Attention Brings Strong Vision Models
<https://arxiv.org/abs/2210.01820>`_"
Parameters
----------
ratio : int
Spatial reduction ratio for self attention.
num_heads : int
Number of parallel attention heads. Must be a divisor of input number of channels.
expansion : int
Expansion ratio for channels in MBConv part block.
"""
def __init__(self, inputs=None, layout='Rnc nac nac! RnS!', ratio=4, num_heads=8,
expansion=4, drop_path=0.0, layer_scale=1, **kwargs):
in_channels = get_num_channels(inputs)
kwargs = {
'channels': [in_channels, in_channels*expansion, in_channels],
'kernel_size': [1, 3, 1],
'groups': [1, in_channels, 1],
'stride': 1,
'bias': False,
'attention': 'emha',
'self_attention': {'ratio': ratio, 'num_heads': num_heads},
'activation': 'GELU',
'branch_end': {'drop_path': drop_path, 'layer_scale': layer_scale},
**kwargs
}
super().__init__(inputs=inputs, layout=layout, **kwargs)
Row 6

blob_id: 736a6fda0492c215ce582cc56496f4b01f347dd4
directory_id: f0b741f24ccf8bfe9bd1950425d83b6291d21b10
path: /sdk/python/kfp/deprecated/cli/experiment.py
content_id: 543f66c584fdc6106e50cc7312fda41f4c76ddee
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: kubeflow/pipelines
snapshot_id: e678342b8a325559dec0a6e1e484c525fdcc8ce8
revision_id: 3fb199658f68e7debf4906d9ce32a9a307e39243
branch_name: refs/heads/master
visit_date: 2023-09-04T11:54:56.449867
revision_date: 2023-09-01T19:07:33
committer_date: 2023-09-01T19:12:27
github_id: 133,100,880
star_events_count: 3,434
fork_events_count: 1,675
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:19:06
gha_created_at: 2018-05-12T00:31:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,548
extension: py
filename: experiment.py
content:
import click
import json
from typing import List
from kfp.deprecated.cli.output import print_output, OutputFormat
import kfp_server_api
from kfp_server_api.models.api_experiment import ApiExperiment
@click.group()
def experiment():
"""Manage experiment resources."""
@experiment.command()
@click.option('-d', '--description', help="Description of the experiment.")
@click.argument("name")
@click.pass_context
def create(ctx: click.Context, description: str, name: str):
"""Create an experiment."""
client = ctx.obj["client"]
output_format = ctx.obj["output"]
response = client.create_experiment(name, description=description)
_display_experiment(response, output_format)
@experiment.command()
@click.option(
'--page-token', default='', help="Token for starting of the page.")
@click.option(
'-m', '--max-size', default=100, help="Max size of the listed experiments.")
@click.option(
'--sort-by',
default="created_at desc",
help="Can be '[field_name]', '[field_name] desc'. For example, 'name desc'."
)
@click.option(
'--filter',
help=(
"filter: A url-encoded, JSON-serialized Filter protocol buffer "
"(see [filter.proto](https://github.com/kubeflow/pipelines/blob/master/backend/api/filter.proto))."
))
@click.pass_context
def list(ctx: click.Context, page_token: str, max_size: int, sort_by: str,
filter: str):
"""List experiments."""
client = ctx.obj['client']
output_format = ctx.obj['output']
response = client.list_experiments(
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
if response.experiments:
_display_experiments(response.experiments, output_format)
else:
if output_format == OutputFormat.json.name:
msg = json.dumps([])
else:
msg = "No experiments found"
click.echo(msg)
@experiment.command()
@click.argument("experiment-id")
@click.pass_context
def get(ctx: click.Context, experiment_id: str):
"""Get detailed information about an experiment."""
client = ctx.obj["client"]
output_format = ctx.obj["output"]
response = client.get_experiment(experiment_id)
_display_experiment(response, output_format)
@experiment.command()
@click.argument("experiment-id")
@click.pass_context
def delete(ctx: click.Context, experiment_id: str):
"""Delete an experiment."""
confirmation = "Caution. The RunDetails page could have an issue" \
" when it renders a run that has no experiment." \
" Do you want to continue?"
if not click.confirm(confirmation):
return
client = ctx.obj["client"]
client.delete_experiment(experiment_id)
click.echo("{} is deleted.".format(experiment_id))
def _display_experiments(experiments: List[ApiExperiment],
output_format: OutputFormat):
headers = ["Experiment ID", "Name", "Created at"]
data = [
[exp.id, exp.name, exp.created_at.isoformat()] for exp in experiments
]
print_output(data, headers, output_format, table_format="grid")
def _display_experiment(exp: kfp_server_api.ApiExperiment,
output_format: OutputFormat):
table = [
["ID", exp.id],
["Name", exp.name],
["Description", exp.description],
["Created at", exp.created_at.isoformat()],
]
if output_format == OutputFormat.table.name:
print_output([], ["Experiment Details"], output_format)
print_output(table, [], output_format, table_format="plain")
elif output_format == OutputFormat.json.name:
print_output(dict(table), [], output_format)
@experiment.command()
@click.option(
"--experiment-id",
default=None,
help="The ID of the experiment to archive, can only supply either an experiment ID or name."
)
@click.option(
"--experiment-name",
default=None,
help="The name of the experiment to archive, can only supply either an experiment ID or name."
)
@click.pass_context
def archive(ctx: click.Context, experiment_id: str, experiment_name: str):
"""Archive an experiment."""
client = ctx.obj["client"]
if (experiment_id is None) == (experiment_name is None):
raise ValueError('Either experiment_id or experiment_name is required')
if not experiment_id:
experiment = client.get_experiment(experiment_name=experiment_name)
experiment_id = experiment.id
client.archive_experiment(experiment_id=experiment_id)
Row 7

blob_id: 8c1d6066faf2b565c56456fec5d66d27725352f6
directory_id: 3a8678a73ff5caa3df02da97a0a0b49ab4482994
path: /python/tests/catalog/test_base.py
content_id: 29e93d0c9d052b8d24599b08aa18c343baebb47e
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: apache/iceberg
snapshot_id: b21a9c1bfbb328919f51cd257772dfd1bd86aaff
revision_id: c9ce6a123b49c1c4e5bd950b388d69e6ff849b5d
branch_name: refs/heads/master
visit_date: 2023-09-03T15:54:18.098529
revision_date: 2023-09-03T12:37:39
committer_date: 2023-09-03T12:37:39
github_id: 158,256,479
star_events_count: 4,358
fork_events_count: 1,659
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T16:31:51
gha_created_at: 2018-11-19T16:26:46
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 23,152
extension: py
filename: test_base.py
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name
from typing import (
Dict,
List,
Optional,
Set,
Union,
)
import pytest
from pyiceberg.catalog import (
Catalog,
Identifier,
Properties,
PropertiesUpdateSummary,
)
from pyiceberg.exceptions import (
NamespaceAlreadyExistsError,
NamespaceNotEmptyError,
NoSuchNamespaceError,
NoSuchTableError,
TableAlreadyExistsError,
)
from pyiceberg.io import load_file_io
from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionField, PartitionSpec
from pyiceberg.schema import Schema
from pyiceberg.table import (
AddSchemaUpdate,
CommitTableRequest,
CommitTableResponse,
SetCurrentSchemaUpdate,
Table,
)
from pyiceberg.table.metadata import TableMetadata, TableMetadataV1, new_table_metadata
from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
from pyiceberg.transforms import IdentityTransform
from pyiceberg.typedef import EMPTY_DICT
from pyiceberg.types import IntegerType, LongType, NestedField
class InMemoryCatalog(Catalog):
"""An in-memory catalog implementation for testing purposes."""
__tables: Dict[Identifier, Table]
__namespaces: Dict[Identifier, Properties]
def __init__(self, name: str, **properties: str) -> None:
super().__init__(name, **properties)
self.__tables = {}
self.__namespaces = {}
def create_table(
self,
identifier: Union[str, Identifier],
schema: Schema,
location: Optional[str] = None,
partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC,
sort_order: SortOrder = UNSORTED_SORT_ORDER,
properties: Properties = EMPTY_DICT,
) -> Table:
identifier = Catalog.identifier_to_tuple(identifier)
namespace = Catalog.namespace_from(identifier)
if identifier in self.__tables:
raise TableAlreadyExistsError(f"Table already exists: {identifier}")
else:
if namespace not in self.__namespaces:
self.__namespaces[namespace] = {}
new_location = location or f's3://warehouse/{"/".join(identifier)}/data'
metadata = TableMetadataV1(
**{
"format-version": 1,
"table-uuid": "d20125c8-7284-442c-9aea-15fee620737c",
"location": new_location,
"last-updated-ms": 1602638573874,
"last-column-id": schema.highest_field_id,
"schema": schema.model_dump(),
"partition-spec": partition_spec.model_dump()["fields"],
"properties": properties,
"current-snapshot-id": -1,
"snapshots": [{"snapshot-id": 1925, "timestamp-ms": 1602638573822}],
}
)
table = Table(
identifier=identifier,
metadata=metadata,
metadata_location=f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json',
io=load_file_io(),
catalog=self,
)
self.__tables[identifier] = table
return table
def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse:
new_metadata: Optional[TableMetadata] = None
metadata_location = ""
for update in table_request.updates:
if isinstance(update, AddSchemaUpdate):
add_schema_update: AddSchemaUpdate = update
identifier = Catalog.identifier_to_tuple(table_request.identifier)
table = self.__tables[("com", *identifier)]
new_metadata = new_table_metadata(
add_schema_update.schema_,
table.metadata.partition_specs[0],
table.sort_order(),
table.location(),
table.properties,
table.metadata.table_uuid,
)
table = Table(
identifier=identifier,
metadata=new_metadata,
metadata_location=f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json',
io=load_file_io(),
catalog=self,
)
self.__tables[identifier] = table
metadata_location = f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json'
return CommitTableResponse(
metadata=new_metadata.model_dump() if new_metadata else {},
metadata_location=metadata_location if metadata_location else "",
)
def load_table(self, identifier: Union[str, Identifier]) -> Table:
identifier = Catalog.identifier_to_tuple(identifier)
try:
return self.__tables[identifier]
except KeyError as error:
raise NoSuchTableError(f"Table does not exist: {identifier}") from error
def drop_table(self, identifier: Union[str, Identifier]) -> None:
identifier = Catalog.identifier_to_tuple(identifier)
try:
self.__tables.pop(identifier)
except KeyError as error:
raise NoSuchTableError(f"Table does not exist: {identifier}") from error
def purge_table(self, identifier: Union[str, Identifier]) -> None:
self.drop_table(identifier)
def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table:
from_identifier = Catalog.identifier_to_tuple(from_identifier)
try:
table = self.__tables.pop(from_identifier)
except KeyError as error:
raise NoSuchTableError(f"Table does not exist: {from_identifier}") from error
to_identifier = Catalog.identifier_to_tuple(to_identifier)
to_namespace = Catalog.namespace_from(to_identifier)
if to_namespace not in self.__namespaces:
self.__namespaces[to_namespace] = {}
self.__tables[to_identifier] = Table(
identifier=to_identifier,
metadata=table.metadata,
metadata_location=table.metadata_location,
io=load_file_io(),
catalog=self,
)
return self.__tables[to_identifier]
def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None:
namespace = Catalog.identifier_to_tuple(namespace)
if namespace in self.__namespaces:
raise NamespaceAlreadyExistsError(f"Namespace already exists: {namespace}")
else:
self.__namespaces[namespace] = properties if properties else {}
def drop_namespace(self, namespace: Union[str, Identifier]) -> None:
namespace = Catalog.identifier_to_tuple(namespace)
if [table_identifier for table_identifier in self.__tables.keys() if namespace == table_identifier[:-1]]:
raise NamespaceNotEmptyError(f"Namespace is not empty: {namespace}")
try:
self.__namespaces.pop(namespace)
except KeyError as error:
raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") from error
def list_tables(self, namespace: Optional[Union[str, Identifier]] = None) -> List[Identifier]:
if namespace:
namespace = Catalog.identifier_to_tuple(namespace)
list_tables = [table_identifier for table_identifier in self.__tables.keys() if namespace == table_identifier[:-1]]
else:
list_tables = list(self.__tables.keys())
return list_tables
def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]:
# Hierarchical namespace is not supported. Return an empty list
if namespace:
return []
return list(self.__namespaces.keys())
def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties:
namespace = Catalog.identifier_to_tuple(namespace)
try:
return self.__namespaces[namespace]
except KeyError as error:
raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") from error
def update_namespace_properties(
self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT
) -> PropertiesUpdateSummary:
removed: Set[str] = set()
updated: Set[str] = set()
namespace = Catalog.identifier_to_tuple(namespace)
if namespace in self.__namespaces:
if removals:
for key in removals:
if key in self.__namespaces[namespace]:
del self.__namespaces[namespace][key]
removed.add(key)
if updates:
for key, value in updates.items():
self.__namespaces[namespace][key] = value
updated.add(key)
else:
raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}")
expected_to_change = removed.difference(removals or set())
return PropertiesUpdateSummary(
removed=list(removed or []), updated=list(updates.keys() if updates else []), missing=list(expected_to_change)
)
@pytest.fixture
def catalog() -> InMemoryCatalog:
return InMemoryCatalog("test.in.memory.catalog", **{"test.key": "test.value"})
TEST_TABLE_IDENTIFIER = ("com", "organization", "department", "my_table")
TEST_TABLE_NAMESPACE = ("com", "organization", "department")
TEST_TABLE_NAME = "my_table"
TEST_TABLE_SCHEMA = Schema(
NestedField(1, "x", LongType()),
NestedField(2, "y", LongType(), doc="comment"),
NestedField(3, "z", LongType()),
)
TEST_TABLE_LOCATION = "protocol://some/location"
TEST_TABLE_PARTITION_SPEC = PartitionSpec(PartitionField(name="x", transform=IdentityTransform(), source_id=1, field_id=1000))
TEST_TABLE_PROPERTIES = {"key1": "value1", "key2": "value2"}
NO_SUCH_TABLE_ERROR = "Table does not exist: \\('com', 'organization', 'department', 'my_table'\\)"
TABLE_ALREADY_EXISTS_ERROR = "Table already exists: \\('com', 'organization', 'department', 'my_table'\\)"
NAMESPACE_ALREADY_EXISTS_ERROR = "Namespace already exists: \\('com', 'organization', 'department'\\)"
NO_SUCH_NAMESPACE_ERROR = "Namespace does not exist: \\('com', 'organization', 'department'\\)"
NAMESPACE_NOT_EMPTY_ERROR = "Namespace is not empty: \\('com', 'organization', 'department'\\)"
def given_catalog_has_a_table(catalog: InMemoryCatalog) -> Table:
return catalog.create_table(
identifier=TEST_TABLE_IDENTIFIER,
schema=TEST_TABLE_SCHEMA,
location=TEST_TABLE_LOCATION,
partition_spec=TEST_TABLE_PARTITION_SPEC,
properties=TEST_TABLE_PROPERTIES,
)
def test_namespace_from_tuple() -> None:
# Given
identifier = ("com", "organization", "department", "my_table")
# When
namespace_from = Catalog.namespace_from(identifier)
# Then
assert namespace_from == ("com", "organization", "department")
def test_namespace_from_str() -> None:
# Given
identifier = "com.organization.department.my_table"
# When
namespace_from = Catalog.namespace_from(identifier)
# Then
assert namespace_from == ("com", "organization", "department")
def test_name_from_tuple() -> None:
# Given
identifier = ("com", "organization", "department", "my_table")
# When
name_from = Catalog.table_name_from(identifier)
# Then
assert name_from == "my_table"
def test_name_from_str() -> None:
# Given
identifier = "com.organization.department.my_table"
# When
name_from = Catalog.table_name_from(identifier)
# Then
assert name_from == "my_table"
def test_create_table(catalog: InMemoryCatalog) -> None:
table = catalog.create_table(
identifier=TEST_TABLE_IDENTIFIER,
schema=TEST_TABLE_SCHEMA,
location=TEST_TABLE_LOCATION,
partition_spec=TEST_TABLE_PARTITION_SPEC,
properties=TEST_TABLE_PROPERTIES,
)
assert catalog.load_table(TEST_TABLE_IDENTIFIER) == table
def test_create_table_raises_error_when_table_already_exists(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
with pytest.raises(TableAlreadyExistsError, match=TABLE_ALREADY_EXISTS_ERROR):
catalog.create_table(
identifier=TEST_TABLE_IDENTIFIER,
schema=TEST_TABLE_SCHEMA,
)
def test_load_table(catalog: InMemoryCatalog) -> None:
# Given
given_table = given_catalog_has_a_table(catalog)
# When
table = catalog.load_table(TEST_TABLE_IDENTIFIER)
# Then
assert table == given_table
def test_table_raises_error_on_table_not_found(catalog: InMemoryCatalog) -> None:
with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR):
catalog.load_table(TEST_TABLE_IDENTIFIER)
def test_drop_table(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
catalog.drop_table(TEST_TABLE_IDENTIFIER)
# Then
with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR):
catalog.load_table(TEST_TABLE_IDENTIFIER)
def test_drop_table_that_does_not_exist_raise_error(catalog: InMemoryCatalog) -> None:
with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR):
catalog.load_table(TEST_TABLE_IDENTIFIER)
def test_purge_table(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
catalog.purge_table(TEST_TABLE_IDENTIFIER)
# Then
with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR):
catalog.load_table(TEST_TABLE_IDENTIFIER)
def test_rename_table(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
new_table = "new.namespace.new_table"
table = catalog.rename_table(TEST_TABLE_IDENTIFIER, new_table)
# Then
assert table.identifier == Catalog.identifier_to_tuple(new_table)
# And
table = catalog.load_table(new_table)
assert table.identifier == Catalog.identifier_to_tuple(new_table)
# And
assert ("new", "namespace") in catalog.list_namespaces()
# And
with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR):
catalog.load_table(TEST_TABLE_IDENTIFIER)
def test_create_namespace(catalog: InMemoryCatalog) -> None:
# When
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# Then
assert TEST_TABLE_NAMESPACE in catalog.list_namespaces()
assert TEST_TABLE_PROPERTIES == catalog.load_namespace_properties(TEST_TABLE_NAMESPACE)
def test_create_namespace_raises_error_on_existing_namespace(catalog: InMemoryCatalog) -> None:
# Given
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# When
with pytest.raises(NamespaceAlreadyExistsError, match=NAMESPACE_ALREADY_EXISTS_ERROR):
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
def test_get_namespace_metadata_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None:
with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR):
catalog.load_namespace_properties(TEST_TABLE_NAMESPACE)
def test_list_namespaces(catalog: InMemoryCatalog) -> None:
# Given
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# When
namespaces = catalog.list_namespaces()
# Then
assert TEST_TABLE_NAMESPACE in namespaces
def test_drop_namespace(catalog: InMemoryCatalog) -> None:
# Given
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# When
catalog.drop_namespace(TEST_TABLE_NAMESPACE)
# Then
assert TEST_TABLE_NAMESPACE not in catalog.list_namespaces()
def test_drop_namespace_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None:
with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR):
catalog.drop_namespace(TEST_TABLE_NAMESPACE)
def test_drop_namespace_raises_error_when_namespace_not_empty(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
with pytest.raises(NamespaceNotEmptyError, match=NAMESPACE_NOT_EMPTY_ERROR):
catalog.drop_namespace(TEST_TABLE_NAMESPACE)
def test_list_tables(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
# When
tables = catalog.list_tables()
# Then
assert tables
assert TEST_TABLE_IDENTIFIER in tables
def test_list_tables_under_a_namespace(catalog: InMemoryCatalog) -> None:
# Given
given_catalog_has_a_table(catalog)
new_namespace = ("new", "namespace")
catalog.create_namespace(new_namespace)
# When
all_tables = catalog.list_tables()
new_namespace_tables = catalog.list_tables(new_namespace)
# Then
assert all_tables
assert TEST_TABLE_IDENTIFIER in all_tables
assert new_namespace_tables == []
def test_update_namespace_metadata(catalog: InMemoryCatalog) -> None:
# Given
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# When
new_metadata = {"key3": "value3", "key4": "value4"}
summary = catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, updates=new_metadata)
# Then
assert TEST_TABLE_NAMESPACE in catalog.list_namespaces()
assert new_metadata.items() <= catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).items()
assert summary == PropertiesUpdateSummary(removed=[], updated=["key3", "key4"], missing=[])
def test_update_namespace_metadata_removals(catalog: InMemoryCatalog) -> None:
# Given
catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES)
# When
new_metadata = {"key3": "value3", "key4": "value4"}
remove_metadata = {"key1"}
summary = catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, remove_metadata, new_metadata)
# Then
assert TEST_TABLE_NAMESPACE in catalog.list_namespaces()
assert new_metadata.items() <= catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).items()
assert remove_metadata.isdisjoint(catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).keys())
assert summary == PropertiesUpdateSummary(removed=["key1"], updated=["key3", "key4"], missing=[])
def test_update_namespace_metadata_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None:
with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR):
catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, updates=TEST_TABLE_PROPERTIES)
def test_commit_table(catalog: InMemoryCatalog) -> None:
# Given
given_table = given_catalog_has_a_table(catalog)
new_schema = Schema(
NestedField(1, "x", LongType()),
NestedField(2, "y", LongType(), doc="comment"),
NestedField(3, "z", LongType()),
NestedField(4, "add", LongType()),
)
# When
response = given_table.catalog._commit_table( # pylint: disable=W0212
CommitTableRequest(
identifier=given_table.identifier[1:],
updates=[
AddSchemaUpdate(schema=new_schema, last_column_id=new_schema.highest_field_id),
SetCurrentSchemaUpdate(schema_id=-1),
],
)
)
# Then
assert response.metadata.table_uuid == given_table.metadata.table_uuid
assert len(response.metadata.schemas) == 1
assert response.metadata.schemas[0] == new_schema
def test_add_column(catalog: InMemoryCatalog) -> None:
given_table = given_catalog_has_a_table(catalog)
given_table.update_schema().add_column(name="new_column1", type_var=IntegerType()).commit()
assert given_table.schema() == Schema(
NestedField(field_id=1, name="x", field_type=LongType(), required=True),
NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
NestedField(field_id=3, name="z", field_type=LongType(), required=True),
NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False),
schema_id=0,
identifier_field_ids=[],
)
transaction = given_table.transaction()
transaction.update_schema().add_column(name="new_column2", type_var=IntegerType(), doc="doc").commit()
transaction.commit_transaction()
assert given_table.schema() == Schema(
NestedField(field_id=1, name="x", field_type=LongType(), required=True),
NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
NestedField(field_id=3, name="z", field_type=LongType(), required=True),
NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False),
NestedField(field_id=5, name="new_column2", field_type=IntegerType(), required=False, doc="doc"),
schema_id=0,
identifier_field_ids=[],
)
def test_add_column_with_statement(catalog: InMemoryCatalog) -> None:
given_table = given_catalog_has_a_table(catalog)
with given_table.update_schema() as tx:
tx.add_column(name="new_column1", type_var=IntegerType())
assert given_table.schema() == Schema(
NestedField(field_id=1, name="x", field_type=LongType(), required=True),
NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
NestedField(field_id=3, name="z", field_type=LongType(), required=True),
NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False),
schema_id=0,
identifier_field_ids=[],
)
with given_table.transaction() as tx:
tx.update_schema().add_column(name="new_column2", type_var=IntegerType(), doc="doc").commit()
assert given_table.schema() == Schema(
NestedField(field_id=1, name="x", field_type=LongType(), required=True),
NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
NestedField(field_id=3, name="z", field_type=LongType(), required=True),
NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False),
NestedField(field_id=5, name="new_column2", field_type=IntegerType(), required=False, doc="doc"),
schema_id=0,
identifier_field_ids=[],
)
Row 8

blob_id: 831a9ef0cc4a1bc0f4a85c6e69c3fda17858fa2f
directory_id: a64eeba4575eee849b459dab9c7000350ee636f1
path: /mediapipe/model_maker/python/core/data/data_util_test.py
content_id: 8bed8ef7c601c88b7f1562eebc0c0e9f64b7dc59
detected_licenses: ["Apache-2.0", "dtoa"]
license_type: permissive
repo_name: google/mediapipe
snapshot_id: 0b6b56aff8bacc7b680c205f0788f1b49dd33f5e
revision_id: 007824594bf1d07c7c1467df03a43886f8a4b3ad
branch_name: refs/heads/master
visit_date: 2023-09-01T16:11:21.218234
revision_date: 2023-09-01T11:55:21
committer_date: 2023-09-01T11:57:34
github_id: 191,820,100
star_events_count: 23,940
fork_events_count: 5,164
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T09:01:36
gha_created_at: 2019-06-13T19:16:41
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,287
extension: py
filename: data_util_test.py
content:
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl import flags
import tensorflow as tf
from mediapipe.model_maker.python.core.data import data_util
_WORKSPACE = "mediapipe"
_TEST_DATA_DIR = os.path.join(
_WORKSPACE, 'mediapipe/model_maker/python/core/data/testdata')
FLAGS = flags.FLAGS
class DataUtilTest(tf.test.TestCase):
def test_load_rgb_image(self):
image_path = os.path.join(FLAGS.test_srcdir, _TEST_DATA_DIR, 'test.jpg')
image_data = data_util.load_image(image_path)
self.assertEqual(image_data.shape, (5184, 3456, 3))
if __name__ == '__main__':
tf.test.main()
Row 9

blob_id: 6c880ea49d6eda7fdf9376a0ea3b0fa080bb0183
directory_id: 9472c7d1608e318e46214f231773fbb3f33de0f1
path: /kats/tests/models/test_var_model.py
content_id: f1466a304dafd6fc2ec9b5139d25a110d540632f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: facebookresearch/Kats
snapshot_id: 16eee984bc1c482bd709cb5d62c226d4ad85f216
revision_id: 00ab9a3db27218b4817eae2e05dc602e437f634f
branch_name: refs/heads/main
visit_date: 2023-08-30T23:33:12.654847
revision_date: 2023-08-25T16:02:04
committer_date: 2023-08-25T16:02:04
github_id: 342,388,745
star_events_count: 4,514
fork_events_count: 517
gha_license_id: MIT
gha_event_created_at: 2023-09-14T15:28:43
gha_created_at: 2021-02-25T21:51:06
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,846
extension: py
filename: test_var_model.py
content:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import pandas as pd
from kats.consts import TimeSeriesData
from kats.data.utils import load_data
from kats.models.var import VARModel, VARParams
from parameterized.parameterized import parameterized
TEST_DATA = {
"multivariate": {
"ts": TimeSeriesData(load_data("multivariate_anomaly_simulated_data.csv")),
},
"multivariate_2": {
"ts": TimeSeriesData(load_data("multi_ts.csv")),
},
}
class testVARModel(TestCase):
def test_fit_forecast(self, steps: int = 5) -> None:
ts = TEST_DATA["multivariate_2"]["ts"]
params = VARParams()
train, truth = ts[:-steps], ts[-steps:]
m = VARModel(train, params)
m.fit()
pred = m.predict(steps=steps)
# check whether the time indices of each forecasted feature are the same
index = [v.to_dataframe().time for _, v in pred.items()]
self.assertTrue(all(x.equals(index[0]) for x in index))
# check whether the values are close and shapes are correct
truth = truth.to_dataframe().iloc[:, 1:]
pred_forecast = pd.concat(
[v["fcst"].to_dataframe().iloc[:, 1:2] for _, v in pred.items()], axis=1
)
pred_forecast.columns = truth.columns
self.assertTrue(truth.subtract(pred_forecast).values.max() < 5)
def test_invalid_params(self) -> None:
params = VARParams()
input_data = TimeSeriesData(pd.DataFrame())
with self.assertRaises(ValueError):
m = VARModel(input_data, params)
m.fit()
m.predict(steps=30, include_history=True)
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate"]["ts"]]])
@patch("pandas.concat")
def test_predict_exception(self, ts: TimeSeriesData, mock_obj: MagicMock) -> None:
mock_obj.side_effect = Exception
with self.assertRaisesRegex(
Exception, "^Failed to generate in-sample forecasts for historical data"
):
params = VARParams()
m = VARModel(ts, params)
m.fit()
m.predict(steps=30, include_history=True)
# pyre-fixme[56]
@parameterized.expand(
[
[TEST_DATA["multivariate"]["ts"]],
[TEST_DATA["multivariate_2"]["ts"]],
]
)
def test_predict_unfit(self, ts: TimeSeriesData) -> None:
with self.assertRaises(ValueError):
m = VARModel(ts, VARParams())
m.predict(steps=30)
# pyre-fixme[56]
@parameterized.expand(
[
[TEST_DATA["multivariate"]["ts"]],
[TEST_DATA["multivariate_2"]["ts"]],
]
)
def test_search_space(self, ts: TimeSeriesData) -> None:
params = VARParams()
params.validate_params()
with self.assertRaises(NotImplementedError):
VARModel.get_parameter_search_space()
# @pytest.mark.image_compare
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate_2"]["ts"]]])
def test_plot(self, ts: TimeSeriesData) -> plt.Figure:
# Test the example from the 201 notebook.
m = VARModel(ts, VARParams())
m.fit()
m.predict(steps=90)
m.plot()
return plt.gcf()
# @pytest.mark.image_compare
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate_2"]["ts"]]])
def test_plot_include_history(self, ts: TimeSeriesData) -> plt.Figure:
# This shouldn't error, but currently does.
with self.assertRaises(ValueError):
m = VARModel(ts, VARParams())
m.fit()
m.predict(steps=90, include_history=True)
m.plot()
return plt.gcf()
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate"]["ts"]]])
def test_plot_ax_not_supported(self, ts: TimeSeriesData) -> None:
with self.assertRaises(ValueError):
_, ax = plt.subplots()
m = VARModel(ts, VARParams())
m.fit()
m.predict(steps=5)
m.plot(ax=ax)
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate"]["ts"]]])
def test_plot_unpredict(self, ts: TimeSeriesData) -> None:
with self.assertRaises(ValueError):
m = VARModel(ts, VARParams())
m.plot()
# pyre-fixme[56]
@parameterized.expand([[TEST_DATA["multivariate"]["ts"]]])
def test_str(self, ts: TimeSeriesData) -> None:
result = str(VARModel(ts, VARParams()))
self.assertEqual("VAR", result)
if __name__ == "__main__":
unittest.main()
Row 10

blob_id: d80701202d7feef537c6261f31349ca5c97577fd
directory_id: da0413cec35467f9d92c0ff719130212be039ef5
path: /core/roslib/test/test_roslib_names.py
content_id: 751d17b8ad6c1be9bd34b25c4a28eb7edd0b565e
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: ros/ros
snapshot_id: dc7ed8f12227ac154c1d6dff58c2baa3ddf38ee6
revision_id: 93d8da32091b8b43702eab5d3202f4511dfeb7dc
branch_name: refs/heads/noetic-devel
visit_date: 2023-09-04T12:49:31.954556
revision_date: 2021-09-21T00:23:55
committer_date: 2021-09-21T00:23:55
github_id: 7,789,923
star_events_count: 2,619
fork_events_count: 820
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-02-13T04:30:28
gha_created_at: 2013-01-24T03:44:36
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 13,915
extension: py
filename: test_roslib_names.py
content:
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
import roslib.names
class NamesTest(unittest.TestCase):
def test_get_ros_namespace(self):
if 'ROS_NAMESPACE' in os.environ:
rosns = os.environ['ROS_NAMESPACE']
del os.environ['ROS_NAMESPACE']
else:
rosns = None
sysargv = sys.argv
try:
sys.argv = []
self.assertEquals('/', roslib.names.get_ros_namespace())
self.assertEquals('/', roslib.names.get_ros_namespace(argv=[]))
self.assertEquals('/', roslib.names.get_ros_namespace(env={}))
self.assertEquals('/', roslib.names.get_ros_namespace(env={}, argv=[]))
os.environ['ROS_NAMESPACE'] = 'unresolved'
self.assertEquals('/unresolved/', roslib.names.get_ros_namespace())
self.assertEquals('/unresolved/', roslib.names.get_ros_namespace(env={'ROS_NAMESPACE': 'unresolved'}))
sys.argv = ['foo', '__ns:=unresolved_override']
self.assertEquals('/unresolved_override/', roslib.names.get_ros_namespace(env={'ROS_NAMESPACE': 'unresolved'}))
self.assertEquals('/override2/', roslib.names.get_ros_namespace(env={'ROS_NAMESPACE': 'unresolved'}, argv=['foo', '__ns:=override2']))
sys.argv = []
os.environ['ROS_NAMESPACE'] = '/resolved/'
self.assertEquals('/resolved/', roslib.names.get_ros_namespace())
self.assertEquals('/resolved/', roslib.names.get_ros_namespace(env={'ROS_NAMESPACE': '/resolved'}))
del os.environ['ROS_NAMESPACE']
sys.argv = ['foo', '__ns:=unresolved_ns']
self.assertEquals('/unresolved_ns/', roslib.names.get_ros_namespace())
self.assertEquals('/unresolved_ns2/', roslib.names.get_ros_namespace(argv=['foo', '__ns:=unresolved_ns2']))
sys.argv = ['foo', '__ns:=/resolved_ns/']
self.assertEquals('/resolved_ns/', roslib.names.get_ros_namespace())
self.assertEquals('/resolved_ns2/', roslib.names.get_ros_namespace(argv=['foo', '__ns:=resolved_ns2']))
finally:
sys.argv = sysargv
# restore
if rosns:
os.environ['ROS_NAMESPACE'] = rosns
def test_make_global_ns(self):
from roslib.names import make_global_ns
for n in ['~foo']:
try:
make_global_ns(n)
self.fail('make_global_ns should fail on %s' % n)
except ValueError:
pass
self.assertEquals('/foo/', make_global_ns('foo'))
self.assertEquals('/', make_global_ns(''))
self.assertEquals('/foo/', make_global_ns('/foo'))
self.assertEquals('/foo/', make_global_ns('/foo/'))
self.assertEquals('/foo/bar/', make_global_ns('/foo/bar'))
self.assertEquals('/foo/bar/', make_global_ns('/foo/bar/'))
def test_is_global(self):
try:
roslib.names.is_global(None)
self.fail('is_global should raise exception on invalid param')
except Exception:
pass
tests = ['/', '/global', '/global2']
for t in tests:
self.assert_(roslib.names.is_global(t))
fails = ['', 'not_global', 'not/global']
for t in fails:
self.failIf(roslib.names.is_global(t))
def test_is_private(self):
try:
roslib.names.is_private(None)
self.fail('is_private should raise exception on invalid param')
except Exception:
pass
tests = ['~name', '~name/sub']
for t in tests:
self.assert_(roslib.names.is_private(t))
fails = ['', 'not_private', 'not/private', 'not/~private', '/not/~private']
for t in fails:
self.failIf(roslib.names.is_private(t))
def test_namespace(self):
from roslib.names import namespace
try:
namespace(1)
self.fail('1')
except TypeError:
pass
try:
namespace(None)
self.fail('None')
except ValueError:
pass
self.assertEquals('/', namespace(''))
self.assertEquals('/', namespace('/'))
self.assertEquals('/', namespace('/foo'))
self.assertEquals('/', namespace('/foo/'))
self.assertEquals('/foo/', namespace('/foo/bar'))
self.assertEquals('/foo/', namespace('/foo/bar/'))
self.assertEquals('/foo/bar/', namespace('/foo/bar/baz'))
self.assertEquals('/foo/bar/', namespace('/foo/bar/baz/'))
# unicode tests
self.assertEquals(u'/', namespace(u''))
self.assertEquals(u'/', namespace(u'/'))
self.assertEquals(u'/foo/bar/', namespace(u'/foo/bar/baz/'))
def test_nsjoin(self):
from roslib.names import ns_join
# private and global names cannot be joined
self.assertEquals('~name', ns_join('/foo', '~name'))
self.assertEquals('/name', ns_join('/foo', '/name'))
self.assertEquals('~name', ns_join('~', '~name'))
self.assertEquals('/name', ns_join('/', '/name'))
# ns can be '~' or '/'
self.assertEquals('~name', ns_join('~', 'name'))
self.assertEquals('/name', ns_join('/', 'name'))
self.assertEquals('/ns/name', ns_join('/ns', 'name'))
self.assertEquals('/ns/name', ns_join('/ns/', 'name'))
self.assertEquals('/ns/ns2/name', ns_join('/ns', 'ns2/name'))
self.assertEquals('/ns/ns2/name', ns_join('/ns/', 'ns2/name'))
# allow ns to be empty
self.assertEquals('name', ns_join('', 'name'))
def test_load_mappings(self):
from roslib.names import load_mappings
self.assertEquals({}, load_mappings([]))
self.assertEquals({}, load_mappings(['foo']))
self.assertEquals({}, load_mappings([':=']))
self.assertEquals({}, load_mappings([':=:=']))
self.assertEquals({}, load_mappings(['f:=']))
self.assertEquals({}, load_mappings([':=b']))
self.assertEquals({}, load_mappings(['foo:=bar:=baz']))
# should ignore node param assignments
self.assertEquals({}, load_mappings(['_foo:=bar']))
self.assertEquals({'foo': 'bar'}, load_mappings(['foo:=bar']))
# should allow double-underscore names
self.assertEquals({'__foo': 'bar'}, load_mappings(['__foo:=bar']))
self.assertEquals({'foo': 'bar'}, load_mappings(['./f', '-x', '--blah', 'foo:=bar']))
self.assertEquals({'a': '1', 'b': '2', 'c': '3'}, load_mappings(['c:=3', 'c:=', ':=3', 'a:=1', 'b:=2']))
def test_resource_name(self):
from roslib.names import resource_name
self.assertEquals('foo/bar', resource_name('foo', 'bar'))
self.assertEquals('bar', resource_name('foo', 'bar', my_pkg='foo'))
self.assertEquals('foo/bar', resource_name('foo', 'bar', my_pkg='bar'))
self.assertEquals('foo/bar', resource_name('foo', 'bar', my_pkg=''))
self.assertEquals('foo/bar', resource_name('foo', 'bar', my_pkg=None))
def test_resource_name_base(self):
from roslib.names import resource_name_base
self.assertEquals('', resource_name_base(''))
self.assertEquals('bar', resource_name_base('bar'))
self.assertEquals('bar', resource_name_base('foo/bar'))
self.assertEquals('bar', resource_name_base('/bar'))
self.assertEquals('', resource_name_base('foo/'))
def test_resource_name_package(self):
from roslib.names import resource_name_package
self.assertEquals(None, resource_name_package(''))
self.assertEquals(None, resource_name_package('foo'))
self.assertEquals('foo', resource_name_package('foo/'))
self.assertEquals('foo', resource_name_package('foo/bar'))
def test_package_resource_name(self):
from roslib.names import package_resource_name
self.assertEquals(('', ''), package_resource_name(''))
self.assertEquals(('', 'foo'), package_resource_name('foo'))
self.assertEquals(('foo', 'bar'), package_resource_name('foo/bar'))
self.assertEquals(('foo', ''), package_resource_name('foo/'))
try:
# only allowed single separator
package_resource_name('foo/bar/baz')
self.fail('should have raised ValueError')
except ValueError:
pass
def test_is_legal_resource_name(self):
from roslib.names import is_legal_resource_name
failures = [None, '', 'hello\n', '\t', 'foo++', 'foo-bar', '#foo',
' name', 'name ',
'~name', '/name',
'1name', 'foo\\']
for f in failures:
self.failIf(is_legal_resource_name(f), f)
tests = ['f', 'f1', 'f_', 'foo', 'foo_bar', 'foo/bar', 'roslib/Log']
for t in tests:
self.assert_(is_legal_resource_name(t), t)
def test_is_legal_name(self):
from roslib.names import is_legal_name
failures = [None,
'foo++', 'foo-bar', '#foo',
'hello\n', '\t', ' name', 'name ',
'f//b',
'1name', 'foo\\']
for f in failures:
self.failIf(is_legal_name(f), f)
tests = ['',
'f', 'f1', 'f_', 'f/', 'foo', 'foo_bar', 'foo/bar', 'foo/bar/baz',
'~f', '~a/b/c',
'~/f',
'/a/b/c/d', '/']
for t in tests:
self.assert_(is_legal_name(t), '[%s]' % t)
def test_is_legal_base_name(self):
from roslib.names import is_legal_base_name
failures = [None, '', 'hello\n', '\t', 'foo++', 'foo-bar', '#foo',
'f/', 'foo/bar', '/', '/a',
'f//b',
'~f', '~a/b/c',
' name', 'name ',
'1name', 'foo\\']
for f in failures:
self.failIf(is_legal_base_name(f), f)
tests = ['f', 'f1', 'f_', 'foo', 'foo_bar']
for t in tests:
self.assert_(is_legal_base_name(t), '[%s]' % t)
def test_is_legal_resource_base_name(self):
from roslib.names import is_legal_resource_base_name
failures = [None, '', 'hello\n', '\t', 'foo++', 'foo-bar', '#foo',
'f/', 'foo/bar', '/', '/a',
'f//b',
'~f', '~a/b/c',
'~/f',
' name', 'name ',
'1name', 'foo\\']
for f in failures:
self.failIf(is_legal_resource_base_name(f), f)
tests = ['f', 'f1', 'f_', 'foo', 'foo_bar']
for t in tests:
self.assert_(is_legal_resource_base_name(t), '[%s]' % t)
def test_resolve_name(self):
from roslib.names import resolve_name
# TODO: test with remappings
tests = [
('', '/', '/'),
('', '/node', '/'),
('', '/ns1/node', '/ns1/'),
('foo', '', '/foo'),
('foo/', '', '/foo'),
('/foo', '', '/foo'),
('/foo/', '', '/foo'),
('/foo', '/', '/foo'),
('/foo/', '/', '/foo'),
('/foo', '/bar', '/foo'),
('/foo/', '/bar', '/foo'),
('foo', '/ns1/ns2', '/ns1/foo'),
('foo', '/ns1/ns2/', '/ns1/foo'),
('foo', '/ns1/ns2/ns3/', '/ns1/ns2/foo'),
('foo/', '/ns1/ns2', '/ns1/foo'),
('/foo', '/ns1/ns2', '/foo'),
('foo/bar', '/ns1/ns2', '/ns1/foo/bar'),
('foo//bar', '/ns1/ns2', '/ns1/foo/bar'),
('foo/bar', '/ns1/ns2/ns3', '/ns1/ns2/foo/bar'),
('foo//bar//', '/ns1/ns2/ns3', '/ns1/ns2/foo/bar'),
('~foo', '/', '/foo'),
('~foo', '/node', '/node/foo'),
('~foo', '/ns1/ns2', '/ns1/ns2/foo'),
('~foo/', '/ns1/ns2', '/ns1/ns2/foo'),
('~foo/bar', '/ns1/ns2', '/ns1/ns2/foo/bar'),
# #3044
('~/foo', '/', '/foo'),
('~/foo', '/node', '/node/foo'),
('~/foo', '/ns1/ns2', '/ns1/ns2/foo'),
('~/foo/', '/ns1/ns2', '/ns1/ns2/foo'),
('~/foo/bar', '/ns1/ns2', '/ns1/ns2/foo/bar'),
]
for name, node_name, v in tests:
self.assertEquals(v, resolve_name(name, node_name))
|
8e93e3d4671a347bddee3d8cc39ede07504fddb0
|
5bd1490ada452d262819b51d240b519b7264dbd8
|
/Chapter 10/ch10_10.py
|
90b05b73907904eaae4e4aecf3e18ad9839d0f9a
|
[] |
no_license
|
PacktPublishing/Mastering-Natural-Language-Processing-with-Python
|
59feee3a1ac0751f97256af328c6957adaeb7111
|
61fb2091f8c2d42fa5f14cb02664b0f2ca9127a1
|
refs/heads/master
| 2022-11-05T20:29:52.245545
| 2022-10-28T07:52:43
| 2022-10-28T07:52:43
| 60,772,409
| 142
| 124
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
ch10_10.py
|
import nltk
from nltk.stem.lancaster import LancasterStemmer
stri=LancasterStemmer()
print(stri.stem('achievement'))
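# Illustrative addition (not from the original chapter file): compare the Lancaster
# stemmer above with NLTK's Porter stemmer on a few words; Lancaster is generally
# the more aggressive of the two.
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
for word in ['achievement', 'maximum', 'running']:
    print(word, stri.stem(word), porter.stem(word))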
|
73493d0b7bad4abe3d681ade24910e3f7ece0cf4
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/tests/codebase/test_python_execution_with_OO.py
|
1bda05beff4e60932d1015cc60d0fbabd5f3f9aa
|
[
"BSD-3-Clause",
"0BSD",
"Unlicense",
"WTFPL",
"AFL-2.1",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
test_python_execution_with_OO.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from subprocess import PIPE, Popen
from sys import executable as python
from typing import Sequence
# Bokeh imports
from tests.support.util.project import ls_modules
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
SKIP: Sequence[str] = []
def test_python_execution_with_OO() -> None:
''' Running python with -OO will discard docstrings (__doc__ is None)
which can cause problems if docstrings are naively formatted.
    This test ensures that all modules are importable, even with -OO set.
If you encounter a new problem with docstrings being formatted, try
using format_docstring.
'''
imports = [f"import {mod}" for mod in ls_modules(skip_prefixes=SKIP)]
proc = Popen([python, "-OO", "-"], stdout=PIPE, stdin=PIPE)
proc.communicate("\n".join(imports).encode("utf-8"))
proc.wait()
assert proc.returncode == 0, "Execution with -OO failed"
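# Illustrative only (not part of the Bokeh test suite): a minimal sketch of why -OO
# breaks naive docstring formatting. Under -OO, __doc__ is None, so formatting it
# raises AttributeError; format_docstring-style helpers guard against this.
if __name__ == '__main__':
    def naive_format(fn):
        # fails under -OO because fn.__doc__ is None
        fn.__doc__ = fn.__doc__.format(extra='details')
        return fn
    try:
        @naive_format
        def f():
            """Docstring with {extra}."""
        print(f.__doc__)
    except AttributeError:
        print("docstrings were stripped (-OO); guard formatting when __doc__ is None")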
|
2924e4c7c8777d33152f8f24504b6dcb32abcde7
|
dcdca5501b18a675bb6190f3102dbefe0309e78c
|
/gym_donkeycar/core/sim_client.py
|
d4543735663a0daeaefe42c8adc30939ef398721
|
[
"MIT"
] |
permissive
|
tawnkramer/gym-donkeycar
|
bc8c462f6e8c3d44352ffc9f83c7002b00bb8c46
|
de43ae14fc2b1fa79c06926bc5cad5009c400283
|
refs/heads/master
| 2023-08-18T23:08:00.925087
| 2023-04-25T01:21:00
| 2023-04-25T01:21:00
| 152,908,652
| 146
| 119
|
NOASSERTION
| 2023-09-08T11:55:37
| 2018-10-13T19:48:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
sim_client.py
|
"""
author: Tawn Kramer
date: 9 Dec 2019
file: sim_client.py
notes: wraps a tcp socket client with a handler to talk to the unity donkey simulator
"""
import json
from typing import Any, Dict, Tuple
from gym_donkeycar.core.message import IMesgHandler
from .client import SDClient
class SimClient(SDClient):
"""
Handles messages from a single TCP client.
"""
def __init__(self, address: Tuple[str, int], msg_handler: IMesgHandler):
# we expect an IMesgHandler derived handler
# assert issubclass(msg_handler, IMesgHandler)
# hold onto the handler
self.msg_handler = msg_handler
# connect to sim
super().__init__(*address)
# we connect right away
msg_handler.on_connect(self)
def send_now(self, msg: Dict[str, Any]) -> None: # pytype: disable=signature-mismatch
# takes a dict input msg, converts to json string
# and sends immediately. right now, no queue.
json_msg = json.dumps(msg)
super().send_now(json_msg)
def queue_message(self, msg: Dict[str, Any]) -> None:
# takes a dict input msg, converts to json string
# and adds to a lossy queue that sends only the last msg
json_msg = json.dumps(msg)
self.send(json_msg)
def on_msg_recv(self, json_obj: Dict[str, Any]) -> None:
# pass message on to handler
self.msg_handler.on_recv_message(json_obj)
def is_connected(self) -> bool:
return not self.aborted
def __del__(self) -> None:
self.close()
def close(self) -> None:
# Called to close client connection
self.stop()
if self.msg_handler:
self.msg_handler.on_close()
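# Illustrative usage sketch (not part of gym-donkeycar): a minimal handler object
# wired to SimClient. Only the on_connect/on_recv_message/on_close hooks used above
# are implemented; the address and message type are placeholders for a locally
# running simulator.
if __name__ == "__main__":
    class PrintHandler:
        def on_connect(self, client: "SimClient") -> None:
            self.client = client
        def on_recv_message(self, msg: Dict[str, Any]) -> None:
            print("received:", msg.get("msg_type"))
        def on_close(self) -> None:
            print("connection closed")
    client = SimClient(("127.0.0.1", 9091), PrintHandler())
    client.queue_message({"msg_type": "get_scene_names"})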
|
69cdf31e16920607e2cf6a21d482b523efaddf37
|
01857ef455ea60eccaf03b5a9059ec83e9803c2e
|
/nicegui/ui.py
|
8b48b918d1392d5468f9cc4423161ea8e058712d
|
[
"MIT"
] |
permissive
|
zauberzeug/nicegui
|
f08312cc1f393deca79e0e84a2506d3a35efff16
|
c61b1315f29d51e26cc1168207f5616b302f8df0
|
refs/heads/main
| 2023-08-18T18:09:30.937322
| 2023-08-18T15:04:00
| 2023-08-18T15:04:00
| 365,250,183
| 5,128
| 271
|
MIT
| 2023-09-14T01:50:56
| 2021-05-07T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,572
|
py
|
ui.py
|
__all__ = [
'element',
'aggrid',
'audio',
'avatar',
'badge',
'button',
'card',
'card_actions',
'card_section',
'carousel',
'carousel_slide',
'chart',
'chat_message',
'checkbox',
'color_input',
'color_picker',
'colors',
'column',
'dark_mode',
'date',
'dialog',
'echart',
'expansion',
'grid',
'html',
'icon',
'image',
'input',
'interactive_image',
'joystick',
'keyboard',
'knob',
'label',
'line_plot',
'link',
'link_target',
'log',
'markdown',
'menu',
'menu_item',
'mermaid',
'number',
'plotly',
'circular_progress',
'linear_progress',
'pyplot',
'query',
'radio',
'row',
'scene',
'scroll_area',
'select',
'separator',
'slider',
'spinner',
'splitter',
'step',
'stepper',
'stepper_navigation',
'switch',
'table',
'tab',
'tab_panel',
'tab_panels',
'tabs',
'textarea',
'time',
'toggle',
'tooltip',
'tree',
'upload',
'video',
'download',
'add_body_html',
'add_head_html',
'run_javascript',
'notify',
'open',
'refreshable',
'timer',
'update',
'page',
'drawer',
'footer',
'header',
'left_drawer',
'page_sticky',
'right_drawer',
'run',
'run_with',
]
from .element import Element as element
from .elements.aggrid import AgGrid as aggrid
from .elements.audio import Audio as audio
from .elements.avatar import Avatar as avatar
from .elements.badge import Badge as badge
from .elements.button import Button as button
from .elements.card import Card as card
from .elements.card import CardActions as card_actions
from .elements.card import CardSection as card_section
from .elements.carousel import Carousel as carousel
from .elements.carousel import CarouselSlide as carousel_slide
from .elements.chart import Chart as chart
from .elements.chat_message import ChatMessage as chat_message
from .elements.checkbox import Checkbox as checkbox
from .elements.color_input import ColorInput as color_input
from .elements.color_picker import ColorPicker as color_picker
from .elements.colors import Colors as colors
from .elements.column import Column as column
from .elements.dark_mode import DarkMode as dark_mode
from .elements.date import Date as date
from .elements.dialog import Dialog as dialog
from .elements.echart import EChart as echart
from .elements.expansion import Expansion as expansion
from .elements.grid import Grid as grid
from .elements.html import Html as html
from .elements.icon import Icon as icon
from .elements.image import Image as image
from .elements.input import Input as input
from .elements.interactive_image import InteractiveImage as interactive_image
from .elements.joystick import Joystick as joystick
from .elements.keyboard import Keyboard as keyboard
from .elements.knob import Knob as knob
from .elements.label import Label as label
from .elements.line_plot import LinePlot as line_plot
from .elements.link import Link as link
from .elements.link import LinkTarget as link_target
from .elements.log import Log as log
from .elements.markdown import Markdown as markdown
from .elements.menu import Menu as menu
from .elements.menu import MenuItem as menu_item
from .elements.mermaid import Mermaid as mermaid
from .elements.number import Number as number
from .elements.plotly import Plotly as plotly
from .elements.progress import CircularProgress as circular_progress
from .elements.progress import LinearProgress as linear_progress
from .elements.pyplot import Pyplot as pyplot
from .elements.query import query
from .elements.radio import Radio as radio
from .elements.row import Row as row
from .elements.scene import Scene as scene
from .elements.scroll_area import ScrollArea as scroll_area
from .elements.select import Select as select
from .elements.separator import Separator as separator
from .elements.slider import Slider as slider
from .elements.spinner import Spinner as spinner
from .elements.splitter import Splitter as splitter
from .elements.stepper import Step as step
from .elements.stepper import Stepper as stepper
from .elements.stepper import StepperNavigation as stepper_navigation
from .elements.switch import Switch as switch
from .elements.table import Table as table
from .elements.tabs import Tab as tab
from .elements.tabs import TabPanel as tab_panel
from .elements.tabs import TabPanels as tab_panels
from .elements.tabs import Tabs as tabs
from .elements.textarea import Textarea as textarea
from .elements.time import Time as time
from .elements.toggle import Toggle as toggle
from .elements.tooltip import Tooltip as tooltip
from .elements.tree import Tree as tree
from .elements.upload import Upload as upload
from .elements.video import Video as video
from .functions.download import download
from .functions.html import add_body_html, add_head_html
from .functions.javascript import run_javascript
from .functions.notify import notify
from .functions.open import open
from .functions.refreshable import refreshable
from .functions.timer import Timer as timer
from .functions.update import update
from .page import page
from .page_layout import Drawer as drawer
from .page_layout import Footer as footer
from .page_layout import Header as header
from .page_layout import LeftDrawer as left_drawer
from .page_layout import PageSticky as page_sticky
from .page_layout import RightDrawer as right_drawer
from .run import run
from .run_with import run_with
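# Minimal usage sketch (not part of this module): the symbols above are re-exported
# so applications can write, e.g.:
#
#     from nicegui import ui
#     ui.label('Hello, NiceGUI!')
#     ui.button('Click me', on_click=lambda: ui.notify('clicked'))
#     ui.run()
#
# Kept as a comment here because calling ui.run() at import time would start the server.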
|
707d3a8e8c356fe0d143ed31760185ba5f2119ca
|
184bef1a322e2d3d80a167d21a4611f24190c3c6
|
/dbdb/core/common/searchvector.py
|
60bab4910c110f737e15321b4c258b9401e2d117
|
[
"Apache-2.0"
] |
permissive
|
cmu-db/dbdb.io
|
4a5020ae7832915db9008ef3a717a1ca6ccfbeb4
|
49b30cad616e1dff2406a65d52fbc86bc51414fd
|
refs/heads/master
| 2023-08-22T08:01:32.673909
| 2023-08-20T15:00:09
| 2023-08-20T15:00:09
| 19,864,037
| 395
| 34
|
Apache-2.0
| 2023-07-26T17:16:21
| 2014-05-16T16:59:01
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
searchvector.py
|
from django.contrib.postgres.search import SearchConfig, SearchVectorField
from django.db.models import Func
from django.db.models.expressions import ExpressionList
class SearchVector(Func):
"""
Replacement of `django.contrib.postgres.search.SearchVector` that
    works around limitations of the latter with regard to indexing.
See https://code.djangoproject.com/ticket/31304#comment:6
"""
function = 'to_tsvector'
output_field = SearchVectorField()
def __init__(self, *expressions, config=None):
expressions = (
SearchConfig.from_parameter(config),
ExpressionList(*expressions, arg_joiner=" || ' ' || "),
)
super().__init__(*expressions)
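# Hedged usage sketch (not part of the original module): with this SearchVector a
# functional GIN index can be declared on a model (Django >= 3.2). `Entry` and its
# fields are hypothetical; kept as a comment to avoid importing models here.
#
# from django.contrib.postgres.indexes import GinIndex
# from django.db import models
#
# class Entry(models.Model):
#     title = models.TextField()
#     body = models.TextField()
#
#     class Meta:
#         indexes = [
#             GinIndex(
#                 SearchVector('title', 'body', config='english'),
#                 name='entry_search_gin',
#             ),
#         ]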
|
6fff1ad10fc554589035eead530012ea7a728064
|
baf824f8819f90928e11480d0eae89efb60341a1
|
/lib/aia_doc/svd2doc.py
|
fd708b6cf9ff019582040ceb84c6e6fff1ae555e
|
[
"MIT"
] |
permissive
|
RockySong/micropython-rocky
|
549770723ba92cb311c468880ead0ffdd4fa8fe5
|
2d728f414bf8d041ca609e00448850759aade3cd
|
refs/heads/omv_initial_integrate
| 2021-05-12T12:20:18.404341
| 2021-01-15T01:15:48
| 2021-01-15T01:15:48
| 117,408,452
| 198
| 90
|
MIT
| 2020-08-25T03:31:32
| 2018-01-14T06:40:36
|
C
|
UTF-8
|
Python
| false
| false
| 14,680
|
py
|
svd2doc.py
|
import argparse
import sys
import copy
import xml.etree.ElementTree as ET
def iter_clusters(ptag):
registers = ptag.find('registers')
if registers is None:
return []
else:
return registers.findall('cluster')
def iter_registers(ptag):
registers = ptag.find('registers')
if registers is None:
return []
else:
return registers.findall('register')
def iter_fields(rtag):
fields = rtag.find('fields')
if fields is None:
return []
else:
return fields.findall('field')
def iter_enumerated_values(rtag):
enumVals = rtag.find('enumeratedValues')
if enumVals is None:
return []
else:
return enumVals.findall('enumeratedValue')
ACCESS = {
"read-only": "ro",
"read-write": "rw",
"write-only": "wo",
}
def get_access(tag):
"""
Reads and formats the access attribute of the tag.
If possible it is shortened to ro/rw/wo, and then
returned inside brackets with a leading space.
"""
access = get_string(tag, 'access')
if access is not None:
return " (" + ACCESS.get(access, access) + ")"
else:
return ""
def get_string(node, tag, default=None):
text = node.findtext(tag, default=default)
if text == default:
return text
return " ".join(text.split())
def get_int(node, tag, default=None):
text = get_string(node, tag, default=default)
if text == default:
return text
text = text.lower().strip()
if text == "true":
return 1
elif text == "false":
return 0
elif text[:2] == "0x":
return int(text[2:], 16)
elif text[:2] == "0b":
return int(text[2:], 2)
else:
return int(text, 10)
def expand_dim(node):
"""
Given a node (a cluster or a register) which may have a `dim` child,
returns an expanded list of all such nodes with '%s' in the name replaced
by the appropriate index. If there is no `dim` child, a list containing
just the original node is returned.
"""
dim = node.findtext('dim')
if dim is None:
return [node]
inc = get_int(node, 'dimIncrement')
idxs = get_string(node, 'dimIndex')
if idxs is None:
if isinstance(dim, str):
dim = int(dim) if dim.isdigit() else 0
idxs = list(range(dim))
else:
if "," in idxs:
idxs = idxs.split(",")
elif "-" in idxs:
li, ri = idxs.split("-")
idxs = list(range(int(li), int(ri)+1))
else:
raise ValueError(f"Unknown dimIndex: '{idxs}'")
nodes = []
for cnt, idx in enumerate(idxs):
name = get_string(node, 'name').replace("%s", str(idx))
dim_node = copy.deepcopy(node)
dim_node.find('name').text = name
addr = get_int(dim_node, 'addressOffset') + cnt * inc
dim_node.find('addressOffset').text = f"0x{addr:08x}"
dim_node.attrib['dim_index'] = idx
nodes.append(dim_node)
return nodes
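# Worked example (comment only, not part of the original script): given a register like
#   <register>
#     <name>GPIO%s</name><dim>2</dim><dimIncrement>0x4</dimIncrement>
#     <dimIndex>0,1</dimIndex><addressOffset>0x0</addressOffset>
#   </register>
# expand_dim() returns two copies named GPIO0 and GPIO1, with addressOffset rewritten
# to 0x00000000 and 0x00000004 respectively and the index stored in the node's
# 'dim_index' attribute.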
def expand_cluster(node):
"""
Given a cluster, returns a list of all registers inside the cluster,
with their names updated to include the cluster index and their address
offsets updated to include the cluster address offset.
The returned register nodes are as though they were never in a cluster.
"""
if node.attrib.get('dim_index') is None:
raise ValueError("Can't process cluster without dim_index")
cluster_idx = node.attrib['dim_index']
cluster_addr = get_int(node, 'addressOffset')
nodes = []
for rtag in node.findall('register'):
addr = cluster_addr + get_int(rtag, 'addressOffset')
name = get_string(rtag, 'name') + str(cluster_idx)
new_rtag = copy.deepcopy(rtag)
new_rtag.find('addressOffset').text = f"0x{addr:08x}"
new_rtag.find('name').text = name
nodes.append(new_rtag)
return nodes
def parse_register(rtag):
"""
Extract register and field information from a register node into a dict.
"""
fields = {}
rname = get_string(rtag, 'name')
rDispName = get_string(rtag, 'displayName')
rdesc = get_string(rtag, 'description')
    if rDispName is not None:
        rdesc = rname + ', ' + rdesc
        rname = rDispName
raccess = get_access(rtag)
roffset = get_int(rtag, 'addressOffset')
for ftag in iter_fields(rtag):
fname = get_string(ftag, 'name')
foffset = get_int(ftag, 'bitOffset')
fwidth = get_int(ftag, 'bitWidth')
fdesc = get_string(ftag, 'description')
faccess = get_access(ftag)
dictEnumVals = {}
for etag in iter_enumerated_values(ftag):
dictEnumVals[get_string(etag, 'name')] = (get_string(etag, 'description') , get_string(etag, 'value'))
fields[fname] = {"name": fname, "offset": foffset,
"width": fwidth, "description": fdesc,
"access": faccess, 'enumVals' : dictEnumVals}
return {"name": rname, "offset": roffset, "description": rdesc,
"access": raccess, "fields": fields}
def parse(svdfile):
"""
Parse SVD file into dict of peripherals, registers, and fields.
"""
tree = ET.parse(svdfile)
peripherals = {}
device_interrupts = {}
grps_maybe = tree.find('Groups')
if grps_maybe == None:
gtag = []
else:
gtag = grps_maybe.findall('Group')
if len(gtag) == 0:
gtag = [tree]
for tree in gtag:
perips_maybe = tree.find('peripherals')
if perips_maybe == None:
continue
for ptag in perips_maybe.findall('peripheral'):
interrupts = {}
registers = {}
clusters = {}
pname = get_string(ptag, 'name')
pbase = get_int(ptag, 'baseAddress')
for itag in ptag.findall('interrupt'):
iname = get_string(itag, 'name')
idesc = get_string(itag, 'description')
ival = get_int(itag, 'value')
interrupt = {"name": iname, "description": idesc, "value": ival,
"pname": pname}
interrupts[iname] = device_interrupts[ival] = interrupt
for ctag in iter_clusters(ptag):
for ctag in expand_dim(ctag):
cname = get_string(ctag, 'name')
cdesc = get_string(ctag, 'description')
coff = get_int(ctag, 'addressOffset')
for rtag in expand_cluster(ctag):
register = parse_register(rtag)
registers[register['name']] = register
clusters[cname] = {"name": cname, "description": cdesc,
"offset": coff}
for rtag in iter_registers(ptag):
for rtag in expand_dim(rtag):
register = parse_register(rtag)
registers[register['name']] = register
peripherals[pname] = {"name": pname, "base": pbase,
"interrupts": interrupts, "registers": registers,
"clusters": clusters}
if 'derivedFrom' in ptag.attrib:
peripherals[pname]["derives"] = ptag.attrib["derivedFrom"]
for pname, periph in list(peripherals.items()):
if 'derives' in periph:
peripherals[pname]['registers'] = \
peripherals[periph['derives']]['registers']
return {"name": svdfile.split(".")[0], "peripherals": peripherals,
"interrupts": device_interrupts}
def to_text(device):
"""
Output sorted text of every peripheral, register, field, and interrupt
in the device, such that automated diffing is possible.
"""
mmap = []
for i in device['interrupts'].values():
mmap.append(f"INTERRUPT {i['value']:03d}: "
+ f"{i['name']} ({i['pname']}): {i['description']}")
for p in device['peripherals'].values():
mmap.append(f"0x{p['base']:08X} A PERIPHERAL {p['name']}")
for c in p['clusters'].values():
addr = p['base'] + c['offset']
mmap.append(f"0x{addr:08X} B CLUSTER {c['name']}: "
+ f"{c['description']}")
for r in p['registers'].values():
addr = p['base'] + r['offset']
mmap.append(f"0x{addr:08X} B REGISTER {r['name']}{r['access']}: "
+ f"{r['description']}")
for f in r['fields'].values():
offset, width = f['offset'], f['width']
mmap.append(f"0x{addr:08X} C FIELD {offset:02d}w{width:02d} "
+ f"{f['name']}{f['access']}: "
+ f"{f['description']}")
return "\n".join(sorted(mmap))
def _gen_lines_from_keys(dict, isSort = True):
ndx = 0
sLine = ''
lst = []
if isSort:
keys = sorted(dict.keys())
else:
keys = dict.keys()
for key in keys:
ndx += 1
sLine += key + (' ' if len(key) > 15 else ' ' * (16 - len(key)))
if ndx % 4 == 0 or len(sLine) >= 64:
lst.append(sLine)
ndx = 0
sLine = ''
if len(sLine) > 0:
lst.append(sLine)
return lst
def indentify(sIn = '', maxLen = 80, indent = 4):
sOut = ''
inLen = len(sIn)
ndx = 0
maxLen -= indent
while ndx < inLen:
sOut += ' ' * indent
lineLen = maxLen if ndx+maxLen < inLen else inLen - ndx
sOut += sIn[ndx:ndx+lineLen] + ('' if ndx+lineLen>=inLen else '\n')
ndx += lineLen
return sOut
def doc_gen_peripherals(device = {}):
lst = ['#### p.']
lst.append('<lang=dft>')
dictP = device['peripherals']
CSI_START = '\033['
CSI_END = '\033[0m'
NOCOLOR="\033[0m"
BLACK="\033[0;30m"
HL_BLACK="\033[1;30m"
RED="\033[0;31m"
HL_RED="\033[1;31m"
GREEN="\033[0;32m"
HL_GREEN="\033[1;32m"
BROWN="\033[0;33m"
YELLOW="\033[1;33m"
BLUE="\033[0;34m"
HL_BLUE="\033[1;34m"
PURPLE="\033[0;35m"
HL_PURPLE="\033[1;35m"
CYAN="\033[0;36m"
HL_CYAN="\033[1;36m"
GRAY="\033[0;37m"
WHITE="\033[1;37m"
lst += _gen_lines_from_keys(dictP)
lst.append('type help(\'p.{perip name}\') such as p.GPIO1 to check details')
lst.append('输入help(\'p.外设名\') 例如 p.GPIO1 以查看细节')
lst.append('</lang>')
lst.append('#### perip')
lst.append('<link=peripherals>')
lst.append('#### jicunqi')
lst.append('<link=peripherals>')
sOut = '\n'.join(lst)
dictRegRepeat = {}
# gen peripherals
for key in dictP.keys():
lst.append('#### p.%s' % key)
lst.append('<lang=dft>')
dictPInfo = dictP[key]
dictRegs = dictPInfo['registers']
baseAddr = dictPInfo['base']
lstCtnt = ['base: 0x%08x' % (baseAddr)]
lstCtnt += _gen_lines_from_keys(dictRegs)
lst += lstCtnt
lst.append('输入 p.%s.{reg_name} 以查看寄存器的详细信息' % key)
lst.append('type p.%s.{reg_name} to check details of registers' % key)
lst.append('</lang>')
lst.append('#### p.%s' % key.lower())
lst.append('<link=p.%s>' % key)
for regName in dictRegs.keys():
comboRegName = dictPInfo['name'] + '.' + regName
dictReg = dictRegs[regName]
if regName in dictRegRepeat.keys():
dictRegRepeat[regName] += [comboRegName]
else:
dictRegRepeat[regName] = [comboRegName]
lst.append('#### p.%s' % comboRegName.upper())
lst.append('<lang=dft>')
lst.append('%s %s0x%08x%s (0x%08x + 0x%04x)' %
(dictReg['access'], YELLOW, baseAddr + dictReg['offset'], NOCOLOR, baseAddr, dictReg['offset']))
lst.append(dictReg['description'])
dictBFs = dictReg['fields']
for keyBF in dictBFs.keys():
dictBF = dictBFs[keyBF]
bOfs = dictBF['offset']
bLen = dictBF['width']
if bOfs == None or bLen == None:
continue
sDesc = indentify(dictBF['description'], 80, 1)
sBF = '%s (%02d) %s%s%s - [%02d:%02d] - %s' % \
(dictBF['access'], bLen, GREEN, keyBF, NOCOLOR, bOfs+bLen-1, bOfs, sDesc)
lst.append(sBF)
if not 'enumVals' in dictBF.keys():
continue
dictEnum = dictBF['enumVals']
for keyEnum in dictEnum.keys():
tupEnum = dictEnum[keyEnum]
sEV = ' %s - %s :' % (tupEnum[1], keyEnum)
lst.append(sEV)
sHelp = indentify(tupEnum[0], 80, 9)
lst.append((sHelp))
lst.append('</lang>')
lst.append('#### p.%s' % comboRegName.lower())
lst.append('<link=p.%s>' % comboRegName.upper())
lst.append('#### %s' % comboRegName.lower())
lst.append('<link=p.%s>' % comboRegName.upper())
lst.append('#### %s' % comboRegName)
lst.append('<link=p.%s>' % comboRegName.upper())
# generate fast registers
for reg in dictRegRepeat.keys():
lstVal = dictRegRepeat[reg]
lst.append('#### %s' % reg)
if len(lstVal) == 1:
lst.append('<link=p.%s>' % lstVal[0])
else:
lst.append('<lang=dft>')
lst.append('多个外设有这个寄存器/ multiple same names:')
            for s1 in lstVal:
                lst.append('    p.%s' % s1)
            lst.append('</lang>')
lst.append('#### %s' % reg.lower())
lst.append('<link=%s>' % reg)
sOut = '\r\n'.join(lst)
return sOut
import os.path as path
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', help='print more info', action='store_true')
parser.add_argument('-f', '--file', type=str, default = 'imxrt1062.svd',
help='svd file')
args,unparsed = parser.parse_known_args()
lstPath = args.file.split(path.sep)
sOutFile = lstPath[-1][:-4] + '.md'
device = parse(args.file)
sOut = '''
#### mcu
<lang=chs>
i.MX RT1050/1060 系列跨界处理器
输入 help('p.') 以查看可用的外设列表
<lang=dft>
i.MX RT1050/1060 series cross-over processors
type help('p.') to list available peripherals
</lang>
'''
sOut += doc_gen_peripherals(device)
fd = open('I:\\aia_doc\\inputs\\%s' % sOutFile, 'wb')
fd.write(sOut.encode())
fd.close()
if __name__ == '__main__':
main()
|
b7f20348d5e3a85150250c86c1940f92e5905582
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/framework/auth/__init__.py
|
bd46acb9dd5bd47533f0fdcc39933697d09ab6ac
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,986
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
import uuid
from django.utils import timezone
from framework import bcrypt
from framework.auth import signals
from framework.auth.core import Auth
from framework.auth.core import get_user, generate_verification_key
from framework.auth.exceptions import DuplicateEmailError
from framework.auth.tasks import update_user_from_activity
from framework.auth.utils import LogLevel, print_cas_log
from framework.celery_tasks.handlers import enqueue_task
from framework.sessions import get_session, create_session
from framework.sessions.utils import remove_session
__all__ = [
'Auth',
'get_user',
'check_password',
'authenticate',
'external_first_login_authenticate',
'logout',
'register_unconfirmed',
]
# check_password(actual_pw_hash, given_password) -> Boolean
check_password = bcrypt.check_password_hash
def authenticate(user, response, user_updates=None):
data = {
'auth_user_username': user.username,
'auth_user_id': user._primary_key,
'auth_user_fullname': user.fullname,
}
print_cas_log(f'Finalizing authentication - data updated: user=[{user._id}]', LogLevel.INFO)
enqueue_task(update_user_from_activity.s(user._id, timezone.now().timestamp(), cas_login=True, updates=user_updates))
print_cas_log(f'Finalizing authentication - user update queued: user=[{user._id}]', LogLevel.INFO)
user_session, response = create_session(response, data=data)
if not user_session:
return response
from osf.models import UserSessionMap
UserSessionMap.objects.get_or_create(user=user, session_key=user_session.session_key)
print_cas_log(f'Finalizing authentication - session created: user=[{user._id}]', LogLevel.INFO)
return response
def external_first_login_authenticate(user_dict, response):
"""
Create a special unauthenticated session for user login through external identity provider for the first time.
:param user_dict: the user with external credential
:param response: the response to return
:return: the response
"""
data = {
'auth_user_external_id_provider': user_dict['external_id_provider'],
'auth_user_external_id': user_dict['external_id'],
'auth_user_fullname': user_dict['fullname'],
'auth_user_external_first_login': True,
'service_url': user_dict['service_url'],
}
user_identity = '{}#{}'.format(user_dict['external_id_provider'], user_dict['external_id'])
print_cas_log(
f'Finalizing first-time login from external IdP - data updated: user=[{user_identity}]',
LogLevel.INFO,
)
# Note: we don't need to keep track of this anonymous session, and thus no entry is created in `UserSessionMap`
user_session, response = create_session(response, data=data)
if user_session:
print_cas_log(
f'Finalizing first-time login from external IdP - anonymous session created: user=[{user_identity}]',
LogLevel.INFO,
)
return response
def logout():
"""Clear users' session(s) and log them out of OSF."""
remove_session(get_session())
def register_unconfirmed(username, password, fullname, campaign=None, accepted_terms_of_service=None):
from osf.models import OSFUser
user = get_user(email=username)
if not user:
user = OSFUser.create_unconfirmed(
username=username,
password=password,
fullname=fullname,
campaign=campaign,
accepted_terms_of_service=accepted_terms_of_service
)
user.save()
signals.unconfirmed_user_created.send(user)
elif not user.is_registered: # User is in db but not registered
user.add_unconfirmed_email(username)
user.set_password(password)
user.fullname = fullname
user.update_guessed_names()
user.save()
else:
raise DuplicateEmailError('OSFUser {0!r} already exists'.format(username))
return user
def get_or_create_institutional_user(fullname, sso_email, sso_identity, primary_institution):
"""
    Get or create an institutional user by full name, email address, SSO identity and institution.
Returns a tuple of five objects ``(user, is_created, duplicate_user, email_to_add, identity_to_add)``:
1. the user to authenticate
2. whether the user is newly created or not
3. whether a potential duplicate user is found or not
4. the extra email to add to the user account
5. the sso identity to add to the affiliation
    Note: a secondary institution always has a primary institution, which shares its email and identity
:param str fullname: user's full name
:param str sso_email: user's email, which comes from the email attribute during SSO
:param str sso_identity: user's institutional identity, which comes from the identity attribute during SSO
:param Institution primary_institution: the primary institution
:raises ``InstitutionAffiliationStateError`` when same SSO identity is found on more than one users per institution
"""
from osf.models import OSFUser
from osf.models.institution_affiliation import get_user_by_institution_identity
user_by_email = get_user(email=sso_email)
# ``InstitutionAffiliationStateError`` can be raised by ``get_user_by_institution_identity()``, the caller of
# ``get_or_create_institutional_user()`` must handle it properly
user_by_identity, is_identity_eligible = get_user_by_institution_identity(primary_institution, sso_identity)
# Avoid adding an sso identity that is not eligible
if not is_identity_eligible:
sso_identity = None
if user_by_identity:
# CASE 1/5: the user is only found by identity but not by email, return the user and the sso email to add
if not user_by_email:
return user_by_identity, False, None, sso_email, None
# CASE 2/5: the same user is found by both email and identity, return the user
if user_by_email == user_by_identity:
return user_by_email, False, None, None, None
# CASE 3/5: two different users are found, one by email and the other by identity, return the user found
# by email as the authn user and the user found by identity as the duplicate; in addition, return the
# sso identity to be added to the user found by email
return user_by_email, False, user_by_identity, None, sso_identity
# CASE 4/5: the user is only found by email but not by identity, return the user and the sso identity to add
if user_by_email:
return user_by_email, False, None, None, sso_identity
# CASE 5/5: If no user is found, create a confirmed user and return the user and sso identity.
# Note: Institution users are created as confirmed with a strong and random password. Users don't need the
# password since they sign in via SSO. They can reset their password to enable email/password login.
user = OSFUser.create_confirmed(sso_email, str(uuid.uuid4()), fullname)
return user, True, None, None, sso_identity
def get_or_create_user(fullname, address, reset_password=True, is_spam=False):
"""
Get or create user by fullname and email address.
:param str fullname: user full name
:param str address: user email address
:param boolean reset_password: ask user to reset their password
:param bool is_spam: user flagged as potential spam
:return: tuple of (user, created)
"""
from osf.models import OSFUser
user = get_user(email=address)
if user:
return user, False
else:
password = str(uuid.uuid4())
user = OSFUser.create_confirmed(address, password, fullname)
if reset_password:
user.verification_key_v2 = generate_verification_key(verification_type='password')
if is_spam:
user.save() # need to save in order to add a tag
user.add_system_tag('is_spam')
return user, True
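# Hedged usage sketch (not part of the module): a typical call site for the helper
# defined above; the name and email below are hypothetical placeholders.
#
#   user, created = get_or_create_user('Jane Doe', 'jane@example.com', reset_password=True)
#   if created:
#       # user.verification_key_v2 was generated because reset_password=True,
#       # so a claim/reset email can be sent to the new account.
#       ...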
|
067ca9a36b980ba220d092407c705c9dbf41bd7e
|
97e78e8f9b1510eae91f00ee6abb06b235f0f5dc
|
/acsconv/operators/conv2_5d.py
|
2e1aa14107637e96991dd8782a09d90ec6074f98
|
[
"Apache-2.0"
] |
permissive
|
M3DV/ACSConv
|
7fc5c01751b005be8cb39dea39258457c7cd3cb4
|
95dc860a77e309f010a3d8be1f675e77c7dfeda4
|
refs/heads/master
| 2023-08-16T23:48:37.571616
| 2023-08-08T01:39:38
| 2023-08-08T01:39:38
| 223,711,776
| 156
| 22
|
Apache-2.0
| 2023-08-08T01:39:39
| 2019-11-24T08:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
conv2_5d.py
|
import torch.nn as nn
class Conv2_5d(nn.Conv3d):
"""
    Decorator class for Conv2_5d, in which kernel size is (1, K, K) or (K, 1, K) or (K, K, 1).
Args:
unsqueeze_axis: optional, the default axis is -3, resulting in a kernel size of (1, K, K)
Other arguments are the same as torch.nn.Conv3d
Examples:
        >>> import torch
        >>> from acsconv.operators.conv2_5d import Conv2_5d
        >>> x = torch.rand(batch_size, 1, D, H, W)
>>> # kernel size is (1, K, K)
>>> conv = Conv2_5d(1, 64, 3, padding=1)
>>> out = conv(x)
>>> # kernel size is (K, K, 1)
        >>> conv = Conv2_5d(1, 64, 3, padding=1, unsqueeze_axis=-1)
>>> out = conv(x)
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode=None,
unsqueeze_axis=-3):
self.unsqueeze_axis = unsqueeze_axis
unsqueeze_axis += 3
kernel_size = [kernel_size, kernel_size]
padding = [padding, padding]
kernel_size.insert(unsqueeze_axis, 1)
padding.insert(unsqueeze_axis, 0)
super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
|
a7d0b2d9aae01d8c483cf3a9958a3fa9f0f6e008
|
e993a7972529f60210d9dd6d7c4097c62c37bcdf
|
/sample/edit.py
|
13457e3b8dcf80aebda4ea0cfe15c3efff268e09
|
[
"MIT"
] |
permissive
|
GuyTevet/motion-diffusion-model
|
64756013105a80ea2a3180a73ac86519b361e53b
|
8139dda55d90a58aa5a257ebf159b2ecfb78c632
|
refs/heads/main
| 2023-09-01T05:00:14.156745
| 2023-06-06T23:42:33
| 2023-06-06T23:42:33
| 543,082,997
| 2,302
| 265
|
MIT
| 2023-08-29T09:27:54
| 2022-09-29T11:24:35
|
Python
|
UTF-8
|
Python
| false
| false
| 9,841
|
py
|
edit.py
|
# This code is based on https://github.com/openai/guided-diffusion
"""
Generate a large batch of edited motion samples from a model and save them as a
large numpy array, along with rendered visualizations of each sample.
"""
from utils.fixseed import fixseed
import os
import numpy as np
import torch
from utils.parser_util import edit_args
from utils.model_util import create_model_and_diffusion, load_model_wo_clip
from utils import dist_util
from model.cfg_sampler import ClassifierFreeSampleModel
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.scripts.motion_process import recover_from_ric
from data_loaders import humanml_utils
import data_loaders.humanml.utils.paramUtil as paramUtil
from data_loaders.humanml.utils.plot_script import plot_3d_motion
import shutil
def main():
args = edit_args()
fixseed(args.seed)
out_path = args.output_dir
name = os.path.basename(os.path.dirname(args.model_path))
niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60
fps = 12.5 if args.dataset == 'kit' else 20
dist_util.setup_dist(args.device)
if out_path == '':
out_path = os.path.join(os.path.dirname(args.model_path),
'edit_{}_{}_{}_seed{}'.format(name, niter, args.edit_mode, args.seed))
if args.text_condition != '':
out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '')
print('Loading dataset...')
assert args.num_samples <= args.batch_size, \
f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})'
# So why do we need this check? In order to protect GPU from a memory overload in the following line.
# If your GPU can handle batch size larger then default, you can specify it through --batch_size flag.
# If it doesn't, and you still want to sample more prompts, run this script with different seeds
# (specify through the --seed flag)
args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples
data = get_dataset_loader(name=args.dataset,
batch_size=args.batch_size,
num_frames=max_frames,
split='test',
hml_mode='train') # in train mode, you get both text and motion.
# data.fixed_length = n_frames
total_num_samples = args.num_samples * args.num_repetitions
print("Creating model and diffusion...")
model, diffusion = create_model_and_diffusion(args, data)
print(f"Loading checkpoints from [{args.model_path}]...")
state_dict = torch.load(args.model_path, map_location='cpu')
load_model_wo_clip(model, state_dict)
model = ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler
model.to(dist_util.dev())
model.eval() # disable random masking
iterator = iter(data)
input_motions, model_kwargs = next(iterator)
input_motions = input_motions.to(dist_util.dev())
texts = [args.text_condition] * args.num_samples
model_kwargs['y']['text'] = texts
if args.text_condition == '':
args.guidance_param = 0. # Force unconditioned generation
# add inpainting mask according to args
assert max_frames == input_motions.shape[-1]
gt_frames_per_sample = {}
model_kwargs['y']['inpainted_motion'] = input_motions
if args.edit_mode == 'in_between':
model_kwargs['y']['inpainting_mask'] = torch.ones_like(input_motions, dtype=torch.bool,
device=input_motions.device) # True means use gt motion
for i, length in enumerate(model_kwargs['y']['lengths'].cpu().numpy()):
start_idx, end_idx = int(args.prefix_end * length), int(args.suffix_start * length)
gt_frames_per_sample[i] = list(range(0, start_idx)) + list(range(end_idx, max_frames))
model_kwargs['y']['inpainting_mask'][i, :, :,
start_idx: end_idx] = False # do inpainting in those frames
elif args.edit_mode == 'upper_body':
model_kwargs['y']['inpainting_mask'] = torch.tensor(humanml_utils.HML_LOWER_BODY_MASK, dtype=torch.bool,
device=input_motions.device) # True is lower body data
model_kwargs['y']['inpainting_mask'] = model_kwargs['y']['inpainting_mask'].unsqueeze(0).unsqueeze(
-1).unsqueeze(-1).repeat(input_motions.shape[0], 1, input_motions.shape[2], input_motions.shape[3])
all_motions = []
all_lengths = []
all_text = []
for rep_i in range(args.num_repetitions):
print(f'### Start sampling [repetitions #{rep_i}]')
# add CFG scale to batch
model_kwargs['y']['scale'] = torch.ones(args.batch_size, device=dist_util.dev()) * args.guidance_param
sample_fn = diffusion.p_sample_loop
sample = sample_fn(
model,
(args.batch_size, model.njoints, model.nfeats, max_frames),
clip_denoised=False,
model_kwargs=model_kwargs,
skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
init_image=None,
progress=True,
dump_steps=None,
noise=None,
const_noise=False,
)
# Recover XYZ *positions* from HumanML3D vector representation
if model.data_rep == 'hml_vec':
n_joints = 22 if sample.shape[1] == 263 else 21
sample = data.dataset.t2m_dataset.inv_transform(sample.cpu().permute(0, 2, 3, 1)).float()
sample = recover_from_ric(sample, n_joints)
sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1)
all_text += model_kwargs['y']['text']
all_motions.append(sample.cpu().numpy())
all_lengths.append(model_kwargs['y']['lengths'].cpu().numpy())
print(f"created {len(all_motions) * args.batch_size} samples")
all_motions = np.concatenate(all_motions, axis=0)
all_motions = all_motions[:total_num_samples] # [bs, njoints, 6, seqlen]
all_text = all_text[:total_num_samples]
all_lengths = np.concatenate(all_lengths, axis=0)[:total_num_samples]
if os.path.exists(out_path):
shutil.rmtree(out_path)
os.makedirs(out_path)
npy_path = os.path.join(out_path, 'results.npy')
print(f"saving results file to [{npy_path}]")
np.save(npy_path,
{'motion': all_motions, 'text': all_text, 'lengths': all_lengths,
'num_samples': args.num_samples, 'num_repetitions': args.num_repetitions})
with open(npy_path.replace('.npy', '.txt'), 'w') as fw:
fw.write('\n'.join(all_text))
with open(npy_path.replace('.npy', '_len.txt'), 'w') as fw:
fw.write('\n'.join([str(l) for l in all_lengths]))
print(f"saving visualizations to [{out_path}]...")
skeleton = paramUtil.kit_kinematic_chain if args.dataset == 'kit' else paramUtil.t2m_kinematic_chain
# Recover XYZ *positions* from HumanML3D vector representation
if model.data_rep == 'hml_vec':
input_motions = data.dataset.t2m_dataset.inv_transform(input_motions.cpu().permute(0, 2, 3, 1)).float()
input_motions = recover_from_ric(input_motions, n_joints)
input_motions = input_motions.view(-1, *input_motions.shape[2:]).permute(0, 2, 3, 1).cpu().numpy()
for sample_i in range(args.num_samples):
caption = 'Input Motion'
length = model_kwargs['y']['lengths'][sample_i]
motion = input_motions[sample_i].transpose(2, 0, 1)[:length]
save_file = 'input_motion{:02d}.mp4'.format(sample_i)
animation_save_path = os.path.join(out_path, save_file)
rep_files = [animation_save_path]
print(f'[({sample_i}) "{caption}" | -> {save_file}]')
plot_3d_motion(animation_save_path, skeleton, motion, title=caption,
dataset=args.dataset, fps=fps, vis_mode='gt',
gt_frames=gt_frames_per_sample.get(sample_i, []))
for rep_i in range(args.num_repetitions):
caption = all_text[rep_i*args.batch_size + sample_i]
if caption == '':
caption = 'Edit [{}] unconditioned'.format(args.edit_mode)
else:
caption = 'Edit [{}]: {}'.format(args.edit_mode, caption)
length = all_lengths[rep_i*args.batch_size + sample_i]
motion = all_motions[rep_i*args.batch_size + sample_i].transpose(2, 0, 1)[:length]
save_file = 'sample{:02d}_rep{:02d}.mp4'.format(sample_i, rep_i)
animation_save_path = os.path.join(out_path, save_file)
rep_files.append(animation_save_path)
print(f'[({sample_i}) "{caption}" | Rep #{rep_i} | -> {save_file}]')
plot_3d_motion(animation_save_path, skeleton, motion, title=caption,
dataset=args.dataset, fps=fps, vis_mode=args.edit_mode,
gt_frames=gt_frames_per_sample.get(sample_i, []))
# Credit for visualization: https://github.com/EricGuo5513/text-to-motion
all_rep_save_file = os.path.join(out_path, 'sample{:02d}.mp4'.format(sample_i))
ffmpeg_rep_files = [f' -i {f} ' for f in rep_files]
hstack_args = f' -filter_complex hstack=inputs={args.num_repetitions+1}'
ffmpeg_rep_cmd = f'ffmpeg -y -loglevel warning ' + ''.join(ffmpeg_rep_files) + f'{hstack_args} {all_rep_save_file}'
os.system(ffmpeg_rep_cmd)
print(f'[({sample_i}) "{caption}" | all repetitions | -> {all_rep_save_file}]')
abs_path = os.path.abspath(out_path)
print(f'[Done] Results are at [{abs_path}]')
if __name__ == "__main__":
main()
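# Hedged usage sketch (not from the repo docs): the attributes referenced above via
# `args.*` suggest an invocation along these lines; the checkpoint path and flag
# spelling are placeholders inferred from the argument names, not verified.
#
#   python -m sample.edit --model_path ./save/<run_name>/model<step>.pt \
#       --edit_mode in_between --num_samples 3 --num_repetitions 2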
|
ddbbf541b9f7dcccca9805318d6905f12cbfa90e
|
8488fa51bd937bc9403d636279ba03ee5b1bd4c0
|
/trac/upgrades/db36.py
|
8f6a94699d9e1f183659303bd20089c6333bd5f2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
edgewall/trac
|
e7ecd994121c6e30b39e98dc6ad9b9edf5be4559
|
f7eba7b121c9ff227b062e9d032ff4d4582adc39
|
refs/heads/trunk
| 2023-08-17T00:13:12.555838
| 2023-07-02T15:13:51
| 2023-07-02T15:13:51
| 615,096
| 399
| 173
|
NOASSERTION
| 2023-03-07T13:46:40
| 2010-04-17T15:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
db36.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2023 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import os
from trac.util import backup_config_file
def do_upgrade(env, version, cursor):
"""Change [authz_policy] authz_file to be relative to the `conf`
directory.
"""
authz_file = env.config.get('authz_policy', 'authz_file')
if authz_file and not os.path.isabs(authz_file):
parts = os.path.split(authz_file)
if len(parts) == 2 and parts[0] == 'conf':
env.config.set('authz_policy', 'authz_file', parts[1])
backup_config_file(env, '.db36.bak')
env.config.save()
|
0780d0d6cd996aec892792fd516841438c35e557
|
660c4c4c14b29109a772b00169a0fd50108273fa
|
/matrixprofile/discover.py
|
9f0466324971b891b660e1255d9ccaaf892efe98
|
[
"Apache-2.0"
] |
permissive
|
matrix-profile-foundation/matrixprofile
|
80c3e026c11b39e6431b0e248cdd04f9eb482858
|
6fbd5fe2fd0e93162ef77c4da1b30188072dd404
|
refs/heads/master
| 2022-11-28T13:26:12.289263
| 2022-11-25T13:40:05
| 2022-11-25T13:40:05
| 198,119,545
| 345
| 77
|
Apache-2.0
| 2023-08-17T17:40:45
| 2019-07-22T00:33:37
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
discover.py
|
from matrixprofile.algorithms.top_k_discords import top_k_discords as discords
from matrixprofile.algorithms.top_k_motifs import top_k_motifs as motifs
from matrixprofile.algorithms.snippets import snippets
from matrixprofile.algorithms.regimes import extract_regimes as regimes
from matrixprofile.algorithms.statistics import statistics
from matrixprofile.algorithms.hierarchical_clustering import (
hierarchical_clusters
)
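# Illustrative usage sketch (not part of the library module): compute a profile and
# run the discovery helpers re-exported above. Assumes the documented top-level
# `matrixprofile.compute` API.
if __name__ == '__main__':
    import numpy as np
    import matrixprofile as mp
    ts = np.random.uniform(size=1024)
    profile = mp.compute(ts, windows=32)
    profile = motifs(profile, k=2)
    profile = discords(profile, k=2)
    print(len(profile['motifs']), 'motifs,', len(profile['discords']), 'discords')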
|
c50659168a3246aa4284be8a590c942a0b50d558
|
c5fd80ede07f0972a9b99d0c65a0df40e6d487fa
|
/pyocd/utility/sockets.py
|
61d365da5c6ab8c6682d57b1db0fcfd54b4bb40d
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pyocd/pyOCD
|
46330f3a10c9be381293d220cc025e0e347513ce
|
9253740baf46ebf4eacbce6bf3369150c5fb8ee0
|
refs/heads/main
| 2023-08-18T07:56:54.205305
| 2023-08-13T19:11:01
| 2023-08-13T19:11:01
| 13,862,423
| 507
| 204
|
Apache-2.0
| 2023-09-09T20:13:57
| 2013-10-25T14:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,741
|
py
|
sockets.py
|
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import select
class ListenerSocket(object):
def __init__(self, port, packet_size):
self.packet_size = packet_size
self.listener = None
self.conn = None
self.port = port
self.host = 'localhost'
def init(self):
if self.listener is None:
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind((self.host, self.port))
# If we were asked for port 0, that's treated as "auto".
# Read back the port - allows our user to find (and print) it,
# and means that if we're closed then re-opened, as happens when
# persisting for multiple sessions, we reuse the same port, which
# is convenient.
if self.port == 0:
self.port = self.listener.getsockname()[1]
self.listener.listen(1)
def connect(self):
self.conn = None
self.init()
rr, _, _ = select.select([self.listener], [], [], 0.5)
if rr:
self.conn, _ = self.listener.accept()
return self.conn
def read(self, packet_size=None):
if packet_size is None:
packet_size = self.packet_size
return self.conn.recv(packet_size)
def write(self, data):
return self.conn.send(data)
def close(self):
return_value = None
if self.conn is not None:
return_value = self.conn.close()
self.conn = None
return return_value
def cleanup(self):
self.close()
if self.listener is not None:
self.listener.close()
self.listener = None
def set_blocking(self, blocking):
self.conn.setblocking(blocking)
def set_timeout(self, timeout):
self.conn.settimeout(timeout)
class ClientSocket(object):
"""@brief Simple client-side TCP socket.
Provides a file-like interface to a TCP socket. Blocking and timeout are configurable.
"""
DEFAULT_TIMEOUT = 10.0
def __init__(self, host, port, packet_size=4096, timeout=None):
self._address = (host, port)
self._packet_size = packet_size
self._timeout = timeout or self.DEFAULT_TIMEOUT
self._socket = None
self._buffer = bytearray()
def connect(self):
self._socket = socket.create_connection(self._address, self._timeout)
def close(self):
if self._socket is not None:
# Close both ends of the connection, then close the socket itself.
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def set_blocking(self, blocking):
self._socket.setblocking(blocking)
def set_timeout(self, timeout):
"""@brief Change the socket to blocking with timeout mode."""
self._socket.settimeout(timeout)
def read(self, packet_size=None):
if packet_size is None:
packet_size = self._packet_size
# Pull from the buffer first.
# if len(self._buffer):
# length = min(len(self._buffer), packet_size)
# data = self._buffer[:length]
# self._buffer = self._buffer[length:]
# return data
return self._socket.recv(packet_size)
def write(self, data):
return self._socket.sendall(data)
def readline(self):
while True:
# Try to extract a line from the buffer.
offset = self._buffer.find(b'\n')
if offset != -1:
offset += 1 # include lf
data = self._buffer[:offset]
del self._buffer[:offset]
return data
# Read a chunk and put in the buffer, then try again.
while True:
try:
data = self.read()
except socket.timeout:
pass
else:
break
self._buffer += data
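# Illustrative usage sketch (not part of pyOCD): drive a line-oriented TCP service
# with ClientSocket. Host, port, and the command string are placeholders.
if __name__ == "__main__":
    client = ClientSocket("localhost", 4444)
    client.connect()
    try:
        client.write(b"version\n")
        print(client.readline().decode().strip())
    finally:
        client.close()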
|
0df61675ebd19a9ae2c421a12b35624bf5f955e3
|
58919431a7fb4da999b8584d5fa7ace5232b3e8a
|
/heudiconv/info.py
|
e76bc37f3a9d05b03724878b6d94b3770119040a
|
[
"Apache-2.0"
] |
permissive
|
nipy/heudiconv
|
1eb767cbf2bcf00b6d08d78584a9ee6b037cab2e
|
bf9b75b34ea002f73cc6cf54189e4de5efcb2a91
|
refs/heads/master
| 2023-08-08T01:00:14.261232
| 2023-07-25T19:55:56
| 2023-07-25T19:55:56
| 42,650,211
| 207
| 141
|
NOASSERTION
| 2023-09-11T13:52:41
| 2015-09-17T10:34:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
info.py
|
__author__ = "HeuDiConv team and contributors"
__url__ = "https://github.com/nipy/heudiconv"
__packagename__ = "heudiconv"
__description__ = "Heuristic DICOM Converter"
__license__ = "Apache 2.0"
__longdesc__ = """Convert DICOM dirs based on heuristic info - HeuDiConv
uses the dcmstack package and dcm2niix tool to convert DICOM directories or
tarballs into collections of NIfTI files following pre-defined heuristic(s)."""
CLASSIFIERS = [
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering",
"Typing :: Typed",
]
PYTHON_REQUIRES = ">=3.7"
REQUIRES = [
# not usable in some use cases since might be just a downloader, not binary
# 'dcm2niix',
"dcmstack>=0.8",
"etelemetry",
"filelock>=3.0.12",
"nibabel",
"nipype >=1.2.3",
"pydicom >= 1.0.0",
]
TESTS_REQUIRES = [
"pytest",
"tinydb",
"inotify",
]
MIN_DATALAD_VERSION = "0.13.0"
EXTRA_REQUIRES = {
"tests": TESTS_REQUIRES,
"extras": [
"duecredit", # optional dependency
], # Requires patched version ATM ['dcmstack'],
"datalad": ["datalad >=%s" % MIN_DATALAD_VERSION],
}
# Flatten the lists
EXTRA_REQUIRES["all"] = sum(EXTRA_REQUIRES.values(), [])
|
9fb653e42c293fe0fa8b6515232e5ba744487ebb
|
faa390890e17219fd763bd66e66bb6753c692b14
|
/jacinle/__init__.py
|
84d0c2baf87baadaf5d9c2634e6de2382d51751e
|
[
"MIT"
] |
permissive
|
vacancy/Jacinle
|
7170b1c798e4a903186abe74d28e4a7e034ec766
|
20021790fd32ef1ad40c67fba7582c6db54235da
|
refs/heads/master
| 2023-07-20T03:54:46.693649
| 2023-07-12T21:00:10
| 2023-07-12T21:00:10
| 117,910,172
| 135
| 275
|
MIT
| 2023-01-18T17:41:33
| 2018-01-18T00:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 8,379
|
py
|
__init__.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/18/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
"""The Jacinle library.
This main library contains a set of useful utility functions and classes for general Python scripting.
There are a few automatically imported submodules that can be accessed by ``jacinle.<submodule>``.
.. rubric:: Command Line Tools
.. autosummary::
~jacinle.cli.argument.JacArgumentParser
~jacinle.cli.keyboard.yes_or_no
~jacinle.cli.keyboard.maybe_mkdir
~jacinle.cli.git.git_guard
.. rubric:: Logging
.. autosummary::
~jacinle.logging.get_logger
~jacinle.logging.set_logger_output_file
.. rubric:: Configuration
See :doc:`jacinle.config.environ_v2` for more details.
.. autosummary::
~jacinle.config.environ_v2.configs
~jacinle.config.environ_v2.def_configs
~jacinle.config.environ_v2.def_configs_func
~jacinle.config.environ_v2.set_configs
~jacinle.config.environ_v2.set_configs_func
~jacinle.utils.env.jac_getenv
~jacinle.utils.env.jac_is_verbose
~jacinle.utils.env.jac_is_debug
.. rubric:: Utilities (Core)
.. autosummary::
~jacinle.utils.context.EmptyContext
~jacinle.utils.context.KeyboardInterruptContext
~jacinle.utils.enum.JacEnum
~jacinle.utils.meta.Clock
~jacinle.utils.deprecated.deprecated
~jacinle.utils.imp.load_module
~jacinle.utils.imp.load_module_filename
~jacinle.utils.imp.load_source
~jacinle.utils.meta.gofor
~jacinle.utils.meta.run_once
~jacinle.utils.meta.try_run
~jacinle.utils.meta.map_exec
~jacinle.utils.meta.filter_exec
~jacinle.utils.meta.first
~jacinle.utils.meta.first_n
~jacinle.utils.meta.stmap
~jacinle.utils.meta.method2func
~jacinle.utils.meta.map_exec_method
~jacinle.utils.meta.decorator_with_optional_args
~jacinle.utils.meta.cond_with
~jacinle.utils.meta.cond_with_group
~jacinle.utils.meta.merge_iterable
~jacinle.utils.meta.dict_deep_update
~jacinle.utils.meta.dict_deep_kv
~jacinle.utils.meta.dict_deep_keys
~jacinle.utils.meta.assert_instance
~jacinle.utils.meta.assert_none
~jacinle.utils.meta.assert_notnone
~jacinle.utils.meta.notnone_property
~jacinle.utils.meta.synchronized
~jacinle.utils.meta.timeout
~jacinle.utils.meta.make_dummy_func
~jacinle.utils.meta.repr_from_str
~jacinle.utils.inspect.class_name
~jacinle.utils.inspect.func_name
~jacinle.utils.inspect.method_name
~jacinle.utils.inspect.class_name_of_method
~jacinle.utils.printing.indent_text
~jacinle.utils.printing.stprint
~jacinle.utils.printing.stformat
~jacinle.utils.printing.kvprint
~jacinle.utils.printing.kvformat
~jacinle.utils.printing.print_to_string
~jacinle.utils.printing.print_to
~jacinle.utils.printing.suppress_stdout
.. rubric:: Utilities (IO)
.. autosummary::
~jacinle.io.fs.load
~jacinle.io.fs.dump
~jacinle.io.fs.load_json
~jacinle.io.fs.dump_json
~jacinle.io.fs.load_pkl
~jacinle.io.fs.dump_pkl
~jacinle.io.fs.lsdir
~jacinle.io.fs.mkdir
.. rubric:: Utilities (Cache)
.. autosummary::
~jacinle.utils.cache.cached_property
~jacinle.utils.cache.cached_result
~jacinle.utils.cache.fs_cached_result
.. rubric:: Utilities (TQDM)
.. autosummary::
~jacinle.utils.tqdm.get_current_tqdm
~jacinle.utils.tqdm.tqdm
~jacinle.utils.tqdm.tqdm_pbar
~jacinle.utils.tqdm.tqdm_gofor
~jacinle.utils.tqdm.tqdm_zip
~jacinle.concurrency.pool.TQDMPool
.. rubric:: Utilities (Math)
.. autosummary::
~jacinle.utils.meter.GroupMeters
~jacinle.utils.numeric.safe_sum
~jacinle.utils.numeric.mean
~jacinle.utils.numeric.std
~jacinle.utils.numeric.rms
~jacinle.utils.numeric.prod
~jacinle.utils.numeric.divup
~jacinle.random.rng.reset_global_seed
~jacinle.random.rng.seed
~jacinle.random.rng.with_seed
.. rubric:: Utilities (Container)
.. autosummary::
~jacinle.utils.container.g
~jacinle.utils.container.G
~jacinle.utils.container.GView
~jacinle.utils.container.SlotAttrObject
~jacinle.utils.container.OrderedSet
.. rubric:: Utilities (Defaults)
See :doc:`jacinle.utils.defaults` for more details.
.. autosummary::
~jacinle.utils.defaults.defaults_manager
~jacinle.utils.defaults.wrap_custom_as_default
~jacinle.utils.defaults.gen_get_default
~jacinle.utils.defaults.gen_set_default
~jacinle.utils.defaults.option_context
~jacinle.utils.defaults.FileOptions
~jacinle.utils.defaults.default_args
~jacinle.utils.defaults.ARGDEF
.. rubric:: Utilities (Exception and Debugging)
.. autosummary::
~jacinle.utils.debug.hook_exception_ipdb
~jacinle.utils.debug.exception_hook
~jacinle.utils.debug.timeout_ipdb
~jacinle.utils.debug.log_function
~jacinle.utils.debug.profile
~jacinle.utils.debug.time
~jacinle.utils.exception.format_exc
.. rubric:: Utilities (Network and Misc)
.. autosummary::
~jacinle.utils.network.get_local_addr
~jacinle.utils.uid.gen_time_string
~jacinle.utils.uid.gen_uuid4
"""
from jacinle.utils.init import init_main
init_main()
del init_main
from jacinle.utils.env import jac_getenv, jac_is_verbose, jac_is_debug
if jac_getenv('IMPORT_ALL', 'true', 'bool'):
from jacinle.cli.argument import JacArgumentParser
from jacinle.cli.keyboard import yes_or_no, maybe_mkdir
from jacinle.cli.git import git_guard
from jacinle.concurrency.pool import TQDMPool
from jacinle.config.environ_v2 import configs, def_configs, def_configs_func, set_configs, set_configs_func
from jacinle.logging import get_logger, set_logger_output_file
from jacinle.utils.cache import cached_property, cached_result, fs_cached_result
from jacinle.utils.container import G, g, GView, SlotAttrObject, OrderedSet
from jacinle.utils.context import EmptyContext, KeyboardInterruptContext
from jacinle.utils.debug import hook_exception_ipdb, exception_hook, timeout_ipdb, log_function, profile, time
from jacinle.utils.defaults import (
defaults_manager, wrap_custom_as_default, gen_get_default, gen_set_default,
option_context, FileOptions,
default_args, ARGDEF
)
from jacinle.utils.deprecated import deprecated
from jacinle.utils.enum import JacEnum
from jacinle.utils.env import jac_getenv, jac_is_debug, jac_is_verbose
from jacinle.utils.exception import format_exc
from jacinle.utils.imp import load_module, load_module_filename, load_source
from jacinle.utils.meta import (
gofor,
run_once, try_run,
map_exec, filter_exec, first, first_n, stmap,
method2func, map_exec_method,
decorator_with_optional_args,
cond_with, cond_with_group,
merge_iterable,
dict_deep_update, dict_deep_kv, dict_deep_keys,
assert_instance, assert_none, assert_notnone,
notnone_property, synchronized, timeout, Clock, make_dummy_func,
repr_from_str
)
from jacinle.utils.meter import AverageMeter, GroupMeters
from jacinle.utils.inspect import class_name, func_name, method_name, class_name_of_method
from jacinle.utils.network import get_local_addr
from jacinle.utils.numeric import safe_sum, mean, std, rms, prod, divup
from jacinle.utils.printing import indent_text, stprint, stformat, kvprint, kvformat, print_to_string, print_to, suppress_stdout
from jacinle.utils.tqdm import get_current_tqdm, tqdm, tqdm_pbar, tqdm_gofor, tqdm_zip
from jacinle.utils.uid import gen_time_string, gen_uuid4
from jacinle.io.fs import load, dump, mkdir, lsdir
from jacinle.io.fs import load_pkl, dump_pkl
from jacinle.io.pretty import load_json, dump_json
from jacinle.random import reset_global_seed, seed, with_seed
import jacinle.cli.git as git
import jacinle.io as io
import jacinle.nd as nd
import jacinle.random as random
try:
from IPython import embed
except ImportError:
pass
try:
from pprint import pprint
except ImportError:
pass
try:
from tabulate import tabulate
except ImportError:
pass
JAC_VERBOSE = jac_is_verbose()
JAC_DEBUG = jac_is_debug()
|
2d4329d86c6643c39e81a45a21d6d11d61206965
|
d139049fe87b8c209058bea3a885ea36e51b4895
|
/naslib/predictors/trees/ngb.py
|
78326b0e3bf70e8246b3b054ae6b5956c8be8ec5
|
[
"Apache-2.0"
] |
permissive
|
automl/NASLib
|
4541086deb82137c4c47d54ee164d3b636123588
|
dfa2e67e8bcb1222b0c5332580eb25f822198411
|
refs/heads/Develop
| 2023-08-08T01:46:57.115368
| 2023-07-21T12:15:23
| 2023-07-21T12:15:23
| 185,628,775
| 431
| 118
|
Apache-2.0
| 2023-07-21T12:15:54
| 2019-05-08T15:03:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,804
|
py
|
ngb.py
|
import numpy as np
from functools import wraps
from ngboost import NGBRegressor
from ngboost.distns import Normal
from ngboost.scores import LogScore
from sklearn.tree import DecisionTreeRegressor
from naslib.predictors.trees import BaseTree
def parse_params(params, identifier="base"):
parsed_params = {}
for k, v in params.items():
if k.startswith(identifier):
parsed_params[k.replace(identifier, "")] = v
return parsed_params
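# For example, with the hyperparameter dict used below,
# parse_params({"param:n_estimators": 505, "base:max_depth": 6}, identifier="base:")
# returns {"max_depth": 6}: only keys starting with the identifier are kept, with the
# prefix stripped.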
def loguniform(low=0, high=1, size=None):
return np.exp(np.random.uniform(np.log(low), np.log(high), size))
class NGBoost(BaseTree):
@property
def default_hyperparams(self):
params = {
"param:n_estimators": 505,
"param:learning_rate": 0.08127053060223186,
"base:max_depth": 6,
"base:max_features": 0.7920456318712875,
#'early_stopping_rounds': 100,
#'verbose': -1
}
return params
def set_random_hyperparams(self):
if self.hyperparams is None:
# evaluate the default config first during HPO
params = self.default_hyperparams.copy()
else:
params = {
"param:n_estimators": int(loguniform(128, 512)),
"param:learning_rate": loguniform(0.001, 0.1),
"base:max_depth": np.random.choice(24) + 1,
"base:max_features": np.random.uniform(0.1, 1),
}
self.hyperparams = params
return params
def get_dataset(self, encodings, labels=None):
if labels is None:
return encodings
else:
return (encodings, (labels - self.mean) / self.std)
def train(self, train_data):
X_train, y_train = train_data
# note: cross-validation will error unless these values are set:
min_samples_leaf = 1
min_samples_split = 2
minibatch_frac = 0.5
base_learner = DecisionTreeRegressor(
criterion="friedman_mse",
min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split,
random_state=None,
splitter="best",
**parse_params(self.hyperparams, identifier="base:")
)
model = NGBRegressor(
Dist=Normal,
Base=base_learner,
Score=LogScore,
minibatch_frac=minibatch_frac,
verbose=True,
**parse_params(self.hyperparams, identifier="param:")
)
return model.fit(X_train, y_train)
def fit(self, xtrain, ytrain, train_info=None, params=None, **kwargs):
if self.hyperparams is None:
self.hyperparams = self.default_hyperparams.copy()
return super(NGBoost, self).fit(xtrain, ytrain, train_info, params, **kwargs)
|
be7e30120a1af70fc5c81542c0996e9f9d418d09
|
368960cc5dc09284dff33129b2b8c31773705b81
|
/docs/conf.py
|
fad2b42c4fd4dcc39d7109e1c4b3a1c5abb65da8
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
collective/icalendar
|
802f70ecaa325cc5a8ce65e0c41bfcbdccf0a97b
|
8fb3353408df94d54b74a7e05586fd6c99eed7ef
|
refs/heads/master
| 2023-09-01T08:50:59.574456
| 2023-08-31T06:05:59
| 2023-08-31T06:05:59
| 2,222,138
| 716
| 166
|
NOASSERTION
| 2023-09-06T17:54:06
| 2011-08-17T14:16:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
conf.py
|
# icalendar documentation build configuration file
import pkg_resources
import datetime
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = 'default'
if not on_rtd:
print('-' * 74)
print('Warning: sphinx-rtd-theme not installed, building with default '
'theme.')
print('-' * 74)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode'
]
source_suffix = '.rst'
master_doc = 'index'
project = 'icalendar'
this_year = datetime.date.today().year
copyright = f'{this_year}, Plone Foundation'
version = pkg_resources.get_distribution('icalendar').version
release = version
exclude_patterns = ['_build', 'lib', 'bin', 'include', 'local']
pygments_style = 'sphinx'
htmlhelp_basename = 'icalendardoc'
man_pages = [
('index', 'icalendar', 'icalendar Documentation',
['Plone Foundation'], 1)
]
|
4037f988ea4a83678a11c077fcdff572fa0ae138
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/configs/dim/dim_stage2-v16-pln_1xb1-1000k_comp1k.py
|
4ad154c51152759901565f3f21cb825f5c4a78ac
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
dim_stage2-v16-pln_1xb1-1000k_comp1k.py
|
_base_ = ['./dim_stage1-v16_1xb1-1000k_comp1k.py']
save_dir = './work_dirs/'
experiment_name = 'dim_stage2-v16-pln_1xb1-1000k_comp1k'
# model settings
model = dict(
refiner=dict(type='PlainRefiner'),
loss_refine=dict(type='CharbonnierLoss'),
train_cfg=dict(train_backbone=False, train_refiner=True),
test_cfg=dict(refine=True),
)
# load_from = \
# 'https://download.openmmlab.com/mmediting/mattors/dim/'\
# 'dim_stage1_v16_1x1_1000k_comp1k_SAD-53.8_20200605_140257-979a420f.pth'
|
35000b7c7e288b150fdd94278cac056324ea9ebc
|
2cfa0c5555d1720b3955bd08f0594d8e24b99f2d
|
/fastcore/script.py
|
cb545791747b2a7c8560f0dc705eba4e4e15d7ee
|
[
"Apache-2.0"
] |
permissive
|
fastai/fastcore
|
7aa8b4badec8e6ea00412f673abe8a7fcbd4781e
|
1cb38a0d8d97091cd3365e669e39db64be40aaa2
|
refs/heads/master
| 2023-09-04T00:26:32.976525
| 2023-06-25T23:23:50
| 2023-06-25T23:23:50
| 225,460,599
| 877
| 319
|
Apache-2.0
| 2023-06-25T23:23:51
| 2019-12-02T20:16:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,106
|
py
|
script.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/08_script.ipynb.
# %% auto 0
__all__ = ['SCRIPT_INFO', 'store_true', 'store_false', 'bool_arg', 'clean_type_str', 'Param', 'anno_parser', 'args_from_prog',
'call_parse']
# %% ../nbs/08_script.ipynb 13
import inspect,argparse,shutil
from functools import wraps,partial
from .imports import *
from .utils import *
from .docments import docments
# %% ../nbs/08_script.ipynb 15
def store_true():
"Placeholder to pass to `Param` for `store_true` action"
pass
# %% ../nbs/08_script.ipynb 16
def store_false():
"Placeholder to pass to `Param` for `store_false` action"
pass
# %% ../nbs/08_script.ipynb 17
def bool_arg(v):
"Use as `type` for `Param` to get `bool` behavior"
return str2bool(v)
# %% ../nbs/08_script.ipynb 18
def clean_type_str(x:str):
x = str(x)
x = re.sub("(enum |class|function|__main__\.|\ at.*)", '', x)
x = re.sub("(<|>|'|\ )", '', x) # spl characters
return x
# %% ../nbs/08_script.ipynb 21
class Param:
"A parameter in a function used in `anno_parser` or `call_parse`"
def __init__(self, help="", type=None, opt=True, action=None, nargs=None, const=None,
choices=None, required=None, default=None):
if type in (store_true,bool): type,action,default=None,'store_true' ,False
if type==store_false: type,action,default=None,'store_false',True
if type and isinstance(type,typing.Type) and issubclass(type,enum.Enum) and not choices: choices=list(type)
help = help or ""
store_attr()
def set_default(self, d):
if self.default is None:
if d==inspect.Parameter.empty: self.opt = False
else: self.default = d
if self.default is not None:
self.help += f" (default: {self.default})"
@property
def pre(self): return '--' if self.opt else ''
@property
def kwargs(self): return {k:v for k,v in self.__dict__.items()
if v is not None and k!='opt' and k[0]!='_'}
def __repr__(self):
if not self.help and self.type is None: return ""
if not self.help and self.type is not None: return f"{clean_type_str(self.type)}"
if self.help and self.type is None: return f"<{self.help}>"
if self.help and self.type is not None: return f"{clean_type_str(self.type)} <{self.help}>"
# %% ../nbs/08_script.ipynb 28
class _HelpFormatter(argparse.HelpFormatter):
def __init__(self, prog, indent_increment=2):
cols = shutil.get_terminal_size((120,30))[0]
super().__init__(prog, max_help_position=cols//2, width=cols, indent_increment=indent_increment)
def _expand_help(self, action): return self._get_help_string(action)
# %% ../nbs/08_script.ipynb 29
def anno_parser(func, # Function to get arguments from
prog:str=None): # The name of the program
"Look at params (annotated with `Param`) in func and return an `ArgumentParser`"
p = argparse.ArgumentParser(description=func.__doc__, prog=prog, formatter_class=_HelpFormatter)
for k,v in docments(func, full=True, returns=False, eval_str=True).items():
param = v.anno
if not isinstance(param,Param): param = Param(v.docment, v.anno)
param.set_default(v.default)
p.add_argument(f"{param.pre}{k}", **param.kwargs)
p.add_argument(f"--pdb", help=argparse.SUPPRESS, action='store_true')
p.add_argument(f"--xtra", help=argparse.SUPPRESS, type=str)
return p
# %% ../nbs/08_script.ipynb 34
def args_from_prog(func, prog):
"Extract args from `prog`"
if prog is None or '#' not in prog: return {}
if '##' in prog: _,prog = prog.split('##', 1)
progsp = prog.split("#")
args = {progsp[i]:progsp[i+1] for i in range(0, len(progsp), 2)}
annos = type_hints(func)
for k,v in args.items():
t = annos.get(k, Param()).type
if t: args[k] = t(v)
return args
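# For example, assuming `bs` and `lr` are annotated with `Param(..., int)` and
# `Param(..., float)`, a prog string such as "train##bs#32#lr#0.01" drops everything
# before "##" and yields {"bs": 32, "lr": 0.01}; parameters without a type stay strings.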
# %% ../nbs/08_script.ipynb 37
SCRIPT_INFO = SimpleNamespace(func=None)
# %% ../nbs/08_script.ipynb 39
def call_parse(func=None, nested=False):
"Decorator to create a simple CLI from `func` using `anno_parser`"
if func is None: return partial(call_parse, nested=nested)
@wraps(func)
def _f(*args, **kwargs):
mod = inspect.getmodule(inspect.currentframe().f_back)
if not mod: return func(*args, **kwargs)
if not SCRIPT_INFO.func and mod.__name__=="__main__": SCRIPT_INFO.func = func.__name__
if len(sys.argv)>1 and sys.argv[1]=='': sys.argv.pop(1)
p = anno_parser(func)
if nested: args, sys.argv[1:] = p.parse_known_args()
else: args = p.parse_args()
args = args.__dict__
xtra = otherwise(args.pop('xtra', ''), eq(1), p.prog)
tfunc = trace(func) if args.pop('pdb', False) else func
return tfunc(**merge(args, args_from_prog(func, xtra)))
mod = inspect.getmodule(inspect.currentframe().f_back)
if getattr(mod, '__name__', '') =="__main__":
setattr(mod, func.__name__, _f)
SCRIPT_INFO.func = func.__name__
return _f()
else: return _f
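# A minimal usage sketch (illustrative only; the script name `greet.py` and the function
# below are hypothetical, not part of this module):
#
# from fastcore.script import call_parse, Param
#
# @call_parse
# def main(
#     name: Param("Name to greet", str) = "world",
#     count: Param("Times to repeat", int) = 1,
# ):
#     "Greet `name` `count` times"
#     for _ in range(count): print(f"hello {name}")
#
# Running `python greet.py --name Ada --count 2` builds an ArgumentParser from the
# annotated params via `anno_parser` and calls `main` with the parsed values.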
|
8c41b0c401b2c14d35f425893656df41ab20b8a5
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/practise/learn-python/python_basic/none_and_range.py
|
27be7cc420b77f20c94f3adcbe003ea24f3cb6e1
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
none_and_range.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@FileName: none_and_range.py
@Function: python None & range
@Author: Zhihe An
@Site: https://chegva.com
@Time: 2021/6/21
"""
"""一、对象None"""
"""
1、什么是对象None
对象None用于表示数据值的不存在
对象None是占据一定的内存空间的,它并不意味着"空"或"没有定义"
也就是说,None是"something",而不是"nothing"
"""
# 调用内置函数id查看对象None的内存地址
print(id(None)) # 4485465520
"""
2、对象None的使用场景
对象None经常用于变量的初始化,或将变量重置为"数据值不存在"的状态
"""
a = None
print(a) # None
b = 18
print(b) # 18
b = None
print(b) # None
"""二、序列类型range"""
"""
1、什么是range?
range是一种序列类型,range类型用于表示不可变的整数序列
可以调用内置函数range(类range的构造方法)创建range类型的对象,有三种调用方式:
(1) range(stop)
(2) range(start, stop)
(3) range(start, stop, step)
其中,整数序列的起始值的默认值是0,可以使用参数start指定
可以使用参数stop指定整数序列的结束值,创建的range对象不包含stop
整数序列的步长的默认值是1,可以使用参数step进行指定
内置函数range的返回值是一个迭代器对象。为了清楚地表示返回的迭代器对象所表示的整数序列,可以将其转换成列表
range类型的优点在于:不管range对象表示的整数序列有多长,所有range对象占用的内存空间都是相同的,
因为仅仅需要存储start、stop和step。只有当用到range对象时,才会去计算序列中的相关元素
"""
print(range(5)) # range(0, 5)
print(list(range(5))) # [0, 1, 2, 3, 4]
print(list(range(0, 5, 1))) # [0, 1, 2, 3, 4]
print(list(range(1, 5))) # [1, 2, 3, 4]
print(list(range(1, 5, 1))) # [1, 2, 3, 4]
print(list(range(0, 20, 4))) # [0, 4, 8, 12, 16]
print(list(range(0, -20, -4))) # [0, -4, -8, -12, -16]
"""
2、判断range对象中是否存在(不存在)指定的整数
可以使用运算符in(not in)检查range对象表示的整数序列中是否存在(不存在)指定的整数
"""
print(3 in range(5)) # True
print(8 not in range(5)) # True
|
84efa137fe5a5ba489f83451b22a3eb88b90e7b7
|
f062af64ce156719203b79de9c2502b265af27de
|
/tensorflow_datasets/image/lsun.py
|
38c3f56496fc0f935b369ac4efd352c72abfb694
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/datasets
|
d0c58f3db7ce06347671558b9e5a41e12e6913ce
|
41ae3cf1439711ed2f50f99caa0e6702082e6d37
|
refs/heads/master
| 2023-08-31T03:23:16.581638
| 2023-08-30T17:25:34
| 2023-08-30T17:29:38
| 148,221,325
| 4,224
| 1,738
|
Apache-2.0
| 2023-09-14T14:04:22
| 2018-09-10T21:27:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,334
|
py
|
lsun.py
|
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSUN dataset.
Large scene understanding dataset.
"""
import io
import os
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
import tensorflow_datasets.public_api as tfds
LSUN_SCENE_URL = "http://dl.yf.io/lsun/scenes/%s_%s_lmdb.zip"
LSUN_OBJECT_URL = "http://dl.yf.io/lsun/objects/%s.zip"
_CITATION = """\
@article{journals/corr/YuZSSX15,
added-at = {2018-08-13T00:00:00.000+0200},
author = {Yu, Fisher and Zhang, Yinda and Song, Shuran and Seff, Ari and Xiao, Jianxiong},
biburl = {https://www.bibsonomy.org/bibtex/2446d4ffb99a5d7d2ab6e5417a12e195f/dblp},
ee = {http://arxiv.org/abs/1506.03365},
interhash = {3e9306c4ce2ead125f3b2ab0e25adc85},
intrahash = {446d4ffb99a5d7d2ab6e5417a12e195f},
journal = {CoRR},
keywords = {dblp},
timestamp = {2018-08-14T15:08:59.000+0200},
title = {LSUN: Construction of a Large-scale Image Dataset using Deep Learning with Humans in the Loop.},
url = {http://dblp.uni-trier.de/db/journals/corr/corr1506.html#YuZSSX15},
volume = {abs/1506.03365},
year = 2015
}
"""
# From http://dl.yf.io/lsun/categories.txt minus "test"
_SCENES_CATEGORIES = [
"classroom",
"bedroom",
"bridge",
"church_outdoor",
"conference_room",
"dining_room",
"kitchen",
"living_room",
"restaurant",
"tower",
]
# From http://dl.yf.io/lsun/objects/
_OBJECTS_CATEGORIES = [
"airplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"dining_table",
"dog",
"horse",
"motorbike",
"person",
"potted_plant",
"sheep",
"sofa",
"train",
"tv-monitor",
]
class Lsun(tfds.core.GeneratorBasedBuilder):
"""Lsun dataset."""
BUILDER_CONFIGS = [
tfds.core.BuilderConfig( # pylint: disable=g-complex-comprehension
name=category,
description="Images of category %s" % category,
version=tfds.core.Version("3.1.0"),
release_notes={
"3.0.0": "New split API (https://tensorflow.org/datasets/splits)",
"3.1.0": (
"Add builder config for missing `person` object category, "
"and add `id` to the feature dict"
),
},
)
for category in (_SCENES_CATEGORIES + _OBJECTS_CATEGORIES)
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=(
"Large scale images showing different objects "
"from given categories like bedroom, tower etc."
),
features=tfds.features.FeaturesDict({
"id": tfds.features.Text(),
"image": tfds.features.Image(encoding_format="jpeg"),
}),
homepage="https://www.yf.io/p/lsun",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
if self.builder_config.name in _SCENES_CATEGORIES:
extracted_dirs = dl_manager.download_and_extract({
"train": LSUN_SCENE_URL % (self.builder_config.name, "train"),
"val": LSUN_SCENE_URL % (self.builder_config.name, "val"),
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"extracted_dir": extracted_dirs["train"],
"file_path": "%s_%s_lmdb" % (
self.builder_config.name,
"train",
),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"extracted_dir": extracted_dirs["val"],
"file_path": "%s_%s_lmdb" % (self.builder_config.name, "val"),
},
),
]
else:
extracted_dirs = dl_manager.download_and_extract(
{
"train": LSUN_OBJECT_URL % self.builder_config.name,
}
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"extracted_dir": extracted_dirs["train"],
"file_path": self.builder_config.name,
},
)
]
def _generate_examples(self, extracted_dir, file_path):
with tf.Graph().as_default():
path = os.path.join(extracted_dir, file_path, "data.mdb")
if not tf.io.gfile.exists(path):
raise RuntimeError(f"Could not open file {path}!")
dataset = tfds.core.lazy_imports.tensorflow_io.IODataset.from_lmdb(path)
for i, (id_bytes, jpeg_image) in enumerate(tfds.as_numpy(dataset)):
record = {
"id": id_bytes.decode("utf-8"),
"image": io.BytesIO(jpeg_image),
}
yield i, record
|
6b6d9b9d4994fcaafae194d789603dc047395577
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/ddev/src/ddev/plugin/specs.py
|
911af22354a5880a15291314b917df8bd6ae870a
|
[
"MIT",
"BSD-3-Clause",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
specs.py
|
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pluggy
spec = pluggy.HookspecMarker('ddev')
@spec
def register_commands():
"""Register new commands with the CLI."""
|
4d527bcc964c1e083b3fc3ffd7c2d303b8a452fa
|
67c0bc2b2292857fcc19b3c6e6da5570dc09749c
|
/chapter_2_collection/pyAudioAnalysis3/audacityAnnotation2WAVs.py.bak
|
d506d42df4beba69fc886be60e05e508deebbb10
|
[
"Apache-2.0"
] |
permissive
|
jim-schwoebel/voicebook
|
9d28f638fa6a31cb8c4915f9871c07da261b3ea6
|
0e8eae0f01487f15589c0daa2cf7ca3c6f3b8ad3
|
refs/heads/master
| 2022-12-11T13:41:24.005431
| 2021-04-15T13:51:35
| 2021-04-15T13:51:35
| 137,778,789
| 363
| 84
|
Apache-2.0
| 2022-12-08T03:58:01
| 2018-06-18T16:37:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
bak
|
audacityAnnotation2WAVs.py.bak
|
import glob, os
import audioBasicIO, sys, csv
import scipy.io.wavfile as wavfile
def annotation2files(wavFile, csvFile):
'''
Break an audio stream into segments of interest,
defined by a csv file
- wavFile: path to input wavfile
- csvFile: path to csvFile of segment limits
Input CSV file must be of the format <T1>\t<T2>\t<Label>
'''
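# e.g. a CSV line such as "0.00\t2.50\tspeech" marks a segment from 0.0s to 2.5s labeled "speech"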
[Fs, x] = audioBasicIO.readAudioFile(wavFile)
with open(csvFile, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for j, row in enumerate(reader):
T1 = float(row[0].replace(",","."))
T2 = float(row[1].replace(",","."))
label = "%s_%s_%.2f_%.2f.wav" % (wavFile, row[2], T1, T2)
label = label.replace(" ", "_")
xtemp = x[int(round(T1*Fs)):int(round(T2*Fs))]
print(T1, T2, label, xtemp.shape)
wavfile.write(label, Fs, xtemp)
def main(argv):
if argv[1] == "-f":
wavFile = argv[2]
annotationFile = argv[3]
annotation2files(wavFile, annotationFile)
elif argv[1] == "-d":
inputFolder = argv[2]
types = ('*.txt', '*.csv')
annotationFilesList = []
for files in types:
annotationFilesList.extend(glob.glob(os.path.join(inputFolder, files)))
for anFile in annotationFilesList:
wavFile = os.path.splitext(anFile)[0] + ".wav"
if not os.path.isfile(wavFile):
wavFile = os.path.splitext(anFile)[0] + ".mp3"
if not os.path.isfile(wavFile):
print "Audio file not found!"
return
annotation2files(wavFile, anFile)
if __name__ == '__main__':
# Used to extract a series of annotated WAV files based on (a) an audio file (mp3 or wav) and
# (b) a segment annotation file e.g. a "label" file generated in audacity
#
# usage 1:
# python audacityAnnotation2WAVs.py -f <audiofilepath> <annotationfilepath>
# The <annotationfilepath> is actually a tab-separated file where each line has the format <startTime>\t<endTime>\t<classLabel>
# The result of this process is a series of WAV files with a file name <audiofilepath>_<startTime>_<endTime>_<classLabel>
#
# usage 2:
# python audacityAnnotation2WAVs.py -d <annotationfolderpath>
# Same but searches all .txt and .csv annotation files. Audio files are supposed to be in the same path / filename with a WAV extension
main(sys.argv)
|
b042e26c909417f280aa0d28d626644d5329814c
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/Tilemap_Game_With_CircuitPython/tilegame_assets/__init__.py
|
58d656b41fbf94d192804af366558325c26f00ca
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 179
|
py
|
__init__.py
|
# SPDX-FileCopyrightText: 2018 Limor Fried/ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Brennen Bearnes for Adafruit Industries
#
# SPDX-License-Identifier: MIT
|
ec56330d7b96363a0de4bbd6eccd5b409c1c5c29
|
e8cf6493fee2383f31e77d30c78e47e694dd298f
|
/tools/patch_codegen/typeinf_batch.py
|
1b8612ac838ca06c57799c86dccb0a0c5b9ff88b
|
[
"BSD-3-Clause"
] |
permissive
|
idapython/src
|
30b5af8e819e2d86736cd63527dcda0e4696c680
|
e1c108a7df4b5d80d14d8b0c14ae73b924bff6f4
|
refs/heads/master
| 2023-09-04T08:27:09.228901
| 2023-07-31T14:26:58
| 2023-07-31T14:26:58
| 32,229,857
| 1,371
| 298
|
NOASSERTION
| 2023-07-28T12:34:06
| 2015-03-14T20:09:27
|
Python
|
UTF-8
|
Python
| false
| false
| 303
|
py
|
typeinf_batch.py
|
{
"requires_idb" : [
".*",
"-is_type_.*",
"-get_base_type",
"-get_type_flags",
"-get_full_type",
"-is_typeid_last",
"-is_tah_byte",
"-is_sdacl_byte",
"-get_cc",
"-is_.*_cc",
"-convert_pt_flags_to_hti",
],
}
|
37bbb9f846506642e14a0157e2ce310698d289c4
|
391dfd77c1bb85c08b4ead451ecdab0858eb141f
|
/tests/test_windowconfig.py
|
0829dd2c4b2927da3a63c12a21002ac07e9799c6
|
[
"MIT"
] |
permissive
|
moderngl/moderngl-window
|
308682b5aa625dbb49ca554459bed9853a5e69c3
|
200f2b9ea8b350b0ac9bb6a2d24310c0d8227794
|
refs/heads/master
| 2023-05-28T00:33:49.924394
| 2023-05-18T11:06:26
| 2023-05-18T11:06:26
| 172,498,670
| 205
| 48
|
MIT
| 2023-09-01T17:45:51
| 2019-02-25T12:05:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,409
|
py
|
test_windowconfig.py
|
from pathlib import Path
import moderngl
from headless import WindowConfigTestCase
from moderngl_window import WindowConfig
from moderngl_window.scene import Scene
class WindowConfigTestCase(WindowConfigTestCase):
class TestConfig(WindowConfig):
window_size = (16, 32)
aspect_ratio = 1.0
gl_version = (4, 1)
title = "Test"
resource_dir = Path(__file__).parent / 'fixtures' / 'resources'
def create_window_config(self, cls):
"""Create a WindowConfig instance passing in the standard params"""
instance = cls(ctx=self.window.ctx, wnd=self.window, timer=None)
instance.window_size = self.config.window_size
instance.aspect_ratio = self.config.aspect_ratio
instance.gl_version = self.config.gl_version
return instance
def test_properties(self):
"""Ensure all callback funcs are callable"""
# Configured Values
self.assertIsInstance(self.window.config, WindowConfig)
self.assertEqual(self.window.size, self.config.window_size)
self.assertEqual(self.window.width, self.config.window_size[0])
self.assertEqual(self.window.height, self.config.window_size[1])
self.assertEqual(self.window.title, self.config.title)
self.assertEqual(self.window.gl_version, self.config.gl_version)
self.assertEqual(self.config.aspect_ratio, self.window.aspect_ratio)
self.assertIsInstance(self.window.ctx, moderngl.Context)
self.assertIsInstance(self.window.fbo, moderngl.Framebuffer)
self.assertEqual(self.window.vsync, False)
# Defaults
self.assertEqual(self.config.resizable, True) # Disabled in headless
self.assertEqual(self.config.cursor, True) # Disabled in headless
self.assertEqual(self.config.samples, self.window.samples)
self.assertIsInstance(self.config.resource_dir, Path)
self.assertEqual(self.config.clear_color, (0, 0, 0, 0))
# Ensure callback funcs are actual callable
self.assertTrue(callable(self.window.resize_func))
self.assertTrue(callable(self.window.key_event_func))
self.assertTrue(callable(self.window.mouse_position_event_func))
self.assertTrue(callable(self.window.mouse_press_event_func))
self.assertTrue(callable(self.window.mouse_release_event_func))
self.assertTrue(callable(self.window.mouse_drag_event_func))
self.assertEqual(self.window.pixel_ratio, 1.0)
self.assertEqual(self.window.buffer_size, self.config.window_size)
self.assertEqual(self.window.buffer_width, self.config.window_size[0])
self.assertEqual(self.window.buffer_height, self.config.window_size[1])
# Other windows properties
self.assertEqual(self.window.viewport, (0, 8, 16, 16))
self.assertEqual(self.window.viewport_size, (16, 16))
self.assertEqual(self.window.viewport_width, 16)
self.assertEqual(self.window.viewport_height, 16)
self.assertEqual(self.window.frames, 0)
# set properties
self.window.title = "Modified Title"
self.assertEqual(self.window.title, "Modified Title")
value = not self.window.cursor
self.window.cursor = value
self.assertEqual(self.window.cursor, value)
value = self.window.position[0] + 10, self.window.position[1] + 10
self.window.position = value
self.assertEqual(self.window.position, value)
def test_missing_wnd_ctx(self):
"""Attempt creating WindogConfig without a window or ctx"""
class TestConfig(WindowConfig):
pass
with self.assertRaises(ValueError):
TestConfig(ctx=self.window.ctx)
with self.assertRaises(ValueError):
TestConfig(wnd=self.window)
def test_set_bad_callback(self):
"""Attempt setting bad callbacks"""
class TextConfig(WindowConfig):
pass
with self.assertRaises(ValueError):
self.window.resize_func = None
with self.assertRaises(ValueError):
self.window.resize_func = "Hello"
def test_load_texture_2d(self):
"""Load texture with shortcut method"""
texture = self.config.load_texture_2d(
"textures/crate.png",
flip=True,
mipmap_levels=(0, 2),
anisotropy=4.0,
)
self.assertIsInstance(texture, moderngl.Texture)
self.assertEqual(texture.anisotropy, 4.0)
def test_load_texture_array(self):
"""Load texture array with shortcut method"""
texture = self.config.load_texture_array(
'textures/array.png',
layers=10,
flip=True,
mipmap=False,
mipmap_levels=(0, 2),
anisotropy=4.0,
)
self.assertIsInstance(texture, moderngl.TextureArray)
self.assertEqual(texture.anisotropy, 4.0)
self.assertEqual(texture.layers, 10)
def test_load_program_single(self):
"""Load a single glsl program"""
prog = self.config.load_program(path='programs/white.glsl')
self.assertIsInstance(prog, moderngl.Program)
def test_load_program_multiple(self):
"""Load program from multiple shader files"""
prog = self.config.load_program(
vertex_shader='programs/terrain/terrain_vs.glsl',
fragment_shader='programs/terrain/terrain_fs.glsl',
tess_control_shader='programs/terrain/terrain_tc.glsl',
tess_evaluation_shader='programs/terrain/terrain_te.glsl',
)
self.assertIsInstance(prog, moderngl.Program)
def test_load_text(self):
"""Load text file"""
text = self.config.load_text('data/data.txt')
self.assertEqual(text, "Hello")
def test_load_json(self):
"""Load a json file"""
json = self.config.load_json('data/data.json')
self.assertEqual(json, {"test": "Hello"})
def test_load_binary(self):
"""Load binary file"""
data = self.config.load_binary('data/data.bin')
self.assertEqual(data, b'Hello')
def test_load_scene(self):
"""Load a scene"""
scene = self.config.load_scene(
path='scenes/BoxTextured/glTF/BoxTextured.gltf',
cache=False,
attr_names=None,
kind=None,
)
self.assertIsInstance(scene, Scene)
|
6c72ba1607fdda71ca2d139da3e5dc26b7103ec7
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/returnValueFromInit/src/test17.py
|
7c08df34b4a3751b28bac59b85542ce9bab8132f
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
test17.py
|
'doc'
class X:
'should get a warning for returning value from __init__'
def __init__(self):
print 'howdy'
return 1
class Y:
'should get a warning for returning value from __init__'
def __init__(self, x):
if x == 0 :
return 0
if x == 1 :
return 53
return None
class Z:
'should not get a warning'
def __init__(self, x):
return
class Q(Z):
'd'
def __init__(self):
v = lambda : None
Z.__init__(self, v)
class S(Z):
'd'
def __init__(self):
Z.__init__(self,lambda x: x in ['p','f'])
|
815e9a978920d9b25e0ce9f6beadbb980a530a26
|
316e768ac2ba60fb393a8b914f5c761e077609d1
|
/archivebox/config_stubs.py
|
2c42e8089277bf212b5de1aee65d54aeb8c0e9ad
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
ArchiveBox/ArchiveBox
|
88fc98ac08800d9785d4333572627a7f354f3a43
|
73a5f74d3840284bceaabced9cf99575b8c15d54
|
refs/heads/dev
| 2023-09-03T15:31:13.265845
| 2023-08-31T22:17:45
| 2023-08-31T22:17:45
| 90,356,372
| 9,794
| 606
|
MIT
| 2023-09-04T05:04:41
| 2017-05-05T08:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
config_stubs.py
|
from pathlib import Path
from typing import Optional, Dict, Union, Tuple, Callable, Pattern, Type, Any, List
from mypy_extensions import TypedDict
SimpleConfigValue = Union[str, bool, int, None, Pattern, Dict[str, Any]]
SimpleConfigValueDict = Dict[str, SimpleConfigValue]
SimpleConfigValueGetter = Callable[[], SimpleConfigValue]
ConfigValue = Union[SimpleConfigValue, SimpleConfigValueDict, SimpleConfigValueGetter]
class BaseConfig(TypedDict):
pass
class ConfigDict(BaseConfig, total=False):
"""
# Regenerate by pasting this quine into `archivebox shell` 🥚
from archivebox.config import ConfigDict, CONFIG_DEFAULTS
print('class ConfigDict(BaseConfig, total=False):')
print(' ' + '"'*3 + ConfigDict.__doc__ + '"'*3)
for section, configs in CONFIG_DEFAULTS.items():
for key, attrs in configs.items():
Type, default = attrs['type'], attrs['default']
if default is None:
print(f' {key}: Optional[{Type.__name__}]')
else:
print(f' {key}: {Type.__name__}')
print()
"""
IS_TTY: bool
USE_COLOR: bool
SHOW_PROGRESS: bool
IN_DOCKER: bool
PACKAGE_DIR: Path
OUTPUT_DIR: Path
CONFIG_FILE: Path
ONLY_NEW: bool
TIMEOUT: int
MEDIA_TIMEOUT: int
OUTPUT_PERMISSIONS: str
RESTRICT_FILE_NAMES: str
URL_BLACKLIST: str
SECRET_KEY: Optional[str]
BIND_ADDR: str
ALLOWED_HOSTS: str
DEBUG: bool
PUBLIC_INDEX: bool
PUBLIC_SNAPSHOTS: bool
FOOTER_INFO: str
SAVE_TITLE: bool
SAVE_FAVICON: bool
SAVE_WGET: bool
SAVE_WGET_REQUISITES: bool
SAVE_SINGLEFILE: bool
SAVE_READABILITY: bool
SAVE_MERCURY: bool
SAVE_PDF: bool
SAVE_SCREENSHOT: bool
SAVE_DOM: bool
SAVE_WARC: bool
SAVE_GIT: bool
SAVE_MEDIA: bool
SAVE_ARCHIVE_DOT_ORG: bool
RESOLUTION: str
GIT_DOMAINS: str
CHECK_SSL_VALIDITY: bool
CURL_USER_AGENT: str
WGET_USER_AGENT: str
CHROME_USER_AGENT: str
COOKIES_FILE: Union[str, Path, None]
CHROME_USER_DATA_DIR: Union[str, Path, None]
CHROME_TIMEOUT: int
CHROME_HEADLESS: bool
CHROME_SANDBOX: bool
USE_CURL: bool
USE_WGET: bool
USE_SINGLEFILE: bool
USE_READABILITY: bool
USE_MERCURY: bool
USE_GIT: bool
USE_CHROME: bool
USE_YOUTUBEDL: bool
CURL_BINARY: str
GIT_BINARY: str
WGET_BINARY: str
SINGLEFILE_BINARY: str
READABILITY_BINARY: str
MERCURY_BINARY: str
YOUTUBEDL_BINARY: str
CHROME_BINARY: Optional[str]
YOUTUBEDL_ARGS: List[str]
WGET_ARGS: List[str]
CURL_ARGS: List[str]
GIT_ARGS: List[str]
TAG_SEPARATOR_PATTERN: str
ConfigDefaultValueGetter = Callable[[ConfigDict], ConfigValue]
ConfigDefaultValue = Union[ConfigValue, ConfigDefaultValueGetter]
ConfigDefault = TypedDict('ConfigDefault', {
'default': ConfigDefaultValue,
'type': Optional[Type],
'aliases': Optional[Tuple[str, ...]],
}, total=False)
ConfigDefaultDict = Dict[str, ConfigDefault]
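# Illustrative entries of this shape (hypothetical values, not the project's actual defaults):
# EXAMPLE_DEFAULTS: ConfigDefaultDict = {
#     'TIMEOUT': {'type': int, 'default': 60},
#     'SAVE_PDF': {'type': bool, 'default': True, 'aliases': ('FETCH_PDF',)},
# }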
|
ae1064948511eed1703b407747106a7702cdc7e8
|
7e5887a4db2c9937b61d54ac1d34f589558c74c7
|
/ldapauthenticator/__init__.py
|
99008a7d848d14dcaa6dba58f3ebfceecfa4e3b0
|
[
"BSD-3-Clause"
] |
permissive
|
jupyterhub/ldapauthenticator
|
840823cb0a508b9f2674c9bd31d85b1a445e223f
|
0760d6d849d950e8ee9175d97db0635f72356b59
|
refs/heads/main
| 2023-09-03T12:34:08.800500
| 2023-06-05T08:00:57
| 2023-06-05T08:00:57
| 49,804,028
| 198
| 170
|
BSD-3-Clause
| 2023-07-04T06:27:47
| 2016-01-17T05:11:49
|
Python
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
__init__.py
|
from ldapauthenticator.ldapauthenticator import LDAPAuthenticator # noqa
# __version__ should be updated using tbump, based on configuration in
# pyproject.toml, according to instructions in RELEASE.md.
#
__version__ = "1.3.3.dev"
|
da5e0b57da8070a8cebea640744fb2841e9378c1
|
813f67f6815d0389589b719625c46b2265ca0f87
|
/tests/test_middleware.py
|
b2a5384ac6347447779d1ef0f3e70ffeedd29896
|
[
"Apache-2.0"
] |
permissive
|
stephenhillier/starlette_exporter
|
c0916acb4c592617c31acbb3616a2f48c41aeb45
|
0de22e78233cf88e746b68db76945965d0bfbedf
|
refs/heads/master
| 2023-08-16T17:29:50.915043
| 2023-08-06T13:50:36
| 2023-08-06T13:50:36
| 217,774,698
| 266
| 30
|
Apache-2.0
| 2023-08-06T13:50:37
| 2019-10-26T22:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 24,728
|
py
|
test_middleware.py
|
import time
from http import HTTPStatus
import pytest
from prometheus_client import REGISTRY
from starlette.applications import Starlette
from starlette.background import BackgroundTask
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.testclient import TestClient
import starlette_exporter
from starlette_exporter import (
PrometheusMiddleware,
handle_metrics,
from_header,
handle_openmetrics,
)
from starlette_exporter.optional_metrics import response_body_size, request_body_size
@pytest.fixture
def testapp():
"""create a test app with various endpoints for the test scenarios"""
# unregister all the collectors before we start
collectors = list(REGISTRY._collector_to_names.keys())
for collector in collectors:
REGISTRY.unregister(collector)
PrometheusMiddleware._metrics = {}
def _testapp(**middleware_options):
app = Starlette()
app.add_middleware(
starlette_exporter.PrometheusMiddleware, **middleware_options
)
app.add_route("/metrics", handle_metrics)
app.add_route("/openmetrics", handle_openmetrics)
def normal_response(request):
return JSONResponse({"message": "Hello World"})
app.add_route("/200", normal_response)
app.add_route(
"/200/{test_param}", normal_response, methods=["GET", "POST", "OPTIONS"]
)
def httpstatus_response(request):
"""
Returns a Response with status_code = HTTPStatus.OK if the param is set to OK,
otherwise it returns a Response with status_code = 200
"""
if request.path_params["test_param"] == "OK":
return Response(status_code=HTTPStatus.OK)
else:
return Response(status_code=200)
app.add_route(
"/200_or_httpstatus/{test_param}",
httpstatus_response,
methods=["GET", "OPTIONS"],
)
async def error(request):
raise HTTPException(status_code=500, detail="this is a test error")
app.add_route("/500", error)
app.add_route("/500/{test_param}", error)
async def unhandled(request):
test_dict = {"yup": 123}
return JSONResponse({"message": test_dict["value_error"]})
app.add_route("/unhandled", unhandled)
app.add_route("/unhandled/{test_param}", unhandled)
async def background(request):
def backgroundtask():
time.sleep(0.1)
task = BackgroundTask(backgroundtask)
return JSONResponse({"message": "task started"}, background=task)
app.add_route("/background", background)
def healthcheck(request):
return JSONResponse({"message": "Healthcheck route"})
app.add_route("/health", healthcheck)
# testing routes added using Mount
async def test_mounted_function(request):
return JSONResponse({"message": "Hello World"})
async def test_mounted_function_param(request):
return JSONResponse({"message": request.path_params.get("item")})
mounted_routes = Mount(
"/",
routes=[
Route("/test/{item}", test_mounted_function_param, methods=["GET"]),
Route("/test", test_mounted_function),
],
)
app.mount("/mounted", mounted_routes)
app.mount("/static", app=StaticFiles(directory="tests/static"), name="static")
return app
return _testapp
class TestMiddleware:
@pytest.fixture
def client(self, testapp):
return TestClient(testapp())
def test_200(self, client):
"""test that requests appear in the counter"""
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
)
def test_500(self, client):
"""test that a handled exception (HTTPException) gets logged in the requests counter"""
client.get("/500")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/500",status_code="500"} 1.0"""
in metrics
)
def test_unhandled(self, client):
"""test that an unhandled exception still gets logged in the requests counter"""
try:
client.get("/unhandled")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/unhandled",status_code="500"} 1.0"""
in metrics
)
def test_histogram(self, client):
"""test that histogram buckets appear after making requests"""
client.get("/200")
client.get("/500")
try:
client.get("/unhandled")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/200",status_code="200"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/500",status_code="500"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/unhandled",status_code="500"}"""
in metrics
)
def test_histogram_custom_buckets(self, testapp):
"""test that custom histogram buckets appear after making requests"""
buckets = (10, 20, 30, 40, 50)
client = TestClient(testapp(buckets=buckets))
client.get("/200")
client.get("/500")
try:
client.get("/unhandled")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/200",status_code="200"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/500",status_code="500"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="50.0",method="GET",path="/unhandled",status_code="500"}"""
in metrics
)
def test_app_name(self, testapp):
"""test that app_name label is populated correctly"""
client = TestClient(testapp(app_name="testing"))
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="testing",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
)
def test_filter_unhandled_paths(self, testapp):
"""test that app_name label is populated correctly"""
client = TestClient(testapp(filter_unhandled_paths=True))
client.get("/this_path_does_not_exist")
metrics = client.get("/metrics").content.decode()
assert "this_path_does_not_exist" not in metrics
def test_mounted_path(self, testapp):
"""test that mounted paths appear even when filter_unhandled_paths is True"""
client = TestClient(testapp(filter_unhandled_paths=True))
client.get("/mounted/test")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/mounted/test",status_code="200"} 1.0"""
in metrics
)
def test_mounted_path_with_param(self, testapp):
"""test that mounted paths appear even when filter_unhandled_paths is True
this test uses a path param that needs to be found within the mounted route.
"""
client = TestClient(testapp(filter_unhandled_paths=True, group_paths=True))
client.get("/mounted/test/123")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/mounted/test/{item}",status_code="200"} 1.0"""
in metrics
)
def test_mounted_path_unhandled(self, testapp):
"""test an unhandled path that will be partially matched at the mounted base path"""
client = TestClient(testapp(filter_unhandled_paths=True))
client.get("/mounted/unhandled/123")
metrics = client.get("/metrics").content.decode()
assert """path="/mounted/unhandled""" not in metrics
assert """path="/mounted""" not in metrics
def test_mounted_path_unhandled_grouped(self, testapp):
"""test an unhandled path that will be partially matched at the mounted base path (grouped paths)"""
client = TestClient(testapp(filter_unhandled_paths=True, group_paths=True))
client.get("/mounted/unhandled/123")
metrics = client.get("/metrics").content.decode()
assert """path="/mounted/unhandled""" not in metrics
assert """path="/mounted""" not in metrics
def test_staticfiles_path(self, testapp):
"""test a static file path"""
client = TestClient(testapp(filter_unhandled_paths=True))
client.get("/static/test.txt")
metrics = client.get("/metrics").content.decode()
assert """path="/static/test.txt""" in metrics
def test_prefix(self, testapp):
"""test that metric prefixes work"""
client = TestClient(testapp(prefix="myapp"))
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""myapp_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
)
def test_multi_init(self, testapp):
"""test that the middleware is happy being initialised multiple times"""
# newer starlette versions do this
# prometheus doesn't like the same metric being registered twice.
PrometheusMiddleware(None)
PrometheusMiddleware(None)
def test_multi_prefix(self, testapp):
"""test that two collecting apps don't clash"""
client1 = TestClient(testapp(prefix="app1"))
client2 = TestClient(testapp(prefix="app2"))
client1.get("/200")
client2.get("/200")
# both will return the same metrics though
metrics1 = client1.get("/metrics").content.decode()
metrics2 = client2.get("/metrics").content.decode()
assert (
"""app1_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics1
)
assert (
"""app2_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics1
)
assert (
"""app1_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics2
)
assert (
"""app2_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics2
)
def test_requests_in_progress(self, client):
"""test that the requests_in_progress metric (a gauge) is incremented after one request.
This test is fairly trivial and doesn't cover decrementing at the end of the request.
TODO: create a second asynchronous request and check that the counter is incremented
multiple times (and decremented back to zero when all requests done).
"""
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_in_progress{app_name="starlette",method="GET"} 1.0"""
in metrics
)
# try a second time as an alternate way to check that the requests_in_progress metric
# was decremented at the end of the first request. This test could be improved, but
# at the very least, it checks that the gauge wasn't incremented multiple times without
# also being decremented.
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_in_progress{app_name="starlette",method="GET"} 1.0"""
in metrics
)
def test_skip_paths(self, testapp):
"""test that requests doesn't appear in the counter"""
client = TestClient(testapp(skip_paths=["/health"]))
client.get("/health")
metrics = client.get("/metrics").content.decode()
assert """path="/health""" not in metrics
def test_skip_methods(self, testapp):
"""test that requests doesn't appear in the counter"""
client = TestClient(testapp(skip_methods=["POST"]))
client.post("/200")
metrics = client.get("/metrics").content.decode()
assert """path="/200""" not in metrics
class TestMiddlewareGroupedPaths:
"""tests for group_paths option (using named parameters to group endpoint metrics with path params together)"""
@pytest.fixture
def client(self, testapp):
return TestClient(testapp(group_paths=True))
def test_200(self, client):
"""test that metrics are grouped by endpoint"""
client.get("/200/111")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/200/{test_param}",status_code="200"} 1.0"""
in metrics
)
def test_200_options(self, client):
"""test that metrics are grouped by endpoint"""
client.options("/200/111")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="OPTIONS",path="/200/{test_param}",status_code="200"} 1.0"""
in metrics
)
assert """method="OPTIONS",path="/200/111""" not in metrics
def test_500(self, client):
"""test that a handled exception (HTTPException) gets logged in the requests counter"""
client.get("/500/1111")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/500/{test_param}",status_code="500"} 1.0"""
in metrics
)
def test_unhandled(self, client):
"""test that an unhandled exception still gets logged in the requests counter"""
try:
client.get("/unhandled/11111")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/unhandled/{test_param}",status_code="500"} 1.0"""
in metrics
)
def test_staticfiles_path(self, testapp):
"""test a static file path, with group_paths=True"""
client = TestClient(testapp(filter_unhandled_paths=True, group_paths=True))
client.get("/static/test.txt")
metrics = client.get("/metrics").content.decode()
assert 'path="/static"' in metrics
def test_404(self, client):
"""test that a 404 is handled properly, even though the path won't be matched"""
try:
client.get("/not_found/11111")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/not_found/11111",status_code="404"} 1.0"""
in metrics
)
def test_histogram(self, client):
"""test that histogram buckets appear after making requests"""
client.get("/200/1")
client.get("/500/12")
try:
client.get("/unhandled/111")
except:
pass
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/200/{test_param}",status_code="200"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/500/{test_param}",status_code="500"}"""
in metrics
)
assert (
"""starlette_request_duration_seconds_bucket{app_name="starlette",le="0.005",method="GET",path="/unhandled/{test_param}",status_code="500"}"""
in metrics
)
class TestBackgroundTasks:
"""tests for ensuring the middleware handles requests involving background tasks"""
@pytest.fixture
def client(self, testapp):
return TestClient(testapp())
def test_background_task_endpoint(self, client):
client.get("/background")
metrics = client.get("/metrics").content.decode()
background_metric = [
s
for s in metrics.split("\n")
if (
"starlette_request_duration_seconds_sum" in s
and 'path="/background"' in s
)
]
duration = background_metric[0].split("} ")[1]
# the test function contains a 0.1 second background task. Ensure the metric records the response
# as smaller than 0.1 second.
assert float(duration) < 0.1
class TestOptionalMetrics:
"""tests for optional additional metrics
thanks to Stephen
"""
@pytest.fixture
def client(self, testapp):
return TestClient(
testapp(optional_metrics=[response_body_size, request_body_size])
)
def test_response_body_size(self, client):
client.get("/200")
metrics = client.get("/metrics").content.decode()
response_size_metric = [
s
for s in metrics.split("\n")
if ("starlette_response_body_bytes_total" in s and 'path="/200"' in s)
]
response_size = response_size_metric[0].split("} ")[1]
assert float(response_size) > 0.1
def test_receive_body_size(self, client):
client.post("/200", json={"test_post": ["d", "a"]})
metrics = client.get("/metrics").content.decode()
rec_size_metric = [
s
for s in metrics.split("\n")
if ("starlette_request_body_bytes_total" in s and 'path="/200"' in s)
]
rec_size = rec_size_metric[0].split("} ")[1]
assert float(rec_size) > 0.1
class TestAlwaysUseIntStatus:
"""Tests for always_use_int_status flag"""
def test_200_with_always_use_int_status_set(self, testapp):
"""test that even though the endpoint resturns a response with HTTP status it is converted to 200"""
client = TestClient(testapp(always_use_int_status=True))
client.get("/200_or_httpstatus/OK")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/200_or_httpstatus/OK",status_code="200"} 1.0"""
in metrics
), metrics
def test_200_always_use_int_status_set(self, testapp):
"""Test that status_code metric is 200 if status_code=200 in the response and always_use_int_status is set"""
client = TestClient(testapp(always_use_int_status=True))
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
class TestDefaultLabels:
"""tests for the default labels option (`labels` argument on the middleware constructor)"""
def test_str_default_labels(self, testapp):
"""test setting default labels with string values"""
labels = {"foo": "bar", "hello": "world"}
client = TestClient(testapp(labels=labels))
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",foo="bar",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
def test_callable_default_values(self, testapp):
"""test using callables for the default value"""
# set up a callable that retrieves a header value from the request
f = lambda x: x.headers.get("foo")
labels = {"foo": f, "hello": "world"}
client = TestClient(testapp(labels=labels))
client.get("/200", headers={"foo": "bar"})
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",foo="bar",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
def test_async_callable(self, testapp):
"""test that we can use an async callable to populate label values"""
async def async_bar(request):
return "bar"
labels = {
"bar": async_bar,
"hello": "world",
}
client = TestClient(testapp(labels=labels))
client.get("/200")
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",bar="bar",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
def test_from_header(self, testapp):
"""test with the library-provided from_header function"""
labels = {"foo": from_header("foo"), "hello": "world"}
client = TestClient(testapp(labels=labels))
client.get("/200", headers={"foo": "bar"})
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",foo="bar",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
def test_from_header_allowed_values(self, testapp):
"""test with the library-provided from_header function"""
labels = {
"foo": from_header("foo", allowed_values=("bar", "baz")),
"hello": "world",
}
client = TestClient(testapp(labels=labels))
client.get("/200", headers={"foo": "bar"})
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",foo="bar",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
def test_from_header_allowed_values_disallowed_value(self, testapp):
"""test with the library-provided from_header function"""
labels = {
"foo": from_header("foo", allowed_values=("bar", "baz")),
"hello": "world",
}
client = TestClient(testapp(labels=labels))
client.get("/200", headers={"foo": "zounds"})
metrics = client.get("/metrics").content.decode()
assert (
"""starlette_requests_total{app_name="starlette",foo="zounds",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
not in metrics
), metrics
assert (
"""starlette_requests_total{app_name="starlette",foo="",hello="world",method="GET",path="/200",status_code="200"} 1.0"""
in metrics
), metrics
class TestExemplars:
"""tests for adding an exemplar to the histogram and counters"""
def test_exemplar(self, testapp):
"""test setting default labels with string values"""
# create a callable that returns a label/value pair to
# be used as an exemplar.
def exemplar_fn():
return {"trace_id": "abc123"}
# create a label for this test so we have a unique output line
labels = {"test": "exemplar"}
client = TestClient(testapp(exemplars=exemplar_fn, labels=labels))
client.get("/200")
metrics = client.get(
"/openmetrics", headers={"Accept": "application/openmetrics-text"}
).content.decode()
assert (
"""starlette_requests_total{app_name="starlette",method="GET",path="/200",status_code="200",test="exemplar"} 1.0 # {trace_id="abc123"}"""
in metrics
), metrics
|
b14d9c03018649b80abbca59748886a825708b8f
|
2b7272f3cf9b38071d3e0b6588503e03dafed174
|
/dff_rfcn/test.py
|
28b413fed50dc7fb48dd18d9351baf949f0610e3
|
[
"MIT"
] |
permissive
|
BitconFeng/Deep-Feature-video
|
5f55bea5e2810d896749c0a2eb7605d911b3b546
|
fff73fbcd0e21d5db566d2b63c644e18b2732551
|
refs/heads/master
| 2021-10-09T08:41:13.697770
| 2018-12-15T09:20:00
| 2018-12-15T09:20:00
| 46,565,187
| 240
| 3
|
MIT
| 2020-02-26T17:42:18
| 2015-11-20T14:01:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
test.py
|
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licensed under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import _init_paths
import cv2
import argparse
import os
import sys
import time
import logging
from config.config import config, update_config
def parse_args():
parser = argparse.ArgumentParser(description='Test a R-FCN network')
# general
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
update_config(args.cfg)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--ignore_cache', help='ignore cached results boxes', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-4, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
args = parser.parse_args()
return args
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import mxnet as mx
from function.test_rcnn import test_rcnn
from utils.create_logger import create_logger
def main():
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    print(args)
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.test_image_set)
test_rcnn(config, config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path, config.dataset.dataset_path,
ctx, os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix), config.TEST.test_epoch,
args.vis, args.ignore_cache, args.shuffle, config.TEST.HAS_RPN, config.dataset.proposal, args.thresh, logger=logger, output_path=final_output_path)
if __name__ == '__main__':
main()
|
6f6af337df461d2f3222ebb7301ae10bf58fcce3
|
b4b0181f8abf041e619607bc35b5b24771ebf0ea
|
/yapf/yapflib/reformatter.py
|
319fc9cc04635528070efd80bd7884dffe02b729
|
[
"Apache-2.0"
] |
permissive
|
google/yapf
|
2683a5a262cb3c46b8bafd637f3bcb40b34a7afa
|
cc9ae943495e1f67852ee897c397e9560feb78ed
|
refs/heads/main
| 2023-09-04T03:42:58.877584
| 2023-09-01T22:40:13
| 2023-09-01T22:40:13
| 32,476,524
| 14,961
| 1,239
|
Apache-2.0
| 2023-09-14T14:49:54
| 2015-03-18T18:22:31
|
Python
|
UTF-8
|
Python
| false
| false
| 28,024
|
py
|
reformatter.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decide what the format for the code should be.
The `logical_line.LogicalLine`s are now ready to be formatted. LogicalLines that
can be merged together are merged. The best formatting is returned as a string.
Reformat(): the main function exported by this module.
"""
import collections
import heapq
import re
from yapf_third_party._ylib2to3 import pytree
from yapf_third_party._ylib2to3.pgen2 import token
from yapf.pytree import pytree_utils
from yapf.yapflib import format_decision_state
from yapf.yapflib import format_token
from yapf.yapflib import line_joiner
from yapf.yapflib import style
def Reformat(llines, lines=None):
"""Reformat the logical lines.
Arguments:
llines: (list of logical_line.LogicalLine) Lines we want to format.
lines: (set of int) The lines which can be modified or None if there is no
line range restriction.
Returns:
A string representing the reformatted code.
"""
final_lines = []
prev_line = None # The previous line.
indent_width = style.Get('INDENT_WIDTH')
for lline in _SingleOrMergedLines(llines):
first_token = lline.first
_FormatFirstToken(first_token, lline.depth, prev_line, final_lines)
indent_amt = indent_width * lline.depth
state = format_decision_state.FormatDecisionState(lline, indent_amt)
state.MoveStateToNextToken()
if not lline.disable:
if lline.first.is_comment:
lline.first.value = lline.first.value.rstrip()
elif lline.last.is_comment:
lline.last.value = lline.last.value.rstrip()
if prev_line and prev_line.disable:
# Keep the vertical spacing between a disabled and enabled formatting
# region.
_RetainRequiredVerticalSpacingBetweenTokens(lline.first, prev_line.last,
lines)
if any(tok.is_comment for tok in lline.tokens):
_RetainVerticalSpacingBeforeComments(lline)
if lline.disable or _LineHasContinuationMarkers(lline):
_RetainHorizontalSpacing(lline)
_RetainRequiredVerticalSpacing(lline, prev_line, lines)
_EmitLineUnformatted(state)
elif (_LineContainsPylintDisableLineTooLong(lline) or
_LineContainsI18n(lline)):
# Don't modify vertical spacing, but fix any horizontal spacing issues.
_RetainRequiredVerticalSpacing(lline, prev_line, lines)
_EmitLineUnformatted(state)
elif _CanPlaceOnSingleLine(lline) and not any(tok.must_break_before
for tok in lline.tokens):
# The logical line fits on one line.
while state.next_token:
state.AddTokenToState(newline=False, dry_run=False)
elif not _AnalyzeSolutionSpace(state):
# Failsafe mode. If there isn't a solution to the line, then just emit
# it as is.
state = format_decision_state.FormatDecisionState(lline, indent_amt)
state.MoveStateToNextToken()
_RetainHorizontalSpacing(lline)
_RetainRequiredVerticalSpacing(lline, prev_line, None)
_EmitLineUnformatted(state)
final_lines.append(lline)
prev_line = lline
_AlignTrailingComments(final_lines)
return _FormatFinalLines(final_lines)
def _RetainHorizontalSpacing(line):
"""Retain all horizontal spacing between tokens."""
for tok in line.tokens:
tok.RetainHorizontalSpacing(line.first.column, line.depth)
def _RetainRequiredVerticalSpacing(cur_line, prev_line, lines):
"""Retain all vertical spacing between lines."""
prev_tok = None
if prev_line is not None:
prev_tok = prev_line.last
if cur_line.disable:
# After the first token we are acting on a single line. So if it is
# disabled we must not reformat.
lines = set()
for cur_tok in cur_line.tokens:
_RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines)
prev_tok = cur_tok
def _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines):
"""Retain vertical spacing between two tokens if not in editable range."""
if prev_tok is None:
return
if prev_tok.is_string:
prev_lineno = prev_tok.lineno + prev_tok.value.count('\n')
elif prev_tok.is_pseudo:
if not prev_tok.previous_token.is_multiline_string:
prev_lineno = prev_tok.previous_token.lineno
else:
prev_lineno = prev_tok.lineno
else:
prev_lineno = prev_tok.lineno
if cur_tok.is_comment:
cur_lineno = cur_tok.lineno - cur_tok.value.count('\n')
else:
cur_lineno = cur_tok.lineno
if not prev_tok.is_comment and prev_tok.value.endswith('\\'):
prev_lineno += prev_tok.value.count('\n')
required_newlines = cur_lineno - prev_lineno
if cur_tok.is_comment and not prev_tok.is_comment:
# Don't adjust between a comment and non-comment.
pass
elif lines and lines.intersection(range(prev_lineno, cur_lineno + 1)):
desired_newlines = cur_tok.whitespace_prefix.count('\n')
whitespace_lines = range(prev_lineno + 1, cur_lineno)
deletable_lines = len(lines.intersection(whitespace_lines))
required_newlines = max(required_newlines - deletable_lines,
desired_newlines)
cur_tok.AdjustNewlinesBefore(required_newlines)
def _RetainVerticalSpacingBeforeComments(line):
"""Retain vertical spacing before comments."""
prev_token = None
for tok in line.tokens:
if tok.is_comment and prev_token:
if tok.lineno - tok.value.count('\n') - prev_token.lineno > 1:
tok.AdjustNewlinesBefore(ONE_BLANK_LINE)
prev_token = tok
def _EmitLineUnformatted(state):
"""Emit the line without formatting.
The line contains code that if reformatted would break a non-syntactic
convention. E.g., i18n comments and function calls are tightly bound by
convention. Instead, we calculate when / if a newline should occur and honor
that. But otherwise the code emitted will be the same as the original code.
Arguments:
state: (format_decision_state.FormatDecisionState) The format decision
state.
"""
while state.next_token:
previous_token = state.next_token.previous_token
previous_lineno = previous_token.lineno
if previous_token.is_multiline_string or previous_token.is_string:
previous_lineno += previous_token.value.count('\n')
if previous_token.is_continuation:
newline = False
else:
newline = state.next_token.lineno > previous_lineno
state.AddTokenToState(newline=newline, dry_run=False)
def _LineContainsI18n(line):
"""Return true if there are i18n comments or function calls in the line.
I18n comments and pseudo-function calls are closely related. They cannot
be moved apart without breaking i18n.
Arguments:
line: (logical_line.LogicalLine) The line currently being formatted.
Returns:
True if the line contains i18n comments or function calls. False otherwise.
"""
if style.Get('I18N_COMMENT'):
for tok in line.tokens:
if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value):
# Contains an i18n comment.
return True
if style.Get('I18N_FUNCTION_CALL'):
length = len(line.tokens)
for index in range(length - 1):
if (line.tokens[index + 1].value == '(' and
line.tokens[index].value in style.Get('I18N_FUNCTION_CALL')):
return True
return False
def _LineContainsPylintDisableLineTooLong(line):
"""Return true if there is a "pylint: disable=line-too-long" comment."""
return re.search(r'\bpylint:\s+disable=line-too-long\b', line.last.value)
def _LineHasContinuationMarkers(line):
"""Return true if the line has continuation markers in it."""
return any(tok.is_continuation for tok in line.tokens)
def _CanPlaceOnSingleLine(line):
"""Determine if the logical line can go on a single line.
Arguments:
line: (logical_line.LogicalLine) The line currently being formatted.
Returns:
True if the line can or should be added to a single line. False otherwise.
"""
token_names = [x.name for x in line.tokens]
if (style.Get('FORCE_MULTILINE_DICT') and 'LBRACE' in token_names):
return False
indent_amt = style.Get('INDENT_WIDTH') * line.depth
last = line.last
last_index = -1
if (last.is_pylint_comment or last.is_pytype_comment or
last.is_copybara_comment):
last = last.previous_token
last_index = -2
if last is None:
return True
return (last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and
not any(tok.is_comment for tok in line.tokens[:last_index]))
def _AlignTrailingComments(final_lines):
"""Align trailing comments to the same column."""
final_lines_index = 0
while final_lines_index < len(final_lines):
line = final_lines[final_lines_index]
assert line.tokens
processed_content = False
for tok in line.tokens:
if (tok.is_comment and isinstance(tok.spaces_required_before, list) and
tok.value.startswith('#')):
# All trailing comments and comments that appear on a line by themselves
# in this block should be indented at the same level. The block is
# terminated by an empty line or EOF. Enumerate through each line in
# the block and calculate the max line length. Once complete, use the
        # first col value greater than that value and create the necessary
        # whitespace for each line accordingly.
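        # For example (hypothetical input, not taken from the test suite), a block
        # such as
        #
        #     x = 1  # short
        #     some_longer_name = 2  # longer
        #
        # is rewritten so both trailing comments start in the same column:
        #
        #     x = 1                 # short
        #     some_longer_name = 2  # longer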
all_pc_line_lengths = [] # All pre-comment line lengths
max_line_length = 0
while True:
# EOF
if final_lines_index + len(all_pc_line_lengths) == len(final_lines):
break
this_line = final_lines[final_lines_index + len(all_pc_line_lengths)]
# Blank line - note that content is preformatted so we don't need to
# worry about spaces/tabs; a blank line will always be '\n\n'.
assert this_line.tokens
if (all_pc_line_lengths and
this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n')
):
break
if this_line.disable:
all_pc_line_lengths.append([])
continue
# Calculate the length of each line in this logical line.
line_content = ''
pc_line_lengths = []
for line_tok in this_line.tokens:
whitespace_prefix = line_tok.formatted_whitespace_prefix
newline_index = whitespace_prefix.rfind('\n')
if newline_index != -1:
max_line_length = max(max_line_length, len(line_content))
line_content = ''
whitespace_prefix = whitespace_prefix[newline_index + 1:]
if line_tok.is_comment:
pc_line_lengths.append(len(line_content))
else:
line_content += '{}{}'.format(whitespace_prefix, line_tok.value)
if pc_line_lengths:
max_line_length = max(max_line_length, max(pc_line_lengths))
all_pc_line_lengths.append(pc_line_lengths)
# Calculate the aligned column value
max_line_length += 2
aligned_col = None
for potential_col in tok.spaces_required_before:
if potential_col > max_line_length:
aligned_col = potential_col
break
if aligned_col is None:
aligned_col = max_line_length
# Update the comment token values based on the aligned values
for all_pc_line_lengths_index, pc_line_lengths in enumerate(
all_pc_line_lengths):
if not pc_line_lengths:
continue
this_line = final_lines[final_lines_index + all_pc_line_lengths_index]
pc_line_length_index = 0
for line_tok in this_line.tokens:
if line_tok.is_comment:
assert pc_line_length_index < len(pc_line_lengths)
assert pc_line_lengths[pc_line_length_index] < aligned_col
# Note that there may be newlines embedded in the comments, so
# we need to apply a whitespace prefix to each line.
whitespace = ' ' * (
aligned_col - pc_line_lengths[pc_line_length_index] - 1)
pc_line_length_index += 1
line_content = []
for comment_line_index, comment_line in enumerate(
line_tok.value.split('\n')):
line_content.append('{}{}'.format(whitespace,
comment_line.strip()))
if comment_line_index == 0:
whitespace = ' ' * (aligned_col - 1)
line_content = '\n'.join(line_content)
# Account for initial whitespace already slated for the
# beginning of the line.
existing_whitespace_prefix = \
line_tok.formatted_whitespace_prefix.lstrip('\n')
if line_content.startswith(existing_whitespace_prefix):
line_content = line_content[len(existing_whitespace_prefix):]
line_tok.value = line_content
assert pc_line_length_index == len(pc_line_lengths)
final_lines_index += len(all_pc_line_lengths)
processed_content = True
break
if not processed_content:
final_lines_index += 1
def _FormatFinalLines(final_lines):
"""Compose the final output from the finalized lines."""
formatted_code = []
for line in final_lines:
formatted_line = []
for tok in line.tokens:
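      # Pseudo tokens (markers yapf inserts internally, such as pseudo parentheses)
      # are never emitted; if dropping one would glue two visible tokens together,
      # a single space is emitted in its place instead.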
if not tok.is_pseudo:
formatted_line.append(tok.formatted_whitespace_prefix)
formatted_line.append(tok.value)
elif (not tok.next_token.whitespace_prefix.startswith('\n') and
not tok.next_token.whitespace_prefix.startswith(' ')):
if (tok.previous_token.value == ':' or
tok.next_token.value not in ',}])'):
formatted_line.append(' ')
formatted_code.append(''.join(formatted_line))
return ''.join(formatted_code) + '\n'
class _StateNode(object):
"""An edge in the solution space from 'previous.state' to 'state'.
Attributes:
state: (format_decision_state.FormatDecisionState) The format decision state
for this node.
newline: If True, then on the edge from 'previous.state' to 'state' a
newline is inserted.
previous: (_StateNode) The previous state node in the graph.
"""
# TODO(morbo): Add a '__cmp__' method.
def __init__(self, state, newline, previous):
self.state = state.Clone()
self.newline = newline
self.previous = previous
def __repr__(self): # pragma: no cover
return 'StateNode(state=[\n{0}\n], newline={1})'.format(
self.state, self.newline)
# A tuple of (penalty, count) that is used to prioritize the BFS. In case of
# equal penalties, we prefer states that were inserted first. During state
# generation, we make sure that we insert states first that break the line as
# late as possible.
_OrderedPenalty = collections.namedtuple('OrderedPenalty', ['penalty', 'count'])
# An item in the prioritized BFS search queue. The 'StateNode's 'state' has
# the given '_OrderedPenalty'.
_QueueItem = collections.namedtuple('QueueItem',
['ordered_penalty', 'state_node'])
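# A small illustration (not part of yapf) of why the insertion count matters:
# heapq compares tuples lexicographically, so among items with equal penalty the
# one pushed first (smaller count) is popped first, keeping the search stable.
#
#   q = []
#   heapq.heappush(q, _QueueItem(_OrderedPenalty(5, 1), 'later'))
#   heapq.heappush(q, _QueueItem(_OrderedPenalty(5, 0), 'earlier'))
#   heapq.heappush(q, _QueueItem(_OrderedPenalty(3, 2), 'cheapest'))
#   [heapq.heappop(q).state_node for _ in range(3)]
#   # -> ['cheapest', 'earlier', 'later']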
def _AnalyzeSolutionSpace(initial_state):
"""Analyze the entire solution space starting from initial_state.
This implements a variant of Dijkstra's algorithm on the graph that spans
the solution space (LineStates are the nodes). The algorithm tries to find
the shortest path (the one with the lowest penalty) from 'initial_state' to
the state where all tokens are placed.
Arguments:
initial_state: (format_decision_state.FormatDecisionState) The initial state
to start the search from.
Returns:
True if a formatting solution was found. False otherwise.
"""
count = 0
seen = set()
p_queue = []
# Insert start element.
node = _StateNode(initial_state, False, None)
heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(0, count), node))
count += 1
while p_queue:
item = p_queue[0]
penalty = item.ordered_penalty.penalty
node = item.state_node
if not node.state.next_token:
break
heapq.heappop(p_queue)
if count > 10000:
node.state.ignore_stack_for_comparison = True
# Unconditionally add the state and check if it was present to avoid having
# to hash it twice in the common case (state hashing is expensive).
before_seen_count = len(seen)
seen.add(node.state)
# If seen didn't change size, the state was already present.
if before_seen_count == len(seen):
continue
# FIXME(morbo): Add a 'decision' element?
count = _AddNextStateToQueue(penalty, node, False, count, p_queue)
count = _AddNextStateToQueue(penalty, node, True, count, p_queue)
if not p_queue:
# We weren't able to find a solution. Do nothing.
return False
_ReconstructPath(initial_state, heapq.heappop(p_queue).state_node)
return True
def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue):
"""Add the following state to the analysis queue.
Assume the current state is 'previous_node' and has been reached with a
penalty of 'penalty'. Insert a line break if 'newline' is True.
Arguments:
penalty: (int) The penalty associated with the path up to this point.
previous_node: (_StateNode) The last _StateNode inserted into the priority
queue.
newline: (bool) Add a newline if True.
count: (int) The number of elements in the queue.
p_queue: (heapq) The priority queue representing the solution space.
Returns:
The updated number of elements in the queue.
"""
must_split = previous_node.state.MustSplit()
if newline and not previous_node.state.CanSplit(must_split):
# Don't add a newline if the token cannot be split.
return count
if not newline and must_split:
# Don't add a token we must split but where we aren't splitting.
return count
node = _StateNode(previous_node.state, newline, previous_node)
penalty += node.state.AddTokenToState(
newline=newline, dry_run=True, must_split=must_split)
heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node))
return count + 1
def _ReconstructPath(initial_state, current):
"""Reconstruct the path through the queue with lowest penalty.
Arguments:
initial_state: (format_decision_state.FormatDecisionState) The initial state
to start the search from.
current: (_StateNode) The node in the decision graph that is the end point
of the path with the least penalty.
"""
path = collections.deque()
while current.previous:
path.appendleft(current)
current = current.previous
for node in path:
initial_state.AddTokenToState(newline=node.newline, dry_run=False)
NESTED_DEPTH = []
def _FormatFirstToken(first_token, indent_depth, prev_line, final_lines):
"""Format the first token in the logical line.
Add a newline and the required indent before the first token of the logical
line.
Arguments:
first_token: (format_token.FormatToken) The first token in the logical line.
indent_depth: (int) The line's indentation depth.
prev_line: (list of logical_line.LogicalLine) The logical line previous to
this line.
final_lines: (list of logical_line.LogicalLine) The logical lines that have
already been processed.
"""
global NESTED_DEPTH
while NESTED_DEPTH and NESTED_DEPTH[-1] > indent_depth:
NESTED_DEPTH.pop()
first_nested = False
if _IsClassOrDef(first_token):
if not NESTED_DEPTH:
NESTED_DEPTH = [indent_depth]
elif NESTED_DEPTH[-1] < indent_depth:
first_nested = True
NESTED_DEPTH.append(indent_depth)
first_token.AddWhitespacePrefix(
_CalculateNumberOfNewlines(first_token, indent_depth, prev_line,
final_lines, first_nested),
indent_level=indent_depth)
NO_BLANK_LINES = 1
ONE_BLANK_LINE = 2
TWO_BLANK_LINES = 3
def _IsClassOrDef(tok):
if tok.value in {'class', 'def', '@'}:
return True
return (tok.next_token and tok.value == 'async' and
tok.next_token.value == 'def')
def _CalculateNumberOfNewlines(first_token, indent_depth, prev_line,
final_lines, first_nested):
"""Calculate the number of newlines we need to add.
Arguments:
first_token: (format_token.FormatToken) The first token in the logical
line.
indent_depth: (int) The line's indentation depth.
prev_line: (list of logical_line.LogicalLine) The logical line previous to
this line.
final_lines: (list of logical_line.LogicalLine) The logical lines that have
already been processed.
first_nested: (boolean) Whether this is the first nested class or function.
Returns:
The number of newlines needed before the first token.
"""
# TODO(morbo): Special handling for imports.
# TODO(morbo): Create a knob that can tune these.
if prev_line is None:
# The first line in the file. Don't add blank lines.
# FIXME(morbo): Is this correct?
if first_token.newlines is not None:
first_token.newlines = None
return 0
if first_token.is_docstring:
if (prev_line.first.value == 'class' and
style.Get('BLANK_LINE_BEFORE_CLASS_DOCSTRING')):
# Enforce a blank line before a class's docstring.
return ONE_BLANK_LINE
elif (prev_line.first.value.startswith('#') and
style.Get('BLANK_LINE_BEFORE_MODULE_DOCSTRING')):
# Enforce a blank line before a module's docstring.
return ONE_BLANK_LINE
# The docstring shouldn't have a newline before it.
return NO_BLANK_LINES
if first_token.is_name and not indent_depth:
if prev_line.first.value in {'from', 'import'}:
# Support custom number of blank lines between top-level imports and
# variable definitions.
return 1 + style.Get(
'BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES')
prev_last_token = prev_line.last
if prev_last_token.is_docstring:
if (not indent_depth and first_token.value in {'class', 'def', 'async'}):
# Separate a class or function from the module-level docstring with
# appropriate number of blank lines.
return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION')
if (first_nested and
not style.Get('BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF') and
_IsClassOrDef(first_token)):
first_token.newlines = None
return NO_BLANK_LINES
if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token,
prev_last_token):
return NO_BLANK_LINES
else:
return ONE_BLANK_LINE
if _IsClassOrDef(first_token):
# TODO(morbo): This can go once the blank line calculator is more
# sophisticated.
if not indent_depth:
# This is a top-level class or function.
is_inline_comment = prev_last_token.whitespace_prefix.count('\n') == 0
if (not prev_line.disable and prev_last_token.is_comment and
not is_inline_comment):
# This token follows a non-inline comment.
if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token,
prev_last_token):
# Assume that the comment is "attached" to the current line.
# Therefore, we want two blank lines before the comment.
index = len(final_lines) - 1
while index > 0:
if not final_lines[index - 1].is_comment:
break
index -= 1
if final_lines[index - 1].first.value == '@':
final_lines[index].first.AdjustNewlinesBefore(NO_BLANK_LINES)
else:
prev_last_token.AdjustNewlinesBefore(
1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION'))
if first_token.newlines is not None:
first_token.newlines = None
return NO_BLANK_LINES
elif _IsClassOrDef(prev_line.first):
if first_nested and not style.Get(
'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'):
first_token.newlines = None
return NO_BLANK_LINES
# Calculate how many newlines were between the original lines. We want to
# retain that formatting if it doesn't violate one of the style guide rules.
if first_token.is_comment:
first_token_lineno = first_token.lineno - first_token.value.count('\n')
else:
first_token_lineno = first_token.lineno
prev_last_token_lineno = prev_last_token.lineno
if prev_last_token.is_multiline_string:
prev_last_token_lineno += prev_last_token.value.count('\n')
if first_token_lineno - prev_last_token_lineno > 1:
return ONE_BLANK_LINE
return NO_BLANK_LINES
def _SingleOrMergedLines(lines):
"""Generate the lines we want to format.
Arguments:
lines: (list of logical_line.LogicalLine) Lines we want to format.
Yields:
Either a single line, if the current line cannot be merged with the
succeeding line, or the next two lines merged into one line.
"""
index = 0
last_was_merged = False
while index < len(lines):
if lines[index].disable:
line = lines[index]
index += 1
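      # Formatting is disabled for this line, so it must be reproduced verbatim.
      # If several logical lines came from one physical line, stitch them back
      # together, re-inserting the ';' the parser split them on (unless the
      # previous part already ends with ':', as in `if x: y = 1`).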
while index < len(lines):
column = line.last.column + 2
if lines[index].lineno != line.lineno:
break
if line.last.value != ':':
leaf = pytree.Leaf(
type=token.SEMI, value=';', context=('', (line.lineno, column)))
line.AppendToken(
format_token.FormatToken(leaf, pytree_utils.NodeName(leaf)))
for tok in lines[index].tokens:
line.AppendToken(tok)
index += 1
yield line
elif line_joiner.CanMergeMultipleLines(lines[index:], last_was_merged):
# TODO(morbo): This splice is potentially very slow. Come up with a more
# performance-friendly way of determining if two lines can be merged.
next_line = lines[index + 1]
for tok in next_line.tokens:
lines[index].AppendToken(tok)
if (len(next_line.tokens) == 1 and next_line.first.is_multiline_string):
# This may be a multiline shebang. In that case, we want to retain the
# formatting. Otherwise, it could mess up the shell script's syntax.
lines[index].disable = True
yield lines[index]
index += 2
last_was_merged = True
else:
yield lines[index]
index += 1
last_was_merged = False
def _NoBlankLinesBeforeCurrentToken(text, cur_token, prev_token):
"""Determine if there are no blank lines before the current token.
The previous token is a docstring or comment. The prev_token_lineno is the
start of the text of that token. Counting the number of newlines in its text
  gives us its extent and thus the line number of the end of the
docstring or comment. After that, we just compare it to the current token's
line number to see if there are blank lines between them.
Arguments:
text: (unicode) The text of the docstring or comment before the current
token.
cur_token: (format_token.FormatToken) The current token in the logical line.
prev_token: (format_token.FormatToken) The previous token in the logical
line.
Returns:
True if there is no blank line before the current token.
"""
cur_token_lineno = cur_token.lineno
if cur_token.is_comment:
cur_token_lineno -= cur_token.value.count('\n')
num_newlines = text.count('\n') if not prev_token.is_comment else 0
return prev_token.lineno + num_newlines == cur_token_lineno - 1
|
78527e9af0df1b1f8d367a2eb6b267a2396b7ce3
|
e7e536df0263ae2a7ac44ef30f19110f891213a9
|
/src/pretalx/api/serializers/review.py
|
8131b8f1a80a743e36851af0f01ef73267dee523
|
[
"Apache-2.0"
] |
permissive
|
pretalx/pretalx
|
b3b3808266f4810dfc8445dc1ed33ba398e7a9c2
|
269dce90a6fb1ce0064008c40ce5dd4dad61e2e3
|
refs/heads/main
| 2023-09-05T11:09:23.538325
| 2023-09-04T19:57:47
| 2023-09-04T19:57:47
| 83,081,285
| 563
| 195
|
Apache-2.0
| 2023-09-13T19:12:28
| 2017-02-24T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
review.py
|
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField,
SlugRelatedField,
)
from pretalx.api.serializers.question import AnswerSerializer
from pretalx.submission.models import Answer, Review
class AnonymousReviewSerializer(ModelSerializer):
"""Does not include the user and answer fields."""
submission = SlugRelatedField(slug_field="code", read_only=True)
class Meta:
model = Review
fields = [
"id",
"submission",
"text",
"score",
"created",
"updated",
]
class ReviewSerializer(AnonymousReviewSerializer):
user = SlugRelatedField(slug_field="name", read_only=True)
answers = SerializerMethodField()
def get_answers(self, obj):
return AnswerSerializer(Answer.objects.filter(review=obj), many=True).data
class Meta(AnonymousReviewSerializer.Meta):
model = Review
fields = AnonymousReviewSerializer.Meta.fields + [
"answers",
"user",
]
|
e03f5d9e5ad77b96bb8c99722df199cc3318adef
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_op/ascend/xdivy.py
|
5a3b190ed18037dff78d223f00df46d054741ee7
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,184
|
py
|
xdivy.py
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: xdivy"""
import akg
from akg import tvm
from akg.ops.math import divide
from akg.utils.format_transform import get_shape
from akg.utils.dsl_create import produce_shapes
import akg.utils as utils
# define a scalar, value = 1
SCALAR_ONE = 1
# minimum num of float32: 2**(-126)
MININUM_NUM_FLOAT = 2**(-126)
# minimum num of float16: 2**(-24)
MININUM_NUM_HALF = 2**(-24)
# the inverse scaling factor for float32 is 2**126, but cce can only support
# up to 2**62, so multiply by 2**62, 2**62 and 2**2 in turn to reach 2**126
MAX_ONE_CONST_FLOAT = 2**62
MAX_TWO_CONST_FLOAT = 2**2
# the inverse scaling factor for float16 is 2**24, but cce can only support
# up to 2**12, so multiply by 2**12 twice to reach 2**24
MAX_CONST_HALF = 2**12
def xdivy_compute(input_x, input_y):
"""xdivy compute"""
_, _, shape_res = produce_shapes(get_shape(input_x), get_shape(input_y))
utils.check_shape(shape_res)
dtype = input_x.dtype
broadcast_x = akg.lang.ascend.broadcast(input_x, shape_res)
broadcast_y = akg.lang.ascend.broadcast(input_y, shape_res)
broadcast_one = akg.lang.ascend.broadcast(
tvm.const(SCALAR_ONE, dtype), shape_res, dtype)
abs_x = akg.lang.ascend.vabs(broadcast_x)
abs_y = akg.lang.ascend.vabs(broadcast_y)
add_x_y = akg.lang.ascend.vadd(abs_x, abs_y)
if dtype == "float32":
data_min = akg.lang.ascend.broadcast(
tvm.const(MININUM_NUM_FLOAT, dtype=dtype), shape_res, dtype)
elif dtype == "float16":
data_min = akg.lang.ascend.broadcast(
tvm.const(MININUM_NUM_HALF, dtype=dtype), shape_res, dtype)
zero_x_y = akg.lang.ascend.vmin(add_x_y, data_min)
if dtype == "float32":
data_mul1 = akg.lang.ascend.vmuls(
zero_x_y, tvm.const(MAX_ONE_CONST_FLOAT, dtype=dtype))
data_mul2 = akg.lang.ascend.vmuls(
data_mul1, tvm.const(MAX_ONE_CONST_FLOAT, dtype=dtype))
mul_data = akg.lang.ascend.vmuls(
data_mul2, tvm.const(MAX_TWO_CONST_FLOAT, dtype=dtype))
elif dtype == "float16":
data_mul1 = akg.lang.ascend.vmuls(
zero_x_y, tvm.const(MAX_CONST_HALF, dtype=dtype))
mul_data = akg.lang.ascend.vmuls(
data_mul1, tvm.const(MAX_CONST_HALF, dtype=dtype))
sub_x_y_zero = akg.lang.ascend.vsub(mul_data, broadcast_one)
abs_x_y_zero = akg.lang.ascend.vabs(sub_x_y_zero)
input_y_revised = akg.lang.ascend.vadd(broadcast_y, abs_x_y_zero)
if dtype == "float16":
broadcast_x = akg.lang.ascend.cast_to(broadcast_x, "float32")
input_y_revised = akg.lang.ascend.cast_to(input_y_revised, "float32")
res = divide(broadcast_x, input_y_revised, target="cce")
if dtype == "float16":
res = akg.lang.ascend.cast_to(res, dtype)
return res
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor, (str, type(None)))
def xdivy(data_x1, data_x2, target=utils.CCE):
"""
Calculate data_x1 divided by data_x2.
.. math::
y = \\left\\{
\\begin{aligned}
0, && if \\quad x1 == 0 \\\\
\\dfrac{x1}{x2}, && otherwise
\\end{aligned}
\\right.
Args:
data_x1 (tvm.tensor.Tensor): Tensor of dtype "float16" or "float32"
data_x2 (tvm.tensor.Tensor): Tensor of dtype "float16" or "float32"
Returns:
tvm.tensor.Tensor
"""
shape_x1 = get_shape(data_x1)
shape_x2 = get_shape(data_x2)
utils.check_shape(shape_x1)
utils.check_shape(shape_x2)
utils.elemwise_dtype_check(data_x1.dtype, data_x2.dtype)
dtype = data_x1.dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
return xdivy_compute(data_x1, data_x2)
|
65705e988b12d719e853396ea4d1fa36c0ea91db
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-WebKit/PyObjCTest/test_domtreewalker.py
|
f1bdb485e75ab2523719f3b7e4493a023f51db5a
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 200
|
py
|
test_domtreewalker.py
|
from PyObjCTools.TestSupport import TestCase
import WebKit
class TestDOMTreeWalker(TestCase):
def testMethods(self):
self.assertResultIsBOOL(WebKit.DOMTreeWalker.expandEntityReferences)
|
3fc6c4f5c2298ab8f21bd655831319aae9cf7385
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/service_health_status.py
|
f05a4f400ccaf8614740806bafa44572eb399aa5
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
service_health_status.py
|
from enum import Enum
class ServiceHealthStatus(str, Enum):
ServiceOperational = "serviceOperational",
Investigating = "investigating",
RestoringService = "restoringService",
VerifyingService = "verifyingService",
ServiceRestored = "serviceRestored",
PostIncidentReviewPublished = "postIncidentReviewPublished",
ServiceDegradation = "serviceDegradation",
ServiceInterruption = "serviceInterruption",
ExtendedRecovery = "extendedRecovery",
FalsePositive = "falsePositive",
InvestigationSuspended = "investigationSuspended",
Resolved = "resolved",
MitigatedExternal = "mitigatedExternal",
Mitigated = "mitigated",
ResolvedExternal = "resolvedExternal",
Confirmed = "confirmed",
Reported = "reported",
UnknownFutureValue = "unknownFutureValue",
|
6591c5665da32216c38ed951bdcdf10c2b577f20
|
99833651e4a6a0bc1221d577d9fc43b8568abedd
|
/nltk_contrib/lpath/at_lite/tableio.py
|
6a582ad1a7095122c8258d363b30dc42590d96aa
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
nltk/nltk_contrib
|
689e2683aa01b120c7473b9a4fc50bc49f014390
|
95d1806e2f4e89e960b76a685b1fba2eaa7d5142
|
refs/heads/master
| 2023-07-31T13:32:47.358897
| 2022-11-21T18:49:33
| 2022-11-21T18:49:33
| 2,530,774
| 145
| 127
|
NOASSERTION
| 2022-11-21T18:49:34
| 2011-10-07T05:59:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,385
|
py
|
tableio.py
|
#
# ChangeLogs:
# $Log: tableio.py,v $
# Revision 1.9 2006/06/27 19:03:28 haejoong
# tdf reader now handles different formats of newline characters
#
# Revision 1.8 2006/04/12 14:55:24 haejoong
# fixes for error handling
#
# Revision 1.7 2006/01/26 15:46:46 haejoong
# *** empty log message ***
#
# Revision 1.6 2006/01/23 16:32:29 haejoong
# improved exception handling
#
# Revision 1.5 2006/01/19 17:53:17 haejoong
# added some error handling for importTdf
#
# Revision 1.4 2005/12/15 19:08:40 haejoong
# added missing "
#
# Revision 1.3 2005/12/15 19:05:55 haejoong
# added error handling for TableIo.importTdf
#
#
import codecs
import re
from error import *
__all__ = ['TableIo']
version = "$Revision: 1.9 $"
class TableIo:
def printTable(self):
size = [len(str(x)) for x,t in self.header]
for row in self.table:
for i,c in enumerate(row):
if type(c)==str or type(c)==unicode:
n = len(c)
else:
n = len(str(c))
if n > size[i]:
size[i] = n
def printRow(row,bar=True):
s = ""
for i,c in enumerate(row):
if type(c) == int or type(c) == float:
s += "%%%ds|" % size[i] % str(c)
else:
s += "%%-%ds|" % size[i] % c
print s[:-1]
printRow([s for s,t in self.header])
for row in self.table:
printRow(row)
def importList(cls, L):
data = cls(L[0])
for i,row in enumerate(L[1:]):
data.insertRow(i,row)
data.resetUndoStack()
return data
importList = classmethod(importList)
def exportTdf(self, filename):
try:
_,_,_,writer = codecs.lookup('utf-8')
f = writer(file(filename,'w'))
f.write("\t".join([a[0]+';'+a[1].__name__
for a in self.header]) + "\n")
for item in self.metadata.items():
f.write(";;MM %s\t%s\n" % item)
for row in self.table:
for c in row[:-1]:
if c is None:
f.write("\t")
else:
t = type(c)
if t==str or t==unicode:
f.write(c+"\t")
else:
f.write(str(c)+"\t")
if row[-1] is None:
f.write("\n")
else:
t = type(row[-1])
if t==str or t==unicode:
f.write(row[-1]+"\n")
else:
f.write(str(row[-1])+"\n")
except IOError, e:
raise Error(ERR_TDF_EXPORT, str(e))
def importTdf(cls, filename):
_,_,reader,_ = codecs.lookup('utf-8')
try:
f = reader(file(filename))
except IOError, e:
raise Error(ERR_TDF_IMPORT, e)
head = []
for h in f.readline().rstrip("\r\n").split("\t"):
try:
a,b = h.split(';')
except ValueError:
raise Error(ERR_TDF_IMPORT, "invalid header")
head.append((a,eval(b)))
tab = cls(head)
l = f.readline().rstrip('\r\n')
lno = 2
while l:
if l[:2] != ';;': break
if l[2:4] == 'MM':
nam,val = re.split("\t+",l[4:].strip())
tab.metadata[nam] = val
l = f.readline().rstrip('\r\n')
lno += 1
while l:
if l[:2] != ';;':
row = []
try:
for i,cell in enumerate(l.rstrip("\n").split("\t")):
row.append(head[i][1](cell))
except ValueError, e:
raise Error(ERR_TDF_IMPORT,
"[%d:%d] %s" % (lno,i,str(e)))
except IndexError, e:
msg = "record has too many fields"
raise Error(ERR_TDF_IMPORT,
"[%d:%d] %s" % (lno,i,msg))
tab.insertRow(None,row)
l = f.readline().rstrip('\r\n')
lno += 1
tab.resetUndoStack()
return tab
importTdf = classmethod(importTdf)
|
e12276b67a2cecf75d584e541f6e9dc9f7aa8b69
|
65fb54d999e7c81dc0bbf89bfd3ac06c1b27df59
|
/yacv/grammar.py
|
77f6b3fe6c02ec235219a45b93873c722aebea8c
|
[
"MIT"
] |
permissive
|
ashutoshbsathe/yacv
|
c474b9b0df7353ad5eba59b2d5e723929680513e
|
763d6263620aece090ee546aed4729905e4e5430
|
refs/heads/main
| 2023-07-10T10:54:57.950482
| 2021-04-22T12:53:19
| 2021-04-22T12:53:19
| 338,791,868
| 132
| 8
|
MIT
| 2021-08-19T16:15:58
| 2021-02-14T11:40:15
|
Python
|
UTF-8
|
Python
| false
| false
| 8,092
|
py
|
grammar.py
|
import logging
from collections import OrderedDict
from pprint import pprint
from yacv.constants import *
from yacv.utils import YACVError
class Production(object):
def __init__(self, lhs=None, rhs=[]):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
rhs = 'ϵ' if self.rhs[0] == YACV_EPSILON else ''.join(self.rhs)
return '{} -> {}'.format(self.lhs, rhs)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, Production):
return False
return self.lhs == other.lhs and self.rhs == other.rhs
def __ne__(self, other):
return not self == other
def first(g, s):
# g: Grammar object
# s: RHS or Part of RHS as list
if not s:
return set() # empty set
if s[0] == YACV_EPSILON:
return set([YACV_EPSILON]) # set with epsilon in it
if s[0] not in g.nonterminals.keys():
return set([s[0]])
# At this point, s[0] must be a non terminal
ret = set()
for prodno in g.nonterminals[s[0]]['prods_lhs']:
rhs = g.prods[prodno].rhs
if rhs[0] == s[0]:
# left recursion
continue
x = first(g, rhs)
ret = ret.union(x)
if YACV_EPSILON in ret:
x = first(g, s[1:])
ret = ret.union(x)
return ret
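# A worked example (hypothetical grammar, not shipped with yacv): for
#     S -> A b
#     A -> a | ''
# the raw helper gives first(g, ['A']) == {'a', YACV_EPSILON} and
# first(g, ['S']) == {'a', 'b', YACV_EPSILON}; Grammar.build_first() later drops
# YACV_EPSILON from FIRST(S) because S itself is not nullable.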
class Grammar(object):
def __init__(self, fname='simple-grammar.txt'):
lines = [x.strip() for x in open(fname).readlines()]
self.prods = [] # list containing all the productions
all_symbols = set()
for line in lines:
if line == '':
continue
try:
lhs, rhs = line.split('->')
except ValueError as e:
raise YACVError('Invalid grammar file')
lhs = lhs.strip()
rhs = [x for x in rhs.split(' ') if x]
# TODO: find a better way to do this
for i, _ in enumerate(rhs):
if rhs[i] == "\'\'":
rhs[i] = YACV_EPSILON
self.prods.append(
Production(lhs, rhs)
)
all_symbols = all_symbols.union(rhs)
# Augment the grammar
self.prods.insert(0, Production('S\'', [self.prods[0].lhs, '$']))
# Accumulate nonterminal information
self.nonterminals = OrderedDict()
for i, prod in enumerate(self.prods):
lhs, rhs = prod.lhs, prod.rhs
if lhs not in self.nonterminals.keys():
self.nonterminals[lhs] = {
# number of productions this nonterminal is on the LHS of
'prods_lhs' : [i],
# where does this non terminal appear on RHS ?
# what prod and what place ?
'prods_rhs' : [],
'first' : set(),
'follow' : set(),
'nullable' : False
}
else:
self.nonterminals[lhs]['prods_lhs'].append(i)
self.terminals = all_symbols.difference(set(self.nonterminals.keys()))
if YACV_EPSILON in self.terminals:
self.terminals = self.terminals.difference(set([YACV_EPSILON]))
self.terminals.add('$')
self.terminals = sorted(self.terminals)
# Update nonterminals_on_rhs for every prod using above data
for prodno, prod in enumerate(self.prods):
lhs, rhs = prod.lhs, prod.rhs
for i, symbol in enumerate(rhs):
if symbol in self.nonterminals.keys():
self.nonterminals[symbol]['prods_rhs'].append((prodno, i))
self.build_first()
self.build_follow()
def build_first(self):
# inefficient method, but should work fine for most small grammars
for nt in self.nonterminals.keys():
tmp = first(self, [nt])
self.nonterminals[nt]['first'] = tmp
for prod_id in self.nonterminals[nt]['prods_lhs']:
if self.prods[prod_id].rhs[0] == YACV_EPSILON:
self.nonterminals[nt]['nullable'] = True
changed = True
while changed:
changed = False
for nt in self.nonterminals.keys():
for prod_id in self.nonterminals[nt]['prods_lhs']:
rhs = self.prods[prod_id].rhs
count = 0
for symbol in rhs:
if symbol in self.nonterminals.keys() and \
self.nonterminals[symbol]['nullable']:
count += 1
if count == len(rhs) and not self.nonterminals[nt]['nullable']:
self.nonterminals[nt]['nullable'] = True
changed = True
for nt in self.nonterminals.keys():
f = self.nonterminals[nt]['first']
if self.nonterminals[nt]['nullable']:
self.nonterminals[nt]['first'] = f.union(set([YACV_EPSILON]))
else:
self.nonterminals[nt]['first'] = f.difference(set([YACV_EPSILON]))
def build_follow(self):
log = logging.getLogger('yacv')
self.nonterminals[self.prods[0].lhs]['follow'].add('$')
for nt in self.nonterminals.keys():
# Where does this symbol occur on RHS ?
s = set()
for prodno, idx in self.nonterminals[nt]['prods_rhs']:
log.debug('Needed FIRST({}) = {} for FOLLOW({})'.format(
self.prods[prodno].rhs[idx+1:],
first(self, self.prods[prodno].rhs[idx+1:]),
nt
))
f = first(self, self.prods[prodno].rhs[idx+1:])
if not f or YACV_EPSILON in f:
f.add('$')
s = s.union(f)
s = s.difference(set([YACV_EPSILON]))
self.nonterminals[nt]['follow'] = s
log.debug('FOLLOW({}) = {}'.format(nt, s))
for prod in self.prods:
log.debug('At production {}'.format(prod))
# Is there a production A -> BC such that C is NULLABLE ?
lhs, rhs = prod.lhs, prod.rhs
reversed_rhs = rhs[::-1]
for i, symbol in enumerate(reversed_rhs):
log.debug('i = {}, symbol = {}'.format(i, symbol))
if symbol not in self.nonterminals.keys():
break
if self.nonterminals[symbol]['nullable'] and (i+1) < len(rhs):
if reversed_rhs[i+1] in self.terminals:
continue
s1 = self.nonterminals[reversed_rhs[i+1]]['follow']
s2 = self.nonterminals[lhs]['follow']
s1 = s1.union(s2)
self.nonterminals[reversed_rhs[i+1]]['follow'] = s1
if i == 0:
s3 = self.nonterminals[symbol]['follow']
s3 = s3.union(s2)
self.nonterminals[symbol]['follow'] = s3
log.debug('Production {} has nullable symbol {} at the end, updated FOLLOW({}) = {}'.format(prod, symbol, symbol, s3))
log.debug('Production {} has nullable symbol {}, changed FOLLOW({}) to {}'.format(prod, symbol, reversed_rhs[i+1], s1))
if rhs[-1] in self.nonterminals.keys():
self.nonterminals[rhs[-1]]['follow'] = \
self.nonterminals[rhs[-1]]['follow'].union(
self.nonterminals[lhs]['follow']
)
log.debug('End of iteration' + 16*'-')
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
g = Grammar()
else:
g = Grammar(sys.argv[1])
pprint(g.prods)
print(64*'-')
for nt in g.nonterminals.keys():
print('FIRST({}) = {}'.format(nt, g.nonterminals[nt]['first']))
print(64*'-')
for nt in g.nonterminals.keys():
print('FOLLOW({}) = {}'.format(nt, g.nonterminals[nt]['follow']))
|
ad8bdebfea36d100488a449e4e0dcc65bea72dc1
|
4feb5744ab5a26aeeb04573e4944d2bf4d1a6a2a
|
/peeringdb_server/migrations/0031_auto_20200404_0910.py
|
b9a3534050e25a136707fc53a8795d4014d94cf9
|
[
"BSD-2-Clause"
] |
permissive
|
peeringdb/peeringdb
|
cb79f809c4bb8cc5192180366df1f05d8fc0111f
|
3f62b2d97c78ccf151fb1a5761637e28463b9541
|
refs/heads/master
| 2023-09-04T09:26:43.741086
| 2023-08-22T19:20:34
| 2023-08-22T19:20:34
| 60,563,174
| 311
| 121
|
BSD-2-Clause
| 2023-09-13T02:13:42
| 2016-06-06T21:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 577
|
py
|
0031_auto_20200404_0910.py
|
# Generated by Django 2.2.9 on 2020-04-04 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0030_affiliation_request_status_add_canceled"),
]
operations = [
migrations.AlterField(
model_name="network",
name="info_never_via_route_servers",
field=models.BooleanField(
default=False,
help_text="Indicates if this network will announce its routes via route servers or not",
),
),
]
|
e8bdbe41c530eb4b779a4d88a87318fe979890b1
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/integration/modules/test_disk.py
|
844817d7c22a689592da6b9b27465aeedf1fa15f
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
test_disk.py
|
import os
import shutil
import pytest
import salt.utils.platform
from tests.support.case import ModuleCase
@pytest.mark.windows_whitelisted
@pytest.mark.skip_on_darwin(reason="No mtab on Darwin")
@pytest.mark.skip_on_freebsd
@pytest.mark.skip_on_windows(reason="No mtab on Windows")
@pytest.mark.destructive_test
class DiskModuleVirtualizationTest(ModuleCase):
"""
Test to make sure we return a clean result under Docker. Refs #8976
This is factored into its own class so that we can have some certainty that setUp() and tearDown() are run.
"""
def setUp(self):
# Make /etc/mtab unreadable
if os.path.isfile("/etc/mtab"):
shutil.move("/etc/mtab", "/tmp/mtab")
def test_no_mtab(self):
ret = self.run_function("disk.usage")
self.assertDictEqual(ret, {})
def tearDown(self):
if os.path.isfile("/tmp/mtab"):
shutil.move("/tmp/mtab", "/etc/mtab")
@pytest.mark.windows_whitelisted
class DiskModuleTest(ModuleCase):
"""
Validate the disk module
"""
@pytest.mark.slow_test
def test_usage(self):
"""
disk.usage
"""
ret = self.run_function("disk.usage")
self.assertTrue(isinstance(ret, dict))
if not isinstance(ret, dict):
return
if salt.utils.platform.is_darwin():
for key, val in ret.items():
self.assertTrue("filesystem" in val)
self.assertTrue("512-blocks" in val)
self.assertTrue("used" in val)
self.assertTrue("available" in val)
self.assertTrue("capacity" in val)
self.assertTrue("iused" in val)
self.assertTrue("ifree" in val)
self.assertTrue("%iused" in val)
else:
for key, val in ret.items():
self.assertTrue("filesystem" in val)
self.assertTrue("1K-blocks" in val)
self.assertTrue("used" in val)
self.assertTrue("available" in val)
self.assertTrue("capacity" in val)
@pytest.mark.skip_on_windows(reason="inode info not available on Windows")
def test_inodeusage(self):
"""
disk.inodeusage
"""
ret = self.run_function("disk.inodeusage")
self.assertTrue(isinstance(ret, dict))
if not isinstance(ret, dict):
return
for key, val in ret.items():
self.assertTrue("inodes" in val)
self.assertTrue("used" in val)
self.assertTrue("free" in val)
self.assertTrue("use" in val)
self.assertTrue("filesystem" in val)
|
03e62d8e1661a280de4cf287d9b1dee4673e12b6
|
dcc7dd6c65cb13d3619689b2c794b450e503b100
|
/tests/console/commands/test_run.py
|
53c5dbbb3d39238727a54fb65caecbb1fdd0e96f
|
[
"MIT"
] |
permissive
|
python-poetry/poetry
|
6b83f8db6a15b132fd252b68ed3bbee51b4e64f0
|
02448cf7f184dea204156f7dcb620a4f01a0068e
|
refs/heads/master
| 2023-09-04T12:23:02.700442
| 2023-09-02T10:46:06
| 2023-09-02T10:46:06
| 123,303,402
| 20,127
| 2,081
|
MIT
| 2023-09-12T09:41:09
| 2018-02-28T15:23:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,649
|
py
|
test_run.py
|
from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING
import pytest
from poetry.utils._compat import WINDOWS
if TYPE_CHECKING:
from cleo.testers.application_tester import ApplicationTester
from cleo.testers.command_tester import CommandTester
from pytest_mock import MockerFixture
from poetry.poetry import Poetry
from poetry.utils.env import MockEnv
from poetry.utils.env import VirtualEnv
from tests.types import CommandTesterFactory
from tests.types import FixtureDirGetter
from tests.types import ProjectFactory
@pytest.fixture
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
return command_tester_factory("run")
@pytest.fixture(autouse=True)
def patches(mocker: MockerFixture, env: MockEnv) -> None:
mocker.patch("poetry.utils.env.EnvManager.get", return_value=env)
@pytest.fixture
def poetry_with_scripts(
project_factory: ProjectFactory, fixture_dir: FixtureDirGetter
) -> Poetry:
source = fixture_dir("scripts")
return project_factory(
name="scripts",
pyproject_content=(source / "pyproject.toml").read_text(encoding="utf-8"),
source=source,
)
def test_run_passes_all_args(app_tester: ApplicationTester, env: MockEnv) -> None:
app_tester.execute("run python -V")
assert [["python", "-V"]] == env.executed
def test_run_keeps_options_passed_before_command(
app_tester: ApplicationTester, env: MockEnv
) -> None:
app_tester.execute("-V --no-ansi run python", decorated=True)
assert not app_tester.io.is_decorated()
assert app_tester.io.fetch_output() == app_tester.io.remove_format(
app_tester.application.long_version + "\n"
)
assert [] == env.executed
def test_run_has_helpful_error_when_command_not_found(
app_tester: ApplicationTester, env: MockEnv, capfd: pytest.CaptureFixture[str]
) -> None:
nonexistent_command = "nonexistent-command"
env._execute = True
app_tester.execute(f"run {nonexistent_command}")
assert env.executed == [[nonexistent_command]]
assert app_tester.status_code == 1
if WINDOWS:
        # On Windows we use a shell to run commands; the shell provides its own error
        # message when a command is not found, which is not captured by the
        # ApplicationTester but is captured by pytest, so we can access it via capfd.
# The exact error message depends on the system language. Thus, we check only
# for the name of the command.
assert nonexistent_command in capfd.readouterr().err
else:
assert (
app_tester.io.fetch_error() == f"Command not found: {nonexistent_command}\n"
)
@pytest.mark.skipif(
not WINDOWS,
reason=(
"Poetry only installs CMD script files for console scripts of editable"
" dependencies on Windows"
),
)
def test_run_console_scripts_of_editable_dependencies_on_windows(
tmp_venv: VirtualEnv,
command_tester_factory: CommandTesterFactory,
) -> None:
"""
On Windows, Poetry installs console scripts of editable dependencies by creating
in the environment's `Scripts/` directory both:
A) a Python file named after the console script (no `.py` extension) which
imports and calls the console script using Python code
B) a CMD script file also named after the console script
(with `.cmd` extension) which calls `python.exe` to execute (A)
This configuration enables calling the console script by name from `cmd.exe`
because the `.cmd` file extension appears by default in the PATHEXT environment
variable that `cmd.exe` uses to determine which file should be executed if a
filename without an extension is executed as a command.
This test validates that you can also run such a CMD script file via `poetry run`
just by providing the script's name without the `.cmd` extension.
"""
tester = command_tester_factory("run", environment=tmp_venv)
cmd_script_file = tmp_venv._bin_dir / "quix.cmd"
# `/b` ensures we only exit the script instead of any cmd.exe proc that called it
cmd_script_file.write_text("exit /b 123")
# We prove that the CMD script executed successfully by verifying the exit code
# matches what we wrote in the script
assert tester.execute("quix") == 123
def test_run_script_exit_code(
poetry_with_scripts: Poetry,
command_tester_factory: CommandTesterFactory,
tmp_venv: VirtualEnv,
mocker: MockerFixture,
) -> None:
mocker.patch(
"os.execvpe",
lambda file, args, env: subprocess.call([file] + args[1:], env=env),
)
install_tester = command_tester_factory(
"install",
poetry=poetry_with_scripts,
environment=tmp_venv,
)
assert install_tester.execute() == 0
tester = command_tester_factory(
"run", poetry=poetry_with_scripts, environment=tmp_venv
)
assert tester.execute("exit-code") == 42
assert tester.execute("return-code") == 42
@pytest.mark.parametrize(
"installed_script", [False, True], ids=["not installed", "installed"]
)
def test_run_script_sys_argv0(
installed_script: bool,
poetry_with_scripts: Poetry,
command_tester_factory: CommandTesterFactory,
tmp_venv: VirtualEnv,
mocker: MockerFixture,
) -> None:
"""
If RunCommand calls an installed script defined in pyproject.toml,
sys.argv[0] must be set to the full path of the script.
"""
mocker.patch("poetry.utils.env.EnvManager.get", return_value=tmp_venv)
mocker.patch(
"os.execvpe",
lambda file, args, env: subprocess.call([file] + args[1:], env=env),
)
install_tester = command_tester_factory(
"install",
poetry=poetry_with_scripts,
environment=tmp_venv,
)
assert install_tester.execute() == 0
if not installed_script:
for path in tmp_venv.script_dirs[0].glob("check-argv0*"):
path.unlink()
tester = command_tester_factory(
"run", poetry=poetry_with_scripts, environment=tmp_venv
)
argv1 = "absolute" if installed_script else "relative"
assert tester.execute(f"check-argv0 {argv1}") == 0
if installed_script:
expected_message = ""
else:
expected_message = """\
Warning: 'check-argv0' is an entry point defined in pyproject.toml, but it's not \
installed as a script. You may get improper `sys.argv[0]`.
The support to run uninstalled scripts will be removed in a future release.
Run `poetry install` to resolve and get rid of this message.
"""
assert tester.io.fetch_error() == expected_message
|
e76178691f1784fa47b1595152e45f224107ce0f
|
7a6644b553316ece2498e4f8f629454e0b379d23
|
/docs/source/manim_example_ext.py
|
0feef36d69ce976eb20c11be1b21ac363ea38da5
|
[
"MIT"
] |
permissive
|
manim-kindergarten/manim
|
9f17cac6c1c4db5db6e7f4edfe4885eee9ec1f5e
|
99fe80a55cdc5c2fcc249b3645d7f1cd19852bcd
|
refs/heads/master
| 2023-06-27T19:44:05.384032
| 2022-12-08T04:00:27
| 2022-12-08T04:00:27
| 245,434,121
| 130
| 27
|
MIT
| 2023-06-17T07:15:31
| 2020-03-06T14:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,468
|
py
|
manim_example_ext.py
|
from docutils import nodes
from docutils.parsers.rst import directives, Directive
import jinja2
import os
class skip_manim_node(nodes.Admonition, nodes.Element):
pass
def visit(self, node, name=""):
self.visit_admonition(node, name)
def depart(self, node):
self.depart_admonition(node)
class ManimExampleDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 0
option_spec = {
"hide_code": bool,
"media": str,
}
final_argument_whitespace = True
def run(self):
hide_code = "hide_code" in self.options
scene_name = self.arguments[0]
media_file_name = self.options["media"]
source_block = [
".. code-block:: python",
"",
*[" " + line for line in self.content],
]
source_block = "\n".join(source_block)
state_machine = self.state_machine
document = state_machine.document
if any(media_file_name.endswith(ext) for ext in [".png", ".jpg", ".gif"]):
is_video = False
else:
is_video = True
rendered_template = jinja2.Template(TEMPLATE).render(
scene_name=scene_name,
scene_name_lowercase=scene_name.lower(),
hide_code=hide_code,
is_video=is_video,
media_file_name=media_file_name,
source_block=source_block,
)
state_machine.insert_input(
rendered_template.split("\n"), source=document.attributes["source"]
)
return []
def setup(app):
app.add_node(skip_manim_node, html=(visit, depart))
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_directive("manim-example", ManimExampleDirective)
metadata = {"parallel_read_safe": False, "parallel_write_safe": True}
return metadata
TEMPLATE = r"""
{% if not hide_code %}
.. raw:: html
<div class="manim-example">
{% endif %}
{% if is_video %}
.. raw:: html
<video id="{{ scene_name_lowercase }}" class="manim-video" controls src="{{ media_file_name }}"></video>
{% else %}
.. image:: {{ media_file_name }}
:align: center
:name: {{ scene_name_lowercase }}
{% endif %}
{% if not hide_code %}
.. raw:: html
<h5 class="example-header">{{ scene_name }}<a class="headerlink" href="#{{ scene_name_lowercase }}">¶</a></h5>
{{ source_block }}
{% endif %}
.. raw:: html
</div>
"""
|
3393decaad23f4ce126b21ac9565fec0f3414a2a
|
cb560437b44771d9b7cba2043501a1f3d54f3d40
|
/cyp/analysis/__init__.py
|
9f1161c4e63f03c9dff07cf756f084626718fe19
|
[
"MIT"
] |
permissive
|
gabrieltseng/pycrop-yield-prediction
|
88c0a9fec4de10a0338f8cc208d4686fbb7123ea
|
b4790dc2f87a73e8a0604e8c22466314090c5abf
|
refs/heads/master
| 2022-02-02T14:30:59.868766
| 2022-01-17T15:32:54
| 2022-01-17T15:32:54
| 171,045,076
| 140
| 56
|
MIT
| 2022-01-17T15:32:54
| 2019-02-16T19:50:51
|
Python
|
UTF-8
|
Python
| false
| false
| 46
|
py
|
__init__.py
|
from .counties_plot import plot_county_errors
|
072a1ae5fbe63583e1bc7ab4372a5fe0af04e49e
|
a12b448f44beb4d521cb7e31677281f41df35f0b
|
/utils/code_generator/time_measurement/filter_schedules.py
|
59515b7fbf0706c44e87e219a86faffdd4cfd675
|
[
"MIT"
] |
permissive
|
Tiramisu-Compiler/tiramisu
|
d45f65dd9c35f643b3531ec79df1203c7ea3371d
|
f13e480f0ddb142cec371b7d7431a41d8ca885ec
|
refs/heads/master
| 2023-08-25T12:21:26.889736
| 2023-05-09T18:40:52
| 2023-05-09T18:40:52
| 58,378,976
| 906
| 168
|
MIT
| 2023-09-08T11:47:06
| 2016-05-09T13:33:51
|
C++
|
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
filter_schedules.py
|
import pickle
import json
from pathlib import Path
from multiprocessing import Pool
MAX_NB_ITERATORS = 4
nb_threads = 48
chunksize = 100
# Path to the programs folder
progs_path = Path("programs")
# Path where to store the list of filtered programs
# Each element of the list has the format (func_id, sched_id)
dst_path = Path("progs_list.pickle")
# Process a program folder containing schedules
def process_programs(func_path):
output = []
func_id = func_path.name
with open(str(func_path / (func_id + ".json"))) as f:
prog_desc = json.load(f)
# A dictionary containing the pairs iterator_id : iterator_extent
iterators = dict()
# Get information about loop iterators
for i, loop_it in enumerate(prog_desc["loops"]["loops_array"]):
for it in prog_desc["iterators"]["iterators_array"]:
if loop_it["loop_it"] == it["it_id"]:
iterators[it["it_id"]] = it["upper_bound"] - it["lower_bound"]
    # If True, interchange / tiling (respectively) will not be applied to any schedule
no_interchange = True
no_tiling = True
for comp in prog_desc["computations"]["computations_array"]:
if not no_interchange and not no_tiling:
break
# If this computation is a reduction, we try to apply tiling
if "reduction_axes" in comp.keys() and len(comp["reduction_axes"]) != 0:
no_tiling = False
accesses = comp["rhs_accesses"]["accesses"]
# Check if it can be useful to apply interchange
if no_interchange:
for el in accesses:
access = el["access"]
# Check the number of access iterators in each dimension
# If it's more than one, we try interchange
for i, line in enumerate(access):
if line[:-1].count(1) > 1:
no_interchange = False
break
if not no_interchange:
break
# Check the order of access iterators
cur = 0
for line in access:
i = line[:-1].index(1)
if i < cur:
no_interchange = False
break
cur = i
if not no_interchange:
break
# Check if it can be useful to apply tiling
if no_tiling:
# Check if every buffer is used at most once
buf_set = set()
for el in accesses:
if el["comp_id"] in buf_set:
no_tiling = False
break
buf_set.add(el["comp_id"])
if no_tiling:
# Check the access matrices
for el in accesses:
acc_matrix = el["access"]
                    # The buffer has fewer dimensions than there are iterators,
                    # so this can't be an element-wise operation
if len(acc_matrix) < len(iterators):
no_tiling = False
break
# Check if every dimension contains only one iterator
# If a dimension is accessed with two iterators (for example i + j),
# this can't be an element-wise operation.
for line in acc_matrix:
if sum(line[:-1]) > 1:
no_tiling = False
break
if not no_tiling:
break
                    # Check whether some iterator is used in two or more dimensions.
                    # If an iterator is used in multiple dimensions, it might be useful to try tiling.
for i in range(len(acc_matrix[0])):
sum_var = 0
for j in range(len(acc_matrix)):
sum_var = sum_var + acc_matrix[j][i]
if sum_var > 1:
no_tiling = False
break
if not no_tiling:
break
# Process schedules
for sched_path in func_path.iterdir():
if not sched_path.is_dir():
continue
sched_id = sched_path.name
with open(str(sched_path / (sched_id + ".json"))) as f:
sched_desc = json.load(f)
# Discard this schedule if it applies interchange and we don't want it.
if len(sched_desc["interchange_dims"]) != 0 and no_interchange:
continue
        if sched_desc["tiling"] is not None:
# Discard this schedule if it applies tiling and we don't want it.
if no_tiling:
continue
            # Check the tiling factors (each must be at most half of the iterator's extent)
tiling_facts_big = False
tdims = sched_desc["tiling"]["tiling_dims"]
tfacts = sched_desc["tiling"]["tiling_factors"]
for i in range(len(tdims)):
if iterators[tdims[i]] / tfacts[i] < 2.0:
tiling_facts_big = True
break
if tiling_facts_big:
continue
output.append((func_id, sched_id))
return output
if __name__ == "__main__":
progs = list(progs_path.iterdir())
with Pool(nb_threads) as p:
map_ret = p.map(process_programs, progs, chunksize=chunksize)
scheds_list = []
# Flatten map_ret
for i1 in map_ret:
scheds_list.extend(i1)
print(len(scheds_list))
# Save the filtered programs list
with open(dst_path, "wb") as f:
pickle.dump(scheds_list, f)
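# Illustrative aside (toy data, not part of the pipeline above): the
# element-wise test applied to hand-written access matrices. Each row of an
# access matrix is one buffer dimension and the trailing column is the constant
# term, so a row summing to more than 1 mixes two iterators.
def _is_elementwise_access(acc_matrix):
    """Return True when no buffer dimension mixes two iterators, mirroring the
    per-line check used by the tiling heuristic above."""
    return all(sum(line[:-1]) <= 1 for line in acc_matrix)
# _is_elementwise_access([[1, 0, 0], [0, 1, 0]])  -> True   (an access like B[i][j])
# _is_elementwise_access([[1, 1, 0], [0, 1, 0]])  -> False  (an access like B[i + j][j])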
|
efdd6fd5542078b8f3ecbfea7e641421802ad5ab
|
c168fe819b446640957e5e310ef89fcfe28662b3
|
/torchbenchmark/models/opacus_cifar10/__init__.py
|
f66a49dbc23d5f0a25fea2f59a2468a1b8b0f75e
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/benchmark
|
7b55e8d714de2ea873e03df43811aab3848485dd
|
df4da9bdff11a2f948d5bd4ac83da7922e6f44f4
|
refs/heads/main
| 2023-08-29T13:06:09.671728
| 2023-08-28T16:51:55
| 2023-08-28T16:51:55
| 92,541,759
| 685
| 220
|
BSD-3-Clause
| 2023-09-14T18:10:18
| 2017-05-26T19:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,259
|
py
|
__init__.py
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data as data
import torchvision.models as models
from opacus import PrivacyEngine
from opacus.validators.module_validator import ModuleValidator
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
# disable torchdynamo-fx2trt because it never terminates
if "--torchdynamo" in extra_args and "fx2trt" in extra_args:
raise NotImplementedError("TorchDynamo Fx2trt is not supported because of hanging issue. "
"See: https://github.com/facebookresearch/torchdynamo/issues/109")
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = models.resnet18(num_classes=10)
self.model = ModuleValidator.fix(self.model)
self.model = self.model.to(device)
# Cifar10 images are 32x32 and have 10 classes
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
dataset = data.TensorDataset(self.example_inputs[0], self.example_target)
self.dummy_loader = data.DataLoader(dataset, batch_size=self.batch_size)
self.noise_multiplier: float=1.0
self.max_grad_norm: float=1.0
self.poisson_sampling: bool=False
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
self.criterion = nn.CrossEntropyLoss()
self.privacy_engine = PrivacyEngine()
self.model, self.optimizer, _ = self.privacy_engine.make_private(
module=self.model,
optimizer=self.optimizer,
data_loader=self.dummy_loader,
noise_multiplier=self.noise_multiplier,
max_grad_norm=self.max_grad_norm,
poisson_sampling=self.poisson_sampling,
)
def get_module(self):
return self.model, self.example_inputs
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.model, self.optimizer, _ = self.privacy_engine.make_private(
module=self.model,
optimizer=self.optimizer,
data_loader=self.dummy_loader,
noise_multiplier=1.0,
max_grad_norm=1.0,
poisson_sampling=False,
)
def train(self):
model = self.model
(images, ) = self.example_inputs
model.train()
targets = self.example_target
output = model(images)
loss = self.criterion(output, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
(images, ) = self.example_inputs
model.eval()
targets = self.example_target
with torch.no_grad():
out = model(images)
return (out, )
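# Minimal standalone sketch (an aside, independent of the benchmark harness):
# the same PrivacyEngine.make_private() flow on a tiny linear model, handy for
# inspecting what the wrapper returns without building resnet18.
def _dp_sketch():
    lin = nn.Linear(8, 2)
    opt = optim.SGD(lin.parameters(), lr=0.1)
    loader = data.DataLoader(
        data.TensorDataset(torch.randn(16, 8), torch.randint(0, 2, (16,))),
        batch_size=4,
    )
    engine = PrivacyEngine()
    lin, opt, loader = engine.make_private(
        module=lin,
        optimizer=opt,
        data_loader=loader,
        noise_multiplier=1.0,
        max_grad_norm=1.0,
    )
    # the module comes back wrapped for per-sample gradient computation
    return type(lin).__name__  # expected: "GradSampleModule"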
|
4c881a5005d45b91fd1b13d1c1e659f0a4faa439
|
a3e9a59ace71575122d306dbfe4cfef762cf185d
|
/preprocess.py
|
fa9a9a1316656601cd2a3a632d35839bc57eb903
|
[
"MIT"
] |
permissive
|
fatchord/WaveRNN
|
cf5cfa790c44aa3d9bef9536ec47374532f39b92
|
83c08fdcd625be56244f4145f41500468974b144
|
refs/heads/master
| 2022-07-06T09:01:09.663457
| 2022-07-02T14:21:21
| 2022-07-02T14:21:21
| 125,524,069
| 2,149
| 604
|
MIT
| 2022-07-02T14:21:22
| 2018-03-16T14:03:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,160
|
py
|
preprocess.py
|
import glob
from utils.display import *
from utils.dsp import *
from utils import hparams as hp
from multiprocessing import Pool, cpu_count
from utils.paths import Paths
import pickle
import argparse
from utils.text.recipes import ljspeech
from utils.files import get_files
from pathlib import Path
# Helper functions for argument types
def valid_n_workers(num):
n = int(num)
if n < 1:
raise argparse.ArgumentTypeError('%r must be an integer greater than 0' % num)
return n
parser = argparse.ArgumentParser(description='Preprocessing for WaveRNN and Tacotron')
parser.add_argument('--path', '-p', help='directly point to dataset path (overrides hparams.wav_path)')
parser.add_argument('--extension', '-e', metavar='EXT', default='.wav', help='file extension to search for in dataset folder')
parser.add_argument('--num_workers', '-w', metavar='N', type=valid_n_workers, default=cpu_count()-1, help='The number of worker processes to use for preprocessing')
parser.add_argument('--hp_file', metavar='FILE', default='hparams.py', help='The file to use for the hyperparameters')
args = parser.parse_args()
hp.configure(args.hp_file) # Load hparams from file
if args.path is None:
args.path = hp.wav_path
extension = args.extension
path = args.path
def convert_file(path: Path):
y = load_wav(path)
peak = np.abs(y).max()
if hp.peak_norm or peak > 1.0:
y /= peak
mel = melspectrogram(y)
if hp.voc_mode == 'RAW':
quant = encode_mu_law(y, mu=2**hp.bits) if hp.mu_law else float_2_label(y, bits=hp.bits)
elif hp.voc_mode == 'MOL':
quant = float_2_label(y, bits=16)
return mel.astype(np.float32), quant.astype(np.int64)
def process_wav(path: Path):
wav_id = path.stem
m, x = convert_file(path)
np.save(paths.mel/f'{wav_id}.npy', m, allow_pickle=False)
np.save(paths.quant/f'{wav_id}.npy', x, allow_pickle=False)
return wav_id, m.shape[-1]
wav_files = get_files(path, extension)
paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)
print(f'\n{len(wav_files)} {extension[1:]} files found in "{path}"\n')
if len(wav_files) == 0:
print('Please point wav_path in hparams.py to your dataset,')
print('or use the --path option.\n')
else:
if not hp.ignore_tts:
text_dict = ljspeech(path)
with open(paths.data/'text_dict.pkl', 'wb') as f:
pickle.dump(text_dict, f)
n_workers = max(1, args.num_workers)
simple_table([
('Sample Rate', hp.sample_rate),
('Bit Depth', hp.bits),
('Mu Law', hp.mu_law),
('Hop Length', hp.hop_length),
('CPU Usage', f'{n_workers}/{cpu_count()}')
])
pool = Pool(processes=n_workers)
dataset = []
for i, (item_id, length) in enumerate(pool.imap_unordered(process_wav, wav_files), 1):
dataset += [(item_id, length)]
bar = progbar(i, len(wav_files))
message = f'{bar} {i}/{len(wav_files)} '
stream(message)
with open(paths.data/'dataset.pkl', 'wb') as f:
pickle.dump(dataset, f)
print('\n\nCompleted. Ready to run "python train_tacotron.py" or "python train_wavernn.py". \n')
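# Illustrative aside (a sketch, not used by the script): the mu-law companding
# that encode_mu_law is assumed to perform, written with plain numpy so the
# label range produced for a given bit depth is easy to inspect.
def _mu_law_sketch(y, bits=9):
    mu = 2 ** bits - 1
    # compress towards zero, then quantise to integer labels in [0, mu]
    fx = np.sign(y) * np.log(1 + mu * np.abs(y)) / np.log(1 + mu)
    return np.floor((fx + 1) / 2 * mu + 0.5).astype(np.int64)
# _mu_law_sketch(np.array([-1.0, 0.0, 1.0]))  -> array([  0, 256, 511])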
|
c8256a572fe477cb7bf6e61180426393e14db929
|
e6ffff4ae45863b33eee0e1e9b9c61f7401db0d3
|
/elsapy/elsentity.py
|
31c7877666e874765788be9e0f7e18cdf934f9d8
|
[
"BSD-3-Clause"
] |
permissive
|
ElsevierDev/elsapy
|
1d0e19eba3e5fad11aa0464a9da453b8b4d6fdf6
|
08e231f561c3b0c90e1040b0bc61fef27ca804ec
|
refs/heads/master
| 2022-11-30T08:09:44.294809
| 2022-11-09T18:48:45
| 2022-11-09T18:48:45
| 70,745,932
| 316
| 166
|
BSD-3-Clause
| 2022-11-09T18:48:46
| 2016-10-12T22:10:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,481
|
py
|
elsentity.py
|
"""The (abstract) base entity module for elsapy. Used by elsprofile, elsdoc.
Additional resources:
* https://github.com/ElsevierDev/elsapy
* https://dev.elsevier.com
* https://api.elsevier.com"""
import requests, json, urllib
from abc import ABCMeta, abstractmethod
from . import log_util
logger = log_util.get_logger(__name__)
class ElsEntity(metaclass=ABCMeta):
"""An abstract class representing an entity in Elsevier's data model"""
# constructors
@abstractmethod
def __init__(self, uri):
"""Initializes a data entity with its URI"""
self._uri = uri
self._data = None
self._client = None
# properties
@property
def uri(self):
"""Get the URI of the entity instance"""
return self._uri
@uri.setter
def uri(self, uri):
"""Set the URI of the entity instance"""
self._uri = uri
@property
def id(self):
"""Get the dc:identifier of the entity instance"""
return self.data["coredata"]["dc:identifier"]
@property
def int_id(self):
"""Get the (non-URI, numbers only) ID of the entity instance"""
dc_id = self.data["coredata"]["dc:identifier"]
return dc_id[dc_id.find(':') + 1:]
@property
def data(self):
"""Get the full JSON data for the entity instance"""
return self._data
@property
def client(self):
"""Get the elsClient instance currently used by this entity instance"""
return self._client
@client.setter
def client(self, elsClient):
"""Set the elsClient instance to be used by thisentity instance"""
self._client = elsClient
# modifier functions
@abstractmethod
def read(self, payloadType, elsClient):
"""Fetches the latest data for this entity from api.elsevier.com.
Returns True if successful; else, False."""
if elsClient:
            self._client = elsClient
elif not self.client:
raise ValueError('''Entity object not currently bound to elsClient instance. Call .read() with elsClient argument or set .client attribute.''')
try:
api_response = self.client.exec_request(self.uri)
if isinstance(api_response[payloadType], list):
self._data = api_response[payloadType][0]
else:
self._data = api_response[payloadType]
## TODO: check if URI is the same, if necessary update and log warning.
logger.info("Data loaded for " + self.uri)
return True
except (requests.HTTPError, requests.RequestException) as e:
for elm in e.args:
logger.warning(elm)
return False
def write(self):
"""If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False."""
if (self.data):
dataPath = self.client.local_dir / (urllib.parse.quote_plus(self.uri)+'.json')
with dataPath.open(mode='w') as dump_file:
json.dump(self.data, dump_file)
dump_file.close()
logger.info('Wrote ' + self.uri + ' to file')
return True
else:
logger.warning('No data to write for ' + self.uri)
return False
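# Illustrative sketch (not part of elsapy): a minimal concrete entity showing
# how subclasses are expected to bind the generic read() to a payload key.
# The payload key and class name are assumptions for demonstration only.
class ExampleEntity(ElsEntity):
    """Hypothetical entity whose API responses are keyed by 'example-retrieval-response'."""
    def __init__(self, uri):
        super().__init__(uri)
    def read(self, els_client=None):
        """Reads this entity's data; returns True if successful, else False."""
        return super().read("example-retrieval-response", els_client)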
|
d84824b1e5599fce1bdd9816c94b7cac5a695cd1
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/jobs/batch_jobs/suggestion_migration_jobs_test.py
|
870b8c6049c05e0c00d10094995309890d48d32b
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 25,059
|
py
|
suggestion_migration_jobs_test.py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.suggestion_migration_jobs."""
from __future__ import annotations
from core import feconf
from core.domain import question_domain
from core.domain import question_fetchers
from core.domain import skill_services
from core.domain import suggestion_services
from core.domain import translation_domain
from core.jobs import job_test_utils
from core.jobs.batch_jobs import suggestion_migration_jobs
from core.jobs.types import job_run_result
from core.platform import models
from core.tests import test_utils
from typing import Dict, Union
from typing_extensions import Final
MYPY = False
if MYPY:
from mypy_imports import exp_models
from mypy_imports import suggestion_models
(exp_models, suggestion_models) = models.Registry.import_models([
models.Names.EXPLORATION, models.Names.SUGGESTION
])
class MigrateSuggestionJobTests(job_test_utils.JobTestBase):
JOB_CLASS = (
suggestion_migration_jobs
.RegenerateContentIdForTranslationSuggestionsInReviewJob
)
TARGET_ID = 'exp1'
def setUp(self) -> None:
super().setUp()
self.STATE_DICT_IN_V52 = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'interaction': {
'solution': None,
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': 'Default outcome'
},
'dest': 'Introduction',
'dest_if_really_stuck': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
},
'customization_args': {
'catchMisspellings': {
'value': False
},
'rows': {
'value': 1
},
'placeholder': {
'value': {
'unicode_str': '',
'content_id': 'ca_placeholder_1'
}
}
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': []
},
'linked_skill_id': None,
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {},
'default_outcome': {},
'ca_placeholder_1': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'ca_placeholder_1': {}
}
},
'classifier_model_id': None,
'card_is_checkpoint': False,
'solicit_answer_details': False,
'next_content_id_index': 2
}
self.exp_1 = self.create_model(
exp_models.ExplorationModel,
id=self.TARGET_ID,
title='title',
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
category=feconf.DEFAULT_EXPLORATION_CATEGORY,
objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE,
language_code='en',
tags=['Topic'],
blurb='blurb',
author_notes='author notes',
states_schema_version=52,
param_specs={},
param_changes=[],
auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED,
correctness_feedback_enabled=False,
states={feconf.DEFAULT_INIT_STATE_NAME: self.STATE_DICT_IN_V52},
)
self.put_multi([self.exp_1])
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_unmigrated_suggestion_is_migrated(self) -> None:
change_dict = {
'cmd': 'add_translation',
'content_id': 'default_outcome',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_1_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_1_model.update_timestamps()
suggestion_models.GeneralSuggestionModel.put_multi([
suggestion_1_model])
unmigrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
unmigrated_suggestion_model.change_cmd['content_id'],
'default_outcome'
)
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1'
),
job_run_result.JobRunResult(
stdout='SUGGESTION MIGRATED SUCCESS: 1'
)
])
migrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
migrated_suggestion_model.change_cmd['content_id'],
'default_outcome_1'
)
def test_unmigrated_invalid_suggestion_raises_error(self) -> None:
change_dict = {
'cmd': 'add_translation',
'content_id': 'default_outcome',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'invalid_state_name',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_1_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
id=16,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_1_model.update_timestamps()
change_dict = {
'cmd': 'add_translation',
'content_id': 'invalid',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_2_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
id=17,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_2_model.update_timestamps()
change_dict = {
'cmd': 'add_translation',
'content_id': 'default_outcome',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_3_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_3_model.update_timestamps()
suggestion_models.GeneralSuggestionModel.put_multi([
suggestion_1_model, suggestion_2_model, suggestion_3_model])
unmigrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
unmigrated_suggestion_model.change_cmd['content_id'],
'default_outcome'
)
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1'),
job_run_result.JobRunResult(
stdout='SUGGESTION MIGRATED SUCCESS: 1'),
job_run_result.JobRunResult(
stderr=(
'SUGGESTION TARGET PROCESSED ERROR: \"(16, '
'\'State name invalid_state_name does not exist in the '
'exploration\')\": 1')
), job_run_result.JobRunResult(
stderr=(
'SUGGESTION TARGET PROCESSED ERROR: '
'\"(17, \'Content ID invalid does not exist in the '
'exploration\')\": 1')
),
])
def test_suggestion_with_invalid_content_id_raise_error(self) -> None:
change_dict = {
'cmd': 'add_translation',
'content_id': 'invalid_id',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_1_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
id='111',
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_1_model.update_timestamps()
suggestion_models.GeneralSuggestionModel.put_multi([
suggestion_1_model])
self.assert_job_output_is([
job_run_result.JobRunResult(
stderr=(
'SUGGESTION TARGET PROCESSED ERROR: "(\'111\', '
'\'Content ID invalid_id does not exist in the exploration'
'\')": 1')),
])
unmigrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
unmigrated_suggestion_model.change_cmd['content_id'], 'invalid_id')
class AuditMigrateSuggestionJobTests(job_test_utils.JobTestBase):
JOB_CLASS = (
suggestion_migration_jobs
.AuditRegenerateContentIdForTranslationSuggestionsInReviewJob
)
TARGET_ID = 'exp1'
def setUp(self) -> None:
super().setUp()
self.STATE_DICT_IN_V52 = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'interaction': {
'solution': None,
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': 'Default outcome'
},
'dest': 'Introduction',
'dest_if_really_stuck': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
},
'customization_args': {
'catchMisspellings': {
'value': False
},
'rows': {
'value': 1
},
'placeholder': {
'value': {
'unicode_str': '',
'content_id': 'ca_placeholder_1'
}
}
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': []
},
'linked_skill_id': None,
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {},
'default_outcome': {},
'ca_placeholder_1': {}
}
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'ca_placeholder_1': {}
}
},
'classifier_model_id': None,
'card_is_checkpoint': False,
'solicit_answer_details': False,
'next_content_id_index': 2
}
self.exp_1 = self.create_model(
exp_models.ExplorationModel,
id=self.TARGET_ID,
title='title',
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
category=feconf.DEFAULT_EXPLORATION_CATEGORY,
objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE,
language_code='en',
tags=['Topic'],
blurb='blurb',
author_notes='author notes',
states_schema_version=52,
param_specs={},
param_changes=[],
auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED,
correctness_feedback_enabled=False,
states={feconf.DEFAULT_INIT_STATE_NAME: self.STATE_DICT_IN_V52},
)
self.put_multi([self.exp_1])
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_unmigrated_suggestion_is_not_migrated(self) -> None:
change_dict = {
'cmd': 'add_translation',
'content_id': 'default_outcome',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_1_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_1_model.update_timestamps()
suggestion_models.GeneralSuggestionModel.put_multi([
suggestion_1_model])
unmigrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
unmigrated_suggestion_model.change_cmd['content_id'],
'default_outcome'
)
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1'
),
job_run_result.JobRunResult(
stdout='SUGGESTION MIGRATED SUCCESS: 1'
)
])
migrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
migrated_suggestion_model.change_cmd['content_id'],
'default_outcome'
)
def test_suggestion_with_invalid_content_id_raise_error(self) -> None:
change_dict = {
'cmd': 'add_translation',
'content_id': 'invalid_id',
'language_code': 'hi',
'content_html': 'Content',
'state_name': 'Introduction',
'translation_html': '<p>Translation for content.</p>'
}
suggestion_1_model = self.create_model(
suggestion_models.GeneralSuggestionModel,
id=15,
suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
author_id='user1',
change_cmd=change_dict,
score_category='irrelevant',
status=suggestion_models.STATUS_IN_REVIEW,
target_type='exploration',
target_id=self.TARGET_ID,
target_version_at_submission=0,
language_code='bn'
)
suggestion_1_model.update_timestamps()
suggestion_models.GeneralSuggestionModel.put_multi([
suggestion_1_model])
self.assert_job_output_is([
job_run_result.JobRunResult(
stderr=(
'SUGGESTION TARGET PROCESSED ERROR: "(15, '
'\'Content ID invalid_id does not exist in the exploration'
'\')": 1')),
])
unmigrated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id)
)
self.assertEqual(
unmigrated_suggestion_model.change_cmd['content_id'], 'invalid_id')
class MigrateQuestionSuggestionsJobTests(
job_test_utils.JobTestBase, test_utils.GenericTestBase):
JOB_CLASS = suggestion_migration_jobs.MigrateQuestionSuggestionsJob
AUTHOR_EMAIL: Final = 'author@example.com'
def setUp(self) -> None:
super().setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_migrated_question_is_not_migrated(self) -> None:
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
skill_id, self.author_id, description='description')
content_id_generator = translation_domain.ContentIdGenerator()
state = self._create_valid_question_data(
'default-state', content_id_generator)
suggestion_change: Dict[
str, Union[str, float, question_domain.QuestionDict]
] = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'id': 'test_id',
'version': 12,
'question_state_data': state.to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1'],
'next_content_id_index': (
content_id_generator.next_content_id_index)
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
self.author_id, suggestion_change, 'test description')
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='QUESTION MODELS COUNT SUCCESS: 1')
])
def test_unmigrated_question_suggestion_is_migrated(self) -> None:
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
skill_id, self.author_id, description='description')
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.author_id, skill_id
)
)
suggestion = suggestion_models.GeneralSuggestionModel.get_by_id(
suggestion_id)
self.assertEqual(
suggestion.change_cmd['question_dict'][
'question_state_data_schema_version'],
27
)
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='QUESTION MODELS COUNT SUCCESS: 1'),
job_run_result.JobRunResult(
stdout='SUGGESTION MIGRATED SUCCESS: 1')
])
suggestion = suggestion_models.GeneralSuggestionModel.get_by_id(
suggestion_id)
self.assertEqual(
suggestion.change_cmd['question_dict'][
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION
)
def test_migration_errors_are_reported_in_job_result(self) -> None:
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
skill_id, self.author_id, description='description')
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.author_id, skill_id)
)
migrate_state_schema_raise = self.swap_to_always_raise(
question_fetchers, 'migrate_state_schema')
with migrate_state_schema_raise:
self.assert_job_output_is([
job_run_result.JobRunResult(
stderr=(
'SUGGESTION MIGRATED ERROR: "(\'%s\', '
'Exception())": 1' % suggestion_id)
),
job_run_result.JobRunResult(
stdout='QUESTION MODELS COUNT SUCCESS: 1'),
])
class AuditMigrateQuestionSuggestionsJobTests(
job_test_utils.JobTestBase, test_utils.GenericTestBase):
JOB_CLASS = suggestion_migration_jobs.AuditMigrateQuestionSuggestionsJob
AUTHOR_EMAIL: Final = 'author@example.com'
def setUp(self) -> None:
super().setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_unmigrated_question_suggestion_is_not_migrated(self) -> None:
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
skill_id, self.author_id, description='description')
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.author_id, skill_id
)
)
suggestion = suggestion_models.GeneralSuggestionModel.get_by_id(
suggestion_id)
self.assertEqual(
suggestion.change_cmd['question_dict'][
'question_state_data_schema_version'],
27
)
self.assert_job_output_is([
job_run_result.JobRunResult(
stdout='QUESTION MODELS COUNT SUCCESS: 1'),
job_run_result.JobRunResult(
stdout='SUGGESTION MIGRATED SUCCESS: 1')
])
suggestion = suggestion_models.GeneralSuggestionModel.get_by_id(
suggestion_id)
self.assertEqual(
suggestion.change_cmd['question_dict'][
'question_state_data_schema_version'],
27
)
def test_audit_errors_are_reported_in_job_result(self) -> None:
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
skill_id, self.author_id, description='description')
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.author_id, skill_id)
)
migrate_state_schema_raise = self.swap_to_always_raise(
question_fetchers, 'migrate_state_schema')
with migrate_state_schema_raise:
self.assert_job_output_is([
job_run_result.JobRunResult(
stderr=(
'SUGGESTION MIGRATED ERROR: "(\'%s\', '
'Exception())": 1' % suggestion_id)
),
job_run_result.JobRunResult(
stdout='QUESTION MODELS COUNT SUCCESS: 1'),
])
|
ceebd985edc20c82801059d7772fa4ae765e7098
|
bb021c074c95c4fb684cd543b288bc0b976df188
|
/intake/interface/__init__.py
|
580f0e1869128fab90306f694f44f6bf9431ab70
|
[
"BSD-2-Clause"
] |
permissive
|
intake/intake
|
6c96d4bf32f125fbd5df322377ae2a98ac76be99
|
81b1567a2030adfb22b856b4f63cefe35de68983
|
refs/heads/master
| 2023-08-25T14:07:08.855001
| 2023-08-24T19:49:13
| 2023-08-24T19:49:13
| 100,307,970
| 774
| 116
|
BSD-2-Clause
| 2023-09-11T13:51:16
| 2017-08-14T20:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
__init__.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
from packaging.version import Version
gl = globals()
def do_import():
try:
import hvplot
import panel as pn
error = Version(pn.__version__) < Version("1") or Version(hvplot.__version__) < Version("0.8.1")
except ImportError:
error = True
if error:
raise RuntimeError("Please install panel and hvplot to use the GUI\n" "`conda install -c conda-forge 'panel>=1' 'hvplot>=0.8.1'`")
from .gui import GUI
css = """
.scrolling {
overflow: scroll;
}
"""
pn.config.raw_css.append(css) # add scrolling class from css (panel GH#383, GH#384)
ex = pn.extension("codeeditor", template="fast")
gl["instance"] = GUI()
return ex
def __getattr__(attr):
if attr in {"instance", "gui"}:
do_import()
return gl[attr]
def output_notebook(*_, **__):
"""
Load the notebook extension
"""
return do_import()
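# Usage sketch (an aside; assumes panel>=1 and hvplot>=0.8.1 are installed):
# output_notebook() runs do_import(), which also creates the lazy "instance"
# attribute exposed through the module-level __getattr__ hook above.
if __name__ == "__main__":
    ext = output_notebook()
    print(type(gl["instance"]).__name__)  # the GUI singleton built by do_import()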
|
3e87f8e4f1d8c4f11f02bf68ac768d36e15c9408
|
8287c1677795d23856edefecaf5e878f348e99ba
|
/biostar/planet/models.py
|
e19a8e4ab14db84ce2c3581b5fb67d6a648e2c9f
|
[
"MIT"
] |
permissive
|
ialbert/biostar-central
|
ba325593d6b3a9e2b1ebaddb6257b863b22eface
|
a051511350871dcd82bdf0b88ce5cda9fd9ef141
|
refs/heads/master
| 2023-08-30T06:07:44.892831
| 2023-07-24T15:43:02
| 2023-07-24T15:43:02
| 1,511,294
| 535
| 271
|
MIT
| 2023-02-15T18:49:10
| 2011-03-22T13:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
models.py
|
from django.db import models
from django.conf import settings
import os, logging, datetime
from urllib import request
import feedparser
from django.utils.timezone import utc
from biostar.accounts.models import User
import uuid
logger = logging.getLogger("engine")
MAX_TEXT_LEN = 10000
def now():
return datetime.datetime.utcnow().replace(tzinfo=utc)
def abspath(*args):
"""
Generates absolute paths
"""
return os.path.abspath(os.path.join(*args))
def get_uuid(limit=32):
return str(uuid.uuid4())[:limit]
class Blog(models.Model):
"""
Represents a blog
"""
title = models.CharField(max_length=255, default="")
desc = models.TextField(default='', blank=True)
feed = models.URLField()
link = models.URLField()
active = models.BooleanField(default=True)
list_order = models.IntegerField(default=0)
# Adding field that indicates a remote blog
remote = models.BooleanField(default=True)
@property
def fname(self):
fname = abspath(settings.PLANET_DIR, f"{self.id}.xml")
return fname
def parse(self):
try:
doc = feedparser.parse(self.feed)
except Exception as exc:
logger.error(f"Error parsing feed. {exc}")
doc = None
return doc
def download(self):
try:
stream = request.urlopen(self.feed)
text = stream.read().decode("utf-8", errors="replace")
stream = open(self.fname, 'w', encoding='utf-8')
stream.write(text)
stream.close()
except Exception as exc:
logger.error(f"Error downloading {exc}")
def __str__(self):
return self.title
class BlogPost(models.Model):
"Represents an entry of a Blog"
# The blog that generated the entry
blog = models.ForeignKey(Blog, on_delete=models.CASCADE)
# A unique id for this entry
uid = models.CharField(max_length=200, unique=True)
# The title of the entry
title = models.CharField(max_length=200, null=False)
# The content of the feed
content = models.TextField(default='', max_length=20000)
# Sanitized HTML
html = models.TextField(default='')
# Date related fields.
creation_date = models.DateTimeField(db_index=True)
# Date at which the post has been inserted into the database
insert_date = models.DateTimeField(db_index=True, null=True)
# Has the entry been published
published = models.BooleanField(default=False)
# The link to the entry
link = models.URLField()
# Posts should be ranked by this.
rank = models.DateTimeField(db_index=True, null=True)
@property
def get_title(self):
return f"BLOG: {self.title}"
def get_absolute_url(self):
return self.link
def save(self, *args, **kwargs):
self.insert_date = self.insert_date or now()
# Set the rank
self.rank = self.rank or self.insert_date
#self.html = self.hmtl or
# SET THE HTML
#self.html = ''
self.uid = self.uid or get_uuid(10)
super(BlogPost, self).save(*args, **kwargs)
def __str__(self):
return self.title
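# Illustrative aside (not called by the app): the same feedparser call that
# Blog.parse() wraps, applied to a placeholder feed URL.
def _parse_feed_demo(feed_url="https://example.org/feed.xml"):
    doc = feedparser.parse(feed_url)
    # each entry is a dict-like object; title/link/published are common keys
    return [(entry.get("title"), entry.get("link")) for entry in doc.entries]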
|
d6be033778895e8fbffedd790476c08604205861
|
262af3a61864ba2aec01247075162a886551a439
|
/thingsboard_gateway/connectors/mqtt/json_mqtt_uplink_converter.py
|
212713b8d95369071e64c5c47f26b8d04152e1f5
|
[
"Apache-2.0"
] |
permissive
|
thingsboard/thingsboard-gateway
|
1331e58013afd0872ce28120229237a886af4d0f
|
0e3996bb29c60a784d990fbcdf0fcbb69a4a82ae
|
refs/heads/master
| 2023-08-31T07:30:03.340388
| 2023-08-25T05:01:08
| 2023-08-25T05:01:08
| 78,083,065
| 1,484
| 836
|
Apache-2.0
| 2023-09-12T07:50:51
| 2017-01-05T05:41:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
json_mqtt_uplink_converter.py
|
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from re import search
from simplejson import dumps
from thingsboard_gateway.gateway.constants import SEND_ON_CHANGE_PARAMETER
from thingsboard_gateway.connectors.mqtt.mqtt_uplink_converter import MqttUplinkConverter, log
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
from thingsboard_gateway.gateway.statistics_service import StatisticsService
class JsonMqttUplinkConverter(MqttUplinkConverter):
def __init__(self, config):
self.__config = config.get('converter')
self.__send_data_on_change = self.__config.get(SEND_ON_CHANGE_PARAMETER)
@property
def config(self):
return self.__config
@config.setter
def config(self, value):
self.__config = value
@StatisticsService.CollectStatistics(start_stat_type='receivedBytesFromDevices',
end_stat_type='convertedBytesFromDevice')
def convert(self, topic, data):
if isinstance(data, list):
converted_data = []
for item in data:
converted_data.append(self._convert_single_item(topic, item))
return converted_data
else:
return self._convert_single_item(topic, data)
def _convert_single_item(self, topic, data):
datatypes = {"attributes": "attributes",
"timeseries": "telemetry"}
dict_result = {
"deviceName": self.parse_device_name(topic, data, self.__config),
"deviceType": self.parse_device_type(topic, data, self.__config),
"attributes": [],
"telemetry": []
}
if isinstance(self.__send_data_on_change, bool):
dict_result[SEND_ON_CHANGE_PARAMETER] = self.__send_data_on_change
try:
for datatype in datatypes:
timestamp = data.get("ts", data.get("timestamp")) if datatype == 'timeseries' else None
dict_result[datatypes[datatype]] = []
for datatype_config in self.__config.get(datatype, []):
if isinstance(datatype_config, str) and datatype_config == "*":
for item in data:
dict_result[datatypes[datatype]].append(
self.create_timeseries_record(item, data[item], timestamp))
else:
values = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
expression_instead_none=False)
values_tags = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
get_tag=True)
keys = TBUtility.get_values(datatype_config["key"], data, datatype_config["type"],
expression_instead_none=False)
keys_tags = TBUtility.get_values(datatype_config["key"], data, get_tag=True)
full_key = datatype_config["key"]
for (key, key_tag) in zip(keys, keys_tags):
is_valid_key = "${" in datatype_config["key"] and "}" in \
datatype_config["key"]
full_key = full_key.replace('${' + str(key_tag) + '}',
str(key)) if is_valid_key else key_tag
full_value = datatype_config["value"]
for (value, value_tag) in zip(values, values_tags):
is_valid_value = "${" in datatype_config["value"] and "}" in \
datatype_config["value"]
full_value = full_value.replace('${' + str(value_tag) + '}',
str(value)) if is_valid_value else value
if full_key != 'None' and full_value != 'None':
dict_result[datatypes[datatype]].append(
self.create_timeseries_record(full_key, full_value, timestamp))
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), str(data))
log.exception(e)
return dict_result
@staticmethod
def create_timeseries_record(key, value, timestamp):
value_item = {key: value}
return {"ts": timestamp, 'values': value_item} if timestamp else value_item
@staticmethod
def parse_device_name(topic, data, config):
return JsonMqttUplinkConverter.parse_device_info(
topic, data, config, "deviceNameJsonExpression", "deviceNameTopicExpression")
@staticmethod
def parse_device_type(topic, data, config):
return JsonMqttUplinkConverter.parse_device_info(
topic, data, config, "deviceTypeJsonExpression", "deviceTypeTopicExpression")
@staticmethod
def parse_device_info(topic, data, config, json_expression_config_name, topic_expression_config_name):
result = None
try:
if config.get(json_expression_config_name) is not None:
expression = config.get(json_expression_config_name)
result_tags = TBUtility.get_values(expression, data, get_tag=True)
result_values = TBUtility.get_values(expression, data, expression_instead_none=True)
result = expression
for (result_tag, result_value) in zip(result_tags, result_values):
is_valid_key = "${" in expression and "}" in expression
result = result.replace('${' + str(result_tag) + '}',
str(result_value)) if is_valid_key else result_tag
elif config.get(topic_expression_config_name) is not None:
expression = config.get(topic_expression_config_name)
search_result = search(expression, topic)
if search_result is not None:
result = search_result.group(0)
else:
log.debug(
"Regular expression result is None. deviceNameTopicExpression parameter will be interpreted "
"as a deviceName\n Topic: %s\nRegex: %s", topic, expression)
result = expression
else:
log.error("The expression for looking \"deviceName\" not found in config %s", dumps(config))
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(config), data)
log.exception(e)
return result
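# Illustrative aside (the config values are assumptions modelled on typical
# MQTT converter configs): device name resolved from the payload through a
# ${...} JSON expression, device type cut out of the topic with a regex.
if __name__ == "__main__":
    _config = {
        "deviceNameJsonExpression": "${serialNumber}",
        "deviceTypeTopicExpression": "(?<=sensor/)(.*?)(?=/data)",
    }
    _payload = {"serialNumber": "SN-001", "temp": 22.5}
    _topic = "sensor/thermometer/data"
    print(JsonMqttUplinkConverter.parse_device_name(_topic, _payload, _config))  # SN-001
    print(JsonMqttUplinkConverter.parse_device_type(_topic, _payload, _config))  # thermometer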
|
58d821e76e46eadc201e036fd5276f5ee371b56f
|
39241620c6271758ea97d92fa4cf9f45aa7d2510
|
/src/python/aim/_ext/tracking/__init__.py
|
dd8d4a2764c86c351f220c7fb99bb6ac78302244
|
[
"Apache-2.0"
] |
permissive
|
aimhubio/aim
|
4618e5ebdaf69d216cbe61c6187b005d443839a9
|
34e5c2c29abe9b26699760074adcadfe8fd4cfe0
|
refs/heads/main
| 2023-09-01T04:23:22.276343
| 2023-09-01T03:47:13
| 2023-09-01T03:47:13
| 189,640,071
| 4,091
| 276
|
Apache-2.0
| 2023-09-14T15:27:03
| 2019-05-31T18:25:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,755
|
py
|
__init__.py
|
import logging
import os
import json
import sys
import uuid
import contextlib
import segment.analytics as sa # noqa
logger = logging.getLogger(__name__)
aim_profile_path = os.path.expanduser('~/.aim_profile')
class Analytics:
SEGMENT_WRITE_KEY = 'RrVqLHHD6WDXoFBkodO9KidodTtU92XO'
def __init__(self):
self.dev_mode = False
self.initialized = False
if os.path.exists(aim_profile_path):
with open(aim_profile_path, 'r') as fh:
try:
self._profile = json.load(fh)
except json.JSONDecodeError:
logger.error('Corrupted .aim_profile. Replacing with default.')
with self._autocommit():
self._profile = self.default_profile()
else:
with self._autocommit():
self._profile = self.default_profile()
self._user_id = self._profile['user-id']
def track_install_event(self) -> None:
if not self.dev_mode and self.telemetry_enabled:
env_key = sys.exec_prefix
if env_key in self._profile['envs']:
is_new_env = False
from aim.__version__ import __version__ as aim_version
if aim_version == self._profile['envs'][env_key]:
return
else:
is_new_env = True
from aim.__version__ import __version__ as aim_version
event_name = '[Aim] install' if is_new_env else '[Aim] upgrade'
self.track_event(event_name=event_name, aim_version=aim_version)
with self._autocommit():
self._profile['envs'][env_key] = aim_version
def track_event(self, *, event_name: str, **kwargs) -> None:
if not self.dev_mode and self.telemetry_enabled:
try:
self.initialize()
self._warn_once()
# sa.track(self._user_id, event=event_name, properties=kwargs)
except Exception as e: # noqa
logger.debug(f'Failed to track event {event_name}. Reason: {e}.')
def initialize(self) -> None:
if self.initialized:
return
segment_logger = logging.getLogger('segment')
segment_logger.disabled = True
# sa.write_key = Analytics.SEGMENT_WRITE_KEY
# sa.timeout = 2 # set send request timeout to 2 seconds
# sa.max_retries = 2 # set maximum send request retries to 2
#
# sa.identify(user_id=self._user_id)
self.initialized = True
@property
def telemetry_enabled(self) -> bool:
return self._profile['telemetry']['enable']
@telemetry_enabled.setter
def telemetry_enabled(self, enable: bool):
if enable != self.telemetry_enabled:
with self._autocommit():
self._profile['telemetry']['enable'] = enable
@property
def warning_shown(self) -> bool:
return self._profile['telemetry']['warning-shown']
@contextlib.contextmanager
def _autocommit(self):
yield
with open(aim_profile_path, 'w+') as fh:
json.dump(self._profile, fh, indent=2)
def _warn_once(self):
assert self.telemetry_enabled
if not self.warning_shown:
alert_msg = 'Aim collects anonymous usage analytics.'
opt_out_msg = 'Read how to opt-out here: '
opt_out_url = 'https://aimstack.readthedocs.io/en/latest/community/telemetry.html'
line_width = max(len(opt_out_msg), len(alert_msg), len(opt_out_url)) + 8
logger.warning('-' * line_width)
logger.warning('{}{}{}'.format(' ' * ((line_width - len(alert_msg)) // 2),
alert_msg,
' ' * ((line_width - len(alert_msg)) // 2)))
logger.warning('{}{}{}'.format(' ' * ((line_width - len(opt_out_msg)) // 2),
opt_out_msg,
' ' * ((line_width - len(opt_out_msg)) // 2)))
logger.warning('{}{}{}'.format(' ' * ((line_width - len(opt_out_url)) // 2),
opt_out_url,
' ' * ((line_width - len(opt_out_url)) // 2)))
logger.warning('-' * line_width)
with self._autocommit():
self._profile['telemetry']['warning-shown'] = True
@staticmethod
def default_profile():
return {
'envs': {},
'telemetry': {
'enable': True,
'warning-shown': False
},
'user-id': str(uuid.uuid4())
}
analytics = Analytics()
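# Usage sketch (an aside): exercising the module-level singleton defined above.
# With telemetry enabled this only logs the opt-out notice and updates
# ~/.aim_profile, since the segment calls are currently commented out.
if __name__ == "__main__":
    analytics.track_event(event_name="[Aim] example event", source="sketch")
    print("telemetry enabled:", analytics.telemetry_enabled)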
|
6589a208dabea124ee6b867592ef987fba29a147
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/onnx/test_layer_attributes.py
|
685507c7a18993aeba13d363d21620ac351261cf
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,363
|
py
|
test_layer_attributes.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import pytest
from nncf.onnx.graph.metatypes.onnx_metatypes import GENERAL_WEIGHT_LAYER_METATYPES
from nncf.onnx.graph.nncf_graph_builder import GraphConverter
from nncf.onnx.graph.nncf_graph_builder import ONNXLayerAttributes
from tests.onnx.models import OPSET_VERSION
from tests.onnx.models import create_initializer_tensor
class ONNXNodeCreator:
def __init__(self):
self._initializers = []
self._node = None
@property
def initializers(self):
return self._initializers
@property
def node(self):
return self._node
class ONNXConvCreator(ONNXNodeCreator):
def __init__(self, node_name, input_name, output_name, input_shape):
super().__init__()
in_ch = input_shape[0]
conv1_in_channels, conv1_out_channels, conv1_kernel_shape = in_ch, in_ch, (1, 1)
conv1_W = np.ones(shape=(conv1_out_channels, conv1_in_channels, *conv1_kernel_shape))
conv1_B = np.ones(shape=conv1_out_channels)
conv1_W_initializer_tensor_name = "Conv1_W"
conv1_W_initializer_tensor = create_initializer_tensor(
name=conv1_W_initializer_tensor_name, tensor_array=conv1_W, data_type=onnx.TensorProto.FLOAT
)
conv1_B_initializer_tensor_name = "Conv1_B"
conv1_B_initializer_tensor = create_initializer_tensor(
name=conv1_B_initializer_tensor_name, tensor_array=conv1_B, data_type=onnx.TensorProto.FLOAT
)
self._initializers = [conv1_W_initializer_tensor, conv1_B_initializer_tensor]
self._node = onnx.helper.make_node(
name=node_name,
op_type="Conv",
inputs=[input_name, conv1_W_initializer_tensor_name, conv1_B_initializer_tensor_name],
outputs=[output_name],
kernel_shape=conv1_kernel_shape,
)
class ONNXIdentityCreator(ONNXNodeCreator):
def __init__(self, node_name, input_name, output_name, input_shape):
super().__init__()
self._node = onnx.helper.make_node(
name=node_name, op_type="Identity", inputs=[input_name], outputs=[output_name]
)
def get_one_layer_model(op_name: str, node_creator: ONNXNodeCreator, input_shape):
model_input_name = "X"
model_output_name = "Y"
X = onnx.helper.make_tensor_value_info(model_input_name, onnx.TensorProto.FLOAT, input_shape)
Y = onnx.helper.make_tensor_value_info(model_output_name, onnx.TensorProto.FLOAT, input_shape)
node_desc = node_creator(op_name, model_input_name, model_output_name, input_shape)
graph_def = onnx.helper.make_graph(
nodes=[node_desc.node],
name="ConvNet",
inputs=[X],
outputs=[Y],
initializer=node_desc.initializers,
)
op = onnx.OperatorSetIdProto()
op.version = OPSET_VERSION
model = onnx.helper.make_model(graph_def, opset_imports=[op])
onnx.checker.check_model(model)
return model
@pytest.mark.parametrize(
"node_creator, ref_layer_attrs",
[
(ONNXIdentityCreator, None),
(
ONNXConvCreator,
ONNXLayerAttributes(
weight_attrs={1: {"name": "Conv1_W", "shape": [3, 3, 1, 1]}}, bias_attrs={"name": "Conv1_B"}
),
),
],
)
def test_layer_attributes(node_creator, ref_layer_attrs):
input_shape = [3, 3, 3]
op_name = "test_node"
onnx_model = get_one_layer_model(op_name, node_creator, input_shape)
nncf_graph = GraphConverter.create_nncf_graph(onnx_model)
node = nncf_graph.get_node_by_name(op_name)
if node.metatype in GENERAL_WEIGHT_LAYER_METATYPES:
assert node.layer_attributes.__dict__ == ref_layer_attrs.__dict__
else:
assert node.layer_attributes.__dict__ == ONNXLayerAttributes().__dict__
|
72c05a6eb2ae8db90698f88172885cd32ccdbd9f
|
83e7dc1281874779c46dfadcc15b2bb66d8e599c
|
/SConscript
|
d457d9a47de43c0b93aab4704490660219dbb580
|
[
"MIT"
] |
permissive
|
lvgl/lvgl
|
7d51d6774d6ac71df7101fc7ded56fea4b70be01
|
5c984b4a5364b6455966eb3a860153806c51626f
|
refs/heads/master
| 2023-08-30T22:39:20.283922
| 2023-08-30T19:55:29
| 2023-08-30T19:55:29
| 60,667,730
| 9,296
| 2,218
|
MIT
| 2023-09-14T17:59:34
| 2016-06-08T04:14:34
|
C
|
UTF-8
|
Python
| false
| false
| 192
|
SConscript
|
# RT-Thread building script for bridge
import os
from building import *
objs = []
cwd = GetCurrentDir()
objs = objs + SConscript(cwd + '/env_support/rt-thread/SConscript')
Return('objs')
|
|
4eea4c6388a527f26ef72c89dfbd6414c6f33802
|
e459a9608225b81bdb0a5b85cd19b7bd0f6df38e
|
/function_scheduling_distributed_framework/consumers/redis_consumer.py
|
a81e983cb79532155278eafa9ad44a3e06a95438
|
[
"Apache-2.0"
] |
permissive
|
ydf0509/distributed_framework
|
722be4957df97bfece9ca5b43d81b4e3bb09ed8e
|
1b1f32ed928fa44e0fb13fc738de90cb4339f408
|
refs/heads/master
| 2022-05-20T10:19:05.727086
| 2022-03-31T11:16:22
| 2022-03-31T11:16:22
| 201,225,545
| 359
| 90
|
Apache-2.0
| 2021-05-08T16:50:53
| 2019-08-08T09:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,592
|
py
|
redis_consumer.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:32
import json
# import time
from function_scheduling_distributed_framework.consumers.base_consumer import AbstractConsumer
from function_scheduling_distributed_framework.utils import RedisMixin
class RedisConsumer(AbstractConsumer, RedisMixin):
"""
    Implemented with redis as the middleware, using the redis list structure.
    If the consumer script is restarted at will, shut down abnormally, or crashes while running, a large batch of tasks can be lost. For high reliability use the rabbitmq, redis_ack_able or redis_stream middleware instead.
"""
BROKER_KIND = 2
# noinspection DuplicatedCode
def _shedual_task000(self):
while True:
result = self.redis_db_frame.blpop(self._queue_name, timeout=60)
if result:
                # self.logger.debug(f'Message taken from the redis [{self._queue_name}] queue: {result[1].decode()} ')
                self._print_message_get_from_broker('redis', result[1].decode())
task_dict = json.loads(result[1])
kw = {'body': task_dict}
self._submit_task(kw)
# noinspection DuplicatedCode
def _shedual_task(self):
while True:
with self.redis_db_frame_version3.pipeline() as p:
get_msg_batch_size = 100
p.lrange(self._queue_name, 0, get_msg_batch_size - 1)
p.ltrim(self._queue_name, get_msg_batch_size, -1)
task_str_list = p.execute()[0]
if task_str_list:
                # self.logger.debug(f'Message taken from the redis [{self._queue_name}] queue: {task_str_list} ')
self._print_message_get_from_broker('redis', task_str_list)
for task_str in task_str_list:
kw = {'body': json.loads(task_str)}
self._submit_task(kw)
else:
result = self.redis_db_frame.brpop(self._queue_name, timeout=60)
if result:
                    # self.logger.debug(f'Message taken from the redis [{self._queue_name}] queue: {result[1].decode()} ')
self._print_message_get_from_broker('redis', result[1].decode())
task_dict = json.loads(result[1])
kw = {'body': task_dict}
self._submit_task(kw)
def _confirm_consume(self, kw):
        pass  # redis has no consume-acknowledgement mechanism.
def _requeue(self, kw):
self.redis_db_frame.rpush(self._queue_name, json.dumps(kw['body']))
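# --- Editor's illustrative sketch (not part of the original module) ---
# The batched fetch used in _shedual_task, shown in isolation: LRANGE reads up to
# `batch_size` messages and LTRIM drops them, both queued on one pipeline so they
# execute together. The connection, queue name and batch size below are hypothetical.
def _batch_pop_example(queue_name='example_queue', batch_size=100):
    import redis  # assumed to be installed alongside the framework
    r = redis.Redis()
    with r.pipeline() as p:
        p.lrange(queue_name, 0, batch_size - 1)
        p.ltrim(queue_name, batch_size, -1)
        task_str_list = p.execute()[0]
    return [json.loads(task_str) for task_str in task_str_list]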
|
3d1b00bb907d030f7add1c9666b414251d752f7c
|
4e1dd6791505a154d92d6352e619c254e12ed574
|
/wagtail_localize/migrations/0003_delete_translation_sources.py
|
bc4009a76fa0a7ed08f93824ad0a4a22e822af9a
|
[
"BSD-3-Clause"
] |
permissive
|
wagtail/wagtail-localize
|
fba4b9db6c1b043a96d59178dede31dc117f9674
|
3f060bd061249815a8fbc79dc1c6e3e954ee3ecb
|
refs/heads/main
| 2023-08-04T14:32:06.683373
| 2023-07-27T15:02:52
| 2023-07-27T15:02:52
| 179,706,908
| 183
| 72
|
NOASSERTION
| 2023-09-07T08:33:48
| 2019-04-05T15:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
0003_delete_translation_sources.py
|
# Generated by Django 3.0.8 on 2020-08-05 09:04
from django.db import migrations
from django.db.models import OuterRef, Subquery
def delete_translation_sources(apps, schema_editor):
# Delete all but the latest source for each object
TranslatableObject = apps.get_model("wagtail_localize.TranslatableObject")
TranslationSource = apps.get_model("wagtail_localize.TranslationSource")
    latest_sources = (
TranslationSource.objects.filter(object_id=OuterRef("translation_key"))
.order_by("-created_at")
.values("id")[:1]
)
sources_to_keep = TranslatableObject.objects.annotate(
        latest_source_id=Subquery(latest_sources)
).values_list("latest_source_id", flat=True)
TranslationSource.objects.exclude(id__in=sources_to_keep).delete()
class Migration(migrations.Migration):
dependencies = [
("wagtail_localize", "0002_translation"),
]
operations = [
migrations.RunPython(delete_translation_sources, migrations.RunPython.noop),
]
|
0a8c555a7402c114e42ac6cbd8a415c76a9805d5
|
6fd2ab69501d71844a7329f62a3e62718fe9a9dd
|
/faiss/python/class_wrappers.py
|
3beb66141c946ed1bdaf2549040abafab7869818
|
[
"MIT"
] |
permissive
|
facebookresearch/faiss
|
f6a7b9df838309e8a231653df2dc764ef43d355e
|
9dc75d026d25b340771a7ef5d99b0f81a0dc5e34
|
refs/heads/main
| 2023-09-02T15:12:01.311542
| 2023-09-01T14:06:14
| 2023-09-01T14:06:14
| 81,227,005
| 24,723
| 3,437
|
MIT
| 2023-09-14T20:41:39
| 2017-02-07T16:07:05
|
C++
|
UTF-8
|
Python
| false
| false
| 39,537
|
py
|
class_wrappers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import faiss
import numpy as np
from faiss.loader import (
DirectMap,
IDSelector,
IDSelectorArray,
IDSelectorBatch,
OperatingPoints,
RangeSearchResult,
rev_swig_ptr,
swig_ptr,
try_extract_index_ivf,
)
##################################################################
# The functions below add or replace some methods for classes
# this is to be able to pass in numpy arrays directly
# The C++ version of the classnames will be suffixed with _c
#
# The docstrings in the wrappers are intended to be similar to numpy
# comments, they will appear with help(Class.method) or ?Class.method
# For methods that are not replaced, the C++ documentation will be used if
# swig 4.x is run with -doxygen.
##################################################################
# For most arrays we force the conversion to the target type with
# np.ascontiguousarray, but for uint8 codes, we raise a type error
# because it is unclear how the conversion should occur: with a view
# (= cast) or conversion?
def _check_dtype_uint8(codes):
if codes.dtype != 'uint8':
raise TypeError("Input argument %s must be ndarray of dtype "
" uint8, but found %s" % ("codes", codes.dtype))
return np.ascontiguousarray(codes)
def replace_method(the_class, name, replacement, ignore_missing=False):
""" Replaces a method in a class with another version. The old method
is renamed to method_name_c (because presumably it was implemented in C) """
try:
orig_method = getattr(the_class, name)
except AttributeError:
if ignore_missing:
return
raise
if orig_method.__name__ == 'replacement_' + name:
# replacement was done in parent class
return
setattr(the_class, name + '_c', orig_method)
setattr(the_class, name, replacement)
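# --- Editor's illustrative sketch (not part of the original module) ---
# What replace_method does on a hypothetical class: the original method stays
# reachable under the "<name>_c" suffix while the numpy-friendly wrapper takes
# its place. The demo class and wrapper below are invented for illustration.
def _replace_method_demo():
    class _Dummy:
        def add(self, n, ptr):
            return ('c-level add', n)

    def replacement_add(self, x):
        # wrapper delegating to the renamed "C-level" method
        return self.add_c(len(x), None)

    replace_method(_Dummy, 'add', replacement_add)
    assert _Dummy().add([1.0, 2.0]) == ('c-level add', 2)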
def handle_Clustering(the_class):
def replacement_train(self, x, index, weights=None):
"""Perform clustering on a set of vectors. The index is used for assignment.
Parameters
----------
x : array_like
Training vectors, shape (n, self.d). `dtype` must be float32.
index : faiss.Index
Index used for assignment. The dimension of the index should be `self.d`.
weights : array_like, optional
Per training sample weight (size n) used when computing the weighted
average to obtain the centroid (default is 1 for all training vectors).
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
if weights is not None:
weights = np.ascontiguousarray(weights, dtype='float32')
assert weights.shape == (n, )
self.train_c(n, swig_ptr(x), index, swig_ptr(weights))
else:
self.train_c(n, swig_ptr(x), index)
def replacement_train_encoded(self, x, codec, index, weights=None):
""" Perform clustering on a set of compressed vectors. The index is used for assignment.
The decompression is performed on-the-fly.
Parameters
----------
x : array_like
Training vectors, shape (n, codec.code_size()). `dtype` must be `uint8`.
codec : faiss.Index
Index used to decode the vectors. Should have dimension `self.d`.
index : faiss.Index
Index used for assignment. The dimension of the index should be `self.d`.
        weights : array_like, optional
Per training sample weight (size n) used when computing the weighted
average to obtain the centroid (default is 1 for all training vectors).
"""
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == codec.sa_code_size()
assert codec.d == index.d
if weights is not None:
weights = np.ascontiguousarray(weights, dtype='float32')
assert weights.shape == (n, )
self.train_encoded_c(n, swig_ptr(x), codec,
index, swig_ptr(weights))
else:
self.train_encoded_c(n, swig_ptr(x), codec, index)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'train_encoded', replacement_train_encoded)
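# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of the wrapped Clustering.train from user code, assuming the handlers
# have been applied at import time (as faiss/__init__.py does). Sizes are arbitrary.
def _clustering_usage_demo():
    d, k, n = 16, 8, 512
    x = np.random.rand(n, d).astype('float32')
    clus = faiss.Clustering(d, k)
    index = faiss.IndexFlatL2(d)
    clus.train(x, index)   # numpy array passed directly to the wrapped method
    centroids = faiss.vector_float_to_array(clus.centroids).reshape(k, d)
    return centroids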
def handle_Clustering1D(the_class):
def replacement_train_exact(self, x):
"""Perform clustering on a set of 1D vectors.
Parameters
----------
x : array_like
Training vectors, shape (n, 1). `dtype` must be float32.
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
self.train_exact_c(n, swig_ptr(x))
replace_method(the_class, 'train_exact', replacement_train_exact)
def handle_Quantizer(the_class):
def replacement_train(self, x):
""" Train the quantizer on a set of training vectors.
Parameters
----------
x : array_like
Training vectors, shape (n, self.d). `dtype` must be float32.
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_compute_codes(self, x):
""" Compute the codes corresponding to a set of vectors.
Parameters
----------
x : array_like
Vectors to encode, shape (n, self.d). `dtype` must be float32.
Returns
-------
codes : array_like
Corresponding code for each vector, shape (n, self.code_size)
and `dtype` uint8.
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
codes = np.empty((n, self.code_size), dtype='uint8')
self.compute_codes_c(swig_ptr(x), swig_ptr(codes), n)
return codes
def replacement_decode(self, codes):
"""Reconstruct an approximation of vectors given their codes.
Parameters
----------
codes : array_like
Codes to decode, shape (n, self.code_size). `dtype` must be uint8.
Returns
-------
Reconstructed vectors for each code, shape `(n, d)` and `dtype` float32.
"""
n, cs = codes.shape
codes = _check_dtype_uint8(codes)
assert cs == self.code_size
x = np.empty((n, self.d), dtype='float32')
self.decode_c(swig_ptr(codes), swig_ptr(x), n)
return x
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'compute_codes', replacement_compute_codes)
replace_method(the_class, 'decode', replacement_decode)
def handle_NSG(the_class):
def replacement_build(self, x, graph):
n, d = x.shape
assert d == self.d
assert graph.ndim == 2
assert graph.shape[0] == n
K = graph.shape[1]
x = np.ascontiguousarray(x, dtype='float32')
graph = np.ascontiguousarray(graph, dtype='int64')
self.build_c(n, swig_ptr(x), swig_ptr(graph), K)
replace_method(the_class, 'build', replacement_build)
def handle_Index(the_class):
def replacement_add(self, x):
"""Adds vectors to the index.
The index must be trained before vectors can be added to it.
The vectors are implicitly numbered in sequence. When `n` vectors are
added to the index, they are given ids `ntotal`, `ntotal + 1`, ..., `ntotal + n - 1`.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
"""Adds vectors with arbitrary ids to the index (not all indexes support this).
The index must be trained before vectors can be added to it.
Vector `i` is stored in `x[i]` and has id `ids[i]`.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
ids : array_like
            Array of ids of size n. The ids must be of type `int64`. Note that `-1` is reserved
in result lists to mean "not found" so it's better to not use it as an id.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
ids = np.ascontiguousarray(ids, dtype='int64')
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_assign(self, x, k, labels=None):
"""Find the k nearest neighbors of the set of vectors x in the index.
This is the same as the `search` method, but discards the distances.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
labels : array_like, optional
Labels array to store the results.
Returns
-------
labels: array_like
Labels of the nearest neighbors, shape (n, k).
When not enough results are found, the label is set to -1
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
if labels is None:
labels = np.empty((n, k), dtype=np.int64)
else:
assert labels.shape == (n, k)
self.assign_c(n, swig_ptr(x), swig_ptr(labels), k)
return labels
def replacement_train(self, x):
"""Trains the index on a representative set of vectors.
The index must be trained before vectors can be added to it.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
self.train_c(n, swig_ptr(x))
def replacement_search(self, x, k, *, params=None, D=None, I=None):
"""Find the k nearest neighbors of the set of vectors x in the index.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
params : SearchParameters
Search parameters of the current search (overrides the class-level params)
D : array_like, optional
Distance array to store the result.
I : array_like, optional
Labels array to store the results.
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (n, k). When not enough results are found
            the distance is set to +Inf or -Inf.
I : array_like
Labels of the nearest neighbors, shape (n, k).
When not enough results are found, the label is set to -1
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
assert k > 0
if D is None:
D = np.empty((n, k), dtype=np.float32)
else:
assert D.shape == (n, k)
if I is None:
I = np.empty((n, k), dtype=np.int64)
else:
assert I.shape == (n, k)
self.search_c(n, swig_ptr(x), k, swig_ptr(D), swig_ptr(I), params)
return D, I
def replacement_search_and_reconstruct(self, x, k, *, params=None, D=None, I=None, R=None):
"""Find the k nearest neighbors of the set of vectors x in the index,
and return an approximation of these vectors.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
params : SearchParameters
Search parameters of the current search (overrides the class-level params)
D : array_like, optional
Distance array to store the result.
I : array_like, optional
Labels array to store the result.
R : array_like, optional
            Reconstruction array to store the result.
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (n, k). When not enough results are found
            the distance is set to +Inf or -Inf.
I : array_like
Labels of the nearest neighbors, shape (n, k). When not enough results are found,
the label is set to -1
R : array_like
Approximate (reconstructed) nearest neighbor vectors, shape (n, k, d).
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
assert k > 0
if D is None:
D = np.empty((n, k), dtype=np.float32)
else:
assert D.shape == (n, k)
if I is None:
I = np.empty((n, k), dtype=np.int64)
else:
assert I.shape == (n, k)
if R is None:
R = np.empty((n, k, d), dtype=np.float32)
else:
assert R.shape == (n, k, d)
self.search_and_reconstruct_c(
n, swig_ptr(x),
k, swig_ptr(D),
swig_ptr(I), swig_ptr(R), params
)
return D, I, R
def replacement_remove_ids(self, x):
"""Remove some ids from the index.
This is a O(ntotal) operation by default, so could be expensive.
Parameters
----------
x : array_like or faiss.IDSelector
Either an IDSelector that returns True for vectors to remove, or a
            list of ids to remove (1D array of int64). When `x` is a list,
it is wrapped into an IDSelector.
Returns
-------
n_remove: int
number of vectors that were removed
"""
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
index_ivf = try_extract_index_ivf(self)
x = np.ascontiguousarray(x, dtype='int64')
if index_ivf and index_ivf.direct_map.type == DirectMap.Hashtable:
sel = IDSelectorArray(x.size, swig_ptr(x))
else:
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
def replacement_reconstruct(self, key, x=None):
"""Approximate reconstruction of one vector from the index.
Parameters
----------
key : int
Id of the vector to reconstruct
x : array_like, optional
pre-allocated array to store the results
Returns
-------
x : array_like reconstructed vector, size `self.d`, `dtype`=float32
"""
if x is None:
x = np.empty(self.d, dtype=np.float32)
else:
assert x.shape == (self.d, )
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_reconstruct_batch(self, key, x=None):
"""Approximate reconstruction of several vectors from the index.
Parameters
----------
key : array of ints
Ids of the vectors to reconstruct
x : array_like, optional
pre-allocated array to store the results
Returns
-------
x : array_like
            reconstructed vectors, size `len(key), self.d`
"""
key = np.ascontiguousarray(key, dtype='int64')
n, = key.shape
if x is None:
x = np.empty((n, self.d), dtype=np.float32)
else:
assert x.shape == (n, self.d)
self.reconstruct_batch_c(n, swig_ptr(key), swig_ptr(x))
return x
def replacement_reconstruct_n(self, n0=0, ni=-1, x=None):
"""Approximate reconstruction of vectors `n0` ... `n0 + ni - 1` from the index.
Missing vectors trigger an exception.
Parameters
----------
n0 : int
Id of the first vector to reconstruct (default 0)
ni : int
Number of vectors to reconstruct (-1 = default = ntotal)
x : array_like, optional
pre-allocated array to store the results
Returns
-------
x : array_like
Reconstructed vectors, size (`ni`, `self.d`), `dtype`=float32
"""
if ni == -1:
ni = self.ntotal - n0
if x is None:
x = np.empty((ni, self.d), dtype=np.float32)
else:
assert x.shape == (ni, self.d)
self.reconstruct_n_c(n0, ni, swig_ptr(x))
return x
def replacement_update_vectors(self, keys, x):
n = keys.size
assert keys.shape == (n, )
assert x.shape == (n, self.d)
x = np.ascontiguousarray(x, dtype='float32')
keys = np.ascontiguousarray(keys, dtype='int64')
self.update_vectors_c(n, swig_ptr(keys), swig_ptr(x))
    # No support for passed-in output buffers
def replacement_range_search(self, x, thresh, *, params=None):
"""Search vectors that are within a distance of the query vectors.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
thresh : float
Threshold to select neighbors. All elements within this radius are returned,
except for maximum inner product indexes, where the elements above the
threshold are returned
params : SearchParameters
Search parameters of the current search (overrides the class-level params)
Returns
-------
lims: array_like
Starting index of the results for each query vector, size n+1.
D : array_like
Distances of the nearest neighbors, shape `lims[n]`. The distances for
query i are in `D[lims[i]:lims[i+1]]`.
I : array_like
Labels of nearest neighbors, shape `lims[n]`. The labels for query i
are in `I[lims[i]:lims[i+1]]`.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
thresh = float(thresh)
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res, params)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_search_preassigned(self, x, k, Iq, Dq, *, params=None, D=None, I=None):
"""Find the k nearest neighbors of the set of vectors x in an IVF index,
with precalculated coarse quantization assignment.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
Dq : array_like, optional
Distance array to the centroids, size (n, nprobe)
Iq : array_like, optional
Nearest centroids, size (n, nprobe)
params : SearchParameters
Search parameters of the current search (overrides the class-level params)
D : array_like, optional
Distance array to store the result.
I : array_like, optional
Labels array to store the results.
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (n, k). When not enough results are found
            the distance is set to +Inf or -Inf.
I : array_like
Labels of the nearest neighbors, shape (n, k).
When not enough results are found, the label is set to -1
"""
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d
assert k > 0
if D is None:
D = np.empty((n, k), dtype=np.float32)
else:
assert D.shape == (n, k)
if I is None:
I = np.empty((n, k), dtype=np.int64)
else:
assert I.shape == (n, k)
Iq = np.ascontiguousarray(Iq, dtype='int64')
assert params is None, "params not supported"
assert Iq.shape == (n, self.nprobe)
if Dq is not None:
Dq = np.ascontiguousarray(Dq, dtype='float32')
assert Dq.shape == Iq.shape
self.search_preassigned_c(
n, swig_ptr(x),
k,
swig_ptr(Iq), swig_ptr(Dq),
swig_ptr(D), swig_ptr(I),
False
)
return D, I
def replacement_range_search_preassigned(self, x, thresh, Iq, Dq, *, params=None):
"""Search vectors that are within a distance of the query vectors.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
thresh : float
Threshold to select neighbors. All elements within this radius are returned,
except for maximum inner product indexes, where the elements above the
threshold are returned
Iq : array_like, optional
Nearest centroids, size (n, nprobe)
Dq : array_like, optional
Distance array to the centroids, size (n, nprobe)
params : SearchParameters
Search parameters of the current search (overrides the class-level params)
Returns
-------
lims: array_like
Starting index of the results for each query vector, size n+1.
D : array_like
Distances of the nearest neighbors, shape `lims[n]`. The distances for
query i are in `D[lims[i]:lims[i+1]]`.
I : array_like
Labels of nearest neighbors, shape `lims[n]`. The labels for query i
are in `I[lims[i]:lims[i+1]]`.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
Iq = np.ascontiguousarray(Iq, dtype='int64')
assert params is None, "params not supported"
assert Iq.shape == (n, self.nprobe)
if Dq is not None:
Dq = np.ascontiguousarray(Dq, dtype='float32')
assert Dq.shape == Iq.shape
thresh = float(thresh)
res = RangeSearchResult(n)
self.range_search_preassigned_c(
n, swig_ptr(x), thresh,
swig_ptr(Iq), swig_ptr(Dq),
res
)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_sa_encode(self, x, codes=None):
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
if codes is None:
codes = np.empty((n, self.sa_code_size()), dtype=np.uint8)
else:
assert codes.shape == (n, self.sa_code_size())
self.sa_encode_c(n, swig_ptr(x), swig_ptr(codes))
return codes
def replacement_sa_decode(self, codes, x=None):
n, cs = codes.shape
assert cs == self.sa_code_size()
codes = _check_dtype_uint8(codes)
if x is None:
x = np.empty((n, self.d), dtype=np.float32)
else:
assert x.shape == (n, self.d)
self.sa_decode_c(n, swig_ptr(codes), swig_ptr(x))
return x
def replacement_add_sa_codes(self, codes, ids=None):
n, cs = codes.shape
assert cs == self.sa_code_size()
codes = _check_dtype_uint8(codes)
if ids is not None:
assert ids.shape == (n,)
ids = swig_ptr(ids)
self.add_sa_codes_c(n, swig_ptr(codes), ids)
def replacement_permute_entries(self, perm):
n, = perm.shape
assert n == self.ntotal
perm = np.ascontiguousarray(perm, dtype='int64')
self.permute_entries_c(faiss.swig_ptr(perm))
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'assign', replacement_assign)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'reconstruct_batch',
replacement_reconstruct_batch)
replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'update_vectors', replacement_update_vectors,
ignore_missing=True)
replace_method(the_class, 'search_and_reconstruct',
replacement_search_and_reconstruct, ignore_missing=True)
# these ones are IVF-specific
replace_method(the_class, 'search_preassigned',
replacement_search_preassigned, ignore_missing=True)
replace_method(the_class, 'range_search_preassigned',
replacement_range_search_preassigned, ignore_missing=True)
replace_method(the_class, 'sa_encode', replacement_sa_encode)
replace_method(the_class, 'sa_decode', replacement_sa_decode)
replace_method(the_class, 'add_sa_codes', replacement_add_sa_codes,
ignore_missing=True)
replace_method(the_class, 'permute_entries', replacement_permute_entries,
ignore_missing=True)
# get/set state for pickle
# the data is serialized to std::vector -> numpy array -> python bytes
# so not very efficient for now.
def index_getstate(self):
return {"this": faiss.serialize_index(self).tobytes()}
def index_setstate(self, st):
index2 = faiss.deserialize_index(np.frombuffer(st["this"], dtype="uint8"))
self.this = index2.this
the_class.__getstate__ = index_getstate
the_class.__setstate__ = index_setstate
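# --- Editor's illustrative sketch (not part of the original module) ---
# The numpy-facing API an index exposes once handle_Index has patched it, assuming
# the handlers are applied at import time as in faiss/__init__.py. Data is random.
def _wrapped_index_usage_demo():
    d, k = 8, 4
    xb = np.random.rand(200, d).astype('float32')
    xq = np.random.rand(5, d).astype('float32')
    index = faiss.IndexFlatL2(d)
    index.add(xb)                                 # numpy array accepted directly
    D, I = index.search(xq, k)                    # shapes (5, k)
    lims, RD, RI = index.range_search(xq, 0.5)    # results for query i: RI[lims[i]:lims[i+1]]
    return D, I, lims, RD, RI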
def handle_IndexBinary(the_class):
def replacement_add(self, x):
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == self.code_size
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
n, d = x.shape
x = _check_dtype_uint8(x)
ids = np.ascontiguousarray(ids, dtype='int64')
assert d == self.code_size
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_train(self, x):
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == self.code_size
self.train_c(n, swig_ptr(x))
def replacement_reconstruct(self, key):
x = np.empty(self.code_size, dtype=np.uint8)
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_reconstruct_n(self, n0=0, ni=-1, x=None):
if ni == -1:
ni = self.ntotal - n0
if x is None:
x = np.empty((ni, self.code_size), dtype=np.uint8)
else:
assert x.shape == (ni, self.code_size)
self.reconstruct_n_c(n0, ni, swig_ptr(x))
return x
def replacement_search(self, x, k):
x = _check_dtype_uint8(x)
n, d = x.shape
assert d == self.code_size
assert k > 0
distances = np.empty((n, k), dtype=np.int32)
labels = np.empty((n, k), dtype=np.int64)
self.search_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels))
return distances, labels
def replacement_search_preassigned(self, x, k, Iq, Dq):
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == self.code_size
assert k > 0
D = np.empty((n, k), dtype=np.int32)
I = np.empty((n, k), dtype=np.int64)
Iq = np.ascontiguousarray(Iq, dtype='int64')
assert Iq.shape == (n, self.nprobe)
if Dq is not None:
Dq = np.ascontiguousarray(Dq, dtype='int32')
assert Dq.shape == Iq.shape
self.search_preassigned_c(
n, swig_ptr(x),
k,
swig_ptr(Iq), swig_ptr(Dq),
swig_ptr(D), swig_ptr(I),
False
)
return D, I
def replacement_range_search(self, x, thresh):
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == self.code_size
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_range_search_preassigned(self, x, thresh, Iq, Dq, *, params=None):
n, d = x.shape
x = _check_dtype_uint8(x)
assert d == self.code_size
Iq = np.ascontiguousarray(Iq, dtype='int64')
assert params is None, "params not supported"
assert Iq.shape == (n, self.nprobe)
if Dq is not None:
Dq = np.ascontiguousarray(Dq, dtype='int32')
assert Dq.shape == Iq.shape
thresh = int(thresh)
res = RangeSearchResult(n)
self.range_search_preassigned_c(
n, swig_ptr(x), thresh,
swig_ptr(Iq), swig_ptr(Dq),
res
)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_remove_ids(self, x):
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
x = np.ascontiguousarray(x, dtype='int64')
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
replace_method(the_class, 'search_preassigned',
replacement_search_preassigned, ignore_missing=True)
replace_method(the_class, 'range_search_preassigned',
replacement_range_search_preassigned, ignore_missing=True)
def handle_VectorTransform(the_class):
def apply_method(self, x):
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d_in
y = np.empty((n, self.d_out), dtype=np.float32)
self.apply_noalloc(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_reverse_transform(self, x):
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d_out
y = np.empty((n, self.d_in), dtype=np.float32)
self.reverse_transform_c(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_vt_train(self, x):
n, d = x.shape
x = np.ascontiguousarray(x, dtype='float32')
assert d == self.d_in
self.train_c(n, swig_ptr(x))
replace_method(the_class, 'train', replacement_vt_train)
    # apply is reserved in Python...
the_class.apply_py = apply_method
the_class.apply = apply_method
replace_method(the_class, 'reverse_transform',
replacement_reverse_transform)
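# --- Editor's illustrative sketch (not part of the original module) ---
# Using a wrapped VectorTransform (here a PCA matrix) from user code; the class name
# and dimensions are illustrative, and the handlers are assumed to be already applied.
def _vector_transform_usage_demo():
    d_in, d_out = 64, 16
    x = np.random.rand(1000, d_in).astype('float32')
    pca = faiss.PCAMatrix(d_in, d_out)
    pca.train(x)                        # wrapped: accepts a numpy array
    y = pca.apply(x)                    # shape (1000, d_out)
    x_back = pca.reverse_transform(y)   # approximate reconstruction, shape (1000, d_in)
    return y, x_back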
def handle_AutoTuneCriterion(the_class):
def replacement_set_groundtruth(self, D, I):
        if D is not None:
assert I.shape == D.shape
self.nq, self.gt_nnn = I.shape
self.set_groundtruth_c(
            self.gt_nnn, swig_ptr(D) if D is not None else None, swig_ptr(I))
def replacement_evaluate(self, D, I):
assert I.shape == D.shape
assert I.shape == (self.nq, self.nnn)
return self.evaluate_c(swig_ptr(D), swig_ptr(I))
replace_method(the_class, 'set_groundtruth', replacement_set_groundtruth)
replace_method(the_class, 'evaluate', replacement_evaluate)
def handle_ParameterSpace(the_class):
def replacement_explore(self, index, xq, crit):
assert xq.shape == (crit.nq, index.d)
xq = np.ascontiguousarray(xq, dtype='float32')
ops = OperatingPoints()
self.explore_c(index, crit.nq, swig_ptr(xq),
crit, ops)
return ops
replace_method(the_class, 'explore', replacement_explore)
def handle_MatrixStats(the_class):
original_init = the_class.__init__
def replacement_init(self, m):
assert len(m.shape) == 2
m = np.ascontiguousarray(m, dtype='float32')
original_init(self, m.shape[0], m.shape[1], swig_ptr(m))
the_class.__init__ = replacement_init
def handle_IOWriter(the_class):
""" add a write_bytes method """
def write_bytes(self, b):
return self(swig_ptr(b), 1, len(b))
the_class.write_bytes = write_bytes
def handle_IOReader(the_class):
""" add a read_bytes method """
def read_bytes(self, totsz):
buf = bytearray(totsz)
was_read = self(swig_ptr(buf), 1, len(buf))
return bytes(buf[:was_read])
the_class.read_bytes = read_bytes
def handle_IndexRowwiseMinMax(the_class):
def replacement_train_inplace(self, x):
"""Trains the index on a representative set of vectors inplace.
The index must be trained before vectors can be added to it.
This call WILL change the values in the input array, because
        of two scaling procedures being performed in place.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
x = np.ascontiguousarray(x, dtype='float32')
self.train_inplace_c(n, swig_ptr(x))
replace_method(the_class, 'train_inplace', replacement_train_inplace)
def handle_CodePacker(the_class):
def replacement_pack_1(self, x, offset, block):
assert x.shape == (self.code_size,)
nblock, block_size = block.shape
assert block_size == self.block_size
assert 0 <= offset < block_size * self.nvec
self.pack_1_c(swig_ptr(x), offset, faiss.swig_ptr(block))
def replacement_unpack_1(self, block, offset):
nblock, block_size = block.shape
assert block_size == self.block_size
assert 0 <= offset < block_size * self.nvec
x = np.zeros(self.code_size, dtype='uint8')
self.unpack_1_c(faiss.swig_ptr(block), offset, swig_ptr(x))
return x
replace_method(the_class, 'pack_1', replacement_pack_1)
replace_method(the_class, 'unpack_1', replacement_unpack_1)
######################################################
# MapLong2Long interface
######################################################
def handle_MapLong2Long(the_class):
def replacement_map_add(self, keys, vals):
n, = keys.shape
assert (n,) == vals.shape
self.add_c(n, swig_ptr(keys), swig_ptr(vals))
def replacement_map_search_multiple(self, keys):
n, = keys.shape
vals = np.empty(n, dtype='int64')
self.search_multiple_c(n, swig_ptr(keys), swig_ptr(vals))
return vals
replace_method(the_class, 'add', replacement_map_add)
replace_method(the_class, 'search_multiple',
replacement_map_search_multiple)
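# --- Editor's illustrative sketch (not part of the original module) ---
# The wrapped MapLong2Long interface with numpy int64 arrays; this sketch assumes
# keys that were never added come back as -1 from the underlying C++ map.
def _map_long2long_demo():
    m = faiss.MapLong2Long()
    keys = np.array([10, 20, 30], dtype='int64')
    vals = np.array([1, 2, 3], dtype='int64')
    m.add(keys, vals)
    return m.search_multiple(np.array([20, 99], dtype='int64'))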
######################################################
# SearchParameters and related interface
######################################################
def add_to_referenced_objects(self, ref):
if not hasattr(self, 'referenced_objects'):
self.referenced_objects = [ref]
else:
self.referenced_objects.append(ref)
class RememberSwigOwnership:
"""
    SWIG's setattr transfers ownership of SWIG-wrapped objects to the class
(btw this seems to contradict https://www.swig.org/Doc1.3/Python.html#Python_nn22
31.4.2)
This interferes with how we manage ownership: with the referenced_objects
table. Therefore, we reset the thisown field in this context manager.
"""
def __init__(self, obj):
self.obj = obj
def __enter__(self):
if hasattr(self.obj, "thisown"):
self.old_thisown = self.obj.thisown
else:
self.old_thisown = None
def __exit__(self, *ignored):
if self.old_thisown is not None:
self.obj.thisown = self.old_thisown
def handle_SearchParameters(the_class):
""" this wrapper is to enable initializations of the form
SearchParametersXX(a=3, b=SearchParamsYY)
This also requires the enclosing class to keep a reference on the
    sub-object, since the C++ code assumes the object ownership is
handled externally.
"""
the_class.original_init = the_class.__init__
def replacement_init(self, **args):
self.original_init()
for k, v in args.items():
assert hasattr(self, k)
with RememberSwigOwnership(v):
setattr(self, k, v)
if type(v) not in (int, float, bool, str):
add_to_referenced_objects(self, v)
the_class.__init__ = replacement_init
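# --- Editor's illustrative sketch (not part of the original module) ---
# The keyword-initialization form that handle_SearchParameters enables. The class and
# field names below (SearchParametersIVF, SearchParametersHNSW, nprobe, quantizer_params,
# efSearch) assume a recent faiss build; the enclosing object keeps a Python reference
# on the nested one via referenced_objects, matching the ownership note above.
def _search_parameters_demo():
    params = faiss.SearchParametersIVF(
        nprobe=16,
        quantizer_params=faiss.SearchParametersHNSW(efSearch=64),
    )
    return params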
def handle_IDSelectorSubset(the_class, class_owns, force_int64=True):
the_class.original_init = the_class.__init__
def replacement_init(self, *args):
if len(args) == 1:
# assume it's an array
subset, = args
if force_int64:
subset = np.ascontiguousarray(subset, dtype='int64')
args = (len(subset), faiss.swig_ptr(subset))
if not class_owns:
add_to_referenced_objects(self, subset)
self.original_init(*args)
the_class.__init__ = replacement_init
def handle_CodeSet(the_class):
def replacement_insert(self, codes, inserted=None):
n, d = codes.shape
assert d == self.d
codes = np.ascontiguousarray(codes, dtype=np.uint8)
if inserted is None:
inserted = np.empty(n, dtype=bool)
else:
assert inserted.shape == (n, )
self.insert_c(n, swig_ptr(codes), swig_ptr(inserted))
return inserted
replace_method(the_class, 'insert', replacement_insert)
|
488ece148f356b6605bb9ac2175e9e6581bcfa0a
|
21e67cc6406a3c8063fae691a5f8b5c46bf5d53f
|
/tests/unit/test_client.py
|
36ac9288f74353b0efa749069c62f63d3e2e960e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hetznercloud/hcloud-python
|
6b524f4519fb933d65fbf039e7f78b251b493d10
|
982e35b47e36e24b140fcad84817a4b28dc09c2d
|
refs/heads/main
| 2023-09-03T14:08:03.920174
| 2023-08-25T14:14:21
| 2023-08-25T14:14:21
| 162,585,627
| 221
| 49
|
MIT
| 2023-09-12T07:02:46
| 2018-12-20T13:48:22
|
Python
|
UTF-8
|
Python
| false
| false
| 7,128
|
py
|
test_client.py
|
from __future__ import annotations
import json
from unittest.mock import MagicMock
import pytest
import requests
from hcloud import APIException, Client
class TestHetznerClient:
@pytest.fixture()
def client(self):
Client._version = "0.0.0"
client = Client(token="project_token")
client._requests_session = MagicMock()
return client
@pytest.fixture()
def response(self):
response = requests.Response()
response.status_code = 200
response._content = json.dumps({"result": "data"}).encode("utf-8")
return response
@pytest.fixture()
def fail_response(self, response):
response.status_code = 422
error = {
"code": "invalid_input",
"message": "invalid input in field 'broken_field': is too long",
"details": {
"fields": [{"name": "broken_field", "messages": ["is too long"]}]
},
}
response._content = json.dumps({"error": error}).encode("utf-8")
return response
@pytest.fixture()
def rate_limit_response(self, response):
response.status_code = 422
error = {
"code": "rate_limit_exceeded",
"message": "limit of 10 requests per hour reached",
"details": {},
}
response._content = json.dumps({"error": error}).encode("utf-8")
return response
def test__get_user_agent(self, client):
user_agent = client._get_user_agent()
assert user_agent == "hcloud-python/0.0.0"
def test__get_user_agent_with_application_name(self, client):
client = Client(token="project_token", application_name="my-app")
user_agent = client._get_user_agent()
assert user_agent == "my-app hcloud-python/0.0.0"
def test__get_user_agent_with_application_name_and_version(self, client):
client = Client(
token="project_token",
application_name="my-app",
application_version="1.0.0",
)
user_agent = client._get_user_agent()
assert user_agent == "my-app/1.0.0 hcloud-python/0.0.0"
def test__get_headers(self, client):
headers = client._get_headers()
assert headers == {
"User-Agent": "hcloud-python/0.0.0",
"Authorization": "Bearer project_token",
}
def test_request_library_mocked(self, client):
response = client.request("POST", "url", params={"1": 2})
assert response.__class__.__name__ == "MagicMock"
def test_request_ok(self, client, response):
client._requests_session.request.return_value = response
response = client.request(
"POST", "/servers", params={"argument": "value"}, timeout=2
)
client._requests_session.request.assert_called_once_with(
method="POST",
url="https://api.hetzner.cloud/v1/servers",
headers={
"User-Agent": "hcloud-python/0.0.0",
"Authorization": "Bearer project_token",
},
params={"argument": "value"},
timeout=2,
)
assert response == {"result": "data"}
def test_request_fails(self, client, fail_response):
client._requests_session.request.return_value = fail_response
with pytest.raises(APIException) as exception_info:
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
error = exception_info.value
assert error.code == "invalid_input"
assert error.message == "invalid input in field 'broken_field': is too long"
assert error.details["fields"][0]["name"] == "broken_field"
def test_request_500(self, client, fail_response):
fail_response.status_code = 500
fail_response.reason = "Internal Server Error"
fail_response._content = "Internal Server Error"
client._requests_session.request.return_value = fail_response
with pytest.raises(APIException) as exception_info:
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
error = exception_info.value
assert error.code == 500
assert error.message == "Internal Server Error"
assert error.details["content"] == "Internal Server Error"
def test_request_broken_json_200(self, client, response):
content = b"{'key': 'value'"
response.reason = "OK"
response._content = content
client._requests_session.request.return_value = response
with pytest.raises(APIException) as exception_info:
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
error = exception_info.value
assert error.code == 200
assert error.message == "OK"
assert error.details["content"] == content
def test_request_empty_content_200(self, client, response):
content = ""
response.reason = "OK"
response._content = content
client._requests_session.request.return_value = response
response = client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
assert response == ""
def test_request_500_empty_content(self, client, fail_response):
fail_response.status_code = 500
fail_response.reason = "Internal Server Error"
fail_response._content = ""
client._requests_session.request.return_value = fail_response
with pytest.raises(APIException) as exception_info:
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
error = exception_info.value
assert error.code == 500
assert error.message == "Internal Server Error"
assert error.details["content"] == ""
assert str(error) == "Internal Server Error"
def test_request_limit(self, client, rate_limit_response):
client._retry_wait_time = 0
client._requests_session.request.return_value = rate_limit_response
with pytest.raises(APIException) as exception_info:
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
error = exception_info.value
assert client._requests_session.request.call_count == 5
assert error.code == "rate_limit_exceeded"
assert error.message == "limit of 10 requests per hour reached"
def test_request_limit_then_success(self, client, rate_limit_response):
client._retry_wait_time = 0
response = requests.Response()
response.status_code = 200
response._content = json.dumps({"result": "data"}).encode("utf-8")
client._requests_session.request.side_effect = [rate_limit_response, response]
client.request(
"POST", "http://url.com", params={"argument": "value"}, timeout=2
)
assert client._requests_session.request.call_count == 2
|
59076009515369e36c552b93e3dfb10ad3065333
|
c9fdae5bb4deecb3c95db7a1fb9b08f95ef9547b
|
/examples/map.py
|
e248d1b7a9d8fc93082615af84a3ba13c61c5b00
|
[
"MIT"
] |
permissive
|
meraki-analytics/cassiopeia
|
2b6767116e076d883b426e6e0393ec814e8054a9
|
5002c578270ab5636ecff29357048ab02985418f
|
refs/heads/master
| 2023-08-24T07:10:20.130880
| 2023-07-29T00:33:15
| 2023-07-29T00:33:15
| 35,975,234
| 548
| 207
|
MIT
| 2023-08-09T05:59:04
| 2015-05-20T21:28:16
|
Python
|
UTF-8
|
Python
| false
| false
| 282
|
py
|
map.py
|
import cassiopeia as cass
from cassiopeia import Map, Maps
def get_maps():
maps = cass.get_maps(region="NA")
for map in maps:
print(map.name, map.id)
map = Map(name="Summoner's Rift", region="NA")
print(map.id)
if __name__ == "__main__":
get_maps()
|
b38a610339e2bd88563d5c0a0b03742792ea8df2
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/cpuspeed/conftest.py
|
be5a87b8d132dd713a8f742991fc5e60befdfe25
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
conftest.py
|
"""Fixtures for CPU Speed integration tests."""
from __future__ import annotations
from collections.abc import Generator
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from homeassistant.components.cpuspeed.const import DOMAIN
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="CPU Speed",
domain=DOMAIN,
data={},
unique_id=DOMAIN,
)
@pytest.fixture
def mock_cpuinfo_config_flow() -> Generator[MagicMock, None, None]:
"""Return a mocked get_cpu_info.
It is only used to check truthy or falsy values, so it is mocked
to return True.
"""
with patch(
"homeassistant.components.cpuspeed.config_flow.cpuinfo.get_cpu_info",
return_value=True,
) as cpuinfo_mock:
yield cpuinfo_mock
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
"""Mock setting up a config entry."""
with patch(
"homeassistant.components.cpuspeed.async_setup_entry", return_value=True
) as mock_setup:
yield mock_setup
@pytest.fixture
def mock_cpuinfo() -> Generator[MagicMock, None, None]:
"""Return a mocked get_cpu_info."""
info = {
"hz_actual": (3200000001, 0),
"arch_string_raw": "aargh",
"brand_raw": "Intel Ryzen 7",
"hz_advertised": (3600000001, 0),
}
with patch(
"homeassistant.components.cpuspeed.cpuinfo.get_cpu_info",
return_value=info,
) as cpuinfo_mock:
yield cpuinfo_mock
@pytest.fixture
async def init_integration(
hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_cpuinfo: MagicMock
) -> MockConfigEntry:
"""Set up the CPU Speed integration for testing."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
return mock_config_entry
|
e8731ff67e1f153a8b7d70fa0baae74fa92a0019
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/__pypy__/interp_builders.py
|
75af127636d132c9795aba178a3ad9c58b9a9531
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,415
|
py
|
interp_builders.py
|
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
from rpython.rlib.rstring import UnicodeBuilder, StringBuilder
from rpython.tool.sourcetools import func_with_new_name
def create_builder(name, strtype, builder_cls, newmethod):
if strtype is str:
unwrap = 'bytes'
else:
unwrap = unicode
class W_Builder(W_Root):
def __init__(self, space, size):
if size < 0:
self.builder = builder_cls()
else:
self.builder = builder_cls(size)
@unwrap_spec(size=int)
def descr__new__(space, w_subtype, size=-1):
return W_Builder(space, size)
@unwrap_spec(s=unwrap)
def descr_append(self, space, s):
self.builder.append(s)
@unwrap_spec(s=unwrap, start=int, end=int)
def descr_append_slice(self, space, s, start, end):
if not 0 <= start <= end <= len(s):
raise oefmt(space.w_ValueError, "bad start/stop")
self.builder.append_slice(s, start, end)
def descr_build(self, space):
w_s = getattr(space, newmethod)(self.builder.build())
# after build(), we can continue to append more strings
# to the same builder. This is supported since
# 2ff5087aca28 in RPython.
return w_s
def descr_len(self, space):
if self.builder is None:
raise oefmt(space.w_ValueError, "no length of built builder")
return space.newint(self.builder.getlength())
W_Builder.__name__ = "W_%s" % name
W_Builder.typedef = TypeDef(name,
__new__ = interp2app(func_with_new_name(
W_Builder.descr__new__.im_func,
'%s_new' % (name,))),
append = interp2app(W_Builder.descr_append),
append_slice = interp2app(W_Builder.descr_append_slice),
build = interp2app(W_Builder.descr_build),
__len__ = interp2app(W_Builder.descr_len),
)
W_Builder.typedef.acceptable_as_base_class = False
return W_Builder
W_StringBuilder = create_builder("StringBuilder", str, StringBuilder, "newbytes")
W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder, "newunicode")
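# --- Editor's illustrative sketch (not part of the original module) ---
# How the builders look from application-level code when running under PyPy 2, where
# they are exposed as __pypy__.builders (plain CPython does not provide this module).
def _app_level_usage_demo():
    from __pypy__.builders import StringBuilder
    b = StringBuilder(16)
    b.append("hello ")
    b.append_slice("worldwide", 0, 5)
    assert b.build() == "hello world"
    # after build() the builder can still be appended to and measured
    return len(b)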
|
41bed5d0719cd53622e30ba1d348147aa3a70ae3
|
05b85a5260e6a7b236693300208b35bde1ca73ee
|
/tests/test_auth_app/test_include_admin_routes.py
|
0b57425447c49ac61c5a5c8774c922b1c04a59d0
|
[
"MIT"
] |
permissive
|
dmontagu/fastapi-auth
|
d05e3440ee829e57cc7cdac1a987ac51b4e87f3a
|
d0e86774f66bd43e80376de19bdf034eb228dc07
|
refs/heads/master
| 2023-02-16T20:41:35.883202
| 2019-12-18T03:10:51
| 2019-12-18T04:11:46
| 228,746,139
| 131
| 10
|
MIT
| 2023-02-14T21:35:22
| 2019-12-18T03:00:54
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
test_include_admin_routes.py
|
from tests.test_auth_app.build_app import get_test_app
def test_excluded() -> None:
app = get_test_app(include_admin_routes=False)
assert len(app.router.routes) == 9
def test_included() -> None:
app = get_test_app(include_admin_routes=True)
assert len(app.router.routes) == 13
|
eae5b883e17aad20df91761eb03b9ba9fadeb6dc
|
cb35df97989fcc46831a8adb8de3434b94fd2ecd
|
/tests/benchmarks/bm_mesh_io.py
|
37155fae34691442d4f1acb0e47a1563f0746ba6
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
facebookresearch/pytorch3d
|
6d93b28c0f36a4b7efa0a8143726200c252d3502
|
a3d99cab6bf5eb69be8d5eb48895da6edd859565
|
refs/heads/main
| 2023-09-01T16:26:58.756831
| 2023-08-26T20:55:56
| 2023-08-26T20:55:56
| 217,433,767
| 7,964
| 1,342
|
NOASSERTION
| 2023-08-25T10:00:26
| 2019-10-25T02:23:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,621
|
py
|
bm_mesh_io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from fvcore.common.benchmark import benchmark
from tests.test_io_obj import TestMeshObjIO
from tests.test_io_ply import TestMeshPlyIO
def bm_save_load() -> None:
simple_kwargs_list = [
{"V": 100, "F": 200},
{"V": 1000, "F": 2000},
{"V": 10000, "F": 20000},
]
benchmark(
TestMeshObjIO.bm_load_simple_obj_with_init,
"LOAD_SIMPLE_OBJ",
simple_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshObjIO.bm_save_simple_obj_with_init,
"SAVE_SIMPLE_OBJ",
simple_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshPlyIO.bm_load_simple_ply_with_init,
"LOAD_SIMPLE_PLY",
simple_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshPlyIO.bm_save_simple_ply_with_init,
"SAVE_SIMPLE_PLY",
simple_kwargs_list,
warmup_iters=1,
)
complex_kwargs_list = [{"N": 8}, {"N": 32}, {"N": 128}]
benchmark(
TestMeshObjIO.bm_load_complex_obj,
"LOAD_COMPLEX_OBJ",
complex_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshObjIO.bm_save_complex_obj,
"SAVE_COMPLEX_OBJ",
complex_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshPlyIO.bm_load_complex_ply,
"LOAD_COMPLEX_PLY",
complex_kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshPlyIO.bm_save_complex_ply,
"SAVE_COMPLEX_PLY",
complex_kwargs_list,
warmup_iters=1,
)
# Texture loading benchmarks
kwargs_list = [{"R": 2}, {"R": 4}, {"R": 10}, {"R": 15}, {"R": 20}]
benchmark(
TestMeshObjIO.bm_load_texture_atlas,
"PYTORCH3D_TEXTURE_ATLAS",
kwargs_list,
warmup_iters=1,
)
kwargs_list = []
S = [64, 256, 1024]
F = [100, 1000, 10000]
R = [5, 10, 20]
test_cases = product(S, F, R)
for case in test_cases:
s, f, r = case
kwargs_list.append({"S": s, "F": f, "R": r})
benchmark(
TestMeshObjIO.bm_bilinear_sampling_vectorized,
"BILINEAR_VECTORIZED",
kwargs_list,
warmup_iters=1,
)
benchmark(
TestMeshObjIO.bm_bilinear_sampling_grid_sample,
"BILINEAR_GRID_SAMPLE",
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_save_load()
|
5643164cf51476f88bc20610e186a6110a31c763
|
3dee0bd6d42e548ef15ad0f43c9f810f18455b70
|
/bulkprovision/lambda_email.py
|
344603606f3e0147c5e6c738dde332eca27b8285
|
[
"Apache-2.0"
] |
permissive
|
aws-samples/aws-service-catalog-reference-architectures
|
aa38e9ef9ad2fb7f4fdf09971ec490fa7e8e1647
|
e57697a3dfa330fecb249f508467f2f97a061a16
|
refs/heads/master
| 2023-09-01T14:36:35.804458
| 2022-12-20T21:06:32
| 2022-12-20T21:06:32
| 125,886,663
| 424
| 238
|
Apache-2.0
| 2023-08-24T13:31:13
| 2018-03-19T16:17:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
lambda_email.py
|
import os
import json
import boto3
import logging
import time
logger = logging.getLogger()
#Ken Walsh 4-20-2020
#Reporting
s3client = boto3.client('s3')
dyclient = boto3.client('dynamodb')
ses_client = boto3.client('ses')
def lambda_handler(event,context):
_beg = "<html><head><title>Servic Catalog Bulk Deployment for </title>"
_beg += "<link rel=stylesheet href=https://s3.amazonaws.com/kenwalshtestad/cfn/public/css/styletable.css>"
_beg += "</head><body>"
_beg += "<table id=customers border=2>"
localtime = time.asctime( time.localtime(time.time()) )
_th='<tr><th colspan=4>%s</th></tr><tr>' % localtime
_r=''
_th += '<th>Status</th>'
_th += '<th>BatchId</th>'
_th += '<th>User</th>'
_th += '<th>SC Status</th>'
##########
tablename = os.environ['DynamoTablename']
sresults = dyclient.scan(TableName=tablename)
if 'Items' in sresults:
for t in sresults['Items']:
_r += '<td>%s</td>' % t['status']['S']
_r += '<td>%s</td>' % t['launchparams']['M']['BatchId']['S'] if "BatchId" in t['launchparams']['M'] else "NONE"
_r += '<td>%s</td>' % t['launchparams']['M']['UserName']['S']
_r += '<td>%s</td></tr><tr>' % t['scproductdetails']['M']['Status']['S']
_ret = _beg + '<tr>'+_th +'</tr>\n<tr>'+ _r +'</tr></table>'
#======================
m_event ={}
m_event['etoemail'] = event['ReportEmail']
m_event['esubject'] = 'sc work spaces bulk deployment'
DestBucket= os.environ['LambdaZipsBucket']
_skey = 'content/out/report.html'
b_putpriv(DestBucket,_skey,_ret,"text/html")
_l = gen_surl(DestBucket,_skey)
m_event['ebody'] = _ret +'<br><a href="' + _l +'">Click me Report</a>'
if check_for_ses_email(m_event) == True:
logger.info(sendemail(m_event))
event['EmailInfo'] = m_event
return event
##########################
###S3 functions #############################
def gen_surl(bucketname,keyname):
url = s3client.generate_presigned_url(ClientMethod='get_object',Params={'Bucket': bucketname,'Key': keyname})
return url
def b_putpriv(bucket,key,body,ctype):
srep = s3client.put_object( ACL='private',Body=body,Bucket=bucket,Key=key, ContentType=ctype,)
logger.info(srep)
return srep
###SES functions############################
def check_for_ses_email(event):
emails = ses_client.list_verified_email_addresses()
_ret = False
#logger.info(emails)
if event['etoemail'] in emails['VerifiedEmailAddresses']:
logger.info('Found ' + event['etoemail'] )
_ret = True
else:
response = ses_client.verify_email_identity(EmailAddress= event['etoemail'])
logger.info('Email address not Found ' + event['etoemail'] +" Verification sent")
return _ret
def sendemail(event):
charset = "UTF-8"
try:
#Provide the contents of the email.
response = ses_client.send_email(
Destination={
'ToAddresses': [
event['etoemail'],
],
},
Message={
'Body': {
'Html': {
'Charset': charset,
'Data': event['ebody'],
},
'Text': {
'Charset': charset,
'Data': event['ebody'],
},
},
'Subject': {
'Charset': charset,
'Data': event['esubject'],
},
},
Source=event['etoemail'],
)
except Exception as e:
logger.exception(e)
return(e)
else:
return('EmailSent to ' + event['etoemail'])
|
738aef224aae3f2d625199376d53ca62d1e90bdd
|
11bd2e7d6e686f3bcf5fe0f0e5da0a155aeb009f
|
/gcp_variant_transforms/transforms/combine_sample_ids_test.py
|
85420a7ee0a3a147388812a0f72145db38b8a1cd
|
[
"Apache-2.0"
] |
permissive
|
googlegenomics/gcp-variant-transforms
|
723e08983b69e0767d28cd41d5650f9c10cda00a
|
47844f7f1d2d40948674d127037eb666ae552beb
|
refs/heads/master
| 2022-05-17T23:53:25.225569
| 2022-03-31T18:31:06
| 2022-03-31T18:31:06
| 110,025,283
| 131
| 66
|
Apache-2.0
| 2022-03-29T12:06:10
| 2017-11-08T20:12:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,272
|
py
|
combine_sample_ids_test.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `combine_sample_ids` module."""
import unittest
from apache_beam import combiners
from apache_beam import transforms
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.testing.testdata_util import hash_name
from gcp_variant_transforms.transforms import combine_sample_ids
class GetSampleIdsTest(unittest.TestCase):
"""Test cases for the `SampleIdsCombiner` transform."""
def test_sample_ids_combiner_pipeline_preserve_sample_order_error(self):
sample_ids = [hash_name('sample1'),
hash_name('sample2'),
hash_name('sample3')]
variant_calls = [
vcfio.VariantCall(sample_id=sample_ids[0]),
vcfio.VariantCall(sample_id=sample_ids[1]),
vcfio.VariantCall(sample_id=sample_ids[2])
]
variants = [
vcfio.Variant(calls=[variant_calls[0], variant_calls[1]]),
vcfio.Variant(calls=[variant_calls[1], variant_calls[2]])
]
pipeline = TestPipeline()
_ = (
pipeline
| transforms.Create(variants)
| 'CombineSampleIds' >>
combine_sample_ids.SampleIdsCombiner(preserve_sample_order=True)
| combiners.ToList())
with self.assertRaises(ValueError):
pipeline.run()
def test_sample_ids_combiner_pipeline_preserve_sample_order(self):
sample_ids = [hash_name('sample2'),
hash_name('sample1'),
hash_name('sample3')]
variant_calls = [
vcfio.VariantCall(sample_id=sample_ids[0]),
vcfio.VariantCall(sample_id=sample_ids[1]),
vcfio.VariantCall(sample_id=sample_ids[2])
]
variants = [
vcfio.Variant(calls=[variant_calls[0],
variant_calls[1],
variant_calls[2]]),
vcfio.Variant(calls=[variant_calls[0],
variant_calls[1],
variant_calls[2]])
]
pipeline = TestPipeline()
combined_sample_ids = (
pipeline
| transforms.Create(variants)
| 'CombineSampleIds' >>
combine_sample_ids.SampleIdsCombiner(preserve_sample_order=True)
| combiners.ToList())
assert_that(combined_sample_ids, equal_to([sample_ids]))
pipeline.run()
def test_sample_ids_combiner_pipeline(self):
sample_ids = [hash_name('sample3'),
hash_name('sample2'),
hash_name('sample1')]
variant_calls = [
vcfio.VariantCall(sample_id=sample_ids[0]),
vcfio.VariantCall(sample_id=sample_ids[1]),
vcfio.VariantCall(sample_id=sample_ids[2])
]
variants = [
vcfio.Variant(calls=[variant_calls[0], variant_calls[1]]),
vcfio.Variant(calls=[variant_calls[1], variant_calls[2]])
]
pipeline = TestPipeline()
combined_sample_ids = (
pipeline
| transforms.Create(variants)
| 'CombineSampleIds' >> combine_sample_ids.SampleIdsCombiner()
| combiners.ToList())
assert_that(combined_sample_ids, equal_to([sample_ids]))
pipeline.run()
def test_sample_ids_combiner_pipeline_duplicate_sample_ids(self):
variant_call = vcfio.VariantCall(sample_id=hash_name('sample1'))
variants = [vcfio.Variant(calls=[variant_call, variant_call])]
pipeline = TestPipeline()
_ = (
pipeline
| transforms.Create(variants)
| 'CombineSampleIds' >> combine_sample_ids.SampleIdsCombiner()
| combiners.ToList())
with self.assertRaises(ValueError):
pipeline.run()
|
757a9f24da4ddfeff7e6898d2d82f9a7269f7d73
|
632f6f14abb1dbdf86aca1506b8012392bef2a41
|
/tools/combine_images.py
|
789f8df0bb675441700c13b7b4cfdc6e4beeac2a
|
[
"Apache-2.0"
] |
permissive
|
ARMmbed/DAPLink
|
a34f7ce41d6bfc38d49283766a03280f52322f2a
|
19f797fa6396b726250c57eb9be80245a5f877dd
|
refs/heads/main
| 2023-08-23T20:37:22.744671
| 2023-06-29T19:36:00
| 2023-08-16T16:39:58
| 24,571,059
| 1,865
| 883
|
Apache-2.0
| 2023-08-16T16:39:59
| 2014-09-28T21:38:24
|
C
|
UTF-8
|
Python
| false
| false
| 3,696
|
py
|
combine_images.py
|
#!/usr/bin/env python
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import itertools
import os.path
import sys
from intelhex import IntelHex
int_types = (int, long) if sys.version_info[0] == 2 else (int,)
def ranges(i):
for _, b in itertools.groupby(enumerate(i), lambda xy: xy[1] - xy[0]):
b = list(b)
yield b[0][1], b[-1][1]
def print_hex_info(filename, intel_hex):
print("File: %s" % filename)
addresses = intel_hex.addresses()
addresses.sort()
data_list = list(ranges(addresses))
for start, end in data_list:
print(" 0x%x, 0x%x" % (start, end))
def merge_hex(hex1, hex2):
hex1_dict = hex1.todict()
hex2_dict = hex2.todict()
    # The key "start_addr" shows up in the hex
    # dictionary, so remove it
if "start_addr" in hex1_dict:
del hex1_dict["start_addr"]
if "start_addr" in hex2_dict:
del hex2_dict["start_addr"]
keys = sorted(hex2_dict.keys())
# Verify nothing unexpected is in the dict
for key in keys:
if not type(key) in int_types:
print('Unknown key "%s" of type %s' % (key, type(key)))
for key in keys:
if key in hex1_dict:
raise Exception("Overlapping data at 0x%x" % key)
hex1_dict[key] = hex2_dict[key]
return IntelHex(hex1_dict)
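# Hedged example (not part of the original tool): a tiny in-memory check of
# merge_hex, assuming the IntelHex constructor accepts a data dictionary as its
# documentation describes. Addresses and byte values are arbitrary; overlapping
# addresses would raise the Exception above.
def _example_merge_hex():
    hex_a = IntelHex({0x0000: 0xAA, 0x0001: 0xBB})
    hex_b = IntelHex({0x0100: 0xCC})
    merged = merge_hex(hex_a, hex_b)
    print_hex_info("<in-memory>", merged)
    return merged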
parser = argparse.ArgumentParser(description='Hex file merger')
parser.add_argument("--hex", type=str, default=[], action='append', help="Hex file to add to list of files to merge. This can be specified multiple times.")
parser.add_argument("--bin", nargs=2, type=str, default=[], metavar=('FILE', 'ADDR'), action='append', help="Binary file to add to list of files to merge. This can be specified multiple times.")
parser.add_argument("--output_file", type=str, default='image.hex', help="Name of output hex file.")
parser.add_argument("--output_bin_file", type=str, help="Name of output binary file. May be specified in addition to --output_file.")
def main():
args = parser.parse_args()
base_hex = IntelHex()
# Merge in hex files
for file_name in args.hex:
file_name = os.path.expanduser(file_name)
new_hex_data = IntelHex()
print("opening file %s" % file_name)
new_hex_data.fromfile(file_name, format='hex')
print_hex_info(file_name, new_hex_data)
base_hex = merge_hex(base_hex, new_hex_data)
# Merge in binary files
for file_name, addr_str in args.bin:
file_name = os.path.expanduser(file_name)
offset = int(addr_str, 0)
new_hex_data = IntelHex()
new_hex_data.loadbin(file_name, offset=offset)
print_hex_info(file_name, new_hex_data)
base_hex = merge_hex(base_hex, new_hex_data)
# Write out data
output_hex_filename = os.path.expanduser(args.output_file)
print_hex_info(output_hex_filename, base_hex)
base_hex.tofile(output_hex_filename, 'hex')
if args.output_bin_file is not None:
base_hex.tofile(os.path.expanduser(args.output_bin_file), 'bin')
if __name__ == '__main__':
main()
|
6fc2558b006aa209ae460d3921eba10dea5fe197
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_1/_pkg0_0_0_1_0/_mod0_0_0_1_0_0.py
|
f6d90f0c9b03b02d9d0d2a209186524504868cc2
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
_mod0_0_0_1_0_0.py
|
name0_0_0_1_0_0_0 = None
name0_0_0_1_0_0_1 = None
name0_0_0_1_0_0_2 = None
name0_0_0_1_0_0_3 = None
name0_0_0_1_0_0_4 = None
|
42970b5418e408895c8dfed06cd61abd2ec543da
|
9940f6579e010bb7c1fa13885c49bbaf6164723b
|
/lbry/wallet/bcd_data_stream.py
|
1c04be0055d91d6336afb066cf642e0d0011ab96
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lbryio/lbry-sdk
|
feaf1143b178b496a9d81c99faf51fac60e6fed1
|
eb5da9511e162ef1080cb34af2ee087383cfa94a
|
refs/heads/master
| 2023-08-18T13:06:16.106204
| 2023-02-07T18:50:25
| 2023-04-03T17:34:36
| 41,103,286
| 5,272
| 291
|
MIT
| 2023-06-28T16:36:20
| 2015-08-20T15:24:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,998
|
py
|
bcd_data_stream.py
|
import struct
from io import BytesIO
class BCDataStream:
def __init__(self, data=None):
self.data = BytesIO(data)
def reset(self):
self.data.seek(0)
def get_bytes(self):
return self.data.getvalue()
def read(self, size):
return self.data.read(size)
def write(self, data):
self.data.write(data)
def write_many(self, many):
self.data.writelines(many)
def read_string(self):
return self.read(self.read_compact_size())
def write_string(self, s):
self.write_compact_size(len(s))
self.write(s)
def read_compact_size(self):
size = self.read_uint8()
if size < 253:
return size
if size == 253:
return self.read_uint16()
if size == 254:
return self.read_uint32()
if size == 255:
return self.read_uint64()
def write_compact_size(self, size):
if size < 253:
self.write_uint8(size)
elif size <= 0xFFFF:
self.write_uint8(253)
self.write_uint16(size)
elif size <= 0xFFFFFFFF:
self.write_uint8(254)
self.write_uint32(size)
else:
self.write_uint8(255)
self.write_uint64(size)
def read_boolean(self):
return self.read_uint8() != 0
def write_boolean(self, val):
return self.write_uint8(1 if val else 0)
int8 = struct.Struct('b')
uint8 = struct.Struct('B')
int16 = struct.Struct('<h')
uint16 = struct.Struct('<H')
int32 = struct.Struct('<i')
uint32 = struct.Struct('<I')
int64 = struct.Struct('<q')
uint64 = struct.Struct('<Q')
def _read_struct(self, fmt):
value = self.read(fmt.size)
if value:
return fmt.unpack(value)[0]
def read_int8(self):
return self._read_struct(self.int8)
def read_uint8(self):
return self._read_struct(self.uint8)
def read_int16(self):
return self._read_struct(self.int16)
def read_uint16(self):
return self._read_struct(self.uint16)
def read_int32(self):
return self._read_struct(self.int32)
def read_uint32(self):
return self._read_struct(self.uint32)
def read_int64(self):
return self._read_struct(self.int64)
def read_uint64(self):
return self._read_struct(self.uint64)
def write_int8(self, val):
self.write(self.int8.pack(val))
def write_uint8(self, val):
self.write(self.uint8.pack(val))
def write_int16(self, val):
self.write(self.int16.pack(val))
def write_uint16(self, val):
self.write(self.uint16.pack(val))
def write_int32(self, val):
self.write(self.int32.pack(val))
def write_uint32(self, val):
self.write(self.uint32.pack(val))
def write_int64(self, val):
self.write(self.int64.pack(val))
def write_uint64(self, val):
self.write(self.uint64.pack(val))
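# Minimal round-trip sketch (not part of the original module): it only uses the
# BCDataStream class defined above and checks that the Bitcoin-style compact
# size encoding (a single byte below 253, then 253/254/255 prefixes for
# 16/32/64-bit sizes) survives a write followed by a read.
def _compact_size_round_trip_demo():
    for size in (0, 252, 253, 0xFFFF, 0x10000, 0xFFFFFFFF, 0x100000000):
        stream = BCDataStream()
        stream.write_compact_size(size)
        stream.reset()
        assert stream.read_compact_size() == size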
|
29766d87003088aa23016f5bc90552db9da14368
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/nlp/dolly/popxl/modelling/attention.py
|
317ed015c31ff792194e42d3eaf7930e3ff33336
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 7,066
|
py
|
attention.py
|
# Copyright (c) 2023 Graphcore Ltd. All rights reserved.
import numpy as np
from typing import Dict
import math
import popxl
from popxl import ops, ReplicaGrouping
from popxl.utils import to_numpy
from typing import Optional
import popxl_addons as addons
from popxl_addons import NamedTensors
from popxl_addons.array_munging import shard, repeat_axis
from popxl_addons.layers import Linear
from popxl_addons.ops.replicated_all_reduce_TP import replicated_all_reduce
from .rotary_pos_embed import rotary_pos_embed, trig_table_constants
from config import DollyConfig
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXAttention as HFModel
def reshape_for_scores(x: popxl.Tensor, sequence_length: int, heads: int) -> popxl.Tensor:
assert len(x.shape) == 2
micro_batch_size = x.shape[0] // sequence_length
head_hidden_size = x.shape[1] // heads
return x.reshape((micro_batch_size, sequence_length, heads, head_hidden_size))
class DollyAttentionHeads(addons.Module):
def __init__(self, config: DollyConfig, replica_grouping: Optional[ReplicaGrouping] = None):
super().__init__()
self.config = config
self.replica_grouping = replica_grouping
if self.replica_grouping:
n_heads_groups = self.replica_grouping.num_groups
else:
n_heads_groups = 1
assert (
self.config.model.attention.heads % n_heads_groups == 0
), f"{self.config.model.attention.heads} % {n_heads_groups} != 0"
self.n_heads_groups = n_heads_groups
self.n_heads = self.config.model.attention.heads // n_heads_groups
self.qkv = Linear(3 * self.config.model.hidden_size // n_heads_groups, replica_grouping=replica_grouping)
self.rotary_dim = self.config.model.attention.rotary_dim or self.config.model.hidden_size // self.n_heads
def build(self, x: popxl.Tensor):
# x: [batch*seq, hidden]
qkv_act = self.qkv(x)
query, key, value = ops.split(qkv_act, 3, axis=-1)
#: [batch, seq, heads, head_size]
query = reshape_for_scores(query, self.config.model.sequence_length, self.n_heads)
key = reshape_for_scores(key, self.config.model.sequence_length, self.n_heads)
value = reshape_for_scores(value, self.config.model.sequence_length, self.n_heads)
sin, cos = trig_table_constants(
self.config.model.sequence_length,
self.rotary_dim,
self.config.model.attention.rotary_positional_embeddings_base,
self.config.model.dtype,
)
query = rotary_pos_embed(query, sin, cos, self.rotary_dim).transpose((0, 2, 1, 3))
key = rotary_pos_embed(key, sin, cos, self.rotary_dim).transpose((0, 2, 3, 1))
value = value.transpose((0, 2, 1, 3))
causal_mask = popxl.constant(
            # The HF implementation uses 1e9 to mask. However, this model runs in float16 and 1e9 is beyond the float16 range, so 1e4 is used to similar effect.
1e4 * (np.tril(np.ones((self.config.model.sequence_length, self.config.model.sequence_length))) - 1),
query.dtype,
name="causal_mask",
)
attn_output = self.attention_block(query, key, value, causal_mask)
return attn_output.transpose((0, 2, 1, 3)).reshape(
(self.config.execution.micro_batch_size * self.config.model.sequence_length, -1)
)
def attention_block(self, query: popxl.Tensor, key: popxl.Tensor, value: popxl.Tensor, mask: popxl.Tensor):
attn_weights = query @ key
attn_weights = attn_weights * (1 / math.sqrt(value.shape[-1]))
attn_weights = attn_weights + mask
attn_scores = ops.softmax(attn_weights, axis=-1)
return attn_scores @ value
class DollySelfAttentionTP(addons.Module):
def __init__(self, config: DollyConfig):
super().__init__()
self.config = config
attn_tp = (
config.execution.tensor_parallel
if config.execution.attention_tensor_parallel is None
else config.execution.attention_tensor_parallel
)
tp = attn_tp
dp = config.execution.data_parallel * (config.execution.tensor_parallel // attn_tp)
self.replica_grouping = popxl.gcg().ir.replica_grouping(stride=tp, group_size=dp)
# Sharded across devices
self.heads = DollyAttentionHeads(config=config, replica_grouping=self.replica_grouping)
# Sharded across devices
self.output = Linear(self.config.model.hidden_size, bias=False, replica_grouping=self.replica_grouping)
def build(self, x: popxl.Tensor) -> popxl.Tensor:
"""Identical inputs and identical outputs across shards"""
# ----- Sharded computation -----
z = self.heads(x)
z = self.output(z)
z = replicated_all_reduce(z, group=self.replica_grouping.transpose())
self.output_bias = self.add_variable_input(
"output_bias",
lambda: np.zeros(z.shape[-1]),
z.dtype,
)
z = z + self.output_bias
return z
@staticmethod
def hf_mapping(config, variables: NamedTensors, hf_model: HFModel) -> Dict[popxl.Tensor, np.ndarray]:
dtype = config.model.dtype
hidden_dim = config.model.hidden_size
attn_tp = (
config.execution.tensor_parallel
if config.execution.attention_tensor_parallel is None
else config.execution.attention_tensor_parallel
)
heads = config.model.attention.heads
qkv_w = hf_model.query_key_value.weight.data.T.reshape(hidden_dim, heads, 3, -1)
hf_query_w, hf_key_w, hf_value_w = np.split(qkv_w, 3, axis=-2)
hf_query_w, hf_key_w, hf_value_w = map(
lambda p: to_numpy(p, dtype).reshape(hidden_dim, hidden_dim), (hf_query_w, hf_key_w, hf_value_w)
)
query_w, key_w, value_w = map(lambda p: shard(p, attn_tp, axis=-1), (hf_query_w, hf_key_w, hf_value_w))
qkv_weight = np.concatenate((query_w, key_w, value_w), axis=-1)
# qkv_weight = repeat_axis(qkv_weight, n=repeat_tp, axis=0)
qkv_b = hf_model.query_key_value.bias.data.reshape(heads, 3, -1)
hf_query_b, hf_key_b, hf_value_b = np.split(qkv_b, 3, axis=1)
hf_query_b, hf_key_b, hf_value_b = map(
lambda p: to_numpy(p, dtype).reshape(-1), (hf_query_b, hf_key_b, hf_value_b)
)
query_b, key_b, value_b = map(lambda p: np.split(p, attn_tp, axis=0), (hf_query_b, hf_key_b, hf_value_b))
qkv_bias = np.concatenate((query_b, key_b, value_b), axis=1)
# qkv_bias = repeat_axis(qkv_bias, n=repeat_tp, axis=0)
out_proj_w = to_numpy(hf_model.dense.weight.data.T, dtype)
weights = {
variables.heads.qkv.weight: qkv_weight.squeeze(),
variables.heads.qkv.bias: qkv_bias.squeeze(),
variables.output.weight: shard(out_proj_w, attn_tp, axis=0).squeeze(),
variables.output_bias: to_numpy(hf_model.dense.bias.data, dtype),
}
return weights
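# Hedged NumPy sketch (not part of the original module): it restates the masked
# attention computed by DollyAttentionHeads.attention_block above using plain
# float32 arrays instead of popxl tensors, including the 1e4 additive causal
# mask discussed in the comment there. Shapes and random values are arbitrary.
def _numpy_attention_sketch(seq_len=4, head_size=8):
    rng = np.random.default_rng(0)
    q = rng.standard_normal((seq_len, head_size)).astype(np.float32)
    k = rng.standard_normal((seq_len, head_size)).astype(np.float32)
    v = rng.standard_normal((seq_len, head_size)).astype(np.float32)
    # 0 on and below the diagonal, -1e4 above it (large, yet float16-safe)
    mask = 1e4 * (np.tril(np.ones((seq_len, seq_len), dtype=np.float32)) - 1)
    scores = (q @ k.T) / math.sqrt(head_size) + mask
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ v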
|
07715060331b52ac81b2002b012e997f444ef9d7
|
1fb7d21fdb9d0ee6f2a53e626f862d54dcccdbce
|
/utils/request.py
|
46d76ddde2a4e24b1fe4ab41979991aaa3a63ea7
|
[
"MIT"
] |
permissive
|
xfgryujk/blivechat
|
2b1db9de1047344e45051d680461d8ff61bdd99e
|
fe141bc8fda73e2a683c43181bc4043f9739bb1b
|
refs/heads/dev
| 2023-09-04T10:21:50.371035
| 2023-09-03T14:53:15
| 2023-09-03T14:53:15
| 187,993,487
| 1,695
| 196
|
MIT
| 2023-09-11T11:07:26
| 2019-05-22T08:13:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
request.py
|
# -*- coding: utf-8 -*-
import asyncio
from typing import *
import aiohttp
# Requests sometimes succeed without this set of headers, but including them raises the chance of success
BILIBILI_COMMON_HEADERS = {
'Origin': 'https://www.bilibili.com',
'Referer': 'https://www.bilibili.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
' Chrome/114.0.0.0 Safari/537.36'
}
http_session: Optional[aiohttp.ClientSession] = None
def init():
    # The ClientSession has to be created inside an async function
async def do_init():
global http_session
http_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))
asyncio.get_event_loop().run_until_complete(do_init())
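# Hedged usage sketch (not part of the original module): it assumes init() has
# already been called so that http_session exists; the URL is a placeholder.
async def _example_get(url: str = 'https://www.bilibili.com/') -> str:
    async with http_session.get(url, headers=BILIBILI_COMMON_HEADERS) as res:
        return await res.text()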
|
f060d68711fda96ea1bd7bd8f15aef90586685cc
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/1189.py
|
46a8a971049cf43ac0d2c9b49c4a50fe7e603e72
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
1189.py
|
__________________________________________________________________________________________________
sample 36 ms submission
import collections
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
counter = collections.Counter(text)
return min(counter["a"], counter["b"], counter["n"], min(counter["l"], counter["o"]) // 2)
__________________________________________________________________________________________________
__________________________________________________________________________________________________
|
cf24e6aff0114f60f0ac3f6ee5371ad290f4f7a4
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/third_party/blink/web_tests/external/wpt/webdriver/tests/support/image.py
|
2e7ad8c1309b2b4a1e02b424cbd74a610535a4ed
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0",
"MIT"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
image.py
|
import math
import struct
from base64 import decodebytes
from tests.support.asserts import assert_png
def png_dimensions(screenshot):
assert_png(screenshot)
image = decodebytes(screenshot.encode())
width, height = struct.unpack(">LL", image[16:24])
return int(width), int(height)
|
90ac16233cbc8296be3021434053799785f3aecc
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/interface/configure.py
|
82e7270b631e6abf0c8f5a9df3fb439d421a0970
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,965
|
py
|
configure.py
|
"""Common configure functions for Interface"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def configure_interface_point_to_point(device, process_id, interface):
""" Configure Interface point to point
Args:
device ('obj'): Device object
process_id ('str'): Router ISIS process ID
interface ('str'): Interface to configure
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure(
"router isis {process_id}\n"
" interface {interface}\n"
" point-to-point\n"
" !\n".format(
process_id=process_id,
interface=interface,
)
)
except SubCommandFailure:
raise SubCommandFailure(
"Could not configure interface point-to-point on {device}".format(
device=device.name,
)
)
def configure_interface_passive(device, process_id, interface):
""" Configure Interface passive
Args:
device ('obj'): Device object
process_id ('str'): Router ISIS process ID
interface ('str'): Interface to configure
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure(
"router isis {process_id}\n"
" interface {interface}\n"
" passive\n"
" !\n".format(
process_id=process_id,
interface=interface,
)
)
except SubCommandFailure:
raise SubCommandFailure(
"Could not configure interface passive on {device}".format(
device=device.name,
)
)
def configure_interfaces_shutdown(device, interfaces):
""" Shutdown the listed interfaces in the given list on the device
Args:
            interfaces ('list'): Interfaces to shutdown
device ('obj'): Device object
"""
config_cmd = []
for interface in interfaces:
config_cmd += ["interface {interface}".format(interface=interface), "shutdown"]
try:
device.configure(config_cmd)
except SubCommandFailure as e:
log.error('Failed to shutdown interfaces on device {}: {}'.format(device.name, e))
def configure_interfaces_unshutdown(device, interfaces):
""" Enable the listed interfaces in the given list on the device
Args:
            interfaces ('list'): Interfaces to enable
device ('obj'): Device object
"""
config_cmd = []
for interface in interfaces:
config_cmd += ["interface {interface}".format(interface=interface), "no shutdown"]
try:
device.configure(config_cmd, prompt_recovery=True)
except SubCommandFailure as e:
log.error('Failed to enable interfaces on device {}: {}'.format(device.name, e))
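# Hypothetical usage sketch (not part of the original module): `device` would be
# a connected pyATS/Unicon device object, and the process ID and interface
# names below are placeholders.
def _example_usage(device):
    configure_interface_point_to_point(device, process_id='1',
                                       interface='GigabitEthernet0/0/0/0')
    configure_interface_passive(device, process_id='1',
                                interface='GigabitEthernet0/0/0/0')
    configure_interfaces_shutdown(device, ['GigabitEthernet0/0/0/1'])
    configure_interfaces_unshutdown(device, ['GigabitEthernet0/0/0/1'])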
|
34690e144734c4fc34140bf7bd80b21faabbf199
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayDataDataserviceAntdataassetsUploadjobCreateModel.py
|
4c32a24b4150854ecfdfb69ee5941c2b5142028f
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
AlipayDataDataserviceAntdataassetsUploadjobCreateModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntdataassetsOdpsColumn import AntdataassetsOdpsColumn
class AlipayDataDataserviceAntdataassetsUploadjobCreateModel(object):
def __init__(self):
self._guid = None
self._odps_columns = None
@property
def guid(self):
return self._guid
@guid.setter
def guid(self, value):
self._guid = value
@property
def odps_columns(self):
return self._odps_columns
@odps_columns.setter
def odps_columns(self, value):
if isinstance(value, list):
self._odps_columns = list()
for i in value:
if isinstance(i, AntdataassetsOdpsColumn):
self._odps_columns.append(i)
else:
self._odps_columns.append(AntdataassetsOdpsColumn.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.guid:
if hasattr(self.guid, 'to_alipay_dict'):
params['guid'] = self.guid.to_alipay_dict()
else:
params['guid'] = self.guid
if self.odps_columns:
if isinstance(self.odps_columns, list):
for i in range(0, len(self.odps_columns)):
element = self.odps_columns[i]
if hasattr(element, 'to_alipay_dict'):
self.odps_columns[i] = element.to_alipay_dict()
if hasattr(self.odps_columns, 'to_alipay_dict'):
params['odps_columns'] = self.odps_columns.to_alipay_dict()
else:
params['odps_columns'] = self.odps_columns
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataDataserviceAntdataassetsUploadjobCreateModel()
if 'guid' in d:
o.guid = d['guid']
if 'odps_columns' in d:
o.odps_columns = d['odps_columns']
return o
|
3a35c20c005d07458058203ad21ba4d9e4f7265c
|
9161d1421be019e0573bd123460fe69e7cce4cb9
|
/mosqito/sq_metrics/loudness/loudness_zwtv/_third_octave_levels.py
|
790edbb1a1f3d7925913fd517b1d514b8925be60
|
[
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
Eomys/MoSQITo
|
dadbc9159bfef348b1b762a0c8bef8a7f3ed1ef0
|
b6bf207ef4ac422fa075b5117bb186281b52b7c1
|
refs/heads/master
| 2023-08-03T13:02:07.943373
| 2022-12-23T15:31:36
| 2022-12-23T15:31:36
| 249,368,386
| 107
| 40
|
Apache-2.0
| 2023-08-02T15:47:18
| 2020-03-23T07:56:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,549
|
py
|
_third_octave_levels.py
|
# -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Third party imports
import numpy as np
from scipy import signal
# Local application imports
from mosqito.sq_metrics.loudness.loudness_zwtv._square_and_smooth import (
_square_and_smooth,
)
def _third_octave_levels(sig, fs):
"""3rd octave filtering, squaring, smoothing, level calculation and
downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz
See ISO 532-1 section 6.3
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz[pa]
fs : int
time signal sampling frequency
Outputs
-------
third_octave_levels : numpy.ndarray
Set of time signals filtered per third octave bands
"""
# Sampling frequency shall be equal to 48 kHz (as per ISO 532)
if fs != 48000:
raise ValueError("""ERROR: Sampling frequency shall be equal to 48 kHz""")
# Constants
n_level_band = 28
n_filter_coeff = 6
dec_factor = int(fs / 2000)
# Initialisation
coeff = np.zeros(n_filter_coeff)
# Filter coefficients of one-third-octave-band filters (reference
# table)
# ISO 532-1 Table A.1
third_octave_filter_ref = np.array(
[[1, 2, 1, 1, -2, 1], [1, 0, -1, 1, -2, 1], [1, -2, 1, 1, -2, 1]]
)
# Filter coefficients of one-third-octave-band filters (difference to
# reference table for 28 one-third-octave-band filters)
# ISO 532-1 Table A.2
third_octave_filter = np.array(
[
[
[0, 0, 0, 0, -6.70260e-004, 6.59453e-004],
[0, 0, 0, 0, -3.75071e-004, 3.61926e-004],
[0, 0, 0, 0, -3.06523e-004, 2.97634e-004],
],
[
[0, 0, 0, 0, -8.47258e-004, 8.30131e-004],
[0, 0, 0, 0, -4.76448e-004, 4.55616e-004],
[0, 0, 0, 0, -3.88773e-004, 3.74685e-004],
],
[
[0, 0, 0, 0, -1.07210e-003, 1.04496e-003],
[0, 0, 0, 0, -6.06567e-004, 5.73553e-004],
[0, 0, 0, 0, -4.94004e-004, 4.71677e-004],
],
[
[0, 0, 0, 0, -1.35836e-003, 1.31535e-003],
[0, 0, 0, 0, -7.74327e-004, 7.22007e-004],
[0, 0, 0, 0, -6.29154e-004, 5.93771e-004],
],
[
[0, 0, 0, 0, -1.72380e-003, 1.65564e-003],
[0, 0, 0, 0, -9.91780e-004, 9.08866e-004],
[0, 0, 0, 0, -8.03529e-004, 7.47455e-004],
],
[
[0, 0, 0, 0, -2.19188e-003, 2.08388e-003],
[0, 0, 0, 0, -1.27545e-003, 1.14406e-003],
[0, 0, 0, 0, -1.02976e-003, 9.40900e-004],
],
[
[0, 0, 0, 0, -2.79386e-003, 2.62274e-003],
[0, 0, 0, 0, -1.64828e-003, 1.44006e-003],
[0, 0, 0, 0, -1.32520e-003, 1.18438e-003],
],
[
[0, 0, 0, 0, -3.57182e-003, 3.30071e-003],
[0, 0, 0, 0, -2.14252e-003, 1.81258e-003],
[0, 0, 0, 0, -1.71397e-003, 1.49082e-003],
],
[
[0, 0, 0, 0, -4.58305e-003, 4.15355e-003],
[0, 0, 0, 0, -2.80413e-003, 2.28135e-003],
[0, 0, 0, 0, -2.23006e-003, 1.87646e-003],
],
[
[0, 0, 0, 0, -5.90655e-003, 5.22622e-003],
[0, 0, 0, 0, -3.69947e-003, 2.87118e-003],
[0, 0, 0, 0, -2.92205e-003, 2.36178e-003],
],
[
[0, 0, 0, 0, -7.65243e-003, 6.57493e-003],
[0, 0, 0, 0, -4.92540e-003, 3.61318e-003],
[0, 0, 0, 0, -3.86007e-003, 2.97240e-003],
],
[
[0, 0, 0, 0, -1.00023e-002, 8.29610e-003],
[0, 0, 0, 0, -6.63788e-003, 4.55999e-003],
[0, 0, 0, 0, -5.15982e-003, 3.75306e-003],
],
[
[0, 0, 0, 0, -1.31230e-002, 1.04220e-002],
[0, 0, 0, 0, -9.02274e-003, 5.73132e-003],
[0, 0, 0, 0, -6.94543e-003, 4.71734e-003],
],
[
[0, 0, 0, 0, -1.73693e-002, 1.30947e-002],
[0, 0, 0, 0, -1.24176e-002, 7.20526e-003],
[0, 0, 0, 0, -9.46002e-003, 5.93145e-003],
],
[
[0, 0, 0, 0, -2.31934e-002, 1.64308e-002],
[0, 0, 0, 0, -1.73009e-002, 9.04761e-003],
[0, 0, 0, 0, -1.30358e-002, 7.44926e-003],
],
[
[0, 0, 0, 0, -3.13292e-002, 2.06370e-002],
[0, 0, 0, 0, -2.44342e-002, 1.13731e-002],
[0, 0, 0, 0, -1.82108e-002, 9.36778e-003],
],
[
[0, 0, 0, 0, -4.28261e-002, 2.59325e-002],
[0, 0, 0, 0, -3.49619e-002, 1.43046e-002],
[0, 0, 0, 0, -2.57855e-002, 1.17912e-002],
],
[
[0, 0, 0, 0, -5.91733e-002, 3.25054e-002],
[0, 0, 0, 0, -5.06072e-002, 1.79513e-002],
[0, 0, 0, 0, -3.69401e-002, 1.48094e-002],
],
[
[0, 0, 0, 0, -8.26348e-002, 4.05894e-002],
[0, 0, 0, 0, -7.40348e-002, 2.24476e-002],
[0, 0, 0, 0, -5.34977e-002, 1.85371e-002],
],
[
[0, 0, 0, 0, -1.17018e-001, 5.08116e-002],
[0, 0, 0, 0, -1.09516e-001, 2.81387e-002],
[0, 0, 0, 0, -7.85097e-002, 2.32872e-002],
],
[
[0, 0, 0, 0, -1.67714e-001, 6.37872e-002],
[0, 0, 0, 0, -1.63378e-001, 3.53729e-002],
[0, 0, 0, 0, -1.16419e-001, 2.93723e-002],
],
[
[0, 0, 0, 0, -2.42528e-001, 7.98576e-002],
[0, 0, 0, 0, -2.45161e-001, 4.43370e-002],
[0, 0, 0, 0, -1.73972e-001, 3.70015e-002],
],
[
[0, 0, 0, 0, -3.53142e-001, 9.96330e-002],
[0, 0, 0, 0, -3.69163e-001, 5.53535e-002],
[0, 0, 0, 0, -2.61399e-001, 4.65428e-002],
],
[
[0, 0, 0, 0, -5.16316e-001, 1.24177e-001],
[0, 0, 0, 0, -5.55473e-001, 6.89403e-002],
[0, 0, 0, 0, -3.93998e-001, 5.86715e-002],
],
[
[0, 0, 0, 0, -7.56635e-001, 1.55023e-001],
[0, 0, 0, 0, -8.34281e-001, 8.58123e-002],
[0, 0, 0, 0, -5.94547e-001, 7.43960e-002],
],
[
[0, 0, 0, 0, -1.10165e000, 1.91713e-001],
[0, 0, 0, 0, -1.23939e000, 1.05243e-001],
[0, 0, 0, 0, -8.91666e-001, 9.40354e-002],
],
[
[0, 0, 0, 0, -1.58477e000, 2.39049e-001],
[0, 0, 0, 0, -1.80505e000, 1.28794e-001],
[0, 0, 0, 0, -1.32500e000, 1.21333e-001],
],
[
[0, 0, 0, 0, -2.50630e000, 1.42308e-001],
[0, 0, 0, 0, -2.19464e000, 2.76470e-001],
[0, 0, 0, 0, -1.90231e000, 1.47304e-001],
],
]
)
# Filter gain values
# ISO 532-1 Table A.2
filter_gain = np.array(
[
4.30764e-011,
8.59340e-011,
1.71424e-010,
3.41944e-010,
6.82035e-010,
1.36026e-009,
2.71261e-009,
5.40870e-009,
1.07826e-008,
2.14910e-008,
4.28228e-008,
8.54316e-008,
1.70009e-007,
3.38215e-007,
6.71990e-007,
1.33531e-006,
2.65172e-006,
5.25477e-006,
1.03780e-005,
2.04870e-005,
4.05198e-005,
7.97914e-005,
1.56511e-004,
3.04954e-004,
5.99157e-004,
1.16544e-003,
2.27488e-003,
3.91006e-003,
]
)
# Definition of the range of preferred filter center frequency
freq = [
25,
31.5,
40,
50,
63,
80,
100,
125,
160,
200,
250,
315,
400,
500,
630,
800,
1000,
1250,
1600,
2000,
2500,
3150,
4000,
5000,
6300,
8000,
10000,
12500,
]
n_time = len(sig[::dec_factor])
time_axis = np.linspace(0, len(sig) / fs, num=n_time)
third_octave_level = np.zeros((n_level_band, n_time))
for i_bands in range(n_level_band):
# Initialisation
tiny_value = 10 ** -12
i_ref = 4 * 10 ** -10
        # 2nd order filtering (See ISO 532-1 section 6.3 and A.2)
coeff = third_octave_filter_ref - third_octave_filter[i_bands, :, :]
sig_filt = filter_gain[i_bands] * signal.sosfilt(coeff, sig)
# Calculate center frequency of filter
center_freq = 10 ** ((i_bands - 16) / 10) * 1000
# Squaring and smoothing of filtered signal
sig_filt = _square_and_smooth(sig_filt, center_freq, 48000)
# SPL calculation and decimation
third_octave_level[i_bands, :] = 10 * np.log10(
(sig_filt[::dec_factor] + tiny_value) / i_ref
)
return third_octave_level, time_axis, freq
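# Hedged usage sketch (not part of the original module): the function requires a
# 48 kHz input, so this simply exercises it on one second of a 1 kHz sine tone.
def _example_third_octave_levels():
    fs = 48000
    t = np.arange(fs) / fs
    sig = np.sin(2 * np.pi * 1000 * t)
    # levels has shape (28 bands, n_time) at the decimated 2 kHz output rate
    levels, time_axis, freq = _third_octave_levels(sig, fs)
    return levels, time_axis, freq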
|
047c7335d73e4c5815064f66c2a739fa124233a9
|
4658aa41017b2e6da830f1e879774e4a7296c314
|
/holoviews/tests/util/test_help.py
|
583bd1756fe507eaddee85bfdcf44a9e5e5e9a49
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/holoviews
|
3f133e572933c94cedad7bae6fb6d071152842fc
|
e3dee5443dad84b507734c0a3d2bba8ec44f5653
|
refs/heads/main
| 2023-09-03T05:08:42.682432
| 2023-08-28T20:40:36
| 2023-08-28T20:40:36
| 19,542,768
| 1,223
| 223
|
BSD-3-Clause
| 2023-09-14T18:15:53
| 2014-05-07T16:59:22
|
Python
|
UTF-8
|
Python
| false
| false
| 230
|
py
|
test_help.py
|
import holoviews as hv
def test_help_pattern(capsys):
import holoviews.plotting.bokeh # noqa
hv.help(hv.Curve, pattern='border')
captured = capsys.readouterr()
assert '\x1b[43;1;30mborder\x1b[0m' in captured.out
|
24db45ae614a8532f8598232455bd41cea1b0469
|
47542e6b98c19592f44ce44297771c698d4987f7
|
/ch09/09_12.py
|
0fd6de775716186167aa6b90818281bcdd7fddac
|
[
"Apache-2.0"
] |
permissive
|
sharebook-kr/book-cryptocurrency
|
235b6998668265ec804451afddd245a52824f51a
|
847ba97ba096c257b35f5e507cd33fa6a0724860
|
refs/heads/master
| 2022-12-14T05:24:52.765589
| 2022-11-30T01:35:08
| 2022-11-30T01:35:08
| 128,632,349
| 162
| 141
|
Apache-2.0
| 2022-11-30T01:35:09
| 2018-04-08T11:05:17
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
09_12.py
|
# korbit websocket connection
import websockets
import asyncio
async def korbit_ws_client():
uri = "wss://ws.korbit.co.kr/v1/user/push"
async with websockets.connect(uri) as websocket:
greeting = await websocket.recv()
print(greeting)
async def main():
await korbit_ws_client()
asyncio.run(main())
|
c35a9f8348878dd82f775f6c5898c004fa755e54
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-ScreenSaver/Examples/SillyBallsSaver/setup.py
|
04606b9d7fc35d6ce26bcc586fb5a3e86c341a6b
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
setup.py
|
"""
Script for building the example.
Usage:
python3 setup.py py2app
"""
from setuptools import setup
plist = {"NSPrincipalClass": "SillyBalls"}
setup(
plugin=["SillyBalls.py"],
data_files=["English.lproj"],
options={"py2app": {"extension": ".saver", "plist": plist}},
setup_requires=["py2app", "pyobjc-framework-Cocoa", "pyobjc-framework-ScreenSaver"],
)
|
17abc6dea2b28f927f76f52266e851dd01025f93
|
391dfd77c1bb85c08b4ead451ecdab0858eb141f
|
/moderngl_window/context/headless/keys.py
|
1d5fbd08f5a82874b4e52be9c594c1996f63a887
|
[
"MIT"
] |
permissive
|
moderngl/moderngl-window
|
308682b5aa625dbb49ca554459bed9853a5e69c3
|
200f2b9ea8b350b0ac9bb6a2d24310c0d8227794
|
refs/heads/master
| 2023-05-28T00:33:49.924394
| 2023-05-18T11:06:26
| 2023-05-18T11:06:26
| 172,498,670
| 205
| 48
|
MIT
| 2023-09-01T17:45:51
| 2019-02-25T12:05:57
|
Python
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
keys.py
|
from moderngl_window.context.base import BaseKeys
class Keys(BaseKeys):
pass
|
40fd0766b16621e11c27d221b99fb66326216167
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/algorithms/nas/modnas/core/__init__.py
|
477f5ca64f6d8f307b69a94656242612ba76b0ea
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
__init__.py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps, partial
from typing import Callable, Type, List, Any
def make_decorator(func: Callable) -> Callable:
"""Return wrapped function that acts as decorator if no extra positional args are given."""
@wraps(func)
def wrapped(*args, **kwargs):
if len(args) == 0 and len(kwargs) > 0:
return partial(func, *args, **kwargs)
return func(*args, **kwargs)
return wrapped
def singleton(cls: Type) -> Callable:
"""Return wrapped class that has only one instance."""
inst: List[Any] = []
@wraps(cls)
def get_instance(*args, **kwargs):
if not inst:
inst.append(cls(*args, **kwargs))
return inst[0]
return get_instance
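# Illustrative usage sketch (not part of the original file): the names below are
# made up purely to show how the two helpers above are meant to be applied.
@make_decorator
def _tag(fn, label='default'):
    """Attach a label to a function; usable with or without keyword arguments."""
    fn.label = label
    return fn
@_tag  # called directly with the function: label stays 'default'
def _plain():
    pass
@_tag(label='custom')  # keyword-only call: returns a partial that is then applied
def _labelled():
    pass
@singleton
class _Config:
    """Every construction of this class returns the same instance."""
    def __init__(self):
        self.values = {}
# _Config() is _Config()  ->  True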
|
3087920af33efb6e030b43b949281196f6d7ac7d
|
2181883c8faac55bfc969a97d22d9b24a3e81ab3
|
/com/win32com/test/testStreams.py
|
b43da524f3f006b823430657d0a3ddb8ba545514
|
[
"PSF-2.0"
] |
permissive
|
mhammond/pywin32
|
574bf121cfeac8c7a9d28f94ee0f2069a425e8ab
|
2a7137f21965013020ef9e4f27565db6dea59003
|
refs/heads/main
| 2023-09-02T13:16:52.307262
| 2023-08-17T19:42:26
| 2023-08-17T19:42:26
| 108,187,130
| 4,757
| 907
| null | 2023-08-23T01:45:49
| 2017-10-24T21:44:27
|
C++
|
UTF-8
|
Python
| false
| false
| 4,213
|
py
|
testStreams.py
|
import unittest
import pythoncom
import win32com.server.util
import win32com.test.util
class Persists:
_public_methods_ = [
"GetClassID",
"IsDirty",
"Load",
"Save",
"GetSizeMax",
"InitNew",
]
_com_interfaces_ = [pythoncom.IID_IPersistStreamInit]
def __init__(self):
self.data = b"abcdefg"
self.dirty = 1
def GetClassID(self):
return pythoncom.IID_NULL
def IsDirty(self):
return self.dirty
def Load(self, stream):
self.data = stream.Read(26)
def Save(self, stream, clearDirty):
stream.Write(self.data)
if clearDirty:
self.dirty = 0
def GetSizeMax(self):
return 1024
def InitNew(self):
pass
class Stream:
_public_methods_ = ["Read", "Write", "Seek"]
_com_interfaces_ = [pythoncom.IID_IStream]
def __init__(self, data):
self.data = data
self.index = 0
def Read(self, amount):
result = self.data[self.index : self.index + amount]
self.index = self.index + amount
return result
def Write(self, data):
self.data = data
self.index = 0
return len(data)
def Seek(self, dist, origin):
if origin == pythoncom.STREAM_SEEK_SET:
self.index = dist
elif origin == pythoncom.STREAM_SEEK_CUR:
self.index = self.index + dist
elif origin == pythoncom.STREAM_SEEK_END:
self.index = len(self.data) + dist
else:
raise ValueError("Unknown Seek type: " + str(origin))
if self.index < 0:
self.index = 0
else:
self.index = min(self.index, len(self.data))
return self.index
class BadStream(Stream):
"""PyGStream::Read could formerly overflow buffer if the python implementation
returned more data than requested.
"""
def Read(self, amount):
return b"x" * (amount + 1)
class StreamTest(win32com.test.util.TestCase):
def _readWrite(self, data, write_stream, read_stream=None):
if read_stream is None:
read_stream = write_stream
write_stream.Write(data)
read_stream.Seek(0, pythoncom.STREAM_SEEK_SET)
got = read_stream.Read(len(data))
self.assertEqual(data, got)
read_stream.Seek(1, pythoncom.STREAM_SEEK_SET)
got = read_stream.Read(len(data) - 2)
self.assertEqual(data[1:-1], got)
def testit(self):
mydata = b"abcdefghijklmnopqrstuvwxyz"
# First test the objects just as Python objects...
s = Stream(mydata)
p = Persists()
p.Load(s)
p.Save(s, 0)
self.assertEqual(s.data, mydata)
# Wrap the Python objects as COM objects, and make the calls as if
# they were non-Python COM objects.
s2 = win32com.server.util.wrap(s, pythoncom.IID_IStream)
p2 = win32com.server.util.wrap(p, pythoncom.IID_IPersistStreamInit)
self._readWrite(mydata, s, s)
self._readWrite(mydata, s, s2)
self._readWrite(mydata, s2, s)
self._readWrite(mydata, s2, s2)
self._readWrite(b"string with\0a NULL", s2, s2)
# reset the stream
s.Write(mydata)
p2.Load(s2)
p2.Save(s2, 0)
self.assertEqual(s.data, mydata)
def testseek(self):
s = Stream(b"yo")
s = win32com.server.util.wrap(s, pythoncom.IID_IStream)
# we used to die passing a value > 32bits
s.Seek(0x100000000, pythoncom.STREAM_SEEK_SET)
def testerrors(self):
# setup a test logger to capture tracebacks etc.
records, old_log = win32com.test.util.setup_test_logger()
## check for buffer overflow in Read method
badstream = BadStream("Check for buffer overflow")
badstream2 = win32com.server.util.wrap(badstream, pythoncom.IID_IStream)
self.assertRaises(pythoncom.com_error, badstream2.Read, 10)
win32com.test.util.restore_test_logger(old_log)
# there's 1 error here
self.assertEqual(len(records), 1)
self.assertTrue(records[0].msg.startswith("pythoncom error"))
if __name__ == "__main__":
unittest.main()
|
a5201bb34b3c1a3828602ad264b02f331097cbee
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Filters/Points/Testing/Python/TestPointInterpolator2D2.py
|
dc2d2647bb2fff971d931609f5b606d63e45b07b
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
TestPointInterpolator2D2.py
|
#!/usr/bin/env python
from vtkmodules.vtkCommonCore import (
vtkLookupTable,
vtkMath,
)
from vtkmodules.vtkCommonSystem import vtkTimerLog
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkFiltersPoints import (
vtkGaussianKernel,
vtkPointInterpolator2D,
)
from vtkmodules.vtkFiltersSources import vtkPlaneSource
from vtkmodules.vtkIOImage import vtkDEMReader
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer,
)
import vtkmodules.vtkInteractionStyle
import vtkmodules.vtkRenderingFreeType
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Parameters for debugging
res = 200
math = vtkMath()
# create pipeline: use terrain dataset. Just for kicks we'll treat the elevations
# as a "point cloud" and interpolate the elevation onto a plane.
#
# Read the data: a height field results
demReader = vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()
lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]
bds = demReader.GetOutput().GetBounds()
center = demReader.GetOutput().GetCenter()
# Create a plane of onto which to map the elevation data
plane = vtkPlaneSource()
plane.SetResolution(res,res)
plane.SetOrigin(bds[0],bds[2],bds[4])
plane.SetPoint1(bds[1],bds[2],bds[4])
plane.SetPoint2(bds[0],bds[3],bds[4])
plane.Update()
# Gaussian kernel-------------------------------------------------------
gaussianKernel = vtkGaussianKernel()
gaussianKernel.SetSharpness(2)
gaussianKernel.SetRadius(200)
interp = vtkPointInterpolator2D()
interp.SetInputConnection(plane.GetOutputPort())
interp.SetSourceConnection(demReader.GetOutputPort())
interp.SetKernel(gaussianKernel)
interp.SetNullPointsStrategyToClosestPoint()
interp.GetLocator().SetNumberOfPointsPerBucket(1)
interp.InterpolateZOff()
# Time execution
timer = vtkTimerLog()
timer.StartTimer()
interp.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Terrain Points (Gaussian): {0}".format(time))
scalarRange = interp.GetOutput().GetPointData().GetArray("Elevation").GetRange()
lut = vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
intMapper = vtkPolyDataMapper()
intMapper.SetInputConnection(interp.GetOutputPort())
intMapper.SetScalarModeToUsePointFieldData()
intMapper.SelectColorArray("Elevation")
intMapper.SetScalarRange(scalarRange)
intMapper.SetLookupTable(lut)
intActor = vtkActor()
intActor.SetMapper(intMapper)
# Create some contours
cf = vtkContourFilter()
cf.SetInputConnection(interp.GetOutputPort())
cf.GenerateValues(20,scalarRange)
cfMapper = vtkPolyDataMapper()
cfMapper.SetInputConnection(cf.GetOutputPort())
cfActor = vtkActor()
cfActor.SetMapper(cfMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren0)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(intActor)
ren0.AddActor(cfActor)
ren0.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(300, 300)
cam = ren0.GetActiveCamera()
cam.SetFocalPoint(center)
fp = cam.GetFocalPoint()
cam.SetPosition(fp[0]+.2,fp[1]+.1,fp[2]+1)
ren0.ResetCamera()
iren.Initialize()
# render the image
#
renWin.Render()
iren.Start()
|
47e38367c5bf19b5ea429b40166c23add2f52e96
|
cb8734b96e22532dce6380f1e9f621324b1fc076
|
/menpo3d/io/input/mesh/base.py
|
9617338ae09b2586fff7e46d7cc7b07b68126aa3
|
[
"BSD-3-Clause"
] |
permissive
|
menpo/menpo3d
|
3168d3d23f9b3f0520ab0834dc3011dec5069b14
|
6650918e786ac98112387b97f5ecf8cc67025f9f
|
refs/heads/master
| 2022-11-10T05:45:06.267175
| 2022-11-09T14:51:55
| 2022-11-09T14:51:55
| 24,769,446
| 168
| 53
|
NOASSERTION
| 2022-11-09T14:46:57
| 2014-10-03T18:38:31
|
Python
|
UTF-8
|
Python
| false
| false
| 11,567
|
py
|
base.py
|
import warnings
import json
import numpy as np
import menpo.io as mio
from menpo.shape import ColouredTriMesh, TexturedTriMesh, TriMesh, PointCloud
def _construct_shape_type(points, trilist, tcoords, texture, colour_per_vertex):
r"""
Construct the correct Shape subclass given the inputs. TexturedTriMesh
can only be created when tcoords and texture are available. ColouredTriMesh
    can only be created when colour_per_vertex is not None, and TriMesh
    can only be created when trilist is not None. The worst-case fallback is
PointCloud.
Parameters
----------
points : ``(N, D)`` `ndarray`
The N-D points.
trilist : ``(N, 3)`` `ndarray`` or ``None``
Triangle list or None.
tcoords : ``(N, 2)`` `ndarray` or ``None``
Texture coordinates.
texture : :map:`Image` or ``None``
Texture.
colour_per_vertex : ``(N, 1)`` or ``(N, 3)`` `ndarray` or ``None``
The colour per vertex.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
# Four different outcomes - either we have a textured mesh, a coloured
# mesh or just a plain mesh or we fall back to a plain pointcloud.
if trilist is None:
obj = PointCloud(points, copy=False)
elif tcoords is not None and texture is not None:
obj = TexturedTriMesh(points, tcoords, texture, trilist=trilist, copy=False)
elif colour_per_vertex is not None:
obj = ColouredTriMesh(
points, trilist=trilist, colours=colour_per_vertex, copy=False
)
else:
# TriMesh fall through
obj = TriMesh(points, trilist=trilist, copy=False)
if tcoords is not None and texture is None:
warnings.warn(
"tcoords were found, but no texture was recovered, "
"reverting to an untextured mesh."
)
if texture is not None and tcoords is None:
warnings.warn(
"texture was found, but no tcoords were recovered, "
"reverting to an untextured mesh."
)
return obj
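# Minimal sketch (not part of the original file): with no trilist the helper
# above falls back to a plain PointCloud, as its docstring describes. The
# points are arbitrary zeros.
def _example_construct_shape_type():
    points = np.zeros((4, 3))
    shape = _construct_shape_type(points, None, None, None, None)
    assert isinstance(shape, PointCloud)
    return shape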
def vtk_ensure_trilist(polydata):
try:
import vtk
from vtk.util.numpy_support import vtk_to_numpy
trilist = vtk_to_numpy(polydata.GetPolys().GetData())
# 5 is the triangle type - if we have another type we need to
# use a vtkTriangleFilter
c = vtk.vtkCellTypes()
polydata.GetCellTypes(c)
if c.GetNumberOfTypes() != 1 or polydata.GetCellType(0) != 5:
warnings.warn(
"Non-triangular mesh connectivity was detected - "
"this is currently unsupported and thus the "
"connectivity is being coerced into a triangular "
"mesh. This may have unintended consequences."
)
t_filter = vtk.vtkTriangleFilter()
t_filter.SetInputData(polydata)
t_filter.Update()
trilist = vtk_to_numpy(t_filter.GetOutput().GetPolys().GetData())
return trilist.reshape([-1, 4])[:, 1:]
except Exception as e:
warnings.warn(str(e))
return None
def wrl_importer(filepath, asset=None, texture_resolver=None, **kwargs):
"""Allows importing VRML 2.0 meshes.
Uses VTK and assumes that the first actor in the scene is the one
that we want.
Parameters
----------
asset : `object`, optional
An optional asset that may help with loading. This is unused for this
implementation.
texture_resolver : `callable`, optional
A callable that recieves the mesh filepath and returns a single
path to the texture to load.
\**kwargs : `dict`, optional
Any other keyword arguments.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
import vtk
from vtk.util.numpy_support import vtk_to_numpy
vrml_importer = vtk.vtkVRMLImporter()
vrml_importer.SetFileName(str(filepath))
vrml_importer.Update()
# Get the first actor.
actors = vrml_importer.GetRenderer().GetActors()
actors.InitTraversal()
mapper = actors.GetNextActor().GetMapper()
mapper_dataset = mapper.GetInput()
if actors.GetNextActor():
# There was more than one actor!
warnings.warn(
"More than one actor was detected in the scene. Only "
"single scene actors are currently supported."
)
# Get the Data
polydata = vtk.vtkPolyData.SafeDownCast(mapper_dataset)
# We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(float)
trilist = vtk_ensure_trilist(polydata)
texture = None
if texture_resolver is not None:
texture_path = texture_resolver(filepath)
if texture_path is not None and texture_path.exists():
texture = mio.import_image(texture_path)
# Three different outcomes - either we have a textured mesh, a coloured
# mesh or just a plain mesh. Let's try each in turn.
# Textured
tcoords = None
try:
tcoords = vtk_to_numpy(polydata.GetPointData().GetTCoords())
except Exception:
pass
if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
tcoords = None
    # Colour-per-vertex
    colour_per_vertex = None
    try:
        colour_per_vertex = vtk_to_numpy(mapper.GetLookupTable().GetTable()) / 255.0
    except Exception:
        pass
if isinstance(colour_per_vertex, np.ndarray) and colour_per_vertex.size == 0:
colour_per_vertex = None
return _construct_shape_type(points, trilist, tcoords, texture, colour_per_vertex)
def obj_importer(filepath, asset=None, texture_resolver=None, **kwargs):
"""Allows importing Wavefront (OBJ) files.
Uses VTK.
Parameters
----------
asset : `object`, optional
An optional asset that may help with loading. This is unused for this
implementation.
texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
path to the texture to load.
\**kwargs : `dict`, optional
Any other keyword arguments.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
import vtk
from vtk.util.numpy_support import vtk_to_numpy
obj_importer = vtk.vtkOBJReader()
obj_importer.SetFileName(str(filepath))
obj_importer.Update()
# Get the output
polydata = obj_importer.GetOutput()
# We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(float)
trilist = np.require(vtk_ensure_trilist(polydata), requirements=["C"])
texture = None
if texture_resolver is not None:
texture_path = texture_resolver(filepath)
if texture_path is not None and texture_path.exists():
texture = mio.import_image(texture_path)
tcoords = None
if texture is not None:
try:
tcoords = vtk_to_numpy(polydata.GetPointData().GetTCoords())
except Exception:
pass
if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
tcoords = None
colour_per_vertex = None
return _construct_shape_type(points, trilist, tcoords, texture, colour_per_vertex)
def ply_importer(filepath, asset=None, texture_resolver=None, **kwargs):
"""Allows importing Wavefront (OBJ) files.
Uses VTK.
Parameters
----------
asset : `object`, optional
An optional asset that may help with loading. This is unused for this
implementation.
texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
path to the texture to load.
\**kwargs : `dict`, optional
Any other keyword arguments.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
import vtk
from vtk.util.numpy_support import vtk_to_numpy
ply_importer = vtk.vtkPLYReader()
ply_importer.SetFileName(str(filepath))
ply_importer.Update()
# Get the output
polydata = ply_importer.GetOutput()
# We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(float)
trilist = np.require(vtk_ensure_trilist(polydata), requirements=["C"])
point_data = polydata.GetPointData()
texture = None
if texture_resolver is not None:
texture_path = texture_resolver(filepath)
if texture_path is not None and texture_path.exists():
texture = mio.import_image(texture_path)
tcoords = None
if texture is not None:
try:
tcoords = vtk_to_numpy(point_data.GetTCoords())
except Exception:
pass
if isinstance(tcoords, np.ndarray) and tcoords.size == 0:
tcoords = None
scalar_per_vertex = point_data.GetScalars()
if scalar_per_vertex is not None:
scalar_per_vertex = vtk_to_numpy(scalar_per_vertex)[:, :3]
if scalar_per_vertex.dtype == np.uint8:
# Convert to [0, 1] floats
scalar_per_vertex = scalar_per_vertex * (1 / 255.0)
return _construct_shape_type(points, trilist, tcoords, texture, scalar_per_vertex)
def stl_importer(filepath, asset=None, **kwargs):
"""Allows importing Stereolithography CAD (STL) files.
Uses VTK.
Parameters
----------
asset : `object`, optional
An optional asset that may help with loading. This is unused for this
implementation.
\**kwargs : `dict`, optional
Any other keyword arguments.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
import vtk
from vtk.util.numpy_support import vtk_to_numpy
stl_importer = vtk.vtkSTLReader()
stl_importer.SetFileName(str(filepath))
stl_importer.Update()
# Get the output
polydata = stl_importer.GetOutput()
# We must have point data!
    points = vtk_to_numpy(polydata.GetPoints().GetData()).astype(np.float64)
trilist = np.require(vtk_ensure_trilist(polydata), requirements=["C"])
colour_per_vertex = None
tcoords = None
texture = None
return _construct_shape_type(points, trilist, tcoords, texture, colour_per_vertex)
def mjson_importer(filepath, asset=None, texture_resolver=None, **kwargs):
"""
Import meshes that are in a simple JSON format.
Parameters
----------
asset : `object`, optional
An optional asset that may help with loading. This is unused for this
implementation.
texture_resolver : `callable`, optional
        A callable that receives the mesh filepath and returns a single
path to the texture to load.
\**kwargs : `dict`, optional
Any other keyword arguments.
Returns
-------
shape : :map:`PointCloud` or subclass
The correct shape for the given inputs.
"""
with open(str(filepath), "rb") as f:
mesh_json = json.load(f)
texture = None
if texture_resolver is not None:
texture_path = texture_resolver(filepath)
if texture_path is not None and texture_path.exists():
texture = mio.import_image(texture_path)
points = mesh_json["points"]
trilist = mesh_json["trilist"]
    tcoords = mesh_json.get("tcoords")
colour_per_vertex = mesh_json.get("colour_per_vertex")
return _construct_shape_type(points, trilist, tcoords, texture, colour_per_vertex)
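# Minimal usage sketch for the importers above (the mesh/texture file names are
# made up for illustration). The only texture_resolver contract assumed is the
# one the importers use: it receives the mesh filepath and returns the texture
# path to load, or a missing path / None to skip texturing.
def _same_stem_texture(mesh_path):
    import pathlib  # local import so the sketch does not assume module-level imports
    candidate = pathlib.Path(mesh_path).with_suffix(".jpg")
    return candidate if candidate.exists() else None


if __name__ == "__main__":
    mesh = ply_importer("scan.ply", texture_resolver=_same_stem_texture)
    print(type(mesh))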
|
8ee114919d023e649698183cfa04a0d6b35c0211
|
d975a74c5efe4d9790566818c3ca43a70c6ce817
|
/cvise/passes/line_markers.py
|
e08c42ac3d6273358e0ae0a694a450959da61fd3
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
marxin/cvise
|
cd24ce051a90b709b5a8e919ff8db2805734981d
|
594573e041cc2e906c1a81a4aca81ede93efee4a
|
refs/heads/master
| 2023-08-31T13:24:35.500801
| 2023-08-20T13:16:15
| 2023-08-20T13:16:15
| 256,755,439
| 164
| 17
|
NOASSERTION
| 2023-09-14T14:41:58
| 2020-04-18T13:04:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
line_markers.py
|
import os
import re
import shutil
import tempfile
from cvise.passes.abstract import AbstractPass, BinaryState, PassResult
class LineMarkersPass(AbstractPass):
line_regex = re.compile('^\\s*#\\s*[0-9]+')
def check_prerequisites(self):
return True
def __count_instances(self, test_case):
count = 0
with open(test_case) as in_file:
for line in in_file.readlines():
if self.line_regex.search(line):
count += 1
return count
def new(self, test_case, _=None):
return BinaryState.create(self.__count_instances(test_case))
def advance(self, test_case, state):
return state.advance()
def advance_on_success(self, test_case, state):
return state.advance_on_success(self.__count_instances(test_case))
def transform(self, test_case, state, process_event_notifier):
tmp = os.path.dirname(test_case)
with tempfile.NamedTemporaryFile(mode='w+', delete=False, dir=tmp) as tmp_file:
with open(test_case) as in_file:
i = 0
for line in in_file.readlines():
if self.line_regex.search(line):
if i < state.index or i >= state.end():
tmp_file.write(line)
i += 1
else:
tmp_file.write(line)
shutil.move(tmp_file.name, test_case)
return (PassResult.OK, state)
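# Rough driver sketch for the pass above -- not how C-Vise itself schedules
# passes. Assumptions: the pass can be constructed without arguments, the
# BinaryState returned by new()/advance() becomes None once exhausted, and
# `is_interesting` is a caller-supplied oracle on the (possibly reduced) file.
def _reduce_line_markers(test_case, is_interesting):
    pass_ = LineMarkersPass()
    state = pass_.new(test_case)
    while state is not None:
        shutil.copyfile(test_case, test_case + '.orig')  # keep a rollback copy
        result, state = pass_.transform(test_case, state, process_event_notifier=None)
        if result == PassResult.OK and is_interesting(test_case):
            state = pass_.advance_on_success(test_case, state)
        else:
            shutil.copyfile(test_case + '.orig', test_case)  # undo the attempt
            state = pass_.advance(test_case, state)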
|
b9b940c27bf5d95e09ac17ae96eef306a349b826
|
c2457b9e41bd8670801bb56919b9f081e2c1b03f
|
/py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_tensorflow_full_runner.py
|
1a8395f8984cd7644e880adeaa030870ede54e97
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/kubeflow
|
54e8615bfab8364a598298510f414fa682ed77e3
|
d1c40f916225ecb40826400595ea99096a2adbb7
|
refs/heads/master
| 2023-09-03T19:52:25.590966
| 2023-08-25T21:07:33
| 2023-08-25T21:07:33
| 112,647,343
| 10,983
| 2,282
|
Apache-2.0
| 2023-09-12T23:24:22
| 2017-11-30T18:44:19
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
notebook_server_jupyter_tensorflow_full_runner.py
|
# This file is only intended for development purposes
from kubeflow.kubeflow.cd import base_runner
base_runner.main(
component_name="notebook_servers.notebook_server_jupyter_tensorflow_full",
workflow_name="nb-j-tf-f-build")
|
da87e4bcdd0f564cab8c871271061ed80700317a
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/sql_management_client_enums.py
|
90b01d122079dbf46632af8be5116589115d94ca
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 13,625
|
py
|
sql_management_client_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class CheckNameAvailabilityReason(str, Enum):
invalid = "Invalid"
already_exists = "AlreadyExists"
class ServerConnectionType(str, Enum):
default = "Default"
proxy = "Proxy"
redirect = "Redirect"
class SecurityAlertPolicyState(str, Enum):
new = "New"
enabled = "Enabled"
disabled = "Disabled"
class SecurityAlertPolicyEmailAccountAdmins(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class SecurityAlertPolicyUseServerDefault(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class DataMaskingState(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class DataMaskingRuleState(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class DataMaskingFunction(str, Enum):
default = "Default"
ccn = "CCN"
email = "Email"
number = "Number"
ssn = "SSN"
text = "Text"
class GeoBackupPolicyState(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class DatabaseEdition(str, Enum):
web = "Web"
business = "Business"
basic = "Basic"
standard = "Standard"
premium = "Premium"
premium_rs = "PremiumRS"
free = "Free"
stretch = "Stretch"
data_warehouse = "DataWarehouse"
system = "System"
system2 = "System2"
class ServiceObjectiveName(str, Enum):
system = "System"
system0 = "System0"
system1 = "System1"
system2 = "System2"
system3 = "System3"
system4 = "System4"
system2_l = "System2L"
system3_l = "System3L"
system4_l = "System4L"
free = "Free"
basic = "Basic"
s0 = "S0"
s1 = "S1"
s2 = "S2"
s3 = "S3"
s4 = "S4"
s6 = "S6"
s7 = "S7"
s9 = "S9"
s12 = "S12"
p1 = "P1"
p2 = "P2"
p3 = "P3"
p4 = "P4"
p6 = "P6"
p11 = "P11"
p15 = "P15"
prs1 = "PRS1"
prs2 = "PRS2"
prs4 = "PRS4"
prs6 = "PRS6"
dw100 = "DW100"
dw200 = "DW200"
dw300 = "DW300"
dw400 = "DW400"
dw500 = "DW500"
dw600 = "DW600"
dw1000 = "DW1000"
dw1200 = "DW1200"
dw1000c = "DW1000c"
dw1500 = "DW1500"
dw1500c = "DW1500c"
dw2000 = "DW2000"
dw2000c = "DW2000c"
dw3000 = "DW3000"
dw2500c = "DW2500c"
dw3000c = "DW3000c"
dw6000 = "DW6000"
dw5000c = "DW5000c"
dw6000c = "DW6000c"
dw7500c = "DW7500c"
dw10000c = "DW10000c"
dw15000c = "DW15000c"
dw30000c = "DW30000c"
ds100 = "DS100"
ds200 = "DS200"
ds300 = "DS300"
ds400 = "DS400"
ds500 = "DS500"
ds600 = "DS600"
ds1000 = "DS1000"
ds1200 = "DS1200"
ds1500 = "DS1500"
ds2000 = "DS2000"
elastic_pool = "ElasticPool"
class StorageKeyType(str, Enum):
storage_access_key = "StorageAccessKey"
shared_access_key = "SharedAccessKey"
class AuthenticationType(str, Enum):
sql = "SQL"
ad_password = "ADPassword"
class UnitType(str, Enum):
count = "count"
bytes = "bytes"
seconds = "seconds"
percent = "percent"
count_per_second = "countPerSecond"
bytes_per_second = "bytesPerSecond"
class PrimaryAggregationType(str, Enum):
none = "None"
average = "Average"
count = "Count"
minimum = "Minimum"
maximum = "Maximum"
total = "Total"
class UnitDefinitionType(str, Enum):
count = "Count"
bytes = "Bytes"
seconds = "Seconds"
percent = "Percent"
count_per_second = "CountPerSecond"
bytes_per_second = "BytesPerSecond"
class ElasticPoolEdition(str, Enum):
basic = "Basic"
standard = "Standard"
premium = "Premium"
class ReplicationRole(str, Enum):
primary = "Primary"
secondary = "Secondary"
non_readable_secondary = "NonReadableSecondary"
source = "Source"
copy = "Copy"
class ReplicationState(str, Enum):
pending = "PENDING"
seeding = "SEEDING"
catch_up = "CATCH_UP"
suspended = "SUSPENDED"
class RecommendedIndexAction(str, Enum):
create = "Create"
drop = "Drop"
rebuild = "Rebuild"
class RecommendedIndexState(str, Enum):
active = "Active"
pending = "Pending"
executing = "Executing"
verifying = "Verifying"
pending_revert = "Pending Revert"
reverting = "Reverting"
reverted = "Reverted"
ignored = "Ignored"
expired = "Expired"
blocked = "Blocked"
success = "Success"
class RecommendedIndexType(str, Enum):
clustered = "CLUSTERED"
nonclustered = "NONCLUSTERED"
columnstore = "COLUMNSTORE"
clusteredcolumnstore = "CLUSTERED COLUMNSTORE"
class TransparentDataEncryptionStatus(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class TransparentDataEncryptionActivityStatus(str, Enum):
encrypting = "Encrypting"
decrypting = "Decrypting"
class AutomaticTuningMode(str, Enum):
inherit = "Inherit"
custom = "Custom"
auto = "Auto"
unspecified = "Unspecified"
class AutomaticTuningOptionModeDesired(str, Enum):
off = "Off"
on = "On"
default = "Default"
class AutomaticTuningOptionModeActual(str, Enum):
off = "Off"
on = "On"
class AutomaticTuningDisabledReason(str, Enum):
default = "Default"
disabled = "Disabled"
auto_configured = "AutoConfigured"
inherited_from_server = "InheritedFromServer"
query_store_off = "QueryStoreOff"
query_store_read_only = "QueryStoreReadOnly"
not_supported = "NotSupported"
class ServerKeyType(str, Enum):
service_managed = "ServiceManaged"
azure_key_vault = "AzureKeyVault"
class ReadWriteEndpointFailoverPolicy(str, Enum):
manual = "Manual"
automatic = "Automatic"
class ReadOnlyEndpointFailoverPolicy(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class FailoverGroupReplicationRole(str, Enum):
primary = "Primary"
secondary = "Secondary"
class IdentityType(str, Enum):
system_assigned = "SystemAssigned"
class OperationOrigin(str, Enum):
user = "user"
system = "system"
class SyncAgentState(str, Enum):
online = "Online"
offline = "Offline"
never_connected = "NeverConnected"
class SyncMemberDbType(str, Enum):
azure_sql_database = "AzureSqlDatabase"
sql_server_database = "SqlServerDatabase"
class SyncGroupLogType(str, Enum):
all = "All"
error = "Error"
warning = "Warning"
success = "Success"
class SyncConflictResolutionPolicy(str, Enum):
hub_win = "HubWin"
member_win = "MemberWin"
class SyncGroupState(str, Enum):
not_ready = "NotReady"
error = "Error"
warning = "Warning"
progressing = "Progressing"
good = "Good"
class SyncDirection(str, Enum):
bidirectional = "Bidirectional"
one_way_member_to_hub = "OneWayMemberToHub"
one_way_hub_to_member = "OneWayHubToMember"
class SyncMemberState(str, Enum):
sync_in_progress = "SyncInProgress"
sync_succeeded = "SyncSucceeded"
sync_failed = "SyncFailed"
disabled_tombstone_cleanup = "DisabledTombstoneCleanup"
disabled_backup_restore = "DisabledBackupRestore"
sync_succeeded_with_warnings = "SyncSucceededWithWarnings"
sync_cancelling = "SyncCancelling"
sync_cancelled = "SyncCancelled"
un_provisioned = "UnProvisioned"
provisioning = "Provisioning"
provisioned = "Provisioned"
provision_failed = "ProvisionFailed"
de_provisioning = "DeProvisioning"
de_provisioned = "DeProvisioned"
de_provision_failed = "DeProvisionFailed"
reprovisioning = "Reprovisioning"
reprovision_failed = "ReprovisionFailed"
un_reprovisioned = "UnReprovisioned"
class VirtualNetworkRuleState(str, Enum):
initializing = "Initializing"
in_progress = "InProgress"
ready = "Ready"
deleting = "Deleting"
unknown = "Unknown"
class BlobAuditingPolicyState(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class JobAgentState(str, Enum):
creating = "Creating"
ready = "Ready"
updating = "Updating"
deleting = "Deleting"
disabled = "Disabled"
class JobExecutionLifecycle(str, Enum):
created = "Created"
in_progress = "InProgress"
waiting_for_child_job_executions = "WaitingForChildJobExecutions"
waiting_for_retry = "WaitingForRetry"
succeeded = "Succeeded"
succeeded_with_skipped = "SucceededWithSkipped"
failed = "Failed"
timed_out = "TimedOut"
canceled = "Canceled"
skipped = "Skipped"
class ProvisioningState(str, Enum):
created = "Created"
in_progress = "InProgress"
succeeded = "Succeeded"
failed = "Failed"
canceled = "Canceled"
class JobTargetType(str, Enum):
target_group = "TargetGroup"
sql_database = "SqlDatabase"
sql_elastic_pool = "SqlElasticPool"
sql_shard_map = "SqlShardMap"
sql_server = "SqlServer"
class JobScheduleType(str, Enum):
once = "Once"
recurring = "Recurring"
class JobStepActionType(str, Enum):
tsql = "TSql"
class JobStepActionSource(str, Enum):
inline = "Inline"
class JobStepOutputType(str, Enum):
sql_database = "SqlDatabase"
class JobTargetGroupMembershipType(str, Enum):
include = "Include"
exclude = "Exclude"
class ManagedDatabaseStatus(str, Enum):
online = "Online"
offline = "Offline"
shutdown = "Shutdown"
creating = "Creating"
inaccessible = "Inaccessible"
class CatalogCollationType(str, Enum):
database_default = "DATABASE_DEFAULT"
sql_latin1_general_cp1_ci_as = "SQL_Latin1_General_CP1_CI_AS"
class ManagedDatabaseCreateMode(str, Enum):
default = "Default"
restore_external_backup = "RestoreExternalBackup"
point_in_time_restore = "PointInTimeRestore"
class AutomaticTuningServerMode(str, Enum):
custom = "Custom"
auto = "Auto"
unspecified = "Unspecified"
class AutomaticTuningServerReason(str, Enum):
default = "Default"
disabled = "Disabled"
auto_configured = "AutoConfigured"
class RestorePointType(str, Enum):
continuous = "CONTINUOUS"
discrete = "DISCRETE"
class ManagementOperationState(str, Enum):
pending = "Pending"
in_progress = "InProgress"
succeeded = "Succeeded"
failed = "Failed"
cancel_in_progress = "CancelInProgress"
cancelled = "Cancelled"
class MaxSizeUnit(str, Enum):
megabytes = "Megabytes"
gigabytes = "Gigabytes"
terabytes = "Terabytes"
petabytes = "Petabytes"
class LogSizeUnit(str, Enum):
megabytes = "Megabytes"
gigabytes = "Gigabytes"
terabytes = "Terabytes"
petabytes = "Petabytes"
percent = "Percent"
class CapabilityStatus(str, Enum):
visible = "Visible"
available = "Available"
default = "Default"
disabled = "Disabled"
class PerformanceLevelUnit(str, Enum):
dtu = "DTU"
vcores = "VCores"
class CreateMode(str, Enum):
default = "Default"
copy = "Copy"
secondary = "Secondary"
point_in_time_restore = "PointInTimeRestore"
restore = "Restore"
recovery = "Recovery"
restore_external_backup = "RestoreExternalBackup"
restore_external_backup_secondary = "RestoreExternalBackupSecondary"
restore_long_term_retention_backup = "RestoreLongTermRetentionBackup"
online_secondary = "OnlineSecondary"
class SampleName(str, Enum):
adventure_works_lt = "AdventureWorksLT"
wide_world_importers_std = "WideWorldImportersStd"
wide_world_importers_full = "WideWorldImportersFull"
class DatabaseStatus(str, Enum):
online = "Online"
restoring = "Restoring"
recovery_pending = "RecoveryPending"
recovering = "Recovering"
suspect = "Suspect"
offline = "Offline"
standby = "Standby"
shutdown = "Shutdown"
emergency_mode = "EmergencyMode"
auto_closed = "AutoClosed"
copying = "Copying"
creating = "Creating"
inaccessible = "Inaccessible"
offline_secondary = "OfflineSecondary"
pausing = "Pausing"
paused = "Paused"
resuming = "Resuming"
scaling = "Scaling"
class DatabaseLicenseType(str, Enum):
license_included = "LicenseIncluded"
base_price = "BasePrice"
class DatabaseReadScale(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class ElasticPoolState(str, Enum):
creating = "Creating"
ready = "Ready"
disabled = "Disabled"
class ElasticPoolLicenseType(str, Enum):
license_included = "LicenseIncluded"
base_price = "BasePrice"
class VulnerabilityAssessmentScanTriggerType(str, Enum):
on_demand = "OnDemand"
recurring = "Recurring"
class VulnerabilityAssessmentScanState(str, Enum):
passed = "Passed"
failed = "Failed"
failed_to_run = "FailedToRun"
in_progress = "InProgress"
class InstanceFailoverGroupReplicationRole(str, Enum):
primary = "Primary"
secondary = "Secondary"
class LongTermRetentionDatabaseState(str, Enum):
all = "All"
live = "Live"
deleted = "Deleted"
class VulnerabilityAssessmentPolicyBaselineName(str, Enum):
master = "master"
default = "default"
class CapabilityGroup(str, Enum):
supported_editions = "supportedEditions"
supported_elastic_pool_editions = "supportedElasticPoolEditions"
supported_managed_instance_versions = "supportedManagedInstanceVersions"
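# Quick illustration of the str/Enum mixin pattern used throughout this module:
# members compare equal to their wire values and serialize as plain strings.
if __name__ == "__main__":
    import json
    assert CreateMode.default == "Default"            # str mixin: equals the wire value
    assert CreateMode("Copy") is CreateMode.copy      # lookup by the wire value
    print(json.dumps({"createMode": CreateMode.default}))  # -> {"createMode": "Default"}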
|
48a1ed660adcbbce36718d4ab7c680d0163935d4
|
551990e68feda34d2a9173b05cc3a7259f4e8c9a
|
/direct/data/datasets_config.py
|
2d4e4d324fd0e2e1a9f28b7b4a5fc4c0c7e77e37
|
[
"Apache-2.0"
] |
permissive
|
NKI-AI/direct
|
a5c1ca0cb75d709b62e94ff76aba361e188d2d59
|
2a4c29342bc52a404aae097bc2654fb4323e1ac8
|
refs/heads/main
| 2023-08-03T11:37:52.941124
| 2023-06-28T14:11:56
| 2023-06-28T14:11:56
| 269,966,010
| 151
| 35
|
Apache-2.0
| 2023-06-28T14:11:58
| 2020-06-06T11:53:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
datasets_config.py
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
"""Classes holding the typed configurations for the datasets."""
from dataclasses import dataclass
from typing import List, Optional, Tuple
from omegaconf import MISSING
from direct.common.subsample_config import MaskingConfig
from direct.config.defaults import BaseConfig
@dataclass
class CropTransformConfig(BaseConfig):
crop: Optional[str] = None
crop_type: Optional[str] = "uniform"
image_center_crop: bool = False
@dataclass
class SensitivityMapEstimationTransformConfig(BaseConfig):
estimate_sensitivity_maps: bool = True
sensitivity_maps_type: str = "rss_estimate"
sensitivity_maps_espirit_threshold: Optional[float] = 0.05
sensitivity_maps_espirit_kernel_size: Optional[int] = 6
sensitivity_maps_espirit_crop: Optional[float] = 0.95
sensitivity_maps_espirit_max_iters: Optional[int] = 30
sensitivity_maps_gaussian: Optional[float] = 0.7
@dataclass
class RandomAugmentationTransformsConfig(BaseConfig):
random_rotation: bool = False
random_rotation_degrees: Tuple[int, ...] = (-90, 90)
random_rotation_probability: Optional[float] = 0.5
random_flip: bool = False
random_flip_type: Optional[str] = "random"
random_flip_probability: Optional[float] = 0.5
@dataclass
class NormalizationTransformConfig(BaseConfig):
scaling_key: Optional[str] = "masked_kspace"
scale_percentile: Optional[float] = 0.99
@dataclass
class TransformsConfig(BaseConfig):
masking: MaskingConfig = MaskingConfig()
cropping: CropTransformConfig = CropTransformConfig()
random_augmentations: RandomAugmentationTransformsConfig = RandomAugmentationTransformsConfig()
padding_eps: float = 0.001
estimate_body_coil_image: bool = False
sensitivity_map_estimation: SensitivityMapEstimationTransformConfig = SensitivityMapEstimationTransformConfig()
normalization: NormalizationTransformConfig = NormalizationTransformConfig()
delete_acs_mask: bool = True
delete_kspace: bool = True
image_recon_type: str = "rss"
pad_coils: Optional[int] = None
use_seed: bool = True
@dataclass
class DatasetConfig(BaseConfig):
name: str = MISSING
transforms: BaseConfig = TransformsConfig()
text_description: Optional[str] = None
@dataclass
class H5SliceConfig(DatasetConfig):
regex_filter: Optional[str] = None
input_kspace_key: Optional[str] = None
input_image_key: Optional[str] = None
kspace_context: int = 0
pass_mask: bool = False
data_root: Optional[str] = None
filenames_filter: Optional[List[str]] = None
filenames_lists: Optional[List[str]] = None
filenames_lists_root: Optional[str] = None
@dataclass
class FastMRIConfig(H5SliceConfig):
pass_attrs: bool = True
@dataclass
class CalgaryCampinasConfig(H5SliceConfig):
crop_outer_slices: bool = False
@dataclass
class FakeMRIBlobsConfig(DatasetConfig):
pass_attrs: bool = True
@dataclass
class SheppLoganDatasetConfig(DatasetConfig):
shape: Tuple[int, int, int] = (100, 100, 30)
num_coils: int = 12
seed: Optional[int] = None
B0: float = 3.0
zlimits: Tuple[float, float] = (-0.929, 0.929)
@dataclass
class SheppLoganProtonConfig(SheppLoganDatasetConfig):
pass
@dataclass
class SheppLoganT1Config(SheppLoganDatasetConfig):
pass
@dataclass
class SheppLoganT2Config(SheppLoganDatasetConfig):
T2_star: bool = False
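# Sketch of how these dataclasses are typically consumed with OmegaConf; the
# example name and description values are made up for illustration.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(FastMRIConfig(name="fastmri_knee"))
    cfg.text_description = "illustrative description"  # fields are validated against the dataclass
    print(OmegaConf.to_yaml(cfg))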
|
916b3cf0ec218741e8827b003c2c3fdf04801d9e
|
2d6323b8ccaf08a8929dba79fb9575c436977bd4
|
/docassemble_webapp/docassemble/webapp/worker_tasks.py
|
9fe9d2f81cda83c79787829c2d805a07aabb28e0
|
[
"MIT"
] |
permissive
|
jhpyle/docassemble
|
f1c36e73d02807a7052b860dfceecdfa88e728c7
|
8726242cfbe3a15cad610dc2b518346be68ab142
|
refs/heads/master
| 2023-09-01T20:03:39.497473
| 2023-08-26T12:44:45
| 2023-08-26T12:44:45
| 34,148,903
| 691
| 300
|
MIT
| 2023-09-09T20:08:14
| 2015-04-18T02:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 84,270
|
py
|
worker_tasks.py
|
import copy
import datetime
import json
import os
import re
import subprocess
import sys
import time
import traceback
from urllib.parse import quote
import httplib2
import iso8601
import oauth2client.client
from docassemble.base.logger import logmessage
from docassemble.base.config import daconfig, hostname
from docassemble.base.error import DAError
from docassemble.webapp.files import SavedFile
from docassemble.webapp.worker_common import worker_controller, workerapp, process_error, error_object
USING_SUPERVISOR = bool(os.environ.get('SUPERVISOR_SERVER_URL', None))
WEBAPP_PATH = daconfig.get('webapp', '/usr/share/docassemble/webapp/docassemble.wsgi')
CONTAINER_ROLE = ':' + os.environ.get('CONTAINERROLE', '') + ':'
ONEDRIVE_CHUNK_SIZE = 2000000
SUPERVISORCTL = [daconfig.get('supervisorctl', 'supervisorctl')]
if daconfig['supervisor'].get('username', None):
SUPERVISORCTL.extend(['--username', daconfig['supervisor']['username'], '--password', daconfig['supervisor']['password']])
class RedisCredStorage(oauth2client.client.Storage):
def __init__(self, r, user_id, oauth_app='googledrive'):
self.r = r
self.key = 'da:' + oauth_app + ':userid:' + str(user_id)
self.lockkey = 'da:' + oauth_app + ':lock:userid:' + str(user_id)
super().__init__()
def acquire_lock(self):
pipe = self.r.pipeline()
pipe.set(self.lockkey, 1)
pipe.expire(self.lockkey, 5)
pipe.execute()
def release_lock(self):
self.r.delete(self.lockkey)
def locked_get(self):
json_creds = self.r.get(self.key)
creds = None
if json_creds is not None:
json_creds = json_creds.decode()
try:
creds = oauth2client.client.Credentials.new_from_json(json_creds)
except:
logmessage("RedisCredStorage: could not read credentials from " + str(json_creds))
return creds
def locked_put(self, credentials):
self.r.set(self.key, credentials.to_json())
def locked_delete(self):
self.r.delete(self.key)
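# Usage sketch (mirrors how the sync tasks below use this storage class):
#   storage = RedisCredStorage(worker_controller.r, user_id, oauth_app='googledrive')
#   credentials = storage.get()   # oauth2client Storage.get() -> locked_get() under the lock
#   if credentials and not credentials.invalid:
#       http = credentials.authorize(httplib2.Http())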
def ensure_directories(the_path):
the_dir = os.path.dirname(the_path)
os.makedirs(the_dir, exist_ok=True)
@workerapp.task
def sync_with_google_drive(user_id):
logmessage("sync_with_google_drive: starting")
worker_controller.initialize()
logmessage("sync_with_google_drive: continuing")
storage = RedisCredStorage(worker_controller.r, user_id, oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage("sync_with_google_drive: credentials failed")
return worker_controller.functions.ReturnValue(ok=False, error="credentials expired", restart=False)
try:
with worker_controller.flaskapp.app_context():
http = credentials.authorize(httplib2.Http())
service = worker_controller.apiclient.discovery.build('drive', 'v3', http=http)
key = 'da:googledrive:mapping:userid:' + str(user_id)
the_folder = worker_controller.r.get(key)
if the_folder is None:
raise DAError("Please go to your profile and set up Google Drive synchronization again.")
the_folder = the_folder.decode()
response = service.files().get(fileId=the_folder, fields="mimeType, id, name, trashed").execute()
the_mime_type = response.get('mimeType', None)
trashed = response.get('trashed', False)
if trashed is True or the_mime_type != "application/vnd.google-apps.folder":
return worker_controller.functions.ReturnValue(ok=False, error="error accessing Google Drive", restart=False)
local_files = {}
local_modtimes = {}
gd_files = {}
gd_dirlist = {}
gd_ids = {}
gd_modtimes = {}
gd_deleted = {}
gd_zero = {}
sections_modified = set()
commentary = ''
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
local_files[section] = set()
local_modtimes[section] = {}
if section == 'questions':
the_section = 'playground'
elif section == 'templates':
the_section = 'playgroundtemplate'
else:
the_section = 'playground' + section
area = SavedFile(user_id, fix=True, section=the_section)
for f in area.list_of_files():
local_files[section].add(f)
local_modtimes[section][f] = os.path.getmtime(os.path.join(area.directory, f))
subdirs = []
page_token = None
while True:
                    param = {'spaces': 'drive', 'fields': 'nextPageToken, files(id, name)', 'q': "mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + section + "' and '" + str(the_folder) + "' in parents"}
if page_token is not None:
param['pageToken'] = page_token
response = service.files().list(**param).execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subdirs.append(the_file['id'])
page_token = response.get('nextPageToken', None)
if page_token is None:
break
if len(subdirs) == 0:
file_metadata = {
'name': section,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [the_folder]
}
new_dir = service.files().create(body=file_metadata,
fields='id').execute()
new_id = new_dir.get('id', None)
if new_id is None:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing " + section + " in Google Drive", restart=False)
subdirs.append(new_id)
if len(subdirs) == 0:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing " + section + " in Google Drive", restart=False)
subdir = subdirs[0]
gd_files[section] = set()
gd_dirlist[section] = {}
gd_ids[section] = {}
gd_modtimes[section] = {}
gd_deleted[section] = set()
gd_zero[section] = set()
page_token = None
while True:
param = {'spaces': "drive", 'fields': "nextPageToken, files(id, mimeType, name, modifiedTime, trashed, size)", 'q': "'" + str(subdir) + "' in parents"}
if page_token is not None:
param['pageToken'] = page_token
response = service.files().list(**param).execute()
for the_file in response.get('files', []):
logmessage("GD found " + the_file['name'])
if the_file['mimeType'] == 'application/vnd.google-apps.folder':
# logmessage("sync_with_google_drive: found a folder " + repr(the_file))
gd_dirlist[section][the_file['name']] = the_file['id']
continue
if re.search(r'(\.tmp|\.gdoc|\#)$', the_file['name']):
continue
if re.search(r'^(\~)', the_file['name']):
continue
if 'size' not in the_file:
continue
gd_ids[section][the_file['name']] = the_file['id']
gd_modtimes[section][the_file['name']] = epoch_from_iso(the_file['modifiedTime'])
if int(the_file['size']) == 0:
gd_zero[section].add(the_file['name'])
logmessage("Google says modtime on " + str(the_file['name']) + " is " + str(the_file['modifiedTime']) + ", which is " + str(gd_modtimes[section][the_file['name']]))
if the_file['trashed']:
gd_deleted[section].add(the_file['name'])
continue
gd_files[section].add(the_file['name'])
page_token = response.get('nextPageToken', None)
if page_token is None:
break
for subdir_name, subdir_id in gd_dirlist[section].items():
page_token = None
while True:
param = {'spaces': "drive", 'fields': "nextPageToken, files(id, name, modifiedTime, trashed, size)", 'q': "mimeType!='application/vnd.google-apps.folder' and '" + str(subdir_id) + "' in parents"}
if page_token is not None:
param['pageToken'] = page_token
response = service.files().list(**param).execute()
for the_file in response.get('files', []):
logmessage("GD found " + the_file['name'] + " in subdir " + subdir_name)
if re.search(r'(\.tmp|\.gdoc|\#)$', the_file['name']):
continue
if re.search(r'^(\~)', the_file['name']):
continue
if 'size' not in the_file:
continue
path_name = os.path.join(subdir_name, the_file['name'])
gd_ids[section][path_name] = the_file['id']
gd_modtimes[section][path_name] = epoch_from_iso(the_file['modifiedTime'])
if int(the_file['size']) == 0:
gd_zero[section].add(path_name)
logmessage("Google says modtime on " + str(path_name) + " is " + str(the_file['modifiedTime']) + ", which is " + str(gd_modtimes[section][path_name]))
if the_file['trashed']:
gd_deleted[section].add(path_name)
continue
gd_files[section].add(path_name)
page_token = response.get('nextPageToken', None)
if page_token is None:
break
gd_deleted[section] = gd_deleted[section] - gd_files[section]
for f in gd_files[section]:
logmessage("Considering " + str(f) + " on GD")
if f in local_files[section]:
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on Google Drive was " + str(gd_modtimes[section][f]))
if f not in local_files[section] or gd_modtimes[section][f] - local_modtimes[section][f] > 3:
logmessage("Going to copy " + str(f) + " from Google Drive to local")
sections_modified.add(section)
commentary += "Copied " + str(f) + " from Google Drive.\n"
the_path = os.path.join(area.directory, f)
ensure_directories(the_path)
if f in gd_zero[section]:
with open(the_path, 'a', encoding='utf-8'):
os.utime(the_path, (gd_modtimes[section][f], gd_modtimes[section][f]))
else:
with open(the_path, 'wb') as fh:
response = service.files().get_media(fileId=gd_ids[section][f])
downloader = worker_controller.apiclient.http.MediaIoBaseDownload(fh, response)
done = False
while done is False:
status, done = downloader.next_chunk() # pylint: disable=unused-variable
# logmessage("Download %d%%." % int(status.progress() * 100))
os.utime(the_path, (gd_modtimes[section][f], gd_modtimes[section][f]))
for f in local_files[section]:
logmessage("Considering " + str(f) + ", which is a local file")
if f in gd_files[section]:
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on Google Drive was " + str(gd_modtimes[section][f]))
if f not in gd_deleted[section]:
logmessage("Considering " + str(f) + " is not in Google Drive deleted")
if f not in gd_files[section]:
logmessage("Considering " + str(f) + " is not in Google Drive")
the_path = os.path.join(area.directory, f)
if os.path.getsize(the_path) == 0 and not the_path.endswith('.placeholder'):
logmessage("Found zero byte file: " + str(the_path))
continue
logmessage("Copying " + str(f) + " to Google Drive.")
if not the_path.endswith('.placeholder'):
commentary += "Copied " + str(f) + " to Google Drive.\n"
extension, mimetype = worker_controller.get_ext_and_mimetype(the_path) # pylint: disable=unused-variable
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Setting GD modtime on new file " + str(f) + " to " + str(the_modtime))
dir_part, file_part = os.path.split(f)
if dir_part != '':
if dir_part not in gd_dirlist[section]:
file_metadata = {
'name': dir_part,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [subdir]
}
new_file = service.files().create(body=file_metadata,
fields='id').execute()
gd_dirlist[section][dir_part] = new_file.get('id', None)
parent_to_use = gd_dirlist[section][dir_part]
else:
parent_to_use = subdir
file_metadata = {'name': file_part, 'parents': [parent_to_use], 'modifiedTime': the_modtime, 'createdTime': the_modtime}
media = worker_controller.apiclient.http.MediaFileUpload(the_path, mimetype=mimetype)
the_new_file = service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
the_new_file.get('id')
elif local_modtimes[section][f] - gd_modtimes[section][f] > 3:
logmessage("Considering " + str(f) + " is in Google Drive but local is more recent")
the_path = os.path.join(area.directory, f)
if os.path.getsize(the_path) == 0 and not the_path.endswith('.placeholder'):
logmessage("Found zero byte file during update: " + str(the_path))
continue
commentary += "Updated " + str(f) + " on Google Drive.\n"
extension, mimetype = worker_controller.get_ext_and_mimetype(the_path)
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Updating on Google Drive and setting GD modtime on modified " + str(f) + " to " + str(the_modtime))
file_metadata = {'modifiedTime': the_modtime}
media = worker_controller.apiclient.http.MediaFileUpload(the_path, mimetype=mimetype)
updated_file = service.files().update(fileId=gd_ids[section][f],
body=file_metadata,
media_body=media,
fields='modifiedTime').execute()
gd_modtimes[section][f] = epoch_from_iso(updated_file['modifiedTime'])
logmessage("After update, timestamp on Google Drive is " + str(gd_modtimes[section][f]))
logmessage("After update, timestamp on local system is " + str(os.path.getmtime(the_path)))
for f in gd_deleted[section]:
logmessage("Considering " + str(f) + " is deleted on Google Drive")
if f in local_files[section]:
logmessage("Considering " + str(f) + " is deleted on Google Drive but exists locally")
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on Google Drive was " + str(gd_modtimes[section][f]))
if local_modtimes[section][f] - gd_modtimes[section][f] > 3:
logmessage("Considering " + str(f) + " is deleted on Google Drive but exists locally and needs to be undeleted on GD")
commentary += "Undeleted and updated " + str(f) + " on Google Drive.\n"
the_path = os.path.join(area.directory, f)
extension, mimetype = worker_controller.get_ext_and_mimetype(the_path)
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Setting GD modtime on undeleted file " + str(f) + " to " + str(the_modtime))
file_metadata = {'modifiedTime': the_modtime, 'trashed': False}
media = worker_controller.apiclient.http.MediaFileUpload(the_path, mimetype=mimetype)
updated_file = service.files().update(fileId=gd_ids[section][f],
body=file_metadata,
media_body=media,
fields='modifiedTime').execute()
gd_modtimes[section][f] = epoch_from_iso(updated_file['modifiedTime'])
else:
                            logmessage("Considering " + str(f) + " is deleted on Google Drive but exists locally and needs to be deleted locally")
sections_modified.add(section)
commentary += "Deleted " + str(f) + " from Playground.\n"
the_path = os.path.join(area.directory, f)
if os.path.isfile(the_path):
area.delete_file(f)
for f in os.listdir(area.directory):
the_path = os.path.join(area.directory, f)
logmessage("Before finalizing, " + str(f) + " has a modtime of " + str(os.path.getmtime(the_path)))
area.finalize()
for f in os.listdir(area.directory):
if f not in gd_files[section]:
continue
local_files[section].add(f)
the_path = os.path.join(area.directory, f)
local_modtimes[section][f] = os.path.getmtime(the_path)
logmessage("After finalizing, " + str(f) + " has a modtime of " + str(local_modtimes[section][f]))
if abs(local_modtimes[section][f] - gd_modtimes[section][f]) > 3:
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("post-finalize: updating GD modtime on file " + str(f) + " to " + str(the_modtime))
file_metadata = {'modifiedTime': the_modtime}
updated_file = service.files().update(fileId=gd_ids[section][f],
body=file_metadata,
fields='modifiedTime').execute()
gd_modtimes[section][f] = epoch_from_iso(updated_file['modifiedTime'])
for key in worker_controller.r.keys('da:interviewsource:docassemble.playground' + str(user_id) + ':*'):
worker_controller.r.incr(key)
if commentary != '':
logmessage(commentary)
do_restart = bool('modules' in sections_modified)
return worker_controller.functions.ReturnValue(ok=True, summary=commentary, restart=do_restart)
except DAError as e:
return worker_controller.functions.ReturnValue(ok=False, error=str(e), restart=False)
except Exception as e:
return worker_controller.functions.ReturnValue(ok=False, error="Error syncing with Google Drive: " + worker_controller.noquote(str(e)), restart=False)
def try_request(*pargs, **kwargs):
start_time = time.time()
args = list(pargs)
http = args.pop(0)
tries = 1
while tries < 5:
r, content = http.request(*args, **kwargs)
if int(r['status']) != 504:
break
logmessage("Got a 504 after try " + str(tries))
time.sleep(2*tries)
tries += 1
logmessage("try_request: duration was %.2f seconds" % (time.time() - start_time, ))
return r, content
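# try_request makes at most four attempts: it retries only on HTTP 504, sleeping
# 2, 4 and 6 seconds between retries (and 8 seconds after a failing final attempt)
# before returning the last response and body.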
def epoch_from_iso(datestring):
return (iso8601.parse_date(datestring) - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)).total_seconds()
def iso_from_epoch(seconds):
return datetime.datetime.utcfromtimestamp(seconds).replace(tzinfo=datetime.timezone.utc).isoformat().replace('+00:00', 'Z')
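# Example round trip of the two helpers above (illustrative values):
#   epoch_from_iso('2020-01-01T00:00:00Z') -> 1577836800.0
#   iso_from_epoch(1577836800)             -> '2020-01-01T00:00:00Z'
# The sync code compares these epoch values with a ~3 second tolerance to
# decide whether the local or the remote copy of a file is newer.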
@workerapp.task
def sync_with_onedrive(user_id):
logmessage("sync_with_onedrive: starting")
worker_controller.initialize()
logmessage("sync_with_onedrive: continuing")
storage = RedisCredStorage(worker_controller.r, user_id, oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage("sync_with_onedrive: credentials failed")
return worker_controller.functions.ReturnValue(ok=False, error="credentials expired", restart=False)
try:
with worker_controller.flaskapp.app_context():
http = credentials.authorize(httplib2.Http())
# r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive", "GET")
# drive_id = json.loads(content)['id']
# r, content = try_request(http, "https://graph.microsoft.com/v1.0/drive/special/approot")
# if int(r['status']) != 200:
# return worker_controller.functions.ReturnValue(ok=False, error="Could not verify application root", restart=False)
key = 'da:onedrive:mapping:userid:' + str(user_id)
the_folder = worker_controller.r.get(key)
if the_folder is None:
raise DAError("Please go to your profile and set up OneDrive synchronization again.")
the_folder = the_folder.decode()
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(the_folder), "GET")
if int(r['status']) != 200:
trashed = True
else:
info = json.loads(content.decode())
trashed = bool('deleted' in info)
if trashed is True:
logmessage('trash_gd_file: folder did not exist')
return False
if trashed is True or 'folder' not in info:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing OneDrive", restart=False)
local_files = {}
local_modtimes = {}
od_files = {}
od_dirlist = {}
od_ids = {}
od_modtimes = {}
od_createtimes = {}
od_deleted = {}
od_zero = {}
sections_modified = set()
commentary = ''
subdirs = {}
subdir_count = {}
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(the_folder) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing OneDrive subfolders", restart=False)
info = json.loads(content.decode())
for item in info['value']:
if 'deleted' in item or 'folder' not in item:
continue
if item['name'] in ('static', 'templates', 'questions', 'modules', 'sources', 'packages'):
subdirs[item['name']] = item['id']
subdir_count[item['name']] = item['folder']['childCount']
if "@odata.nextLink" not in info:
break
r, content = try_request(http, info["@odata.nextLink"], "GET")
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
logmessage("sync_with_onedrive: processing " + section)
if section not in subdirs:
headers = {'Content-Type': 'application/json'}
data = {}
data['name'] = section
data['folder'] = {}
data["@microsoft.graph.conflictBehavior"] = "rename"
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_folder) + "/children", "POST", headers=headers, body=json.dumps(data))
if int(resp['status']) != 201:
worker_controller.functions.ReturnValue(ok=False, error="error accessing " + section + " in OneDrive", restart=False)
new_item = json.loads(content.decode())
subdirs[section] = new_item['id']
subdir_count[section] = 0
if section not in subdirs:
worker_controller.functions.ReturnValue(ok=False, error="error accessing " + section + " in OneDrive", restart=False)
local_files[section] = set()
local_modtimes[section] = {}
if section == 'questions':
the_section = 'playground'
elif section == 'templates':
the_section = 'playgroundtemplate'
else:
the_section = 'playground' + section
area = SavedFile(user_id, fix=True, section=the_section)
for f in area.list_of_files():
local_files[section].add(f)
local_modtimes[section][f] = os.path.getmtime(os.path.join(area.directory, f))
od_files[section] = set()
od_ids[section] = {}
od_modtimes[section] = {}
od_createtimes[section] = {}
od_deleted[section] = set()
od_zero[section] = set()
od_dirlist[section] = {}
if subdir_count[section] == 0:
logmessage("sync_with_onedrive: skipping " + section + " because empty on remote")
else:
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(subdirs[section]) + "/children?$select=id,name,deleted,fileSystemInfo,folder,size", "GET")
logmessage("sync_with_onedrive: processing " + section + ", which is " + str(subdirs[section]))
while True:
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing OneDrive subfolder " + section + " " + str(r['status']) + ": " + content.decode() + " looking for " + str(subdirs[section]), restart=False)
info = json.loads(content.decode())
# logmessage("sync_with_onedrive: result was " + repr(info))
for the_file in info['value']:
if 'folder' in the_file:
# logmessage("sync_with_onedrive: found a folder " + repr(the_file))
od_dirlist[section][the_file['name']] = the_file['id']
continue
# logmessage("sync_with_onedrive: found a file " + repr(the_file))
if re.search(r'^(\~)', the_file['name']):
continue
od_ids[section][the_file['name']] = the_file['id']
od_modtimes[section][the_file['name']] = epoch_from_iso(the_file['fileSystemInfo']['lastModifiedDateTime'])
od_createtimes[section][the_file['name']] = epoch_from_iso(the_file['fileSystemInfo']['createdDateTime'])
if the_file['size'] == 0:
od_zero[section].add(the_file['name'])
logmessage("OneDrive says modtime on " + str(the_file['name']) + " in " + section + " is " + str(the_file['fileSystemInfo']['lastModifiedDateTime']) + ", which is " + str(od_modtimes[section][the_file['name']]))
if the_file.get('deleted', None):
od_deleted[section].add(the_file['name'])
continue
od_files[section].add(the_file['name'])
if "@odata.nextLink" not in info:
break
r, content = try_request(http, info["@odata.nextLink"], "GET")
for subdir_name, subdir_id in od_dirlist[section].items():
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(subdir_id) + "/children?$select=id,name,deleted,fileSystemInfo,folder,size", "GET")
logmessage("sync_with_onedrive: processing " + section + " subdir " + subdir_name + ", which is " + str(subdir_id))
while True:
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error accessing OneDrive subfolder " + section + " subdir " + subdir_name + " " + str(r['status']) + ": " + content.decode() + " looking for " + str(subdir_id), restart=False)
info = json.loads(content.decode())
for the_file in info['value']:
if 'folder' in the_file:
continue
# logmessage("sync_with_onedrive: found a file " + repr(the_file))
if re.search(r'^(\~)', the_file['name']):
continue
path_name = os.path.join(subdir_name, the_file['name'])
od_ids[section][path_name] = the_file['id']
od_modtimes[section][path_name] = epoch_from_iso(the_file['fileSystemInfo']['lastModifiedDateTime'])
od_createtimes[section][path_name] = epoch_from_iso(the_file['fileSystemInfo']['createdDateTime'])
if the_file['size'] == 0:
od_zero[section].add(path_name)
logmessage("OneDrive says modtime on " + str(path_name) + " in " + section + " is " + str(the_file['fileSystemInfo']['lastModifiedDateTime']) + ", which is " + str(od_modtimes[section][path_name]))
if the_file.get('deleted', None):
od_deleted[section].add(path_name)
continue
od_files[section].add(path_name)
if "@odata.nextLink" not in info:
break
r, content = try_request(http, info["@odata.nextLink"], "GET")
od_deleted[section] = od_deleted[section] - od_files[section]
for f in od_files[section]:
logmessage("Considering " + str(f) + " on OD")
if f in local_files[section]:
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on OneDrive was " + str(od_modtimes[section][f]))
if f not in local_files[section] or od_modtimes[section][f] - local_modtimes[section][f] > 3:
logmessage("Going to copy " + str(f) + " from OneDrive to local")
sections_modified.add(section)
commentary += "Copied " + str(f) + " from OneDrive.\n"
the_path = os.path.join(area.directory, f)
ensure_directories(the_path)
if f in od_zero[section]:
with open(the_path, 'a', encoding='utf-8'):
os.utime(the_path, (od_modtimes[section][f], od_modtimes[section][f]))
else:
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(od_ids[section][f]) + "/content", "GET")
with open(the_path, 'wb') as fh:
fh.write(content)
os.utime(the_path, (od_modtimes[section][f], od_modtimes[section][f]))
for f in local_files[section]:
logmessage("Considering " + str(f) + ", which is a local file")
if f in od_files[section]:
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on OneDrive was " + str(od_modtimes[section][f]))
if f not in od_deleted[section]:
logmessage("Considering " + str(f) + " is not in OneDrive deleted")
if f not in od_files[section]:
logmessage("Considering " + str(f) + " is not in OneDrive")
the_path = os.path.join(area.directory, f)
dir_name = os.path.dirname(f)
base_name = os.path.basename(f)
if os.path.getsize(the_path) == 0 and not the_path.endswith('.placeholder'):
logmessage("Found zero byte file: " + str(the_path))
continue
logmessage("Copying " + str(f) + " to OneDrive.")
if not the_path.endswith('.placeholder'):
commentary += "Copied " + str(f) + " to OneDrive.\n"
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Setting OD modtime on new file " + str(f) + " to " + str(the_modtime) + " which is " + str(local_modtimes[section][f]))
data = {}
data['name'] = base_name
data['description'] = ''
data["fileSystemInfo"] = {"createdDateTime": the_modtime, "lastModifiedDateTime": the_modtime}
# data["fileSystemInfo"] = { "createdDateTime": the_modtime, "lastAccessedDateTime": the_modtime, "lastModifiedDateTime": the_modtime }
# data["@microsoft.graph.conflictBehavior"] = "replace"
if dir_name != '':
if dir_name not in od_dirlist[section]:
headers = {'Content-Type': 'application/json'}
dirdata = {}
dirdata['name'] = dir_name
dirdata['folder'] = {}
dirdata["@microsoft.graph.conflictBehavior"] = "rename"
r, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(subdirs[section]) + "/children", "POST", headers=headers, body=json.dumps(dirdata))
if int(r['status']) != 201:
raise DAError("sync_with_onedrive: could not create subfolder " + dir_name + ' in ' + str(subdirs[section]) + '. ' + content.decode() + ' status: ' + str(r['status']))
new_item = json.loads(content.decode())
od_dirlist[section][dir_name] = new_item['id']
result = onedrive_upload(http, od_dirlist[section][dir_name], dir_name, data, the_path)
else:
result = onedrive_upload(http, subdirs[section], section, data, the_path)
if isinstance(result, worker_controller.functions.ReturnValue):
return result
od_files[section].add(f)
od_ids[section][f] = result
od_modtimes[section][f] = local_modtimes[section][f]
od_createtimes[section][f] = local_modtimes[section][f]
elif local_modtimes[section][f] - od_modtimes[section][f] > 3:
logmessage("Considering " + str(f) + " is in OneDrive but local is more recent")
the_path = os.path.join(area.directory, f)
if os.path.getsize(the_path) == 0 and not the_path.endswith('.placeholder'):
logmessage("Found zero byte file during update: " + str(the_path))
continue
commentary += "Updated " + str(f) + " on OneDrive.\n"
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Updating on OneDrive and setting OD modtime on modified " + str(f) + " to " + str(the_modtime))
data = {}
data['name'] = f
data['description'] = ''
data["fileSystemInfo"] = {"createdDateTime": iso_from_epoch(od_createtimes[section][f]), "lastModifiedDateTime": the_modtime}
# data["fileSystemInfo"] = {"createdDateTime": od_createtimes[section][f], "lastAccessedDateTime": the_modtime, "lastModifiedDateTime": the_modtime}
# data["@microsoft.graph.conflictBehavior"] = "replace"
result = onedrive_upload(http, subdirs[section], section, data, the_path, new_item_id=od_ids[section][f])
if isinstance(result, worker_controller.functions.ReturnValue):
return result
od_modtimes[section][f] = local_modtimes[section][f]
logmessage("After update, timestamp on OneDrive is " + str(od_modtimes[section][f]))
logmessage("After update, timestamp on local system is " + str(os.path.getmtime(the_path)))
for f in od_deleted[section]:
logmessage("Considering " + str(f) + " is deleted on OneDrive")
if f in local_files[section]:
logmessage("Considering " + str(f) + " is deleted on OneDrive but exists locally")
logmessage("Local timestamp was " + str(local_modtimes[section][f]) + " while timestamp on OneDrive was " + str(od_modtimes[section][f]))
if local_modtimes[section][f] - od_modtimes[section][f] > 3:
logmessage("Considering " + str(f) + " is deleted on OneDrive but exists locally and needs to be undeleted on OD")
commentary += "Undeleted and updated " + str(f) + " on OneDrive.\n"
the_path = os.path.join(area.directory, f)
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("Setting OD modtime on undeleted file " + str(f) + " to " + str(the_modtime))
data = {}
data['name'] = f
data['description'] = ''
# data["fileSystemInfo"] = {"createdDateTime": od_createtimes[section][f], "lastAccessedDateTime": the_modtime, "lastModifiedDateTime": the_modtime}
data["fileSystemInfo"] = {"createdDateTime": iso_from_epoch(od_createtimes[section][f]), "lastModifiedDateTime": the_modtime}
# data["@microsoft.graph.conflictBehavior"] = "replace"
result = onedrive_upload(http, subdirs[section], section, data, the_path, new_item_id=od_ids[section][f])
if isinstance(result, worker_controller.functions.ReturnValue):
return result
od_modtimes[section][f] = local_modtimes[section][f]
else:
                            logmessage("Considering " + str(f) + " is deleted on OneDrive but exists locally and needs to be deleted locally")
sections_modified.add(section)
commentary += "Deleted " + str(f) + " from Playground.\n"
the_path = os.path.join(area.directory, f)
if os.path.isfile(the_path):
area.delete_file(f)
for f in os.listdir(area.directory):
the_path = os.path.join(area.directory, f)
logmessage("Before finalizing, " + str(f) + " has a modtime of " + str(os.path.getmtime(the_path)))
area.finalize()
for f in os.listdir(area.directory):
if f not in od_files[section]:
continue
local_files[section].add(f)
the_path = os.path.join(area.directory, f)
local_modtimes[section][f] = os.path.getmtime(the_path)
logmessage("After finalizing, " + str(f) + " has a modtime of " + str(local_modtimes[section][f]))
if abs(local_modtimes[section][f] - od_modtimes[section][f]) > 3:
the_modtime = iso_from_epoch(local_modtimes[section][f])
logmessage("post-finalize: updating OD modtime on file " + str(f) + " to " + str(the_modtime))
headers = {'Content-Type': 'application/json'}
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(od_ids[section][f]), "PATCH", headers=headers, body=json.dumps({'fileSystemInfo': {"createdDateTime": iso_from_epoch(od_createtimes[section][f]), "lastModifiedDateTime": the_modtime}}, sort_keys=True))
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error updating OneDrive file in subfolder " + section + " " + str(r['status']) + ": " + content.decode(), restart=False)
od_modtimes[section][f] = local_modtimes[section][f]
for key in worker_controller.r.keys('da:interviewsource:docassemble.playground' + str(user_id) + ':*'):
worker_controller.r.incr(key)
if commentary != '':
logmessage(commentary)
do_restart = bool('modules' in sections_modified)
return worker_controller.functions.ReturnValue(ok=True, summary=commentary, restart=do_restart)
except DAError as e:
return worker_controller.functions.ReturnValue(ok=False, error=str(e), restart=False)
except Exception as e:
return worker_controller.functions.ReturnValue(ok=False, error="Error syncing with OneDrive: " + str(e) + str(traceback.format_tb(e.__traceback__)), restart=False)
def onedrive_upload(http, folder_id, folder_name, data, the_path, new_item_id=None):
headers = {'Content-Type': 'application/json'}
is_new = bool(new_item_id is None)
total_bytes = os.path.getsize(the_path)
if total_bytes == 0:
r, content = try_request(http, 'https://graph.microsoft.com/v1.0/me/drive/items/' + quote(folder_id) + ':/' + quote(data['name']) + ':/content', 'PUT', headers={'Content-Type': 'text/plain'}, body=bytes())
if int(r['status']) not in (200, 201):
logmessage("Error0")
logmessage(str(r['status']))
logmessage(content.decode())
return worker_controller.functions.ReturnValue(ok=False, error="error uploading zero-byte file to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + content.decode(), restart=False)
if new_item_id is None:
new_item_id = json.loads(content.decode())['id']
else:
the_url = 'https://graph.microsoft.com/v1.0/me/drive/items/' + quote(folder_id) + ':/' + quote(data['name']) + ':/createUploadSession'
body_data = {"item": {"@microsoft.graph.conflictBehavior": "replace"}}
r, content = try_request(http, the_url, 'POST', headers=headers, body=json.dumps(body_data, sort_keys=True))
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error uploading to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + content.decode() + " and url was " + the_url + " and folder name was " + folder_name + " and path was " + the_path + " and data was " + json.dumps(body_data, sort_keys=True) + " and is_new is " + repr(is_new), restart=False)
logmessage("Upload session created.")
upload_url = json.loads(content.decode())["uploadUrl"]
logmessage("Upload url obtained.")
start_byte = 0
with open(the_path, 'rb') as fh:
while start_byte < total_bytes:
num_bytes = min(ONEDRIVE_CHUNK_SIZE, total_bytes - start_byte)
custom_headers = {'Content-Length': str(num_bytes), 'Content-Range': 'bytes ' + str(start_byte) + '-' + str(start_byte + num_bytes - 1) + '/' + str(total_bytes), 'Content-Type': 'application/octet-stream'}
# logmessage("url is " + repr(upload_url) + " and headers are " + repr(custom_headers))
r, content = try_request(http, upload_url, 'PUT', headers=custom_headers, body=bytes(fh.read(num_bytes)))
logmessage("Sent request")
start_byte += num_bytes
if start_byte == total_bytes:
logmessage("Reached end")
if int(r['status']) not in (200, 201):
logmessage("Error1")
logmessage(str(r['status']))
logmessage(content.decode())
return worker_controller.functions.ReturnValue(ok=False, error="error uploading file to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + content.decode(), restart=False)
if new_item_id is None:
new_item_id = json.loads(content.decode())['id']
else:
if int(r['status']) != 202:
logmessage("Error2")
logmessage(str(r['status']))
logmessage(content.decode())
return worker_controller.functions.ReturnValue(ok=False, error="error during upload of file to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + content.decode(), restart=False)
logmessage("Got 202")
item_data = copy.deepcopy(data)
if 'fileSystemInfo' in item_data and 'createdDateTime' in item_data['fileSystemInfo']:
del item_data['fileSystemInfo']['createdDateTime']
item_data['name'] = re.sub(r'.*/', '', item_data['name'])
logmessage("Patching with " + repr(item_data) + " to " + "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(new_item_id) + " and headers " + repr(headers))
r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(new_item_id), "PATCH", headers=headers, body=json.dumps(item_data, sort_keys=True))
logmessage("PATCH request sent")
if int(r['status']) != 200:
return worker_controller.functions.ReturnValue(ok=False, error="error during updating of uploaded file to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + content.decode(), restart=False)
# tries = 1
# start_time = time.time()
# while tries < 3:
# logmessage("Checking in on results " + "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(new_item_id) + " at " + str(time.time() - start_time))
# r, content = try_request(http, "https://graph.microsoft.com/v1.0/me/drive/items/" + quote(new_item_id), "GET")
# if int(r['status']) != 200:
# return worker_controller.functions.ReturnValue(ok=False, error="error during updating of uploaded file to OneDrive subfolder " + folder_id + " " + str(r['status']) + ": " + str(content), restart=False)
# logmessage("Metadata is now " + str(content))
# time.sleep(5)
# tries += 1
logmessage("Returning " + str(new_item_id))
return new_item_id
@workerapp.task
def ocr_dummy(doc, indexno, **kwargs):
logmessage("ocr_dummy started in worker")
worker_controller.initialize()
url_root = kwargs.get('url_root', daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/'))
url = kwargs.get('url', url_root + 'interview')
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(kwargs['user_code'])
user_info = kwargs['user']
if not str(user_info['the_user_id']).startswith('t'):
user_object = worker_controller.get_user_object(user_info['theid'])
worker_controller.login_user(user_object, remember=False)
worker_controller.set_request_active(False)
if doc._is_pdf():
return worker_controller.functions.ReturnValue(ok=True, value={'indexno': indexno, 'doc': doc})
return worker_controller.functions.ReturnValue(ok=True, value={'indexno': indexno, 'doc': worker_controller.util.pdf_concatenate(doc)})
@workerapp.task
def ocr_page(indexno, **kwargs):
logmessage("ocr_page started in worker")
worker_controller.initialize()
url_root = kwargs.get('url_root', daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/'))
url = kwargs.get('url', url_root + 'interview')
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(kwargs['user_code'])
user_info = kwargs['user']
if not str(user_info['the_user_id']).startswith('t'):
user_object = worker_controller.get_user_object(user_info['theid'])
worker_controller.login_user(user_object, remember=False)
worker_controller.set_request_active(False)
return worker_controller.functions.ReturnValue(ok=True, value=worker_controller.util.ocr_page(indexno, **kwargs))
@workerapp.task
def ocr_finalize(*pargs, **kwargs):
logmessage("ocr_finalize started in worker")
worker_controller.initialize()
url_root = kwargs.get('url_root', daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/'))
url = kwargs.get('url', url_root + 'interview')
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.set_uid(kwargs['user_code'])
user_info = kwargs['user']
if not str(user_info['the_user_id']).startswith('t'):
user_object = worker_controller.get_user_object(user_info['theid'])
worker_controller.login_user(user_object, remember=False)
worker_controller.set_request_active(False)
if 'message' in kwargs and kwargs['message']:
message = kwargs['message']
else:
message = worker_controller.functions.word("OCR succeeded")
try:
if kwargs.get('pdf', False):
try:
(target, dafilelist) = worker_controller.util.ocr_finalize(*pargs, **kwargs)
except Exception as e:
return error_object(e)
user_info = kwargs['user']
yaml_filename = kwargs['yaml_filename']
session_code = kwargs['user_code']
secret = kwargs['secret']
if not str(user_info['the_user_id']).startswith('t'):
user_object = worker_controller.get_user_object(user_info['theid'])
worker_controller.login_user(user_object, remember=False)
# logmessage("ocr_finalize: yaml_filename is " + str(yaml_filename) + " and session code is " + str(session_code))
the_current_info = {'user': user_info, 'session': session_code, 'secret': secret, 'yaml_filename': yaml_filename, 'url': url, 'url_root': url_root, 'interface': 'worker'}
worker_controller.functions.this_thread.current_info = the_current_info
worker_controller.set_request_active(False)
worker_controller.obtain_lock_patiently(session_code, yaml_filename)
try:
steps, user_dict, is_encrypted = worker_controller.fetch_user_dict(session_code, yaml_filename, secret=secret)
except Exception as the_err:
worker_controller.release_lock(session_code, yaml_filename)
error_message = "ocr_finalize: could not obtain dictionary because of " + str(the_err.__class__.__name__) + ": " + str(the_err)
logmessage(error_message)
return worker_controller.functions.ReturnValue(ok=False, error_message=error_message, error_type=DAError)
if user_dict is None:
worker_controller.release_lock(session_code, yaml_filename)
error_message = "ocr_finalize: dictionary could not be found"
logmessage(error_message)
return worker_controller.functions.ReturnValue(ok=False, error_message=error_message, error_type=DAError)
user_dict['__PDF_OCR_OBJECT'] = target
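# Copy the freshly OCR'd file object's attributes onto the interview variable named by target.instanceName inside the user dictionary; exec is used because the variable is only known by its name as a string.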
try:
assert worker_controller.functions.illegal_variable_name(target.instanceName) is not True
for attribute in ('number', 'file_info', 'filename', 'has_specific_filename', 'ok', 'extension', 'mimetype', 'page_task', 'screen_task'):
if hasattr(target, attribute):
exec(target.instanceName + '.' + attribute + ' = __PDF_OCR_OBJECT.' + attribute, user_dict)
else:
exec(target.instanceName + '.delattr(' + repr(attribute) + ')', user_dict)
if dafilelist:
assert worker_controller.functions.illegal_variable_name(dafilelist.instanceName) is not True
exec(dafilelist.instanceName + '.elements = [' + dafilelist.instanceName + '.elements[0]]', user_dict)
except Exception as the_err:
worker_controller.release_lock(session_code, yaml_filename)
error_message = "ocr_pdf: could not save file object: " + str(the_err.__class__.__name__) + ": " + str(the_err)
logmessage(error_message)
return worker_controller.functions.ReturnValue(ok=False, error_message=error_message, error_type=DAError)
del user_dict['__PDF_OCR_OBJECT']
if str(user_info.get('the_user_id', None)).startswith('t'):
worker_controller.save_user_dict(session_code, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, steps=steps)
else:
worker_controller.save_user_dict(session_code, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, manual_user_id=user_info['theid'], steps=steps)
worker_controller.release_lock(session_code, yaml_filename)
return worker_controller.functions.ReturnValue(ok=True, value=True)
return worker_controller.functions.ReturnValue(ok=True, value=message, content=worker_controller.util.ocr_finalize(*pargs, **kwargs), extra=kwargs.get('extra', None))
except Exception as the_error:
logmessage("Error in ocr_finalize: " + the_error.__class__.__name__ + ': ' + str(the_error))
return worker_controller.functions.ReturnValue(ok=False, value=str(the_error), error_message=str(the_error), extra=kwargs.get('extra', None))
@workerapp.task
def make_png_for_pdf(doc, prefix, resolution, user_code, pdf_to_png, page=None):
logmessage("make_png_for_pdf started in worker for size " + prefix)
worker_controller.initialize()
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(user_code)
worker_controller.util.make_png_for_pdf(doc, prefix, resolution, pdf_to_png, page=page)
@workerapp.task
def reset_server(result, run_create=None):
logmessage("reset_server in worker: starting with run_create " + repr(run_create))
if hasattr(result, 'ok') and not result.ok:
logmessage("reset_server in worker: not resetting because result did not succeed.")
return result
if not run_create:
worker_controller.initialize()
pipe = worker_controller.r.pipeline()
pipe.set('da:skip_create_tables', 1)
pipe.expire('da:skip_create_tables', 10)
logmessage("reset_server in worker: setting da:skip_create_tables.")
pipe.execute()
if USING_SUPERVISOR:
if re.search(r':(web|celery|all):', CONTAINER_ROLE):
if result.hostname == hostname:
hostname_to_use = 'localhost'
else:
hostname_to_use = result.hostname
args = SUPERVISORCTL + ['-s', 'http://' + hostname_to_use + ':9001', 'start', 'reset']
result = subprocess.run(args, check=False).returncode
logmessage("reset_server in worker: called " + ' '.join(args))
else:
logmessage("reset_server in worker: did not reset due to container role")
else:
logmessage("reset_server in worker: supervisor not active, touching WSGI file")
wsgi_file = WEBAPP_PATH
if os.path.isfile(wsgi_file):
with open(wsgi_file, 'a', encoding='utf-8'):
os.utime(wsgi_file, None)
logmessage("reset_server in worker: finishing")
return result
@workerapp.task
def update_packages(restart=True):
start_time = time.time()
logmessage("update_packages in worker: starting")
worker_controller.initialize()
logmessage("update_packages in worker: continuing after " + str(time.time() - start_time) + " seconds")
try:
with worker_controller.flaskapp.app_context():
worker_controller.set_request_active(False)
logmessage("update_packages in worker: starting update after " + str(time.time() - start_time) + " seconds")
ok, logmessages, results = worker_controller.update.check_for_updates(start_time=start_time, full=restart)
logmessage("update_packages in worker: update completed after " + str(time.time() - start_time) + " seconds")
if restart and ':all:' not in CONTAINER_ROLE:
worker_controller.trigger_update(except_for=hostname)
logmessage("update_packages in worker: trigger completed after " + str(time.time() - start_time) + " seconds")
return worker_controller.functions.ReturnValue(ok=ok, logmessages=logmessages, results=results, hostname=hostname, restart=restart)
except:
e = sys.exc_info()[0]
error_mess = sys.exc_info()[1]
logmessage("update_packages in worker: error was " + str(e) + " with message " + str(error_mess))
return worker_controller.functions.ReturnValue(ok=False, error_message=str(e), restart=False)
logmessage("update_packages in worker: all done")
return worker_controller.functions.ReturnValue(ok=False, error_message="Reached end", restart=False)
@workerapp.task
def email_attachments(user_code, email_address, attachment_info, language, subject=None, body=None, html=None, config=None):
success = False
worker_controller.initialize()
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
if config is None:
config = 'default'
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(user_code)
if language and language != '*':
worker_controller.functions.set_language(language)
worker_controller.set_request_active(False)
doc_names = []
for attach_info in attachment_info:
if attach_info['attachment']['name'] not in doc_names:
doc_names.append(attach_info['attachment']['name'])
if subject is None:
subject = worker_controller.functions.comma_and_list(doc_names)
if body is None:
if len(doc_names) > 1:
body = worker_controller.functions.word("Your documents, %s, are attached.") % (worker_controller.functions.comma_and_list(doc_names),)
else:
body = worker_controller.functions.word("Your document, %s, is attached.") % (worker_controller.functions.comma_and_list(doc_names),)
if html is None:
html = "<p>" + body + "</p>"
msg = worker_controller.Message(subject, recipients=[email_address], body=body, html=html)
success_attach = True
for attach_info in attachment_info:
file_info = worker_controller.get_info_from_file_number(attach_info['number'])
if 'fullpath' in file_info:
with open(file_info['fullpath'], 'rb') as fp:
msg.attach(attach_info['filename'], attach_info['mimetype'], fp.read())
else:
success_attach = False
if success_attach:
try:
logmessage("Starting to send")
worker_controller.da_send_mail(msg, config=config)
logmessage("Finished sending")
success = True
except Exception as errmess:
try:
logmessage(str(errmess.__class__.__name__) + ": " + str(errmess))
except:
logmessage("Error of type" + str(errmess.__class__.__name__) + " that could not be displayed")
success = False
if success:
return worker_controller.functions.ReturnValue(value=worker_controller.functions.word("E-mail was sent to") + " " + email_address, extra='flash')
return worker_controller.functions.ReturnValue(value=worker_controller.functions.word("Unable to send e-mail to") + " " + email_address, extra='flash')
# @workerapp.task
# def old_email_attachments(yaml_filename, user_info, user_code, secret, url, url_root, email_address, question_number, include_editable):
# success = False
# worker_controller.initialize()
# worker_controller.functions.set_uid(user_code)
# with worker_controller.flaskapp.app_context():
# worker_controller.set_request_active(False)
# # the_user_dict, encrypted = worker_controller.get_attachment_info(user_code, question_number, yaml_filename, secret)
# steps, the_user_dict, is_encrypted = worker_controller.fetch_user_dict(user_code, yaml_filename, secret=secret)
# if the_user_dict is not None:
# interview = worker_controller.interview_cache.get_interview(yaml_filename)
# interview_status = worker_controller.parse.InterviewStatus(current_info=dict(user=user_info, session=user_code, secret=secret, yaml_filename=yaml_filename, url=url, url_root=url_root, encrypted=encrypted, interface='worker', arguments={}))
# interview.assemble(the_user_dict, interview_status)
# if len(interview_status.attachments) > 0:
# attached_file_count = 0
# attachment_info = []
# for the_attachment in interview_status.attachments:
# file_formats = []
# if 'pdf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
# file_formats.append('pdf')
# if include_editable or 'pdf' not in file_formats:
# if 'rtf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
# file_formats.append('rtf')
# if 'docx' in the_attachment['valid_formats']:
# file_formats.append('docx')
# for the_format in file_formats:
# the_filename = the_attachment['file'][the_format]
# if the_format == "pdf":
# mime_type = 'application/pdf'
# elif the_format == "rtf":
# mime_type = 'application/rtf'
# elif the_format == "docx":
# mime_type = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
# attachment_info.append({'filename': str(the_attachment['filename']) + '.' + str(the_format), 'path': str(the_filename), 'mimetype': str(mime_type), 'attachment': the_attachment})
# # logmessage("Need to attach to the e-mail a file called " + str(the_attachment['filename']) + '.' + str(the_format) + ", which is located on the server at " + str(the_filename) + ", with mime type " + str(mime_type))
# attached_file_count += 1
# if attached_file_count > 0:
# doc_names = []
# for attach_info in attachment_info:
# if attach_info['attachment']['name'] not in doc_names:
# doc_names.append(attach_info['attachment']['name'])
# subject = worker_controller.functions.comma_and_list(doc_names)
# if len(doc_names) > 1:
# body = worker_controller.functions.word("Your documents, ") + " " + subject + worker_controller.functions.word(", are attached") + "."
# else:
# body = worker_controller.functions.word("Your document, ") + " " + subject + worker_controller.functions.word(", is attached") + "."
# html = "<p>" + body + "</p>"
# # logmessage("Need to send an e-mail with subject " + subject + " to " + str(email_address) + " with " + str(attached_file_count) + " attachment(s)")
# msg = worker_controller.Message(subject, recipients=[email_address], body=body, html=html)
# for attach_info in attachment_info:
# with open(attach_info['path'], 'rb') as fp:
# msg.attach(attach_info['filename'], attach_info['mimetype'], fp.read())
# try:
# logmessage("Starting to send")
# worker_controller.da_send_mail(msg)
# logmessage("Finished sending")
# success = True
# except Exception as errmess:
# logmessage(str(errmess))
# success = False
# if success:
# return worker_controller.functions.ReturnValue(value=worker_controller.functions.word("E-mail was sent to") + " " + email_address, extra='flash')
# else:
# return worker_controller.functions.ReturnValue(value=worker_controller.functions.word("Unable to send e-mail to") + " " + email_address, extra='flash')
@workerapp.task
def background_action(yaml_filename, user_info, session_code, secret, url, url_root, action, extra=None):
if url_root is None:
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
if url is None:
url = url_root + 'interview'
time.sleep(1.0)
worker_controller.initialize()
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(session_code)
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
if not str(user_info['the_user_id']).startswith('t'):
user_object = worker_controller.get_user_object(user_info['theid'])
worker_controller.login_user(user_object, remember=False)
worker_controller.update_last_login(user_object)
logmessage("background_action: yaml_filename is " + str(yaml_filename) + " and session code is " + str(session_code) + " and action is " + repr(action))
worker_controller.set_request_active(False)
if action['action'] == 'incoming_email':
if 'id' in action['arguments']:
action['arguments'] = {'email': worker_controller.retrieve_email(action['arguments']['id'])}
the_current_info = {'user': user_info, 'session': session_code, 'secret': secret, 'yaml_filename': yaml_filename, 'url': url, 'url_root': url_root, 'encrypted': True, 'action': action['action'], 'interface': 'worker', 'arguments': action['arguments']}
worker_controller.functions.this_thread.current_info = the_current_info
interview = worker_controller.interview_cache.get_interview(yaml_filename)
worker_controller.obtain_lock_patiently(session_code, yaml_filename)
try:
steps, user_dict, is_encrypted = worker_controller.fetch_user_dict(session_code, yaml_filename, secret=secret)
except Exception as the_err:
worker_controller.release_lock(session_code, yaml_filename)
logmessage("background_action: could not obtain dictionary because of " + str(the_err.__class__.__name__) + ": " + str(the_err))
return worker_controller.functions.ReturnValue(extra=extra)
the_current_info['encrypted'] = is_encrypted
worker_controller.release_lock(session_code, yaml_filename)
if user_dict is None:
logmessage("background_action: dictionary could not be found")
return worker_controller.functions.ReturnValue(extra=extra)
start_time = time.time()
interview_status = worker_controller.parse.InterviewStatus(current_info=the_current_info)
old_language = worker_controller.functions.get_language()
try:
interview.assemble(user_dict, interview_status)
except Exception as e:
if hasattr(e, '__traceback__'):
logmessage("Error in assembly: " + str(e.__class__.__name__) + ": " + str(e) + ": " + str(traceback.format_tb(e.__traceback__)))
else:
logmessage("Error in assembly: " + str(e.__class__.__name__) + ": " + str(e))
error_type = e.__class__.__name__
error_message = str(e)
if hasattr(e, '__traceback__'):
error_trace = ''.join(traceback.format_tb(e.__traceback__))
if hasattr(e, 'da_line_with_error'):
error_trace += "\nIn line: " + str(e.da_line_with_error)
else:
error_trace = None
variables = list(reversed(list(worker_controller.functions.this_thread.current_variable)))
worker_controller.error_notification(e, message=error_message, trace=error_trace)
if 'on_error' not in worker_controller.functions.this_thread.current_info:
return worker_controller.functions.ReturnValue(ok=False, error_message=error_message, error_type=error_type, error_trace=error_trace, variables=variables)
logmessage("Time in background action before error callback was " + str(time.time() - start_time))
worker_controller.functions.set_language(old_language)
return process_error(interview, session_code, yaml_filename, secret, user_info, url, url_root, is_encrypted, error_type, error_message, error_trace, variables, extra)
worker_controller.functions.set_language(old_language)
logmessage("Time in background action was " + str(time.time() - start_time))
if not hasattr(interview_status, 'question'):
# logmessage("background_action: status had no question")
return worker_controller.functions.ReturnValue(extra=extra)
if interview_status.question.question_type in ["restart", "exit", "exit_logout"]:
# logmessage("background_action: status was restart or exit")
worker_controller.obtain_lock_patiently(session_code, yaml_filename)
if str(user_info.get('the_user_id', None)).startswith('t'):
worker_controller.reset_user_dict(session_code, yaml_filename, temp_user_id=user_info.get('theid', None))
else:
worker_controller.reset_user_dict(session_code, yaml_filename, user_id=user_info.get('theid', None))
worker_controller.release_lock(session_code, yaml_filename)
# if interview_status.question.question_type in ["restart", "exit", "logout", "exit_logout", "new_session"]:
# # There is no lock to release. Why is this here?
# # worker_controller.release_lock(session_code, yaml_filename)
# pass
if interview_status.question.question_type == "response":
# logmessage("background_action: status was response")
if hasattr(interview_status.question, 'all_variables'):
pass
elif not hasattr(interview_status.question, 'binaryresponse'):
sys.stdout.write(interview_status.questionText.rstrip() + "\n")
if interview_status.question.question_type == "backgroundresponse":
# logmessage("background_action: status was backgroundresponse")
return worker_controller.functions.ReturnValue(value=interview_status.question.backgroundresponse, extra=extra)
if interview_status.question.question_type == "backgroundresponseaction":
# logmessage("background_action: status was backgroundresponseaction")
start_time = time.time()
new_action = interview_status.question.action
# logmessage("new action is " + repr(new_action))
the_current_info = {'user': user_info, 'session': session_code, 'secret': secret, 'yaml_filename': yaml_filename, 'url': url, 'url_root': url_root, 'encrypted': True, 'interface': 'worker', 'action': new_action['action'], 'arguments': new_action['arguments']}
worker_controller.functions.this_thread.current_info = the_current_info
worker_controller.obtain_lock_patiently(session_code, yaml_filename)
steps, user_dict, is_encrypted = worker_controller.fetch_user_dict(session_code, yaml_filename, secret=secret)
the_current_info['encrypted'] = is_encrypted
interview_status = worker_controller.parse.InterviewStatus(current_info=the_current_info)
old_language = worker_controller.functions.get_language()
try:
interview.assemble(user_dict, interview_status)
has_error = False
except Exception as e:
if hasattr(e, 'traceback'):
logmessage("Error in assembly during callback: " + str(e.__class__.__name__) + ": " + str(e) + ": " + str(e.traceback))
else:
logmessage("Error in assembly during callback: " + str(e.__class__.__name__) + ": " + str(e))
error_type = e.__class__.__name__
error_message = str(e)
if hasattr(e, 'traceback'):
error_trace = str(e.traceback)
if hasattr(e, 'da_line_with_error'):
error_trace += "\nIn line: " + str(e.da_line_with_error)
else:
error_trace = None
variables = list(reversed(list(worker_controller.functions.this_thread.current_variable)))
worker_controller.error_notification(e, message=error_message, trace=error_trace)
has_error = True
# is this right? Save even though there was an error on assembly?
worker_controller.functions.set_language(old_language)
save_status = worker_controller.functions.this_thread.misc.get('save_status', 'new')
if (not has_error) and save_status != 'ignore':
if str(user_info.get('the_user_id', None)).startswith('t'):
worker_controller.save_user_dict(session_code, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, steps=steps)
else:
worker_controller.save_user_dict(session_code, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, manual_user_id=user_info['theid'], steps=steps)
worker_controller.release_lock(session_code, yaml_filename)
if has_error:
return worker_controller.functions.ReturnValue(ok=False, error_type=error_type, error_trace=error_trace, error_message=error_message, variables=variables, extra=extra)
if hasattr(interview_status, 'question'):
if interview_status.question.question_type == "response":
# logmessage("background_action: status was response")
if hasattr(interview_status.question, 'all_variables'):
pass
elif not hasattr(interview_status.question, 'binaryresponse'):
sys.stdout.write(interview_status.questionText.rstrip() + "\n")
elif interview_status.question.question_type == "backgroundresponse":
logmessage("Time in background response action was " + str(time.time() - start_time))
return worker_controller.functions.ReturnValue(value=interview_status.question.backgroundresponse, extra=extra)
logmessage("Time in background response action was " + str(time.time() - start_time))
return worker_controller.functions.ReturnValue(value=new_action, extra=extra)
if hasattr(interview_status, 'questionText') and interview_status.questionText:
if interview_status.orig_sought != interview_status.sought:
sought_message = str(interview_status.orig_sought) + " (" + interview_status.sought + ")"
else:
sought_message = str(interview_status.orig_sought)
logmessage("background_action: The end result of the background action was the seeking of the variable " + sought_message + ", which resulted in asking this question: " + repr(str(interview_status.questionText).strip()))
logmessage("background_action: Perhaps your interview did not ask all of the questions needed for the background action to do its work.")
logmessage("background_action: Or perhaps your background action did its job, but you did not end it with a call to background_response().")
error_type = 'QuestionError'
error_trace = None
error_message = interview_status.questionText
variables = list(reversed(list(worker_controller.functions.this_thread.current_variable)))
worker_controller.error_notification(Exception("The end result of the background action was the seeking of the variable " + sought_message + ", which resulted in asking this question: " + repr(str(interview_status.questionText).strip())))
if 'on_error' not in worker_controller.functions.this_thread.current_info:
return worker_controller.functions.ReturnValue(ok=False, error_type=error_type, error_trace=error_trace, error_message=error_message, variables=variables, extra=extra)
return process_error(interview, session_code, yaml_filename, secret, user_info, url, url_root, is_encrypted, error_type, error_message, error_trace, variables, extra)
logmessage("background_action: finished")
return worker_controller.functions.ReturnValue(extra=extra)
@workerapp.task
def ocr_google(image_file, raw_result, user_code):
logmessage("ocr_google started in worker")
worker_controller.initialize()
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
with worker_controller.flaskapp.app_context():
with worker_controller.flaskapp.test_request_context(base_url=url_root, path=url):
worker_controller.functions.reset_local_variables()
worker_controller.functions.set_uid(user_code)
worker_controller.set_request_active(False)
return worker_controller.util.google_ocr_file(image_file, raw_result=raw_result)
======================================================================
File: convert_dense_to_csr.py
Path: /benchmarks/DNN/blocks/fusedresNet_inference/cpu/sparse/convert_dense_to_csr.py
Repo: Tiramisu-Compiler/tiramisu (license: MIT, permissive)
Language: Python (repo primary language: C++), UTF-8, 2,009 bytes
======================================================================
import numpy as np
# Blocking to set in order to generate a data where FIN is blocked (for blocked traversal)
############ PARAMETERS TO SET
dense_filename = "resnet_10.npy"
sparse_output_filename = "resnet_10.csr"
N = 224 # Input height and width
FIN_BL = 8
###############################
# The output file's format is:
'''
FOut, FIn, K, N, NNZ
/
all non zero values separated by line breaks
/
all rowptr values separated by line breaks
/
all colidx values separated by line breaks
'''
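# Illustrative example (not taken from real weights): with FOut=1, FIn=8 (one FIN_BL block),
# K=1, N=224 and a single non-zero weight of 0.5 at (fout=0, fin=0, ky=0, kx=0),
# the file written below would contain:
# 1, 8, 1, 224, 1
# /
# 0.5
# /
# 0
# 1
# /
# 0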
def denseToCSR(arr, N):
nnz=0
FOut = arr.shape[0] # get number of output features
FIn = arr.shape[1] # get number of input features
K = arr.shape[2] # get kernel size
vals=[] # csr array of values (size NNZ)
indexes=[] # csr array of col indexes (size NNZ)
finptr=[] # csr array of row ptr (size FOut + 1)
for fout in range(FOut):
finptr.append(nnz)
for fin_b in range(int(FIn/FIN_BL)):
for ky in range(K):
for kx in range(K):
for ffin in range(FIN_BL):
if arr[fout, fin_b * FIN_BL + ffin, ky, kx] != 0:
vals.append(arr[fout, (fin_b * FIN_BL + ffin), ky, kx])
indexes.append((fin_b * FIN_BL + ffin) * (N + 2) * (N + 2) + ky * (N + 2) + kx)
nnz+=1
finptr.append(nnz)
print("FOUT = ", FOut)
print("FIn = ", FIn)
print("K = ", K)
print("Density = ", nnz / (FOut * FIn * K * K))
return vals, finptr, indexes, nnz
v = np.load(dense_filename)
values, rowptr, colidx, nnz = denseToCSR(v, N)
with open(sparse_output_filename, 'w') as f:
f.write("%d, %d, %d, %d, %d \n" % (v.shape[0], v.shape[1], v.shape[2], N, nnz))
f.write("/\n")
# Write values
for item in values:
f.write("%s\n" % item)
f.write("/\n")
# Write rowptr
for item in rowptr:
f.write("%s\n" % item)
f.write("/\n")
# Write colidx
for item in colidx:
f.write("%s\n" % item)
======================================================================
File: dependency_parsing.py
Path: /src/kwja/utils/dependency_parsing.py
Repo: ku-nlp/kwja (licenses: LicenseRef-scancode-unknown-license-reference, MIT; permissive)
Language: Python, UTF-8, 1,270 bytes
======================================================================
from collections import defaultdict
from typing import Dict, Set
class DependencyManager:
def __init__(self) -> None:
self.directed_graph: Dict[int, Set[int]] = defaultdict(set)
self.root = False
def add_edge(self, source: int, target: int) -> None:
self.directed_graph[source].add(target)
def remove_edge(self, source: int, target: int) -> None:
self.directed_graph[source].remove(target)
def is_cyclic(self, source: int, visited: Set[int], cache: Dict[int, bool]) -> bool:
if source in cache:
return cache[source]
if source in visited:
return True
else:
visited.add(source)
ret = False
for target in self.directed_graph[source]:
if self.is_cyclic(target, visited, cache):
ret = True
break
cache[source] = ret
return ret
def has_cycle(self) -> bool:
visited: Set[int] = set()
cache: Dict[int, bool] = {}
# cast keys to list to avoid RuntimeError: dictionary changed size during iteration
for source in list(self.directed_graph.keys()):
if self.is_cyclic(source, visited, cache):
return True
return False
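# Illustrative usage (not part of the original module):
# manager = DependencyManager()
# manager.add_edge(1, 2)
# manager.add_edge(2, 3)
# manager.add_edge(3, 1)
# manager.has_cycle()    # True: 1 -> 2 -> 3 -> 1
# manager.remove_edge(3, 1)
# manager.has_cycle()    # False once the back edge is removed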
======================================================================
File: abc.py
Path: /allel/abc.py
Repo: cggh/scikit-allel (license: MIT, permissive)
Language: Python (repo primary language: Jupyter Notebook), UTF-8, 14,839 bytes
======================================================================
# -*- coding: utf-8 -*-
# third-party imports
import numpy as np
class ArrayWrapper(object):
"""Abstract base class that delegates to a wrapped array-like object."""
def __init__(self, data):
if isinstance(data, ArrayWrapper):
# don't wrap a wrapper
data = data.values
if not hasattr(data, 'shape') or not hasattr(data, 'dtype'):
raise TypeError('values must be array-like')
self._values = data
@property
def values(self):
"""The underlying array of values.
Returns
-------
ndarray
"""
return self._values
@property
def caption(self):
return '<%s shape=%s dtype=%s>' % (type(self).__name__, self.shape, self.dtype)
def __repr__(self):
return self.caption
def __getattr__(self, item):
if item in {'__array_struct__', '__array_interface__'}:
# don't pass these through because we want to use __array__ to control numpy
# behaviour
raise AttributeError
return getattr(self.values, item)
def __getitem__(self, item):
return self.values[item]
def __setitem__(self, item, value):
self.values[item] = value
def __iter__(self):
return iter(self.values)
def __len__(self):
return len(self.values)
def __array__(self, *args):
v = self.values[:]
a = np.asanyarray(v)
if args:
a = a.astype(args[0])
return a
def __eq__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values == other
def __ne__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values != other
def __lt__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values < other
def __gt__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values > other
def __le__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values <= other
def __ge__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values >= other
def __abs__(self):
return abs(self.values)
def __add__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values + other
def __and__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values & other
def __div__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values.__div__(other)
def __floordiv__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values // other
def __inv__(self):
return ~self.values
def __invert__(self):
return ~self.values
def __lshift__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values << other
def __mod__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values % other
def __mul__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values * other
def __neg__(self):
return -self.values
def __or__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values | other
def __pos__(self):
return +self.values
def __pow__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values ** other
def __rshift__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values >> other
def __sub__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values - other
def __truediv__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values.__truediv__(other)
def __xor__(self, other):
if isinstance(other, ArrayWrapper):
other = other.values
return self.values ^ other
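# Illustrative usage (not part of the original module): wrapping a NumPy array
# delegates attribute access and arithmetic to the underlying values.
# w = ArrayWrapper(np.array([1, 2, 3]))
# w.shape          # (3,) via __getattr__ delegation
# (w + 1).tolist() # [2, 3, 4]; operators unwrap other ArrayWrappers and return plain ndarrays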
ellipsis_str = '...'
def arr1d_to_html(indices, items, caption):
# N.B., table captions don't render in jupyter notebooks on GitHub,
# so put caption outside table element
html = '<div class="allel allel-DisplayAs1D">'
# sanitize caption
caption = caption.replace('<', '&lt;').replace('>', '&gt;')
html += '<span>%s</span>' % caption
# build table
html += '<table>'
html += '<thead>'
html += '<tr>'
for i in indices:
html += '<th style="text-align: center">%s</th>' % i
html += '</tr>'
html += '</thead>'
html += '<tbody>'
html += '<tr>'
for item in items:
html += '<td style="text-align: center">%s</td>' % item
html += '</tr>'
html += '</tbody>'
html += '</table>'
html += '</div>'
return html
_row_index_style = ('text-align: center; '
'background-color: white; '
'border-right: 1px solid black; ')
def arr2d_to_html(row_indices, col_indices, items, caption):
# N.B., table captions don't render in jupyter notebooks on GitHub,
# so put caption outside table element
html = '<div class="allel allel-DisplayAs2D">'
# sanitize caption
caption = caption.replace('<', '&lt;').replace('>', '&gt;')
html += '<span>%s</span>' % caption
# build table
html += '<table>'
html += '<thead>'
html += '<tr><th></th>'
for i in col_indices:
html += '<th style="text-align: center">%s</th>' % i
html += '</tr>'
html += '</thead>'
html += '<tbody>'
for row_index, row in zip(row_indices, items):
if row_index == ellipsis_str:
html += (('<tr><th style="%s">...</th>' % _row_index_style) +
('<td style="text-align: center" colspan="%s">...</td></tr>' %
(len(col_indices) + 1)))
else:
html += '<tr><th style="%s">%s</th>' % (_row_index_style, row_index)
for item in row:
html += '<td style="text-align: center">%s</td>' % item
html += '</tr>'
html += '</tbody>'
html += '</table>'
html += '</div>'
return html
def recarr_to_html(names, indices, items, caption):
# N.B., table captions don't render in jupyter notebooks on GitHub,
# so put caption outside table element
html = '<div class="allel allel-DisplayAsTable">'
# sanitize caption
caption = caption.replace('<', '&lt;').replace('>', '&gt;')
html += '<span>%s</span>' % caption
# build table
html += '<table>'
html += '<thead>'
html += '<tr><th></th>'
for n in names:
html += '<th style="text-align: center">%s</th>' % n
html += '</tr>'
html += '</thead>'
html += '<tbody>'
for row_index, row in zip(indices, items):
if row_index == ellipsis_str:
html += (('<tr><th style="%s">...</th>' % _row_index_style) +
('<td style="text-align: center" colspan="%s">...</td></tr>' %
(len(names) + 1)))
else:
html += '<tr><th style="%s">%s</th>' % (_row_index_style, row_index)
for item in row:
html += '<td style="text-align: center">%s</td>' % item
html += '</tr>'
html += '</tbody>'
html += '</table>'
html += '</div>'
return html
class DisplayableArray(ArrayWrapper):
def __repr__(self):
return self.caption + '\n' + str(self)
def __str__(self):
return self.to_str()
def _repr_html_(self):
return self.to_html()
# noinspection PyAbstractClass
class DisplayAs1D(DisplayableArray):
def str_items(self):
# can be overridden in sub-class to provide custom display behaviour
return [repr(i) for i in self]
def get_display_items(self, threshold=10, edgeitems=5):
# ensure threshold
if threshold is None:
threshold = self.shape[0]
# ensure sensible edgeitems
edgeitems = min(edgeitems, threshold // 2)
# determine indices of items to show
if self.shape[0] > threshold:
indices = (
list(range(edgeitems)) + [ellipsis_str] +
list(range(self.shape[0] - edgeitems, self.shape[0], 1))
)
head = self[:edgeitems].str_items()
tail = self[self.shape[0] - edgeitems:].str_items()
items = head + [ellipsis_str] + tail
else:
indices = list(range(self.shape[0]))
items = self[:].str_items()
return indices, items
def to_str(self, threshold=10, edgeitems=5):
_, items = self.get_display_items(threshold, edgeitems)
s = '[' + ', '.join(items) + ']'
return s
def to_html(self, threshold=10, edgeitems=5, caption=None):
indices, items = self.get_display_items(threshold, edgeitems)
if caption is None:
caption = self.caption
return arr1d_to_html(indices, items, caption)
def display(self, threshold=10, edgeitems=5, caption=None):
html = self.to_html(threshold, edgeitems, caption)
from IPython.display import display_html
display_html(html, raw=True)
def displayall(self, caption=None):
self.display(threshold=None, caption=caption)
# noinspection PyAbstractClass
class DisplayAs2D(DisplayableArray):
def str_items(self):
# can be overridden in sub-class to provide custom display behaviour
return [[repr(i) for i in row] for row in self]
def get_display_items(self, row_threshold, col_threshold, row_edgeitems, col_edgeitems):
# ensure threshold
if row_threshold is None:
row_threshold = self.shape[0]
if col_threshold is None:
col_threshold = self.shape[1]
# ensure sensible edgeitems
row_edgeitems = min(row_edgeitems, row_threshold // 2)
col_edgeitems = min(col_edgeitems, col_threshold // 2)
# determine row indices of items to show
if self.shape[0] > row_threshold:
row_indices = (
list(range(row_edgeitems)) + [ellipsis_str] +
list(range(self.shape[0] - row_edgeitems, self.shape[0], 1))
)
head = self[:row_edgeitems].str_items()
tail = self[self.shape[0] - row_edgeitems:].str_items()
items = head + [ellipsis_str] + tail
else:
row_indices = list(range(self.shape[0]))
items = self[:].str_items()
# determine col indices of items to show
if self.shape[1] > col_threshold:
col_indices = (
list(range(col_edgeitems)) + [ellipsis_str] +
list(range(self.shape[1] - col_edgeitems, self.shape[1], 1))
)
items = [
row if row == ellipsis_str else
(row[:col_edgeitems] + [ellipsis_str] + row[self.shape[1] - col_edgeitems:])
for row in items
]
else:
col_indices = list(range(self.shape[1]))
# items unchanged
return row_indices, col_indices, items
def to_str(self, row_threshold=6, col_threshold=10, row_edgeitems=3, col_edgeitems=5):
_, _, items = self.get_display_items(row_threshold, col_threshold, row_edgeitems,
col_edgeitems)
s = ''
for row in items:
if row == ellipsis_str:
s += row + '\n'
else:
s += ' '.join(row) + '\n'
return s
def to_html(self, row_threshold=6, col_threshold=10, row_edgeitems=3, col_edgeitems=5,
caption=None):
row_indices, col_indices, items = self.get_display_items(
row_threshold, col_threshold, row_edgeitems, col_edgeitems
)
if caption is None:
caption = self.caption
return arr2d_to_html(row_indices, col_indices, items, caption)
def display(self, row_threshold=6, col_threshold=10, row_edgeitems=3,
col_edgeitems=5, caption=None):
html = self.to_html(row_threshold, col_threshold, row_edgeitems, col_edgeitems,
caption)
from IPython.display import display_html
display_html(html, raw=True)
def displayall(self, caption=None):
self.display(row_threshold=None, col_threshold=None, caption=caption)
class DisplayAsTable(DisplayableArray):
@property
def names(self):
"""Column names."""
return self.dtype.names
def str_items(self):
tmp = self[:]
items = [[str(x) for x in row] for row in tmp]
return items
def get_display_items(self, threshold=6, edgeitems=3):
# ensure threshold
if threshold is None:
threshold = self.shape[0]
# ensure sensible edgeitems
edgeitems = min(edgeitems, threshold // 2)
# determine indices of items to show
if self.shape[0] > threshold:
indices = (
list(range(edgeitems)) + [ellipsis_str] +
list(range(self.shape[0] - edgeitems, self.shape[0], 1))
)
head = self[:edgeitems].str_items()
tail = self[self.shape[0] - edgeitems:].str_items()
items = head + [ellipsis_str] + tail
else:
indices = list(range(self.shape[0]))
items = self[:].str_items()
return indices, items
def to_str(self, threshold=6, edgeitems=3):
_, items = self.get_display_items(threshold, edgeitems)
s = ' '.join(items)
return s
def to_html(self, threshold=6, edgeitems=3, caption=None):
indices, items = self.get_display_items(threshold, edgeitems)
if caption is None:
caption = self.caption
return recarr_to_html(self.names, indices, items, caption)
def display(self, threshold=6, edgeitems=3, caption=None):
html = self.to_html(threshold, edgeitems, caption)
from IPython.display import display_html
display_html(html, raw=True)
def displayall(self, caption=None):
self.display(threshold=None, caption=caption)
def __str__(self):
# stick with default string output of values
return str(self.values)