| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
7c769ad6cf9a73b1cea6417113b4bac867f2f54e
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/scripts/detection/center_net/fit_center_net.py
|
812087e8e36d4ef40c3728af7c087df56b5a10cd
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 961
|
py
|
fit_center_net.py
|
import logging
import gluoncv as gcv
gcv.utils.check_version('0.8.0')
from gluoncv.auto.estimators import CenterNetEstimator
from gluoncv.auto.tasks.utils import config_to_nested
from d8.object_detection import Dataset
if __name__ == '__main__':
# specify hyperparameters
config = {
'dataset': 'sheep',
'gpus': [0, 1, 2, 3, 4, 5, 6, 7],
'estimator': 'center_net',
'base_network': 'resnet50_v1b',
'batch_size': 64, # range [8, 16, 32, 64]
'epochs': 3
}
config = config_to_nested(config)
config.pop('estimator')
# specify dataset
dataset = Dataset.get('sheep')
train_data, valid_data = dataset.split(0.8)
# specify estimator
estimator = CenterNetEstimator(config)
# fit estimator
estimator.fit(train_data, valid_data)
# evaluate auto estimator
eval_map = estimator.evaluate(valid_data)
logging.info('evaluation: mAP={}'.format(eval_map[-1][-1]))
|
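The inline comment on `batch_size` above sketches a small search space. Below is a minimal, illustrative sweep over those candidates that reuses only the imports and calls from the script itself; the sweep loop and result dictionary are additions, not part of the original example.

```python
import logging

from gluoncv.auto.estimators import CenterNetEstimator
from gluoncv.auto.tasks.utils import config_to_nested
from d8.object_detection import Dataset

dataset = Dataset.get('sheep')
train_data, valid_data = dataset.split(0.8)

results = {}
for batch_size in [8, 16, 32, 64]:  # candidates from the inline comment above
    config = config_to_nested({
        'dataset': 'sheep',
        'gpus': [0, 1, 2, 3, 4, 5, 6, 7],
        'estimator': 'center_net',
        'base_network': 'resnet50_v1b',
        'batch_size': batch_size,
        'epochs': 3,
    })
    config.pop('estimator')
    estimator = CenterNetEstimator(config)
    estimator.fit(train_data, valid_data)
    eval_map = estimator.evaluate(valid_data)
    results[batch_size] = eval_map[-1][-1]  # same mAP indexing as the original script
    logging.info('batch_size=%d mAP=%s', batch_size, results[batch_size])
```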
5300d1315b01eb5a43c6d3888c93cd44bec04d38
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_optimizer/example/pruning/tensorflow_v1/mnist_pruning.py
|
8044d1c9698868f6f913dd1e827ce1f26a1e858d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,974
|
py
|
mnist_pruning.py
|
# MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
keras = tf.keras
def mnist_convnet():
num_classes = 10
input_shape = (28, 28, 1)
model = keras.Sequential([
layers.InputLayer(input_shape=input_shape),
layers.Conv2D(16, kernel_size=(3, 3), activation="relu"),
layers.BatchNormalization(),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
])
return model, input_shape
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
with tf.compat.v1.Session().as_default() as sess:
tf.import_graph_def(frozen_graph_def, name="")
inp = np.random.rand(1, 28, 28, 1)
a = sess.run(sess.graph.get_tensor_by_name('dense/BiasAdd:0'), feed_dict={'input_1:0': inp})
print(a)
print("in eval_fn done")
return 0.5
def prune():
with tf.Session() as sess:
model, input_shape = mnist_convnet()
sess.run(tf.global_variables_initializer())
input_specs={'input_1:0': tf.TensorSpec(shape=(1, 28, 28, 1), dtype=tf.dtypes.float32)}
pruner = IterativePruningRunner("mnist", sess, input_specs, ["dense/BiasAdd"])
pruner.ana(eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
shape_tensors, masks = pruner.prune(sparsity=0.5)
def loss_fn():
images = np.ones((1, 28, 28, 1), dtype=np.float32)
out = model(images, training=True)
return tf.reduce_sum(out)
opt = tf.compat.v1.train.GradientDescentOptimizer(3.0)
sess.run(opt.minimize(loss_fn, var_list=tf.trainable_variables()))
slim_graph_def = pruner.get_slim_graph_def(shape_tensors, masks)
if __name__ == "__main__":
prune()
|
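The `eval_fn` above feeds random data and returns a constant 0.5, so the pruning sensitivity analysis gets no real signal. Below is a hedged sketch of an accuracy-based evaluation callback with the same signature, reusing the tensor names from the script (`input_1:0`, `dense/BiasAdd:0`); the MNIST slice size and the per-image loop are illustrative choices, not part of the original example.

```python
import numpy as np
import tensorflow as tf

def accuracy_eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Score a pruned graph by accuracy on a small MNIST slice (sketch)."""
    (_, _), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_test = (x_test[:200].astype(np.float32) / 255.0).reshape(-1, 28, 28, 1)
    y_test = y_test[:200]
    correct = 0
    with tf.compat.v1.Session().as_default() as sess:
        tf.import_graph_def(frozen_graph_def, name="")
        logits_t = sess.graph.get_tensor_by_name('dense/BiasAdd:0')
        for image, label in zip(x_test, y_test):
            # input_specs in prune() fix the batch dimension at 1
            logits = sess.run(logits_t, feed_dict={'input_1:0': image[np.newaxis]})
            correct += int(np.argmax(logits) == label)
    return correct / len(y_test)

# Then, inside prune():  pruner.ana(accuracy_eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
```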
96ad443149a4eefd210144a9bcc868230c727c10
|
501887e82173e138683aff04fc579618786fd795
|
/seldom/utils/diff.py
|
2fdd83d6a5373134b84a04fa863b6825707d7507
|
[
"Apache-2.0"
] |
permissive
|
SeldomQA/seldom
|
a66e6975f0e09e341d8adb6f89b2bf5a69458cd5
|
b3ad3da2ecfd5b3c56520179e431f9b34ba47c69
|
refs/heads/master
| 2023-09-02T18:48:32.656208
| 2023-09-02T12:47:53
| 2023-09-02T12:47:53
| 39,141,541
| 544
| 198
|
Apache-2.0
| 2023-08-08T16:59:37
| 2015-07-15T14:29:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,327
|
py
|
diff.py
|
"""
diff file
"""
from typing import Any
from seldom.logging import log
class AssertInfo:
warning = []
error = []
def diff_json(response_data: Any, assert_data: Any, exclude: list = None) -> None:
"""
Compare the JSON data format
"""
if exclude is None:
exclude = []
if isinstance(response_data, dict) and isinstance(assert_data, dict):
# dict format
for key in assert_data:
# skip check
if key in exclude:
continue
if key not in response_data:
AssertInfo.error.append(f"❌ Response data has no key: {key}")
for key in response_data:
# skip check
if key in exclude:
continue
if key in assert_data:
# recursion
diff_json(response_data[key], assert_data[key], exclude)
else:
AssertInfo.warning.append(f"💡 Assert data has no key: {key}")
elif isinstance(response_data, list) and isinstance(assert_data, list):
# list format
if len(response_data) == 0:
log.info("response is []")
else:
if isinstance(response_data[0], dict):
try:
response_data = sorted(response_data, key=lambda x: x[list(response_data[0].keys())[0]])
except TypeError:
response_data = response_data
else:
response_data = sorted(response_data)
if len(response_data) != len(assert_data):
log.info(f"list len: '{len(response_data)}' != '{len(assert_data)}'")
if len(assert_data) > 0:
if isinstance(assert_data[0], dict):
try:
assert_data = sorted(assert_data, key=lambda x: x[list(assert_data[0].keys())[0]])
except TypeError:
assert_data = assert_data
else:
assert_data = sorted(assert_data)
for src_list, dst_list in zip(response_data, assert_data):
# recursion
diff_json(src_list, dst_list, exclude)
else:
# different format
if str(response_data) != str(assert_data):
AssertInfo.error.append(f"❌ Values are not equal: {assert_data} != {response_data}")
|
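A minimal usage sketch for `diff_json`, assuming the module path `seldom.utils.diff` implied by the file location above; the sample payloads are invented for illustration.

```python
from seldom.utils.diff import AssertInfo, diff_json  # module path assumed from the file location

response = {"code": 0, "data": {"name": "tom", "age": 20, "id": 1001, "token": "abc123"}}
expected = {"code": 0, "data": {"name": "tom", "age": 21}}

diff_json(response, expected, exclude=["token"])  # 'token' is skipped entirely

print(AssertInfo.error)    # one entry: the 'age' values differ
print(AssertInfo.warning)  # one entry: 'id' is in the response but not in the assert data
```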
4d683ad95c3c40e480755d7d82edb267c2508ffc
|
07c27d72f10dbf64c11b6d6cae83d4dc757a2384
|
/pbxproj/pbxcli/pbxproj_show.py
|
e263752402e0366665b939ec4bbd102f29c7b019
|
[
"MIT"
] |
permissive
|
kronenthaler/mod-pbxproj
|
a481cded65a4f082338c1c8b290aa3382124b6ce
|
e0c42005cdcee3b10522ba5709f4228ec10c049c
|
refs/heads/master
| 2023-06-09T03:12:32.120613
| 2022-10-04T12:14:11
| 2022-10-04T12:14:11
| 8,949,989
| 1,089
| 278
|
MIT
| 2023-01-09T10:50:43
| 2013-03-22T10:50:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
pbxproj_show.py
|
"""
usage:
pbxproj show [options] <project>
pbxproj show [options] (--target <target>...) <project> [(-s | --source-files) |
(-H | --header-files) |
(-r | --resource-files) |
(-f | --framework-files) |
(--build-phase-files <build_phase_type>)]
positional arguments:
<project> Project path to the .xcodeproj folder.
generic options:
-h, --help This message.
-t, --target <target> Target name to be modified. If there is no target specified, all targets are used.
-b, --backup Creates a backup before start processing the command.
target options:
-s, --source-files Show the source files attached to the target
-r, --resource-files Show the resource files attached to the target
-f, --framework-files Show the library files attached to the target
-H, --header-files Show the header files attached to the target
-c, --configurations Show the configurations attached to the target
-B, --build-phase-files <type> Show the files associated with the build phase of the given type.
"""
def execute(project, args):
# decide which function to call based on whether --target was given
if args['--target']:
return _target_info(project, args['--target'], args)
else:
return _summary(project, args)
def _summary(project, _):
info = ''
for target in project.objects.get_targets():
configs = ', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)])
info += f"{target.name}:\n" \
f"\tTarget type: {target.isa}\n" \
f"\tProduct name: {target.productName}\n" \
f"\tConfigurations: {configs}\n" \
for build_phase_id in target.buildPhases:
build_phase = project.objects[build_phase_id]
info += f"\t{build_phase._get_comment()} ({build_phase.isa}) file count: {build_phase.files.__len__()}\n"
info += "\n"
return info
def _target_info(project, target_name, args):
build_phases = _process_parameters(args)
info = ''
for target in project.objects.get_targets(target_name):
info += f"{target.name}:\n" \
f"\tProduct name: {target.productName}\n"
if args['--configurations']:
configs = ', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)])
info += f"\tConfigurations: {configs}\n"
for build_phase_id in target.buildPhases:
build_phase = project.objects[build_phase_id]
if build_phase.isa in build_phases:
info += f"\t{build_phase._get_comment()}: \n\t\t"
files = []
for build_file_id in build_phase.files:
build_file = project.objects[build_file_id]
files.append(project.objects[build_file.fileRef]._get_comment())
formatted_files = "\n\t\t".join(sorted(files))
info += f'{formatted_files}\n'
info += '\n'
return info
def _process_parameters(args):
build_phases = []
if args['--source-files']:
build_phases += ['PBXSourcesBuildPhase']
elif args['--header-files']:
build_phases += ['PBXHeadersBuildPhase']
elif args['--resource-files']:
build_phases += ['PBXResourcesBuildPhase']
elif args['--framework-files']:
build_phases += ['PBXFrameworksBuildPhase']
elif args['--build-phase-files']:
build_phases += [args['--build-phase-files']]
return build_phases
|
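A sketch of driving `execute()` directly with a docopt-style argument dictionary mirroring the usage block above. The `XcodeProject.load` call is the documented mod-pbxproj entry point; the project path and target name are placeholders.

```python
from pbxproj import XcodeProject  # documented mod-pbxproj loader
from pbxproj.pbxcli import pbxproj_show

project = XcodeProject.load('MyApp.xcodeproj/project.pbxproj')  # placeholder path

# docopt-style dictionary mirroring the usage block above
args = {
    '--target': ['MyApp'],        # placeholder target name
    '--source-files': True,
    '--header-files': False,
    '--resource-files': False,
    '--framework-files': False,
    '--build-phase-files': None,
    '--configurations': False,
}
print(pbxproj_show.execute(project, args))
```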
bd1ab8435f0e785c554c40b788e8947b6b69bf12
|
bb381602af72b83a38b381a6518c4d01af3c8409
|
/robosuite/models/objects/object_groups.py
|
eb1100920fbb4a49db4c30bef139870f215dac30
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ARISE-Initiative/robosuite
|
64b8daeec1b2aba26f87300b0fbbde1ffe44a6a7
|
eb01e1ffa46f1af0a3aa3ac363d5e63097a6cbcc
|
refs/heads/master
| 2023-08-21T20:04:15.283656
| 2023-07-28T08:22:40
| 2023-07-28T08:22:40
| 154,626,012
| 670
| 247
|
NOASSERTION
| 2023-09-14T02:34:46
| 2018-10-25T07:11:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
object_groups.py
|
class ObjectGroup:
"""
An abstraction that encompasses a group of objects that interact together in a meaningful way
Args:
name (str): Name of this object group. This will be prepended to all objects generated by this group.
"""
def __init__(self, name):
# Store internal variables
self.name = name
self.sim = None # Reference to shared mjsim object
self._objects = {} # maps object names to object class instances
# Generate objects
self._generate_objects()
def get_states(self):
"""
Function to grab group-relevant states. This should be implemented by the subclass.
Returns:
dict: Keyword-mapped states for this group
"""
raise NotImplementedError
def update_sim(self, sim):
"""
Updates internal reference to sim and all other relevant references
Args:
sim (MjSim): Active mujoco sim reference
"""
self.sim = sim
def _generate_objects(self):
"""
Internal helper function that generates the objects for this group. Should populate self._objects mapping
names of objects to their actual object class instances.
"""
raise NotImplementedError
@property
def objects(self):
"""
Contains references to all objects owned by this group. Mapped from names to object instances
Returns:
dict: keyword-mapped object class instances
"""
return self._objects
|
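A minimal sketch of a concrete subclass: the base class only requires `_generate_objects()` and `get_states()`. `BallObject` here is a hypothetical stand-in, not a robosuite class.

```python
class BallPitGroup(ObjectGroup):
    """Hypothetical group: several balls that interact with each other."""

    def __init__(self, name, num_balls=3):
        # set attributes used by _generate_objects() before the base __init__ calls it
        self.num_balls = num_balls
        super().__init__(name=name)

    def _generate_objects(self):
        # populate self._objects, prepending the group name as the base docstring suggests
        for i in range(self.num_balls):
            obj_name = f"{self.name}_ball{i}"
            self._objects[obj_name] = BallObject(name=obj_name)  # BallObject is a hypothetical stand-in

    def get_states(self):
        # keyword-mapped states for this group
        return {name: obj.get_state(self.sim) for name, obj in self._objects.items()}
```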
88baf23fb43679c1a3f4e7734143b248d83b6696
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/tdw/add_ons/physics_audio_recorder.py
|
29955bf6493b46199d4f5caf478d09da16668288
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,773
|
py
|
physics_audio_recorder.py
|
from typing import List, Union
from pathlib import Path
import numpy as np
from tdw.output_data import OutputData, AudioSources, Rigidbodies, Transforms
from tdw.audio_utils import AudioUtils
from tdw.add_ons.add_on import AddOn
class PhysicsAudioRecorder(AddOn):
"""
Record audio generated by physics events.
"""
def __init__(self, max_frames: int = -1, record_audio: bool = True):
"""
:param max_frames: If greater than 0, stop recording after this many frames even if objects are still moving or making sound.
:param record_audio: If True, record audio to a file. If False, only listen to audio events.
"""
super().__init__()
""":field
If greater than 0, stop recording after this many frames even if objects are still moving or making sound.
"""
self.max_frames: int = max_frames
# The current frame.
self._frame: int = 0
""":field
The path to the next audio file.
"""
self.path: Path = Path.home()
""":field
If False, an audio recording is ongoing.
"""
self.done: bool = True
self._record_audio: bool = record_audio
def get_initialization_commands(self) -> List[dict]:
return []
def on_send(self, resp: List[bytes]) -> None:
if self.done:
return
# Stop recording at the maximum number of frames.
self._frame += 1
if 0 < self.max_frames <= self._frame:
if self._record_audio:
AudioUtils.stop()
return
# Get any objects that fell below the floor.
below_floor: List[int] = list()
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "tran":
transforms = Transforms(resp[i])
for j in range(transforms.get_num()):
if transforms.get_position(j)[1] < -0.1:
below_floor.append(transforms.get_id(j))
# Check if objects have stopped moving and no audio is playing.
sleeping = True
playing_audio = False
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "rigi":
rigidbodies = Rigidbodies(resp[i])
for j in range(rigidbodies.get_num()):
if rigidbodies.get_id(j) not in below_floor and not rigidbodies.get_sleeping(j):
sleeping = False
break
elif r_id == "audi":
audio_sources = AudioSources(resp[i])
for j in range(audio_sources.get_num()):
if audio_sources.get_is_playing(j):
playing_audio = True
break
# Check if the simulation is totally silent (there might be Resonance Audio reverb).
if not playing_audio and np.max(audio_sources.get_samples()) > 0:
playing_audio = True
if sleeping and not playing_audio:
self.stop()
def start(self, path: Union[str, Path] = None, device_name: str = None) -> None:
"""
Start recording.
:param path: The path to the output .wav file. If None, defaults to the current working directory.
:param device_name: The name of the audio capture device. If None, defaults to `"Stereo Mix"` (Windows and Linux) or `"iShowU Audio Capture"` (OS X).
"""
# Don't start a new recording if one is ongoing.
if not self.done:
return
self.done = False
if self._record_audio:
if path is None:
self.path = Path("")
elif isinstance(path, str):
self.path = Path(path)
else:
self.path = path
if not self.path.parent.exists():
self.path.parent.mkdir(parents=True)
if self.path.exists():
self.path.unlink()
self._frame = 0
# Start listening.
if self._record_audio:
AudioUtils.start(output_path=self.path, device_name=device_name)
self.commands.extend([{"$type": "send_audio_sources",
"frequency": "always"},
{"$type": "send_rigidbodies",
"frequency": "always"},
{"$type": "send_transforms",
"frequency": "always"}])
def stop(self) -> None:
"""
Stop an ongoing recording. Use ffmpeg to remove initial silence.
"""
if self._record_audio:
AudioUtils.stop()
self.done = True
|
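A hedged usage sketch with a TDW controller. The `Controller`/`add_ons` wiring follows the usual TDW add-on pattern and is assumed here rather than taken from this file; the scene setup is elided.

```python
from pathlib import Path
from tdw.controller import Controller
from tdw.add_ons.physics_audio_recorder import PhysicsAudioRecorder

c = Controller()
recorder = PhysicsAudioRecorder(max_frames=1000)
c.add_ons.append(recorder)

# ... scene, object, and audio initialization commands go here ...

recorder.start(path=Path.home().joinpath("physics_audio.wav"))
while not recorder.done:
    c.communicate([])  # step until objects sleep and no audio source is playing
c.communicate({"$type": "terminate"})
```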
4ae482c425e81de3583c15c3726dccc8dcb800de
|
87bcc79d3742213175de9d2c81a830a942ef8cd9
|
/tools/utils.py
|
b07beb9bc9db39a46d6803d06839206df543a829
|
[
"Apache-2.0"
] |
permissive
|
saltstack/salt-bootstrap
|
4840efe648e99c19820eebb478e565aa5bda3e9a
|
61709aac2b077372e6166e6f1e78191d8bcc5eea
|
refs/heads/develop
| 2023-09-01T20:32:10.115517
| 2023-08-10T21:45:00
| 2023-08-11T17:09:50
| 6,910,286
| 713
| 554
|
NOASSERTION
| 2023-09-11T20:13:58
| 2012-11-28T21:30:33
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
utils.py
|
# pylint: disable=resource-leakage,broad-except,3rd-party-module-not-gated
from __future__ import annotations
import os
import pathlib
from ptscripts import Context
from rich.progress import BarColumn
from rich.progress import Column
from rich.progress import DownloadColumn
from rich.progress import Progress
from rich.progress import TextColumn
from rich.progress import TimeRemainingColumn
from rich.progress import TransferSpeedColumn
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
GPG_KEY_FILENAME = "SALT-PROJECT-GPG-PUBKEY-2023"
SPB_ENVIRONMENT = os.environ.get("SPB_ENVIRONMENT") or "prod"
RELEASE_BUCKET_NAME = f"salt-project-{SPB_ENVIRONMENT}-salt-artifacts-release"
class UpdateProgress:
def __init__(self, progress, task):
self.progress = progress
self.task = task
def __call__(self, chunk_size):
self.progress.update(self.task, advance=chunk_size)
def create_progress_bar(file_progress: bool = False, **kwargs):
if file_progress:
return Progress(
TextColumn("[progress.description]{task.description}"),
BarColumn(),
DownloadColumn(),
TransferSpeedColumn(),
TextColumn("eta"),
TimeRemainingColumn(),
**kwargs,
)
return Progress(
TextColumn(
"[progress.description]{task.description}", table_column=Column(ratio=3)
),
BarColumn(),
expand=True,
**kwargs,
)
def export_gpg_key(ctx: Context, key_id: str, export_path: pathlib.Path):
keyfile_gpg = export_path.joinpath(GPG_KEY_FILENAME).with_suffix(".gpg")
if keyfile_gpg.exists():
keyfile_gpg.unlink()
ctx.info(f"Exporting GnuPG Key '{key_id}' to {keyfile_gpg} ...")
ctx.run("gpg", "--output", str(keyfile_gpg), "--export", key_id)
keyfile_pub = export_path.joinpath(GPG_KEY_FILENAME).with_suffix(".pub")
if keyfile_pub.exists():
keyfile_pub.unlink()
ctx.info(f"Exporting GnuPG Key '{key_id}' to {keyfile_pub} ...")
ctx.run("gpg", "--armor", "--output", str(keyfile_pub), "--export", key_id)
def gpg_sign(ctx: Context, key_id: str, path: pathlib.Path):
ctx.info(f"GPG Signing '{path}' ...")
signature_fpath = path.parent / f"{path.name}.asc"
if signature_fpath.exists():
signature_fpath.unlink()
ctx.run(
"gpg",
"--local-user",
key_id,
"--output",
str(signature_fpath),
"--armor",
"--detach-sign",
"--sign",
str(path),
)
|
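A small sketch showing how `create_progress_bar` and `UpdateProgress` fit together; the chunked file copy is illustrative, and the progress calls are standard rich usage.

```python
import pathlib

def copy_with_progress(src: pathlib.Path, dst: pathlib.Path, chunk_size: int = 1024 * 1024) -> None:
    """Copy src to dst in chunks, reporting progress through UpdateProgress."""
    with create_progress_bar(file_progress=True) as progress:
        task = progress.add_task(f"Copying {src.name}", total=src.stat().st_size)
        update = UpdateProgress(progress, task)
        with src.open("rb") as fin, dst.open("wb") as fout:
            while chunk := fin.read(chunk_size):
                fout.write(chunk)
                update(len(chunk))  # advances the rich task by the bytes just written
```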
dff7e3e1e28b647a8ae43d3fe3f98d55b9b54bca
|
e885a46c240a8579d01489787c9b03f4e6e0f0a2
|
/sd_material_ui/CollapseTransition.py
|
43bed6e60397a924ce0d0e3a1a7cee24fd3928b0
|
[
"MIT"
] |
permissive
|
StratoDem/sd-material-ui
|
4b5212ab6c037fbbef3a51aec05ec2e041dda396
|
4c6332946092685ce9a39b46f66704c6de3f5139
|
refs/heads/master
| 2023-01-30T14:37:35.403181
| 2021-12-21T18:27:13
| 2021-12-21T18:27:13
| 112,852,873
| 189
| 29
|
MIT
| 2023-01-23T10:41:30
| 2017-12-02T15:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
CollapseTransition.py
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CollapseTransition(Component):
"""A CollapseTransition component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The contents of the transition element.
- id (string; required):
Dash ID of the transition element.
- className (string; optional):
CSS class name of the root element.
- collapsedHeight (number; default 0):
The pixel height of the child element when collapsed.
- visible (boolean; default True):
If True, the transition element is displayed, else it will be
hidden."""
@_explicitize_args
def __init__(self, children=None, className=Component.UNDEFINED, collapsedHeight=Component.UNDEFINED, id=Component.REQUIRED, visible=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'collapsedHeight', 'visible']
self._type = 'CollapseTransition'
self._namespace = 'sd_material_ui'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'className', 'collapsedHeight', 'visible']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in ['id']:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(CollapseTransition, self).__init__(children=children, **args)
|
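A hedged sketch of using the component in a Dash app, assuming the package exposes `CollapseTransition` at the top level as generated Dash component packages normally do; the layout contents are placeholders.

```python
from dash import Dash, html
import sd_material_ui

app = Dash(__name__)
app.layout = html.Div([
    sd_material_ui.CollapseTransition(
        id='intro-collapse',      # required prop
        visible=True,             # flip via a callback to collapse/expand
        collapsedHeight=0,
        children=html.Div("Content shown while the transition is expanded"),
    ),
])

if __name__ == '__main__':
    app.run(debug=True)  # app.run_server(debug=True) on older Dash releases
```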
1aebf7a496b75d24ae5dad7fe8ce779d22572f34
|
bad44a92fb338260f9c077689d7fa5472526c3fe
|
/models/tensorflow/lstm.py
|
c51f80aa034d95444562fa87e2e9c061683a4156
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/nnfusion
|
ebc4c06331b8e93dbf5e176e5ecd3382e322ff21
|
bd4f6feed217a43c9ee9be16f02fa8529953579a
|
refs/heads/main
| 2023-08-25T17:41:37.517769
| 2022-09-16T05:59:01
| 2022-09-16T05:59:01
| 252,069,995
| 872
| 157
|
MIT
| 2023-07-19T03:06:21
| 2020-04-01T04:15:38
|
C++
|
UTF-8
|
Python
| false
| false
| 3,665
|
py
|
lstm.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import sys
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import graph_util
flags = tf.flags
logging = tf.logging
logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_integer("num_step", 100, "sequence length")
flags.DEFINE_integer("num_layer", 10, "num layer")
flags.DEFINE_integer("hidden_size", 256, "hidden size")
flags.DEFINE_integer("batch_size", 1, "mini batch size")
flags.DEFINE_boolean('profile', False, 'profile kernel runtime')
flags.DEFINE_string('backend', 'tf', 'tf or wolong or ngraph')
flags.DEFINE_integer("num_iter", 10, "mini batch size")
flags.DEFINE_integer("warmup", 5, "mini batch size")
flags.DEFINE_boolean('xla', False, 'enable xla')
flags.DEFINE_string('frozen_file', '', 'output path for the frozen pb file')
flags.DEFINE_integer("parallel", 0, "tf.ConfigProto.inter_op_parallelism_threads")
FLAGS = flags.FLAGS
class LSTMCell(object):
W = []
U = []
b = []
def __init__(self, hidden_size, scope):
with tf.variable_scope(scope):
self.W = []
self.U = []
self.b = []
self.num_unit = hidden_size
for i in range(4):
W = tf.get_variable(
"W%d" % (i), [self.num_unit, self.num_unit], dtype=tf.float32)
U = tf.get_variable(
"U%d" % (i), [self.num_unit, self.num_unit], dtype=tf.float32)
b = tf.get_variable("bias%d" % (i), [self.num_unit], dtype=tf.float32,
initializer=init_ops.constant_initializer(0, dtype=tf.float32))
self.W.append(W)
self.U.append(U)
self.b.append(b)
def call(self, inputs, state):
c, h = state
res = []
for i in range(4):
res.append(math_ops.matmul(
inputs, self.W[i]) + math_ops.matmul(h, self.U[i]) + self.b[i])
i, j, f, o = (res[0], res[1], res[2], res[3])
new_c = (c * math_ops.sigmoid(f + 1.0) +
math_ops.sigmoid(i) * math_ops.tanh(j))
new_h = math_ops.tanh(new_c) * math_ops.sigmoid(o)
new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class LSTMModel(object):
stacked_cells = []
def __init__(self, num_layer, hidden_size):
self.stacked_cells = []
self.num_layer = num_layer
self.num_unit = hidden_size
for layer in range(self.num_layer):
self.stacked_cells.append(
LSTMCell(self.num_unit, "LSTMLayer%d" % (layer)))
def run(self, inputs, batch_size, num_step):
self.batch_size = batch_size
self.num_step = num_step
cell = tf.nn.rnn_cell.BasicLSTMCell(
self.num_unit, forget_bias=1.0, state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, tf.float32)
self.state = [self._initial_state for layer in range(self.num_layer)]
for step in range(self.num_step):
cur_input = inputs[step, :, :]
for layer in range(self.num_layer):
cell_output, self.state[layer] = self.stacked_cells[layer].call(
cur_input, self.state[layer])
cur_input = cell_output
self.output = cell_output
return self.output, self.state[-1]
|
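The module defines the model and the flags, but the benchmark driver is not shown above. Below is a minimal graph-mode driver sketch in the same TF1 style; the random feed and the `[num_step, batch_size, hidden_size]` layout follow the indexing in `LSTMModel.run`, and the function name is an illustrative addition.

```python
def run_once():
    # placeholder input with the layout that LSTMModel.run() indexes per step
    inputs = tf.placeholder(tf.float32,
                            [FLAGS.num_step, FLAGS.batch_size, FLAGS.hidden_size],
                            name="inputs")
    model = LSTMModel(FLAGS.num_layer, FLAGS.hidden_size)
    output, _ = model.run(inputs, FLAGS.batch_size, FLAGS.num_step)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = np.random.rand(FLAGS.num_step, FLAGS.batch_size,
                              FLAGS.hidden_size).astype(np.float32)
        print("output shape:", sess.run(output, feed_dict={inputs: feed}).shape)
```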
e0190b261d721ab2c642b26544a26353d27ace11
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/apps/caprese/examples/cstr_nmpc.py
|
c388d520361a5ee44d2a6f91a0306ac8f42a83a1
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,863
|
py
|
cstr_nmpc.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Example for Caprese's module for NMPC.
"""
import random
from idaes.apps.caprese.nmpc import NMPCSim
from idaes.apps.caprese.util import apply_noise_with_bounds
from pyomo.environ import SolverFactory
from pyomo.dae.initialization import solve_consistent_initial_conditions
import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_model import make_model
from idaes.core.solvers import get_solver
import pandas as pd
import matplotlib.pyplot as plt
__author__ = "Robert Parker"
# See if ipopt is available and set up solver
if SolverFactory("ipopt").available():
solver = get_solver(
solver="ipopt",
options={
"tol": 1e-6,
"bound_push": 1e-8,
"halt_on_ampl_error": "yes",
"linear_solver": "ma57",
},
)
else:
solver = None
class PlotData(object):
def __init__(self, group, location, name=None, t_switch=None):
# Would really like a PlotData class that is constructed based on an
# NMPCVar object that contains necessary setpoint/reference
# information, instead of having to access that in the NMPCVarGroup
time = group.index_set
if t_switch is None:
t_switch = group.t0
self.name = name
var = group.varlist[location]
initial = group.reference[location]
setpoint = group.setpoint[location]
self.data_series = pd.Series(
[var[t].value for t in time], index=[t for t in time]
)
self.setpoint_series = pd.Series(
[initial if t < t_switch else setpoint for t in time]
)
def plot(self):
# fig, ax can be formatted to the user's liking
fig, ax = plt.subplots()
if self.name is not None:
self.data_series.plot(label=self.name)
else:
self.data_series.plot()
return fig, ax
def main(plot_switch=False):
# This tests the same model constructed in the test_nmpc_constructor_1 file
m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
sample_time = 0.5
m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
time_plant = m_plant.fs.time
simulation_horizon = 60
n_samples_to_simulate = round(simulation_horizon / sample_time)
samples_to_simulate = [
time_plant.first() + i * sample_time for i in range(1, n_samples_to_simulate)
]
# We must identify for the controller which variables are our
# inputs and measurements.
inputs = [
m_plant.fs.mixer.S_inlet.flow_vol[0],
m_plant.fs.mixer.E_inlet.flow_vol[0],
]
measurements = [
m_controller.fs.cstr.outlet.conc_mol[0, "C"],
m_controller.fs.cstr.outlet.conc_mol[0, "E"],
m_controller.fs.cstr.outlet.conc_mol[0, "S"],
m_controller.fs.cstr.outlet.conc_mol[0, "P"],
m_controller.fs.cstr.outlet.temperature[0],
m_controller.fs.cstr.volume[0],
]
# Construct the "NMPC simulator" object
nmpc = NMPCSim(
plant_model=m_plant,
plant_time_set=m_plant.fs.time,
controller_model=m_controller,
controller_time_set=m_controller.fs.time,
inputs_at_t0=inputs,
measurements=measurements,
sample_time=sample_time,
)
plant = nmpc.plant
controller = nmpc.controller
p_t0 = nmpc.plant.time.first()
c_t0 = nmpc.controller.time.first()
p_ts = nmpc.plant.sample_points[1]
c_ts = nmpc.controller.sample_points[1]
solve_consistent_initial_conditions(plant, plant.time, solver)
solve_consistent_initial_conditions(controller, controller.time, solver)
# We now perform the "RTO" calculation: Find the optimal steady state
# to achieve the following setpoint
setpoint = [
(controller.mod.fs.cstr.outlet.conc_mol[0, "P"], 0.4),
(controller.mod.fs.cstr.outlet.conc_mol[0, "S"], 0.0),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, "aq"], 300),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
(controller.mod.fs.cstr.volume[0], 1.0),
]
setpoint_weights = [
(controller.mod.fs.cstr.outlet.conc_mol[0, "P"], 1.0),
(controller.mod.fs.cstr.outlet.conc_mol[0, "S"], 1.0),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, "aq"], 1.0),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.0),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.0),
(controller.mod.fs.cstr.volume[0], 1.0),
]
# Some of the "differential variables" that have been fixed in the
# model file are different from the measurements listed above. We
# unfix them here so the RTO solve is not overconstrained.
# (The RTO solve will only automatically unfix inputs and measurements.)
nmpc.controller.mod.fs.cstr.control_volume.material_holdup[0, ...].unfix()
nmpc.controller.mod.fs.cstr.control_volume.energy_holdup[0, ...].unfix()
nmpc.controller.mod.fs.cstr.volume[0].unfix()
nmpc.controller.add_setpoint_objective(setpoint, setpoint_weights)
nmpc.controller.solve_setpoint(solver)
# Now we are ready to construct the tracking NMPC problem
tracking_weights = [
*((v, 1.0) for v in nmpc.controller.vectors.differential[:, 0]),
*((v, 1.0) for v in nmpc.controller.vectors.input[:, 0]),
]
nmpc.controller.add_tracking_objective(tracking_weights)
nmpc.controller.constrain_control_inputs_piecewise_constant()
nmpc.controller.initialize_to_initial_conditions()
# Solve the first control problem
nmpc.controller.vectors.input[...].unfix()
nmpc.controller.vectors.input[:, 0].fix()
solver.solve(nmpc.controller, tee=True)
# For a proper NMPC simulation, we must have noise.
# We do this by treating inputs and measurements as Gaussian random
# variables with the following variances (and bounds).
cstr = nmpc.controller.mod.fs.cstr
variance = [
(cstr.outlet.conc_mol[0.0, "S"], 0.2),
(cstr.outlet.conc_mol[0.0, "E"], 0.05),
(cstr.outlet.conc_mol[0.0, "C"], 0.1),
(cstr.outlet.conc_mol[0.0, "P"], 0.05),
(cstr.outlet.temperature[0.0], 5.0),
(cstr.volume[0.0], 0.05),
]
nmpc.controller.set_variance(variance)
measurement_variance = [v.variance for v in controller.measurement_vars]
measurement_noise_bounds = [
(0.0, var[c_t0].ub) for var in controller.measurement_vars
]
mx = plant.mod.fs.mixer
variance = [
(mx.S_inlet_state[0.0].flow_vol, 0.02),
(mx.E_inlet_state[0.0].flow_vol, 0.001),
]
nmpc.plant.set_variance(variance)
input_variance = [v.variance for v in plant.input_vars]
input_noise_bounds = [(0.0, var[p_t0].ub) for var in plant.input_vars]
random.seed(246)
# Extract inputs from controller and inject them into plant
inputs = controller.generate_inputs_at_time(c_ts)
plant.inject_inputs(inputs)
# This "initialization" really simulates the plant with the new inputs.
nmpc.plant.initialize_by_solving_elements(solver)
solver.solve(nmpc.plant)
for i in range(1, 11):
print("\nENTERING NMPC LOOP ITERATION %s\n" % i)
measured = nmpc.plant.generate_measurements_at_time(p_ts)
nmpc.plant.advance_one_sample()
nmpc.plant.initialize_to_initial_conditions()
measured = apply_noise_with_bounds(
measured,
measurement_variance,
random.gauss,
measurement_noise_bounds,
)
nmpc.controller.advance_one_sample()
nmpc.controller.load_measurements(measured)
solver.solve(nmpc.controller, tee=True)
inputs = controller.generate_inputs_at_time(c_ts)
inputs = apply_noise_with_bounds(
inputs,
input_variance,
random.gauss,
input_noise_bounds,
)
plant.inject_inputs(inputs)
nmpc.plant.initialize_by_solving_elements(solver)
solver.solve(nmpc.plant)
if __name__ == "__main__":
main()
|
1ea22e3efea4cb007026337c5ec7d719406138b6
|
cb6f6c0facd81317508fec7b7da160c111446113
|
/tests/test_internal_db.py
|
b41cabb431bf9c5f0675396cb5493e8b35fa1f02
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
simonw/datasette
|
d3c96e9eb2538abd7a87103224d3c654c6965cfe
|
fd083e37ec53e7e625111168d324a572344a3b19
|
refs/heads/main
| 2023-09-04T04:58:07.794317
| 2023-08-31T23:06:30
| 2023-08-31T23:06:30
| 107,914,493
| 8,274
| 628
|
Apache-2.0
| 2023-09-14T13:27:41
| 2017-10-23T00:39:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
test_internal_db.py
|
import pytest
# ensure refresh_schemas() gets called before interacting with internal_db
async def ensure_internal(ds_client):
await ds_client.get("/fixtures.json?sql=select+1")
return ds_client.ds.get_internal_database()
@pytest.mark.asyncio
async def test_internal_databases(ds_client):
internal_db = await ensure_internal(ds_client)
databases = await internal_db.execute("select * from catalog_databases")
assert len(databases) == 1
assert databases.rows[0]["database_name"] == "fixtures"
@pytest.mark.asyncio
async def test_internal_tables(ds_client):
internal_db = await ensure_internal(ds_client)
tables = await internal_db.execute("select * from catalog_tables")
assert len(tables) > 5
table = tables.rows[0]
assert set(table.keys()) == {"rootpage", "table_name", "database_name", "sql"}
@pytest.mark.asyncio
async def test_internal_indexes(ds_client):
internal_db = await ensure_internal(ds_client)
indexes = await internal_db.execute("select * from catalog_indexes")
assert len(indexes) > 5
index = indexes.rows[0]
assert set(index.keys()) == {
"partial",
"name",
"table_name",
"unique",
"seq",
"database_name",
"origin",
}
@pytest.mark.asyncio
async def test_internal_foreign_keys(ds_client):
internal_db = await ensure_internal(ds_client)
foreign_keys = await internal_db.execute("select * from catalog_foreign_keys")
assert len(foreign_keys) > 5
foreign_key = foreign_keys.rows[0]
assert set(foreign_key.keys()) == {
"table",
"seq",
"on_update",
"on_delete",
"to",
"id",
"match",
"database_name",
"table_name",
"from",
}
|
a2421ef665935028f7360e8eaa68a188210daf99
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/reports/generic.py
|
88d134894ca7897f3723312cf15cbf3852c2af01
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 42,263
|
py
|
generic.py
|
import datetime
import io
import json
import re
from itertools import chain
from django.http import (
Http404,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import NoReverseMatch
from django.utils.safestring import mark_safe
from django.utils.translation import gettext
from django.utils.html import conditional_escape
from celery.utils.log import get_task_logger
from memoized import memoized
from corehq.util.timezones.utils import get_timezone
from couchexport.export import export_from_tables, get_writer
from couchexport.shortcuts import export_response
from dimagi.utils.modules import to_function
from dimagi.utils.parsing import string_to_boolean
from dimagi.utils.web import json_request, json_response
from corehq.apps.domain.utils import normalize_domain_name
from corehq.apps.hqwebapp.crispy import CSS_ACTION_CLASS
from corehq.apps.hqwebapp.decorators import (
use_datatables,
use_daterangepicker,
use_jquery_ui,
use_nvd3,
)
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.datatables import DataTablesHeader
from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.tasks import export_all_rows_task
from corehq.apps.reports.util import DatatablesParams
from corehq.apps.saved_reports.models import ReportConfig
from corehq.apps.users.models import CouchUser
from corehq.util.view_utils import absolute_reverse, request_as_dict, reverse
CHART_SPAN_MAP = {1: '10', 2: '6', 3: '4', 4: '3', 5: '2', 6: '2'}
def _sanitize_rows(rows):
return [_sanitize_row(row) for row in rows]
def _sanitize_row(row):
return [_sanitize_col(col) for col in row]
def _sanitize_col(col):
if isinstance(col, str):
return conditional_escape(col)
# HACK: dictionaries make it here. The dictionaries I've seen have an 'html' key
# which I expect to be sanitized already, but there is no guarantee
return col
class GenericReportView(object):
"""
A generic report structure for viewing a report
(or pages that follow the reporting structure closely---though that seems a bit hacky)
This object is handled by the ReportDispatcher and served as a django view based on
the report maps specified in settings.py
To make the report return anything, override any or all of the following properties:
@property
template_context
- returns a dict to be inserted into self.context
- only items relevant to base_template structure should be placed here. Anything
related to report data and async templates should be done in report_context
@property
report_context
- returns a dict to be inserted into self.context
- this is where the main processing of the report data should happen
Note: In general you should not be inserting things into self.context directly, unless absolutely
necessary. Please use the structure in the above properties for updating self.context
in the relevant places.
@property
json_dict
- returns a dict to be parsed and returned as json for the json version of this report
(generally has only been useful for datatables paginated reports)
@property
export_table
- returns a multi-dimensional list formatted as export_from_tables would expect:
[ ['table_or_sheet_name', [['header'] ,['row']] ] ]
"""
# required to create a report based on this
name = None # Human-readable name to be used in the UI
slug = None # Name to be used in the URL (with lowercase and underscores)
section_name = None # string. ex: "Reports"
dispatcher = None # ReportDispatcher subclass
toggles = () # Optionally provide toggles to turn on/off the report
# whether to use caching on @request_cache methods. will ignore this if CACHE_REPORTS is set to False
is_cacheable = False
# Code can expect `fields` to be an iterable even when empty (never None)
fields = ()
# not required
description = None # Human-readable description of the report
documentation_link = None # Link to docs page if available
report_template_path = None
report_partial_path = None
asynchronous = False
hide_filters = False
emailable = False
printable = False
exportable = False
exportable_all = False # also requires overriding self.get_all_rows
export_format_override = None
icon = None
# the defaults for this should be sufficient. But if they aren't, well go for it.
base_template = None
base_template_async = None
base_template_filters = None
print_override_template = "reports/async/print_report.html"
flush_layout = False
# Todo: maybe make these a little cleaner?
show_timezone_notice = False
show_time_notice = False
is_admin_report = False
special_notice = None
# whether to ignore the permissions check that's done when rendering
# the report
override_permissions_check = False
report_title = None
report_subtitles = []
# For drilldown reports, we hide the child reports from navigation.
# For those child reports, set the parent's report class here so that we
# still include these reports in the list of reports we do access control
# against.
parent_report_class = None
is_deprecated = False
deprecation_email_message = gettext("This report has been deprecated.")
deprecation_message = gettext("This report has been deprecated.")
def __init__(self, request, base_context=None, domain=None, **kwargs):
if not self.name or not self.section_name or self.slug is None or not self.dispatcher:
raise NotImplementedError(
f'Missing a required parameter: (name: {self.name}, '
f'section_name: {self.section_name}, slug: {self.slug}, '
f'dispatcher: {self.dispatcher}'
)
from corehq.apps.reports.dispatcher import ReportDispatcher
if isinstance(self.dispatcher, ReportDispatcher):
raise ValueError("Class property dispatcher should point to a subclass of ReportDispatcher.")
self.request = request
self.request_params = json_request(self.request.GET if self.request.method == 'GET' else self.request.POST)
self.domain = normalize_domain_name(domain)
self.context = base_context or {}
self._update_initial_context()
self.is_rendered_as_email = False # setting this to true in email_response
self.is_rendered_as_export = False
self.override_template = "reports/async/email_report.html"
def __str__(self):
if self.fields:
field_lines = "\n -".join(self.fields)
fields = f"\n Report Fields: \n -{field_lines}"
else:
fields = ""
if self.description:
desc = f"\n Report Description: {self.description}"
else:
desc = ""
return (
f"{self.__class__.__name__} report named '{self.name}' with slug "
f"'{self.slug}' in section '{self.section_name}'.{desc}{fields}"
)
def __getstate__(self):
"""
For pickling the report when passing it to Celery.
"""
request = request_as_dict(self.request)
return dict(
request=request,
request_params=self.request_params,
domain=self.domain,
context={}
)
_caching = False
def __setstate__(self, state):
"""
For unpickling a pickled report.
"""
logging = get_task_logger(__name__) # logging is likely to happen within celery.
self.domain = state.get('domain')
self.context = state.get('context', {})
class FakeHttpRequest(object):
method = 'GET'
domain = ''
GET = {}
META = {}
couch_user = None
datespan = None
can_access_all_locations = None
request_data = state.get('request')
request = FakeHttpRequest()
request.domain = self.domain
request.GET = request_data.get('GET', {})
request.META = request_data.get('META', {})
request.datespan = request_data.get('datespan')
request.can_access_all_locations = request_data.get('can_access_all_locations')
try:
couch_user = CouchUser.get_by_user_id(request_data.get('couch_user'))
request.couch_user = couch_user
except Exception as e:
logging.error("Could not unpickle couch_user from request for report %s. Error: %s" %
(self.name, e))
self.request = request
self._caching = True
self.request_params = state.get('request_params')
self._update_initial_context()
@property
@memoized
def url_root(self):
path = self.request.META.get('PATH_INFO', "")
try:
root = path[0:path.index(self.slug)]
except ValueError:
root = None
return root
@property
def queried_path(self):
path = self.request.META.get('PATH_INFO')
query = self.request.META.get('QUERY_STRING')
return "%s:%s" % (path, query)
@property
@memoized
def domain_object(self):
if self.domain is not None:
from corehq.apps.domain.models import Domain
return Domain.get_by_name(self.domain)
return None
@property
@memoized
def timezone(self):
return get_timezone(self.request, self.domain)
@property
@memoized
def template_base(self):
return self.base_template
@property
@memoized
def template_async_base(self):
if self.asynchronous:
return self.base_template_async or "reports/async/default.html"
return self.template_base
@property
@memoized
def template_report(self):
original_template = self.report_template_path or "reports/async/basic.html"
if self.is_rendered_as_email:
self.context.update(original_template=original_template)
return self.override_template
return original_template
@property
@memoized
def template_report_partial(self):
return self.report_partial_path
@property
@memoized
def template_filters(self):
return self.base_template_filters or "reports/async/filters.html"
@property
@memoized
def rendered_report_title(self):
return gettext(self.name)
@property
@memoized
def filter_classes(self):
filters = []
fields = self.fields
for field in fields or []:
if isinstance(field, str):
klass = to_function(field, failhard=True)
else:
klass = field
filters.append(
klass(self.request, self.domain, self.timezone)
)
return filters
@property
@memoized
def export_format(self):
from couchexport.models import Format
return self.export_format_override or self.request.GET.get('format', Format.XLS_2007)
@property
def export_name(self):
return self.slug
@property
def export_target(self):
writer = get_writer(self.export_format)
return writer.target_app
@property
def default_report_url(self):
return "#"
@property
def breadcrumbs(self):
"""
Override this for custom breadcrumbs.
Use the format:
dict(
title="breadcrumb title",
link="url title links to"
)
This breadcrumb does not include the report title, it's only the links in between the section name
and the report title.
"""
return None
@property
def template_context(self):
"""
Intention: Override if necessary.
Update context specific to the wrapping template here.
Nothing specific to the report should go here, use report_context for that.
Must return a dict.
"""
return {
'rendered_as': self.rendered_as,
}
@property
def report_context(self):
"""
Intention: Override
!!! CRUCIAL: This is where ALL the intense processing of the report data happens.
DO NOT update self.context from here or anything that gets processed in here.
The dictionary returned by this function can get cached in memcached to optimize a report.
Must return a dict.
"""
return dict()
@property
def json_dict(self):
"""
Intention: Override
Return a json-parsable dict, as needed by your report.
"""
return {}
@property
def export_table(self):
"""
Intention: Override
Returns an export table to be parsed by export_from_tables.
"""
return [
[
'table_or_sheet_name',
[
['header'],
['row 1'],
['row 2'],
]
]
]
@property
def filter_set(self):
"""
Whether a report has any filters set. Based on whether or not there
is a query string. This gets carried to additional asynchronous calls
"""
are_filters_set = bool(self.request.META.get('QUERY_STRING'))
if "filterSet" in self.request.GET:
try:
are_filters_set = string_to_boolean(self.request.GET.get("filterSet"))
except ValueError:
# not a parseable boolean
pass
return are_filters_set
@property
def needs_filters(self):
"""
Whether a report needs filters. A shortcut for hide_filters is false and
filter_set is false.
If no filters are used, False is automatically returned.
"""
if len(self.fields) == 0:
return False
else:
return not self.hide_filters and not self.filter_set
def _validate_context_dict(self, property):
if not isinstance(property, dict):
raise TypeError("property must return a dict")
return property
def _update_initial_context(self):
"""
Intention: Don't override.
"""
report_configs = ReportConfig.by_domain_and_owner(self.domain,
self.request.couch_user._id, report_slug=self.slug)
current_config_id = self.request.GET.get('config_id', '')
default_config = ReportConfig.default()
def is_editable_datespan(field):
field_fn = to_function(field) if isinstance(field, str) else field
return issubclass(field_fn, DatespanFilter) and field_fn.is_editable
has_datespan = any([is_editable_datespan(field) for field in self.fields])
self.context.update(
report=dict(
title=self.rendered_report_title,
description=self.description,
documentation_link=self.documentation_link,
section_name=self.section_name,
slug=self.slug,
sub_slug=None,
type=self.dispatcher.prefix,
url_root=self.url_root,
is_async=self.asynchronous,
is_exportable=self.exportable,
dispatcher=self.dispatcher,
filter_set=self.filter_set,
needs_filters=self.needs_filters,
has_datespan=has_datespan,
show=(
self.override_permissions_check
or self.request.couch_user.can_view_some_reports(self.domain)
),
is_emailable=self.emailable,
is_export_all = self.exportable_all,
is_printable=self.printable,
is_admin=self.is_admin_report,
special_notice=self.special_notice,
report_title=self.report_title or self.rendered_report_title,
report_subtitles=self.report_subtitles,
export_target=self.export_target,
js_options=self.js_options,
),
current_config_id=current_config_id,
default_config=default_config,
report_configs=report_configs,
show_time_notice=self.show_time_notice,
domain=self.domain,
layout_flush_content=self.flush_layout,
)
@property
def js_options(self):
try:
async_url = self.get_url(domain=self.domain, render_as='async', relative=True)
except NoReverseMatch:
async_url = ''
return {
'async': self.asynchronous,
'domain': self.domain,
'filterSet': self.filter_set,
'isEmailable': self.emailable,
'isExportAll': self.exportable_all,
'isExportable': self.exportable,
'needsFilters': self.needs_filters,
'slug': self.slug,
'subReportSlug': None,
'emailDefaultSubject': self.rendered_report_title,
'type': self.dispatcher.prefix,
'urlRoot': self.url_root,
'asyncUrl': async_url
}
def update_filter_context(self):
"""
Intention: This probably does not need to be overridden in general.
Updates the context with filter information.
"""
self.context.update({
'report_filters': [
dict(field=f.render(), slug=f.slug) for f in self.filter_classes
],
})
def update_template_context(self):
"""
Intention: This probably does not need to be overridden in general.
Please override template_context instead.
"""
self.context.update(rendered_as=self.rendered_as)
self.context.update({
'report_filter_form_action_css_class': CSS_ACTION_CLASS,
})
self.context['report'].update(
show_filters=self.fields or not self.hide_filters,
breadcrumbs=self.breadcrumbs,
default_url=self.default_report_url,
url=self.get_url(domain=self.domain),
title=self.rendered_report_title
)
if hasattr(self, 'datespan'):
self.context.update(datespan=self.datespan)
if self.show_timezone_notice:
self.context.update(timezone=dict(
now=datetime.datetime.now(tz=self.timezone),
zone=self.timezone.zone
))
self.context.update(self._validate_context_dict(self.template_context))
def update_report_context(self):
"""
Intention: This probably does not need to be overridden in general.
Please override report_context instead.
"""
self.context.update(
report_partial=self.template_report_partial,
report_base=self.template_async_base
)
self.context['report'].update(
title=self.rendered_report_title, # overriding the default title
)
self.context.update(self._validate_context_dict(self.report_context))
@property
def deprecate_response(self):
from django.contrib import messages
messages.warning(
self.request,
self.deprecation_message
)
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))
@property
def view_response(self):
"""
Intention: Not to be overridden in general.
Renders the general view of the report template.
"""
if self.is_deprecated:
return self.deprecate_response
else:
self.update_template_context()
template = self.template_base
if not self.asynchronous:
self.update_filter_context()
self.update_report_context()
template = self.template_report
return render(self.request, template, self.context)
@property
def email_response(self):
"""
This renders a json object containing a pointer to the static html
content of the report. It is intended for use by the report scheduler.
"""
self.is_rendered_as_email = True
return self.async_response
@property
@request_cache()
def async_response(self):
"""
Intention: Not to be overridden in general.
Renders the asynchronous view of the report template, returned as json.
"""
return JsonResponse(self._async_context())
def _async_context(self):
self.update_template_context()
self.update_report_context()
rendered_filters = None
if bool(self.request.GET.get('hq_filters')):
self.update_filter_context()
rendered_filters = render_to_string(
self.template_filters, self.context, request=self.request
)
rendered_report = render_to_string(
self.template_report, self.context, request=self.request
)
report_table_js_options = {}
if 'report_table_js_options' in self.context:
report_table_js_options = self.context['report_table_js_options']
return dict(
filters=rendered_filters,
report=rendered_report,
report_table_js_options=report_table_js_options,
title=self.rendered_report_title,
slug=self.slug,
url_root=self.url_root,
)
@property
def excel_response(self):
file = io.BytesIO()
export_from_tables(self.export_table, file, self.export_format)
return file
@property
@request_cache(expiry=60 * 10)
def filters_response(self):
"""
Intention: Not to be overridden in general.
Renders just the filters for the report to be fetched asynchronously.
"""
self.update_filter_context()
rendered_filters = render_to_string(
self.template_filters, self.context, request=self.request
)
return HttpResponse(json.dumps(dict(
filters=rendered_filters,
slug=self.slug,
url_root=self.url_root
)))
@property
@request_cache()
def json_response(self):
"""
Intention: Not to be overridden in general.
Renders the json version for the report, if available.
"""
return json_response(self.json_dict)
@property
def export_response(self):
"""
Intention: Not to be overridden in general.
Returns the tabular export of the data, if available.
"""
self.is_rendered_as_export = True
if self.exportable_all:
export_all_rows_task.delay(self.__class__, self.__getstate__())
return HttpResponse()
else:
# We only want to cache the responses which serve files directly.
# The responses which return 200 and email the report should not be cached.
return self._export_response_direct()
@request_cache()
def _export_response_direct(self):
temp = io.BytesIO()
export_from_tables(self.export_table, temp, self.export_format)
return export_response(temp, self.export_format, self.export_name)
@property
@request_cache()
def print_response(self):
"""
Returns the report for printing.
"""
self.is_rendered_as_email = True
self.use_datatables = False
self.override_template = self.print_override_template
return HttpResponse(self._async_context()['report'])
@property
def partial_response(self):
"""
Use this response for rendering smaller chunks of your report.
(Great if you have a giant report with annoying, complex indicators.)
"""
raise Http404
@classmethod
def get_url(cls, domain=None, render_as=None, relative=False, **kwargs):
# NOTE: I'm pretty sure this doesn't work if you ever pass in render_as
# but leaving as is for now, as it should be obvious as soon as that
# breaks something
if isinstance(cls, cls):
domain = getattr(cls, 'domain')
render_as = getattr(cls, 'rendered_as')
if render_as is not None and render_as not in cls.dispatcher.allowed_renderings():
raise ValueError('The render_as parameter is not one of the following allowed values: %s' %
', '.join(cls.dispatcher.allowed_renderings()))
url_args = [domain] if domain is not None else []
if render_as is not None:
url_args.append(render_as+'/')
if relative:
return reverse(cls.dispatcher.name(), args=url_args + [cls.slug])
return absolute_reverse(cls.dispatcher.name(), args=url_args + [cls.slug])
@classmethod
def allow_access(cls, request):
"""
Override to add additional constraints on report access on top of
what's provided by the dispatcher. For feature flags, see the toggles
attribute
"""
return True
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return True
@classmethod
def show_in_user_roles(cls, domain=None, project=None, user=None):
"""
User roles can specify specific reports that users can view. Return True if this report should show in
the list of specific reports that can be viewed.
"""
return cls.show_in_navigation(domain, project, user)
@classmethod
def display_in_dropdown(cls, domain=None, project=None, user=None):
return False
@classmethod
def get_subpages(cls):
"""
List of subpages to show in sidebar navigation.
"""
return []
@use_nvd3
@use_jquery_ui
@use_datatables
@use_daterangepicker
def decorator_dispatcher(self, request, *args, **kwargs):
"""
Decorate this method in your report subclass and call super to make sure
appropriate decorators are used to render the page and its javascript
libraries.
example:
class MyNewReport(GenericReport):
...
@use_nvd3
def decorator_dispatcher(self, request, *args, **kwargs):
super(MyNewReport, self).decorator_dispatcher(request, *args, **kwargs)
"""
pass
class GenericTabularReport(GenericReportView):
"""
Override the following properties:
@property
headers
- returns a DataTablesHeader object
@property
rows
- returns a 2D list of rows.
## AJAX pagination
If you plan on using ajax pagination, take into consideration
the following properties when rendering self.rows:
self.pagination.start (skip)
self.pagination.count (limit)
Make sure you also override the following properties as necessary:
@property
total_records
- returns an integer
- the total records of what you are paginating over
@property
shared_pagination_GET_params
- this is where you select the GET parameters to pass to the paginator
- returns a list formatted like [dict(name='group', value=self.group_id)]
## Charts
To include charts in the report override the following property.
@property
charts
- returns a list of Chart objects e.g. PieChart, MultiBarChart
You can also adjust the following properties:
charts_per_row
- the number of charts to show in a row. 1, 2, 3, 4, or 6
"""
# new class properties
total_row = None
statistics_rows = None
default_rows = 10
start_at_row = 0
show_all_rows = False
fix_left_col = False
disable_pagination = False
ajax_pagination = False
use_datatables = True
charts_per_row = 1
bad_request_error_text = None
exporting_as_excel = False
# Sets bSort in the datatables instance to true/false (config.dataTables.bootstrap.js)
sortable = True
# override old class properties
report_template_path = "reports/tabular.html"
flush_layout = True
# set to a list of functions that take in a report object
# and return a dictionary of items that will show up in
# the report context
extra_context_providers = []
@property
def headers(self):
"""
Override this method to create a functional tabular report.
        Returns a DataTablesHeader() object (or a list, but preferably the former).
"""
return DataTablesHeader()
@property
def rows(self):
"""
Override this method to create a functional tabular report.
Returns 2D list of rows.
        [['row1'], ['row2']]
"""
return []
@property
def get_all_rows(self):
"""
Override this method to return all records to export
"""
return []
@property
def total_records(self):
"""
Override for pagination.
Returns an integer.
"""
return 0
@property
def total_filtered_records(self):
"""
Override for pagination.
Returns an integer.
return -1 if you want total_filtered_records to equal whatever the value of total_records is.
"""
return -1
@property
def charts(self):
"""
Override to return a list of Chart objects.
"""
return []
@property
def shared_pagination_GET_params(self):
"""
Override.
Should return a list of dicts with the name and value of the GET parameters
that you'd like to pass to the server-side pagination.
ex: [dict(name='group', value=self.group_id)]
"""
return []
@property
def pagination_source(self):
return self.get_url(domain=self.domain, render_as='json')
_pagination = None
@property
def pagination(self):
if self._pagination is None:
self._pagination = DatatablesParams.from_request_dict(
self.request.POST if self.request.method == 'POST' else self.request.GET
)
return self._pagination
@property
def json_dict(self):
"""
When you implement self.rows for a paginated report,
it should take into consideration the following:
self.pagination.start (skip)
self.pagination.count (limit)
"""
rows = _sanitize_rows(self.rows)
total_records = self.total_records
if not isinstance(total_records, int):
raise ValueError("Property 'total_records' should return an int.")
total_filtered_records = self.total_filtered_records
if not isinstance(total_filtered_records, int):
raise ValueError("Property 'total_filtered_records' should return an int.")
ret = dict(
sEcho=self.pagination.echo,
iTotalRecords=total_records,
iTotalDisplayRecords=total_filtered_records if total_filtered_records >= 0 else total_records,
aaData=rows,
)
if self.total_row:
ret["total_row"] = list(self.total_row)
if self.statistics_rows:
ret["statistics_rows"] = list(self.statistics_rows)
return ret
@property
def fixed_cols_spec(self):
"""
Override
Returns a dict formatted like:
dict(num=<num_cols_to_fix>, width=<width_of_total_fixed_cols>)
"""
return dict(num=1, width=200)
@staticmethod
def _strip_tags(value):
"""
Strip HTML tags from a value
"""
# Uses regex. Regex is much faster than using an HTML parser, but will
# strip "<2 && 3>" from a value like "1<2 && 3>2". A parser will treat
# each cell like an HTML document, which might be overkill, but if
# using regex breaks values then we should use a parser instead, and
# take the knock. Assuming we won't have values with angle brackets,
# using regex for now.
if isinstance(value, str):
return re.sub('<[^>]*?>', '', value)
return value
@property
def override_export_sheet_name(self):
"""
Override the export sheet name here. Return a string.
"""
return None
_export_sheet_name = None
@property
def export_sheet_name(self):
if self._export_sheet_name is None:
override = self.override_export_sheet_name
self._export_sheet_name = override if isinstance(override, str) else self.name # unicode?
return self._export_sheet_name
@property
def export_table(self):
"""
Exports the report as excel.
When rendering a complex cell, it will assign a value in the following order:
1. cell['raw']
2. cell['sort_key']
3. str(cell)
"""
headers = self.headers
def _unformat_row(row):
def _unformat_val(val):
if isinstance(val, dict):
return val.get('raw', val.get('sort_key', val))
return self._strip_tags(val)
return [_unformat_val(val) for val in row]
table = headers.as_export_table
self.exporting_as_excel = True
rows = (_unformat_row(row) for row in self.export_rows)
table = chain(table, rows)
if self.total_row:
table = chain(table, [_unformat_row(self.total_row)])
if self.statistics_rows:
table = chain(table, [_unformat_row(row) for row in self.statistics_rows])
return [[self.export_sheet_name, table]]
@property
def export_rows(self):
"""
The rows that will be used in an export. Useful if you want to apply any additional
custom formatting to mirror something that would be done in a template.
"""
if self.exportable_all:
return self.get_all_rows
else:
return self.rows
@property
@request_cache()
def report_context(self):
"""
Don't override.
Override the properties headers and rows instead of this.
"""
headers = self.headers # not all headers have been memoized
assert isinstance(headers, (DataTablesHeader, list))
if isinstance(headers, list):
raise DeprecationWarning("Property 'headers' should be a DataTablesHeader object, not a list.")
if self.ajax_pagination and self.is_rendered_as_email:
rows = self.get_all_rows
charts = []
elif self.ajax_pagination or self.needs_filters:
rows = []
charts = []
else:
rows = list(self.rows)
charts = list(self.charts)
if self.total_row is not None:
self.total_row = list(self.total_row)
if self.statistics_rows is not None:
self.statistics_rows = list(self.statistics_rows)
pagination_spec = dict(is_on=self.ajax_pagination and not self.is_rendered_as_email)
if self.ajax_pagination:
shared_params = list(self.shared_pagination_GET_params)
pagination_spec.update(
params=shared_params,
source=self.pagination_source,
filter=False
)
if self.disable_pagination:
pagination_spec['hide'] = True
left_col = dict(is_fixed=self.fix_left_col)
if self.fix_left_col:
spec = dict(self.fixed_cols_spec)
left_col.update(fixed=spec)
context = dict(
report_table=dict(
headers=headers,
rows=rows,
total_row=self.total_row,
statistics_rows=self.statistics_rows,
default_rows=self.default_rows,
start_at_row=self.start_at_row,
show_all_rows=self.show_all_rows,
pagination=pagination_spec,
left_col=left_col,
datatables=self.use_datatables,
bad_request_error_text=self.bad_request_error_text,
sortable=self.sortable,
),
charts=charts,
chart_span=CHART_SPAN_MAP[self.charts_per_row]
)
report_table = context['report_table']
pagination_on = report_table['pagination']['is_on']
context.update({
'report_table_js_options': {
'datatables': report_table['datatables'],
'default_rows': report_table['default_rows'] or 10,
'start_at_row': report_table['start_at_row'] or 0,
'show_all_rows': report_table['show_all_rows'],
'sortable': report_table['sortable'],
'headers': {
'render_aoColumns': report_table['headers'].render_aoColumns,
'auto_width': report_table['headers'].auto_width,
'custom_sort': report_table['headers'].custom_sort,
},
'bad_request_error_text': report_table['bad_request_error_text'],
'pagination': {
                    'hide': report_table['pagination'].get('hide', False),
'is_on': pagination_on,
'source': report_table['pagination']['source'] if pagination_on else None,
'params': report_table['pagination']['params'] if pagination_on else None,
},
'left_col': {
'is_fixed': report_table['left_col']['is_fixed'],
'fixed': {
'num': report_table['left_col']['fixed']['num'],
'width': report_table['left_col']['fixed']['width'],
} if report_table['left_col']['is_fixed'] else {},
},
},
})
for provider_function in self.extra_context_providers:
context.update(provider_function(self))
return context
def table_cell(self, value, html=None, zerostyle=False):
empty_indicator = mark_safe('<span class="text-muted">0</span>') # nosec: no user input
styled_value = empty_indicator if zerostyle and value == 0 else value
return dict(
sort_key=value,
html=styled_value if html is None else html
)
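# Illustrative sketch (not part of the original module): a minimal working
# subclass only has to supply `headers` and `rows`, as described in the
# GenericTabularReport docstring above. `DataTablesColumn` is assumed to be
# importable from the same module that provides `DataTablesHeader`, and the
# `name`/`slug` values are placeholders.
class _ExampleCountReport(GenericTabularReport):
    name = "Example Counts"
    slug = "example_counts"
    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn("Item"),
            DataTablesColumn("Count"),
        )
    @property
    def rows(self):
        data = {"apples": 3, "pears": 5}
        return [[item, self.table_cell(count)] for item, count in sorted(data.items())]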
def summary_context(report):
# will intentionally break if used with something that doesn't have
# a summary_values attribute
return {"summary_values": report.summary_values}
class ProjectInspectionReportParamsMixin(object):
@property
def shared_pagination_GET_params(self):
# This was moved from ProjectInspectionReport so that it could be included in CaseReassignmentInterface too
# I tried a number of other inheritance schemes, but none of them worked because of the already
# complicated multiple-inheritance chain
# todo: group this kind of stuff with the field object in a comprehensive field refactor
return [dict(name='individual', value=self.individual),
dict(name='group', value=self.group_id),
dict(name='case_type', value=self.case_type),
dict(name='ufilter', value=[f.type for f in self.user_filter if f.show])]
class PaginatedReportMixin(object):
default_sort = None
def get_sorting_block(self):
res = []
        # the number of columns being sorted on
sort_cols = int(self.request.GET.get('iSortingCols', 0))
if sort_cols > 0:
for x in range(sort_cols):
col_key = 'iSortCol_%d' % x
sort_dir = self.request.GET['sSortDir_%d' % x]
col_id = int(self.request.GET[col_key])
col = self.headers.header[col_id]
if col.prop_name is not None:
sort_dict = {col.prop_name: sort_dir}
res.append(sort_dict)
if len(res) == 0 and self.default_sort is not None:
res.append(self.default_sort)
return res
class GetParamsMixin(object):
@property
def shared_pagination_GET_params(self):
"""
Override the params and applies all the params of the originating view to the GET
so as to get sorting working correctly with the context of the GET params
"""
ret = super(GetParamsMixin, self).shared_pagination_GET_params
for k, v in self.request.GET.lists():
ret.append(dict(name=k, value=v))
return ret
class ElasticProjectInspectionReport(GetParamsMixin, ProjectInspectionReportParamsMixin,
PaginatedReportMixin, GenericTabularReport):
"""
Tabular report that provides framework for doing elasticsearch backed tabular reports.
Main thing of interest is that you need es_results to hit ES, and that your datatable headers
must use prop_name kwarg to enable column sorting.
"""
@property
def es_results(self):
"""
Main meat - run your ES query and return the raw results here.
"""
raise NotImplementedError("ES Query not implemented")
@property
def total_records(self):
"""
Override for pagination slice from ES
Returns an integer.
"""
res = self.es_results
if res is not None:
return res['hits'].get('total', 0)
else:
return 0
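# Illustrative sketch (not part of the original module): a subclass only needs
# to provide `es_results`; `total_records` above already reads the standard
# Elasticsearch envelope (res['hits']['total']). The hard-coded response below
# just shows the expected shape -- a real report would run a query here,
# honouring self.pagination.start and self.pagination.count.
class _ExampleESReport(ElasticProjectInspectionReport):
    name = "Example ES Report"
    slug = "example_es_report"
    @property
    def es_results(self):
        return {"hits": {"total": 2, "hits": [
            {"_source": {"name": "case A", "opened_on": "2020-01-01"}},
            {"_source": {"name": "case B", "opened_on": "2020-02-01"}},
        ]}}
    @property
    def rows(self):
        return [[hit["_source"]["name"], hit["_source"]["opened_on"]]
                for hit in self.es_results["hits"]["hits"]]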
|
8b9d0e913e8a4209d804cc32a71af403bbe0775f
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/tasks/multidogo/build.py
|
e8adedc05b8e2d3d6f2457179753a4f571f787f9
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 12,446
|
py
|
build.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import parlai.core.build_data as build_data
from parlai.core.build_data import DownloadableFile
import csv
from itertools import islice
from pathlib import Path
import os
import json
import re
import tqdm
DEBUG_MISSING_RAW_CONVERSATIONS = False # Unnecessary once Amazon fixes multidogo
RESOURCE = DownloadableFile(
"https://github.com/awslabs/multi-domain-goal-oriented-dialogues-dataset/archive/master.zip",
"raw_data.zip",
"fb59c7261da2d30d9d24b9af309ebb4bf0e5b39f97d718201a7160e591e76a3c",
zipped=True,
)
RAW_DATA_PREFIX = "multi-domain-goal-oriented-dialogues-dataset-master/data/"
RAW_DATA_ANNOTATED_DATA_PATH = "paper_splits"
RAW_DATA_UNANNOTATED_DATA_PATH = "unannotated"
TURN_INTENT = "turn"
SENTENCE_INTENT = "sentence"
TURN_AND_SENTENCE_INTENT = "both"
RAW_DATA_SENTENCE_INTENT_PATH = "splits_annotated_at_sentence_level"
RAW_DATA_TURN_INTENT_PATH = "splits_annotated_at_turn_level"
RAW_DATA_INTENT_BY_TYPE_PATH = {
TURN_INTENT: RAW_DATA_TURN_INTENT_PATH,
SENTENCE_INTENT: RAW_DATA_SENTENCE_INTENT_PATH,
}
DOMAINS = ["airline", "fastfood", "finance", "insurance", "media", "software"]
DATATYPE_TO_RAW_DATA_FILE_NAME = {
"test": "test.tsv",
"train": "train.tsv",
"valid": "dev.tsv",
}
PROCESSED = "processed/"
def _preprocess(opt, datapath, datatype, version):
"""
MultiDoGo conversations take place between an "agent" and a customer". Labeled
customer data is stored in one set of files while the agent data is in another.
There is a common conversation ID between the two, but the conversations are not
listed in a consistent way between the documents. Since we'll have to do work to
associate the data between the files anyway, we might as well process the data into
a new file that'll be easier to deal with.
Stores the data as <multidogo_data_path>/processed/<domain>/<datatype>.txt.
Will skip preprocessing if this file already exists.
"""
domains = opt.get("domains", DOMAINS)
intent_type = opt.get("intent_type", TURN_INTENT)
for domain in domains:
out_dir = get_processed_multidogo_folder(
datapath, domain, datatype, intent_type
)
if build_data.built(out_dir, version):
continue
print(
f" Preprocessing '{domain}' data for '{datatype}' with '{intent_type}' intent labels."
)
Path(out_dir).mkdir(parents=True, exist_ok=True)
        # The agent responses for *all* datatypes live in a single file, so
        # iterate through the datatype file first and build a quick lookup
        # table of the tsv lines we actually need, rather than rescanning the
        # whole file for every conversation.
unannotated_id_map = _build_conversation_span_map(
_get_unannotated_tsv_data(datapath, domain)
)
        # Collate all of the conversations + annotations. For turn + sentence
        # intent labels we do two passes, one for sentence then one for turn,
        # so that we never add two sets of labels for the same conversation ID.
        # The same structure keeps the turn-only and sentence-only intent
        # categories separate. We also do a bit of chunking.
file_idx = 0
seen_conversations_set = set()
if intent_type == TURN_AND_SENTENCE_INTENT or intent_type == SENTENCE_INTENT:
file_idx, seen_conversations_set = _aggregate_and_write_conversations(
intent_type,
SENTENCE_INTENT,
datapath,
domain,
datatype,
unannotated_id_map,
start_file_idx=file_idx,
skip_ids=set(),
)
if intent_type == TURN_AND_SENTENCE_INTENT or intent_type == TURN_INTENT:
_, _ = _aggregate_and_write_conversations(
intent_type,
TURN_INTENT,
datapath,
domain,
datatype,
unannotated_id_map,
start_file_idx=file_idx,
skip_ids=seen_conversations_set,
)
        # mark that we've built this combination
build_data.mark_done(out_dir, version_string=version)
def get_processed_multidogo_folder(datapath, domain, datatype, intent_type):
return os.path.join(datapath, PROCESSED, domain, intent_type, datatype)
# unannotated data lives at RAW_DATA_PREFIX + RAW_DATA_UNANNOTATED_DATA_PATH + <domain> + '.tsv'
# annotated data lives at RAW_DATA_PREFIX + RAW_DATA_ANNOTATED_DATA_PATH + <intent type path> + <domain> + '/' + <datatype> + '.tsv'
def _get_unannotated_tsv_data(datapath, domain):
file_name = os.path.join(
datapath, RAW_DATA_PREFIX, RAW_DATA_UNANNOTATED_DATA_PATH, domain + ".tsv"
)
    return csv.reader(open(file_name, "r"), delimiter=",")  # the unannotated .tsv files are actually comma-separated
def _get_annotated_tsv_data(datapath, domain, datatype, annotation_type):
file_name = os.path.join(
datapath,
RAW_DATA_PREFIX,
RAW_DATA_ANNOTATED_DATA_PATH,
RAW_DATA_INTENT_BY_TYPE_PATH[annotation_type],
domain,
DATATYPE_TO_RAW_DATA_FILE_NAME[datatype],
)
return csv.reader(open(file_name, "r"), delimiter="\t")
def _get_annotated_tsv_data_size(datapath, domain, datatype, annotation_type):
file_name = os.path.join(
datapath,
RAW_DATA_PREFIX,
RAW_DATA_ANNOTATED_DATA_PATH,
RAW_DATA_INTENT_BY_TYPE_PATH[annotation_type],
domain,
DATATYPE_TO_RAW_DATA_FILE_NAME[datatype],
)
return sum(1 for line in open(file_name, 'r'))
def _build_conversation_span_map(unannotated_tsv_object):
result = {} # conversationId to (start line, length) map
start = 0
prev_conversation_id = ""
length = 0
for i, row in enumerate(unannotated_tsv_object):
        conversation_id = row[0][4:-2]  # strip the extra filler around conversationId in the unannotated file
if conversation_id != prev_conversation_id:
result[prev_conversation_id] = (start, length)
start = i
prev_conversation_id = conversation_id
length = 0
length += 1
result[conversation_id] = (start, length)
return result
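# Worked example (illustrative): for unannotated rows whose stripped
# conversationIds are ["abc", "abc", "xyz"], the map above comes out as
#     {"": (0, 0), "abc": (0, 2), "xyz": (2, 1)}
# i.e. each id maps to (first line index, number of lines). The spurious ""
# entry comes from the first comparison against the initial
# prev_conversation_id and is harmless, since "" is never looked up.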
def _get_slots_map(utterance, slot_string):
values = slot_string.split(" ")
cleaned = re.sub(r"[^\w\s]", "", utterance)
words = cleaned.split(" ")
result = {}
for i in range(len(words)):
if values[i] != "O":
result[values[i]] = words[i]
return result
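# Worked example (illustrative): for
#     utterance   = "i want a cheeseburger please"
#     slot_string = "O O O food_item O"
# the helper above returns {"food_item": "cheeseburger"}: every non-"O" slot
# tag is keyed to the word at the same position. Note that this relies on the
# cleaned utterance and the slot string splitting into the same number of tokens.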
def _aggregate_and_write_conversations(
raw_intent_type,
fetch_intent_type,
datapath,
domain,
datatype,
unannotated_id_map,
skip_ids,
start_file_idx=0,
):
conversations_to_write = {} # conversationId -> list of turns
seen_conversations = set()
out_dir = get_processed_multidogo_folder(
datapath, domain, datatype, raw_intent_type
)
file_idx = start_file_idx
intent_tsv = _get_annotated_tsv_data(datapath, domain, datatype, fetch_intent_type)
next(intent_tsv) # don't need the header in the first line
print(f"Processing for {domain}, {fetch_intent_type}, {datatype}")
for labeled_line in tqdm.tqdm(
intent_tsv,
total=_get_annotated_tsv_data_size(
datapath, domain, datatype, fetch_intent_type
)
- 1,
):
conversation_id = labeled_line[0]
if conversation_id in skip_ids:
continue
if conversation_id not in seen_conversations:
# new conversation, add text of conversation to conversations_to_write
conversations_to_write[conversation_id] = {}
found_raw_conversation = _add_utterances(
unannotated_id_map,
conversation_id,
conversations_to_write,
datapath,
domain,
)
seen_conversations.add(conversation_id)
if not found_raw_conversation:
if DEBUG_MISSING_RAW_CONVERSATIONS:
print(f"Could not find raw conversations for {conversation_id}")
skip_ids.add(conversation_id)
conversations_to_write.pop(conversation_id, None)
continue
if fetch_intent_type == SENTENCE_INTENT:
_get_sentence_labels_and_slots_map(labeled_line, conversations_to_write)
elif fetch_intent_type == TURN_INTENT:
_get_turn_labels_and_slots_map(labeled_line, conversations_to_write)
else:
raise KeyError(
"Invalid `fetch_intent_type`. This case should never be hit. Something is broken in the `build.py` file."
)
# Don't forget to dump out last file
with open(f"{out_dir}/{file_idx}.json", "w+") as out_file:
json.dump(conversations_to_write, out_file, indent=4)
file_idx += 1
# Return necessary outputs for next pass
return file_idx, seen_conversations
def _add_utterances(
unannotated_id_map, conversation_id, conversations_to_write, datapath, domain
):
try:
start, length = unannotated_id_map[conversation_id]
except KeyError:
return False
conversation_text = islice(
_get_unannotated_tsv_data(datapath, domain), start, start + length
)
for line in conversation_text:
# Format of unannotated: conversationId,turnNumber,utteranceId,utterance,authorRole
conversations_to_write[conversation_id] = {
**conversations_to_write[conversation_id],
int(line[1]): {"text": line[3], "role": line[4]},
}
return True
def _get_sentence_labels_and_slots_map(labeled_line, output):
# Sentence tsv format: conversationId turnNumber sentenceNumber utteranceId utterance slot-labels intent
conversation_id = labeled_line[0]
    turn_number = int(float(labeled_line[1]))  # because a few turn numbers were saved as floats
if conversation_id not in output:
raise RuntimeError("Should never happen; raw conversation text should be here")
if turn_number not in output[conversation_id]:
output[conversation_id][turn_number] = {}
output[conversation_id][turn_number] = {
**output[conversation_id][turn_number],
"slots": _get_slots_map(labeled_line[4], labeled_line[5]),
}
if "intents" not in output[conversation_id][turn_number]:
output[conversation_id][turn_number]["intents"] = []
output[conversation_id][turn_number]["intents"].append(labeled_line[6])
def _get_turn_labels_and_slots_map(labeled_line, output):
# Turn tsv format: conversationId turnNumber utteranceId utterance slot-labels intent
conversation_id = labeled_line[0]
    turn_number = int(float(labeled_line[1]))  # because a few turn numbers were saved as floats
if conversation_id not in output:
raise RuntimeError("Should never happen; raw conversation text should be here")
if turn_number not in output[conversation_id]:
output[conversation_id][turn_number] = {}
output[conversation_id][turn_number] = {
**output[conversation_id][turn_number],
"slots": _get_slots_map(labeled_line[3], labeled_line[4]),
"intents": [labeled_line[5]],
}
def build(opt):
# get path to data directory
datapath = os.path.join(opt["datapath"], "multidogo")
# define version if any
version = "v1.1"
# check if data had been previously downloaded
if not build_data.built(datapath, version_string=version):
print("[building data: " + datapath + "]")
# make a clean directory if needed
if build_data.built(datapath):
# an older version exists, so remove these outdated files.
build_data.remove_dir(datapath)
build_data.make_dir(datapath)
# Download the data.
RESOURCE.download_file(datapath)
# mark the data as built
build_data.mark_done(datapath, version_string=version)
# do preprocessing on the data to put it into FBDialogueData format. There's a lot so check to make sure it's okay
for fold in ["train", "valid", "test"]:
_preprocess(opt, datapath, fold, version)
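def _example_build_usage():
    """Illustrative only (not part of the original file): the opt dict read by
    build() and _preprocess(). The datapath below is a placeholder."""
    example_opt = {
        "datapath": "/tmp/parlai_data",  # build() stores everything under <datapath>/multidogo
        "domains": ["fastfood"],  # optional, defaults to DOMAINS
        "intent_type": TURN_INTENT,  # optional; SENTENCE_INTENT / TURN_AND_SENTENCE_INTENT also work
    }
    build(example_opt)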
|
4e3a433b683459515a4760647a7a7deca26889ac
|
6c3259af340d28cdfe7aad63cfb1fb6bc81fb3da
|
/docs/conf.py
|
668bfaeeb471bbfb34d4713d907f8d22e714770b
|
[
"BSD-3-Clause"
] |
permissive
|
pytest-dev/pytest-django
|
df91bfb5536d6fb0a94cff647c4eb68c005c9e0d
|
53373573f905ec5e0ec5786f49efdcdca5ae41fd
|
refs/heads/master
| 2023-08-31T08:33:58.355321
| 2023-04-05T08:15:33
| 2023-04-05T08:15:33
| 2,484,397
| 1,169
| 344
|
NOASSERTION
| 2023-08-05T14:24:05
| 2011-09-29T19:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
conf.py
|
import os
import sys
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'pytestdocs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pytest-django'
copyright = f'{datetime.date.today().year}, Andreas Pelme and contributors'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytest-djangodoc'
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'django': ('https://docs.djangoproject.com/en/stable/',
'https://docs.djangoproject.com/en/stable/_objects/'),
'pytest': ('https://docs.pytest.org/en/stable/', None),
}
def setup(app):
# Allow linking to pytest's confvals.
app.add_object_type(
"confval",
"pytest-confval",
objname="configuration value",
indextemplate="pair: %s; configuration value",
)
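# Illustrative usage (not part of the original file): with the object type
# registered above, the reST sources can declare configuration values with
#     .. confval:: some_setting
# and cross-reference them (including, via intersphinx, values documented as
# confvals in pytest itself) with a role such as
#     :pytest-confval:`some_setting`
# The setting names here are placeholders.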
|
fdcac0a733c57300abcc2b39aae1e84de5876c8c
|
7fe5cb99188c7270f726b1145fc356fd2922df9d
|
/geoapi-java-python/src/main/python/opengis/bridge/java/referencing.py
|
e21992ff8a312ea341a4d4a6360f75724c084673
|
[
"Apache-2.0"
] |
permissive
|
opengeospatial/geoapi
|
eedcf2d3259284fc614f7fa132c77fcef67d7ecc
|
7a0872373e4ad70039b5ec9be2f086081fb5c746
|
refs/heads/master
| 2023-08-17T23:37:23.874101
| 2023-08-10T20:02:27
| 2023-08-10T20:02:27
| 27,879,839
| 111
| 33
|
NOASSERTION
| 2021-06-28T08:08:29
| 2014-12-11T16:13:54
|
Java
|
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
referencing.py
|
#
# GeoAPI - Programming interfaces for OGC/ISO standards
# Copyright © 2019-2023 Open Geospatial Consortium, Inc.
# http://www.geoapi.org
#
#
# This module requires jpy module to be installed on the local machine.
# That Java-Python module is documented at http://jpy.readthedocs.io/.
# In addition, the PYTHONPATH environmental variable must be set to the
# "geoapi/src/main/python" directory, preferably using absolute path.
#
import jpy
import opengis.referencing.cs
import opengis.referencing.crs
import opengis.referencing.datum
import opengis.referencing.operation
from opengis.bridge.java.metadata import Identifier
class IdentifiedObject(opengis.referencing.datum.IdentifiedObject):
def __init__(self, proxy):
self._proxy = proxy
@property
def name(self):
return Identifier(self._proxy.getName())
def to_wkt(self):
return self._proxy.toWKT()
def __str__(self):
return self._proxy.toString()
class GeodeticDatum(IdentifiedObject, opengis.referencing.datum.GeodeticDatum):
def __init__(self, proxy):
super().__init__(proxy)
@property
def ellipsoid(self):
return None
@property
def prime_meridian(self):
return None
class CoordinateSystemAxis(IdentifiedObject, opengis.referencing.datum.IdentifiedObject):
def __init__(self, proxy):
super().__init__(proxy)
@property
def abbreviation(self):
return self._proxy.getAbbreviation()
@property
def direction(self):
return None
@property
def unit(self):
return None
class CoordinateSystem(IdentifiedObject, opengis.referencing.cs.CoordinateSystem):
def __init__(self, proxy):
super().__init__(proxy)
def dimension(self):
return self._proxy.getDimension()
def axis(self, dim: int):
return self._proxy.getAxis(dim)
class CartesianCS(CoordinateSystem, opengis.referencing.cs.CoordinateSystem):
def __init__(self, proxy):
super().__init__(proxy)
class EllipsoidalCS(CoordinateSystem, opengis.referencing.cs.CoordinateSystem):
def __init__(self, proxy):
super().__init__(proxy)
class CoordinateReferenceSystem(IdentifiedObject, opengis.referencing.crs.ReferenceSystem):
def __init__(self, proxy):
super().__init__(proxy)
@property
def coordinate_system(self):
return self._proxy.getCoordinateSystem()
def datum(self):
return self._proxy.getDatum()
class GeographicCRS(CoordinateReferenceSystem, opengis.referencing.crs.GeodeticCRS):
def __init__(self, proxy):
super().__init__(proxy)
class ProjectedCRS(IdentifiedObject, opengis.referencing.crs.GeneralDerivedCRS):
def __init__(self, proxy):
super().__init__(proxy)
@property
def base_crs(self):
return self._proxy.getBaseCRS()
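def _example_wrap_java_crs():
    """Illustrative only (not part of the original module): wrapping a CRS
    obtained from a Java GeoAPI implementation through jpy. The classpath and
    the use of Apache SIS (CRS.forCode) are assumptions, not requirements of
    this bridge."""
    jpy.create_jvm(["-Djava.class.path=/path/to/geoapi-implementation.jar"])
    sis_crs_factory = jpy.get_type("org.apache.sis.referencing.CRS")
    java_crs = sis_crs_factory.forCode("EPSG:4326")
    crs = GeographicCRS(java_crs)
    print(crs)  # delegates to the Java object's toString()
    print(crs.to_wkt())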
|
b81f9a52fd7a3ed1440d96e21d5989270e7ad4b6
|
6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff
|
/introduction_to_applying_machine_learning/visual_object_detection/src/prepare_RecordIO.py
|
ca3f183363b220fe821e17ffab2242c30d1a5a6f
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aws/amazon-sagemaker-examples
|
8359afe544e873662bda5b8d2b07399c437213c9
|
43dae4b28531cde167598f104f582168b0a4141f
|
refs/heads/main
| 2023-08-26T04:42:52.342776
| 2023-08-25T14:37:19
| 2023-08-25T14:37:19
| 107,937,815
| 4,797
| 3,519
|
Apache-2.0
| 2023-09-14T19:47:03
| 2017-10-23T05:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,847
|
py
|
prepare_RecordIO.py
|
import json
import os
import random
import argparse
import numpy as np
from collections import defaultdict
from pathlib import Path
def write_line(img_path, width, height, boxes, ids, idx):
"""Create a line for each image with annotations, width, height and image name."""
    # the header follows the im2rec .lst convention:
    # A = header length (4), B = label width (5), C = image width, D = image height
A = 4
B = 5
C = width
D = height
# concat id and bboxes
labels = np.hstack((ids.reshape(-1, 1), boxes)).astype("float")
    # normalize bboxes (recommended)
labels[:, (1, 3)] /= float(width)
labels[:, (2, 4)] /= float(height)
# flatten
labels = labels.flatten().tolist()
str_idx = [str(idx)]
str_header = [str(x) for x in [A, B, C, D]]
str_labels = [str(x) for x in labels]
str_path = [img_path]
line = "\t".join(str_idx + str_header + str_labels + str_path) + "\n"
return line
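def _example_write_line():
    """Illustrative only: one 640x480 image with a single box of class id 3.
    Columns 1 and 3 of the label record are divided by the width and columns
    2 and 4 by the height, so the returned .lst line is (approximately)
    '0\\t4\\t5\\t640\\t480\\t3.0\\t0.0156\\t0.0417\\t0.1563\\t0.4167\\timg.jpg\\n'."""
    boxes = np.array([[10, 20, 100, 200]])
    ids = np.array([3])
    return write_line("img.jpg", 640, 480, boxes, ids, 0)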
# adapted from __main__ in im2rec.py
def write_lst(output_file, ids, images_annotations):
all_labels = set()
image_info = {}
for entry in images_annotations['images']:
if entry["id"] in ids:
image_info[entry["id"]] = entry
    annotations_info = {}  # one annotation entry for each id (i.e., image)
for entry in images_annotations['annotations']:
image_id = entry['image_id']
if image_id in ids:
if image_id not in annotations_info:
annotations_info[image_id] = {'boxes': [], 'labels': []}
annotations_info[image_id]['boxes'].append(entry['bbox'])
annotations_info[image_id]['labels'].append(entry['category_id'])
all_labels.add(entry['category_id'])
labels_list = [label for label in all_labels]
class_to_idx_mapping = {label: idx for idx, label in enumerate(labels_list)}
with open(output_file, "w") as fw:
for i, image_id in enumerate(annotations_info):
im_info = image_info[image_id]
image_file = im_info['file_name']
height = im_info['height']
width = im_info['width']
an_info = annotations_info[image_id]
boxes = np.array(an_info['boxes'])
labels = np.array([class_to_idx_mapping[label] for label in an_info['labels']])
line = write_line(image_file, width, height, boxes, labels, i)
fw.write(line)
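# Expected shape of annotations.json (illustrative), based only on the keys the
# functions above and below actually read; file_name encodes the class as the
# prefix before the first underscore (used by create_lst for the split):
# {
#   "images":      [{"id": 1, "file_name": "cat_001.jpg", "width": 640, "height": 480}, ...],
#   "annotations": [{"image_id": 1, "bbox": [<4 box coordinates>], "category_id": 7}, ...]
# }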
def create_lst(data_dir, args, rnd_seed=100):
"""Generate an lst file based on annotations file which is used to convert the input data to .rec format."""
with open(os.path.join(data_dir, 'annotations.json')) as f:
images_annotations = json.loads(f.read())
# Size of each class
class_ids = defaultdict(list)
for entry in images_annotations['images']:
cls_ = entry['file_name'].split('_')[0]
class_ids[cls_].append(entry['id'])
print('\ncategory\tnum of images')
print('---------------')
for cls_ in class_ids.keys():
print(f"{cls_}\t{len(class_ids[cls_])}")
random.seed(rnd_seed)
# Split train/val/test image ids
if args.test_ratio:
test_ids = []
if args.train_ratio + args.test_ratio < 1.0:
val_ids = []
train_ids = []
for cls_ in class_ids.keys():
random.shuffle(class_ids[cls_])
N = len(class_ids[cls_])
ids = class_ids[cls_]
sep = int(N * args.train_ratio)
sep_test = int(N * args.test_ratio)
if args.train_ratio == 1.0:
train_ids.extend(ids)
else:
if args.test_ratio:
test_ids.extend(ids[:sep_test])
if args.train_ratio + args.test_ratio < 1.0:
val_ids.extend(ids[sep_test + sep:])
train_ids.extend(ids[sep_test: sep_test + sep])
write_lst(args.prefix + "_train.lst", train_ids, images_annotations)
lsts = [args.prefix + "_train.lst"]
if args.test_ratio:
write_lst(args.prefix + "_test.lst", test_ids, images_annotations)
lsts.append(args.prefix + "_test.lst")
if args.train_ratio + args.test_ratio < 1.0:
write_lst(args.prefix + "_val.lst", val_ids, images_annotations)
lsts.append(args.prefix + "_val.lst")
return lsts
def parse_args():
"""Defines all arguments.
Returns:
args object that contains all the params
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create an image list or \
make a record database by reading from an image list",
)
parser.add_argument("prefix", help="prefix of input/output lst and rec files.")
parser.add_argument("root", help="path to folder containing images.")
cgroup = parser.add_argument_group("Options for creating image lists")
cgroup.add_argument(
"--exts", nargs="+", default=[".jpeg", ".jpg", ".png"], help="list of acceptable image extensions."
)
cgroup.add_argument("--train-ratio", type=float, default=0.8, help="Ratio of images to use for training.")
cgroup.add_argument("--test-ratio", type=float, default=0, help="Ratio of images to use for testing.")
cgroup.add_argument(
"--recursive",
action="store_true",
help="If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.",
)
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
data_dir = Path(args.root).parent
lsts = create_lst(data_dir, args)
print()
for lst in lsts:
os.system(f"python3 ./src/im2rec.py {lst} {os.path.join(data_dir, 'images')} --pass-through --pack-label")
print()
|
0ba6a124c275c4e35a3a32d60fcf7db07a7d520e
|
ff7afe98eaaf7b5b0b7d77ea37dacb0e355fc141
|
/test/test_nonparametric.py
|
bf40d1730201cd7f216b3594165bbd7c23d8fd4d
|
[
"MIT"
] |
permissive
|
jameschapman19/cca_zoo
|
d433c14037ec22ce464531a2a523899d76c3a139
|
084901cf1a0d484d0d8c6a30775971569b5b8da0
|
refs/heads/main
| 2023-08-08T00:39:16.412081
| 2023-08-07T12:31:07
| 2023-08-07T12:31:07
| 303,801,602
| 166
| 34
|
MIT
| 2023-09-14T11:34:24
| 2020-10-13T18:58:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
test_nonparametric.py
|
"""
Test the kernel methods
"""
import numpy as np
from cca_zoo.linear import GCCA, MCCA, TCCA
from cca_zoo.nonparametric import KCCA, KGCCA, KTCCA, NCCA
np.random.seed(1)
X = np.random.normal(size=(100, 10))
Y = np.random.normal(size=(100, 10))
Z = np.random.normal(size=(100, 10))
# test that MCCA is the same as KCCA with linear kernel
def test_MCCA_KCCA():
kcca = KCCA(latent_dimensions=2)
mcca = MCCA(latent_dimensions=2)
kcca.fit([X, Y, Z])
mcca.fit([X, Y, Z])
mcca_score = mcca.score([X, Y, Z])
kcca_score = kcca.score([X, Y, Z])
assert np.allclose(mcca_score, kcca_score)
# test that GCCA is the same as KGCCA with linear kernel
def test_GCCA_KGCCA():
kgcca = KGCCA(latent_dimensions=2)
gcca = GCCA(latent_dimensions=2)
kgcca.fit([X, Y, Z])
gcca.fit([X, Y, Z])
gcca_score = gcca.score([X, Y, Z])
kgcca_score = kgcca.score([X, Y, Z])
assert np.allclose(gcca_score, kgcca_score)
# test that TCCA is the same as KTCCA with linear kernel
def test_TCCA_KTCCA():
ktcca = KTCCA(latent_dimensions=2)
tcca = TCCA(latent_dimensions=2)
ktcca.fit([X, Y, Z])
tcca.fit([X, Y, Z])
tcca_score = tcca.score([X, Y, Z])
ktcca_score = ktcca.score([X, Y, Z])
assert np.allclose(tcca_score, ktcca_score)
def test_rbf_kernel():
kcca = KCCA(latent_dimensions=2, kernel="rbf").fit((X, Y, Z))
kgcca = KGCCA(latent_dimensions=2, kernel="rbf").fit((X, Y, Z))
ktcca = KTCCA(latent_dimensions=2, kernel="rbf").fit((X, Y, Z))
def test_poly_kernel():
kcca = KCCA(latent_dimensions=2, kernel="poly").fit((X, Y, Z))
kgcca = KGCCA(latent_dimensions=2, kernel="poly").fit((X, Y, Z))
ktcca = KTCCA(latent_dimensions=2, kernel="poly").fit((X, Y, Z))
def test_sigmoid_kernel():
kcca = KCCA(latent_dimensions=2, kernel="sigmoid").fit((X, Y, Z))
kgcca = KGCCA(latent_dimensions=2, kernel="sigmoid").fit((X, Y, Z))
ktcca = KTCCA(latent_dimensions=2, kernel="sigmoid").fit((X, Y, Z))
def test_cosine_kernel():
kcca = KCCA(latent_dimensions=2, kernel="cosine").fit((X, Y, Z))
kgcca = KGCCA(latent_dimensions=2, kernel="cosine").fit((X, Y, Z))
ktcca = KTCCA(latent_dimensions=2, kernel="cosine").fit((X, Y, Z))
def test_callable_kernel():
def my_kernel(X, Y, **kwargs):
return np.dot(X, Y.T)
kcca = KCCA(latent_dimensions=2, kernel=my_kernel).fit((X, Y, Z))
kgcca = KGCCA(latent_dimensions=2, kernel=my_kernel).fit((X, Y, Z))
ktcca = KTCCA(latent_dimensions=2, kernel=my_kernel).fit((X, Y, Z))
def test_NCCA():
latent_dims = 1
ncca = NCCA(latent_dimensions=latent_dims).fit((X, Y))
corr_ncca = ncca.score((X, Y))
assert corr_ncca > 0.9
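# Illustrative extra check (not in the original file): transform() is assumed
# to follow the same fit/score-style API used above, returning one
# (n_samples, latent_dimensions) array per view.
def test_KCCA_transform_shapes():
    kcca = KCCA(latent_dimensions=2).fit((X, Y, Z))
    transformed = kcca.transform((X, Y, Z))
    assert len(transformed) == 3
    assert all(t.shape == (100, 2) for t in transformed)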
|
3e97b33167652834528854352a033fcb2c8ea592
|
8c0b804f1cc8cbf2f8788727df22a2cc149f7b5c
|
/gala/potential/scf/__init__.py
|
34778fe9caa2cf4084b70d98e7b769701342661a
|
[
"MIT"
] |
permissive
|
adrn/gala
|
579cc5a4ecb22df118e1c8a2322a46e935825054
|
f62e1a6ae7a8466a4db5c8407471b524cf085637
|
refs/heads/main
| 2023-09-04T11:42:07.278388
| 2023-08-18T18:04:35
| 2023-08-18T18:04:35
| 17,577,779
| 115
| 89
|
MIT
| 2023-09-05T11:40:10
| 2014-03-10T00:56:18
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
__init__.py
|
"""
Implementation of the Self-Consistent Field (SCF) expansion method.
"""
from .core import compute_coeffs, compute_coeffs_discrete
from ._bfe_class import SCFPotential, SCFInterpolatedPotential
|
98beb624c8b780855cbd25aea50fcbc7e78a277b
|
55defa28b5bd395e7ead2f9ca848f378ee2c8b13
|
/python/tvm/contrib/hexagon/build.py
|
ef081f2d79e3eabffed031212549eb4631c2e215
|
[
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
neo-ai/tvm
|
456d48c8d80bd7190c91b488b8f9d6cf22918706
|
da529bf421fcfddd914b41bbe9bf9d5863671266
|
refs/heads/dev
| 2023-03-06T03:28:18.303189
| 2022-05-09T04:25:16
| 2022-05-09T04:25:16
| 167,632,700
| 101
| 43
|
Apache-2.0
| 2023-02-17T20:49:09
| 2019-01-26T00:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 10,516
|
py
|
build.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines top-level glue functions for building Hexagon."""
import pathlib
import os
import subprocess
from typing import Union
import stat
import datetime
import tvm
from ..._ffi import libinfo
from .session import Session
RPC_SERVER_FILES = ["tvm_rpc_android", "libtvm_runtime.so", "android_bash.sh"]
HEXAGON_FILES = ["libhexagon_rpc_skel.so"]
HEXAGON_RPC_DIR = None
ANDROID_HEXAGON_TEST_BASE_DIR = pathlib.Path("/data/local/tmp/hexagon_test")
def get_hexagon_rpc_dir() -> pathlib.Path:
"""Find the Hexagon library.
Returns
-------
str :
The path to the Hexagon library
"""
global HEXAGON_RPC_DIR
if HEXAGON_RPC_DIR is None:
for path in libinfo.find_lib_path():
rpc_dir = os.path.join(os.path.dirname(path), "hexagon_rpc")
if os.path.isdir(rpc_dir):
HEXAGON_RPC_DIR = pathlib.Path(rpc_dir)
break
else:
raise "hexagon_rpc was not found."
return HEXAGON_RPC_DIR
class HexagonLauncher:
"""Hexagon Launcher"""
def __init__(self, serial_number: str, workspace_size_gb: int = 1):
"""Configure a new HexagonLauncher
Parameters
----------
serial_number : str
Android device serial number from android 'adb' command.
"""
# Hexagon RPCSession
self.session = None
self._serial_number = serial_number
self._adb_device_sub_cmd = ["adb", "-s", self._serial_number]
self._mod = None
self._workspace = None
self._workspace_max_size_mb = workspace_size_gb * 1024
HEXAGON_REMOTE_DEVICE_KEY = "hexagon-dev"
def android_run_rpc(
self,
workspace_dir: Union[str, pathlib.Path] = None,
rpc_server_port: int = 7070,
rpc_tracker_host: str = "0.0.0.0",
rpc_tracker_port: int = 9190,
):
"""Upload Android artifacts and run RPC server on Android.
Parameters
----------
workspace_dir : Union[str, pathlib.Path]
Workspace directory used on Android to upload artifacts.
rpc_server_port : int
Android RPC server port number
rpc_tracker_host : str
RPC tracker IP on host
rpc_tracker_port : int
RPC tracker port on host
"""
# Create test base directory
subprocess.check_call(
self._adb_device_sub_cmd + ["shell", "mkdir", "-p", ANDROID_HEXAGON_TEST_BASE_DIR]
)
# Check size of base directory and cleanup if needed
while self._get_workspace_size() > self._workspace_max_size_mb:
self._workspace_remove_latest()
if not workspace_dir:
self._workspace = str(
ANDROID_HEXAGON_TEST_BASE_DIR
/ datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
)
else:
self._workspace = workspace_dir
# Upload RPC server and libraries
subprocess.check_call(self._adb_device_sub_cmd + ["shell", "mkdir", "-p", self._workspace])
# Create bash script
android_bash_script_path = get_hexagon_rpc_dir() / "android_bash.sh"
with open(get_hexagon_rpc_dir() / "android_bash.sh.template", "r") as src_f:
if os.path.exists(android_bash_script_path):
os.remove(android_bash_script_path)
with open(android_bash_script_path, "w") as dest_f:
for line in src_f.readlines():
if "<RPC_TRACKER_HOST>" in line:
line = line.replace("<RPC_TRACKER_HOST>", str(rpc_tracker_host))
if "<RPC_TRACKER_PORT>" in line:
line = line.replace("<RPC_TRACKER_PORT>", str(rpc_tracker_port))
if "<HEXAGON_REMOTE_DEVICE_KEY>" in line:
line = line.replace(
"<HEXAGON_REMOTE_DEVICE_KEY>", self.HEXAGON_REMOTE_DEVICE_KEY
)
if "<RPC_SERVER_PORT>" in line:
line = line.replace("<RPC_SERVER_PORT>", str(rpc_server_port))
dest_f.write(line)
# Make shell script executable
android_bash_stat = os.stat(android_bash_script_path)
os.chmod(android_bash_script_path, android_bash_stat.st_mode | stat.S_IEXEC)
# Push files
for item in RPC_SERVER_FILES:
src_path = get_hexagon_rpc_dir() / item
destination = f"{self._workspace}/{item}"
subprocess.check_call(self._adb_device_sub_cmd + ["push", src_path, destination])
        # Remove pre-defined forward/reverse rules
subprocess.check_call(self._adb_device_sub_cmd + ["forward", "--remove-all"])
subprocess.check_call(self._adb_device_sub_cmd + ["reverse", "--remove-all"])
# Enable port reverse for RPC tracker
subprocess.check_call(
self._adb_device_sub_cmd
+ ["reverse", f"tcp:{rpc_tracker_port}", f"tcp:{rpc_tracker_port}"]
)
        # Enable port forwarding for the RPC server: rpc_server_port and the 9 ports after it.
for i in range(0, 10):
subprocess.check_call(
self._adb_device_sub_cmd
+ ["forward", f"tcp:{rpc_server_port+i}", f"tcp:{rpc_server_port+i}"]
)
# Run server and connect to tracker
subprocess.Popen(
self._adb_device_sub_cmd + ["shell", f"cd {self._workspace} && ./android_bash.sh"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def hexagon_setup(self):
"""Upload Hexagon artifacts on Android."""
for item in HEXAGON_FILES:
src_path = get_hexagon_rpc_dir() / item
dst_path = f"{self._workspace}/{item}"
subprocess.check_call(self._adb_device_sub_cmd + ["push", src_path, dst_path])
def hexagon_session_setup(self, remote_kw: dict):
"""Setup Hexagon RPC Session from host to Hexagon device.
Parameters
----------
remote_kw : dict
RPC tracker configs.
"""
hexagon_remote_kw = dict(remote_kw)
hexagon_remote_kw["key"] = self.HEXAGON_REMOTE_DEVICE_KEY
self.session = Session(hexagon_remote_kw)
def get_module(self, module_name: str):
"""Load a Hexagon TVM module, already uploaded on Android, on Hexagon and return the module.
Parameters
----------
module_name : str
Module filename.
Returns
-------
TVMModule :
A TVM Module loaded on hexagon.
"""
module_path = f"{self._workspace}/{module_name}"
self._mod = self.session.load_module(module_path)
return self._mod
def upload(self, host_path: Union[str, pathlib.Path], remote_filename: str):
"""Upload a file to remote(Android).
Parameters
----------
host_path : Union[str, pathlib.Path]
File path on host.
remote_filename : str
File name on remote(Android).
Returns
-------
TVMModule :
A TVM Module loaded on hexagon.
"""
src_path = str(host_path)
dst_remote_path = f"{self._workspace}/{remote_filename}"
subprocess.check_call(self._adb_device_sub_cmd + ["push", src_path, dst_remote_path])
def get_graph_executor(self, libmod, remote_libmod_filename: str):
"""Create a local GraphModule which consumes a remote libmod.
Parameters
----------
libmod : tvm.runtime.Module
The module of the corresponding function.
This library module is for remote hexagon runtime.
remote_libmod_filename : str
Module filename on remote. It is assumed this file lives under self._workspace path.
Returns
-------
graph_module : GraphModule
Runtime graph module that can be used to execute the graph.
"""
self.session.__enter__()
hexagon_mod = self.get_module(remote_libmod_filename)
return tvm.contrib.graph_executor.create(
libmod.get_graph_json(), hexagon_mod, self.session.device
)
def close(self):
"""Close RPC server on Android"""
        # Kill child processes
subprocess.Popen(
self._adb_device_sub_cmd + ["shell", f"pkill -P `cat {self._workspace}/rpc_pid.txt`"]
)
# Kill main process
subprocess.Popen(
self._adb_device_sub_cmd + ["shell", f"kill `cat {self._workspace}/rpc_pid.txt`"]
)
def _get_workspace_size(self) -> int:
"""Get workspace base directory size in MB"""
line = subprocess.check_output(
self._adb_device_sub_cmd + ["shell", "du", "-shm", str(ANDROID_HEXAGON_TEST_BASE_DIR)],
encoding="utf-8",
)
return int(line.split("\t")[0])
def _workspace_remove_latest(self):
        # Find the oldest (lowest-numbered) directory
latest_dir = subprocess.check_output(
self._adb_device_sub_cmd
+ [
"shell",
"find",
str(ANDROID_HEXAGON_TEST_BASE_DIR),
"!",
"-path",
".",
"-type",
"d",
"|",
"sort",
"-n",
"|",
"head",
"-1",
],
encoding="utf-8",
)
latest_dir = latest_dir.replace("\n", "").replace("\t", "")
subprocess.check_call(self._adb_device_sub_cmd + ["shell", "rm", "-rf", latest_dir])
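def _example_launcher_usage():
    """Illustrative only (not part of the original file): the typical call
    order for HexagonLauncher based on the methods above. The serial number,
    tracker address and file names are placeholders, and the keys passed in
    remote_kw are whatever Session expects for the RPC tracker connection."""
    launcher = HexagonLauncher(serial_number="0123456789ABCDEF")
    launcher.android_run_rpc(rpc_tracker_host="192.168.1.10", rpc_tracker_port=9190)
    launcher.hexagon_setup()
    launcher.hexagon_session_setup({"host": "192.168.1.10", "port": 9190})
    launcher.upload("model.so", "model.so")
    # launcher.get_graph_executor(lib, "model.so") would wrap a compiled graph module
    launcher.close()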
|
30a84c439cd38adac02774ed4ebe9269a4ce48ab
|
7f16d69f3f66ce1973ee0325924685bd8cfbfb51
|
/assignment1/q3_sgd.py
|
5b4d7af1ee51afea6a156e795786f9934087c3d5
|
[
"MIT"
] |
permissive
|
bogatyy/cs224d
|
f9c894d1cfb23b1e75ab1cc797429a3a21660300
|
cfa36b582f681d425ae373baf61a6a1940170c9a
|
refs/heads/master
| 2021-07-11T22:11:04.427078
| 2020-06-11T10:38:57
| 2020-06-11T10:38:57
| 68,249,301
| 256
| 117
| null | 2019-03-01T02:14:44
| 2016-09-14T22:44:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
q3_sgd.py
|
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 1000
import glob
import random
import numpy as np
import os.path as op
import pickle
def load_saved_params():
""" A helper function that loads previously saved parameters and resets iteration start """
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
print "Loading saved params %d" % st
with open("saved_params_%d.npy" % st, "r") as f:
params = pickle.load(f)
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
with open("saved_params_%d.npy" % iter, "w") as f:
pickle.dump(params, f)
pickle.dump(random.getstate(), f)
def sgd(f_and_grad, x0, step, iterations, postprocess = None, useSaved = False, PRINT_EVERY=10):
""" Stochastic Gradient Descent """
# Implement the stochastic gradient descent method in this
# function.
# Inputs:
# - f_and_grad: the function to optimize, it should take a single
# argument and yield two outputs, a cost and the gradient
# with respect to the arguments
# - x0: the initial point to start SGD from
# - step: the step size for SGD
# - iterations: total iterations to run SGD for
# - postprocess: postprocessing function for the parameters
# if necessary. In the case of word2vec we will need to
# normalize the word vectors to have unit length.
# - PRINT_EVERY: specifies every how many iterations to output
# Output:
# - x: the parameter value after SGD finishes
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
            x0 = oldx
            step *= 0.5 ** (start_iter // ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocess:
postprocess = lambda x: x
expcost = None
    for iter in range(start_iter + 1, iterations + 1):
### Don't forget to apply the postprocessing after every iteration!
### You might want to print the progress every few iterations.
cost, grad = f_and_grad(x)
x = postprocess(x - step * grad)
if iter % PRINT_EVERY == 0:
if not expcost:
expcost = cost
else:
expcost = .95 * expcost + .05 * cost
print "iter %d: %f" % (iter, expcost)
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
def sanity_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print "Running sanity checks..."
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
print "test 1 result:", t1
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
print "test 2 result:", t2
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
print "test 3 result:", t3
assert abs(t3) <= 1e-6
print ""
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q3_sgd.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
if __name__ == "__main__":
    sanity_check()
    your_sanity_checks()
|
1b04a603cf29fa41db15d18210b3727c19853543
|
9704880cedb299fff566cb0a2b8996abedf945fe
|
/Data Structures and Algorithms Nanodegree/P1 - Unscramble Computer Science Problems/Task2.py
|
4f61bd2e1c50398d36a4b15981dd3efac493febd
|
[
"MIT"
] |
permissive
|
manishbisht/Udacity
|
b25eeaf0267bfedeac81b01b673d533a0e35952b
|
b53690e508c2ae3fa04b56393e6c8eea3c60f940
|
refs/heads/master
| 2023-03-07T03:02:58.993961
| 2022-09-08T14:39:09
| 2022-09-08T14:39:09
| 63,041,835
| 173
| 143
|
MIT
| 2023-03-03T19:30:26
| 2016-07-11T06:19:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
Task2.py
|
"""
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
callDuration = {}
for call in calls:
if call[0] in callDuration.keys():
callDuration[call[0]] += int(call[3])
else:
callDuration[call[0]] = int(call[3])
if call[1] in callDuration.keys():
callDuration[call[1]] += int(call[3])
else:
callDuration[call[1]] = int(call[3])
sortedCallDuration = sorted(callDuration.items(), key=lambda x: x[1], reverse=True)
print "{} spent the longest time, {} seconds, on the phone during September 2016.".format(sortedCallDuration[0][0], sortedCallDuration[0][1])
|
552e148741f844f395bc8cc5e53a4611f4a0dd7a
|
40062e1545591a1a93c04e0ea084c48405c665fa
|
/examples/python/basic/benchmarks.py
|
a895522eedeada476af51e1f120ca1d3d3ea65c0
|
[
"MIT"
] |
permissive
|
neka-nat/cupoch
|
ad1f55eba2f4fb810ff2cd3af620f11a90aef69a
|
122b69b5312885734c224f9deef813e2b1463017
|
refs/heads/master
| 2023-07-09T00:08:50.033219
| 2023-06-30T07:53:53
| 2023-06-30T07:53:53
| 216,847,240
| 739
| 103
|
MIT
| 2023-04-04T15:30:55
| 2019-10-22T15:26:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,900
|
py
|
benchmarks.py
|
import os
os.environ["OMP_NUM_THREADS"] = str(1)
import numpy as np
import cupoch as cph
cph.initialize_allocator(cph.PoolAllocation, 1000000000)
import open3d as o3d
import time
def measure_time(obj, method_name, device, *args):
fn = getattr(obj, method_name)
start = time.time()
res = fn(*args)
elapsed_time = time.time() - start
print("%s (%s) [sec]:" % (method_name, device), elapsed_time)
return res, elapsed_time
pcd_file = "../../testdata/fragment.ply"
pc_cpu, tc = measure_time(o3d.io, "read_point_cloud", "CPU", pcd_file)
print(pc_cpu)
pc_gpu, tg = measure_time(cph.io, "read_point_cloud", "GPU", pcd_file)
print(pc_gpu)
speeds = {}
tf = np.identity(4)
_, tc = measure_time(pc_cpu, "transform", "CPU", tf)
_, tg = measure_time(pc_gpu, "transform", "GPU", tf)
speeds["transform"] = tc / tg
_, tc = measure_time(pc_cpu, "estimate_normals", "CPU")
_, tg = measure_time(pc_gpu, "estimate_normals", "GPU")
speeds["estimate_normals"] = tc / tg
_, tc = measure_time(pc_cpu, "voxel_down_sample", "CPU", 0.005)
_, tg = measure_time(pc_gpu, "voxel_down_sample", "GPU", 0.005)
speeds["voxel_down_sample"] = tc / tg
_, tc = measure_time(pc_cpu, "remove_radius_outlier", "CPU", 10, 0.1)
_, tg = measure_time(pc_gpu, "remove_radius_outlier", "GPU", 10, 0.1)
speeds["remove_radius_outlier"] = tc / tg
_, tc = measure_time(pc_cpu, "remove_statistical_outlier", "CPU", 20, 2.0)
_, tg = measure_time(pc_gpu, "remove_statistical_outlier", "GPU", 20, 2.0)
speeds["remove_statistical_outlier"] = tc / tg
trans_init = np.asarray(
[
[np.cos(np.deg2rad(30.0)), -np.sin(np.deg2rad(30.0)), 0.0, 0.0],
[np.sin(np.deg2rad(30.0)), np.cos(np.deg2rad(30.0)), 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
tg_cpu = pc_cpu
tg_cpu.transform(trans_init)
tg_gpu = pc_gpu
tg_gpu.transform(trans_init)
threshold = 0.02
_, tc = measure_time(
o3d.pipelines.registration,
"registration_icp",
"CPU",
pc_cpu,
tg_cpu,
threshold,
trans_init,
o3d.pipelines.registration.TransformationEstimationPointToPoint(),
)
_, tg = measure_time(
cph.registration,
"registration_icp",
"GPU",
pc_gpu,
tg_gpu,
threshold,
trans_init.astype(np.float32),
cph.registration.TransformationEstimationPointToPoint(),
)
speeds["registration_icp"] = tc / tg
_, tc = measure_time(pc_cpu, "cluster_dbscan", "CPU", 0.02, 10)
_, tg = measure_time(pc_gpu, "cluster_dbscan", "GPU", 0.02, 10)
speeds["cluster_dbscan"] = tc / tg
import matplotlib.pyplot as plt
plt.style.use("seaborn")
plt.title("Speedup over CPU (%d points)" % np.asarray(pc_gpu.points.cpu()).shape[0])
plt.yscale("log")
plt.grid(which="major", color="white", linestyle="-")
plt.grid(which="minor", color="white", linestyle="-")
plt.ylabel("Speedup")
plt.bar(speeds.keys(), speeds.values())
plt.xticks(rotation=70)
plt.tight_layout()
plt.show()
|
993bfbaec4aeac075f9c68edb2b30fb95acdd4d0
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/datasets/danish_data/__init__.py
|
58e9c0abac4c7493518e4a550694016fae82ff21
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
py
|
__init__.py
|
__all__ = [
"load",
"load_pandas",
"COPYRIGHT",
"TITLE",
"SOURCE",
"DESCRSHORT",
"DESCRLONG",
"NOTE",
]
from .data import (
load,
load_pandas,
COPYRIGHT,
TITLE,
SOURCE,
DESCRSHORT,
DESCRLONG,
NOTE,
)
|
789eeb26892d9c1eaf43eaa0bfa3ef15791a37df
|
32e910f5440c10b384bb26b5555ac7adb77540ee
|
/tools/InterfaceGenerator/generator/generators/__init__.py
|
57e528dc8daa176fd9673bb7fa5413555b96c13f
|
[] |
permissive
|
smartdevicelink/sdl_core
|
76658282fd85b16ed6d91d8d4087d8cd1353db76
|
7343fc72c12edc8ac42a62556c9e4b29c9408bc3
|
refs/heads/master
| 2022-11-04T12:17:58.725371
| 2022-10-26T15:34:13
| 2022-10-26T15:34:13
| 24,724,170
| 269
| 306
|
BSD-3-Clause
| 2022-10-26T15:34:15
| 2014-10-02T15:16:26
|
C++
|
UTF-8
|
Python
| false
| false
| 97
|
py
|
__init__.py
|
"""Package that contains generators from internal model representation to
different formats.
"""
|
5787492820b96b6fe7e60564e3d32cca06e60c10
|
77706ad1eef0f6ff1d8963311018a28949ffbf1e
|
/Examples/Simple Beam - Factored Envelope.py
|
859c7b07d548c19acd20a0f7958b28b4faaf75e1
|
[
"MIT"
] |
permissive
|
JWock82/PyNite
|
883b6ad3bbbea1c68e5de7ae1a4ae23a3e60e17f
|
2e43996278a590ba0681190bfea9d3dabe5470af
|
refs/heads/master
| 2023-08-17T23:51:33.874310
| 2023-08-12T23:57:21
| 2023-08-12T23:57:21
| 109,416,263
| 327
| 89
|
MIT
| 2023-08-26T05:32:42
| 2017-11-03T16:11:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,434
|
py
|
Simple Beam - Factored Envelope.py
|
# Units used for the model in this example are inches and kips
# Import `FEModel3D` from `PyNite`
from PyNite import FEModel3D
from matplotlib import pyplot as plt
import numpy as np
# Import 'Visualization' for rendering the model
from PyNite import Visualization
# Create a new finite element model
simple_beam = FEModel3D()
# Add nodes (14 ft apart)
simple_beam.add_node('N1', 0, 0, 0)
simple_beam.add_node('N2', 14*12, 0, 0)
# Define a material
E = 29000 # Modulus of elasticity (ksi)
G = 11200 # Shear modulus of elasticity (ksi)
nu = 0.3 # Poisson's ratio
rho = 2.836e-4 # Density (kci)
simple_beam.add_material('Steel', E, G, nu, rho)
# Add a beam with the following properties:
# Iy = 100 in^4, Iz = 150 in^4, J = 250 in^4, A = 20 in^2
simple_beam.add_member('M1', 'N1', 'N2', 'Steel', 100, 150, 250, 20)
# Provide simple supports
simple_beam.def_support('N1', True, True, True, True, False, False) # Constrained for torsion at 'N1'
simple_beam.def_support('N2', True, True, True, False, False, False) # Not constrained for torsion at 'N2'
# Add point loads to the beam in separate dead, snow, and live load cases
simple_beam.add_member_pt_load('M1', 'Fy', 5, 4*12, 'D') # 5 kips Dead load at 4 ft
simple_beam.add_member_pt_load('M1', 'Fy', 8, 9*12, 'S') # 8 kips Snow load at 9 ft
simple_beam.add_member_pt_load('M1', 'Fy', -8, 12*12, 'L') # 8 kips Live load at 12 ft
# Add load combinations
simple_beam.add_load_combo('1.4D', {'D':1.4})
simple_beam.add_load_combo('1.2D+1.6L', {'D':1.2, 'L':1.6})
simple_beam.add_load_combo('1.2D+1.6S', {'D':1.2, 'S':1.6})
# Analyze the beam and perform a statics check
simple_beam.analyze(check_statics=True)
# Visualization.render_model(simple_beam, annotation_size=10, deformed_shape=True, deformed_scale=30, render_loads=True, combo_name='1.2D+1.6L')
# Plot the moment diagram for each load combination along with the max/min envelope
x, M1 = simple_beam.Members['M1'].moment_array("Mz", n_points=400, combo_name='1.4D')
_, M2 = simple_beam.Members['M1'].moment_array("Mz", n_points=400, combo_name='1.2D+1.6L')
_, M3 = simple_beam.Members['M1'].moment_array("Mz", n_points=400, combo_name='1.2D+1.6S')
max_envelope = np.maximum(np.maximum(M1, M2), M3)
min_envelope = np.minimum(np.minimum(M1, M2), M3)
plt.plot(x, np.zeros(len(x)), c="black", lw=3)
plt.plot(x, M1)
plt.plot(x, M2)
plt.plot(x, M3)
plt.plot(x, max_envelope, alpha=0.3, c="green", lw=5)
plt.plot(x, min_envelope, alpha=0.3, c="red", lw=5)
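# Note added for clarity (not part of the original example): when this script is
# run as a plain Python program rather than in an interactive session, the figure
# presumably needs an explicit call to be displayed (or saved with plt.savefig).
plt.show()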
|
92aecbd662e949161dd51cfc6ac4365af5632e4a
|
c18fcfe534a1c5ff8112aa84076fb43e17da5f2a
|
/recipes/quast/quast-download-busco
|
e75e2f7173c9a9ad281f626aba7b88eaab036a28
|
[
"MIT"
] |
permissive
|
bioconda/bioconda-recipes
|
fa262767710502be6d174b1d1409f551f1bb5e91
|
78414cd3322c817449fccbdf6ebc07eca9f95901
|
refs/heads/master
| 2023-08-17T01:20:42.943416
| 2023-08-17T00:25:32
| 2023-08-17T00:25:32
| 42,372,094
| 1,664
| 4,797
|
MIT
| 2023-09-14T20:55:01
| 2015-09-12T20:33:30
|
Shell
|
UTF-8
|
Python
| false
| false
| 293
|
quast-download-busco
|
#!/usr/bin/env python
from quast_libs import qconfig
from quast_libs.log import get_logger
from quast_libs.run_busco import download_all_db, download_augustus
logger = get_logger(qconfig.LOGGER_DEFAULT_NAME)
logger.set_up_console_handler()
download_augustus(logger)
download_all_db(logger)
|
|
32ce98122f5996b93617b1d2021c128a3eac27e0
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/netatmo/application_credentials.py
|
5536343ebe6a6888c0c44e5ad3ef88dfdc314aff
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
application_credentials.py
|
"""Application credentials platform for Netatmo."""
from homeassistant.components.application_credentials import AuthorizationServer
from homeassistant.core import HomeAssistant
from .const import OAUTH2_AUTHORIZE, OAUTH2_TOKEN
async def async_get_authorization_server(hass: HomeAssistant) -> AuthorizationServer:
"""Return authorization server."""
return AuthorizationServer(
authorize_url=OAUTH2_AUTHORIZE,
token_url=OAUTH2_TOKEN,
)
|
33da5f1bd797f4798d293d353e6a448235735e60
|
2f1e3f24f2798507c9eb73185a955c9bfb735140
|
/libserving/sanic_serving/tf_deploy.py
|
f58c7f9a02b7546f32e0f55b0573fc2da75e4fe8
|
[
"MIT"
] |
permissive
|
massquantity/LibRecommender
|
e4f55b06b2208c794a3f97f7ff89413fa9beaffa
|
8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c
|
refs/heads/master
| 2023-08-31T23:48:37.634663
| 2023-08-20T11:58:15
| 2023-08-20T11:58:15
| 174,493,761
| 251
| 55
|
MIT
| 2023-08-20T11:58:16
| 2019-03-08T07:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 7,895
|
py
|
tf_deploy.py
|
import os
from typing import Any, Dict, List, Optional, Tuple
import aiohttp
import numpy as np
import redis.asyncio as redis
import ujson
from sanic import Sanic
from sanic.exceptions import SanicException
from sanic.log import logger
from sanic.request import Request
from sanic.response import HTTPResponse, json
from .common import Params, validate
app = Sanic("tf-serving")
@app.post("/tf/recommend")
@validate(model=Params)
async def tf_serving(request: Request, params: Params) -> HTTPResponse:
r: redis.Redis = app.ctx.redis
user = params.user
n_rec = params.n_rec
if not await r.hexists("user2id", user):
raise SanicException(f"Invalid user {user} doesn't exist", status_code=400)
logger.info(f"recommend {n_rec} items for user {user}")
user_id = await r.hget("user2id", user)
reco_list = await recommend_on_features(user_id, n_rec, r)
return json({f"Recommend result for user {user}": reco_list})
async def recommend_on_features(user_id: str, n_rec: int, r: redis.Redis) -> List[str]:
n_items = int(await r.get("n_items"))
u_consumed = ujson.loads(await r.hget("user_consumed", user_id))
candidate_num = n_rec + len(u_consumed)
features = {
"user_indices": np.full(n_items, int(user_id)).tolist(),
"item_indices": list(range(n_items)),
}
user_sparse_col_index, user_sparse_values = await get_one_from_redis(
"user_sparse_col_index", "user_sparse_values", user_id, r
)
item_sparse_col_index, item_sparse_values = await get_all_from_redis(
"item_sparse_col_index", "item_sparse_values", r
)
if user_sparse_col_index or item_sparse_col_index:
features.update(
await build_features(
user_sparse_col_index,
item_sparse_col_index,
user_sparse_values,
item_sparse_values,
n_items,
"int",
"sparse_indices",
)
)
user_dense_col_index, user_dense_values = await get_one_from_redis(
"user_dense_col_index", "user_dense_values", user_id, r
)
item_dense_col_index, item_dense_values = await get_all_from_redis(
"item_dense_col_index", "item_dense_values", r
)
if user_dense_col_index or item_dense_col_index:
features.update(
await build_features(
user_dense_col_index,
item_dense_col_index,
user_dense_values,
item_dense_values,
n_items,
"float",
"dense_values",
)
)
model_name = await r.get("model_name")
if model_name in ("YouTubeRanking", "DIN"):
features.update(await get_last_interaction(model_name, u_consumed, n_items, r))
scores = await request_tf_serving(features, model_name)
return await rank_items_by_score(scores, n_rec, candidate_num, u_consumed, r)
async def get_one_from_redis(
index_name: str,
value_name: str,
id_: str,
r: redis.Redis,
) -> Optional[Tuple[List[int], List[Any]]]:
if await r.exists(index_name):
index = ujson.loads(await r.get(index_name))
values = ujson.loads(await r.hget(value_name, id_))
else:
index = values = None
return index, values
async def get_all_from_redis(
index_name: str, value_name: str, r: redis.Redis
) -> Optional[Tuple[List[int], List[Any]]]:
if await r.exists(index_name):
index = ujson.loads(await r.get(index_name))
all_values = await r.lrange(value_name, 0, -1)
values = [ujson.loads(v) for v in all_values]
else:
index = values = None
return index, values
async def build_features(
user_col_index: List[int],
item_col_index: List[int],
user_values: List[Any],
item_values: List[List[Any]],
n_items: int,
type_: str,
feature_name: str,
) -> Dict[str, List[List[Any]]]:
dtype = np.int32 if type_.startswith("int") else np.float32
if user_col_index and item_col_index:
dim = len(user_col_index) + len(item_col_index)
features = np.empty((n_items, dim), dtype=dtype)
for item_id in range(n_items):
for i, v in zip(user_col_index, user_values):
features[item_id, i] = v
for i, v in zip(item_col_index, item_values[item_id]):
features[item_id, i] = v
return {feature_name: features.tolist()}
elif user_col_index:
features = np.empty(len(user_col_index), dtype=dtype)
for i, v in zip(user_col_index, user_values):
features[i] = v
features = features.tolist()
return {feature_name: [features] * n_items}
else:
features = np.empty((n_items, len(item_col_index)), dtype=dtype)
for item_id in range(n_items):
for i, v in zip(item_col_index, item_values[item_id]):
features[item_id, i] = v
return {feature_name: features.tolist()}
async def get_last_interaction(
model_name: str, user_consumed: List[int], n_items: int, r: redis.Redis
) -> Dict[str, Any]:
if not await r.exists("max_seq_len"):
raise SanicException(
f"Missing `max_seq_len` attribute in {model_name}", status_code=500
)
num_consumed = len(user_consumed)
max_seq_len = int(await r.get("max_seq_len"))
if num_consumed >= max_seq_len:
u_last_interacted = user_consumed[-max_seq_len:]
u_interacted_len = max_seq_len
else:
u_last_interacted = np.full(max_seq_len, n_items, dtype=np.int32)
u_last_interacted[-num_consumed:] = user_consumed
u_last_interacted = u_last_interacted.tolist()
u_interacted_len = num_consumed
return {
"user_interacted_seq": [u_last_interacted] * n_items,
"user_interacted_len": [u_interacted_len] * n_items,
}
async def request_tf_serving(
features: Dict[str, List[Any]], model_name: str
) -> List[float]:
host = os.getenv("TF_SERVING_HOST", "localhost")
url = f"http://{host}:8501/v1/models/{model_name.lower()}:predict"
data = {"signature_name": "predict", "inputs": features}
async with aiohttp.ClientSession() as session:
async with session.post(url, json=data) as resp:
if resp.status != 200:
raise SanicException(
f"Error when requesting TensorFlow Serving model: {resp.text}",
status_code=500,
)
result = await resp.json()
return result["outputs"]
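# Illustrative sketch (added here, not part of the original file): the request
# body above uses the columnar "inputs" form of the TensorFlow Serving REST
# predict API, so for a hypothetical 3-item catalog it would look roughly like
#   POST http://<host>:8501/v1/models/<model_name>:predict
#   {"signature_name": "predict",
#    "inputs": {"user_indices": [5, 5, 5], "item_indices": [0, 1, 2], ...}}
# and the JSON response is expected to carry the per-item scores under
# "outputs", e.g. {"outputs": [0.12, 0.87, 0.45]}, which is what gets returned.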
async def rank_items_by_score(
scores: List[float],
n_rec: int,
candidate_num: int,
u_consumed: List[int],
r: redis.Redis,
) -> List[str]:
scores = np.array(scores)
ids = np.argpartition(scores, -candidate_num)[-candidate_num:]
rank_items = sorted(zip(ids, scores[ids]), key=lambda x: x[1], reverse=True)
consumed_set = set(u_consumed)
reco_list = []
for i, _ in rank_items:
if i in consumed_set:
continue
reco_list.append(await r.hget("id2item", str(i)))
if len(reco_list) == n_rec:
break
return reco_list
@app.before_server_start
async def redis_setup(app: Sanic):
host = os.getenv("REDIS_HOST", "localhost")
app.ctx.redis = await redis.from_url(f"redis://{host}", decode_responses=True)
app.ctx.user_sparse = bool(await app.ctx.redis.hexists("feature", "user_sparse"))
app.ctx.item_sparse = bool(await app.ctx.redis.hexists("feature", "item_sparse"))
app.ctx.user_dense = bool(await app.ctx.redis.hexists("feature", "user_dense"))
app.ctx.item_dense = bool(await app.ctx.redis.hexists("feature", "item_dense"))
@app.after_server_stop
async def redis_close(app: Sanic):
await app.ctx.redis.close()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=False, access_log=False)
|
a378cccd23d961f1bf2bc36393aa045984d2ffb4
|
70ae32de39e93b5c7514ed9c54ebb98fa1e56e38
|
/src/sidecar.py
|
a48b0a1634cb98090808c6d05c265155a471e1e4
|
[
"MIT"
] |
permissive
|
kiwigrid/k8s-sidecar
|
33cd58072027942705d2904b1ce7917036db9166
|
1d7a3566c129b8f7c0def292e9407ee856a9e0d4
|
refs/heads/master
| 2023-08-31T13:14:19.637183
| 2023-08-29T13:24:51
| 2023-08-29T13:24:51
| 131,828,020
| 437
| 175
|
MIT
| 2023-09-12T19:31:20
| 2018-05-02T09:24:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,040
|
py
|
sidecar.py
|
#!/usr/bin/env python
import os
import re
from kubernetes import client, config
from kubernetes.client import ApiException
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from requests.packages.urllib3.util.retry import Retry
from helpers import REQ_RETRY_TOTAL, REQ_RETRY_CONNECT, REQ_RETRY_READ, REQ_RETRY_BACKOFF_FACTOR
from logger import get_logger
from resources import list_resources, watch_for_changes, prepare_payload
METHOD = "METHOD"
UNIQUE_FILENAMES = "UNIQUE_FILENAMES"
SKIP_TLS_VERIFY = "SKIP_TLS_VERIFY"
FOLDER = "FOLDER"
FOLDER_ANNOTATION = "FOLDER_ANNOTATION"
LABEL = "LABEL"
LABEL_VALUE = "LABEL_VALUE"
RESOURCE = "RESOURCE"
REQ_PAYLOAD = "REQ_PAYLOAD"
REQ_URL = "REQ_URL"
REQ_METHOD = "REQ_METHOD"
SCRIPT = "SCRIPT"
ENABLE_5XX = "ENABLE_5XX"
IGNORE_ALREADY_PROCESSED = "IGNORE_ALREADY_PROCESSED"
# Get logger
logger = get_logger()
def main():
logger.info("Starting collector")
folder_annotation = os.getenv(FOLDER_ANNOTATION)
if folder_annotation is None:
logger.warning("No folder annotation was provided, "
"defaulting to k8s-sidecar-target-directory")
folder_annotation = "k8s-sidecar-target-directory"
label = os.getenv(LABEL)
if label is None:
logger.fatal("Should have added {LABEL} as environment variable! Exit")
return -1
label_value = os.getenv(LABEL_VALUE)
if label_value:
logger.debug(f"Filter labels with value: {label_value}")
target_folder = os.getenv(FOLDER)
if target_folder is None:
logger.fatal(f"Should have added {FOLDER} as environment variable! Exit")
return -1
resources = os.getenv(RESOURCE, "configmap")
resources = ("secret", "configmap") if resources == "both" else (resources,)
logger.debug(f"Selected resource type: {resources}")
request_method = os.getenv(REQ_METHOD)
request_url = os.getenv(REQ_URL)
request_payload = os.getenv(REQ_PAYLOAD)
if request_payload:
request_payload = prepare_payload(os.getenv(REQ_PAYLOAD))
script = os.getenv(SCRIPT)
_initialize_kubeclient_configuration()
unique_filenames = os.getenv(UNIQUE_FILENAMES)
if unique_filenames is not None and unique_filenames.lower() == "true":
logger.info(f"Unique filenames will be enforced.")
unique_filenames = True
else:
logger.info(f"Unique filenames will not be enforced.")
unique_filenames = False
enable_5xx = os.getenv(ENABLE_5XX)
if enable_5xx is not None and enable_5xx.lower() == "true":
logger.info(f"5xx response content will be enabled.")
enable_5xx = True
else:
logger.info(f"5xx response content will not be enabled.")
enable_5xx = False
ignore_already_processed = False
if os.getenv(IGNORE_ALREADY_PROCESSED) is not None and os.getenv(IGNORE_ALREADY_PROCESSED).lower() == "true":
# Check API version
try:
version = client.VersionApi().get_code()
# Filter version content and retain only numbers
v_major = re.sub(r'\D', '', version.major)
v_minor = re.sub(r'\D', '', version.minor)
if len(v_major) and len(v_minor) and (int(v_major) > 1 or (int(v_major) == 1 and int(v_minor) >= 19)):
logger.info("Ignore already processed resource version will be enabled.")
ignore_already_processed = True
else:
logger.info("Can't enable 'ignore already processed resource version', "
f"kubernetes api version (%s) is lower than v1.19 or unrecognized format." % version.git_version)
except ApiException as e:
logger.error("Exception when calling VersionApi", exc_info=True)
if not ignore_already_processed:
logger.debug("Ignore already processed resource version will not be enabled.")
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace") as f:
namespace = os.getenv("NAMESPACE", f.read())
method = os.getenv(METHOD)
if method == "LIST":
for res in resources:
for ns in namespace.split(','):
list_resources(label, label_value, target_folder, request_url, request_method, request_payload,
ns, folder_annotation, res, unique_filenames, script, enable_5xx,
ignore_already_processed)
else:
watch_for_changes(method, label, label_value, target_folder, request_url, request_method, request_payload,
namespace, folder_annotation, resources, unique_filenames, script, enable_5xx,
ignore_already_processed)
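# Illustrative note (added, not in the original file): with the environment
# variables parsed above, a minimal one-shot configuration could look like
#   LABEL=grafana_dashboard FOLDER=/tmp/dashboards RESOURCE=configmap METHOD=LIST
# (the LABEL/FOLDER values here are hypothetical). METHOD=LIST performs a single
# list_resources() pass per resource and namespace; any other METHOD value falls
# through to watch_for_changes() and keeps watching for updates.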
def _initialize_kubeclient_configuration():
"""
Updates the default configuration of the kubernetes client. This is
picked up later on automatically then.
"""
# this is where kube_config is going to look for a config file
kube_config = os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION)
if os.path.exists(kube_config):
logger.info(f"Loading config from '{kube_config}'...")
config.load_kube_config(kube_config)
else:
logger.info("Loading incluster config ...")
config.load_incluster_config()
if os.getenv(SKIP_TLS_VERIFY) == "true":
configuration = client.Configuration.get_default_copy()
configuration.verify_ssl = False
configuration.debug = False
client.Configuration.set_default(configuration)
# push urllib3 retries to k8s client config
configuration = client.Configuration.get_default_copy()
configuration.retries = Retry(total=REQ_RETRY_TOTAL,
connect=REQ_RETRY_CONNECT,
read=REQ_RETRY_READ,
backoff_factor=REQ_RETRY_BACKOFF_FACTOR)
client.Configuration.set_default(configuration)
logger.info(f"Config for cluster api at '{configuration.host}' loaded...")
if __name__ == "__main__":
main()
|
42c6535bff72b6599e26212666d25235cbd24e97
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/train/_internal/storage.py
|
1c54276605af281a07652eadf5c3fbba4ca91d3d
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 24,808
|
py
|
storage.py
|
import dataclasses
import fnmatch
import logging
import os
from pathlib import Path
import shutil
from typing import Callable, Dict, List, Optional, Tuple, Type, Union, TYPE_CHECKING
try:
import fsspec # noqa
from fsspec.implementations.local import LocalFileSystem
except (ImportError, ModuleNotFoundError) as e:
raise RuntimeError(
"fsspec is a required dependency of Ray Train and Ray Tune. "
"Please install with: `pip install fsspec`"
) from e
try:
import pyarrow
import pyarrow.fs
except (ImportError, ModuleNotFoundError) as e:
raise RuntimeError(
"pyarrow is a required dependency of Ray Train and Ray Tune. "
"Please install with: `pip install pyarrow`"
) from e
from ray._private.storage import _get_storage_uri
from ray.air._internal.filelock import TempFileLock
from ray.train._internal.syncer import Syncer, SyncConfig, _BackgroundSyncer
from ray.train.constants import _get_defaults_results_dir
if TYPE_CHECKING:
from ray.train._checkpoint import Checkpoint
logger = logging.getLogger(__name__)
_VALIDATE_STORAGE_MARKER_FILENAME = ".validate_storage_marker"
def _use_storage_context() -> bool:
# Whether to enable the new simple persistence mode.
from ray.train.constants import RAY_AIR_NEW_PERSISTENCE_MODE
return bool(int(os.environ.get(RAY_AIR_NEW_PERSISTENCE_MODE, "1")))
class _ExcludingLocalFilesystem(LocalFileSystem):
"""LocalFileSystem wrapper to exclude files according to patterns.
Args:
root_path: Root path to strip when matching with the exclude pattern.
Ex: root_path="/tmp/a/b/c", exclude=["*a*"], will exclude
/tmp/a/b/c/_a_.txt but not ALL of /tmp/a/*.
exclude: List of patterns that are applied to files returned by
``self.find()``. If a file path matches this pattern, it will
be excluded.
"""
def __init__(self, root_path: Path, exclude: List[str], **kwargs):
super().__init__(**kwargs)
self._exclude = exclude
self._root_path = root_path
@property
def fsid(self):
return "_excluding_local"
def _should_exclude(self, path: str) -> bool:
"""Return True if `path` (relative to `root_path`) matches any of the
`self._exclude` patterns."""
path = Path(path)
relative_path = path.relative_to(self._root_path).as_posix()
alt = os.path.join(relative_path, "") if path.is_dir() else None
for excl in self._exclude:
if fnmatch.fnmatch(relative_path, excl):
return True
if alt and fnmatch.fnmatch(alt, excl):
return True
return False
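# Illustrative note (added, not in the original source): with root_path="/tmp/exp"
# and exclude=["*.png"], a file "/tmp/exp/logs/img.png" yields
# relative_path="logs/img.png", and fnmatch.fnmatch("logs/img.png", "*.png") is
# True, so it is excluded from find(); "/tmp/exp/result.json" is kept.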
def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
"""Call parent find() and exclude from result."""
paths = super().find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
)
if detail:
return {
path: out
for path, out in paths.items()
if not self._should_exclude(path)
}
else:
return [path for path in paths if not self._should_exclude(path)]
def _pyarrow_fs_copy_files(
source, destination, source_filesystem=None, destination_filesystem=None, **kwargs
):
if isinstance(destination_filesystem, pyarrow.fs.S3FileSystem):
# Workaround multi-threading issue with pyarrow. Note that use_threads=True
# is safe for download, just not for uploads, see:
# https://github.com/apache/arrow/issues/32372
kwargs.setdefault("use_threads", False)
# Use a large chunk size to speed up large checkpoint transfers.
kwargs.setdefault("chunk_size", 64 * 1024 * 1024)
return pyarrow.fs.copy_files(
source,
destination,
source_filesystem=source_filesystem,
destination_filesystem=destination_filesystem,
**kwargs,
)
# TODO(justinvyu): Add unit tests for all these utils.
def _delete_fs_path(fs: pyarrow.fs.FileSystem, fs_path: str):
is_dir = _is_directory(fs, fs_path)
try:
if is_dir:
fs.delete_dir(fs_path)
else:
fs.delete_file(fs_path)
except Exception:
logger.exception(f"Caught exception when deleting path at ({fs}, {fs_path}):")
def _download_from_fs_path(
fs: pyarrow.fs.FileSystem,
fs_path: str,
local_path: str,
filelock: bool = True,
):
"""Downloads a directory or file from (fs, fs_path) to a local path.
If fs_path points to a directory:
- The full directory contents are downloaded directly into `local_path`,
rather than to a subdirectory of `local_path`.
If fs_path points to a file:
- The file is downloaded to `local_path`, which is expected to be a file path.
If the download fails, the `local_path` contents are
cleaned up before raising, if the directory did not previously exist.
NOTE: This method creates `local_path`'s parent directories if they do not
already exist. If the download fails, this does NOT clean up all the parent
directories that were created.
Args:
fs: The filesystem to download from.
fs_path: The filesystem path (either a directory or a file) to download.
local_path: The local path to download to.
filelock: Whether to require a file lock before downloading, useful for
multiple downloads to the same directory that may be happening in parallel.
Raises:
FileNotFoundError: if (fs, fs_path) doesn't exist.
"""
_local_path = Path(local_path).resolve()
exists_before = _local_path.exists()
if _is_directory(fs=fs, fs_path=fs_path):
_local_path.mkdir(parents=True, exist_ok=True)
else:
_local_path.parent.mkdir(parents=True, exist_ok=True)
try:
if filelock:
with TempFileLock(f"{os.path.normpath(local_path)}.lock"):
_pyarrow_fs_copy_files(fs_path, local_path, source_filesystem=fs)
else:
_pyarrow_fs_copy_files(fs_path, local_path, source_filesystem=fs)
except Exception as e:
# Clean up the directory if downloading was unsuccessful
if not exists_before:
shutil.rmtree(local_path, ignore_errors=True)
raise e
def _upload_to_fs_path(
local_path: str,
fs: pyarrow.fs.FileSystem,
fs_path: str,
exclude: Optional[List[str]] = None,
) -> None:
"""Uploads a local directory or file to (fs, fs_path).
NOTE: This will create all necessary parent directories at the destination.
Args:
local_path: The local path to upload.
fs: The filesystem to upload to.
fs_path: The filesystem path where the dir/file will be uploaded to.
exclude: A list of filename matches to exclude from upload. This includes
all files under subdirectories as well.
This pattern will match with the relative paths of all files under
`local_path`.
Ex: ["*.png"] to exclude all .png images.
"""
if not exclude:
# TODO(justinvyu): uploading a single file doesn't work
# (since we always create a directory at fs_path)
_create_directory(fs=fs, fs_path=fs_path)
_pyarrow_fs_copy_files(local_path, fs_path, destination_filesystem=fs)
return
_upload_to_uri_with_exclude_fsspec(
local_path=local_path, fs=fs, fs_path=fs_path, exclude=exclude
)
def _upload_to_uri_with_exclude_fsspec(
local_path: str, fs: "pyarrow.fs", fs_path: str, exclude: Optional[List[str]]
) -> None:
local_fs = _ExcludingLocalFilesystem(root_path=local_path, exclude=exclude)
handler = pyarrow.fs.FSSpecHandler(local_fs)
source_fs = pyarrow.fs.PyFileSystem(handler)
_create_directory(fs=fs, fs_path=fs_path)
_pyarrow_fs_copy_files(
local_path, fs_path, source_filesystem=source_fs, destination_filesystem=fs
)
def _list_at_fs_path(fs: pyarrow.fs.FileSystem, fs_path: str) -> List[str]:
"""Returns the list of filenames at (fs, fs_path), similar to os.listdir.
If the path doesn't exist, returns an empty list.
"""
selector = pyarrow.fs.FileSelector(fs_path, allow_not_found=True, recursive=False)
return [
os.path.relpath(file_info.path.lstrip("/"), start=fs_path.lstrip("/"))
for file_info in fs.get_file_info(selector)
]
def _exists_at_fs_path(fs: pyarrow.fs.FileSystem, fs_path: str) -> bool:
"""Returns True if (fs, fs_path) exists."""
valid = fs.get_file_info(fs_path)
return valid.type != pyarrow.fs.FileType.NotFound
def _is_directory(fs: pyarrow.fs.FileSystem, fs_path: str) -> bool:
"""Checks if (fs, fs_path) is a directory or a file.
Raises:
FileNotFoundError: if (fs, fs_path) doesn't exist.
"""
file_info = fs.get_file_info(fs_path)
if file_info.type == pyarrow.fs.FileType.NotFound:
raise FileNotFoundError(f"Path not found: ({fs}, {fs_path})")
return not file_info.is_file
def _create_directory(fs: pyarrow.fs.FileSystem, fs_path: str) -> None:
"""Create directory at (fs, fs_path).
Some external filesystems require directories to already exist, or at least
the `netloc` to be created (e.g. PyArrow's ``mock://`` filesystem).
Generally this should be done before and outside of Ray applications. This
utility is thus primarily used in testing, e.g. of ``mock://`` URIs.
"""
try:
fs.create_dir(fs_path)
except Exception:
logger.exception(
f"Caught exception when creating directory at ({fs}, {fs_path}):"
)
def get_fs_and_path(
storage_path: Union[str, os.PathLike],
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
) -> Tuple[pyarrow.fs.FileSystem, str]:
"""Returns the fs and path from a storage path and an optional custom fs.
Args:
storage_path: A storage path or URI. (ex: s3://bucket/path or /tmp/ray_results)
storage_filesystem: A custom filesystem to use. If not provided,
this will be auto-resolved by pyarrow. If provided, the storage_path
is assumed to be prefix-stripped already, and must be a valid path
on the filesystem.
"""
storage_path = str(storage_path)
if storage_filesystem:
return storage_filesystem, storage_path
return pyarrow.fs.FileSystem.from_uri(storage_path)
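# Illustrative note (added for clarity, not part of the original module):
# pyarrow.fs.FileSystem.from_uri() strips the URI scheme, so roughly:
#   get_fs_and_path("s3://bucket/exp") -> (S3FileSystem, "bucket/exp")
#   get_fs_and_path("/tmp/ray_results") -> (LocalFileSystem, "/tmp/ray_results")
# When a custom storage_filesystem is passed, the path is returned unchanged.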
class _FilesystemSyncer(_BackgroundSyncer):
"""Syncer between local filesystem and a `storage_filesystem`."""
def __init__(self, storage_filesystem: Optional["pyarrow.fs.FileSystem"], **kwargs):
self.storage_filesystem = storage_filesystem
super().__init__(**kwargs)
def _sync_up_command(
self, local_path: str, uri: str, exclude: Optional[List] = None
) -> Tuple[Callable, Dict]:
# TODO(justinvyu): Defer this cleanup up as part of the
# external-facing Syncer deprecation.
fs_path = uri
return (
_upload_to_fs_path,
dict(
local_path=local_path,
fs=self.storage_filesystem,
fs_path=fs_path,
exclude=exclude,
),
)
def _sync_down_command(self, uri: str, local_path: str) -> Tuple[Callable, Dict]:
fs_path = uri
return (
_download_from_fs_path,
dict(
fs=self.storage_filesystem,
fs_path=fs_path,
local_path=local_path,
),
)
def _delete_command(self, uri: str) -> Tuple[Callable, Dict]:
fs_path = uri
return _delete_fs_path, dict(fs=self.storage_filesystem, fs_path=fs_path)
class StorageContext:
"""Shared context that holds all paths and storage utilities, passed along from
the driver to workers.
The properties of this context may not all be set at once, depending on where
the context lives.
For example, on the driver, the storage context is initialized, only knowing
the experiment path. On the Trainable actor, the trial_dir_name is accessible.
There are 2 types of paths:
1. *_fs_path: A path on the `storage_filesystem`. This is a regular path
which has been prefix-stripped by pyarrow.fs.FileSystem.from_uri and
can be joined with `os.path.join`.
2. *_local_path: The path on the local filesystem where results are saved to
before persisting to storage.
Example with storage_path="mock:///bucket/path?param=1":
>>> from ray.train._internal.storage import StorageContext
>>> import os
>>> os.environ["RAY_AIR_LOCAL_CACHE_DIR"] = "/tmp/ray_results"
>>> storage = StorageContext(
... storage_path="mock://netloc/bucket/path?param=1",
... experiment_dir_name="exp_name",
... )
>>> storage.storage_filesystem # Auto-resolved # doctest: +ELLIPSIS
<pyarrow._fs._MockFileSystem object...
>>> storage.experiment_fs_path
'bucket/path/exp_name'
>>> storage.experiment_local_path
'/tmp/ray_results/exp_name'
>>> storage.trial_dir_name = "trial_dir"
>>> storage.trial_fs_path
'bucket/path/exp_name/trial_dir'
>>> storage.trial_local_path
'/tmp/ray_results/exp_name/trial_dir'
>>> storage.current_checkpoint_index = 1
>>> storage.checkpoint_fs_path
'bucket/path/exp_name/trial_dir/checkpoint_000001'
Example with storage_path=None:
>>> from ray.train._internal.storage import StorageContext
>>> import os
>>> os.environ["RAY_AIR_LOCAL_CACHE_DIR"] = "/tmp/ray_results"
>>> storage = StorageContext(
... storage_path=None,
... experiment_dir_name="exp_name",
... )
>>> storage.storage_path # Auto-resolved
'/tmp/ray_results'
>>> storage.storage_local_path
'/tmp/ray_results'
>>> storage.experiment_local_path
'/tmp/ray_results/exp_name'
>>> storage.experiment_fs_path
'/tmp/ray_results/exp_name'
>>> storage.syncer is None
True
>>> storage.storage_filesystem # Auto-resolved # doctest: +ELLIPSIS
<pyarrow._fs.LocalFileSystem object...
Internal Usage Examples:
- To copy files to the trial directory on the storage filesystem:
pyarrow.fs.copy_files(
local_dir,
os.path.join(storage.trial_fs_path, "subdir"),
destination_filesystem=storage.filesystem
)
"""
def __init__(
self,
storage_path: Optional[Union[str, os.PathLike]],
experiment_dir_name: str,
sync_config: Optional[SyncConfig] = None,
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
trial_dir_name: Optional[str] = None,
current_checkpoint_index: int = 0,
):
custom_fs_provided = storage_filesystem is not None
self.storage_local_path = _get_defaults_results_dir()
# If no remote path is set, try to get Ray Storage URI
ray_storage_uri: Optional[str] = _get_storage_uri()
if ray_storage_uri and storage_path is None:
logger.info(
"Using configured Ray Storage URI as the `storage_path`: "
f"{ray_storage_uri}"
)
# If `storage_path=None`, then set it to the local path.
# Invariant: (`storage_filesystem`, `storage_path`) is the location where
# *all* results can be accessed.
self.storage_path = storage_path or ray_storage_uri or self.storage_local_path
self.experiment_dir_name = experiment_dir_name
self.trial_dir_name = trial_dir_name
self.current_checkpoint_index = current_checkpoint_index
self.sync_config = (
dataclasses.replace(sync_config) if sync_config else SyncConfig()
)
self.storage_filesystem, self.storage_fs_path = get_fs_and_path(
self.storage_path, storage_filesystem
)
# Syncing is always needed if a custom `storage_filesystem` is provided.
# Otherwise, syncing is only needed if storage_local_path
# and storage_fs_path point to different locations.
syncing_needed = (
custom_fs_provided or self.storage_fs_path != self.storage_local_path
)
self.syncer: Optional[Syncer] = (
_FilesystemSyncer(
storage_filesystem=self.storage_filesystem,
sync_period=self.sync_config.sync_period,
sync_timeout=self.sync_config.sync_timeout,
)
if syncing_needed
else None
)
self._create_validation_file()
self._check_validation_file()
def __str__(self):
attrs = [
"storage_path",
"storage_local_path",
"storage_filesystem",
"storage_fs_path",
"experiment_dir_name",
"trial_dir_name",
"current_checkpoint_index",
]
attr_str = "\n".join([f" {attr}={getattr(self, attr)}" for attr in attrs])
return f"StorageContext<\n{attr_str}\n>"
def _create_validation_file(self):
"""On the creation of a storage context, create a validation file at the
storage path to verify that the storage path can be written to.
This validation file is also used to check whether the storage path is
accessible by all nodes in the cluster."""
valid_file = os.path.join(
self.experiment_fs_path, _VALIDATE_STORAGE_MARKER_FILENAME
)
self.storage_filesystem.create_dir(self.experiment_fs_path)
with self.storage_filesystem.open_output_stream(valid_file):
pass
def _check_validation_file(self):
"""Checks that the validation file exists at the storage path."""
valid_file = os.path.join(
self.experiment_fs_path, _VALIDATE_STORAGE_MARKER_FILENAME
)
if not _exists_at_fs_path(fs=self.storage_filesystem, fs_path=valid_file):
raise RuntimeError(
f"Unable to set up cluster storage at storage_path={self.storage_path}"
"\nCheck that all nodes in the cluster have read/write access "
"to the configured storage path."
)
def persist_current_checkpoint(self, checkpoint: "Checkpoint") -> "Checkpoint":
"""Persists a given checkpoint to the current checkpoint path on the filesystem.
"Current" is defined by the `current_checkpoint_index` attribute of the
storage context.
This method copies the checkpoint files to the storage location.
It's up to the user to delete the original checkpoint files if desired.
For example, the original directory is typically a local temp directory.
Args:
checkpoint: The checkpoint to persist to (fs, checkpoint_fs_path).
Returns:
Checkpoint: A Checkpoint pointing to the persisted checkpoint location.
"""
# TODO(justinvyu): Fix this cyclical import.
from ray.train._checkpoint import Checkpoint
logger.debug(
"Copying checkpoint files to storage path:\n"
"({source_fs}, {source}) -> ({dest_fs}, {destination})".format(
source=checkpoint.path,
destination=self.checkpoint_fs_path,
source_fs=checkpoint.filesystem,
dest_fs=self.storage_filesystem,
)
)
# Raise an error if the storage path is not accessible when
# attempting to upload a checkpoint from a remote worker.
# Ex: If storage_path is a local path, then a validation marker
# will only exist on the head node but not the worker nodes.
self._check_validation_file()
self.storage_filesystem.create_dir(self.checkpoint_fs_path)
_pyarrow_fs_copy_files(
source=checkpoint.path,
destination=self.checkpoint_fs_path,
source_filesystem=checkpoint.filesystem,
destination_filesystem=self.storage_filesystem,
)
persisted_checkpoint = Checkpoint(
filesystem=self.storage_filesystem,
path=self.checkpoint_fs_path,
)
logger.info(f"Checkpoint successfully created at: {persisted_checkpoint}")
return persisted_checkpoint
def persist_artifacts(self, force: bool = False) -> None:
"""Persists all artifacts within `trial_local_dir` to storage.
This method possibly launches a background task to sync the trial dir,
depending on the `sync_period` + `sync_artifacts_on_checkpoint`
settings of `SyncConfig`.
`(local_fs, trial_local_path) -> (storage_filesystem, trial_fs_path)`
Args:
force: If True, wait for a previous sync to finish, launch a new one,
and wait for that one to finish. By the end of a `force=True` call, the
latest version of the trial artifacts will be persisted.
"""
if not self.sync_config.sync_artifacts:
return
# Skip if we don't need to sync (e.g., storage_path == storage_local_path, and
# all trial artifacts are already in the right place)
if not self.syncer:
return
if force:
self.syncer.wait()
self.syncer.sync_up(
local_dir=self.trial_local_path, remote_dir=self.trial_fs_path
)
self.syncer.wait()
else:
self.syncer.sync_up_if_needed(
local_dir=self.trial_local_path, remote_dir=self.trial_fs_path
)
@property
def experiment_fs_path(self) -> str:
"""The path on the `storage_filesystem` to the experiment directory.
NOTE: This does not have a URI prefix anymore, since it has been stripped
by pyarrow.fs.FileSystem.from_uri already. The URI scheme information is
kept in `storage_filesystem` instead.
"""
return os.path.join(self.storage_fs_path, self.experiment_dir_name)
@property
def experiment_local_path(self) -> str:
"""The local filesystem path to the experiment directory.
This local "cache" path refers to location where files are dumped before
syncing them to the `storage_path` on the `storage_filesystem`.
"""
return os.path.join(self.storage_local_path, self.experiment_dir_name)
@property
def trial_local_path(self) -> str:
"""The local filesystem path to the trial directory.
Raises a RuntimeError if `trial_dir_name` is not set beforehand.
"""
if self.trial_dir_name is None:
raise RuntimeError(
"Should not access `trial_local_path` without setting `trial_dir_name`"
)
return os.path.join(self.experiment_local_path, self.trial_dir_name)
@property
def trial_fs_path(self) -> str:
"""The trial directory path on the `storage_filesystem`.
Raises a RuntimeError if `trial_dir_name` is not set beforehand.
"""
if self.trial_dir_name is None:
raise RuntimeError(
"Should not access `trial_fs_path` without setting `trial_dir_name`"
)
return os.path.join(self.experiment_fs_path, self.trial_dir_name)
@property
def checkpoint_fs_path(self) -> str:
"""The current checkpoint directory path on the `storage_filesystem`.
"Current" refers to the checkpoint that is currently being created/persisted.
The user of this class is responsible for setting the `current_checkpoint_index`
(e.g., incrementing when needed).
"""
return os.path.join(self.trial_fs_path, self.checkpoint_dir_name)
@property
def checkpoint_dir_name(self) -> str:
"""The current checkpoint directory name, based on the checkpoint index."""
return StorageContext._make_checkpoint_dir_name(self.current_checkpoint_index)
@staticmethod
def get_experiment_dir_name(run_obj: Union[str, Callable, Type]) -> str:
from ray.tune.experiment import Experiment
from ray.tune.utils import date_str
run_identifier = Experiment.get_trainable_name(run_obj)
if bool(int(os.environ.get("TUNE_DISABLE_DATED_SUBDIR", 0))):
dir_name = run_identifier
else:
dir_name = "{}_{}".format(run_identifier, date_str())
return dir_name
@staticmethod
def _make_checkpoint_dir_name(index: int):
"""Get the name of the checkpoint directory, given an index."""
return f"checkpoint_{index:06d}"
|
701bdf5892c2dfc27ccd0d3df2a4f2f1ab6a5b44
|
7453911cee47edd9414ecfc66d189dc578f7e421
|
/src/gevent/_socketcommon.py
|
9d883bfc87a067152db4465ad931de842b79addc
|
[
"Python-2.0",
"MIT"
] |
permissive
|
gevent/gevent
|
f20eca1852098e47f32eb062db646acfead36e71
|
6b22af0fa8eb2efa89fce36c35808948c67352b0
|
refs/heads/master
| 2023-08-31T19:27:29.410236
| 2023-08-31T10:26:35
| 2023-08-31T10:26:35
| 5,801,666
| 4,981
| 866
|
NOASSERTION
| 2023-09-13T14:16:59
| 2012-09-13T22:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 26,199
|
py
|
_socketcommon.py
|
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
from __future__ import absolute_import
# standard functions and classes that this module re-implements in a gevent-aware way:
_implements = [
'create_connection',
'socket',
'SocketType',
'fromfd',
'socketpair',
]
__dns__ = [
'getaddrinfo',
'gethostbyname',
'gethostbyname_ex',
'gethostbyaddr',
'getnameinfo',
'getfqdn',
]
_implements += __dns__
# non-standard functions that this module provides:
__extensions__ = [
'cancel_wait',
'wait_read',
'wait_write',
'wait_readwrite',
]
# standard functions and classes that this module re-imports
__imports__ = [
'error',
'gaierror',
'herror',
'htonl',
'htons',
'ntohl',
'ntohs',
'inet_aton',
'inet_ntoa',
'inet_pton',
'inet_ntop',
'timeout',
'gethostname',
'getprotobyname',
'getservbyname',
'getservbyport',
'getdefaulttimeout',
'setdefaulttimeout',
# Windows:
'errorTab',
# Python 3
'AddressFamily',
'SocketKind',
'CMSG_LEN',
'CMSG_SPACE',
'dup',
'if_indextoname',
'if_nameindex',
'if_nametoindex',
'sethostname',
'create_server',
'has_dualstack_ipv6',
]
import time
from gevent._hub_local import get_hub_noargs as get_hub
from gevent._compat import string_types, integer_types
from gevent._compat import PY39
from gevent._compat import WIN as is_windows
from gevent._compat import OSX as is_macos
from gevent._compat import exc_clear
from gevent._util import copy_globals
from gevent._greenlet_primitives import get_memory as _get_memory
from gevent._hub_primitives import wait_on_socket as _wait_on_socket
from gevent.timeout import Timeout
if PY39:
__imports__.extend([
'recv_fds',
'send_fds',
])
# pylint:disable=no-name-in-module,unused-import
if is_windows:
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
from errno import WSAEINVAL as EINVAL
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
from errno import WSAEINPROGRESS as EINPROGRESS
from errno import WSAEALREADY as EALREADY
from errno import WSAEISCONN as EISCONN
from gevent.win32util import formatError as strerror
EAGAIN = EWOULDBLOCK
else:
from errno import EINVAL
from errno import EWOULDBLOCK
from errno import EINPROGRESS
from errno import EALREADY
from errno import EAGAIN
from errno import EISCONN
from os import strerror
try:
from errno import EBADF
except ImportError:
EBADF = 9
try:
from errno import EHOSTUNREACH
except ImportError:
EHOSTUNREACH = -1
try:
from errno import ECONNREFUSED
except ImportError:
ECONNREFUSED = -1
# macOS can return EPROTOTYPE when writing to a socket that is shutting
# Down. Retrying the write should return the expected EPIPE error.
# Downstream classes (like pywsgi) know how to handle/ignore EPIPE.
# This set is used by socket.send() to decide whether the write should
# be retried. The default is to retry only on EWOULDBLOCK. Here we add
# EPROTOTYPE on macOS to handle this platform-specific race condition.
GSENDAGAIN = (EWOULDBLOCK,)
if is_macos:
from errno import EPROTOTYPE
GSENDAGAIN += (EPROTOTYPE,)
import _socket
_realsocket = _socket.socket
import socket as __socket__
try:
# Provide implementation of socket.socketpair on Windows < 3.5.
import backports.socketpair
except ImportError:
pass
_SocketError = __socket__.error
_name = _value = None
__imports__ = copy_globals(__socket__, globals(),
only_names=__imports__,
ignore_missing_names=True)
for _name in __socket__.__all__:
_value = getattr(__socket__, _name)
if isinstance(_value, (integer_types, string_types)):
globals()[_name] = _value
__imports__.append(_name)
del _name, _value
_timeout_error = timeout # pylint: disable=undefined-variable
from gevent import _hub_primitives
_hub_primitives.set_default_timeout_error(_timeout_error)
wait = _hub_primitives.wait_on_watcher
wait_read = _hub_primitives.wait_read
wait_write = _hub_primitives.wait_write
wait_readwrite = _hub_primitives.wait_readwrite
#: The exception raised by default on a call to :func:`cancel_wait`
class cancel_wait_ex(error): # pylint: disable=undefined-variable
def __init__(self):
super(cancel_wait_ex, self).__init__(
EBADF,
'File descriptor was closed in another greenlet')
def cancel_wait(watcher, error=cancel_wait_ex):
"""See :meth:`gevent.hub.Hub.cancel_wait`"""
get_hub().cancel_wait(watcher, error)
def gethostbyname(hostname):
"""
gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host.
.. seealso:: :doc:`/dns`
"""
return get_hub().resolver.gethostbyname(hostname)
def gethostbyname_ex(hostname):
"""
gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
Resolve host and port into list of address info entries.
.. seealso:: :doc:`/dns`
"""
return get_hub().resolver.gethostbyname_ex(hostname)
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""
Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
.. seealso:: :doc:`/dns`
"""
# Also, on Python 3, we need to translate into the special enums.
# Our lower-level resolvers, including the thread and blocking, which use _socket,
# function simply with integers.
addrlist = get_hub().resolver.getaddrinfo(host, port, family, type, proto, flags)
result = [
# pylint:disable=undefined-variable
(_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa)
for af, socktype, proto, canonname, sa
in addrlist
]
return result
def _intenum_converter(value, enum_klass):
try:
return enum_klass(value)
except ValueError: # pragma: no cover
return value
def gethostbyaddr(ip_address):
"""
gethostbyaddr(ip_address) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number.
.. seealso:: :doc:`/dns`
"""
return get_hub().resolver.gethostbyaddr(ip_address)
def getnameinfo(sockaddr, flags):
"""
getnameinfo(sockaddr, flags) -> (host, port)
Get host and port for a sockaddr.
.. seealso:: :doc:`/dns`
"""
return get_hub().resolver.getnameinfo(sockaddr, flags)
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
.. versionchanged:: 23.7.0
The IPv6 generic address '::' now returns the result of
``gethostname``, like the IPv4 address '0.0.0.0'.
"""
# pylint: disable=undefined-variable
name = name.strip()
# IPv6 added in a late Python 3.10/3.11 patch release.
# https://github.com/python/cpython/issues/100374
if not name or name in ('0.0.0.0', '::'):
name = gethostname()
try:
hostname, aliases, _ = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases: # EWW! pylint:disable=redefined-argument-from-local
if isinstance(name, bytes):
if b'.' in name:
break
elif '.' in name:
break
else:
name = hostname
return name
def __send_chunk(socket, data_memory, flags, timeleft, end, timeout=_timeout_error):
"""
Send the complete contents of ``data_memory`` before returning.
This is the core loop around :meth:`send`.
:param timeleft: Either ``None`` if there is no timeout involved,
or a float indicating the timeout to use.
:param end: Either ``None`` if there is no timeout involved, or
a float giving the absolute end time.
:return: An updated value for ``timeleft`` (or None)
:raises timeout: If ``timeleft`` was given and elapsed while
sending this chunk.
"""
data_sent = 0
len_data_memory = len(data_memory)
started_timer = 0
while data_sent < len_data_memory:
chunk = data_memory[data_sent:]
if timeleft is None:
data_sent += socket.send(chunk, flags)
elif started_timer and timeleft <= 0:
# Check before sending to guarantee a check
# happens even if each chunk successfully sends its data
# (especially important for SSL sockets since they have large
# buffers). But only do this if we've actually tried to
# send something once to avoid spurious timeouts on non-blocking
# sockets.
raise timeout('timed out')
else:
started_timer = 1
data_sent += socket.send(chunk, flags, timeout=timeleft)
timeleft = end - time.time()
return timeleft
def _sendall(socket, data_memory, flags,
SOL_SOCKET=__socket__.SOL_SOCKET, # pylint:disable=no-member
SO_SNDBUF=__socket__.SO_SNDBUF): # pylint:disable=no-member
"""
Send the *data_memory* (which should be a memoryview)
using the gevent *socket*, performing well on PyPy.
"""
# On PyPy up through 5.10.0, both PyPy2 and PyPy3, subviews
# (slices) of a memoryview() object copy the underlying bytes the
# first time the builtin socket.send() method is called. On a
# non-blocking socket (that thus calls socket.send() many times)
# with a large input, this results in many repeated copies of an
# ever smaller string, depending on the networking buffering. For
# example, if each send() can process 1MB of a 50MB input, and we
# naively pass the entire remaining subview each time, we'd copy
# 49MB, 48MB, 47MB, etc, thus completely killing performance. To
# workaround this problem, we work in reasonable, fixed-size
# chunks. This results in a 10x improvement to bench_sendall.py,
# while having no measurable impact on CPython (since it doesn't
# copy at all the only extra overhead is a few python function
# calls, which is negligible for large inputs).
# On one macOS machine, PyPy3 5.10.1 produced ~ 67.53 MB/s before this change,
# and ~ 616.01 MB/s after.
# See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent
# Too small of a chunk (the socket's buf size is usually too
# small) results in reduced perf due to *too many* calls to send and too many
# small copies. With a buffer of 143K (the default on my system), for
# example, bench_sendall.py yields ~264MB/s, while using 1MB yields
# ~653MB/s (matching CPython). 1MB is arbitrary and might be better
# chosen, say, to match a page size?
len_data_memory = len(data_memory)
if not len_data_memory:
# Don't try to send empty data at all, no point, and breaks ssl
# See issue 719
return 0
chunk_size = max(socket.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024)
data_sent = 0
end = None
timeleft = None
if socket.timeout is not None:
timeleft = socket.timeout
end = time.time() + timeleft
while data_sent < len_data_memory:
chunk_end = min(data_sent + chunk_size, len_data_memory)
chunk = data_memory[data_sent:chunk_end]
timeleft = __send_chunk(socket, chunk, flags, timeleft, end)
data_sent += len(chunk) # Guaranteed it sent the whole thing
# pylint:disable=no-member
_RESOLVABLE_FAMILIES = (__socket__.AF_INET,)
if __socket__.has_ipv6:
_RESOLVABLE_FAMILIES += (__socket__.AF_INET6,)
def _resolve_addr(sock, address):
# Internal method: resolve the AF_INET[6] address using
# getaddrinfo.
if sock.family not in _RESOLVABLE_FAMILIES or not isinstance(address, tuple):
return address
# address is (host, port) (ipv4) or (host, port, flowinfo, scopeid) (ipv6).
# If it's already resolved, no need to go through getaddrinfo() again.
# That can lose precision (e.g., on IPv6, it can lose scopeid). The standard library
# does this in socketmodule.c:setipaddr. (This is only part of the logic, the real
# thing is much more complex.)
try:
if __socket__.inet_pton(sock.family, address[0]):
return address
except AttributeError: # pragma: no cover
# inet_pton might not be available.
pass
except _SocketError:
# Not parseable, needs resolved.
pass
# We don't pass the port to getaddrinfo because the C
# socket module doesn't either (on some systems its
# illegal to do that without also passing socket type and
# protocol). Instead we join the port back at the end.
# See https://github.com/gevent/gevent/issues/1252
host, port = address[:2]
r = getaddrinfo(host, None, sock.family)
address = r[0][-1]
if len(address) == 2:
address = (address[0], port)
else:
address = (address[0], port, address[2], address[3])
return address
timeout_default = object()
class SocketMixin(object):
# pylint:disable=too-many-public-methods
__slots__ = (
'hub',
'timeout',
'_read_event',
'_write_event',
'_sock',
'__weakref__',
)
def __init__(self):
# Writing:
# (self.a, self.b) = (None,) * 2
# generates the fastest bytecode. But At least on PyPy,
# where the SSLSocket subclass has a timeout property,
# it results in the settimeout() method getting the tuple
# as the value, not the unpacked None.
self._read_event = None
self._write_event = None
self._sock = None
self.hub = None
self.timeout = None
def _drop_events_and_close(self, closefd=True, _cancel_wait_ex=cancel_wait_ex):
hub = self.hub
read_event = self._read_event
write_event = self._write_event
self._read_event = self._write_event = None
hub.cancel_waits_close_and_then(
(read_event, write_event),
_cancel_wait_ex,
# Pass the socket to keep it alive until such time as
# the waiters are guaranteed to be closed.
self._drop_ref_on_close if closefd else id,
self._sock
)
def _drop_ref_on_close(self, sock):
raise NotImplementedError
def _get_ref(self):
return self._read_event.ref or self._write_event.ref
def _set_ref(self, value):
self._read_event.ref = value
self._write_event.ref = value
ref = property(_get_ref, _set_ref)
_wait = _wait_on_socket
###
# Common methods defined here need to be added to the
# API documentation specifically.
###
def settimeout(self, howlong):
if howlong is not None:
try:
f = howlong.__float__
except AttributeError:
raise TypeError('a float is required', howlong, type(howlong))
howlong = f()
if howlong < 0.0:
raise ValueError('Timeout value out of range')
# avoid recursion with any property on self.timeout
SocketMixin.timeout.__set__(self, howlong)
def gettimeout(self):
# avoid recursion with any property on self.timeout
return SocketMixin.timeout.__get__(self, type(self))
def setblocking(self, flag):
# Beginning in 3.6.0b3 this is supposed to raise
# if the file descriptor is closed, but the test for it
# involves closing the fileno directly. Since we
# don't touch the fileno here, it doesn't make sense for
# us.
if flag:
self.timeout = None
else:
self.timeout = 0.0
def shutdown(self, how):
if how == 0: # SHUT_RD
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
elif how == 1: # SHUT_WR
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
else:
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
self._sock.shutdown(how)
# pylint:disable-next=undefined-variable
family = property(lambda self: _intenum_converter(self._sock.family, AddressFamily))
# pylint:disable-next=undefined-variable
type = property(lambda self: _intenum_converter(self._sock.type, SocketKind))
proto = property(lambda self: self._sock.proto)
def fileno(self):
return self._sock.fileno()
def getsockname(self):
return self._sock.getsockname()
def getpeername(self):
return self._sock.getpeername()
def bind(self, address):
return self._sock.bind(address)
def listen(self, *args):
return self._sock.listen(*args)
def getsockopt(self, *args):
return self._sock.getsockopt(*args)
def setsockopt(self, *args):
return self._sock.setsockopt(*args)
if hasattr(__socket__.socket, 'ioctl'): # os.name == 'nt'
def ioctl(self, *args):
return self._sock.ioctl(*args)
    if hasattr(__socket__.socket, 'sleeptaskw'): # os.name == 'riscos'
def sleeptaskw(self, *args):
return self._sock.sleeptaskw(*args)
def getblocking(self):
"""
Returns whether the socket will approximate blocking
behaviour.
.. versionadded:: 1.3a2
Added in Python 3.7.
"""
return self.timeout != 0.0
def connect(self, address):
"""
Connect to *address*.
.. versionchanged:: 20.6.0
If the host part of the address includes an IPv6 scope ID,
it will be used instead of ignored, if the platform supplies
:func:`socket.inet_pton`.
"""
# In the standard library, ``connect`` and ``connect_ex`` are implemented
# in C, and they both call a C function ``internal_connect`` to do the real
# work. This means that it is a visible behaviour difference to have our
# Python implementation of ``connect_ex`` simply call ``connect``:
# it could be overridden in a subclass or at runtime! Because of our exception handling,
# this can make a difference for known subclasses like SSLSocket.
self._internal_connect(address)
def connect_ex(self, address):
"""
Connect to *address*, returning a result code.
.. versionchanged:: 23.7.0
No longer uses an overridden ``connect`` method on
this object. Instead, like the standard library, this method always
           uses a non-replaceable internal connection function.
"""
try:
return self._internal_connect(address) or 0
except __socket__.timeout:
return EAGAIN
except __socket__.gaierror: # pylint:disable=try-except-raise
            # gaierror/OverflowError/TypeError are not silenced by connect_ex;
# gaierror extends error so catch it first
raise
except _SocketError as ex:
# Python 3: error is now OSError and it has various subclasses.
# Only those that apply to actually connecting are silenced by
# connect_ex.
# On Python 3, we want to check ex.errno; on Python 2
# there is no such attribute, we need to look at the first
# argument.
try:
err = ex.errno
except AttributeError:
err = ex.args[0]
if err:
return err
raise
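    # A minimal usage sketch of the difference (hypothetical address, not part of this
    # module): ``connect`` raises on failure, while ``connect_ex`` returns the error code.
    #
    #   s = socket()
    #   try:
    #       s.connect(('192.0.2.1', 80))        # raises on failure or timeout
    #   except error:
    #       ...
    #   err = s.connect_ex(('192.0.2.1', 80))   # 0 on success, otherwise an errno value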
def _internal_connect(self, address):
# Like the C function ``internal_connect``, not meant to be overridden,
# but exposed for testing.
if self.timeout == 0.0:
return self._sock.connect(address)
address = _resolve_addr(self._sock, address)
with Timeout._start_new_or_dummy(self.timeout, __socket__.timeout("timed out")):
while 1:
err = self.getsockopt(__socket__.SOL_SOCKET, __socket__.SO_ERROR)
if err:
raise _SocketError(err, strerror(err))
result = self._sock.connect_ex(address)
if not result or result == EISCONN:
break
if (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
self._wait(self._write_event)
else:
if (isinstance(address, tuple)
and address[0] == 'fe80::1'
and result == EHOSTUNREACH):
# On Python 3.7 on mac, we see EHOSTUNREACH
# returned for this link-local address, but it really is
# supposed to be ECONNREFUSED according to the standard library
# tests (test_socket.NetworkConnectionNoServer.test_create_connection)
# (On previous versions, that code passed the '127.0.0.1' IPv4 address, so
# ipv6 link locals were never a factor; 3.7 passes 'localhost'.)
# It is something of a mystery how the stdlib socket code doesn't
# produce EHOSTUNREACH---I (JAM) can't see how socketmodule.c would avoid
# that. The normal connect just calls connect_ex much like we do.
result = ECONNREFUSED
raise _SocketError(result, strerror(result))
def recv(self, *args):
while 1:
try:
return self._sock.recv(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
# QQQ without clearing exc_info test__refcount.test_clean_exit fails
exc_clear() # Python 2
self._wait(self._read_event)
def recvfrom(self, *args):
while 1:
try:
return self._sock.recvfrom(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def recvfrom_into(self, *args):
while 1:
try:
return self._sock.recvfrom_into(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def recv_into(self, *args):
while 1:
try:
return self._sock.recv_into(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def sendall(self, data, flags=0):
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
data_memory = _get_memory(data)
return _sendall(self, data_memory, flags)
def sendto(self, *args):
try:
return self._sock.sendto(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear()
self._wait(self._write_event)
try:
return self._sock.sendto(*args)
except _SocketError as ex2:
if ex2.args[0] == EWOULDBLOCK:
exc_clear()
return 0
raise
def send(self, data, flags=0, timeout=timeout_default):
if timeout is timeout_default:
timeout = self.timeout
try:
return self._sock.send(data, flags)
except _SocketError as ex:
if ex.args[0] not in GSENDAGAIN or timeout == 0.0:
raise
exc_clear()
self._wait(self._write_event)
try:
return self._sock.send(data, flags)
except _SocketError as ex2:
if ex2.args[0] == EWOULDBLOCK:
exc_clear()
return 0
raise
@classmethod
def _fixup_docstrings(cls):
for k, v in vars(cls).items():
if k.startswith('_'):
continue
if not hasattr(v, '__doc__') or v.__doc__:
continue
smeth = getattr(__socket__.socket, k, None)
if not smeth or not smeth.__doc__:
continue
try:
v.__doc__ = smeth.__doc__
except (AttributeError, TypeError):
# slots can't have docs. Py2 raises TypeError,
# Py3 raises AttributeError
continue
SocketMixin._fixup_docstrings()
del SocketMixin._fixup_docstrings
|
aa339b957cfc8e7135a2060240ad4fe4f66796d8
|
3de3dae722829727edfdd6cc3b67443a69043475
|
/cave/com.raytheon.viz.gfe/localization/gfe/userPython/procedures/SendProposedToWFO.py
|
6bc101527f298be05140af049cb4ead808090625
|
[
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] |
permissive
|
Unidata/awips2
|
9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb
|
d76c9f96e6bb06f7239c563203f226e6a6fffeef
|
refs/heads/unidata_18.2.1
| 2023-08-18T13:00:15.110785
| 2023-08-09T06:06:06
| 2023-08-09T06:06:06
| 19,332,079
| 161
| 75
|
NOASSERTION
| 2023-09-13T19:06:40
| 2014-05-01T00:59:04
|
Java
|
UTF-8
|
Python
| false
| false
| 5,191
|
py
|
SendProposedToWFO.py
|
# ------------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# SendProposedToWFO
# Version 3.0 - Code cleanup and refactoring
# Author: Tom LeFebvre and Pablo Santos
# ------------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- ------------------------------------------
# Sep 19, 2016 19293 randerso Initial baseline check in
# Feb 21, 2017 29544 randerso Set anyChanges to None when calling
# notifyWFOs so only those WFOs with active
# surge event are notified
#
################################################################################
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
# The MenuItems list defines the GFE menu item(s) under which the
# Procedure is to appear.
# Possible items are: Populate, Edit, Consistency, Verify, Hazards
MenuItems = ["None"]
import ProcessVariableList
import TropicalUtility
import numpy as np
class Procedure (TropicalUtility.TropicalUtility):
def __init__(self, dbss):
TropicalUtility.TropicalUtility.__init__(self, dbss)
def execute(self):
# Create a GUI
# variableList = []
# variableList.append(("Send pop-up banner to WFOs", "Yes", "radio",
# ["Yes", "No"]))
#
# # Display the GUI
# # Doing this here so we get OK/Cancel instead of Run/Run Dismiss/Cancel
# varDict = {}
# processVarList = ProcessVariableList.ProcessVariableList(
# "Send WFO pop-up banner", variableList, varDict)
# status = processVarList.status()
# if status.upper() != "OK":
# self.cancel()
# Copy proposed to initial grid.
propWEName = "ProposedSS"
trList = self.GM_getWEInventory(propWEName, self._mutableID)
if len(trList) == 0:
self.statusBarMsg("No " + propWEName + " grid found", "S")
return
propGrid, propKeys = self.getGrids(self._mutableID, propWEName, "SFC",
trList[0])
# Fetch the storm surge edit area and make the mask
ssEditArea = self.getEditArea("StormSurgeWW_EditArea")
boolMask = self.empty(np.bool)
ssMask = self.encodeEditArea(ssEditArea)
# Set all points to None outside the StormSurge edit area
noneIndex = self.getIndex("<None>", propKeys)
propGrid[~ssMask] = noneIndex
self.createGrid(self._mutableID, propWEName, "DISCRETE",
(propGrid, propKeys), trList[0])
# Replace the Initial grid with proposed
initWEName = propWEName.replace("Proposed", "Initial")
self.createGrid(self._mutableID, initWEName, "DISCRETE",
(propGrid, propKeys), trList[0])
# Keep only the last proposed grid
self.removeEarlierTRs(initWEName)
# Handle fields which should be masked to the storm surge area only
maskedElements = ["InundationMax", "InundationTiming",
"SurgeHtPlusTideMHHW", "SurgeHtPlusTideMLLW",
"SurgeHtPlusTideMSL", "SurgeHtPlusTideNAVD"]
savedElements = ["ProposedSS", "InitialSS"]
# Process those masked fields
for weName in maskedElements:
trList = self.GM_getWEInventory(weName)
# If there is nothing to do - say so
if len(trList) == 0:
# self.statusBarMsg("No " + weName + " grid found.", "S")
continue
# Add to saved element list if element exists
savedElements.append(weName)
# Get the limits for this field
minLimit, maxLimit = self.getParmMinMaxLimits(self._mutableID,
weName)
# Process all the grids we have left
for tr in trList:
# Get the grid for this time range
grid = self.getGrids(self._mutableID, weName, "SFC", tr)
# Mask the grid outside of the mask area
grid[~ssMask] = minLimit
# Put the masked grid back into this time range
self.createGrid(self._mutableID, weName, "SCALAR", grid, tr)
# Make a list of fields which will be automatically saved
# savedElements = ["ProposedSS", "InitialSS"] + maskedElements
# Save those fields
self.saveElements(savedElements)
# Notify the WFOS, as appropriate
testMode = self._testMode
if not testMode:
self.notifyWFOs("ProposedSS", anyChanges=None)
self.statusBarMsg("Procedure completed. Sent pop-up banners to WFOs", "A")
|
e4bb92c1bb5bd901535bafd6ce294bcd4dcc7b9b
|
f07e66293cc41a9fe71fc44f765b432fd7a0997c
|
/selfdrive/car/fw_versions.py
|
2584618ebddaa944372938dcef21bfdf76e839e0
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kegman/openpilot
|
c9ba96a72d905956f02c684e065091e023942883
|
b35291c91783657a5fc83abfff012d3bb49dd89f
|
refs/heads/kegman-ultimate
| 2022-05-22T17:07:16.656336
| 2021-10-25T13:35:28
| 2021-10-25T13:35:28
| 229,979,925
| 105
| 212
|
MIT
| 2022-03-13T05:47:51
| 2019-12-24T17:27:11
|
C
|
UTF-8
|
Python
| false
| false
| 7,982
|
py
|
fw_versions.py
|
#!/usr/bin/env python3
import struct
import traceback
from typing import Any
from tqdm import tqdm
import panda.python.uds as uds
from cereal import car
from selfdrive.car.fingerprints import FW_VERSIONS, get_attr_from_cars
from selfdrive.car.isotp_parallel_query import IsoTpParallelQuery
from selfdrive.car.toyota.values import CAR as TOYOTA
from selfdrive.swaglog import cloudlog
Ecu = car.CarParams.Ecu
def p16(val):
return struct.pack("!H", val)
TESTER_PRESENT_REQUEST = bytes([uds.SERVICE_TYPE.TESTER_PRESENT, 0x0])
TESTER_PRESENT_RESPONSE = bytes([uds.SERVICE_TYPE.TESTER_PRESENT + 0x40, 0x0])
SHORT_TESTER_PRESENT_REQUEST = bytes([uds.SERVICE_TYPE.TESTER_PRESENT])
SHORT_TESTER_PRESENT_RESPONSE = bytes([uds.SERVICE_TYPE.TESTER_PRESENT + 0x40])
DEFAULT_DIAGNOSTIC_REQUEST = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL,
uds.SESSION_TYPE.DEFAULT])
DEFAULT_DIAGNOSTIC_RESPONSE = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL + 0x40,
uds.SESSION_TYPE.DEFAULT, 0x0, 0x32, 0x1, 0xf4])
EXTENDED_DIAGNOSTIC_REQUEST = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL,
uds.SESSION_TYPE.EXTENDED_DIAGNOSTIC])
EXTENDED_DIAGNOSTIC_RESPONSE = bytes([uds.SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL + 0x40,
uds.SESSION_TYPE.EXTENDED_DIAGNOSTIC, 0x0, 0x32, 0x1, 0xf4])
UDS_VERSION_REQUEST = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \
p16(uds.DATA_IDENTIFIER_TYPE.APPLICATION_SOFTWARE_IDENTIFICATION)
UDS_VERSION_RESPONSE = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER + 0x40]) + \
p16(uds.DATA_IDENTIFIER_TYPE.APPLICATION_SOFTWARE_IDENTIFICATION)
HYUNDAI_VERSION_REQUEST_SHORT = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \
p16(0xf1a0) # 4 Byte version number
HYUNDAI_VERSION_REQUEST_LONG = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \
p16(0xf100) # Long description
HYUNDAI_VERSION_REQUEST_MULTI = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER]) + \
p16(uds.DATA_IDENTIFIER_TYPE.VEHICLE_MANUFACTURER_SPARE_PART_NUMBER) + \
p16(uds.DATA_IDENTIFIER_TYPE.APPLICATION_SOFTWARE_IDENTIFICATION) + \
p16(0xf100) + \
p16(0xf1a0)
HYUNDAI_VERSION_RESPONSE = bytes([uds.SERVICE_TYPE.READ_DATA_BY_IDENTIFIER + 0x40])
TOYOTA_VERSION_REQUEST = b'\x1a\x88\x01'
TOYOTA_VERSION_RESPONSE = b'\x5a\x88\x01'
OBD_VERSION_REQUEST = b'\x09\x04'
OBD_VERSION_RESPONSE = b'\x49\x04'
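# For orientation (a sketch, not authoritative): assuming the standard ISO 14229 values
# READ_DATA_BY_IDENTIFIER == 0x22 and APPLICATION_SOFTWARE_IDENTIFICATION == 0xf181,
# the generic UDS pair above packs to:
#   UDS_VERSION_REQUEST  == b'\x22\xf1\x81'
#   UDS_VERSION_RESPONSE == b'\x62\xf1\x81'   # request service id + 0x40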
# Each entry is (brand, [request messages], [response messages]); subaddressing is supported
REQUESTS = [
  # Hyundai
(
"hyundai",
[HYUNDAI_VERSION_REQUEST_SHORT],
[HYUNDAI_VERSION_RESPONSE],
),
(
"hyundai",
[HYUNDAI_VERSION_REQUEST_LONG],
[HYUNDAI_VERSION_RESPONSE],
),
(
"hyundai",
[HYUNDAI_VERSION_REQUEST_MULTI],
[HYUNDAI_VERSION_RESPONSE],
),
# Honda
(
"honda",
[UDS_VERSION_REQUEST],
[UDS_VERSION_RESPONSE],
),
# Toyota
(
"toyota",
[SHORT_TESTER_PRESENT_REQUEST, TOYOTA_VERSION_REQUEST],
[SHORT_TESTER_PRESENT_RESPONSE, TOYOTA_VERSION_RESPONSE],
),
(
"toyota",
[SHORT_TESTER_PRESENT_REQUEST, OBD_VERSION_REQUEST],
[SHORT_TESTER_PRESENT_RESPONSE, OBD_VERSION_RESPONSE],
),
(
"toyota",
[TESTER_PRESENT_REQUEST, DEFAULT_DIAGNOSTIC_REQUEST, EXTENDED_DIAGNOSTIC_REQUEST, UDS_VERSION_REQUEST],
[TESTER_PRESENT_RESPONSE, DEFAULT_DIAGNOSTIC_RESPONSE, EXTENDED_DIAGNOSTIC_RESPONSE, UDS_VERSION_RESPONSE],
)
]
def chunks(l, n=128):
for i in range(0, len(l), n):
yield l[i:i + n]
def match_fw_to_car(fw_versions):
candidates = FW_VERSIONS
invalid = []
fw_versions_dict = {}
for fw in fw_versions:
addr = fw.address
sub_addr = fw.subAddress if fw.subAddress != 0 else None
fw_versions_dict[(addr, sub_addr)] = fw.fwVersion
for candidate, fws in candidates.items():
for ecu, expected_versions in fws.items():
ecu_type = ecu[0]
addr = ecu[1:]
found_version = fw_versions_dict.get(addr, None)
ESSENTIAL_ECUS = [Ecu.engine, Ecu.eps, Ecu.esp, Ecu.fwdRadar, Ecu.fwdCamera, Ecu.vsa, Ecu.electricBrakeBooster]
if ecu_type == Ecu.esp and candidate in [TOYOTA.RAV4, TOYOTA.COROLLA, TOYOTA.HIGHLANDER] and found_version is None:
continue
# TODO: on some toyota, the engine can show on two different addresses
if ecu_type == Ecu.engine and candidate in [TOYOTA.COROLLA_TSS2, TOYOTA.CHR, TOYOTA.LEXUS_IS, TOYOTA.AVALON] and found_version is None:
continue
# ignore non essential ecus
if ecu_type not in ESSENTIAL_ECUS and found_version is None:
continue
if found_version not in expected_versions:
invalid.append(candidate)
break
return set(candidates.keys()) - set(invalid)
def get_fw_versions(logcan, sendcan, bus, extra=None, timeout=0.1, debug=False, progress=False):
ecu_types = {}
  # Extract ECU addresses to query from fingerprints
  # ECUs using a subaddress need to be queried one by one; the rest can be done in parallel
addrs = []
parallel_addrs = []
versions = get_attr_from_cars('FW_VERSIONS', combine_brands=False)
if extra is not None:
versions.update(extra)
for brand, brand_versions in versions.items():
for c in brand_versions.values():
for ecu_type, addr, sub_addr in c.keys():
a = (brand, addr, sub_addr)
if a not in ecu_types:
ecu_types[(addr, sub_addr)] = ecu_type
if sub_addr is None:
if a not in parallel_addrs:
parallel_addrs.append(a)
else:
if [a] not in addrs:
addrs.append([a])
addrs.insert(0, parallel_addrs)
fw_versions = {}
for i, addr in enumerate(tqdm(addrs, disable=not progress)):
for addr_chunk in chunks(addr):
for brand, request, response in REQUESTS:
try:
addrs = [(a, s) for (b, a, s) in addr_chunk if b in (brand, 'any')]
if addrs:
query = IsoTpParallelQuery(sendcan, logcan, bus, addrs, request, response, debug=debug)
t = 2 * timeout if i == 0 else timeout
fw_versions.update(query.get_data(t))
except Exception:
cloudlog.warning(f"FW query exception: {traceback.format_exc()}")
# Build capnp list to put into CarParams
car_fw = []
for addr, version in fw_versions.items():
f = car.CarParams.CarFw.new_message()
f.ecu = ecu_types[addr]
f.fwVersion = version
f.address = addr[0]
if addr[1] is not None:
f.subAddress = addr[1]
car_fw.append(f)
return car_fw
if __name__ == "__main__":
import time
import argparse
import cereal.messaging as messaging
from selfdrive.car.vin import get_vin
parser = argparse.ArgumentParser(description='Get firmware version of ECUs')
parser.add_argument('--scan', action='store_true')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
logcan = messaging.sub_sock('can')
sendcan = messaging.pub_sock('sendcan')
extra: Any = None
if args.scan:
extra = {}
# Honda
for i in range(256):
extra[(Ecu.unknown, 0x18da00f1 + (i << 8), None)] = []
extra[(Ecu.unknown, 0x700 + i, None)] = []
extra[(Ecu.unknown, 0x750, i)] = []
extra = {"any": {"debug": extra}}
time.sleep(1.)
t = time.time()
print("Getting vin...")
addr, vin = get_vin(logcan, sendcan, 1, retry=10, debug=args.debug)
print(f"VIN: {vin}")
print("Getting VIN took %.3f s" % (time.time() - t))
print()
t = time.time()
fw_vers = get_fw_versions(logcan, sendcan, 1, extra=extra, debug=args.debug, progress=True)
candidates = match_fw_to_car(fw_vers)
print()
print("Found FW versions")
print("{")
for version in fw_vers:
subaddr = None if version.subAddress == 0 else hex(version.subAddress)
print(f" (Ecu.{version.ecu}, {hex(version.address)}, {subaddr}): [{version.fwVersion}]")
print("}")
print()
print("Possible matches:", candidates)
print("Getting fw took %.3f s" % (time.time() - t))
|
5eb656a3a1ec5aa8a671cb895d31177bb3862e90
|
c3542b98289c1ba85f62d08b5edbe1a3c18f3c80
|
/FizzBuzz.py
|
59c78fad2a92034ad6580ba199fbd9b4ed1e7a09
|
[
"LicenseRef-scancode-unknown",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
geekcomputers/Python
|
16674289843f89f6cc287097f033b928f4181d84
|
bc55e2a2c5a98f4c7597e901a04457dfb9d5df0c
|
refs/heads/master
| 2023-08-18T21:04:18.163283
| 2023-08-17T17:38:16
| 2023-08-17T17:38:16
| 2,881,789
| 32,418
| 15,024
|
MIT
| 2023-09-02T18:40:33
| 2011-11-30T09:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
FizzBuzz.py
|
# FizzBuzz
# A program that prints the numbers from 1 to num (User given number)!
# For multiples of ‘3’ print “Fizz” instead of the number.
# For the multiples of ‘5’ print “Buzz”.
# If the number is divisible by both 3 and 5 then print "FizzBuzz".
# If none of the given conditions are true then just print the number!
def FizzBuzz(num):
for i in range(1, num + 1):
if i % 3 == 0 and i % 5 == 0:
print("FizzBuzz")
elif i % 3 == 0:
print("Fizz")
elif i % 5 == 0:
print("Buzz")
else:
print(i)
FizzBuzz(20) # prints FizzBuzz up to 20
|
4b6ee0092078b2efe698fc7cceba1de5b5fa6007
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/ut/python/parallel/test_comparison_function_info.py
|
8e1930ee2a856f6a2a4b2b9883117993ceae17ae
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 13,842
|
py
|
test_comparison_function_info.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y, b):
predict = self.network(x, y, b)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, b):
return grad_all(self.network)(x, y, b)
def compile_net(net, x, y, b):
net.set_train()
_cell_graph_executor.compile(net, x, y, b)
def test_matmul_equal():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.equal = P.Equal().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.equal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([128, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_not_equal():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.notequal = P.NotEqual().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.notequal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([128, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_approximateEqual():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.approximateEqual = P.ApproximateEqual(tolerance=0.5).shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.approximateEqual(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_greater():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.greater = P.Greater().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.greater(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_greaterEqual():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.greaterEqual = P.GreaterEqual().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.greaterEqual(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_less():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.less = P.Less().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.less(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_lessEqual():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.lessEqual = P.LessEqual().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.lessEqual(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_not_equal_repeated_calculation():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.notequal = P.NotEqual().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.notequal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 1), (4, 1))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([128, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_maximum():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.maximum = P.Maximum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_maximum_broadcast():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.maximum = P.Maximum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (2,))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_maximum_broadcast2():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.maximum = P.Maximum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), (4, 1))
strategy2 = ((4, 1), (1, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_minimum():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.minimum = P.Minimum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_minimum_broadcast():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
            self.minimum = P.Minimum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (2,))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_minimum_broadcast2():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().shard(strategy1)
self.minimum = P.Minimum().shard(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), (4, 1))
strategy2 = ((4, 1), (1, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
def test_matmul_minimum_auto_parallel():
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.matmul = P.MatMul()
self.minimum = P.Minimum()
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(dataset_strategy="full_batch")
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
net = GradWrap(NetWithLoss(Net()))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
compile_net(net, x, y, b)
|
d048c088441791f6f6d5e652b91d8d803d17413f
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/amundsen/databuilder/databuilder/models/query/query_join.py
|
971686073d956b95581f91614ca73d8020cd904c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 8,878
|
py
|
query_join.py
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Iterator, Optional
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.query.query import QueryMetadata
from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
class QueryJoinMetadata(GraphSerializable):
"""
A Join clause used between two tables within a query
"""
NODE_LABEL = 'Join'
KEY_FORMAT = '{join_type}-{left_column_key}-{operator}-{right_column_key}'
# Relation between entity and query
COLUMN_JOIN_RELATION_TYPE = 'COLUMN_JOINS_WITH'
INVERSE_COLUMN_JOIN_RELATION_TYPE = 'JOIN_OF_COLUMN'
QUERY_JOIN_RELATION_TYPE = 'QUERY_JOINS_WITH'
INVERSE_QUERY_JOIN_RELATION_TYPE = 'JOIN_OF_QUERY'
# Node attributes
JOIN_TYPE = 'join_type'
JOIN_OPERATOR = 'operator'
JOIN_SQL = 'join_sql'
LEFT_TABLE_KEY = 'left_table_key'
LEFT_DATABASE = 'left_database'
LEFT_CLUSTER = 'left_cluster'
LEFT_SCHEMA = 'left_schema'
LEFT_TABLE = 'left_table'
RIGHT_TABLE_KEY = 'right_table_key'
RIGHT_DATABASE = 'right_database'
RIGHT_CLUSTER = 'right_cluster'
RIGHT_SCHEMA = 'right_schema'
RIGHT_TABLE = 'right_table'
def __init__(self,
left_table: TableMetadata,
right_table: TableMetadata,
left_column: ColumnMetadata,
right_column: ColumnMetadata,
join_type: str,
join_operator: str,
join_sql: str,
query_metadata: Optional[QueryMetadata] = None,
yield_relation_nodes: bool = False):
"""
:param left_table: The table joined on the left side of the join clause
:param right_table: The table joined on the right side of the join clause
:param left_column: The column from the left table used in the join
:param right_column: The column from the right table used in the join
:param join_type: A free form string representing the type of join, examples
include: inner join, right join, full join, etc.
:param join_operator: The operator used in the join, examples include: =, >, etc.
        :param query_metadata: The Query metadata object that this join clause belongs to, this
is optional to allow creating static QueryJoinMetadata objects to show on tables
without the complexity of creating QueryMetadata
:param yield_relation_nodes: A boolean, indicating whether or not the query metadata
            and tables associated with this Join should have nodes created if they do not
already exist.
"""
# For inner joins we don't want to duplicate joins if the other table
# comes first in the join clause since it produces the same effect.
# This ONLY applies to inner join and you may need to massage your data
# for join_type to have the proper value
swap_left_right = False
if join_operator == '=' and join_type == 'inner join':
tables_sorted = sorted([left_table._get_table_key(), right_table._get_table_key()])
            if tables_sorted[0] == right_table._get_table_key():
swap_left_right = True
self.left_table = right_table if swap_left_right else left_table
self.right_table = left_table if swap_left_right else right_table
self.left_column = right_column if swap_left_right else left_column
self.right_column = left_column if swap_left_right else right_column
self.join_type = join_type
self.join_operator = join_operator
self.join_sql = join_sql
self.query_metadata = query_metadata
self.yield_relation_nodes = yield_relation_nodes
self._node_iter = self._create_next_node()
self._relation_iter = self._create_relation_iterator()
def __repr__(self) -> str:
return (
f'QueryJoinMetadata(Left Table: {self.left_table._get_table_key()}, '
            f'Right Table: {self.right_table._get_table_key()})'
)
def create_next_node(self) -> Optional[GraphNode]:
# return the string representation of the data
try:
return next(self._node_iter)
except StopIteration:
return None
def create_next_relation(self) -> Optional[GraphRelationship]:
try:
return next(self._relation_iter)
except StopIteration:
return None
@staticmethod
def get_key(left_column_key: str, right_column_key: str, join_type: str, operator: str) -> str:
join_no_space = join_type.replace(' ', '-')
return QueryJoinMetadata.KEY_FORMAT.format(left_column_key=left_column_key,
right_column_key=right_column_key,
join_type=join_no_space,
operator=operator)
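    # Illustrative only (hypothetical column keys): with join_type='inner join' and
    # operator='=', get_key('db://gold.test_schema/tbl_a/col_x',
    # 'db://gold.test_schema/tbl_b/col_y', ...) yields
    # 'inner-join-db://gold.test_schema/tbl_a/col_x-=-db://gold.test_schema/tbl_b/col_y'.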
def get_key_self(self) -> str:
return QueryJoinMetadata.get_key(left_column_key=self.left_table._get_col_key(col=self.left_column),
right_column_key=self.right_table._get_col_key(col=self.right_column),
join_type=self.join_type,
operator=self.join_operator)
def get_query_relations(self) -> Iterator[GraphRelationship]:
# Left Column
yield GraphRelationship(
start_label=ColumnMetadata.COLUMN_NODE_LABEL,
end_label=self.NODE_LABEL,
start_key=self.left_table._get_col_key(col=self.left_column),
end_key=self.get_key_self(),
type=self.COLUMN_JOIN_RELATION_TYPE,
reverse_type=self.INVERSE_COLUMN_JOIN_RELATION_TYPE,
attributes={}
)
# Right Column
yield GraphRelationship(
start_label=ColumnMetadata.COLUMN_NODE_LABEL,
end_label=self.NODE_LABEL,
start_key=self.right_table._get_col_key(col=self.right_column),
end_key=self.get_key_self(),
type=self.COLUMN_JOIN_RELATION_TYPE,
reverse_type=self.INVERSE_COLUMN_JOIN_RELATION_TYPE,
attributes={}
)
if self.query_metadata:
yield GraphRelationship(
start_label=QueryMetadata.NODE_LABEL,
end_label=self.NODE_LABEL,
start_key=self.query_metadata.get_key_self(),
end_key=self.get_key_self(),
type=self.QUERY_JOIN_RELATION_TYPE,
reverse_type=self.INVERSE_QUERY_JOIN_RELATION_TYPE,
attributes={}
)
def _create_next_node(self) -> Iterator[GraphNode]:
"""
Create query nodes
:return:
"""
yield GraphNode(
key=self.get_key_self(),
label=self.NODE_LABEL,
attributes={
self.JOIN_TYPE: self.join_type,
self.JOIN_OPERATOR: self.join_operator,
self.JOIN_SQL: self.join_sql,
self.LEFT_TABLE_KEY: self.left_table._get_table_key(),
self.LEFT_DATABASE: self.left_table.database,
self.LEFT_CLUSTER: self.left_table.cluster,
self.LEFT_SCHEMA: self.left_table.schema,
self.LEFT_TABLE: self.left_table.name,
self.RIGHT_TABLE_KEY: self.right_table._get_table_key(),
self.RIGHT_DATABASE: self.right_table.database,
self.RIGHT_CLUSTER: self.right_table.cluster,
self.RIGHT_SCHEMA: self.right_table.schema,
self.RIGHT_TABLE: self.right_table.name
}
)
if self.yield_relation_nodes:
for l_tbl_item in self.left_table._create_next_node():
yield l_tbl_item
for r_tbl_item in self.right_table._create_next_node():
yield r_tbl_item
if self.query_metadata:
for query_item in self.query_metadata._create_next_node():
yield query_item
def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
relations = self.get_query_relations()
for relation in relations:
yield relation
if self.yield_relation_nodes:
for l_tbl_rel in self.left_table._create_next_relation():
yield l_tbl_rel
for r_tbl_rel in self.right_table._create_next_relation():
yield r_tbl_rel
if self.query_metadata:
for query_rel in self.query_metadata._create_relation_iterator():
yield query_rel
|
08179727805c9d038376f96d8de1a42a49d24151
|
2feeb893ef5e25383ca5029be8bd556538639982
|
/commands/developer-utils/sentry/sentry-unresolved-issues-by-project.template.py
|
a4b51945f3b2d42f3cd67b9864c2ee3ae9807be3
|
[
"MIT"
] |
permissive
|
raycast/script-commands
|
c62175cc1e897174407cecdda2e6b053f397f131
|
e74432024f71f783f7125eba56603cc82160149c
|
refs/heads/master
| 2023-09-03T21:06:51.794246
| 2023-08-28T09:32:36
| 2023-08-28T09:32:36
| 299,712,981
| 5,249
| 1,133
|
MIT
| 2023-09-07T15:33:07
| 2020-09-29T19:06:27
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
sentry-unresolved-issues-by-project.template.py
|
#!/usr/bin/env python3
# How to use this script?
# It's a template which needs further setup. Duplicate the file,
# remove `.template.` from the filename and set an API token as
# well as the Sentry organization.
#
# API: https://docs.sentry.io/api/events/list-a-projects-issues/
# Parameters
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Unresolved Issues By Project
# @raycast.mode fullOutput
# Conditional parameters:
# @raycast.refreshTime 1h
# Optional parameters:
# @raycast.packageName Sentry
# @raycast.icon images/sentry.png
# @raycast.iconDark images/sentry-dark.png
# @raycast.argument1 { "type": "text", "placeholder": "Project" }
# Documentation:
# @raycast.author Phil Salant
# @raycast.authorURL https://github.com/PSalant726
# @raycast.author Thomas Paul Mann
# @raycast.authorURL https://github.com/thomaspaulmann
# @raycast.description Show unresolved issues in the last 24 hours (by project) from Sentry.
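# One possible setup (illustrative shell commands, adjust the filename/paths as needed):
#   cp sentry-unresolved-issues-by-project.template.py sentry-unresolved-issues-by-project.py
# then fill in API_TOKEN and ORGANIZATION in the Configuration section below.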
#########################
##### Configuration #####
#########################
# API token with `project:read` scope (https://sentry.io/settings/account/api/auth-tokens/)
API_TOKEN = ""
# Slug of organization the issues belong to
ORGANIZATION = ""
# Main program
import json, sys, urllib.request
from datetime import datetime as dt
colors = {
    'ok': '\033[92m',
    'error': '\033[91m',
    'end': '\033[0m',
    'warn': '\033[93m',
}
def error(message):
    return f"{colors['error']}{message}{colors['end']}"
def ok(message):
    return f"{colors['ok']}{message}{colors['end']}"
def warn(message):
    return f"{colors['warn']}{message}{colors['end']}"
# Validate the configuration only after error() is defined, so the messages can be printed
if not API_TOKEN:
    print(error("No API token provided"))
    exit(1)
if not ORGANIZATION:
    print(error("No Sentry organization provided"))
    exit(1)
project = sys.argv[1]
if not project:
print(error("No Sentry project provided"))
exit(1)
request = urllib.request.Request(
method="GET",
url=f"https://sentry.io/api/0/projects/{ORGANIZATION}/{project}/issues/?statsPeriod=24h&query=is:unresolved",
headers={ "Authorization": f"Bearer {API_TOKEN}" }
)
try:
response = urllib.request.urlopen(request)
except urllib.error.HTTPError as e:
print(f"{error('Failed to get unresolved issues from Sentry:')} {e.code} {e.reason}")
exit(1)
except urllib.error.URLError as e:
print(f"{error('Failed to reach Sentry:')} {e.reason}")
exit(1)
else:
unresolved_issues = json.loads(response.read().decode("utf-8"))
unresolved_issues_count = len(unresolved_issues)
if unresolved_issues_count == 0:
print(ok("No unresolved issues in the last 24 hours."))
else:
issue_text = "issue" if unresolved_issues_count == 1 else "issues"
print(error(f"{unresolved_issues_count} unresolved {issue_text} in the last 24 hours:\n"))
for i, issue in enumerate(unresolved_issues, 1):
last_seen = dt.strptime(issue['lastSeen'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime('%b %d, %Y at %I:%M %p')
print(f"{i}. {warn(issue['title'])}")
print(f" Last seen {last_seen}.")
print(f" {issue['permalink']}\n")
|
39e8f014caf36218fe8ccce2be3a8fbd5ebeee8c
|
3e1f6dfde5c940f7acde208d098e56a54550945f
|
/dash_docs/chapters/dash_vtk/other/examples/t05_reader.py
|
de148a2bd0c9ed523e7886ae8641709d4250a936
|
[
"MIT"
] |
permissive
|
plotly/dash-docs
|
a4d1b9e450aa19e811f8ae043fd56de330cce63a
|
f494e987701be1085ba9fb7b29bd875ee2146d5b
|
refs/heads/master
| 2023-08-03T02:18:16.257115
| 2021-12-14T18:51:52
| 2021-12-14T18:51:52
| 84,095,619
| 396
| 210
|
MIT
| 2023-01-18T20:29:56
| 2017-03-06T16:30:08
|
Python
|
UTF-8
|
Python
| false
| false
| 693
|
py
|
t05_reader.py
|
import os
import dash
import dash_html_components as html
import dash_vtk
# Get it here: https://github.com/plotly/dash-vtk/blob/master/demos/data/cow-nonormals.obj
obj_file = "datasets/cow-nonormals.obj"
txt_content = None
with open(obj_file, 'r') as file:
txt_content = file.read()
content = dash_vtk.View([
dash_vtk.GeometryRepresentation([
dash_vtk.Reader(
vtkClass="vtkOBJReader",
parseAsText=txt_content,
),
]),
])
# Dash setup
app = dash.Dash(__name__)
server = app.server
app.layout = html.Div(
style={"width": "100%", "height": "400px"},
children=[content],
)
if __name__ == "__main__":
app.run_server(debug=True)
|
77e8f22553213ff2b050713464b66ad2b4816e37
|
38e5c18fdb3da2fd51d6ffcdbd30fca1f4197220
|
/events/migrations/0046_require_sponsor_logo.py
|
a4088394fd3f175fb3bce02dba36cffd7990d4a6
|
[
"BSD-2-Clause"
] |
permissive
|
GetTogetherComm/GetTogether
|
3472c00e94c25930bb5f854bdf5ddf6f0b25fe70
|
6708944bbcecb6d3d1467b096b2d72e991583d51
|
refs/heads/master
| 2023-08-20T17:57:30.082021
| 2022-04-18T22:22:54
| 2022-04-18T22:22:54
| 115,438,321
| 462
| 106
|
BSD-2-Clause
| 2023-02-15T18:23:18
| 2017-12-26T16:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 569
|
py
|
0046_require_sponsor_logo.py
|
# Generated by Django 2.0 on 2018-09-26 22:14
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
dependencies = [("events", "0045_change_field_help_text")]
operations = [
migrations.AlterField(
model_name="sponsor",
name="logo",
field=imagekit.models.fields.ProcessedImageField(
help_text="Will be scaled and cropped to max 250x200 px.",
upload_to="sponsors",
verbose_name="Logo",
),
)
]
|
543a6c75ff117a835ecff14f5fff20c62298d2b5
|
e8846f706a428a91659ac6e24974dc696089fe4a
|
/pandapower/pf/pfsoln_numba.py
|
d43a593d6bba5f31b2274ffae75bf05d6e2fdd64
|
[
"BSD-3-Clause"
] |
permissive
|
e2nIEE/pandapower
|
3e434bf81b29e9c88905abbd82fd0309e2191ffb
|
5592ba1f6fcd727053a37dcf246b9bf36874c24a
|
refs/heads/develop
| 2023-09-03T23:21:25.979973
| 2023-08-31T11:00:17
| 2023-08-31T11:00:17
| 78,748,060
| 608
| 481
|
NOASSERTION
| 2023-09-14T18:22:08
| 2017-01-12T13:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
pfsoln_numba.py
|
# -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Updates bus, gen, branch data structures to match power flow soln.
"""
from numpy import conj, zeros, complex128, abs, float64, sqrt, real, isin, arange
from numpy import finfo, c_, flatnonzero as find, setdiff1d, r_, int64
from pandapower.pypower.idx_brch import F_BUS, T_BUS, PF, PT, QF, QT
from pandapower.pypower.idx_bus import PD, QD
from pandapower.pypower.idx_gen import GEN_BUS, GEN_STATUS, PG, QG
from pandapower.pypower.idx_ssc import SSC_Q
from pandapower.pypower.idx_svc import SVC_Q
from pandapower.pypower.idx_tcsc import TCSC_QF, TCSC_QT
from pandapower.pypower.pfsoln import _update_v, _update_q, _update_p
from pandapower.auxiliary import version_check
try:
from numba import jit
version_check('numba')
except ImportError:
from pandapower.pf.no_numba import jit
EPS = finfo(float).eps
def pfsoln(baseMVA, bus, gen, branch, svc, tcsc, ssc, Ybus, Yf, Yt, V, ref, ref_gens, Ibus=None,
limited_gens=None):
"""Updates bus, gen, branch data structures to match power flow soln.
@author: Ray Zimmerman (PSERC Cornell)
@author: Richard Lincoln
"""
# generator info
on = find(gen[:, GEN_STATUS] > 0) # which generators are on?
gbus = gen[on, GEN_BUS].astype(int64) # what buses are they at?
# xward: add ref buses that are not at the generators
xbus = setdiff1d(ref, gbus)
# compute total injected bus powers
Ibus = zeros(len(V)) if Ibus is None else Ibus
Sbus = V * conj(Ybus * V - Ibus)
_update_v(bus, V)
# update gen results
_update_q(baseMVA, bus, gen, gbus, Sbus[gbus], on)
if limited_gens is not None and len(limited_gens) > 0:
on = find((gen[:, GEN_STATUS] > 0) | isin(arange(len(gen)), limited_gens))
gbus = gen[on, GEN_BUS].astype(int64)
_update_p(baseMVA, bus, gen, ref, gbus, Sbus, ref_gens)
# ----- update/compute branch power flows -----
branch = _update_branch_flows(Yf, Yt, V, baseMVA, branch)
return bus, gen, branch
def pf_solution_single_slack(baseMVA, bus, gen, branch, svc, tcsc, ssc, Ybus, Yf, Yt, V, ref, ref_gens,
Ibus=None, limited_gens=None):
"""
faster version of pfsoln for a grid with a single slack bus
NOTE: Do not use in combination with shunts (check if ppc["bus"][:, GS/BS] are != 0.)
    NOTE: Do not use in combination with voltage-dependent loads
"""
# ----- update bus voltages -----
_update_v(bus, V)
# ----- update/compute branch power flows -----
branch = _update_branch_flows(Yf, Yt, V, baseMVA, branch)
p_bus = bus[:, PD].sum()
q_bus = bus[:, QD].sum()
p_loss = branch[:, [PF, PT]].sum()
q_loss = branch[:, [QF, QT]].sum()
# consider FACTS devices:
q_facts = svc[:, SVC_Q].sum() + tcsc[:, [TCSC_QF, TCSC_QT]].sum() + ssc[:, SSC_Q].sum()
# slack p = sum of branch losses and p demand at all buses
gen[:, PG] = p_loss.real + p_bus # branch p losses + p demand
gen[:, QG] = q_loss.real + q_bus + q_facts # branch q losses + q demand
return bus, gen, branch
def _update_branch_flows(Yf, Yt, V, baseMVA, branch):
f_bus = real(branch[:, F_BUS]).astype(int64)
t_bus = real(branch[:, T_BUS]).astype(int64)
# complex power at "from" bus
Sf = calc_branch_flows(Yf.data, Yf.indptr, Yf.indices, V, baseMVA, Yf.shape[0], f_bus)
# complex power injected at "to" bus
St = calc_branch_flows(Yt.data, Yt.indptr, Yt.indices, V, baseMVA, Yt.shape[0], t_bus)
branch[:, [PF, QF, PT, QT]] = c_[Sf.real, Sf.imag, St.real, St.imag]
return branch
@jit(nopython=True, cache=False)
def calc_branch_flows(Yy_x, Yy_p, Yy_j, v, baseMVA, dim_x, bus_ind): # pragma: no cover
Sx = zeros(dim_x, dtype=complex128)
# iterate through sparse matrix and get Sx = conj(Y_kj* V[j])
for r in range(len(Yy_p) - 1):
for k in range(Yy_p[r], Yy_p[r + 1]):
Sx[r] += conj(Yy_x[k] * v[Yy_j[k]]) * baseMVA
# finally get Sx = V[k] * conj(Y_kj* V[j])
Sx *= v[bus_ind]
return Sx
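# For reference (a sketch, assuming Y is the matrix whose CSR data, indptr and indices
# are passed in as Yy_x, Yy_p, Yy_j), calc_branch_flows is equivalent to the vectorised
# expression:
#   Sx = v[bus_ind] * conj(Y @ v) * baseMVA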
@jit(nopython=True, cache=False)
def calc_branch_flows_batch(Yy_x, Yy_p, Yy_j, V, baseMVA, dim_x, bus_ind, base_kv): # pragma: no cover
"""
Function to get branch flows with a batch computation for the timeseries module
Parameters
----------
    Yy_x, Yy_p, Yy_j - Yt or Yf CSR representation
V - complex voltage matrix results from time series
baseMVA - base MVA from ppc
dim_x - shape of Y
bus_ind - f_bus or t_bus
    base_kv - ppci["bus"] BASE_KV values
Returns
----------
i_abs, s_abs - absolute branch currents and power flows. This is "i_ft" / "s_ft" in results_branch.py
    S - complex Sf / St values from ppci
"""
S = zeros((V.shape[0], dim_x), dtype=complex128)
s_abs = zeros((V.shape[0], dim_x), dtype=float64)
i_abs = zeros((V.shape[0], dim_x), dtype=float64)
sqrt_3 = sqrt(3)
# iterate over entries in V (v= complex V result of each time step)
for t in range(V.shape[0]):
v = V[t]
vm = abs(v)
Sx = zeros(dim_x, dtype=complex128)
# iterate through sparse matrix and get Sx = conj(Y_kj* V[j])
for r in range(len(Yy_p) - 1):
for k in range(Yy_p[r], Yy_p[r + 1]):
Sx[r] += conj(Yy_x[k] * v[Yy_j[k]]) * baseMVA
# finally get Sx = V[k] * conj(Y_kj* V[j])
Sx *= v[bus_ind]
S[t, :] = Sx
s_abs[t, :] = abs(Sx)
i_abs[t, :] = s_abs[t, :] / (vm[bus_ind] * base_kv[bus_ind]) / sqrt_3
return S, s_abs, i_abs
|
b45c8f8450d53f0a63702ab7aef9f470ed3c1963
|
8f48cf56bbb19560c8f65a81e0ce42e2a9fc27a6
|
/tools/schemacode/bidsschematools/tests/test_render_tables.py
|
be41b8c5ebe91d125c09f52ffe70d4427b5e4765
|
[
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
bids-standard/bids-specification
|
7b48af9353a805aa34e90bb669b0a7564fa377b4
|
e1f853873036e2079d39a4fcb1357a04c8afbb80
|
refs/heads/master
| 2023-08-23T09:18:46.250752
| 2023-08-22T05:45:00
| 2023-08-22T05:45:00
| 150,465,237
| 242
| 164
|
CC-BY-4.0
| 2023-09-13T14:09:30
| 2018-09-26T17:35:34
|
Python
|
UTF-8
|
Python
| false
| false
| 5,353
|
py
|
test_render_tables.py
|
"""Tests for the bidsschematools package."""
from bidsschematools.render import tables
def test_make_entity_table(schema_obj):
"""
Test whether expected entities are present and listed correctly.
This should be robust with respect to schema format.
"""
entity_table = tables.make_entity_table(schema_obj)
# Non-exhaustive list covering both value and index formats
expected_entities = [
"[`acq-<label>`](./appendices/entities.md#acq)",
"[`ses-<label>`](./appendices/entities.md#ses)",
"[`sample-<label>`](./appendices/entities.md#sample)",
"[`task-<label>`](./appendices/entities.md#task)",
"[`acq-<label>`](./appendices/entities.md#acq)",
"[`ce-<label>`](./appendices/entities.md#ce)",
"[`trc-<label>`](./appendices/entities.md#trc)",
"[`stain-<label>`](./appendices/entities.md#stain)",
"[`rec-<label>`](./appendices/entities.md#rec)",
"[`dir-<label>`](./appendices/entities.md#dir)",
"[`run-<index>`](./appendices/entities.md#run)",
]
for expected_entity in expected_entities:
assert expected_entity in entity_table
def test_make_suffix_table(schema_obj):
"""
Test whether expected suffixes are present and listed with correct names.
Values are hard-coded from the present YAML, but should nevertheless be robust
with respect to schema format, other than case changes for the names.
"""
target_suffixes = [
"beh",
"cbv",
"dwi",
]
suffix_table = tables.make_suffix_table(schema_obj, target_suffixes)
expected_names = [
"Behavioral recording",
"Cerebral blood volume image",
"Diffusion-weighted image",
]
for expected_name in expected_names:
assert expected_name in suffix_table
def test_make_sidecar_table(schema_obj):
"""
Test whether expected metadata fields are present and the requirement level is
applied correctly.
This should be robust with respect to schema format.
"""
# mri.MRISpatialEncoding selected for having some level and description addenda
rendered_table = tables.make_sidecar_table(schema_obj, "mri.MRISpatialEncoding").split("\n")
assert rendered_table[0].startswith("| **Key name**")
assert rendered_table[1].startswith("|-------------")
fields = schema_obj.rules.sidecars.mri.MRISpatialEncoding.fields
assert len(rendered_table) == len(fields) + 2
for field, render_row in zip(fields, rendered_table[2:]):
assert render_row.startswith(f"| [{field}](")
spec = fields[field]
if isinstance(spec, str):
level = spec
level_addendum = ""
description_addendum = ""
else:
level = spec["level"]
level_addendum = spec.get("level_addendum", "").replace("required", "REQUIRED")
description_addendum = spec.get("description_addendum", "")
assert level.upper() in render_row
assert level_addendum.split("\n")[0] in render_row
assert description_addendum.split("\n")[0] in render_row
def test_make_metadata_table(schema_obj):
"""
Test whether expected metadata fields are present and the requirement level is
applied correctly.
This should be robust with respect to schema format.
"""
target_metadata = {
"Authors": "required",
"BIDSVersion": "required",
"DatasetDOI": "optional",
}
metadata_table = tables.make_metadata_table(schema_obj, target_metadata).split("\n")
metadata_tracking = list(target_metadata.keys())
for line in metadata_table:
for i in metadata_tracking:
if i in line:
# Is the requirement level displayed correctly?
assert target_metadata[i].upper() in line
# Mark found
metadata_tracking.remove(i)
# Have we found all fields?
assert len(metadata_tracking) == 0
def test_make_columns_table(schema_obj):
"""
Test whether expected columns are present and the requirement level is
applied correctly.
This should be robust with respect to schema format.
"""
    # modality_agnostic.Participants is used here as a representative tabular-data rule
rendered_table = tables.make_columns_table(
schema_obj,
"modality_agnostic.Participants",
).split("\n")
assert rendered_table[0].startswith("| **Column name**")
assert rendered_table[1].startswith("|----------------")
fields = schema_obj.rules.tabular_data.modality_agnostic.Participants.columns
assert len(rendered_table) == len(fields) + 3 # header + orientation + add. cols. row
for field, render_row in zip(fields, rendered_table[2:-1]):
assert render_row.startswith(f"| [{field}](")
spec = fields[field]
if isinstance(spec, str):
level = spec
level_addendum = ""
description_addendum = ""
else:
level = spec["level"]
level_addendum = spec.get("level_addendum", "").replace("required", "REQUIRED")
description_addendum = spec.get("description_addendum", "")
assert level.upper() in render_row
assert level_addendum.split("\n")[0] in render_row
assert description_addendum.split("\n")[0] in render_row
|
c172a9a2691735bd329c940c255256c1a5a16fa7
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/perfkitbenchmarker/linux_benchmarks/tensorflow_benchmark.py
|
97cc07c6dd391bbb86d113dd696151ada1b73aab
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 23,728
|
py
|
tensorflow_benchmark.py
|
# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run Tensorflow benchmarks (https://github.com/tensorflow/benchmarks).
This benchmark supports distributed and non-distributed runs. Distributed
TensorFlow involves splitting the job across different VMs/nodes. To train a dataset
using hundreds of GPUs, use distributed TensorFlow. In Distributed TensorFlow,
there is communication between the parameter servers and the workers, and also
between the workers. Each worker process runs the same model. When a worker
needs a variable, it accesses it from the parameter server directly.
"""
import collections
import posixpath
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from six.moves import range
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'tensorflow'
BENCHMARK_CONFIG = """
tensorflow:
description: Runs Tensorflow Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GPU = 'gpu'
CPU = 'cpu'
NCHW = 'NCHW'
NHWC = 'NHWC'
PID_PREFIX = 'TF_PS_PID'
MODELS = ['vgg11', 'vgg16', 'vgg19', 'lenet', 'googlenet', 'overfeat',
'alexnet', 'trivial', 'inception3', 'inception4', 'resnet50',
'resnet101', 'resnet152']
FP16 = 'float16'
FP32 = 'float32'
flags.DEFINE_boolean('tf_forward_only', False, '''whether to use forward-only or
                     training for benchmarking''')
flags.DEFINE_list('tf_models', ['inception3', 'vgg16', 'alexnet', 'resnet50',
'resnet152'], 'name of the models to run')
flags.register_validator('tf_models',
lambda models: models and set(models).issubset(MODELS),
'Invalid models list. tf_models must be a subset of '
+ ', '.join(MODELS))
flags.DEFINE_string('tf_data_dir', None,
'Path to dataset in TFRecord format (aka Example '
'protobufs). If not specified, synthetic data will be '
'used.')
flags.DEFINE_string('tf_data_module', 'tensorflow/ILSVRC2012',
'Data path in preprovisioned data bucket.')
flags.DEFINE_integer('tf_num_files_train', 1024,
'The number of files for training')
flags.DEFINE_integer('tf_num_files_val', 128,
'The number of files for validation')
flags.DEFINE_enum('tf_data_name', 'imagenet', ['imagenet', 'flowers'],
'Name of dataset: imagenet or flowers.')
flags.DEFINE_list('tf_batch_sizes', None, 'batch sizes per compute device. '
'If not provided, the suggested batch size is used for '
'the given model')
flags.DEFINE_enum('tf_variable_update', 'parameter_server',
['parameter_server', 'replicated',
'distributed_replicated', 'independent'],
'''The method for managing variables: parameter_server,
replicated, distributed_replicated, independent''')
flags.DEFINE_enum('tf_local_parameter_device', CPU, [CPU, GPU],
'''Device to use as parameter server: cpu or gpu. For
distributed training, it can affect where caching of
variables happens.''')
flags.DEFINE_enum('tf_device', GPU, [CPU, GPU],
'Device to use for computation: cpu or gpu')
flags.DEFINE_enum('tf_data_format', NCHW, [NCHW, NHWC], '''Data layout to
use: NHWC (TF native) or NCHW (cuDNN native).''')
flags.DEFINE_boolean('tf_distortions', True,
'''Enable/disable distortions during image preprocessing.
These include bbox and color distortions.''')
flags.DEFINE_boolean('tf_distributed', False, 'Run TensorFlow distributed')
flags.DEFINE_string('tf_distributed_port', '2222',
'The port to use in TensorFlow distributed job')
flags.DEFINE_enum('tf_precision', FP32, [FP16, FP32],
'Use 16-bit floats for certain tensors instead of 32-bit '
'floats. This is currently experimental.')
flags.DEFINE_boolean('tf_use_local_data', False, 'Whether to use data from '
                     'the local machine. If false, the benchmark will use data '
                     'from cloud storage (GCS, S3, etc.).')
flags.DEFINE_string('tf_benchmark_args', None,
'Arguments (as a string) to pass to tf_cnn_benchmarks. '
'This can be used to run a benchmark with arbitrary '
'parameters. Arguments will be parsed and added to the '
'sample metadata. For example, '
                    '--tf_benchmark_args="--nodistortions --optimizer=sgd" '
'will run tf_cnn_benchmarks.py '
'--nodistortions --optimizer=sgd '
'and put the following in the metadata: '
'{\'nodistortions\': \'True\', \'optimizer\': \'sgd\'}. '
'All arguments must be in the form --arg_name=value. '
'If there are GPUs on the VM and no \'num_gpus\' flag in '
'the tf_benchmarks_args flag, the num_gpus flag will '
'automatically be populated with the number of available '
'GPUs.')
def LocalParameterDeviceValidator(value):
if FLAGS.tf_device == CPU:
return value == CPU
return True
flags.register_validator('tf_local_parameter_device',
LocalParameterDeviceValidator)
NVIDIA_TESLA_P4 = nvidia_driver.NVIDIA_TESLA_P4
NVIDIA_TESLA_K80 = nvidia_driver.NVIDIA_TESLA_K80
NVIDIA_TESLA_P100 = nvidia_driver.NVIDIA_TESLA_P100
NVIDIA_TESLA_V100 = nvidia_driver.NVIDIA_TESLA_V100
DEFAULT_BATCH_SIZE = 64
DEFAULT_BATCH_SIZES = {
CPU: {
'alexnet': 512,
'inception3': 64,
'resnet50': 64,
'resnet152': 32,
'vgg16': 32,
},
NVIDIA_TESLA_K80: {
'alexnet': 512,
'inception3': 64,
'resnet50': 64,
'resnet152': 32,
'vgg16': 32,
},
NVIDIA_TESLA_P4: {
'alexnet': 512,
'inception3': 128,
'resnet50': 128,
'resnet152': 64,
'vgg16': 64,
},
NVIDIA_TESLA_P100: {
'alexnet': 512,
'inception3': 256,
'resnet50': 256,
'resnet152': 128,
'vgg16': 128,
},
NVIDIA_TESLA_V100: {
'alexnet': 512,
'inception3': 256,
'resnet50': 256,
'resnet152': 128,
'vgg16': 128,
},
}
DATA_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'imagenet')
class TFParseOutputException(Exception):
pass
class TFParsePsPidException(Exception):
pass
class TFDataDirException(Exception):
pass
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _GetDefaultBatchSizeByModel(model, gpu_type):
"""Return the default batch size for a given model and gpu / cpu type.
If gpu_type is none, it is assumed that the model will be running on the CPU.
If there is no default for the given model and gpu_type, a default batch
size will be returned as defined by DEFAULT_BATCH_SIZE.
Args:
model: name of the Tensorflow model
gpu_type: type of the GPU, or None
Returns:
default batch size for the given model / gpu_type,
or the default batch size.
"""
computation_device = gpu_type or CPU
try:
return DEFAULT_BATCH_SIZES[computation_device][model]
except KeyError:
return DEFAULT_BATCH_SIZE
def _GetBatchSizes(model, gpu_type):
"""Return the batch_size flag if specified, or the appropriate default if not.
Args:
model: name of the Tensorflow model
gpu_type: type of the GPU, or None
Returns:
value of the batch_size flag if specified, or the default batch size for the
given model / gpu_type.
"""
return FLAGS.tf_batch_sizes or [_GetDefaultBatchSizeByModel(model, gpu_type)]
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.forward_only = FLAGS.tf_forward_only
benchmark_spec.data_name = FLAGS.tf_data_name
benchmark_spec.data_dir = (DATA_DIR if FLAGS.tf_use_local_data else
FLAGS.tf_data_dir)
benchmark_spec.use_local_data = FLAGS.tf_use_local_data
benchmark_spec.variable_update = FLAGS.tf_variable_update
benchmark_spec.distortions = FLAGS.tf_distortions
benchmark_spec.cnn_benchmarks_branch = FLAGS.tf_cnn_benchmarks_branch
benchmark_spec.tensorflow_cpu_pip_package = FLAGS.tf_cpu_pip_package
benchmark_spec.tensorflow_gpu_pip_package = FLAGS.tf_gpu_pip_package
benchmark_spec.distributed = FLAGS.tf_distributed
benchmark_spec.precision = FLAGS.tf_precision
benchmark_spec.benchmark_args = FLAGS.tf_benchmark_args
def _PrepareVm(vm):
"""Install and set up TensorFlow on the target vm.
The TensorFlow benchmarks are also installed.
  A specific branch of the benchmarks, cnn_tf_v1.10_compatible, which works best
  with TensorFlow 1.10, is used by default and can be overridden with the flag
  tf_cnn_benchmarks_branch.
Args:
vm: virtual machine on which to install TensorFlow
"""
if FLAGS.tf_data_dir and FLAGS.tf_use_local_data:
def _DownloadData(num_files, mode):
for i in range(num_files):
filename = '{}-{:05}-of-{:05}'.format(mode, i, num_files)
vm.DownloadPreprovisionedData(DATA_DIR, FLAGS.tf_data_module, filename)
_DownloadData(FLAGS.tf_num_files_train, 'train')
_DownloadData(FLAGS.tf_num_files_val, 'validation')
vm.Install('tensorflow')
vm.InstallPackages('git')
def Prepare(benchmark_spec):
"""Install and set up TensorFlow on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vms = benchmark_spec.vms
background_tasks.RunThreaded(_PrepareVm, vms)
benchmark_spec.tensorflow_version = tensorflow.GetTensorFlowVersion(vms[0])
if nvidia_driver.CheckNvidiaGpuExists(vms[0]):
benchmark_spec.gpu_type = nvidia_driver.GetGpuType(vms[0])
def _GetMetadataFromBenchmarkArgs(tf_cnn_benchmark_args):
"""Return a dictionary of arg names and values.
Only supports arguments in the following format:
--arg_name=arg_value
The above string will result in this function returning a dictionary
like so: {'arg_name': 'arg_value'}
Because this and other PKB benchmarks use the 'precision' flag to specify
fp16 or fp32, this function will convert the Tensorflow-specific precision
flag ('use_fp16') to 'precision' to keep results consistent. All other command
line arguments are extracted as is without being renamed.
Args:
tf_cnn_benchmark_args: string. The command line args to parse into a dict.
Returns:
A dictionary mapping argument names to their values.
"""
args = tf_cnn_benchmark_args.split(' ')
args_dict = {arg.split('=')[0].replace('--', ''): arg.split('=')[1]
for arg in args}
if 'use_fp16' in args_dict:
if args_dict['use_fp16'].lower() == 'true':
args_dict['precision'] = FP16
else:
args_dict['precision'] = FP32
return args_dict
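# Illustrative example (values are hypothetical, not from a real run): with the
# parsing rules above,
#   _GetMetadataFromBenchmarkArgs('--num_gpus=8 --use_fp16=true')
# returns {'num_gpus': '8', 'use_fp16': 'true', 'precision': 'float16'}, i.e. the
# original flag is kept and a normalized 'precision' entry is added alongside it.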
def _CreateMetadataDict(benchmark_spec, model, batch_size):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: benchmark spec
model: model which was run
    batch_size: batch size used
Returns:
metadata dict
"""
vm = benchmark_spec.vms[0]
metadata = {}
if nvidia_driver.CheckNvidiaGpuExists(vm):
metadata.update(nvidia_driver.GetMetadata(vm))
metadata['command_line'] = benchmark_spec.tf_cnn_benchmark_cmd
metadata['cnn_benchmarks_branch'] = benchmark_spec.cnn_benchmarks_branch
metadata['tensorflow_version'] = benchmark_spec.tensorflow_version
metadata['tensorflow_cpu_pip_package'] = (
benchmark_spec.tensorflow_cpu_pip_package)
metadata['tensorflow_gpu_pip_package'] = (
benchmark_spec.tensorflow_gpu_pip_package)
# If we ran a custom command-line through the benchmark_args flag,
  # add the metadata from that command and return. We don't need any more
  # metadata from this function as it is likely invalid.
if getattr(benchmark_spec, 'benchmark_args', None):
metadata.update(
_GetMetadataFromBenchmarkArgs(benchmark_spec.benchmark_args))
return metadata
metadata['model'] = model
metadata['batch_size'] = batch_size
metadata['forward_only'] = benchmark_spec.forward_only
metadata['data_name'] = benchmark_spec.data_name
metadata['data_dir'] = benchmark_spec.data_dir
metadata['use_local_data'] = benchmark_spec.use_local_data
metadata['variable_update'] = benchmark_spec.variable_update
metadata['local_parameter_device'] = benchmark_spec.local_parameter_device
metadata['device'] = benchmark_spec.device
metadata['data_format'] = benchmark_spec.data_format
metadata['distortions'] = benchmark_spec.distortions
metadata['distributed'] = benchmark_spec.distributed
metadata['precision'] = benchmark_spec.precision
metadata['num_gpus'] = benchmark_spec.num_gpus
return metadata
def _ExtractThroughput(output):
"""Extract throughput from TensorFlow output.
Args:
output: TensorFlow output
Returns:
    throughput (float)
"""
regex = r'total images/sec: (\S+)'
try:
return regex_util.ExtractFloat(regex, output)
except:
raise TFParseOutputException('Unable to parse TensorFlow output')
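# Illustrative example (assumed log line): tf_cnn_benchmarks ends its output with a
# line such as
#   total images/sec: 1384.23
# from which the regex above extracts 1384.23; output without such a line raises
# TFParseOutputException.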
def _ExtractTfParameterServerPid(output):
"""Extract the process identification number from TensorFlow parameter server.
Args:
output: string, Remote command output
Returns:
string, process identification number from TensorFlow parameter server
Raises:
TFParsePsPidException
"""
regex = r'{pid} (\S+)'.format(pid=PID_PREFIX)
try:
return regex_util.ExtractExactlyOneMatch(regex, output)
except:
raise TFParsePsPidException('Unable to parse process identification number '
'of TensorFlow parameter server from remote '
'command output.')
def _MakeSamplesFromOutput(benchmark_spec, output, model, batch_size):
"""Create a sample containing the measured TensorFlow throughput.
Args:
benchmark_spec: benchmark spec
output: TensorFlow output
model: model which was run
    batch_size: batch size used
Returns:
    a Sample containing the TensorFlow throughput in images/sec
"""
metadata = _CreateMetadataDict(benchmark_spec, model, batch_size)
tensorflow_throughput = _ExtractThroughput(output)
return sample.Sample('Training synthetic data', tensorflow_throughput,
'images/sec', metadata)
def _GetTfCnnBenchmarkCommand(vm, model, batch_size, benchmark_spec,
args='', job_name=''):
"""Create the command used to run the tf_cnn_benchmarks script.
The command is either formulated using flag values stored on the
benchmark_spec, or is essentially provided outright through the
benchmark_args flag.
Args:
vm: the VM to run on.
model: name of the model to run.
batch_size: batch size to use for training.
benchmark_spec: the benchmark spec object.
args: string, distributed arguments
job_name: string, distributed job name
Returns:
A string that runs the tf_cnn_benchmarks.py script
with the desired arguments.
"""
num_gpus = (nvidia_driver.QueryNumberOfGpus(vm) if
nvidia_driver.CheckNvidiaGpuExists(vm) else 0)
benchmark_spec.num_gpus = num_gpus
if benchmark_spec.benchmark_args is not None:
cmd = 'python tf_cnn_benchmarks.py ' + benchmark_spec.benchmark_args
# If the user didn't specify num_gpus in the benchmark_args string,
# use all the GPUs on the system.
if '--num_gpus' not in benchmark_spec.benchmark_args and num_gpus:
cmd = '{cmd} --num_gpus={num_gpus}'.format(cmd=cmd, num_gpus=num_gpus)
return cmd
benchmark_spec.local_parameter_device = FLAGS.tf_local_parameter_device
benchmark_spec.device = FLAGS.tf_device
benchmark_spec.data_format = FLAGS.tf_data_format
if num_gpus == 0:
benchmark_spec.local_parameter_device = CPU
benchmark_spec.device = CPU
benchmark_spec.data_format = NHWC
cmd = (
'{env_vars} python tf_cnn_benchmarks.py '
'--local_parameter_device={local_parameter_device} '
'--batch_size={batch_size} '
'--model={model} '
'{data} '
'--data_name={data_name} '
'--variable_update={variable_update} '
'--distortions={distortions} '
'--device={device} '
'--data_format={data_format} '
'--forward_only={forward_only} '
'--use_fp16={use_fp16} '
'{num_gpus} '
'{job_name}'.format(
env_vars=tensorflow.GetEnvironmentVars(vm),
local_parameter_device=benchmark_spec.local_parameter_device,
batch_size=batch_size,
model=model,
data=('--data_dir={}'.format(benchmark_spec.data_dir) if
benchmark_spec.data_dir else ''),
data_name=benchmark_spec.data_name,
variable_update=benchmark_spec.variable_update,
distortions=benchmark_spec.distortions,
device=benchmark_spec.device,
data_format=benchmark_spec.data_format,
forward_only=benchmark_spec.forward_only,
use_fp16=(benchmark_spec.precision == FP16),
num_gpus='--num_gpus={}'.format(num_gpus) if num_gpus else '',
job_name='--job_name={0} {1}'.format(job_name, args) if args else ''))
return cmd
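# Illustrative example (hypothetical values, single-GPU VM, default flags): the
# formatted command looks roughly like
#   <env vars> python tf_cnn_benchmarks.py --local_parameter_device=cpu \
#       --batch_size=256 --model=resnet50 --data_name=imagenet \
#       --variable_update=parameter_server --distortions=True --device=gpu \
#       --data_format=NCHW --forward_only=False --use_fp16=False --num_gpus=1
# data_dir is omitted when synthetic data is used, and job_name is only appended
# for distributed runs.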
def _RunModelOnVm(vm, model, batch_size, benchmark_spec, args='', job_name=''):
"""Runs a TensorFlow benchmark on a single VM.
Args:
vm: VM to run on
model: string, the name of model to run
batch_size: int, training batch size
benchmark_spec: BenchmarkSpec object
args: string, distributed arguments
job_name: string, distributed job name
Returns:
a Sample containing the TensorFlow throughput or the process
identification number from TensorFlow parameter server.
"""
tf_cnn_benchmark_cmd = _GetTfCnnBenchmarkCommand(
vm, model, batch_size, benchmark_spec, args, job_name)
benchmark_spec.tf_cnn_benchmark_cmd = tf_cnn_benchmark_cmd
tf_cnn_benchmark_dir = 'benchmarks/scripts/tf_cnn_benchmarks'
run_command = 'cd {path} ; {cmd}'.format(path=tf_cnn_benchmark_dir,
cmd=tf_cnn_benchmark_cmd)
output, _ = vm.RobustRemoteCommand(run_command)
if job_name == 'ps':
return _ExtractTfParameterServerPid(output)
else:
return _MakeSamplesFromOutput(benchmark_spec, output, model, batch_size)
def _RunOnVm(vm, benchmark_spec):
"""Runs a TensorFlow benchmark on a single VM.
Args:
vm: VM to run on
benchmark_spec: benchmark_spec object
Returns:
A list of samples containing the TensorFlow throughput from different models
"""
samples = []
if FLAGS.tf_benchmark_args:
return [_RunModelOnVm(vm, None, None, benchmark_spec)]
gpu_type = getattr(benchmark_spec, 'gpu_type', None)
for model in FLAGS.tf_models:
for batch_size in _GetBatchSizes(model, gpu_type):
samples.append(_RunModelOnVm(vm, model, batch_size, benchmark_spec))
return samples
def _GetHostsArgs(hosts):
return ','.join('{ip}:{port}'.format(ip=vm.internal_ip,
port=FLAGS.tf_distributed_port)
for vm in hosts)
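# Illustrative example (hypothetical IPs): for two worker VMs with internal IPs
# 10.0.0.3 and 10.0.0.4 and the default port, _GetHostsArgs returns
# '10.0.0.3:2222,10.0.0.4:2222'.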
def _RunDistributedTf(benchmark_spec):
"""Run distributed TensorFlow for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
ps_hosts = benchmark_spec.vm_groups['parameter_server_hosts']
worker_hosts = benchmark_spec.vm_groups['worker_hosts']
dist_args = '--ps_hosts={ps_args} --worker_hosts={worker_args}'.format(
ps_args=_GetHostsArgs(ps_hosts), worker_args=_GetHostsArgs(worker_hosts))
flattened_results = []
vm_pid = collections.namedtuple('vm_pid', 'vm pid')
gpu_type = getattr(benchmark_spec, 'gpu_type', None)
for model in FLAGS.tf_models:
for batch_size in _GetBatchSizes(model, gpu_type):
ps_pids = []
for task_index, vm in enumerate(ps_hosts):
dist_ps_args = ('{args} --task_index={index} &\n'
'echo {pid} $!').format(args=dist_args,
index=task_index,
pid=PID_PREFIX)
pid = _RunModelOnVm(vm, model, batch_size, benchmark_spec, dist_ps_args,
'ps')
ps_pids.append(vm_pid(vm=vm, pid=pid))
args = []
for task_index, vm in enumerate(worker_hosts):
dist_worker_args = ('{args} --job_name=worker '
'--task_index={index}').format(args=dist_args,
index=task_index)
args.append(((vm, model, batch_size, benchmark_spec, dist_worker_args,
'worker'), {}))
result = background_tasks.RunThreaded(_RunModelOnVm, args)
for ps_pid in ps_pids:
ps_pid.vm.RemoteCommand('kill -9 %s' % ps_pid.pid)
flattened_results.extend(vm_result for vm_result in result)
return flattened_results
def _RunTf(benchmark_spec):
"""Run TensorFlow for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
args = [((vm, benchmark_spec), {}) for vm in vms]
run_results = background_tasks.RunThreaded(_RunOnVm, args)
# Add vm index to results metadata
for idx, vm_result in enumerate(run_results):
for result_sample in vm_result:
result_sample.metadata['vm_index'] = idx
# Flatten the list
return [samples for vm_results in run_results for samples in vm_results]
def Run(benchmark_spec):
"""Run TensorFlow on the cluster for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
if benchmark_spec.distributed:
return _RunDistributedTf(benchmark_spec)
else:
return _RunTf(benchmark_spec)
def Cleanup(unused_benchmark_spec):
"""Cleanup TensorFlow on the cluster."""
pass
|
58de7c6e040abeededadd28c0013b49c2195eb1a
|
6ed034d0a5e239d7b0c528b287451409ffb4a494
|
/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py
|
9ac9ead1f5c1c1de40604c6830f6b0c762ad70eb
|
[
"Apache-2.0"
] |
permissive
|
ViTAE-Transformer/ViTPose
|
8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf
|
d5216452796c90c6bc29f5c5ec0bdba94366768a
|
refs/heads/main
| 2023-05-23T16:32:22.359076
| 2023-03-01T06:42:22
| 2023-03-01T06:42:22
| 485,999,907
| 869
| 132
|
Apache-2.0
| 2023-03-01T06:42:24
| 2022-04-27T01:09:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,823
|
py
|
mesh_h36m_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from collections import OrderedDict
import json_tricks as json
import numpy as np
from mmpose.core.evaluation import keypoint_mpjpe
from mmpose.datasets.builder import DATASETS
from .mesh_base_dataset import MeshBaseDataset
@DATASETS.register_module()
class MeshH36MDataset(MeshBaseDataset):
"""Human3.6M Dataset for 3D human mesh estimation. It inherits all function
from MeshBaseDataset and has its own evaluate function.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def evaluate(self, outputs, res_folder, metric='joint_error', logger=None):
"""Evaluate 3D keypoint results."""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['joint_error']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
for out in outputs:
for (keypoints, image_path) in zip(out['keypoints_3d'],
out['image_path']):
kpts.append({
'keypoints': keypoints.tolist(),
'image': image_path,
})
self._write_keypoint_results(kpts, res_file)
info_str = self._report_metric(res_file)
name_value = OrderedDict(info_str)
return name_value
@staticmethod
def _write_keypoint_results(keypoints, res_file):
"""Write results into a json file."""
with open(res_file, 'w') as f:
json.dump(keypoints, f, sort_keys=True, indent=4)
def _report_metric(self, res_file):
"""Keypoint evaluation.
Report mean per joint position error (MPJPE) and mean per joint
position error after rigid alignment (MPJPE-PA)
"""
with open(res_file, 'r') as fin:
preds = json.load(fin)
assert len(preds) == len(self.db)
pred_joints_3d = [pred['keypoints'] for pred in preds]
gt_joints_3d = [item['joints_3d'] for item in self.db]
gt_joints_visible = [item['joints_3d_visible'] for item in self.db]
pred_joints_3d = np.array(pred_joints_3d)
gt_joints_3d = np.array(gt_joints_3d)
gt_joints_visible = np.array(gt_joints_visible)
# we only evaluate on 14 lsp joints
joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]
pred_joints_3d = pred_joints_3d[:, joint_mapper, :]
pred_pelvis = (pred_joints_3d[:, 2] + pred_joints_3d[:, 3]) / 2
pred_joints_3d = pred_joints_3d - pred_pelvis[:, None, :]
gt_joints_3d = gt_joints_3d[:, joint_mapper, :]
gt_pelvis = (gt_joints_3d[:, 2] + gt_joints_3d[:, 3]) / 2
gt_joints_3d = gt_joints_3d - gt_pelvis[:, None, :]
gt_joints_visible = gt_joints_visible[:, joint_mapper, 0] > 0
mpjpe = keypoint_mpjpe(pred_joints_3d, gt_joints_3d, gt_joints_visible)
mpjpe_pa = keypoint_mpjpe(
pred_joints_3d,
gt_joints_3d,
gt_joints_visible,
alignment='procrustes')
info_str = []
info_str.append(('MPJPE', mpjpe * 1000))
info_str.append(('MPJPE-PA', mpjpe_pa * 1000))
return info_str
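# Illustrative sketch only (not part of the original dataset class): the MPJPE
# reported above is, conceptually, the mean Euclidean distance between predicted and
# ground-truth joints over the visible joints. Assuming pred/gt arrays of shape
# (N, K, 3) and a boolean visibility mask of shape (N, K), a minimal version is:
def _naive_mpjpe(pred, gt, mask):
    """Mean per-joint position error over visible joints (same units as input)."""
    errors = np.linalg.norm(pred - gt, axis=-1)  # (N, K) per-joint distances
    return errors[mask].mean()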
|
dc8dcc117094fb48dc9cf5f4ba7b44fb1c0d019c
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/amo/management/commands/get_changed_files.py
|
a84d4b530fad56ee0b264cc058e1644e65aa0997
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
get_changed_files.py
|
import os
from datetime import datetime, timedelta
from os import scandir
from django.conf import settings
from django.core.management.base import BaseCommand
from olympia.addons.models import Addon, Preview
from olympia.amo.utils import id_to_path
from olympia.blocklist.utils import datetime_to_ts
from olympia.files.models import File
from olympia.git.utils import AddonGitRepository
from olympia.hero.models import PrimaryHeroImage
from olympia.users.models import UserProfile
from olympia.versions.models import Version, VersionPreview
def collect_user_pics(since):
qs = UserProfile.objects.filter(modified__gt=since).only('id', 'username')
return [user.picture_dir for user in qs.iterator()]
def collect_files(since):
path = settings.ADDONS_PATH
id_iter = (
File.objects.filter(modified__gt=since)
.values_list('version__addon_id', flat=True)
.iterator()
)
return list({os.path.join(path, id_to_path(id_, breadth=2)) for id_ in id_iter})
def collect_sources(since):
path = os.path.join(settings.MEDIA_ROOT, 'version_source')
id_iter = Version.unfiltered.filter(modified__gt=since).values_list('id', flat=True)
return [os.path.join(path, id_to_path(id_, breadth=1)) for id_ in id_iter]
def _get_previews(since, PreviewModel):
out = set()
qs = PreviewModel.objects.filter(created__gt=since).only('id', 'sizes')
for preview in qs.iterator():
out = out | {
os.path.dirname(preview.thumbnail_path),
os.path.dirname(preview.image_path),
os.path.dirname(preview.original_path),
}
return list(out)
def collect_addon_previews(since):
return _get_previews(since, Preview)
def collect_theme_previews(since):
return _get_previews(since, VersionPreview)
def collect_addon_icons(since):
qs = Addon.unfiltered.filter(modified__gt=since).only('id')
return list({addon.get_icon_dir() for addon in qs.iterator()})
def collect_editoral(since):
return (
[os.path.join(settings.MEDIA_ROOT, 'hero-featured-image')]
if PrimaryHeroImage.objects.filter(modified__gt=since).exists()
else []
)
def collect_git(since):
qs_iter = (
File.objects.filter(modified__gt=since)
.values_list('version__addon_id', flat=True)
.iterator()
)
return list(
{AddonGitRepository(addon_id).git_repository_path for addon_id in qs_iter}
)
def collect_blocklist(since):
path = settings.MLBF_STORAGE_PATH
since_ts = datetime_to_ts(since)
return [
file_.path
for file_ in scandir(path)
if file_.is_dir() and file_.name.isdigit() and int(file_.name) >= since_ts
]
class Command(BaseCommand):
help = (
'Get folders containing files that have changed on the filesystem in the past '
'X seconds'
)
def add_arguments(self, parser):
parser.add_argument('since', type=int)
def get_collectors(self):
return [
collect_user_pics,
collect_files,
collect_sources,
collect_addon_previews,
collect_theme_previews,
collect_addon_icons,
collect_editoral,
collect_git,
collect_blocklist,
]
def handle(self, *args, **options):
since = datetime.now() - timedelta(seconds=options['since'])
for func in self.get_collectors():
items = func(since)
[self.stdout.write(os.path.normpath(item)) for item in items]
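# Illustrative usage (hypothetical invocation): `./manage.py get_changed_files 3600`
# runs every collector against `now - 1 hour` and prints one affected directory
# path per line on stdout.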
|
a2a785a76b2c7f033e1c9ea74c776ab680eb995f
|
4658aa41017b2e6da830f1e879774e4a7296c314
|
/holoviews/core/data/cudf.py
|
4a608a4fd6af778ddfd71207a37e97425083da4f
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/holoviews
|
3f133e572933c94cedad7bae6fb6d071152842fc
|
e3dee5443dad84b507734c0a3d2bba8ec44f5653
|
refs/heads/main
| 2023-09-03T05:08:42.682432
| 2023-08-28T20:40:36
| 2023-08-28T20:40:36
| 19,542,768
| 1,223
| 223
|
BSD-3-Clause
| 2023-09-14T18:15:53
| 2014-05-07T16:59:22
|
Python
|
UTF-8
|
Python
| false
| false
| 12,589
|
py
|
cudf.py
|
import sys
import warnings
from itertools import product
import numpy as np
import pandas as pd
from .. import util
from ..dimension import dimension_name
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import DataError, Interface
from .pandas import PandasInterface
from .util import finite_range
class cuDFInterface(PandasInterface):
"""
    The cuDFInterface allows a Dataset object to wrap a cuDF
    DataFrame object. Using cuDF allows working with columnar
    data on a GPU. Most operations leave the data in GPU memory;
    however, to plot the data it has to be loaded into host memory.
    The cuDFInterface covers almost the complete API exposed
    by the PandasInterface with two notable exceptions:
    1) Aggregation and groupby do not have a consistent sort order
    (see https://github.com/rapidsai/cudf/issues/4237)
    2) Not all functions can be easily applied to a cuDF so
    some functions applied with aggregate and reduce will not work.
"""
datatype = 'cuDF'
types = ()
@classmethod
def loaded(cls):
return 'cudf' in sys.modules
@classmethod
def applies(cls, obj):
if not cls.loaded():
return False
import cudf
return isinstance(obj, (cudf.DataFrame, cudf.Series))
@classmethod
def init(cls, eltype, data, kdims, vdims):
import cudf
element_params = eltype.param.objects()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
if isinstance(data, (cudf.Series, pd.Series)):
data = data.to_frame()
if not isinstance(data, cudf.DataFrame):
data, _, _ = PandasInterface.init(eltype, data, kdims, vdims)
data = cudf.from_pandas(data)
columns = list(data.columns)
ncols = len(columns)
index_names = [data.index.name]
if index_names == [None]:
index_names = ['index']
if eltype._auto_indexable_1d and ncols == 1 and kdims is None:
kdims = list(index_names)
if isinstance(kdim_param.bounds[1], int):
ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
else:
ndim = None
nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
if kdims and vdims is None:
vdims = [c for c in columns if c not in kdims]
elif vdims and kdims is None:
kdims = [c for c in columns if c not in vdims][:ndim]
elif kdims is None:
kdims = list(columns[:ndim])
if vdims is None:
vdims = [d for d in columns[ndim:((ndim+nvdim) if nvdim else None)]
if d not in kdims]
elif kdims == [] and vdims is None:
vdims = list(columns[:nvdim if nvdim else None])
# Handle reset of index if kdims reference index by name
for kd in kdims:
kd = dimension_name(kd)
if kd in columns:
continue
if any(kd == ('index' if name is None else name)
for name in index_names):
data = data.reset_index()
break
if any(isinstance(d, (np.int64, int)) for d in kdims+vdims):
raise DataError("cudf DataFrame column names used as dimensions "
"must be strings not integers.", cls)
if kdims:
kdim = dimension_name(kdims[0])
if eltype._auto_indexable_1d and ncols == 1 and kdim not in columns:
data = data.copy()
data.insert(0, kdim, np.arange(len(data)))
for d in kdims+vdims:
d = dimension_name(d)
if len([c for c in columns if c == d]) > 1:
raise DataError('Dimensions may not reference duplicated DataFrame '
'columns (found duplicate %r columns). If you want to plot '
'a column against itself simply declare two dimensions '
'with the same name. '% d, cls)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def range(cls, dataset, dimension):
dimension = dataset.get_dimension(dimension, strict=True)
column = dataset.data[dimension.name]
if dimension.nodata is not None:
column = cls.replace_value(column, dimension.nodata)
if column.dtype.kind == 'O':
return np.NaN, np.NaN
else:
return finite_range(column, column.min(), column.max())
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True,
keep_index=False):
dim = dataset.get_dimension(dim, strict=True)
data = dataset.data[dim.name]
if not expanded:
data = data.unique()
return data.values_host if compute else data.values
elif keep_index:
return data
elif compute:
return data.values_host
try:
return data.values
except Exception:
return data.values_host
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d).name for d in dimensions]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
# Propagate dataset
group_kwargs['dataset'] = dataset.dataset
# Find all the keys along supplied dimensions
keys = product(*(dataset.data[dimensions[0]].unique().values_host for d in dimensions))
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in util.unique_iterator(keys):
group_data = dataset.select(**dict(zip(dimensions, unique_key)))
if not len(group_data):
continue
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((unique_key, group_data))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
kdims = [dataset.get_dimension(d) for d in dimensions]
return container_type(grouped_data, kdims=kdims)
else:
return container_type(grouped_data)
@classmethod
def select_mask(cls, dataset, selection):
"""
Given a Dataset object and a dictionary with dimension keys and
selection keys (i.e. tuple ranges, slices, sets, lists, or literals)
return a boolean mask over the rows in the Dataset object that
have been selected.
"""
mask = None
for dim, sel in selection.items():
if isinstance(sel, tuple):
sel = slice(*sel)
arr = cls.values(dataset, dim, keep_index=True)
if util.isdatetime(arr):
try:
sel = util.parse_datetime_selection(sel)
except Exception:
pass
new_masks = []
if isinstance(sel, slice):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
if sel.start is not None:
new_masks.append(sel.start <= arr)
if sel.stop is not None:
new_masks.append(arr < sel.stop)
if not new_masks:
continue
new_mask = new_masks[0]
for imask in new_masks[1:]:
new_mask &= imask
elif isinstance(sel, (set, list)):
for v in sel:
new_masks.append(arr==v)
if not new_masks:
continue
new_mask = new_masks[0]
for imask in new_masks[1:]:
new_mask |= imask
elif callable(sel):
new_mask = sel(arr)
else:
new_mask = arr == sel
if mask is None:
mask = new_mask
else:
mask &= new_mask
return mask
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
df = dataset.data
if selection_mask is None:
selection_mask = cls.select_mask(dataset, selection)
indexed = cls.indexed(dataset, selection)
if selection_mask is not None:
df = df.iloc[selection_mask]
if indexed and len(df) == 1 and len(dataset.vdims) == 1:
return df[dataset.vdims[0].name].iloc[0]
return df
@classmethod
def concat_fn(cls, dataframes, **kwargs):
import cudf
return cudf.concat(dataframes, **kwargs)
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
data = dataset.data.copy()
if dimension.name not in data:
data[dimension.name] = values
return data
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
data = dataset.data
cols = [d.name for d in dataset.kdims if d in dimensions]
vdims = dataset.dimensions('value', label='name')
reindexed = data[cols+vdims]
agg = function.__name__
if len(dimensions):
agg_map = {'amin': 'min', 'amax': 'max'}
agg = agg_map.get(agg, agg)
grouped = reindexed.groupby(cols, sort=False)
if not hasattr(grouped, agg):
raise ValueError(f'{agg} aggregation is not supported on cudf DataFrame.')
df = getattr(grouped, agg)().reset_index()
else:
agg_map = {'amin': 'min', 'amax': 'max', 'size': 'count'}
agg = agg_map.get(agg, agg)
if not hasattr(reindexed, agg):
raise ValueError(f'{agg} aggregation is not supported on cudf DataFrame.')
agg = getattr(reindexed, agg)()
try:
data = {col: [v] for col, v in zip(agg.index.values_host, agg.to_numpy())}
except Exception:
            # Gives FutureWarning: 'The to_array method will be removed in a future cuDF release.
# Consider using `to_numpy` instead.'
# Seen in cudf=21.12.01
data = {col: [v] for col, v in zip(agg.index.values_host, agg.to_array())}
df = pd.DataFrame(data, columns=list(agg.index.values_host))
dropped = []
for vd in vdims:
if vd not in df.columns:
dropped.append(vd)
return df, dropped
@classmethod
def iloc(cls, dataset, index):
import cudf
rows, cols = index
scalar = False
columns = list(dataset.data.columns)
if isinstance(cols, slice):
cols = [d.name for d in dataset.dimensions()][cols]
elif np.isscalar(cols):
scalar = np.isscalar(rows)
cols = [dataset.get_dimension(cols).name]
else:
cols = [dataset.get_dimension(d).name for d in index[1]]
col_index = [columns.index(c) for c in cols]
if np.isscalar(rows):
rows = [rows]
if scalar:
return dataset.data[cols[0]].iloc[rows[0]]
result = dataset.data.iloc[rows, col_index]
# cuDF does not handle single rows and cols indexing correctly
# as of cudf=0.10.0 so we have to convert Series back to DataFrame
if isinstance(result, cudf.Series):
if len(cols) == 1:
result = result.to_frame(cols[0])
else:
result = result.to_frame().T
return result
@classmethod
def sort(cls, dataset, by=[], reverse=False):
cols = [dataset.get_dimension(d, strict=True).name for d in by]
return dataset.data.sort_values(by=cols, ascending=not reverse)
@classmethod
def dframe(cls, dataset, dimensions):
if dimensions:
return dataset.data[dimensions].to_pandas()
else:
return dataset.data.to_pandas()
Interface.register(cuDFInterface)
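# Illustrative usage sketch (assumes a CUDA-capable environment with cudf installed;
# not executed here): once registered, the interface is selected automatically when a
# cuDF DataFrame is passed to a Dataset, e.g.
#   import cudf, holoviews as hv
#   df = cudf.DataFrame({'x': [0, 1, 2], 'y': [1.0, 2.0, 3.0]})
#   ds = hv.Dataset(df, kdims=['x'], vdims=['y'])  # backed by cuDFInterface
#   ds.select(x=(1, None)).dframe()                # selection on the GPU, then copy
#                                                  # back to pandas for display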
|
568b1fe51241f4f32195f307a478d8c325d70f62
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/cli/main_parser.py
|
b17c7492803db60d9f5eb404aa24138bae462836
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
main_parser.py
|
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pip import __version__
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.commands import (
commands_dict, get_similar_commands, get_summaries,
)
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Tuple, List # noqa: F401
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
# type: () -> ConfigOptionParser
"""Creates and returns the main parser for pip's CLI
"""
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "..",
))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3],
)
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
# so the help formatter knows
parser.main = True # type: ignore
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parse_command(args):
# type: (List[str]) -> Tuple[str, List[str]]
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
    # general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version) # type: ignore
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
|
94d297ca4eb8cd991e71b3fee4e8dfa44a2d57aa
|
ccb50906170929899c1eb0b115ee2e4e25380019
|
/tests/conftest.py
|
5f6c0fa5b21660c1f0c24369f3636a41d545d50c
|
[
"MIT"
] |
permissive
|
click-contrib/sphinx-click
|
1ba02a04998ba9f84f494dffc53c819d03380a6a
|
620ce83b652f6a2af671fa14584ff5efcae6cda0
|
refs/heads/master
| 2023-08-21T16:30:10.475787
| 2023-08-17T16:39:56
| 2023-08-17T16:43:37
| 87,362,999
| 199
| 65
|
MIT
| 2023-08-09T09:45:02
| 2017-04-05T22:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 855
|
py
|
conftest.py
|
import pathlib
import shutil
import sphinx
import pytest
# this is necessary because Sphinx isn't exposing its fixtures
# https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#requiring-loading-plugins-in-a-test-module-or-conftest-file
pytest_plugins = ['sphinx.testing.fixtures']
# TODO: Remove when we no longer care about Sphinx < 7.2
@pytest.fixture
def rootdir(tmpdir):
if sphinx.version_info >= (7, 2, 0):
src = pathlib.Path(__file__).parent.absolute().joinpath('roots')
dst = tmpdir.join('roots')
shutil.copytree(src, dst)
roots = pathlib.Path(dst)
else:
from sphinx.testing import path
src = path.path(__file__).parent.abspath() / 'roots'
dst = tmpdir.join('roots')
shutil.copytree(src, dst)
roots = path.path(dst)
yield roots
shutil.rmtree(dst)
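# Illustrative usage (hedged sketch): a test module would then request the Sphinx
# fixtures as usual, e.g.
#   @pytest.mark.sphinx('text', testroot='basics')
#   def test_basics(app):
#       app.build()
# and the rootdir fixture above makes Sphinx resolve testroot against the copied
# roots/ directory on both older and newer Sphinx versions.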
|
ae93172d7fb0c416215cc2829803b5c304607470
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/799.py
|
592e5e37d53d50f50370a9544e6af6344660f22e
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
799.py
|
__________________________________________________________________________________________________
sample 56 ms submission
import collections
class Solution:
def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:
d = query_row - query_glass
q = collections.deque([(0, 0, poured)])
while q:
r, c, w = q.popleft()
if r == query_row and c == query_glass:
return min(1.0, w)
if w > 1.0 and c >= (r-d) and c <= query_glass:
w = (w-1.0)/2.0
if q and q[-1][1] == c:
q[-1][2] += w
else:
q.append([r+1, c, w])
q.append([r+1, c+1, w])
return 0.0
__________________________________________________________________________________________________
sample 13092 kb submission
class Solution:
def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:
cur = [poured]
for i in range(query_row):
curLen = len(cur)
next = [0] * (curLen + 1)
for j in range(len(cur)):
if cur[j] <= 1:
continue
halfFall = (cur[j] - 1) / 2
next[j] += halfFall
next[j+1] += halfFall
cur = next
return min(cur[query_glass], 1)
__________________________________________________________________________________________________
|
d344c9eefd418478922306cffe6800b023645944
|
0ba2e5061577f6286ff9265ef1df9aca96769445
|
/cryptography/operations/ExtendedGreatestCommonDivisor.py
|
d597961f7af9224604bee9f9da47baa059e12ba3
|
[
"CC0-1.0"
] |
permissive
|
ZoranPandovski/al-go-rithms
|
68d5d02f80a61de9baf8e50a81a52e7d0b3983a0
|
4ae6ba54e90af14af236e03e435eb0402dcac787
|
refs/heads/master
| 2023-09-04T16:04:04.321676
| 2023-06-06T15:22:16
| 2023-06-06T15:22:16
| 93,438,176
| 1,421
| 2,445
|
CC0-1.0
| 2023-06-15T14:24:28
| 2017-06-05T19:20:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
ExtendedGreatestCommonDivisor.py
|
# extended gcd and inverse mod
from math import gcd
def extgcd(a, b):
if (abs(a) >= abs(b)):
if a % b == 0:
v1 = [b, 0, 1]
return v1
else:
q = a // b
r = a % b
D, s, t = extgcd(b, r)
v2 = [D, t, s - t*q]
return v2
else:
return extgcd(abs(b), abs(a))
def modinv(a, n):
if (gcd(a, n) == 1):
_, _, m = extgcd(a, n)
return m % n
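# Illustrative examples (worked by hand from the definitions above):
#   extgcd(240, 46) -> [2, -9, 47], since gcd(240, 46) == 2 and 240*(-9) + 46*47 == 2
#   modinv(3, 11) -> 4, since gcd(3, 11) == 1 and (3 * 4) % 11 == 1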
|
a7cca741c8730b192bc630a9f00cf03b623cb0ce
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/dev/breeze/src/airflow_breeze/utils/shared_options.py
|
6e9b1e5b54633a335523739a598c210bf07f4e87
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
shared_options.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
from airflow_breeze.utils.coertions import coerce_bool_value
def __get_default_bool_value(env_var: str) -> bool:
string_val = os.environ.get(env_var, "")
return coerce_bool_value(string_val)
__verbose_value: bool = __get_default_bool_value("VERBOSE")
def set_verbose(verbose: bool):
global __verbose_value
__verbose_value = verbose
def get_verbose(verbose_override: bool | None = None) -> bool:
if verbose_override is None:
return __verbose_value
return verbose_override
__dry_run_value: bool = __get_default_bool_value("DRY_RUN")
def set_dry_run(dry_run: bool):
global __dry_run_value
__dry_run_value = dry_run
def get_dry_run(dry_run_override: bool | None = None) -> bool:
if dry_run_override is None:
return __dry_run_value
return dry_run_override
__forced_answer: str | None = None
def set_forced_answer(answer: str | None):
global __forced_answer
__forced_answer = answer
def get_forced_answer(answer_override: str | None = None) -> str | None:
if answer_override is None:
return __forced_answer
return answer_override
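# Illustrative usage (minimal sketch): after set_verbose(True), get_verbose() returns
# True, while get_verbose(verbose_override=False) returns False, because an explicit
# override always wins over the module-level default read from the environment.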
|
a8515a976877ab4e1084fb6f9f519e803f00502a
|
70238f403826253b36323e0c4700795788c61187
|
/thirdparty/iPlug2/Scripts/select_host.py
|
bd6b9439b1b2ae35fa0a7f2dc9e593b3a295a320
|
[
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tommitytom/RetroPlug
|
d4d1c9d671cec7373bf3e27030bc0d29b46191be
|
40c6f01352d50cb31b1d4f31b7cc17fd2cf47ce8
|
refs/heads/main
| 2023-06-30T00:28:58.282288
| 2022-11-01T02:03:57
| 2022-11-01T02:03:57
| 185,368,230
| 262
| 11
|
MIT
| 2021-08-15T04:15:59
| 2019-05-07T09:23:48
|
C++
|
UTF-8
|
Python
| false
| false
| 4,256
|
py
|
select_host.py
|
#!/usr/bin/python
# python script to modify common-win.props to select a host for debugging VST plug-ins
SAVIHOST_PATH = "$(ProgramFiles)\\vsthost\savihost.exe"
LIVE_PATH = "$(ProgramData)\Ableton\Live 10 Suite\Program\Ableton Live 10 Suite.exe"
FL_PATH = "$(ProgramFiles)\Image-Line\FL Studio 20\FL.exe"
CUBASE_PATH = "$(ProgramFiles)\Steinberg\Cubase 10.5\Cubase10.5.exe"
S1_PATH = "$(ProgramFiles)\PreSonus\Studio One 4\Studio One.exe"
REAPER_PATH = "$(ProgramFiles)\REAPER\\reaper.exe"
SONAR_PATH = "$(ProgramFiles)\Cakewalk\SONAR X3 Producer\SONARPDR.exe"
VST3TESTHOST_PATH = "$(ProgramFiles)\Steinberg\VST3PluginTestHost\VST3PluginTestHost.exe"
SAVIHOST_X64_PATH = "$(ProgramW6432)\\vsthost\savihost.exe"
LIVE_X64_PATH = "$(ProgramData)\Ableton\Live 10 Suite\Program\Ableton Live 10 Suite.exe"
FL_X64_PATH = "$(ProgramFiles)\Image-Line\FL Studio 20\FL64.exe"
CUBASE_X64_PATH = "$(ProgramW6432)\Steinberg\Cubase 10.5\Cubase10.5.exe"
S1_X64_PATH = "$(ProgramW6432)\PreSonus\Studio One 4\Studio One.exe"
REAPER_X64_PATH = "$(ProgramW6432)\REAPER (x64)\\reaper.exe"
SONAR_X64_PATH = "$(ProgramW6432)\Cakewalk\SONAR X3 Producer\SONARPDR.exe"
VST3TESTHOST_X64_PATH = "$(ProgramW6432)\Steinberg\VST3PluginTestHost\VST3PluginTestHost.exe"
SAVIHOST_ARGS = "$(TargetPath) /noload /nosave /noexc /noft"
REAPER_ARGS = "$(SolutionDir)$(SolutionName).RPP"
PATHS = [SAVIHOST_PATH, LIVE_PATH, FL_PATH, CUBASE_PATH, S1_PATH, REAPER_PATH, SONAR_PATH, VST3TESTHOST_PATH]
PATHS_X64 = [SAVIHOST_X64_PATH, LIVE_X64_PATH, FL_X64_PATH, CUBASE_X64_PATH, S1_X64_PATH, REAPER_X64_PATH, SONAR_X64_PATH, VST3TESTHOST_X64_PATH]
ARGS = [SAVIHOST_ARGS, "", "", "", "", REAPER_ARGS, "", ""]
from xml.dom import minidom as md
doc = md.parse('..\common-win.props')
print("HOST options:")
print(" 1 - Savihost")
print(" 2 - Live")
print(" 3 - FLStudio")
print(" 4 - Cubase")
print(" 5 - StudioOne")
print(" 6 - Reaper")
print(" 7 - Sonar")
print(" 8 - VST3 Test Host")
# vst2/32bit
print("choose a host to use for 32bit VST2 debugging...")
choice = int(raw_input("Choice>>"))
elem = doc.getElementsByTagName('VST2_32_HOST_PATH')[0]
for child in elem.childNodes:
elem.removeChild(child)
if PATHS[choice-1]:
text = doc.createTextNode(PATHS[choice-1])
elem.appendChild(text)
elem = doc.getElementsByTagName('VST2_32_COMMAND_ARGS')[0]
for child in elem.childNodes:
elem.removeChild(child)
if ARGS[choice-1]:
text = doc.createTextNode(ARGS[choice-1])
elem.appendChild(text)
# vst2/64bit
print("choose a host to use for 64bit VST2 debugging...")
choice = int(raw_input("Choice>>"))
elem = doc.getElementsByTagName('VST2_64_HOST_PATH')[0]
for child in elem.childNodes:
elem.removeChild(child)
if PATHS_X64[choice-1]:
text = doc.createTextNode(PATHS_X64[choice-1])
elem.appendChild(text)
elem = doc.getElementsByTagName('VST2_64_COMMAND_ARGS')[0]
for child in elem.childNodes:
elem.removeChild(child)
if ARGS[choice-1]:
text = doc.createTextNode(ARGS[choice-1])
elem.appendChild(text)
# vst3/32bit
print("choose a host to use for 32bit VST3 debugging...")
choice = int(raw_input("Choice>>"))
elem = doc.getElementsByTagName('VST3_32_HOST_PATH')[0]
for child in elem.childNodes:
elem.removeChild(child)
if PATHS[choice-1]:
text = doc.createTextNode(PATHS[choice-1])
elem.appendChild(text)
elem = doc.getElementsByTagName('VST3_32_COMMAND_ARGS')[0]
for child in elem.childNodes:
elem.removeChild(child)
if ARGS[choice-1]:
text = doc.createTextNode(ARGS[choice-1])
elem.appendChild(text)
# vst3/64bit
print("choose a host to use for 64bit VST3 debugging...")
choice = int(raw_input("Choice>>"))
elem = doc.getElementsByTagName('VST3_64_HOST_PATH')[0]
for child in elem.childNodes:
elem.removeChild(child)
if PATHS_X64[choice-1]:
text = doc.createTextNode(PATHS_X64[choice-1])
elem.appendChild(text)
elem = doc.getElementsByTagName('VST3_64_COMMAND_ARGS')[0]
for child in elem.childNodes:
elem.removeChild(child)
if ARGS[choice-1]:
text = doc.createTextNode(ARGS[choice-1])
elem.appendChild(text)
#elem = doc.getElementsByTagName('COPY_VST2')
#elem[0].firstChild.nodeValue = (choice != 1)
xml_file = open('../common-win.props', "w")
doc.writexml(xml_file, encoding="utf-8")
xml_file.close()
print("now restart visual studio...");
|
59de5ec87ad56d78ff8a8c5995e9851ab42eec31
|
d795a741bf0fadaa36d48cc758d9b750953539e4
|
/railroad.py
|
33fa5770a86ead46147971877da57a9d235b90b3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tabatkins/railroad-diagrams
|
9e3f99981025bd1c7c718a221c7669a8aab796e2
|
c3a16b9dcb06f5d0ae2260f8414136917871d4c5
|
refs/heads/gh-pages
| 2023-02-22T02:17:40.209289
| 2023-02-14T21:23:30
| 2023-02-14T21:23:30
| 5,742,095
| 1,464
| 156
|
MIT
| 2022-06-24T23:56:13
| 2012-09-09T22:03:24
|
Python
|
UTF-8
|
Python
| false
| false
| 49,093
|
py
|
railroad.py
|
# -*- coding: utf-8 -*-
from __future__ import annotations
import math as Math
import sys
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional as Opt,
Sequence as Seq,
Tuple,
Type,
TypeVar,
Union,
)
T = TypeVar("T")
Node = Union[str, DiagramItem] # pylint: disable=used-before-assignment
WriterF = Callable[[str], Any]
WalkerF = Callable[[DiagramItem], Any] # pylint: disable=used-before-assignment
AttrsT = Dict[str, Any]
# Display constants
DEBUG = False # if true, writes some debug information into attributes
VS = 8 # minimum vertical separation between things. For a 3px stroke, must be at least 4
AR = 10 # radius of arcs
DIAGRAM_CLASS = "railroad-diagram" # class to put on the root <svg>
STROKE_ODD_PIXEL_LENGTH = (
True # is the stroke width an odd (1px, 3px, etc) pixel length?
)
INTERNAL_ALIGNMENT = (
"center" # how to align items when they have extra space. left/right/center
)
CHAR_WIDTH = 8.5 # width of each monospace character. play until you find the right value for your font
COMMENT_CHAR_WIDTH = 7 # comments are in smaller text by default
def escapeAttr(val: Union[str, float]) -> str:
if isinstance(val, str):
return val.replace("&", "&").replace("'", "'").replace('"', """)
return f"{val:g}"
def escapeHtml(val: str) -> str:
return escapeAttr(val).replace("<", "<")
def determineGaps(outer: float, inner: float) -> Tuple[float, float]:
diff = outer - inner
if INTERNAL_ALIGNMENT == "left":
return 0, diff
elif INTERNAL_ALIGNMENT == "right":
return diff, 0
else:
return diff / 2, diff / 2
def doubleenumerate(seq: Seq[T]) -> Generator[Tuple[int, int, T], None, None]:
length = len(list(seq))
for i, item in enumerate(seq):
yield i, i - length, item
def addDebug(el: DiagramItem) -> None:
if not DEBUG:
return
el.attrs["data-x"] = "{0} w:{1} h:{2}/{3}/{4}".format(
type(el).__name__, el.width, el.up, el.height, el.down
)
class DiagramItem:
def __init__(self, name: str, attrs: Opt[AttrsT] = None, text: Opt[Node] = None):
self.name = name
# up = distance it projects above the entry line
self.up: float = 0
# height = distance between the entry/exit lines
self.height: float = 0
# down = distance it projects below the exit line
self.down: float = 0
# width = distance between the entry/exit lines horizontally
self.width: float = 0
# Whether the item is okay with being snug against another item or not
self.needsSpace = False
# DiagramItems pull double duty as SVG elements.
self.attrs: AttrsT = attrs or {}
# Subclasses store their meaningful children as .item or .items;
# .children instead stores their formatted SVG nodes.
self.children: List[Union[Node, Path, Style]] = [text] if text else []
def format(self, x: float, y: float, width: float) -> DiagramItem:
raise NotImplementedError # Virtual
def addTo(self, parent: DiagramItem) -> DiagramItem:
parent.children.append(self)
return self
def writeSvg(self, write: WriterF) -> None:
write("<{0}".format(self.name))
for name, value in sorted(self.attrs.items()):
write(' {0}="{1}"'.format(name, escapeAttr(value)))
write(">")
if self.name in ["g", "svg"]:
write("\n")
for child in self.children:
if isinstance(child, (DiagramItem, Path, Style)):
child.writeSvg(write)
else:
write(escapeHtml(child))
write("</{0}>".format(self.name))
def walk(self, cb: WalkerF) -> None:
cb(self)
class DiagramMultiContainer(DiagramItem):
def __init__(
self,
name: str,
items: Seq[Node],
attrs: Opt[Dict[str, str]] = None,
text: Opt[str] = None,
):
DiagramItem.__init__(self, name, attrs, text)
self.items: List[DiagramItem] = [wrapString(item) for item in items]
def format(self, x: float, y: float, width: float) -> DiagramItem:
raise NotImplementedError # Virtual
def walk(self, cb: WalkerF) -> None:
cb(self)
for item in self.items:
item.walk(cb)
class Path:
def __init__(self, x: float, y: float):
self.x = x
self.y = y
self.attrs = {"d": f"M{x} {y}"}
def m(self, x: float, y: float) -> Path:
self.attrs["d"] += f"m{x} {y}"
return self
def l(self, x: float, y: float) -> Path:
self.attrs["d"] += f"l{x} {y}"
return self
def h(self, val: float) -> Path:
self.attrs["d"] += f"h{val}"
return self
def right(self, val: float) -> Path:
return self.h(max(0, val))
def left(self, val: float) -> Path:
return self.h(-max(0, val))
def v(self, val: float) -> Path:
self.attrs["d"] += f"v{val}"
return self
def down(self, val: float) -> Path:
return self.v(max(0, val))
def up(self, val: float) -> Path:
return self.v(-max(0, val))
def arc_8(self, start: str, dir: str) -> Path:
# 1/8 of a circle
arc = AR
s2 = 1 / Math.sqrt(2) * arc
s2inv = arc - s2
sweep = "1" if dir == "cw" else "0"
path = f"a {arc} {arc} 0 0 {sweep} "
sd = start + dir
offset: List[float]
if sd == "ncw":
offset = [s2, s2inv]
elif sd == "necw":
offset = [s2inv, s2]
elif sd == "ecw":
offset = [-s2inv, s2]
elif sd == "secw":
offset = [-s2, s2inv]
elif sd == "scw":
offset = [-s2, -s2inv]
elif sd == "swcw":
offset = [-s2inv, -s2]
elif sd == "wcw":
offset = [s2inv, -s2]
elif sd == "nwcw":
offset = [s2, -s2inv]
elif sd == "nccw":
offset = [-s2, s2inv]
elif sd == "nwccw":
offset = [-s2inv, s2]
elif sd == "wccw":
offset = [s2inv, s2]
elif sd == "swccw":
offset = [s2, s2inv]
elif sd == "sccw":
offset = [s2, -s2inv]
elif sd == "seccw":
offset = [s2inv, -s2]
elif sd == "eccw":
offset = [-s2inv, -s2]
elif sd == "neccw":
offset = [-s2, -s2inv]
path += " ".join(str(x) for x in offset)
self.attrs["d"] += path
return self
def arc(self, sweep: str) -> Path:
x = AR
y = AR
if sweep[0] == "e" or sweep[1] == "w":
x *= -1
if sweep[0] == "s" or sweep[1] == "n":
y *= -1
cw = 1 if sweep in ("ne", "es", "sw", "wn") else 0
self.attrs["d"] += f"a{AR} {AR} 0 0 {cw} {x} {y}"
return self
def addTo(self, parent: DiagramItem) -> Path:
parent.children.append(self)
return self
def writeSvg(self, write: WriterF) -> None:
write("<path")
for name, value in sorted(self.attrs.items()):
write(f' {name}="{escapeAttr(value)}"')
write(" />")
def format(self) -> Path:
self.attrs["d"] += "h.5"
return self
def __repr__(self) -> str:
return f"Path({repr(self.x)}, {repr(self.y)})"
def wrapString(value: Node) -> DiagramItem:
return value if isinstance(value, DiagramItem) else Terminal(value)
DEFAULT_STYLE = """\
svg.railroad-diagram {
background-color:hsl(30,20%,95%);
}
svg.railroad-diagram path {
stroke-width:3;
stroke:black;
fill:rgba(0,0,0,0);
}
svg.railroad-diagram text {
font:bold 14px monospace;
text-anchor:middle;
}
svg.railroad-diagram text.label{
text-anchor:start;
}
svg.railroad-diagram text.comment{
font:italic 12px monospace;
}
svg.railroad-diagram rect{
stroke-width:3;
stroke:black;
fill:hsl(120,100%,90%);
}
svg.railroad-diagram rect.group-box {
stroke: gray;
stroke-dasharray: 10 5;
fill: none;
}
"""
class Style:
def __init__(self, css: str):
self.css = css
def __repr__(self) -> str:
return f"Style({repr(self.css)})"
def addTo(self, parent: DiagramItem) -> Style:
parent.children.append(self)
return self
def format(self) -> Style:
return self
def writeSvg(self, write: WriterF) -> None:
# Write included stylesheet as CDATA. See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/style
cdata = "/* <![CDATA[ */\n{css}\n/* ]]> */\n".format(css=self.css)
write("<style>{cdata}</style>".format(cdata=cdata))
class Diagram(DiagramMultiContainer):
def __init__(self, *items: Node, **kwargs: str):
# Accepts a type=[simple|complex] kwarg
DiagramMultiContainer.__init__(
self,
"svg",
list(items),
{
"class": DIAGRAM_CLASS,
},
)
self.type = kwargs.get("type", "simple")
if items and not isinstance(items[0], Start):
self.items.insert(0, Start(self.type))
if items and not isinstance(items[-1], End):
self.items.append(End(self.type))
self.up = 0
self.down = 0
self.height = 0
self.width = 0
for item in self.items:
if isinstance(item, Style):
continue
self.width += item.width + (20 if item.needsSpace else 0)
self.up = max(self.up, item.up - self.height)
self.height += item.height
self.down = max(self.down - item.height, item.down)
if self.items[0].needsSpace:
self.width -= 10
if self.items[-1].needsSpace:
self.width -= 10
self.formatted = False
def __repr__(self) -> str:
items = ", ".join(map(repr, self.items[1:-1]))
pieces = [] if not items else [items]
if self.type != "simple":
pieces.append(f"type={repr(self.type)}")
return f'Diagram({", ".join(pieces)})'
def format(
self,
paddingTop: float = 20,
paddingRight: Opt[float] = None,
paddingBottom: Opt[float] = None,
paddingLeft: Opt[float] = None,
) -> Diagram:
if paddingRight is None:
paddingRight = paddingTop
if paddingBottom is None:
paddingBottom = paddingTop
if paddingLeft is None:
paddingLeft = paddingRight
assert paddingRight is not None
assert paddingBottom is not None
assert paddingLeft is not None
x = paddingLeft
y = paddingTop + self.up
g = DiagramItem("g")
if STROKE_ODD_PIXEL_LENGTH:
g.attrs["transform"] = "translate(.5 .5)"
for item in self.items:
if item.needsSpace:
Path(x, y).h(10).addTo(g)
x += 10
item.format(x, y, item.width).addTo(g)
x += item.width
y += item.height
if item.needsSpace:
Path(x, y).h(10).addTo(g)
x += 10
self.attrs["width"] = str(self.width + paddingLeft + paddingRight)
self.attrs["height"] = str(
self.up + self.height + self.down + paddingTop + paddingBottom
)
self.attrs["viewBox"] = f"0 0 {self.attrs['width']} {self.attrs['height']}"
g.addTo(self)
self.formatted = True
return self
def writeSvg(self, write: WriterF) -> None:
if not self.formatted:
self.format()
return DiagramItem.writeSvg(self, write)
def writeStandalone(self, write: WriterF, css: str|None = None) -> None:
if not self.formatted:
self.format()
if css is None:
css = DEFAULT_STYLE
Style(css).addTo(self)
self.attrs["xmlns"] = "http://www.w3.org/2000/svg"
self.attrs['xmlns:xlink'] = "http://www.w3.org/1999/xlink"
DiagramItem.writeSvg(self, write)
self.children.pop()
del self.attrs["xmlns"]
del self.attrs["xmlns:xlink"]
class Sequence(DiagramMultiContainer):
def __init__(self, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
self.needsSpace = True
self.up = 0
self.down = 0
self.height = 0
self.width = 0
for item in self.items:
self.width += item.width + (20 if item.needsSpace else 0)
self.up = max(self.up, item.up - self.height)
self.height += item.height
self.down = max(self.down - item.height, item.down)
if self.items[0].needsSpace:
self.width -= 10
if self.items[-1].needsSpace:
self.width -= 10
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return f"Sequence({items})"
def format(self, x: float, y: float, width: float) -> Sequence:
leftGap, rightGap = determineGaps(width, self.width)
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
for i, item in enumerate(self.items):
if item.needsSpace and i > 0:
Path(x, y).h(10).addTo(self)
x += 10
item.format(x, y, item.width).addTo(self)
x += item.width
y += item.height
if item.needsSpace and i < len(self.items) - 1:
Path(x, y).h(10).addTo(self)
x += 10
return self
class Stack(DiagramMultiContainer):
def __init__(self, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
self.needsSpace = True
self.width = max(
item.width + (20 if item.needsSpace else 0) for item in self.items
)
# pretty sure that space calc is totes wrong
if len(self.items) > 1:
self.width += AR * 2
self.up = self.items[0].up
self.down = self.items[-1].down
self.height = 0
last = len(self.items) - 1
for i, item in enumerate(self.items):
self.height += item.height
if i > 0:
self.height += max(AR * 2, item.up + VS)
if i < last:
self.height += max(AR * 2, item.down + VS)
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return f"Stack({items})"
def format(self, x: float, y: float, width: float) -> Stack:
leftGap, rightGap = determineGaps(width, self.width)
Path(x, y).h(leftGap).addTo(self)
x += leftGap
xInitial = x
if len(self.items) > 1:
Path(x, y).h(AR).addTo(self)
x += AR
innerWidth = self.width - AR * 2
else:
innerWidth = self.width
for i, item in enumerate(self.items):
item.format(x, y, innerWidth).addTo(self)
x += innerWidth
y += item.height
if i != len(self.items) - 1:
(
Path(x, y)
.arc("ne")
.down(max(0, item.down + VS - AR * 2))
.arc("es")
.left(innerWidth)
.arc("nw")
.down(max(0, self.items[i + 1].up + VS - AR * 2))
.arc("ws")
.addTo(self)
)
y += max(item.down + VS, AR * 2) + max(
self.items[i + 1].up + VS, AR * 2
)
x = xInitial + AR
if len(self.items) > 1:
Path(x, y).h(AR).addTo(self)
x += AR
Path(x, y).h(rightGap).addTo(self)
return self
class OptionalSequence(DiagramMultiContainer):
def __new__(cls, *items: Node) -> Any:
if len(items) <= 1:
return Sequence(*items)
else:
return super(OptionalSequence, cls).__new__(cls)
def __init__(self, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
self.needsSpace = False
self.width = 0
self.up = 0
self.height = sum(item.height for item in self.items)
self.down = self.items[0].down
heightSoFar: float = 0
for i, item in enumerate(self.items):
self.up = max(self.up, max(AR * 2, item.up + VS) - heightSoFar)
heightSoFar += item.height
if i > 0:
self.down = (
max(
self.height + self.down,
heightSoFar + max(AR * 2, item.down + VS),
)
- self.height
)
itemWidth = item.width + (10 if item.needsSpace else 0)
if i == 0:
self.width += AR + max(itemWidth, AR)
else:
self.width += AR * 2 + max(itemWidth, AR) + AR
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return f"OptionalSequence({items})"
def format(self, x: float, y: float, width: float) -> OptionalSequence:
leftGap, rightGap = determineGaps(width, self.width)
Path(x, y).right(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).right(rightGap).addTo(self)
x += leftGap
upperLineY = y - self.up
last = len(self.items) - 1
for i, item in enumerate(self.items):
itemSpace = 10 if item.needsSpace else 0
itemWidth = item.width + itemSpace
if i == 0:
# Upper skip
(
Path(x, y)
.arc("se")
.up(y - upperLineY - AR * 2)
.arc("wn")
.right(itemWidth - AR)
.arc("ne")
.down(y + item.height - upperLineY - AR * 2)
.arc("ws")
.addTo(self)
)
# Straight line
(Path(x, y).right(itemSpace + AR).addTo(self))
item.format(x + itemSpace + AR, y, item.width).addTo(self)
x += itemWidth + AR
y += item.height
elif i < last:
# Upper skip
(
Path(x, upperLineY)
.right(AR * 2 + max(itemWidth, AR) + AR)
.arc("ne")
.down(y - upperLineY + item.height - AR * 2)
.arc("ws")
.addTo(self)
)
# Straight line
(Path(x, y).right(AR * 2).addTo(self))
item.format(x + AR * 2, y, item.width).addTo(self)
(
Path(x + item.width + AR * 2, y + item.height)
.right(itemSpace + AR)
.addTo(self)
)
# Lower skip
(
Path(x, y)
.arc("ne")
.down(item.height + max(item.down + VS, AR * 2) - AR * 2)
.arc("ws")
.right(itemWidth - AR)
.arc("se")
.up(item.down + VS - AR * 2)
.arc("wn")
.addTo(self)
)
x += AR * 2 + max(itemWidth, AR) + AR
y += item.height
else:
# Straight line
(Path(x, y).right(AR * 2).addTo(self))
item.format(x + AR * 2, y, item.width).addTo(self)
(
Path(x + AR * 2 + item.width, y + item.height)
.right(itemSpace + AR)
.addTo(self)
)
# Lower skip
(
Path(x, y)
.arc("ne")
.down(item.height + max(item.down + VS, AR * 2) - AR * 2)
.arc("ws")
.right(itemWidth - AR)
.arc("se")
.up(item.down + VS - AR * 2)
.arc("wn")
.addTo(self)
)
return self
class AlternatingSequence(DiagramMultiContainer):
def __new__(cls, *items: Node) -> AlternatingSequence:
if len(items) == 2:
return super(AlternatingSequence, cls).__new__(cls)
else:
raise Exception(
"AlternatingSequence takes exactly two arguments, but got {0} arguments.".format(
len(items)
)
)
def __init__(self, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
self.needsSpace = False
arc = AR
vert = VS
first = self.items[0]
second = self.items[1]
arcX = 1 / Math.sqrt(2) * arc * 2
arcY = (1 - 1 / Math.sqrt(2)) * arc * 2
crossY = max(arc, vert)
crossX = (crossY - arcY) + arcX
firstOut = max(
arc + arc, crossY / 2 + arc + arc, crossY / 2 + vert + first.down
)
self.up = firstOut + first.height + first.up
secondIn = max(arc + arc, crossY / 2 + arc + arc, crossY / 2 + vert + second.up)
self.down = secondIn + second.height + second.down
self.height = 0
firstWidth = (20 if first.needsSpace else 0) + first.width
secondWidth = (20 if second.needsSpace else 0) + second.width
self.width = 2 * arc + max(firstWidth, crossX, secondWidth) + 2 * arc
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return f"AlternatingSequence({items})"
def format(self, x: float, y: float, width: float) -> AlternatingSequence:
arc = AR
gaps = determineGaps(width, self.width)
Path(x, y).right(gaps[0]).addTo(self)
x += gaps[0]
Path(x + self.width, y).right(gaps[1]).addTo(self)
# bounding box
# Path(x+gaps[0], y).up(self.up).right(self.width).down(self.up+self.down).left(self.width).up(self.down).addTo(self)
first = self.items[0]
second = self.items[1]
# top
firstIn = self.up - first.up
firstOut = self.up - first.up - first.height
Path(x, y).arc("se").up(firstIn - 2 * arc).arc("wn").addTo(self)
first.format(x + 2 * arc, y - firstIn, self.width - 4 * arc).addTo(self)
Path(x + self.width - 2 * arc, y - firstOut).arc("ne").down(
firstOut - 2 * arc
).arc("ws").addTo(self)
# bottom
secondIn = self.down - second.down - second.height
secondOut = self.down - second.down
Path(x, y).arc("ne").down(secondIn - 2 * arc).arc("ws").addTo(self)
second.format(x + 2 * arc, y + secondIn, self.width - 4 * arc).addTo(self)
Path(x + self.width - 2 * arc, y + secondOut).arc("se").up(
secondOut - 2 * arc
).arc("wn").addTo(self)
# crossover
arcX = 1 / Math.sqrt(2) * arc * 2
arcY = (1 - 1 / Math.sqrt(2)) * arc * 2
crossY = max(arc, VS)
crossX = (crossY - arcY) + arcX
crossBar = (self.width - 4 * arc - crossX) / 2
(
Path(x + arc, y - crossY / 2 - arc)
.arc("ws")
.right(crossBar)
.arc_8("n", "cw")
.l(crossX - arcX, crossY - arcY)
.arc_8("sw", "ccw")
.right(crossBar)
.arc("ne")
.addTo(self)
)
(
Path(x + arc, y + crossY / 2 + arc)
.arc("wn")
.right(crossBar)
.arc_8("s", "ccw")
.l(crossX - arcX, -(crossY - arcY))
.arc_8("nw", "cw")
.right(crossBar)
.arc("se")
.addTo(self)
)
return self
class Choice(DiagramMultiContainer):
def __init__(self, default: int, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
assert default < len(items)
self.default = default
self.width = AR * 4 + max(item.width for item in self.items)
self.up = self.items[0].up
self.down = self.items[-1].down
self.height = self.items[default].height
for i, item in enumerate(self.items):
if i in [default - 1, default + 1]:
arcs = AR * 2
else:
arcs = AR
if i < default:
self.up += max(
arcs, item.height + item.down + VS + self.items[i + 1].up
)
elif i == default:
continue
else:
self.down += max(
arcs,
item.up + VS + self.items[i - 1].down + self.items[i - 1].height,
)
self.down -= self.items[default].height # already counted in self.height
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return "Choice(%r, %s)" % (self.default, items)
def format(self, x: float, y: float, width: float) -> Choice:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
innerWidth = self.width - AR * 4
default = self.items[self.default]
# Do the elements that curve above
above = self.items[: self.default][::-1]
if above:
distanceFromY = max(
AR * 2, default.up + VS + above[0].down + above[0].height
)
for i, ni, item in doubleenumerate(above):
Path(x, y).arc("se").up(distanceFromY - AR * 2).arc("wn").addTo(self)
item.format(x + AR * 2, y - distanceFromY, innerWidth).addTo(self)
Path(x + AR * 2 + innerWidth, y - distanceFromY + item.height).arc(
"ne"
).down(distanceFromY - item.height + default.height - AR * 2).arc(
"ws"
).addTo(
self
)
if ni < -1:
distanceFromY += max(
AR, item.up + VS + above[i + 1].down + above[i + 1].height
)
# Do the straight-line path.
Path(x, y).right(AR * 2).addTo(self)
self.items[self.default].format(x + AR * 2, y, innerWidth).addTo(self)
Path(x + AR * 2 + innerWidth, y + self.height).right(AR * 2).addTo(self)
# Do the elements that curve below
below = self.items[self.default + 1 :]
if below:
distanceFromY = max(
AR * 2, default.height + default.down + VS + below[0].up
)
for i, item in enumerate(below):
Path(x, y).arc("ne").down(distanceFromY - AR * 2).arc("ws").addTo(self)
item.format(x + AR * 2, y + distanceFromY, innerWidth).addTo(self)
Path(x + AR * 2 + innerWidth, y + distanceFromY + item.height).arc("se").up(
distanceFromY - AR * 2 + item.height - default.height
).arc("wn").addTo(self)
distanceFromY += max(
AR,
item.height
+ item.down
+ VS
+ (below[i + 1].up if i + 1 < len(below) else 0),
)
return self
class MultipleChoice(DiagramMultiContainer):
def __init__(self, default: int, type: str, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
assert 0 <= default < len(items)
assert type in ["any", "all"]
self.default = default
self.type = type
self.needsSpace = True
self.innerWidth = max(item.width for item in self.items)
self.width = 30 + AR + self.innerWidth + AR + 20
self.up = self.items[0].up
self.down = self.items[-1].down
self.height = self.items[default].height
for i, item in enumerate(self.items):
if i in [default - 1, default + 1]:
minimum = 10 + AR
else:
minimum = AR
if i < default:
self.up += max(
minimum, item.height + item.down + VS + self.items[i + 1].up
)
elif i == default:
continue
else:
self.down += max(
minimum,
item.up + VS + self.items[i - 1].down + self.items[i - 1].height,
)
self.down -= self.items[default].height # already counted in self.height
addDebug(self)
def __repr__(self) -> str:
items = ", ".join(repr(item) for item in self.items)
return f"MultipleChoice({repr(self.default)}, {repr(self.type)}, {items})"
def format(self, x: float, y: float, width: float) -> MultipleChoice:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
default = self.items[self.default]
# Do the elements that curve above
above = self.items[: self.default][::-1]
if above:
distanceFromY = max(
10 + AR, default.up + VS + above[0].down + above[0].height
)
for i, ni, item in doubleenumerate(above):
(Path(x + 30, y).up(distanceFromY - AR).arc("wn").addTo(self))
item.format(x + 30 + AR, y - distanceFromY, self.innerWidth).addTo(self)
(
Path(x + 30 + AR + self.innerWidth, y - distanceFromY + item.height)
.arc("ne")
.down(distanceFromY - item.height + default.height - AR - 10)
.addTo(self)
)
if ni < -1:
distanceFromY += max(
AR, item.up + VS + above[i + 1].down + above[i + 1].height
)
# Do the straight-line path.
Path(x + 30, y).right(AR).addTo(self)
self.items[self.default].format(x + 30 + AR, y, self.innerWidth).addTo(self)
Path(x + 30 + AR + self.innerWidth, y + self.height).right(AR).addTo(self)
# Do the elements that curve below
below = self.items[self.default + 1 :]
if below:
distanceFromY = max(
10 + AR, default.height + default.down + VS + below[0].up
)
for i, item in enumerate(below):
(Path(x + 30, y).down(distanceFromY - AR).arc("ws").addTo(self))
item.format(x + 30 + AR, y + distanceFromY, self.innerWidth).addTo(self)
(
Path(x + 30 + AR + self.innerWidth, y + distanceFromY + item.height)
.arc("se")
.up(distanceFromY - AR + item.height - default.height - 10)
.addTo(self)
)
distanceFromY += max(
AR,
item.height
+ item.down
+ VS
+ (below[i + 1].up if i + 1 < len(below) else 0),
)
text = DiagramItem("g", attrs={"class": "diagram-text"}).addTo(self)
DiagramItem(
"title",
text="take one or more branches, once each, in any order"
if self.type == "any"
else "take all branches, once each, in any order",
).addTo(text)
DiagramItem(
"path",
attrs={
"d": "M {x} {y} h -26 a 4 4 0 0 0 -4 4 v 12 a 4 4 0 0 0 4 4 h 26 z".format(
x=x + 30, y=y - 10
),
"class": "diagram-text",
},
).addTo(text)
DiagramItem(
"text",
text="1+" if self.type == "any" else "all",
attrs={"x": x + 15, "y": y + 4, "class": "diagram-text"},
).addTo(text)
DiagramItem(
"path",
attrs={
"d": "M {x} {y} h 16 a 4 4 0 0 1 4 4 v 12 a 4 4 0 0 1 -4 4 h -16 z".format(
x=x + self.width - 20, y=y - 10
),
"class": "diagram-text",
},
).addTo(text)
DiagramItem(
"text",
text="↺",
attrs={"x": x + self.width - 10, "y": y + 4, "class": "diagram-arrow"},
).addTo(text)
return self
class HorizontalChoice(DiagramMultiContainer):
def __new__(cls, *items: Node) -> Any:
if len(items) <= 1:
return Sequence(*items)
else:
return super(HorizontalChoice, cls).__new__(cls)
def __init__(self, *items: Node):
DiagramMultiContainer.__init__(self, "g", items)
allButLast = self.items[:-1]
middles = self.items[1:-1]
first = self.items[0]
last = self.items[-1]
self.needsSpace = False
self.width = (
AR # starting track
+ AR * 2 * (len(self.items) - 1) # in-between tracks
+ sum(x.width + (20 if x.needsSpace else 0) for x in self.items) # items
+ (AR if last.height > 0 else 0) # needs space to curve up
+ AR
) # ending track
# Always exits at entrance height
self.height = 0
# All but the last have a track running above them
self._upperTrack = max(AR * 2, VS, max(x.up for x in allButLast) + VS)
self.up = max(self._upperTrack, last.up)
# All but the first have a track running below them
# Last either straight-lines or curves up, so has different calculation
self._lowerTrack = max(
VS,
max(x.height + max(x.down + VS, AR * 2) for x in middles) if middles else 0,
last.height + last.down + VS,
)
if first.height < self._lowerTrack:
# Make sure there's at least 2*AR room between first exit and lower track
self._lowerTrack = max(self._lowerTrack, first.height + AR * 2)
self.down = max(self._lowerTrack, first.height + first.down)
addDebug(self)
def format(self, x: float, y: float, width: float) -> HorizontalChoice:
# Hook up the two sides if self is narrower than its stated width.
leftGap, rightGap = determineGaps(width, self.width)
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
first = self.items[0]
last = self.items[-1]
# upper track
upperSpan = (
sum(x.width + (20 if x.needsSpace else 0) for x in self.items[:-1])
+ (len(self.items) - 2) * AR * 2
- AR
)
(
Path(x, y)
.arc("se")
.up(self._upperTrack - AR * 2)
.arc("wn")
.h(upperSpan)
.addTo(self)
)
# lower track
lowerSpan = (
sum(x.width + (20 if x.needsSpace else 0) for x in self.items[1:])
+ (len(self.items) - 2) * AR * 2
+ (AR if last.height > 0 else 0)
- AR
)
lowerStart = x + AR + first.width + (20 if first.needsSpace else 0) + AR * 2
(
Path(lowerStart, y + self._lowerTrack)
.h(lowerSpan)
.arc("se")
.up(self._lowerTrack - AR * 2)
.arc("wn")
.addTo(self)
)
# Items
for [i, item] in enumerate(self.items):
# input track
if i == 0:
(Path(x, y).h(AR).addTo(self))
x += AR
else:
(
Path(x, y - self._upperTrack)
.arc("ne")
.v(self._upperTrack - AR * 2)
.arc("ws")
.addTo(self)
)
x += AR * 2
# item
itemWidth = item.width + (20 if item.needsSpace else 0)
item.format(x, y, itemWidth).addTo(self)
x += itemWidth
# output track
if i == len(self.items) - 1:
if item.height == 0:
(Path(x, y).h(AR).addTo(self))
else:
(Path(x, y + item.height).arc("se").addTo(self))
elif i == 0 and item.height > self._lowerTrack:
# Needs to arc up to meet the lower track, not down.
if item.height - self._lowerTrack >= AR * 2:
(
Path(x, y + item.height)
.arc("se")
.v(self._lowerTrack - item.height + AR * 2)
.arc("wn")
.addTo(self)
)
else:
# Not enough space to fit two arcs
# so just bail and draw a straight line for now.
(
Path(x, y + item.height)
.l(AR * 2, self._lowerTrack - item.height)
.addTo(self)
)
else:
(
Path(x, y + item.height)
.arc("ne")
.v(self._lowerTrack - item.height - AR * 2)
.arc("ws")
.addTo(self)
)
return self
def Optional(item: Node, skip: bool = False) -> Choice:
return Choice(0 if skip else 1, Skip(), item)
class OneOrMore(DiagramItem):
def __init__(self, item: Node, repeat: Opt[Node] = None):
DiagramItem.__init__(self, "g")
self.item = wrapString(item)
repeat = repeat or Skip()
self.rep = wrapString(repeat)
self.width = max(self.item.width, self.rep.width) + AR * 2
self.height = self.item.height
self.up = self.item.up
self.down = max(
AR * 2, self.item.down + VS + self.rep.up + self.rep.height + self.rep.down
)
self.needsSpace = True
addDebug(self)
def format(self, x: float, y: float, width: float) -> OneOrMore:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
# Draw item
Path(x, y).right(AR).addTo(self)
self.item.format(x + AR, y, self.width - AR * 2).addTo(self)
Path(x + self.width - AR, y + self.height).right(AR).addTo(self)
# Draw repeat arc
distanceFromY = max(
AR * 2, self.item.height + self.item.down + VS + self.rep.up
)
Path(x + AR, y).arc("nw").down(distanceFromY - AR * 2).arc("ws").addTo(self)
self.rep.format(x + AR, y + distanceFromY, self.width - AR * 2).addTo(self)
Path(x + self.width - AR, y + distanceFromY + self.rep.height).arc("se").up(
distanceFromY - AR * 2 + self.rep.height - self.item.height
).arc("en").addTo(self)
return self
def walk(self, cb: WalkerF) -> None:
cb(self)
self.item.walk(cb)
self.rep.walk(cb)
def __repr__(self) -> str:
return f"OneOrMore({repr(self.item)}, repeat={repr(self.rep)})"
def ZeroOrMore(item: Node, repeat: Opt[Node] = None, skip: bool = False) -> Choice:
result = Optional(OneOrMore(item, repeat), skip)
return result
class Group(DiagramItem):
def __init__(self, item: Node, label: Opt[Node] = None):
DiagramItem.__init__(self, "g")
self.item = wrapString(item)
self.label: Opt[DiagramItem]
if isinstance(label, DiagramItem):
self.label = label
elif label:
self.label = Comment(label)
else:
self.label = None
self.width = max(
self.item.width + (20 if self.item.needsSpace else 0),
self.label.width if self.label else 0,
AR * 2,
)
self.height = self.item.height
self.boxUp = max(self.item.up + VS, AR)
self.up = self.boxUp
if self.label:
self.up += self.label.up + self.label.height + self.label.down
self.down = max(self.item.down + VS, AR)
self.needsSpace = True
addDebug(self)
def format(self, x: float, y: float, width: float) -> Group:
leftGap, rightGap = determineGaps(width, self.width)
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y + self.height).h(rightGap).addTo(self)
x += leftGap
DiagramItem(
"rect",
{
"x": x,
"y": y - self.boxUp,
"width": self.width,
"height": self.boxUp + self.height + self.down,
"rx": AR,
"ry": AR,
"class": "group-box",
},
).addTo(self)
self.item.format(x, y, self.width).addTo(self)
if self.label:
self.label.format(
x,
y - (self.boxUp + self.label.down + self.label.height),
self.label.width,
).addTo(self)
return self
def walk(self, cb: WalkerF) -> None:
cb(self)
self.item.walk(cb)
if self.label:
self.label.walk(cb)
class Start(DiagramItem):
def __init__(self, type: str = "simple", label: Opt[str] = None):
DiagramItem.__init__(self, "g")
if label:
self.width = max(20, len(label) * CHAR_WIDTH + 10)
else:
self.width = 20
self.up = 10
self.down = 10
self.type = type
self.label = label
addDebug(self)
def format(self, x: float, y: float, width: float) -> Start:
path = Path(x, y - 10)
if self.type == "complex":
path.down(20).m(0, -10).right(self.width).addTo(self)
else:
path.down(20).m(10, -20).down(20).m(-10, -10).right(self.width).addTo(self)
if self.label:
DiagramItem(
"text",
attrs={"x": x, "y": y - 15, "style": "text-anchor:start"},
text=self.label,
).addTo(self)
return self
def __repr__(self) -> str:
return f"Start(type={repr(self.type)}, label={repr(self.label)})"
class End(DiagramItem):
def __init__(self, type: str = "simple"):
DiagramItem.__init__(self, "path")
self.width = 20
self.up = 10
self.down = 10
self.type = type
addDebug(self)
def format(self, x: float, y: float, width: float) -> End:
if self.type == "simple":
self.attrs["d"] = "M {0} {1} h 20 m -10 -10 v 20 m 10 -20 v 20".format(x, y)
elif self.type == "complex":
self.attrs["d"] = "M {0} {1} h 20 m 0 -10 v 20".format(x, y)
return self
def __repr__(self) -> str:
return f"End(type={repr(self.type)})"
class Terminal(DiagramItem):
def __init__(
self, text: str, href: Opt[str] = None, title: Opt[str] = None, cls: str = ""
):
DiagramItem.__init__(self, "g", {"class": " ".join(["terminal", cls])})
self.text = text
self.href = href
self.title = title
self.cls = cls
self.width = len(text) * CHAR_WIDTH + 20
self.up = 11
self.down = 11
self.needsSpace = True
addDebug(self)
def __repr__(self) -> str:
return f"Terminal({repr(self.text)}, href={repr(self.href)}, title={repr(self.title)}, cls={repr(self.cls)})"
def format(self, x: float, y: float, width: float) -> Terminal:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y).h(rightGap).addTo(self)
DiagramItem(
"rect",
{
"x": x + leftGap,
"y": y - 11,
"width": self.width,
"height": self.up + self.down,
"rx": 10,
"ry": 10,
},
).addTo(self)
text = DiagramItem(
"text", {"x": x + leftGap + self.width / 2, "y": y + 4}, self.text
)
if self.href is not None:
a = DiagramItem("a", {"xlink:href": self.href}, text).addTo(self)
text.addTo(a)
else:
text.addTo(self)
if self.title is not None:
DiagramItem("title", {}, self.title).addTo(self)
return self
class NonTerminal(DiagramItem):
def __init__(
self, text: str, href: Opt[str] = None, title: Opt[str] = None, cls: str = ""
):
DiagramItem.__init__(self, "g", {"class": " ".join(["non-terminal", cls])})
self.text = text
self.href = href
self.title = title
self.cls = cls
self.width = len(text) * CHAR_WIDTH + 20
self.up = 11
self.down = 11
self.needsSpace = True
addDebug(self)
def __repr__(self) -> str:
return f"NonTerminal({repr(self.text)}, href={repr(self.href)}, title={repr(self.title)}, cls={repr(self.cls)})"
def format(self, x: float, y: float, width: float) -> NonTerminal:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y).h(rightGap).addTo(self)
DiagramItem(
"rect",
{
"x": x + leftGap,
"y": y - 11,
"width": self.width,
"height": self.up + self.down,
},
).addTo(self)
text = DiagramItem(
"text", {"x": x + leftGap + self.width / 2, "y": y + 4}, self.text
)
if self.href is not None:
a = DiagramItem("a", {"xlink:href": self.href}, text).addTo(self)
text.addTo(a)
else:
text.addTo(self)
if self.title is not None:
DiagramItem("title", {}, self.title).addTo(self)
return self
class Comment(DiagramItem):
def __init__(
self, text: str, href: Opt[str] = None, title: Opt[str] = None, cls: str = ""
):
DiagramItem.__init__(self, "g", {"class": " ".join(["non-terminal", cls])})
self.text = text
self.href = href
self.title = title
self.cls = cls
self.width = len(text) * COMMENT_CHAR_WIDTH + 10
self.up = 8
self.down = 8
self.needsSpace = True
addDebug(self)
def __repr__(self) -> str:
return f"Comment({repr(self.text)}, href={repr(self.href)}, title={repr(self.title)}, cls={repr(self.cls)})"
def format(self, x: float, y: float, width: float) -> Comment:
leftGap, rightGap = determineGaps(width, self.width)
# Hook up the two sides if self is narrower than its stated width.
Path(x, y).h(leftGap).addTo(self)
Path(x + leftGap + self.width, y).h(rightGap).addTo(self)
text = DiagramItem(
"text",
{"x": x + leftGap + self.width / 2, "y": y + 5, "class": "comment"},
self.text,
)
if self.href is not None:
a = DiagramItem("a", {"xlink:href": self.href}, text).addTo(self)
text.addTo(a)
else:
text.addTo(self)
if self.title is not None:
DiagramItem("title", {}, self.title).addTo(self)
return self
class Skip(DiagramItem):
def __init__(self) -> None:
DiagramItem.__init__(self, "g")
self.width = 0
self.up = 0
self.down = 0
addDebug(self)
def format(self, x: float, y: float, width: float) -> Skip:
Path(x, y).right(width).addTo(self)
return self
def __repr__(self) -> str:
return "Skip()"
if __name__ == "__main__":
def add(name: str, diagram: DiagramItem) -> None:
sys.stdout.write(f"<h1>{escapeHtml(name)}</h1>\n")
diagram.writeSvg(sys.stdout.write)
sys.stdout.write("\n")
sys.stdout.write("<!doctype html><title>Test</title><body>")
with open("test.py", "r", encoding="utf-8") as fh:
exec(fh.read()) # pylint: disable=exec-used
sys.stdout.write(
"""
<style>
.blue text { fill: blue; }
</style>
"""
)
sys.stdout.write("</body></html>")
|
234c1dea838108f50869ddc9510fc891a92aeccc
|
eea5b171d492bd7f3f8caa800dce34cfa034bc8f
|
/src/pydoc_markdown/util/misc_test.py
|
40a05dd5ebec4fcfbc69cd780825d8bcec0d5df5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NiklasRosenstein/pydoc-markdown
|
356538ca2964ea7f324e5b71a23c15199f2bdb6b
|
e2e1acde6434779239878fcb60077a688289b97e
|
refs/heads/develop
| 2023-09-03T07:30:25.142166
| 2023-07-31T03:42:04
| 2023-07-31T03:42:04
| 40,528,750
| 421
| 142
|
NOASSERTION
| 2023-07-31T03:42:06
| 2015-08-11T07:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
misc_test.py
|
from pydoc_markdown.util.misc import escape_except_blockquotes
def test__escape_except_blockquotes() -> None:
assert (
escape_except_blockquotes(
"""
1 < 2?
```
Yes, 1 < 2.
```
"""
)
== (
"""
1 < 2?
```
Yes, 1 < 2.
```
"""
)
)
|
9ea29ddcf6541d34052b77f52e9bf01ed2a4c9c1
|
61096fcd8c78814a17933adb3d35299d93945ba0
|
/src/09-built-on-asyncio/the_trio/prod_trio.py
|
90723ff8fc2070cc65d6901dc179ba114c9ad514
|
[
"MIT"
] |
permissive
|
talkpython/async-techniques-python-course
|
88261a7fa4deb95d262f475662ab41e0e836f3d7
|
3780f9ea2ebe89b2f4e01191927594c8d1c33fb0
|
refs/heads/master
| 2023-08-06T06:57:10.804176
| 2022-10-21T14:55:16
| 2022-10-21T14:55:16
| 145,053,438
| 441
| 247
|
MIT
| 2022-01-24T18:48:55
| 2018-08-17T00:42:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
prod_trio.py
|
import datetime
import colorama
import random
import trio
async def main():
t0 = datetime.datetime.now()
print(colorama.Fore.WHITE + "App started.", flush=True)
"""
trio.Queue was removed in v0.11.0:
- Replacing the call to trio.Queue() by trio.open_memory_channel()
- Using a MemorySendChannel object in generate_data function
- Using a MemoryReceiveChannel object in process_data function
- Updating requirements.txt with trio v0.16.0 and trio_asyncio v0.11.0
"""
send_channel, receive_channel = trio.open_memory_channel(max_buffer_size=10)
with trio.move_on_after(5):
async with trio.open_nursery() as nursery:
nursery.start_soon(generate_data, 20, send_channel, name='Prod 1')
nursery.start_soon(generate_data, 20, send_channel, name='Prod 2')
nursery.start_soon(process_data, 40, receive_channel, name='Consumer')
dt = datetime.datetime.now() - t0
print(colorama.Fore.WHITE + "App exiting, total time: {:,.2f} sec.".format(
dt.total_seconds()), flush=True)
async def generate_data(num: int, data: trio.MemorySendChannel):
for idx in range(1, num + 1):
item = idx*idx
await data.send((item, datetime.datetime.now()))
print(colorama.Fore.YELLOW + f" -- generated item {idx}", flush=True)
await trio.sleep(random.random() + .5)
async def process_data(num: int, data: trio.MemoryReceiveChannel):
processed = 0
while processed < num:
item = await data.receive()
processed += 1
value = item[0]
t = item[1]
dt = datetime.datetime.now() - t
print(colorama.Fore.CYAN +
" +++ Processed value {} after {:,.2f} sec.".format(
value, dt.total_seconds()), flush=True)
await trio.sleep(.5)
if __name__ == '__main__':
trio.run(main)
|
260b4ae5b99dcb726a35531e4618a6e5ec4a4a57
|
df1654f9c70184c04206616b446ce3cde9e737a8
|
/src/scripts/dev_tools/gen_mp_monty.py
|
44ce2d1fb606ef05fc6f9fd8869d9dbf1ae5d1b3
|
[
"BSD-2-Clause"
] |
permissive
|
randombit/botan
|
cade596b58b74c12fb70a58baf1a40bf2f5b1b85
|
560aec3a8bfa2456cc309bac478aca9ae53f0fff
|
refs/heads/master
| 2023-09-01T07:01:06.013013
| 2023-09-01T03:07:14
| 2023-09-01T03:07:14
| 8,608,254
| 2,362
| 686
|
BSD-2-Clause
| 2023-09-14T19:18:13
| 2013-03-06T16:51:39
|
C++
|
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
gen_mp_monty.py
|
#!/usr/bin/env python3
import sys
import datetime
# (C) 2018,2023 Jack Lloyd
# Botan is released under the Simplified BSD License (see license.txt)
# Used to generate src/lib/math/mp/mp_monty_n.cpp
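# Hedged usage sketch, inferred from main() below rather than from Botan's docs:
#   ./gen_mp_monty.py                  # emits Montgomery-redc routines for the default sizes 4,6,8,16,24,32
#   ./gen_mp_monty.py 4 8 > out.cpp    # emits only the word sizes given on the command line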
def monty_redc_code(n):
lines = []
lines.append("word w2 = 0, w1 = 0, w0 = 0;")
lines.append("w0 = z[0];")
lines.append("ws[0] = w0 * p_dash;")
lines.append("word3_muladd(&w2, &w1, &w0, ws[0], p[0]);")
lines.append("w0 = w1;")
lines.append("w1 = w2;")
lines.append("w2 = 0;")
for i in range(1, n):
for j in range(0, i):
lines.append("word3_muladd(&w2, &w1, &w0, ws[%d], p[%d]);" % (j, i-j))
lines.append("word3_add(&w2, &w1, &w0, z[%d]);" % (i))
lines.append("ws[%d] = w0 * p_dash;" % (i))
lines.append("word3_muladd(&w2, &w1, &w0, ws[%d], p[0]);" % (i))
lines.append("w0 = w1;")
lines.append("w1 = w2;")
lines.append("w2 = 0;")
for i in range(0, n - 1):
for j in range(i + 1, n):
lines.append("word3_muladd(&w2, &w1, &w0, ws[%d], p[%d]);" % (j, n + i-j))
lines.append("word3_add(&w2, &w1, &w0, z[%d]);" % (n+i))
lines.append("ws[%d] = w0;" % (i))
lines.append("w0 = w1;")
lines.append("w1 = w2;")
lines.append("w2 = 0;")
lines.append("word3_add(&w2, &w1, &w0, z[%d]);" % (2*n-1));
lines.append("ws[%d] = w0;" % (n - 1))
lines.append("ws[%d] = w1;" % (n))
lines.append("word borrow = bigint_sub3(z, ws, %d + 1, p, %d);" % (n, n))
lines.append("CT::conditional_assign_mem(borrow, z, ws, %d);" % (n))
lines.append("clear_mem(z + %d, %d);" % (n, n))
for line in lines:
print(" %s" % (line))
def main(args = None):
if args is None:
args = sys.argv
if len(args) <= 1:
sizes = [4, 6, 8, 16, 24, 32]
else:
sizes = map(int, args[1:])
print("""/*
* This file was automatically generated by %s on %s
* All manual changes will be lost. Edit the script instead.
*
* Botan is released under the Simplified BSD License (see license.txt)
*/
#include <botan/internal/mp_core.h>
#include <botan/internal/ct_utils.h>
namespace Botan {
""" % (sys.argv[0], datetime.date.today().strftime("%Y-%m-%d")))
for n in sizes:
print("void bigint_monty_redc_%d(word z[%d], const word p[%d], word p_dash, word ws[]) {" % (n, 2*n, n))
monty_redc_code(n)
print("}\n")
print("} // namespace Botan")
return 0
if __name__ == '__main__':
sys.exit(main())
|
3aadd3aaeddceb53b63ab5c661263d915cf859a9
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQMOffline/Trigger/python/MuonMonitor_cfi.py
|
da4f079ad4cbe40a8630b2f69cbbb848143a1895
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
MuonMonitor_cfi.py
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.muonMonitoring_cfi import muonMonitoring
hltMuonmonitoring = muonMonitoring.clone(
FolderName = 'HLT/Muon/TrkMu16_DoubleTrkMu6NoFiltersNoVtx/',
met = "pfMet", # pfMet
muons = "muons", # while pfIsolatedElectronsEI are reco::PFCandidate !
nmuons = 0,
histoPSet = dict(
lsPSet = dict(
nbins = 250 ,
xmin = 0.,
xmax = 2500.),
muonPSet = dict(
nbins = 500 , ### THIS SHOULD BE VARIABLE BINNING !!!!!
xmin = 0.0,
xmax = 500),
),
numGenericTriggerEventPSet = dict(
andOr = False,
#dbLabel = "ExoDQMTrigger", # it does not exist yet, we should consider the possibility of using the DB, but as it is now it will need a label per path !
andOrHlt = True,# True:=OR; False:=AND
hltInputTag = "TriggerResults::HLT" ,
hltPaths = ["HLT_TrkMu16_DoubleTrkMu6NoFiltersNoVtx_v*"], # HLT_ZeroBias_v*
#hltDBKey = "EXO_HLT_MET",
errorReplyHlt = False,
verbosityLevel = 1),
denGenericTriggerEventPSet = dict(
andOr = False,
andOrHlt = True,
hltInputTag = "TriggerResults::HLT",
hltPaths = [""], # HLT_ZeroBias_v*
errorReplyHlt = False,
dcsInputTag = "scalersRawToDigi",
dcsRecordInputTag = "onlineMetaDataDigis",
dcsPartitions = [24, 25, 26, 27, 28, 29], # 24-27: strip, 28-29: pixel, we should add all other detectors !
andOrDcs = False,
errorReplyDcs = True,
verbosityLevel = 1)
)
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
stage2L1Trigger.toModify(hltMuonmonitoring,
numGenericTriggerEventPSet = dict(stage2 = cms.bool(True),
l1tAlgBlkInputTag = cms.InputTag("gtStage2Digis"),
l1tExtBlkInputTag = cms.InputTag("gtStage2Digis"),
ReadPrescalesFromFile = cms.bool(True)),
denGenericTriggerEventPSet = dict(stage2 = cms.bool(True),
l1tAlgBlkInputTag = cms.InputTag("gtStage2Digis"),
l1tExtBlkInputTag = cms.InputTag("gtStage2Digis"),
ReadPrescalesFromFile = cms.bool(True)))
|
929159d54e03427bf1fea2711123c28bbb4cd003
|
889bc07d40091813906bf253785b08333196e6db
|
/nodebook/ipython/nodebookext.py
|
85040923b2979976246150990753011dfa5d0b67
|
[
"Apache-2.0"
] |
permissive
|
stitchfix/nodebook
|
7715f9731f9d01d7ccfd30a4bdc4d31da1b92ecb
|
fc7103451ed9cc64ff627afbde910b5ae6e771ed
|
refs/heads/master
| 2022-08-22T08:22:30.083249
| 2022-07-28T17:20:46
| 2022-07-28T17:20:46
| 98,476,264
| 271
| 22
|
Apache-2.0
| 2022-07-28T17:20:47
| 2017-07-27T00:20:18
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,360
|
py
|
nodebookext.py
|
from __future__ import absolute_import
import six.moves.cPickle as pickle
import os
import sys
import errno
from nodebook.nodebookcore import Node, Nodebook, ReferenceFinder
from nodebook.pickledict import PickleDict
NODEBOOK_STATE = {
"cache_dir": None,
"nodebook": None,
}
MODE_DISK = "disk"
MODE_MEMORY = "memory"
ALLOWED_MODES = [MODE_DISK, MODE_MEMORY]
def nodebook(line):
"""
ipython magic for initializing nodebook, expects name for nodebook database
"""
args = line.lstrip().split(' ')
try:
mode = args[0]
assert mode in ALLOWED_MODES
except (IndexError, AssertionError):
raise SyntaxError("Must specify mode as %s" % str(ALLOWED_MODES))
if mode == MODE_MEMORY:
persist = False
else:
persist = True
if persist:
NODEBOOK_STATE['cache_dir'] = 'nodebook_cache/'
try:
NODEBOOK_STATE['cache_dir'] += args[1]
except IndexError:
NODEBOOK_STATE['cache_dir'] += 'default'
try:
os.makedirs(NODEBOOK_STATE['cache_dir'])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(NODEBOOK_STATE['cache_dir']):
pass
else:
raise
try:
with open(os.path.join(NODEBOOK_STATE['cache_dir'], 'nodebook.p'), 'rb') as f:
NODEBOOK_STATE['nodebook'] = pickle.load(f)
except IOError:
var_store = PickleDict(NODEBOOK_STATE['cache_dir'])
NODEBOOK_STATE['nodebook'] = Nodebook(var_store)
else:
var_store = PickleDict()
NODEBOOK_STATE['nodebook'] = Nodebook(var_store)
if len(NODEBOOK_STATE['nodebook'].nodes) > 0:
NODEBOOK_STATE['nodebook'].update_all_prompts(get_ipython().payload_manager)
def execute_cell(line, cell):
"""
ipython magic for executing nodebook cell, expects cell id and parent id inline, followed by code
"""
assert NODEBOOK_STATE['nodebook'] is not None, "Nodebook not initialized, please use %nodebook {nodebook_name}"
try:
cell_id, parent_id = line.lstrip().split(' ')
except ValueError:
cell_id = line.lstrip()
parent_id = None
# make sure cell exists and is in the right position
NODEBOOK_STATE['nodebook'].insert_node_after(cell_id, parent_id)
# update code and run
NODEBOOK_STATE['nodebook'].update_code(cell_id, cell)
res, objs = NODEBOOK_STATE['nodebook'].run_node(cell_id)
# update prompts
NODEBOOK_STATE['nodebook'].update_all_prompts(get_ipython().payload_manager)
# update cache if needed
if NODEBOOK_STATE['cache_dir'] is not None:
with open(os.path.join(NODEBOOK_STATE['cache_dir'], 'nodebook.p'), 'wb') as f:
pickle.dump(NODEBOOK_STATE['nodebook'], f, protocol=2)
# UGLY HACK - inject outputs into global environment for autocomplete support
# TODO: find a better way to handle autocomplete
sys._getframe(2).f_globals.update(objs)
return res
def load_ipython_extension(ipython):
ipython.register_magic_function(nodebook, magic_kind='line')
ipython.register_magic_function(execute_cell, magic_kind='cell')
ipython.run_cell_magic('javascript', '', "Jupyter.utils.load_extensions('nodebook/nodebookext')")
def unload_ipython_extension(ipython):
pass
|
f0f52bc5b22c001b87294ced924ca154c75b0a05
|
270937d32c2bc8e33622d96c045b43b8a23bc086
|
/ion/text/fonts/font_target.gypi
|
a60e2a2bf5ce66df517b31ef75f4cc312c902583
|
[
"Apache-2.0"
] |
permissive
|
google/ion
|
32491fc26a0a2a5fd602e4008296ccba5017d689
|
514ce797458d02e7cd3a1b2d0b5c7ff8ccb5f5d1
|
refs/heads/master
| 2023-09-04T18:10:40.343013
| 2022-06-09T07:56:18
| 2022-06-09T07:56:18
| 50,387,855
| 1,651
| 132
|
Apache-2.0
| 2018-05-12T02:16:47
| 2016-01-25T23:13:46
|
C++
|
UTF-8
|
Python
| false
| false
| 1,764
|
gypi
|
font_target.gypi
|
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generates a zipasset from a ttf file and a header file containing the
# necessary declarations for the ION_FONT macro to work.
#
# Every target in fonts.gyp should include this file.
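# Illustrative (hypothetical) shape of such a target in fonts.gyp; names and paths are placeholders:
#   {
#     'target_name': 'roboto_regular',
#     'includes': ['font_target.gypi'],
#     'sources': ['path/to/Roboto-Regular.ttf'],
#   },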
{
'hard_dependency': '1',
'rules': [
{
'rule_name': 'generate_font_zipasset_and_header',
'extension': 'ttf',
'message': 'Generating zipasset and header for <(_target_name)',
'action': [
'<(python)', 'generate_font_zipasset_and_header.py',
# The font name, e.g. 'roboto_regular'.
'<(_target_name)',
# The full path to the ttf file, e.g.:
# '../../../third_party/webfonts/apache/roboto/Roboto-Regular.ttf'
'<(RULE_INPUT_PATH)',
# Intermediate directory to store the generated header and cc file.
'<(genfiles_dir)',
],
'outputs': [
# A header containing the necessary declaration for the ION_FONT macro
# to work with this font.
'<(genfiles_dir)/<(_target_name).h',
# The processed zipasset of the ttf file, ready for compilation.
'<(genfiles_dir)/<(_target_name).cc',
],
'process_outputs_as_sources': 1,
},
]
}
|
b1a1eac20de8049b21fa8a3e71f3aa8a80b926b5
|
e910cca862905577212a514727ca8cbfa9213839
|
/test/querying/test_query_actions.py
|
7fcec85df81e2b918c7349835d6fa084023ac94d
|
[
"MIT"
] |
permissive
|
salesforce/policy_sentry
|
1b0da6af01dd5507087ae7b266389036855ddb94
|
a06d95c4c20722e4a1c51da9943c52bac2154b41
|
refs/heads/master
| 2023-08-31T15:24:58.480240
| 2023-08-28T20:47:16
| 2023-08-29T20:52:56
| 209,652,627
| 1,864
| 151
|
MIT
| 2023-09-09T19:55:39
| 2019-09-19T21:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 25,083
|
py
|
test_query_actions.py
|
import unittest
import os
import json
from schema import Optional, Schema, And, Use, SchemaError
from policy_sentry.shared.iam_data import get_service_prefix_data
from policy_sentry.querying.actions import (
get_actions_for_service,
get_privilege_info,
get_action_data,
get_actions_that_support_wildcard_arns_only,
get_actions_at_access_level_that_support_wildcard_arns_only,
get_actions_with_access_level,
get_actions_with_arn_type_and_access_level,
remove_actions_not_matching_access_level,
get_dependent_actions,
remove_actions_that_are_not_wildcard_arn_only,
get_actions_matching_condition_key,
get_actions_matching_arn,
get_actions_matching_arn_type,
get_api_documentation_link_for_action,
get_all_action_links
# get_actions_matching_condition_crud_and_arn
)
from policy_sentry.writing.validate import check
class QueryActionsTestCase(unittest.TestCase):
def test_get_service_prefix_data(self):
result = get_service_prefix_data("cloud9")
desired_output_schema = Schema(
{
"service_name": "AWS Cloud9",
"service_authorization_url": "https://docs.aws.amazon.com/service-authorization/latest/reference/list_awscloud9.html",
"prefix": "cloud9",
"privileges": dict,
"privileges_lower_name": dict,
"resources": dict,
"resources_lower_name": dict,
"conditions": dict
}
)
valid_output = check(desired_output_schema, result)
# print(json.dumps(result, indent=4))
self.assertTrue(valid_output)
def test_get_actions_for_service(self):
"""querying.actions.get_actions_for_service"""
expected_results = [
"ram:AcceptResourceShareInvitation",
"ram:AssociateResourceShare",
"ram:AssociateResourceSharePermission",
"ram:CreateResourceShare",
"ram:DeleteResourceShare",
"ram:DisassociateResourceShare",
"ram:DisassociateResourceSharePermission",
"ram:EnableSharingWithAwsOrganization",
"ram:GetPermission",
"ram:GetResourcePolicies",
"ram:GetResourceShareAssociations",
"ram:GetResourceShareInvitations",
"ram:GetResourceShares",
"ram:ListPendingInvitationResources",
"ram:ListPermissions",
"ram:ListPrincipals",
"ram:ListResourceSharePermissions",
"ram:ListResources",
"ram:RejectResourceShareInvitation",
"ram:TagResource",
"ram:UntagResource",
"ram:UpdateResourceShare"
]
results = get_actions_for_service("ram")
print(json.dumps(results, indent=4))
self.maxDiff = None
for expected_result in expected_results:
self.assertTrue(expected_result in results)
# self.assertListEqual(desired_output, output)
# performance notes
# old: 0.021s
# this one: 0.005s
def test_get_actions_for_invalid_service(self):
"""querying.actions.get_actions_for_service
for invalid service
"""
output = get_actions_for_service("invalid_service")
self.assertListEqual([], output)
def test_get_privilege_info(self):
expected_results_file = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"get_privilege_info_cloud9.json",
)
)
# print(expected_results_file)
with open(expected_results_file) as json_file:
expected_results = json.load(json_file)
results = get_privilege_info("cloud9", "CreateEnvironmentEC2")
print(json.dumps(results, indent=4))
# Future proofing the unit tests
self.assertEqual(results["privilege"], expected_results["privilege"])
self.assertEqual(results["access_level"], expected_results["access_level"])
self.assertEqual(results["resource_types"][""]["resource_type"], expected_results["resource_types"][""]["resource_type"])
self.assertTrue("aws:RequestTag/${TagKey}" in results["resource_types"][""]["condition_keys"])
self.assertTrue("ec2:DescribeSubnets" in results["resource_types"][""]["dependent_actions"])
self.assertTrue("ec2:DescribeVpcs" in results["resource_types"][""]["dependent_actions"])
self.assertTrue("iam:CreateServiceLinkedRole" in results["resource_types"][""]["dependent_actions"])
self.assertTrue("environment" in results["service_resources"].keys())
expected_service_conditions = [
'aws:RequestTag/${TagKey}',
'aws:ResourceTag/${TagKey}',
'aws:TagKeys',
'cloud9:EnvironmentId',
'cloud9:EnvironmentName',
'cloud9:InstanceType',
'cloud9:OwnerArn',
'cloud9:Permissions',
'cloud9:SubnetId',
'cloud9:UserArn'
]
for expected_condition in expected_service_conditions:
self.assertTrue(expected_condition in results["service_conditions"].keys())
# def test_get_privilege_info_2(self):
# results = get_privilege_info("ram", "CreateResourceShare")
# # results = get_privilege_info("ram", "createresourceshare")
# print(json.dumps(results, indent=4))
def test_get_action_data(self):
"""querying.actions.test_get_action_data"""
desired_output = {
"ram": [
{
"action": "ram:TagResource",
"description": "Grants permission to tag the specified resource share or permission",
"access_level": "Tagging",
"api_documentation_link": "https://docs.aws.amazon.com/ram/latest/APIReference/API_TagResource.html",
"resource_arn_format": "arn:${Partition}:ram:${Region}:${Account}:permission/${ResourcePath}",
"condition_keys": [
"aws:ResourceTag/${TagKey}",
"ram:PermissionArn",
"ram:PermissionResourceType",
],
"dependent_actions": [],
},
{
"action": "ram:TagResource",
"description": "Grants permission to tag the specified resource share or permission",
"access_level": "Tagging",
"api_documentation_link": "https://docs.aws.amazon.com/ram/latest/APIReference/API_TagResource.html",
"resource_arn_format": "arn:${Partition}:ram:${Region}:${Account}:resource-share/${ResourcePath}",
"condition_keys": [
"aws:ResourceTag/${TagKey}",
"ram:AllowsExternalPrincipals",
"ram:ResourceShareName"
],
"dependent_actions": []
},
{
"action": "ram:TagResource",
"description": "Grants permission to tag the specified resource share or permission",
"access_level": "Tagging",
"api_documentation_link": "https://docs.aws.amazon.com/ram/latest/APIReference/API_TagResource.html",
"resource_arn_format": "*",
"condition_keys": [
"aws:ResourceTag/${TagKey}",
"ram:AllowsExternalPrincipals",
"ram:ResourceShareName"
],
"dependent_actions": []
}
]
}
output = get_action_data("ram", "TagResource")
print(json.dumps(output, indent=4))
self.maxDiff = None
self.assertDictEqual(desired_output, output)
def test_get_actions_that_support_wildcard_arns_only(self):
"""querying.actions.get_actions_that_support_wildcard_arns_only"""
# Variant 1: Secrets manager
expected_results = [
"secretsmanager:GetRandomPassword",
"secretsmanager:ListSecrets",
]
results = get_actions_that_support_wildcard_arns_only("secretsmanager")
self.maxDiff = None
for result in results:
self.assertTrue(result in expected_results)
# Variant 2: ECR
results = get_actions_that_support_wildcard_arns_only("ecr")
expected_results = [
"ecr:DeleteRegistryPolicy",
"ecr:DescribeRegistry",
"ecr:GetAuthorizationToken",
"ecr:GetRegistryPolicy",
"ecr:PutRegistryPolicy",
"ecr:PutReplicationConfiguration"
]
# print(json.dumps(results, indent=4))
for item in expected_results:
self.assertTrue(item in results)
# Variant 3: All actions
output = get_actions_that_support_wildcard_arns_only("all")
# print(len(output))
self.assertTrue(len(output) > 3000)
def test_get_actions_at_access_level_that_support_wildcard_arns_only(self):
"""querying.actions.get_actions_at_access_level_that_support_wildcard_arns_only"""
read_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"secretsmanager", "Read"
)
list_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"secretsmanager", "List"
)
write_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"secretsmanager", "Write"
)
tagging_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"secretsmanager", "Tagging"
)
permissions_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"s3", "Permissions management"
)
# print(json.dumps(read_output, indent=4))
# print(json.dumps(list_output, indent=4))
# print(json.dumps(write_output, indent=4))
# print(json.dumps(tagging_output, indent=4))
# print(json.dumps(permissions_output, indent=4))
self.assertListEqual(read_output, ['secretsmanager:GetRandomPassword'])
self.assertListEqual(list_output, ['secretsmanager:ListSecrets'])
self.assertListEqual(write_output, [])
self.assertListEqual(tagging_output, [])
for item in ["s3:PutAccountPublicAccessBlock"]:
self.assertTrue(item in permissions_output)
all_permissions_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"all", "Permissions management"
)
all_write_output = get_actions_at_access_level_that_support_wildcard_arns_only(
"all", "Write"
)
print(len(all_permissions_output) + len(all_write_output))
# print(len(all_write_output))
# print(json.dumps(all_write_output, indent=4))
# print(json.dumps(all_permissions_output, indent=4))
def test_get_actions_with_access_level(self):
"""querying.actions.get_actions_with_access_level"""
desired_output = ['workspaces:CreateTags', 'workspaces:DeleteTags']
output = get_actions_with_access_level(
"workspaces", "Tagging"
)
self.maxDiff = None
self.assertListEqual(desired_output, output)
# output = get_actions_with_access_level(
# "all", "Tagging"
# )
# print(output)
def test_get_actions_with_arn_type_and_access_level_case_1(self):
"""querying.actions.get_actions_with_arn_type_and_access_level"""
desired_output = [
's3:DeleteBucketPolicy',
's3:PutBucketAcl',
's3:PutBucketPolicy',
's3:PutBucketPublicAccessBlock'
]
output = get_actions_with_arn_type_and_access_level(
# "ram", "resource-share", "Write"
"s3", "bucket", "Permissions management"
)
self.maxDiff = None
self.assertListEqual(desired_output, output)
def test_get_actions_with_arn_type_and_access_level_case_2(self):
"""querying.actions.get_actions_with_arn_type_and_access_level with arn type"""
desired_output = [
'ssm:DeleteParameter',
'ssm:DeleteParameters',
'ssm:LabelParameterVersion',
'ssm:PutParameter'
]
output = get_actions_with_arn_type_and_access_level(
"ssm", "parameter", "Write"
)
for item in desired_output:
self.assertTrue(item in output)
def test_get_actions_with_arn_type_and_access_level_case_3(self):
"""querying.actions.get_actions_with_arn_type_and_access_level with arn type"""
desired_output = [
's3:PutAccountPublicAccessBlock',
's3:PutAccessPointPublicAccessBlock'
]
output = get_actions_with_arn_type_and_access_level(
# "ram", "resource-share", "Write"
"s3", "*", "Permissions management"
)
print(output)
for item in desired_output:
self.assertTrue(item in output)
# self.assertListEqual(desired_output, output)
def test_get_actions_with_arn_type_and_access_level_case_4(self):
"""querying.actions.get_actions_with_arn_type_and_access_level with arn type"""
desired_output = [
'secretsmanager:ListSecrets'
]
output = get_actions_with_arn_type_and_access_level(
"secretsmanager", "*", "List"
)
self.assertListEqual(desired_output, output)
def test_get_actions_with_arn_type_and_access_level_case_5(self):
"""querying.actions.get_actions_with_arn_type_and_access_level with arn type"""
output = get_actions_with_arn_type_and_access_level(
"s3", "object", "List"
)
self.assertTrue("s3:ListMultipartUploadParts" in output)
def test_get_actions_matching_arn_type_case_1(self):
"""querying.actions.get_actions_matching_arn_type"""
expected_results = [
"ecr:DeleteRegistryPolicy",
"ecr:DescribeRegistry",
"ecr:GetAuthorizationToken",
"ecr:GetRegistryPolicy",
"ecr:PutRegistryPolicy",
"ecr:PutReplicationConfiguration"
]
results = get_actions_matching_arn_type('ecr', '*')
print(json.dumps(results, indent=4))
for item in expected_results:
self.assertTrue(item in results)
# self.assertEqual(output, ["ecr:GetAuthorizationToken"])
def test_get_actions_matching_arn_type_case_2(self):
"""querying.actions.get_actions_matching_arn_type"""
output = get_actions_matching_arn_type('all', 'object')
self.assertTrue('s3:AbortMultipartUpload' in output)
def test_get_actions_matching_arn_type_case_3(self):
"""querying.actions.get_actions_matching_arn_type"""
output = get_actions_matching_arn_type('rds', 'object')
self.assertTrue(len(output) == 0)
def test_get_actions_matching_arn_type_case_4(self):
"""querying.actions.get_actions_matching_arn_type"""
desired_output = [
'codestar:CreateUserProfile',
'codestar:DeleteUserProfile',
'codestar:UpdateUserProfile'
]
output = get_actions_matching_arn_type("codestar", "user")
for item in desired_output:
self.assertTrue(item in output)
def test_get_actions_matching_condition_key(self):
"""querying.actions.get_actions_matching_condition_key"""
results = get_actions_matching_condition_key(
"ses", "ses:FeedbackAddress"
)
expected_results = [
# 'ses:SendBulkTemplatedEmail',
# 'ses:SendCustomVerificationEmail',
'ses:SendEmail',
# 'ses:SendRawEmail',
# 'ses:SendTemplatedEmail'
]
# print(output)
self.maxDiff = None
print(results)
for expected_result in expected_results:
self.assertTrue(expected_result in results)
# self.assertListEqual(results, desired_results)
# def test_get_actions_matching_condition_crud_and_arn(self):
# """querying.actions.get_actions_matching_condition_crud_and_arn"""
# results = get_actions_matching_condition_crud_and_arn(
# "elasticbeanstalk:InApplication",
# "List",
# "arn:${Partition}:elasticbeanstalk:${Region}:${Account}:environment/${ApplicationName}/${EnvironmentName}",
# )
# desired_results = [
# "elasticbeanstalk:DescribeEnvironments",
# ]
# print(results)
# self.assertListEqual(results, desired_results)
#
# def test_get_actions_matching_condition_crud_and_wildcard_arn(self):
# """querying.actions.get_actions_matching_condition_crud_and_wildcard_arn"""
# desired_results = [
# "swf:PollForActivityTask",
# "swf:PollForDecisionTask",
# "swf:RespondActivityTaskCompleted",
# "swf:StartWorkflowExecution",
# ]
# results = get_actions_matching_condition_crud_and_arn(
# "swf:taskList.name", "Write", "*"
# )
# print(results)
# self.assertListEqual(desired_results, results)
#
# # This one leverages a condition key that is partway through a string in the database
# # - luckily, SQLAlchemy's ilike function allows us to find it anyway because it's a substring
# # kms:CallerAccount,kms:EncryptionAlgorithm,kms:EncryptionContextKeys,kms:ViaService
# desired_results = [
# "kms:Decrypt",
# "kms:Encrypt",
# "kms:GenerateDataKey",
# "kms:GenerateDataKeyPair",
# "kms:GenerateDataKeyPairWithoutPlaintext",
# "kms:GenerateDataKeyWithoutPlaintext",
# "kms:ReEncryptFrom",
# "kms:ReEncryptTo",
# ]
# print(results)
# results = get_actions_matching_condition_crud_and_arn(
# "kms:EncryptionAlgorithm", "Write", "*"
# )
# self.assertListEqual(desired_results, results)
def test_remove_actions_not_matching_access_level(self):
# TODO: This method normalized the access level unnecessarily. Make sure to change that in the final iteration
"""querying.actions.remove_actions_not_matching_access_level"""
actions_list = [
"ecr:batchgetimage", # read
"ecr:createrepository", # write
"ecr:describerepositories", # list
"ecr:tagresource", # tagging
"ecr:setrepositorypolicy", # permissions management
]
self.maxDiff = None
# Read
result = remove_actions_not_matching_access_level(
actions_list, "Read"
)
expected = ["ecr:BatchGetImage"]
self.assertListEqual(result, ["ecr:BatchGetImage", "ecr:DescribeRepositories"])
# Write
result = remove_actions_not_matching_access_level(
actions_list, "Write"
)
self.assertListEqual(result, ["ecr:CreateRepository"])
# List
result = remove_actions_not_matching_access_level(
actions_list, "List"
)
# self.assertListEqual(result, ["ecr:DescribeRepositories"])
# DescribeRepositories is no longer considered a "list" action.
self.assertListEqual(result, [])
# Tagging
result = remove_actions_not_matching_access_level(
actions_list, "Tagging"
)
self.assertListEqual(result, ["ecr:TagResource"])
# Permissions management
result = remove_actions_not_matching_access_level(
actions_list, "Permissions management"
)
self.assertListEqual(result, ["ecr:SetRepositoryPolicy"])
bad_actions_list = [
"codecommit:CreatePullRequest",
"codecommit:CreatePullRequestApprovalRule",
"codecommit:CreateRepository",
"codecommit:CreateUnreferencedMergeCommit",
"codecommit:DeleteBranch",
"codecommit:DeleteFile",
]
def test_get_dependent_actions(self):
"""querying.actions.get_dependent_actions"""
dependent_actions_single = ["ec2:associateiaminstanceprofile"]
dependent_actions_double = ["shield:associatedrtlogbucket"]
dependent_actions_several = ["chime:getcdrbucket"]
self.assertEqual(
get_dependent_actions(dependent_actions_single),
["iam:PassRole"],
)
self.assertCountEqual(
get_dependent_actions(dependent_actions_double),
["s3:GetBucketPolicy", "s3:PutBucketPolicy"],
)
self.assertCountEqual(
get_dependent_actions(dependent_actions_several),
[
"s3:GetBucketAcl",
"s3:GetBucketLocation",
"s3:GetBucketLogging",
"s3:GetBucketVersioning",
"s3:GetBucketWebsite",
],
)
def test_remove_actions_that_are_not_wildcard_arn_only(self):
"""querying.actions.remove_actions_that_are_not_wildcard_arn_only"""
provided_actions_list = [
# 2 wildcard only actions
"secretsmanager:getrandompassword",
"secretsmanager:listsecrets",
# These ones are wildcard OR "secret"
"secretsmanager:createsecret",
"secretsmanager:putsecretvalue",
]
desired_output = [
# 2 wildcard only actions
"secretsmanager:GetRandomPassword",
"secretsmanager:ListSecrets",
]
output = remove_actions_that_are_not_wildcard_arn_only(
provided_actions_list
)
self.maxDiff = None
self.assertCountEqual(desired_output, output)
def test_weird_lowercase_uppercase(self):
"""test_weird_lowercase_uppercase: Same as test_remove_actions_that_are_not_wildcard_arn_only, but with wEiRd cases"""
provided_actions_list = [
# 2 wildcard only actions
"secretsmanager:gEtRaNdOmPasSwOrD",
"secretsmanager:LIstsEcretS",
# These ones are wildcard OR "secret"
"secretsmanager:cReAtEsEcReT",
"secretsmanager:pUtSeCrEtVaLuE",
]
desired_output = [
# 2 wildcard only actions
"secretsmanager:GetRandomPassword",
"secretsmanager:ListSecrets",
]
output = remove_actions_that_are_not_wildcard_arn_only(
provided_actions_list
)
print(json.dumps(output))
self.maxDiff = None
self.assertCountEqual(desired_output, output)
def test_get_actions_matching_arn(self):
"""querying.actions.get_actions_matching_arn"""
arn = "arn:aws:cloud9:us-east-1:account-id:environment:123456"
results = get_actions_matching_arn(arn)
# print(json.dumps(results, indent=4))
# Don't want to keep an updated list of actions in these tests,
# so let's just test the lengths and look for some contents that should or should not be in there.
self.assertTrue(len(results) > 10)
self.assertTrue("cloud9:ListEnvironments" not in results)
self.assertTrue("cloud9:DeleteEnvironment" in results)
def test_gh_226_elasticloadbalancing_v1_and_v2(self):
"""Test that elasticloadbalancing combines v1 and v2"""
results = get_actions_for_service("elasticloadbalancing")
# print(json.dumps(results, indent=4))
lb_v1_only_action = "elasticloadbalancing:CreateTargetGroup"
lb_v2_only_action = "elasticloadbalancing:SetSecurityGroups"
self.assertTrue(lb_v1_only_action in results)
self.assertTrue(lb_v2_only_action in results)
def test_get_api_documentation_link_for_action(self):
"""querying.actions.get_api_documentation_link_for_action"""
service_prefix = "cloud9"
action_name = "CreateEnvironmentEC2"
result = get_api_documentation_link_for_action(service_prefix, action_name)
print(result)
# Link should be: https://docs.aws.amazon.com/cloud9/latest/APIReference/API_CreateEnvironmentEC2.html
# We will just check the https and subdomain.domain in case they change the format in the future.
self.assertTrue("https://docs.aws.amazon.com" in result)
def test_get_all_links(self):
"""querying.actions.get_all_action_links"""
results = get_all_action_links()
self.assertTrue(len(results.keys()) > 8000)
# def test_research_access_levels(self):
# """querying.actions.get_actions_with_access_level"""
# service_prefix = "sns"
# access_level = "Write"
# # access_level = "Permissions management"
# results = get_actions_with_access_level(service_prefix, access_level)
# print(json.dumps(results, indent=4))
|
4c1101cf0ad60d99c3fb26775488225d3b70e8c1
|
bad6940b201987d55edb3d7128f2ce3718d3b40f
|
/tests/int/test_corpora.py
|
c352353b70b8d39db954d98093a2bc9fecaf32b0
|
[
"MIT"
] |
permissive
|
pyconll/pyconll
|
ff985ef2e80820a0dd86b5d7c81914712f5dd5cd
|
1bb8cd31fe18bfb048e6e26bc583004952094bff
|
refs/heads/master
| 2023-06-23T12:17:10.758862
| 2023-06-21T03:20:45
| 2023-06-21T03:20:45
| 107,349,042
| 148
| 11
|
MIT
| 2023-06-21T03:13:56
| 2017-10-18T02:30:31
|
Python
|
UTF-8
|
Python
| false
| false
| 15,498
|
py
|
test_corpora.py
|
import hashlib
import logging
import operator
import os
from pathlib import Path
import tarfile
from urllib import parse
import pytest
import requests
import pyconll
from .workflow import conditional as _if, fail, partial, pipe, sequence, value
def _cross_platform_stable_fs_iter(dir):
"""
Provides a stable ordering across platforms over a directory Path.
This allows iteration across filesystems in a consistent way such that case
sensitivity of the underlying system does not affect how the files are
iterated.
Args:
dir: The Path object that points to the directory to iterate over.
Returns:
An iterator that goes over the paths in the directory, defined in such a
way that is consistent across platforms.
"""
# As this should work across platforms, paths cannot be compared as is, and
# the paths must be compared with another in string format. Paths are
# ordered by case insensitivity, and then ordered by case within any paths
# that are only different by case. There is a double sort because, a simple
# case insensitive sort will provide inconsistent results on linux since
# iterdir does not provide consistently ordered results.
tupled = map(lambda p: (str(p), p), dir.iterdir())
by_case = sorted(tupled, key=operator.itemgetter(0))
by_case_insensitive = sorted(by_case, key=lambda el: el[0].lower())
only_paths = map(operator.itemgetter(1), by_case_insensitive)
return only_paths
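# Illustrative ordering (a sketch; the filenames are hypothetical): for entries
# named 'B.txt', 'a.txt' and 'A.txt', the double sort above yields
# 'A.txt', 'a.txt', 'B.txt' -- case-insensitive order first, then case order
# within names that differ only by case, regardless of platform.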
def _add_file_to_hash(hash_obj, path, block_size):
"""
Helper method in calculating a path's hash to add the hash of a file.
Args:
hash_obj: The object that is able to hash the file contents.
path: The location of the file.
block_size: The size of the blocks to read in.
"""
with open(str(path), 'rb') as f:
buffer = f.read(block_size)
while buffer:
hash_obj.update(buffer)
buffer = f.read(block_size)
def _hash_path_helper(hash_obj, path, block_size):
"""
Helper to wrap the functionality of updating the actual hash object.
Args:
hash_obj: The object that is able to hash the file contents.
path: The location of the file.
block_size: The size of the blocks to read in, in bytes.
"""
if path.is_dir():
fs_iter = _cross_platform_stable_fs_iter(path)
for child in fs_iter:
tag = child.name.encode(encoding='utf-8', errors='replace')
hash_obj.update(tag)
_hash_path_helper(hash_obj, child, block_size)
hash_obj.update(tag)
else:
_add_file_to_hash(hash_obj, path, block_size)
def hash_path(hash_obj, path, block_size):
"""
Hash a path with a provided hash object with a digest and update function.
Args:
hash_obj: The object that is able to hash the file contents.
path: The location of the file.
block_size: The size of the blocks to read in, in bytes.
Returns:
The hash of the file object at the path as a string in hex format.
"""
_hash_path_helper(hash_obj, path, block_size)
return hash_obj.hexdigest()
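# Illustrative use (a sketch; the path is hypothetical): this mirrors how
# validate_hash_sha256 below drives the helper, reading 8192-byte blocks.
# digest = hash_path(hashlib.sha256(), Path('tests/int/_corpora_cache/UD v2.7'), 8192)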
def _get_filename_from_url(url):
"""
For a url that represents a network file, return the filename part.
Args:
url: The url to extract the filename from.
Returns:
The filename part at the end of the url with the extension.
"""
parsed = parse.urlparse(url)
name = Path(parsed.path).name
unquoted = parse.unquote(name)
return unquoted
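# Example (hypothetical URL): the filename is taken from the path component and
# percent-decoded; any query string is ignored.
# _get_filename_from_url('https://example.com/dl/ud-treebanks-v2.7.tgz?seq=1')
# -> 'ud-treebanks-v2.7.tgz'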
def download_file(url, dest, chunk_size, attempts):
"""
Downloads a file from a url, resilient to failures and controlling speed.
Args:
url: The url to download the file from.
dest: The location on disk to store the downloaded file to.
chunk_size: The size of the file chunks when streaming the download.
attempts: The number of failures to be resistant to. Assumes the server
can accept ranged requests on download.
"""
head_r = requests.head(url)
content_length = int(head_r.headers['Content-Length'])
attempt = 0
dest_loc = str(dest)
byte_loc = 0
with open(dest_loc, 'wb') as f:
while attempt < attempts:
with requests.get(url,
headers={'Range': 'bytes={}-'.format(byte_loc)},
stream=True) as r:
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
f.flush()
byte_loc = os.stat(dest_loc).st_size
if byte_loc >= content_length:
break
attempt += 1
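# Illustrative call (mirrors download_file_to_dir further below; the target
# path is hypothetical): stream in 16384-byte chunks, tolerating 15 failures
# by resuming with ranged requests.
# download_file(url, Path('tests/int/_corpora_cache/ud-treebanks-v2.7.tgz'), 16384, 15)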
def delete_dir(path):
"""
Recursively deletes a directory and all contents within it.
Args:
path: The path to the directory to delete. Must point to a directory.
"""
for child in path.iterdir():
if child.is_file():
child.unlink()
elif child.is_dir():
delete_dir(child)
path.rmdir()
@partial
def validate_hash_sha256(p, hash_s):
"""
Check that a path's SHA256 hash matches the provided string.
Args:
p: The path to hash.
hash_s: The expected hash of the path as a string.
"""
if p.exists():
s = hash_path(hashlib.sha256(), p, 8192)
r = s == hash_s
if r:
logging.info('Hash for %s matched expected %s.', p, hash_s)
else:
logging.info(
'The current contents of %s do not hash to the expected %s.',
p, hash_s)
logging.info('Instead %s hashed as %s. Recreating fixture', p, s)
return r
else:
logging.info('File, %s, does not exist.', p)
return False
@partial
def clean_subdir(direc, subdir):
"""
Create a clean subdirectory in the provided path.
If the path already exists it is deleted, and then recreated.
Args:
direc: The parent directory.
subdir: The path within the parent to be clean.
"""
direc.mkdir(exist_ok=True)
p = direc / subdir
if not p.exists():
p.mkdir()
else:
delete_dir(p)
p.mkdir()
@partial
def download_file_to_dir(url, direc):
"""
Download a file (final name matching the URL) to a specified directory.
Args:
url: The url of the file to download
direc: The directory to download the file to.
"""
tmp = direc / _get_filename_from_url(url)
if tmp.exists():
tmp.unlink()
logging.info('Starting to download %s to %s.', url, tmp)
download_file(url, tmp, 16384, 15)
logging.info('Download succeeded to %s.', tmp)
return tmp
@partial
def extract_tgz(p, tgz):
"""
Extracts a tarfile to a directory.
Args:
p: The path to extract to.
tgz: The tarfile to extract from.
"""
logging.info('Extracting tarfile to %s.', p)
with tarfile.open(str(tgz)) as tf:
tf.extractall(str(p))
def url_zip(entry_id, fixture_cache, contents_hash, zip_hash, url):
"""
Creates a cacheable fixture that is a url download that is a zip.
Args:
entry_id: The unique id of the entry in the cache.
fixture_cache: The cache location for the fixtures.
contents_hash: The hexdigest format hash of the fixture's contents.
zip_hash: The hexdigest format hash of the zip file's contents.
url: The url of the zip download.
Returns:
The path of the fixture within the cache as the fixture value.
"""
final_path = fixture_cache / entry_id
fn = _get_filename_from_url(url)
zip_path = fixture_cache / fn
download = \
sequence(
clean_subdir(fixture_cache, entry_id),
pipe(
download_file_to_dir(url, fixture_cache),
extract_tgz(final_path)
),
_if(
validate_hash_sha256(final_path, contents_hash),
value(final_path),
fail('Fixture for {} in {} was not able to be properly setup.'.
format(url, final_path))))
w = _if(
validate_hash_sha256(final_path, contents_hash),
value(final_path),
_if(
validate_hash_sha256(zip_path, zip_hash),
sequence(
extract_tgz(final_path, zip_path),
_if(
validate_hash_sha256(final_path, contents_hash),
value(final_path),
sequence(
zip_path.unlink,
download
)
)
),
download)) # yapf: disable
return w
# This is the registration for the different corpora. It includes an id, and a
# method of creation as a key-value pair. This registration structure allows
# for the same corpora to easily be used in different tests which are designed
# to holistically evaluate pyconll across large scenarios, like correctness or
# performance. Given the structure of exceptions and marks, I may still need
# some tweaking of what structure works best, but this is a definite improvement
# and is on a path toward more flexibility and robustness.
corpora = {
'UD v2.8':
url_zip(
'UD v2.8', Path('tests/int/_corpora_cache'),
'eb5d8941be917d2cb46677cb575f18dd6218bddec446b428a5b96d96ab44c0cd',
'95d2f4370dc5fe93653eb36e7268f4ec0c1bd012e51e943d55430f1e9d0d7e05',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3687/ud-treebanks-v2.8.tgz'
),
'UD v2.7':
url_zip(
'UD v2.7', Path('tests/int/_corpora_cache'),
'38e7d484b0125aaf7101a8c447fd2cb3833235cf428cf3c5749128ade73ecee2',
'ee61f186ac5701440f9d2889ca26da35f18d433255b5a188b0df30bc1525502b',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3424/ud-treebanks-v2.7.tgz'
),
'UD v2.6':
url_zip(
'UD v2.6', Path('tests/int/_corpora_cache'),
'a28fdc1bdab09ad597a873da62d99b268bdfef57b64faa25b905136194915ddd',
'a462a91606c6b2534a767bbe8e3f154b678ef3cc81b64eedfc9efe9d60ceeb9e',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3226/ud-treebanks-v2.6.tgz'
),
'UD v2.5':
url_zip(
'UD v2.5', Path('tests/int/_corpora_cache'),
'4761846e8c5f7ec7e40a6591f7ef5307ca9e7264da894d05d135514a4ea22a10',
'5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz'
),
'UD v2.4':
url_zip(
'UD v2.4', Path('tests/int/_corpora_cache'),
'000646eb71cec8608bd95730d41e45fac319480c6a78132503e0efe2f0ddd9a9',
'252a937038d88587842f652669cdf922b07d0f1ed98b926f738def662791eb62',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2988/ud-treebanks-v2.4.tgz'
),
'UD v2.3':
url_zip(
'UD v2.3', Path('tests/int/_corpora_cache'),
'359e1989771268ab475c429a1b9e8c2f6c76649b18dd1ff6568c127fb326dd8f',
'122e93ad09684b971fd32b4eb4deeebd9740cd96df5542abc79925d74976efff',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2895/ud-treebanks-v2.3.tgz'
),
'UD v2.2':
url_zip(
'UD v2.2', Path('tests/int/_corpora_cache'),
'fa3a09f2c4607e19d7385a5e975316590f902fa0c1f4440c843738fbc95e3e2a',
'a9580ac2d3a6d70d6a9589d3aeb948fbfba76dca813ef7ca7668eb7be2eb4322',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2837/ud-treebanks-v2.2.tgz'
),
'UD v2.1':
url_zip(
'UD v2.1', Path('tests/int/_corpora_cache'),
'36921a1d8410dc5e22ef9f64d95885dc60c11811a91e173e1fd21706b83fdfee',
'446cc70f2194d0141fb079fb22c05b310cae9213920e3036b763899f349fee9b',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2515/ud-treebanks-v2.1.tgz'
),
'UD v2.0':
url_zip(
'UD v2.0', Path('tests/int/_corpora_cache'),
'4f08c84bec5bafc87686409800a9fe9b5ac21434f0afd9afe1cc12afe8aa90ab',
'c6c6428f709102e64f608e9f251be59d35e4add1dd842d8dc5a417d01415eb29',
'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-1983/ud-treebanks-v2.0.tgz'
)
}
marks = {'UD v2.7': pytest.mark.latest}
exceptions = {
'UD v2.5': [
Path(
# There is one token with less than 10 columns.
'ud-treebanks-v2.5/UD_Russian-SynTagRus/ru_syntagrus-ud-train.conllu'
)
]
}
@pytest.fixture
def corpus(request):
"""
A utility fixture to merely execute the actual fixture logic as necessary.
Args:
request: The pytest indirect request object, which has a param object
for the underlying fixture argument.
Returns:
The value of the execution of the corpus fixture.
"""
return request.param()
def pytest_generate_tests(metafunc):
"""
A pytest utility function for generating tests automatically.
The current policy is for tests that depend on the corpus fixture, they
are automatically given tests for all corpora. So any performance,
correctness, or other tests which are valid across corpora, should include
the 'corpus' fixture. This corpus fixture will be parameterized by the
different registered corpora and the data location passed on to the test.
Args:
metafunc: The object to parameterize the tests with.
"""
if 'corpus' in metafunc.fixturenames:
testdata = []
for item in corpora.items():
exc = exceptions.get(item[0]) or []
mark = marks.get(item[0])
if mark:
p = pytest.param(item[1], exc, marks=mark, id=item[0])
else:
p = pytest.param(item[1], exc, id=item[0])
testdata.append(p)
metafunc.parametrize(argnames=('corpus', 'exceptions'),
argvalues=testdata,
indirect=['corpus'])
def test_corpus(corpus, exceptions):
"""
Tests a corpus using the fixture path and the glob for files to test.
Args:
corpus: The path where the corpus is.
exceptions: A list of paths relative to fixture that are known failures.
"""
globs = corpus.glob('**/*.conllu')
for path in globs:
is_exp = any(path == corpus / exp for exp in exceptions)
if is_exp:
logging.info('Skipping over %s because it is a known failure.',
path)
else:
_test_treebank(str(path))
def _test_treebank(treebank_path):
"""
Test that the provided treebank can be parsed and written without error.
Args:
treebank_path: The path to the treebank file that is to be parsed and written.
"""
TMP_OUTPUT_FILE = '__tmp__ud.conllu'
logging.info('Starting to parse %s', treebank_path)
treebank = pyconll.iter_from_file(treebank_path)
# For each sentence write back and make sure to include the proper
# newlines between sentences.
with open(TMP_OUTPUT_FILE, 'w', encoding='utf-8') as f:
for sentence in treebank:
f.write(sentence.conll())
f.write('\n\n')
# Clean up afterwards.
os.remove(TMP_OUTPUT_FILE)
|
5feb63c16068bdc24e551c7d9a7e5279e8d1e096
|
803bab6f782099d995bcdb99d163486f4fff8c50
|
/habitat-lab/habitat/datasets/utils.py
|
ef8b08d692acfe7d848db88514926f35e66f76c8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-SA-3.0"
] |
permissive
|
facebookresearch/habitat-lab
|
7088506509f64da6d682f5dc69427589f71a58a9
|
f5b29e62df0788d70ba3618fc738fa4e947428ba
|
refs/heads/main
| 2023-08-24T14:00:02.707343
| 2023-08-23T04:53:48
| 2023-08-23T04:53:48
| 169,164,391
| 792
| 298
|
MIT
| 2023-09-14T15:20:03
| 2019-02-04T23:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,402
|
py
|
utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tokenize and vocabulary utils originally authored by @apsdehal and are
taken from Pythia.
"""
import json
import os
import re
import typing
from collections import Counter
from typing import Iterable, List
from habitat.core.logging import logger
from habitat.core.simulator import ShortestPathPoint
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.utils.geometry_utils import quaternion_to_list
try:
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
except ImportError:
pass
SENTENCE_SPLIT_REGEX = re.compile(r"([^\w-]+)")
DEFAULT_PHYSICS_CONFIG_PATH = "data/default.physics_config.json"
def tokenize(
sentence, regex=SENTENCE_SPLIT_REGEX, keep=("'s",), remove=(",", "?")
) -> List[str]:
sentence = sentence.lower()
for token in keep:
sentence = sentence.replace(token, " " + token)
for token in remove:
sentence = sentence.replace(token, "")
tokens = regex.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
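# Example (a sketch, not part of the original module): punctuation listed in
# `remove` is stripped and the text is lower-cased before splitting.
# tokenize("Hello, world?") -> ['hello', 'world']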
def load_str_list(fname):
with open(fname) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
class VocabDict:
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
START_TOKEN = "<s>"
END_TOKEN = "</s>"
def __init__(self, word_list=None, filepath=None):
if word_list is not None:
self.word_list = word_list
self._build()
elif filepath:
self.word_list = load_str_list(filepath)
self._build()
def _build(self):
if self.UNK_TOKEN not in self.word_list:
self.word_list = [self.UNK_TOKEN] + self.word_list
self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}
# String (word) to integer (index) dict mapping
self.stoi = self.word2idx_dict
# Integer to string (word) reverse mapping
self.itos = self.word_list
self.num_vocab = len(self.word_list)
self.UNK_INDEX = (
self.word2idx_dict[self.UNK_TOKEN]
if self.UNK_TOKEN in self.word2idx_dict
else None
)
self.PAD_INDEX = (
self.word2idx_dict[self.PAD_TOKEN]
if self.PAD_TOKEN in self.word2idx_dict
else None
)
def idx2word(self, n_w):
return self.word_list[n_w]
def token_idx_2_string(self, tokens: Iterable[int]) -> str:
q_string = ""
for token in tokens:
if token != 0:
q_string += self.idx2word(token) + " "
q_string += "?"
return q_string
def __len__(self):
return len(self.word_list)
def get_size(self):
return len(self.word_list)
def get_unk_index(self):
return self.UNK_INDEX
def get_unk_token(self):
return self.UNK_TOKEN
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_INDEX is not None:
return self.UNK_INDEX
else:
raise ValueError(
"word %s not in dictionary \
(while dictionary does not contain <unk>)"
% w
)
def tokenize_and_index(
self,
sentence,
regex=SENTENCE_SPLIT_REGEX,
keep=("'s"),
remove=(",", "?"),
) -> List[int]:
inds = [
self.word2idx(w)
for w in tokenize(sentence, regex=regex, keep=keep, remove=remove)
]
return inds
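# Hedged usage sketch (the word list is illustrative): the <unk> token is
# prepended automatically when missing, so index 0 catches unknown words.
# vocab = VocabDict(word_list=["hello", "world"])
# vocab.word2idx("hello") -> 1
# vocab.word2idx("missing") -> 0 (the UNK_INDEX)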
class VocabFromText(VocabDict):
DEFAULT_TOKENS = [
VocabDict.PAD_TOKEN,
VocabDict.UNK_TOKEN,
VocabDict.START_TOKEN,
VocabDict.END_TOKEN,
]
def __init__(
self,
sentences,
min_count=1,
regex=SENTENCE_SPLIT_REGEX,
keep=(),
remove=(),
only_unk_extra=False,
):
token_counter: typing.Counter[str] = Counter()
for sentence in sentences:
tokens = tokenize(sentence, regex=regex, keep=keep, remove=remove)
token_counter.update(tokens)
token_list = []
for token in token_counter:
if token_counter[token] >= min_count:
token_list.append(token)
extras = self.DEFAULT_TOKENS
if only_unk_extra:
extras = [self.UNK_TOKEN]
super(VocabFromText, self).__init__(word_list=extras + token_list)
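# Hedged usage sketch (the sentences are illustrative): tokens seen fewer than
# min_count times are dropped, and the four DEFAULT_TOKENS are prepended.
# vocab = VocabFromText(["go to the chair", "go to the table"], min_count=2)
# "go", "to" and "the" enter the vocabulary; "chair" and "table" do not.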
def get_action_shortest_path(
sim: "HabitatSim",
source_position: List[float],
source_rotation: List[float],
goal_position: List[float],
success_distance: float = 0.05,
max_episode_steps: int = 500,
) -> List[ShortestPathPoint]:
sim.reset()
sim.set_agent_state(source_position, source_rotation)
follower = ShortestPathFollower(sim, success_distance, False)
shortest_path = []
step_count = 0
action = follower.get_next_action(goal_position)
while (
action is not HabitatSimActions.stop and step_count < max_episode_steps
):
state = sim.get_agent_state()
shortest_path.append(
ShortestPathPoint(
state.position.tolist(),
quaternion_to_list(state.rotation),
action,
)
)
sim.step(action)
step_count += 1
action = follower.get_next_action(goal_position)
if step_count == max_episode_steps:
logger.warning("Shortest path wasn't found.")
return shortest_path
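# Hedged usage sketch (variable names are illustrative, not from this module):
# each returned ShortestPathPoint carries the agent position, rotation and the
# action taken at that step.
# points = get_action_shortest_path(sim, src_pos, src_rot, goal_pos,
# success_distance=0.2, max_episode_steps=500)
# actions = [p.action for p in points]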
def check_and_gen_physics_config():
if os.path.exists(DEFAULT_PHYSICS_CONFIG_PATH):
return
# Config is sourced from
# https://github.com/facebookresearch/habitat-sim/blob/main/data/default.physics_config.json
physics_config = {
"physics_simulator": "bullet",
"timestep": 0.008,
"gravity": [0, -9.8, 0],
"friction_coefficient": 0.4,
"restitution_coefficient": 0.1,
"rigid object paths": ["objects"],
}
with open(DEFAULT_PHYSICS_CONFIG_PATH, "w") as f:
json.dump(physics_config, f)
|
b920ac8ce8aef98ac06565161ae4f48fedfa7164
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/codegen/fcode/scripts/classes_4.py
|
29a24d78af6cc212c2abab54adfbe3823e98a469
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
classes_4.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring, missing-class-docstring
#An example of a class
#$ header class Shape(public)
#$ header method __init__(Shape, double, double)
#$ header method area(Shape) results(double)
#$ header method perimeter(Shape) results(double)
#$ header method describe(Shape,str)
#$ header method authorName(Shape,str)
#$ header method scaleSize(Shape, double)
class Shape:
def __init__(self, x, y):
self.x = x
self.y = y
self.description = "This shape has not been described yet"
self.author = "Nobody has claimed to make this shape yet"
def area(self):
y = self.x * self.y
return y
def perimeter(self):
x = 2 * self.x + 2 * self.y
return x
def describe(self, text):
self.description = text
def authorName(self, text):
self.author = text
def scaleSize(self, scale):
self.x = self.x * scale
self.y = self.y * scale
rectangle = Shape(100., 45.)
#finding the area of your rectangle:
print(rectangle.area())
#finding the perimeter of your rectangle:
print(rectangle.perimeter())
#describing the rectangle
rectangle.describe("A wide rectangle, more than twice as wide as it is tall")
#making the rectangle 50% smaller
rectangle.scaleSize(0.5)
#re-printing the new area of the rectangle
print(rectangle.area())
|
16c26d9cbe7e088af24ab846e69e1e95652eb768
|
68073b5bbec051890bce2cdb0abbf1c7652002ed
|
/src/robotide/lib/robot/utils/robottypes.py
|
2ebb1d002204a21702d1f4e334042629a8d3133f
|
[
"Apache-2.0"
] |
permissive
|
robotframework/RIDE
|
3b6dc9629e34b6f350e154e5f76d106fa48eaaa8
|
ed4d650dbd806672401d4341fecc30274c4972c7
|
refs/heads/master
| 2023-09-05T15:59:01.151700
| 2023-09-02T22:39:16
| 2023-09-02T22:39:16
| 2,467,257
| 897
| 419
|
Apache-2.0
| 2023-09-10T03:43:39
| 2011-09-27T11:53:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
robottypes.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .platform import PY2
if PY2:
from .robottypes2 import (is_bytes, is_dict_like, is_integer, is_list_like,
is_number, is_string, is_unicode, is_tuple, type_name)
unicode = unicode
else:
from .robottypes3 import (is_bytes, is_dict_like, is_integer, is_list_like,
is_number, is_string, is_unicode, is_tuple, type_name)
unicode = str
TRUE_STRINGS = {'TRUE', 'YES', 'ON', '1'}
FALSE_STRINGS = {'FALSE', 'NO', 'OFF', '0', 'NONE', ''}
def is_truthy(item):
"""Returns `True` or `False` depending is the item considered true or not.
Validation rules:
- If the value is a string, it is considered false if it is `'FALSE'`,
`'NO'`, `'OFF'`, `'0'`, `'NONE'` or `''`, case-insensitively.
Considering `'NONE'` false is new in RF 3.0.3 and considering `'OFF'`
and `'0'` false is new in RF 3.1.
- Other strings are considered true.
- Other values are handled by using the standard `bool()` function.
Designed to be used also by external test libraries that want to handle
Boolean values similarly as Robot Framework itself. See also
:func:`is_falsy`.
"""
if is_string(item):
return item.upper() not in FALSE_STRINGS
return bool(item)
def is_falsy(item):
"""Opposite of :func:`is_truthy`."""
return not is_truthy(item)
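# Examples (a sketch of the rules documented above):
# is_truthy('NO') -> False
# is_truthy('yes') -> True
# is_truthy([]) -> False
# is_falsy('') -> True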
|
80a2891951136990874fdc82cde97246db02badd
|
a0447b03ad89a41a5c2e2073e32aeaf4d6279340
|
/ironic/db/api.py
|
8aeda4b2939a15ecf975d2f91a22f53124e9463b
|
[
"Apache-2.0"
] |
permissive
|
openstack/ironic
|
2ae87e36d7a62d44b7ed62cad4e2e294d48e061b
|
ab76ff12e1c3c2208455e917f1a40d4000b4e990
|
refs/heads/master
| 2023-08-31T11:08:34.486456
| 2023-08-31T04:45:05
| 2023-08-31T04:45:05
| 10,066,301
| 411
| 365
|
Apache-2.0
| 2023-07-25T02:05:53
| 2013-05-14T22:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 55,952
|
py
|
api.py
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base classes for storage engines
"""
import abc
from oslo_config import cfg
from oslo_db import api as db_api
_BACKEND_MAPPING = {'sqlalchemy': 'ironic.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_instance():
"""Return a DB API instance."""
return IMPL
class Connection(object, metaclass=abc.ABCMeta):
"""Base class for storage system connections."""
@abc.abstractmethod
def __init__(self):
"""Constructor."""
@abc.abstractmethod
def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get specific columns for matching nodes.
Return a list of the specified columns for all nodes that match the
specified filters.
:param columns: List of column names to return.
Defaults to 'id' column when columns == None.
:param filters: Filters to apply. Defaults to None.
:associated: True | False
:chassis_uuid: uuid of chassis
:conductor_group: conductor group name
:console_enabled: True | False
:description_contains: substring in description
:driver: driver's name
:fault: current fault type
:id: numeric ID
:inspection_started_before:
nodes with inspection_started_at field before this
interval in seconds
:instance_uuid: uuid of instance
:lessee: node's lessee (e.g. project ID)
:maintenance: True | False
:owner: node's owner (e.g. project ID)
:project: either owner or lessee
:reserved: True | False
:reserved_by_any_of: [conductor1, conductor2]
:resource_class: resource class name
:retired: True | False
:shard_in: shard (multiple possibilities)
:provision_state: provision state of node
:provision_state_in:
provision state of node (multiple possibilities)
:provisioned_before:
nodes with provision_updated_at field before this
interval in seconds
:uuid: uuid of node
:uuid_in: uuid of node (multiple possibilities)
:with_power_state: True | False
:param limit: Maximum number of nodes to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def get_node_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None, fields=None):
"""Return a list of nodes.
:param filters: Filters to apply. Defaults to None.
:associated: True | False
:reserved: True | False
:maintenance: True | False
:chassis_uuid: uuid of chassis
:driver: driver's name
:provision_state: provision state of node
:provisioned_before:
nodes with provision_updated_at field before this
interval in seconds
:shard: nodes with the given shard
:param limit: Maximum number of nodes to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:param fields: Comma separated field list to return, to allow for
only specific fields to be returned to have maximum
API performance calls where not all columns are
needed from the database.
"""
@abc.abstractmethod
def check_node_list(self, idents):
"""Check a list of node identities and map it to UUIDs.
This call takes a list of node names and/or UUIDs and tries to convert
them to UUIDs. It fails early if any identities cannot possible be used
as names or UUIDs.
:param idents: List of identities.
:returns: A mapping from requests identities to node UUIDs.
:raises: NodeNotFound if some identities were not found or cannot be
valid names or UUIDs.
"""
@abc.abstractmethod
def reserve_node(self, tag, node_id):
"""Reserve a node.
To prevent other ManagerServices from manipulating the given
Node while a Task is performed, mark it reserved by this host.
:param tag: A string uniquely identifying the reservation holder.
:param node_id: A node id or uuid.
:returns: A Node object.
:raises: NodeNotFound if the node is not found.
:raises: NodeLocked if the node is already reserved.
"""
@abc.abstractmethod
def release_node(self, tag, node_id):
"""Release the reservation on a node.
:param tag: A string uniquely identifying the reservation holder.
:param node_id: A node id or uuid.
:raises: NodeNotFound if the node is not found.
:raises: NodeLocked if the node is reserved by another host.
:raises: NodeNotLocked if the node was found to not have a
reservation at all.
"""
@abc.abstractmethod
def create_node(self, values):
"""Create a new node.
:param values: A dict containing several items used to identify
and track the node, and several dicts which are passed
into the Drivers when managing this node. For example:
::
{
'uuid': uuidutils.generate_uuid(),
'instance_uuid': None,
'power_state': states.POWER_OFF,
'provision_state': states.AVAILABLE,
'driver': 'ipmi',
'driver_info': { ... },
'properties': { ... },
'extra': { ... },
}
:raises: InvalidParameterValue if 'values' contains 'tags' or 'traits'.
:returns: A node.
"""
@abc.abstractmethod
def get_node_by_id(self, node_id):
"""Return a node.
:param node_id: The id of a node.
:returns: A node.
"""
@abc.abstractmethod
def get_node_by_uuid(self, node_uuid):
"""Return a node.
:param node_uuid: The uuid of a node.
:returns: A node.
"""
@abc.abstractmethod
def get_node_by_name(self, node_name):
"""Return a node.
:param node_name: The logical name of a node.
:returns: A node.
"""
@abc.abstractmethod
def get_node_by_instance(self, instance):
"""Return a node.
:param instance: The instance uuid to search for.
:returns: A node.
:raises: InstanceNotFound if the instance is not found.
:raises: InvalidUUID if the instance uuid is invalid.
"""
@abc.abstractmethod
def destroy_node(self, node_id):
"""Destroy a node and its associated resources.
Destroy a node, including any associated ports, port groups,
tags, traits, volume connectors, and volume targets.
:param node_id: The ID or UUID of a node.
"""
@abc.abstractmethod
def update_node(self, node_id, values):
"""Update properties of a node.
:param node_id: The id or uuid of a node.
:param values: Dict of values to update.
May be a partial list, eg. when setting the
properties for a driver. For example:
::
{
'driver_info':
{
'my-field-1': val1,
'my-field-2': val2,
}
}
:returns: A node.
:raises: NodeAssociated
:raises: NodeNotFound
"""
@abc.abstractmethod
def get_port_by_id(self, port_id):
"""Return a network port representation.
:param port_id: The id of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_by_uuid(self, port_uuid):
"""Return a network port representation.
:param port_uuid: The uuid of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_by_address(self, address):
"""Return a network port representation.
:param address: The MAC address of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_by_name(self, port_name):
"""Return a network port representation.
:param port_name: The name of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of ports.
:param limit: Maximum number of ports to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def get_ports_by_shards(self, shards, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of ports contained in the provided shards.
:param shards: A list of shards to filter ports by.
"""
@abc.abstractmethod
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List all the ports for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of ports to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
:returns: A list of ports.
"""
@abc.abstractmethod
def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List all the ports for a given portgroup.
:param portgroup_id: The integer portgroup ID.
:param limit: Maximum number of ports to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: Direction in which results should be sorted
(asc, desc)
:returns: A list of ports.
"""
@abc.abstractmethod
def create_port(self, values):
"""Create a new port.
:param values: Dict of values.
"""
@abc.abstractmethod
def update_port(self, port_id, values):
"""Update properties of an port.
:param port_id: The id or MAC of a port.
:param values: Dict of values to update.
:returns: A port.
"""
@abc.abstractmethod
def destroy_port(self, port_id):
"""Destroy an port.
:param port_id: The id or MAC of a port.
"""
@abc.abstractmethod
def get_portgroup_by_id(self, portgroup_id):
"""Return a network portgroup representation.
:param portgroup_id: The id of a portgroup.
:returns: A portgroup.
:raises: PortgroupNotFound
"""
@abc.abstractmethod
def get_portgroup_by_uuid(self, portgroup_uuid):
"""Return a network portgroup representation.
:param portgroup_uuid: The uuid of a portgroup.
:returns: A portgroup.
:raises: PortgroupNotFound
"""
@abc.abstractmethod
def get_portgroup_by_address(self, address, project=None):
"""Return a network portgroup representation.
:param address: The MAC address of a portgroup.
:param project: A node owner or lessee to filter by.
:returns: A portgroup.
:raises: PortgroupNotFound
"""
@abc.abstractmethod
def get_portgroup_by_name(self, name):
"""Return a network portgroup representation.
:param name: The logical name of a portgroup.
:returns: A portgroup.
:raises: PortgroupNotFound
"""
@abc.abstractmethod
def get_portgroup_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
project=None):
"""Return a list of portgroups.
:param limit: Maximum number of portgroups to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:param project: A node owner or lessee to filter by.
:returns: A list of portgroups.
"""
@abc.abstractmethod
def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None,
project=None):
"""List all the portgroups for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of portgroups to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: Direction in which results should be sorted
(asc, desc)
:param project: A node owner or lessee to filter by.
:returns: A list of portgroups.
"""
@abc.abstractmethod
def create_portgroup(self, values):
"""Create a new portgroup.
:param values: Dict of values with the following keys:
'id'
'uuid'
'name'
'node_id'
'address'
'extra'
'created_at'
'updated_at'
:returns: A portgroup
:raises: PortgroupDuplicateName
:raises: PortgroupMACAlreadyExists
:raises: PortgroupAlreadyExists
"""
@abc.abstractmethod
def update_portgroup(self, portgroup_id, values):
"""Update properties of a portgroup.
:param portgroup_id: The UUID or MAC of a portgroup.
:param values: Dict of values to update.
May contain the following keys:
'uuid'
'name'
'node_id'
'address'
'extra'
'created_at'
'updated_at'
:returns: A portgroup.
:raises: InvalidParameterValue
:raises: PortgroupNotFound
:raises: PortgroupDuplicateName
:raises: PortgroupMACAlreadyExists
"""
@abc.abstractmethod
def destroy_portgroup(self, portgroup_id):
"""Destroy a portgroup.
:param portgroup_id: The UUID or MAC of a portgroup.
:raises: PortgroupNotEmpty
:raises: PortgroupNotFound
"""
@abc.abstractmethod
def create_chassis(self, values):
"""Create a new chassis.
:param values: Dict of values.
"""
@abc.abstractmethod
def get_chassis_by_id(self, chassis_id):
"""Return a chassis representation.
:param chassis_id: The id of a chassis.
:returns: A chassis.
"""
@abc.abstractmethod
def get_chassis_by_uuid(self, chassis_uuid):
"""Return a chassis representation.
:param chassis_uuid: The uuid of a chassis.
:returns: A chassis.
"""
@abc.abstractmethod
def get_chassis_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of chassis.
:param limit: Maximum number of chassis to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def update_chassis(self, chassis_id, values):
"""Update properties of an chassis.
:param chassis_id: The id or the uuid of a chassis.
:param values: Dict of values to update.
:returns: A chassis.
"""
@abc.abstractmethod
def destroy_chassis(self, chassis_id):
"""Destroy a chassis.
:param chassis_id: The id or the uuid of a chassis.
"""
@abc.abstractmethod
def register_conductor(self, values, update_existing=False):
"""Register an active conductor with the cluster.
:param values: A dict of values which must contain the following:
::
{
'hostname': the unique hostname which identifies
this Conductor service.
'drivers': a list of supported drivers.
'version': the version of the object.Conductor
}
:param update_existing: When false, registration will raise an
exception when a conflicting online record
is found. When true, will overwrite the
existing record. Default: False.
:returns: A conductor.
:raises: ConductorAlreadyRegistered
"""
@abc.abstractmethod
def get_conductor_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of conductors.
:param limit: Maximum number of conductors to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def get_conductor(self, hostname, online=True):
"""Retrieve a conductor's service record from the database.
:param hostname: The hostname of the conductor service.
:param online: Specify the filter value on the `online` field when
querying conductors. The ``online`` field is ignored if
this value is set to None.
:returns: A conductor.
:raises: ConductorNotFound if the conductor with given hostname does
not exist or doesn't meet the specified online expectation.
"""
@abc.abstractmethod
def unregister_conductor(self, hostname):
"""Remove this conductor from the service registry immediately.
:param hostname: The hostname of this conductor service.
:raises: ConductorNotFound
"""
@abc.abstractmethod
def touch_conductor(self, hostname):
"""Mark a conductor as active by updating its 'updated_at' property.
:param hostname: The hostname of this conductor service.
:raises: ConductorNotFound
"""
@abc.abstractmethod
def get_active_hardware_type_dict(self, use_groups=False):
"""Retrieve hardware types for the registered and active conductors.
:param use_groups: Whether to factor conductor_group into the keys.
:returns: A dict which maps hardware type names to the set of hosts
which support them. For example:
::
{hardware-type-a: set([host1, host2]),
hardware-type-b: set([host2, host3])}
"""
@abc.abstractmethod
def get_offline_conductors(self, field='hostname'):
"""Get a list conductors that are offline (dead).
:param field: A field to return, hostname by default.
:returns: A list of requested fields of offline conductors.
"""
@abc.abstractmethod
def get_online_conductors(self):
"""Get a list conductor hostnames that are online and active.
:returns: A list of conductor hostnames.
"""
@abc.abstractmethod
def list_conductor_hardware_interfaces(self, conductor_id):
"""List all registered hardware interfaces for a conductor.
:param conductor_id: Database ID of conductor.
:returns: List of ``ConductorHardwareInterfaces`` objects.
"""
@abc.abstractmethod
def list_hardware_type_interfaces(self, hardware_types):
"""List registered hardware interfaces for given hardware types.
This is restricted to only active conductors.
:param hardware_types: list of hardware types to filter by.
:returns: list of ``ConductorHardwareInterfaces`` objects.
"""
@abc.abstractmethod
def register_conductor_hardware_interfaces(self, conductor_id,
hardware_type, interface_type,
interfaces, default_interface):
"""Registers hardware interfaces for a conductor.
:param conductor_id: Database ID of conductor to register for.
:param hardware_type: Name of hardware type for the interfaces.
:param interface_type: Type of interfaces, e.g. 'deploy' or 'boot'.
:param interfaces: List of interface names to register.
:param default_interface: String, the default interface for this
hardware type and interface type.
:raises: ConductorHardwareInterfacesAlreadyRegistered if at least one
of the interfaces in the combination of all parameters is
already registered.
"""
@abc.abstractmethod
def unregister_conductor_hardware_interfaces(self, conductor_id):
"""Unregisters all hardware interfaces for a conductor.
:param conductor_id: Database ID of conductor to unregister for.
"""
@abc.abstractmethod
def touch_node_provisioning(self, node_id):
"""Mark the node's provisioning as running.
Mark the node's provisioning as running by updating its
'provision_updated_at' property.
:param node_id: The id of a node.
:raises: NodeNotFound
"""
@abc.abstractmethod
def set_node_tags(self, node_id, tags):
"""Replace all of the node tags with specified list of tags.
This ignores duplicate tags in the specified list.
:param node_id: The id of a node.
:param tags: List of tags.
:returns: A list of NodeTag objects.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def unset_node_tags(self, node_id):
"""Remove all tags of the node.
:param node_id: The id of a node.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def get_node_tags_by_node_id(self, node_id):
"""Get node tags based on its id.
:param node_id: The id of a node.
:returns: A list of NodeTag objects.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def add_node_tag(self, node_id, tag):
"""Add tag to the node.
If the node_id and tag pair already exists, this should still
succeed.
:param node_id: The id of a node.
:param tag: A tag string.
:returns: the NodeTag object.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def delete_node_tag(self, node_id, tag):
"""Delete specified tag from the node.
:param node_id: The id of a node.
:param tag: A tag string.
:raises: NodeNotFound if the node is not found.
:raises: NodeTagNotFound if the tag is not found.
"""
@abc.abstractmethod
def node_tag_exists(self, node_id, tag):
"""Check if the specified tag exist on the node.
:param node_id: The id of a node.
:param tag: A tag string.
:returns: True if the tag exists otherwise False.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def get_node_by_port_addresses(self, addresses):
"""Find a node by any matching port address.
:param addresses: list of port addresses (e.g. MACs).
:returns: Node object.
:raises: NodeNotFound if none or several nodes are found.
"""
@abc.abstractmethod
def get_volume_connector_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
project=None):
"""Return a list of volume connectors.
:param limit: Maximum number of volume connectors to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:param project: The associated node project to search with.
:returns: A list of :class:`VolumeConnector` objects.
:raises: InvalidParameterValue If sort_key does not exist.
"""
@abc.abstractmethod
def get_volume_connector_by_id(self, db_id):
"""Return a volume connector representation.
:param db_id: The integer database ID of a volume connector.
:returns: A volume connector with the specified ID.
:raises: VolumeConnectorNotFound If a volume connector
with the specified ID is not found.
"""
@abc.abstractmethod
def get_volume_connector_by_uuid(self, connector_uuid):
"""Return a volume connector representation.
:param connector_uuid: The UUID of a connector.
:returns: A volume connector with the specified UUID.
:raises: VolumeConnectorNotFound If a volume connector
with the specified UUID is not found.
"""
@abc.abstractmethod
def get_volume_connectors_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
"""List all the volume connectors for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of volume connectors to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: Direction in which results should be sorted
(asc, desc)
:param project: The associated node project to search with.
:returns: A list of :class:`VolumeConnector` objects.
:raises: InvalidParameterValue If sort_key does not exist.
"""
@abc.abstractmethod
def create_volume_connector(self, connector_info):
"""Create a new volume connector.
:param connector_info: Dictionary containing information about the
connector. Example::
{
'uuid': '000000-..',
'type': 'wwnn',
'connector_id': '00:01:02:03:04:05:06',
'node_id': 2
}
:returns: A volume connector.
:raises: VolumeConnectorTypeAndIdAlreadyExists If a connector
already exists with a matching type and connector_id.
:raises: VolumeConnectorAlreadyExists If a volume connector with
the same UUID already exists.
"""
@abc.abstractmethod
def update_volume_connector(self, ident, connector_info):
"""Update properties of a volume connector.
:param ident: The UUID or integer ID of a volume connector.
:param connector_info: Dictionary containing the information about
connector to update.
:returns: A volume connector.
:raises: VolumeConnectorTypeAndIdAlreadyExists If another
connector already exists with a matching type and
connector_id field.
:raises: VolumeConnectorNotFound If a volume connector
with the specified ident does not exist.
:raises: InvalidParameterValue When a UUID is included in
connector_info.
"""
@abc.abstractmethod
def destroy_volume_connector(self, ident):
"""Destroy a volume connector.
:param ident: The UUID or integer ID of a volume connector.
:raises: VolumeConnectorNotFound If a volume connector
with the specified ident does not exist.
"""
@abc.abstractmethod
def get_volume_target_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
project=None):
"""Return a list of volume targets.
:param limit: Maximum number of volume targets to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:param project: The associated node project to search with.
:returns: A list of :class:`VolumeTarget` objects.
:raises: InvalidParameterValue if sort_key does not exist.
"""
@abc.abstractmethod
def get_volume_target_by_id(self, db_id):
"""Return a volume target representation.
:param db_id: The database primary key (integer) ID of a volume target.
:returns: A volume target.
:raises: VolumeTargetNotFound if no volume target with this ID
exists.
"""
@abc.abstractmethod
def get_volume_target_by_uuid(self, uuid):
"""Return a volume target representation.
:param uuid: The UUID of a volume target.
:returns: A volume target.
:raises: VolumeTargetNotFound if no volume target with this UUID
exists.
"""
@abc.abstractmethod
def get_volume_targets_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
"""List all the volume targets for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of volume targets to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
:param project: The associated node project to search with.
:returns: A list of :class:`VolumeTarget` objects.
:raises: InvalidParameterValue if sort_key does not exist.
"""
@abc.abstractmethod
def get_volume_targets_by_volume_id(self, volume_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
"""List all the volume targets for a given volume id.
:param volume_id: The UUID of the volume.
:param limit: Maximum number of volume targets to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
:returns: A list of volume targets.
:raises: InvalidParameterValue if sort_key does not exist.
"""
@abc.abstractmethod
def create_volume_target(self, target_info):
"""Create a new volume target.
:param target_info: Dictionary containing the information about the
volume target. Example::
{
'uuid': '000000-..',
'node_id': 2,
'boot_index': 0,
'volume_id': '12345678-...'
'volume_type': 'some type',
}
:returns: A volume target.
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same boot index and node ID.
:raises: VolumeTargetAlreadyExists if a volume target with the same
UUID exists.
"""
@abc.abstractmethod
def update_volume_target(self, ident, target_info):
"""Update information for a volume target.
:param ident: The UUID or integer ID of a volume target.
:param target_info: Dictionary containing the information about
volume target to update.
:returns: A volume target.
:raises: InvalidParameterValue if a UUID is included in target_info.
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same boot index and node ID.
:raises: VolumeTargetNotFound if no volume target with this ident
exists.
"""
@abc.abstractmethod
def destroy_volume_target(self, ident):
"""Destroy a volume target.
:param ident: The UUID or integer ID of a volume target.
:raises: VolumeTargetNotFound if a volume target with the specified
ident does not exist.
"""
@abc.abstractmethod
def check_versions(self, ignore_models=()):
"""Checks the whole database for incompatible objects.
This scans all the tables in search of objects that are not supported;
i.e., those that are not specified in
`ironic.common.release_mappings.RELEASE_MAPPING`.
:param ignore_models: List of model names to skip.
:returns: A Boolean. True if all the objects have supported versions;
False otherwise.
"""
@abc.abstractmethod
def update_to_latest_versions(self, context, max_count):
"""Updates objects to their latest known versions.
This scans all the tables and for objects that are not in their latest
version, updates them to that version.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
@abc.abstractmethod
def set_node_traits(self, node_id, traits, version):
"""Replace all of the node traits with specified list of traits.
This ignores duplicate traits in the specified list.
:param node_id: The id of a node.
:param traits: List of traits.
:param version: the version of the object.Trait.
:returns: A list of NodeTrait objects.
:raises: InvalidParameterValue if setting the traits would exceed the
per-node traits limit.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def unset_node_traits(self, node_id):
"""Remove all traits of the node.
:param node_id: The id of a node.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def get_node_traits_by_node_id(self, node_id):
"""Get node traits based on its id.
:param node_id: The id of a node.
:returns: A list of NodeTrait objects.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def add_node_trait(self, node_id, trait, version):
"""Add trait to the node.
If the node_id and trait pair already exists, this should still
succeed.
:param node_id: The id of a node.
:param trait: A trait string.
:param version: the version of the object.Trait.
:returns: the NodeTrait object.
:raises: InvalidParameterValue if adding the trait would exceed the
per-node traits limit.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def delete_node_trait(self, node_id, trait):
"""Delete specified trait from the node.
:param node_id: The id of a node.
:param trait: A trait string.
:raises: NodeNotFound if the node is not found.
:raises: NodeTraitNotFound if the trait is not found.
"""
@abc.abstractmethod
def node_trait_exists(self, node_id, trait):
"""Check if the specified trait exists on the node.
:param node_id: The id of a node.
:param trait: A trait string.
:returns: True if the trait exists otherwise False.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def create_bios_setting_list(self, node_id, settings, version):
"""Create a list of BIOSSetting records for a given node.
:param node_id: The node id.
:param settings: A list of BIOS Settings to be created.
::
[
{
'name': String,
'value': String,
additional settings from BIOS registry
},
{
'name': String,
'value': String,
additional settings from BIOS registry
},
...
]
:param version: the version of the object.BIOSSetting.
:returns: A list of BIOSSetting object.
:raises: NodeNotFound if the node is not found.
:raises: BIOSSettingAlreadyExists if any of the setting records
already exists.
"""
@abc.abstractmethod
def update_bios_setting_list(self, node_id, settings, version):
"""Update a list of BIOSSetting records.
:param node_id: The node id.
:param settings: A list of BIOS Settings to be updated.
::
[
{
'name': String,
'value': String,
additional settings from BIOS registry
},
{
'name': String,
'value': String,
additional settings from BIOS registry
},
...
]
:param version: the version of the object.BIOSSetting.
:returns: A list of BIOSSetting objects.
:raises: NodeNotFound if the node is not found.
:raises: BIOSSettingNotFound if any of the settings is not found.
"""
@abc.abstractmethod
def delete_bios_setting_list(self, node_id, names):
"""Delete a list of BIOS settings.
:param node_id: The node id.
:param names: List of BIOS setting names to be deleted.
:raises: NodeNotFound if the node is not found.
:raises: BIOSSettingNotFound if any of the BIOS setting names is not found.
"""
@abc.abstractmethod
def get_bios_setting(self, node_id, name):
"""Retrieve BIOS setting value.
:param node_id: The node id.
:param name: String containing name of BIOS setting to be retrieved.
:returns: The BIOSSetting object.
:raises: NodeNotFound if the node is not found.
:raises: BIOSSettingNotFound if the BIOS setting is not found.
"""
@abc.abstractmethod
def get_bios_setting_list(self, node_id):
"""Retrieve BIOS settings of a given node.
:param node_id: The node id.
:returns: A list of BIOSSetting objects.
:raises: NodeNotFound if the node is not found.
"""
@abc.abstractmethod
def get_allocation_by_id(self, allocation_id):
"""Return an allocation representation.
:param allocation_id: The id of an allocation.
:returns: An allocation.
:raises: AllocationNotFound
"""
@abc.abstractmethod
def get_allocation_by_uuid(self, allocation_uuid):
"""Return an allocation representation.
:param allocation_uuid: The uuid of an allocation.
:returns: An allocation.
:raises: AllocationNotFound
"""
@abc.abstractmethod
def get_allocation_by_name(self, name):
"""Return an allocation representation.
:param name: The logical name of an allocation.
:returns: An allocation.
:raises: AllocationNotFound
"""
@abc.abstractmethod
def get_allocation_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of allocations.
:param filters: Filters to apply. Defaults to None.
:node_uuid: uuid of node
:state: allocation state
:resource_class: requested resource class
:param limit: Maximum number of allocations to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of allocations.
"""
@abc.abstractmethod
def create_allocation(self, values):
"""Create a new allocation.
:param values: Dict of values to create an allocation with
:returns: An allocation
:raises: AllocationDuplicateName
:raises: AllocationAlreadyExists
"""
@abc.abstractmethod
def update_allocation(self, allocation_id, values, update_node=True):
"""Update properties of an allocation.
:param allocation_id: Allocation ID
:param values: Dict of values to update.
:param update_node: If True and node_id is updated, update the node
with instance_uuid and traits from the allocation
:returns: An allocation.
:raises: AllocationNotFound
:raises: AllocationDuplicateName
:raises: InstanceAssociated
:raises: NodeAssociated
"""
@abc.abstractmethod
def take_over_allocation(self, allocation_id, old_conductor_id,
new_conductor_id):
"""Do a take over for an allocation.
The allocation is only updated if the old conductor matches the
provided value, thus guarding against races.
:param allocation_id: Allocation ID
:param old_conductor_id: The conductor ID we expect to be the current
``conductor_affinity`` of the allocation.
:param new_conductor_id: The conductor ID of the new
``conductor_affinity``.
:returns: True if the take over was successful, False otherwise.
:raises: AllocationNotFound
"""
@abc.abstractmethod
def destroy_allocation(self, allocation_id):
"""Destroy an allocation.
:param allocation_id: Allocation ID
:raises: AllocationNotFound
"""
@abc.abstractmethod
def create_deploy_template(self, values):
"""Create a deployment template.
:param values: A dict describing the deployment template. For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'CUSTOM_DT1',
}
:raises: DeployTemplateDuplicateName if a deploy template with the same
name exists.
:raises: DeployTemplateAlreadyExists if a deploy template with the same
UUID exists.
:returns: A deploy template.
"""
@abc.abstractmethod
def update_deploy_template(self, template_id, values):
"""Update a deployment template.
:param template_id: ID of the deployment template to update.
:param values: A dict describing the deployment template. For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'CUSTOM_DT1',
}
:raises: DeployTemplateDuplicateName if a deploy template with the same
name exists.
:raises: DeployTemplateNotFound if the deploy template does not exist.
:returns: A deploy template.
"""
@abc.abstractmethod
def destroy_deploy_template(self, template_id):
"""Destroy a deployment template.
:param template_id: ID of the deployment template to destroy.
:raises: DeployTemplateNotFound if the deploy template does not exist.
"""
@abc.abstractmethod
def get_deploy_template_by_id(self, template_id):
"""Retrieve a deployment template by ID.
:param template_id: ID of the deployment template to retrieve.
:raises: DeployTemplateNotFound if the deploy template does not exist.
:returns: A deploy template.
"""
@abc.abstractmethod
def get_deploy_template_by_uuid(self, template_uuid):
"""Retrieve a deployment template by UUID.
:param template_uuid: UUID of the deployment template to retrieve.
:raises: DeployTemplateNotFound if the deploy template does not exist.
:returns: A deploy template.
"""
@abc.abstractmethod
def get_deploy_template_by_name(self, template_name):
"""Retrieve a deployment template by name.
:param template_name: name of the deployment template to retrieve.
:raises: DeployTemplateNotFound if the deploy template does not exist.
:returns: A deploy template.
"""
@abc.abstractmethod
def get_deploy_template_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Retrieve a list of deployment templates.
:param limit: Maximum number of deploy templates to return.
:param marker: The last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
:returns: A list of deploy templates.
"""
@abc.abstractmethod
def get_deploy_template_list_by_names(self, names):
"""Return a list of deployment templates with one of a list of names.
:param names: List of names to filter by.
:returns: A list of deploy templates.
"""
@abc.abstractmethod
def create_node_history(self, values):
"""Create a new history record.
:param values: Dict of values.
"""
@abc.abstractmethod
def destroy_node_history_by_uuid(self, history_uuid):
"""Destroy a history record.
:param history_uuid: The uuid of a history record
"""
@abc.abstractmethod
def get_node_history_by_id(self, history_id):
"""Return a node history representation.
:param history_id: The id of a history record.
:returns: A history.
"""
@abc.abstractmethod
def get_node_history_by_uuid(self, history_uuid):
"""Return a node history representation.
:param history_uuid: The uuid of a history record
:returns: A history.
"""
@abc.abstractmethod
def get_node_history_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of node history records
:param limit: Maximum number of history records to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def get_node_history_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List all the history records for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of history records to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
:returns: A list of histories.
"""
@abc.abstractmethod
def query_node_history_records_for_purge(self, conductor_id):
"""Utility method to identify nodes to clean history records for.
:param conductor_id: Id value for the conductor to perform this
query on behalf of.
:returns: A dictionary with key values of node database ID values
and a list of values associated with the node.
"""
@abc.abstractmethod
def bulk_delete_node_history_records(self, node_id, limit):
"""Utility method to bulk delete node history entries.
:param entries: A list of node history entry ids to be
queried for deletion.
"""
@abc.abstractmethod
def count_nodes_in_provision_state(self, state):
"""Count the number of nodes in given provision state.
:param state: A provision_state value to match for the
count operation. This can be a single provision
state value or a list of values.
"""
@abc.abstractmethod
def create_node_inventory(self, values):
"""Create a new inventory record.
:param values: Dict of values.
"""
@abc.abstractmethod
def destroy_node_inventory_by_node_id(self, inventory_node_id):
"""Destroy a inventory record.
:param inventory_uuid: The uuid of a inventory record
"""
@abc.abstractmethod
def get_node_inventory_by_node_id(self, node_id):
"""Get the node inventory for a given node.
:param node_id: The integer node ID.
:returns: An inventory of a node.
"""
@abc.abstractmethod
def get_shard_list(self):
"""Retrieve a list of shards.
:returns: list of dicts containing shard names and count
"""
@abc.abstractmethod
def create_firmware_component(self, values):
"""Create a FirmwareComponent record for a given node.
:param values: a dictionary with the necessary information to create
a FirmwareComponent.
::
{
'component': String,
'initial_version': String,
'current_version': String,
'last_version_flashed': String
}
:returns: A FirmwareComponent object.
:raises: FirmwareComponentAlreadyExists if any of the component
records already exists.
"""
@abc.abstractmethod
def update_firmware_component(self, node_id, component, values):
"""Update a FirmwareComponent record.
:param node_id: The node id.
:param component: The component of the node to update.
:param values: A dictionary with the new information about the
FirmwareComponent.
::
{
'current_version': String,
'last_version_flashed': String
}
:returns: A FirmwareComponent object.
:raises: FirmwareComponentNotFound if the component
is not found.
"""
@abc.abstractmethod
def get_firmware_component(self, node_id, name):
"""Retrieve Firmware Component.
:param node_id: The node id.
:param name: name of Firmware component.
:returns: The FirmwareComponent object.
:raises: NodeNotFound if the node is not found.
:raises: FirmwareComponentNotFound if the Firmware component
is not found.
"""
@abc.abstractmethod
def get_firmware_component_list(self, node_id):
"""Retrieve a list Firmware Components of a given node.
:param node_id: The node id.
:returns: A list of FirmwareComponent objects.
:raises: NodeNotFound if the node is not found.
"""
|
1077291b0c5eb9ee57abb9ea4244293ee7888985
|
55540f3e86f1d5d86ef6b5d295a63518e274efe3
|
/toolchain/riscv/MSYS/python/Lib/idlelib/idle_test/test_tooltip.py
|
a4dbc48ae9976f9dd23be49a81c738890eab606d
|
[
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
bouffalolab/bl_iot_sdk
|
bc5eaf036b70f8c65dd389439062b169f8d09daa
|
b90664de0bd4c1897a9f1f5d9e360a9631d38b34
|
refs/heads/master
| 2023-08-31T03:38:03.369853
| 2023-08-16T08:50:33
| 2023-08-18T09:13:27
| 307,347,250
| 244
| 101
|
Apache-2.0
| 2023-08-28T06:29:02
| 2020-10-26T11:16:30
|
C
|
UTF-8
|
Python
| false
| false
| 5,546
|
py
|
test_tooltip.py
|
"""Test tooltip, coverage 100%.
Coverage is 100% after excluding 6 lines with "# pragma: no cover".
They involve TclErrors that either should or should not happen in a
particular situation, and which are 'pass'ed if they do.
"""
from idlelib.tooltip import TooltipBase, Hovertip
from test.support import requires
requires('gui')
from functools import wraps
import time
from tkinter import Button, Tk, Toplevel
import unittest
def setUpModule():
global root
root = Tk()
def tearDownModule():
global root
root.update_idletasks()
root.destroy()
del root
def add_call_counting(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
wrapped_func.call_args_list.append((args, kwargs))
return func(*args, **kwargs)
wrapped_func.call_args_list = []
return wrapped_func
def _make_top_and_button(testobj):
global root
top = Toplevel(root)
testobj.addCleanup(top.destroy)
top.title("Test tooltip")
button = Button(top, text='ToolTip test button')
button.pack()
testobj.addCleanup(button.destroy)
top.lift()
return top, button
class ToolTipBaseTest(unittest.TestCase):
def setUp(self):
self.top, self.button = _make_top_and_button(self)
def test_base_class_is_unusable(self):
global root
top = Toplevel(root)
self.addCleanup(top.destroy)
button = Button(top, text='ToolTip test button')
button.pack()
self.addCleanup(button.destroy)
with self.assertRaises(NotImplementedError):
tooltip = TooltipBase(button)
tooltip.showtip()
class HovertipTest(unittest.TestCase):
def setUp(self):
self.top, self.button = _make_top_and_button(self)
def is_tipwindow_shown(self, tooltip):
return tooltip.tipwindow and tooltip.tipwindow.winfo_viewable()
def test_showtip(self):
tooltip = Hovertip(self.button, 'ToolTip text')
self.addCleanup(tooltip.hidetip)
self.assertFalse(self.is_tipwindow_shown(tooltip))
tooltip.showtip()
self.assertTrue(self.is_tipwindow_shown(tooltip))
def test_showtip_twice(self):
tooltip = Hovertip(self.button, 'ToolTip text')
self.addCleanup(tooltip.hidetip)
self.assertFalse(self.is_tipwindow_shown(tooltip))
tooltip.showtip()
self.assertTrue(self.is_tipwindow_shown(tooltip))
orig_tipwindow = tooltip.tipwindow
tooltip.showtip()
self.assertTrue(self.is_tipwindow_shown(tooltip))
self.assertIs(tooltip.tipwindow, orig_tipwindow)
def test_hidetip(self):
tooltip = Hovertip(self.button, 'ToolTip text')
self.addCleanup(tooltip.hidetip)
tooltip.showtip()
tooltip.hidetip()
self.assertFalse(self.is_tipwindow_shown(tooltip))
def test_showtip_on_mouse_enter_no_delay(self):
tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)
self.addCleanup(tooltip.hidetip)
tooltip.showtip = add_call_counting(tooltip.showtip)
root.update()
self.assertFalse(self.is_tipwindow_shown(tooltip))
self.button.event_generate('<Enter>', x=0, y=0)
root.update()
self.assertTrue(self.is_tipwindow_shown(tooltip))
self.assertGreater(len(tooltip.showtip.call_args_list), 0)
def test_hover_with_delay(self):
# Run multiple tests requiring an actual delay simultaneously.
# Test #1: A hover tip with a non-zero delay appears after the delay.
tooltip1 = Hovertip(self.button, 'ToolTip text', hover_delay=100)
self.addCleanup(tooltip1.hidetip)
tooltip1.showtip = add_call_counting(tooltip1.showtip)
root.update()
self.assertFalse(self.is_tipwindow_shown(tooltip1))
self.button.event_generate('<Enter>', x=0, y=0)
root.update()
self.assertFalse(self.is_tipwindow_shown(tooltip1))
# Test #2: A hover tip with a non-zero delay doesn't appear when
# the mouse stops hovering over the base widget before the delay
# expires.
tooltip2 = Hovertip(self.button, 'ToolTip text', hover_delay=100)
self.addCleanup(tooltip2.hidetip)
tooltip2.showtip = add_call_counting(tooltip2.showtip)
root.update()
self.button.event_generate('<Enter>', x=0, y=0)
root.update()
self.button.event_generate('<Leave>', x=0, y=0)
root.update()
time.sleep(0.15)
root.update()
# Test #1 assertions.
self.assertTrue(self.is_tipwindow_shown(tooltip1))
self.assertGreater(len(tooltip1.showtip.call_args_list), 0)
# Test #2 assertions.
self.assertFalse(self.is_tipwindow_shown(tooltip2))
self.assertEqual(tooltip2.showtip.call_args_list, [])
def test_hidetip_on_mouse_leave(self):
tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)
self.addCleanup(tooltip.hidetip)
tooltip.showtip = add_call_counting(tooltip.showtip)
root.update()
self.button.event_generate('<Enter>', x=0, y=0)
root.update()
self.button.event_generate('<Leave>', x=0, y=0)
root.update()
self.assertFalse(self.is_tipwindow_shown(tooltip))
self.assertGreater(len(tooltip.showtip.call_args_list), 0)
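# --- Illustrative sketch (not part of the stdlib test module) ---
# Attaching a Hovertip in application code, using the same idlelib.tooltip API
# exercised by the tests above; the button label and the 500 ms delay are
# arbitrary choices for the example.
def _hovertip_demo():  # pragma: no cover
    demo_root = Tk()
    demo_button = Button(demo_root, text='Save')
    demo_button.pack()
    Hovertip(demo_button, 'Write the current file to disk', hover_delay=500)
    demo_root.mainloop()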
if __name__ == '__main__':
unittest.main(verbosity=2)
|
6552589ace2b100e6672f165f07dae5d414a62dd
|
27b86f422246a78704e0e84983b2630533a47db6
|
/tests/test_05_tools/test_530_acis_sab.py
|
4b1675a23001b5aeffeac14503381895cc015163
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
test_530_acis_sab.py
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import pytest
from datetime import datetime
from ezdxf.acis import sab
T = sab.Tags
def test_decode_header(cube_sab):
decoder = sab.Decoder(cube_sab)
header = decoder.read_header()
assert header.version == 21800
assert header.n_records == 0
assert header.n_entities == 2
assert header.flags == 12
assert header.product_id == "Open Design Alliance ACIS Builder"
assert header.acis_version == "ACIS 218.00 NT"
assert header.creation_date == datetime(2022, 5, 2, 5, 33, 25)
assert header.units_in_mm == 1.0
def test_encode_header(cube_sab):
decoder = sab.Decoder(cube_sab)
header = decoder.read_header()
data = header.dumpb()
assert data == cube_sab[:len(data)]
def test_decode_first_record(cube_sab):
decoder = sab.Decoder(cube_sab)
_ = decoder.read_header()
record = decoder.read_record()
assert record == [
(T.ENTITY_TYPE, "asmheader"),
(T.POINTER, -1),
(T.INT, -1),
(T.STR, "208.0.4.7009"),
]
def test_decode_all_records(cube_sab):
decoder = sab.Decoder(cube_sab)
_ = decoder.read_header()
records = list(decoder.read_records())
assert len(records) == 116
assert records[-1][0].value == "End-of-ASM-data"
def test_parse_sab(cube_sab):
builder = sab.parse_sab(cube_sab)
assert builder.header.version == 21800
assert len(builder.entities) == 116
assert builder.entities[0].name == "asmheader"
assert builder.entities[-1].name == "End-of-ASM-data"
class TestSabEntity:
@pytest.fixture(scope="class")
def builder(self, cube_sab):
return sab.parse_sab(cube_sab)
@pytest.fixture(scope="class")
def body(self, builder):
return builder.bodies[0]
def test_get_pointer_at_index(self, body):
assert body.name == "body"
assert body.attributes.is_null_ptr is False
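# --- Illustrative helper (not part of the original test module) ---
# Parsing SAB data read from disk with the same entry point used above;
# "cube.sab" is a hypothetical path chosen for the example.
def load_sab_builder(path="cube.sab"):
    with open(path, "rb") as fp:
        return sab.parse_sab(fp.read())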
if __name__ == "__main__":
pytest.main([__file__])
|
22dce54ad5a868ff7635c1fa7ead779069bc79b0
|
e93097f04bb3e49c1c6b326ccf91a59ab2a158e7
|
/petlib/cipher.py
|
d68f8eda1527d0ac2b909bfde2da2ce95e791284
|
[
"BSD-2-Clause"
] |
permissive
|
gdanezis/petlib
|
ce7a4a241e3a72492cfe4af8a5406c5ecd71a62a
|
7aafddb46f5e643c0a1addb1067acc3dc74ce59d
|
refs/heads/master
| 2022-11-06T04:28:44.094953
| 2021-05-11T13:58:48
| 2021-05-11T13:58:48
| 27,001,491
| 123
| 41
|
BSD-2-Clause
| 2022-01-25T12:30:56
| 2014-11-22T14:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 18,373
|
py
|
cipher.py
|
from .bindings import _FFI, _C, _OPENSSL_VERSION, OpenSSLVersion
import pytest
def _check(return_val):
"""Checks the return code of the C calls"""
if isinstance(return_val, int) and return_val == 1:
return
if isinstance(return_val, bool) and return_val == True:
return
raise Exception(
"Cipher exception: Unknown type %s or value %s" %
(str(
type(return_val)),
str(return_val)))
class Cipher(object):
"""A class representing a symmetric cipher and mode.
Example:
An example of encryption and decryption using AES in counter mode.
>>> from os import urandom
>>> aes = Cipher("AES-128-CTR") # Init AES in Counter mode
>>> key = urandom(16)
>>> iv = urandom(16)
>>>
>>> # Get a CipherOperation object for encryption
>>> enc = aes.enc(key, iv)
>>> ref = b"Hello World"
>>> ciphertext = enc.update(ref)
>>> ciphertext += enc.finalize()
>>>
>>> # Get a CipherOperation object for decryption
>>> dec = aes.dec(key, iv)
>>> plaintext = dec.update(ciphertext)
>>> plaintext += dec.finalize()
>>> plaintext == ref # Check resulting plaintext matches the reference one.
True
"""
__slots__ = ["alg", "gcm"]
def __init__(self, name, _alg=None):
"""Initialize the cipher by name."""
if _alg:
self.alg = _alg
self.gcm = True
return
else:
self.alg = _C.EVP_get_cipherbyname(name.encode("utf8"))
self.gcm = False
if self.alg == _FFI.NULL:
raise Exception("Unknown cipher: %s" % name)
if "gcm" in name.lower():
self.gcm = True
if "ccm" in name.lower():
raise Exception("CCM mode not supported")
def len_IV(self):
"""Return the Initialization Vector length in bytes."""
if _OPENSSL_VERSION == OpenSSLVersion.V1_0:
return int(self.alg.iv_len)
else:
return int(_C.EVP_CIPHER_iv_length(self.alg))
def len_key(self):
"""Return the secret key length in bytes."""
if _OPENSSL_VERSION == OpenSSLVersion.V1_0:
return int(self.alg.key_len)
else:
return int(_C.EVP_CIPHER_key_length(self.alg))
def len_block(self):
"""Return the block size in bytes."""
if _OPENSSL_VERSION == OpenSSLVersion.V1_0:
return int(self.alg.block_size)
else:
return int(_C.EVP_CIPHER_block_size(self.alg))
def get_nid(self):
"""Return the OpenSSL nid of the cipher and mode."""
if _OPENSSL_VERSION == OpenSSLVersion.V1_0:
return int(self.alg.nid)
else:
return int(_C.EVP_CIPHER_nid(self.alg))
def op(self, key, iv, enc=1):
"""Initializes a cipher operation, either encrypt or decrypt
and returns a CipherOperation object
Args:
key (str): the block cipher symmetric key. Length depends on block cipher choice.
iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
enc (int): set to 1 to perform encryption, or 0 to perform decryption.
"""
c_op = CipherOperation(enc)
_check(len(key) == self.len_key())
_check(enc in [0, 1])
if not self.gcm:
_check(len(iv) == self.len_IV())
_check(_C.EVP_CipherInit_ex(c_op.ctx,
self.alg, _FFI.NULL, key, iv, enc))
else:
_check(_C.EVP_CipherInit_ex(c_op.ctx,
self.alg, _FFI.NULL, _FFI.NULL, _FFI.NULL, enc))
# assert len(iv) <= self.len_block()
_check(_C.EVP_CIPHER_CTX_ctrl(c_op.ctx,
_C.EVP_CTRL_GCM_SET_IVLEN, len(iv), _FFI.NULL))
_C.EVP_CIPHER_CTX_ctrl(
c_op.ctx, _C.EVP_CTRL_GCM_SET_IV_FIXED, -1, iv)
_C.EVP_CIPHER_CTX_ctrl(c_op.ctx, _C.EVP_CTRL_GCM_IV_GEN, 0, iv)
_check(_C.EVP_CipherInit_ex(c_op.ctx,
_FFI.NULL, _FFI.NULL, key, iv, enc))
c_op.cipher = self
return c_op
def enc(self, key, iv):
"""Initializes an encryption engine with the cipher with a specific key and Initialization Vector (IV).
Returns the CipherOperation engine.
Args:
key (str): the block cipher symmetric key. Length depends on block cipher choice.
iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
"""
return self.op(key, iv, enc=1)
def dec(self, key, iv):
"""Initializes a decryption engine with the cipher with a specific key and Initialization Vector (IV).
Returns the CipherOperation engine.
Args:
key (str): the block cipher symmetric key. Length depends on block cipher choice.
iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
"""
return self.op(key, iv, enc=0)
def __del__(self):
pass
# --------- AES GCM special functions ---------------
@staticmethod
def aes_128_gcm():
"""Returns a pre-initalized AES-GCM cipher with 128 bits key size"""
return Cipher(None, _C.EVP_aes_128_gcm())
@staticmethod
def aes_192_gcm():
"""Returns a pre-initalized AES-GCM cipher with 192 bits key size"""
return Cipher(None, _C.EVP_aes_192_gcm())
@staticmethod
def aes_256_gcm():
"""Returns a pre-initalized AES-GCM cipher with 256 bits key size"""
return Cipher(None, _C.EVP_aes_256_gcm())
def quick_gcm_enc(self, key, iv, msg, assoc=None, tagl=16):
"""One operation GCM encryption.
Args:
key (str): the AES symmetric key. Length depends on block cipher choice.
iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
msg (str): the message to encrypt.
assoc (str): associated data that will be integrity protected, but not encrypted.
tagl (int): the length of the tag, up to the block length.
Example:
Use of `quick_gcm_enc` and `quick_gcm_dec` for AES-GCM operations.
>>> from os import urandom # Secure OS random source
>>> aes = Cipher("aes-128-gcm") # Initialize AES-GCM with 128 bit keys
>>> iv = urandom(16)
>>> key = urandom(16)
>>> # Encryption using AES-GCM returns a ciphertext and a tag
>>> ciphertext, tag = aes.quick_gcm_enc(key, iv, b"Hello")
>>> # Decryption using AES-GCM
>>> p = aes.quick_gcm_dec(key, iv, ciphertext, tag)
>>> assert p == b'Hello'
"""
enc = self.enc(key, iv)
if assoc:
enc.update_associated(assoc)
ciphertext = enc.update(msg)
ciphertext += enc.finalize()
tag = enc.get_tag(tagl)
return (ciphertext, tag)
def quick_gcm_dec(self, key, iv, cip, tag, assoc=None):
"""One operation GCM decrypt. See usage example in "quick_gcm_enc".
Throws an exception on failure of decryption
Args:
key (str): the AES symmetric key. Length depends on block cipher choice.
iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
cip (str): the ciphertext to decrypt.
tag (str): the integrity tag.
assoc (str): associated data that will be integrity protected, but not encrypted.
"""
dec = self.dec(key, iv)
if assoc:
dec.update_associated(assoc)
dec.set_tag(tag)
plain = dec.update(cip)
try:
plain += dec.finalize()
except BaseException:
raise Exception("Cipher: decryption failed.")
return plain
class CipherOperation(object):
__slots__ = ["ctx", "cipher", "xenc"]
def __init__(self, xenc):
self.ctx = _C.EVP_CIPHER_CTX_new()
self.cipher = None
self.xenc = xenc
def update(self, data):
"""Processes some data, and returns a partial result."""
block_len = self.cipher.len_block()
alloc_len = len(data) + block_len + 1
outl = _FFI.new("int *")
outl[0] = alloc_len
out = _FFI.new("unsigned char[]", alloc_len)
_check(_C.EVP_CipherUpdate(self.ctx, out, outl, data, len(data)))
ret = bytes(_FFI.buffer(out)[:int(outl[0])])
return ret
def finalize(self):
"""Finalizes the operation and may return some additional data.
Throws an exception if the authenticator tag is different from the expected value.
Example:
Example of the exception thrown when an invalid tag is provided.
>>> from os import urandom
>>> aes = Cipher.aes_128_gcm() # Define an AES-GCM cipher
>>> iv = urandom(16)
>>> key = urandom(16)
>>> ciphertext, tag = aes.quick_gcm_enc(key, iv, b"Hello")
>>>
>>> dec = aes.dec(key, iv) # Get a decryption CipherOperation
>>> dec.set_tag(urandom(len(tag))) # Provide an invalid tag.
>>> plaintext = dec.update(ciphertext) # Feed in the ciphertext for decryption.
>>> try:
... dec.finalize() # Check and Finalize.
... except:
... print("Failure")
Failure
Throws an exception since integrity check fails due to the invalid tag.
"""
block_len = self.cipher.len_block()
alloc_len = block_len
outl = _FFI.new("int *")
outl[0] = alloc_len
out = _FFI.new("unsigned char[]", alloc_len)
try:
_check(_C.EVP_CipherFinal_ex(self.ctx, out, outl))
if outl[0] == 0:
return b''
ret = bytes(_FFI.buffer(out)[:int(outl[0])])
return ret
except BaseException:
raise Exception("Cipher: decryption failed.")
def update_associated(self, data):
"""Processes some GCM associated data, and returns nothing."""
if self.xenc == 0:
self.set_tag(b"\00" * 16)
outl = _FFI.new("int *")
_check(_C.EVP_CipherUpdate(self.ctx, _FFI.NULL, outl, data, len(data)))
_check(outl[0] == len(data))
def get_tag(self, tag_len=16):
"""Get the GCM authentication tag. Execute after finalizing the encryption.
Example:
AES-GCM encryption usage:
>>> from os import urandom
>>> aes = Cipher.aes_128_gcm() # Initialize AES cipher
>>> key = urandom(16)
>>> iv = urandom(16)
>>> enc = aes.enc(key, iv) # Get an encryption CipherOperation
>>> enc.update_associated(b"Hello") # Include some associated data
>>> ciphertext = enc.update(b"World!") # Include some plaintext
>>> nothing = enc.finalize() # Finalize
>>> tag = enc.get_tag(16) # Get the AES-GCM tag
"""
tag = _FFI.new("unsigned char []", tag_len)
ret = _C.EVP_CIPHER_CTX_ctrl(
self.ctx, _C.EVP_CTRL_GCM_GET_TAG, tag_len, tag)
_check(ret)
s = bytes(_FFI.buffer(tag)[:])
return s
def set_tag(self, tag):
"""Specify the GCM authenticator tag. Must be done before finalizing decryption
Example:
AES-GCM decryption and check:
>>> aes = Cipher.aes_128_gcm() # Define an AES-GCM cipher
>>> ciphertext, tag = (b'dV\\xb9:\\xd0\\xbe', b'pA\\xbe?\\xfc\\xd1&\\x03\\x1438\\xc5\\xf8In\\xaa')
>>> dec = aes.dec(key=b"A"*16, iv=b"A"*16) # Get a decryption CipherOperation
>>> dec.update_associated(b"Hello") # Feed in the non-secret assciated data.
>>> plaintext = dec.update(ciphertext) # Feed in the ciphertext for decryption.
>>> dec.set_tag(tag) # Provide the AES-GCM tag for integrity.
>>> nothing = dec.finalize() # Check and finalize.
>>> assert plaintext == b'World!'
"""
_check(
_C.EVP_CIPHER_CTX_ctrl(
self.ctx,
_C.EVP_CTRL_GCM_SET_TAG,
len(tag),
tag))
def __del__(self):
_C.EVP_CIPHER_CTX_free(self.ctx)
# When testing ignore extra variables
# pylint: disable=unused-variable,redefined-outer-name
def test_aes_init():
aes = Cipher("AES-128-CBC")
assert aes.alg != _FFI.NULL
assert aes.len_IV() == 16
assert aes.len_block() == 16
assert aes.len_key() == 16
assert aes.get_nid() == 419
del aes
def test_errors():
with pytest.raises(Exception) as excinfo:
aes = Cipher("AES-128-XXF")
assert 'Unknown' in str(excinfo.value)
def test_aes_enc():
aes = Cipher("AES-128-CBC")
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
ref = b"Hello World" * 10000
ciphertext = enc.update(ref)
ciphertext += enc.finalize()
dec = aes.op(key=b"A" * 16, iv=b"A" * 16, enc=0)
plaintext = dec.update(ciphertext)
plaintext += dec.finalize()
assert plaintext == ref
def test_aes_ctr():
aes = Cipher("AES-128-CTR")
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
ref = b"Hello World" * 10000
ciphertext = enc.update(ref)
ciphertext += enc.finalize()
dec = aes.op(key=b"A" * 16, iv=b"A" * 16, enc=0)
plaintext = dec.update(ciphertext)
plaintext += dec.finalize()
assert plaintext == ref
def test_aes_ops():
aes = Cipher("AES-128-CTR")
enc = aes.enc(key=b"A" * 16, iv=b"A" * 16)
ref = b"Hello World" * 10000
ciphertext = enc.update(ref)
ciphertext += enc.finalize()
dec = aes.dec(key=b"A" * 16, iv=b"A" * 16)
plaintext = dec.update(ciphertext)
plaintext += dec.finalize()
assert plaintext == ref
def test_aes_gcm_encrypt():
aes = Cipher.aes_128_gcm()
assert aes.gcm
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
assert c2 == b''
tag = enc.get_tag(16)
assert len(tag) == 16
def test_aes_gcm_encrypt_192():
aes = Cipher.aes_192_gcm()
assert aes.gcm
enc = aes.op(key=b"A" * 24, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
assert c2 == b''
tag = enc.get_tag(16)
assert len(tag) == 16
def test_aes_gcm_encrypt_256():
aes = Cipher.aes_256_gcm()
assert aes.gcm
enc = aes.op(key=b"A" * 32, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
assert c2 == b''
tag = enc.get_tag(16)
assert len(tag) == 16
@pytest.fixture
def aesenc():
aes = Cipher.aes_128_gcm()
assert aes.gcm
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
assert c2 == b''
tag = enc.get_tag(16)
assert len(tag) == 16
return (aes, enc, ciphertext, tag)
def test_gcm_dec(aesenc):
aes, enc, ciphertext, tag = aesenc
dec = aes.dec(key=b"A" * 16, iv=b"A" * 16)
dec.update_associated(b"Hello")
plaintext = dec.update(ciphertext)
dec.set_tag(tag)
dec.finalize()
assert plaintext == b"World!"
def test_gcm_dec_badassoc(aesenc):
aes, enc, ciphertext, tag = aesenc
dec = aes.dec(key=b"A" * 16, iv=b"A" * 16)
dec.update_associated(b"H4llo")
plaintext = dec.update(ciphertext)
dec.set_tag(tag)
with pytest.raises(Exception) as excinfo:
dec.finalize()
assert "Cipher" in str(excinfo.value)
def test_gcm_dec_badkey(aesenc):
aes, enc, ciphertext, tag = aesenc
dec = aes.dec(key=b"B" * 16, iv=b"A" * 16)
dec.update_associated(b"Hello")
plaintext = dec.update(ciphertext)
dec.set_tag(tag)
with pytest.raises(Exception) as excinfo:
dec.finalize()
assert "Cipher" in str(excinfo.value)
def test_gcm_dec_badiv(aesenc):
aes, enc, ciphertext, tag = aesenc
dec = aes.dec(key=b"A" * 16, iv=b"B" * 16)
dec.update_associated(b"Hello")
plaintext = dec.update(ciphertext)
dec.set_tag(tag)
with pytest.raises(Exception) as excinfo:
dec.finalize()
assert "Cipher" in str(excinfo.value)
def test_aes_gcm_byname():
aes = Cipher("aes-128-gcm")
assert aes.gcm
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
assert c2 == b''
tag = enc.get_tag(16)
assert len(tag) == 16
dec = aes.dec(key=b"A" * 16, iv=b"A" * 16)
dec.update_associated(b"Hello")
plaintext = dec.update(ciphertext)
dec.set_tag(tag)
dec.finalize()
assert plaintext == b"World!"
def test_aes_gcm_different_IV():
aes = Cipher("aes-128-gcm")
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext = enc.update(b"World!")
c2 = enc.finalize()
tag = enc.get_tag(16)
enc = aes.op(key=b"A" * 16, iv=b"A" * 16)
enc.update_associated(b"Hello")
ciphertext2 = enc.update(b"World!")
c2 = enc.finalize()
tag2 = enc.get_tag(16)
enc = aes.op(key=b"A" * 16, iv=b"B" * 16)
enc.update_associated(b"Hello")
ciphertext3 = enc.update(b"World!")
c2 = enc.finalize()
tag3 = enc.get_tag(16)
assert ciphertext == ciphertext2
assert ciphertext != ciphertext3
def test_quick():
aes = Cipher("aes-128-gcm")
c, t = aes.quick_gcm_enc(b"A" * 16, b"A" * 16, b"Hello")
p = aes.quick_gcm_dec(b"A" * 16, b"A" * 16, c, t)
assert p == b"Hello"
def test_quick_assoc():
aes = Cipher("aes-128-gcm")
c, t = aes.quick_gcm_enc(b"A" * 16, b"A" * 16, b"Hello", assoc=b"blah")
p = aes.quick_gcm_dec(b"A" * 16, b"A" * 16, c, t, assoc=b"blah")
assert p == b"Hello"
# pylint: enable=unused-variable,redefined-outer-name
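# --- Illustrative sketch (appended example, not part of the original module) ---
# Feeding data to a CipherOperation in several update() calls instead of one;
# the key, IV and chunk size below are arbitrary choices for the example.
def example_chunked_ctr_roundtrip():
    aes = Cipher("AES-128-CTR")
    key, iv = b"K" * 16, b"I" * 16
    data = b"0123456789" * 1000
    enc = aes.enc(key, iv)
    ciphertext = b"".join(enc.update(data[i:i + 4096])
                          for i in range(0, len(data), 4096))
    ciphertext += enc.finalize()
    dec = aes.dec(key, iv)
    plaintext = dec.update(ciphertext) + dec.finalize()
    assert plaintext == data
    return plaintext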
|
c69210fe78c435d43e84d1a36936bdfdc0ff9c56
|
35b55815a7278fffbf05aedb256e84fbf9536b14
|
/modeltranslation/widgets.py
|
702c3729d8add65adc90c9e9c7f28cc97abade30
|
[
"BSD-3-Clause"
] |
permissive
|
deschler/django-modeltranslation
|
1dadf1efca1ed573f0f2dbd16b2d11d6e65fbeaa
|
b1d32066ed46c9223c94053bb817beb04fec9522
|
refs/heads/master
| 2023-08-10T09:14:24.994377
| 2023-08-03T07:27:14
| 2023-08-09T06:45:53
| 6,241,977
| 1,107
| 278
|
BSD-3-Clause
| 2023-09-08T06:00:00
| 2012-10-16T09:33:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,683
|
py
|
widgets.py
|
from django.forms.widgets import CheckboxInput, Media, Widget
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
class ClearableWidgetWrapper(Widget):
"""
Wraps another widget adding a clear checkbox, making it possible to
reset the field to some empty value even if the original input doesn't
have means to.
Useful for ``TextInput`` and ``Textarea`` based widgets used in combination
with nullable text fields.
Use it in ``Field.formfield`` or ``ModelAdmin.formfield_for_dbfield``:
field.widget = ClearableWidgetWrapper(field.widget)
``None`` is assumed to be a proper choice for the empty value, but you may
pass another one to the constructor.
"""
clear_checkbox_label = _("None")
template = '<span class="clearable-input">{0} <span>{2}</span> {3}</span>'
# TODO: A label would be more appropriate, but the admin applies some
# hard-to-undo styling to labels.
# template = '<span class="clearable-input">{} <label for="{}">{}</label> {}</span>'
class Media:
js = ('modeltranslation/js/clearable_inputs.js',)
def __init__(self, widget, empty_value=None):
"""
Remembers the widget we are wrapping and pre-creates a checkbox input.
Allows overriding the empty value.
"""
self.widget = widget
self.checkbox = CheckboxInput(attrs={'tabindex': '-1'})
self.empty_value = empty_value
def __getattr__(self, name):
"""
If we don't have a property or a method, chances are the wrapped
widget does.
"""
if name != 'widget':
return getattr(self.widget, name)
raise AttributeError
@property
def media(self):
"""
Combines media of both components and adds a small script that unchecks
the clear checkbox when a value in any wrapped input is modified.
"""
return self.widget.media + self.checkbox.media + Media(self.Media)
def render(self, name, value, attrs=None, renderer=None):
"""
Appends a checkbox for clearing the value (that is, setting the field
with the ``empty_value``).
"""
wrapped = self.widget.render(name, value, attrs, renderer)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
checkbox_label = self.clear_checkbox_label
checkbox = self.checkbox.render(
checkbox_name, value == self.empty_value, attrs={'id': checkbox_id}, renderer=renderer
)
return mark_safe(
self.template.format(
conditional_escape(wrapped),
conditional_escape(checkbox_id),
conditional_escape(checkbox_label),
conditional_escape(checkbox),
)
)
def value_from_datadict(self, data, files, name):
"""
If the clear checkbox is checked returns the configured empty value,
completely ignoring the original input.
"""
clear = self.checkbox.value_from_datadict(data, files, self.clear_checkbox_name(name))
if clear:
return self.empty_value
return self.widget.value_from_datadict(data, files, name)
def clear_checkbox_name(self, name):
"""
Given the name of the input, returns the name of the clear checkbox.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, returns the HTML id for it.
"""
return name + '_id'
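# --- Illustrative sketch (appended example, not part of this module) ---
# Wiring the wrapper up in a ModelAdmin, as suggested in the class docstring.
# The admin class name and the "nullable text field" check are assumptions
# made for the example.
from django.contrib import admin
from django.db import models
class ExampleClearableAdmin(admin.ModelAdmin):
    def formfield_for_dbfield(self, db_field, request, **kwargs):
        field = super().formfield_for_dbfield(db_field, request, **kwargs)
        is_nullable_text = (isinstance(db_field, (models.CharField, models.TextField))
                            and db_field.null)
        if field is not None and is_nullable_text:
            field.widget = ClearableWidgetWrapper(field.widget, empty_value=None)
        return field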
|
d6e68f374b2ee9d358a59a23c2652c3b12fbdc33
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/pandas_option_display.py
|
630242f151ef9822bf1a7d2e6ca2bb30b4066e0c
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,346
|
py
|
pandas_option_display.py
|
import pandas as pd
import numpy as np
print(pd.__version__)
# 0.23.0
print(pd.options.display.precision)
# 6
s_decimal = pd.Series([123.456, 12.3456, 1.23456, 0.123456, 0.0123456, 0.00123456])
print(s_decimal)
# 0 123.456000
# 1 12.345600
# 2 1.234560
# 3 0.123456
# 4 0.012346
# 5 0.001235
# dtype: float64
print(s_decimal[5])
# 0.00123456
pd.options.display.precision = 4
print(s_decimal)
# 0 123.4560
# 1 12.3456
# 2 1.2346
# 3 0.1235
# 4 0.0123
# 5 0.0012
# dtype: float64
pd.options.display.precision = 2
print(s_decimal)
# 0 1.23e+02
# 1 1.23e+01
# 2 1.23e+00
# 3 1.23e-01
# 4 1.23e-02
# 5 1.23e-03
# dtype: float64
print(pd.options.display.float_format)
# None
pd.options.display.float_format = '{:.2f}'.format
print(s_decimal)
# 0 123.46
# 1 12.35
# 2 1.23
# 3 0.12
# 4 0.01
# 5 0.00
# dtype: float64
pd.options.display.float_format = '{:.4g}'.format
print(s_decimal)
# 0 123.5
# 1 12.35
# 2 1.235
# 3 0.1235
# 4 0.01235
# 5 0.001235
# dtype: float64
pd.options.display.float_format = '{:.4e}'.format
print(s_decimal)
# 0 1.2346e+02
# 1 1.2346e+01
# 2 1.2346e+00
# 3 1.2346e-01
# 4 1.2346e-02
# 5 1.2346e-03
# dtype: float64
pd.options.display.float_format = '{: <10.2%}'.format
print(s_decimal)
# 0 12345.60%
# 1 1234.56%
# 2 123.46%
# 3 12.35%
# 4 1.23%
# 5 0.12%
# dtype: float64
df_decimal = pd.DataFrame({'s': ['0.4', '0.5', '0.6', '1.4', '1.5', '1.6'],
'f': [0.4, 0.5, 0.6, 1.4, 1.5, 1.6]})
pd.options.display.float_format = '{:.0f}'.format
print(df_decimal)
# s f
# 0 0.4 0
# 1 0.5 0
# 2 0.6 1
# 3 1.4 1
# 4 1.5 2
# 5 1.6 2
df_decimal2 = pd.DataFrame({'s': ['0.04', '0.05', '0.06', '0.14', '0.15', '0.16'],
'f': [0.04, 0.05, 0.06, 0.14, 0.15, 0.16]})
pd.options.display.float_format = '{:.1f}'.format
print(df_decimal2)
# s f
# 0 0.04 0.0
# 1 0.05 0.1
# 2 0.06 0.1
# 3 0.14 0.1
# 4 0.15 0.1
# 5 0.16 0.2
print(pd.options.display.max_rows)
# 60
df_tall = pd.DataFrame(np.arange(300).reshape((100, 3)))
pd.options.display.max_rows = 10
print(df_tall)
# 0 1 2
# 0 0 1 2
# 1 3 4 5
# 2 6 7 8
# 3 9 10 11
# 4 12 13 14
# .. ... ... ...
# 95 285 286 287
# 96 288 289 290
# 97 291 292 293
# 98 294 295 296
# 99 297 298 299
# [100 rows x 3 columns]
print(df_tall.head(10))
# 0 1 2
# 0 0 1 2
# 1 3 4 5
# 2 6 7 8
# 3 9 10 11
# 4 12 13 14
# 5 15 16 17
# 6 18 19 20
# 7 21 22 23
# 8 24 25 26
# 9 27 28 29
print(df_tall.head(20))
# 0 1 2
# 0 0 1 2
# 1 3 4 5
# 2 6 7 8
# 3 9 10 11
# 4 12 13 14
# .. .. .. ..
# 15 45 46 47
# 16 48 49 50
# 17 51 52 53
# 18 54 55 56
# 19 57 58 59
# [20 rows x 3 columns]
pd.options.display.max_rows = None
print(pd.options.display.max_columns)
# 20
df_wide = pd.DataFrame(np.arange(90).reshape((3, 30)))
print(df_wide)
# 0 1 2 3 4 5 6 7 8 9 ... 20 21 22 23 24 25 26 27 \
# 0 0 1 2 3 4 5 6 7 8 9 ... 20 21 22 23 24 25 26 27
# 1 30 31 32 33 34 35 36 37 38 39 ... 50 51 52 53 54 55 56 57
# 2 60 61 62 63 64 65 66 67 68 69 ... 80 81 82 83 84 85 86 87
# 28 29
# 0 28 29
# 1 58 59
# 2 88 89
# [3 rows x 30 columns]
pd.options.display.max_columns = 10
print(df_wide)
# 0 1 2 3 4 ... 25 26 27 28 29
# 0 0 1 2 3 4 ... 25 26 27 28 29
# 1 30 31 32 33 34 ... 55 56 57 58 59
# 2 60 61 62 63 64 ... 85 86 87 88 89
# [3 rows x 30 columns]
pd.options.display.max_columns = None
print(df_wide)
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 \
# 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
# 1 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48
# 2 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
# 19 20 21 22 23 24 25 26 27 28 29
# 0 19 20 21 22 23 24 25 26 27 28 29
# 1 49 50 51 52 53 54 55 56 57 58 59
# 2 79 80 81 82 83 84 85 86 87 88 89
print(pd.options.display.show_dimensions)
# truncate
pd.options.display.max_columns = 10
print(df_wide)
# 0 1 2 3 4 ... 25 26 27 28 29
# 0 0 1 2 3 4 ... 25 26 27 28 29
# 1 30 31 32 33 34 ... 55 56 57 58 59
# 2 60 61 62 63 64 ... 85 86 87 88 89
# [3 rows x 30 columns]
df = pd.DataFrame(np.arange(12).reshape((3, 4)))
print(df)
# 0 1 2 3
# 0 0 1 2 3
# 1 4 5 6 7
# 2 8 9 10 11
pd.options.display.show_dimensions = True
print(df_wide)
# 0 1 2 3 4 ... 25 26 27 28 29
# 0 0 1 2 3 4 ... 25 26 27 28 29
# 1 30 31 32 33 34 ... 55 56 57 58 59
# 2 60 61 62 63 64 ... 85 86 87 88 89
# [3 rows x 30 columns]
print(df)
# 0 1 2 3
# 0 0 1 2 3
# 1 4 5 6 7
# 2 8 9 10 11
# [3 rows x 4 columns]
pd.options.display.show_dimensions = False
print(df_wide)
# 0 1 2 3 4 ... 25 26 27 28 29
# 0 0 1 2 3 4 ... 25 26 27 28 29
# 1 30 31 32 33 34 ... 55 56 57 58 59
# 2 60 61 62 63 64 ... 85 86 87 88 89
print(df)
# 0 1 2 3
# 0 0 1 2 3
# 1 4 5 6 7
# 2 8 9 10 11
print(pd.options.display.width)
# 80
pd.options.display.max_columns = None
print(df_wide)
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 \
# 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
# 1 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48
# 2 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
# 19 20 21 22 23 24 25 26 27 28 29
# 0 19 20 21 22 23 24 25 26 27 28 29
# 1 49 50 51 52 53 54 55 56 57 58 59
# 2 79 80 81 82 83 84 85 86 87 88 89
pd.options.display.width = 60
print(df_wide)
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 \
# 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13
# 1 30 31 32 33 34 35 36 37 38 39 40 41 42 43
# 2 60 61 62 63 64 65 66 67 68 69 70 71 72 73
# 14 15 16 17 18 19 20 21 22 23 24 25 26 27 \
# 0 14 15 16 17 18 19 20 21 22 23 24 25 26 27
# 1 44 45 46 47 48 49 50 51 52 53 54 55 56 57
# 2 74 75 76 77 78 79 80 81 82 83 84 85 86 87
# 28 29
# 0 28 29
# 1 58 59
# 2 88 89
pd.options.display.width = None
print(df_wide)
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 \
# 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
# 1 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48
# 2 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
# 19 20 21 22 23 24 25 26 27 28 29
# 0 19 20 21 22 23 24 25 26 27 28 29
# 1 49 50 51 52 53 54 55 56 57 58 59
# 2 79 80 81 82 83 84 85 86 87 88 89
print(pd.options.display.max_colwidth)
# 50
df_long_col = pd.DataFrame({'col': ['a' * 10, 'a' * 30, 'a' * 60]})
print(df_long_col)
# col
# 0 aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
pd.options.display.max_colwidth = 80
print(df_long_col)
# col
# 0 aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
df_long_col2 = pd.DataFrame({'col1': ['a' * 10, 'a' * 30, 'a' * 60],
'col2': ['a' * 10, 'a' * 30, 'a' * 60]})
pd.options.display.max_colwidth = 20
print(df_long_col2)
# col1 col2
# 0 aaaaaaaaaa aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaa... aaaaaaaaaaaaaaaa...
# 2 aaaaaaaaaaaaaaaa... aaaaaaaaaaaaaaaa...
df_long_col_header = pd.DataFrame({'a' * 60: ['a' * 10, 'a' * 30, 'a' * 60]})
pd.options.display.max_colwidth = 40
print(df_long_col_header)
# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 0 aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
print(pd.options.display.colheader_justify)
# right
print(df_long_col)
# col
# 0 aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
pd.options.display.colheader_justify = 'left'
print(df_long_col)
# col
# 0 aaaaaaaaaa
# 1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# 2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...
|
2041133e19fa69530c869a333ee9b5c9fc95523c
|
7c1b7d19b6d6a1f1cd50495f006bdbac4ea85313
|
/Contrib/TranslationToolkit/Convert_Compile.py
|
dac64cc5a9cc67a8eae279114ea9243ba4e2496e
|
[
"BSD-2-Clause",
"curl",
"LGPL-2.0-or-later",
"Zlib",
"NCSA",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MediaArea/MediaInfo
|
9280deb0b6f4967dce363aeaa0a04a7d51375ce3
|
aebdbf83098ec8a06e5ccf83d5a06d74b9d61683
|
refs/heads/master
| 2023-09-03T21:04:26.706366
| 2023-08-16T08:33:01
| 2023-08-16T08:33:01
| 20,677,163
| 1,069
| 176
|
BSD-2-Clause
| 2023-09-14T13:44:38
| 2014-06-10T07:59:01
|
Pascal
|
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
Convert_Compile.py
|
# Type: Python 3 script
# Author: Steven Sun <stevenlele@outlook.com>
# Date: Feb 28, 2020
# Notes: Put "Language.csv" and "Language_parsed.csv" in this folder
# and run the script. It converts the parsed file back into the
# original CSV, and generates a single language file for testing.
import csv
import sys
import os
#################################
######### Configuration #########
# edit the filename if the CSV file is placed elsewhere
language_file = 'Language.csv'
translated_file = 'Language_parsed.csv'
# output filename
output_file = 'Language_translated.csv'
######### Configuration #########
#################################
csv.register_dialect('MediaInfo', delimiter=';')
if not os.path.exists(language_file):
print('Error: Language.csv file does not exist!')
sys.exit(1)
if not os.path.exists(translated_file):
print('Error: Translated file does not exist!')
sys.exit(1)
output_rows = []
output_lang_rows = []
translated_rows = []
with open(translated_file, 'r', encoding='utf_8') as f:
reader = csv.reader(f)
header = next(reader)
lang_code = header[2]
for row in reader:
key = row[0]
translated = row[2]
translated_rows.append((key, translated))
with open(language_file, 'r', encoding='utf_8') as f:
reader = csv.reader(f, dialect='MediaInfo')
header = next(reader)
index_lang = header.index(lang_code)
output_rows.append(header)
output_lang_rows.append([header[0], header[index_lang]])
column_count = len(header)
for row in reader:
key = row[0]
translated_row = translated_rows.pop(0)
if key != translated_row[0]:
print(f'Error: Key "{key}" does not match the key "{translated_row[0]}" in the translated file.')
print('Please check whether you have changed the translated file or not.')
sys.exit(1)
translated_string = translated_row[1]
if translated_string.startswith('"') and translated_string.endswith('"'):
translated_string = translated_string.strip('"')
new_row = row
if len(new_row) < column_count:
new_row.extend([''] * (column_count - len(new_row)))
new_row[index_lang] = translated_string
output_rows.append(new_row)
output_lang_rows.append([key, translated_string])
with open(output_file, 'w', encoding='utf_8', newline='') as f:
writer = csv.writer(f, dialect='MediaInfo')
writer.writerows(output_rows)
with open(f'{lang_code}.csv', 'w', encoding='utf_8', newline='') as f:
writer = csv.writer(f, dialect='MediaInfo')
writer.writerows(output_lang_rows)
print('Info: Compile completed!')
|
abf6baf22bc7ba4cf14133cfca478eeab5d90d47
|
cbbf0ebc36ec7a49ff4d26b9f334f2d1e8bf492d
|
/code/val_2D.py
|
1d3735368451e0113e72fb488d71f57e5b132051
|
[
"MIT"
] |
permissive
|
HiLab-git/SSL4MIS
|
e9c9d0eee6058d576da533191762a7c588b4d365
|
769258bcb25cc64292c95d18ce18a6f9f028a8a4
|
refs/heads/master
| 2023-08-23T07:32:16.616731
| 2023-07-26T03:36:43
| 2023-07-26T03:36:43
| 298,459,550
| 1,710
| 354
|
MIT
| 2022-08-17T02:27:30
| 2020-09-25T03:39:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,359
|
py
|
val_2D.py
|
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
def calculate_metric_percase(pred, gt):
pred[pred > 0] = 1
gt[gt > 0] = 1
if pred.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return dice, hd95
else:
return 0, 0
def test_single_volume(image, label, net, classes, patch_size=[256, 256]):
image, label = image.squeeze(0).cpu().detach(
).numpy(), label.squeeze(0).cpu().detach().numpy()
prediction = np.zeros_like(label)
for ind in range(image.shape[0]):
slice = image[ind, :, :]
x, y = slice.shape[0], slice.shape[1]
slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
input = torch.from_numpy(slice).unsqueeze(
0).unsqueeze(0).float().cuda()
net.eval()
with torch.no_grad():
out = torch.argmax(torch.softmax(
net(input), dim=1), dim=1).squeeze(0)
out = out.cpu().detach().numpy()
pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
prediction[ind] = pred
metric_list = []
for i in range(1, classes):
metric_list.append(calculate_metric_percase(
prediction == i, label == i))
return metric_list
def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]):
image, label = image.squeeze(0).cpu().detach(
).numpy(), label.squeeze(0).cpu().detach().numpy()
prediction = np.zeros_like(label)
for ind in range(image.shape[0]):
slice = image[ind, :, :]
x, y = slice.shape[0], slice.shape[1]
slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
input = torch.from_numpy(slice).unsqueeze(
0).unsqueeze(0).float().cuda()
net.eval()
with torch.no_grad():
output_main, _, _, _ = net(input)
out = torch.argmax(torch.softmax(
output_main, dim=1), dim=1).squeeze(0)
out = out.cpu().detach().numpy()
pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
prediction[ind] = pred
metric_list = []
for i in range(1, classes):
metric_list.append(calculate_metric_percase(
prediction == i, label == i))
return metric_list
|
53288ae0de54f8ca838294d2e310b62649b6983c
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/idvs/tests/conftest.py
|
6dc70012d52532bc5212ba46e74910657d673128
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
conftest.py
|
from usaspending_api.idvs.tests.data.idv_rollup_data import (
basic_idvs,
idv_with_unreleased_submissions,
idv_with_released_submissions,
)
from usaspending_api.disaster.tests.fixtures.helpers import helpers
__all__ = ["basic_idvs", "helpers", "idv_with_released_submissions", "idv_with_unreleased_submissions"]
|
b7eb79cd62e8af10a2b296d22d1f2e872b1a4e60
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/api/resource_types/serializers.py
|
32bfa4615f214d1d8345a50a44931a5f69f8bbd4
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
serializers.py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Serializer for Resource Types."""
from rest_framework import serializers
class ResourceTypeSerializer(serializers.Serializer):
"""Serializer for resource-specific resource-type APIs."""
extra_kwargs = {"test": {"error_messages": {"required": "Give yourself a username"}}}
cluster_alias = serializers.CharField(source="ocp_cluster_alias", required=False)
account_alias = serializers.CharField(source="alias", required=False)
value = serializers.CharField()
|
440ebaf889f2a6b985fe1285a55883d51682d9cd
|
fae16a539b7c1b0525aab40ddaeee3e451fc9b74
|
/src/argilla/metrics/commons.py
|
563c08683ac25ff982a6171af2debcf3427229f8
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
argilla-io/argilla
|
a6b45f7f64e9db82f6d9a61771d758ffbb3dab4a
|
7c1b2368b444b7b7a281d37ad51bcb2d8e92acf5
|
refs/heads/develop
| 2023-09-04T03:58:05.914619
| 2023-09-01T15:58:31
| 2023-09-01T15:58:31
| 362,500,938
| 1,085
| 122
|
Apache-2.0
| 2023-09-14T15:31:07
| 2021-04-28T14:37:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
commons.py
|
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from argilla.client import api
from argilla.metrics import helpers
from argilla.metrics.models import MetricSummary
def text_length(name: str, query: Optional[str] = None) -> MetricSummary:
"""Computes the input text length metrics for a dataset
Args:
name:
The dataset name.
query:
An ElasticSearch query with the [query string syntax](https://argilla.readthedocs.io/en/stable/guides/queries.html)
Returns:
The text length metric summary
Examples:
>>> from argilla.metrics.commons import text_length
>>> summary = text_length(name="example-dataset")
>>> summary.visualize() # will plot an histogram with results
>>> summary.data # returns the raw result data
"""
metric = api.active_api().compute_metric(name, metric="text_length", query=query)
return MetricSummary.new_summary(
data=metric.results,
visualization=lambda: helpers.histogram(data=metric.results, title=metric.description),
)
def records_status(name: str, query: Optional[str] = None) -> MetricSummary:
"""Computes the records status distribution for a dataset
Args:
name:
The dataset name.
query:
An ElasticSearch query with the [query string syntax](https://argilla.readthedocs.io/en/stable/guides/queries.html)
Returns:
The status distribution metric summary
Examples:
>>> from argilla.metrics.commons import records_status
>>> summary = records_status(name="example-dataset")
>>> summary.visualize() # will plot an histogram with results
>>> summary.data # returns the raw result data
"""
metric = api.active_api().compute_metric(name, metric="status_distribution", query=query)
return MetricSummary.new_summary(
data=metric.results,
visualization=lambda: helpers.bar(data=metric.results, title=metric.description),
)
def keywords(name: str, query: Optional[str] = None, size: int = 20) -> MetricSummary:
"""Computes the keywords occurrence distribution in dataset
Args:
name:
The dataset name.
query:
An ElasticSearch query with the [query string syntax](
https://argilla.readthedocs.io/en/stable/guides/queries.html)
size:
            The number of keywords to retrieve. Defaults to `20`
Returns:
The dataset keywords occurrence distribution
Examples:
>>> from argilla.metrics.commons import keywords
>>> summary = keywords(name="example-dataset")
>>> summary.visualize() # will plot an histogram with results
>>> summary.data # returns the raw result data
"""
assert size > 0, ValueError("size must be greater than 0")
metric = api.active_api().compute_metric(
name,
metric="words_cloud",
query=query,
size=size,
)
return MetricSummary.new_summary(
data=metric.results,
visualization=lambda: helpers.bar(
data=metric.results,
title=metric.description,
),
)
|
43c648cc6af693e115ce910b66f3c21de7e4db51
|
6fe5c42adc0d55a9f792c5627a2da79be7910444
|
/tests/unit/tree/request_node/test_validate.py
|
9077703a47c62c149af643f647a862364d35d908
|
[
"MIT"
] |
permissive
|
scanapi/scanapi
|
31570a51b4ee8c5c40887073e4d8f221bbfa6299
|
1e419f29364585123cb1bc14c2d7ebd2c9b117b7
|
refs/heads/main
| 2023-09-03T13:12:47.521987
| 2023-07-24T09:01:22
| 2023-07-24T09:01:22
| 197,915,529
| 1,330
| 95
|
MIT
| 2023-09-11T03:44:07
| 2019-07-20T10:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
test_validate.py
|
from pytest import fixture, mark
from scanapi.tree import EndpointNode, RequestNode, tree_keys
@mark.describe("request node")
@mark.describe("_validate")
class TestValidate:
@fixture
def mock_validate_keys(self, mocker):
return mocker.patch("scanapi.tree.request_node.validate_keys")
@mark.it("should call the validate_keys method")
def test_should_call_validate_keys(self, mock_validate_keys):
spec = {
"headers": {"foo": "bar"},
"name": "foo",
"path": "foo.bar",
}
node = RequestNode(
spec, endpoint=EndpointNode({"name": "foo", "requests": [{}]})
)
keys = spec.keys()
node._validate()
mock_validate_keys.assert_called_with(
keys,
(
"body",
"headers",
"method",
"name",
"params",
"path",
"tests",
tree_keys.VARS_KEY,
"delay",
"retry",
"options",
),
("name",),
"request",
)
assert len(keys) == 3
assert "headers" in keys
assert "name" in keys
assert "path" in keys
|
90ed1668b516ffd323c282d7906d28ce17e28bb0
|
88dda5e76cef286c7db3ae7e5d1a32d28f7815a3
|
/reviewboard/reviews/tests/test_review_file_attachment_view.py
|
c6d6effcf10195aa26f4473e16713809803d42a0
|
[
"MIT"
] |
permissive
|
reviewboard/reviewboard
|
f4d3bada08ba9d6ef53add2d1fdb82bd6cc63a1e
|
c3a991f1e9d7682239a1ab0e8661cee6da01d537
|
refs/heads/master
| 2023-08-31T09:03:14.170335
| 2023-08-30T08:22:43
| 2023-08-30T08:22:43
| 285,304
| 1,141
| 353
|
MIT
| 2023-06-07T16:51:02
| 2009-08-22T21:39:49
|
Python
|
UTF-8
|
Python
| false
| false
| 10,040
|
py
|
test_review_file_attachment_view.py
|
"""Unit tests for reviewboard.reviews.views.ReviewFileAttachmentView."""
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class ReviewFileAttachmentViewTests(TestCase):
"""Unit tests for reviewboard.reviews.views.ReviewFileAttachmentView."""
fixtures = ['test_users']
def test_access_with_valid_id(self):
"""Testing ReviewFileAttachmentView access with valid attachment for
review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_with_valid_id_and_draft(self):
"""Testing ReviewFileAttachmentView access with valid attachment for
review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_with_invalid_id(self):
"""Testing ReviewFileAttachmentView access with invalid attachment for
review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_access_with_invalid_id_and_draft(self):
"""Testing ReviewFileAttachmentView access with invalid attachment for
review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_access_with_valid_inactive_id(self):
"""Testing ReviewFileAttachmentView access with valid inactive
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, active=False)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_with_valid_inactive_id_draft(self):
"""Testing ReviewFileAttachmentView access with valid inactive
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True,
active=False)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_with_invalid_inactive_id(self):
"""Testing ReviewFileAttachmentView access with invalid inactive
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, active=False)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_access_invalid_inactive_id_draft(self):
"""Testing ReviewFileAttachmentView access with invalid inactive
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True,
active=False)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_access_with_valid_diff_against_id(self):
"""Testing ReviewFileAttachmentView access with valid diff-against
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
attachment2 = self.create_file_attachment(review_request)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_valid_diff_against_id_draft(self):
"""Testing ReviewFileAttachmentView access with valid diff-against
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
attachment2 = self.create_file_attachment(review_request, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 200)
def test_access_with_invalid_diff_against_id(self):
"""Testing ReviewFileAttachmentView access with invalid diff-against
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
attachment2 = self.create_file_attachment(review_request2)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 404)
def test_access_invalid_diff_against_id_draft(self):
"""Testing ReviewFileAttachmentView access with invalid diff-against
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
attachment2 = self.create_file_attachment(review_request2, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=username)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 404)
|
2a02bb17ba88277bcd32485e2def6f98912a652e
|
fad4aa5a174627b8930beb8f5f987dd62c88957e
|
/sky/skylet/providers/kubernetes/config.py
|
d684c4b461799d9f18ca30ae858dd53c1b993126
|
[
"Apache-2.0"
] |
permissive
|
skypilot-org/skypilot
|
603e29ecb3ce3b25d308f018fd402488ee352ef0
|
e58f33f315ca08c6e057ab9a2d00cd27476529a1
|
refs/heads/master
| 2023-08-16T21:46:53.379586
| 2023-08-16T02:17:21
| 2023-08-16T02:17:21
| 395,140,743
| 3,416
| 220
|
Apache-2.0
| 2023-09-14T21:20:44
| 2021-08-11T23:32:15
|
Python
|
UTF-8
|
Python
| false
| false
| 11,154
|
py
|
config.py
|
import copy
import logging
import math
import re
from sky.adaptors import kubernetes
from sky.skylet.providers.kubernetes import utils
logger = logging.getLogger(__name__)
MEMORY_SIZE_UNITS = {
"K": 2**10,
"M": 2**20,
"G": 2**30,
"T": 2**40,
'P': 2**50,
}
log_prefix = 'KubernetesNodeProvider: '
# Timeout for deleting a Kubernetes resource (in seconds).
DELETION_TIMEOUT = 90
class InvalidNamespaceError(ValueError):
def __init__(self, field_name, namespace):
self.message = (
f'Namespace of {field_name} config does not match provided '
f'namespace "{namespace}". Either set it to {namespace} or remove the '
'field')
def __str__(self):
return self.message
def using_existing_msg(resource_type, name):
return f'using existing {resource_type} "{name}"'
def updating_existing_msg(resource_type, name):
return f'updating existing {resource_type} "{name}"'
def not_found_msg(resource_type, name):
return f'{resource_type} "{name}" not found, attempting to create it'
def not_checking_msg(resource_type, name):
return f'not checking if {resource_type} "{name}" exists'
def created_msg(resource_type, name):
return f'successfully created {resource_type} "{name}"'
def not_provided_msg(resource_type):
return f'no {resource_type} config provided, must already exist'
def bootstrap_kubernetes(config):
namespace = utils.get_current_kube_config_context_namespace()
_configure_services(namespace, config['provider'])
if not config['provider'].get('_operator'):
        # These steps are unnecessary when using the Operator.
_configure_autoscaler_service_account(namespace, config['provider'])
_configure_autoscaler_role(namespace, config['provider'])
_configure_autoscaler_role_binding(namespace, config['provider'])
return config
def fillout_resources_kubernetes(config):
"""Fills CPU and GPU resources in the ray cluster config.
For each node type and each of CPU/GPU, looks at container's resources
and limits, takes min of the two.
"""
if 'available_node_types' not in config:
return config
node_types = copy.deepcopy(config['available_node_types'])
head_node_type = config['head_node_type']
for node_type in node_types:
node_config = node_types[node_type]['node_config']
# The next line is for compatibility with configs which define pod specs
# cf. KubernetesNodeProvider.create_node().
pod = node_config.get('pod', node_config)
container_data = pod['spec']['containers'][0]
autodetected_resources = get_autodetected_resources(container_data)
        if node_type == head_node_type:
# we only autodetect worker type node memory resource
autodetected_resources.pop('memory')
if 'resources' not in config['available_node_types'][node_type]:
config['available_node_types'][node_type]['resources'] = {}
autodetected_resources.update(
config['available_node_types'][node_type]['resources'])
config['available_node_types'][node_type][
'resources'] = autodetected_resources
logger.debug(f'Updating the resources of node type {node_type} '
f'to include {autodetected_resources}.')
return config
def get_autodetected_resources(container_data):
container_resources = container_data.get('resources', None)
if container_resources is None:
return {'CPU': 0, 'GPU': 0}
node_type_resources = {
resource_name.upper(): get_resource(container_resources, resource_name)
for resource_name in ['cpu', 'gpu']
}
# TODO(romilb): Update this to allow fractional resources.
memory_limits = get_resource(container_resources, 'memory')
node_type_resources['memory'] = int(memory_limits)
return node_type_resources
def get_resource(container_resources, resource_name):
limit = _get_resource(container_resources,
resource_name,
field_name='limits')
# float('inf') means there's no limit set
return 0 if limit == float('inf') else int(limit)
def _get_resource(container_resources, resource_name, field_name):
"""Returns the resource quantity.
The amount of resource is rounded up to nearest integer.
Returns float("inf") if the resource is not present.
Args:
container_resources: Container's resource field.
resource_name: One of 'cpu', 'gpu' or memory.
field_name: One of 'requests' or 'limits'.
Returns:
Union[int, float]: Detected resource quantity.
"""
if field_name not in container_resources:
# No limit/resource field.
return float('inf')
resources = container_resources[field_name]
# Look for keys containing the resource_name. For example,
# the key 'nvidia.com/gpu' contains the key 'gpu'.
matching_keys = [key for key in resources if resource_name in key.lower()]
if len(matching_keys) == 0:
return float('inf')
if len(matching_keys) > 1:
# Should have only one match -- mostly relevant for gpu.
raise ValueError(f'Multiple {resource_name} types not supported.')
# E.g. 'nvidia.com/gpu' or 'cpu'.
resource_key = matching_keys.pop()
resource_quantity = resources[resource_key]
if resource_name == 'memory':
return _parse_memory_resource(resource_quantity)
else:
return _parse_cpu_or_gpu_resource(resource_quantity)
def _parse_cpu_or_gpu_resource(resource):
resource_str = str(resource)
if resource_str[-1] == 'm':
# For example, '500m' rounds up to 1.
return math.ceil(int(resource_str[:-1]) / 1000)
else:
return float(resource_str)
def _parse_memory_resource(resource):
resource_str = str(resource)
try:
return int(resource_str)
except ValueError:
pass
memory_size = re.sub(r'([KMGTP]+)', r' \1', resource_str)
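    # e.g. '4G' -> '4 G' and '500Mi' -> '500 Mi'; only the first letter of the
    # unit is used to index MEMORY_SIZE_UNITS below.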
number, unit_index = [item.strip() for item in memory_size.split()]
unit_index = unit_index[0]
return float(number) * MEMORY_SIZE_UNITS[unit_index]
def _configure_autoscaler_service_account(namespace, provider_config):
account_field = 'autoscaler_service_account'
if account_field not in provider_config:
logger.info(log_prefix + not_provided_msg(account_field))
return
account = provider_config[account_field]
if 'namespace' not in account['metadata']:
account['metadata']['namespace'] = namespace
elif account['metadata']['namespace'] != namespace:
raise InvalidNamespaceError(account_field, namespace)
name = account['metadata']['name']
field_selector = f'metadata.name={name}'
accounts = (kubernetes.core_api().list_namespaced_service_account(
namespace, field_selector=field_selector).items)
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(account_field, name))
return
logger.info(log_prefix + not_found_msg(account_field, name))
kubernetes.core_api().create_namespaced_service_account(namespace, account)
logger.info(log_prefix + created_msg(account_field, name))
def _configure_autoscaler_role(namespace, provider_config):
role_field = 'autoscaler_role'
if role_field not in provider_config:
logger.info(log_prefix + not_provided_msg(role_field))
return
role = provider_config[role_field]
if 'namespace' not in role['metadata']:
role['metadata']['namespace'] = namespace
elif role['metadata']['namespace'] != namespace:
raise InvalidNamespaceError(role_field, namespace)
name = role['metadata']['name']
field_selector = f'metadata.name={name}'
accounts = (kubernetes.auth_api().list_namespaced_role(
namespace, field_selector=field_selector).items)
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(role_field, name))
return
logger.info(log_prefix + not_found_msg(role_field, name))
kubernetes.auth_api().create_namespaced_role(namespace, role)
logger.info(log_prefix + created_msg(role_field, name))
def _configure_autoscaler_role_binding(namespace, provider_config):
binding_field = 'autoscaler_role_binding'
if binding_field not in provider_config:
logger.info(log_prefix + not_provided_msg(binding_field))
return
binding = provider_config[binding_field]
if 'namespace' not in binding['metadata']:
binding['metadata']['namespace'] = namespace
elif binding['metadata']['namespace'] != namespace:
raise InvalidNamespaceError(binding_field, namespace)
for subject in binding['subjects']:
if 'namespace' not in subject:
subject['namespace'] = namespace
elif subject['namespace'] != namespace:
subject_name = subject['name']
raise InvalidNamespaceError(
binding_field + f' subject {subject_name}', namespace)
name = binding['metadata']['name']
field_selector = f'metadata.name={name}'
accounts = (kubernetes.auth_api().list_namespaced_role_binding(
namespace, field_selector=field_selector).items)
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(binding_field, name))
return
logger.info(log_prefix + not_found_msg(binding_field, name))
kubernetes.auth_api().create_namespaced_role_binding(namespace, binding)
logger.info(log_prefix + created_msg(binding_field, name))
def _configure_services(namespace, provider_config):
service_field = 'services'
if service_field not in provider_config:
logger.info(log_prefix + not_provided_msg(service_field))
return
services = provider_config[service_field]
for service in services:
if 'namespace' not in service['metadata']:
service['metadata']['namespace'] = namespace
elif service['metadata']['namespace'] != namespace:
raise InvalidNamespaceError(service_field, namespace)
name = service['metadata']['name']
field_selector = f'metadata.name={name}'
services = (kubernetes.core_api().list_namespaced_service(
namespace, field_selector=field_selector).items)
if len(services) > 0:
assert len(services) == 1
existing_service = services[0]
if service == existing_service:
logger.info(log_prefix + using_existing_msg('service', name))
return
else:
logger.info(log_prefix + updating_existing_msg('service', name))
kubernetes.core_api().patch_namespaced_service(
name, namespace, service)
else:
logger.info(log_prefix + not_found_msg('service', name))
kubernetes.core_api().create_namespaced_service(namespace, service)
logger.info(log_prefix + created_msg('service', name))
class KubernetesError(Exception):
pass
|
2c8990d5e72d58d2b14965fddfda24e07eaa8fc1
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/gitlab/views.py
|
addcf932f1b7e12a7be614c56ede49caa0737118
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,427
|
py
|
views.py
|
# -*- coding: utf-8 -*-
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.gitlab.provider import GitLabProvider
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
def _check_errors(response):
    # 403 errors are presented as user-facing errors
if response.status_code == 403:
msg = response.content
raise OAuth2Error("Invalid data from GitLab API: %r" % (msg))
try:
data = response.json()
except ValueError: # JSONDecodeError on py3
raise OAuth2Error("Invalid JSON from GitLab API: %r" % (response.text))
if response.status_code >= 400 or "error" in data:
# For errors, we expect the following format:
# {"error": "error_name", "error_description": "Oops!"}
# For example, if the token is not valid, we will get:
# {"message": "status_code - message"}
error = data.get("error", "") or response.status_code
desc = data.get("error_description", "") or data.get("message", "")
raise OAuth2Error("GitLab error: %s (%s)" % (error, desc))
# The expected output from the API follows this format:
# {"id": 12345, ...}
if "id" not in data:
# If the id is not present, the output is not usable (no UID)
raise OAuth2Error("Invalid data from GitLab API: %r" % (data))
return data
class GitLabOAuth2Adapter(OAuth2Adapter):
provider_id = GitLabProvider.id
provider_default_url = "https://gitlab.com"
provider_api_version = "v4"
settings = app_settings.PROVIDERS.get(provider_id, {})
provider_base_url = settings.get("GITLAB_URL", provider_default_url)
access_token_url = "{0}/oauth/token".format(provider_base_url)
authorize_url = "{0}/oauth/authorize".format(provider_base_url)
profile_url = "{0}/api/{1}/user".format(provider_base_url, provider_api_version)
def complete_login(self, request, app, token, response):
response = requests.get(self.profile_url, params={"access_token": token.token})
data = _check_errors(response)
return self.get_provider().sociallogin_from_response(request, data)
oauth2_login = OAuth2LoginView.adapter_view(GitLabOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GitLabOAuth2Adapter)
|
d6790bdb8c70e7832781b56867cbcced316c0f1f
|
24051779055e60e5ea0537c36782a0c8c550631e
|
/cryptos/wallet.py
|
ee07c82d7235ed0f847f7cae4e405119351aa0af
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
primal100/pybitcointools
|
58c2df69a06c453b6d8abd0845a8d5fd4dd949e7
|
e7c96bfe1f4be08a9f3c540e598a73dc20ca2462
|
refs/heads/master
| 2023-07-23T21:44:49.362775
| 2023-07-18T14:55:43
| 2023-07-18T14:55:43
| 111,823,327
| 298
| 180
|
NOASSERTION
| 2023-09-09T12:03:19
| 2017-11-23T15:11:22
|
Python
|
UTF-8
|
Python
| false
| false
| 11,948
|
py
|
wallet.py
|
from .main import *
from .transaction import select
class Wallet:
def __init__(self, keystore, transaction_history=None):
self.coin = keystore.coin
self.keystore = keystore
self.address_derivations = {}
self.is_watching_only = self.keystore.is_watching_only()
self.transaction_history = transaction_history or []
self.xtype = self.keystore.xtype
if self.keystore.electrum:
self.script_type = self.keystore.xtype
else:
self.script_type = "p2pkh"
def privkey(self, address, formt="wif_compressed", password=None):
if self.is_watching_only:
return
try:
addr_derivation = self.address_derivations[address]
except KeyError:
raise Exception(
"Address %s has not been generated yet. Generate new addresses with new_receiving_addresses or new_change_addresses methods" % address)
pk, compressed = self.keystore.get_private_key(addr_derivation, password)
return self.coin.encode_privkey(pk, formt, script_type=self.script_type)
def export_privkeys(self, password=None):
if self.is_watching_only:
return
return {
'receiving': {addr: self.privkey(addr, password=password) for addr in self.receiving_addresses},
'change': {addr: self.privkey(addr, password=password) for addr in self.change_addresses}
}
def receiving_address(self, index):
        pubkey = list(self.keystore.keypairs.keys())[index]
address = self.pubtoaddr(pubkey)
self.address_derivations[address] = pubkey
return address
def change_address(self, index):
        pubkey = list(self.keystore.keypairs.keys())[index]
address = self.pubtoaddr(pubkey)
self.address_derivations[address] = pubkey
return address
def pubtoaddr(self, pubkey):
if self.xtype == "p2pkh":
return self.coin.pubtoaddr(pubkey)
elif self.xtype == "p2wpkh":
return self.coin.pubtosegwitaddress(pubkey)
elif self.xtype == "p2wpkh-p2sh":
return self.coin.pubtop2wpkh_p2sh(pubkey)
@property
def addresses(self):
return [self.pubtoaddr(pub) for pub in self.keystore.keypairs.keys()]
@property
def receiving_addresses(self):
return self.addresses
@property
def change_addresses(self):
return self.addresses
def select_receive_address(self):
return self.addresses[0]
def select_change_address(self):
return self.addresses[0]
def new_receiving_address_range(self, num):
return self.receiving_addresses[0]
def new_change_address_range(self, num):
return self.receiving_addresses[0]
def new_receiving_addresses(self, num=10):
return self.addresses
def new_change_addresses(self, num=10):
return self.addresses
def new_receiving_address(self):
return self.new_receiving_addresses(num=1)[0]
def new_change_address(self):
return self.new_change_addresses(num=1)[0]
def is_mine(self, address):
return address in self.addresses
def is_change(self, address):
return True
def get_balances(self):
return self.coin.get_balance(*self.addresses)
def balance(self):
balances = self.get_balances()
confirmed_balance = sum(b['confirmed'] for b in balances)
unconfirmed_balance = sum(b['unconfirmed'] for b in balances)
return {
'total': confirmed_balance + unconfirmed_balance,
'unconfirmed': unconfirmed_balance,
'confirmed': confirmed_balance
}
def unspent(self, addresses=None, merkle_proof=False):
addresses = addresses or self.addresses
return self.coin.unspent(*addresses, merkle_proof=merkle_proof)
def select_unspents(self, value, addresses=None, merkle_proof=False):
unspents = self.unspent(addresses=addresses, merkle_proof=merkle_proof)
return select(unspents, value)
def history(self, addresses=None, merkle_proof=False):
addresses = addresses or self.addresses
return self.coin.history(*addresses, merkle_proof=merkle_proof)
def synchronise(self):
tx_hashes = [tx['tx_hash'] for tx in self.transaction_history]
txs = self.history()
new_txs = [tx for tx in txs if tx['tx_hash'] not in tx_hashes]
self.transaction_history += self.coin.filter_by_proof(*new_txs)
def sign(self, txobj, password=None):
if self.is_watching_only:
return
pkeys_for = [inp['address'] for inp in txobj['ins']]
        privkeys = {address: self.privkey(address, password) for address in pkeys_for}
return self.coin.signall(txobj, privkeys)
def pushtx(self, tx_hex):
return self.coin.pushtx(tx_hex)
def preparemultitx(self, outs, fee=50000, change_addr=None, fee_for_blocks=0, addresses=None):
change = change_addr or self.select_change_address()
value = sum(out['value'] for out in outs) + fee
ins = self.select_unspents(value, addresses=addresses)
if self.coin.segwit_supported:
if self.xtype == 'p2pkh':
for i in ins:
i['segwit'] = False
i['new_segwit'] = False
elif self.xtype == "p2wpkh-p2sh":
for i in ins:
i['segwit'] = True
i['new_segwit'] = False
elif self.xtype == 'p2wpkh':
for i in ins:
i['segwit'] = True
i['new_segwit'] = True
return self.coin.mktx_with_change(ins, outs, fee=fee, fee_for_blocks=fee_for_blocks, change=change)
def preparetx(self, to, value, fee=50000, fee_for_blocks=0, change_addr=None, addresses=None):
outs = [{'address': to, 'value': value}]
return self.preparemultitx(outs, fee=fee, fee_for_blocks=fee_for_blocks, change_addr=change_addr,
addresses=addresses)
    def preparesignedtx(self, to, value, fee=50000, fee_for_blocks=0, change_addr=None, addresses=None,
password=None):
txobj = self.preparetx(to, value, fee=fee, fee_for_blocks=fee_for_blocks, change_addr=change_addr,
addresses=addresses)
return self.sign(txobj, password=password)
def preparesignedmultitx(self, outs, fee=50000, fee_for_blocks=0, change_addr=None, addresses=None, password=None):
txobj = self.preparemultitx(outs, fee=fee, change_addr=change_addr, addresses=addresses,
fee_for_blocks=fee_for_blocks)
return self.sign(txobj, password=password)
def send(self, to, value, fee=50000, fee_for_blocks=0, change_addr=None, addresses=None, password=None):
tx = self.preparesignedtx(to, value, fee=fee, fee_for_blocks=fee_for_blocks, change_addr=change_addr,
addresses=addresses, password=password)
return self.pushtx(tx)
def sendmultitx(self, outs, fee=50000, fee_for_blocks=0, change_addr=None, addresses=None, password=None):
tx = self.preparesignedmultitx(outs, fee=fee, fee_for_blocks=fee_for_blocks, change_addr=change_addr,
addresses=addresses, password=password)
return self.pushtx(tx)
class HDWallet(Wallet):
def __init__(self, keystore, transaction_history=None, num_addresses=0, last_receiving_index=0, last_change_index=0):
super(HDWallet, self).__init__(keystore, transaction_history=transaction_history)
self.last_receiving_index = last_receiving_index
self.last_change_index = last_change_index
self.new_receiving_addresses(num=num_addresses)
self.new_change_addresses(num=num_addresses)
self.used_addresses = self.get_used_addresses()
self.xtype = self.keystore.xtype
if self.keystore.electrum:
self.script_type = self.keystore.xtype
else:
self.script_type = "p2pkh"
def privkey(self, address, formt="wif_compressed", password=None):
if self.is_watching_only:
return
try:
addr_derivation = self.address_derivations[address]
except KeyError:
raise Exception(
"Address %s has not been generated yet. Generate new address_derivations with new_receiving_addresses or new_change_addresses methods" % address)
pk, compressed = self.keystore.get_private_key(addr_derivation, password)
return self.coin.encode_privkey(pk, formt, script_type=self.script_type)
def export_privkeys(self, password=None):
if self.is_watching_only:
return
return {
'receiving': {addr: self.privkey(addr, password=password) for addr in self.receiving_addresses},
'change': {addr: self.privkey(addr, password=password) for addr in self.change_addresses}
}
def pubkey_receiving(self, index):
return self.keystore.derive_pubkey(0, index)
def pubkey_change(self, index):
return self.keystore.derive_pubkey(1, index)
def pubtoaddr(self, pubkey):
if self.xtype == "p2pkh":
return self.coin.pubtoaddr(pubkey)
elif self.xtype == "p2wpkh":
return self.coin.pubtosegwitaddress(pubkey)
elif self.xtype == "p2wpkh-p2sh":
return self.coin.pubtop2wpkh_p2sh(pubkey)
def receiving_address(self, index):
pubkey = self.pubkey_receiving(index)
address = self.pubtoaddr(pubkey)
self.address_derivations[address] = (0, index)
return address
def change_address(self, index):
pubkey = self.pubkey_change(index)
address = self.pubtoaddr(pubkey)
self.address_derivations[address] = (1, index)
return address
@property
def addresses(self):
return self.address_derivations.keys()
@property
def receiving_addresses(self):
return [addr for addr in self.address_derivations.keys() if not self.address_derivations[addr][0]]
@property
def change_addresses(self):
return [addr for addr in self.address_derivations.keys() if self.address_derivations[addr][0]]
def new_receiving_address_range(self, num):
index = self.last_receiving_index
return range(index, index+num)
def new_change_address_range(self, num):
index = self.last_change_index
return range(index, index+num)
def new_receiving_addresses(self, num=10):
addresses = list(map(self.receiving_address, self.new_receiving_address_range(num)))
self.last_receiving_index += num
return addresses
def new_change_addresses(self, num=10):
addresses = list(map(self.change_address, self.new_change_address_range(num)))
self.last_change_index += num
return addresses
def new_receiving_address(self):
return self.new_receiving_addresses(num=1)[0]
def new_change_address(self):
return self.new_change_addresses(num=1)[0]
def select_receive_address(self):
try:
return next(addr for addr in self.receiving_addresses if addr not in self.used_addresses)
except StopIteration:
return self.new_receiving_address()
def select_change_address(self):
try:
            return next(addr for addr in self.change_addresses if addr not in self.used_addresses)
except StopIteration:
return self.new_change_address()
def is_change(self, address):
return address in self.change_addresses
def get_used_addresses(self):
return list(set([tx['addr'] for tx in self.transaction_history]))
def synchronise(self):
super(HDWallet, self).synchronise()
self.used_addresses = self.get_used_addresses()
|
297bc25154232acb612d864e3473dd06f8b34810
|
68073b5bbec051890bce2cdb0abbf1c7652002ed
|
/src/robotide/application/pluginloader.py
|
379fb12bc1af7cc1f99fb6c03cd4fac5678b8120
|
[
"Apache-2.0"
] |
permissive
|
robotframework/RIDE
|
3b6dc9629e34b6f350e154e5f76d106fa48eaaa8
|
ed4d650dbd806672401d4341fecc30274c4972c7
|
refs/heads/master
| 2023-09-05T15:59:01.151700
| 2023-09-02T22:39:16
| 2023-09-02T22:39:16
| 2,467,257
| 897
| 419
|
Apache-2.0
| 2023-09-10T03:43:39
| 2011-09-27T11:53:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,027
|
py
|
pluginloader.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.util
import inspect
import os
from ..context import LOG
from ..pluginapi import Plugin
from .pluginconnector import plugin_factory
class PluginLoader(object):
def __init__(self, application, load_dirs, standard_classes):
self._load_errors = []
self.plugins = [plugin_factory(application, cls) for cls in standard_classes + self._find_classes(load_dirs)]
if self._load_errors:
LOG.error('\n\n'.join(self._load_errors))
def enable_plugins(self):
for p in self.plugins:
p.enable_on_startup()
def _find_classes(self, load_dirs):
classes = []
for path in self._find_python_files(load_dirs):
for cls in self._import_classes(path):
if self._is_plugin_class(path, cls):
classes.append(cls)
return classes
def _is_plugin_class(self, path, cls):
try:
return issubclass(cls, Plugin) and cls is not Plugin
except Exception as err:
msg = "Finding classes from module '%s' failed: %s"
self._load_errors.append(msg % (path, err))
def _find_python_files(self, load_dirs):
files = []
for path in load_dirs:
if not os.path.exists(path):
continue
for filename in os.listdir(path):
full_path = os.path.join(path, filename)
if filename[0].isalpha() and \
os.path.splitext(filename)[1].lower() == ".py":
files.append(full_path)
elif os.path.isdir(full_path):
files.extend(self._find_python_files([full_path]))
return files
def _import_classes(self, path):
_, filename = os.path.split(path)
modulename = os.path.splitext(filename)[0]
spec = importlib.util.spec_from_file_location(modulename, path)
if spec is None:
return []
try:
m_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(m_module)
except Exception as err:
self._load_errors.append("Importing plugin module '%s' failed:\n%s"
% (path, err))
return []
return [cls for _, cls in
inspect.getmembers(m_module, predicate=inspect.isclass)]
|
95e3dc8395fc3885adbc1a8dc90f20d91c99ada1
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/motech/tests/test_repeater_helpers.py
|
bd66083eb40b11d5e9379c3a4fa50c4b46c046c8
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 6,115
|
py
|
test_repeater_helpers.py
|
from datetime import datetime
from unittest.mock import patch
from django.test.testcases import TestCase
from corehq.form_processor.models import CommCareCase
from corehq.motech.repeater_helpers import get_relevant_case_updates_from_form_json
class TestRepeaterHelpers(TestCase):
def setUp(self):
self.domain = 'test-domain'
self.extra_fields = []
self.form_question_values = {}
case_1_data = {
'case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'domain': self.domain,
'type': 'paciente',
'name': 'case1',
'owner_id': 'owner_1',
'modified_by': 'modified_by',
}
case_2_data = {
'case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'domain': self.domain,
'type': 'casa',
'name': 'case2',
'owner_id': 'owner_2',
'modified_by': 'modified_by',
}
self.case_1 = create_commcare_case(case_1_data)
self.case_2 = create_commcare_case(case_2_data)
def tearDown(self):
self.case_1.delete()
self.case_2.delete()
@patch.object(CommCareCase.objects, 'get_cases')
def test__get_relevant_case_updates_from_form_json_with_case_types(self, get_cases):
get_cases.return_value = [self.case_1, self.case_2]
result = get_relevant_case_updates_from_form_json(
self.domain,
_get_form_json(),
['paciente'],
self.extra_fields
)
self.assertEqual(len(result), 2)
@patch.object(CommCareCase.objects, 'get_cases')
def test__get_relevant_case_updates_from_form_json_without_case_types(self, get_cases):
get_cases.return_value = [self.case_1, self.case_2]
result = get_relevant_case_updates_from_form_json(
self.domain,
_get_form_json(),
[],
self.extra_fields
)
self.assertEqual(len(result), 3)
def create_commcare_case(data):
cccsql = CommCareCase(
case_id=data['case_id'],
domain=data['domain'],
type=data['type'],
name=data['name'],
owner_id=data['owner_id'],
modified_by=data['modified_by'],
modified_on=datetime.utcnow(),
server_modified_on=datetime.utcnow(),
)
cccsql.save()
return cccsql
def _get_form_json():
return {'app_id': 'APP_ID',
'archived': False,
'attachments': {
'form.xml': {
'content_type': 'text/xml',
'length': 10975,
'url': 'https://www.commcarehq.org/a/infomovel-pepfar'
'/api/form/attachment/CONFIDENTIAL/form.xml'
}
},
'build_id': 'BUILD_ID',
'domain': 'infomovel-pepfar',
'edited_by_user_id': None,
'edited_on': None,
'form': {'#type': 'data',
'@name': 'SOME NAME',
'@uiVersion': '1',
'@version': 'VERSION',
'@xmlns': 'http://openrosa.org/formdesigner/IDIDID',
'casa_data': {'convivente_cascade': {},
'conviventes_names': {},
'index_cascade': {},
'save_to_case': {'alocar_paciente_casa': {
'case': {'@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'@date_modified': '2021-06-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'index': {
'parent': {
'#text': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'@case_type': '',
'@relationship': 'child'
}
}}},
'criar_actualizar_casa': {
'case': {'@case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
'@date_modified': '2021-05-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'create': {'case_name': 'CASE NAME',
'case_type': 'casa',
'owner_id': 'owner_1'},
'update': {
'age_range1': '25-30',
'age_range2': '25-30 anos',
}
}}},
'tb_patient_in_household': '0'},
'case': {'@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
'@date_modified': '2021-06-24T08:43:06.746000Z',
'@user_id': 'USER ID',
'@xmlns': 'http://commcarehq.org/case/transaction/v2',
'update': {'name': 'John Lennon'}},
'confirm_info': {},
'confirmar_perfil': {},
'imported_properties': {},
'indicators_v4': {},
'key_workflow_properties': {},
'meta': {},
'patient_data': {}, },
'metadata': {},
}
|
6208360d76361ad22c088716113868cfe933089e
|
9ee89ea711a14565d8664f180b213c0e0bd423ac
|
/pywinauto/unittests/test_menuwrapper.py
|
494317972c9e1e39730135a503d934b145791482
|
[
"BSD-3-Clause",
"LGPL-2.1-or-later",
"LGPL-2.1-only"
] |
permissive
|
pywinauto/pywinauto
|
c55b3f42d0b9d5b18b005527edd791f4da05f805
|
bf7f789d01b7c66ccd0c213db0a029da7e588c9e
|
refs/heads/atspi
| 2023-08-30T21:05:31.992955
| 2022-12-29T15:23:40
| 2022-12-30T10:52:06
| 36,223,210
| 4,466
| 819
|
BSD-3-Clause
| 2023-09-12T02:26:23
| 2015-05-25T09:50:40
|
Python
|
UTF-8
|
Python
| false
| false
| 6,282
|
py
|
test_menuwrapper.py
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for Menu"""
import sys
import os
import unittest
sys.path.append(".")
from pywinauto.windows.application import Application
from pywinauto.sysinfo import is_x64_Python
from pywinauto.controls.menuwrapper import MenuItemNotEnabled
from pywinauto.timings import Timings
mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class MenuWrapperTests(unittest.TestCase):
"Unit tests for the Menu and the MenuItem classes"
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application()
self.app.start("Notepad.exe")
self.dlg = self.app.Notepad
def tearDown(self):
"Close the application after tests"
self.app.kill()
def testInvalidHandle(self):
"Test that an exception is raised with an invalid menu handle"
# self.assertRaises(InvalidWindowHandle, HwndWrapper, -1)
pass
def testItemCount(self):
self.assertEqual(5, self.dlg.menu().item_count())
def testItem(self):
self.assertEqual(u'&File', self.dlg.menu().item(0).text())
self.assertEqual(u'&File', self.dlg.menu().item(u'File').text())
self.assertEqual(u'&File', self.dlg.menu().item(u'&File', exact=True).text())
def testItems(self):
self.assertEqual([u'&File', u'&Edit', u'F&ormat', u'&View', u'&Help'],
[item.text() for item in self.dlg.menu().items()])
def testFriendlyClassName(self):
self.assertEqual('MenuItem', self.dlg.menu().item(0).friendly_class_name())
def testMenuItemNotEnabled(self):
self.assertRaises(MenuItemNotEnabled, self.dlg.menu_select, 'Edit->Find Next')
self.assertRaises(MenuItemNotEnabled, self.dlg.menu_item('Edit->Find Next').click)
self.assertRaises(MenuItemNotEnabled, self.dlg.menu_item('Edit->Find Next').click_input)
def testGetProperties(self):
self.assertEqual(
{u'menu_items':
[{u'index': 0, u'state': 0, u'item_type': 0, u'item_id': 64, u'text': u'View &Help'},
{u'index': 1, u'state': 3, u'item_type': 2048, u'item_id': 0, u'text': u''},
{u'index': 2, u'state': 0, u'item_type': 0, u'item_id': 65, u'text': u'&About Notepad'}]},
self.dlg.menu().get_menu_path('Help')[0].sub_menu().get_properties())
def testGetMenuPath(self):
# print('id = ' + str(self.dlg.menu().get_menu_path('Help->#3')[0].id()))
self.assertEqual(u'&About Notepad', self.dlg.menu().get_menu_path(' Help -> #2 ')[-1].text())
self.assertEqual(u'&About Notepad', self.dlg.menu().get_menu_path('Help->$65')[-1].text())
self.assertEqual(u'&About Notepad',
self.dlg.menu().get_menu_path('&Help->&About Notepad', exact=True)[-1].text())
self.assertRaises(IndexError, self.dlg.menu().get_menu_path, '&Help->About what?', exact=True)
def test__repr__(self):
print(self.dlg.menu())
print(self.dlg.menu().get_menu_path('&Help->&About Notepad', exact=True)[-1])
def testClick(self):
self.dlg.menu().get_menu_path('&Help->&About Notepad')[-1].click()
About = self.app.window(name='About Notepad')
About.wait('ready')
About.OK.click()
About.wait_not('visible')
def testClickInput(self):
self.dlg.menu().get_menu_path('&Help->&About Notepad')[-1].click_input()
About = self.app.window(name='About Notepad')
About.wait('ready')
About.OK.click()
About.wait_not('visible')
class OwnerDrawnMenuTests(unittest.TestCase):
"""Unit tests for the OWNERDRAW menu items"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application().start(os.path.join(mfc_samples_folder, u"BCDialogMenu.exe"))
self.dlg = self.app.BCDialogMenu
self.app.wait_cpu_usage_lower(threshold=1.5, timeout=30, usage_interval=1)
self.dlg.wait('ready')
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def testCorrectText(self):
menu = self.dlg.menu()
self.assertEqual(u'&New', menu.get_menu_path('&File->#0')[-1].text()[:4])
self.assertEqual(u'&Open...', menu.get_menu_path('&File->#1')[-1].text()[:8])
if __name__ == "__main__":
unittest.main()
|
a1adf4a5f40b95ba24f1e8051dc71190aeacf310
|
bbf0c3294b8aac4cbabf255c2904556144c85610
|
/modoboa/ldapsync/management/commands/update_dovecot_conf.py
|
dc476beeb8931c2e40e338057dd70b1dca6f4cb2
|
[
"ISC"
] |
permissive
|
modoboa/modoboa
|
4a170fabcb15b892fe627795b02a02d4c16783d6
|
df699aab0799ec1725b6b89be38e56285821c889
|
refs/heads/master
| 2023-08-30T12:58:51.313642
| 2023-08-29T13:22:14
| 2023-08-29T13:22:14
| 9,469,271
| 2,201
| 370
|
ISC
| 2023-09-13T12:47:28
| 2013-04-16T09:43:55
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
update_dovecot_conf.py
|
"""Command to overwrite dovecot LDAP configuration (auth)."""
from django.core.management.base import BaseCommand
from modoboa.parameters import tools as param_tools
from ... import lib
from modoboa.core import models
class Command(BaseCommand):
"""Command definition."""
help = "Update dovecot configuration file to enable LDAP auth"
def handle(self, *args, **options):
"""Command entry point."""
localconfig = models.LocalConfig.objects.first()
if not localconfig.need_dovecot_update:
return
config = dict(param_tools.get_global_parameters("core"))
condition = (
config["authentication_type"] == "ldap" and
config["ldap_dovecot_sync"]
)
if condition:
lib.update_dovecot_config_file(config)
localconfig.need_dovecot_update = False
localconfig.save(update_fields=["need_dovecot_update"])
|
85c112c4d05de368a38ffd9040112a9fd2e13366
|
8f4488494507da4cb6f15073b8aa2e6f97fabb35
|
/test/functional/test_intermediate_output.py
|
82169b6c9faa3df3bd221d800eec047545f87e3c
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-training-toolkit
|
025966a1216aeb78b58f7abab19c6ccb01b0897d
|
e4a765e699e16c5849bbdfd789edbfc9820fdd77
|
refs/heads/master
| 2023-08-21T12:33:59.831391
| 2023-08-08T16:46:40
| 2023-08-08T16:46:40
| 212,439,434
| 415
| 110
|
Apache-2.0
| 2023-09-07T19:58:23
| 2019-10-02T20:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 9,114
|
py
|
test_intermediate_output.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import platform
import time
import boto3
from botocore.exceptions import ClientError
import numpy as np
import pytest
from sagemaker_training import environment, files, intermediate_output
import test
intermediate_path = environment.output_intermediate_dir
bucket = test.default_bucket()
bucket_uri = "s3://{}".format(bucket)
region = test.DEFAULT_REGION
def _timestamp():
return time.strftime("%Y-%m-%d-%H-%M-%S")
@pytest.mark.xfail(
platform.system() != "Linux",
reason="""
intermediate_output.start_sync depends on inotify, which is a Linux kernel subsystem
and is not available on macOS or Windows.
""",
)
def test_intermediate_upload():
os.environ["TRAINING_JOB_NAME"] = _timestamp()
p = intermediate_output.start_sync(bucket_uri, region)
file1 = os.path.join(intermediate_path, "file1.txt")
write_file(file1, "file1!")
os.makedirs(os.path.join(intermediate_path, "dir1", "dir2", "dir3"))
dir1 = os.path.join(intermediate_path, "dir1")
dir2 = os.path.join(dir1, "dir2")
dir3 = os.path.join(dir2, "dir3")
file2 = os.path.join(dir1, "file2.txt")
file3 = os.path.join(dir2, "file3.txt")
file4 = os.path.join(dir3, "file4.txt")
write_file(file2, "dir1_file2!")
write_file(file3, "dir2_file3!")
write_file(file4, "dir1_file4!")
dir_to_delete1 = os.path.join(dir1, "dir4")
file_to_delete1 = os.path.join(dir_to_delete1, "file_to_delete1.txt")
os.makedirs(dir_to_delete1)
write_file(file_to_delete1, "file_to_delete1!")
os.remove(file_to_delete1)
os.removedirs(dir_to_delete1)
file_to_delete2_but_copy = os.path.join(intermediate_path, "file_to_delete2_but_copy.txt")
write_file(file_to_delete2_but_copy, "file_to_delete2!")
time.sleep(1)
os.remove(file_to_delete2_but_copy)
file_to_modify1 = os.path.join(dir3, "file_to_modify1.txt")
write_file(file_to_modify1, "dir3_file_to_modify1_1!")
write_file(file_to_modify1, "dir3_file_to_modify1_2!")
write_file(file_to_modify1, "dir3_file_to_modify1_3!")
content_to_assert = "dir3_file_to_modify1_4!"
write_file(file_to_modify1, content_to_assert)
# the last file to be moved
file5 = os.path.join(intermediate_path, "file5.txt")
write_file(file5, "file5!")
files.write_success_file()
p.join()
# shouldn't be moved
file6 = os.path.join(intermediate_path, "file6.txt")
write_file(file6, "file6!")
# assert that all files that should be under intermediate are still there
assert os.path.exists(file1)
assert os.path.exists(file2)
assert os.path.exists(file3)
assert os.path.exists(file4)
assert os.path.exists(file5)
assert os.path.exists(file6)
assert os.path.exists(file_to_modify1)
# and all the deleted folders and files aren't there
assert not os.path.exists(dir_to_delete1)
assert not os.path.exists(file_to_delete1)
assert not os.path.exists(file_to_delete2_but_copy)
# assert files exist in S3
key_prefix = os.path.join(os.environ.get("TRAINING_JOB_NAME"), "output", "intermediate")
client = boto3.client("s3", region)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file1, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file2, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file3, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file4, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file5, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file_to_modify1, intermediate_path))
)
deleted_file = os.path.join(
key_prefix, os.path.relpath(file_to_delete2_but_copy, intermediate_path)
)
assert _file_exists_in_s3(client, deleted_file)
assert not _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(dir_to_delete1, intermediate_path))
)
assert not _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file_to_delete1, intermediate_path))
)
assert not _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file6, intermediate_path))
)
# check that modified file has
s3 = boto3.resource("s3", region_name=region)
key = os.path.join(key_prefix, os.path.relpath(file_to_modify1, intermediate_path))
modified_file = os.path.join(environment.output_dir, "modified_file.txt")
s3.Bucket(bucket).download_file(key, modified_file)
with open(modified_file) as f:
content = f.read()
assert content == content_to_assert
@pytest.mark.xfail(
platform.system() != "Linux",
reason="""
intermediate_output.start_sync depends on inotify, which is a Linux kernel subsystem
and is not available on macOS or Windows.
""",
)
def test_nested_delayed_file():
os.environ["TRAINING_JOB_NAME"] = _timestamp()
p = intermediate_output.start_sync(bucket_uri, region)
os.makedirs(os.path.join(intermediate_path, "dir1"))
dir1 = os.path.join(intermediate_path, "dir1")
time.sleep(3)
os.makedirs(os.path.join(dir1, "dir2"))
dir2 = os.path.join(dir1, "dir2")
time.sleep(3)
file1 = os.path.join(dir2, "file1.txt")
write_file(file1, "file1")
os.makedirs(os.path.join(intermediate_path, "dir3"))
dir3 = os.path.join(intermediate_path, "dir3")
time.sleep(3)
file2 = os.path.join(dir3, "file2.txt")
write_file(file2, "file2")
files.write_success_file()
p.join()
# assert that all files that should be under intermediate are still there
assert os.path.exists(file1)
assert os.path.exists(file2)
# assert file exist in S3
key_prefix = os.path.join(os.environ.get("TRAINING_JOB_NAME"), "output", "intermediate")
client = boto3.client("s3", region)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file1, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file2, intermediate_path))
)
@pytest.mark.xfail(
platform.system() != "Linux",
reason="""
intermediate_output.start_sync depends on inotify, which is a Linux kernel subsystem
and is not available on macOS or Windows.
""",
)
def test_large_files():
os.environ["TRAINING_JOB_NAME"] = _timestamp()
p = intermediate_output.start_sync(bucket_uri, region)
file_size = 1024 * 256 * 17 # 17MB
file = os.path.join(intermediate_path, "file.npy")
_generate_large_npy_file(file_size, file)
file_to_modify = os.path.join(intermediate_path, "file_to_modify.npy")
_generate_large_npy_file(file_size, file_to_modify)
content_to_assert = _generate_large_npy_file(file_size, file_to_modify)
files.write_failure_file("Failure!!")
p.join()
assert os.path.exists(file)
assert os.path.exists(file_to_modify)
key_prefix = os.path.join(os.environ.get("TRAINING_JOB_NAME"), "output", "intermediate")
client = boto3.client("s3", region)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file, intermediate_path))
)
assert _file_exists_in_s3(
client, os.path.join(key_prefix, os.path.relpath(file_to_modify, intermediate_path))
)
# check that modified file has
s3 = boto3.resource("s3", region_name=region)
key = os.path.join(key_prefix, os.path.relpath(file_to_modify, intermediate_path))
modified_file = os.path.join(environment.output_dir, "modified_file.npy")
s3.Bucket(bucket).download_file(key, modified_file)
assert np.array_equal(np.load(modified_file), content_to_assert)
def write_file(path, data, mode="w"):
with open(path, mode) as f:
f.write(data)
def _generate_large_npy_file(size, file_path):
letters = np.array(list(chr(ord("a") + i) for i in range(26)))
content = np.random.choice(letters, size)
np.save(file_path, content)
return content
def _file_exists_in_s3(client, key):
"""return the key's size if it exist, else None"""
try:
obj = client.head_object(Bucket=bucket, Key=key)
return obj["ContentLength"]
except ClientError as exc:
if exc.response["Error"]["Code"] != "404":
raise
|
07772cff9e091a44e23bb021ab41cd5e0afcf0c2
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2olazy_import.py
|
a1c2efb45d7e65b733bee2dfeeff94e78f0615d9
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
pyunit_h2olazy_import.py
|
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
def h2olazy_import():
"""
Python API test: h2o.lazy_import(path)
"""
training_data = h2o.lazy_import(pyunit_utils.locate("smalldata/prostate/prostate_cat.csv"))
assert_is_type(training_data, list)
if __name__ == "__main__":
pyunit_utils.standalone_test(h2olazy_import)
else:
h2olazy_import()
|
d96e80aae7e48e027750e289033610839bb56436
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltPhase2L3MuonInitialStepSeeds_cfi.py
|
e070e1e12253b6e87ff04af536b81ff9e272bf34
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
hltPhase2L3MuonInitialStepSeeds_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltPhase2L3MuonInitialStepSeeds = cms.EDProducer("SeedGeneratorFromProtoTracksEDProducer",
InputCollection = cms.InputTag("hltPhase2L3MuonPixelTracks"),
InputVertexCollection = cms.InputTag(""),
SeedCreatorPSet = cms.PSet(
refToPSet_ = cms.string('hltPhase2L3MuonSeedFromProtoTracks')
),
TTRHBuilder = cms.string('WithTrackAngle'),
originHalfLength = cms.double(0.3),
originRadius = cms.double(0.1),
useEventsWithNoVertex = cms.bool(True),
usePV = cms.bool(True),
useProtoTrackKinematics = cms.bool(False)
)
|
29afb22790a604376ab6037e622dba2972864bec
|
5fe00998d48c5fcf6f2e0a48599cdfbf3853004f
|
/pydm/tests/widgets/test_curve_editor.py
|
09e8c09d2b32032a247b68975ddee0e490529f61
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
slaclab/pydm
|
859e6e8240de286c0ed617031a37a84e91947cbc
|
ac354b744d358e05d6e0606acc114a3a511d7e5d
|
refs/heads/master
| 2023-08-16T18:08:29.656670
| 2023-08-15T20:13:19
| 2023-08-15T20:13:19
| 59,700,161
| 109
| 82
|
NOASSERTION
| 2023-09-14T04:45:36
| 2016-05-25T21:50:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,627
|
py
|
test_curve_editor.py
|
from qtpy.QtWidgets import QTableView
from ...widgets.baseplot import BasePlot
from ...widgets.baseplot_curve_editor import (AxisColumnDelegate, ColorColumnDelegate, LineColumnDelegate,
SymbolColumnDelegate, RedrawModeColumnDelegate, PlotStyleColumnDelegate)
from ...widgets.baseplot_table_model import BasePlotCurvesModel
from ...widgets.scatterplot_curve_editor import ScatterPlotCurveEditorDialog
from ...widgets.timeplot_curve_editor import TimePlotCurveEditorDialog
from ...widgets.waveformplot import WaveformCurveItem
from ...widgets.waveformplot_curve_editor import WaveformPlotCurveEditorDialog
def test_waveform_curve_editor(qtbot):
"""
Ensure that the waveform curve editor looks and functions as expected
"""
# Create waveform plot curve editor along with its associated plot. Ensure it shows.
base_plot = BasePlot()
qtbot.addWidget(base_plot)
curve_editor = WaveformPlotCurveEditorDialog(base_plot)
qtbot.addWidget(curve_editor)
curve_editor.show()
table_model = curve_editor.table_model
table_view = curve_editor.table_view
# Verify that the drop downs for columns with non built-in types are all put in the correct place
# Note: We do need to check these on each individual type of curve editor (see below tests) and not just
# in the base plot editor since each plot type can have varying numbers of columns
color_index = table_model.getColumnIndex('Color')
line_style_index = table_model.getColumnIndex('Line Style')
symbol_index = table_model.getColumnIndex('Symbol')
redraw_mode_index = table_model.getColumnIndex('Redraw Mode')
plot_style_index = table_model.getColumnIndex('Style')
assert type(table_view.itemDelegateForColumn(color_index)) is ColorColumnDelegate
assert type(table_view.itemDelegateForColumn(line_style_index)) is LineColumnDelegate
assert type(table_view.itemDelegateForColumn(symbol_index)) is SymbolColumnDelegate
assert type(table_view.itemDelegateForColumn(redraw_mode_index)) is RedrawModeColumnDelegate
assert type(table_view.itemDelegateForColumn(plot_style_index)) is PlotStyleColumnDelegate
def test_timeplot_curve_editor(qtbot):
"""
Ensure that the time plot curve editor looks and functions as expected
"""
# Create time plot curve editor along with its associated plot. Ensure it shows.
base_plot = BasePlot()
qtbot.addWidget(base_plot)
curve_editor = TimePlotCurveEditorDialog(base_plot)
qtbot.addWidget(curve_editor)
curve_editor.show()
table_model = curve_editor.table_model
table_view = curve_editor.table_view
# Verify that the drop downs for columns with non built-in types are all put in the correct place
color_index = table_model.getColumnIndex('Color')
line_style_index = table_model.getColumnIndex('Line Style')
symbol_index = table_model.getColumnIndex('Symbol')
plot_style_index = table_model.getColumnIndex('Style')
assert type(table_view.itemDelegateForColumn(color_index)) is ColorColumnDelegate
assert type(table_view.itemDelegateForColumn(line_style_index)) is LineColumnDelegate
assert type(table_view.itemDelegateForColumn(symbol_index)) is SymbolColumnDelegate
assert type(table_view.itemDelegateForColumn(plot_style_index)) is PlotStyleColumnDelegate
def test_scatterplot_editor(qtbot):
"""
Ensure that the scatter plot curve editor looks and functions as expected
"""
# Create scatter plot curve editor along with its associated plot. Ensure it shows.
base_plot = BasePlot()
qtbot.addWidget(base_plot)
curve_editor = ScatterPlotCurveEditorDialog(base_plot)
qtbot.addWidget(curve_editor)
curve_editor.show()
table_model = curve_editor.table_model
table_view = curve_editor.table_view
# Verify that the drop downs for columns with non built-in types are all put in the correct place
color_index = table_model.getColumnIndex('Color')
line_style_index = table_model.getColumnIndex('Line Style')
symbol_index = table_model.getColumnIndex('Symbol')
redraw_mode_index = table_model.getColumnIndex('Redraw Mode')
assert type(table_view.itemDelegateForColumn(color_index)) is ColorColumnDelegate
assert type(table_view.itemDelegateForColumn(line_style_index)) is LineColumnDelegate
assert type(table_view.itemDelegateForColumn(symbol_index)) is SymbolColumnDelegate
assert type(table_view.itemDelegateForColumn(redraw_mode_index)) is RedrawModeColumnDelegate
def test_axis_editor(qtbot):
"""
Ensure that the axis editor tab in the curve editor looks and functions as expected
"""
base_plot = BasePlot()
qtbot.addWidget(base_plot)
curve_editor = WaveformPlotCurveEditorDialog(base_plot)
axis_model = curve_editor.axis_model
axis_view = curve_editor.axis_view
# Verify the column count is correct, and the axis column delegate is placed correctly
axis_orientation_index = axis_model._column_names.index('Y-Axis Orientation')
assert type(axis_view.itemDelegateForColumn(axis_orientation_index)) is AxisColumnDelegate
def test_plot_style_column_delegate(qtbot):
""" Verify the functionality of the show/hide column feature """
# Set up a plot with three data items. Two will be plotted as lines, and one as bars.
base_plot = BasePlot()
qtbot.addWidget(base_plot)
line_item_1 = WaveformCurveItem()
line_item_2 = WaveformCurveItem()
bar_item = WaveformCurveItem(plot_style='Bar')
plot_curves_model = BasePlotCurvesModel(plot=base_plot)
table_view = QTableView()
table_view.setModel(plot_curves_model)
plot_style_column_delegate = PlotStyleColumnDelegate(parent=base_plot,
table_model=plot_curves_model,
table_view=table_view)
base_plot.addCurve(line_item_1)
plot_style_column_delegate.toggleColumnVisibility()
# With only the line style curve displayed the four line columns should be shown
for column in plot_style_column_delegate.line_columns_to_toggle:
assert not table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
# And the four bar columns should be hidden
for column in plot_style_column_delegate.bar_columns_to_toggle:
assert table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
# Now add an additional line curve and a bar curve. All 8 columns should now be visible since it's a mixed plot
base_plot.addCurve(line_item_2)
base_plot.addCurve(bar_item)
plot_style_column_delegate.toggleColumnVisibility()
for column in plot_style_column_delegate.line_columns_to_toggle:
assert not table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
for column in plot_style_column_delegate.bar_columns_to_toggle:
assert not table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
# Finally remove both line curves to test the last option, nothing but bar style curves. All line options should
# be hidden, while the bar options should still be shown
base_plot.removeCurve(line_item_1)
base_plot.removeCurve(line_item_2)
plot_style_column_delegate.toggleColumnVisibility()
for column in plot_style_column_delegate.line_columns_to_toggle:
assert table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
for column in plot_style_column_delegate.bar_columns_to_toggle:
assert not table_view.isColumnHidden(plot_curves_model.getColumnIndex(column))
|
93a4a73212de8c4f3e50ae66ab3f3052b6fe974b
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/tests/series/methods/test_convert_dtypes.py
|
d1c79d0f00365ed5e2bd2fae08ce57483f51182c
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,302
|
py
|
test_convert_dtypes.py
|
from itertools import product
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
# Each test case consists of a tuple with the data and dtype to create the
# test Series, the default dtype for the expected result (which is valid
# for most cases), and the specific cases where the result deviates from
# this default. Those overrides are defined as a dict with (keyword, val) as
# dictionary key. In case of multiple items, the last override takes precedence.
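# For example, the first case below — data [1, 2, 3] with original dtype int32 — is expected to
# convert to the nullable "Int32" dtype by default, except when convert_integer=False, in which
# case it should stay as np.dtype("int32").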
@pytest.fixture(
params=[
(
# data
[1, 2, 3],
# original dtype
np.dtype("int32"),
# default expected dtype
"Int32",
# exceptions on expected dtype
{("convert_integer", False): np.dtype("int32")},
),
(
[1, 2, 3],
np.dtype("int64"),
"Int64",
{("convert_integer", False): np.dtype("int64")},
),
(
["x", "y", "z"],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
(
[True, False, np.nan],
np.dtype("O"),
pd.BooleanDtype(),
{("convert_boolean", False): np.dtype("O")},
),
(
["h", "i", np.nan],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
( # GH32117
["h", "i", 1],
np.dtype("O"),
np.dtype("O"),
{},
),
(
[10, np.nan, 20],
np.dtype("float"),
"Int64",
{
("convert_integer", False, "convert_floating", True): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype(
"float"
),
},
),
(
[np.nan, 100.5, 200],
np.dtype("float"),
"Float64",
{("convert_floating", False): np.dtype("float")},
),
(
[3, 4, 5],
"Int8",
"Int8",
{},
),
(
[[1, 2], [3, 4], [5]],
None,
np.dtype("O"),
{},
),
(
[4, 5, 6],
np.dtype("uint32"),
"UInt32",
{("convert_integer", False): np.dtype("uint32")},
),
(
[-10, 12, 13],
np.dtype("i1"),
"Int8",
{("convert_integer", False): np.dtype("i1")},
),
(
[1.2, 1.3],
np.dtype("float32"),
"Float32",
{("convert_floating", False): np.dtype("float32")},
),
(
[1, 2.0],
object,
"Int64",
{
("convert_integer", False): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype(
"float"
),
("infer_objects", False): np.dtype("object"),
},
),
(
[1, 2.5],
object,
"Float64",
{
("convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
pd.DatetimeTZDtype(tz="UTC"),
pd.DatetimeTZDtype(tz="UTC"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
"datetime64[ns]",
np.dtype("datetime64[ns]"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
object,
np.dtype("datetime64[ns]"),
{("infer_objects", False): np.dtype("object")},
),
(
pd.period_range("1/1/2011", freq="M", periods=3),
None,
pd.PeriodDtype("M"),
{},
),
(
pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
None,
pd.IntervalDtype("int64", "right"),
{},
),
]
)
def test_cases(request):
return request.param
class TestSeriesConvertDtypes:
@pytest.mark.parametrize("params", product(*[(True, False)] * 5))
def test_convert_dtypes(
self,
test_cases,
params,
):
data, maindtype, expected_default, expected_other = test_cases
if (
hasattr(data, "dtype")
and data.dtype == "M8[ns]"
and isinstance(maindtype, pd.DatetimeTZDtype)
):
# this astype is deprecated in favor of tz_localize
msg = "Cannot use .astype to convert from timezone-naive dtype"
with pytest.raises(TypeError, match=msg):
pd.Series(data, dtype=maindtype)
return
if maindtype is not None:
series = pd.Series(data, dtype=maindtype)
else:
series = pd.Series(data)
result = series.convert_dtypes(*params)
param_names = [
"infer_objects",
"convert_string",
"convert_integer",
"convert_boolean",
"convert_floating",
]
params_dict = dict(zip(param_names, params))
expected_dtype = expected_default
for spec, dtype in expected_other.items():
if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):
expected_dtype = dtype
expected = pd.Series(data, dtype=expected_dtype)
tm.assert_series_equal(result, expected)
# Test that it is a copy
copy = series.copy(deep=True)
if result.notna().sum() > 0 and result.dtype in ["interval[int64, right]"]:
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
result[result.notna()] = np.nan
else:
result[result.notna()] = np.nan
# Make sure original not changed
tm.assert_series_equal(series, copy)
def test_convert_string_dtype(self, nullable_string_dtype):
# https://github.com/pandas-dev/pandas/issues/31731 -> converting columns
# that are already string dtype
df = pd.DataFrame(
{"A": ["a", "b", pd.NA], "B": ["ä", "ö", "ü"]}, dtype=nullable_string_dtype
)
result = df.convert_dtypes()
tm.assert_frame_equal(df, result)
def test_convert_bool_dtype(self):
# GH32287
df = pd.DataFrame({"A": pd.array([True])})
tm.assert_frame_equal(df, df.convert_dtypes())
def test_convert_byte_string_dtype(self):
# GH-43183
byte_str = b"binary-string"
df = pd.DataFrame(data={"A": byte_str}, index=[0])
result = df.convert_dtypes()
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"infer_objects, dtype", [(True, "Int64"), (False, "object")]
)
def test_convert_dtype_object_with_na(self, infer_objects, dtype):
# GH#48791
ser = pd.Series([1, pd.NA])
result = ser.convert_dtypes(infer_objects=infer_objects)
expected = pd.Series([1, pd.NA], dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"infer_objects, dtype", [(True, "Float64"), (False, "object")]
)
def test_convert_dtype_object_with_na_float(self, infer_objects, dtype):
# GH#48791
ser = pd.Series([1.5, pd.NA])
result = ser.convert_dtypes(infer_objects=infer_objects)
expected = pd.Series([1.5, pd.NA], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_convert_dtypes_pyarrow_to_np_nullable(self):
# GH 53648
pytest.importorskip("pyarrow")
ser = pd.Series(range(2), dtype="int32[pyarrow]")
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
expected = pd.Series(range(2), dtype="Int32")
tm.assert_series_equal(result, expected)
|
60ac98acfe4d685cbe057536bbdd69ccb58d2335
|
305d26e6cb4929e36fd333362be006edf46401cd
|
/cle/backends/elf/relocation/amd64.py
|
44361f7ea1a6193d98d30b40303309e802744845
|
[
"BSD-2-Clause"
] |
permissive
|
angr/cle
|
192ecf000618661e151e15440fc9c2e5f26842b9
|
23edc1e95b0b1bace308ca80b5a8189bf8cbf8f3
|
refs/heads/master
| 2023-08-25T10:28:15.602744
| 2023-08-22T17:14:21
| 2023-08-22T17:14:21
| 36,358,216
| 389
| 158
|
BSD-2-Clause
| 2023-09-11T22:08:49
| 2015-05-27T09:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
amd64.py
|
from .generic import (
GenericAbsoluteAddendReloc,
GenericCopyReloc,
GenericIRelativeReloc,
GenericJumpslotReloc,
GenericPCRelativeAddendReloc,
GenericRelativeReloc,
GenericTLSDoffsetReloc,
GenericTLSModIdReloc,
GenericTLSOffsetReloc,
RelocGOTMixin,
RelocTruncate32Mixin,
)
arch = "AMD64"
class R_X86_64_64(GenericAbsoluteAddendReloc):
pass
class R_X86_64_COPY(GenericCopyReloc):
pass
class R_X86_64_RELATIVE(GenericRelativeReloc):
pass
class R_X86_64_IRELATIVE(GenericIRelativeReloc):
pass
class R_X86_64_GLOB_DAT(GenericJumpslotReloc):
pass
class R_X86_64_JUMP_SLOT(GenericJumpslotReloc):
pass
class R_X86_64_DTPMOD64(GenericTLSModIdReloc):
pass
class R_X86_64_DTPOFF64(GenericTLSDoffsetReloc):
pass
class R_X86_64_TPOFF64(GenericTLSOffsetReloc):
pass
class R_X86_64_PC32(RelocTruncate32Mixin, GenericPCRelativeAddendReloc):
check_sign_extend = True
class R_X86_64_32(RelocTruncate32Mixin, GenericAbsoluteAddendReloc):
check_zero_extend = True
class R_X86_64_32S(RelocTruncate32Mixin, GenericAbsoluteAddendReloc):
check_sign_extend = True
class R_X86_64_PLT32(RelocTruncate32Mixin, GenericPCRelativeAddendReloc):
check_sign_extend = True
class R_X86_64_GOTPCREL(RelocGOTMixin, RelocTruncate32Mixin, GenericPCRelativeAddendReloc):
check_sign_extend = True
class R_X86_64_GOTPCRELX(RelocGOTMixin, RelocTruncate32Mixin, GenericPCRelativeAddendReloc):
check_sign_extend = True
class R_X86_64_REX_GOTPCRELX(RelocGOTMixin, RelocTruncate32Mixin, GenericPCRelativeAddendReloc):
check_sign_extend = True
|
956eebc9cfba61211976c43319ce86e8cc46e45a
|
35cd401ef876bec39366e5724bae71581c0cb658
|
/Python/control-examples/klampt_catkin/src/klampt/scripts/serialcontroller.py
|
25fc29ab8d7c69f7b87a03f28254f05a3a98cf44
|
[] |
permissive
|
krishauser/Klampt
|
bd450b8c67189b31abe4eb056707d50da3aa651b
|
dbaf38ca290a36fba9a8f4f9b6a49fda689f6585
|
refs/heads/master
| 2023-08-29T21:54:40.758699
| 2023-07-21T12:34:38
| 2023-07-21T12:34:38
| 13,944,923
| 327
| 98
|
BSD-3-Clause
| 2023-08-26T04:20:26
| 2013-10-29T02:46:08
|
C++
|
UTF-8
|
Python
| false
| false
| 8,460
|
py
|
serialcontroller.py
|
"""An adaptor between python controllers and the Klamp't serial controller
interface (SerialController).
"""
import asyncore,socket
import errno
import json
import time
import controller
headerlen = 4
def packStrlen(s):
l = len(s)
assert(l <= 0xffffffff)
bytes = [None]*4
bytes[0] = chr(l&0xff)
bytes[1] = chr((l>>8)&0xff)
bytes[2] = chr((l>>16)&0xff)
bytes[3] = chr((l>>24)&0xff)
return ''.join(bytes)
def unpackStrlen(s):
assert len(s)==headerlen
return (ord(s[3])<<24)|(ord(s[2])<<16)|(ord(s[1])<<8)|ord(s[0])
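# Quick sanity check of the framing helpers (editor's sketch): the header is the message length
# packed as 4 little-endian bytes, so the two functions are inverses of each other.
# >>> unpackStrlen(packStrlen("hello")) == len("hello")
# True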
def writeSocket(socket,msg):
totalsent = 0
while totalsent < len(msg):
sent = socket.send(msg[totalsent:])
if sent == 0:
raise IOError("socket connection broken")
totalsent = totalsent + sent
return
def readSocket(socket,length):
chunk = socket.recv(length)
msg = chunk
while len(msg) < length:
chunk = socket.recv(length-len(msg))
if chunk == '':
raise IOError("socket connection broken")
msg = msg + chunk
return msg
class JsonClient(asyncore.dispatcher):
"""A client that transmits JSON messages in the Klamp't simple serial
interface. Sends/receives variable-length messages such that the first
4 bytes are the length of the message (in binary) and the remainder is
the payload.
Subclasses should override onMessage, which accepts with arbitrary
Python objects that can be serialized by the json module.
Subclasses should use sendMessage to send a message.
To run, call asyncore.loop().
"""
def __init__(self, addr):
if isinstance(addr,socket.socket):
            asyncore.dispatcher.__init__(self, addr)
else:
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect( addr )
self.buffer = ""
def handle_connect(self):
"""Called on socket connect. May be overridden."""
pass
def handle_close(self):
"""Called on socket close. May be overridden."""
self.close()
def handle_read(self):
"""Called on read. Do not override; override onMessage instead."""
lenstr = self.read(headerlen)
msglen = unpackStrlen(lenstr)
msg = self.read(msglen)
try:
output = json.loads(msg)
except ValueError:
print "Error parsing JSON object from message '"+msg+"'"
return
self.onMessage(output)
def writable(self):
"""Called to determine whether there's any data left to be sent.
Do not override."""
return (len(self.buffer) > 0)
def handle_write(self):
"""Called to send data when available. Do not override."""
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
def onMessage(self,msg):
"""Override this to handle an incoming message"""
pass
def sendMessage(self,msg):
"""Call this to send an outgoing message"""
smsg = json.dumps(msg)
#print "JSON message:",smsg
self.buffer = self.buffer + packStrlen(smsg) + smsg
#print "buffer now:",self.buffer
def read(self,length):
chunk = self.recv(length)
msg = chunk
while len(msg) < length:
chunk = self.recv(length-len(msg))
if chunk == '':
raise IOError("socket connection broken")
msg = msg + chunk
return msg
def recv(self, buffer_size):
"""Fix for windows sockets throwing EAGAIN crashing asyncore"""
while True:
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
print "Socket closed..."
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
#print "EGAIN or EWOULDBLOCK returned... spin waiting"
time.sleep(0.001)
continue
elif why.args[0] == errno.ENOTCONN:
self.handle_close()
return ''
else:
raise
class ControllerClient(JsonClient):
"""A client that relays Python BaseController object to a
SerialController.
The interface simply translates messages back and forth using the standard
BaseController messages.
To run, pass it an address and a control.BaseController interface.
Then, call asyncore.loop().
"""
def __init__(self,addr,controller):
"""Sends the output of a controller to a SerialController.
controller is assumed to follow the control.BaseController interface.
"""
self.connecting = True
JsonClient.__init__(self,addr)
self.controller = controller
    def handle_connect(self):
        print "Handle connect"
        JsonClient.handle_connect(self)
        self.connecting = False
        self.controller.signal('enter')
def handle_expt(self):
self.close()
def handle_error(self):
JsonClient.handle_error(self)
if self.connecting:
print
print "(Did you forget to start up a Klamp't controller server?)"
else:
print
print "(Did the Klamp't controller server shut down?)"
def onMessage(self,msg):
#print "receiving message",msg
try:
res = self.controller.output_and_advance(**msg)
if res==None: return
except Exception as e:
print "Exception",e,"on read"
return
try:
#print "sending message",res
self.sendMessage(res)
except IOError as e:
print "Exception",e,"on send"
return
class SerialController(controller.BaseController):
"""A controller that maintains a server to write/read messages.
It simply translates messages back and forth to a client via the serial
interface.
"""
def __init__(self,addr=('localhost',3456)):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind( addr )
self.sock.listen(1)
print "SerialController: Listening on port",addr[1]
self.clientsock = None
def accept(self):
"""Get a new connection, if there isn't one"""
if self.clientsock == None:
pair = self.sock.accept()
if pair != None:
sock, addr = pair
print 'SerialController: Incoming connection from %s' % repr(addr)
self.clientsock = sock
return
def output(self,**inputs):
self.accept()
if self.clientsock == None:
return None
#Convert inputs to JSON message
smsg = json.dumps(inputs)
msg = packStrlen(smsg) + smsg
try:
writeSocket(self.clientsock,msg)
#Read response from serial client
lenstr = readSocket(self.clientsock,headerlen)
msglen = unpackStrlen(lenstr)
msg = readSocket(self.clientsock,msglen)
except IOError:
print "SerialController: Error writing or reading socket..."
self.clientsock.close()
self.clientsock = None
return None
try:
output = json.loads(msg)
return output
except ValueError:
#didn't parse properly
print "Couldn't read Python object from JSON message '"+msg+"'"
return None
if __name__ == "__main__":
import sys
import trajectory_controller
host = 'localhost'
port = 3456
if len(sys.argv)==1:
print "Usage: %s [linear_path_file]\n"%(sys.argv[0],)
print "By default connects to localhost:3456"
exit()
#by default, runs a trajectory controller
pathfn = sys.argv[1]
pycontroller = trajectory_controller.make(None,pathfn)
s = ControllerClient((host,port),pycontroller)
asyncore.loop()
def make(robot):
return SerialController()
|
d85e50d4798a01171823405aff0a4772057c08e9
|
1969460112e17d3cc42c8172ce2e4efe1509c774
|
/replication_handler/util/misc.py
|
48972f462cffbac877ecb77a7a6ddbbed00143f1
|
[
"Apache-2.0"
] |
permissive
|
Yelp/mysql_streamer
|
9dcc3655d1ba70b424340adaa504f648a4e413dc
|
b88183ac00b88f5dff9c01ad87a46da9e3615d9e
|
refs/heads/master
| 2023-09-02T10:54:21.589275
| 2022-08-17T18:35:50
| 2022-08-17T18:35:50
| 66,398,331
| 433
| 92
|
Apache-2.0
| 2022-08-17T18:30:59
| 2016-08-23T19:52:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,042
|
py
|
misc.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import simplejson
from data_pipeline.schematizer_clientlib.schematizer import get_schematizer
from replication_handler.config import env_config
from replication_handler.models.data_event_checkpoint import DataEventCheckpoint
from replication_handler.models.global_event_state import EventType
from replication_handler.models.global_event_state import GlobalEventState
REPLICATION_HANDLER_PRODUCER_NAME = env_config.producer_name
REPLICATION_HANDLER_TEAM_NAME = env_config.team_name
HEARTBEAT_DB = "yelp_heartbeat"
LOG_TRANSACTION_ID_SCHEMA_FILEPATH = os.path.join(
os.path.dirname(__file__),
'../../schema/avro_schema/log_transaction_id_v1.avsc')
GLOBAL_TRANSACTION_ID_SCHEMA_FILEPATH = os.path.join(
os.path.dirname(__file__),
'../../schema/avro_schema/global_transaction_id_v1.avsc')
log = logging.getLogger('replication_handler.util.misc.data_event')
class ReplicationHandlerEvent(object):
""" Class to associate an event and its position."""
def __init__(self, event, position):
self.event = event
self.position = position
class DataEvent(object):
""" Class to replace pymysqlreplication RowsEvent, since we want one
row per event.
Args:
schema(string): schema/database name of event.
table(string): table name of event.
log_pos(int): binary log position of event.
log_file(string): binary log file name of event.
row(dict): a dictionary containing fields and values of the changed row.
timestamp(int): timestamp of event, in epoch time format.
message_type(data_pipeline.message_type): the type of event, can be CreateMessage,
UpdateMessage, DeleteMessage or RefreshMessage.
"""
def __init__(
self,
schema,
table,
log_pos,
log_file,
row,
timestamp,
message_type
):
self.schema = schema
self.table = table
self.log_pos = log_pos
self.log_file = log_file
self.row = row
self.timestamp = timestamp
self.message_type = message_type
def save_position(position_data, state_session, is_clean_shutdown=False):
if not position_data or not position_data.last_published_message_position_info:
log.info(
"Unable to save position with invalid position_data: ".format(
position_data
)
)
return
log.info("Saving position with position data {}.".format(position_data))
position_info = position_data.last_published_message_position_info
topic_to_kafka_offset_map = position_data.topic_to_kafka_offset_map
with state_session.connect_begin(ro=False) as session:
GlobalEventState.upsert(
session=session,
position=position_info["position"],
event_type=EventType.DATA_EVENT,
cluster_name=position_info["cluster_name"],
database_name=position_info["database_name"],
table_name=position_info["table_name"],
is_clean_shutdown=is_clean_shutdown,
)
DataEventCheckpoint.upsert_data_event_checkpoint(
session=session,
topic_to_kafka_offset_map=topic_to_kafka_offset_map,
cluster_name=position_info["cluster_name"]
)
def get_transaction_id_schema_id(gtid_enabled):
if gtid_enabled:
file_name = GLOBAL_TRANSACTION_ID_SCHEMA_FILEPATH
source = 'global_transaction_id'
else:
file_name = LOG_TRANSACTION_ID_SCHEMA_FILEPATH
source = 'log_transaction_id'
with open(file_name, 'r') as schema_file:
avro_schema = simplejson.loads(schema_file.read())
schema = get_schematizer().register_schema_from_schema_json(
namespace='yelp.replication_handler',
source=source,
schema_json=avro_schema,
source_owner_email='bam+replication_handler@yelp.com',
contains_pii=False,
)
return schema.schema_id
def transform_timedelta_to_number_of_microseconds(value):
return (value.microseconds + (value.seconds + value.days * 86400) * 1000000)
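# Example (editor's sketch): for datetime.timedelta(days=1, seconds=2, microseconds=3) this
# returns 3 + (2 + 1 * 86400) * 1000000 = 86402000003 microseconds.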
def delete_file_if_exists(filename):
try:
os.remove(filename)
except OSError:
        # It's fine to ignore this error because it just means that the file
        # didn't exist in the first place.
pass
|
952d41aef74919e7e8a847d849cb50eb3f25c888
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/complex-number-multiplication.py
|
8e8221bfe95088d491a97e44b031ff6207aa1696
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
complex-number-multiplication.py
|
# Time: O(1)
# Space: O(1)
class Solution(object):
def complexNumberMultiply(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
ra, ia = map(int, a[:-1].split('+'))
rb, ib = map(int, b[:-1].split('+'))
return '%d+%di' % (ra * rb - ia * ib, ra * ib + ia * rb)
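# Example (editor's sketch):
# Solution().complexNumberMultiply("1+1i", "1+1i") returns "0+2i", since (1+i)*(1+i) = 2i.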
|
640f198deb63753b34cb6252b706ab4b8c079ed9
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-VideoToolbox/PyObjCTest/test_vtsession.py
|
9eb494bb840111f169718d13c07566199c927820
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
test_vtsession.py
|
import VideoToolbox
import objc
from PyObjCTools.TestSupport import TestCase, min_os_level, expectedFailure
class TestVTSession(TestCase):
@expectedFailure
@min_os_level("10.10")
def test_types(self):
self.assertIsCFType(VideoToolbox.VTSessionRef)
def test_constants(self):
self.assertIsInstance(VideoToolbox.kVTPropertyTypeKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertyType_Enumeration, str)
self.assertIsInstance(VideoToolbox.kVTPropertyType_Boolean, str)
self.assertIsInstance(VideoToolbox.kVTPropertyType_Number, str)
self.assertIsInstance(VideoToolbox.kVTPropertyReadWriteStatusKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertyReadWriteStatus_ReadOnly, str)
self.assertIsInstance(VideoToolbox.kVTPropertyReadWriteStatus_ReadWrite, str)
self.assertIsInstance(VideoToolbox.kVTPropertyShouldBeSerializedKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertySupportedValueMinimumKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertySupportedValueMaximumKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertySupportedValueListKey, str)
self.assertIsInstance(VideoToolbox.kVTPropertyDocumentationKey, str)
def test_functions(self):
self.assertArgIsOut(VideoToolbox.VTSessionCopySupportedPropertyDictionary, 1)
self.assertArgIsCFRetained(
VideoToolbox.VTSessionCopySupportedPropertyDictionary, 1
)
self.assertArgHasType(VideoToolbox.VTSessionSetProperty, 2, objc._C_ID)
self.assertArgIsOut(VideoToolbox.VTSessionCopyProperty, 3)
self.assertArgHasType(VideoToolbox.VTSessionCopyProperty, 3, b"o^@")
self.assertArgIsCFRetained(VideoToolbox.VTSessionCopyProperty, 3)
VideoToolbox.VTSessionSetProperties
self.assertArgIsOut(VideoToolbox.VTSessionCopySerializableProperties, 2)
self.assertArgIsCFRetained(VideoToolbox.VTSessionCopySerializableProperties, 2)
|
c5f04a081831d3c7c7f5fac58e44ed04106f04e7
|
767c07db1fb131047af3d9b0a065b8fdc8aac9ab
|
/45-stats1/40G5_quantiles.py
|
f6a1572bc0b43e70956d2674a17da7ff18c85941
|
[] |
no_license
|
DUanalytics/pyAnalytics
|
e52c5469da30a5f436ec0f3120d9f15fb82fd9b3
|
107a08bebe46ea51afccfeae4a666213bb405d41
|
refs/heads/master
| 2023-07-08T04:32:54.758902
| 2023-07-03T14:37:04
| 2023-07-03T14:37:04
| 202,094,535
| 394
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
40G5_quantiles.py
|
#Topic: Quantiles in
#-----------------------------
#libraries
import numpy as np
import matplotlib.pyplot as plt
from pydataset import data
mtcars = data('mtcars')
mtcars.head()
df=mtcars
df.quantile()
#0,100 ; #40,60
df.mpg.mean()
df.mpg.median()
df.mpg.sort_values()
df.quantile(q=.5) #default 50% Q2
df.quantile([0,.25, .50, .75, 1.0]) #quartiles:Q1, Q2, Q3, Q4
df.quantile(np.arange(0,1,.1)) #decile
np.arange(0,100,5)
df[['mpg','wt']].quantile(np.arange(0,1,.01)) #percentile at interval of 1 %
df[['cyl','gear','am']]= df[['cyl','gear','am']].astype('category')
df.quantile([.1,.5, .9], numeric_only=True, axis=0) #selected percentile
df.mpg.quantile([0,.25, .50,.6, .75, 1.0]) #numpy single column
df["mpg"].plot.box()
#clockwise
df["wt"].plot.box(vert=False)
df["wt"].plot.kde()
#Box Plots columns
fig = plt.figure(figsize=(16, 8))
plt.suptitle("Box Plots")
plt.subplot(2, 2, 1) # matrix of 2 x 2 plots : first plot
df["mpg"].plot.box()
plt.title('Mileage')
plt.subplot(2, 2, 2) # matrix of 2 x 2 plots : 2nd plot
df.wt.plot.box()
plt.title('Weight')
plt.subplot(2, 2, 3) # matrix of 2 x 2 plots : 3nd plot
df.hp.plot.box(vert=False) #vert=False stands # for "no vertical"
plt.title('Horse Power')
plt.subplot(2, 2, 4) # matrix of 2 x 2 plots : 4th plot
df["disp"].plot.box()
plt.title('Displacement')
plt.xticks(rotation=25)
plt.show();
#A boxplot is a standardized way of displaying the distribution of data based on a five number summary (“minimum”, first quartile (Q1), median, third quartile (Q3), and “maximum”). It can tell you about your outliers and what their values are. It can also tell you if your data is symmetrical, how tightly your data is grouped, and if and how your data is skewed.
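# Worked example (editor's sketch; exact values depend on the mtcars data):
# q = df.mpg.quantile([.25, .5, .75])   # the three quartiles as a Series indexed by .25/.5/.75
# iqr = q[.75] - q[.25]                 # interquartile range, the height of the box in the boxplot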
|
b8b58e1e2194d4174e42f498520fc4ceb849b359
|
997645d6bb9c404f2f195328f29afa0eaa3c55b4
|
/piccolo/apps/migrations/commands/new.py
|
09f6ac9e87a6a2aade7d8acf5db7a2b22548a930
|
[
"MIT"
] |
permissive
|
piccolo-orm/piccolo
|
e43ea13c05c53ac00d9d20474c53ad2c49a40e80
|
83ea66323ef5a8e4010ea3ee19f34163bc881ace
|
refs/heads/master
| 2023-08-08T19:31:37.783445
| 2023-07-28T06:19:35
| 2023-07-28T06:19:35
| 155,008,334
| 1,139
| 90
|
MIT
| 2023-09-08T16:54:45
| 2018-10-27T20:53:26
|
Python
|
UTF-8
|
Python
| false
| false
| 7,849
|
py
|
new.py
|
from __future__ import annotations
import datetime
import os
import string
import sys
import typing as t
from dataclasses import dataclass
from itertools import chain
from types import ModuleType
import black
import jinja2
from piccolo import __VERSION__
from piccolo.apps.migrations.auto import (
AlterStatements,
DiffableTable,
SchemaDiffer,
SchemaSnapshot,
)
from piccolo.conf.apps import AppConfig, Finder
from piccolo.engine import SQLiteEngine
from piccolo.utils.printing import print_heading
from .base import BaseMigrationManager
TEMPLATE_DIRECTORY = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates"
)
JINJA_ENV = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=TEMPLATE_DIRECTORY),
)
MIGRATION_MODULES: t.Dict[str, ModuleType] = {}
VALID_PYTHON_MODULE_CHARACTERS = string.ascii_lowercase + string.digits + "_"
def render_template(**kwargs):
template = JINJA_ENV.get_template("migration.py.jinja")
return template.render(version=__VERSION__, **kwargs)
def _create_migrations_folder(migrations_path: str) -> bool:
"""
Creates the folder that migrations live in. Returns True/False depending
on whether it was created or not.
"""
if os.path.exists(migrations_path):
return False
os.mkdir(migrations_path)
with open(os.path.join(migrations_path, "__init__.py"), "w"):
pass
return True
@dataclass
class NewMigrationMeta:
migration_id: str
migration_filename: str
migration_path: str
def now():
"""
In a separate function so it's easier to patch in tests.
"""
return datetime.datetime.now()
def _generate_migration_meta(app_config: AppConfig) -> NewMigrationMeta:
"""
Generates the migration ID and filename.
"""
# The microseconds originally weren't part of the ID, but there was a
# chance that the IDs would clash if the migrations are generated
# programatically in quick succession (e.g. in a unit test), so they had
# to be added. The trade off is a longer ID.
_id = now().strftime("%Y-%m-%dT%H:%M:%S:%f")
# Originally we just used the _id as the filename, but colons aren't
# supported in Windows, so we need to sanitize it. We don't want to
# change the _id format though, as it would break existing migrations.
# The filename doesn't have any special significance - only the id matters.
cleaned_id = _id.replace(":", "_").replace("-", "_").lower()
# Just in case the app name contains characters which aren't valid for
# a Python module.
cleaned_app_name = "".join(
[
i
for i in app_config.app_name.lower().replace("-", "_")
if i in VALID_PYTHON_MODULE_CHARACTERS
]
)
filename = f"{cleaned_app_name}_{cleaned_id}"
path = os.path.join(app_config.migrations_folder_path, f"{filename}.py")
return NewMigrationMeta(
migration_id=_id, migration_filename=filename, migration_path=path
)
class NoChanges(Exception):
pass
async def _create_new_migration(
app_config: AppConfig,
auto: bool = False,
description: str = "",
auto_input: t.Optional[str] = None,
) -> NewMigrationMeta:
"""
Creates a new migration file on disk.
"""
meta = _generate_migration_meta(app_config=app_config)
if auto:
alter_statements = await AutoMigrationManager(
auto_input=auto_input
).get_alter_statements(app_config=app_config)
_alter_statements = list(
chain(*[i.statements for i in alter_statements])
)
extra_imports = sorted(
set(chain(*[i.extra_imports for i in alter_statements])),
key=lambda x: x.__repr__(),
)
extra_definitions = sorted(
set(chain(*[i.extra_definitions for i in alter_statements])),
)
if sum(len(i.statements) for i in alter_statements) == 0:
raise NoChanges()
file_contents = render_template(
migration_id=meta.migration_id,
auto=True,
alter_statements=_alter_statements,
extra_imports=extra_imports,
extra_definitions=extra_definitions,
app_name=app_config.app_name,
description=description,
)
else:
file_contents = render_template(
migration_id=meta.migration_id, auto=False, description=description
)
# Beautify the file contents a bit.
file_contents = black.format_str(
file_contents, mode=black.FileMode(line_length=82)
)
with open(meta.migration_path, "w") as f:
f.write(file_contents)
return meta
###############################################################################
class AutoMigrationManager(BaseMigrationManager):
def __init__(self, auto_input: t.Optional[str] = None, *args, **kwargs):
self.auto_input = auto_input
super().__init__(*args, **kwargs)
async def get_alter_statements(
self, app_config: AppConfig
) -> t.List[AlterStatements]:
"""
Works out which alter statements are required.
"""
migration_managers = await self.get_migration_managers(
app_config=app_config
)
schema_snapshot = SchemaSnapshot(migration_managers)
snapshot = schema_snapshot.get_snapshot()
# Now get the current schema:
current_diffable_tables = [
DiffableTable(
class_name=i.__name__,
tablename=i._meta.tablename,
columns=i._meta.non_default_columns,
schema=i._meta.schema,
)
for i in app_config.table_classes
]
# Compare the current schema with the snapshot
differ = SchemaDiffer(
schema=current_diffable_tables,
schema_snapshot=snapshot,
auto_input=self.auto_input,
)
return differ.get_alter_statements()
###############################################################################
async def new(
app_name: str,
auto: bool = False,
desc: str = "",
auto_input: t.Optional[str] = None,
):
"""
Creates a new migration file in the migrations folder.
:param app_name:
The app to create a migration for. Specify a value of 'all' to create
migrations for all apps (use in conjunction with --auto).
:param auto:
Auto create the migration contents.
:param desc:
A description of what the migration does, for example --desc='adding
name column'.
:param auto_input:
If provided, all prompts for user input will automatically have this
entered. For example, --auto_input='y'.
"""
engine = Finder().get_engine()
if auto and isinstance(engine, SQLiteEngine):
sys.exit("Auto migrations aren't currently supported by SQLite.")
if app_name == "all" and not auto:
raise ValueError(
"Only use `--app_name=all` in conjunction with `--auto`."
)
app_names = (
sorted(
BaseMigrationManager().get_app_names(
sort_by_migration_dependencies=False
)
)
if app_name == "all"
else [app_name]
)
for app_name in app_names:
print_heading(app_name)
print("🚀 Creating new migration ...")
app_config = Finder().get_app_config(app_name=app_name)
_create_migrations_folder(app_config.migrations_folder_path)
try:
await _create_new_migration(
app_config=app_config,
auto=auto,
description=desc,
auto_input=auto_input,
)
except NoChanges:
print("🏁 No changes detected.")
print("\n✅ Finished\n")
|
75e663ee0a2a6fcf3724dfdd92fc2ab24a30565c
|
8f76cee606ca901d6b9c1ecdcadaa172aa861c00
|
/languages/python/software_engineering_xmlrpcclient.py
|
96eb298a4d402c1af0c36d4125ee5e523475d5ce
|
[
"BSD-3-Clause"
] |
permissive
|
uthcode/learntosolveit
|
b0cdc386ab17dadcefef9867aacc5ef0326b7215
|
88b1cbfea313fdca50f48573c396bed9ba38c354
|
refs/heads/master
| 2023-05-24T14:00:34.115585
| 2023-05-15T00:18:02
| 2023-05-15T00:18:02
| 14,986,557
| 171
| 1,714
|
NOASSERTION
| 2022-09-13T13:37:07
| 2013-12-06T15:44:38
|
Java
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
software_engineering_xmlrpcclient.py
|
import xmlrpc.client
proxy = xmlrpc.client.ServerProxy('http://localhost:9000')
# Call an explicitly registered function
print('dir():',proxy.dir('/'))
try:
print('list_contents():', proxy.list_contents('/'))
except:
print('You should use a registered name.')
# Call the standard functions registered with server
print('BEFORE:', 'EXAMPLE' in proxy.dir.list('/tmp'))
print('CREATE:', proxy.dir.create('/tmp/EXAMPLE'))
print('SHOULD EXIST:', 'EXAMPLE' in proxy.dir.list('/tmp'))
print('REMOVE:', proxy.dir.remove('/tmp/EXAMPLE'))
print('AFTER', 'EXAMPLE' in proxy.dir.list('/tmp'))
# Call the function (handler) which has space
print(getattr(proxy,'my func')(5,5))
|
5f4da0f9a025334746123271c3749c80642a669a
|
d88458a65a173999df390117005fa813735e5fe2
|
/astroquery/imcce/tests/test_miriade_remote.py
|
4037850cca07c749fb5cdad8e494b15cd549f71d
|
[] |
permissive
|
astropy/astroquery
|
9a2793826229ba4b41ec3607ca77832036a534e9
|
51316d7417d7daf01a8b29d1df99037b9227c2bc
|
refs/heads/main
| 2023-09-01T20:52:41.625935
| 2023-08-29T23:22:40
| 2023-08-29T23:22:40
| 4,787,269
| 636
| 365
|
BSD-3-Clause
| 2023-09-14T21:56:33
| 2012-06-25T20:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
test_miriade_remote.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy.testing as npt
import pytest
from .. import core
@pytest.mark.remote_data
class TestMiriadeClass:
def test_ephemerides(self):
# check values of Ceres for a given epoch
# orbital uncertainty of Ceres is basically zero
res = core.Miriade.get_ephemerides('Ceres', location='500',
epoch=2451544.5)
assert res['target'] == "Ceres"
npt.assert_allclose(
[2451544.5, 188.70280, 9.09829],
[res['epoch'][0], res['RA'][0], res['DEC'][0]],
rtol=1e-5)
|
c5d5070648aa3cbfdbe5d409a9110b8a9699402d
|
a3c36ea1ad9a13dcaf152cdfed9aebf8ab84f1e1
|
/RePoE/parser/modules/stat_translations.py
|
ae42777311eda4a1e08aa895d029589be6939db0
|
[
"MIT"
] |
permissive
|
brather1ng/RePoE
|
e976db980de93db34efe8dfdc05673bb4cea6aff
|
8023a1d696dbddc836c05ac3fcedd072da1767d2
|
refs/heads/master
| 2022-10-08T03:37:41.167923
| 2022-09-06T19:43:56
| 2022-09-06T19:43:56
| 76,997,578
| 278
| 42
|
NOASSERTION
| 2022-08-23T08:22:40
| 2016-12-20T22:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,254
|
py
|
stat_translations.py
|
from PyPoE.poe.file.translations import get_custom_translation_file
from RePoE.parser.util import write_json, call_with_default_args, get_stat_translation_file_name
from RePoE.parser import Parser_Module
def _convert_tags(n_ids, tags, tags_types):
f = ["ignore" for _ in range(n_ids)]
for tag, tag_type in zip(tags, tags_types):
if tag_type == "+d":
f[tag] = "+#"
elif tag_type == "d":
f[tag] = "#"
elif tag_type == "":
f[tag] = "#"
else:
print("Unknown tag type:", tag_type)
return f
def _convert_range(translation_range):
rs = []
for r in translation_range:
r_dict = {}
if r.min is not None:
r_dict["min"] = r.min
if r.max is not None:
r_dict["max"] = r.max
if r.negated:
r_dict["negated"] = True
rs.append(r_dict)
return rs
def _convert_handlers(n_ids, index_handlers):
hs = [[] for _ in range(n_ids)]
for handler_name, ids in index_handlers.items():
for i in ids:
# Indices in the handler dict are 1-based
hs[i - 1].append(handler_name)
return hs
def _convert(tr, tag_set):
ids = tr.ids
n_ids = len(ids)
english = []
for s in tr.get_language("English").strings:
tags = _convert_tags(n_ids, s.tags, s.tags_types)
tag_set.update(tags)
english.append(
{
"condition": _convert_range(s.range),
"string": s.as_format_string,
"format": tags,
"index_handlers": _convert_handlers(n_ids, s.quantifier.index_handlers),
}
)
return {"ids": ids, "English": english}
def _get_stat_translations(tag_set, translations, custom_translations):
previous = set()
root = []
for tr in translations:
id_str = " ".join(tr.ids)
if id_str in previous:
print("Duplicate id", tr.ids)
continue
previous.add(id_str)
root.append(_convert(tr, tag_set))
for tr in custom_translations:
id_str = " ".join(tr.ids)
if id_str in previous:
continue
previous.add(id_str)
result = _convert(tr, tag_set)
result["hidden"] = True
root.append(result)
return root
def _build_stat_translation_file_map(file_system):
node = file_system.build_directory()
for game_file in node["Metadata"]["StatDescriptions"].children.keys():
out_file = get_stat_translation_file_name(game_file)
if out_file:
yield game_file, out_file
class stat_translations(Parser_Module):
@staticmethod
def write(file_system, data_path, relational_reader, translation_file_cache, ot_file_cache):
tag_set = set()
for in_file, out_file in _build_stat_translation_file_map(file_system):
translations = translation_file_cache[in_file].translations
result = _get_stat_translations(tag_set, translations, get_custom_translation_file().translations)
write_json(result, data_path, out_file)
print("Possible format tags: {}".format(tag_set))
if __name__ == "__main__":
call_with_default_args(stat_translations.write)
|
e5aa57a5343b0198c4e1e63d15dafb59d41583a5
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/catboost/spark/catboost4j-spark/core/src/test/python/config.py
|
cd3c8b838d1d6354736665b42fbcdda4ac15a0b9
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 58
|
py
|
config.py
|
CATBOOST_TEST_DATA_DIR = '../../../../../../pytest/data'
|
d23ab2dbad2521d37cba50da6d858b105cd804dd
|
22c465bfe12ccff5e6bb8cd072436bb0697d538a
|
/ssod/utils/vars.py
|
49da8fdef9932072a28ea405a29caa743ca71401
|
[] |
no_license
|
ligang-cs/PseCo
|
4c2391961668f8882e5bc998352494f45a48234f
|
33b06eeebaf456213f7c2b0cb25122b81fa50897
|
refs/heads/master
| 2023-05-24T03:15:47.279723
| 2022-08-26T05:37:57
| 2022-08-26T05:37:57
| 514,835,957
| 119
| 20
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
vars.py
|
import re
from typing import Union
pattern = re.compile("\$\{[a-zA-Z\d_.]*\}")
def get_value(cfg: dict, chained_key: str):
keys = chained_key.split(".")
if len(keys) == 1:
return cfg[keys[0]]
else:
return get_value(cfg[keys[0]], ".".join(keys[1:]))
def resolve(cfg: Union[dict, list], base=None):
if base is None:
base = cfg
if isinstance(cfg, dict):
return {k: resolve(v, base) for k, v in cfg.items()}
elif isinstance(cfg, list):
return [resolve(v, base) for v in cfg]
elif isinstance(cfg, tuple):
return tuple([resolve(v, base) for v in cfg])
elif isinstance(cfg, str):
# process
var_names = pattern.findall(cfg)
if len(var_names) == 1 and len(cfg) == len(var_names[0]):
return get_value(base, var_names[0][2:-1])
else:
vars = [get_value(base, name[2:-1]) for name in var_names]
for name, var in zip(var_names, vars):
cfg = cfg.replace(name, str(var))
return cfg
else:
return cfg
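# Usage example (editor's sketch):
# resolve({"lr": 0.01, "optimizer": {"lr": "${lr}", "name": "sgd_${lr}"}})
# -> {"lr": 0.01, "optimizer": {"lr": 0.01, "name": "sgd_0.01"}}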
|