Dataset schema:

| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
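The sample rows below follow this schema, one source file per row. As a quick orientation (added here for illustration, not part of the dataset), a minimal sketch of inspecting such rows if they were exported as JSON Lines; the file name `rows.jsonl` is a placeholder:

```python
import json

# Minimal sketch: read rows exported as JSON Lines and print a few of the
# columns listed in the schema above. "rows.jsonl" is a hypothetical file name.
with open("rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        print(row["repo_name"], row["path"], row["language"],
              row["length_bytes"], row["license_type"])
```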

blob_id: 0c526c56c00ba208f758cf4422bfebbfe602bd20
directory_id: 0760fb4901a75766921a205b55686d6d6f049b30
path: /rllib/execution/train_ops.py
content_id: c71f9fc3482654cd316c34f7bbf8a52c52ae0d6a
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: ray-project/ray
snapshot_id: a4bb6940b08b59a61ef0b8e755a52d8563a2f867
revision_id: edba68c3e7cf255d1d6479329f305adb7fa4c3ed
branch_name: refs/heads/master
visit_date: 2023-08-31T03:36:48.164405
revision_date: 2023-08-31T03:20:38
committer_date: 2023-08-31T03:20:38
github_id: 71,932,349
star_events_count: 29,482
fork_events_count: 5,669
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:48:14
gha_created_at: 2016-10-25T19:38:30
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,161
extension: py
filename: train_ops.py
content:
import logging
import numpy as np
import math
from typing import Dict
from ray.rllib.execution.common import (
LEARN_ON_BATCH_TIMER,
LOAD_BATCH_TIMER,
)
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.deprecation import deprecation_warning
from ray.rllib.utils.metrics import (
NUM_ENV_STEPS_TRAINED,
NUM_AGENT_STEPS_TRAINED,
)
from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder
from ray.rllib.utils.sgd import do_minibatch_sgd
from ray.util import log_once
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
def train_one_step(algorithm, train_batch, policies_to_train=None) -> Dict:
"""Function that improves the all policies in `train_batch` on the local worker.
Examples:
>>> from ray.rllib.execution.rollout_ops import synchronous_parallel_sample
>>> algo = [...] # doctest: +SKIP
>>> train_batch = synchronous_parallel_sample(algo.workers) # doctest: +SKIP
>>> # This trains the policy on one batch.
>>> results = train_one_step(algo, train_batch) # doctest: +SKIP
{"default_policy": ...}
Updates the NUM_ENV_STEPS_TRAINED and NUM_AGENT_STEPS_TRAINED counters as well as
the LEARN_ON_BATCH_TIMER timer of the `algorithm` object.
"""
if log_once("train_one_step_deprecation_warning"):
deprecation_warning(old="ray.rllib.execution.train_ops.train_one_step")
config = algorithm.config
workers = algorithm.workers
local_worker = workers.local_worker()
num_sgd_iter = config.get("num_sgd_iter", 1)
sgd_minibatch_size = config.get("sgd_minibatch_size", 0)
learn_timer = algorithm._timers[LEARN_ON_BATCH_TIMER]
with learn_timer:
# Subsample minibatches (size=`sgd_minibatch_size`) from the
# train batch and loop through train batch `num_sgd_iter` times.
if num_sgd_iter > 1 or sgd_minibatch_size > 0:
info = do_minibatch_sgd(
train_batch,
{
pid: local_worker.get_policy(pid)
for pid in policies_to_train
or local_worker.get_policies_to_train(train_batch)
},
local_worker,
num_sgd_iter,
sgd_minibatch_size,
[],
)
# Single update step using train batch.
else:
info = local_worker.learn_on_batch(train_batch)
learn_timer.push_units_processed(train_batch.count)
algorithm._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count
algorithm._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()
if algorithm.reward_estimators:
info[DEFAULT_POLICY_ID]["off_policy_estimation"] = {}
for name, estimator in algorithm.reward_estimators.items():
info[DEFAULT_POLICY_ID]["off_policy_estimation"][name] = estimator.train(
train_batch
)
return info
@DeveloperAPI
def multi_gpu_train_one_step(algorithm, train_batch) -> Dict:
"""Multi-GPU version of train_one_step.
Uses the policies' `load_batch_into_buffer` and `learn_on_loaded_batch` methods
to be more efficient wrt CPU/GPU data transfers. For example, when doing multiple
passes through a train batch (e.g. for PPO) using `config.num_sgd_iter`, the
actual train batch is only split once and loaded once into the GPU(s).
Examples:
>>> from ray.rllib.execution.rollout_ops import synchronous_parallel_sample
>>> algo = [...] # doctest: +SKIP
>>> train_batch = synchronous_parallel_sample(algo.workers) # doctest: +SKIP
>>> # This trains the policy on one batch.
>>> results = multi_gpu_train_one_step(algo, train_batch) # doctest: +SKIP
{"default_policy": ...}
Updates the NUM_ENV_STEPS_TRAINED and NUM_AGENT_STEPS_TRAINED counters as well as
the LOAD_BATCH_TIMER and LEARN_ON_BATCH_TIMER timers of the Algorithm instance.
"""
if log_once("mulit_gpu_train_one_step_deprecation_warning"):
deprecation_warning(
old=("ray.rllib.execution.train_ops." "multi_gpu_train_one_step")
)
config = algorithm.config
workers = algorithm.workers
local_worker = workers.local_worker()
num_sgd_iter = config.get("num_sgd_iter", 1)
sgd_minibatch_size = config.get("sgd_minibatch_size", config["train_batch_size"])
# Determine the number of devices (GPUs or 1 CPU) we use.
num_devices = int(math.ceil(config["num_gpus"] or 1))
# Make sure total batch size is divisible by the number of devices.
# Batch size per tower.
per_device_batch_size = sgd_minibatch_size // num_devices
# Total batch size.
batch_size = per_device_batch_size * num_devices
assert batch_size % num_devices == 0
assert batch_size >= num_devices, "Batch size too small!"
# Handle everything as if multi-agent.
train_batch = train_batch.as_multi_agent()
# Load data into GPUs.
load_timer = algorithm._timers[LOAD_BATCH_TIMER]
with load_timer:
num_loaded_samples = {}
for policy_id, batch in train_batch.policy_batches.items():
# Not a policy-to-train.
if (
local_worker.is_policy_to_train is not None
and not local_worker.is_policy_to_train(policy_id, train_batch)
):
continue
# Decompress SampleBatch, in case some columns are compressed.
batch.decompress_if_needed()
# Load the entire train batch into the Policy's only buffer
# (idx=0). Policies only have more than one buffer if we are
# training asynchronously.
num_loaded_samples[policy_id] = local_worker.policy_map[
policy_id
].load_batch_into_buffer(batch, buffer_index=0)
# Execute minibatch SGD on loaded data.
learn_timer = algorithm._timers[LEARN_ON_BATCH_TIMER]
with learn_timer:
# Use LearnerInfoBuilder as a unified way to build the final
# results dict from `learn_on_loaded_batch` call(s).
# This makes sure results dicts always have the same structure
# no matter the setup (multi-GPU, multi-agent, minibatch SGD,
# tf vs torch).
learner_info_builder = LearnerInfoBuilder(num_devices=num_devices)
for policy_id, samples_per_device in num_loaded_samples.items():
policy = local_worker.policy_map[policy_id]
num_batches = max(1, int(samples_per_device) // int(per_device_batch_size))
logger.debug("== sgd epochs for {} ==".format(policy_id))
for _ in range(num_sgd_iter):
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
# Learn on the pre-loaded data in the buffer.
# Note: For minibatch SGD, the data is an offset into
# the pre-loaded entire train batch.
results = policy.learn_on_loaded_batch(
permutation[batch_index] * per_device_batch_size, buffer_index=0
)
learner_info_builder.add_learn_on_batch_results(results, policy_id)
# Tower reduce and finalize results.
learner_info = learner_info_builder.finalize()
load_timer.push_units_processed(train_batch.count)
learn_timer.push_units_processed(train_batch.count)
# TODO: Move this into Algorithm's `training_step` method for
# better transparency.
algorithm._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count
algorithm._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()
if algorithm.reward_estimators:
learner_info[DEFAULT_POLICY_ID]["off_policy_estimation"] = {}
for name, estimator in algorithm.reward_estimators.items():
learner_info[DEFAULT_POLICY_ID]["off_policy_estimation"][
name
] = estimator.train(train_batch)
return learner_info

blob_id: f8d2928388e17c5cfc291176f47964c79425e9ee
directory_id: 1063bc07ba3331808812499af980c30aa4becca4
path: /canvas_grab/planner.py
content_id: 108fa8a1fca6c045f6d43781fee1bd840f5d61fb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: skyzh/canvas_grab
snapshot_id: ab3a8d55bffe9009211113c28d793b51290015f1
revision_id: 2c2b2d79def5f12564aae7d2034e8b3f4066db16
branch_name: refs/heads/master
visit_date: 2022-09-22T18:23:58.342682
revision_date: 2022-09-12T17:02:19
committer_date: 2022-09-12T17:02:19
github_id: 244,135,442
star_events_count: 214
fork_events_count: 46
gha_license_id: MIT
gha_event_created_at: 2023-09-09T02:35:44
gha_created_at: 2020-03-01T11:19:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,788
extension: py
filename: planner.py
content:
from .snapshot import SnapshotFile, SnapshotLink
class Planner(object):
"""Planner generates a transfer plan from two snapshots
"""
def __init__(self, remove_local_file):
self.remove_local_file = remove_local_file
def plan(self, snapshot_from, snapshot_to, file_filter):
"""plan a transfer
Args:
snapshot_from (dict): source snapshot
snapshot_to (dict): target snapshot
file_filter (canvas_grab.file_filter.FileFilter): file filter
Returns:
transfer plan
"""
snapshot_from_filter = file_filter.filter_files(snapshot_from)
plans = []
# Add and update files
for key, from_item in snapshot_from.items():
if key not in snapshot_from_filter:
plans.append(('ignore', key, from_item))
elif key not in snapshot_to:
plans.append(('add', key, from_item))
else:
to_item = snapshot_to[key]
if isinstance(from_item, SnapshotFile):
if to_item.size != from_item.size or to_item.modified_at != from_item.modified_at:
plans.append(('update', key, from_item))
if isinstance(from_item, SnapshotLink):
content_length = len(from_item.content().encode('utf-8'))
if to_item.size != content_length:
plans.append(('update', key, from_item))
for key, to_item in snapshot_to.items():
if key not in snapshot_from_filter:
if self.remove_local_file:
plans.append(('delete', key, to_item))
else:
plans.append(('try-remove', key, to_item))
return plans
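For illustration only (not part of this file), a minimal sketch of driving `Planner.plan`; `DummyFilter` and the plain string items are hypothetical stand-ins (real snapshots hold `SnapshotFile`/`SnapshotLink` objects), so only the add/try-remove branches are exercised:

```python
from canvas_grab.planner import Planner

class DummyFilter:
    # Stand-in for the file_filter interface used above: keep every file.
    def filter_files(self, snapshot):
        return snapshot

planner = Planner(remove_local_file=False)
plans = planner.plan(
    snapshot_from={"notes/a.txt": "remote item"},  # placeholder items
    snapshot_to={"old/b.txt": "local item"},
    file_filter=DummyFilter(),
)
print(plans)
# [('add', 'notes/a.txt', 'remote item'), ('try-remove', 'old/b.txt', 'local item')]
```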

blob_id: cc70b20e1d73aae84666105a9ecaf310e8e9d333
directory_id: afd2087e80478010d9df66e78280f75e1ff17d45
path: /torch/utils/_python_dispatch.py
content_id: 32b2620ee02328d479ad54956bc8bde3aa01e93e
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause", "LicenseRef-scancode-secret-labs-2011", "LicenseRef-scancode-generic-cla", "BSL-1.0", "Apache-2.0"]
license_type: permissive
repo_name: pytorch/pytorch
snapshot_id: 7521ac50c47d18b916ae47a6592c4646c2cb69b5
revision_id: a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
branch_name: refs/heads/main
visit_date: 2023-08-03T05:05:02.822937
revision_date: 2023-08-03T00:40:33
committer_date: 2023-08-03T04:14:52
github_id: 65,600,975
star_events_count: 77,092
fork_events_count: 24,610
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T21:58:39
gha_created_at: 2016-08-13T05:26:41
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,602
extension: py
filename: _python_dispatch.py
content:
import contextlib
from typing import Optional
import warnings
import torch
from torch._C import _len_torch_dispatch_stack, _get_dispatch_stack_at,\
_pop_torch_dispatch_stack, _push_on_torch_dispatch_stack, DispatchKey
# TODO: Limitations and things about enable_torch_dispatch_mode we should fix before exposing it:
# - We need a better user-facing api for _DisableTorchDispatch that
# is able to selectively disable __torch_dispatch__ of a particular class.
# - It doesn't work with the tensor constructors (torch.tensor, torch.Tensor)
# - Better name (see https://github.com/pytorch/pytorch/pull/63496#discussion_r694091694)
class TorchDispatchMode:
"""
A ``TorchDispatchMode`` allows you to override the meaning of all
``__torch_dispatch__`` overrideable functions within a dynamic scope,
without having to actually create a tensor subclass or manually
monkey-patch functions in the PyTorch API. Some common situations
where you should use a mode:
* You want to override the meaning of factory functions, or other
functions that do not otherwise take a tensor as an argument
(these cannot be overridden with tensor subclasses).
* You want to override the behavior of all functions without needing
to wrap your inputs in tensor subclasses; e.g., if you are just
interested in logging intermediate computations.
* You want to control the order of execution of various tensor
subclasses explicitly, rather than implicitly via the return of
``NotImplemented``.
Independent subclasses of :class:`TorchDispatchMode` are compositional:
modes can be pushed onto a stack using ``with MyMode():``.
When you call functions in the PyTorch API inside your
``__torch_dispatch__`` implementation, by default, they will forward on to
the next mode on the mode stack. If you want to recursively call back into
your current ``__torch_dispatch__`` implementation, either explicitly
invoke ``self.__torch_dispatch__(...)``, or use the context manager
``__torch_dispatch__(self)`` to make PyTorch
API self-referential (beware of infinite loops, in this case!)
"""
def __init__(self, _dispatch_key=None):
if _dispatch_key is not None:
assert isinstance(_dispatch_key, torch._C.DispatchKey)
self.__dict__['_dispatch_key'] = _dispatch_key
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
raise NotImplementedError()
def __enter__(self):
_push_mode(self, self.__dict__.get("_dispatch_key", None))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_pop_mode(self.__dict__.get("_dispatch_key", None))
@classmethod
def push(cls, *args, **kwargs):
warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
instance = cls(*args, **kwargs)
return instance
def _get_current_dispatch_mode():
stack_len = _len_torch_dispatch_stack()
return _get_dispatch_stack_at(stack_len - 1) if stack_len > 0 else None
def _get_current_dispatch_mode_stack():
stack_len = _len_torch_dispatch_stack()
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
def _push_mode(mode, k: Optional[DispatchKey] = None):
if k is not None:
from torch._ops import push_mode_for_key, get_cached_ops
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
# Clear the cache of every op that has been used so far, for this particular key.
ks = torch._C._functionality_to_backend_keys(k)
for op in get_cached_ops():
for key in ks:
op._uncache_dispatch(key)
push_mode_for_key(k, mode)
else:
_push_on_torch_dispatch_stack(mode)
def _pop_mode(k: Optional[DispatchKey] = None):
if k is not None:
from torch._ops import pop_mode_for_key
return pop_mode_for_key(k)
else:
return _pop_torch_dispatch_stack()
@contextlib.contextmanager
def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
old = _pop_mode(k)
try:
yield old
finally:
_push_mode(old, k)
@contextlib.contextmanager
def _disable_current_modes():
mode_len = _len_torch_dispatch_stack()
old_modes = [_pop_mode() for _ in range(mode_len)]
try:
yield old_modes
finally:
for mode in reversed(old_modes):
_push_mode(mode)
class BaseTorchDispatchMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
def is_traceable_wrapper_subclass(t):
# In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2,
# it must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__.
is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
return is_subclass and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__")
def transform_subclass(t, callback):
assert is_traceable_wrapper_subclass(t), f"Expects traceable wrapper subclass but got {type(t)}"
# convert the tensor subclass into its constituent dense tensors,
# and apply a transformation to each dense tensor.
from torch.utils._pytree import tree_map_only
flattened_tensors, ctx = type(t).__tensor_flatten__(t)
transformed_tensors = tree_map_only(torch.Tensor, callback, flattened_tensors)
return type(t).__tensor_unflatten__(transformed_tensors, ctx)
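As a brief illustration of the `TorchDispatchMode` API defined above (an editor-added sketch, not part of this file), a mode that logs every op dispatched inside its scope and forwards it unchanged:

```python
import torch
from torch.utils._python_dispatch import TorchDispatchMode

class LoggingMode(TorchDispatchMode):
    # Print each ATen op that reaches __torch_dispatch__, then run it as-is.
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        print(f"dispatch: {func}")
        return func(*args, **kwargs)

with LoggingMode():
    x = torch.ones(3) + torch.arange(3.0)  # factory calls and the add are all logged
```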

blob_id: 3e7abeb488e927af85b9d91e4d693a423c3a7762
directory_id: f0f07a94d3876779d27c6b01e2c08c08391fbafb
path: /tensorflow_model_analysis/api/tfma_unit.py
content_id: e5d8dab3ce593933b5aa0eaf34d3c3eae922b6a0
detected_licenses: ["BSD-3-Clause", "MIT", "Apache-2.0"]
license_type: permissive
repo_name: tensorflow/model-analysis
snapshot_id: 4ec2f637a657d49aa503f2f047bed45b82c93f7b
revision_id: ee0d8eff562bfe068a3ffdc4da0472cc90adaf41
branch_name: refs/heads/master
visit_date: 2023-08-18T22:51:47.404027
revision_date: 2023-08-17T22:39:56
committer_date: 2023-08-17T22:40:27
github_id: 126,528,713
star_events_count: 1,200
fork_events_count: 313
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-14T20:18:27
gha_created_at: 2018-03-23T19:08:49
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 16,375
extension: py
filename: tfma_unit.py
content:
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test library for testing your TFMA models / metrics.
This is publicly accessible as the tfma.test module. Example usage:
class MyModelTFMATest(tfma.test.TestCase):
def testWithoutBeam(self):
path = train_and_export_my_model(...)
examples = [self.makeExample(age=5, label=1.0),
self.makeExample(age=10, label=0.0)]
expected_metrics={
'average_loss': tfma.test.BoundedValue(upper_bound=1.0),
'auc': tfma.test.BoundedValue(lower_bound=0.5),
'example_count': 3.0,
}
self.assertMetricsComputedWithoutBeamAre(
eval_saved_model_path=path,
serialized_examples=examples,
expected_metrics=expected_metrics)
def testWithBeam(self):
path = train_and_export_my_model(...)
examples = [self.makeExample(age=5, label=1.0),
self.makeExample(age=10, label=0.0)]
expected_metrics={
'average_loss': tfma.test.BoundedValue(upper_bound=1.0),
'auc': tfma.test.BoundedValue(lower_bound=0.5),
'example_count': 3.0,
}
self.assertMetricsComputedWithBeamAre(
eval_saved_model_path=path,
serialized_examples=examples,
expected_metrics=expected_metrics)
We recommend that you actually train and export your model with the test, as
opposed to training and exporting the model once and saving it alongside the
test. This is so that the model is always exported using the latest code and is
of the latest format.
Note that if you are retraining a new model for each test, your model may have
different weights each time and have different metric values. As such, we
recommend that you use BoundedValue with loose bounds to avoid flaky tests.
"""
from typing import Any, Dict, List, Optional
import apache_beam as beam
from apache_beam.testing import util as beam_util
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.api import model_eval_lib
from tensorflow_model_analysis.api import types
from tensorflow_model_analysis.eval_saved_model import load
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tfx_bsl.tfxio import raw_tf_record
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def Extract( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor]):
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
return extracts
class BoundedValue:
"""Represents a bounded value for a metric for the TFMA unit test."""
def __init__(self,
lower_bound: float = float('-inf'),
upper_bound: float = float('inf')):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
class TestCase(testutil.TensorflowModelAnalysisTest):
"""Test class with extra methods for unit-testing TFMA models / metrics."""
def makeExample(self, **kwargs):
"""Returns a serialized TF.Example with the fields set accordingly.
Example usage:
makeExample(name="Tom", age=20, weight=132.5, colors=["red", "green"],
label=1.0)
Note that field types will be inferred from the type of the arguments, so
1.0 goes into a float_list, 1 goes into an int64_list, and "one" goes into
a bytes_list. As the example illustrates, both singleton values and lists
of values are accepted.
Args:
**kwargs: key=value pairs specifying the field values for the example.
Returns:
Serialized TF.Example with the fields set accordingly.
"""
return self._makeExample(**kwargs).SerializeToString()
def assertDictElementsWithinBounds(self, got_values_dict: Dict[str, Any],
expected_values_dict: Dict[str, Any]):
"""Checks the elements for two dictionaries.
It asserts all values in `expected_values_dict` are close to values with the
same key in `got_values_dict`.
Args:
got_values_dict: The actual dictionary.
expected_values_dict: The expected dictionary. The values can be either
`BoundedValue` or any type accepted by
`tf.test.TestCase.assertAllClose()`. When the type is `BoundedValue`, it
expects the corresponding value from `got_values_dict` falls into the
boundaries provided in the `BoundedValue`.
"""
for key, value in expected_values_dict.items():
self.assertIn(key, got_values_dict)
got_value = got_values_dict[key]
if isinstance(value, BoundedValue):
if got_value < value.lower_bound or got_value > value.upper_bound:
self.fail('expecting key %s to have value between %f and %f '
'(both ends inclusive), but value was %f instead' %
(key, value.lower_bound, value.upper_bound, got_value))
else:
self.assertAllClose(got_value, value, msg='key = %s' % key)
def assertMetricsComputedWithoutBeamAre(self, eval_saved_model_path: str,
serialized_examples: List[bytes],
expected_metrics: Dict[str, Any]):
"""Checks metrics in-memory using the low-level APIs without Beam.
Example usage:
self.assertMetricsComputedWithoutBeamAre(
eval_saved_model_path=path,
serialized_examples=[self.makeExample(age=5, label=1.0),
self.makeExample(age=10, label=0.0)],
expected_metrics={'average_loss': 0.1})
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
serialized_examples: List of serialized example bytes.
expected_metrics: Dictionary of expected metric values.
"""
self.assertDictElementsWithinBounds(
got_values_dict=self._computeMetricsWithoutBeam(eval_saved_model_path,
serialized_examples),
expected_values_dict=expected_metrics)
def assertMetricsComputedWithoutBeamNoBatchingAre(
self, eval_saved_model_path: str, serialized_examples: List[bytes],
expected_metrics: Dict[str, Any]):
"""Checks metrics in-memory using the low-level APIs without Beam.
This is the non-batched version of assertMetricsComputedWithoutBeamAre.
This can be useful for debugging batching issues with TFMA or with your
model (e.g. your model or metrics only works with a fixed-batch size - TFMA
requires that your model can accept batches of any size).
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
serialized_examples: List of serialized example bytes.
expected_metrics: Dictionary of expected metric values.
"""
self.assertDictElementsWithinBounds(
got_values_dict=self._computeMetricsWithoutBeamNoBatching(
eval_saved_model_path, serialized_examples),
expected_values_dict=expected_metrics)
def _computeMetricsWithoutBeam(
self, eval_saved_model_path: str,
serialized_examples: List[bytes]) -> Dict[str, Any]:
"""Computes metrics in-memory using the low-level APIs without Beam.
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
serialized_examples: List of serialized example bytes.
Returns:
Metrics computed by TFMA using your model on the given examples.
"""
eval_saved_model = load.EvalSavedModel(eval_saved_model_path)
eval_saved_model.metrics_reset_update_get_list(serialized_examples)
return eval_saved_model.get_metric_values()
def _computeMetricsWithoutBeamNoBatching(
self, eval_saved_model_path: str,
serialized_examples: List[bytes]) -> Dict[str, Any]:
"""Computes metrics in-memory using the low-level APIs without Beam.
This is the non-batched version of computeMetricsWithoutBeam. This can be
useful for debugging batching issues with TFMA or with your model
(e.g. your model or metrics only works with a fixed-batch size - TFMA
requires that your model can accept batches of any size)
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
serialized_examples: List of serialized example bytes.
Returns:
Metrics computed by TFMA using your model on the given examples.
"""
eval_saved_model = load.EvalSavedModel(eval_saved_model_path)
for example in serialized_examples:
eval_saved_model.metrics_reset_update_get_list([example])
return eval_saved_model.get_metric_values()
def assertMetricsComputedWithBeamAre(
self,
eval_saved_model_path: str,
serialized_examples: List[bytes],
expected_metrics: Dict[str, Any],
add_metrics_callbacks: Optional[List[
types.AddMetricsCallbackType]] = None):
"""Checks metrics computed using Beam.
Metrics will be computed over all examples, without any slicing. If you
want to provide your own PCollection (e.g. read a large number of examples
from a file), if you want to check metrics over certain slices, or if you
want to add additional post-export metrics, use the more general
assertGeneralMetricsComputedWithBeamAre.
Example usage:
self.assertMetricsComputedWithBeamAre(
eval_saved_model_path=path,
serialized_examples=[self.makeExample(age=5, label=1.0),
self.makeExample(age=10, label=0.0)],
expected_metrics={'average_loss': 0.1})
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
serialized_examples: List of serialized example bytes.
expected_metrics: Dictionary of expected metric values.
add_metrics_callbacks: Optional. Callbacks for adding additional metrics.
"""
def check_metrics(got):
"""Check metrics callback."""
try:
self.assertEqual(
1, len(got), 'expecting metrics for exactly one slice, but got %d '
'slices instead. metrics were: %s' % (len(got), got))
(slice_key, value) = got[0]
self.assertEqual((), slice_key)
self.assertDictElementsWithinBounds(
got_values_dict=value, expected_values_dict=expected_metrics)
except AssertionError as err:
raise beam_util.BeamAssertException(err)
eval_config = config_pb2.EvalConfig()
eval_shared_model = model_eval_lib.default_eval_shared_model(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks)
extractors = model_eval_lib.default_extractors(
eval_config=eval_config, eval_shared_model=eval_shared_model)
tfx_io = raw_tf_record.RawBeamRecordTFXIO(
physical_format='inmemory',
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['TFMATest'])
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
(metrics, _), _ = (
pipeline
| 'CreateExamples' >> beam.Create(serialized_examples)
| 'BatchExamples' >> tfx_io.BeamSource()
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| 'Extract' >> Extract(extractors=extractors)
| 'ComputeMetricsAndPlots' >>
legacy_metrics_and_plots_evaluator._ComputeMetricsAndPlots( # pylint: disable=protected-access
eval_shared_model=eval_shared_model))
# pylint: enable=no-value-for-parameter
beam_util.assert_that(metrics, check_metrics)
def assertGeneralMetricsComputedWithBeamAre(
self, eval_saved_model_path: str,
examples_pcollection: beam.pvalue.PCollection,
slice_spec: List[slicer.SingleSliceSpec],
add_metrics_callbacks: List[types.AddMetricsCallbackType],
expected_slice_metrics: Dict[Any, Dict[str, Any]]):
"""Checks metrics computed using Beam.
A more general version of assertMetricsComputedWithBeamAre. Note that the
caller is responsible for setting up and running the Beam pipeline.
Example usage:
def add_metrics(features, predictions, labels):
metric_ops = {
'mse': tf.metrics.mean_squared_error(labels, predictions['logits']),
'mae': tf.metrics.mean_absolute_error(labels, predictions['logits']),
}
return metric_ops
with beam.Pipeline() as pipeline:
expected_slice_metrics = {
(): {
'mae': 0.1,
'mse': 0.2,
tfma.post_export_metrics.metric_keys.AUC:
tfma.test.BoundedValue(lower_bound=0.5)
},
(('age', 10),): {
'mae': 0.2,
'mse': 0.3,
tfma.post_export_metrics.metric_keys.AUC:
tfma.test.BoundedValue(lower_bound=0.5)
},
}
examples = pipeline | 'ReadExamples' >> beam.io.ReadFromTFRecord(path)
self.assertGeneralMetricsComputedWithBeamAre(
eval_saved_model_path=path,
examples_pcollection=examples,
slice_spec=[tfma.slicer.SingleSliceSpec(),
tfma.slicer.SingleSliceSpec(columns=['age'])],
add_metrics_callbacks=[
add_metrics, tfma.post_export_metrics.auc()],
expected_slice_metrics=expected_slice_metrics)
Args:
eval_saved_model_path: Path to the directory containing the
EvalSavedModel.
examples_pcollection: A PCollection of serialized example bytes.
slice_spec: List of slice specifications.
add_metrics_callbacks: Callbacks for adding additional metrics.
expected_slice_metrics: Dictionary of dictionaries describing the expected
metrics for each slice. The outer dictionary maps slice keys to the
expected metrics for that slice.
"""
def check_metrics(got):
"""Check metrics callback."""
try:
slices = {}
for slice_key, value in got:
slices[slice_key] = value
self.assertCountEqual(list(slices), list(expected_slice_metrics))
for slice_key, expected_metrics in expected_slice_metrics.items():
self.assertDictElementsWithinBounds(
got_values_dict=slices[slice_key],
expected_values_dict=expected_metrics)
except AssertionError as err:
raise beam_util.BeamAssertException(err)
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
eval_config = config_pb2.EvalConfig(slicing_specs=slicing_specs)
eval_shared_model = self.createTestEvalSharedModel(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks)
extractors = model_eval_lib.default_extractors(
eval_config=eval_config, eval_shared_model=eval_shared_model)
tfx_io = raw_tf_record.RawBeamRecordTFXIO(
physical_format='inmemory',
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['TFMATest'])
# pylint: disable=no-value-for-parameter
(metrics, _), _ = (
examples_pcollection
| 'BatchExamples' >> tfx_io.BeamSource()
| 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
| 'Extract' >> Extract(extractors=extractors)
| 'ComputeMetricsAndPlots' >>
legacy_metrics_and_plots_evaluator._ComputeMetricsAndPlots( # pylint: disable=protected-access
eval_shared_model=eval_shared_model))
# pylint: enable=no-value-for-parameter
beam_util.assert_that(metrics, check_metrics)

blob_id: e8c83c1ceb06e5eaf5eea8ff9e83242b26f71035
directory_id: 542f898adea1b36d627d4bf437731022f242d2dd
path: /configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
content_id: b867cc865e5ac4d7b70221da141894efd7cbd75c
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: facebookresearch/detectron2
snapshot_id: 24bf508e374a98a5e5d1bd4cc96556d5914215f4
revision_id: 80307d2d5e06f06a8a677cc2653f23a4c56402ac
branch_name: refs/heads/main
visit_date: 2023-08-30T17:00:01.293772
revision_date: 2023-08-25T22:10:24
committer_date: 2023-08-25T22:10:24
github_id: 206,660,580
star_events_count: 27,469
fork_events_count: 8,047
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-13T09:25:57
gha_created_at: 2019-09-05T21:30:20
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 333
extension: py
filename: mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
content:
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
lr_multiplier.scheduler.milestones = [
milestone * 2 for milestone in lr_multiplier.scheduler.milestones
]
lr_multiplier.scheduler.num_updates = train.max_iter

blob_id: 72eb5e7e625cf13b66490eb78cb10c4f0116c2f0
directory_id: 4d28185e7a78a569f9a449f39f183cac3024f711
path: /packages/Python/lldbsuite/test/python_api/section/TestSectionAPI.py
content_id: 088a66c71c5cafa18f0bae2e9fa013e5664d1bbe
detected_licenses: ["NCSA", "Apache-2.0", "LLVM-exception"]
license_type: permissive
repo_name: apple/swift-lldb
snapshot_id: 2789bf44f648609a1674ee520ac20b64c95de072
revision_id: d74be846ef3e62de946df343e8c234bde93a8912
branch_name: refs/heads/stable
visit_date: 2023-04-06T00:28:15.882479
revision_date: 2019-10-25T22:46:59
committer_date: 2019-10-25T22:46:59
github_id: 44,838,862
star_events_count: 780
fork_events_count: 291
gha_license_id: Apache-2.0
gha_event_created_at: 2020-01-10T19:28:43
gha_created_at: 2015-10-23T21:13:18
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,341
extension: py
filename: TestSectionAPI.py
content:
"""
Test SBSection APIs.
"""
from __future__ import print_function
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SectionAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(['pyapi'])
def test_get_target_byte_size(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
exe = self.getBuildArtifact('b.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# find the .data section of the main module
mod = target.GetModuleAtIndex(0)
data_section = None
for s in mod.sections:
sect_type = s.GetSectionType()
if sect_type == lldb.eSectionTypeData:
data_section = s
break
elif sect_type == lldb.eSectionTypeContainer:
for i in range(s.GetNumSubSections()):
ss = s.GetSubSectionAtIndex(i)
sect_type = ss.GetSectionType()
if sect_type == lldb.eSectionTypeData:
data_section = ss
break
self.assertIsNotNone(data_section)
self.assertEqual(data_section.target_byte_size, 1)

blob_id: e0c6ad5c3ba067120f1cdc9edbc3d0c212e322c5
directory_id: 3bb9fb3e0096f2e08259fc0cb244dfca61385b6b
path: /deepbgc/models/wrapper.py
content_id: 67bea60a9ac1b7cb19011a4d6bb7742e05f42fd0
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0", "Python-2.0", "LicenseRef-scancode-biopython", "GPL-3.0-only"]
license_type: permissive
repo_name: Merck/deepbgc
snapshot_id: d95545c74f99a2e0e16c5eb24b12f37ff0d280b3
revision_id: bd653ad3fa578f12f8004c9df390f9faa782bb08
branch_name: refs/heads/master
visit_date: 2023-01-12T23:16:16.373478
revision_date: 2022-12-16T19:26:22
committer_date: 2022-12-16T19:26:22
github_id: 162,141,645
star_events_count: 105
fork_events_count: 33
gha_license_id: MIT
gha_event_created_at: 2022-09-06T22:59:12
gha_created_at: 2018-12-17T14:19:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,677
extension: py
filename: wrapper.py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# David Prihoda
# Wrapper for a BGC detection model that handles feature transformation and loading model definitions from JSON
from __future__ import (
print_function,
division,
absolute_import,
)
import logging
import six
from deepbgc import models, features, __version__
import pickle
import json
from sklearn.base import BaseEstimator, ClassifierMixin
import pprint
import pandas as pd
import re
import time
class SequenceModelWrapper(BaseEstimator, ClassifierMixin):
"""
Wrapper for a sequence detection/classification model that handles feature transformation and loading model definitions from JSON
"""
def __init__(self, transformer, model, fit_params):
"""
:param transformer: ListTransformer used to transform Domain DataFrames into feature matrices
:param model: New instance of a BGC detection model
:param fit_params: Params to pass to the fit function of given model
"""
self.transformer = transformer
self.model = model
self.fit_params = fit_params
self.version = __version__
self.timestamp = time.time()
def fit(self, samples, y, validation_samples=None, validation_y=None, **extra_fit_params):
"""
Train model with given list of samples, observe performance on given validation samples.
Domain DataFrames are converted to feature matrices using the pipeline's feature transformer.
:param samples: List of Domain DataFrames, each DataFrame contains one BGC or non-BGC sample's sequence of protein domains.
:param y: List of output values, one value for each sequence
:param validation_samples: List of validation samples
:param validation_y: List of validation sample outputs
:param extra_fit_params: Extra fitting parameters to pass to the fit function of given model
:return: self
"""
if validation_samples is None:
validation_samples = []
if validation_y is None:
validation_y = []
self._check_samples(samples)
self._check_samples(validation_samples)
self.transformer.fit(samples, y)
train_X_list = self._safe_transform(samples, y)
validation_X_list = self._safe_transform(validation_samples, validation_y)
self._debug_samples(train_X_list, y)
merged_params = self.fit_params.copy()
merged_params.update(extra_fit_params)
return self.model.fit(train_X_list, y, validation_X_list=validation_X_list, validation_y_list=validation_y, **merged_params)
def _check_samples(self, samples):
# Wrap single sample into list
if isinstance(samples, pd.DataFrame):
samples = [samples]
elif not isinstance(samples, list):
raise TypeError('Expected single sample or list of samples, got {}'.format(type(samples)))
for sequence in samples:
if not isinstance(sequence, pd.DataFrame):
raise TypeError('Sample has to be a DataFrame, got ' + str(type(sequence)))
def _debug_samples(self, X_list, y=None):
if isinstance(X_list, pd.DataFrame):
logging.debug('-'*80)
logging.debug('Preview of sequence vectors X:\n%s', X_list.head(5))
logging.debug('-'*80)
if y is not None:
logging.debug('Preview of response vectors y:\n%s', y.head(5))
logging.debug('-'*80)
elif isinstance(X_list, list) and X_list:
logging.debug('-'*80)
logging.debug('Preview of first sequence X:\n%s', X_list[0].head(5))
logging.debug('-'*80)
if y is None:
pass
elif isinstance(y, pd.DataFrame):
logging.debug('Preview of response vectors y:\n%s', y.head())
logging.debug('-'*80)
else:
logging.debug('Preview of first sequence y:\n%s', y[0].head())
logging.debug('-'*80)
def _safe_transform(self, samples, y):
X_list = self.transformer.transform(samples)
if isinstance(X_list, pd.DataFrame) and not X_list.empty:
if not isinstance(y, pd.DataFrame):
raise ValueError('In single vector sequence mode, the response needs to be a DataFrame with one row for each sample')
if len(X_list.index) != len(y.index):
raise ValueError('Index length does not match for sample vectors ({}) and responses ({})'.format(len(X_list.index), len(y.index)))
return X_list
def predict(self, samples):
"""
Return prediction scores for each sequence in list.
In detection, will return list of numpy arrays with prediction score for each sequence element (e.g. protein domain).
In classification, will return a DataFrame with one row for each sequence and one column for each predicted class score.
:param samples: List of DataFrames (sequences) or single DataFrame (sequence)
:return: Return prediction scores for each sequence in list.
"""
X_list = self.transformer.transform(samples)
self._debug_samples(X_list)
if isinstance(X_list, list):
return [self.model.predict(X) for X in X_list]
return self.model.predict(X_list)
@classmethod
def from_config(cls, config, meta_only=False, vars=None):
"""
Load model configuration from a JSON config
:param config: Path to JSON config or loaded config dict
:param meta_only: Do not create feature transformers
:param vars: Dictionary of variables to inject into JSON fields in "#{MYVAR}" format
:return: Untrained pipeline based on given config
"""
if isinstance(config, six.string_types):
with open(config) as f:
config = json.loads(f.read())
elif isinstance(config, dict):
pass
else:
raise AttributeError('Invalid config type "{}": {}'.format(type(config), config))
config = fill_vars(config, vars)
logging.info('Loaded model:')
logging.info(pprint.pformat(config, indent=4))
build_params = config.get('build_params', {})
fit_params = config.get('fit_params', {})
input_params = config.get('input_params', {})
sequence_as_vector = input_params.get('sequence_as_vector', False)
# Get class from "models" module. Don't forget to import the class in models.__init__ first!
clf_class = getattr(models, config.get('type'))
# Create a new model instance
model = clf_class(**build_params)
if meta_only:
transformer = None
else:
feature_params = input_params.get('features', [])
transformer = features.ListTransformer.from_config(feature_params, sequence_as_vector=sequence_as_vector)
return SequenceModelWrapper(transformer=transformer, model=model, fit_params=fit_params)
def save(self, path):
with open(path, 'wb') as f:
pickle.dump(self, f, protocol=2)
return self
@classmethod
def load(cls, path):
logging.info('Loading model from: {}'.format(path))
try:
try:
with open(path, 'rb') as f:
model = pickle.load(f)
except UnicodeDecodeError:
with open(path, 'rb') as f:
model = pickle.load(f, encoding='latin1')
except ImportError as e:
if 'hmmlearn' in str(e):
from deepbgc.models.hmm import get_hmmlearn_import_error
raise get_hmmlearn_import_error()
raise e
except Exception as e:
raise ValueError("Error unpickling model from path '{}'".format(path), e)
if not isinstance(model, cls):
raise TypeError("Provided model is not a SequenceModelWrapper: '{}' is a {}".format(path, type(model)))
return model
VAR_PATTERN = re.compile("(#{([a-zA-Z_0-9]+)})")
def fill_vars(d, vars):
if vars is None:
vars = {}
if isinstance(d, dict):
return {k: fill_vars(v, vars) for k, v in d.items()}
elif isinstance(d, list):
return [fill_vars(v, vars) for v in d]
elif isinstance(d, six.string_types):
return VAR_PATTERN.sub(lambda match: _get_matched_var(match, vars), d)
return d
def _get_matched_var(match, vars):
name = match.group(2)
if name not in vars:
raise ValueError("Missing config variable {}, specify it using --config {} VALUE".format(name, name))
return vars[name]
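A quick illustration of the `fill_vars` helper above, run in the same module (or after importing `fill_vars`); the config values are invented for the example:

```python
# "#{NAME}" placeholders are substituted from the vars dict; a missing
# variable raises ValueError. Paths and sizes here are made-up examples.
config = {"paths": ["#{DATA_DIR}/pfam.csv"], "model": {"hidden_size": 128}}
print(fill_vars(config, {"DATA_DIR": "/tmp/deepbgc"}))
# {'paths': ['/tmp/deepbgc/pfam.csv'], 'model': {'hidden_size': 128}}
```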

blob_id: febda0db237d16fd5f74e8c8105ebddc08a4b538
directory_id: 05fe579c12f0013ce83a106083ddb66ace5e8f47
path: /tests/utils/log_operations.py
content_id: 2f800d247cf18d506daafe34abd3ba345ba3d27a
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause"]
license_type: permissive
repo_name: mindspore-ai/mindinsight
snapshot_id: 59d3f47144ada9a12d2c82d9826ad5f5288aed78
revision_id: a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1
branch_name: refs/heads/master
visit_date: 2023-07-22T22:46:43.075617
revision_date: 2023-07-17T11:26:58
committer_date: 2023-07-17T11:26:58
github_id: 250,692,948
star_events_count: 224
fork_events_count: 24
gha_license_id: Apache-2.0
gha_event_created_at: 2020-12-29T12:22:51
gha_created_at: 2020-03-28T01:58:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,318
extension: py
filename: log_operations.py
content:
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Log operations.
"""
import json
import os
import time
from mindinsight.datavisual.common.enums import PluginNameEnum
from .log_generators.graph_log_generator import GraphLogGenerator
from .log_generators.images_log_generator import ImagesLogGenerator
from .log_generators.scalars_log_generator import ScalarsLogGenerator
from .log_generators.histogram_log_generator import HistogramLogGenerator
from .log_generators.tensor_log_generator import TensorLogGenerator
from .log_generators.landscape_log_generator import LandscapeLogGenerator
log_generators = {
PluginNameEnum.GRAPH.value: GraphLogGenerator(),
PluginNameEnum.IMAGE.value: ImagesLogGenerator(),
PluginNameEnum.SCALAR.value: ScalarsLogGenerator(),
PluginNameEnum.HISTOGRAM.value: HistogramLogGenerator(),
PluginNameEnum.TENSOR.value: TensorLogGenerator(),
PluginNameEnum.LANDSCAPE.value: LandscapeLogGenerator()
}
class LogOperations:
"""Log Operations."""
def __init__(self):
self._step_num = 3
self._tag_num = 2
self._time_count = 0
self._graph_base_path = os.path.join(os.path.dirname(__file__), "log_generators", "graph_base.json")
def _get_steps(self):
"""Get steps."""
return range(self._step_num)
def _get_tags(self):
"""Get tags."""
return ["%s%d" % ("tag_name_", i) for i in range(self._tag_num)]
def create_summary(self, log_dir, steps_list, tag_name_list):
"""Create summary in log_dir."""
metadata_dict = dict()
timestamp = time.time() + self._time_count
file_path = os.path.join(log_dir, f'test.summary.{int(timestamp)}')
metadata_dict.update({"plugins": dict()})
metadata_dict.update({"metadata": dict()})
metadata_dict.update({"actual_values": dict()})
for plugin_name in PluginNameEnum.list_members():
if plugin_name == PluginNameEnum.OPTIMIZED_GRAPH.value:
continue
metadata_dict["plugins"].update({plugin_name: list()})
log_generator = log_generators.get(plugin_name)
if plugin_name == PluginNameEnum.GRAPH.value:
with open(self._graph_base_path, 'r') as load_f:
graph_dict = json.load(load_f)
values = log_generator.generate_log(file_path, graph_dict)
metadata_dict["actual_values"].update({plugin_name: values})
metadata_dict["plugins"][plugin_name].append("UUID str")
else:
for tag_name in tag_name_list:
metadata, values = log_generator.generate_log(file_path, steps_list, tag_name)
full_tag_name = f'{tag_name}/{plugin_name}'
metadata_dict["metadata"].update({full_tag_name: metadata})
metadata_dict["plugins"][plugin_name].append(full_tag_name)
if plugin_name == PluginNameEnum.IMAGE.value:
metadata_dict["actual_values"].update({full_tag_name: values})
os.utime(file_path, (timestamp, timestamp))
self._time_count += 1
return metadata_dict
def create_summary_logs(self, summary_base_dir, summary_dir_num, dir_prefix, start_index=0):
"""Create summary logs in summary_base_dir."""
summary_metadata = dict()
steps_list = self._get_steps()
tag_name_list = self._get_tags()
for i in range(start_index, summary_dir_num + start_index):
log_dir = os.path.join(summary_base_dir, f'{dir_prefix}{i}')
os.makedirs(log_dir)
train_id = log_dir.replace(summary_base_dir, ".")
metadata_dict = self.create_summary(log_dir, steps_list, tag_name_list)
summary_metadata.update({train_id: metadata_dict})
return summary_metadata
def create_multiple_logs(self, summary_base_dir, dir_name, log_nums):
"""Create multiple logs in summary_base_dir."""
metadata_dict = None
steps_list = self._get_steps()
tag_name_list = self._get_tags()
log_dir = os.path.join(summary_base_dir, dir_name)
os.makedirs(log_dir)
train_id = log_dir.replace(summary_base_dir, ".")
for _ in range(log_nums):
metadata_dict = self.create_summary(log_dir, steps_list, tag_name_list)
return {train_id: metadata_dict}
def create_reservoir_log(self, summary_base_dir, dir_name, step_num):
"""Create reservoir log in summary_base_dir."""
steps_list = range(step_num)
tag_name_list = self._get_tags()
log_dir = os.path.join(summary_base_dir, dir_name)
os.makedirs(log_dir)
train_id = log_dir.replace(summary_base_dir, ".")
metadata_dict = self.create_summary(log_dir, steps_list, tag_name_list)
return {train_id: metadata_dict}
def generate_log(self, plugin_name, log_dir, log_settings=None, valid=True):
"""
Generate log for ut.
Args:
plugin_name (str): Plugin name, contains 'graph', 'image', and 'scalar'.
log_dir (str): Log path to write log.
log_settings (dict): Info about the log, e.g.:
{
current_time (int): Timestamp in summary file name, not necessary.
graph_base_path (str): Path of graph_base.json, necessary for `graph`.
steps (list[int]): Steps for `image` and `scalar`, default is [1].
tag (str): Tag name, default is 'default_tag'.
}
valid (bool): If true, summary name will be valid.
Returns:
str, Summary log path.
"""
if log_settings is None:
log_settings = dict()
current_time = log_settings.get('time', int(time.time()))
current_time = int(current_time)
log_generator = log_generators.get(plugin_name)
if valid:
temp_path = os.path.join(log_dir, '%s.%s' % ('test.summary', str(current_time)))
else:
temp_path = os.path.join(log_dir, '%s.%s' % ('test.invalid', str(current_time)))
if plugin_name == PluginNameEnum.GRAPH.value:
with open(self._graph_base_path, 'r') as load_f:
graph_dict = json.load(load_f)
graph_dict = log_generator.generate_log(temp_path, graph_dict)
return temp_path, graph_dict, None
steps_list = log_settings.get('steps', [1])
tag_name = log_settings.get('tag', 'default_tag')
metadata, values = log_generator.generate_log(temp_path, steps_list, tag_name)
return temp_path, metadata, values

blob_id: 58a99775abd5260a43c93535c74fea040bb02958
directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
path: /Geometry/GEMGeometry/test/runGEMDetIdAnalysis_cfg.py
content_id: a7be075f6538ff85d28b8126620b01c2dbc29896
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: cms-sw/cmssw
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58
revision_id: 19c178740257eb48367778593da55dcad08b7a4f
branch_name: refs/heads/master
visit_date: 2023-08-23T21:57:42.491143
revision_date: 2023-08-22T20:22:40
committer_date: 2023-08-22T20:22:40
github_id: 10,969,551
star_events_count: 1,006
fork_events_count: 3,696
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:14:28
gha_created_at: 2013-06-26T14:09:07
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 870
extension: py
filename: runGEMDetIdAnalysis_cfg.py
content:
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('PROD',eras.Phase2C4)
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('DataFormats.MuonDetId.gemDetIdAnalyzer_cfi')
if hasattr(process,'MessageLogger'):
process.MessageLogger.GEMAnalysis=dict()
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:step3_29034.root',
# 'root://cms-xrd-global.cern.ch//store/relval/CMSSW_9_1_1_patch1/RelValSingleElectronPt35Extended/GEN-SIM-RECO/91X_upgrade2023_realistic_v1_D17-v1/10000/10D95AC2-B14A-E711-BC4A-0CC47A7C3638.root',
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(20)
)
# Schedule definition
process.p = cms.Path(process.gemDetIdAnalyzer)

blob_id: 15dd74c26f217b6111ba84421ef0275dcc9ad8e7
directory_id: cbb0bd995f5ecb64f93a30d5f1dcd106e3241214
path: /Compiler/floatingpoint.py
content_id: bd5c13844fcdff97e9b93f9472b1903a198a53e6
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"]
license_type: permissive
repo_name: data61/MP-SPDZ
snapshot_id: 324010a4caaa403f64d769a276d58931e0ed274e
revision_id: 5c26feece05e13387fc9bd2ef3f09b2735d6ea4b
branch_name: refs/heads/master
visit_date: 2023-08-10T01:25:33.653174
revision_date: 2023-08-09T02:13:34
committer_date: 2023-08-09T02:13:34
github_id: 152,511,277
star_events_count: 724
fork_events_count: 277
gha_license_id: NOASSERTION
gha_event_created_at: 2023-07-21T04:43:18
gha_created_at: 2018-10-11T01:16:16
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 24,014
extension: py
filename: floatingpoint.py
content:
import math
from math import log, floor, ceil
from Compiler.instructions import *
from . import types
from . import comparison
from . import program
from . import util
from . import instructions_base
##
## Helper functions for floating point arithmetic
##
def two_power(n):
if isinstance(n, int) and n < 31:
return 2**n
else:
max = types.cint(1) << 31
res = 2**(n%31)
for i in range(n // 31):
res *= max
return res
def shift_two(n, pos):
return n >> pos
def maskRing(a, k):
shift = int(program.Program.prog.options.ring) - k
if program.Program.prog.use_edabit():
r_prime, r = types.sint.get_edabit(k)
elif program.Program.prog.use_dabit:
rr, r = zip(*(types.sint.get_dabit() for i in range(k)))
r_prime = types.sint.bit_compose(rr)
else:
r = [types.sint.get_random_bit() for i in range(k)]
r_prime = types.sint.bit_compose(r)
c = ((a + r_prime) << shift).reveal(False) >> shift
return c, r
def maskField(a, k, kappa):
r_dprime = types.sint()
r_prime = types.sint()
c = types.cint()
r = [types.sint() for i in range(k)]
comparison.PRandM(r_dprime, r_prime, r, k, k, kappa)
# always signed due to usage in equality testing
a += two_power(k)
asm_open(True, c, a + two_power(k) * r_dprime + r_prime)
return c, r
@instructions_base.ret_cisc
def EQZ(a, k, kappa):
prog = program.Program.prog
if prog.use_split():
from GC.types import sbitvec
v = sbitvec(a, k).v
bit = util.tree_reduce(operator.and_, (~b for b in v))
return types.sintbit.conv(bit)
prog.non_linear.check_security(kappa)
return prog.non_linear.eqz(a, k)
def bits(a,m):
""" Get the bits of an int """
if isinstance(a, int):
res = [None]*m
for i in range(m):
res[i] = a & 1
a >>= 1
else:
res = []
from Compiler.types import regint, cint
while m > 0:
aa = regint()
convmodp(aa, a, bitlength=0)
res += [cint(x) for x in aa.bit_decompose(min(64, m))]
m -= 64
if m > 0:
aa = cint()
shrci(aa, a, 64)
a = aa
return res
def carry(b, a, compute_p=True):
""" Carry propogation:
(p,g) = (p_2, g_2)o(p_1, g_1) -> (p_1 & p_2, g_2 | (p_2 & g_1))
"""
if compute_p:
t1 = a[0].bit_and(b[0])
else:
t1 = None
t2 = a[1] + a[0].bit_and(b[1])
return (t1, t2)
def or_op(a, b, void=None):
return util.or_op(a, b)
def mul_op(a, b, void=None):
return a * b
def PreORC(a, kappa=None, m=None, raw=False):
k = len(a)
if k == 1:
return [a[0]]
prog = program.Program.prog
kappa = kappa or prog.security
m = m or k
if isinstance(a[0], types.sgf2n):
max_k = program.Program.prog.galois_length - 1
else:
# assume prime length is power of two
prime_length = 2 ** int(ceil(log(prog.bit_length + kappa, 2)))
max_k = prime_length - kappa - 2
assert(max_k > 0)
if k <= max_k:
p = [None] * m
if m == k:
p[0] = a[0]
if isinstance(a[0], types.sgf2n):
b = comparison.PreMulC([3 - a[i] for i in range(k)])
for i in range(m):
tmp = b[k-1-i]
if not raw:
tmp = tmp.bit_decompose()[0]
p[m-1-i] = 1 - tmp
else:
t = [types.sint() for i in range(m)]
b = comparison.PreMulC([a[i] + 1 for i in range(k)])
for i in range(m):
comparison.Mod2(t[i], b[k-1-i], k, kappa, False)
p[m-1-i] = 1 - t[i]
return p
else:
# not constant-round anymore
s = [PreORC(a[i:i+max_k], kappa, raw=raw) for i in range(0,k,max_k)]
t = PreORC([si[-1] for si in s[:-1]], kappa, raw=raw)
return sum(([or_op(x, y) for x in si]
for si,y in zip(s[1:],t)), s[0])[-m:]
def PreOpL(op, items):
"""
Uses algorithm from SecureSCM WP9 deliverable.
op must be a binary function that outputs a new register
"""
k = len(items)
logk = int(ceil(log(k,2)))
kmax = 2**logk
output = list(items)
for i in range(logk):
for j in range(kmax//(2**(i+1))):
y = two_power(i) + j*two_power(i+1) - 1
for z in range(1, 2**i+1):
if y+z < k:
output[y+z] = op(output[y], output[y+z], j != 0)
return output
def PreOpL2(op, items):
"""
Uses algorithm from SecureSCM WP9 deliverable.
op must be a binary function that outputs a new register
"""
k = len(items)
half = k // 2
output = list(items)
if k == 0:
return []
u = [op(items[2 * i], items[2 * i + 1]) for i in range(half)]
v = PreOpL2(op, u)
for i in range(half):
output[2 * i + 1] = v[i]
for i in range(1, (k + 1) // 2):
output[2 * i] = op(v[i - 1], items[2 * i])
return output
def PreOpN(op, items):
""" Naive PreOp algorithm """
k = len(items)
output = [None]*k
output[0] = items[0]
for i in range(1, k):
output[i] = op(output[i-1], items[i])
return output
def PreOR(a, kappa=None, raw=False):
if comparison.const_rounds:
return PreORC(a, kappa, raw=raw)
else:
return PreOpL(or_op, a)
def KOpL(op, a):
k = len(a)
if k == 1:
return a[0]
else:
t1 = KOpL(op, a[:k//2])
t2 = KOpL(op, a[k//2:])
return op(t1, t2)
def KORL(a, kappa=None):
""" log rounds k-ary OR """
k = len(a)
if k == 1:
return a[0]
else:
t1 = KORL(a[:k//2], kappa)
t2 = KORL(a[k//2:], kappa)
return t1 + t2 - t1.bit_and(t2)
def KORC(a, kappa):
return PreORC(a, kappa, 1)[0]
def KOR(a, kappa):
if comparison.const_rounds:
return KORC(a, kappa)
else:
return KORL(a, None)
def KMul(a):
if comparison.const_rounds:
return comparison.KMulC(a)
else:
return KOpL(mul_op, a)
def Inv(a):
""" Invert a non-zero value """
t = [types.sint() for i in range(3)]
c = [types.cint() for i in range(2)]
one = types.cint()
ldi(one, 1)
inverse(t[0], t[1])
s = t[0]*a
asm_open(True, c[0], s)
# avoid division by zero for benchmarking
divc(c[1], one, c[0])
#divc(c[1], c[0], one)
return c[1]*t[0]
def BitAdd(a, b, bits_to_compute=None):
""" Add the bits a[k-1], ..., a[0] and b[k-1], ..., b[0], return k+1
bits s[0], ... , s[k] """
k = len(a)
if not bits_to_compute:
bits_to_compute = list(range(k))
d = [None] * k
for i in range(1,k):
t = a[i]*b[i]
d[i] = (a[i] + b[i] - 2*t, t)
d[0] = (None, a[0]*b[0])
pg = PreOpL(carry, d)
c = [pair[1] for pair in pg]
s = [None] * (k+1)
if 0 in bits_to_compute:
s[0] = a[0] + b[0] - 2*c[0]
bits_to_compute.remove(0)
for i in bits_to_compute:
s[i] = a[i] + b[i] + c[i-1] - 2*c[i]
s[k] = c[k-1]
return s
def BitDec(a, k, m, kappa, bits_to_compute=None):
return program.Program.prog.non_linear.bit_dec(a, k, m)
def BitDecRingRaw(a, k, m):
comparison.require_ring_size(m, 'bit decomposition')
n_shift = int(program.Program.prog.options.ring) - m
if program.Program.prog.use_split():
x = a.split_to_two_summands(m)
bits = types._bitint.carry_lookahead_adder(x[0], x[1], fewer_inv=False)
return bits[:m]
else:
if program.Program.prog.use_edabit():
r, r_bits = types.sint.get_edabit(m, strict=False)
elif program.Program.prog.use_dabit:
r, r_bits = zip(*(types.sint.get_dabit() for i in range(m)))
r = types.sint.bit_compose(r)
else:
r_bits = [types.sint.get_random_bit() for i in range(m)]
r = types.sint.bit_compose(r_bits)
shifted = ((a - r) << n_shift).reveal(False)
masked = shifted >> n_shift
bits = r_bits[0].bit_adder(r_bits, masked.bit_decompose(m))
return bits
def BitDecRing(a, k, m):
bits = BitDecRingRaw(a, k, m)
# reversing to reduce number of rounds
return [types.sintbit.conv(bit) for bit in reversed(bits)][::-1]
def BitDecFieldRaw(a, k, m, kappa, bits_to_compute=None):
instructions_base.set_global_vector_size(a.size)
r_dprime = types.sint()
r_prime = types.sint()
c = types.cint()
r = [types.sint() for i in range(m)]
comparison.PRandM(r_dprime, r_prime, r, k, m, kappa)
pow2 = two_power(k + kappa)
asm_open(True, c, pow2 + two_power(k) + a - two_power(m)*r_dprime - r_prime)
res = r[0].bit_adder(r, list(r[0].bit_decompose_clear(c,m)))
instructions_base.reset_global_vector_size()
return res
def BitDecField(a, k, m, kappa, bits_to_compute=None):
res = BitDecFieldRaw(a, k, m, kappa, bits_to_compute)
return [types.sintbit.conv(bit) for bit in res]
@instructions_base.ret_cisc
def Pow2(a, l, kappa):
comparison.program.curr_tape.require_bit_length(l - 1)
m = int(ceil(log(l, 2)))
t = BitDec(a, m, m, kappa)
return Pow2_from_bits(t)
def Pow2_from_bits(bits):
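    # Computes 2**a from the bits of a: each factor t[i] is 2**(2**i) when
    # bit i is set and 1 otherwise, so the product of all factors equals
    # 2**(sum_i bits[i] * 2**i) = 2**a.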
m = len(bits)
t = list(bits)
pow2k = [None for i in range(m)]
for i in range(m):
pow2k[i] = two_power(2**i)
t[i] = t[i]*pow2k[i] + 1 - t[i]
return KMul(t)
def B2U(a, l, kappa):
pow2a = Pow2(a, l, kappa)
return B2U_from_Pow2(pow2a, l, kappa), pow2a
def B2U_from_Pow2(pow2a, l, kappa):
r = [types.sint() for i in range(l)]
t = types.sint()
c = types.cint()
if program.Program.prog.use_dabit:
r, r_bits = zip(*(types.sint.get_dabit() for i in range(l)))
else:
for i in range(l):
bit(r[i])
r_bits = r
if program.Program.prog.options.ring:
n_shift = int(program.Program.prog.options.ring) - l
assert n_shift > 0
c = ((pow2a + types.sint.bit_compose(r)) << n_shift).reveal(False) >> n_shift
else:
comparison.PRandInt(t, kappa)
asm_open(True, c, pow2a + two_power(l) * t +
sum(two_power(i) * r[i] for i in range(l)))
comparison.program.curr_tape.require_bit_length(l + kappa)
c = list(r_bits[0].bit_decompose_clear(c, l))
x = [r_bits[i].bit_xor(c[i]) for i in range(l)]
#print ' '.join(str(b.value) for b in x)
y = PreOR(x, kappa)
#print ' '.join(str(b.value) for b in y)
return [types.sint.conv(1 - y[i]) for i in range(l)]
def Trunc(a, l, m, kappa=None, compute_modulo=False, signed=False):
""" Oblivious truncation by secret m """
prog = program.Program.prog
kappa = kappa or prog.security
if util.is_constant(m) and not compute_modulo:
# cheaper
res = type(a)(size=a.size)
comparison.Trunc(res, a, l, m, kappa, signed=signed)
return res
if l == 1:
if compute_modulo:
return a * m, 1 + m
else:
return a * (1 - m)
if program.Program.prog.options.ring and not compute_modulo:
return TruncInRing(a, l, Pow2(m, l, kappa))
r = [types.sint() for i in range(l)]
r_dprime = types.sint(0)
r_prime = types.sint(0)
rk = types.sint()
c = types.cint()
ci = [types.cint() for i in range(l)]
d = types.sint()
x, pow2m = B2U(m, l, kappa)
for i in range(l):
bit(r[i])
t1 = two_power(i) * r[i]
t2 = t1*x[i]
r_prime += t2
r_dprime += t1 - t2
if program.Program.prog.options.ring:
n_shift = int(program.Program.prog.options.ring) - l
c = ((a + r_dprime + r_prime) << n_shift).reveal(False) >> n_shift
else:
comparison.PRandInt(rk, kappa)
r_dprime += two_power(l) * rk
asm_open(True, c, a + r_dprime + r_prime)
for i in range(1,l):
ci[i] = c % two_power(i)
c_dprime = sum(ci[i]*(x[i-1] - x[i]) for i in range(1,l))
d = program.Program.prog.non_linear.ltz(c_dprime - r_prime, l, kappa)
if compute_modulo:
b = c_dprime - r_prime + pow2m * d
return b, pow2m
else:
to_shift = a - c_dprime + r_prime
if program.Program.prog.options.ring:
shifted = TruncInRing(to_shift, l, pow2m)
else:
pow2inv = Inv(pow2m)
shifted = to_shift * pow2inv
b = shifted - d
return b
def TruncInRing(to_shift, l, pow2m):
n_shift = int(program.Program.prog.options.ring) - l
bits = BitDecRing(to_shift, l, l)
rev = types.sint.bit_compose(reversed(bits))
rev <<= n_shift
rev *= pow2m
r_bits = [types.sint.get_random_bit() for i in range(l)]
r = types.sint.bit_compose(r_bits)
shifted = (rev - (r << n_shift)).reveal(False)
masked = shifted >> n_shift
bits = types.intbitint.bit_adder(r_bits, masked.bit_decompose(l))
return types.sint.bit_compose(reversed(bits))
def SplitInRing(a, l, m):
if l == 1:
return m.if_else(a, 0), m.if_else(0, a), 1
pow2m = Pow2(m, l, None)
upper = TruncInRing(a, l, pow2m)
lower = a - upper * pow2m
return lower, upper, pow2m
def TruncRoundNearestAdjustOverflow(a, length, target_length, kappa):
t = comparison.TruncRoundNearest(a, length, length - target_length, kappa)
overflow = t.greater_equal(two_power(target_length), target_length + 1, kappa)
if program.Program.prog.options.ring:
s = (1 - overflow) * t + \
comparison.TruncLeakyInRing(overflow * t, length, 1, False)
else:
s = (1 - overflow) * t + overflow * t / 2
return s, overflow
def Int2FL(a, gamma, l, kappa=None):
lam = gamma - 1
s = a.less_than(0, gamma, security=kappa)
z = a.equal(0, gamma, security=kappa)
a = s.if_else(-a, a)
a_bits = a.bit_decompose(lam, security=kappa)
a_bits.reverse()
b = PreOR(a_bits, kappa)
t = a * (1 + a.bit_compose(1 - b_i for b_i in b))
p = a.popcnt_bits(b) - lam
if gamma - 1 > l:
if types.sfloat.round_nearest:
v, overflow = TruncRoundNearestAdjustOverflow(t, gamma - 1, l, kappa)
p = p + overflow
else:
v = t.right_shift(gamma - l - 1, gamma - 1, kappa, signed=False)
else:
v = 2**(l-gamma+1) * t
p = (p + gamma - 1 - l) * z.bit_not()
return v, p, z, s
def FLRound(x, mode):
""" Rounding with floating point output.
    *mode*: 0 -> floor, 1 -> ceil, -1 -> trunc """
v1, p1, z1, s1, l, k = x.v, x.p, x.z, x.s, x.vlen, x.plen
a = types.sint()
comparison.LTZ(a, p1, k, x.kappa)
b = p1.less_than(-l + 1, k, x.kappa)
v2, inv_2pow_p1 = Trunc(v1, l, -a * (1 - b) * x.p, x.kappa, True)
c = EQZ(v2, l, x.kappa)
if mode == -1:
away_from_zero = 0
mode = x.s
else:
away_from_zero = mode + s1 - 2 * mode * s1
v = v1 - v2 + (1 - c) * inv_2pow_p1 * away_from_zero
d = v.equal(two_power(l), l + 1, x.kappa)
v = d * two_power(l-1) + (1 - d) * v
v = a * ((1 - b) * v + b * away_from_zero * two_power(l-1)) + (1 - a) * v1
s = (1 - b * mode) * s1
z = or_op(EQZ(v, l, x.kappa), z1)
v = v * (1 - z)
p = ((p1 + d * a) * (1 - b) + b * away_from_zero * (1 - l)) * (1 - z)
return v, p, z, s
@instructions_base.ret_cisc
def TruncPr(a, k, m, kappa=None, signed=True):
""" Probabilistic truncation [a/2^m + u]
where Pr[u = 1] = (a % 2^m) / 2^m
"""
nl = program.Program.prog.non_linear
nl.check_security(kappa)
return nl.trunc_pr(a, k, m, signed)
def TruncPrRing(a, k, m, signed=True):
if m == 0:
return a
n_ring = int(program.Program.prog.options.ring)
comparison.require_ring_size(k, 'truncation')
if k == n_ring:
program.Program.prog.curr_tape.require_bit_length(1)
if program.Program.prog.use_edabit():
a += types.sint.get_edabit(m, True)[0]
else:
for i in range(m):
a += types.sint.get_random_bit() << i
return comparison.TruncLeakyInRing(a, k, m, signed=signed)
else:
from .types import sint
prog = program.Program.prog
if signed and prog.use_trunc_pr != -1:
a += (1 << (k - 1))
if program.Program.prog.use_trunc_pr:
res = sint()
trunc_pr(res, a, k, m)
else:
# extra bit to mask overflow
prog = program.Program.prog
prog.curr_tape.require_bit_length(1)
if prog.use_edabit() or prog.use_split() > 2:
lower = sint.get_random_int(m)
upper = sint.get_random_int(k - m)
msb = sint.get_random_bit()
r = (msb << k) + (upper << m) + lower
else:
r_bits = [sint.get_random_bit() for i in range(k + 1)]
r = sint.bit_compose(r_bits)
upper = sint.bit_compose(r_bits[m:k])
msb = r_bits[-1]
n_shift = n_ring - (k + 1)
tmp = a + r
masked = (tmp << n_shift).reveal(False)
shifted = (masked << 1 >> (n_shift + m + 1))
overflow = msb.bit_xor(masked >> (n_ring - 1))
res = shifted - upper + \
(overflow << (k - m))
if signed and prog.use_trunc_pr != -1:
res -= (1 << (k - m - 1))
return res
def TruncPrField(a, k, m, kappa=None):
if m == 0:
return a
if kappa is None:
kappa = 40
b = two_power(k-1) + a
r_prime, r_dprime = types.sint(), types.sint()
comparison.PRandM(r_dprime, r_prime, [types.sint() for i in range(m)],
k, m, kappa, use_dabit=False)
two_to_m = two_power(m)
r = two_to_m * r_dprime + r_prime
c = (b + r).reveal(False)
c_prime = c % two_to_m
a_prime = c_prime - r_prime
d = (a - a_prime) / two_to_m
return d
@instructions_base.ret_cisc
def SDiv(a, b, l, kappa, round_nearest=False):
theta = int(ceil(log(l / 3.5) / log(2)))
alpha = two_power(2*l)
w = types.cint(int(2.9142 * 2 ** l)) - 2 * b
x = alpha - b * w
y = a * w
y = y.round(2 * l + 1, l, kappa, round_nearest, signed=False)
x2 = types.sint()
comparison.Mod2m(x2, x, 2 * l + 1, l, kappa, True)
x1 = comparison.TruncZeros(x - x2, 2 * l + 1, l, True)
for i in range(theta-1):
y = y * (x1 + two_power(l)) + (y * x2).round(2 * l, l, kappa,
round_nearest,
signed=False)
y = y.round(2 * l + 1, l, kappa, round_nearest, signed=False)
x = x1 * x2 + (x2**2).round(2 * l + 1, l + 1, kappa, round_nearest,
signed=False)
x = x1 * x1 + x.round(2 * l + 1, l - 1, kappa, round_nearest,
signed=False)
x2 = types.sint()
comparison.Mod2m(x2, x, 2 * l, l, kappa, False)
x1 = comparison.TruncZeros(x - x2, 2 * l + 1, l, True)
y = y * (x1 + two_power(l)) + (y * x2).round(2 * l, l, kappa,
round_nearest, signed=False)
y = y.round(2 * l + 1, l + 1, kappa, round_nearest)
return y
def SDiv_mono(a, b, l, kappa):
theta = int(ceil(log(l / 3.5) / log(2)))
alpha = two_power(2*l)
w = types.cint(int(2.9142 * two_power(l))) - 2 * b
x = alpha - b * w
y = a * w
y = TruncPr(y, 2 * l + 1, l + 1, kappa)
for i in range(theta-1):
y = y * (alpha + x)
# keep y with l bits
y = TruncPr(y, 3 * l, 2 * l, kappa)
x = x**2
# keep x with 2l bits
x = TruncPr(x, 4 * l, 2 * l, kappa)
y = y * (alpha + x)
y = TruncPr(y, 3 * l, 2 * l, kappa)
return y
# LT bit comparison on shared bit values
# Assumes b has the larger size
# - From the paper
# Unconditionally Secure Constant-Rounds Multi-party Computation
# for Equality, Comparison, Bits and Exponentiation
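# Idea: XOR the two bit strings, take a prefix-OR from the most significant
# bit downwards to isolate the first position where they differ, and output
# b's bit at that position -- it is 1 exactly when a < b.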
def BITLT(a, b, bit_length):
from .types import sint, regint, longint, cint
e = [None]*bit_length
g = [None]*bit_length
h = [None]*bit_length
for i in range(bit_length):
# Compute the XOR (reverse order of e for PreOpL)
e[bit_length-i-1] = util.bit_xor(a[i], b[i])
f = PreOpL(or_op, e)
g[bit_length-1] = f[0]
for i in range(bit_length-1):
# reverse order of f due to PreOpL
g[i] = f[bit_length-i-1]-f[bit_length-i-2]
ans = 0
for i in range(bit_length):
h[i] = g[i].bit_and(b[i])
ans = ans + h[i]
return ans
# Exact BitDec with no need for a statistical gap
# - From the paper
# Multiparty Computation for Interval, Equality, and Comparison without
# Bit-Decomposition Protocol
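# Idea: sample a secret random value b uniformly below the prime p (via
# edaBits when the "Rabbit gap" shortcut applies, otherwise by rejection
# sampling random bit vectors against the public bits of p using BITLT),
# open the difference a - b, and add it back onto the bits of b, correcting
# for a possible wrap-around modulo p.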
def BitDecFull(a, n_bits=None, maybe_mixed=False):
from .library import get_program, do_while, if_, break_point
from .types import sint, regint, longint, cint
p = get_program().prime
assert p
bit_length = p.bit_length()
n_bits = n_bits or bit_length
assert n_bits <= bit_length
logp = int(round(math.log(p, 2)))
if get_program().rabbit_gap():
# inspired by Rabbit (https://eprint.iacr.org/2021/119)
# no need for exact randomness generation
# if modulo a power of two is close enough
if get_program().use_edabit():
b, bbits = sint.get_edabit(logp, True, size=a.size)
if logp != bit_length:
from .GC.types import sbits
bbits += [0]
else:
bbits = [sint.get_random_bit(size=a.size) for i in range(logp)]
b = sint.bit_compose(bbits)
if logp != bit_length:
bbits += [sint(0, size=a.size)]
else:
bbits = [sint(size=a.size) for i in range(bit_length)]
tbits = [[sint(size=1) for i in range(bit_length)] for j in range(a.size)]
pbits = util.bit_decompose(p)
# Loop until we get some random integers less than p
done = [regint(0) for i in range(a.size)]
@do_while
def get_bits_loop():
for j in range(a.size):
@if_(done[j] == 0)
def _():
for i in range(bit_length):
tbits[j][i].link(sint.get_random_bit())
c = regint(BITLT(tbits[j], pbits, bit_length).reveal(False))
done[j].link(c)
return (sum(done) != a.size)
for j in range(a.size):
for i in range(bit_length):
movs(bbits[i][j], tbits[j][i])
b = sint.bit_compose(bbits)
c = (a-b).reveal(False)
cmodp = c
t = bbits[0].bit_decompose_clear(p - c, bit_length)
c = longint(c, bit_length)
czero = (c==0)
q = bbits[0].long_one() - comparison.BitLTL_raw(bbits, t)
fbar = [bbits[0].clear_type.conv(cint(x))
for x in ((1<<bit_length)+c-p).bit_decompose(n_bits)]
fbard = bbits[0].bit_decompose_clear(cmodp, n_bits)
g = [q.if_else(fbar[i], fbard[i]) for i in range(n_bits)]
h = bbits[0].bit_adder(bbits, g)
abits = [bbits[0].clear_type(cint(czero)).if_else(bbits[i], h[i])
for i in range(n_bits)]
if maybe_mixed:
return abits
else:
return [sint.conv(bit) for bit in abits]
cc78939f353dee48f2355561a3f8b4eb799fd833 | 40e5847e3d0302af01af9f5595f6cc12e7ca08c4 | /main_test.py | caacea0256ff88c1f53d760cc86ae60f99c3b50a | [] | no_license | le-liang/MARLspectrumSharingV2X | 4c52b43d0b5016c8107dd98cf777534226f1274b | 5626e1ad90352e8fb5a520d3d7c558612a4cfe42 | refs/heads/master | 2023-03-05T21:39:25.278890 | 2023-02-23T12:22:18 | 2023-02-23T12:22:18 | 174,652,256 | 185 | 87 | null | null | null | null | UTF-8 | Python | false | false | 23,159 | py | main_test.py
from __future__ import division, print_function
import random
import scipy
import scipy.io
import numpy as np
import tensorflow as tf
import Environment_marl_test
import os
from replay_memory import ReplayMemory
import sys
my_config = tf.ConfigProto()
my_config.gpu_options.allow_growth=True
class Agent(object):
def __init__(self, memory_entry_size):
self.discount = 1
self.double_q = True
self.memory_entry_size = memory_entry_size
self.memory = ReplayMemory(self.memory_entry_size)
# ################## SETTINGS ######################
up_lanes = [i/2.0 for i in [3.5/2,3.5/2 + 3.5,250+3.5/2, 250+3.5+3.5/2, 500+3.5/2, 500+3.5+3.5/2]]
down_lanes = [i/2.0 for i in [250-3.5-3.5/2,250-3.5/2,500-3.5-3.5/2,500-3.5/2,750-3.5-3.5/2,750-3.5/2]]
left_lanes = [i/2.0 for i in [3.5/2,3.5/2 + 3.5,433+3.5/2, 433+3.5+3.5/2, 866+3.5/2, 866+3.5+3.5/2]]
right_lanes = [i/2.0 for i in [433-3.5-3.5/2,433-3.5/2,866-3.5-3.5/2,866-3.5/2,1299-3.5-3.5/2,1299-3.5/2]]
width = 750/2
height = 1298/2
# This main file is for testing only
IS_TRAIN = 0 # hard-coded to 0
IS_TEST = 1-IS_TRAIN
label = 'marl_model'
label_sarl = 'sarl_model'
n_veh = 4
n_neighbor = 1
n_RB = n_veh
env = Environment_marl_test.Environ(down_lanes, up_lanes, left_lanes, right_lanes, width, height, n_veh, n_neighbor)
env.new_random_game() # initialize parameters in env
n_episode = 3000
n_step_per_episode = int(env.time_slow/env.time_fast)
epsi_final = 0.02
epsi_anneal_length = int(0.8*n_episode)
mini_batch_step = n_step_per_episode
target_update_step = n_step_per_episode*4
n_episode_test = 100 # test episodes
######################################################
def get_state(env, idx=(0,0), ind_episode=1., epsi=0.02):
""" Get state from the environment """
# V2I_channel = (env.V2I_channels_with_fastfading[idx[0], :] - 80) / 60
V2I_fast = (env.V2I_channels_with_fastfading[idx[0], :] - env.V2I_channels_abs[idx[0]] + 10)/35
# V2V_channel = (env.V2V_channels_with_fastfading[:, env.vehicles[idx[0]].destinations[idx[1]], :] - 80) / 60
V2V_fast = (env.V2V_channels_with_fastfading[:, env.vehicles[idx[0]].destinations[idx[1]], :] - env.V2V_channels_abs[:, env.vehicles[idx[0]].destinations[idx[1]]] + 10)/35
V2V_interference = (-env.V2V_Interference_all[idx[0], idx[1], :] - 60) / 60
V2I_abs = (env.V2I_channels_abs[idx[0]] - 80) / 60.0
V2V_abs = (env.V2V_channels_abs[:, env.vehicles[idx[0]].destinations[idx[1]]] - 80)/60.0
load_remaining = np.asarray([env.demand[idx[0], idx[1]] / env.demand_size])
time_remaining = np.asarray([env.individual_time_limit[idx[0], idx[1]] / env.time_slow])
# return np.concatenate((np.reshape(V2V_channel, -1), V2V_interference, V2I_abs, V2V_abs, time_remaining, load_remaining, np.asarray([ind_episode, epsi])))
return np.concatenate((V2I_fast, np.reshape(V2V_fast, -1), V2V_interference, np.asarray([V2I_abs]), V2V_abs, time_remaining, load_remaining, np.asarray([ind_episode, epsi])))
def get_state_sarl(env, idx=(0,0), ind_episode=1., epsi=0.02):
""" Get state from the environment """
# V2I_channel = (env.V2I_channels_with_fastfading[idx[0], :] - 80) / 60
V2I_fast = (env.V2I_channels_with_fastfading[idx[0], :] - env.V2I_channels_abs[idx[0]] + 10)/35
# V2V_channel = (env.V2V_channels_with_fastfading[:, env.vehicles[idx[0]].destinations[idx[1]], :] - 80) / 60
V2V_fast = (env.V2V_channels_with_fastfading[:, env.vehicles[idx[0]].destinations[idx[1]], :] - env.V2V_channels_abs[:, env.vehicles[idx[0]].destinations[idx[1]]] + 10)/35
V2V_interference = (-env.V2V_Interference_all_sarl[idx[0], idx[1], :] - 60) / 60
V2I_abs = (env.V2I_channels_abs[idx[0]] - 80) / 60.0
V2V_abs = (env.V2V_channels_abs[:, env.vehicles[idx[0]].destinations[idx[1]]] - 80)/60.0
load_remaining = np.asarray([env.demand_sarl[idx[0], idx[1]] / env.demand_size])
time_remaining = np.asarray([env.individual_time_limit_sarl[idx[0], idx[1]] / env.time_slow])
# return np.concatenate((np.reshape(V2V_channel, -1), V2V_interference, V2I_abs, V2V_abs, time_remaining, load_remaining, np.asarray([ind_episode, epsi])))
return np.concatenate((V2I_fast, np.reshape(V2V_fast, -1), V2V_interference, np.asarray([V2I_abs]), V2V_abs, time_remaining, load_remaining, np.asarray([ind_episode, epsi])))
# -----------------------------------------------------------
n_hidden_1 = 500
n_hidden_2 = 250
n_hidden_3 = 120
n_input = len(get_state(env=env))
n_output = n_RB * len(env.V2V_power_dB_List)
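# Each discrete action jointly encodes a resource block and a transmit power
# level; it is decoded below as (action % n_RB, action // n_RB).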
g = tf.Graph()
with g.as_default():
# ============== Training network ========================
x = tf.placeholder(tf.float32, [None, n_input])
w_1 = tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1))
w_2 = tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1))
w_3 = tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev=0.1))
w_4 = tf.Variable(tf.truncated_normal([n_hidden_3, n_output], stddev=0.1))
b_1 = tf.Variable(tf.truncated_normal([n_hidden_1], stddev=0.1))
b_2 = tf.Variable(tf.truncated_normal([n_hidden_2], stddev=0.1))
b_3 = tf.Variable(tf.truncated_normal([n_hidden_3], stddev=0.1))
b_4 = tf.Variable(tf.truncated_normal([n_output], stddev=0.1))
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, w_1), b_1))
layer_1_b = tf.layers.batch_normalization(layer_1)
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1_b, w_2), b_2))
layer_2_b = tf.layers.batch_normalization(layer_2)
layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2_b, w_3), b_3))
layer_3_b = tf.layers.batch_normalization(layer_3)
    y = tf.nn.relu(tf.add(tf.matmul(layer_3_b, w_4), b_4))
g_q_action = tf.argmax(y, axis=1)
# compute loss
g_target_q_t = tf.placeholder(tf.float32, None, name="target_value")
g_action = tf.placeholder(tf.int32, None, name='g_action')
action_one_hot = tf.one_hot(g_action, n_output, 1.0, 0.0, name='action_one_hot')
q_acted = tf.reduce_sum(y * action_one_hot, reduction_indices=1, name='q_acted')
g_loss = tf.reduce_mean(tf.square(g_target_q_t - q_acted), name='g_loss')
optim = tf.train.RMSPropOptimizer(learning_rate=0.001, momentum=0.95, epsilon=0.01).minimize(g_loss)
# ==================== Prediction network ========================
x_p = tf.placeholder(tf.float32, [None, n_input])
w_1_p = tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1))
w_2_p = tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1))
w_3_p = tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev=0.1))
w_4_p = tf.Variable(tf.truncated_normal([n_hidden_3, n_output], stddev=0.1))
b_1_p = tf.Variable(tf.truncated_normal([n_hidden_1], stddev=0.1))
b_2_p = tf.Variable(tf.truncated_normal([n_hidden_2], stddev=0.1))
b_3_p = tf.Variable(tf.truncated_normal([n_hidden_3], stddev=0.1))
b_4_p = tf.Variable(tf.truncated_normal([n_output], stddev=0.1))
layer_1_p = tf.nn.relu(tf.add(tf.matmul(x_p, w_1_p), b_1_p))
layer_1_p_b = tf.layers.batch_normalization(layer_1_p)
layer_2_p = tf.nn.relu(tf.add(tf.matmul(layer_1_p_b, w_2_p), b_2_p))
layer_2_p_b = tf.layers.batch_normalization(layer_2_p)
layer_3_p = tf.nn.relu(tf.add(tf.matmul(layer_2_p_b, w_3_p), b_3_p))
layer_3_p_b = tf.layers.batch_normalization(layer_3_p)
y_p = tf.nn.relu(tf.add(tf.matmul(layer_3_p_b, w_4_p), b_4_p))
g_target_q_idx = tf.placeholder('int32', [None, None], 'output_idx')
target_q_with_idx = tf.gather_nd(y_p, g_target_q_idx)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def predict(sess, s_t, ep, test_ep = False):
n_power_levels = len(env.V2V_power_dB_List)
if np.random.rand() < ep and not test_ep:
pred_action = np.random.randint(n_RB*n_power_levels)
else:
pred_action = sess.run(g_q_action, feed_dict={x: [s_t]})[0]
return pred_action
def predict_sarl(sess, s_t):
pred_action = sess.run(g_q_action, feed_dict={x: [s_t]})[0]
return pred_action
def q_learning_mini_batch(current_agent, current_sess):
""" Training a sampled mini-batch """
batch_s_t, batch_s_t_plus_1, batch_action, batch_reward = current_agent.memory.sample()
if current_agent.double_q: # double q-learning
pred_action = current_sess.run(g_q_action, feed_dict={x: batch_s_t_plus_1})
q_t_plus_1 = current_sess.run(target_q_with_idx, {x_p: batch_s_t_plus_1, g_target_q_idx: [[idx, pred_a] for idx, pred_a in enumerate(pred_action)]})
batch_target_q_t = current_agent.discount * q_t_plus_1 + batch_reward
else:
q_t_plus_1 = current_sess.run(y_p, {x_p: batch_s_t_plus_1})
max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)
batch_target_q_t = current_agent.discount * max_q_t_plus_1 + batch_reward
_, loss_val = current_sess.run([optim, g_loss], {g_target_q_t: batch_target_q_t, g_action: batch_action, x: batch_s_t})
return loss_val
def update_target_q_network(sess):
""" Update target q network once in a while """
sess.run(w_1_p.assign(sess.run(w_1)))
sess.run(w_2_p.assign(sess.run(w_2)))
sess.run(w_3_p.assign(sess.run(w_3)))
sess.run(w_4_p.assign(sess.run(w_4)))
sess.run(b_1_p.assign(sess.run(b_1)))
sess.run(b_2_p.assign(sess.run(b_2)))
sess.run(b_3_p.assign(sess.run(b_3)))
sess.run(b_4_p.assign(sess.run(b_4)))
def save_models(sess, model_path):
""" Save models to the current directory with the name filename """
current_dir = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(current_dir, "model/" + model_path)
if not os.path.exists(os.path.dirname(model_path)):
os.makedirs(os.path.dirname(model_path))
saver.save(sess, model_path, write_meta_graph=False)
def load_models(sess, model_path):
""" Restore models from the current directory with the name filename """
dir_ = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_, "model/" + model_path)
saver.restore(sess, model_path)
def print_weight(sess, target=False):
""" debug """
if not target:
print(sess.run(w_1[0, 0:4]))
else:
print(sess.run(w_1_p[0, 0:4]))
# --------------------------------------------------------------
agents = []
sesses = []
for ind_agent in range(n_veh * n_neighbor): # initialize agents
print("Initializing agent", ind_agent)
agent = Agent(memory_entry_size=len(get_state(env)))
agents.append(agent)
sess = tf.Session(graph=g,config=my_config)
sess.run(init)
sesses.append(sess)
agent_sarl = Agent(memory_entry_size=len(get_state(env)))
sess_sarl = tf.Session(graph=g,config=my_config)
sess_sarl.run(init)
# -------------- Testing --------------
if IS_TEST:
print("\nRestoring the model...")
for i in range(n_veh):
for j in range(n_neighbor):
model_path = label + '/agent_' + str(i * n_neighbor + j)
load_models(sesses[i * n_neighbor + j], model_path)
# restore the single-agent model
model_path_single = label_sarl + '/agent'
load_models(sess_sarl, model_path_single)
V2I_rate_list = []
V2V_success_list = []
V2I_rate_list_rand = []
V2V_success_list_rand = []
V2I_rate_list_sarl = []
V2V_success_list_sarl = []
V2I_rate_list_dpra = []
V2V_success_list_dpra = []
rate_marl = np.zeros([n_episode_test, n_step_per_episode, n_veh, n_neighbor])
rate_rand = np.zeros([n_episode_test, n_step_per_episode, n_veh, n_neighbor])
demand_marl = env.demand_size * np.ones([n_episode_test, n_step_per_episode+1, n_veh, n_neighbor])
demand_rand = env.demand_size * np.ones([n_episode_test, n_step_per_episode+1, n_veh, n_neighbor])
action_all_testing_sarl = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
action_all_testing_dpra = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
for idx_episode in range(n_episode_test):
print('----- Episode', idx_episode, '-----')
env.renew_positions()
env.renew_neighbor()
env.renew_channel()
env.renew_channels_fastfading()
env.demand = env.demand_size * np.ones((env.n_Veh, env.n_neighbor))
env.individual_time_limit = env.time_slow * np.ones((env.n_Veh, env.n_neighbor))
env.active_links = np.ones((env.n_Veh, env.n_neighbor), dtype='bool')
env.demand_rand = env.demand_size * np.ones((env.n_Veh, env.n_neighbor))
env.individual_time_limit_rand = env.time_slow * np.ones((env.n_Veh, env.n_neighbor))
env.active_links_rand = np.ones((env.n_Veh, env.n_neighbor), dtype='bool')
env.demand_sarl = env.demand_size * np.ones((env.n_Veh, env.n_neighbor))
env.individual_time_limit_sarl = env.time_slow * np.ones((env.n_Veh, env.n_neighbor))
env.active_links_sarl = np.ones((env.n_Veh, env.n_neighbor), dtype='bool')
env.demand_dpra = env.demand_size * np.ones((env.n_Veh, env.n_neighbor))
env.individual_time_limit_dpra = env.time_slow * np.ones((env.n_Veh, env.n_neighbor))
env.active_links_dpra = np.ones((env.n_Veh, env.n_neighbor), dtype='bool')
V2I_rate_per_episode = []
V2I_rate_per_episode_rand = []
V2I_rate_per_episode_sarl = []
V2I_rate_per_episode_dpra = []
for test_step in range(n_step_per_episode):
# trained models
action_all_testing = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
for i in range(n_veh):
for j in range(n_neighbor):
state_old = get_state(env, [i, j], 1, epsi_final)
action = predict(sesses[i*n_neighbor+j], state_old, epsi_final, True)
action_all_testing[i, j, 0] = action % n_RB # chosen RB
action_all_testing[i, j, 1] = int(np.floor(action / n_RB)) # power level
action_temp = action_all_testing.copy()
V2I_rate, V2V_success, V2V_rate = env.act_for_testing(action_temp)
V2I_rate_per_episode.append(np.sum(V2I_rate)) # sum V2I rate in bps
rate_marl[idx_episode, test_step,:,:] = V2V_rate
demand_marl[idx_episode, test_step+1,:,:] = env.demand
# random baseline
action_rand = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
action_rand[:, :, 0] = np.random.randint(0, n_RB, [n_veh, n_neighbor]) # band
action_rand[:, :, 1] = np.random.randint(0, len(env.V2V_power_dB_List), [n_veh, n_neighbor]) # power
V2I_rate_rand, V2V_success_rand, V2V_rate_rand = env.act_for_testing_rand(action_rand)
V2I_rate_per_episode_rand.append(np.sum(V2I_rate_rand)) # sum V2I rate in bps
rate_rand[idx_episode, test_step, :, :] = V2V_rate_rand
demand_rand[idx_episode, test_step+1,:,:] = env.demand_rand
# SARL
remainder = test_step % (n_veh * n_neighbor)
i = int(np.floor(remainder/n_neighbor))
j = remainder % n_neighbor
state_sarl = get_state_sarl(env, [i, j], 1, epsi_final)
action = predict_sarl(sess_sarl, state_sarl)
action_all_testing_sarl[i, j, 0] = action % n_RB # chosen RB
action_all_testing_sarl[i, j, 1] = int(np.floor(action / n_RB)) # power level
action_temp_sarl = action_all_testing_sarl.copy()
V2I_rate_sarl, V2V_success_sarl, V2V_rate_sarl = env.act_for_testing_sarl(action_temp_sarl)
V2I_rate_per_episode_sarl.append(np.sum(V2I_rate_sarl)) # sum V2I rate in bps
# # Used as V2I upper bound only, no V2V transmission
# action_all_testing_dpra[i, j, 0] = 0 # chosen RB
# action_all_testing_dpra[i, j, 1] = 3 # power level, fixed to -100 dBm, no V2V transmission
#
# action_temp_dpra = action_all_testing_dpra.copy()
# V2I_rate_dpra, V2V_success_dpra, V2V_rate_dpra = env.act_for_testing_dpra(action_temp_dpra)
# V2I_rate_per_episode_dpra.append(np.sum(V2I_rate_dpra)) # sum V2I rate in bps
# # V2V Upper bound only, centralized maxV2V
# The following applies to n_veh = 4 and n_neighbor = 1 only
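            # Brute-force upper bound: enumerate all n_RB**4 resource-block
            # assignments for the four links (power level fixed since
            # n_power_level = 1), score each with env.Compute_Rate on the sum
            # V2V rate, and replay the best assignment via act_for_testing_dpra.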
action_dpra = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
# n_power_level = len(env.V2V_power_dB_List)
n_power_level = 1
store_action = np.zeros([(n_RB*n_power_level)**4, 4])
rate_all_dpra = []
t = 0
# for i in range(n_RB*len(env.V2V_power_dB_List)):\
for i in range(n_RB):
for j in range(n_RB):
for m in range(n_RB):
for n in range(n_RB):
action_dpra[0, 0, 0] = i % n_RB
action_dpra[0, 0, 1] = int(np.floor(i / n_RB)) # power level
action_dpra[1, 0, 0] = j % n_RB
action_dpra[1, 0, 1] = int(np.floor(j / n_RB)) # power level
action_dpra[2, 0, 0] = m % n_RB
action_dpra[2, 0, 1] = int(np.floor(m / n_RB)) # power level
action_dpra[3, 0, 0] = n % n_RB
action_dpra[3, 0, 1] = int(np.floor(n / n_RB)) # power level
action_temp_findMax = action_dpra.copy()
V2I_rate_findMax, V2V_rate_findMax = env.Compute_Rate(action_temp_findMax)
rate_all_dpra.append(np.sum(V2V_rate_findMax))
store_action[t, :] = [i,j,m,n]
t += 1
i = store_action[np.argmax(rate_all_dpra), 0]
j = store_action[np.argmax(rate_all_dpra), 1]
m = store_action[np.argmax(rate_all_dpra), 2]
n = store_action[np.argmax(rate_all_dpra), 3]
action_testing_dpra = np.zeros([n_veh, n_neighbor, 2], dtype='int32')
action_testing_dpra[0, 0, 0] = i % n_RB
action_testing_dpra[0, 0, 1] = int(np.floor(i / n_RB)) # power level
action_testing_dpra[1, 0, 0] = j % n_RB
action_testing_dpra[1, 0, 1] = int(np.floor(j / n_RB)) # power level
action_testing_dpra[2, 0, 0] = m % n_RB
action_testing_dpra[2, 0, 1] = int(np.floor(m / n_RB)) # power level
action_testing_dpra[3, 0, 0] = n % n_RB
action_testing_dpra[3, 0, 1] = int(np.floor(n / n_RB)) # power level
V2I_rate_findMax, V2V_rate_findMax = env.Compute_Rate(action_testing_dpra)
check_sum = np.sum(V2V_rate_findMax)
action_temp_dpra = action_testing_dpra.copy()
V2I_rate_dpra, V2V_success_dpra, V2V_rate_dpra = env.act_for_testing_dpra(action_temp_dpra)
V2I_rate_per_episode_dpra.append(np.sum(V2I_rate_dpra)) # sum V2I rate in bps
# update the environment and compute interference
env.renew_channels_fastfading()
env.Compute_Interference(action_temp)
env.Compute_Interference_sarl(action_temp_sarl)
env.Compute_Interference_dpra(action_temp_dpra)
if test_step == n_step_per_episode - 1:
V2V_success_list.append(V2V_success)
V2V_success_list_rand.append(V2V_success_rand)
V2V_success_list_sarl.append(V2V_success_sarl)
V2V_success_list_dpra.append(V2V_success_dpra)
V2I_rate_list.append(np.mean(V2I_rate_per_episode))
V2I_rate_list_rand.append(np.mean(V2I_rate_per_episode_rand))
V2I_rate_list_sarl.append(np.mean(V2I_rate_per_episode_sarl))
V2I_rate_list_dpra.append(np.mean(V2I_rate_per_episode_dpra))
print('marl', round(np.average(V2I_rate_per_episode), 2), 'sarl', round(np.average(V2I_rate_per_episode_sarl), 2), 'rand', round(np.average(V2I_rate_per_episode_rand), 2), 'dpra', round(np.average(V2I_rate_per_episode_dpra), 2))
print('marl', V2V_success_list[idx_episode], 'sarl', V2V_success_list_sarl[idx_episode], 'rand', V2V_success_list_rand[idx_episode], 'dpra', V2V_success_list_dpra[idx_episode])
print('-------- marl -------------')
print('n_veh:', n_veh, ', n_neighbor:', n_neighbor)
print('Sum V2I rate:', round(np.average(V2I_rate_list), 2), 'Mbps')
print('Pr(V2V success):', round(np.average(V2V_success_list), 4))
#
print('-------- sarl -------------')
print('n_veh:', n_veh, ', n_neighbor:', n_neighbor)
print('Sum V2I rate:', round(np.average(V2I_rate_list_sarl), 2), 'Mbps')
print('Pr(V2V success):', round(np.average(V2V_success_list_sarl), 4))
print('-------- random -------------')
print('n_veh:', n_veh, ', n_neighbor:', n_neighbor)
print('Sum V2I rate:', round(np.average(V2I_rate_list_rand), 2), 'Mbps')
print('Pr(V2V success):', round(np.average(V2V_success_list_rand), 4))
print('-------- DPRA -------------')
print('n_veh:', n_veh, ', n_neighbor:', n_neighbor)
print('Sum V2I rate:', round(np.average(V2I_rate_list_dpra), 2), 'Mbps')
print('Pr(V2V success):', round(np.average(V2V_success_list_dpra), 4))
# The name "DPRA" is used for historical reasons. Not really the case...
with open("Data.txt", "a") as f:
f.write('-------- marl, ' + label + '------\n')
f.write('n_veh: ' + str(n_veh) + ', n_neighbor: ' + str(n_neighbor) + '\n')
f.write('Sum V2I rate: ' + str(round(np.average(V2I_rate_list), 5)) + ' Mbps\n')
f.write('Pr(V2V): ' + str(round(np.average(V2V_success_list), 5)) + '\n')
f.write('-------- sarl, ' + label_sarl + '------\n')
f.write('n_veh: ' + str(n_veh) + ', n_neighbor: ' + str(n_neighbor) + '\n')
f.write('Sum V2I rate: ' + str(round(np.average(V2I_rate_list_sarl), 5)) + ' Mbps\n')
f.write('Pr(V2V): ' + str(round(np.average(V2V_success_list_sarl), 5)) + '\n')
f.write('--------random ------------\n')
f.write('Rand Sum V2I rate: ' + str(round(np.average(V2I_rate_list_rand), 5)) + ' Mbps\n')
f.write('Rand Pr(V2V): ' + str(round(np.average(V2V_success_list_rand), 5)) + '\n')
f.write('--------DPRA ------------\n')
f.write('Dpra Sum V2I rate: ' + str(round(np.average(V2I_rate_list_dpra), 5)) + ' Mbps\n')
f.write('Dpra Pr(V2V): ' + str(round(np.average(V2V_success_list_dpra), 5)) + '\n')
current_dir = os.path.dirname(os.path.realpath(__file__))
marl_path = os.path.join(current_dir, "model/" + label + '/rate_marl.mat')
scipy.io.savemat(marl_path, {'rate_marl': rate_marl})
rand_path = os.path.join(current_dir, "model/" + label + '/rate_rand.mat')
scipy.io.savemat(rand_path, {'rate_rand': rate_rand})
demand_marl_path = os.path.join(current_dir, "model/" + label + '/demand_marl.mat')
scipy.io.savemat(demand_marl_path, {'demand_marl': demand_marl})
demand_rand_path = os.path.join(current_dir, "model/" + label + '/demand_rand.mat')
scipy.io.savemat(demand_rand_path, {'demand_rand': demand_rand})
# close sessions
for sess in sesses:
sess.close()
# if __name__ == '__main__':
# tf.app.run()
260db06e3678aa531c6ead2921e173a5b29ba243 | 42a7b596cd3d8700631c3b83f6fc16e536b35ec1 | /jsonpatch.py | d3fc26d51c53006fecd09ff7a31e10ef01883f25 | ["BSD-3-Clause"] | permissive | stefankoegl/python-json-patch | 1c7f9353b44c83f069101ff6e5cc1099225c4155 | 73c36f2c4776c008cd4e750f5240e06dfdc918fc | refs/heads/master | 2023-07-06T00:06:52.183696 | 2023-06-28T04:01:39 | 2023-06-28T04:01:39 | 1,915,437 | 309 | 84 | BSD-3-Clause | 2023-06-28T04:01:40 | 2011-06-18T12:03:47 | Python | UTF-8 | Python | false | false | 29,778 | py | jsonpatch.py
# -*- coding: utf-8 -*-
#
# python-json-patch - An implementation of the JSON Patch format
# https://github.com/stefankoegl/python-json-patch
#
# Copyright (c) 2011 Stefan Kögl <stefan@skoegl.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" Apply JSON-Patches (RFC 6902) """
from __future__ import unicode_literals
import collections
import copy
import functools
import json
import sys
try:
from collections.abc import Sequence
except ImportError:  # Python 2 / Python < 3.3
from collections import Sequence
try:
from types import MappingProxyType
except ImportError:
# Python < 3.3
MappingProxyType = dict
from jsonpointer import JsonPointer, JsonPointerException
_ST_ADD = 0
_ST_REMOVE = 1
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
str = unicode
# Will be parsed by setup.py to determine package metadata
__author__ = 'Stefan Kögl <stefan@skoegl.net>'
__version__ = '1.33'
__website__ = 'https://github.com/stefankoegl/python-json-patch'
__license__ = 'Modified BSD License'
# pylint: disable=E0611,W0404
if sys.version_info >= (3, 0):
basestring = (bytes, str) # pylint: disable=C0103,W0622
class JsonPatchException(Exception):
"""Base Json Patch exception"""
class InvalidJsonPatch(JsonPatchException):
""" Raised if an invalid JSON Patch is created """
class JsonPatchConflict(JsonPatchException):
"""Raised if patch could not be applied due to conflict situation such as:
- attempt to add object key when it already exists;
    - attempt to operate with a nonexistent object key;
- attempt to insert value to array at position beyond its size;
- etc.
"""
class JsonPatchTestFailed(JsonPatchException, AssertionError):
""" A Test operation failed """
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
mdict = collections.defaultdict(list)
for key, value in ordered_pairs:
mdict[key].append(value)
return dict(
# unpack lists that have only 1 item
(key, values[0] if len(values) == 1 else values)
for key, values in mdict.items()
)
# The "object_pairs_hook" parameter is used to handle duplicate keys when
# loading a JSON object.
_jsonloads = functools.partial(json.loads, object_pairs_hook=multidict)
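# For example (illustrative): _jsonloads('{"a": 1, "a": 2}') -> {'a': [1, 2]},
# while documents without duplicate keys are loaded as usual.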
def apply_patch(doc, patch, in_place=False, pointer_cls=JsonPointer):
"""Apply list of patches to specified json document.
:param doc: Document object.
:type doc: dict
:param patch: JSON patch as list of dicts or raw JSON-encoded string.
:type patch: list or str
    :param in_place: When :const:`True`, the patch will modify the target document.
By default patch will be applied to document copy.
:type in_place: bool
:param pointer_cls: JSON pointer class to use.
:type pointer_cls: Type[JsonPointer]
:return: Patched document object.
:rtype: dict
>>> doc = {'foo': 'bar'}
>>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
>>> other = apply_patch(doc, patch)
>>> doc is not other
True
>>> other == {'foo': 'bar', 'baz': 'qux'}
True
>>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
>>> apply_patch(doc, patch, in_place=True) == {'foo': 'bar', 'baz': 'qux'}
True
>>> doc == other
True
"""
if isinstance(patch, basestring):
patch = JsonPatch.from_string(patch, pointer_cls=pointer_cls)
else:
patch = JsonPatch(patch, pointer_cls=pointer_cls)
return patch.apply(doc, in_place)
def make_patch(src, dst, pointer_cls=JsonPointer):
"""Generates patch by comparing two document objects. Actually is
a proxy to :meth:`JsonPatch.from_diff` method.
:param src: Data source document object.
:type src: dict
    :param dst: Data target document object.
:type dst: dict
:param pointer_cls: JSON pointer class to use.
:type pointer_cls: Type[JsonPointer]
>>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = make_patch(src, dst)
>>> new = patch.apply(src)
>>> new == dst
True
"""
return JsonPatch.from_diff(src, dst, pointer_cls=pointer_cls)
class PatchOperation(object):
"""A single operation inside a JSON Patch."""
def __init__(self, operation, pointer_cls=JsonPointer):
self.pointer_cls = pointer_cls
        if 'path' not in operation:
raise InvalidJsonPatch("Operation must have a 'path' member")
if isinstance(operation['path'], self.pointer_cls):
self.location = operation['path'].path
self.pointer = operation['path']
else:
self.location = operation['path']
try:
self.pointer = self.pointer_cls(self.location)
except TypeError as ex:
raise InvalidJsonPatch("Invalid 'path'")
self.operation = operation
def apply(self, obj):
"""Abstract method that applies a patch operation to the specified object."""
raise NotImplementedError('should implement the patch operation.')
def __hash__(self):
return hash(frozenset(self.operation.items()))
def __eq__(self, other):
if not isinstance(other, PatchOperation):
return False
return self.operation == other.operation
def __ne__(self, other):
return not(self == other)
@property
def path(self):
return '/'.join(self.pointer.parts[:-1])
@property
def key(self):
try:
return int(self.pointer.parts[-1])
except ValueError:
return self.pointer.parts[-1]
@key.setter
def key(self, value):
self.pointer.parts[-1] = str(value)
self.location = self.pointer.path
self.operation['path'] = self.location
class RemoveOperation(PatchOperation):
"""Removes an object property or an array element."""
def apply(self, obj):
subobj, part = self.pointer.to_last(obj)
if isinstance(subobj, Sequence) and not isinstance(part, int):
raise JsonPointerException("invalid array index '{0}'".format(part))
try:
del subobj[part]
except (KeyError, IndexError) as ex:
msg = "can't remove a non-existent object '{0}'".format(part)
raise JsonPatchConflict(msg)
return obj
def _on_undo_remove(self, path, key):
if self.path == path:
if self.key >= key:
self.key += 1
else:
key -= 1
return key
def _on_undo_add(self, path, key):
if self.path == path:
if self.key > key:
self.key -= 1
else:
key -= 1
return key
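# Note: the _on_undo_remove/_on_undo_add hooks on the operation classes are
# used by DiffBuilder further below; when a pending remove/add on the same
# list path is cancelled (typically folded into a move), they shift this
# operation's array index, and return the adjusted candidate key, so that the
# remaining operations keep pointing at the right positions.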
class AddOperation(PatchOperation):
"""Adds an object property or an array element."""
def apply(self, obj):
try:
value = self.operation["value"]
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
subobj, part = self.pointer.to_last(obj)
if isinstance(subobj, MutableSequence):
if part == '-':
subobj.append(value) # pylint: disable=E1103
elif part > len(subobj) or part < 0:
raise JsonPatchConflict("can't insert outside of list")
else:
subobj.insert(part, value) # pylint: disable=E1103
elif isinstance(subobj, MutableMapping):
if part is None:
obj = value # we're replacing the root
else:
subobj[part] = value
else:
if part is None:
raise TypeError("invalid document type {0}".format(type(subobj)))
else:
raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
return obj
def _on_undo_remove(self, path, key):
if self.path == path:
if self.key > key:
self.key += 1
else:
key += 1
return key
def _on_undo_add(self, path, key):
if self.path == path:
if self.key > key:
self.key -= 1
else:
key += 1
return key
class ReplaceOperation(PatchOperation):
"""Replaces an object property or an array element by a new value."""
def apply(self, obj):
try:
value = self.operation["value"]
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
subobj, part = self.pointer.to_last(obj)
if part is None:
return value
if part == "-":
raise InvalidJsonPatch("'path' with '-' can't be applied to 'replace' operation")
if isinstance(subobj, MutableSequence):
if part >= len(subobj) or part < 0:
raise JsonPatchConflict("can't replace outside of list")
elif isinstance(subobj, MutableMapping):
if part not in subobj:
msg = "can't replace a non-existent object '{0}'".format(part)
raise JsonPatchConflict(msg)
else:
if part is None:
raise TypeError("invalid document type {0}".format(type(subobj)))
else:
raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
subobj[part] = value
return obj
def _on_undo_remove(self, path, key):
return key
def _on_undo_add(self, path, key):
return key
class MoveOperation(PatchOperation):
"""Moves an object property or an array element to a new location."""
def apply(self, obj):
try:
if isinstance(self.operation['from'], self.pointer_cls):
from_ptr = self.operation['from']
else:
from_ptr = self.pointer_cls(self.operation['from'])
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'from' member")
subobj, part = from_ptr.to_last(obj)
try:
value = subobj[part]
except (KeyError, IndexError) as ex:
raise JsonPatchConflict(str(ex))
# If source and target are equal, this is a no-op
if self.pointer == from_ptr:
return obj
if isinstance(subobj, MutableMapping) and \
self.pointer.contains(from_ptr):
raise JsonPatchConflict('Cannot move values into their own children')
obj = RemoveOperation({
'op': 'remove',
'path': self.operation['from']
}, pointer_cls=self.pointer_cls).apply(obj)
obj = AddOperation({
'op': 'add',
'path': self.location,
'value': value
}, pointer_cls=self.pointer_cls).apply(obj)
return obj
@property
def from_path(self):
from_ptr = self.pointer_cls(self.operation['from'])
return '/'.join(from_ptr.parts[:-1])
@property
def from_key(self):
from_ptr = self.pointer_cls(self.operation['from'])
try:
return int(from_ptr.parts[-1])
        except (TypeError, ValueError):
return from_ptr.parts[-1]
@from_key.setter
def from_key(self, value):
from_ptr = self.pointer_cls(self.operation['from'])
from_ptr.parts[-1] = str(value)
self.operation['from'] = from_ptr.path
def _on_undo_remove(self, path, key):
if self.from_path == path:
if self.from_key >= key:
self.from_key += 1
else:
key -= 1
if self.path == path:
if self.key > key:
self.key += 1
else:
key += 1
return key
def _on_undo_add(self, path, key):
if self.from_path == path:
if self.from_key > key:
self.from_key -= 1
else:
key -= 1
if self.path == path:
if self.key > key:
self.key -= 1
else:
key += 1
return key
class TestOperation(PatchOperation):
"""Test value by specified location."""
def apply(self, obj):
try:
subobj, part = self.pointer.to_last(obj)
if part is None:
val = subobj
else:
val = self.pointer.walk(subobj, part)
except JsonPointerException as ex:
raise JsonPatchTestFailed(str(ex))
try:
value = self.operation['value']
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
if val != value:
msg = '{0} ({1}) is not equal to tested value {2} ({3})'
raise JsonPatchTestFailed(msg.format(val, type(val),
value, type(value)))
return obj
class CopyOperation(PatchOperation):
""" Copies an object property or an array element to a new location """
def apply(self, obj):
try:
from_ptr = self.pointer_cls(self.operation['from'])
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'from' member")
subobj, part = from_ptr.to_last(obj)
try:
value = copy.deepcopy(subobj[part])
except (KeyError, IndexError) as ex:
raise JsonPatchConflict(str(ex))
obj = AddOperation({
'op': 'add',
'path': self.location,
'value': value
}, pointer_cls=self.pointer_cls).apply(obj)
return obj
class JsonPatch(object):
json_dumper = staticmethod(json.dumps)
json_loader = staticmethod(_jsonloads)
operations = MappingProxyType({
'remove': RemoveOperation,
'add': AddOperation,
'replace': ReplaceOperation,
'move': MoveOperation,
'test': TestOperation,
'copy': CopyOperation,
})
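    # Read-only lookup table from RFC 6902 operation names to handler classes;
    # _get_operation() below resolves each entry of a patch through it.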
"""A JSON Patch is a list of Patch Operations.
>>> patch = JsonPatch([
... {'op': 'add', 'path': '/foo', 'value': 'bar'},
... {'op': 'add', 'path': '/baz', 'value': [1, 2, 3]},
... {'op': 'remove', 'path': '/baz/1'},
... {'op': 'test', 'path': '/baz', 'value': [1, 3]},
... {'op': 'replace', 'path': '/baz/0', 'value': 42},
... {'op': 'remove', 'path': '/baz/1'},
... ])
>>> doc = {}
>>> result = patch.apply(doc)
>>> expected = {'foo': 'bar', 'baz': [42]}
>>> result == expected
True
JsonPatch object is iterable, so you can easily access each patch
statement in a loop:
>>> lpatch = list(patch)
>>> expected = {'op': 'add', 'path': '/foo', 'value': 'bar'}
>>> lpatch[0] == expected
True
>>> lpatch == patch.patch
True
Also JsonPatch could be converted directly to :class:`bool` if it contains
any operation statements:
>>> bool(patch)
True
>>> bool(JsonPatch([]))
False
This behavior is very handy with :func:`make_patch` to write more readable
code:
>>> old = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> new = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = make_patch(old, new)
>>> if patch:
    ... # document has changed, do something useful
... patch.apply(old) #doctest: +ELLIPSIS
{...}
"""
def __init__(self, patch, pointer_cls=JsonPointer):
self.patch = patch
self.pointer_cls = pointer_cls
# Verify that the structure of the patch document
# is correct by retrieving each patch element.
# Much of the validation is done in the initializer
# though some is delayed until the patch is applied.
for op in self.patch:
# We're only checking for basestring in the following check
# for two reasons:
#
# - It should come from JSON, which only allows strings as
# dictionary keys, so having a string here unambiguously means
# someone used: {"op": ..., ...} instead of [{"op": ..., ...}].
#
            # - There's no possible false positive: if someone gives a sequence
# of mappings, this won't raise.
if isinstance(op, basestring):
raise InvalidJsonPatch("Document is expected to be sequence of "
"operations, got a sequence of strings.")
self._get_operation(op)
def __str__(self):
"""str(self) -> self.to_string()"""
return self.to_string()
def __bool__(self):
return bool(self.patch)
__nonzero__ = __bool__
def __iter__(self):
return iter(self.patch)
def __hash__(self):
return hash(tuple(self._ops))
def __eq__(self, other):
if not isinstance(other, JsonPatch):
return False
return self._ops == other._ops
def __ne__(self, other):
return not(self == other)
@classmethod
def from_string(cls, patch_str, loads=None, pointer_cls=JsonPointer):
"""Creates JsonPatch instance from string source.
:param patch_str: JSON patch as raw string.
:type patch_str: str
:param loads: A function of one argument that loads a serialized
JSON string.
:type loads: function
:param pointer_cls: JSON pointer class to use.
:type pointer_cls: Type[JsonPointer]
:return: :class:`JsonPatch` instance.
"""
json_loader = loads or cls.json_loader
patch = json_loader(patch_str)
return cls(patch, pointer_cls=pointer_cls)
@classmethod
def from_diff(
cls, src, dst, optimization=True, dumps=None,
pointer_cls=JsonPointer,
):
"""Creates JsonPatch instance based on comparison of two document
objects. Json patch would be created for `src` argument against `dst`
one.
:param src: Data source document object.
:type src: dict
        :param dst: Data target document object.
:type dst: dict
:param dumps: A function of one argument that produces a serialized
JSON string.
:type dumps: function
:param pointer_cls: JSON pointer class to use.
:type pointer_cls: Type[JsonPointer]
:return: :class:`JsonPatch` instance.
>>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = JsonPatch.from_diff(src, dst)
>>> new = patch.apply(src)
>>> new == dst
True
"""
json_dumper = dumps or cls.json_dumper
builder = DiffBuilder(src, dst, json_dumper, pointer_cls=pointer_cls)
builder._compare_values('', None, src, dst)
ops = list(builder.execute())
return cls(ops, pointer_cls=pointer_cls)
def to_string(self, dumps=None):
"""Returns patch set as JSON string."""
json_dumper = dumps or self.json_dumper
return json_dumper(self.patch)
@property
def _ops(self):
return tuple(map(self._get_operation, self.patch))
def apply(self, obj, in_place=False):
"""Applies the patch to a given object.
:param obj: Document object.
:type obj: dict
        :param in_place: Tweaks how the patch is applied - directly to the
        specified `obj` or to a copy of it.
:type in_place: bool
:return: Modified `obj`.
"""
if not in_place:
obj = copy.deepcopy(obj)
for operation in self._ops:
obj = operation.apply(obj)
return obj
def _get_operation(self, operation):
if 'op' not in operation:
raise InvalidJsonPatch("Operation does not contain 'op' member")
op = operation['op']
if not isinstance(op, basestring):
raise InvalidJsonPatch("Operation's op must be a string")
if op not in self.operations:
raise InvalidJsonPatch("Unknown operation {0!r}".format(op))
cls = self.operations[op]
return cls(operation, pointer_cls=self.pointer_cls)
class DiffBuilder(object):
def __init__(self, src_doc, dst_doc, dumps=json.dumps, pointer_cls=JsonPointer):
self.dumps = dumps
self.pointer_cls = pointer_cls
self.index_storage = [{}, {}]
self.index_storage2 = [[], []]
self.__root = root = []
self.src_doc = src_doc
self.dst_doc = dst_doc
root[:] = [root, root, None]
def store_index(self, value, index, st):
typed_key = (value, type(value))
try:
storage = self.index_storage[st]
stored = storage.get(typed_key)
if stored is None:
storage[typed_key] = [index]
else:
storage[typed_key].append(index)
except TypeError:
self.index_storage2[st].append((typed_key, index))
def take_index(self, value, st):
typed_key = (value, type(value))
try:
stored = self.index_storage[st].get(typed_key)
if stored:
return stored.pop()
except TypeError:
storage = self.index_storage2[st]
for i in range(len(storage)-1, -1, -1):
if storage[i][0] == typed_key:
return storage.pop(i)[1]
def insert(self, op):
root = self.__root
last = root[0]
last[1] = root[0] = [last, root, op]
return root[0]
def remove(self, index):
link_prev, link_next, _ = index
link_prev[1] = link_next
link_next[0] = link_prev
index[:] = []
def iter_from(self, start):
root = self.__root
curr = start[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __iter__(self):
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def execute(self):
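        # Emit the accumulated operations in order, folding an adjacent
        # remove followed by an add on the same location into a single
        # replace operation.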
root = self.__root
curr = root[1]
while curr is not root:
if curr[1] is not root:
op_first, op_second = curr[2], curr[1][2]
if op_first.location == op_second.location and \
type(op_first) == RemoveOperation and \
type(op_second) == AddOperation:
yield ReplaceOperation({
'op': 'replace',
'path': op_second.location,
'value': op_second.operation['value'],
}, pointer_cls=self.pointer_cls).operation
curr = curr[1][1]
continue
yield curr[2].operation
curr = curr[1]
def _item_added(self, path, key, item):
index = self.take_index(item, _ST_REMOVE)
if index is not None:
op = index[2]
if type(op.key) == int and type(key) == int:
for v in self.iter_from(index):
op.key = v._on_undo_remove(op.path, op.key)
self.remove(index)
if op.location != _path_join(path, key):
new_op = MoveOperation({
'op': 'move',
'from': op.location,
'path': _path_join(path, key),
}, pointer_cls=self.pointer_cls)
self.insert(new_op)
else:
new_op = AddOperation({
'op': 'add',
'path': _path_join(path, key),
'value': item,
}, pointer_cls=self.pointer_cls)
new_index = self.insert(new_op)
self.store_index(item, new_index, _ST_ADD)
def _item_removed(self, path, key, item):
new_op = RemoveOperation({
'op': 'remove',
'path': _path_join(path, key),
}, pointer_cls=self.pointer_cls)
index = self.take_index(item, _ST_ADD)
new_index = self.insert(new_op)
if index is not None:
op = index[2]
# We can't rely on the op.key type since PatchOperation casts
# the .key property to int and this path wrongly ends up being taken
# for numeric string dict keys while the intention is to only handle lists.
# So we do an explicit check on the item affected by the op instead.
added_item = op.pointer.to_last(self.dst_doc)[0]
if type(added_item) == list:
for v in self.iter_from(index):
op.key = v._on_undo_add(op.path, op.key)
self.remove(index)
if new_op.location != op.location:
new_op = MoveOperation({
'op': 'move',
'from': new_op.location,
'path': op.location,
}, pointer_cls=self.pointer_cls)
new_index[2] = new_op
else:
self.remove(new_index)
else:
self.store_index(item, new_index, _ST_REMOVE)
def _item_replaced(self, path, key, item):
self.insert(ReplaceOperation({
'op': 'replace',
'path': _path_join(path, key),
'value': item,
}, pointer_cls=self.pointer_cls))
def _compare_dicts(self, path, src, dst):
src_keys = set(src.keys())
dst_keys = set(dst.keys())
added_keys = dst_keys - src_keys
removed_keys = src_keys - dst_keys
for key in removed_keys:
self._item_removed(path, str(key), src[key])
for key in added_keys:
self._item_added(path, str(key), dst[key])
for key in src_keys & dst_keys:
self._compare_values(path, key, src[key], dst[key])
def _compare_lists(self, path, src, dst):
len_src, len_dst = len(src), len(dst)
max_len = max(len_src, len_dst)
min_len = min(len_src, len_dst)
for key in range(max_len):
if key < min_len:
old, new = src[key], dst[key]
if old == new:
continue
elif isinstance(old, MutableMapping) and \
isinstance(new, MutableMapping):
self._compare_dicts(_path_join(path, key), old, new)
elif isinstance(old, MutableSequence) and \
isinstance(new, MutableSequence):
self._compare_lists(_path_join(path, key), old, new)
else:
self._item_removed(path, key, old)
self._item_added(path, key, new)
elif len_src > len_dst:
self._item_removed(path, len_dst, src[key])
else:
self._item_added(path, key, dst[key])
def _compare_values(self, path, key, src, dst):
if isinstance(src, MutableMapping) and \
isinstance(dst, MutableMapping):
self._compare_dicts(_path_join(path, key), src, dst)
elif isinstance(src, MutableSequence) and \
isinstance(dst, MutableSequence):
self._compare_lists(_path_join(path, key), src, dst)
# To ensure we catch changes to JSON, we can't rely on a simple
# src == dst, because it would not recognize the difference between
# 1 and True, among other things. Using json.dumps is the most
# fool-proof way to ensure we catch type changes that matter to JSON
# and ignore those that don't. The performance of this could be
# improved by doing more direct type checks, but we'd need to be
# careful to accept type changes that don't matter when JSONified.
elif self.dumps(src) == self.dumps(dst):
return
else:
self._item_replaced(path, key, dst)
def _path_join(path, key):
if key is None:
return path
return path + '/' + str(key).replace('~', '~0').replace('/', '~1')
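# A minimal illustrative check of the RFC 6901 escaping performed by
# _path_join above; the sample paths and keys below are made-up values.
if __name__ == '__main__':
    # '~' inside a key becomes '~0' and '/' becomes '~1', per JSON Pointer rules
    assert _path_join('/config', 'a/b') == '/config/a~1b'
    assert _path_join('/config', 'x~y') == '/config/x~0y'
    # a None key leaves the path unchanged
    assert _path_join('/config', None) == '/config'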
|
d870291877029e7f05fc790fb3d5e65a4333082a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-hss/huaweicloudsdkhss/v5/model/show_asset_statistic_response.py
|
c1f6ff8240ed33a099af611e616023c99546d435
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,544
|
py
|
show_asset_statistic_response.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetStatisticResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'account_num': 'int',
'port_num': 'int',
'process_num': 'int',
'app_num': 'int',
'auto_launch_num': 'int',
'web_framework_num': 'int',
'web_site_num': 'int',
'jar_package_num': 'int',
'kernel_module_num': 'int'
}
attribute_map = {
'account_num': 'account_num',
'port_num': 'port_num',
'process_num': 'process_num',
'app_num': 'app_num',
'auto_launch_num': 'auto_launch_num',
'web_framework_num': 'web_framework_num',
'web_site_num': 'web_site_num',
'jar_package_num': 'jar_package_num',
'kernel_module_num': 'kernel_module_num'
}
def __init__(self, account_num=None, port_num=None, process_num=None, app_num=None, auto_launch_num=None, web_framework_num=None, web_site_num=None, jar_package_num=None, kernel_module_num=None):
"""ShowAssetStatisticResponse
The model defined in huaweicloud sdk
:param account_num: Number of accounts
:type account_num: int
:param port_num: Number of open ports
:type port_num: int
:param process_num: Number of processes
:type process_num: int
:param app_num: Number of software applications
:type app_num: int
:param auto_launch_num: Number of auto-startup items
:type auto_launch_num: int
:param web_framework_num: Number of web frameworks
:type web_framework_num: int
:param web_site_num: Number of web sites
:type web_site_num: int
:param jar_package_num: Number of JAR packages
:type jar_package_num: int
:param kernel_module_num: Number of kernel modules
:type kernel_module_num: int
"""
super(ShowAssetStatisticResponse, self).__init__()
self._account_num = None
self._port_num = None
self._process_num = None
self._app_num = None
self._auto_launch_num = None
self._web_framework_num = None
self._web_site_num = None
self._jar_package_num = None
self._kernel_module_num = None
self.discriminator = None
if account_num is not None:
self.account_num = account_num
if port_num is not None:
self.port_num = port_num
if process_num is not None:
self.process_num = process_num
if app_num is not None:
self.app_num = app_num
if auto_launch_num is not None:
self.auto_launch_num = auto_launch_num
if web_framework_num is not None:
self.web_framework_num = web_framework_num
if web_site_num is not None:
self.web_site_num = web_site_num
if jar_package_num is not None:
self.jar_package_num = jar_package_num
if kernel_module_num is not None:
self.kernel_module_num = kernel_module_num
@property
def account_num(self):
"""Gets the account_num of this ShowAssetStatisticResponse.
Number of accounts
:return: The account_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._account_num
@account_num.setter
def account_num(self, account_num):
"""Sets the account_num of this ShowAssetStatisticResponse.
Number of accounts
:param account_num: The account_num of this ShowAssetStatisticResponse.
:type account_num: int
"""
self._account_num = account_num
@property
def port_num(self):
"""Gets the port_num of this ShowAssetStatisticResponse.
Number of open ports
:return: The port_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._port_num
@port_num.setter
def port_num(self, port_num):
"""Sets the port_num of this ShowAssetStatisticResponse.
Number of open ports
:param port_num: The port_num of this ShowAssetStatisticResponse.
:type port_num: int
"""
self._port_num = port_num
@property
def process_num(self):
"""Gets the process_num of this ShowAssetStatisticResponse.
Number of processes
:return: The process_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._process_num
@process_num.setter
def process_num(self, process_num):
"""Sets the process_num of this ShowAssetStatisticResponse.
Number of processes
:param process_num: The process_num of this ShowAssetStatisticResponse.
:type process_num: int
"""
self._process_num = process_num
@property
def app_num(self):
"""Gets the app_num of this ShowAssetStatisticResponse.
Number of software applications
:return: The app_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._app_num
@app_num.setter
def app_num(self, app_num):
"""Sets the app_num of this ShowAssetStatisticResponse.
Number of software applications
:param app_num: The app_num of this ShowAssetStatisticResponse.
:type app_num: int
"""
self._app_num = app_num
@property
def auto_launch_num(self):
"""Gets the auto_launch_num of this ShowAssetStatisticResponse.
Number of auto-startup items
:return: The auto_launch_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._auto_launch_num
@auto_launch_num.setter
def auto_launch_num(self, auto_launch_num):
"""Sets the auto_launch_num of this ShowAssetStatisticResponse.
Number of auto-startup items
:param auto_launch_num: The auto_launch_num of this ShowAssetStatisticResponse.
:type auto_launch_num: int
"""
self._auto_launch_num = auto_launch_num
@property
def web_framework_num(self):
"""Gets the web_framework_num of this ShowAssetStatisticResponse.
Number of web frameworks
:return: The web_framework_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._web_framework_num
@web_framework_num.setter
def web_framework_num(self, web_framework_num):
"""Sets the web_framework_num of this ShowAssetStatisticResponse.
Number of web frameworks
:param web_framework_num: The web_framework_num of this ShowAssetStatisticResponse.
:type web_framework_num: int
"""
self._web_framework_num = web_framework_num
@property
def web_site_num(self):
"""Gets the web_site_num of this ShowAssetStatisticResponse.
Number of web sites
:return: The web_site_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._web_site_num
@web_site_num.setter
def web_site_num(self, web_site_num):
"""Sets the web_site_num of this ShowAssetStatisticResponse.
Number of web sites
:param web_site_num: The web_site_num of this ShowAssetStatisticResponse.
:type web_site_num: int
"""
self._web_site_num = web_site_num
@property
def jar_package_num(self):
"""Gets the jar_package_num of this ShowAssetStatisticResponse.
Number of JAR packages
:return: The jar_package_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._jar_package_num
@jar_package_num.setter
def jar_package_num(self, jar_package_num):
"""Sets the jar_package_num of this ShowAssetStatisticResponse.
Number of JAR packages
:param jar_package_num: The jar_package_num of this ShowAssetStatisticResponse.
:type jar_package_num: int
"""
self._jar_package_num = jar_package_num
@property
def kernel_module_num(self):
"""Gets the kernel_module_num of this ShowAssetStatisticResponse.
Number of kernel modules
:return: The kernel_module_num of this ShowAssetStatisticResponse.
:rtype: int
"""
return self._kernel_module_num
@kernel_module_num.setter
def kernel_module_num(self, kernel_module_num):
"""Sets the kernel_module_num of this ShowAssetStatisticResponse.
Number of kernel modules
:param kernel_module_num: The kernel_module_num of this ShowAssetStatisticResponse.
:type kernel_module_num: int
"""
self._kernel_module_num = kernel_module_num
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowAssetStatisticResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
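# A minimal usage sketch: constructing the model directly and serializing it
# with to_dict(); the counter values below are made-up examples (in normal use
# the SDK builds this object from the ShowAssetStatistic API response).
if __name__ == "__main__":
    resp = ShowAssetStatisticResponse(account_num=12, port_num=48, process_num=230)
    print(resp.to_dict())  # counters that were never set serialize as None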
|
1587e9609833149d1bf62ded35ec5a7384040c06
|
10d495c139f6556d27f8dfe34c4ee348b1803b64
|
/src/snowflake/connector/s3_storage_client.py
|
0df5fd84f18bc24623331f53f0f67b4f05e6ea77
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
snowflakedb/snowflake-connector-python
|
a5544f53d68a7a8d9be80ddf6e3cd34412610e8b
|
da1ae4ed1e940e4210348c59c9c660ebaa78fc2e
|
refs/heads/main
| 2023-09-01T19:50:39.864458
| 2023-08-31T20:06:56
| 2023-08-31T20:06:56
| 62,262,074
| 492
| 494
|
Apache-2.0
| 2023-09-14T10:02:00
| 2016-06-29T22:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 21,830
|
py
|
s3_storage_client.py
|
#
# Copyright (c) 2012-2023 Snowflake Computing Inc. All rights reserved.
#
from __future__ import annotations
import binascii
import re
import xml.etree.ElementTree as ET
from datetime import datetime
from io import IOBase
from logging import getLogger
from operator import itemgetter
from typing import TYPE_CHECKING, Any, NamedTuple
from cryptography.hazmat.primitives import hashes, hmac
from .compat import quote, urlparse
from .constants import (
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_VALUE_OCTET_STREAM,
FileHeader,
ResultStatus,
)
from .encryption_util import EncryptionMetadata
from .storage_client import SnowflakeStorageClient, remove_content_encoding
from .vendored import requests
if TYPE_CHECKING: # pragma: no cover
from .file_transfer_agent import SnowflakeFileMeta, StorageCredential
logger = getLogger(__name__)
META_PREFIX = "x-amz-meta-"
SFC_DIGEST = "sfc-digest"
AMZ_MATDESC = "x-amz-matdesc"
AMZ_KEY = "x-amz-key"
AMZ_IV = "x-amz-iv"
ERRORNO_WSAECONNABORTED = 10053 # network connection was aborted
EXPIRED_TOKEN = "ExpiredToken"
ADDRESSING_STYLE = "virtual" # explicit force to use virtual addressing style
UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
RE_MULTIPLE_SPACES = re.compile(r" +")
class S3Location(NamedTuple):
bucket_name: str
path: str
class SnowflakeS3RestClient(SnowflakeStorageClient):
def __init__(
self,
meta: SnowflakeFileMeta,
credentials: StorageCredential,
stage_info: dict[str, Any],
chunk_size: int,
use_accelerate_endpoint: bool | None = None,
use_s3_regional_url: bool = False,
) -> None:
"""Rest client for S3 storage.
Args:
stage_info:
"""
super().__init__(meta, stage_info, chunk_size, credentials=credentials)
# Signature version V4
# Addressing style Virtual Host
self.region_name: str = stage_info["region"]
# Multipart upload only
self.upload_id: str | None = None
self.etags: list[str] | None = None
self.s3location: S3Location = (
SnowflakeS3RestClient._extract_bucket_name_and_path(
self.stage_info["location"]
)
)
self.use_s3_regional_url = use_s3_regional_url
# if GS sends us an endpoint, it's likely for FIPS. Use it.
self.endpoint: str | None = None
if stage_info["endPoint"]:
self.endpoint = (
f"https://{self.s3location.bucket_name}." + stage_info["endPoint"]
)
self.transfer_accelerate_config(use_accelerate_endpoint)
def transfer_accelerate_config(
self, use_accelerate_endpoint: bool | None = None
) -> bool:
# if self.endpoint has been set, e.g. by metadata, no more config is needed.
if self.endpoint is not None:
return self.endpoint.find("s3-accelerate.amazonaws.com") >= 0
if self.use_s3_regional_url:
self.endpoint = (
f"https://{self.s3location.bucket_name}."
f"s3.{self.region_name}.amazonaws.com"
)
return False
else:
if use_accelerate_endpoint is None:
use_accelerate_endpoint = self._get_bucket_accelerate_config(
self.s3location.bucket_name
)
if use_accelerate_endpoint:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3-accelerate.amazonaws.com"
)
else:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3.amazonaws.com"
)
return use_accelerate_endpoint
@staticmethod
def _sign_bytes(secret_key: bytes, _input: str) -> bytes:
"""Applies HMAC-SHA-256 to given string with secret_key."""
h = hmac.HMAC(secret_key, hashes.SHA256())
h.update(_input.encode("utf-8"))
return h.finalize()
@staticmethod
def _sign_bytes_hex(secret_key: bytes, _input: str) -> bytes:
"""Convenience function, same as _sign_bytes, but returns result in hex form."""
return binascii.hexlify(SnowflakeS3RestClient._sign_bytes(secret_key, _input))
@staticmethod
def _hash_bytes(_input: bytes) -> bytes:
"""Applies SHA-256 hash to given bytes."""
digest = hashes.Hash(hashes.SHA256())
digest.update(_input)
return digest.finalize()
@staticmethod
def _hash_bytes_hex(_input: bytes) -> bytes:
"""Convenience function, same as _hash_bytes, but returns result in hex form."""
return binascii.hexlify(SnowflakeS3RestClient._hash_bytes(_input))
@staticmethod
def _construct_query_string(
query_parts: tuple[tuple[str, str], ...],
) -> str:
"""Convenience function to build the query part of a URL from key-value pairs.
It filters out empty strings from the key, value pairs.
"""
return "&".join(["=".join(filter(bool, e)) for e in query_parts])
@staticmethod
def _construct_canonicalized_and_signed_headers(
headers: dict[str, str | list[str]]
) -> tuple[str, str]:
"""Construct canonical headers as per AWS specs, returns the signed headers too.
Does not support sorting by value when keys are identical; do not send
duplicate keys, although this cannot happen with a dictionary anyway.
"""
res = []
low_key_dict = {k.lower(): v for k, v in headers.items()}
sorted_headers = sorted(low_key_dict.keys())
_res = [(k, low_key_dict[k]) for k in sorted_headers]
for k, v in _res:
# if value is a list, convert to string delimited by comma
if isinstance(v, list):
v = ",".join(v)
# if the header spans multiple lines, replace newlines with spaces
k = k.replace("\n", " ")
res.append(k.strip() + ":" + RE_MULTIPLE_SPACES.sub(" ", v.strip()))
ans = "\n".join(res)
if ans:
ans += "\n"
return ans, ";".join(sorted_headers)
@staticmethod
def _construct_canonical_request_and_signed_headers(
verb: str,
canonical_uri_parameter: str,
query_parts: dict[str, str],
canonical_headers: dict[str, str | list[str]] | None = None,
payload_hash: str = "",
) -> tuple[str, str]:
"""Build canonical request and also return signed headers.
Note: this doesn't support sorting by values in case the same key is given
more than once, but doing this is also not possible with a dictionary.
"""
canonical_query_string = "&".join(
"=".join([k, v]) for k, v in sorted(query_parts.items(), key=itemgetter(0))
)
(
canonical_headers,
signed_headers,
) = SnowflakeS3RestClient._construct_canonicalized_and_signed_headers(
canonical_headers
)
return (
"\n".join(
[
verb,
canonical_uri_parameter or "/",
canonical_query_string,
canonical_headers,
signed_headers,
payload_hash,
]
),
signed_headers,
)
@staticmethod
def _construct_string_to_sign(
region_name: str,
service_name: str,
amzdate: str,
short_amzdate: str,
canonical_request_hash: bytes,
) -> tuple[str, str]:
"""Given all the necessary information construct a V4 string to sign.
As per AWS specs it requires the scope, the hash of the canonical request and
the current date in the following format: YYYYMMDDTHHMMSSZ where T and Z are
constant characters.
This function generates the scope from short_amzdate (which is just the date
portion of amzdate), the region name and the service we want to use (this is only s3
in our case).
"""
scope = f"{short_amzdate}/{region_name}/{service_name}/aws4_request"
return (
"\n".join(
[
"AWS4-HMAC-SHA256",
amzdate,
scope,
canonical_request_hash.decode("utf-8"),
]
),
scope,
)
def _has_expired_token(self, response: requests.Response) -> bool:
"""Extract error code and error message from the S3's error response.
Expected format:
https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
Args:
response: Rest error response in XML format
Returns: True if the error response is caused by token expiration
"""
if response.status_code != 400:
return False
message = response.text
if not message or message.isspace():
return False
err = ET.fromstring(message)
return err.find("Code").text == EXPIRED_TOKEN
@staticmethod
def _extract_bucket_name_and_path(stage_location) -> S3Location:
# split stage location as bucket name and path
bucket_name, _, path = stage_location.partition("/")
if path and not path.endswith("/"):
path += "/"
return S3Location(bucket_name=bucket_name, path=path)
def _send_request_with_authentication_and_retry(
self,
url: str,
verb: str,
retry_id: int | str,
query_parts: dict[str, str] | None = None,
x_amz_headers: dict[str, str] | None = None,
headers: dict[str, str] | None = None,
payload: bytes | bytearray | IOBase | None = None,
unsigned_payload: bool = False,
ignore_content_encoding: bool = False,
) -> requests.Response:
if x_amz_headers is None:
x_amz_headers = {}
if headers is None:
headers = {}
if payload is None:
payload = b""
if query_parts is None:
query_parts = {}
parsed_url = urlparse(url)
x_amz_headers["x-amz-security-token"] = self.credentials.creds.get(
"AWS_TOKEN", ""
)
x_amz_headers["host"] = parsed_url.hostname
if unsigned_payload:
x_amz_headers["x-amz-content-sha256"] = UNSIGNED_PAYLOAD
else:
x_amz_headers["x-amz-content-sha256"] = (
SnowflakeS3RestClient._hash_bytes_hex(payload).lower().decode()
)
def generate_authenticated_url_and_args_v4() -> tuple[bytes, dict[str, bytes]]:
t = datetime.utcnow()
amzdate = t.strftime("%Y%m%dT%H%M%SZ")
short_amzdate = amzdate[:8]
x_amz_headers["x-amz-date"] = amzdate
(
canonical_request,
signed_headers,
) = self._construct_canonical_request_and_signed_headers(
verb=verb,
canonical_uri_parameter=parsed_url.path
+ (f";{parsed_url.params}" if parsed_url.params else ""),
query_parts=query_parts,
canonical_headers=x_amz_headers,
payload_hash=x_amz_headers["x-amz-content-sha256"],
)
string_to_sign, scope = self._construct_string_to_sign(
self.region_name,
"s3",
amzdate,
short_amzdate,
self._hash_bytes_hex(canonical_request.encode("utf-8")).lower(),
)
kDate = self._sign_bytes(
("AWS4" + self.credentials.creds["AWS_SECRET_KEY"]).encode("utf-8"),
short_amzdate,
)
kRegion = self._sign_bytes(kDate, self.region_name)
kService = self._sign_bytes(kRegion, "s3")
signing_key = self._sign_bytes(kService, "aws4_request")
signature = self._sign_bytes_hex(signing_key, string_to_sign).lower()
authorization_header = (
"AWS4-HMAC-SHA256 "
+ f"Credential={self.credentials.creds['AWS_KEY_ID']}/{scope}, "
+ f"SignedHeaders={signed_headers}, "
+ f"Signature={signature.decode('utf-8')}"
)
headers.update(x_amz_headers)
headers["Authorization"] = authorization_header
rest_args = {"headers": headers}
if payload:
rest_args["data"] = payload
# add customized hook: to remove content-encoding from response.
if ignore_content_encoding:
rest_args["hooks"] = {"response": remove_content_encoding}
return url.encode("utf-8"), rest_args
return self._send_request_with_retry(
verb, generate_authenticated_url_and_args_v4, retry_id
)
def get_file_header(self, filename: str) -> FileHeader | None:
"""Gets the metadata of file in specified location.
Args:
filename: Name of remote file.
Returns:
None if HEAD returns 404, otherwise a FileHeader instance populated
with metadata
"""
path = quote(self.s3location.path + filename.lstrip("/"))
url = self.endpoint + f"/{path}"
retry_id = "HEAD"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url=url, verb="HEAD", retry_id=retry_id
)
if response.status_code == 200:
self.meta.result_status = ResultStatus.UPLOADED
metadata = response.headers
encryption_metadata = (
EncryptionMetadata(
key=metadata.get(META_PREFIX + AMZ_KEY),
iv=metadata.get(META_PREFIX + AMZ_IV),
matdesc=metadata.get(META_PREFIX + AMZ_MATDESC),
)
if metadata.get(META_PREFIX + AMZ_KEY)
else None
)
return FileHeader(
digest=metadata.get(META_PREFIX + SFC_DIGEST),
content_length=int(metadata.get("Content-Length")),
encryption_metadata=encryption_metadata,
)
elif response.status_code == 404:
logger.debug(
f"not found. bucket: {self.s3location.bucket_name}, path: {path}"
)
self.meta.result_status = ResultStatus.NOT_FOUND_FILE
return None
else:
response.raise_for_status()
def _prepare_file_metadata(self) -> dict[str, Any]:
"""Construct metadata for a file to be uploaded.
Returns: File metadata in a dict.
"""
s3_metadata = {
META_PREFIX + SFC_DIGEST: self.meta.sha256_digest,
}
if self.encryption_metadata:
s3_metadata.update(
{
META_PREFIX + AMZ_IV: self.encryption_metadata.iv,
META_PREFIX + AMZ_KEY: self.encryption_metadata.key,
META_PREFIX + AMZ_MATDESC: self.encryption_metadata.matdesc,
}
)
return s3_metadata
def _initiate_multipart_upload(self) -> None:
query_parts = (("uploads", ""),)
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
query_string = self._construct_query_string(query_parts)
url = self.endpoint + f"/{path}?{query_string}"
s3_metadata = self._prepare_file_metadata()
# initiate multipart upload
retry_id = "Initiate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url=url,
verb="POST",
retry_id=retry_id,
x_amz_headers=s3_metadata,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
query_parts=dict(query_parts),
)
if response.status_code == 200:
self.upload_id = ET.fromstring(response.content)[2].text
self.etags = [None] * self.num_of_chunks
else:
response.raise_for_status()
def _upload_chunk(self, chunk_id: int, chunk: bytes) -> None:
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
if self.num_of_chunks == 1: # single request
s3_metadata = self._prepare_file_metadata()
response = self._send_request_with_authentication_and_retry(
url=url,
verb="PUT",
retry_id=chunk_id,
payload=chunk,
x_amz_headers=s3_metadata,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
unsigned_payload=True,
)
response.raise_for_status()
else:
# multipart PUT
query_parts = (
("partNumber", str(chunk_id + 1)),
("uploadId", self.upload_id),
)
query_string = self._construct_query_string(query_parts)
chunk_url = f"{url}?{query_string}"
response = self._send_request_with_authentication_and_retry(
url=chunk_url,
verb="PUT",
retry_id=chunk_id,
payload=chunk,
unsigned_payload=True,
query_parts=dict(query_parts),
)
if response.status_code == 200:
self.etags[chunk_id] = response.headers["ETag"]
response.raise_for_status()
def _complete_multipart_upload(self) -> None:
query_parts = (("uploadId", self.upload_id),)
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
query_string = self._construct_query_string(query_parts)
url = self.endpoint + f"/{path}?{query_string}"
logger.debug("Initiating multipart upload complete")
# Complete multipart upload
root = ET.Element("CompleteMultipartUpload")
for idx, etag_str in enumerate(self.etags):
part = ET.Element("Part")
etag = ET.Element("ETag")
etag.text = etag_str
part.append(etag)
part_number = ET.Element("PartNumber")
part_number.text = str(idx + 1)
part.append(part_number)
root.append(part)
retry_id = "Complete"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url=url,
verb="POST",
retry_id=retry_id,
payload=ET.tostring(root),
query_parts=dict(query_parts),
)
response.raise_for_status()
def _abort_multipart_upload(self) -> None:
if self.upload_id is None:
return
query_parts = (("uploadId", self.upload_id),)
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
query_string = self._construct_query_string(query_parts)
url = self.endpoint + f"/{path}?{query_string}"
retry_id = "Abort"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url=url,
verb="DELETE",
retry_id=retry_id,
query_parts=dict(query_parts),
)
response.raise_for_status()
def download_chunk(self, chunk_id: int) -> None:
logger.debug(f"Downloading chunk {chunk_id}")
path = quote(self.s3location.path + self.meta.src_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
if self.num_of_chunks == 1:
response = self._send_request_with_authentication_and_retry(
url=url,
verb="GET",
retry_id=chunk_id,
ignore_content_encoding=True,
)
if response.status_code == 200:
self.write_downloaded_chunk(0, response.content)
self.meta.result_status = ResultStatus.DOWNLOADED
response.raise_for_status()
else:
chunk_size = self.chunk_size
if chunk_id < self.num_of_chunks - 1:
_range = f"{chunk_id * chunk_size}-{(chunk_id+1)*chunk_size-1}"
else:
_range = f"{chunk_id * chunk_size}-"
response = self._send_request_with_authentication_and_retry(
url=url,
verb="GET",
retry_id=chunk_id,
headers={"Range": f"bytes={_range}"},
)
if response.status_code in (200, 206):
self.write_downloaded_chunk(chunk_id, response.content)
response.raise_for_status()
def _get_bucket_accelerate_config(self, bucket_name: str) -> bool:
query_parts = (("accelerate", ""),)
query_string = self._construct_query_string(query_parts)
url = f"https://{bucket_name}.s3.amazonaws.com/?{query_string}"
retry_id = "accelerate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url=url, verb="GET", retry_id=retry_id, query_parts=dict(query_parts)
)
if response.status_code == 200:
config = ET.fromstring(response.text)
namespace = config.tag[: config.tag.index("}") + 1]
statusTag = f"{namespace}Status"
found = config.find(statusTag)
use_accelerate_endpoint = (
False if found is None else (found.text == "Enabled")
)
logger.debug(f"use_accelerate_endpoint: {use_accelerate_endpoint}")
return use_accelerate_endpoint
return False
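# A standalone sketch of the AWS Signature V4 signing-key derivation chain used
# above (kDate -> kRegion -> kService -> signing key), written with the standard
# library's hmac/hashlib instead of `cryptography`; the secret key, date and
# region below are made-up placeholder values.
if __name__ == "__main__":
    import hashlib
    import hmac as _hmac

    def _sign(key: bytes, msg: str) -> bytes:
        return _hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

    secret_key = "EXAMPLEKEYEXAMPLEKEYEXAMPLEKEY"  # placeholder secret access key
    short_amzdate = "20230831"  # date portion of the x-amz-date header
    k_date = _sign(("AWS4" + secret_key).encode("utf-8"), short_amzdate)
    k_region = _sign(k_date, "us-west-2")
    k_service = _sign(k_region, "s3")
    signing_key = _sign(k_service, "aws4_request")
    print(binascii.hexlify(signing_key).decode())  # binascii is imported at module top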
|
78e98e0876df8887e9f44d01e07a6660bfa2ed8d
|
378c63f88e266b35aa07f2691de768bd55f12637
|
/godmode/database/base.py
|
eaf1dbe6d7506c0fcda4f3377f17386379c7d9b7
|
[
"WTFPL"
] |
permissive
|
vas3k/GodMode2
|
a55ba1a177e48743eb36ae808e1238858a96dab9
|
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
|
refs/heads/public
| 2023-05-12T00:27:34.315970
| 2021-09-08T12:05:52
| 2021-09-08T12:05:52
| 63,632,563
| 291
| 22
|
WTFPL
| 2023-05-01T20:52:43
| 2016-07-18T19:52:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 786
|
py
|
base.py
|
import hashlib
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
import settings
class BaseDatabase:
def __init__(self, dsn: str, **kwargs):
self.dsn = dsn
self.engine = sa.create_engine(
dsn,
poolclass=QueuePool,
pool_size=10,
max_overflow=10,
pool_recycle=3600,
echo=settings.SQL_DEBUG,
**kwargs
)
self.TableBase = declarative_base(bind=self.engine) # pylint: disable=invalid-name
self.metadata = self.TableBase.metadata
self.session = sessionmaker(bind=self.engine)()
self.hash = hashlib.sha256(self.dsn.encode()).hexdigest()
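# A minimal usage sketch, assuming a `settings` module with SQL_DEBUG is
# importable (as required above); the SQLite DSN is just an example value.
if __name__ == "__main__":
    db = BaseDatabase("sqlite:///godmode_example.db")
    print(db.hash)  # sha256 hex digest of the DSN, used to identify this connection
    print(db.session.execute(sa.text("SELECT 1")).scalar())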
|
08ee3ae89ed9cc9e57af7d0d9432f148cfe3fe8f
|
dcd772f567ef8a8a1173a9f437cd68f211fb9362
|
/ravenframework/JobHandler.py
|
b851deff43f2d1e82a66546aa4bf37a757e2524c
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
idaholab/raven
|
39cdce98ad916c638399232cdc01a9be00e200a2
|
2b16e7aa3325fe84cab2477947a951414c635381
|
refs/heads/devel
| 2023-08-31T08:40:16.653099
| 2023-08-29T16:21:51
| 2023-08-29T16:21:51
| 85,989,537
| 201
| 126
|
Apache-2.0
| 2023-09-13T21:55:43
| 2017-03-23T19:29:27
|
C++
|
UTF-8
|
Python
| false
| false
| 54,365
|
py
|
JobHandler.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 5, 2013
@author: alfoa, cogljj, crisr
"""
import time
import collections
import os
import copy
import sys
import threading
from random import randint
import socket
import re
from .utils import importerUtils as im
from .utils import utils
from .utils.utils import ParallelLibEnum
from .BaseClasses import BaseType
from . import Runners
from . import Models
# for internal parallel
_rayAvail = im.isLibAvail("ray")
_daskAvail = im.isLibAvail("dask")
if _daskAvail:
import dask
import dask.distributed
if _rayAvail:
import ray
# end internal parallel module
# Internal Modules End-----------------------------------------------------------
# FIXME: Finished jobs can bog down the queue waiting for other objects to take
# them away. Can we shove them onto a different list and free up the job queue?
class JobHandler(BaseType):
"""
JobHandler class. This handles the execution of any job in the RAVEN
framework
"""
def __init__(self):
"""
Init method
@ In, None
@ Out, None
"""
super().__init__()
self.printTag = 'Job Handler' # Print tag of this object
self.runInfoDict = {} # Container of the running info (RunInfo block in the input file)
self.__isDistributedInitialized = False # Is Ray or Dask Initialized?
self._server = None # Variable containing the info about the RAY or DASK parallel server.
# If None, multi-threading is used
self.sleepTime = 1e-4 # Sleep time for collecting/inquiring/submitting new jobs
self.completed = False # Is the execution completed? When True, the JobHandler is shut down
self.__profileJobs = False # Determines whether to collect and print job timing summaries at the end of job runs.
self.maxQueueSize = None # Prevents the pending queue from growing indefinitely, while still
# allowing extra jobs to be queued to prevent starving
# parallelized environments of jobs.
############################################################################
# The following variables are protected by the __queueLock
# Placeholders for each actively running job. When a job finishes, its
# spot in one of these lists will be reset to None and the next Runner will
# be placed in a free None spot, and set to start
self.__running = []
self.__clientRunning = []
# Queue of jobs to be run, when something on the list above opens up, the
# corresponding queue will pop a job (Runner) and put it into that location
# and set it to start
self.__queue = collections.deque()
self.__clientQueue = collections.deque()
# A counter used for uniquely identifying the next id for an ExternalRunner
# InternalRunners will increment this counter, but do not use it currently
self.__nextId = 0
# List of finished jobs. When a job finishes, it is placed here until
# something from the main thread can remove them.
self.__finished = []
# End block of __queueLock protected variables
############################################################################
self.__queueLock = threading.RLock()
# List of submitted job identifiers, includes jobs that have completed as
# this list is not cleared until a new step is entered
self.__submittedJobs = []
# Dict of failed jobs of the form { identifier: metadata }
self.__failedJobs = {}
# Dict containing info about batching
self.__batching = collections.defaultdict()
self.rayInstanciatedOutside = None
self.daskInstanciatedOutside = None
self.remoteServers = None
self.daskSchedulerFile = None
self._daskScheduler = None
def __getstate__(self):
"""
This function return the state of the JobHandler
@ In, None
@ Out, state, dict, it contains all the information needed by the ROM to be initialized
"""
state = copy.copy(self.__dict__)
state.pop('_JobHandler__queueLock')
#XXX we probably need to record how this was init, and store that
# such as the scheduler file
if self._parallelLib == ParallelLibEnum.dask and '_server' in state:
state.pop('_server')
return state
def __setstate__(self, d):
"""
Initialize the JobHandler with the data contained in newstate
@ In, d, dict, it contains all the information needed by the JobHandler to be initialized
@ Out, None
"""
self.__dict__.update(d)
self.__queueLock = threading.RLock()
def applyRunInfo(self, runInfo):
"""
Allows access to the RunInfo data
@ In, runInfo, dict, info from RunInfo
@ Out, None
"""
self.runInfoDict = runInfo
def initialize(self):
"""
Method to initialize the JobHandler
@ In, None
@ Out, None
"""
# set the maximum queue size (number of jobs to queue past the running number)
self.maxQueueSize = self.runInfoDict['maxQueueSize']
# defaults to None; if None, then use batchSize instead
if self.maxQueueSize is None:
self.maxQueueSize = self.runInfoDict['batchSize']
# if requested max size less than 1, we can't do that, so take 1 instead
if self.maxQueueSize < 1:
self.raiseAWarning('maxQueueSize was set to be less than 1! Setting to 1...')
self.maxQueueSize = 1
self.raiseADebug('Setting maxQueueSize to', self.maxQueueSize)
# initialize PBS
with self.__queueLock:
self.__running = [None]*self.runInfoDict['batchSize']
self.__clientRunning = [None]*self.runInfoDict['batchSize']
self._parallelLib = ParallelLibEnum.shared
if self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] != ParallelLibEnum.distributed:
self._parallelLib = self.runInfoDict['parallelMethod']
elif self.runInfoDict['internalParallel'] or \
self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] == ParallelLibEnum.distributed:
#If ParallelLibEnum.distributed or internalParallel True
# then choose a library automatically.
if _daskAvail:
self._parallelLib = ParallelLibEnum.dask
elif _rayAvail:
self._parallelLib = ParallelLibEnum.ray
else:
self.raiseAWarning("Distributed Running requested but no parallel method found")
self._parallelLib = ParallelLibEnum.shared
desiredParallelMethod = f"parallelMethod: {self.runInfoDict['parallelMethod']} internalParallel: {self.runInfoDict['internalParallel']}"
self.raiseADebug(f"Using parallelMethod: {self._parallelLib} because Input: {desiredParallelMethod} and Ray Availablility: {_rayAvail} and Dask Availabilitiy: {_daskAvail}")
if self._parallelLib == ParallelLibEnum.dask and not _daskAvail:
self.raiseAnError(RuntimeError, f"dask requested but not available. {desiredParallelMethod}")
if self._parallelLib == ParallelLibEnum.ray and not _rayAvail:
self.raiseAnError(RuntimeError, f"ray requested but not available. {desiredParallelMethod}")
# internal server is initialized only in case an internal calc is requested
if not self.__isDistributedInitialized:
self.__initializeDistributed()
def __checkAndRemoveFinished(self, running):
"""
Method to check if a run is finished and remove it from the queue
@ In, running, instance, the job instance (InternalRunner or ExternalRunner)
@ Out, None
"""
with self.__queueLock:
returnCode = running.getReturnCode()
if returnCode != 0:
metadataFailedRun = running.getMetadata()
metadataToKeep = metadataFailedRun
if metadataFailedRun is not None:
metadataKeys = list(metadataFailedRun.keys())
if 'jobHandler' in metadataKeys:
metadataKeys.pop(metadataKeys.index("jobHandler"))
metadataToKeep = { keepKey: metadataFailedRun[keepKey] for keepKey in metadataKeys }
# FIXME: running.command is always internal now, so it was removed.
# We should probably find a way to give more pertinent information.
self.raiseAMessage(f" Process Failed {running.identifier}:{running} internal returnCode {returnCode}")
self.__failedJobs[running.identifier]=(returnCode,copy.deepcopy(metadataToKeep))
def __initializeDistributed(self):
"""
Internal method that is aimed to initialize the internal parallel system.
It initializes the RAY or DASK implementation (with socketing system) in
case RAVEN is run in a cluster with multiple nodes or the NumMPI > 1,
otherwise multi-threading is used.
@ In, None
@ Out, None
"""
self.raiseADebug("Initializing parallel InternalParallel: {0} Nodes: {1}".format(self.runInfoDict['internalParallel'],len(self.runInfoDict['Nodes'])))
if self._parallelLib != ParallelLibEnum.shared:
# dashboard?
db = self.runInfoDict['includeDashboard']
# Check if the list of unique nodes is present and, if so, initialize the remote servers
servers = None
sys.path.append(self.runInfoDict['WorkingDir'])
if 'UPDATE_PYTHONPATH' in self.runInfoDict:
sys.path.extend([p.strip() for p in self.runInfoDict['UPDATE_PYTHONPATH'].split(":")])
if _rayAvail:
# update the python path and working dir
olderPath = os.environ["PYTHONPATH"].split(os.pathsep) if "PYTHONPATH" in os.environ else []
os.environ["PYTHONPATH"] = os.pathsep.join(set(olderPath+sys.path))
# is ray instantiated outside?
self.rayInstanciatedOutside = 'headNode' in self.runInfoDict
self.daskInstanciatedOutside = 'schedulerFile' in self.runInfoDict
if len(self.runInfoDict['Nodes']) > 0 or self.rayInstanciatedOutside or self.daskInstanciatedOutside:
availableNodes = [nodeId.strip() for nodeId in self.runInfoDict['Nodes']]
uniqueN = list(set(availableNodes))
# identify the local host name and get the number of local processors
localHostName = self.__getLocalHost()
self.raiseADebug("Head host name is : ", localHostName)
# number of processors
nProcsHead = availableNodes.count(localHostName)
if not nProcsHead:
self.raiseAWarning("# of local procs are 0. Only remote procs are avalable")
self.raiseAWarning(f'Head host name "{localHostName}" /= Avail Nodes "'+', '.join(uniqueN)+'"!')
self.raiseADebug("# of local procs : ", str(nProcsHead))
self.raiseADebug("# of total procs : ", str(len(availableNodes)))
if nProcsHead != len(availableNodes) or self.rayInstanciatedOutside or self.daskInstanciatedOutside:
if self.rayInstanciatedOutside:
address = self.runInfoDict['headNode']
elif self.daskInstanciatedOutside:
self.daskSchedulerFile = self.runInfoDict['schedulerFile']
else:
# create head node cluster
# port 0 lets ray choose an available port
address = self.__runHeadNode(nProcsHead, 0)
if self._parallelLib == ParallelLibEnum.ray:
# add names in runInfo
self.runInfoDict['headNode'] = address
self.raiseADebug("Head host IP :", address)
if self._parallelLib == ParallelLibEnum.dask:
# add file in runInfo
self.runInfoDict['schedulerFile'] = self.daskSchedulerFile
self.raiseADebug('scheduler file :', self.daskSchedulerFile)
## Get servers and run ray or dask remote listener
if self.rayInstanciatedOutside or self.daskInstanciatedOutside:
servers = self.runInfoDict['remoteNodes']
else:
servers = self.__runRemoteListeningSockets(address, localHostName)
# add names in runInfo
self.runInfoDict['remoteNodes'] = servers
if self._parallelLib == ParallelLibEnum.ray:
## initialize ray server with nProcs
self._server = ray.init(address=address,log_to_driver=False,include_dashboard=db)
elif self._parallelLib == ParallelLibEnum.dask:
if self.daskSchedulerFile is not None:
#handle multinode and prestarted configurations
self._server = dask.distributed.Client(scheduler_file=self.daskSchedulerFile)
else:
#Start locally
cluster = dask.distributed.LocalCluster()
self._server = dask.distributed.Client(cluster)
else:
self.raiseAWarning("No supported server")
if self._parallelLib == ParallelLibEnum.ray:
self.raiseADebug("NODES IN THE CLUSTER : ", str(ray.nodes()))
else:
if self._parallelLib == ParallelLibEnum.ray:
self.raiseADebug("Executing RAY in the cluster but with a single node configuration")
self._server = ray.init(num_cpus=nProcsHead,log_to_driver=False,include_dashboard=db)
elif self._parallelLib == ParallelLibEnum.dask:
self.raiseADebug("Executing DASK in the cluster but with a single node configuration")
#Start locally
cluster = dask.distributed.LocalCluster()
self._server = dask.distributed.Client(cluster)
else:
self.raiseADebug("Initializing", str(self._parallelLib), "locally with num_cpus: ", self.runInfoDict['totalNumCoresUsed'])
if self._parallelLib == ParallelLibEnum.ray:
self._server = ray.init(num_cpus=int(self.runInfoDict['totalNumCoresUsed']),include_dashboard=db)
elif self._parallelLib == ParallelLibEnum.dask:
#handle local method
cluster = dask.distributed.LocalCluster(n_workers=int(self.runInfoDict['totalNumCoresUsed']))
self._server = dask.distributed.Client(cluster)
else:
self.raiseAWarning("parallellib creation not handled")
if self._parallelLib == ParallelLibEnum.ray:
self.raiseADebug("Head node IP address: ", self._server.address_info['node_ip_address'])
self.raiseADebug("Redis address : ", self._server.address_info['redis_address'])
self.raiseADebug("Object store address: ", self._server.address_info['object_store_address'])
self.raiseADebug("Raylet socket name : ", self._server.address_info['raylet_socket_name'])
self.raiseADebug("Session directory : ", self._server.address_info['session_dir'])
self.raiseADebug("GCS Address : ", self._server.address_info['gcs_address'])
if servers:
self.raiseADebug("# of remote servers : ", str(len(servers)))
self.raiseADebug("Remote servers : ", " , ".join(servers))
else:
self.raiseADebug("JobHandler initialized without ray")
else:
## We are just using threading
self._server = None
self.raiseADebug("JobHandler initialized with threading")
# ray or dask is initialized
self.__isDistributedInitialized = True
def __getLocalAndRemoteMachineNames(self):
"""
Method to get the qualified host and remote nodes' names
@ In, None
@ Out, hostNameMapping, dict, dictionary containing the qualified names of the remote nodes
"""
hostNameMapping = {}
## collect the qualified hostnames for each remote node
for nodeId in list(set(self.runInfoDict['Nodes'])):
hostNameMapping[nodeId.strip()] = socket.gethostbyname(nodeId.strip())
self.raiseADebug('Host "'+nodeId.strip()+'" identified with IP: ', hostNameMapping[nodeId.strip()])
return hostNameMapping
def __getLocalHost(self):
"""
Method to get the name of the local host
@ In, None
@ Out, __getLocalHost, string, the local host name
"""
return str(socket.getfqdn()).strip()
def __shutdownParallel(self):
"""
shutdown the parallel protocol
@ In, None
@ Out, None
"""
if self._parallelLib == ParallelLibEnum.ray and self._server is not None and not self.rayInstanciatedOutside:
# we need to ssh and stop each remote node cluster (ray)
servers = []
if 'remoteNodes' in self.runInfoDict:
servers += self.runInfoDict['remoteNodes']
if 'headNode' in self.runInfoDict:
servers += [self.runInfoDict['headNode']]
# get local environment
localEnv = os.environ.copy()
localEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
for nodeAddress in servers:
self.raiseAMessage("Shutting down ray at address: "+ nodeAddress)
command="ray stop"
rayTerminate = utils.pickleSafeSubprocessPopen(['ssh',nodeAddress.split(":")[0],"COMMAND='"+command+"'","RAVEN_FRAMEWORK_DIR='"+self.runInfoDict["FrameworkDir"]+"'",self.runInfoDict['RemoteRunCommand']],shell=False,env=localEnv)
rayTerminate.wait()
if rayTerminate.returncode != 0:
self.raiseAWarning("RAY FAILED TO TERMINATE ON NODE: "+nodeAddress)
# shutdown ray API (object storage, plasma, etc.)
ray.shutdown()
elif self._parallelLib == ParallelLibEnum.dask and self._server is not None and not self.rayInstanciatedOutside:
self._server.close()
if self._daskScheduler is not None:
self._daskScheduler.terminate()
def __runHeadNode(self, nProcs, port=None):
"""
Method to activate the head ray server
@ In, nProcs, int, the number of processors
@ In, port, int, desired port (None: ray default, 0: ray finds available)
@ Out, address, str, the retrieved address (ip:port)
"""
address = None
# get local environment
localEnv = os.environ.copy()
localEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
if self._parallelLib == ParallelLibEnum.ray:
command = ["ray", "start", "--head"]
if nProcs is not None:
command.append("--num-cpus="+str(nProcs))
if port is not None:
command.append("--port="+str(port))
outFile = open("ray_head.ip", 'w')
rayStart = utils.pickleSafeSubprocessPopen(command,shell=False,stdout=outFile, stderr=outFile, env=localEnv)
rayStart.wait()
outFile.close()
if rayStart.returncode != 0:
self.raiseAnError(RuntimeError, f"RAY failed to start on the --head node! Return code is {rayStart.returncode}")
else:
address = self.__getRayInfoFromStart("ray_head.ip")
elif self._parallelLib == ParallelLibEnum.dask:
self.daskSchedulerFile = os.path.join(self.runInfoDict['WorkingDir'],"scheduler.json")
if os.path.exists(self.daskSchedulerFile):
self.raiseADebug("Removing "+str(self.daskSchedulerFile))
os.remove(self.daskSchedulerFile)
tries = 0
succeeded = False
while not succeeded:
#If there is a way to tell dask scheduler to automatically choose a
# port, please change this to that.
scheduler = utils.pickleSafeSubprocessPopen(["dask","scheduler",
"--scheduler-file",
self.daskSchedulerFile,
"--port",str(8786+tries)])
waitCount = 0.0
while not (os.path.exists(self.daskSchedulerFile) or scheduler.poll() is not None or waitCount > 20.0):
time.sleep(0.1)
waitCount += 0.1
if os.path.exists(self.daskSchedulerFile) and scheduler.poll() is None:
succeeded = True
self._daskScheduler = scheduler
self.raiseADebug("dask scheduler started with "+str(self.daskSchedulerFile))
break
if scheduler.poll() is None:
self.raiseAWarning("killing dask scheduler")
scheduler.terminate()
tries += 1
if tries > 20:
succeeded = False
self.raiseAWarning("failed to start dask scheduler")
self.daskSchedulerFile = None
break
if succeeded:
#do equivalent of dask worker start in start_dask.sh:
# dask worker --nworkers $NUM_CPUS --scheduler-file $SCHEDULER_FILE >> $OUTFILE
outFile = open(os.path.join(self.runInfoDict['WorkingDir'],
"server_debug_"+self.__getLocalHost()),'w')
command = ["dask","worker","--scheduler-file",self.daskSchedulerFile]
if nProcs is not None:
command.extend(("--nworkers",str(nProcs)))
headDaskWorker = utils.pickleSafeSubprocessPopen(command,shell=False,
stdout=outFile, stderr=outFile, env=localEnv)
return address
def __getRayInfoFromStart(self, rayLog):
"""
Read Ray info from shell return script for ray
@ In, rayLog, str, the ray output log
@ Out, address, str, the retrieved address (ip:port)
"""
with open(rayLog, 'r') as rayLogObj:
for line in rayLogObj.readlines():
match = re.search("ray start --address='([^']*)'", line)
if match:
address = match.groups()[0]
return address
self.raiseAWarning("ray start address not found in "+str(rayLog))
return None
def __updateListeningSockets(self, localHostName):
"""
Update the path in the remote nodes
@ In, localHostName, string, the head node name
@ Out, None
"""
## Get the local machine name and the remote nodes one
remoteNodesIP = self.__getLocalAndRemoteMachineNames()
## Strip out the nodes' names
availableNodes = [node.strip() for node in self.runInfoDict['Nodes']]
## Get unique nodes
uniqueNodes = list(set(list(set(availableNodes))) - set([localHostName]))
self.remoteServers = {}
if len(uniqueNodes) > 0:
## There are remote nodes that need to be activated
## Modify the python path used by the local environment
localEnv = os.environ.copy()
pathSeparator = os.pathsep
if "PYTHONPATH" in localEnv and len(localEnv["PYTHONPATH"].strip()) > 0:
previousPath = localEnv["PYTHONPATH"].strip()+pathSeparator
else:
previousPath = ""
localEnv["PYTHONPATH"] = previousPath+pathSeparator.join(sys.path)
## Start
for nodeId in uniqueNodes:
remoteHostName = remoteNodesIP[nodeId]
## Activate the remote socketing system
## let's build the command and then call the os-agnostic version
if _rayAvail:
self.raiseADebug("Updating RAY server in node:", nodeId.strip())
runScript = os.path.join(self.runInfoDict['FrameworkDir'],"RemoteNodeScripts","update_path_in_remote_servers.sh")
command=" ".join([runScript,"--remote-node-address",nodeId," --working-dir ",self.runInfoDict['WorkingDir']])
self.raiseADebug("command is:", command)
command += " --python-path "+localEnv["PYTHONPATH"]
self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen([command],shell=True,env=localEnv)
def __removeLibPythonFromPath(self, pythonPath):
"""
Method to remove the python library from the path (which can cause
problems with different python version)
@ In, pythonPath, string, the original python path
@ Out, pythonPath, string, the python path with lib.python removed.
"""
if re.search("lib.python", pythonPath):
#strip out python libraries from path
#XXX ideally, this would have a way to tell if
# the paths we are stripping are the real builtin python paths
# instead of just using a regular expression
splitted=pythonPath.split(os.pathsep)
newpath = []
for part in splitted:
if not re.search("lib.python", part):
newpath.append(part)
else:
self.raiseADebug(f"removepath: {part}")
return os.pathsep.join(newpath)
return pythonPath
def __runRemoteListeningSockets(self, address, localHostName):
"""
Method to activate the remote sockets for parallel python
@ In, address, string, the head node redis address
@ In, localHostName, string, the local host name
@ Out, servers, list, list containing the nodes in which the remote sockets have been activated
"""
## Get the local machine name and the remote nodes one
remoteNodesIP = self.__getLocalAndRemoteMachineNames()
## Strip out the nodes' names
availableNodes = [node.strip() for node in self.runInfoDict['Nodes']]
## Get unique nodes
uniqueNodes = list(set(availableNodes) - set([localHostName]))
servers = []
self.remoteServers = {}
if len(uniqueNodes) > 0:
## There are remote nodes that need to be activated
## Modify the python path used by the local environment
localEnv = os.environ.copy()
pathSeparator = os.pathsep
if "PYTHONPATH" in localEnv and len(localEnv["PYTHONPATH"].strip()) > 0:
previousPath = localEnv["PYTHONPATH"].strip()+pathSeparator
else:
previousPath = ""
localEnv["PYTHONPATH"] = previousPath+pathSeparator.join(sys.path)
## Start
for nodeId in uniqueNodes:
## Check how many processors are available in the node
ntasks = availableNodes.count(nodeId)
remoteHostName = remoteNodesIP[nodeId]
## Activate the remote socketing system
## let's build the command and then call the os-agnostic version
if self._parallelLib == ParallelLibEnum.ray:
self.raiseADebug("Setting up RAY server in node: "+nodeId.strip())
runScript = os.path.join(self.runInfoDict['FrameworkDir'],"RemoteNodeScripts","start_remote_servers.sh")
command=" ".join([runScript,"--remote-node-address",nodeId, "--address",address, "--num-cpus",str(ntasks)," --working-dir ",self.runInfoDict['WorkingDir']," --raven-framework-dir",self.runInfoDict["FrameworkDir"],"--remote-bash-profile",self.runInfoDict['RemoteRunCommand']])
self.raiseADebug("command is: "+command)
command += " --python-path "+localEnv["PYTHONPATH"]
self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen([command],shell=True,env=localEnv)
elif self._parallelLib == ParallelLibEnum.dask:
remoteServerScript = os.path.join(self.runInfoDict['FrameworkDir'],
"RemoteNodeScripts","start_dask.sh")
outputFile = os.path.join(self.runInfoDict['WorkingDir'],"server_debug_"+nodeId)
command = ['ssh',nodeId,remoteServerScript,outputFile,
self.daskSchedulerFile,str(ntasks),
self.runInfoDict["FrameworkDir"],
self.runInfoDict['RemoteRunCommand'],
self.runInfoDict['WorkingDir']]
self.raiseADebug("command is: "+" ".join(command))
command.append(self.__removeLibPythonFromPath(localEnv["PYTHONPATH"]))
self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen(command, env=localEnv)
## update list of servers
servers.append(nodeId)
if self._parallelLib == ParallelLibEnum.ray or self._parallelLib == ParallelLibEnum.dask:
#wait for the servers to finish starting (prevents zombies)
for nodeId in uniqueNodes:
self.remoteServers[nodeId].wait()
self.raiseADebug("server "+str(nodeId)+" result: "+str(self.remoteServers[nodeId]))
return servers
def sendDataToWorkers(self, data):
"""
Method to send data to workers (if ray activated) and return a reference
If ray is not used, the data is simply returned, otherwise an object reference id is returned
@ In, data, object, any data to send to workers
@ Out, ref, ray.ObjectRef or object, the reference or the object itself
"""
if self._server is not None and self._parallelLib == ParallelLibEnum.ray:
ref = ray.put(copy.deepcopy(data))
else:
ref = copy.deepcopy(data)
return ref
def startLoop(self):
"""
This function begins the polling loop for the JobHandler where it will
constantly fill up its running queue with jobs in its pending queue and
unload finished jobs into its finished queue to be extracted by the caller.
@ In, None
@ Out, None
"""
while not self.completed:
self.fillJobQueue()
self.cleanJobQueue()
# TODO May want to revisit this:
# http://stackoverflow.com/questions/29082268/python-time-sleep-vs-event-wait
# probably when we move to Python 3.
time.sleep(self.sleepTime)
def addJob(self, args, functionToRun, identifier, metadata=None, forceUseThreads = False, uniqueHandler="any", clientQueue = False, groupInfo = None):
"""
Method to add an internal run (function execution)
@ In, args, list, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun,function or method, the function that needs to be
executed
@ In, identifier, string, the job identifier
@ In, metadata, dict, optional, dictionary of metadata associated to this
run
@ In, forceUseThreads, bool, optional, flag that, if True, is going to
force the usage of multi-threading even if parallel python is activated
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner
@ In, groupInfo, dict, optional, {id:string, size:int}.
- "id": it is a special keyword attached to
this runner to identify that this runner belongs to a special set of runs that need to be
grouped together (all will be retrievable only when all the runs ended).
- "size", number of runs in this group self.__batching
NOTE: If the "size" of the group is only set the first time a job of this group is added.
Consequentially the size is immutable
@ In, clientQueue, boolean, optional, if this run needs to be added in the
clientQueue
@ Out, None
"""
assert "original_function" in dir(functionToRun), "to parallelize a function, it must be" \
" decorated with RAVEN Parallel decorator"
if self._server is None or forceUseThreads:
internalJob = Runners.factory.returnInstance('SharedMemoryRunner', args,
functionToRun.original_function,
identifier=identifier,
metadata=metadata,
uniqueHandler=uniqueHandler,
profile=self.__profileJobs)
else:
if self._parallelLib == ParallelLibEnum.dask:
arguments = tuple([self._server] + list(args))
else:
arguments = args
if self._parallelLib == ParallelLibEnum.dask:
internalJob = Runners.factory.returnInstance('DaskRunner', arguments,
functionToRun.original_function,
identifier=identifier,
metadata=metadata,
uniqueHandler=uniqueHandler,
profile=self.__profileJobs)
elif self._parallelLib == ParallelLibEnum.ray:
internalJob = Runners.factory.returnInstance('RayRunner', arguments,
functionToRun.remote,
identifier=identifier,
metadata=metadata,
uniqueHandler=uniqueHandler,
profile=self.__profileJobs)
# set the client info
internalJob.clientRunner = clientQueue
# set the grouping id if present
if groupInfo is not None:
groupId = groupInfo['id']
# TODO: create method in Runner to set flags, ids, etc. in the instantiated runner
internalJob.groupId = groupId
if groupId not in self.__batching:
# NOTE: The size of the group is only set once the first job belonging to a group is added
# ***** THE size of a group is IMMUTABLE *****
self.__batching[groupId] = {"counter": 0, "ids": [], "size": groupInfo['size'], 'finished': []}
self.__batching[groupId]["counter"] += 1
if self.__batching[groupId]["counter"] > self.__batching[groupId]["size"]:
self.raiseAnError(RuntimeError, f"group id {groupId} is full. Size reached:")
self.__batching[groupId]["ids"].append(identifier)
# add the runner in the Queue
self.reAddJob(internalJob)
def reAddJob(self, runner):
"""
Method to add a runner object in the queue
@ In, runner, Runner Instance, this is the instance of the runner that we want to re-add to the queue
@ Out, None
"""
with self.__queueLock:
if not runner.clientRunner:
self.__queue.append(runner)
else:
self.__clientQueue.append(runner)
if self.__profileJobs:
runner.trackTime('queue')
self.__submittedJobs.append(runner.identifier)
def addClientJob(self, args, functionToRun, identifier, metadata=None, uniqueHandler="any"):
"""
Method to add an internal run (function execution), without consuming
resources (free spots). This can be used for client handling (see
metamodel)
@ In, args, dict, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun,function or method, the function that needs to be
executed
@ In, identifier, string, the job identifier
@ In, metadata, dict, optional, dictionary of metadata associated to this
run
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner.
@ Out, None
"""
self.addJob(args, functionToRun, identifier, metadata,
forceUseThreads = True, uniqueHandler = uniqueHandler,
clientQueue = True)
def addFinishedJob(self, data, metadata=None, uniqueHandler="any", profile=False):
"""
Takes an already-finished job (for example, a restart realization) and adds it to the finished queue.
@ In, data, dict, fully-evaluated (completed) realization
@ In, metadata, dict, optional, dictionary of metadata associated with
this run
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner
@ In, profile, bool, optional, if True then at de-construction timing statements will be printed
@ Out, None
"""
# create a placeholder runner
run = Runners.factory.returnInstance('PassthroughRunner', data, None,
metadata=metadata,
uniqueHandler=uniqueHandler,
profile=profile)
# place it on the finished queue
with self.__queueLock:
self.__finished.append(run)
def isFinished(self, uniqueHandler=None):
"""
Method to check if all the runs in the queue are finished or, when a uniqueHandler is provided, if the jobs attached to that uniqueHandler are done
@ In, uniqueHandler, string, optional, it is a special keyword attached to
each runner. If provided, just the jobs that have the uniqueIdentifier
will be checked. By default uniqueHandler = None => all the jobs for
which no uniqueIdentifier has been set up are going to be checked
@ Out, isFinished, bool, True all the runs in the queue are finished
"""
# FIXME: The following lines of code are a temporary fix for timing issues
# on the collection of jobs in the jobHandler. This issue emerged when
# performing batching. The relationship between the jobHandler
# and the Step when retrieving multiple jobs needs to be reviewed.
# An issue has been opened: 'JobHandler and Batching #1402'
with self.__queueLock:
# If there is still something left in the queue, we are not done yet.
if len(self.__queue)>0 or len(self.__clientQueue)>0:
return False
# Otherwise, let's look at our running lists and see if there is a job
# that is not done.
for run in self.__running+self.__clientRunning:
if run:
if uniqueHandler is None or uniqueHandler == run.uniqueHandler:
return False
# Are there runs that need to be claimed? If so, then I cannot say I am done.
numFinished = len(self.getFinishedNoPop())
if numFinished != 0:
return False
return True
def availability(self, client=False):
"""
Returns the number of runs that can be added until we consider our queue
saturated
@ In, client, bool, if true, then return the values for the
__clientQueue, otherwise use __queue
@ Out, availability, int the number of runs that can be added until we
reach saturation
"""
# Due to possibility of memory explosion, we should include the finished
# queue when considering whether we should add a new job. There was an
# issue when running on a distributed system where we saw that this list
# seemed to be growing indefinitely as the main thread was unable to clear
# that list within a reasonable amount of time. The issue on the main thread
# should also be addressed, but at least we can prevent it on this end since
# the main thread's issue may be legitimate.
maxCount = self.maxQueueSize
finishedCount = len(self.__finished)
if client:
if maxCount is None:
maxCount = self.__clientRunning.count(None)
queueCount = len(self.__clientQueue)
else:
if maxCount is None:
maxCount = self.__running.count(None)
queueCount = len(self.__queue)
availability = maxCount - queueCount - finishedCount
return availability
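# Worked example (illustrative numbers only): with maxQueueSize = 8, 3 jobs waiting in
# the queue and 2 finished-but-uncollected jobs, availability() returns 8 - 3 - 2 = 3,
# i.e. three more jobs can be added before the queue is considered saturated.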
def isThisJobFinished(self, identifier):
"""
Method to check if the run identified by "identifier" is finished
@ In, identifier, string, identifier
@ Out, isFinished, bool, True if the job identified by "identifier" is
finished
"""
identifier = identifier.strip()
with self.__queueLock:
# Look through the finished jobs and attempt to find a matching
# identifier. If the job exists here, it is finished
for run in self.__finished:
if run.identifier == identifier:
return True
# Look through the pending jobs and attempt to find a matching identifier
# If the job exists here, it is not finished
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.identifier == identifier:
return False
# Look through the running jobs and attempt to find a matching identifier
# If the job exists here, it is not finished
for run in self.__running+self.__clientRunning:
if run is not None and run.identifier == identifier:
return False
# If you made it here and we still have not found anything, we have got
# problems.
self.raiseAnError(RuntimeError,"Job "+identifier+" is unknown!")
def areTheseJobsFinished(self, uniqueHandler="any"):
"""
Method to check if all the runs in the queue are finished
@ In, uniqueHandler, string, optional, it is a special keyword attached to
each runner. If provided, just the jobs that have the uniqueIdentifier
will be retrieved. By default uniqueHandler = 'any' => all the jobs for
which no uniqueIdentifier has been set up are going to be retrieved
@ Out, isFinished, bool, True all the runs in the queue are finished
"""
uniqueHandler = uniqueHandler.strip()
with self.__queueLock:
for run in self.__finished:
if run.uniqueHandler == uniqueHandler:
return False
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.uniqueHandler == uniqueHandler:
return False
for run in self.__running + self.__clientRunning:
if run is not None and run.uniqueHandler == uniqueHandler:
return False
self.raiseADebug("The jobs with uniqueHandler ", uniqueHandler, "are finished")
return True
def getFailedJobs(self):
"""
Method to get list of failed jobs
@ In, None
@ Out, __failedJobs, list, list of the identifiers (jobs) that failed
"""
return self.__failedJobs
def getFinished(self, removeFinished=True, jobIdentifier='', uniqueHandler="any"):
"""
Method to get the list of jobs that ended (list of objects)
@ In, removeFinished, bool, optional, flag to control if the finished jobs
need to be removed from the queue
@ In, jobIdentifier, string, optional, if specified, only collects
finished runs that start with this text. If not specified collect all.
@ In, uniqueHandler, string, optional, it is a special keyword attached to
each runner. If provided, just the jobs that have the uniqueIdentifier
will be retrieved. By default uniqueHandler = 'any' => all the jobs for
which no uniqueIdentifier has been set up are going to be retrieved
@ Out, finished, list, list of finished jobs (InternalRunner or
ExternalRunner objects) if jobIdentifier is not specified, else the finished
jobs whose identifiers start with jobIdentifier
NOTE:
- in case the runs belong to a groupID (batching), each element of the list
contains a list of the finished runs belonging to that group (Batch)
- otherwise a flat list of jobs is returned.
For example:
finished = [job1, job2, [job3.1, job3.2], job4 ] (job3.1/3.2 belong to the same groupID)
or [job1, job2, job3, job4]
"""
# If the user does not specify a jobIdentifier, then set it to the empty
# string because every job will match this starting string.
if jobIdentifier is None:
jobIdentifier = ''
with self.__queueLock:
finished = []
runsToBeRemoved = []
for i,run in enumerate(self.__finished):
# If the jobIdentifier does not match or the uniqueHandler does not
# match, then don't bother trying to do anything with it
if not run.identifier.startswith(jobIdentifier) \
or uniqueHandler != run.uniqueHandler:
continue
# check if the run belongs to a subgroup (batch) and, if so, collect it with its group
if run.groupId in self.__batching:
if run not in self.__batching[run.groupId]['finished']:
self.__batching[run.groupId]['finished'].append(run)
else:
finished.append(run)
if removeFinished:
runsToBeRemoved.append(i)
self.__checkAndRemoveFinished(run)
#FIXME: IF THE RUN IS PART OF A BATCH AND IT FAILS, WHAT DO WE DO? alfoa
# check if batches are ready to be returned
for groupId in list(self.__batching.keys()):
if len(self.__batching[groupId]['finished']) > self.__batching[groupId]['size']:
self.raiseAnError(RuntimeError,'The batching system got corrupted. Open an issue in RAVEN github!')
if removeFinished:
if len(self.__batching[groupId]['finished']) == self.__batching[groupId]['size']:
doneBatch = self.__batching.pop(groupId)
finished.append(doneBatch['finished'])
else:
doneBatch = self.__batching[groupId]
finished.append(doneBatch['finished'])
# Since these indices are sorted, reverse them to ensure that when we
# delete something it will not shift anything to the left (lower index)
# than it.
if removeFinished:
for i in reversed(runsToBeRemoved):
self.__finished[i].trackTime('collected')
del self.__finished[i]
# end with self.__queueLock
return finished
def getFinishedNoPop(self):
"""
Method to get the list of jobs that ended (list of objects) without
removing them from the queue
@ In, None
@ Out, finished, list, list of finished jobs (InternalRunner or
ExternalRunner objects)
"""
finished = self.getFinished(False)
return finished
# Deprecating this function because I don't think it is doing the right thing
# People using the job handler should be asking for what is available not the
# number of free spots in the running block. Only the job handler should be
# able to internally alter or query the running and clientRunning queues.
# The outside environment can only access the queue and clientQueue variables.
# def numFreeSpots(self, client=False):
def numRunning(self):
"""
Returns the number of runs currently running.
@ In, None
@ Out, activeRuns, int, number of active runs
"""
#with self.__queueLock:
# The size of the list does not change, only its contents, so I don't
# think there should be any conflict if we are reading a variable from
# one thread and updating it on the other thread.
activeRuns = sum(run is not None for run in self.__running)
return activeRuns
def numRunningTotal(self):
"""
Returns the number of runs currently running in both lists.
@ In, None
@ Out, activeRuns, int, number of active runs
"""
activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)
return activeRuns
def _numQueuedTotal(self):
"""
Returns the number of runs currently waiting in both queues.
@ In, None
@ Out, queueSize, int, number of runs in queue
"""
queueSize = len(self.__queue) + len(self.__clientQueue)
return queueSize
def numSubmitted(self):
"""
Method to get the number of submitted jobs
@ In, None
@ Out, len(self.__submittedJobs), int, number of submitted jobs
"""
return len(self.__submittedJobs)
def fillJobQueue(self):
"""
Method to start running the jobs in queue. If there are empty slots
takes jobs out of the queue and starts running them.
@ In, None
@ Out, None
"""
# Only the jobHandler's startLoop thread should have write access to the
# self.__running variable, so we should be able to safely query this outside
# of the lock given that this function is called only on that thread as well.
emptySlots = [i for i,run in enumerate(self.__running) if run is None]
# Don't bother acquiring the lock if there are no empty spots or nothing
# in the queue (this could be simultaneously added to by the main thread,
# but I will be back here after a short wait on this thread so I am not
# concerned about this potential inconsistency)
if len(emptySlots) > 0 and len(self.__queue) > 0:
with self.__queueLock:
for i in emptySlots:
# The queue could be emptied during this loop, so we break
# out as soon as that happens so we don't hog the lock.
if len(self.__queue) > 0:
item = self.__queue.popleft()
# Okay, this is a little tricky, but hang with me here. Whenever
# a code model is run, we need to replace some of its command
# parameters. The way we do this is by looking at the job instance
# and checking if the first argument (the self in
# self.evaluateSample) is an instance of Code, if so, then we need
# to replace the execution command. Is this fragile? Possibly. We may
# want to revisit this on the next iteration of this code.
if len(item.args) > 0 and isinstance(item.args[0], Models.Code):
kwargs = {}
if self._server is not None:
for infoKey in ['headNode','remoteNodes','schedulerFile']:
if infoKey in self.runInfoDict:
kwargs[infoKey] = self.runInfoDict[infoKey]
kwargs['INDEX'] = str(i)
kwargs['INDEX1'] = str(i+1)
kwargs['CURRENT_ID'] = str(self.__nextId)
kwargs['CURRENT_ID1'] = str(self.__nextId+1)
kwargs['SCRIPT_DIR'] = self.runInfoDict['ScriptDir']
kwargs['FRAMEWORK_DIR'] = self.runInfoDict['FrameworkDir']
# This will not be used since the Code will create a new
# directory for its specific files and will spawn a process there
# so we will let the Code fill that in. Note, the line below
# represents the WRONG directory for an instance of a code!
# It is however the correct directory for a MultiRun step
# -- DPM 5/4/17
kwargs['WORKING_DIR'] = item.args[0].workingDir
kwargs['BASE_WORKING_DIR'] = self.runInfoDict['WorkingDir']
kwargs['METHOD'] = os.environ.get("METHOD","opt")
kwargs['NUM_CPUS'] = str(self.runInfoDict['NumThreads'])
item.args[3].update(kwargs)
self.__running[i] = item
self.__running[i].start()
self.__running[i].trackTime('started')
self.__nextId += 1
else:
break
# Repeat the same process above, only for the clientQueue
emptySlots = [i for i,run in enumerate(self.__clientRunning) if run is None]
if len(emptySlots) > 0 and len(self.__clientQueue) > 0:
with self.__queueLock:
for i in emptySlots:
if len(self.__clientQueue) > 0:
self.__clientRunning[i] = self.__clientQueue.popleft()
self.__clientRunning[i].start()
self.__clientRunning[i].trackTime('jobHandler_started')
self.__nextId += 1
else:
break
def cleanJobQueue(self):
"""
Method that will remove finished jobs from the running lists and place them into the
finished queue to be read by some other thread.
@ In, None
@ Out, None
"""
# The code handling these two lists was exactly the same, so I have taken the
# liberty of condensing these loops into one and removing some of the
# redundant checks to make this code a bit simpler.
for runList in [self.__running, self.__clientRunning]:
with self.__queueLock:
# We need the queueLock, because if terminateJobs runs kill on it,
# kill changes variables that can cause run.isDone to error out.
for i,run in enumerate(runList):
if run is not None and run.isDone():
self.__finished.append(run)
self.__finished[-1].trackTime('jobHandler_finished')
runList[i] = None
def setProfileJobs(self,profile=False):
"""
Sets whether profiles for jobs are printed or not.
@ In, profile, bool, optional, if True then print timings for jobs when they are garbage collected
@ Out, None
"""
self.__profileJobs = profile
def startingNewStep(self):
"""
Method to reset the __submittedJobs to an empty list.
@ In, None
@ Out, None
"""
with self.__queueLock:
self.__submittedJobs = []
def shutdown(self):
"""
This function will mark the job handler as done, so it can shutdown its
polling thread.
@ In, None
@ Out, None
"""
self.completed = True
self.__shutdownParallel()
def terminateAll(self):
"""
Method to clear out the queue by killing all running processes.
@ In, None
@ Out, None
"""
with self.__queueLock:
for queue in [self.__queue, self.__clientQueue]:
queue.clear()
for runList in [self.__running, self.__clientRunning]:
unfinishedRuns = [run for run in runList if run is not None]
for run in unfinishedRuns:
run.kill()
def terminateJobs(self, ids):
"""
Kills running jobs that match the given ids.
@ In, ids, list(str), job prefixes to terminate
@ Out, None
"""
#WARNING: terminateJobs modifies the running queue, which
# fillJobQueue assumes can't happen
queues = [self.__queue, self.__clientQueue, self.__running, self.__clientRunning]
with self.__queueLock:
for queue in queues:
toRemove = []
for job in queue:
if job is not None and job.identifier in ids:
# this assumes that each identifier only exists once in any queue anywhere
ids.remove(job.identifier)
toRemove.append(job)
for job in toRemove:
# for fixed-spot queues, need to replace job with None not remove
if isinstance(queue,list):
job.kill()
queue[queue.index(job)] = None
# for variable queues, can just remove the job
else:
queue.remove(job)
self.raiseADebug(f'Terminated job "{job.identifier}" by request.')
if len(ids):
self.raiseADebug('Tried to remove some jobs, but they were not found in any queue: ', ', '.join(ids))
|
d4bf7889af1cc0c39402364804440611065d5990
|
2bb7bc07df02a17735c2cacc7b2ba0c6de77b63c
|
/tron/bin/recover_batch.py
|
a92b8ccbc8dedd2602e7636f2b88dfba4a8d96ce
|
[
"Apache-2.0"
] |
permissive
|
Yelp/Tron
|
2c30a301055a732c3b33a39e05dbdcfc84ac8e02
|
958a2e22a6ac733cba043bc4238f3bf2b8048f4b
|
refs/heads/master
| 2023-08-29T11:35:11.716532
| 2023-08-21T19:27:45
| 2023-08-21T19:27:45
| 899,771
| 226
| 53
|
NOASSERTION
| 2023-08-21T19:26:45
| 2010-09-09T20:54:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,982
|
py
|
recover_batch.py
|
#!/usr/bin/env python3.6
import argparse
import logging
import signal
import sys
from queue import Queue
import psutil
from twisted.internet import inotify
from twisted.internet import reactor
from twisted.python import filepath
from tron import yaml
log = logging.getLogger("tron.recover_batch")
class StatusFileWatcher:
"""
Watches the status file produced by action runners
"""
def __init__(self, to_watch, callback):
notifier = inotify.INotify()
notifier.startReading()
notifier.watch(filepath.FilePath(to_watch), callbacks=[callback])
def parse_args():
parser = argparse.ArgumentParser(description="Check if a action runner has exited; wait otherwise",)
parser.add_argument("filepath")
return parser.parse_args()
def read_last_yaml_entries(filename):
with open(filename) as f:
lines = list(yaml.load_all(f))
if not lines:
entries = {}
else:
entries = lines[-1]
return entries
def notify(notify_queue, ignored, filepath, mask):
exit_code, error_message = get_exit_code(filepath.path)
if exit_code is not None:
reactor.stop()
notify_queue.put((exit_code, error_message))
def get_exit_code(filepath):
entries = read_last_yaml_entries(filepath)
pid = entries.get("runner_pid")
return_code = entries.get("return_code")
exit_code, error_message = None, None
if return_code is not None:
if return_code < 0:
# from the subprocess docs on the return code of a process:
# "A negative value -N indicates that the child was terminated by signal N (POSIX only)."
# We should always exit with a positive code, so we take the absolute value of the return code
exit_code = abs(return_code)
error_message = f"Action run killed by signal {signal.Signals(exit_code).name}"
else:
exit_code = return_code
elif pid is None:
log.warning(f"Status file {filepath} didn't have a PID. Will watch the file for updates.")
elif not psutil.pid_exists(pid):
exit_code = 1
error_message = f"Action runner pid {pid} no longer running. Assuming an exit of 1."
return exit_code, error_message
def run(fpath):
# Check if the process has already completed.
# If it has, we don't expect any more updates.
return_code, error_message = get_exit_code(fpath)
if return_code is not None:
if error_message is not None:
log.warning(error_message)
sys.exit(return_code)
# If not, wait for updates to the file.
notify_queue = Queue()
StatusFileWatcher(
fpath, lambda *args, **kwargs: notify(notify_queue, *args, **kwargs),
)
reactor.run()
exit_code, error_message = notify_queue.get()
if error_message is not None:
log.warning(error_message)
sys.exit(exit_code)
if __name__ == "__main__":
args = parse_args()
run(args.filepath)
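# Illustrative status-file content this script expects (values are made up); only the
# last YAML document matters, per read_last_yaml_entries():
#
#   ---
#   runner_pid: 12345
#   ---
#   runner_pid: 12345
#   return_code: 0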
|
ba58784a7a072b1ae38ec319806240e7930b742b
|
944a49e62bc79622fe01abee62403397a1b0504d
|
/openstackclient/tests/unit/volume/v2/test_backup_record.py
|
4933af16a74342c7d0521a6a06ed33abe69d9d15
|
[
"Apache-2.0"
] |
permissive
|
openstack/python-openstackclient
|
1c22984f9b29ae8ff9bbea26067981e2130ed039
|
78988d1786c0634ee055714910d1e6187f941673
|
refs/heads/master
| 2023-08-28T15:10:05.542862
| 2023-08-26T12:44:20
| 2023-08-26T12:44:20
| 4,170,310
| 286
| 224
|
Apache-2.0
| 2022-09-19T13:29:49
| 2012-04-28T21:07:25
|
Python
|
UTF-8
|
Python
| false
| false
| 3,729
|
py
|
test_backup_record.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import backup_record
class TestBackupRecord(volume_fakes.TestVolume):
def setUp(self):
super().setUp()
self.backups_mock = self.app.client_manager.volume.backups
self.backups_mock.reset_mock()
class TestBackupRecordExport(TestBackupRecord):
new_backup = volume_fakes.create_one_backup(
attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'},
)
new_record = volume_fakes.create_backup_record()
def setUp(self):
super().setUp()
self.backups_mock.export_record.return_value = self.new_record
self.backups_mock.get.return_value = self.new_backup
# Get the command object to mock
self.cmd = backup_record.ExportBackupRecord(self.app, None)
def test_backup_export_table(self):
arglist = [
self.new_backup.name,
]
verifylist = [
("backup", self.new_backup.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
parsed_args.formatter = 'table'
columns, __ = self.cmd.take_action(parsed_args)
self.backups_mock.export_record.assert_called_with(
self.new_backup.id,
)
expected_columns = ('Backup Service', 'Metadata')
self.assertEqual(columns, expected_columns)
def test_backup_export_json(self):
arglist = [
self.new_backup.name,
]
verifylist = [
("backup", self.new_backup.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
parsed_args.formatter = 'json'
columns, __ = self.cmd.take_action(parsed_args)
self.backups_mock.export_record.assert_called_with(
self.new_backup.id,
)
expected_columns = ('backup_service', 'backup_url')
self.assertEqual(columns, expected_columns)
class TestBackupRecordImport(TestBackupRecord):
new_backup = volume_fakes.create_one_backup(
attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'},
)
new_import = volume_fakes.import_backup_record()
def setUp(self):
super().setUp()
self.backups_mock.import_record.return_value = self.new_import
# Get the command object to mock
self.cmd = backup_record.ImportBackupRecord(self.app, None)
def test_backup_import(self):
arglist = [
"cinder.backup.drivers.swift.SwiftBackupDriver",
"fake_backup_record_data",
]
verifylist = [
(
"backup_service",
"cinder.backup.drivers.swift.SwiftBackupDriver",
),
("backup_metadata", "fake_backup_record_data"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, __ = self.cmd.take_action(parsed_args)
self.backups_mock.import_record.assert_called_with(
"cinder.backup.drivers.swift.SwiftBackupDriver",
"fake_backup_record_data",
)
self.assertEqual(columns, ('backup',))
|
5fd2ef96a8e6613469b8612f2766252605891b2f
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/ppdet/__init__.py
|
6fcc982fb60c796e6b9b6e23026d50ef0e9611ae
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
__init__.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
utils, slim)
try:
from .version import full_version as __version__
from .version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write("Warning: import ppdet from source directory " \
"without installing, run 'python setup.py install' to " \
"install ppdet firstly\n")
|
53e5dbda61ab8d856cf2d13c273481253de5ae1b
|
52042672823e33dcb1a2291f2a134e09571e5909
|
/moonlight/util/more_iter_tools_test.py
|
480da513e58d34247b2e688f5abfbb38575cfdfa
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
tensorflow/moonlight
|
1fcb859b7e76c89647f0ef96aade26c353896107
|
d80279a3bf5d93f309ac6df2ec3baf47f29fa52a
|
refs/heads/master
| 2023-08-23T22:35:21.400841
| 2022-06-02T19:24:07
| 2022-06-02T19:24:07
| 129,930,967
| 334
| 85
|
Apache-2.0
| 2022-02-11T02:33:16
| 2018-04-17T16:05:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
more_iter_tools_test.py
|
"""Tests for more_iter_tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl.testing import absltest
from moonlight.util import more_iter_tools
import numpy as np
from six import moves
class MoreIterToolsTest(absltest.TestCase):
def testSample_count_0(self):
self.assertEqual([], more_iter_tools.iter_sample(moves.range(100), 0))
def testSample_iter_empty(self):
self.assertEqual([], more_iter_tools.iter_sample(moves.range(0), 10))
def testSample_distribution(self):
sample = more_iter_tools.iter_sample(
moves.range(0, 100000), 9999, rand=random.Random(12345))
self.assertEqual(9999, len(sample))
# Create a histogram with 10 bins.
bins = np.bincount([elem // 10000 for elem in sample])
self.assertEqual(10, len(bins))
# Samples should be distributed roughly uniformly into bins.
expected_bin_count = 9999 // 10
for bin_count in bins:
self.assertTrue(
np.allclose(bin_count, expected_bin_count, rtol=0.1),
'{} within 10% of {}'.format(bin_count, expected_bin_count))
if __name__ == '__main__':
absltest.main()
|
cf1fbf0e4470f609e825889de59c727825cb5b60
|
c3493b2d99ea73b71d6a930482a906c11432c568
|
/muspy/inputs/note.py
|
c6dbaa3b30f9e492795c2ab20379d5b37189bc04
|
[
"MIT"
] |
permissive
|
salu133445/muspy
|
bdf1a1cc2d28e5fc8423ec7fe2e467fb0b67122c
|
b2d4265c6279e730903d8abe9dddda8484511903
|
refs/heads/main
| 2023-07-08T09:29:22.413086
| 2023-06-26T08:08:22
| 2023-06-26T08:08:22
| 247,167,654
| 380
| 54
|
MIT
| 2023-01-02T20:20:21
| 2020-03-13T21:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
note.py
|
"""Note-based representation input interface."""
from operator import attrgetter
import numpy as np
from numpy import ndarray
from ..classes import DEFAULT_VELOCITY, Note, Track
from ..music import DEFAULT_RESOLUTION, Music
def from_note_representation(
array: ndarray,
resolution: int = DEFAULT_RESOLUTION,
program: int = 0,
is_drum: bool = False,
use_start_end: bool = False,
encode_velocity: bool = True,
default_velocity: int = DEFAULT_VELOCITY,
) -> Music:
"""Decode note-based representation into a Music object.
Parameters
----------
array : ndarray
Array in note-based representation to decode.
resolution : int, default: `muspy.DEFAULT_RESOLUTION` (24)
Time steps per quarter note.
program : int, default: 0 (Acoustic Grand Piano)
Program number, according to General MIDI specification [1].
Valid values are 0 to 127.
is_drum : bool, default: False
Whether it is a percussion track.
use_start_end : bool, default: False
Whether to use 'start' and 'end' to encode the timing rather
than 'time' and 'duration'.
encode_velocity : bool, default: True
Whether to encode note velocities.
default_velocity : int, default: `muspy.DEFAULT_VELOCITY` (64)
Default velocity value to use when decoding. Only used when
`encode_velocity` is True.
Returns
-------
:class:`muspy.Music`
Decoded Music object.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if not np.issubdtype(array.dtype, np.integer):
raise TypeError("Array must be of type int.")
notes = []
velocity = default_velocity
for note_tuple in array:
if encode_velocity:
velocity = note_tuple[3]
if use_start_end:
duration = note_tuple[2] - note_tuple[0]
else:
duration = note_tuple[2]
notes.append(
Note(
time=int(note_tuple[0]),
pitch=int(note_tuple[1]),
duration=int(duration),
velocity=int(velocity),
)
)
# Sort the notes
notes.sort(key=attrgetter("time", "pitch", "duration", "velocity"))
# Create the Track and Music objects
track = Track(program=program, is_drum=is_drum, notes=notes)
music = Music(resolution=resolution, tracks=[track])
return music
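# Minimal usage sketch (made-up values): with use_start_end=False and
# encode_velocity=True, each row of the array is (time, pitch, duration, velocity).
#
#   import numpy as np
#   array = np.array([[0, 60, 12, 64],
#                     [12, 64, 12, 80]], dtype=int)
#   music = from_note_representation(array, resolution=24)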
|
e82241e65fb84027fc41baf24fa2e5fc151c6ecd
|
59864cbd213b5da6f50d6255b0a021564b3d5bd4
|
/challenges/Audio_Visualizer/poller/for-release/machine.py
|
130fe89207052dbb8b3abdc7dd6cc51f00c092d5
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
trailofbits/cb-multios
|
8af96a4fbc3b34644367faa135347f88e0e0d0a3
|
810d7b24b1f62f56ef49b148fe155b0d0629cad2
|
refs/heads/master
| 2023-09-05T03:56:20.229403
| 2022-12-27T15:47:54
| 2022-12-27T15:47:54
| 41,688,943
| 522
| 133
|
MIT
| 2023-06-29T02:47:13
| 2015-08-31T17:04:31
|
C
|
UTF-8
|
Python
| false
| false
| 6,524
|
py
|
machine.py
|
from generator.actions import Actions
import random
import string
import struct
#define MAX_MSGS 5
#define MAX_RCPT 16
#define MAX_ADDR_LEN 32
#define MAX_DATA 512
#define MAX_USER_LEN 64
#define MAX_PASS_LEN 64
#define MAX_LINE 64
def random_bad_input(max_size=10, min_size=2):
characters = string.letters + string.digits + ' .\t;:\'"?!@#$&()'
max_size = max_size if max_size >= min_size else min_size
return ''.join(random.choice(characters) for c in range(max_size)) + '\n'
def random_bad_choice(is_file_loaded):
characters = '34567890'
if(is_file_loaded):
return '0\n'
else:
return ''.join(random.choice(characters) + '\n')
def random_num(is_file_loaded):
characters = string.digits
return ''.join(random.choice(characters) + '\n')
def random_uwfc_file(well_formed):
MAX_NUM_SAMPLES = 2
#sample_rates = [8000, 11025, 16000, 22050, 32000, 44100, 48000]
# cb-test timeout is too low, so only allowing 2
# sample_rates = [8000, 11025]
sample_rates = [4000, 8000] # BMC - modified to use smaller sample rates, for faster testing
chunk_id_p = struct.pack('4s', "FFIR")
format_p = struct.pack('4s', "UWFC")
sub_chunk1_id_p = struct.pack('4s', "cgc ")
sub_chunk1_size = 16
sub_chunk1_size_p = struct.pack('i', sub_chunk1_size)
audio_format_p = struct.pack('h', 1)
num_channels = 1 if random.randint(0,1) else 2
num_channels_p = struct.pack('h', num_channels)
sample_rate = sample_rates[random.randint(0, len(sample_rates) - 1)]
sample_rate_p = struct.pack('i', sample_rate)
bits_per_sample = 8 if random.randint(0,1) else 16
bits_per_sample_p = struct.pack('h', bits_per_sample)
sub_chunk2_id_p = struct.pack('4s', "data")
byte_rate_p = struct.pack('i', sample_rate * num_channels * bits_per_sample/8)
block_align = num_channels * bits_per_sample/8
block_align_p = struct.pack('h', block_align)
sub_chunk2_size = random.randrange(0, sample_rate * MAX_NUM_SAMPLES, block_align)
sub_chunk2_size_p = struct.pack('i', sub_chunk2_size)
chunk_size_p = struct.pack('i', 4 + (8 + sub_chunk1_size) + (8 + sub_chunk2_size))
data_p = ''
for x in xrange(0, sub_chunk2_size, block_align):
for y in xrange(num_channels):
if(bits_per_sample == 8):
data_p += struct.pack('B', random.randint(0, 255))
elif(bits_per_sample == 16):
data_p += struct.pack('h', random.randint(-32768, 32767))
if not well_formed:
chunk_id_p = struct.pack('4s', "ffir")
uwfc_file = (chunk_id_p + chunk_size_p + format_p +
sub_chunk1_id_p + sub_chunk1_size_p + audio_format_p +
num_channels_p + sample_rate_p + byte_rate_p +
block_align_p + bits_per_sample_p + sub_chunk2_id_p +
sub_chunk2_size_p + data_p)
return uwfc_file
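# Field order of the generated buffer, mirroring the packing above (a WAV/RIFF-like
# header with reversed magic strings used by this challenge):
#   "FFIR" | chunk_size | "UWFC" | "cgc " | subchunk1_size=16 | audio_format=1 |
#   num_channels | sample_rate | byte_rate | block_align | bits_per_sample |
#   "data" | subchunk2_size | sample data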
def random_line(max_size=62, min_size=2):
characters = string.letters + string.digits + ' .\t;:\'"?!@#$&()'
max_size = max_size if max_size >= min_size else min_size
return ''.join(random.choice(characters) for c in range(max_size)) + '\n'
def random_alphanum(max_size, min_size=2):
alphanum = string.letters + string.digits
max_size = max_size if max_size >= min_size else min_size
return ''.join(random.choice(alphanum) for c in range(max_size))
class Visualizer(Actions):
num_buckets = 12
eq_delim = "|-----|" * num_buckets + "\n"
nf_menu = ( "1. Load File\n"
"2. Quit\n\n" )
fl_menu = ( "1. Load New File\n"
"2. Wave Visualizer\n"
"3. EQ Visualizer\n"
"4. Power Visualizer\n"
"5. Enable Low Pass Filter\n"
"6. Enable High Pass Filter\n"
"7. Disable Filters\n"
"8. Set visualization speed multiplier (0-9)\n"
"9. Quit" )
fl_menu_pcre = ( "--Current\ Track:\ Sampled\ @\d{4,5}Hz\ (?:8|16)\ bit,\ (?:Stereo|Mono)\ -\ Data\ Size=\d*\n"
"::Options:\ Filter\ Type=(?:None|Low\ Pass|High\ Pass)\ \|\ Visualization\ Slowdown\ Multiplier=[0-9]" )
wav_vis_pcre = "(?:\ *\*\ *\|\ *\*\ *\n|\ *\*\ *\n)*"
eq_vis_pcre = "(?:\ {3}(?:\|[\ \|=-]{5}\|){12}\n*)*"
pow_vis_pcre = "(?:=*]\n)*"
def start(self):
pass
def no_file(self):
self.state['file_loaded'] = False
self.read(delim='--NO FILE LOADED--\n', expect=self.nf_menu)
def bad_nf_choice(self):
self.write(random_bad_choice(self.state['file_loaded']))
self.read(delim='\n', expect="ERROR: Bad Choice. Select another option")
def bad_nf_input(self):
self.write(random_bad_input())
self.read(delim='\n', expect="ERROR: Bad Input. Valid Options: 1, 2")
def nf_quit(self):
self.write('2\n')
self.read(delim='\n', expect="EXITING...")
def load_file(self):
self.state['file_loaded'] = True
uwfc_file = random_uwfc_file(True)
self.write('1\n')
self.write(uwfc_file)
def file_loaded(self):
self.state['file_loaded'] = True
self.read(delim='\n\n', expect=self.fl_menu)
self.read(delim='::\n\n', expect=self.fl_menu_pcre, expect_format='pcre')
def bad_fl_choice(self):
self.write(random_bad_choice(self.state['file_loaded']))
self.read(delim='\n', expect="ERROR: Bad Choice. Select another option")
def bad_fl_input(self):
self.write(random_bad_input())
self.read(delim='\n', expect="ERROR: Bad Input. Valid Options: 1, 2, 3, 4, 5, 6, 7, 8, 9")
def wave_vis(self):
self.write('2\n')
self.read(delim='++++END VISUALIZATION++++\n\n', expect=self.wav_vis_pcre, expect_format='pcre')
def eq_vis(self):
self.write('3\n')
self.read(delim='++++END VISUALIZATION++++\n\n', expect=self.eq_vis_pcre, expect_format='pcre')
def pow_vis(self):
self.write('4\n')
self.read(delim='++++END VISUALIZATION++++\n\n', expect=self.pow_vis_pcre, expect_format='pcre')
def low_pass(self):
self.write('5\n')
def high_pass(self):
self.write('6\n')
def no_filter(self):
self.write('7\n')
def vis_speed(self):
self.write('8\n')
speed = random.choice('0123456789')
self.write(speed + '\n')
self.read(delim='\n', expect="Multiplier set to {0}\n".format(speed))
def quit(self):
self.write('9\n')
self.read(delim='\n', expect="EXITING...\n")
|
912fc8871e184458ecba751c81601a28a4294d35
|
140e90b1cdc47650cc88637d79e122d0695a8105
|
/pyrender/platforms/egl.py
|
ae2478d29c9a538c53ad83fa31f8e2277cd897c8
|
[
"MIT"
] |
permissive
|
mmatl/pyrender
|
2d7a92cab4e134a65f98990549d98ef3c63aa668
|
a59963ef890891656fd17c90e12d663233dcaa99
|
refs/heads/master
| 2023-07-10T17:31:50.861253
| 2022-04-30T19:40:43
| 2022-04-30T19:40:43
| 166,606,982
| 1,166
| 224
|
MIT
| 2023-04-06T08:09:08
| 2019-01-20T00:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,615
|
py
|
egl.py
|
import ctypes
import os
import OpenGL.platform
from .base import Platform
EGL_PLATFORM_DEVICE_EXT = 0x313F
EGL_DRM_DEVICE_FILE_EXT = 0x3233
def _ensure_egl_loaded():
plugin = OpenGL.platform.PlatformPlugin.by_name('egl')
if plugin is None:
raise RuntimeError("EGL platform plugin is not available.")
plugin_class = plugin.load()
plugin.loaded = True
# create instance of this platform implementation
plugin = plugin_class()
plugin.install(vars(OpenGL.platform))
_ensure_egl_loaded()
from OpenGL import EGL as egl
def _get_egl_func(func_name, res_type, *arg_types):
address = egl.eglGetProcAddress(func_name)
if address is None:
return None
proto = ctypes.CFUNCTYPE(res_type)
proto.argtypes = arg_types
func = proto(address)
return func
def _get_egl_struct(struct_name):
from OpenGL._opaque import opaque_pointer_cls
return opaque_pointer_cls(struct_name)
# These are not defined in PyOpenGL by default.
_EGLDeviceEXT = _get_egl_struct('EGLDeviceEXT')
_eglGetPlatformDisplayEXT = _get_egl_func('eglGetPlatformDisplayEXT', egl.EGLDisplay)
_eglQueryDevicesEXT = _get_egl_func('eglQueryDevicesEXT', egl.EGLBoolean)
_eglQueryDeviceStringEXT = _get_egl_func('eglQueryDeviceStringEXT', ctypes.c_char_p)
def query_devices():
if _eglQueryDevicesEXT is None:
raise RuntimeError("EGL query extension is not loaded or is not supported.")
num_devices = egl.EGLint()
success = _eglQueryDevicesEXT(0, None, ctypes.pointer(num_devices))
if not success or num_devices.value < 1:
return []
devices = (_EGLDeviceEXT * num_devices.value)() # array of size num_devices
success = _eglQueryDevicesEXT(num_devices.value, devices, ctypes.pointer(num_devices))
if not success or num_devices.value < 1:
return []
return [EGLDevice(devices[i]) for i in range(num_devices.value)]
def get_default_device():
# Fall back to not using query extension.
if _eglQueryDevicesEXT is None:
return EGLDevice(None)
return query_devices()[0]
def get_device_by_index(device_id):
if _eglQueryDevicesEXT is None and device_id == 0:
return get_default_device()
devices = query_devices()
if device_id >= len(devices):
raise ValueError('Invalid device ID ({}); only {} devices available'.format(device_id, len(devices)))
return devices[device_id]
class EGLDevice:
def __init__(self, display=None):
self._display = display
def get_display(self):
if self._display is None:
return egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
return _eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, self._display, None)
@property
def name(self):
if self._display is None:
return 'default'
name = _eglQueryDeviceStringEXT(self._display, EGL_DRM_DEVICE_FILE_EXT)
if name is None:
return None
return name.decode('ascii')
def __repr__(self):
return "<EGLDevice(name={})>".format(self.name)
class EGLPlatform(Platform):
"""Renders using EGL.
"""
def __init__(self, viewport_width, viewport_height, device: EGLDevice = None):
super(EGLPlatform, self).__init__(viewport_width, viewport_height)
if device is None:
device = get_default_device()
self._egl_device = device
self._egl_display = None
self._egl_context = None
def init_context(self):
_ensure_egl_loaded()
from OpenGL.EGL import (
EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE,
EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE,
EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER,
EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT,
EGL_NONE, EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT,
EGL_OPENGL_API, EGL_CONTEXT_MAJOR_VERSION,
EGL_CONTEXT_MINOR_VERSION,
EGL_CONTEXT_OPENGL_PROFILE_MASK,
EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT,
eglGetDisplay, eglInitialize, eglChooseConfig,
eglBindAPI, eglCreateContext, EGLConfig
)
from OpenGL import arrays
config_attributes = arrays.GLintArray.asArray([
EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
EGL_BLUE_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_DEPTH_SIZE, 24,
EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER,
EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
EGL_CONFORMANT, EGL_OPENGL_BIT,
EGL_NONE
])
context_attributes = arrays.GLintArray.asArray([
EGL_CONTEXT_MAJOR_VERSION, 4,
EGL_CONTEXT_MINOR_VERSION, 1,
EGL_CONTEXT_OPENGL_PROFILE_MASK,
EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT,
EGL_NONE
])
major, minor = ctypes.c_long(), ctypes.c_long()
num_configs = ctypes.c_long()
configs = (EGLConfig * 1)()
# Cache DISPLAY if necessary and get an off-screen EGL display
orig_dpy = None
if 'DISPLAY' in os.environ:
orig_dpy = os.environ['DISPLAY']
del os.environ['DISPLAY']
self._egl_display = self._egl_device.get_display()
if orig_dpy is not None:
os.environ['DISPLAY'] = orig_dpy
# Initialize EGL
assert eglInitialize(self._egl_display, major, minor)
assert eglChooseConfig(
self._egl_display, config_attributes, configs, 1, num_configs
)
# Bind EGL to the OpenGL API
assert eglBindAPI(EGL_OPENGL_API)
# Create an EGL context
self._egl_context = eglCreateContext(
self._egl_display, configs[0],
EGL_NO_CONTEXT, context_attributes
)
# Make it current
self.make_current()
def make_current(self):
from OpenGL.EGL import eglMakeCurrent, EGL_NO_SURFACE
assert eglMakeCurrent(
self._egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
self._egl_context
)
def make_uncurrent(self):
"""Make the OpenGL context uncurrent.
"""
pass
def delete_context(self):
from OpenGL.EGL import eglDestroyContext, eglTerminate
if self._egl_display is not None:
if self._egl_context is not None:
eglDestroyContext(self._egl_display, self._egl_context)
self._egl_context = None
eglTerminate(self._egl_display)
self._egl_display = None
def supports_framebuffers(self):
return True
__all__ = ['EGLPlatform']
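# Minimal usage sketch (viewport size and device index are illustrative): create an
# off-screen EGL platform bound to a specific device and initialize its GL context.
#
#   platform = EGLPlatform(640, 480, device=get_device_by_index(0))
#   platform.init_context()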
|
8b7959df4c968555e333913279fcc1d0ebcbc860
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/FWCore/Integration/test/ref_alias_compare_drop_original_cfg.py
|
c6ffa3f5715dd6229a48e5b4c63ba18b879fc3a6
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
ref_alias_compare_drop_original_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.source = cms.Source("EmptySource")
process.thing = cms.EDProducer("ThingProducer")
process.thingAlias = cms.EDAlias( thing = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings'),
fromProductInstance = cms.string('*'),
toProductInstance = cms.string('*'))))
process.otherThing1 = cms.EDProducer("OtherThingProducer",
thingTag=cms.InputTag("thing"))
process.otherThing2 = cms.EDProducer("OtherThingProducer",
thingTag=cms.InputTag("thingAlias"))
process.comparer = cms.EDAnalyzer("OtherThingRefComparer",
first = cms.untracked.InputTag("otherThing1:testUserTag"),
second = cms.untracked.InputTag("otherThing2:testUserTag")
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("ref_alias_drop_original.root"),
outputCommands = cms.untracked.vstring("keep *",
"drop *_thing_*_*")
)
process.p = cms.Path(process.thing+process.otherThing1+process.otherThing2+process.comparer)
process.o = cms.EndPath(process.out)
|
bea53f239bec18e6712a642e62da2a2be538149a
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/pygame/GUI- widgets-SGC/sgc3/widgets/_locals.py
|
7e261eae70c4ed6fc3af6d2a75e3e2d9bbe37211
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 13,593
|
py
|
_locals.py
|
# Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
"""
A collection of things for widgets to use. These can be imported with a
`from _locals import *` line.
Constants:
GUI: Widgets should use this for the event type of any events emitted.
get_screen(): Returns the screen object.
"""
import pygame.sprite
from pygame.locals import *
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except ImportError: pass
# Things for widgets to import
__all__ = ["GUI", "get_screen", "Font"]
# Event type
GUI = USEREVENT
SCREEN = None
get_screen = lambda: SCREEN
# Cursor queue for set_cursor() and remove_cursor()
cursors = []
# ----- EXTERNAL FUNCTIONS -----
def update(time):
"""Updates all active widgets or modal widgets each frame."""
def _fade(widget):
"""Fade widget."""
if widget._fade is not None:
widget.image.set_alpha(widget._fade)
if widget._fade_up:
widget._fade += time / 3.
else:
widget._fade -= time / 4.
if widget._fade <= 0:
# Remove after fading
widget.kill()
# Reset widget to be added again
widget._fade = None
elif widget._fade >= 255:
widget._fade = None
widget.image.set_alpha(255)
def draw_opengl(image, rect, alpha):
texture_data = pygame.image.tostring(image, "RGBA")
w,h = image.get_size()
tex = glGenTextures(1)
if alpha is None:
alpha = 255.
glColor(0,0,0, alpha/255.)
glBindTexture(GL_TEXTURE_2D, tex)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA,
GL_UNSIGNED_BYTE, texture_data)
glPushMatrix()
glTranslatef(rect.x, rect.y, 0)
glEnable(GL_TEXTURE_2D)
#glBindTexture(GL_TEXTURE_2D, tex)
glBegin(GL_QUADS)
glTexCoord2f(0, 0)
glVertex2f(0, 0)
glTexCoord2f(1, 0)
glVertex2f(w, 0)
glTexCoord2f(1, 1)
glVertex2f(w, h)
glTexCoord2f(0, 1)
glVertex2f(0, h)
glEnd()
glPopMatrix()
glDeleteTextures(tex)
def widget_image(w):
"""Blit extra images, handle transparency fades and blit to screen."""
copy = w.image.copy()
# Blit extra images onto copy
for img in map(lambda x: w._images[x], w._extra_images):
if img._show:
copy.blit(img.image, img.rect)
# Blend transparent surface when fading and blit to screen.
if w._fade is not None:
transparent = pygame.surface.Surface(w.rect.size, SRCALPHA)
transparent.fill((255,255,255, w._fade))
copy.blit(transparent, (0,0), special_flags=BLEND_RGBA_MULT)
return copy
if SCREEN._opengl:
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
w,h = SCREEN.get_size()
glOrtho(0, w, h, 0, 0, 1)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
glDisable(GL_LIGHTING)
glDisable(GL_DEPTH_TEST)
glEnable(GL_SCISSOR_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Update widgets
active_widgets.update(time)
for widget in active_widgets:
_fade(widget)
for w in active_widgets:
copy = widget_image(w)
if not SCREEN._opengl:
SCREEN.blit(copy, w.rect)
else:
draw_opengl(copy, w.rect, w._fade)
# Update layered widgets
layer_widgets.update(time)
for widget in layer_widgets:
_fade(widget)
for w in layer_widgets:
copy = widget_image(w)
if not SCREEN._opengl:
SCREEN.blit(copy, w.rect)
else:
draw_opengl(copy, w.rect, w._fade)
if SCREEN._opengl:
glDisable(GL_SCISSOR_TEST)
glPopMatrix()
def event(event):
"""Send event to focused widget and handle widget focus."""
# Special case gets priority over modal widgets (e.g. scroll handles)
for w in special_case:
if event.type == MOUSEBUTTONDOWN:
if w.rect.collidepoint(event.pos):
focus.add(2, w)
break
else:
focus.empty()
else:
if modal_widgets and not focus:
modal_widgets.sprites()[-1].add(0)
# Mouse focus
if event.type == MOUSEBUTTONDOWN:
if not modal_widgets:
hit = False
for widget_list in (reversed(layer_widgets.sprites()),
active_widgets):
for widget in widget_list:
# Check if user clicked a widget
if widget._can_focus and \
widget.rect.collidepoint(event.pos):
if event.button == 1:
focus.add(2, widget)
if widget in layer_widgets:
layer_widgets.move_to_front(widget)
elif 4 <= event.button <= 7:
widget._event(event)
hit = True
break
if hit: break
# Lose focus if clicking away from widgets
if not hit:
focus.empty()
# Keyboard focus
elif event.type == KEYDOWN and event.key == K_TAB:
if not modal_widgets and focus_order:
# Flattened focus_order
order = sum(focus_order,())
if focus.sprite not in order:
curr_num = None
else:
# Focus number for current focused widget
curr_num = order[order.index(focus.sprite)-1]
# Sorted list of the focus numbers being used
list_num = sorted(order[::2])
if not event.mod & KMOD_SHIFT: # Move focus to next widget
if curr_num is None:
# If nothing focused, focus first widget
new_num = list_num[0]
elif not focus.sprite._change_focus(True):
# Don't change when not at end of container widget
new_num = curr_num
elif list_num.index(curr_num) == len(list_num)-1:
# Jump back to first widget
new_num = list_num[0]
else:
# Next focus number in the list
new_num = list_num[list_num.index(curr_num)+1]
else: # Shift key - move focus to previous widget
if curr_num is None:
new_num = list_num[-1]
elif not focus.sprite._change_focus(False):
new_num = curr_num
elif list_num.index(curr_num) == 0:
# Jump back to last widget
new_num = list_num[len(list_num)-1]
else:
new_num = list_num[list_num.index(curr_num)-1]
if curr_num != new_num:
# Set widget at new focus number
focus.add(1, order[order.index(new_num)+1])
# Send event to focused widget
if focus:
focus.sprite._event(event)
# ----- FONTS -----
class _Font():
"""Wrapper class for font objects."""
__slots__ = ("_font",)
_font = None
def replace(self, font):
"""Replace the font in-place."""
self._font = font
def __getattr__(self, atr):
return getattr(self._font, atr)
def __nonzero__(self):
return True if self._font else False
class FontMetaclass(type):
"""Font metaclass to allow indexing of class."""
def __getitem__(cls, item):
return cls._fonts[item]
class Font():
"""
Class containing fonts available for use.
Index class to get fonts, such as ``Font["widget"]`` for the widget font.
The default fonts are:
widget: The default font for widgets.
title: A larger title font.
mono: A monospaced font.
Attributes:
col: (r,g,b) tuple, containing the default font colour.
"""
__metaclass__ = FontMetaclass
__slots__ = ("_fonts", "col")
_fonts = {"widget": _Font(), "title": _Font(), "mono": _Font()}
col = (255,255,255)
@classmethod
def set_fonts(cls, fonts={}):
"""
Set fonts to a specific font. If a font exists, it will be replaced,
otherwise it will be newly created.
Args:
fonts: Dictionary containing fonts to use.
Key should be name of font. Value should be string
naming either custom FreeType or a system font.
"""
for font in fonts:
if font not in cls._fonts:
cls._fonts[font] = _Font()
cls._fonts[font].replace(cls._create_font(fonts[font], 16))
if not cls._fonts["widget"]:
cls._fonts["widget"].replace(cls._create_font("Arial", 16))
if not cls._fonts["title"]:
name = fonts["widget"] if ("widget" in fonts) else "Arial"
cls._fonts["title"].replace(cls._create_font(name, 30))
if not cls._fonts["mono"]:
cls._fonts["mono"].replace(cls._create_font(
"Ubuntu Mono, FreeMono, Monospace", 16))
#if SCREEN._opengl:
# cls.mono_w = cls["mono"].font.Advance("e")
#else:
cls.mono_w = cls["mono"].render("e", False, (0,0,0)).get_width()
@classmethod
def _create_font(cls, font, size):
"""
Returns the correct font object for FreeType or system font, and
for OpenGL or Pygame.
"""
if font[-4:] in (".ttf", ".otf"):
return pygame.font.Font(font, size)
else:
return pygame.font.SysFont(font, size)
# ----- WIDGET GROUPS -----
class Focus(pygame.sprite.GroupSingle):
"""
Contains currently focused widget.
"""
def add(self, focus=0, *sprites):
"""Extend add to call _focus_exit and _focus_enter methods."""
if self.sprite: self.sprite._focus_exit()
pygame.sprite.GroupSingle.add(self, *sprites)
self.sprite._focus_enter(focus)
def empty(self):
"""Extend empty to call _focus_exit method."""
if self.sprite: self.sprite._focus_exit()
pygame.sprite.GroupSingle.empty(self)
# Widget groups
active_widgets = pygame.sprite.Group()
modal_widgets = pygame.sprite.OrderedUpdates()
layer_widgets = pygame.sprite.LayeredUpdates()
special_case = set()
# The widget that currently has focus
focus = Focus()
# Order the widgets should receive focus through TAB
focus_order = []
# ----- WIDGET FUNCTIONS -----
def add_widget(widget, order=None, grab_focus=False):
"""
Add widget to screen. Used by the base widget.
Args:
order: Integer representing the order widget should receive focus
when user presses TAB. The widget with the lowest order will
receive focus first, then moving up with increasing values.
Returns:
True if widget has been added. False if already added.
"""
added = False
# Add to group of active widgets
if widget not in active_widgets and not widget._layered:
active_widgets.add(widget)
added = True
if order is not None and widget._can_focus:
focus_order.append((order,widget))
# Add to layered group
elif widget._layered and widget not in layer_widgets:
layer_widgets.add(widget)
added = True
# Add to group of modal widgets
if widget._modal and widget not in modal_widgets:
modal_widgets.add(widget)
added = True
# Focus newly added modal widgets
if grab_focus is not False:
focus.add(grab_focus, widget)
elif widget._modal:
focus.add(0, widget)
return added
def remove_widget_order(widget):
"""Remove widget from focus order. Called by the base widget."""
order = sum(focus_order,())
if widget in order:
# Remove from focus_order
num = (order.index(widget)-1)/2
del focus_order[num]
def has_focus(widget):
"""Checks if a widget currently has focus."""
for group in widget.groups():
if isinstance(group, Focus):
return True
return False
def is_active(widget):
"""Checks if widget is onscreen."""
return widget in active_widgets or widget in layer_widgets
def set_cursor(widget, size, hotspot, xormasks, andmasks):
"""
Sets a cursor and adds to a queue.
Args:
widget: The widget that set the cursor, used as an ID in the queue.
size,hotspot,xormasks,andmasks: Arguments for pygame.mouse.set_cursor().
"""
if not cursors:
cursors.append((None, pygame.mouse.get_cursor()))
cursors.append((widget, (size, hotspot, xormasks, andmasks)))
pygame.mouse.set_cursor(size, hotspot, xormasks, andmasks)
def remove_cursor(widget):
"""
Removes the cursor set by widget and sets cursor to whichever cursor
is now at the end of the queue.
"""
for w, c in cursors:
if w == widget:
cursors.remove((w, c))
pygame.mouse.set_cursor(*cursors[-1][1])
if len(cursors) <= 1:
del cursors[:]
|
318a4c8b1961a21aacca13220883c672d63d64c0
|
a9ed175e2cf09ea29fe72b90dc4186ff9515c9b0
|
/tests/integration/suite/test_password_store.py
|
96f0709faef3df5d4759a37f7152aa889c176d7e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
rancher/rancher
|
96d67163506fa4ab219dca4d86d9bfebacb68a6d
|
90fc68836fd8d1bd2996283500f954ace40203ba
|
refs/heads/release/v2.8
| 2023-08-29T06:23:12.480919
| 2023-08-28T21:35:36
| 2023-08-28T21:35:36
| 26,337,322
| 21,773
| 3,508
|
Apache-2.0
| 2023-09-14T19:51:31
| 2014-11-07T20:49:31
|
Go
|
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
test_password_store.py
|
from kubernetes.client import CustomObjectsApi
from kubernetes.client import CoreV1Api
from kubernetes.client.rest import ApiException
from .common import random_str
import base64
group = 'management.cattle.io'
version = 'v3'
namespace = 'local'
plural = 'clusterloggings'
clusterId = "local"
globalNS = "cattle-global-data"
def verifyPassword(crdClient, k8sclient, ns, name, secretPassword):
k8es = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
secretName = k8es['spec']['elasticsearchConfig']['authPassword']
ns, name = secretName.split(":")
assert ns is not None
assert name is not None
secret = k8sclient.read_namespaced_secret(name, ns)
assert base64.b64decode(secret.data[name]).\
decode("utf-8") == secretPassword
def verifyPasswords(crdClient, k8sclient, ns, name, fluentdServers):
k8fs = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
servers = k8fs['spec']['fluentForwarderConfig']['fluentServers']
for ind, server in enumerate(fluentdServers):
secretName = servers[ind]['password']
ns, name = secretName.split(":")
assert ns is not None
assert name is not None
secret = k8sclient.read_namespaced_secret(name, ns)
assert base64.b64decode(secret.data[name]).\
decode("utf-8") == server['password']
def checkSecret(crdClient, k8sclient, ns, name, es, client, func):
k8es = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
secretName = k8es['spec']['elasticsearchConfig']['authPassword']
ns, name = secretName.split(":")
func(client, es)
try:
k8sclient.read_namespaced_secret(name, ns)
except ApiException as e:
assert e.status == 404
def checkSecrets(crdClient, k8sclient, ns, name, fs, client, func):
k8fs = crdClient.get_namespaced_custom_object(
group, version, namespace, plural, name)
servers = k8fs['spec']['fluentForwarderConfig']['fluentServers']
secretNames = []
for ind, server in enumerate(servers):
secretName = server['password']
ns, name = secretName.split(":")
secretNames.append(name)
func(client, fs)
for secretName in secretNames:
try:
            k8sclient.read_namespaced_secret(secretName, globalNS)
except ApiException as e:
assert e.status == 404
def getClients(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client), \
CoreV1Api(admin_mc.k8s_client)
def upFuncFluentd(client, fs):
try:
fs = client.update(fs, fluentForwarderConfig=None)
except ApiException as e:
assert e is None
def upFuncElastic(client, es):
try:
es = client.update(es, elasticsearchConfig=None)
except ApiException as e:
assert e is None
def deleteFunc(client, obj):
client.delete(obj)
def getFluentdServers():
return [{
"endpoint": "192.168.1.10:87",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
},
{
"endpoint": "192.168.1.10:89",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
},
{
"endpoint": "192.168.2.10:86",
"standby": False,
"username": random_str(),
"weight": 100,
"password": random_str()
}]
|
06f9b2142cfaf056cb528b67c0cf25fe6b30ee41
|
e726481820c477c2bcfcc2b03da3feda30b0beaf
|
/simplenote_cli/view_note.py
|
37d6fbd90b7b736fa9fdb5ed29516442fd4b8d1f
|
[
"MIT"
] |
permissive
|
insanum/sncli
|
06c759e2893cdec5d5d8646043242d24a227696d
|
0276a8b5de718b2f023f8ec574c98763bd96ce23
|
refs/heads/master
| 2023-08-29T13:38:17.125943
| 2023-06-19T00:35:55
| 2023-06-19T00:35:55
| 21,809,114
| 413
| 48
|
MIT
| 2023-02-28T19:04:24
| 2014-07-14T05:58:57
|
Python
|
UTF-8
|
Python
| false
| false
| 8,936
|
py
|
view_note.py
|
# Copyright (c) 2014 Eric Davis
# Licensed under the MIT License
import time, urwid
from . import utils
import re
from .clipboard import Clipboard
import logging
class ViewNote(urwid.ListBox):
def __init__(self, config, args):
self.config = config
self.ndb = args['ndb']
self.key = args['key']
self.log = args['log']
self.search_string = ''
self.search_mode = 'gstyle'
self.search_direction = ''
self.note = self.ndb.get_note(self.key) if self.key else None
self.old_note = None
self.tabstop = int(self.config.get_config('tabstop'))
self.clipboard = Clipboard()
super(ViewNote, self).__init__(
urwid.SimpleFocusListWalker(self.get_note_content_as_list()))
def get_note_content_as_list(self):
lines = []
if not self.key:
return lines
if self.old_note:
for l in utils.get_note_lines(self.old_note):
lines.append(
urwid.AttrMap(urwid.Text(l.replace('\t', ' ' * self.tabstop)),
'note_content_old',
'note_content_old_focus'))
else:
for l in utils.get_note_lines(self.note):
lines.append(
urwid.AttrMap(urwid.Text(l.replace('\t', ' ' * self.tabstop)),
'note_content',
'note_content_focus'))
lines.append(urwid.AttrMap(urwid.Divider('-'), 'default'))
return lines
def update_note_view(self, key=None, version=None):
if key: # setting a new note
self.key = key
self.note = self.ndb.get_note(self.key)
self.old_note = None
if self.key and version:
# verify version is within range
if int(version) <= 0 or int(version) >= self.note['version'] + 1:
self.log('Version v{0} is unavailable (key={1})'.
format(version, self.key))
return
if (not version and self.old_note) or \
(self.key and version and version == self.note['version']):
self.log('Displaying latest version v{0} of note (key={1})'.
format(self.note['version'], self.key))
self.old_note = None
elif self.key and version:
# get a previous version of the note
self.log('Fetching version v{0} of note (key={1})'.
format(version, self.key))
version_note = self.ndb.get_note_version(self.key, version)
if not version_note:
self.log('Failed to get version v{0} of note (key={1})'.
format(version, self.key))
# don't do anything, keep current note/version
else:
self.old_note = version_note
self.body[:] = \
urwid.SimpleFocusListWalker(self.get_note_content_as_list())
if not self.search_string:
self.focus_position = 0
def lines_after_current_position(self):
lines_after_current_position = list(range(self.focus_position + 1, len(self.body.positions()) - 1))
return lines_after_current_position
def lines_before_current_position(self):
lines_before_current_position = list(range(0, self.focus_position))
lines_before_current_position.reverse()
return lines_before_current_position
def search_note_view_next(self, search_string=None, search_mode=None):
if search_string:
self.search_string = search_string
if search_mode:
self.search_mode = search_mode
note_range = self.lines_after_current_position() if self.search_direction == 'forward' else self.lines_before_current_position()
self.search_note_range(note_range)
def search_note_view_prev(self, search_string=None, search_mode=None):
if search_string:
self.search_string = search_string
if search_mode:
self.search_mode = search_mode
note_range = self.lines_after_current_position() if self.search_direction == 'backward' else self.lines_before_current_position()
self.search_note_range(note_range)
def search_note_range(self, note_range):
note_lines = utils.get_note_lines(self.note)
for line in note_range:
line_content = note_lines[line]
if (self.is_match(self.search_string, line_content)):
self.focus_position = line
break
self.update_note_view()
def is_match(self, term, full_text):
if self.search_mode == 'gstyle':
return term in full_text
else:
sspat = utils.build_regex_search(term)
return sspat and sspat.search(full_text)
def get_status_bar(self):
if not self.key:
return \
urwid.AttrMap(urwid.Text('No note...'),
'status_bar')
cur = -1
total = 0
if len(self.body.positions()) > 0:
cur = self.focus_position
total = len(self.body.positions())
if self.old_note:
t = time.localtime(float(self.old_note['modificationDate']))
title = utils.get_note_title(self.old_note)
version = self.old_note['version']
else:
t = time.localtime(float(self.note['modificationDate']))
title = utils.get_note_title(self.note)
flags = utils.get_note_flags(self.note)
tags = utils.get_note_tags(self.note)
version = self.note.get('version', 0)
mod_time = time.strftime('Date: %a, %d %b %Y %H:%M:%S', t)
status_title = \
urwid.AttrMap(urwid.Text('Title: ' +
title,
wrap='clip'),
'status_bar')
status_key_index = \
('pack', urwid.AttrMap(urwid.Text(' [' +
self.key +
'] ' +
str(cur + 1) +
'/' +
str(total)),
'status_bar'))
status_date = \
urwid.AttrMap(urwid.Text(mod_time,
wrap='clip'),
'status_bar')
if self.old_note:
status_tags_flags = \
('pack', urwid.AttrMap(urwid.Text('[OLD:v' +
str(version) +
']'),
'status_bar'))
else:
status_tags_flags = \
('pack', urwid.AttrMap(urwid.Text('[' +
tags +
'] [v' +
str(version) +
'] [' +
flags +
']'),
'status_bar'))
pile_top = urwid.Columns([ status_title, status_key_index ])
pile_bottom = urwid.Columns([ status_date, status_tags_flags ])
if self.old_note or \
not (utils.note_published(self.note) and 'publishkey' in self.note):
return urwid.AttrMap(urwid.Pile([ pile_top, pile_bottom ]),
'status_bar')
pile_publish = \
urwid.AttrMap(urwid.Text('Published: http://simp.ly/publish/' +
self.note['publishkey']),
'status_bar')
return \
urwid.AttrMap(urwid.Pile([ pile_top, pile_bottom, pile_publish ]),
'status_bar')
def copy_note_text(self):
line_content = utils.get_note_lines(self.note)[self.focus_position]
self.clipboard.copy(line_content)
def keypress(self, size, key):
if key == self.config.get_keybind('tabstop2'):
self.tabstop = 2
self.body[:] = \
urwid.SimpleFocusListWalker(self.get_note_content_as_list())
elif key == self.config.get_keybind('tabstop4'):
self.tabstop = 4
self.body[:] = \
urwid.SimpleFocusListWalker(self.get_note_content_as_list())
elif key == self.config.get_keybind('tabstop8'):
self.tabstop = 8
self.body[:] = \
urwid.SimpleFocusListWalker(self.get_note_content_as_list())
else:
return key
return None
|
963fc73cb18d787513ae872e285e36502b4470a5
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/hall_of_fame/kimdonghun/[BOJ]2525_OvenClock.py
|
f48978ad68d3b5f58ced15deeb4653d8d183de13
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 196
|
py
|
[BOJ]2525_OvenClock.py
|
import sys
A,B = map(int, sys.stdin.readline().split())
C = int(sys.stdin.readline())
min = B + C
hour_add = min // 60
min %= 60
hour = A + hour_add
hour %= 24
print(hour, min)
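# Worked example with hypothetical input: A=10, B=30, C=80 -> B+C = 110 minutes
# -> carry 1 hour, 50 minutes left -> prints "11 50". The hour wraps modulo 24,
# so A=23, B=40, C=30 would print "0 10".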
|
78b5a02c11f8adfb4999b42b0d2f0caeed1d631f
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/utils/dockermod/translate/network.py
|
a49f312ec59390d02391455d80593d1ef594f74f
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,143
|
py
|
network.py
|
"""
Functions to translate input for network creation
"""
from salt.exceptions import SaltInvocationError
from . import helpers
ALIASES = {
"driver_opt": "options",
"driver_opts": "options",
"ipv6": "enable_ipv6",
}
IPAM_ALIASES = {
"ip_range": "iprange",
"aux_address": "aux_addresses",
}
# ALIASES is a superset of IPAM_ALIASES
ALIASES.update(IPAM_ALIASES)
ALIASES_REVMAP = {y: x for x, y in ALIASES.items()}
DEFAULTS = {"check_duplicate": True}
def _post_processing(
kwargs, skip_translate, invalid
): # pylint: disable=unused-argument
"""
Additional network-specific post-translation processing
"""
    # If any defaults were not explicitly passed, add them
for item in DEFAULTS:
if item not in kwargs:
kwargs[item] = DEFAULTS[item]
# Functions below must match names of docker-py arguments
def driver(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_str(val)
def options(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_key_val(val, delimiter="=")
def ipam(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_dict(val)
def check_duplicate(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_bool(val)
def internal(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_bool(val)
def labels(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_labels(val)
def enable_ipv6(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_bool(val)
def attachable(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_bool(val)
def ingress(val, **kwargs): # pylint: disable=unused-argument
return helpers.translate_bool(val)
# IPAM args
def ipam_driver(val, **kwargs): # pylint: disable=unused-argument
return driver(val, **kwargs)
def ipam_opts(val, **kwargs): # pylint: disable=unused-argument
return options(val, **kwargs)
def ipam_pools(val, **kwargs): # pylint: disable=unused-argument
if not hasattr(val, "__iter__") or not all(isinstance(x, dict) for x in val):
# Can't do a simple dictlist check because each dict may have more than
# one element.
raise SaltInvocationError("ipam_pools must be a list of dictionaries")
skip_translate = kwargs.get("skip_translate", ())
if not (skip_translate is True or "ipam_pools" in skip_translate):
_globals = globals()
for ipam_dict in val:
for key in list(ipam_dict):
if skip_translate is not True and key in skip_translate:
continue
if key in IPAM_ALIASES:
# Make sure we resolve aliases, since this wouldn't have
# been done within the individual IPAM dicts
ipam_dict[IPAM_ALIASES[key]] = ipam_dict.pop(key)
key = IPAM_ALIASES[key]
if key in _globals:
ipam_dict[key] = _globals[key](ipam_dict[key])
return val
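# For example, a pool given as the hypothetical {"ip_range": "10.0.0.0/24",
# "gateway": "10.0.0.1"} comes back with the alias resolved and both values
# validated: {"iprange": "10.0.0.0/24", "gateway": "10.0.0.1"}.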
def subnet(val, **kwargs): # pylint: disable=unused-argument
validate_ip_addrs = kwargs.get("validate_ip_addrs", True)
val = helpers.translate_str(val)
if validate_ip_addrs:
helpers.validate_subnet(val)
return val
def iprange(val, **kwargs): # pylint: disable=unused-argument
validate_ip_addrs = kwargs.get("validate_ip_addrs", True)
val = helpers.translate_str(val)
if validate_ip_addrs:
helpers.validate_subnet(val)
return val
def gateway(val, **kwargs): # pylint: disable=unused-argument
validate_ip_addrs = kwargs.get("validate_ip_addrs", True)
val = helpers.translate_str(val)
if validate_ip_addrs:
helpers.validate_ip(val)
return val
def aux_addresses(val, **kwargs): # pylint: disable=unused-argument
validate_ip_addrs = kwargs.get("validate_ip_addrs", True)
val = helpers.translate_key_val(val, delimiter="=")
if validate_ip_addrs:
for address in val.values():
helpers.validate_ip(address)
return val
|
5ead7b2e6b8f6a47b5854c4f0a6edd0f8051e7db
|
6d7e44292e34bbc5e8cbc0eb9e9b264c0b498c5d
|
/modules/sfp_gravatar.py
|
6b42f780410fd37fac07613f1ba4ee70b0f3edee
|
[
"Python-2.0",
"MIT"
] |
permissive
|
smicallef/spiderfoot
|
69585266dad860d3230d3ce7b801e34eeb359f90
|
6e8e6a8277ea251fdd62a0946268f5dfe9162817
|
refs/heads/master
| 2023-08-28T09:40:10.136780
| 2023-08-18T05:47:39
| 2023-08-18T05:47:39
| 4,165,675
| 10,620
| 2,130
|
MIT
| 2023-09-13T08:18:31
| 2012-04-28T07:10:13
|
Python
|
UTF-8
|
Python
| false
| false
| 7,361
|
py
|
sfp_gravatar.py
|
# -------------------------------------------------------------------------------
# Name: sfp_gravatar
# Purpose: SpiderFoot plug-in to search Gravatar API for an email address
# and retrieve user information, including username, name, phone
# numbers, additional email addresses, and social media usernames.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-05-26
# Copyright: (c) bcoles 2019
# Licence: MIT
# -------------------------------------------------------------------------------
import hashlib
import json
import time
from spiderfoot import SpiderFootEvent, SpiderFootHelpers, SpiderFootPlugin
class sfp_gravatar(SpiderFootPlugin):
meta = {
'name': "Gravatar",
'summary': "Retrieve user information from Gravatar API.",
'flags': [],
'useCases': ["Footprint", "Investigate", "Passive"],
'categories': ["Social Media"],
'dataSource': {
'website': "https://secure.gravatar.com/",
'model': "FREE_NOAUTH_UNLIMITED",
'references': [
"https://secure.gravatar.com/site/implement/"
],
'favIcon': "https://secure.gravatar.com/favicon.ico",
'logo': "https://secure.gravatar.com/favicon.ico",
'description': "Your Gravatar is an image that follows you from site to site "
"appearing beside your name when you do things like comment or post on a blog.\n"
"A Gravatar is a Globally Recognized Avatar. You upload it and create your profile just once, "
"and then when you participate in any Gravatar-enabled site, your Gravatar image will automatically follow you there.",
}
}
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = None
reportedUsers = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.reportedUsers = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ['EMAILADDR']
# What events this module produces
def producedEvents(self):
return ['RAW_RIR_DATA', 'USERNAME',
'EMAILADDR', 'EMAILADDR_GENERIC', 'PHONE_NUMBER', 'GEOINFO',
'ACCOUNT_EXTERNAL_OWNED', 'SOCIAL_MEDIA']
# Query Gravatar API for the specified email address
# https://secure.gravatar.com/site/implement/
# https://secure.gravatar.com/site/implement/profiles/
def query(self, qry):
email_hash = hashlib.md5(qry.encode('utf-8', errors='replace').lower()).hexdigest() # noqa: DUO130
output = 'json'
res = self.sf.fetchUrl("https://secure.gravatar.com/" + email_hash + '.' + output,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'])
time.sleep(1)
if res['content'] is None:
self.debug('No response from gravatar.com')
return None
if res['code'] != '200':
return None
try:
data = json.loads(res['content'])
except Exception as e:
self.debug(f"Error processing JSON response: {e}")
return None
if data.get('entry') is None or len(data.get('entry')) == 0:
return None
return data.get('entry')[0]
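    # Illustrative request shape: for a hypothetical address "User@Example.com",
    # the lower-cased string is MD5-hashed and the profile is fetched from
    # https://secure.gravatar.com/<md5 hex digest>.json; a JSON body with a
    # non-empty "entry" list yields entry[0], anything else returns None.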
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return
self.results[eventData] = True
self.debug(f"Received event, {eventName}, from {srcModuleName}")
data = self.query(eventData)
if data is None:
self.debug("No user information found for " + eventData)
return
evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
self.notifyListeners(evt)
if data.get('preferredUsername') is not None:
un = data.get('preferredUsername')
evt = SpiderFootEvent("USERNAME", un, self.__name__, event)
self.notifyListeners(evt)
self.reportedUsers[un] = True
names = list()
if data.get('name') is not None:
if type(data.get('name')) != list:
names.append(data.get('name'))
else:
names = data.get('name')
for name in names:
full_name = name.get('formatted')
if full_name:
evt = SpiderFootEvent("RAW_RIR_DATA", f"Possible full name: {full_name}", self.__name__, event)
self.notifyListeners(evt)
# TODO: re-enable once location validation is implemented
# location can not be trusted
# if data.get('currentLocation') is not None:
# location = data.get('currentLocation')
# if len(location) < 3 or len(location) > 100:
# self.debug("Skipping likely invalid location.")
# else:
# evt = SpiderFootEvent("GEOINFO", location, self.__name__, event)
# self.notifyListeners(evt)
if data.get('phoneNumbers') is not None:
for number in data.get('phoneNumbers'):
if number.get('value') is not None:
evt = SpiderFootEvent("PHONE_NUMBER", number.get('value'), self.__name__, event)
self.notifyListeners(evt)
if data.get('emails') is not None:
for email in data.get('emails'):
em = email.get('value')
if not em:
continue
if SpiderFootHelpers.validEmail(em) and em != eventData:
if em.split("@")[0] in self.opts['_genericusers'].split(","):
evttype = "EMAILADDR_GENERIC"
else:
evttype = "EMAILADDR"
evt = SpiderFootEvent(evttype, em, self.__name__, event)
self.notifyListeners(evt)
if data.get('ims') is not None:
for im in data.get('ims'):
v = im.get('value')
if v is None:
continue
t = im.get('type').capitalize() + " (Instant Messenger)\n" + v
evt = SpiderFootEvent("ACCOUNT_EXTERNAL_OWNED", t, self.__name__, event)
self.notifyListeners(evt)
if v not in self.reportedUsers:
evt = SpiderFootEvent("USERNAME", v, self.__name__, event)
self.notifyListeners(evt)
self.reportedUsers[v] = True
if data.get('accounts') is not None:
for account in data.get('accounts'):
url = account.get('url')
platform = account.get('shortname')
if platform is not None and url is not None:
t = platform.capitalize() + ": <SFURL>" + url + "</SFURL>"
evt = SpiderFootEvent("SOCIAL_MEDIA", t, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_gravatar class
|
36a6cdff795ae65a19dd49610796f51a264d69bb
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/core/azure-core/azure/core/rest/_aiohttp.py
|
64833e311c8624b2b1a4f9228647db21d11f233b
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 7,782
|
py
|
_aiohttp.py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import collections.abc
import asyncio
from itertools import groupby
from typing import Iterator, cast
from multidict import CIMultiDict
from ._http_response_impl_async import (
AsyncHttpResponseImpl,
AsyncHttpResponseBackcompatMixin,
)
from ..pipeline.transport._aiohttp import AioHttpStreamDownloadGenerator
from ..utils._pipeline_transport_rest_shared import _pad_attr_name, _aiohttp_body_helper
from ..exceptions import ResponseNotReadError
class _ItemsView(collections.abc.ItemsView):
def __init__(self, ref):
super().__init__(ref)
self._ref = ref
def __iter__(self):
for key, groups in groupby(self._ref.__iter__(), lambda x: x[0]):
yield tuple([key, ", ".join(group[1] for group in groups)])
def __contains__(self, item):
if not (isinstance(item, (list, tuple)) and len(item) == 2):
return False
for k, v in self.__iter__():
if item[0].lower() == k.lower() and item[1] == v:
return True
return False
def __repr__(self):
return f"dict_items({list(self.__iter__())})"
class _KeysView(collections.abc.KeysView):
def __init__(self, items):
super().__init__(items)
self._items = items
def __iter__(self) -> Iterator[str]:
for key, _ in self._items:
yield key
def __contains__(self, key):
try:
for k in self.__iter__():
if cast(str, key).lower() == k.lower():
return True
except AttributeError: # Catch "lower()" if key not a string
pass
return False
def __repr__(self) -> str:
return f"dict_keys({list(self.__iter__())})"
class _ValuesView(collections.abc.ValuesView):
def __init__(self, items):
super().__init__(items)
self._items = items
def __iter__(self):
for _, value in self._items:
yield value
def __contains__(self, value):
for v in self.__iter__():
if value == v:
return True
return False
def __repr__(self):
return f"dict_values({list(self.__iter__())})"
class _CIMultiDict(CIMultiDict):
"""Dictionary with the support for duplicate case-insensitive keys."""
def __iter__(self):
return iter(self.keys())
def keys(self):
"""Return a new view of the dictionary's keys.
:return: A new view of the dictionary's keys
:rtype: ~collections.abc.KeysView
"""
return _KeysView(self.items())
def items(self):
"""Return a new view of the dictionary's items.
:return: A new view of the dictionary's items
:rtype: ~collections.abc.ItemsView
"""
return _ItemsView(super().items())
def values(self):
"""Return a new view of the dictionary's values.
:return: A new view of the dictionary's values
:rtype: ~collections.abc.ValuesView
"""
return _ValuesView(self.items())
def __getitem__(self, key: str) -> str:
return ", ".join(self.getall(key, []))
def get(self, key, default=None):
values = self.getall(key, None)
if values:
values = ", ".join(values)
return values or default
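# A minimal behaviour sketch for the header views above, using assumed values:
#   h = _CIMultiDict([("Set-Cookie", "a=1"), ("Set-Cookie", "b=2")])
#   h["set-cookie"]                          -> "a=1, b=2"
#   ("Set-Cookie", "a=1, b=2") in h.items()  -> True
#   list(h.keys())                           -> ["Set-Cookie"]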
class _RestAioHttpTransportResponseBackcompatMixin(AsyncHttpResponseBackcompatMixin):
"""Backcompat mixin for aiohttp responses.
    Need to add its own mixin because it has the function load_body, which other
transport responses don't have, and also because we need to synchronously
decompress the body if users call .body()
"""
def body(self) -> bytes:
"""Return the whole body as bytes in memory.
Have to modify the default behavior here. In AioHttp, we do decompression
when accessing the body method. The behavior here is the same as if the
caller did an async read of the response first. But for backcompat reasons,
we need to support this decompression within the synchronous body method.
:return: The response's bytes
:rtype: bytes
"""
return _aiohttp_body_helper(self)
async def _load_body(self) -> None:
"""Load in memory the body, so it could be accessible from sync methods."""
self._content = await self.read() # type: ignore
def __getattr__(self, attr):
backcompat_attrs = ["load_body"]
attr = _pad_attr_name(attr, backcompat_attrs)
return super().__getattr__(attr)
class RestAioHttpTransportResponse(AsyncHttpResponseImpl, _RestAioHttpTransportResponseBackcompatMixin):
def __init__(self, *, internal_response, decompress: bool = True, **kwargs):
headers = _CIMultiDict(internal_response.headers)
super().__init__(
internal_response=internal_response,
status_code=internal_response.status,
headers=headers,
content_type=headers.get("content-type"),
reason=internal_response.reason,
stream_download_generator=AioHttpStreamDownloadGenerator,
content=None,
**kwargs,
)
self._decompress = decompress
self._decompressed_content = False
def __getstate__(self):
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["_internal_response"] = None # aiohttp response are not pickable (see headers comments)
state["headers"] = CIMultiDict(self.headers) # MultiDictProxy is not pickable
return state
@property
def content(self) -> bytes:
"""Return the response's content in bytes.
:return: The response's content in bytes
:rtype: bytes
"""
if self._content is None:
raise ResponseNotReadError(self)
return _aiohttp_body_helper(self)
async def read(self) -> bytes:
"""Read the response's bytes into memory.
:return: The response's bytes
:rtype: bytes
"""
if not self._content:
self._stream_download_check()
self._content = await self._internal_response.read()
await self._set_read_checks()
return _aiohttp_body_helper(self)
async def close(self) -> None:
"""Close the response.
:return: None
:rtype: None
"""
if not self.is_closed:
self._is_closed = True
self._internal_response.close()
await asyncio.sleep(0)
|
cc0cc5342da1d3edd5b80466c36c553bd5456734
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/test/testdata/transform/propagate_cast/gen_propagate_cast.py
|
dca1cbaf1d3d7174b5a4f73b7d254fb5b6c3ff9a
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 15,042
|
py
|
gen_propagate_cast.py
|
import itertools
import numpy as np
import onnx
from onnx import OperatorSetIdProto, TensorProto, helper
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
# The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
onnxdomain.domain = ""
msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"
opsets = [onnxdomain, msdomain]
# expect type to be either TensorProto.FLOAT or TensorProto.FLOAT16
def type_to_string(type):
return "float" if type == TensorProto.FLOAT else "float16"
def save(model_path, nodes, inputs, outputs, initializers):
graph = helper.make_graph(nodes, "CastPropagateTest", inputs, outputs, initializers)
model = helper.make_model(graph, opset_imports=opsets, producer_name="onnxruntime-test")
onnx.save(model, model_path + ".onnx")
def gen_fuse_back2back_casts(model_path):
for type1, type2 in list(itertools.product([TensorProto.FLOAT, TensorProto.FLOAT16], repeat=2)):
nodes = [
helper.make_node("MatMul", ["input_0", "input_1"], ["product"], "MatMul_0"),
helper.make_node("Cast", ["product"], ["product_cast"], "Cast_0", to=type1),
helper.make_node("Cast", ["product_cast"], ["output"], "Cast_1", to=type2),
]
input_type = (
type2 if type1 != type2 else (TensorProto.FLOAT16 if type1 == TensorProto.FLOAT else TensorProto.FLOAT)
)
output_type = (
input_type
if type1 != type2
else (TensorProto.FLOAT16 if input_type == TensorProto.FLOAT else TensorProto.FLOAT)
)
inputs = [
helper.make_tensor_value_info("input_0", input_type, ["M", "K"]),
helper.make_tensor_value_info("input_1", input_type, ["K", "N"]),
]
outputs = [
helper.make_tensor_value_info("output", output_type, ["M", "N"]),
]
save(
model_path + "_" + type_to_string(type1) + "_" + type_to_string(type2),
nodes,
inputs,
outputs,
[],
)
def gen_fuse_sibling_casts(model_path):
for type1, type2 in list(itertools.product([TensorProto.FLOAT, TensorProto.FLOAT16], repeat=2)):
input_type = (
type2 if type1 != type2 else (TensorProto.FLOAT16 if type1 == TensorProto.FLOAT else TensorProto.FLOAT)
)
nodes = [
helper.make_node("MatMul", ["input_0", "input_1"], ["product"], "MatMul_0"),
helper.make_node("Cast", ["product"], ["cast_0_output"], "Cast_0", to=type1),
helper.make_node("Identity", ["cast_0_output"], ["output_0"], "Identity_0"),
helper.make_node("Cast", ["product"], ["cast_1_output"], "Cast_1", to=type2),
helper.make_node("Identity", ["cast_1_output"], ["output_1"], "Identity_1"),
]
inputs = [
helper.make_tensor_value_info("input_0", input_type, ["M", "K"]),
helper.make_tensor_value_info("input_1", input_type, ["K", "N"]),
]
outputs = [
helper.make_tensor_value_info("output_0", type1, ["M", "N"]),
helper.make_tensor_value_info("output_1", type2, ["M", "N"]),
]
save(
model_path + "_" + type_to_string(type1) + "_" + type_to_string(type2),
nodes,
inputs,
outputs,
[],
)
def flip_type(type, flip=True):
return (TensorProto.FLOAT16 if type == TensorProto.FLOAT else TensorProto.FLOAT) if flip else type
def do_cast_inputs(input_0, input_1, nodes, input_cast_type):
nodes.extend(
[
helper.make_node("Cast", [input_0], ["cast_" + input_0], "Cast_0", to=input_cast_type),
helper.make_node("Cast", [input_1], ["cast_" + input_1], "Cast_1", to=input_cast_type),
]
)
return "cast_" + input_0, "cast_" + input_1
def do_transpose_inputs(input_0, input_1, nodes):
nodes.extend(
[
helper.make_node("Transpose", [input_0], ["input_transpose_0"], "Transpose_0"),
helper.make_node("Transpose", [input_1], ["input_transpose_1"], "Transpose_1"),
]
)
return "input_transpose_0", "input_transpose_1"
def do_cast_product(product, nodes, product_type):
nodes.insert(
1,
helper.make_node("Cast", [product], [product + "_cast"], "Cast_2", to=product_type),
)
return product + "_cast"
def do_transpose_product(product, nodes):
if transpose_product:
nodes.append(helper.make_node("Transpose", [product], [product + "_transpose"], "Transpose_2"))
return product + "_transpose"
def do_cast_sum(sum, nodes, type):
nodes.append(helper.make_node("Cast", [sum], ["cast_" + sum], "Cast_3", to=type))
return "cast_" + sum
def do_cast_input2(input_2, nodes, type):
nodes.append(helper.make_node("Cast", [input_2], ["cast_" + input_2], "Cast_4", to=type))
return "cast_" + input_2
def gen_propagate_cast_test_model(
model_path,
transpose_inputs,
transpose_product,
cast_inputs,
cast_product,
insert_add,
cast_sum,
cast_input2,
transpose_inputs_before_cast=False,
):
input_0 = "input_0"
input_1 = "input_1"
product = "product"
nodes = []
input_type = TensorProto.FLOAT
product_type = input_type
if transpose_inputs_before_cast:
if transpose_inputs:
input_0, input_1 = do_transpose_inputs(input_0, input_1, nodes)
if cast_inputs:
input_0, input_1 = do_cast_inputs(input_0, input_1, nodes, input_type)
input_type = flip_type(input_type)
else:
if cast_inputs:
input_0, input_1 = do_cast_inputs(input_0, input_1, nodes, input_type)
input_type = flip_type(input_type)
if transpose_inputs:
input_0, input_1 = do_transpose_inputs(input_0, input_1, nodes)
nodes.append(helper.make_node("MatMul", [input_0, input_1], [product], "MatMul_0"))
if transpose_product:
product = do_transpose_product(product, nodes)
if cast_product:
product = do_cast_product(product, nodes, flip_type(product_type))
product_type = flip_type(product_type)
inputs = [
helper.make_tensor_value_info("input_0", input_type, ["N", "N"]),
helper.make_tensor_value_info("input_1", input_type, ["N", "N"]),
]
if insert_add:
input_2 = "input_2"
add_input_type = flip_type(product_type, cast_input2)
inputs.append(helper.make_tensor_value_info(input_2, add_input_type, ["N", "N"]))
output = "sum"
output_type = product_type
if cast_input2:
input_2 = do_cast_input2(input_2, nodes, flip_type(add_input_type))
nodes.append(helper.make_node("Add", [product, input_2], [output], "Add_0"))
if cast_sum:
output = do_cast_sum(output, nodes, flip_type(output_type))
output_type = flip_type(output_type)
else:
output = product
output_type = product_type
outputs = [helper.make_tensor_value_info(output, output_type, ["N", "N"])]
save(
model_path
+ ("_transpose_inputs" if transpose_inputs else "")
+ ("_transpose_product" if transpose_product else "")
+ ("_cast_inputs" if cast_inputs else "")
+ ("_cast_product" if cast_product else "")
+ ("_cast_input2" if cast_input2 else "")
+ ("_cast_sum" if cast_sum else ""),
nodes,
inputs,
outputs,
[],
)
def gen_matmul_two_products(model_path, transpose, transpose_before_cast, second_matmul, cast_inputs):
def do_transpose(output_0, output_1, transpose, nodes):
nodes.append(helper.make_node("Transpose", [output_0], ["transpose_0_" + output_0], "Transpose_0"))
output_0 = "transpose_0_" + output_0
if transpose > 1:
nodes.append(helper.make_node("Transpose", [output_1], ["transpose_1_" + output_1], "Transpose_1"))
output_1 = "transpose_1_" + output_1
return output_0, output_1
input_type = flip_type(TensorProto.FLOAT, cast_inputs)
input_0 = "input_0"
input_1 = "input_1"
output = "product"
output_0 = "product"
output_1 = "product"
outputs = []
nodes = []
cast_count = 0
inputs = [
helper.make_tensor_value_info("input_0", input_type, ["M", "K"]),
helper.make_tensor_value_info("input_1", input_type, ["K", "N"]),
]
if cast_inputs:
input_type = flip_type(input_type)
input_0, input_1 = do_cast_inputs(input_0, input_1, nodes, input_type)
cast_count += 2
output0_type = input_type
output1_type = input_type
nodes.append(helper.make_node("MatMul", [input_0, input_1], [output], "MatMul_0"))
if second_matmul:
nodes.append(helper.make_node("MatMul", [input_0, input_1], ["second_" + output], "MatMul_1"))
outputs.append(helper.make_tensor_value_info("second_" + output, input_type, ["M", "N"]))
if add_products:
nodes.append(helper.make_node("Add", [output, "second_" + output], ["sum"], "Add_0"))
outputs.append(helper.make_tensor_value_info("sum", input_type, ["M", "N"]))
if transpose > 0 and transpose_before_cast:
output_0, output_1 = do_transpose(output_0, output_1, transpose, nodes)
output0_type = flip_type(output0_type)
nodes.append(
helper.make_node(
"Cast",
[output_0],
["cast_" + str(cast_count) + "_" + output_0],
"Cast_" + str(cast_count),
to=output0_type,
)
)
output_0 = "cast_" + str(cast_count) + "_" + output_0
cast_count += 1
if second_matmul:
nodes.append(
helper.make_node(
"Cast",
[output_1],
["cast_" + str(cast_count) + "_" + output_1],
"Cast_" + str(cast_count),
to=TensorProto.FLOAT16,
)
)
output_1 = "cast_" + str(cast_count) + "_" + output_1
output1_type = flip_type(output1_type)
if transpose > 0 and not transpose_before_cast:
output_0, output_1 = do_transpose(output_0, output_1, transpose, nodes)
outputs.extend(
[
helper.make_tensor_value_info(output_0, output0_type, ["M", "N"]),
helper.make_tensor_value_info(output_1, output1_type, ["M", "N"]),
]
)
model_path += "_cast_inputs" if cast_inputs else ""
model_path += (
("_transpose_before_cast" if transpose_before_cast else "_transpose_after_cast") if transpose > 0 else ""
)
model_path += "_transpose" if transpose > 1 else ""
model_path += "_second_matmul" if second_matmul else ""
model_path += "_add_products" if add_products else ""
save(model_path, nodes, inputs, outputs, [])
def gen_bool_to_float16_cast(model_path):
X1 = helper.make_tensor_value_info("x1", TensorProto.INT64, [1, 1]) # noqa: N806
X2 = helper.make_tensor_value_info("x2", TensorProto.INT64, [1, 1]) # noqa: N806
X3 = helper.make_tensor_value_info("x3", TensorProto.FLOAT, [1, 1]) # noqa: N806
Y = helper.make_tensor_value_info("output", TensorProto.FLOAT16, [1, 1]) # noqa: N806
less1 = helper.make_node("Less", ["x1", "x2"], ["less1"], name="less1")
cast1 = helper.make_node("Cast", ["less1"], ["cast1"], name="cast1", to=TensorProto.FLOAT16)
cast2 = helper.make_node("Cast", ["x3"], ["cast2"], name="cast2", to=TensorProto.FLOAT16)
add1 = helper.make_node("Add", ["cast1", "cast2"], ["output"])
save(model_path, [less1, cast1, cast2, add1], [X1, X2, X3], [Y], [])
def gen_bool_to_float_cast(model_path):
X1 = helper.make_tensor_value_info("x1", TensorProto.INT64, [1, 1]) # noqa: N806
X2 = helper.make_tensor_value_info("x2", TensorProto.INT64, [1, 1]) # noqa: N806
X3 = helper.make_tensor_value_info("x3", TensorProto.FLOAT16, [1, 1]) # noqa: N806
Y = helper.make_tensor_value_info("output", TensorProto.FLOAT16, [1, 1]) # noqa: N806
less1 = helper.make_node("Less", ["x1", "x2"], ["less1"], name="less1")
cast1 = helper.make_node("Cast", ["less1"], ["cast1"], name="cast1", to=TensorProto.FLOAT)
cast2 = helper.make_node("Cast", ["x3"], ["cast2"], name="cast2", to=TensorProto.FLOAT)
add1 = helper.make_node("Add", ["cast1", "cast2"], ["add1"])
cast3 = helper.make_node("Cast", ["add1"], ["output"], name="cast3", to=TensorProto.FLOAT16)
save(model_path, [less1, cast1, cast2, cast3, add1], [X1, X2, X3], [Y], [])
def gen_one_input_one_output_test(op, model_path, axes_attribute=False):
X = helper.make_tensor_value_info("x", TensorProto.FLOAT16, [2, 2]) # noqa: N806
output_shape = [2, 2]
if op == "Unsqueeze":
output_shape.append(1)
Y = helper.make_tensor_value_info("y", TensorProto.FLOAT16, output_shape) # noqa: N806
node_inputs = []
graph_inputs = [X]
cast1 = helper.make_node("Cast", ["x"], ["cast1"], name="cast1", to=TensorProto.FLOAT)
node_inputs.insert(0, "cast1")
if axes_attribute:
node = helper.make_node(
op,
node_inputs,
["op_output"],
name=op + str(1),
axes=np.array([2]).astype(np.int64),
)
else:
node = helper.make_node(op, node_inputs, ["op_output"], name=op + str(1))
cast2 = helper.make_node("Cast", ["op_output"], ["y"], name="cast2", to=TensorProto.FLOAT16)
save(model_path, [cast1, node, cast2], graph_inputs, [Y], [])
for (
transpose_inputs,
transpose_product,
cast_inputs,
cast_product,
insert_add,
cast_sum,
cast_input2,
) in list(itertools.product([False, True], repeat=7)):
if not insert_add and (cast_sum or cast_input2):
continue
if cast_inputs or cast_product or cast_sum:
gen_propagate_cast_test_model(
"matmul_add" if insert_add else "matmul",
transpose_inputs,
transpose_product,
cast_inputs,
cast_product,
insert_add,
cast_sum,
cast_input2,
)
gen_fuse_sibling_casts("fuse_sibling_casts")
gen_fuse_back2back_casts("fuse_back2back_casts")
for (
transpose,
transpose_before_cast,
second_matmul,
add_products,
cast_inputs,
) in list(itertools.product([0, 1, 2], [False, True], [False, True], [False, True], [False, True])):
if not transpose and transpose_before_cast:
continue
if not second_matmul and add_products:
continue
gen_matmul_two_products(
"matmul_two_outputs",
transpose,
transpose_before_cast,
second_matmul,
cast_inputs,
)
gen_bool_to_float16_cast("negative_test_case_bool_fp16_cast")
gen_bool_to_float_cast("negative_test_case_bool_fp_cast")
gen_one_input_one_output_test("Squeeze", "squeeze_cast_propagation_test")
gen_one_input_one_output_test("Unsqueeze", "unsqueeze_cast_propagation_test", True)
|
c4b9d1865eaa564a484f58ad3f7a421ad3b606f0
|
828a277419095556f50b331da73e4a931be7b452
|
/tests/unit/test_pathspec_math.py
|
9cb3098568fec1ef6c3bc261a7586e68395f6a6e
|
[
"Apache-2.0"
] |
permissive
|
iterative/dvc
|
6b843492a680370054cddf5cf62c66b6808007ca
|
f47d21b0c29f5603bff09cc51c4a14a1bdf962c6
|
refs/heads/main
| 2023-09-02T17:14:25.412431
| 2023-08-31T23:17:17
| 2023-08-31T23:17:17
| 83,878,269
| 11,687
| 1,331
|
Apache-2.0
| 2023-09-14T14:36:48
| 2017-03-04T08:16:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,747
|
py
|
test_pathspec_math.py
|
import pytest
from dvc.pathspec_math import PatternInfo, _change_dirname
@pytest.mark.parametrize(
"patterns, dirname, changed",
[
# A line starting with # serves as a comment.
("#comment", "/dir", "#comment"),
# Put a backslash ("\") in front of the first hash for patterns that
# begin with a hash.
("\\#hash", "/dir", "dir/**/#hash"),
("\\#hash", "/#dir", "#dir/**/#hash"),
# Trailing spaces are ignored unless they are quoted with
# backslash ("\").
(" space", "/dir", "dir/**/space"),
("\\ space", "/dir", "dir/**/ space"),
# An optional prefix "!" which negates the pattern;
("!include", "/dir", "!/dir/**/include"),
# Put a backslash ("\") in front of the first "!" for patterns that
# begin with a literal "!", for example, "\!important!.txt".
("\\!important!.txt", "/dir", "dir/**/!important!.txt"),
# If there is a separator at the beginning or middle (or both) of the
# pattern, then the pattern is relative to the directory level of the
# particular .gitignore file itself.
("/separator.txt", "/dir", "dir/separator.txt"),
("subdir/separator.txt", "/dir", "dir/subdir/separator.txt"),
# Otherwise the pattern may also match at any level below
# the .gitignore level.
("no_sep", "/dir", "dir/**/no_sep"),
# If there is a separator at the end of the pattern then the pattern
# will only match directories, otherwise the pattern can match both
# files and directories.
("doc/fortz/", "/dir", "dir/doc/fortz/"),
("fortz/", "/dir", "dir/**/fortz/"),
# An asterisk "*" matches anything except a slash.
("*aste*risk*", "/dir", "dir/**/*aste*risk*"),
# The character "?" matches any one character except "/".
("?fi?le?", "/dir", "dir/**/?fi?le?"),
# The range notation, e.g. [a-zA-Z], can be used to match one of the
# characters in a range. See fnmatch(3) and the FNM_PATHNAME flag
# for a more detailed description.
("[a-zA-Z]file[a-zA-Z]", "/dir", "dir/**/[a-zA-Z]file[a-zA-Z]"),
# Two consecutive asterisks ("**") in patterns matched against full
# pathname may have special meaning:
# A leading "**" followed by a slash means match in all directories.
# For example, "**/foo" matches file or directory "foo" anywhere,
# the same as pattern "foo".
("**/foo", "/dir", "dir/**/foo"),
# "**/foo/bar" matches file or directory "bar" anywhere that is
# directly under directory "foo".
("**/foo/bar", "/dir", "dir/**/foo/bar"),
# A trailing "/**" matches everything inside.
# For example, "abc/**" matches all files inside directory "abc",
# relative to the location of the .gitignore file, with infinite depth.
("abc/**", "/dir", "dir/abc/**"),
# A slash followed by two consecutive asterisks then a slash matches
# zero or more directories. For example, "a/**/b"
# matches "a/b", "a/x/b", "a/x/y/b" and so on.
("a/**/b", "/dir", "dir/a/**/b"),
# Other consecutive asterisks are considered regular asterisks and
# will match according to the previous rules.
("/***.txt", "/dir", "dir/***.txt"),
("data/***", "/dir", "dir/data/***"),
("***/file.txt", "/dir", "dir/***/file.txt"),
("***file", "/dir", "dir/**/***file"),
("a/***/b", "/dir", "dir/a/***/b"),
],
)
def test_dvcignore_pattern_change_dir(tmp_dir, patterns, dirname, changed):
assert _change_dirname(dirname, [PatternInfo(patterns, "")], "/") == [
PatternInfo(changed, "")
]
|
d47a65f63ffcad64d329097be0afa7e5526ba11a
|
ba26fe339628554701cfd94ddb0afe217d2a9f4c
|
/retail/recommendation-system/bqml-scann/tfx_pipeline/bq_components.py
|
f4571a92971fc7b5ce634f65e8c518edad9b9cff
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
GoogleCloudPlatform/analytics-componentized-patterns
|
8eb704a4de1b01705301ee41b80c9fb6724edce5
|
51329bab5c0e2812f07b1a4addf38c0a71a6939d
|
refs/heads/master
| 2023-05-08T14:34:27.923955
| 2023-01-04T11:35:10
| 2023-01-04T11:35:10
| 282,664,606
| 157
| 90
|
Apache-2.0
| 2023-05-01T22:25:19
| 2020-07-26T14:17:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,043
|
py
|
bq_components.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigQuery components."""
import os
import warnings
import logging
from google.cloud import bigquery
import tfx
import tensorflow as tf
from tfx.types.experimental.simple_artifacts import Dataset
from tfx.types.standard_artifacts import Artifact
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
from tfx.types.standard_artifacts import Model as BQModel
@component
def compute_pmi(
project_id: Parameter[str],
bq_dataset: Parameter[str],
min_item_frequency: Parameter[int],
max_group_size: Parameter[int],
item_cooc: OutputArtifact[Dataset]):
stored_proc = f'{bq_dataset}.sp_ComputePMI'
query = f'''
DECLARE min_item_frequency INT64;
DECLARE max_group_size INT64;
SET min_item_frequency = {min_item_frequency};
SET max_group_size = {max_group_size};
CALL {stored_proc}(min_item_frequency, max_group_size);
'''
result_table = 'item_cooc'
logging.info(f'Starting computing PMI...')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result() # Wait for the job to complete
logging.info(f'Items PMI computation completed. Output in {bq_dataset}.{result_table}.')
# Write the location of the output table to metadata.
item_cooc.set_string_custom_property('bq_dataset', bq_dataset)
item_cooc.set_string_custom_property('bq_result_table', result_table)
@component
def train_item_matching_model(
project_id: Parameter[str],
bq_dataset: Parameter[str],
dimensions: Parameter[int],
item_cooc: InputArtifact[Dataset],
bq_model: OutputArtifact[BQModel]):
item_cooc_table = item_cooc.get_string_custom_property('bq_result_table')
stored_proc = f'{bq_dataset}.sp_TrainItemMatchingModel'
query = f'''
DECLARE dimensions INT64 DEFAULT {dimensions};
CALL {stored_proc}(dimensions);
'''
model_name = 'item_matching_model'
logging.info(f'Using item co-occurrence table: {bq_dataset}.{item_cooc_table}')
logging.info(f'Starting training of the model...')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result()
logging.info(f'Model training completed. Output in {bq_dataset}.{model_name}.')
# Write the location of the model to metadata.
bq_model.set_string_custom_property('bq_dataset', bq_dataset)
bq_model.set_string_custom_property('bq_model_name', model_name)
@component
def extract_embeddings(
project_id: Parameter[str],
bq_dataset: Parameter[str],
bq_model: InputArtifact[BQModel],
item_embeddings: OutputArtifact[Dataset]):
embedding_model_name = bq_model.get_string_custom_property('bq_model_name')
stored_proc = f'{bq_dataset}.sp_ExractEmbeddings'
query = f'''
CALL {stored_proc}();
'''
result_table = 'item_embeddings'
logging.info(f'Extracting item embedding from: {bq_dataset}.{embedding_model_name}')
logging.info(f'Starting exporting embeddings...')
client = bigquery.Client(project=project_id)
query_job = client.query(query)
query_job.result() # Wait for the job to complete
logging.info(f'Embeddings export completed. Output in {bq_dataset}.{result_table}')
# Write the location of the output table to metadata.
item_embeddings.set_string_custom_property('bq_dataset', bq_dataset)
item_embeddings.set_string_custom_property('bq_result_table', result_table)
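# A minimal wiring sketch; the argument values are placeholders rather than the
# original pipeline's settings. The BigQuery table/model names travel between
# components through the custom properties set above.
#   pmi = compute_pmi(project_id=..., bq_dataset=..., min_item_frequency=..., max_group_size=...)
#   matching_model = train_item_matching_model(project_id=..., bq_dataset=..., dimensions=...,
#                                              item_cooc=pmi.outputs['item_cooc'])
#   embeddings = extract_embeddings(project_id=..., bq_dataset=...,
#                                   bq_model=matching_model.outputs['bq_model'])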
|
25ca49b80140b27b4e2fbad70dc2973e43426066
|
5eb35e6fa9d0d7b28b3478cd932a867f2fcd66cd
|
/test/dlc_tests/sanity/quick_checks/test_dlc_developer_config.py
|
e06a3860e0dfe3916ad6c3a58e3b4438d35288bf
|
[
"Apache-2.0"
] |
permissive
|
aws/deep-learning-containers
|
3f4bc3fa0a27eca35c2faeac282a3fc2891e8d40
|
ba76fa031f29e323a67b29c1d863db5c3d95084c
|
refs/heads/master
| 2023-09-01T13:55:25.697898
| 2023-08-31T16:41:44
| 2023-08-31T16:41:44
| 234,634,164
| 811
| 434
|
NOASSERTION
| 2023-09-14T17:59:08
| 2020-01-17T20:52:43
|
Python
|
UTF-8
|
Python
| false
| false
| 8,272
|
py
|
test_dlc_developer_config.py
|
import pytest
from src import config
@pytest.mark.quick_checks
@pytest.mark.model("N/A")
@pytest.mark.integration("dlc_developer_config")
def test_developer_configuration():
"""
Ensure that defaults are set back to normal before merge
"""
# Check dev settings
assert config.parse_dlc_developer_configs("dev", "partner_developer") == ""
assert config.parse_dlc_developer_configs("dev", "ei_mode") is False
assert config.parse_dlc_developer_configs("dev", "neuron_mode") is False
assert config.parse_dlc_developer_configs("dev", "neuronx_mode") is False
assert config.parse_dlc_developer_configs("dev", "graviton_mode") is False
assert config.parse_dlc_developer_configs("dev", "benchmark_mode") is False
assert config.parse_dlc_developer_configs("dev", "habana_mode") is False
assert config.parse_dlc_developer_configs("dev", "trcomp_mode") is False
# Check build settings
assert config.parse_dlc_developer_configs("build", "build_frameworks") == []
assert config.parse_dlc_developer_configs("build", "build_training") is True
assert config.parse_dlc_developer_configs("build", "build_inference") is True
assert config.parse_dlc_developer_configs("build", "datetime_tag") is True
assert config.parse_dlc_developer_configs("build", "do_build") is True
# Check test settings
assert config.parse_dlc_developer_configs("test", "sanity_tests") is True
assert config.parse_dlc_developer_configs("test", "sagemaker_remote_tests") == "off"
assert config.parse_dlc_developer_configs("test", "sagemaker_remote_efa_instance_type") == ""
assert config.parse_dlc_developer_configs("test", "sagemaker_local_tests") is False
assert config.parse_dlc_developer_configs("test", "ecs_tests") is True
assert config.parse_dlc_developer_configs("test", "eks_tests") is True
assert config.parse_dlc_developer_configs("test", "ec2_tests") is True
assert config.parse_dlc_developer_configs("test", "ec2_tests_on_heavy_instances") is False
assert config.parse_dlc_developer_configs("test", "nightly_pr_test_mode") is False
assert config.parse_dlc_developer_configs("test", "use_scheduler") is False
assert config.parse_dlc_developer_configs("test", "safety_check_test") is False
assert config.parse_dlc_developer_configs("test", "ecr_scan_allowlist_feature") is False
@pytest.mark.quick_checks
@pytest.mark.model("N/A")
@pytest.mark.integration("dlc_developer_config")
def test_developer_config_wrappers_defaults():
"""
Test defaults of config file wrappers
"""
# Check test settings
assert config.are_sm_efa_tests_enabled() is False
assert config.is_sanity_test_enabled() is True
assert config.is_sm_local_test_enabled() is False
assert config.is_sm_remote_test_enabled() is False
assert config.get_sagemaker_remote_efa_instance_type() == ""
assert config.is_ecs_test_enabled() is True
assert config.is_eks_test_enabled() is True
assert config.is_ec2_test_enabled() is True
assert config.are_heavy_instance_ec2_tests_enabled() is False
assert config.is_nightly_pr_test_mode_enabled() is False
assert config.is_scheduler_enabled() is False
assert config.is_safety_check_test_enabled() is False
assert config.is_ecr_scan_allowlist_feature_enabled() is False
@pytest.mark.quick_checks
@pytest.mark.model("N/A")
@pytest.mark.integration("dlc_build_version_override_config")
def test_build_version_override_configuration():
"""
Ensure that buildspec override defaults are set back to normal before merge
"""
assert config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-training") == ""
assert config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-training") == ""
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-tensorflow-2-training")
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-autogluon-training") == ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-tensorflow-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-pytorch-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-pytorch-trcomp-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-tensorflow-2-trcomp-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-trcomp-training")
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-neuron-training")
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-neuron-training")
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-2-neuron-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-stabilityai-pytorch-training"
)
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-habana-training")
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-2-habana-training"
)
== ""
)
assert config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-inference") == ""
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-inference") == ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-tensorflow-2-inference")
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-autogluon-inference") == ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-neuron-inference")
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-neuron-inference")
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-1-neuron-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-2-neuron-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-tensorflow-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-pytorch-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-huggingface-pytorch-neuron-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-stabilityai-pytorch-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-graviton-inference")
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-pytorch-graviton-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-2-graviton-inference"
)
== ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-mxnet-eia-inference") == ""
)
assert (
config.parse_dlc_developer_configs("buildspec_override", "dlc-pr-pytorch-eia-inference")
== ""
)
assert (
config.parse_dlc_developer_configs(
"buildspec_override", "dlc-pr-tensorflow-2-eia-inference"
)
== ""
)
|
3bb47f13d70fd35c606ed5770f0e651637ff7507
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/states/test_mac_xattr.py
|
a338467955e0c8b3cfb8cf0dcf3165b32fdc927f
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,180
|
py
|
test_mac_xattr.py
|
import pytest
import salt.states.mac_xattr as xattr
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {xattr: {}}
def test_exists_not():
"""
Test adding an attribute when it doesn't exist
"""
with patch("os.path.exists") as exists_mock:
expected = {
"changes": {"key": "value"},
"comment": "",
"name": "/path/to/file",
"result": True,
}
exists_mock.return_value = True
list_mock = MagicMock(return_value={"other.id": "value2"})
write_mock = MagicMock()
with patch.dict(
xattr.__salt__, {"xattr.list": list_mock, "xattr.write": write_mock}
):
out = xattr.exists("/path/to/file", ["key=value"])
list_mock.assert_called_once_with("/path/to/file")
write_mock.assert_called_once_with("/path/to/file", "key", "value", False)
assert out == expected
def test_exists_change():
"""
Test changing an attribute value
"""
with patch("os.path.exists") as exists_mock:
expected = {
"changes": {"key": "other_value"},
"comment": "",
"name": "/path/to/file",
"result": True,
}
exists_mock.return_value = True
list_mock = MagicMock(return_value={"key": "value"})
write_mock = MagicMock()
with patch.dict(
xattr.__salt__, {"xattr.list": list_mock, "xattr.write": write_mock}
):
out = xattr.exists("/path/to/file", ["key=other_value"])
list_mock.assert_called_once_with("/path/to/file")
write_mock.assert_called_once_with(
"/path/to/file", "key", "other_value", False
)
assert out == expected
def test_exists_already():
"""
    Test that setting an attribute to the same value does nothing
"""
with patch("os.path.exists") as exists_mock:
expected = {
"changes": {},
"comment": "All values existed correctly.",
"name": "/path/to/file",
"result": True,
}
exists_mock.return_value = True
list_mock = MagicMock(return_value={"key": "value"})
write_mock = MagicMock()
with patch.dict(
xattr.__salt__, {"xattr.list": list_mock, "xattr.write": write_mock}
):
out = xattr.exists("/path/to/file", ["key=value"])
list_mock.assert_called_once_with("/path/to/file")
assert not write_mock.called
assert out == expected
def test_delete():
"""
Test deleting an attribute from a file
"""
with patch("os.path.exists") as exists_mock:
expected = {
"changes": {"key": "delete"},
"comment": "",
"name": "/path/to/file",
"result": True,
}
exists_mock.return_value = True
list_mock = MagicMock(return_value={"key": "value2"})
delete_mock = MagicMock()
with patch.dict(
xattr.__salt__, {"xattr.list": list_mock, "xattr.delete": delete_mock}
):
out = xattr.delete("/path/to/file", ["key"])
list_mock.assert_called_once_with("/path/to/file")
delete_mock.assert_called_once_with("/path/to/file", "key")
assert out == expected
def test_delete_not():
"""
Test deleting an attribute that doesn't exist from a file
"""
with patch("os.path.exists") as exists_mock:
expected = {
"changes": {},
"comment": "All attributes were already deleted.",
"name": "/path/to/file",
"result": True,
}
exists_mock.return_value = True
list_mock = MagicMock(return_value={"other.key": "value2"})
delete_mock = MagicMock()
with patch.dict(
xattr.__salt__, {"xattr.list": list_mock, "xattr.delete": delete_mock}
):
out = xattr.delete("/path/to/file", ["key"])
list_mock.assert_called_once_with("/path/to/file")
assert not delete_mock.called
assert out == expected
|
1be328ce5c44dee6511ed3fc2a71c8aac962b5ad
|
2c596f013d7ffb7415a1021fe58ef7fb2f8d5428
|
/mem_usage_ui/snapshot.py
|
96e8509d74113ee486cb9c20e4082ff68e7cb021
|
[
"MIT"
] |
permissive
|
parikls/mem_usage_ui
|
11e4c5f86d1fbbc2e79a9baa7049d449a4cd0ec1
|
e65177bcba4528bcfe15ff8a315b90e5def3530d
|
refs/heads/master
| 2023-07-06T17:54:18.157766
| 2023-06-28T11:10:36
| 2023-06-28T11:10:36
| 164,311,793
| 138
| 6
|
MIT
| 2022-11-11T15:27:20
| 2019-01-06T13:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,875
|
py
|
snapshot.py
|
import asyncio
import getpass
import logging
from typing import Union, List, Dict
import psutil
from aiohttp.web_app import Application
from aiohttp.web_ws import WebSocketResponse
from psutil import Error
logger = logging.getLogger("mem_usage_ui")
class SnapshotProcessor:
"""
Main class for handling processes
and memory snapshots
"""
MESSAGE_INIT = "init"
MESSAGE_SUBSCRIBE = "subscribe"
MESSAGE_UNSUBSCRIBE = "unsubscribe"
USER_ROOT = "root"
MEM_RSS_DIVIDER = 1024
DEFAULT_PROCESS_ATTRS = ("pid", "name", "cmdline")
EXTENDED_PROCESS_ATTRS = (
"memory_info", "status", "cpu_percent", "memory_percent", "num_threads", "username"
)
PROCESS_DIFF_SNAPSHOT_INTERVAL = 1
MEMORY_SNAPSHOT_INTERVAL = 1
@classmethod
async def create(cls, app):
instance = cls(app)
app["snapshot_processor"] = instance
asyncio.create_task(instance.process_diff())
return instance
@staticmethod
def get_processes_as_dict() -> Dict:
"""
        Return processes in a dict format where the key is the PID.
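        Illustrative shape of the returned mapping (the PIDs, names and
        cmdlines below are hypothetical, not taken from a real system)::
            {
                1234: {"pid": 1234, "name": "python3", "cmdline": "python3 app.py"},
                5678: {"pid": 5678, "name": "bash", "cmdline": "-bash"},
            }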
"""
current_user = getpass.getuser()
processes = {}
for process in psutil.process_iter():
if current_user == SnapshotProcessor.USER_ROOT or process.username() == current_user:
try:
process_dict = process.as_dict(attrs=SnapshotProcessor.DEFAULT_PROCESS_ATTRS)
except Error:
continue
process_dict["cmdline"] = " ".join(process_dict["cmdline"] or [])
processes[process_dict["pid"]] = process_dict
return processes
def __init__(self, app: Application):
self._websockets = app["websockets"]
self._pid_ws = {}
self._ws_pid = {}
self._processes = self.get_processes_as_dict()
async def process_diff(self):
"""
        Background task which takes a process snapshot every `interval` seconds
        and sends a diff to all connected websockets.
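        For illustration (hypothetical PIDs): if the previous snapshot held
        {1: {...}, 2: {...}} and the current one holds {2: {...}, 3: {...}},
        the diff sent out contains terminated == [1] and new == {3: {...}}.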
"""
await asyncio.sleep(self.PROCESS_DIFF_SNAPSHOT_INTERVAL)
if self._websockets:
# proceed only if there are connected clients
current_processes = self.get_processes_as_dict()
terminated_processes = self._processes.keys() - current_processes.keys()
new_processes = current_processes.keys() - self._processes.keys()
if terminated_processes or new_processes:
await self.send_process_diff(
list(terminated_processes),
{pid: current_processes[pid] for pid in new_processes},
)
# update existing processes
self._processes = current_processes
# re-schedule task
_ = asyncio.create_task(self.process_diff())
async def process_user_message(self, ws: WebSocketResponse, message: dict):
"""
        Process a user message. Creates or cancels
        snapshot tasks based on the user input.
"""
logger.info("Processing user message")
# todo: handle unknown message type
if message["type"] == self.MESSAGE_INIT:
# on init - send only existing processes
await self.send_process_diff(
terminated_processes=None,
new_processes=self._processes
)
elif message["type"] == self.MESSAGE_SUBSCRIBE:
await self.subscribe(ws, message)
elif message["type"] == self.MESSAGE_UNSUBSCRIBE:
await self.unsubscribe(ws)
async def subscribe(self, ws: WebSocketResponse, message: dict):
"""
        Entry point for process subscription. Puts the user into the internal structures
        and schedules a snapshot task for this user.
"""
logger.info("New subscribe message received for PID %s" % message["pid"])
# reference of PID to websocket and vice versa
self._pid_ws[message["pid"]] = ws
self._ws_pid[ws] = message["pid"]
_ = asyncio.create_task(
self.snapshot(
message["pid"],
message.get("interval", self.MEMORY_SNAPSHOT_INTERVAL)
)
)
    async def unsubscribe(self, ws: WebSocketResponse):
        """Unsubscribe from process snapshotting. Clean up internal structures."""
logger.info("Unsubscribe message received")
pid = self._ws_pid.pop(ws, None)
self._pid_ws.pop(pid, None)
async def send_process_diff(self,
terminated_processes: Union[None, List] = None,
new_processes: Union[None, Dict[int, Dict]] = None):
"""
Send process diff to all connected users
"""
terminated_processes = terminated_processes or []
new_processes = new_processes or []
result = {
"type": "process_diff",
"payload": {
"terminated": terminated_processes,
"new": new_processes
}
}
# iterate over a copy to avoid potential error with mutability
for ws in list(self._websockets):
try:
await ws.send_json(result)
except Exception: # noqa
                # the TCP connection has most likely been closed
pass
async def snapshot(self, pid: int, interval: float = 1):
"""
        Main task, which gets the process data and sends it to the user.
        At the end, re-schedules itself if the user is still subscribed.
"""
await asyncio.sleep(float(interval))
try:
ws = self._pid_ws[pid]
except KeyError:
# user unsubscribed - terminate task
return
payload = {"type": "pid_update"}
try:
process = psutil.Process(pid)
process_dict = process.as_dict(attrs=self.EXTENDED_PROCESS_ATTRS)
memory_info = process_dict.pop("memory_info", None)
if not memory_info:
raise ValueError("missing memory info")
process_dict['cpu_percent'] = process.cpu_percent(interval=None)
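            # memory_info.rss is reported in bytes; dividing by 1024 twice
            # converts it to MiB before it is sent to the client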
process_dict["rss"] = round(
memory_info.rss / self.MEM_RSS_DIVIDER / self.MEM_RSS_DIVIDER
)
payload["success"] = True
payload["process"] = process_dict
except (Error, ValueError):
logger.warning("No such process. PID=%s" % pid)
payload["success"] = False
payload["message"] = "No such process or it was terminated."
await ws.send_json(payload)
await self.unsubscribe(ws)
return
if not ws.closed:
await ws.send_json(payload)
# still subscribed - re-schedule
_ = asyncio.create_task(self.snapshot(pid, interval))
|
795224a4498e4c555a68f39033b857e31d4bfcdf
|
09724e5c0ad743b3cd4a4729bd15f5df6a467c25
|
/rrd/view/index.py
|
7c786c39d30c23734c69f1ee2bfa5e8b37c6b0eb
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
open-falcon/dashboard
|
c97478c7db99a6e2cd784155bc33c2799e657d6d
|
e98217f5003d5a12055215667e2b0a92395f20c1
|
refs/heads/master
| 2023-05-12T13:50:43.158398
| 2022-05-27T09:36:46
| 2022-05-27T09:36:46
| 35,652,835
| 404
| 317
|
Apache-2.0
| 2023-02-15T21:49:16
| 2015-05-15T04:17:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,762
|
py
|
index.py
|
#-*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from flask import request, abort, g, render_template
from rrd import app, config
from rrd import corelib
def get_api_endpoints(q, tags, page=1, limit=100):
if not q and not tags:
raise Exception("no query params given")
h = {"Content-type": "application/json"}
r = corelib.auth_requests("GET", config.API_ADDR + "/graph/endpoint?q=%s&limit=%d&page=%d&tags=%s" %(q, limit, page, tags), headers=h)
if r.status_code != 200:
raise Exception(r.text)
j = sorted(r.json(), key=lambda x:x["endpoint"])
return j
@app.route("/")
def index():
q = request.args.get("q") or "."
raw_tag = request.args.get("tags") or ""
    tags = ','.join(re.split(r'\s*,\s*', raw_tag))
limit = int(request.args.get("limit") or 50)
page = int(request.args.get("page") or 1)
endpoint_objs = get_api_endpoints(q, tags, page, limit)
return render_template("index.html", **locals())
@app.route("/api/endpoints")
def api_endpoints():
ret = {
"ok": False,
"msg": "",
"data": [],
}
q = request.args.get("q") or "."
raw_tag = request.args.get("tags") or ""
    tags = ','.join(re.split(r'\s*,\s*', raw_tag))
limit = int(request.args.get("limit") or 100)
page = int(request.args.get("page") or 1)
try:
data = get_api_endpoints(q, tags, page, limit)
ret['data'] = data
ret['ok'] = True
return json.dumps(ret)
except Exception as e:
        abort(400, str(e))
@app.route("/api/counters", methods=["POST"])
def api_get_counters():
ret = {
"ok": False,
"msg": "",
"data": [],
}
q = request.form.get("q") or ""
limit = int(request.form.get("limit") or 50)
page = int(request.form.get("page") or 1)
eids = request.form.get("eids") or ""
eids = eids and json.loads(eids) or []
if not (eids or q):
ret['msg'] = "no endpoints or counter given"
return json.dumps(ret)
h = {"Content-type": "application/json"}
r = corelib.auth_requests("GET", config.API_ADDR + "/graph/endpoint_counter?eid=%s&metricQuery=%s&limit=%d&page=%d" %(",".join(eids), q, limit, page), headers=h)
if r.status_code != 200:
abort(400, r.text)
j = r.json()
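    # De-duplicate by counter name, then return [counter, type, step] triples
    # sorted by counter name.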
counters_map = {}
for x in j:
counters_map[x['counter']] = [x['counter'], x['type'], x['step']]
sorted_counters = sorted(counters_map.keys())
sorted_values = [counters_map[x] for x in sorted_counters]
ret['data'] = sorted_values
ret['ok'] = True
return json.dumps(ret)
@app.route("/api/counters", methods=["DELETE"])
def api_delete_counters():
ret = {
"ok": False,
"msg": "",
}
endpoints = request.form.getlist("endpoints[]") or []
counters = request.form.getlist("counters[]") or []
if len(endpoints) == 0 or len(counters) == 0:
ret['msg'] = "no endpoint and counter"
return json.dumps(ret)
h = {"Content-type": "application/json"}
d = {
"endpoints": endpoints,
"counters": counters,
}
r = corelib.auth_requests("DELETE", config.API_ADDR + "/graph/counter", headers=h, data=json.dumps(d))
if r.status_code != 200:
abort(r.status_code, r.text)
j = r.json()
ret["ok"] = True
ret["data"] = "%s counters affected" %j.get("affected_counter")
return json.dumps(ret)
@app.route("/api/endpoints", methods=["DELETE"])
def api_delete_endpoints():
ret = {
"ok": False,
"msg": "",
}
endpoints = request.form.getlist("endpoints[]") or []
if len(endpoints) == 0:
ret['msg'] = "no endpoint"
return json.dumps(ret)
h = {"Content-type": "application/json"}
d = endpoints
r = corelib.auth_requests("DELETE", config.API_ADDR + "/graph/endpoint", headers=h, data=json.dumps(d))
if r.status_code != 200:
abort(r.status_code, r.text)
j = r.json()
ret["ok"] = True
ret["data"] = "%s counters affected, %s endpoints affected" %(j.get("affected_counter"), j.get("affected_endpoint"))
return json.dumps(ret)
|
a3f2e4c7eb8661ea2de60aec91ffeda470647525
|
7bf0e3d4f4841db2d2df37c33e7e7fdff6b465e1
|
/tsfresh/feature_extraction/data.py
|
0174094549b83ef8ce4f7f62952c5b762e5a1e2a
|
[
"MIT"
] |
permissive
|
blue-yonder/tsfresh
|
6bacfff586db8e9bf2f3f5f096af9637e1a7518a
|
f3a6a7c6fc851ec0ab98e7f3a227c89ca41560af
|
refs/heads/main
| 2023-08-30T15:16:01.905311
| 2023-08-03T21:07:06
| 2023-08-03T21:07:06
| 71,996,613
| 8,031
| 1,324
|
MIT
| 2023-09-13T01:13:42
| 2016-10-26T11:29:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 16,831
|
py
|
data.py
|
import itertools
from collections import defaultdict, namedtuple
from typing import Iterable, Sized
import pandas as pd
try:
from dask import dataframe as dd
except ImportError: # pragma: no cover
dd = None
def _binding_helper(f, kwargs, column_sort, column_id, column_kind, column_value):
def wrapped_feature_extraction(x):
if column_sort is not None:
x = x.sort_values(column_sort)
chunk = Timeseries(
x[column_id].iloc[0], x[column_kind].iloc[0], x[column_value]
)
result = f(chunk, **kwargs)
result = pd.DataFrame(result, columns=[column_id, "variable", "value"])
result["value"] = result["value"].astype("double")
return result[[column_id, "variable", "value"]]
return wrapped_feature_extraction
class Timeseries(namedtuple("Timeseries", ["id", "kind", "data"])):
"""
Timeseries tuple used for feature extraction.
Make sure `kind` is of type `str` to allow inference
of feature settings in `feature_extraction.settings.from_columns`.
"""
class TsData:
"""
TsData provides access to time series data for internal usage.
Distributors will use this data class to apply functions on the data.
All derived classes must either implement the `apply` method,
which is used to apply the given function directly on the data
or the __iter__ method, which can be used to get an iterator of
Timeseries instances (which distributors can use to apply the function on).
    Other methods can be overridden if a more efficient solution exists for the underlying data store.
"""
pass
class PartitionedTsData(Iterable[Timeseries], Sized, TsData):
"""
Special class of TsData, which can be partitioned.
Derived classes should implement __iter__ and __len__.
"""
def __init__(self, df, column_id):
self.df_id_type = df[column_id].dtype
def pivot(self, results):
"""
Helper function to turn an iterable of tuples with three entries into a dataframe.
The input ``list_of_tuples`` needs to be an iterable with tuples containing three
entries: (a, b, c).
Out of this, a pandas dataframe will be created with all a's as index,
all b's as columns and all c's as values.
It basically does a pd.pivot(first entry, second entry, third entry),
but optimized for non-pandas input (= python list of tuples).
This function is called in the end of the extract_features call.
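        A doctest-style sketch (the id values and the feature name below are
        made up purely for illustration):
        >>> adapter = WideTsFrameAdapter(pd.DataFrame({"id": [1, 2], "x": [1.0, 2.0]}), "id")
        >>> adapter.pivot([(1, "x__mean", 1.0), (2, "x__mean", 2.0)]).to_dict()
        {'x__mean': {1: 1.0, 2: 2.0}}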
"""
return_df_dict = defaultdict(dict)
for chunk_id, variable, value in results:
# we turn it into a nested mapping `column -> index -> value`
return_df_dict[variable][chunk_id] = value
        # the mapping column -> {index -> value}
        # is now a dict of dicts. The pandas dataframe
        # constructor will peel this off:
        # the keys of the outer dict (the columns) become the
        # column headers, and each inner {index -> value} dict
        # becomes a column with its own index.
        # All indices will be aligned.
return_df = pd.DataFrame(return_df_dict, dtype=float)
# copy the type of the index
return_df.index = return_df.index.astype(self.df_id_type)
# Sort by index to be backward compatible
return_df = return_df.sort_index()
return return_df
def _check_colname(*columns):
"""
    Check if the given column names conflict with `settings.from_columns` (i.e. end with '_' or contain '__').
:param columns: the column names to check
:type columns: str
:return: None
:rtype: None
:raise: ``ValueError`` if column names are invalid.
"""
for col in columns:
if str(col).endswith("_"):
raise ValueError(
"Dict keys are not allowed to end with '_': {}".format(col)
)
if "__" in str(col):
raise ValueError(
"Dict keys are not allowed to contain '__': {}".format(col)
)
def _check_nan(df, *columns):
"""
Raise a ``ValueError`` if one of the columns does not exist or contains NaNs.
:param df: the pandas DataFrame to test for NaNs
:type df: pandas.DataFrame
:param columns: a list of columns to test for NaNs. If left empty, all columns of the DataFrame will be tested.
:type columns: str
:return: None
:rtype: None
:raise: ``ValueError`` if ``NaNs`` are found in the DataFrame.
"""
for col in columns:
if col not in df.columns:
raise ValueError("Column not found: {}".format(col))
if df[col].isnull().any():
raise ValueError("Column must not contain NaN values: {}".format(col))
def _get_value_columns(df, *other_columns):
value_columns = [col for col in df.columns if col not in other_columns]
if len(value_columns) == 0:
raise ValueError(
"Could not guess the value column! Please hand it to the function as an argument."
)
return value_columns
class WideTsFrameAdapter(PartitionedTsData):
def __init__(self, df, column_id, column_sort=None, value_columns=None):
"""
Adapter for Pandas DataFrames in wide format, where multiple columns contain different time series for
the same id.
:param df: the data frame
:type df: pd.DataFrame
:param column_id: the name of the column containing time series group ids
:type column_id: str
:param column_sort: the name of the column to sort on
:type column_sort: str|None
:param value_columns: list of column names to treat as time series values.
If `None` or empty, all columns except `column_id` and `column_sort` will be used.
:type value_columns: list[str]|None
"""
if column_id is None:
raise ValueError("A value for column_id needs to be supplied")
_check_nan(df, column_id)
if not value_columns:
value_columns = _get_value_columns(df, column_id, column_sort)
_check_nan(df, *value_columns)
_check_colname(*value_columns)
self.value_columns = value_columns
if column_sort is not None:
_check_nan(df, column_sort)
self.column_sort = column_sort
self.df_grouped = df.groupby(column_id)
super().__init__(df, column_id)
def __len__(self):
return self.df_grouped.ngroups * len(self.value_columns)
def __iter__(self):
for group_name, group in self.df_grouped:
if self.column_sort is not None:
group = group.sort_values(self.column_sort)
for kind in self.value_columns:
yield Timeseries(group_name, kind, group[kind])
class LongTsFrameAdapter(PartitionedTsData):
def __init__(self, df, column_id, column_kind, column_value=None, column_sort=None):
"""
Adapter for Pandas DataFrames in long format, where different time series for the same id are
labeled by column `column_kind`.
:param df: the data frame
:type df: pd.DataFrame
:param column_id: the name of the column containing time series group ids
:type column_id: str
:param column_kind: the name of the column containing time series kinds for each id
:type column_kind: str
:param column_value: None or the name of the column containing time series values
If `None`, try to guess it from the remaining, unused columns.
:type column_value: str|None
:param column_sort: the name of the column to sort on
:type column_sort: str|None
"""
if column_id is None:
raise ValueError("A value for column_id needs to be supplied")
if column_kind is None:
raise ValueError("A value for column_kind needs to be supplied")
if column_value is None:
possible_value_columns = _get_value_columns(
df, column_id, column_sort, column_kind
)
if len(possible_value_columns) != 1:
                raise ValueError(
                    "Could not guess the value column, as the number of unused columns is not equal to 1. "
                    f"These columns are currently unused: {','.join(possible_value_columns)}. "
                    "Please hand it to the function as an argument."
)
self.column_value = possible_value_columns[0]
else:
self.column_value = column_value
_check_nan(df, column_id, column_kind, self.column_value)
if column_sort is not None:
_check_nan(df, column_sort)
self.column_sort = column_sort
self.df_grouped = df.groupby([column_id, column_kind])
super().__init__(df, column_id)
def __len__(self):
return len(self.df_grouped)
def __iter__(self):
for group_key, group in self.df_grouped:
if self.column_sort is not None:
group = group.sort_values(self.column_sort)
yield Timeseries(group_key[0], str(group_key[1]), group[self.column_value])
class TsDictAdapter(PartitionedTsData):
def __init__(self, ts_dict, column_id, column_value, column_sort=None):
"""
Adapter for a dict, which maps different time series kinds to Pandas DataFrames.
:param ts_dict: a dict of data frames
:type ts_dict: dict[str, pd.DataFrame]
:param column_id: the name of the column containing time series group ids
:type column_id: str
:param column_value: the name of the column containing time series values
:type column_value: str
:param column_sort: the name of the column to sort on
:type column_sort: str|None
"""
_check_colname(*list(ts_dict.keys()))
for df in ts_dict.values():
_check_nan(df, column_id, column_value)
self.column_value = column_value
if column_sort is not None:
for key, df in ts_dict.items():
_check_nan(df, column_sort)
self.grouped_dict = {
key: df.sort_values([column_sort]).groupby(column_id)
for key, df in ts_dict.items()
}
else:
self.grouped_dict = {
key: df.groupby(column_id) for key, df in ts_dict.items()
}
super().__init__(df, column_id)
def __iter__(self):
for kind, grouped_df in self.grouped_dict.items():
for ts_id, group in grouped_df:
yield Timeseries(ts_id, str(kind), group[self.column_value])
def __len__(self):
return sum(grouped_df.ngroups for grouped_df in self.grouped_dict.values())
class DaskTsAdapter(TsData):
def __init__(
self, df, column_id, column_kind=None, column_value=None, column_sort=None
):
if column_id is None:
raise ValueError("column_id must be set")
if column_id not in df.columns:
raise ValueError(f"Column not found: {column_id}")
# Get all columns, which are not id, kind or sort
possible_value_columns = _get_value_columns(
df, column_id, column_sort, column_kind
)
        # The user already has a kind column. That means we just need to group by id and kind
if column_kind is not None:
if column_kind not in df.columns:
raise ValueError(f"Column not found: {column_kind}")
self.df = df.groupby([column_id, column_kind])
# We assume the last remaining column is the value - but there needs to be one!
if column_value is None:
if len(possible_value_columns) != 1:
raise ValueError(
"Could not guess the value column! Please hand it to the function as an argument."
)
column_value = possible_value_columns[0]
else:
            # Ok, the user has no kind column, so the data is in wide format.
            # That means we have to melt it before we can group.
            # TODO: here is some room for optimization!
            # we could take the same approach as the Wide and Long adapters
# We first choose a name for our future kind column
column_kind = "kind"
# if the user has specified a value column, use it
            # if not, just use all remaining columns
if column_value is not None:
value_vars = [column_value]
else:
value_vars = possible_value_columns
column_value = "value"
# Make sure we are not reusing a column that already exists
while column_value in df.columns:
column_value += "_"
_check_colname(*value_vars)
id_vars = [column_id, column_sort] if column_sort else [column_id]
# Now melt and group
df_melted = df.melt(
id_vars=id_vars,
value_vars=value_vars,
var_name=column_kind,
value_name=column_value,
)
self.df = df_melted.groupby([column_id, column_kind])
self.column_id = column_id
self.column_kind = column_kind
self.column_value = column_value
self.column_sort = column_sort
def apply(self, f, meta, **kwargs):
"""
Apply the wrapped feature extraction function "f"
onto the data.
Before that, turn the data into the correct form of Timeseries instances
        usable by the feature extraction.
After the call, turn it back into pandas dataframes
for further processing.
"""
bound_function = _binding_helper(
f,
kwargs,
self.column_sort,
self.column_id,
self.column_kind,
self.column_value,
)
return self.df.apply(bound_function, meta=meta)
def pivot(self, results):
"""
The extract features function for dask returns a
dataframe of [id, variable, value].
Turn this into a pivoted dataframe, where only the variables are the columns
and the ids are the rows.
Attention: this is highly non-optimized!
"""
results = results.reset_index(drop=True).persist()
results = results.categorize(columns=["variable"])
feature_table = results.pivot_table(
index=self.column_id, columns="variable", values="value", aggfunc="sum"
)
return feature_table
def to_tsdata(
df, column_id=None, column_kind=None, column_value=None, column_sort=None
):
"""
Wrap supported data formats as a TsData object, i.e. an iterable of individual time series.
E.g. the DataFrame
==== ====== =========
id kind val
==== ====== =========
1 a -0.21761
1 a -0.613667
1 a -2.07339
2 b -0.576254
2 b -1.21924
==== ====== =========
into
    Iterable((1, 'a', pd.Series([-0.217610, -0.613667, -2.073386])),
             (2, 'b', pd.Series([-0.576254, -1.219238])))
:param df: one of the supported input formats
:type df: pd.DataFrame|dict|TsData
:param column_id: The name of the id column to group by.
:type column_id: str|None
:param column_kind: The name of the column keeping record on the kind of the value.
:type column_kind: str|None
:param column_value: The name for the column keeping the value itself.
:type column_value: str|None
:param column_sort: The name for the column to sort on.
:type column_sort: str|None
:return: a data adapter
:rtype: TsData
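    A minimal doctest-style sketch (column names chosen only for illustration):
    >>> df = pd.DataFrame({"id": [1, 1, 2], "value": [1.0, 2.0, 3.0]})
    >>> data = to_tsdata(df, column_id="id")
    >>> [(int(ts.id), ts.kind, list(ts.data)) for ts in data]
    [(1, 'value', [1.0, 2.0]), (2, 'value', [3.0])]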
"""
if isinstance(df, TsData):
return df
elif isinstance(df, pd.DataFrame):
if column_kind is not None:
return LongTsFrameAdapter(
df, column_id, column_kind, column_value, column_sort
)
else:
if column_value is not None:
return WideTsFrameAdapter(df, column_id, column_sort, [column_value])
else:
return WideTsFrameAdapter(df, column_id, column_sort)
elif isinstance(df, dict):
return TsDictAdapter(df, column_id, column_value, column_sort)
elif dd and isinstance(df, dd.DataFrame):
return DaskTsAdapter(df, column_id, column_kind, column_value, column_sort)
else:
raise ValueError(
"df must be a DataFrame or a dict of DataFrames. "
"See https://tsfresh.readthedocs.io/en/latest/text/data_formats.html"
)
|
b21bcdcda60480469ebd3a4d6b843c87789bc13c
|
e60f402bb3adf22b086ba4cd707db050c9e93c80
|
/src/pytest_recording/exceptions.py
|
c44fb2d809e3e628869303f96d5d8be3a558bd8d
|
[
"MIT"
] |
permissive
|
kiwicom/pytest-recording
|
8cbb45230b292316efe72c87c1501def3d96fde3
|
85c3ffa55e01d26ca0551bf6a9a3eacdd6c1d8a1
|
refs/heads/master
| 2023-08-20T02:25:40.548883
| 2023-07-31T23:36:35
| 2023-07-31T23:36:35
| 197,214,988
| 309
| 26
|
MIT
| 2023-09-04T16:50:25
| 2019-07-16T14:57:52
|
Python
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
exceptions.py
|
class UsageError(Exception):
"""Error in plugin usage."""
__module__ = "builtins"
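    # Presented as a builtin so error output stays terse:
    # "UsageError: ..." instead of the fully qualified exception path.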
|
9429ea112671cdab5f049c15ec4c2c268a9a43f0
|
2ac03b8c24df220ea32ea525e1d65aeb294cd1a4
|
/custom_components/waste_collection_schedule/waste_collection_schedule/source/iweb_itouchvision_com.py
|
4c44f6b09211c5d2129445f3bae2da366e94dd58
|
[
"MIT"
] |
permissive
|
mampfes/hacs_waste_collection_schedule
|
a7b98319a7483dedc8cf78b724f93932934c1702
|
1dc9476efef9963a141b9ac987e2708224b9eaaf
|
refs/heads/master
| 2023-08-16T21:14:46.088962
| 2023-08-16T10:05:24
| 2023-08-16T10:05:24
| 254,347,436
| 495
| 428
|
MIT
| 2023-09-12T18:59:07
| 2020-04-09T11:02:16
|
Python
|
UTF-8
|
Python
| false
| false
| 8,345
|
py
|
iweb_itouchvision_com.py
|
# Credit where it's due:
# This is predominantly a refactoring of the Somerset Council script from the UKBinCollectionData repo
# https://github.com/robbrad/UKBinCollectionData
import json
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection
TITLE = "iTouchVision"
URL = "https://iweb.itouchvision.com/"
COUNTRY = "uk"
EXTRA_INFO = [
{
"title": "Somerset Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "South Somerset District Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "Mendip District Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "Sedgemoor District Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "Somerset West & Taunton District Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "Somerset County Council",
"url": "https://www.somerset.gov.uk/",
"country": "uk",
},
{
"title": "Test Valley Borough Council",
"url": "https://www.testvalley.gov.uk/",
"country": "uk",
},
]
DESCRIPTION = """Consolidated source for waste collection services from:
Somerset Council, comprising four former District Councils (Mendip, Sedgemoor, Somerset West & Taunton, South Somerset) and Somerset County Council
Test Valley Borough Council
"""
HEADERS = {
"user-agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
}
URLS = {
"TEST_VALLEY": "https://iweb.itouchvision.com/portal/f?p=customer:BIN_DAYS:::NO:RP:UID:13353F039C4B1454827EE05536414091A8C058F4",
"SOMERSET": "https://iweb.itouchvision.com/portal/f?p=customer:BIN_DAYS:::NO:RP:UID:625C791B4D9301137723E9095361401AE8C03934",
"FLOW.ACCEPT": "https://iweb.itouchvision.com/portal/wwv_flow.accept",
"BIN_DAYS": "https://iweb.itouchvision.com/portal/itouchvision/r/customer/bin_days",
}
KEYLISTS = {
"POSTCODE_1": [
"P153_UPRN",
"P153_TEMP",
"P153_SYSDATE",
"P0_LANGUAGE",
"P153_POST_CODE",
],
"POSTCODE_2": [
"p_flow_id",
"p_flow_step_id",
"p_instance",
"p_page_submission_id",
"p_request",
"p_reload_on_submit",
],
"ADDRESS_1": ["P153_UPRN", "P153_TEMP", "P153_SYSDATE", "P0_LANGUAGE"],
"ADDRESS_2": [
"p_flow_id",
"p_flow_step_id",
"p_instance",
"p_page_submission_id",
"p_request",
"p_reload_on_submit",
],
}
TEST_CASES = {
"Somerset #1": {"postcode": "TA20 2JG", "uprn": "30071283", "council": "SOMERSET"},
"Somerset #2": {"postcode": "BA9 9NF", "uprn": "30002380", "council": "SOMERSET"},
"Somerset #3": {"postcode": "TA24 7JE", "uprn": 10023837109, "council": "SOMERSET"},
"Test Valley #1": {
"postcode": "SP10 3JB",
"uprn": "100060559598",
"council": "TEST_VALLEY",
},
"Test Valley #2": {
"postcode": "SO20 6EJ",
"uprn": "100060583697",
"council": "TEST_VALLEY",
},
"Test Valley #3": {
"postcode": "SO51 5BE",
"uprn": 100060571645,
"council": "TEST_VALLEY",
},
}
ICON_MAP = {
"GARDEN": "mdi:leaf",
"RECYCLING": "mdi:recycle",
"REFUSE": "mdi:trash-can",
"HOUSEHOLD WASTE": "mdi:trash-can",
"GARDEN WASTE": "mdi:leaf",
}
class Source:
def __init__(self, council, postcode, uprn):
self._postcode = postcode.upper().strip()
self._uprn = str(uprn)
self._council = council.upper()
def get_payloads(self, s):
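        # Collects the hidden form fields plus the salt and "protected" page-item
        # checksum that the portal expects to be echoed back on each submission.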
p1 = {i["name"]: i.get("value", "") for i in s.select("input[name]")}
p2 = {i["data-for"]: i.get("value", "") for i in s.select("input[data-for]")}
ps = s.select_one('input[id="pSalt"]').get("value")
pp = s.select_one('input[id="pPageItemsProtected"]').get("value")
return p1, p2, ps, pp
def fetch(self):
s = requests.Session()
s.headers.update(HEADERS)
# Get postcode search page
r0 = s.get(URLS[self._council])
# Extract values needed for the postcode search
soup = BeautifulSoup(r0.text, "html.parser")
payload1, payload2, payload_salt, payload_protected = self.get_payloads(soup)
payload1["p_request"] = "SEARCH"
payload1["P153_POST_CODE"] = self._postcode
# Build JSON for postcode search
merged_list = {**payload1, **payload2}
new_list = []
other_list = {}
for key in merged_list.keys():
temp_list = {}
val = merged_list[key]
if key in KEYLISTS["POSTCODE_1"]:
temp_list = {"n": key, "v": val}
new_list.append(temp_list)
elif key in KEYLISTS["POSTCODE_2"]:
other_list[key] = val
else:
temp_list = {"n": key, "v": "", "ck": val}
new_list.append(temp_list)
json_builder = {
"pageItems": {
"itemsToSubmit": new_list,
"protected": payload_protected,
"rowVersion": "",
"formRegionChecksums": [],
},
"salt": payload_salt,
}
json_object = json.dumps(json_builder, separators=(",", ":"))
other_list["p_json"] = json_object
# Update header and submit postcode search
s.headers.update(
{
"referer": URLS[self._council],
}
)
s.post(URLS["FLOW.ACCEPT"], data=other_list)
# Get address selection page
r2 = s.get(URLS["BIN_DAYS"])
# Extract values needed for address selection
soup = BeautifulSoup(r2.text, "html.parser")
payload1, payload2, payload_salt, payload_protected = self.get_payloads(soup)
payload1["p_request"] = "SUBMIT"
payload1["P153_UPRN"] = self._uprn
# Build JSON for address selection
merged_list = {**payload1, **payload2}
new_list = []
other_list = {}
for key in merged_list.keys():
temp_list = {}
val = merged_list[key]
if key in KEYLISTS["ADDRESS_1"]:
temp_list = {"n": key, "v": val}
new_list.append(temp_list)
elif key in ["P153_ZABY"]:
temp_list = {"n": key, "v": "1", "ck": val}
new_list.append(temp_list)
elif key in ["P153_POST_CODE"]:
temp_list = {"n": key, "v": self._postcode, "ck": val}
new_list.append(temp_list)
elif key in KEYLISTS["ADDRESS_2"]:
other_list[key] = val
else:
temp_list = {"n": key, "v": "", "ck": val}
new_list.append(temp_list)
json_builder = {
"pageItems": {
"itemsToSubmit": new_list,
"protected": payload_protected,
"rowVersion": "",
"formRegionChecksums": [],
},
"salt": payload_salt,
}
json_object = json.dumps(json_builder, separators=(",", ":"))
other_list["p_json"] = json_object
# Submit address selection
s.post(URLS["FLOW.ACCEPT"], data=other_list)
# Finally, get the collection schedule page
r4 = s.get(URLS["BIN_DAYS"])
soup = BeautifulSoup(r4.text, "html.parser")
entries = []
for item in soup.select(".t-MediaList-item"):
for value in item.select(".t-MediaList-body"):
waste_type = value.select("span")[1].get_text(strip=True).title()
waste_date = datetime.strptime(
value.select(".t-MediaList-desc")[0].get_text(strip=True),
"%A, %d %B, %Y",
).date()
entries.append(
Collection(
date=waste_date,
t=waste_type,
icon=ICON_MAP.get(waste_type.upper()),
)
)
return entries
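# Illustrative usage sketch (argument values taken from TEST_CASES above; the
# surrounding waste_collection_schedule framework supplies the Collection class):
#
#     source = Source(council="SOMERSET", postcode="TA20 2JG", uprn="30071283")
#     for collection in source.fetch():
#         print(collection)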
|
fc7be47ae8d41bbfb73bef37b3a1ee3d9755b38a
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/other/activity_selection.py
|
2cc08d9598622e8837bc76e5632bbb95721a6e6a
|
[
"MIT"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
activity_selection.py
|
"""The following implementation assumes that the activities
are already sorted according to their finish time"""
"""Prints a maximum set of activities that can be done by a
single person, one at a time"""
# n --> Total number of activities
# start[]--> An array that contains start time of all activities
# finish[] --> An array that contains finish time of all activities
def print_max_activities(start: list[int], finish: list[int]) -> None:
"""
>>> start = [1, 3, 0, 5, 8, 5]
>>> finish = [2, 4, 6, 7, 9, 9]
>>> print_max_activities(start, finish)
The following activities are selected:
0,1,3,4,
"""
n = len(finish)
print("The following activities are selected:")
# The first activity is always selected
i = 0
print(i, end=",")
# Consider rest of the activities
for j in range(n):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(j, end=",")
i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
|
0e7021a72df8271aa1aa39245f29ef4c4e2a0199
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/Pygments/py3/pygments/lexers/javascript.py
|
bc5e2e43cb8da41f277917e4d8d10a4648394088
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 62,859
|
py
|
javascript.py
|
"""
pygments.lexers.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for JavaScript and related languages.
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import bygroups, combined, default, do_insertions, include, \
inherit, Lexer, RegexLexer, this, using, words, line_re
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other, Generic, Whitespace
from pygments.util import get_bool_opt
import pygments.unistring as uni
__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer',
'CoffeeScriptLexer', 'MaskLexer', 'EarlGreyLexer', 'JuttleLexer',
'NodeConsoleLexer']
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
url = 'https://www.ecma-international.org/publications-and-standards/standards/ecma-262/'
aliases = ['javascript', 'js']
filenames = ['*.js', '*.jsm', '*.mjs', '*.cjs']
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Whitespace),
(r'<!--', Comment),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Whitespace, '#pop')
],
'root': [
(r'\A#! ?/.*?$', Comment.Hashbang), # recognized by node.js
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
# Numeric literals
(r'0[bB][01]+n?', Number.Bin),
(r'0[oO]?[0-7]+n?', Number.Oct), # Browsers support "0o7" and "07" (< ES5) notations
(r'0[xX][0-9a-fA-F]+n?', Number.Hex),
(r'[0-9]+n', Number.Integer), # Javascript BigInt requires an "n" postfix
# Javascript doesn't have actual integer literals, so every other
# numeric literal is handled by the regex below (including "normal")
# integers
(r'(\.[0-9]+|[0-9]+\.[0-9]*|[0-9]+)([eE][-+]?[0-9]+)?', Number.Float),
(r'\.\.\.|=>', Punctuation),
(r'\+\+|--|~|\?\?=?|\?|:|\\(?=\n)|'
r'(<<|>>>?|==?|!=?|(?:\*\*|\|\||&&|[-<>+*%&|^/]))=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(typeof|instanceof|in|void|delete|new)\b', Operator.Word, 'slashstartsregex'),
# Match stuff like: constructor
(r'\b(constructor|from|as)\b', Keyword.Reserved),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|yield|await|async|this|of|static|export|'
r'import|debugger|extends|super)\b', Keyword, 'slashstartsregex'),
(r'(var|let|const|with|function|class)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|double|enum|final|float|goto|'
r'implements|int|interface|long|native|package|private|protected|'
r'public|short|synchronized|throws|transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|BigInt|Function|Math|ArrayBuffer|'
r'Number|Object|RegExp|String|Promise|Proxy|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|DataView|'
r'document|window|globalThis|global|Symbol|Intl|'
r'WeakSet|WeakMap|Set|Map|Reflect|JSON|Atomics|'
r'Int(?:8|16|32)Array|BigInt64Array|Float32Array|Float64Array|'
r'Uint8ClampedArray|Uint(?:8|16|32)Array|BigUint64Array)\b', Name.Builtin),
(r'((?:Eval|Internal|Range|Reference|Syntax|Type|URI)?Error)\b', Name.Exception),
# Match stuff like: super(argument, list)
(r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
bygroups(Keyword, Whitespace), 'slashstartsregex'),
# Match stuff like: function() {...}
(r'([a-zA-Z_?.$][\w?.$]*)(?=\(\) \{)', Name.Other, 'slashstartsregex'),
(JS_IDENT, Name.Other),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'`', String.Backtick, 'interp'),
],
'interp': [
(r'`', String.Backtick, '#pop'),
(r'\\.', String.Backtick),
(r'\$\{', String.Interpol, 'interp-inside'),
(r'\$', String.Backtick),
(r'[^`\\$]+', String.Backtick),
],
'interp-inside': [
# TODO: should this include single-line comments and allow nesting strings?
(r'\}', String.Interpol, '#pop'),
include('root'),
],
}
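# A minimal usage sketch (not part of the original module): the lexer plugs into
# the standard pygments API like any other lexer.
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#
#     html = highlight("const answer = 42;", JavascriptLexer(), HtmlFormatter())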
class TypeScriptLexer(JavascriptLexer):
"""
For TypeScript source code.
.. versionadded:: 1.6
"""
name = 'TypeScript'
url = 'https://www.typescriptlang.org/'
aliases = ['typescript', 'ts']
filenames = ['*.ts']
mimetypes = ['application/x-typescript', 'text/x-typescript']
# Higher priority than the TypoScriptLexer, as TypeScript is far more
# common these days
priority = 0.5
tokens = {
'root': [
(r'(abstract|implements|private|protected|public|readonly)\b',
Keyword, 'slashstartsregex'),
(r'(enum|interface|override)\b', Keyword.Declaration, 'slashstartsregex'),
(r'\b(declare|type)\b', Keyword.Reserved),
# Match variable type keywords
(r'\b(string|boolean|number)\b', Keyword.Type),
# Match stuff like: module name {...}
(r'\b(module)(\s*)([\w?.$]+)(\s*)',
bygroups(Keyword.Reserved, Whitespace, Name.Other, Whitespace), 'slashstartsregex'),
# Match stuff like: (function: return type)
(r'([\w?.$]+)(\s*)(:)(\s*)([\w?.$]+)',
bygroups(Name.Other, Whitespace, Operator, Whitespace, Keyword.Type)),
# Match stuff like: Decorators
(r'@' + JS_IDENT, Keyword.Declaration),
inherit,
],
}
class KalLexer(RegexLexer):
"""
For Kal source code.
.. versionadded:: 2.0
"""
name = 'Kal'
url = 'http://rzimmerman.github.io/kal'
aliases = ['kal']
filenames = ['*.kal']
mimetypes = ['text/kal', 'application/kal']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Whitespace),
(r'###[^#].*?###', Comment.Multiline),
(r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)),
],
'functiondef': [
(r'([$a-zA-Z_][\w$]*)(\s*)', bygroups(Name.Function, Whitespace),
'#pop'),
include('commentsandwhitespace'),
],
'classdef': [
(r'\b(inherits)(\s+)(from)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'([$a-zA-Z_][\w$]*)(?=\s*\n)', Name.Class, '#pop'),
(r'[$a-zA-Z_][\w$]*\b', Name.Class),
include('commentsandwhitespace'),
],
'listcomprehension': [
(r'\]', Punctuation, '#pop'),
(r'\b(property|value)\b', Keyword),
include('root'),
],
'waitfor': [
(r'\n', Whitespace, '#pop'),
(r'\bfrom\b', Keyword),
include('root'),
],
'root': [
include('commentsandwhitespace'),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gimuysd]+\b|\B)', String.Regex),
(r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
Operator),
(r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
r'doesnt\s+exist)\b', Operator.Word),
(r'(\([^()]+\))?(\s*)(>)',
bygroups(Name.Function, Whitespace, Punctuation)),
(r'[{(]', Punctuation),
(r'\[', Punctuation, 'listcomprehension'),
(r'[})\].,]', Punctuation),
(r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
(r'\bclass\b', Keyword.Declaration, 'classdef'),
(r'\b(safe(?=\s))?(\s*)(wait(?=\s))(\s+)(for)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace,
Keyword), 'waitfor'),
(r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
(r'(?<![.$])(run)(\s+)(in)(\s+)(parallel)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(?<![.$])(for)(\s+)(parallel|series)?\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(?<![.$])(except)(\s+)(when)?\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(?<![.$])(fail)(\s+)(with)?\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(?<![.$])(inherits)(\s+)(from)?\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(?<![.$])(for)(\s+)(parallel|series)?\b',
bygroups(Keyword, Whitespace, Keyword)),
(words((
'in', 'of', 'while', 'until', 'break', 'return', 'continue',
'when', 'if', 'unless', 'else', 'otherwise', 'throw', 'raise',
'try', 'catch', 'finally', 'new', 'delete', 'typeof',
'instanceof', 'super'), prefix=r'(?<![.$])', suffix=r'\b'),
Keyword),
(words((
'true', 'false', 'yes', 'no', 'on', 'off', 'null', 'nothing',
'none', 'NaN', 'Infinity', 'undefined'), prefix=r'(?<![.$])',
suffix=r'\b'), Keyword.Constant),
(words((
'Array', 'Boolean', 'Date', 'Error', 'Function', 'Math',
'Number', 'Object', 'RegExp', 'String', 'decodeURI',
'decodeURIComponent', 'encodeURI', 'encodeURIComponent', 'eval',
'isFinite', 'isNaN', 'isSafeInteger', 'parseFloat', 'parseInt',
'document', 'window', 'globalThis', 'Symbol', 'print'),
suffix=r'\b'), Name.Builtin),
(r'([$a-zA-Z_][\w.$]*)(\s*)(:|[+\-*/]?\=)?\b',
bygroups(Name.Variable, Whitespace, Operator)),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all kal strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
            (r'#|\\.|"', String), # single-quoted strings don't need " escapes
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class LiveScriptLexer(RegexLexer):
"""
For LiveScript source code.
.. versionadded:: 1.6
"""
name = 'LiveScript'
url = 'https://livescript.net/'
aliases = ['livescript', 'live-script']
filenames = ['*.ls']
mimetypes = ['text/livescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Whitespace),
(r'/\*.*?\*/', Comment.Multiline),
(r'(#.*?)(\n)', bygroups(Comment.Single, Whitespace)),
],
'multilineregex': [
include('commentsandwhitespace'),
(r'//([gimuysd]+\b|\B)', String.Regex, '#pop'),
(r'/', String.Regex),
(r'[^/#]+', String.Regex)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'//', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
(r'/', Operator, '#pop'),
default('#pop'),
],
'root': [
(r'\A(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
(r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
r'[+*`%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|loop|break|'
r'return|continue|switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by|const|var|to|til)\b', Keyword,
'slashstartsregex'),
(r'(?<![.$])(true|false|yes|no|on|off|'
r'null|NaN|Infinity|undefined|void)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|'
r'Number|Object|RegExp|String|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
r'globalThis|Symbol|Symbol|BigInt)\b', Name.Builtin),
(r'([$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)',
bygroups(Name.Variable, Whitespace, Operator, Whitespace),
'slashstartsregex'),
(r'(@[$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)',
bygroups(Name.Variable.Instance, Whitespace, Operator,
Whitespace),
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
(r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\\S+', String),
(r'<\[.*?\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
            (r'#|\\.|"', String), # single-quoted strings don't need " escapes
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class DartLexer(RegexLexer):
"""
For Dart source code.
.. versionadded:: 1.5
"""
name = 'Dart'
url = 'http://dart.dev/'
aliases = ['dart']
filenames = ['*.dart']
mimetypes = ['text/x-dart']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('string_literal'),
(r'#!(.*?)$', Comment.Preproc),
(r'\b(import|export)\b', Keyword, 'import_decl'),
(r'\b(library|source|part of|part)\b', Keyword),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
(r'\b(class|extension|mixin)\b(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'class'),
(r'\b(as|assert|break|case|catch|const|continue|default|do|else|finally|'
r'for|if|in|is|new|rethrow|return|super|switch|this|throw|try|while)\b',
Keyword),
(r'\b(abstract|async|await|const|covariant|extends|external|factory|final|'
r'get|implements|late|native|on|operator|required|set|static|sync|typedef|'
r'var|with|yield)\b', Keyword.Declaration),
(r'\b(bool|double|dynamic|int|num|Function|Never|Null|Object|String|void)\b',
Keyword.Type),
(r'\b(false|null|true)\b', Keyword.Constant),
(r'[~!%^&*+=|?:<>/-]|as\b', Operator),
(r'@[a-zA-Z_$]\w*', Name.Decorator),
(r'[a-zA-Z_$]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[(){}\[\],.;]', Punctuation),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# DIGIT+ (‘.’ DIGIT*)? EXPONENT?
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
(r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
(r'\n', Whitespace)
# pseudo-keyword negate intentionally left out
],
'class': [
(r'[a-zA-Z_$]\w*', Name.Class, '#pop')
],
'import_decl': [
include('string_literal'),
(r'\s+', Whitespace),
(r'\b(as|deferred|show|hide)\b', Keyword),
(r'[a-zA-Z_$]\w*', Name),
(r'\,', Punctuation),
(r'\;', Punctuation, '#pop')
],
'string_literal': [
# Raw strings.
(r'r"""([\w\W]*?)"""', String.Double),
(r"r'''([\w\W]*?)'''", String.Single),
(r'r"(.*?)"', String.Double),
(r"r'(.*?)'", String.Single),
# Normal Strings.
(r'"""', String.Double, 'string_double_multiline'),
(r"'''", String.Single, 'string_single_multiline'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single')
],
'string_common': [
(r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])",
String.Escape),
(r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol))
],
'string_double': [
(r'"', String.Double, '#pop'),
(r'[^"$\\\n]+', String.Double),
include('string_common'),
(r'\$+', String.Double)
],
'string_double_multiline': [
(r'"""', String.Double, '#pop'),
(r'[^"$\\]+', String.Double),
include('string_common'),
(r'(\$|\")+', String.Double)
],
'string_single': [
(r"'", String.Single, '#pop'),
(r"[^'$\\\n]+", String.Single),
include('string_common'),
(r'\$+', String.Single)
],
'string_single_multiline': [
(r"'''", String.Single, '#pop'),
(r'[^\'$\\]+', String.Single),
include('string_common'),
(r'(\$|\')+', String.Single)
]
}
class LassoLexer(RegexLexer):
"""
For Lasso source code, covering both Lasso 9
syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
HTML, use the `LassoHtmlLexer`.
Additional options accepted:
`builtinshighlighting`
If given and ``True``, highlight builtin types, traits, methods, and
members (default: ``True``).
`requiredelimiters`
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
.. versionadded:: 1.6
"""
name = 'Lasso'
aliases = ['lasso', 'lassoscript']
filenames = ['*.lasso', '*.lasso[89]']
alias_filenames = ['*.incl', '*.inc', '*.las']
mimetypes = ['text/x-lasso']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'^#![ \S]+lasso9\b', Comment.Preproc, 'lasso'),
(r'(?=\[|<)', Other, 'delimiters'),
(r'\s+', Whitespace),
default(('delimiters', 'lassofile')),
],
'delimiters': [
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'),
(r'<(!--.*?-->)?', Other),
(r'[^[<]+', Other),
],
'nosquarebrackets': [
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Other),
(r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'),
(r'<(!--.*?-->)?', Other),
(r'[^[<]+', Other),
],
'noprocess': [
(r'\[/noprocess\]', Comment.Preproc, '#pop'),
(r'\[', Other),
(r'[^[]', Other),
],
'squarebrackets': [
(r'\]', Comment.Preproc, '#pop'),
include('lasso'),
],
'anglebrackets': [
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'lassofile': [
(r'\]|\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'whitespacecomments': [
(r'\s+', Whitespace),
(r'(//.*?)(\s*)$', bygroups(Comment.Single, Whitespace)),
(r'/\*\*!.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
],
'lasso': [
# whitespace/comments
include('whitespacecomments'),
# literals
(r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'(infinity|NaN)\b', Number),
(r"'", String.Single, 'singlestring'),
(r'"', String.Double, 'doublestring'),
(r'`[^`]*`', String.Backtick),
# names
(r'\$[a-z_][\w.]*', Name.Variable),
(r'#([a-z_][\w.]*|\d+\b)', Name.Variable.Instance),
(r"(\.)(\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Whitespace, Name.Variable.Class)),
(r"(self)(\s*)(->)(\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Whitespace, Operator, Whitespace,
Name.Variable.Class)),
(r'(\.\.?)(\s*)([a-z_][\w.]*(=(?!=))?)',
bygroups(Name.Builtin.Pseudo, Whitespace, Name.Other.Member)),
(r'(->\\?|&)(\s*)([a-z_][\w.]*(=(?!=))?)',
bygroups(Operator, Whitespace, Name.Other.Member)),
(r'(?<!->)(self|inherited|currentcapture|givenblock)\b',
Name.Builtin.Pseudo),
(r'-(?!infinity)[a-z_][\w.]*', Name.Attribute),
(r'(::)(\s*)([a-z_][\w.]*)',
bygroups(Punctuation, Whitespace, Name.Label)),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
r'Error_InvalidDatabase|Error_InvalidPassword|'
r'Error_InvalidUsername|Error_ModuleNotFound|'
r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
r'Error_UpdateError)\b', Name.Exception),
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*)(=>)(\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Whitespace, Name.Class,
Whitespace, Operator, Whitespace, Keyword)),
(r'(define)(\s+)([a-z_][\w.]*)(\s*)(->)(\s*)([a-z_][\w.]*=?|[-+*/%])',
bygroups(Keyword.Declaration, Whitespace, Name.Class,
Whitespace, Operator, Whitespace, Name.Function),
'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Function), 'signature'),
(r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*\())', bygroups(Keyword, Whitespace, Name.Function),
'signature'),
(r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Whitespace, Name.Function)),
# keywords
(r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant),
(r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
r'null|boolean|bytes|keyword|list|locale|queue|set|stack|'
r'staticarray)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Whitespace, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Whitespace, Name)),
(r'require\b', Keyword, 'requiresection'),
(r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
(r'(/?)(Cache|Database_Names|Database_SchemaNames|'
r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|Link_FirstRecord|'
r'Link_LastGroup|Link_LastRecord|Link_NextGroup|Link_NextRecord|'
r'Link_PrevGroup|Link_PrevRecord|Log|Loop|Output_None|Portal|'
r'Private|Protect|Records|Referer|Referrer|Repeating|ResultSet|'
r'Rows|Search_Args|Search_Arguments|Select|Sort_Args|'
r'Sort_Arguments|Thread_Atomic|Value_List|While|Abort|Case|Else|'
r'Fail_If|Fail_IfNot|Fail|If_Empty|If_False|If_Null|If_True|'
r'Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|Return|'
r'Return_Value|Run_Children|SOAP_DefineTag|SOAP_LastRequest|'
r'SOAP_LastResponse|Tag_Name|ascending|average|by|define|'
r'descending|do|equals|frozen|group|handle_failure|import|in|into|'
r'join|let|match|max|min|on|order|parent|protected|provide|public|'
r'require|returnhome|skip|split_thread|sum|take|thread|to|trait|'
r'type|where|with|yield|yieldhome)\b',
bygroups(Punctuation, Keyword)),
# other
(r',', Punctuation, 'commamember'),
(r'(and|or|not)\b', Operator.Word),
(r'([a-z_][\w.]*)(\s*)(::)(\s*)([a-z_][\w.]*)?(\s*=(?!=))',
bygroups(Name, Whitespace, Punctuation, Whitespace, Name.Label,
Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
(r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
],
'singlestring': [
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
(r"\\", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
(r'\\', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:\n\r]+:|'
r'[abefnrtv?"\'\\]|$)', String.Escape),
],
'signature': [
(r'=>', Operator, '#pop'),
(r'\)', Punctuation, '#pop'),
(r'[(,]', Punctuation, 'parameter'),
include('lasso'),
],
'parameter': [
(r'\)', Punctuation, '#pop'),
(r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
(r'\.\.\.', Name.Builtin.Pseudo),
include('lasso'),
],
'requiresection': [
(r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'),
(r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name),
(r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'),
(r'(::)(\s*)([a-z_][\w.]*)',
bygroups(Punctuation, Whitespace, Name.Label)),
(r',', Punctuation),
include('whitespacecomments'),
],
'requiresignature': [
(r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
(r'-?[a-z_][\w.]*', Name.Attribute),
(r'(::)(\s*)([a-z_][\w.]*)',
bygroups(Punctuation, Whitespace, Name.Label)),
(r'\.\.\.', Name.Builtin.Pseudo),
(r'[(,]', Punctuation),
include('whitespacecomments'),
],
'commamember': [
(r'(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
Name.Function, 'signature'),
include('whitespacecomments'),
default('#pop'),
],
}
def __init__(self, **options):
self.builtinshighlighting = get_bool_opt(
options, 'builtinshighlighting', True)
self.requiredelimiters = get_bool_opt(
options, 'requiredelimiters', False)
self._builtins = set()
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
for key, value in BUILTINS.items():
self._builtins.update(value)
for key, value in MEMBERS.items():
self._members.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.requiredelimiters:
stack.append('delimiters')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if (token is Name.Other and value.lower() in self._builtins or
token is Name.Other.Member and
value.lower().rstrip('=') in self._members):
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if 'bin/lasso9' in text:
rv += 0.8
if re.search(r'<\?lasso', text, re.I):
rv += 0.4
if re.search(r'local\(', text, re.I):
rv += 0.4
return rv
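# Illustrative sketch (not part of the original module): how the options
# documented in the LassoLexer docstring might be used. `highlight` and
# `TerminalFormatter` are standard pygments entry points; the snippet is made up.
def _demo_lasso_lexer():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = "<?lasso local(greeting = 'hello') ?>"
    lexer = LassoLexer(requiredelimiters=True, builtinshighlighting=True)
    return highlight(code, lexer, TerminalFormatter())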
class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
.. versionadded:: 1.3
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//[^\n]*\n|/[*](?:[^*]|[*][^/])*[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Whitespace),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Whitespace),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Whitespace, Keyword, Whitespace)),
include('statements'),
('[{()}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Whitespace, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Whitespace, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Whitespace, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Whitespace, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\s+', Whitespace),
(r'(\\)(\n)',
bygroups(String.Escape, Whitespace)), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop'),
],
'badregex': [
(r'\n', Whitespace, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|'
r'Number|Object|RegExp|String|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window|globalThis|Symbol)\b', Name.Builtin),
(r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_]\w*', Name),
],
'classname': [
# interface definition that inherits
(r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws +
r')([a-zA-Z_]\w*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_]\w*)', Name.Class, '#pop'),
],
'forward_classname': [
(r'([a-zA-Z_]\w*)(\s*)(,)(\s*)',
bygroups(Name.Class, Whitespace, Text, Whitespace), '#push'),
(r'([a-zA-Z_]\w*)(\s*)(;?)',
bygroups(Name.Class, Whitespace, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_]\w+)', # function name
bygroups(Name.Function), "#pop"),
default('#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_]\w+)', Text),
],
'expression': [
(r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace), '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Whitespace),
(r'\n', Whitespace, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'(.*?)(\n)', bygroups(Comment, Whitespace)),
]
}
def analyse_text(text):
if re.search(r'^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
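# Illustrative sketch (not part of the original module): the @import heuristic
# above is what drives lexer guessing for Objective-J sources; the sample text
# here is made up.
def _demo_objj_detection():
    sample = '@import <Foundation/Foundation.j>\n'
    return ObjectiveJLexer.analyse_text(sample)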
class CoffeeScriptLexer(RegexLexer):
"""
For CoffeeScript source code.
.. versionadded:: 1.3
"""
name = 'CoffeeScript'
url = 'http://coffeescript.org'
aliases = ['coffeescript', 'coffee-script', 'coffee']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
_operator_re = (
r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|'
r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|\^/])=?')
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Whitespace),
(r'###[^#].*?###', Comment.Multiline),
(r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)),
],
'multilineregex': [
(r'[^/#]+', String.Regex),
(r'///([gimuysd]+\b|\B)', String.Regex, '#pop'),
(r'#\{', String.Interpol, 'interpoling_string'),
(r'[/#]', String.Regex),
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'///', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
# This isn't really guarding against mishighlighting well-formed
# code, just the ability to infinite-loop between root and
# slashstartsregex.
(r'/', Operator, '#pop'),
default('#pop'),
],
'root': [
include('commentsandwhitespace'),
(r'\A(?=\s|/)', Text, 'slashstartsregex'),
(_operator_re, Operator, 'slashstartsregex'),
(r'(?:\([^()]*\))?\s*[=-]>', Name.Function, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|'
r'loop|break|return|continue|'
r'switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
(r'(?<![.$])(true|false|yes|no|on|off|null|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|'
r'Number|Object|RegExp|String|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|globalThis|Symbol)\b',
Name.Builtin),
(r'([$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)',
bygroups(Name.Variable, Whitespace, Operator, Whitespace),
'slashstartsregex'),
(r'(@[$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)',
bygroups(Name.Variable.Instance, Whitespace, Operator, Whitespace),
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][\w$]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class MaskLexer(RegexLexer):
"""
For Mask markup.
.. versionadded:: 2.0
"""
name = 'Mask'
url = 'https://github.com/atmajs/MaskJS'
aliases = ['mask']
filenames = ['*.mask']
mimetypes = ['text/x-mask']
flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
(r'[{};>]', Punctuation),
(r"'''", String, 'string-trpl-single'),
(r'"""', String, 'string-trpl-double'),
(r"'", String, 'string-single'),
(r'"', String, 'string-double'),
(r'([\w-]+)', Name.Tag, 'node'),
(r'([^.#;{>\s]+)', Name.Class, 'node'),
(r'(#[\w-]+)', Name.Function, 'node'),
(r'(\.[\w-]+)', Name.Variable.Class, 'node')
],
'string-base': [
(r'\\.', String.Escape),
(r'~\[', String.Interpol, 'interpolation'),
(r'.', String.Single),
],
'string-single': [
(r"'", String.Single, '#pop'),
include('string-base')
],
'string-double': [
(r'"', String.Single, '#pop'),
include('string-base')
],
'string-trpl-single': [
(r"'''", String.Single, '#pop'),
include('string-base')
],
'string-trpl-double': [
(r'"""', String.Single, '#pop'),
include('string-base')
],
'interpolation': [
(r'\]', String.Interpol, '#pop'),
(r'(\s*)(:)', bygroups(Whitespace, String.Interpol), 'expression'),
(r'(\s*)(\w+)(:)', bygroups(Whitespace, Name.Other, Punctuation)),
(r'[^\]]+', String.Interpol)
],
'expression': [
(r'[^\]]+', using(JavascriptLexer), '#pop')
],
'node': [
(r'\s+', Whitespace),
(r'\.', Name.Variable.Class, 'node-class'),
(r'\#', Name.Function, 'node-id'),
(r'(style)([ \t]*)(=)',
bygroups(Name.Attribute, Whitespace, Operator),
'node-attr-style-value'),
(r'([\w:-]+)([ \t]*)(=)',
bygroups(Name.Attribute, Whitespace, Operator),
'node-attr-value'),
(r'[\w:-]+', Name.Attribute),
(r'[>{;]', Punctuation, '#pop')
],
'node-class': [
(r'[\w-]+', Name.Variable.Class),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-id': [
(r'[\w-]+', Name.Function),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-attr-value': [
(r'\s+', Whitespace),
(r'\w+', Name.Variable, '#pop'),
(r"'", String, 'string-single-pop2'),
(r'"', String, 'string-double-pop2'),
default('#pop')
],
'node-attr-style-value': [
(r'\s+', Whitespace),
(r"'", String.Single, 'css-single-end'),
(r'"', String.Single, 'css-double-end'),
include('node-attr-value')
],
'css-base': [
(r'\s+', Whitespace),
(r";", Punctuation),
(r"[\w\-]+\s*:", Name.Builtin)
],
'css-single-end': [
include('css-base'),
(r"'", String.Single, '#pop:2'),
(r"[^;']+", Name.Entity)
],
'css-double-end': [
include('css-base'),
(r'"', String.Single, '#pop:2'),
(r'[^;"]+', Name.Entity)
],
'string-single-pop2': [
(r"'", String.Single, '#pop:2'),
include('string-base')
],
'string-double-pop2': [
(r'"', String.Single, '#pop:2'),
include('string-base')
],
}
class EarlGreyLexer(RegexLexer):
"""
For Earl-Grey source code.
.. versionadded:: 2.1
"""
name = 'Earl Grey'
aliases = ['earl-grey', 'earlgrey', 'eg']
filenames = ['*.eg']
mimetypes = ['text/x-earl-grey']
tokens = {
'root': [
(r'\n', Whitespace),
include('control'),
(r'[^\S\n]+', Text),
(r'(;;.*)(\n)', bygroups(Comment, Whitespace)),
(r'[\[\]{}:(),;]', Punctuation),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)),
(r'\\', Text),
include('errors'),
(words((
'with', 'where', 'when', 'and', 'not', 'or', 'in',
'as', 'of', 'is'),
prefix=r'(?<=\s|\[)', suffix=r'(?![\w$\-])'),
Operator.Word),
(r'[*@]?->', Name.Function),
(r'[+\-*/~^<>%&|?!@#.]*=', Operator.Word),
(r'\.{2,3}', Operator.Word), # Range Operator
(r'([+*/~^<>&|?!]+)|([#\-](?=\s))|@@+(?=\s)|=+', Operator),
(r'(?<![\w$\-])(var|let)(?:[^\w$])', Keyword.Declaration),
include('keywords'),
include('builtins'),
include('assignment'),
(r'''(?x)
(?:()([a-zA-Z$_](?:[\w$\-]*[\w$])?)|
(?<=[\s{\[(])(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?))
(?=.*%)''',
bygroups(Punctuation, Name.Tag, Punctuation, Name.Class.Start), 'dbs'),
(r'[rR]?`', String.Backtick, 'bt'),
(r'[rR]?```', String.Backtick, 'tbt'),
(r'(?<=[\s\[{(,;])\.([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
r'(?=[\s\]}),;])', String.Symbol),
include('nested'),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?"', String, combined('stringescape', 'dqs')),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?\'', String, combined('stringescape', 'sqs')),
(r'"""', String, combined('stringescape', 'tdqs')),
include('tuple'),
include('import_paths'),
include('name'),
include('numbers'),
],
'dbs': [
(r'(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)(?=[.\[\s])',
bygroups(Punctuation, Name.Class.DBS)),
(r'(\[)([\^#][a-zA-Z$_](?:[\w$\-]*[\w$])?)(\])',
bygroups(Punctuation, Name.Entity.DBS, Punctuation)),
(r'\s+', Whitespace),
(r'%', Operator.DBS, '#pop'),
],
'import_paths': [
(r'(?<=[\s:;,])(\.{1,3}(?:[\w\-]*/)*)(\w(?:[\w\-]*\w)*)(?=[\s;,])',
bygroups(Text.Whitespace, Text)),
],
'assignment': [
(r'(\.)?([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
r'(?=\s+[+\-*/~^<>%&|?!@#.]*\=\s)',
bygroups(Punctuation, Name.Variable))
],
'errors': [
(words(('Error', 'TypeError', 'ReferenceError'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Exception),
(r'''(?x)
(?<![\w$])
E\.[\w$](?:[\w$\-]*[\w$])?
(?:\.[\w$](?:[\w$\-]*[\w$])?)*
(?=[({\[?!\s])''',
Name.Exception),
],
'control': [
(r'''(?x)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?!\n)\s+
(?!and|as|each\*|each|in|is|mod|of|or|when|where|with)
(?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[\w$-]*[\w$])?)''',
Keyword.Control),
(r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(?!\n)(\s+)(?=[\'"\d{\[(])',
bygroups(Keyword.Control, Whitespace)),
(r'''(?x)
(?:
(?<=[%=])|
(?<=[=\-]>)|
(?<=with|each|with)|
(?<=each\*|where)
)(\s+)
([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Whitespace, Keyword.Control, Punctuation)),
(r'''(?x)
(?<![+\-*/~^<>%&|?!@#.])(\s+)
([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Whitespace, Keyword.Control, Punctuation)),
],
'nested': [
(r'''(?x)
(?<=[\w$\]})])(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=\s+with(?:\s|\n))''',
bygroups(Punctuation, Name.Function)),
(r'''(?x)
(?<!\s)(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=[}\]).,;:\s])''',
bygroups(Punctuation, Name.Field)),
(r'''(?x)
(?<=[\w$\]})])(\.)
([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=[\[{(:])''',
bygroups(Punctuation, Name.Function)),
],
'keywords': [
(words((
'each', 'each*', 'mod', 'await', 'break', 'chain',
'continue', 'elif', 'expr-value', 'if', 'match',
'return', 'yield', 'pass', 'else', 'require', 'var',
'let', 'async', 'method', 'gen'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Keyword.Pseudo),
(words(('this', 'self', '@'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Constant),
(words((
'Function', 'Object', 'Array', 'String', 'Number',
'Boolean', 'ErrorFactory', 'ENode', 'Promise'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Type),
],
'builtins': [
(words((
'send', 'object', 'keys', 'items', 'enumerate', 'zip',
'product', 'neighbours', 'predicate', 'equal',
'nequal', 'contains', 'repr', 'clone', 'range',
'getChecker', 'get-checker', 'getProperty', 'get-property',
'getProjector', 'get-projector', 'consume', 'take',
'promisify', 'spawn', 'constructor'),
prefix=r'(?<![\w\-#.])', suffix=r'(?![\w\-.])'),
Name.Builtin),
(words((
'true', 'false', 'null', 'undefined'),
prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Constant),
],
'name': [
(r'@([a-zA-Z$_](?:[\w$-]*[\w$])?)', Name.Variable.Instance),
(r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(\+\+|\-\-)?',
bygroups(Name.Symbol, Operator.Word))
],
'tuple': [
(r'#[a-zA-Z_][\w\-]*(?=[\s{(,;])', Name.Namespace)
],
'interpoling_string': [
(r'\}', String.Interpol, '#pop'),
include('root')
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'[^\\\'"]', String),
(r'[\'"\\]', String),
(r'\n', String) # All strings are multiline in EG
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape),
(r'\{', String.Interpol, 'interpoling_string'),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
],
'bt': [
(r'`', String.Backtick, '#pop'),
(r'(?<!`)\n', String.Backtick),
(r'\^=?', String.Escape),
(r'.+', String.Backtick),
],
'tbt': [
(r'```', String.Backtick, '#pop'),
(r'\n', String.Backtick),
(r'\^=?', String.Escape),
(r'[^`]+', String.Backtick),
],
'numbers': [
(r'\d+\.(?!\.)\d*([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'8r[0-7]+', Number.Oct),
(r'2r[01]+', Number.Bin),
(r'16r[a-fA-F0-9]+', Number.Hex),
(r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?',
Number.Radix),
(r'\d+', Number.Integer)
],
}
class JuttleLexer(RegexLexer):
"""
For Juttle source code.
.. versionadded:: 2.2
"""
name = 'Juttle'
url = 'http://juttle.github.io/'
aliases = ['juttle']
filenames = ['*.juttle']
mimetypes = ['application/juttle', 'application/x-juttle',
'text/x-juttle', 'text/juttle']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r':\d{2}:\d{2}:\d{2}(\.\d*)?:', String.Moment),
(r':(now|beginning|end|forever|yesterday|today|tomorrow|'
r'(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment),
(r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?'
r'(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment),
(r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|'
r'day|week|month|year)[s]?'
r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|'
r'day|week|month|year)[s]?)'
r'|[ ]+(ago|from[ ]+now))*:', String.Moment),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'),
(r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration,
'slashstartsregex'),
(r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|'
r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b',
Keyword.Reserved),
(r'(true|false|null|Infinity)\b', Keyword.Constant),
(r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b',
Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
class NodeConsoleLexer(Lexer):
"""
For parsing within an interactive Node.js REPL, such as:
.. sourcecode:: nodejsrepl
> let a = 3
undefined
> a
3
> let b = '4'
undefined
> b
'4'
> b == a
false
.. versionadded:: 2.10
"""
name = 'Node.js REPL console session'
aliases = ['nodejsrepl', ]
mimetypes = ['text/x-nodejsrepl', ]
def get_tokens_unprocessed(self, text):
jslexer = JavascriptLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1]),
(1, Whitespace, line[1:2])]))
curcode += line[2:]
elif line.startswith('...'):
# node does a nested ... thing depending on depth
code = line.lstrip('.')
lead = len(line) - len(code)
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:lead])]))
curcode += code
else:
if curcode:
yield from do_insertions(insertions,
jslexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
yield from do_insertions([],
jslexer.get_tokens_unprocessed(line))
if curcode:
yield from do_insertions(insertions,
jslexer.get_tokens_unprocessed(curcode))
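# Illustrative sketch (not part of the original module): highlight a transcript
# like the one in the NodeConsoleLexer docstring; the session text is made up.
def _demo_node_repl_highlight():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    session = "> let a = 3\nundefined\n> a\n3\n"
    return highlight(session, NodeConsoleLexer(), TerminalFormatter())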
|
f28934fb9a46ecf60aa1fbeba70cb48230866071
|
772e04b18f36fe1bffb05c16ef4eff3ba765fd13
|
/sample_kg/network_prediction/script/split_graph.py
|
314549bae6dbef36151ebd07d00f87321d6b98cc
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
clinfo/kGCN
|
3c74f552dd9d71d470a3173012b01733a1262688
|
32328d5a41e6ed7491b3edb705ff94658fc95d3f
|
refs/heads/master
| 2023-08-16T19:43:17.149381
| 2023-08-03T00:08:11
| 2023-08-03T00:08:11
| 194,075,235
| 110
| 38
|
NOASSERTION
| 2022-02-04T17:09:55
| 2019-06-27T10:31:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,732
|
py
|
split_graph.py
|
import sys
import pandas as pd
from sklearn.model_selection import KFold
import argparse
def check_graph(filename):
print(f'\n== Check input data ==\n'
f'[LOAD] input graph: {filename}')
graph = pd.read_table(filename, sep='\t', header=None)
if len(graph.columns) == 3:
graph.columns = ['nodeA', 'edgetype', 'nodeB']
node_list = []
nodepair_list = []
edgetype_list = []
for node1, edgetype, node2 in zip(graph['nodeA'], graph['edgetype'], graph['nodeB']):
nodepair = (node1, node2)
nodepair = tuple(sorted(nodepair))
nodepair_list.append(nodepair)
edgetype_list.append(edgetype)
node_list.append(node1)
node_list.append(node2)
nodepair_list_deduplicates = list(set(nodepair_list))
edgetype_list_deduplicates = list(set(edgetype_list))
node_list_deduplicates = list(set(node_list))
print(f'# node: {len(node_list_deduplicates)}\n'
f'# edge: {len(graph)}\n'
f'# nodepair: {len(nodepair_list)}\n'
f'# nodepair after deduplication: {len(nodepair_list_deduplicates)}\n'
f'# edgetype: {len(edgetype_list_deduplicates)}')
if len(nodepair_list) == len(nodepair_list_deduplicates):
print('[Check]: OK, no edge duplicates. Graph is undirected.')
graph = graph.sample(frac=1).reset_index(drop=True) # shuffle rows and reset row-index number
return graph
else:
print('[ERROR]: Edge duplicates exist. Graph appears to be directed.\n')
sys.exit(1)
elif len(graph.columns) == 2:
graph.columns = ['nodeA', 'nodeB']
nodepair_list = []
node_list = []
for node1, node2 in zip(graph['nodeA'], graph['nodeB']):
nodepair = (node1, node2)
nodepair = tuple(sorted(nodepair))
nodepair_list.append(nodepair)
node_list.append(node1)
node_list.append(node2)
nodepair_list_deduplicates = list(set(nodepair_list))
node_list_deduplicates = list(set(node_list))
print(f'# node: {len(node_list_deduplicates)}\n'
f'# edge: {len(graph)}\n'
f'# nodepair: {len(nodepair_list)}\n'
f'# nodepair after deduplication: {len(nodepair_list_deduplicates)}')
if len(nodepair_list) == len(nodepair_list_deduplicates):
print('[Check]: OK, no edge duplicates. Graph is undirected.')
graph = graph.sample(frac=1).reset_index(drop=True) # shuffle rows and reset row-index number
return graph
else:
print('[ERROR]: Edge duplicates exist. Graph appears to be directed.\n')
sys.exit(1)
else:
print('[ERROR]: Unknown format.\n')
sys.exit(1)
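# Note on check_graph() above (illustrative): each edge is canonicalised by
# sorting its endpoints, so an undirected edge listed in both directions maps
# to the same key, e.g. tuple(sorted(('B', 'A'))) == tuple(sorted(('A', 'B'))) # -> ('A', 'B')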
def split(graph, output, mode, split_rate, split_num, cv_fold):
print('\n== Split graph data into train/test ==')
if mode == 'rate':
print('[Split mode]: set rate')
train_graph = graph.sample(frac=split_rate, replace=False, axis=0)  # axis=0: rows, frac: sampling fraction, replace: allow duplicate picks
train_graph_filename = output + '.train.graph.tsv'
print(f'[SAVE] train file: {train_graph_filename}\n'
f'train split rate: {split_rate}\n'
f'train shape: {train_graph.shape}')
with open(train_graph_filename, 'w') as f:
train_graph.to_csv(f, sep='\t', header=False, index=False)
# Prep test
test_graph = graph.drop(train_graph.index)
test_graph_filename = output + '.test.graph.tsv'
print(f'[SAVE] test file: {test_graph_filename}\n'
f'test split rate: {1 - split_rate}\n'
f'test shape: {test_graph.shape}\n')
with open(test_graph_filename, 'w') as ff:
test_graph.to_csv(ff, sep='\t', header=False, index=False)
elif mode == 'num':
print('[Split mode]: set actual number')
train_graph = graph.sample(n=split_num, replace=False, axis=0)
train_graph_filename = output + '.train.graph.tsv'
print(f'[SAVE] train file: {train_graph_filename}\n'
f'train shape: {train_graph.shape}')
with open(train_graph_filename, 'w') as f:
train_graph.to_csv(f, sep='\t', header=False, index=False)
# Prep test
test_graph = graph.drop(train_graph.index)
test_graph_filename = output + '.test.graph.tsv'
print(f'[SAVE] test file: {test_graph_filename}\n'
f'test shape: {test_graph.shape}\n')
with open(test_graph_filename, 'w') as ff:
test_graph.to_csv(ff, sep='\t', header=False, index=False)
elif mode == 'cv':
print('[Split mode]: cross validation')
kf = KFold(n_splits=cv_fold, shuffle=True, random_state=1234) # shuffle data here
train_idx_list = []
test_idx_list = []
for train, test in kf.split(graph):
train_idx_list.append(train)
test_idx_list.append(test)
for i, (train, test) in enumerate(zip(train_idx_list, test_idx_list)):
print(f'- generate dataset for cv{i}')
trainset = graph.iloc[train]
testset = graph.iloc[test]
filename_train = output + '_cv' + str(i) + '.train.graph.tsv'
print(f'[SAVE] train file: {filename_train}\n'
f'trainset shape: {trainset.shape}')
with open(filename_train, 'w') as f:
trainset.to_csv(f, sep='\t', header=False, index=False)
filename_test = output + '_cv' + str(i) + '.test.graph.tsv'
print(f'[SAVE] test file: {filename_test}\n'
f'testset shape: {testset.shape}\n')
with open(filename_test, 'w') as f:
testset.to_csv(f, sep='\t', header=False, index=False)
else:
print('[ERROR]: you need to select a mode.\n')
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='input graph data')
parser.add_argument('--output', type=str, help='set output file name')
parser.add_argument('--mode', type=str, help='select: cv, rate, num')
parser.add_argument('--cv_fold', type=int, default=5, help='set cv fold to split data')
parser.add_argument('--split_rate', type=float, default=0.2, help='data split rate for train')
parser.add_argument('--split_num', type=int, default=5000, help='data extraction number for train')
args = parser.parse_args()
graph = check_graph(args.input)
split(graph, args.output, args.mode, args.split_rate, args.split_num, args.cv_fold)
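# Example invocations (illustrative; file names are hypothetical):
#   python split_graph.py --input graph.tsv --output mygraph --mode cv --cv_fold 5
#   python split_graph.py --input graph.tsv --output mygraph --mode rate --split_rate 0.8
#   python split_graph.py --input graph.tsv --output mygraph --mode num --split_num 5000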
|
e32157cdb35086c8901cfd86090a4748a37ff1e4
|
f1872915f044e9bc8d6622d529535441ea8aec6a
|
/tools/setupHelpers.py
|
a6e5d884f8ddb9b3f125381daaaeda9d052af8bf
|
[
"MIT"
] |
permissive
|
pyqtgraph/pyqtgraph
|
5dc14ddd513f4f3fdd0e834aba720e61b122c886
|
f261280905a74f6cae4a43e39fa1732635b25c63
|
refs/heads/master
| 2023-08-30T23:24:30.305478
| 2023-08-29T00:35:59
| 2023-08-29T00:35:59
| 12,777,496
| 3,432
| 1,184
|
NOASSERTION
| 2023-09-14T13:40:51
| 2013-09-12T07:18:21
|
Python
|
UTF-8
|
Python
| false
| false
| 20,540
|
py
|
setupHelpers.py
|
from contextlib import suppress
import json
import os
import re
import shutil
import subprocess
import sys
from distutils import core
from typing import Dict, Any
from .generateChangelog import generateDebianChangelog
# Maximum allowed repository size difference (in kB) following merge.
# This is used to prevent large files from being inappropriately added to
# the repository history.
MERGE_SIZE_LIMIT = 100
# Paths that are checked for style by flake and flake_diff
FLAKE_CHECK_PATHS = ['pyqtgraph', 'examples', 'tools']
# Flake style checks -- mandatory, recommended, optional
# See: http://pep8.readthedocs.org/en/1.4.6/intro.html
# and https://flake8.readthedocs.org/en/2.0/warnings.html
FLAKE_MANDATORY = set([
'E101', # indentation contains mixed spaces and tabs
'E112', # expected an indented block
'E122', # continuation line missing indentation or outdented
'E125', # continuation line does not distinguish itself from next line
'E133', # closing bracket is missing indentation
'E223', # tab before operator
'E224', # tab after operator
'E242', # tab after ‘,’
'E273', # tab after keyword
'E274', # tab before keyword
'E901', # SyntaxError or IndentationError
'E902', # IOError
'W191', # indentation contains tabs
'W601', # .has_key() is deprecated, use ‘in’
'W602', # deprecated form of raising exception
'W603', # ‘<>’ is deprecated, use ‘!=’
'W604', # backticks are deprecated, use ‘repr()’
])
FLAKE_RECOMMENDED = set([
'E124', # closing bracket does not match visual indentation
'E231', # missing whitespace after ‘,’
'E211', # whitespace before ‘(‘
'E261', # at least two spaces before inline comment
'E271', # multiple spaces after keyword
'E272', # multiple spaces before keyword
'E304', # blank lines found after function decorator
'F401', # module imported but unused
'F402', # import module from line N shadowed by loop variable
'F403', # ‘from module import *’ used; unable to detect undefined names
'F404', # future import(s) name after other statements
'E501', # line too long (82 > 79 characters)
'E502', # the backslash is redundant between brackets
'E702', # multiple statements on one line (semicolon)
'E703', # statement ends with a semicolon
'E711', # comparison to None should be ‘if cond is None:’
'E712', # comparison to True should be ‘if cond is True:’ or ‘if cond:’
'E721', # do not compare types, use ‘isinstance()’
'F811', # redefinition of unused name from line N
'F812', # list comprehension redefines name from line N
'F821', # undefined name name
'F822', # undefined name name in __all__
'F823', # local variable name ... referenced before assignment
'F831', # duplicate argument name in function definition
'F841', # local variable name is assigned to but never used
'W292', # no newline at end of file
])
FLAKE_OPTIONAL = set([
'E121', # continuation line indentation is not a multiple of four
'E123', # closing bracket does not match indentation of opening bracket
'E126', # continuation line over-indented for hanging indent
'E127', # continuation line over-indented for visual indent
'E128', # continuation line under-indented for visual indent
'E201', # whitespace after ‘(‘
'E202', # whitespace before ‘)’
'E203', # whitespace before ‘:’
'E221', # multiple spaces before operator
'E222', # multiple spaces after operator
'E225', # missing whitespace around operator
'E227', # missing whitespace around bitwise or shift operator
'E226', # missing whitespace around arithmetic operator
'E228', # missing whitespace around modulo operator
'E241', # multiple spaces after ‘,’
'E251', # unexpected spaces around keyword / parameter equals
'E262', # inline comment should start with ‘# ‘
'E301', # expected 1 blank line, found 0
'E302', # expected 2 blank lines, found 0
'E303', # too many blank lines (3)
'E401', # multiple imports on one line
'E701', # multiple statements on one line (colon)
'W291', # trailing whitespace
'W293', # blank line contains whitespace
'W391', # blank line at end of file
])
FLAKE_IGNORE = set([
# 111 and 113 are ignored because they appear to be broken.
'E111', # indentation is not a multiple of four
'E113', # unexpected indentation
])
def checkStyle():
""" Run flake8, checking only lines that are modified since the last
git commit. """
# First check _all_ code against mandatory error codes
print('flake8: check all code against mandatory error set...')
errors = ','.join(FLAKE_MANDATORY)
cmd = ['flake8', '--select=' + errors] + FLAKE_CHECK_PATHS
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
#ret = proc.wait()
output = proc.stdout.read().decode('utf-8')
ret = proc.wait()
printFlakeOutput(output)
# Check for DOS newlines
print('check line endings in all files...')
count = 0
allowedEndings = set([None, '\n'])
for path, dirs, files in os.walk('.'):
if path.startswith("." + os.path.sep + ".tox"):
continue
for f in files:
if os.path.splitext(f)[1] not in ('.py', '.rst'):
continue
filename = os.path.join(path, f)
with open(filename, 'r') as fh:  # universal newlines are the default in Python 3 (the old 'U' mode was removed in 3.11)
_ = fh.readlines()
endings = set(
fh.newlines
if isinstance(fh.newlines, tuple)
else (fh.newlines,)
)
endings -= allowedEndings
if len(endings) > 0:
print("\033[0;31m"
+ "File has invalid line endings: "
+ "%s" % filename + "\033[0m")
ret = ret | 2
count += 1
print('checked line endings in %d files' % count)
# Next check new code with optional error codes
print('flake8: check new code against recommended error set...')
diff = subprocess.check_output(['git', 'diff'])
proc = subprocess.Popen(['flake8', '--diff', # '--show-source',
'--ignore=' + errors],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
proc.stdin.write(diff)
proc.stdin.close()
output = proc.stdout.read().decode('utf-8')
ret |= printFlakeOutput(output)
if ret == 0:
print('style test passed.')
else:
print('style test failed: %d' % ret)
return ret
def printFlakeOutput(text):
""" Print flake output, colored by error category.
Return 2 if there were any mandatory errors, otherwise 0.
(Recommended and optional errors are printed but currently do not
affect the return code.)
"""
ret = 0
gotError = False
for line in text.split('\n'):
m = re.match(r'[^\:]+\:\d+\:\d+\: (\w+) .*', line)
if m is None:
print(line)
else:
gotError = True
error = m.group(1)
if error in FLAKE_MANDATORY:
print("\033[0;31m" + line + "\033[0m")
ret |= 2
elif error in FLAKE_RECOMMENDED:
print("\033[0;33m" + line + "\033[0m")
#ret |= 1
elif error in FLAKE_OPTIONAL:
print("\033[0;32m" + line + "\033[0m")
elif error in FLAKE_IGNORE:
continue
else:
print("\033[0;36m" + line + "\033[0m")
if not gotError:
print(" [ no errors ]\n")
return ret
def unitTests():
"""
Run all unit tests (using py.test)
Return the exit code.
"""
try:
if sys.version[0] == '3':
out = subprocess.check_output('PYTHONPATH=. py.test-3', shell=True)
else:
out = subprocess.check_output('PYTHONPATH=. py.test', shell=True)
ret = 0
except Exception as e:
out = e.output
ret = e.returncode
print(out.decode('utf-8'))
return ret
def checkMergeSize(
sourceBranch=None,
targetBranch=None,
sourceRepo=None,
targetRepo=None
):
"""
Check that a git merge would not increase the repository size by
more than MERGE_SIZE_LIMIT (in kB).
"""
if sourceBranch is None:
sourceBranch = getGitBranch()
sourceRepo = '..'
if targetBranch is None:
if sourceBranch == 'master':
targetBranch = 'master'
targetRepo = 'https://github.com/pyqtgraph/pyqtgraph.git'
else:
targetBranch = 'master'
targetRepo = '..'
workingDir = '__merge-test-clone'
env = dict(TARGET_BRANCH=targetBranch,
SOURCE_BRANCH=sourceBranch,
TARGET_REPO=targetRepo,
SOURCE_REPO=sourceRepo,
WORKING_DIR=workingDir,
)
print("Testing merge size difference:\n"
" SOURCE: {SOURCE_REPO} {SOURCE_BRANCH}\n"
" TARGET: {TARGET_BRANCH} {TARGET_REPO}".format(**env))
setup = """
mkdir {WORKING_DIR} && cd {WORKING_DIR} &&
git init && git remote add -t {TARGET_BRANCH} target {TARGET_REPO} &&
git fetch target {TARGET_BRANCH} &&
git checkout -qf target/{TARGET_BRANCH} &&
git gc -q --aggressive
""".format(**env)
checkSize = """
cd {WORKING_DIR} &&
du -s . | sed -e "s/\t.*//"
""".format(**env)
merge = """
cd {WORKING_DIR} &&
git pull -q {SOURCE_REPO} {SOURCE_BRANCH} &&
git gc -q --aggressive
""".format(**env)
try:
print("Check out target branch:\n" + setup)
subprocess.check_call(setup, shell=True)
targetSize = int(subprocess.check_output(checkSize, shell=True))
print("TARGET SIZE: %d kB" % targetSize)
print("Merge source branch:\n" + merge)
subprocess.check_call(merge, shell=True)
mergeSize = int(subprocess.check_output(checkSize, shell=True))
print("MERGE SIZE: %d kB" % mergeSize)
diff = mergeSize - targetSize
if diff <= MERGE_SIZE_LIMIT:
print("DIFFERENCE: %d kB [OK]" % diff)
return 0
else:
print("\033[0;31m"
+ "DIFFERENCE: %d kB [exceeds %d kB]" % (
diff,
MERGE_SIZE_LIMIT)
+ "\033[0m")
return 2
finally:
if os.path.isdir(workingDir):
shutil.rmtree(workingDir)
def mergeTests():
ret = checkMergeSize()
ret |= unitTests()
ret |= checkStyle()
if ret == 0:
print("\033[0;32m" + "\nAll merge tests passed." + "\033[0m")
else:
print("\033[0;31m" + "\nMerge tests failed." + "\033[0m")
return ret
def getInitVersion(pkgroot):
"""Return the version string defined in __init__.py"""
path = os.getcwd()
initfile = os.path.join(path, pkgroot, '__init__.py')
init = open(initfile).read()
m = re.search(r'__version__ = (\S+)\n', init)
if m is None or len(m.groups()) != 1:
raise Exception("Cannot determine __version__ from init file: "
+ "'%s'!" % initfile)
version = m.group(1).strip('\'\"')
return version
def gitCommit(name):
"""Return the commit ID for the given name."""
commit = subprocess.check_output(
['git', 'show', name],
universal_newlines=True).split('\n')[0]
assert commit[:7] == 'commit '
return commit[7:]
def getGitVersion(tagPrefix):
"""Return a version string with information about this git checkout.
If the checkout is an unmodified, tagged commit, then return the tag
version
If this is not a tagged commit, return the output of
``git describe --tags``
If this checkout has been modified, append "+" to the version.
"""
path = os.getcwd()
if not os.path.isdir(os.path.join(path, '.git')):
return None
try:
v = (
subprocess.check_output(
["git", "describe", "--tags", "--dirty", '--match="%s*"' % tagPrefix],
stderr=subprocess.DEVNULL)
.strip()
.decode("utf-8")
)
except (FileNotFoundError, subprocess.CalledProcessError):
return None
# chop off prefix
assert v.startswith(tagPrefix)
v = v[len(tagPrefix):]
# split up version parts
parts = v.split('-')
# has working tree been modified?
modified = False
if parts[-1] == 'dirty':
modified = True
parts = parts[:-1]
# have commits been added on top of last tagged version?
# (git describe adds -NNN-gXXXXXXX if this is the case)
local = None
if (len(parts) > 2 and
re.match(r'\d+', parts[-2]) and
re.match(r'g[0-9a-f]{7}', parts[-1])):
local = parts[-1]
parts = parts[:-2]
gitVersion = '-'.join(parts)
if local is not None:
gitVersion += '+' + local
if modified:
gitVersion += 'm'
return gitVersion
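# Worked example (illustrative): with tagPrefix='pyqtgraph-', a describe output
# of 'pyqtgraph-0.13.1-5-gabc1234-dirty' yields '0.13.1+gabc1234m'
# (5 commits past the tag, local id gabc1234, 'm' because the tree is modified).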
def getGitBranch():
m = re.search(
r'\* (.*)',
subprocess.check_output(['git', 'branch'],
universal_newlines=True))
if m is None:
return ''
else:
return m.group(1)
def getVersionStrings(pkg):
"""
Returns 4 version strings:
* the version string to use for this build,
* version string requested with --force-version (or None)
* version string that describes the current git checkout (or None).
* version string in the pkg/__init__.py,
The first return value is forcedVersion if given, otherwise initVersion
with any local git suffix (taken from gitVersion) appended.
"""
## Determine current version string from __init__.py
initVersion = getInitVersion(pkgroot=pkg)
# If this is a git checkout
# try to generate a more descriptive version string
try:
gitVersion = getGitVersion(tagPrefix=pkg+'-')
except:
gitVersion = None
sys.stderr.write("This appears to be a git checkout, but an error "
"occurred while attempting to determine a version "
"string for the current commit.\n")
sys.excepthook(*sys.exc_info())
# See whether a --force-version flag was given
forcedVersion = None
for i, arg in enumerate(sys.argv):
if arg.startswith('--force-version'):
if arg == '--force-version':
forcedVersion = sys.argv[i+1]
sys.argv.pop(i)
sys.argv.pop(i)
elif arg.startswith('--force-version='):
forcedVersion = sys.argv[i].replace('--force-version=', '')
sys.argv.pop(i)
## Finally decide on a version string to use:
if forcedVersion is not None:
version = forcedVersion
else:
version = initVersion
# if git says this is a modified branch, add local version information
if gitVersion is not None:
_, _, local = gitVersion.partition('+')
if local != '':
version = version + '+' + local
sys.stderr.write("Detected git commit; "
+ "will use version string: '%s'\n" % version)
return version, forcedVersion, gitVersion, initVersion
DEFAULT_ASV: Dict[str, Any] = {
"version": 1,
"project": "pyqtgraph",
"project_url": "http://pyqtgraph.org/",
"repo": ".",
"branches": ["master"],
"environment_type": "virtualenv",
"show_commit_url": "http://github.com/pyqtgraph/pyqtgraph/commit/",
# "pythons": ["3.7", "3.8", "3.9"],
"matrix": {
# "numpy": ["1.17", "1.18", "1.19", ""],
"numpy": "",
"pyqt5": ["", None],
"pyside2": ["", None],
},
"exclude": [
{"pyqt5": "", "pyside2": ""},
{"pyqt5": None, "pyside2": None}
],
"benchmark_dir": "benchmarks",
"env_dir": ".asv/env",
"results_dir": ".asv/results",
"html_dir": ".asv/html",
"build_cache_size": 5
}
class ASVConfigCommand(core.Command):
description = "Setup the ASV benchmarking config for this system"
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
config = DEFAULT_ASV
with suppress(FileNotFoundError, subprocess.CalledProcessError):
cuda_check = subprocess.check_output(["nvcc", "--version"])
match = re.search(r"release (\d{1,2}\.\d)", cuda_check.decode("utf-8"))
ver = match.groups()[0] # e.g. 11.0
ver_str = ver.replace(".", "") # e.g. 110
config["matrix"][f"cupy-cuda{ver_str}"] = ""
with open("asv.conf.json", "w") as conf_file:
conf_file.write(json.dumps(config, indent=2))
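# Example (illustrative): if `nvcc --version` reports "release 11.0", the matrix
# above gains a "cupy-cuda110": "" entry before asv.conf.json is written.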
class DebCommand(core.Command):
description = "build .deb package using `debuild -us -uc`"
maintainer = "Luke Campagnola <luke.campagnola@gmail.com>"
debTemplate = "debian"
debDir = "deb_build"
user_options = []
def initialize_options(self):
self.cwd = None
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
version = self.distribution.get_version()
pkgName = self.distribution.get_name()
debName = "python-" + pkgName
debDir = self.debDir
assert os.getcwd() == self.cwd, 'Must be in package root: %s' % self.cwd
if os.path.isdir(debDir):
raise Exception('DEB build dir already exists: "%s"' % debDir)
sdist = "dist/%s-%s.tar.gz" % (pkgName, version)
if not os.path.isfile(sdist):
raise Exception("No source distribution; "
+ "run `setup.py sdist` first.")
# copy sdist to build directory and extract
os.mkdir(debDir)
renamedSdist = '%s_%s.orig.tar.gz' % (debName, version)
print("copy %s => %s" % (sdist, os.path.join(debDir, renamedSdist)))
shutil.copy(sdist, os.path.join(debDir, renamedSdist))
print("cd %s; tar -xzf %s" % (debDir, renamedSdist))
if os.system("cd %s; tar -xzf %s" % (debDir, renamedSdist)) != 0:
raise Exception("Error extracting source distribution.")
buildDir = '%s/%s-%s' % (debDir, pkgName, version)
# copy debian control structure
print("copytree %s => %s" % (self.debTemplate, buildDir+'/debian'))
shutil.copytree(self.debTemplate, buildDir+'/debian')
# Write new changelog
chlog = generateDebianChangelog(
pkgName,
'CHANGELOG',
version,
self.maintainer)
print("write changelog %s" % buildDir+'/debian/changelog')
open(buildDir+'/debian/changelog', 'w').write(chlog)
# build package
print('cd %s; debuild -us -uc' % buildDir)
if os.system('cd %s; debuild -us -uc' % buildDir) != 0:
raise Exception("Error during debuild.")
class DebugCommand(core.Command):
"""Just for learning about distutils."""
description = ""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
global cmd
cmd = self
print(self.distribution.name)
print(self.distribution.version)
class TestCommand(core.Command):
description = "Run all package tests and exit immediately with ", \
"informative return code."
user_options = []
def run(self):
sys.exit(unitTests())
def initialize_options(self):
pass
def finalize_options(self):
pass
class StyleCommand(core.Command):
description = "Check all code for style, exit immediately with ", \
"informative return code."
user_options = []
def run(self):
sys.exit(checkStyle())
def initialize_options(self):
pass
def finalize_options(self):
pass
class MergeTestCommand(core.Command):
description = "Run all tests needed to determine whether the current ",\
"code is suitable for merge."
user_options = []
def run(self):
sys.exit(mergeTests())
def initialize_options(self):
pass
def finalize_options(self):
pass
|
67aafca8cc322769b86437c66b5bd40c4f64b30c
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/examples/tutorial/large_batch_optimizer/config.py
|
2efa0ffd0556c6d245dea8e86ffc19cf5ab68cc3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
config.py
|
from colossalai.amp import AMP_TYPE
# hyperparameters
# BATCH_SIZE is the per-GPU batch size
# global batch size = BATCH_SIZE x data parallel size
BATCH_SIZE = 512
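# e.g. (illustrative) with 8 GPUs and pure data parallelism: 512 * 8 = 4096 global batch size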
LEARNING_RATE = 3e-3
WEIGHT_DECAY = 0.3
NUM_EPOCHS = 2
WARMUP_EPOCHS = 1
# model config
NUM_CLASSES = 10
fp16 = dict(mode=AMP_TYPE.NAIVE)
clip_grad_norm = 1.0
|
cf0133a83d2520ee5a879d0fff1cd330ad8e8538
|
b06340ae3dfcb551bacefa362c034b064809fd28
|
/examples/test_example_functions_pass.py
|
11c86fba3bc9489933e347dbfaad5a042c8da7b0
|
[
"MIT"
] |
permissive
|
okken/pytest-check
|
cd3b82ae31932d54550822abb6cc96fa6b4e7c88
|
c7e7741e4d5665a07b0985932acc484aac2d5095
|
refs/heads/main
| 2023-08-19T09:10:40.776832
| 2023-08-11T20:44:36
| 2023-08-11T20:44:36
| 108,791,429
| 282
| 35
|
MIT
| 2023-08-11T20:37:17
| 2017-10-30T02:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
test_example_functions_pass.py
|
"""
Passing versions of all of the check helper functions.
"""
from pytest_check import check
def test_equal():
check.equal(1, 1)
def test_not_equal():
check.not_equal(1, 2)
def test_is():
x = ["foo"]
y = x
check.is_(x, y)
def test_is_not():
x = ["foo"]
y = ["foo"]
check.is_not(x, y)
def test_is_true():
check.is_true(True)
def test_is_false():
check.is_false(False)
def test_is_none():
a = None
check.is_none(a)
def test_is_not_none():
a = 1
check.is_not_none(a)
def test_is_in():
check.is_in(2, [1, 2, 3])
def test_is_not_in():
check.is_not_in(4, [1, 2, 3])
def test_is_instance():
check.is_instance(1, int)
def test_is_not_instance():
check.is_not_instance(1, str)
def test_almost_equal():
check.almost_equal(1, 1)
check.almost_equal(1, 1.1, abs=0.2)
check.almost_equal(2, 1, rel=1)
def test_not_almost_equal():
check.not_almost_equal(1, 2)
check.not_almost_equal(1, 2.1, abs=0.1)
check.not_almost_equal(3, 1, rel=1)
def test_greater():
check.greater(2, 1)
def test_greater_equal():
check.greater_equal(2, 1)
check.greater_equal(1, 1)
def test_less():
check.less(1, 2)
def test_less_equal():
check.less_equal(1, 2)
check.less_equal(1, 1)
def test_between():
check.between(10, 0, 20)
def test_between_ge():
check.between(10, 0, 20, ge=True)
check.between(0, 0, 20, ge=True)
def test_between_le():
check.between(10, 0, 20, le=True)
check.between(20, 0, 20, le=True)
def test_between_ge_le():
check.between(0, 0, 20, ge=True, le=True)
check.between(10, 0, 20, ge=True, le=True)
check.between(20, 0, 20, ge=True, le=True)
|
934ba587f94fae781a9eb7ce23de4ebc29b185b3
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/browser/ui/ash/multi_user/DEPS
|
8ce1ebc41777471427dfbdf40e42a0347dbfeca3
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 119
|
DEPS
|
specific_include_rules = {
"multi_profile_support\.cc": [
"+components/app_restore/full_restore_utils.h",
],
}
|
|
3966258e5292c676c7b915597b6e72fb72a0ff6c
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/configs/controlnet_animation/anythingv3_config.py
|
b6159dc3780b69b764ca2902d9265dca226da503
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
anythingv3_config.py
|
# config for model
stable_diffusion_v15_url = 'Linaqruf/anything-v3.0'
controlnet_hed_url = 'lllyasviel/sd-controlnet-hed'
control_detector = 'lllyasviel/ControlNet'
control_scheduler = 'UniPCMultistepScheduler'
# method type : 'multi-frame rendering' or 'attention_injection'
inference_method = 'attention_injection'
model = dict(
type='ControlStableDiffusionImg2Img',
vae=dict(
type='AutoencoderKL',
from_pretrained=stable_diffusion_v15_url,
subfolder='vae'),
unet=dict(
type='UNet2DConditionModel',
subfolder='unet',
from_pretrained=stable_diffusion_v15_url),
text_encoder=dict(
type='ClipWrapper',
clip_type='huggingface',
pretrained_model_name_or_path=stable_diffusion_v15_url,
subfolder='text_encoder'),
tokenizer=stable_diffusion_v15_url,
controlnet=dict(
type='ControlNetModel', from_pretrained=controlnet_hed_url),
scheduler=dict(
type='DDPMScheduler',
from_pretrained=stable_diffusion_v15_url,
subfolder='scheduler'),
test_scheduler=dict(
type='DDIMScheduler',
from_pretrained=stable_diffusion_v15_url,
subfolder='scheduler'),
data_preprocessor=dict(type='DataPreprocessor'),
init_cfg=dict(type='init_from_unet'),
enable_xformers=False,
)
|
889a9688578fc2a3208716e742dbfcba72086e7b
|
5fdfa2069b1aa05f61852b498328366d3dcfeb2a
|
/2021_11_10/dojo_test.py
|
7aea3ec61babe7734bb33f4f7afcefc6d1b2e986
|
[
"MIT"
] |
permissive
|
globocom/dojo
|
5110b5ed86734d49fd0934d8701d5016e7e27e0d
|
8df96c932f61645e9717197e5b58ca60909c7fc1
|
refs/heads/master
| 2022-07-21T17:59:16.133549
| 2022-06-22T18:17:01
| 2022-06-22T18:17:01
| 2,145,424
| 121
| 40
|
MIT
| 2022-02-17T17:21:46
| 2011-08-02T22:11:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
dojo_test.py
|
import unittest
from dojo import permutation, hashme
class DojoTest(unittest.TestCase):
def test_check_permutation_two_numbers(self):
self.assertEqual(permutation([0,1]),[[0,1],[1,0]])
def test_check_permutation_one_number(self):
self.assertEqual(permutation([0]),[[0]])
def test_check_permutation_no_numbers(self):
self.assertEqual(permutation([]),[[]])
def test_check_permutation_three_numbers(self):
self.assertEqual(hashme(permutation([1,2,3])), hashme([[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]))
def test_check_permutation_four_numbers(self):
self.assertEqual(
hashme(permutation([1,2,3,4])),
hashme([[1,2,3,4],[1,2,4,3],[1,3,2,4],[1,3,4,2],[1,4,2,3],[1,4,3,2],[2,1,3,4],[2,1,4,3],[2,3,1,4],[2,3,4,1],[2,4,1,3],[2,4,3,1],[3,1,2,4],[3,1,4,2],[3,2,1,4],[3,2,4,1],[3,4,1,2],[3,4,2,1],[4,1,2,3],[4,1,3,2],[4,2,1,3],[4,2,3,1],[4,3,1,2],[4,3,2,1]])
)
if __name__ == '__main__':
unittest.main()
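# The dojo module under test is not included in this record. A minimal sketch of an
# implementation that would satisfy the tests above (illustrative only, not the
# original dojo code):
def permutation(items):
    # The only permutation of an empty or single-element list is the list itself.
    if len(items) <= 1:
        return [list(items)]
    result = []
    for i, head in enumerate(items):
        rest = items[:i] + items[i + 1:]
        # Prepend the chosen element to every permutation of the remaining items.
        for tail in permutation(rest):
            result.append([head] + tail)
    return result
def hashme(perms):
    # Order-insensitive comparison helper: represent each permutation as a tuple.
    return set(map(tuple, perms))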
|
995dcf3b2e460fc4f5d9105909bef9a017df1137
|
5218e77f02d278e43b7df687e37cd73cffd21823
|
/doc/samples/custom_loader.py
|
2d4e58e65c13021b6aef301648f4edb2f65c41db
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pydoit/doit
|
12c4fa30f2153bdba69810b0393f5be19edae110
|
00c136f5dfe7e9039d0fed6dddd6d45c84c307b4
|
refs/heads/master
| 2023-08-03T04:41:26.397490
| 2023-01-16T03:40:03
| 2023-01-16T03:40:03
| 16,851,127
| 1,727
| 201
|
MIT
| 2023-03-18T18:37:45
| 2014-02-14T22:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 608
|
py
|
custom_loader.py
|
#! /usr/bin/env python3
import sys
from doit.task import dict_to_task
from doit.cmd_base import TaskLoader2
from doit.doit_cmd import DoitMain
my_builtin_task = {
'name': 'sample_task',
'actions': ['echo hello from built in'],
'doc': 'sample doc',
}
class MyLoader(TaskLoader2):
def setup(self, opt_values):
pass
def load_doit_config(self):
return {'verbosity': 2}
def load_tasks(self, cmd, pos_args):
task_list = [dict_to_task(my_builtin_task)]
return task_list
if __name__ == "__main__":
sys.exit(DoitMain(MyLoader()).run(sys.argv[1:]))
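# Illustrative invocation, not part of the original sample; assumes doit is installed
# and this file is saved as custom_loader.py:
#   $ python3 custom_loader.py list          # lists "sample_task"
#   $ python3 custom_loader.py sample_task   # runs the task and echoes "hello from built in"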
|
397a287b2b2189a078f42dd6f406130fe6221356
|
7976cbc8e26c8db85d75820ff289b048f22d3986
|
/isort/io.py
|
94698917118e647e66269da31463cadce8970eef
|
[
"MIT"
] |
permissive
|
PyCQA/isort
|
5eaf38d78f5088f7568a2056fe7868c4f79abfdd
|
e35a55f6ab8473003d68a8b2ffebdd08fbb2b61b
|
refs/heads/main
| 2023-08-28T12:03:16.110191
| 2023-08-20T20:18:05
| 2023-08-20T20:18:05
| 12,550,138
| 2,914
| 312
|
MIT
| 2023-09-06T18:45:21
| 2013-09-02T22:22:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,216
|
py
|
io.py
|
"""Defines any IO utilities used by isort"""
import dataclasses
import re
import tokenize
from contextlib import contextmanager
from io import BytesIO, StringIO, TextIOWrapper
from pathlib import Path
from typing import Any, Callable, Iterator, TextIO, Union
from isort.exceptions import UnsupportedEncoding
_ENCODING_PATTERN = re.compile(rb"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
@dataclasses.dataclass(frozen=True)
class File:
stream: TextIO
path: Path
encoding: str
@staticmethod
def detect_encoding(filename: Union[str, Path], readline: Callable[[], bytes]) -> str:
try:
return tokenize.detect_encoding(readline)[0]
except Exception:
raise UnsupportedEncoding(filename)
@staticmethod
def from_contents(contents: str, filename: str) -> "File":
encoding = File.detect_encoding(filename, BytesIO(contents.encode("utf-8")).readline)
return File(stream=StringIO(contents), path=Path(filename).resolve(), encoding=encoding)
@property
def extension(self) -> str:
return self.path.suffix.lstrip(".")
@staticmethod
def _open(filename: Union[str, Path]) -> TextIOWrapper:
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, "rb")
try:
encoding = File.detect_encoding(filename, buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True, newline="")
text.mode = "r" # type: ignore
return text
except Exception:
buffer.close()
raise
@staticmethod
@contextmanager
def read(filename: Union[str, Path]) -> Iterator["File"]:
file_path = Path(filename).resolve()
stream = None
try:
stream = File._open(file_path)
yield File(stream=stream, path=file_path, encoding=stream.encoding)
finally:
if stream is not None:
stream.close()
class _EmptyIO(StringIO):
def write(self, *args: Any, **kwargs: Any) -> None: # type: ignore # skipcq: PTC-W0049
pass
Empty = _EmptyIO()
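# Illustrative usage, not part of the original module; "snippet.py" is a hypothetical
# file name used only so encoding detection and the extension property can be shown.
if __name__ == "__main__":
    demo = File.from_contents("import os\n", filename="snippet.py")
    print(demo.encoding, demo.extension)  # e.g. "utf-8 py"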
|
fa265b47e6a1a768e8ca316d1e176b974e938ad8
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/mlflow/tracking/request_header/default_request_header_provider.py
|
191150821fa6ab8a7c06881543891b5eb45e8814
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
default_request_header_provider.py
|
from mlflow import __version__
from mlflow.tracking.request_header.abstract_request_header_provider import RequestHeaderProvider
_USER_AGENT = "User-Agent"
_DEFAULT_HEADERS = {_USER_AGENT: f"mlflow-python-client/{__version__}"}
class DefaultRequestHeaderProvider(RequestHeaderProvider):
"""
Provides default request headers for outgoing request.
"""
def in_context(self):
return True
def request_headers(self):
return dict(**_DEFAULT_HEADERS)
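# Illustrative usage, not part of the original module:
if __name__ == "__main__":
    provider = DefaultRequestHeaderProvider()
    if provider.in_context():
        print(provider.request_headers())  # {'User-Agent': 'mlflow-python-client/<version>'}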
|
fa3e390dfbf095f42fbd7c45730c7ed1f371f012
|
46b0c5040aacffb01a6b9013c578ea92ea50a345
|
/examples/python/test_connection.py
|
0cd3b46fd49bd00222b5fa71beeea9ea5e98f40f
|
[
"MIT"
] |
permissive
|
looker-open-source/sdk-codegen
|
13957cf7ff24f8bd1a68205f1a1f0a4dbb9cb192
|
351a15d71dd6b87124a0275a9b2d126f4e5c3ed1
|
refs/heads/main
| 2023-08-22T09:32:26.892694
| 2023-08-21T21:00:59
| 2023-08-21T21:00:59
| 180,189,919
| 213
| 196
|
MIT
| 2023-08-30T05:44:25
| 2019-04-08T16:30:46
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
test_connection.py
|
""" Given a connection name, obtain all supported tests, and run these test
$ python test_connection.py <connection_name>
Example:
$ python test_connection.py thelook
Notes: Connections to Looker's internal database cannot be tested.
Last modified: August 25, 2021
"""
from functools import reduce
import sys
from typing import cast, MutableSequence, Sequence
import looker_sdk
from looker_sdk import models
sdk = looker_sdk.init40("../../looker.ini")
def main():
connection_name = sys.argv[1] if len(sys.argv) > 1 else ""
if not connection_name:
raise Exception("Please provide a connection name")
elif connection_name in ["looker", "looker__internal__analytics"]:
raise Exception(
f"Connection '{connection_name}' is internal and cannot be tested."
)
connection = get_connections(connection_name)
results = test_connection(connection)
output_results(cast(str, connection.name), results)
def get_connections(name: str) -> models.DBConnection:
connection = sdk.connection(name, fields="name, dialect")
return connection
def test_connection(
connection: models.DBConnection,
) -> Sequence[models.DBConnectionTestResult]:
"""Run supported tests against a given connection."""
assert connection.name
assert connection.dialect and connection.dialect.connection_tests
supported_tests: MutableSequence[str] = list(connection.dialect.connection_tests)
test_results = sdk.test_connection(
connection.name, models.DelimSequence(supported_tests)
)
return test_results
def output_results(
connection_name: str, test_results: Sequence[models.DBConnectionTestResult]
):
"""Prints connection test results."""
errors = list(filter(lambda test: cast(str, test.status) == "error", test_results))
if errors:
report = reduce(
lambda failures, error: failures + f"\n - {error.message}",
errors,
f"{connection_name}:",
)
else:
report = f"All tests for connection '{connection_name}' were successful."
print(report)
main()
|
9e12f9a57c98bf72915cc0b9a60055af28112c74
|
67ce6a1d1369463b15023cc5bd1be9e823bab398
|
/lib/pymedphys/_dicom/header.py
|
7b13544d37c7493c61ebf4469dedde0d9c7381d9
|
[
"Apache-2.0"
] |
permissive
|
pymedphys/pymedphys
|
2487efe7259cc4e226e93d32fe86cef01673016e
|
f6acdf9bd2e8a32e372966879284fbd71c612358
|
refs/heads/main
| 2023-08-05T06:27:48.110296
| 2023-06-07T18:22:09
| 2023-06-07T18:22:09
| 168,238,552
| 288
| 79
|
Apache-2.0
| 2023-05-30T03:23:50
| 2019-01-29T22:20:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,206
|
py
|
header.py
|
# Copyright (C) 2021 Matthew Jennings
# Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from copy import deepcopy
from typing import Sequence
from pymedphys._imports import pydicom
from .create import dicom_dataset_from_dict
def adjust_machine_name(dicom_dataset, new_machine_name):
"""Change the machine name within the DICOM header"""
new_dicom_dataset = deepcopy(dicom_dataset)
for beam in new_dicom_dataset.BeamSequence:
beam.TreatmentMachineName = new_machine_name
return new_dicom_dataset
def adjust_machine_name_cli(args):
dicom_dataset = pydicom.read_file(args.input_file, force=True)
new_dicom_dataset = adjust_machine_name(dicom_dataset, args.new_machine_name)
pydicom.write_file(args.output_file, new_dicom_dataset)
def delete_sequence_item_with_matching_key(sequence, key, value):
new_sequence = deepcopy(sequence)
for i, item in reversed(list(enumerate(sequence))):
try:
if value == getattr(item, key):
new_sequence.pop(i)
except AttributeError:
pass
return new_sequence
def adjust_rel_elec_density(
dicom_dataset, adjustment_map, ignore_missing_structure=False
):
"""Append or adjust relative electron densities of structures"""
new_dicom_dataset = deepcopy(dicom_dataset)
ROI_name_to_number_map = {
structure_set.ROIName: structure_set.ROINumber
for structure_set in new_dicom_dataset.StructureSetROISequence
}
ROI_number_to_observation_map = {
observation.ReferencedROINumber: observation
for observation in new_dicom_dataset.RTROIObservationsSequence
}
for structure_name, new_red in adjustment_map.items():
try:
ROI_number = ROI_name_to_number_map[structure_name]
except KeyError:
if ignore_missing_structure:
continue
raise
observation = ROI_number_to_observation_map[ROI_number]
try:
physical_properties = observation.ROIPhysicalPropertiesSequence
except AttributeError:
physical_properties = []
physical_properties = delete_sequence_item_with_matching_key(
physical_properties, "ROIPhysicalProperty", "REL_ELEC_DENSITY"
)
physical_properties.append(
dicom_dataset_from_dict(
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": new_red,
}
)
)
observation.ROIPhysicalPropertiesSequence = physical_properties
return new_dicom_dataset
def adjust_RED_cli(args):
adjustment_map = dict(zip(args.adjustment_map[::2], args.adjustment_map[1::2]))
dicom_dataset = pydicom.read_file(args.input_file, force=True)
new_dicom_dataset = adjust_rel_elec_density(
dicom_dataset,
adjustment_map,
ignore_missing_structure=args.ignore_missing_structure,
)
pydicom.write_file(args.output_file, new_dicom_dataset)
def RED_adjustment_map_from_structure_names(structure_names):
structure_name_containing_RED_regex = r"^.*RED\s*[=:]\s*(\d+\.?\d*)\s*$"
pattern = re.compile(structure_name_containing_RED_regex, flags=re.IGNORECASE)
adjustment_map = {
structure: float(pattern.match(structure).group(1))
for structure in structure_names
if pattern.match(structure)
}
return adjustment_map
def adjust_RED_by_structure_name(dicom_dataset):
"""Adjust the structure electron density based on structure name."""
structure_names = [
structure_set.ROIName for structure_set in dicom_dataset.StructureSetROISequence
]
adjustment_map = RED_adjustment_map_from_structure_names(structure_names)
adjusted_dicom_dataset = adjust_rel_elec_density(dicom_dataset, adjustment_map)
return adjusted_dicom_dataset
def adjust_RED_by_structure_name_cli(args):
dicom_dataset = pydicom.read_file(args.input_file, force=True)
new_dicom_dataset = adjust_RED_by_structure_name(dicom_dataset)
pydicom.write_file(args.output_file, new_dicom_dataset)
def patient_ids_in_datasets_are_equal(
datasets: Sequence["pydicom.dataset.Dataset"],
) -> bool:
"""True if all DICOM datasets have the same Patient ID
Parameters
----------
datasets : sequence of pydicom.dataset.Dataset
A sequence of DICOM datasets whose Patient IDs are to be
compared.
Returns
-------
bool
True if Patient IDs match for all datasets, False otherwise.
"""
return all(ds.PatientID == datasets[0].PatientID for ds in datasets)
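# Illustrative usage, not part of the original module; the structure names below are
# made up to show which ones the RED regex picks up.
if __name__ == "__main__":
    demo_names = ["PTV", "Couch RED=1.2", "Implant red: 2.5"]
    print(RED_adjustment_map_from_structure_names(demo_names))
    # -> {'Couch RED=1.2': 1.2, 'Implant red: 2.5': 2.5}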
|
b47e59029a996536856a42956247c7037649a368
|
6eb0ba72a576b18873e53b0ff4f86fb581c6c806
|
/tests/unit/models_images_test.py
|
3478c3fedb3b6ef0ca9086852d0c7622ea0b94dc
|
[
"Apache-2.0"
] |
permissive
|
docker/docker-py
|
566f9dd69c71ef79fbe2b9dd2745c905e1c613df
|
c38656dc7894363f32317affecc3e4279e1163f8
|
refs/heads/main
| 2023-08-31T14:13:48.087317
| 2023-08-21T13:31:57
| 2023-08-21T13:31:57
| 10,247,874
| 6,473
| 1,943
|
Apache-2.0
| 2023-09-08T18:24:21
| 2013-05-23T16:15:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,925
|
py
|
models_images_test.py
|
import unittest
import warnings
from docker.constants import DEFAULT_DATA_CHUNK_SIZE
from docker.models.images import Image
from .fake_api import FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
class ImageCollectionTest(unittest.TestCase):
def test_build(self):
client = make_fake_client()
image = client.images.build()
client.api.build.assert_called_with()
client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_get(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_labels(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
assert image.labels == {'bar': 'foo'}
def test_list(self):
client = make_fake_client()
images = client.images.list(all=True)
client.api.images.assert_called_with(all=True, name=None, filters=None)
assert len(images) == 1
assert isinstance(images[0], Image)
assert images[0].id == FAKE_IMAGE_ID
def test_load(self):
client = make_fake_client()
client.images.load('byte stream')
client.api.load_image.assert_called_with('byte stream')
def test_pull(self):
client = make_fake_client()
image = client.images.pull('test_image:test')
client.api.pull.assert_called_with(
'test_image', tag='test', all_tags=False, stream=True
)
client.api.inspect_image.assert_called_with('test_image:test')
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_pull_tag_precedence(self):
client = make_fake_client()
image = client.images.pull('test_image:latest', tag='test')
client.api.pull.assert_called_with(
'test_image', tag='test', all_tags=False, stream=True
)
client.api.inspect_image.assert_called_with('test_image:test')
image = client.images.pull('test_image')
client.api.pull.assert_called_with(
'test_image', tag='latest', all_tags=False, stream=True
)
client.api.inspect_image.assert_called_with('test_image:latest')
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_pull_multiple(self):
client = make_fake_client()
images = client.images.pull('test_image', all_tags=True)
client.api.pull.assert_called_with(
'test_image', tag='latest', all_tags=True, stream=True
)
client.api.images.assert_called_with(
all=False, name='test_image', filters=None
)
client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
assert len(images) == 1
image = images[0]
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_pull_with_stream_param(self):
client = make_fake_client()
with warnings.catch_warnings(record=True) as w:
client.images.pull('test_image', stream=True)
assert len(w) == 1
assert str(w[0].message).startswith(
'`stream` is not a valid parameter'
)
def test_push(self):
client = make_fake_client()
client.images.push('foobar', insecure_registry=True)
client.api.push.assert_called_with(
'foobar',
tag=None,
insecure_registry=True
)
def test_remove(self):
client = make_fake_client()
client.images.remove('test_image')
client.api.remove_image.assert_called_with('test_image')
def test_search(self):
client = make_fake_client()
client.images.search('test')
client.api.search.assert_called_with('test')
def test_search_limit(self):
client = make_fake_client()
client.images.search('test', limit=5)
client.api.search.assert_called_with('test', limit=5)
class ImageTest(unittest.TestCase):
def test_short_id(self):
image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
'f69d65a6f73ef6262057ad0a15dcd'})
assert image.short_id == 'sha256:b6846070672c'
image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
'f69d65a6f73ef6262057ad0a15dcd'})
assert image.short_id == 'b6846070672c'
def test_tags(self):
image = Image(attrs={
'RepoTags': ['test_image:latest']
})
assert image.tags == ['test_image:latest']
image = Image(attrs={
'RepoTags': ['<none>:<none>']
})
assert image.tags == []
image = Image(attrs={
'RepoTags': None
})
assert image.tags == []
def test_history(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.history()
client.api.history.assert_called_with(FAKE_IMAGE_ID)
def test_remove(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.remove()
client.api.remove_image.assert_called_with(
FAKE_IMAGE_ID,
force=False,
noprune=False,
)
def test_save(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.save()
client.api.get_image.assert_called_with(
FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE
)
def test_tag(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.tag('foo')
client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
|
c2aa52641d64f1a1bca8d401e82e5cfea3dc0bb3
|
68073b5bbec051890bce2cdb0abbf1c7652002ed
|
/src/robotide/widgets/font.py
|
a0813321fe1f2c356f6ff9a60a05033439fa2002
|
[
"Apache-2.0"
] |
permissive
|
robotframework/RIDE
|
3b6dc9629e34b6f350e154e5f76d106fa48eaaa8
|
ed4d650dbd806672401d4341fecc30274c4972c7
|
refs/heads/master
| 2023-09-05T15:59:01.151700
| 2023-09-02T22:39:16
| 2023-09-02T22:39:16
| 2,467,257
| 897
| 419
|
Apache-2.0
| 2023-09-10T03:43:39
| 2011-09-27T11:53:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
font.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
class Font(object):
help = property(lambda self: self._get_font(scale=-2))
fixed = property(lambda self: self._get_font(family=wx.FONTFAMILY_MODERN))
fixed_log = property(lambda self:
self._get_font(scale=-2, family=wx.FONTFAMILY_MODERN))
underlined = property(lambda self: self._get_font(underlined=True))
def _get_font(self, scale=0, family=wx.FONTFAMILY_DEFAULT, underlined=False):
size = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT).GetPointSize() + scale
        return wx.Font(size, family, wx.FONTSTYLE_NORMAL,
                       wx.FONTWEIGHT_NORMAL, underline=underlined)
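# Illustrative usage, not part of the original module; assumes a running wx.App and a
# hypothetical control named "label":
#   label.SetFont(Font().fixed)  # monospace system font
#   label.SetFont(Font().help)   # slightly smaller font used for help texts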
|
6333af436fabf4db54fc09456d8645e7e578386c
|
3de6d6c868ed8eaa73369ca3af92c8af93b0046a
|
/软统/task3/problem10.py
|
67eaceb1b3b0bc80f0b35699aaf2bb794adf8d78
|
[] |
no_license
|
NJU-SE-15-share-review/professional-class
|
32fed8b83a7e008b04dd046433017c2fe6cc8ddf
|
a3531b0d0efd681ae4cf3ec01573654161b3ae3f
|
refs/heads/master
| 2023-08-25T04:09:10.523340
| 2023-03-22T15:50:09
| 2023-03-22T15:50:09
| 94,741,641
| 824
| 441
| null | 2023-03-22T15:50:11
| 2017-06-19T05:59:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 478
|
py
|
problem10.py
|
# -*- coding:utf-8 -*-
"""
log api example: log('output is: ' + str(output))
"""
from __future__ import print_function
from scipy.stats import t
import math
class Solution():
def solve(self):
n = 25
mean = 7.73
std = 0.77
u0 = 8
stat_value = (mean - u0) / (std / math.sqrt(n))
t_value = t.isf(0.05, n - 1)
return [round(n-1, 2), round(stat_value, 2), not math.fabs(stat_value) >= t_value]
print(Solution().solve())
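# Worked numbers for reference (illustrative, derived from the constants above):
#   stat_value = (7.73 - 8) / (0.77 / sqrt(25)) = -0.27 / 0.154 = -1.75 (approx.)
#   t.isf(0.05, 24) = 1.711 (approx.), the one-sided critical value for alpha = 0.05, df = 24
#   |stat_value| >= 1.711, so the script prints [24, -1.75, False].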
|
8ea59aedb6eac226b55af31cdb4795dc463c2434
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DPGAnalysis/SiStripTools/python/eventtimedistribution_cfi.py
|
e5c6f6a4182591ec2121037fa10b253b390d38b7
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 794
|
py
|
eventtimedistribution_cfi.py
|
import FWCore.ParameterSet.Config as cms
eventtimedistribution = cms.EDAnalyzer('EventTimeDistribution',
historyProduct = cms.InputTag("consecutiveHEs"),
apvPhaseCollection = cms.InputTag("APVPhases"),
phasePartition = cms.untracked.string("All"),
maxLSBeforeRebin = cms.untracked.uint32(100),
wantDBXvsBXincycle = cms.untracked.bool(True),
wantDBXvsBX = cms.untracked.bool(False),
wantBXincyclevsBX = cms.untracked.bool(False),
wantOrbitvsBXincycle = cms.untracked.bool(False)
)
|
24cbd65a30208f78648b7abb1e48b4601416406a
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/commands/network-scan.py
|
be3102fee9ba7bf0dfd25aedab89933bbbc54ed0
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 20,855
|
py
|
network-scan.py
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# network-scan command
# ----------------------------------------------------------------------
# Copyright (C) 2007-2023 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import argparse
import asyncio
import datetime
from io import BytesIO
import logging
import socket
# Third-party modules
import xlsxwriter
from gufo.ping import Ping
# NOC modules
from noc.core.management.base import BaseCommand
from noc.core.validators import is_ipv4
from noc.core.ip import IP
from noc.core.ioloop.snmp import snmp_get, SNMPError
from noc.core.mib import mib
from noc.core.snmp.version import SNMP_v1, SNMP_v2c
from noc.sa.models.managedobject import ManagedObject, ManagedObjectProfile
from noc.sa.models.credentialcheckrule import CredentialCheckRule
from noc.sa.models.managedobject import AdministrativeDomain
from noc.inv.models.networksegment import NetworkSegment
from noc.main.models.pool import Pool
from noc.sa.models.profile import Profile
from noc.inv.models.platform import Platform
from noc.services.mailsender.service import MailSenderService
from noc.main.models.notificationgroup import NotificationGroup
from noc.core.comp import smart_text
from noc.core.mongo.connection import connect
from noc.config import config
# example
# ./noc network-scan 10.0.0.0/24
# ./noc network-scan --autoadd test --email example@example.org --formats xlsx 10.0.0.0/24
# ./noc network-scan --in /opt/net/nets --exclude /opt/net/exclude
class Command(BaseCommand):
DEFAULT_OID = "1.3.6.1.2.1.1.2.0"
DEFAULT_COMMUNITY = "public"
CHECK_OIDS = [mib["SNMPv2-MIB::sysObjectID.0"], mib["SNMPv2-MIB::sysName.0"]]
CHECK_VERSION = {SNMP_v1: "snmp_v2c_get", SNMP_v2c: "snmp_v1_get"}
SNMP_VERSION = {0: "SNMP_v1", 1: "SNMP_v2c"}
def add_arguments(self, parser):
parser.add_argument("--in", action="append", dest="inputs", help="File with addresses")
parser.add_argument(
"--import", action="append", dest="imports", help="File to import into NOC"
)
parser.add_argument(
"--exclude", action="append", dest="exclude", help="File with addresses for exclusion"
)
parser.add_argument(
"--jobs", action="store", type=int, default=100, dest="jobs", help="Concurrent jobs"
)
parser.add_argument("addresses", nargs=argparse.REMAINDER, help="Object name")
parser.add_argument("--community", action="append", help="SNMP community")
parser.add_argument("--oid", default=self.CHECK_OIDS, action="append", help="SNMP GET OIDs")
parser.add_argument("--timeout", type=int, default=1, help="SNMP GET timeout")
parser.add_argument("--version", type=int, help="version snmp check")
parser.add_argument("--obj-profile", help="name object profile", default="default")
parser.add_argument("--credential", help="credential profile")
parser.add_argument("--pool", help="name pool", default="default")
parser.add_argument("--adm-domain", help="name adm domain", default="default")
parser.add_argument("--segment", help="network segment", default="ALL")
parser.add_argument("--label", action="append", help="mo label")
parser.add_argument("--autoadd", help="add object", action="store_true")
parser.add_argument("--syslog-source", choices=["m", "a"], help="syslog_source")
parser.add_argument("--trap-source", choices=["m", "a"], help="trap_source")
parser.add_argument("--mail", help="mail notification_group name")
parser.add_argument("--email", action="append", help="mailbox list")
parser.add_argument("--formats", default="csv", help="Format file (csv or xlsx)")
parser.add_argument("--resolve-name-snmp", action="store_true", help="hostname->name")
parser.add_argument("--resolve-name-dns", action="store_true", help="ptr dns->name")
def handle(
self,
inputs,
imports,
exclude,
addresses,
jobs,
community,
oid,
timeout,
version,
credential,
pool,
adm_domain,
segment,
obj_profile,
autoadd,
label,
mail,
email,
formats,
resolve_name_snmp,
resolve_name_dns,
syslog_source,
trap_source,
*args,
**options,
):
async def ping_task():
queue = asyncio.Queue(maxsize=self.jobs)
for _ in range(self.jobs):
asyncio.create_task(self.ping_worker(queue))
# Read exclude addresses from files
"""
file example
10.0.0.1
10.1.1.0/24
10.1.2.1
"""
if exclude:
for fn in exclude:
try:
with open(fn) as f:
for line in f:
line = line.strip()
ip = line.split("/")
if is_ipv4(ip[0]):
if len(ip) == 2:
ip = IP.prefix(line)
first = ip.first
last = ip.last
for x in first.iter_address(until=last):
ip2 = str(x).split("/")
self.hosts_exclude.add(ip2[0])
else:
self.hosts_exclude.add(line)
except OSError as e:
self.die("Cannot read file %s: %s\n" % (fn, e))
# Direct addresses 10.0.0.1 or 10.0.0.0/24
for a in addresses:
self.addresses = set()
self.nets.append(a)
ip = a.split("/")
if not is_ipv4(ip[0]):
continue
if len(ip) == 2:
ip = IP.prefix(a)
first = ip.first
last = ip.last
for x in first.iter_address(until=last):
ip2 = str(x).split("/")
if ip2[0] not in self.hosts_exclude:
await queue.put(ip2[0])
else:
if a not in self.hosts_exclude:
await queue.put(a)
# Read addresses from files
"""
file example
10.0.0.1
10.1.1.0/24
10.1.2.1
"""
if inputs:
for fn in inputs:
try:
with open(fn) as f:
for line in f:
line = line.strip()
ip = line.split("/")
if is_ipv4(ip[0]):
self.nets.append(line)
if len(ip) == 2:
ip = IP.prefix(line)
first = ip.first
last = ip.last
for x in first.iter_address(until=last):
ip2 = str(x).split("/")
if ip2[0] not in self.hosts_exclude:
await queue.put(ip2[0])
else:
if line not in self.hosts_exclude:
await queue.put(line)
except OSError as e:
self.die("Cannot read file %s: %s\n" % (fn, e))
await queue.join()
async def snmp_task():
queue = asyncio.Queue(maxsize=self.jobs)
for _ in range(self.jobs):
asyncio.create_task(self.snmp_worker(queue, community, oid, timeout, self.version))
for a in self.enable_ping:
await queue.put(a)
await queue.join()
connect()
self.addresses = set() # ip for ping
self.enable_ping = set() # ip ping
self.not_ping = set() # ip not ping
self.enable_snmp = set() # ip responding snmp
self.hosts_enable = set() # ip in noc
self.hosts_exclude = set() # ip exclude
self.mo = {}
self.snmp = {}
self.nets = [] # nets
self.count_ping = 0
self.count_not_ping = 0
self.count_snmp = 0
self.count_net = 0
# options by-default
# administrative_domain = "default"
profile = "Generic.Host"
# object_profile = "default"
description = "create object %s" % (datetime.datetime.now().strftime("%Y%m%d"))
# segment = "ALL"
# scheme = "1"
# address = ""
# port = ""
# user=""
# password=""
# super_password = ""
# remote_path = ""
# trap_source_ip = ""
# trap_community = ""
# snmp_ro=""
# snmp_rw=""
# vc_domain = "default"
# vrf = ""
# termination_group = ""
# service_terminator = ""
# shape = "Cisco/router"
# config_filter_rule = ""
# config_diff_filter_rule = ""
# config_validation_rule = ""
# max_scripts = "1"
# labels = ["autoadd"]
# pool = "default"
# container = ""
# trap_source_type = "d"
# syslog_source_type = "d"
# object_profile="default"
# time_pattern = ""
# x = ""
# y = ""
# default_zoom = ""
# key processing
if version is None:
self.version = [1, 0]
else:
self.version = [version]
try:
self.pool = Pool.objects.get(name=pool)
except Pool.DoesNotExist:
self.die("Invalid pool-%s" % pool)
# snmp community
if not community:
community = []
if credential:
try:
self.cred = CredentialCheckRule.objects.get(name=credential)
except CredentialCheckRule.DoesNotExist:
self.die("Invalid credential profile-%s" % credential)
for snmp in self.cred.suggest_snmp:
community.append(snmp.snmp_ro)
else:
community = [self.DEFAULT_COMMUNITY]
# auto add objects profile
if autoadd:
try:
self.adm_domain = AdministrativeDomain.objects.get(name=adm_domain)
except AdministrativeDomain.DoesNotExist:
self.die("Invalid adm profile-%s")
self.profile = Profile.objects.get(name=profile)
try:
self.segment = NetworkSegment.objects.get(name=segment)
except NetworkSegment.DoesNotExist:
self.die("Invalid network segment-%s")
try:
self.object_profile = ManagedObjectProfile.objects.get(name=obj_profile)
except ManagedObjectProfile.DoesNotExist:
self.die("Invalid object profile-%s")
# creating a list of presence mo in noc
moall = ManagedObject.objects.filter(is_managed=True)
moall = moall.filter(pool=self.pool)
for mm in moall:
self.hosts_enable.add(mm.address)
self.mo[mm.address] = {
"name": mm.name,
"labels": mm.labels,
"is_managed": mm.is_managed,
"snmp_ro": mm.auth_profile.snmp_ro if mm.auth_profile else mm.snmp_ro,
}
# add to mo list with remote:deleted
moall = ManagedObject.objects.filter(is_managed=False).exclude(
labels__contains=["remote:deleted"]
)
moall = moall.filter(pool=self.pool)
for mm in moall:
if mm.address not in self.hosts_enable:
self.hosts_enable.add(mm.address)
self.mo[mm.address] = {
"name": mm.name,
"labels": mm.labels,
"is_managed": mm.is_managed,
"snmp_ro": mm.auth_profile.snmp_ro if mm.auth_profile else mm.snmp_ro,
}
# Ping
self.ping = Ping(tos=config.ping.tos)
self.jobs = jobs
asyncio.run(ping_task())
print("ver.16")
print("enable_ping ", len(self.enable_ping))
# snmp
asyncio.run(snmp_task())
print("enable_snmp ", len(self.enable_snmp))
data = "IP;Available via ICMP;IP enable;is_managed;suggest name;SMNP sysname;SNMP sysObjectId;Vendor;Model;Name;pool;labels\n"
for ipx in self.enable_ping:
x2 = "True"
x12 = ipx
if resolve_name_dns:
if self.get_domain_name(ipx):
x12 = self.get_domain_name(ipx)
if resolve_name_snmp:
if ipx in self.enable_snmp and "1.3.6.1.2.1.1.5.0" in self.snmp[ipx]:
x12 = self.snmp[ipx]["1.3.6.1.2.1.1.5.0"]
x4 = x5 = x6 = x7 = x8 = x9 = x11 = "None"
x12 = x12.strip()
if ipx in self.hosts_enable:
x3 = "True"
x8 = self.mo[ipx]["name"]
x11 = str(self.mo[ipx]["is_managed"])
if self.mo[ipx]["labels"]:
x9 = ",".join(self.mo[ipx]["labels"] if self.mo[ipx]["labels"] else [])
else:
if autoadd:
m = ManagedObject(
                        name=x12 if x12 else ipx,
administrative_domain=self.adm_domain,
profile=self.profile,
description=description,
object_profile=self.object_profile,
segment=self.segment,
scheme=1,
address=ipx,
pool=self.pool,
)
if label:
m.labels = label
if syslog_source:
m.syslog_source_type = syslog_source
if trap_source:
m.trap_source_type = trap_source
try:
m.save()
except Exception as e:
print(e)
x3 = "False"
if ipx in self.enable_snmp:
# ['1.3.6.1.2.1.1.2.0', '1.3.6.1.2.1.1.5.0']
if "1.3.6.1.2.1.1.2.0" in self.snmp[ipx]:
x5 = self.snmp[ipx]["1.3.6.1.2.1.1.2.0"]
for p in Platform.objects.filter(snmp_sysobjectid=x5):
if p:
x6 = p.vendor
x7 = p.name
else:
x5 = "None"
if "1.3.6.1.2.1.1.5.0" in self.snmp[ipx]:
sysname = self.snmp[ipx]["1.3.6.1.2.1.1.5.0"]
x4 = sysname
else:
x4 = "None"
# try:
# sysname = self.snmp[ipx]["1.3.6.1.2.1.1.5.0"]
# x4 = sysname
# except:
# x4 = "None"
s = ";".join(
[
smart_text(ipx),
smart_text(x2),
smart_text(x3),
smart_text(x11),
smart_text(x12),
smart_text(x4),
smart_text(x5),
smart_text(x6),
smart_text(x7),
smart_text(x8),
smart_text(pool),
smart_text(x9),
]
)
data += s + "\n"
fn = "/tmp/report.csv"
file = open(fn, "w")
file.write(data)
file.close()
# mail in notification_group
if mail:
if not email:
email = []
g = NotificationGroup.get_by_name(mail)
for method, params, lang in g.active_members:
if "mail" in method:
email.append(params)
# output in csv or mail
if email:
bodymessage = "Report in attachment.\n\nscan network:\n"
for adr in self.nets:
bodymessage += adr + "\n"
filename = "found_ip_%s" % (datetime.datetime.now().strftime("%Y%m%d"))
if formats == "csv":
f = "%s.csv" % filename
attach = [{"filename": f, "data": data}]
elif formats == "xlsx":
f = "%s.xlsx" % filename
response = BytesIO()
wb = xlsxwriter.Workbook(response)
ws = wb.add_worksheet("Objects")
row = 0
ss = data.split("\n")
for line in ss:
row_data = str(line).strip("\n")
rr = row_data.split(";")
ws.write_row(row, 0, tuple(rr))
# Move on to the next worksheet row.
row += 1
wb.close()
response.seek(0)
attach = [
{"filename": f, "data": response.getvalue(), "transfer-encoding": "base64"}
]
response.close()
ms = MailSenderService()
ms.logger = logging.getLogger("network_scan")
self.i = 1
for boxmail in email:
self.i += 1
msg = {
"address": boxmail,
"subject": "Report (%s)" % pool,
"body": bodymessage,
"attachments": attach,
}
ms.send_mail(self.i, msg)
"""
msg = {
"address": email,
"subject": "Report (%s)" % pool,
"body": bodymessage,
"attachments": attach,
}
ms.send_mail("11", msg)
"""
else:
print(data)
async def ping_check(self, addr: str) -> bool:
"""
Try to ping address.
Args:
addr: Address to ping.
Returns:
* True, on success.
* False, otherwise.
"""
for _ in range(3): # @todo: Make configurable
rtt = await self.ping.ping(addr)
if rtt is not None:
return True
return False
async def ping_worker(self, queue):
while True:
a = await queue.get()
if a and await self.ping_check(a):
self.enable_ping.add(a)
queue.task_done()
if not a:
break
async def snmp_worker(self, queue, community, oid, timeout, version):
while True:
a = await queue.get()
if not a:
queue.task_done()
break
if a in self.hosts_enable:
community = [self.mo[a]["snmp_ro"]]
            if community[0] is not None:
for c in community:
for ver in version:
try:
self.r = await snmp_get(
address=a,
oids=dict((k, k) for k in oid),
community=c,
version=ver,
timeout=timeout,
)
# self.s = "OK"
self.enable_snmp.add(a)
self.snmp[a] = self.r
self.snmp[a]["version"] = ver
self.snmp[a]["community"] = c
break
except SNMPError as e:
# self.s = "FAIL"
self.r = str(e)
except Exception as e:
# self.s = "EXCEPTION"
self.r = str(e)
break
queue.task_done()
@staticmethod
def get_domain_name(ip_address):
try:
result = socket.gethostbyaddr(ip_address)
except Exception:
return
return list(result)[0]
if __name__ == "__main__":
Command().run()
|
e2a604dafbfc0491cd23fc4c51bef58d1ea0cb2b
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/image-superresolution/esrgan/convert_weights.py
|
e5c4c0958d440a1a72497a8a30b93faef8bcc1d5
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
convert_weights.py
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import nnabla as nn
import nnabla.parametric_functions as PF
import numpy
import argparse
parser = argparse.ArgumentParser(description='esrgan')
parser.add_argument('--pretrained_model', default='./RRDB_ESRGAN_x4.pth',
help='path to pytorch pretrained model')
parser.add_argument('--save_path', default='./ESRGAN_NNabla_model.h5',
help='Path to save h5 file')
args = parser.parse_args()
def pytorch_to_nn_param_map():
'''map from tensor name to Nnabla default parameter names
'''
return {
'weight': 'conv/W',
'bias': 'conv/b',
'.': '/'
}
def rename_params(param_name):
pytorch_to_nn_dict = pytorch_to_nn_param_map()
for k in pytorch_to_nn_dict:
if k in param_name:
param_name = param_name.replace(k, pytorch_to_nn_dict[k])
return param_name
def pytorch_to_nnabla(input_file, h5_file):
read = torch.load(input_file)
for k, v in read.items():
key = rename_params(k)
params = PF.get_parameter_or_create(key, shape=v.shape)
params.d = v.numpy()
nn.parameter.save_parameters(h5_file)
def main():
pytorch_to_nnabla(args.pretrained_model, args.save_path)
if __name__ == "__main__":
main()
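# Illustrative notes, not part of the original script:
#   rename_params('model.0.weight') -> 'model/0/conv/W'
# Typical command line (paths are the argparse defaults shown above):
#   $ python convert_weights.py --pretrained_model ./RRDB_ESRGAN_x4.pth --save_path ./ESRGAN_NNabla_model.h5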
|
44bb3385bfb8056dcad473e951ac92b8852a9ad5
|
a4250d60726fa37341736ed14173ce77797b47e9
|
/setup.py
|
aa5b05c308daba4e7abf64430a144f168fc20e58
|
[
"MIT"
] |
permissive
|
Scille/umongo
|
cdd04c85a5a93a85a64bb97367aec5219fb48a97
|
1b23dc7155448a52fa6e7f3d7da6621632e259f5
|
refs/heads/master
| 2023-08-03T18:14:05.908192
| 2022-09-21T15:09:58
| 2022-09-21T15:09:58
| 53,513,600
| 482
| 84
|
MIT
| 2023-05-16T22:47:55
| 2016-03-09T16:34:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst', 'rb') as readme_file:
readme = readme_file.read().decode('utf8')
with open('HISTORY.rst', 'rb') as history_file:
history = history_file.read().decode('utf8')
requirements = [
"marshmallow>=3.10.0",
"pymongo>=3.7.0",
]
setup(
name='umongo',
version='3.1.0',
description="sync/async MongoDB ODM, yes.",
long_description=readme + '\n\n' + history,
author="Emmanuel Leblond, Jérôme Lafréchoux",
author_email='jerome@jolimont.fr',
url='https://github.com/touilleMan/umongo',
packages=['umongo', 'umongo.frameworks'],
include_package_data=True,
python_requires='>=3.7',
install_requires=requirements,
extras_require={
'motor': ['motor>=2.0,<3.0'],
'txmongo': ['txmongo>=19.2.0'],
'mongomock': ['mongomock'],
},
license="MIT",
zip_safe=False,
keywords='umongo mongodb pymongo txmongo motor mongomock asyncio twisted',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
],
)
|
6259e6a0fa103cc761339946b5a30a101ab34733
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/RndBenefitRule.py
|
99bd93257284bb62cd1503cefde2d48533eb6e0a
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
RndBenefitRule.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RndBenefitRule(object):
def __init__(self):
self._max_benefit = None
self._min_benefit = None
self._proportion = None
@property
def max_benefit(self):
return self._max_benefit
@max_benefit.setter
def max_benefit(self, value):
self._max_benefit = value
@property
def min_benefit(self):
return self._min_benefit
@min_benefit.setter
def min_benefit(self, value):
self._min_benefit = value
@property
def proportion(self):
return self._proportion
@proportion.setter
def proportion(self, value):
self._proportion = value
def to_alipay_dict(self):
params = dict()
if self.max_benefit:
if hasattr(self.max_benefit, 'to_alipay_dict'):
params['max_benefit'] = self.max_benefit.to_alipay_dict()
else:
params['max_benefit'] = self.max_benefit
if self.min_benefit:
if hasattr(self.min_benefit, 'to_alipay_dict'):
params['min_benefit'] = self.min_benefit.to_alipay_dict()
else:
params['min_benefit'] = self.min_benefit
if self.proportion:
if hasattr(self.proportion, 'to_alipay_dict'):
params['proportion'] = self.proportion.to_alipay_dict()
else:
params['proportion'] = self.proportion
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RndBenefitRule()
if 'max_benefit' in d:
o.max_benefit = d['max_benefit']
if 'min_benefit' in d:
o.min_benefit = d['min_benefit']
if 'proportion' in d:
o.proportion = d['proportion']
return o
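# Illustrative round trip, not part of the original SDK file; the values are made up.
if __name__ == "__main__":
    demo = RndBenefitRule.from_alipay_dict(
        {"max_benefit": 100, "min_benefit": 1, "proportion": "0.1"}
    )
    print(demo.to_alipay_dict())  # {'max_benefit': 100, 'min_benefit': 1, 'proportion': '0.1'}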
|
b9c0fcc695ac64ac88d408fec9a1730f52733028
|
462b8a2326486dd41bf0d1ddbb19bbcee9532411
|
/blender/arm/logicnode/canvas/LN_set_canvas_checkbox.py
|
ad41a7ae8c60d17d5dca2487419a87f637f993c2
|
[
"Zlib",
"GPL-2.0-only"
] |
permissive
|
armory3d/armory
|
b751fb23d6590f2ca421ace7cf7cbeaef91f472c
|
511657981bd2716eddcee8dff26820d27f2bc610
|
refs/heads/main
| 2023-08-12T02:57:02.898742
| 2023-08-04T18:55:45
| 2023-08-04T18:55:45
| 45,202,654
| 3,077
| 530
|
Zlib
| 2023-09-12T11:24:38
| 2015-10-29T18:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
LN_set_canvas_checkbox.py
|
from arm.logicnode.arm_nodes import *
class CanvasSetCheckBoxNode(ArmLogicTreeNode):
"""Sets the state of the given UI checkbox."""
bl_idname = 'LNCanvasSetCheckBoxNode'
bl_label = 'Set Canvas Checkbox'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmStringSocket', 'Element')
self.add_input('ArmBoolSocket', 'Check')
self.add_output('ArmNodeSocketAction', 'Out')
|
26846119cd58ab81424034999e41136cd5cd056e
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/torch/quantization/test_unified_scales.py
|
78f9ede365601a75b57622f6b928d294517db4e1
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 29,572
|
py
|
test_unified_scales.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import Counter
from functools import partial
from typing import Dict, List
import onnx
import pytest
import torch
import torch.nn
from nncf.common.graph import NNCFNodeName
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.hardware.config import HWConfigType
from nncf.common.quantization.quantizer_propagation.solver import QuantizerPropagationSolver
from nncf.common.quantization.structs import NonWeightQuantizerId
from nncf.torch.dynamic_graph.operation_address import OperationAddress
from nncf.torch.graph.transformations.commands import PTTargetPoint
from nncf.torch.quantization.layers import AsymmetricQuantizer
from tests.torch.helpers import create_compressed_model_and_algo_for_test
from tests.torch.helpers import get_nodes_by_type
from tests.torch.helpers import register_bn_adaptation_init_args
from tests.torch.helpers import resolve_constant_node_inputs_to_values
from tests.torch.quantization.quantization_helpers import get_quantization_config_without_range_init
from tests.torch.quantization.test_onnx_export import get_successors
# pylint: disable=no-member
def make_op_address_for_coalescing_test(scope_str: str) -> OperationAddress:
op_address = OperationAddress.from_str(scope_str)
return op_address
def make_insertion_point_for_coalescing_test(node_name: NNCFNodeName, input_port_id: int = None) -> PTTargetPoint:
retval = PTTargetPoint(TargetType.OPERATOR_POST_HOOK, target_node_name=node_name, input_port_id=input_port_id)
return retval
@pytest.mark.parametrize(
"input_insertion_points, linked_scopes_groups_list, ref_coalesced_ip_lists",
# ref_coalesced_ip_lists == None means that the coalescing should raise an exception
[
# 0 - Empty linked scopes list
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
[],
# Each coalesced list has one entry
[
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
],
),
# 1 - Linked scope only affects 1 operation
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
],
[["Foo/Baz[bar]/conv2d_0"]],
# Each coalesced list has one entry
[
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
],
],
),
# 2 - Same as 1 but with multiple groups
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
[["Foo/Baz[bar]/conv2d_0"], ["Foo/Xyz[leet]/__add___0"]],
# Each coalesced list has one entry again
[
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
],
),
# 3 - Single group affecting some of the scopes
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0", input_port_id=1),
],
[["Foo/Xyz[leet]/matmul_0", "Foo/Xyz[leet]/__add___0", "Foo/Baz[bar]/linear_0"]],
[
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0", input_port_id=1),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
],
],
),
# 4 - Multiple groups, each affecting one operation
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
],
[["Foo/Baz[bar]/linear_0"], ["Foo/Asdf[jkl]/softmax_0"]],
[
# Each coalesced list has one entry again
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
],
[
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
],
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0", input_port_id=0),
],
],
),
# 5 - Multiple groups affecting multiple operations without overlapping
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_1", input_port_id=0),
],
[
["Foo/Baz[bar]/conv2d_0", "Foo/Baz[bar]/linear_0"],
["Foo/Asdf[jkl]/softmax_1", "Foo/Xyz[leet]/__add___0"],
],
[
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_1", input_port_id=0),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0", input_port_id=1),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0"),
],
[
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
],
],
),
# 6 - A variation of 5
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0",
input_port_id=0,
),
],
[
["Foo/Baz[bar]/conv2d_0", "Foo/Baz[bar]/linear_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Asdf[jkl]/softmax_0", "Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"],
],
[
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/matmul_0"),
],
[
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0", input_port_id=0),
],
[
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
],
],
),
# 7 - Overlapping groups
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
make_insertion_point_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0",
input_port_id=1,
),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"),
],
[
["Foo/Baz[bar]/conv2d_0", "Foo/Baz[bar]/linear_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Xyz[leet]/matmul_0", "Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"],
],
None,
),
# 8 - More than 1 match for the operation specified in the group
(
[
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/conv2d_0"),
make_insertion_point_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0",
input_port_id=0,
),
make_insertion_point_for_coalescing_test(
"Foo/Baz[bar]/linear_0",
),
make_insertion_point_for_coalescing_test(
"Foo/Xyz[leet]/__add___0",
),
make_insertion_point_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0",
input_port_id=1,
),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"),
],
[
["Foo/Baz[bar]/conv2d_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Xyz[leet]/matmul_0", "Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"],
],
None,
),
# 9 - No match for an operation specified in the group
(
[
make_insertion_point_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0",
input_port_id=0,
),
make_insertion_point_for_coalescing_test("Foo/Baz[bar]/linear_0"),
make_insertion_point_for_coalescing_test("Foo/Xyz[leet]/__add___0"),
make_insertion_point_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0",
input_port_id=1,
),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/softmax_0"),
make_insertion_point_for_coalescing_test("Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"),
],
[
["Foo/Baz[bar]/conv2d_0", "Foo/Xyz[leet]/matmul_1"],
["Foo/Xyz[leet]/matmul_0", "Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"],
],
None,
),
],
)
def test_insertion_point_coalescing(
input_insertion_points: List[PTTargetPoint],
linked_scopes_groups_list: List[List[str]],
ref_coalesced_ip_lists: List[List[PTTargetPoint]],
):
if ref_coalesced_ip_lists is None:
with pytest.raises(RuntimeError):
_ = QuantizerPropagationSolver.coalesce_insertion_points(input_insertion_points, linked_scopes_groups_list)
else:
test_coalesced_ip_lists = QuantizerPropagationSolver.coalesce_insertion_points(
input_insertion_points, linked_scopes_groups_list
)
assert len(test_coalesced_ip_lists) == len(ref_coalesced_ip_lists)
for idx, test_list in enumerate(test_coalesced_ip_lists):
assert Counter(test_list) == Counter(ref_coalesced_ip_lists[idx])
class EltwiseQuantizerLinkingTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Path(torch.nn.Module):
def forward(self, input_1, input_2):
retval0 = input_1 + input_2
retval1 = retval0 * input_2
retval2 = retval0 + retval1
# __add___0, __mul___0, __add___1 results respectively
return retval0, retval1, retval2
self.path1 = Path()
self.path2 = Path()
def forward(self, input_1, input_2):
path1_results = self.path1(input_1, input_2)
path2_results = self.path2(input_1, input_2)
return tuple(x + y for x, y in zip(path1_results, path2_results))
def test_quantizer_scale_linking(mocker):
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 1],
},
{
"sample_size": [1, 1, 1, 1],
},
]
nncf_config["compression"]["activations"] = {
"unified_scale_ops": [
[
# Note: Assuming that quantizers are attached as a post-op to the specified operation
"EltwiseQuantizerLinkingTestModel/Path[path2]/__mul___0",
"EltwiseQuantizerLinkingTestModel/Path[path2]/__add___0",
]
],
"ignored_scopes": [
# Ignore path output averaging operations
"EltwiseQuantizerLinkingTestModel/__add___0",
"EltwiseQuantizerLinkingTestModel/__add___1",
"EltwiseQuantizerLinkingTestModel/__add___2",
],
}
register_bn_adaptation_init_args(nncf_config)
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(
EltwiseQuantizerLinkingTestModel(), nncf_config
)
# 18 inputs to quantize (14 regular + 4 linked),
# 8 quantization points left after propagation, out of these 3 are linked
assert len(compression_ctrl.non_weight_quantizers) == 6
shared_quantizer_id = NonWeightQuantizerId(target_node_name="/nncf_model_input_0")
non_shared_spies = []
for aq_id, aq_info in compression_ctrl.non_weight_quantizers.items():
quantizer = aq_info.quantizer_module_ref
spy = mocker.spy(quantizer, "forward")
if aq_id == shared_quantizer_id:
shared_spy = spy
else:
non_shared_spies.append(spy)
test_input1 = torch.ones([1, 1, 1, 1])
test_input2 = 2 * test_input1
compressed_model(test_input1, test_input2)
assert shared_spy.call_count == 3
for non_shared_spy in non_shared_spies:
assert non_shared_spy.call_count == 1
def test_eltwise_unified_scales_for_vpu():
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 1],
},
{
"sample_size": [1, 1, 1, 1],
},
]
nncf_config["target_device"] = "VPU"
register_bn_adaptation_init_args(nncf_config)
_, compression_ctrl = create_compressed_model_and_algo_for_test(EltwiseQuantizerLinkingTestModel(), nncf_config)
assert len(compression_ctrl.non_weight_quantizers) == 2
total_quantizations = sum(len(info.affected_insertions) for info in compression_ctrl.non_weight_quantizers.values())
assert total_quantizations == 8
class SingleCatModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(4, 1, 1)
def forward(self, x, y):
x = x * x
y = y * y
z = torch.cat([x, y])
v = self.conv(z)
return v
class DoubleCatModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(4, 1, 1)
def forward(self, x, y):
x = x * x
y = y * y
z = torch.cat([x, y])
v = torch.cat([x, z])
w = self.conv(v)
return w
class UNetLikeModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = torch.nn.Conv2d(4, 8, 1)
self.conv_2 = torch.nn.Conv2d(8, 16, 1)
self.conv_3 = torch.nn.Conv2d(16, 32, 1)
self.conv_t_3 = torch.nn.ConvTranspose2d(32, 16, 1)
self.conv_t_2 = torch.nn.ConvTranspose2d(16, 8, 1)
self.conv_t_1 = torch.nn.ConvTranspose2d(8, 4, 1)
def forward(self, x, y):
y1 = self.conv_1(x)
y2 = self.conv_2(y1)
y3 = self.conv_3(y2)
z3 = self.conv_t_3(y3)
z3 = torch.cat([z3, y2])
z2 = self.conv_t_2(z3)
z2 = torch.cat([z2, y1])
z1 = self.conv_t_1(z2)
return z1
CAT_UNIFIED_SCALE_TEST_STRUCTS = [(SingleCatModel, 3, 4), (DoubleCatModel, 3, 4), (UNetLikeModel, 4, 6)]
@pytest.mark.parametrize(
"target_device, model_creator, ref_aq_module_count, ref_quantizations",
[
(t_dev,) + rest
for t_dev, rest in itertools.product([x.value for x in HWConfigType], CAT_UNIFIED_SCALE_TEST_STRUCTS)
],
)
def test_unified_scales_with_concat(target_device, model_creator, ref_aq_module_count, ref_quantizations):
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["input_info"] = [
{
"sample_size": [1, 4, 1, 1],
},
{
"sample_size": [1, 4, 1, 1],
},
]
nncf_config["target_device"] = target_device
register_bn_adaptation_init_args(nncf_config)
_, compression_ctrl = create_compressed_model_and_algo_for_test(model_creator(), nncf_config)
assert len(compression_ctrl.non_weight_quantizers) == ref_aq_module_count
total_quantizations = sum(len(info.affected_insertions) for info in compression_ctrl.non_weight_quantizers.values())
assert total_quantizations == ref_quantizations
class SimplerModelForUnifiedScalesTesting(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv2d_1 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_2 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_3 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_4 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_5 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_6 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
in_1, in_2 = x.chunk(dim=-1, chunks=2)
in_1 = self.conv2d_1(in_1)
in_2 = self.conv2d_2(in_2)
x = in_1 + in_2
x = torch.stack([x, x], dim=-1)
x = x.squeeze(dim=0)
        in_1, in_2 = x.chunk(dim=-1, chunks=2)
        in_1 = self.conv2d_3(in_1)
        in_2 = self.conv2d_4(in_2)
        x = torch.cat([in_1, in_2], dim=-1)
in_1, in_2 = x.chunk(dim=-1, chunks=2)
in_1 = self.conv2d_5(in_1)
in_2 = self.conv2d_6(in_2)
x = in_1 * in_2
return x
class TwoEmbeddingAddModel(torch.nn.Module):
EMBEDDING_IO_SHAPE = [10, 10]
def __init__(self):
super().__init__()
self.embedding1 = torch.nn.Embedding(*self.EMBEDDING_IO_SHAPE)
self.embedding2 = torch.nn.Embedding(*self.EMBEDDING_IO_SHAPE)
def forward(self, x):
y1 = self.embedding1(x)
y2 = self.embedding2(x)
return y1 + y2
class TestsWithONNXInspection:
@staticmethod
def get_fq_nodes(onnx_model: onnx.ModelProto) -> List[onnx.NodeProto]:
return get_nodes_by_type(onnx_model, "FakeQuantize")
@staticmethod
def immediately_dominates_add_or_mul(node: onnx.NodeProto, graph: onnx.GraphProto) -> bool:
if len(node.output) != 1:
return False
output_tensor_id = node.output[0]
matches = [x for x in graph.node if output_tensor_id in x.input]
for match in matches:
if match.op_type in ["Add", "Mul"]:
return True
return False
@staticmethod
def immediately_dominates_cat(node: onnx.NodeProto, graph: onnx.GraphProto) -> bool:
if len(node.output) != 1:
return False
output_tensor_id = node.output[0]
matches = [x for x in graph.node if output_tensor_id in x.input]
for match in matches:
if match.op_type in ["Concat"]:
return True
return False
@staticmethod
def immediately_dominates_embedding(node: onnx.NodeProto, graph: onnx.GraphProto) -> bool:
if len(node.output) != 1:
return False
output_tensor_id = node.output[0]
matches = [x for x in graph.node if output_tensor_id in x.input]
for match in matches:
if match.op_type in ["Gather"]:
return True
return False
@staticmethod
def group_nodes_by_output_target(nodes: List[onnx.NodeProto], graph: onnx.GraphProto) -> List[List[onnx.NodeProto]]:
output_nodes = {} # type: Dict[str, List[onnx.NodeProto]]
for node in nodes:
succs = get_successors(node, graph)
assert len(succs) == 1
target_node_name = next(iter(succs)).name
if target_node_name not in output_nodes:
output_nodes[target_node_name] = []
output_nodes[target_node_name].append(node)
return list(output_nodes.values())
def test_unified_scales_are_identical_in_onnx(self, tmp_path):
# pylint:disable=no-member
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["compression"]["quantize_outputs"] = True
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 2],
},
]
nncf_config["target_device"] = "VPU"
register_bn_adaptation_init_args(nncf_config)
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(
SimplerModelForUnifiedScalesTesting(), nncf_config
)
with torch.no_grad():
for quant_info in compression_ctrl.non_weight_quantizers.values():
if isinstance(quant_info.quantizer_module_ref, AsymmetricQuantizer):
quant_info.quantizer_module_ref.input_range *= torch.abs(
torch.rand_like(quant_info.quantizer_module_ref.input_range)
)
else:
quant_info.quantizer_module_ref.scale *= torch.abs(
torch.rand_like(quant_info.quantizer_module_ref.scale)
)
test_input1 = torch.ones([1, 1, 1, 2])
compressed_model.forward(test_input1)
onnx_path = str(tmp_path / "model.onnx")
# Exporting the operator ::chunk to ONNX opset version 9 is not supported.
# Support for this operator was added in version 11
compression_ctrl.export_model(onnx_path, save_format="onnx_11")
onnx_model = onnx.load(onnx_path)
fq_nodes = TestsWithONNXInspection.get_fq_nodes(onnx_model)
eltwise_dominator_predicate = partial(
TestsWithONNXInspection.immediately_dominates_add_or_mul, graph=onnx_model.graph
)
eltwise_fq_nodes = list(filter(eltwise_dominator_predicate, fq_nodes))
cat_dominator_predicate = partial(TestsWithONNXInspection.immediately_dominates_cat, graph=onnx_model.graph)
cat_fq_nodes = list(filter(cat_dominator_predicate, fq_nodes))
fq_nodes_grouped_by_output = TestsWithONNXInspection.group_nodes_by_output_target(
eltwise_fq_nodes + cat_fq_nodes, onnx_model.graph
)
for unified_scale_group in fq_nodes_grouped_by_output:
inputs = [
resolve_constant_node_inputs_to_values(fq_node, onnx_model.graph) for fq_node in unified_scale_group
]
for inputs_dict in inputs[1:]:
curr_values = list(inputs_dict.values())
ref_values = list(inputs[0].values())
assert curr_values == ref_values # All inputs for unified scale quantizers must be equal
def test_weight_and_act_quantizer_scale_unification(self, tmp_path):
# pylint:disable=no-member
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["input_info"] = [
{"sample_size": [1, 5], "type": "long", "filler": "zeros"},
]
nncf_config["target_device"] = "VPU"
register_bn_adaptation_init_args(nncf_config)
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(
TwoEmbeddingAddModel(), nncf_config
)
with torch.no_grad():
for quant_module in compression_ctrl.all_quantizations.values():
if isinstance(quant_module, AsymmetricQuantizer):
quant_module.input_range *= torch.abs(torch.rand_like(quant_module.input_range))
else:
quant_module.scale *= torch.abs(torch.rand_like(quant_module.scale))
test_input1 = torch.ones([1, 5], dtype=torch.long)
compressed_model.forward(test_input1)
onnx_path = str(tmp_path / "model.onnx")
compression_ctrl.export_model(onnx_path)
onnx_model = onnx.load(onnx_path)
fq_nodes = TestsWithONNXInspection.get_fq_nodes(onnx_model)
eltwise_dominator_predicate = partial(
TestsWithONNXInspection.immediately_dominates_add_or_mul, graph=onnx_model.graph
)
embedding_dominator_predicate = partial(
TestsWithONNXInspection.immediately_dominates_embedding, graph=onnx_model.graph
)
eltwise_fq_nodes = list(filter(eltwise_dominator_predicate, fq_nodes))
embedding_weight_fq_nodes = list(filter(embedding_dominator_predicate, fq_nodes))
fq_nodes_with_expected_unified_scales = embedding_weight_fq_nodes + eltwise_fq_nodes
unified_fq_node_inputs = [
resolve_constant_node_inputs_to_values(fq_node, onnx_model.graph)
for fq_node in fq_nodes_with_expected_unified_scales
]
# delete weights from input dict
for inputs_for_single_fq in unified_fq_node_inputs:
weight_input_names = []
for input_name, input_tensor in inputs_for_single_fq.items():
if list(input_tensor.shape) == TwoEmbeddingAddModel.EMBEDDING_IO_SHAPE:
weight_input_names.append(input_name)
for weight_input_name in weight_input_names:
inputs_for_single_fq.pop(weight_input_name)
ref_values = list(unified_fq_node_inputs[0].values())
for inputs_dict in unified_fq_node_inputs[1:]:
curr_values = list(inputs_dict.values())
assert curr_values == ref_values # All inputs for unified scale quantizers must be equal
class SharedEmbeddingAddModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.shared_embedding = torch.nn.Embedding(10, 10)
def forward(self, x):
y1 = self.shared_embedding(x)
y2 = self.shared_embedding(x)
return y1 + y2
def test_unified_scales_with_shared_nodes():
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["input_info"] = [
{"sample_size": [1, 5], "type": "long", "filler": "zeros"},
]
nncf_config["target_device"] = "VPU"
register_bn_adaptation_init_args(nncf_config)
_, compression_ctrl = create_compressed_model_and_algo_for_test(
SharedEmbeddingAddModel(), nncf_config
) # type: NNCFNetwork, QuantizationController
assert len(compression_ctrl.weight_quantizers) == 1 # The two embedding nodes point to a single shared layer
assert len(compression_ctrl.non_weight_quantizers) == 0 # The "add" operation has its inputs already quantized
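# Illustrative sketch (plain PyTorch, not NNCF API; the two helpers below are hypothetical):
# the tests above insist on unified scales for eltwise inputs because integer addition of two
# quantized tensors is only well-defined when both operands share a single scale. With
# per-input scales the dequantized sum drifts, which is what the unified_scale_ops / VPU
# constraints exercised above are meant to prevent.
def _toy_fake_quantize(x: torch.Tensor, scale: float, levels: int = 256) -> torch.Tensor:
    # Uniform fake-quantization: snap to the integer grid, then dequantize back.
    return torch.clamp(torch.round(x / scale), 0, levels - 1) * scale
def _toy_unified_scale_demo():
    a, b = torch.tensor([1.00]), torch.tensor([1.00])
    unified = _toy_fake_quantize(a, 0.01) + _toy_fake_quantize(b, 0.01)  # one shared scale
    mixed = _toy_fake_quantize(a, 0.01) + _toy_fake_quantize(b, 0.07)  # per-input scales
    return unified.item(), mixed.item()  # 2.0 vs. 1.98 - the mixed-scale sum drifts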
|
fd2afdb9a3afe2676481f5731ac9cceb7dd52cb7
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/services/ping/datastream.py
|
a62b2834f7659fbed807f39b88df900d588cac1d
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 705
|
py
|
datastream.py
|
# ----------------------------------------------------------------------
# Ping DataStream client
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.datastream.client import DataStreamClient
class PingDataStreamClient(DataStreamClient):
async def on_change(self, data):
await self.service.update_probe(data)
async def on_delete(self, data):
await self.service.delete_probe(data["id"])
async def on_ready(self):
self.service.logger.info("Initial object mappings are ready")
|
059564d4848743efdf572fd7060158a665b790e0
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/tests/encryption/test_intransit_encryption_sanity.py
|
b9b6304e33d2fbdc59bf0a2f0f72145f289e5334
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
test_intransit_encryption_sanity.py
|
import logging
import pytest
from ocs_ci.ocs.resources.storage_cluster import (
in_transit_encryption_verification,
set_in_transit_encryption,
get_in_transit_encryption_config_state,
)
from ocs_ci.framework.pytest_customization.marks import (
tier1,
skipif_ocs_version,
green_squad,
)
from ocs_ci.framework import config
log = logging.getLogger(__name__)
@green_squad
class TestInTransitEncryptionSanity:
@pytest.fixture(autouse=True)
def set_encryption_at_teardown(self, request):
def teardown():
if config.ENV_DATA.get("in_transit_encryption"):
set_in_transit_encryption()
else:
set_in_transit_encryption(enabled=False)
request.addfinalizer(teardown)
@tier1
@skipif_ocs_version("<4.13")
@pytest.mark.polarion_id("OCS-4861")
def test_intransit_encryption_enable_disable_statetransition(self):
"""
The test does the following:
        1. Enable in-transit encryption if not already enabled.
        2. Verify in-transit encryption is enabled on the setup.
        3. Disable encryption.
        4. Verify the in-transit encryption configuration is removed.
        5. Enable encryption again and verify it.
        6. Verify the in-transit encryption config exists.
"""
if not get_in_transit_encryption_config_state():
if config.ENV_DATA.get("in_transit_encryption"):
pytest.fail("In-transit encryption is not enabled on the setup")
else:
set_in_transit_encryption()
log.info("Verifying the in-transit encryption is enable on setup.")
assert in_transit_encryption_verification()
log.info("Disabling the in-transit encryption.")
set_in_transit_encryption(enabled=False)
# Verify that encryption is actually disabled by checking that a ValueError is raised.
log.info("Verifying the in-transit encryption is disabled.")
with pytest.raises(ValueError):
assert not in_transit_encryption_verification()
if config.ENV_DATA.get("in_transit_encryption"):
log.info("Re-enabling in-transit encryption.")
set_in_transit_encryption()
# Verify that encryption is enabled again after re-enabling it
log.info(
"Verifying the in-transit encryption config after enabling the cluster."
)
assert in_transit_encryption_verification()
|
c03ab15c47890a35b70582c1b64da626bbb230f5
|
3d4c7b9c179322e6bdb3c7a0c137919364806cb3
|
/examples/python/native/multi_head_attention.py
|
7ccdae4186cb5c8b563f268e87d41a7fefb6ab97
|
[
"Apache-2.0"
] |
permissive
|
flexflow/FlexFlow
|
291282d27009924a427966e899d7c2fda9c20cec
|
b2ec6cb5d2b898db1ad4df32adf5699bc48aaac7
|
refs/heads/inference
| 2023-09-04T05:25:02.250225
| 2023-09-03T14:15:07
| 2023-09-03T14:15:07
| 160,988,469
| 1,139
| 186
|
Apache-2.0
| 2023-09-14T17:56:24
| 2018-12-08T23:43:13
|
C++
|
UTF-8
|
Python
| false
| false
| 3,052
|
py
|
multi_head_attention.py
|
from flexflow.core import *
from argparse import ArgumentParser
import numpy as np
def parse_args():
parser = ArgumentParser()
parser.add_argument('--seq-length', default=256, type=int)
parser.add_argument('--num-heads', default=16, type=int)
parser.add_argument('--hidden-size', default=512, type=int)
args, unknown = parser.parse_known_args()
return args
def attention():
args = parse_args()
ffconfig = FFConfig()
print("Python API: batch_size(%d) GPUs/node(%d) nodes(%d)" %(ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
ffmodel = FFModel(ffconfig)
batch_size = ffconfig.batch_size
dims_input = [batch_size, args.seq_length, args.hidden_size]
input = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)
q = ffmodel.dense(input, args.hidden_size)
k = ffmodel.dense(input, args.hidden_size)
v = ffmodel.dense(input, args.hidden_size)
q = ffmodel.reshape(q, shape=(batch_size, args.seq_length, args.num_heads, args.hidden_size // args.num_heads))
k = ffmodel.reshape(k, shape=(batch_size, args.seq_length, args.num_heads, args.hidden_size // args.num_heads))
v = ffmodel.reshape(v, shape=(batch_size, args.seq_length, args.num_heads, args.hidden_size // args.num_heads))
q = ffmodel.transpose(q, perm=(0, 2, 1, 3))
k = ffmodel.transpose(k, perm=(0, 2, 3, 1))
v = ffmodel.transpose(v, perm=(0, 2, 1, 3))
logits = ffmodel.batch_matmul(q, k)
#logits = ffmodel.softmax(logits)
output = ffmodel.batch_matmul(logits, v)
output = ffmodel.transpose(output, perm=(0, 2, 1, 3))
output = ffmodel.reshape(output, shape=(batch_size, args.seq_length, args.hidden_size))
output = ffmodel.dense(output, args.hidden_size, ActiMode.AC_MODE_RELU)
output = ffmodel.dense(output, args.hidden_size)
ffoptimizer = SGDOptimizer(ffmodel)
ffmodel.optimizer = ffoptimizer
ffmodel.compile(loss_type=LossType.LOSS_MEAN_SQUARED_ERROR_AVG_REDUCE, metrics=[MetricsType.METRICS_MEAN_SQUARED_ERROR], comp_mode=CompMode.INFERENCE)
label_tensor = ffmodel.label_tensor
# Full inputs/label
  num_samples = batch_size * 10  # size of the synthetic dataset fed to the data loaders
  dims = [num_samples, args.seq_length, args.hidden_size]
  np_input = np.zeros(dims, dtype=np.float32)
  np_label = np.zeros(dims, dtype=np.float32)
  dl_input = ffmodel.create_data_loader(input, np_input)
  dl_label = ffmodel.create_data_loader(label_tensor, np_label)
ffmodel.init_layers()
epochs = ffconfig.epochs
dl_input.next_batch(ffmodel)
dl_label.next_batch(ffmodel)
ts_start = ffconfig.get_current_time()
for epoch in range(0, epochs):
ffmodel.reset_metrics()
iterations = num_samples // batch_size
for iter in range(0, iterations):
ffconfig.begin_trace(111)
ffmodel.forward()
ffmodel.zero_gradients()
ffmodel.backward()
ffmodel.update()
ffconfig.end_trace(111)
ts_end = ffconfig.get_current_time()
run_time = 1e-6 * (ts_end - ts_start)
print("EPOCHS %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
if __name__ == "__main__":
print("Attention")
attention()
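# Reference sketch (NumPy only, independent of the FlexFlow API; not invoked by the example
# above): the reshape/transpose/batch_matmul sequence in attention() is the standard multi-head
# contraction (Q K^T) V per head. The softmax and 1/sqrt(d_k) scaling are omitted here as well,
# so the shapes line up with the FlexFlow graph; call numpy_attention_reference() to check them.
import numpy as np  # already imported above; repeated so the sketch stands alone
def numpy_attention_reference(batch=2, seq=4, heads=2, hidden=8):
  d_k = hidden // heads
  q = np.random.rand(batch, seq, hidden).astype(np.float32)
  k = np.random.rand(batch, seq, hidden).astype(np.float32)
  v = np.random.rand(batch, seq, hidden).astype(np.float32)
  def split_heads(t):
    # (batch, seq, hidden) -> (batch, heads, seq, d_k), mirroring the reshape + transpose above
    return t.reshape(batch, seq, heads, d_k).transpose(0, 2, 1, 3)
  q_h, v_h = split_heads(q), split_heads(v)
  k_h = k.reshape(batch, seq, heads, d_k).transpose(0, 2, 3, 1)  # (batch, heads, d_k, seq)
  logits = q_h @ k_h                                             # (batch, heads, seq, seq)
  out = logits @ v_h                                             # (batch, heads, seq, d_k)
  out = out.transpose(0, 2, 1, 3).reshape(batch, seq, hidden)    # back to (batch, seq, hidden)
  return out.shape                                               # (2, 4, 8) with the defaults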
|
49020e19bcb75fa1861af3c9a3f0923728ce9421
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/integration/pipeline/test_bootstrap_command.py
|
876faef31c879b7cd76ad1b64da66c5a9f303468
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 20,237
|
py
|
test_bootstrap_command.py
|
from unittest import skipIf
import pytest
from parameterized import parameterized
from samcli.commands.pipeline.bootstrap.cli import PIPELINE_CONFIG_FILENAME, PIPELINE_CONFIG_DIR
from samcli.lib.config.samconfig import SamConfig
from tests.integration.pipeline.base import BootstrapIntegBase
from tests.testing_utils import (
run_command_with_input,
RUNNING_ON_CI,
RUNNING_TEST_FOR_MASTER_ON_CI,
RUN_BY_CANARY,
run_command,
run_command_with_inputs,
)
import boto3
from botocore.exceptions import ClientError
# bootstrap tests require credentials and CI/CD will only add credentials to the env if the PR is from the same repo.
# As a result, the tests only run outside of CI/CD, when the run is not for the master branch,
# or when they are run by Canary
SKIP_BOOTSTRAP_TESTS = RUNNING_ON_CI and RUNNING_TEST_FOR_MASTER_ON_CI and not RUN_BY_CANARY
# In order to run bootstrap integration test locally make sure your test account is configured as `default` account.
CREDENTIAL_PROFILE = "2" if not RUN_BY_CANARY else "1"
CFN_OUTPUT_TO_CONFIG_KEY = {
"ArtifactsBucket": "artifacts_bucket",
"CloudFormationExecutionRole": "cloudformation_execution_role",
"PipelineExecutionRole": "pipeline_execution_role",
"PipelineUser": "pipeline_user",
}
@skipIf(SKIP_BOOTSTRAP_TESTS, "Skip bootstrap tests in CI/CD only")
class TestBootstrap(BootstrapIntegBase):
@parameterized.expand([("create_image_repository",), (False,)])
def test_interactive_with_no_resources_provided(self, create_image_repository):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list()
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"1", # IAM permissions provider
"", # pipeline user
"", # Pipeline execution role
"", # CloudFormation execution role
"", # Artifacts bucket
"y" if create_image_repository else "N", # Should we create ECR repo
]
if create_image_repository:
inputs.append("") # Create image repository
inputs.append("") # Confirm summary
inputs.append("y") # Create resources
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
# make sure pipeline user's credential is printed
self.assertIn("ACCESS_KEY_ID", stdout)
self.assertIn("SECRET_ACCESS_KEY", stdout)
common_resources = {
"PipelineUser",
"PipelineUserAccessKey",
"PipelineUserSecretKey",
"CloudFormationExecutionRole",
"PipelineExecutionRole",
"ArtifactsBucket",
"ArtifactsLoggingBucket",
"ArtifactsLoggingBucketPolicy",
"ArtifactsBucketPolicy",
"PipelineExecutionRolePermissionPolicy",
}
if create_image_repository:
self.assertSetEqual(
{
*common_resources,
"ImageRepository",
},
set(self._extract_created_resource_logical_ids(stack_name)),
)
CFN_OUTPUT_TO_CONFIG_KEY["ImageRepository"] = "image_repository"
self.validate_pipeline_config(stack_name, stage_configuration_name, list(CFN_OUTPUT_TO_CONFIG_KEY.keys()))
del CFN_OUTPUT_TO_CONFIG_KEY["ImageRepository"]
else:
self.assertSetEqual(common_resources, set(self._extract_created_resource_logical_ids(stack_name)))
self.validate_pipeline_config(stack_name, stage_configuration_name)
def test_interactive_with_no_resources_provided_using_oidc(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list()
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"2", # OIDC permissions provider
"1", # GitHub Actions OIDC
"https://token.actions.githubusercontent.com", # GitHub Actions OIDC URL
"sts.amazonaws.com", # GitHub Actions OIDC client id
"test_github_org", # GitHub Organization
"test_not_real", # Github Repository
"main", # Deployment branch
"", # Pipeline execution role
"", # CloudFormation execution role
"", # Artifacts bucket
"N", # Should we create ECR repo
]
inputs.append("") # Confirm summary
inputs.append("y") # Create resources
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
# make sure pipeline user's credential is printed
common_resources = {
"CloudFormationExecutionRole",
"PipelineExecutionRole",
"ArtifactsBucket",
"ArtifactsLoggingBucket",
"ArtifactsLoggingBucketPolicy",
"ArtifactsBucketPolicy",
"PipelineExecutionRolePermissionPolicy",
"OidcProvider",
}
CFN_OUTPUT_TO_CONFIG_KEY["OidcProvider"] = "oidc_provider_url"
del CFN_OUTPUT_TO_CONFIG_KEY["PipelineUser"]
self.assertSetEqual(common_resources, set(self._extract_created_resource_logical_ids(stack_name)))
self.validate_pipeline_config(stack_name, stage_configuration_name)
del CFN_OUTPUT_TO_CONFIG_KEY["OidcProvider"]
CFN_OUTPUT_TO_CONFIG_KEY["PipelineUser"] = "pipeline_user"
@parameterized.expand([("create_image_repository",), (False,)])
def test_non_interactive_with_no_resources_provided(self, create_image_repository):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list(
no_interactive=True,
create_image_repository=create_image_repository,
no_confirm_changeset=True,
region=self.region,
)
bootstrap_process_execute = run_command(bootstrap_command_list)
self.assertEqual(bootstrap_process_execute.process.returncode, 2)
stderr = bootstrap_process_execute.stderr.decode()
self.assertIn("Missing required parameter", stderr)
def test_interactive_with_all_required_resources_provided(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list()
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"1", # IAM permissions
"arn:aws:iam::123:user/user-name", # pipeline user
"arn:aws:iam::123:role/role-name", # Pipeline execution role
"arn:aws:iam::123:role/role-name", # CloudFormation execution role
"arn:aws:s3:::bucket-name", # Artifacts bucket
"N", # Should we create ECR repo, 3 - specify one
"",
]
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
self.assertIn("skipping creation", stdout)
def test_no_interactive_with_all_required_resources_provided(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list(
no_interactive=True,
stage_configuration_name=stage_configuration_name,
pipeline_user="arn:aws:iam::123:user/user-name", # pipeline user
pipeline_execution_role="arn:aws:iam::123:role/role-name", # Pipeline execution role
cloudformation_execution_role="arn:aws:iam::123:role/role-name", # CloudFormation execution role
bucket="arn:aws:s3:::bucket-name", # Artifacts bucket
image_repository="arn:aws:ecr:::repository/repo-name", # ecr repo
region=self.region,
)
bootstrap_process_execute = run_command(bootstrap_command_list)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
self.assertIn("skipping creation", stdout)
def validate_pipeline_config(self, stack_name, stage_configuration_name, cfn_keys_to_check=None):
# Get output values from cloudformation
if cfn_keys_to_check is None:
cfn_keys_to_check = list(CFN_OUTPUT_TO_CONFIG_KEY.keys())
response = self.cf_client.describe_stacks(StackName=stack_name)
stacks = response["Stacks"]
self.assertTrue(len(stacks) > 0) # in case stack name is invalid
stack_outputs = stacks[0]["Outputs"]
output_values = {}
for value in stack_outputs:
output_values[value["OutputKey"]] = value["OutputValue"]
# Get values saved in config file
config = SamConfig(PIPELINE_CONFIG_DIR, PIPELINE_CONFIG_FILENAME)
config_values = config.get_all(["pipeline", "bootstrap"], "parameters", stage_configuration_name)
config_values = {**config_values, **config.get_all(["pipeline", "bootstrap"], "parameters")}
for key in CFN_OUTPUT_TO_CONFIG_KEY:
if key not in cfn_keys_to_check:
continue
value = CFN_OUTPUT_TO_CONFIG_KEY[key]
if key != "OidcProvider":
cfn_value = output_values[key]
config_value = config_values[value]
if key == "ImageRepository":
self.assertEqual(cfn_value.split("/")[-1], config_value.split("/")[-1])
elif key == "OidcProvider":
self.assertTrue(config_value.startswith("https://"))
else:
self.assertTrue(cfn_value.endswith(config_value) or cfn_value == config_value)
@parameterized.expand([("confirm_changeset",), (False,)])
def test_no_interactive_with_some_required_resources_provided(self, confirm_changeset: bool):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list(
no_interactive=True,
stage_configuration_name=stage_configuration_name,
pipeline_user="arn:aws:iam::123:user/user-name", # pipeline user
pipeline_execution_role="arn:aws:iam::123:role/role-name", # Pipeline execution role
# CloudFormation execution role missing
bucket="arn:aws:s3:::bucket-name", # Artifacts bucket
image_repository="arn:aws:ecr:::repository/repo-name", # ecr repo
no_confirm_changeset=not confirm_changeset,
region=self.region,
)
inputs = [
"y", # proceed
]
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs if confirm_changeset else [])
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
self.assertIn("Successfully created!", stdout)
self.assertIn("CloudFormationExecutionRole", self._extract_created_resource_logical_ids(stack_name))
def test_interactive_cancelled_by_user(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list()
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"1", # IAM permissions
"arn:aws:iam::123:user/user-name", # pipeline user
"arn:aws:iam::123:role/role-name", # Pipeline execution role
"", # CloudFormation execution role
"arn:aws:s3:::bucket-name", # Artifacts bucket
"N", # Do you have Lambda with package type Image
"",
"", # Create resources confirmation
]
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
self.assertTrue(stdout.strip().endswith("Canceling pipeline bootstrap creation."))
self.assertFalse(self._stack_exists(stack_name))
def test_interactive_with_some_required_resources_provided(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list()
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"1", # IAM permissions
"arn:aws:iam::123:user/user-name", # pipeline user
"arn:aws:iam::123:role/role-name", # Pipeline execution role
"", # CloudFormation execution role
"arn:aws:s3:::bucket-name", # Artifacts bucket
"N", # Do you have Lambda with package type Image
"",
"y", # Create resources confirmation
]
bootstrap_process_execute = run_command_with_inputs(bootstrap_command_list, inputs)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
self.assertIn("Successfully created!", stdout)
# make sure the not provided resource is the only resource created.
self.assertIn("CloudFormationExecutionRole", self._extract_created_resource_logical_ids(stack_name))
if "ImageRepository" in CFN_OUTPUT_TO_CONFIG_KEY:
del CFN_OUTPUT_TO_CONFIG_KEY["ImageRepository"]
if "OidcProvider" in CFN_OUTPUT_TO_CONFIG_KEY:
del CFN_OUTPUT_TO_CONFIG_KEY["OidcProvider"]
self.validate_pipeline_config(stack_name, stage_configuration_name)
def test_interactive_pipeline_user_only_created_once(self):
"""
Create 3 stages, only the first stage resource stack creates
a pipeline user, and the remaining two share the same pipeline user.
"""
stage_configuration_names = []
for suffix in ["1", "2", "3"]:
stage_configuration_name, stack_name = self._get_stage_and_stack_name(suffix)
stage_configuration_names.append(stage_configuration_name)
self.stack_names.append(stack_name)
bootstrap_command_list = self.get_bootstrap_command_list()
for i, stage_configuration_name in enumerate(stage_configuration_names):
inputs = [
stage_configuration_name,
CREDENTIAL_PROFILE,
self.region, # region
"1", # IAM permissions
*([""] if i == 0 else []), # pipeline user
"arn:aws:iam::123:role/role-name", # Pipeline execution role
"arn:aws:iam::123:role/role-name", # CloudFormation execution role
"arn:aws:s3:::bucket-name", # Artifacts bucket
"N", # Should we create ECR repo, 3 - specify one
"",
"y", # Create resources confirmation
]
bootstrap_process_execute = run_command_with_input(
bootstrap_command_list, ("\n".join(inputs) + "\n").encode()
)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stdout = bootstrap_process_execute.stdout.decode()
# Only first environment creates pipeline user
if i == 0:
self.assertIn("The following resources were created in your account:", stdout)
resources = self._extract_created_resource_logical_ids(self.stack_names[i])
self.assertTrue("PipelineUser" in resources)
self.assertTrue("PipelineUserAccessKey" in resources)
self.assertTrue("PipelineUserSecretKey" in resources)
self.validate_pipeline_config(self.stack_names[i], stage_configuration_name)
else:
self.assertIn("skipping creation", stdout)
@parameterized.expand([("ArtifactsBucket",), ("ArtifactsLoggingBucket",)])
def test_bootstrapped_buckets_accept_ssl_requests_only(self, bucket_logical_id):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list(
stage_configuration_name=stage_configuration_name,
no_interactive=True,
no_confirm_changeset=True,
region=self.region,
)
bootstrap_process_execute = run_command(bootstrap_command_list)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stack_resources = self.cf_client.describe_stack_resources(StackName=stack_name)
bucket = next(
resource
for resource in stack_resources["StackResources"]
if resource["LogicalResourceId"] == bucket_logical_id
)
bucket_name = bucket["PhysicalResourceId"]
bucket_key = "any/testing/key.txt"
testing_data = b"any testing binary data"
s3_ssl_client = boto3.client("s3", region_name=self.region)
s3_non_ssl_client = boto3.client("s3", use_ssl=False, region_name=self.region)
# Assert SSL requests are accepted
s3_ssl_client.put_object(Body=testing_data, Bucket=bucket_name, Key=bucket_key)
res = s3_ssl_client.get_object(Bucket=bucket_name, Key=bucket_key)
retrieved_data = res["Body"].read()
self.assertEqual(retrieved_data, testing_data)
# Assert non SSl requests are denied
with self.assertRaises(ClientError) as error:
s3_non_ssl_client.get_object(Bucket=bucket_name, Key=bucket_key)
self.assertEqual(
str(error.exception), "An error occurred (AccessDenied) when calling the GetObject operation: Access Denied"
)
def test_bootstrapped_artifacts_bucket_has_server_access_log_enabled(self):
stage_configuration_name, stack_name = self._get_stage_and_stack_name()
self.stack_names = [stack_name]
bootstrap_command_list = self.get_bootstrap_command_list(
stage_configuration_name=stage_configuration_name,
no_interactive=True,
no_confirm_changeset=True,
region=self.region,
)
bootstrap_process_execute = run_command(bootstrap_command_list)
self.assertEqual(bootstrap_process_execute.process.returncode, 0)
stack_resources = self.cf_client.describe_stack_resources(StackName=stack_name)
artifacts_bucket = next(
resource
for resource in stack_resources["StackResources"]
if resource["LogicalResourceId"] == "ArtifactsBucket"
)
artifacts_bucket_name = artifacts_bucket["PhysicalResourceId"]
artifacts_logging_bucket = next(
resource
for resource in stack_resources["StackResources"]
if resource["LogicalResourceId"] == "ArtifactsLoggingBucket"
)
artifacts_logging_bucket_name = artifacts_logging_bucket["PhysicalResourceId"]
s3_client = boto3.client("s3", region_name=self.region)
res = s3_client.get_bucket_logging(Bucket=artifacts_bucket_name)
self.assertEqual(artifacts_logging_bucket_name, res["LoggingEnabled"]["TargetBucket"])
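# Background sketch (illustrative, not taken from the SAM CLI bootstrap template): the
# "SSL requests only" behaviour asserted in test_bootstrapped_buckets_accept_ssl_requests_only
# is typically enforced by a bucket policy statement that denies any request made without TLS,
# which is why the non-SSL boto3 client above gets AccessDenied. The bucket name is a placeholder.
SSL_ONLY_POLICY_STATEMENT_EXAMPLE = {
    "Effect": "Deny",
    "Principal": "*",
    "Action": "s3:*",
    "Resource": ["arn:aws:s3:::<artifacts-bucket>", "arn:aws:s3:::<artifacts-bucket>/*"],
    "Condition": {"Bool": {"aws:SecureTransport": "false"}},
}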
|
38fb58b7a74a7d21c87ee2b3b1ea4114971adb36
|
8b725e9f1bef041961f9df5a77ed5ef14cd75908
|
/src/pystack/colors.py
|
8628568042dc6d2fad64aec1996302b3d10176b0
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bloomberg/pystack
|
70a3615961ba61a53a0f8417cd4eea91a89d2e69
|
e07ee353cbdac9359aaa84c4c624f709ec39512c
|
refs/heads/main
| 2023-08-31T01:35:51.516066
| 2023-08-23T10:01:20
| 2023-08-23T10:01:20
| 593,714,137
| 791
| 35
|
Apache-2.0
| 2023-09-14T16:51:06
| 2023-01-26T17:15:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,064
|
py
|
colors.py
|
import os
import re
import sys
from typing import Iterable
from typing import Optional
ATTRIBUTES = dict(
list(
zip(
[
"bold",
"faint",
"italized",
"underline",
"blink",
"",
"reverse",
"concealed",
],
list(range(1, 9)),
)
)
)
del ATTRIBUTES[""]
ATTRIBUTES_RE = r"\033\[(?:%s)m" % "|".join(["%d" % v for v in ATTRIBUTES.values()])
HIGHLIGHTS = dict(
list(
zip(
[
"on_grey",
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
"on_white",
],
list(range(40, 48)),
)
)
)
HIGHLIGHTS_RE = r"\033\[(?:%s)m" % "|".join(["%d" % v for v in HIGHLIGHTS.values()])
COLORS = dict(
list(
zip(
[
"grey",
"red",
"green",
"yellow",
"blue",
"magenta",
"cyan",
"white",
],
list(range(30, 38)),
)
)
)
COLORS_RE = r"\033\[(?:%s)m" % "|".join(["%d" % v for v in COLORS.values()])
RESET = "\033[0m"
RESET_RE = r"033\[0m"
def _is_stdout_a_tty() -> bool:
return hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
def colored(
text: str,
color: Optional[str] = None,
highlight: Optional[str] = None,
attrs: Optional[Iterable[str]] = None,
) -> str:
"""Colorize text, while stripping nested ANSI color sequences.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
        bold, faint, italized, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
def terminal_supports_color() -> bool:
if os.getenv("NO_COLOR") is not None:
return False
return _is_stdout_a_tty()
if not terminal_supports_color():
return text
return format_colored(text, color, highlight, attrs)
def format_colored(
text: str,
color: Optional[str] = None,
highlight: Optional[str] = None,
attrs: Optional[Iterable[str]] = None,
) -> str:
fmt_str = "\033[%dm%s"
if color is not None:
text = re.sub(COLORS_RE + "(.*?)" + RESET_RE, r"\1", text)
text = fmt_str % (COLORS[color], text)
if highlight is not None:
text = re.sub(HIGHLIGHTS_RE + "(.*?)" + RESET_RE, r"\1", text)
text = fmt_str % (HIGHLIGHTS[highlight], text)
if attrs is not None:
text = re.sub(ATTRIBUTES_RE + "(.*?)" + RESET_RE, r"\1", text)
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
return text + RESET
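# Minimal usage sketch (hypothetical, not part of the original module): format_colored wraps the
# text in the requested ANSI codes and strips nested sequences of the same kind, so re-colouring
# an already coloured fragment does not leave stray escape codes behind.
if __name__ == "__main__":
    inner = format_colored("warning", color="red")
    outer = format_colored(f"status: {inner}", color="green", attrs=["bold"])
    print(repr(outer))  # bold + green wrapping only; the nested red codes are stripped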
|
a4331505b809deba657cc577bb0349acb1e26778
|
951c578186220f2499a7aecf99a314e46778fa75
|
/tests/columns/test_nested.py
|
9c9f39080f1016220f87506a846d084af1e41e27
|
[
"MIT",
"Python-2.0"
] |
permissive
|
mymarilyn/clickhouse-driver
|
0d06fb1d3b28f61b267307fb6cea1a33d7997df4
|
ce712b5bc7a7900e844c7c8f99a1e3426aa326f7
|
refs/heads/master
| 2023-07-20T08:41:27.193499
| 2023-06-30T08:29:06
| 2023-06-30T08:29:31
| 90,912,724
| 1,108
| 229
|
NOASSERTION
| 2023-05-24T02:54:41
| 2017-05-10T22:13:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
test_nested.py
|
from tests.testcase import BaseTestCase
class NestedTestCase(BaseTestCase):
required_server_version = (21, 3, 13)
def entuple(self, lst):
return tuple(
self.entuple(x) if isinstance(x, list) else x for x in lst
)
def test_simple(self):
columns = 'n Nested(i Int32, s String)'
# INSERT INTO test_nested VALUES ([(0, 'a'), (1, 'b')]);
data = [([(0, 'a'), (1, 'b')],)]
with self.create_table(columns, flatten_nested=0):
self.client.execute(
'INSERT INTO test (n) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(inserted, "[(0,'a'),(1,'b')]\n")
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
projected_i = self.client.execute('SELECT n.i FROM test')
self.assertEqual(
projected_i,
[([0, 1],)]
)
projected_s = self.client.execute('SELECT n.s FROM test')
self.assertEqual(
projected_s,
[(['a', 'b'],)]
)
def test_multiple_rows(self):
columns = 'n Nested(i Int32, s String)'
data = [([(0, 'a'), (1, 'b')],), ([(3, 'd'), (4, 'e')],)]
with self.create_table(columns, flatten_nested=0):
self.client.execute(
'INSERT INTO test (n) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
"[(0,'a'),(1,'b')]\n[(3,'d'),(4,'e')]\n"
)
inserted = self.client.execute(query)
self.assertEqual(inserted, data)
def test_dict(self):
columns = 'n Nested(i Int32, s String)'
data = [
{'n': [{'i': 0, 's': 'a'}, {'i': 1, 's': 'b'}]},
{'n': [{'i': 3, 's': 'd'}, {'i': 4, 's': 'e'}]},
]
with self.create_table(columns, flatten_nested=0):
self.client.execute(
'INSERT INTO test (n) VALUES', data
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
"[(0,'a'),(1,'b')]\n[(3,'d'),(4,'e')]\n"
)
inserted = self.client.execute(query)
self.assertEqual(
inserted,
[([(0, 'a'), (1, 'b')],), ([(3, 'd'), (4, 'e')],)]
)
def test_nested_side_effect_as_json(self):
client_settings = {
'allow_experimental_object_type': True
}
columns = 'n Nested(i Int32, s String)'
data = [([(0, 'a'), (1, 'b')],)]
with self.create_table(columns, flatten_nested=0):
with self.created_client(settings=client_settings) as client:
client.execute(
'INSERT INTO test (n) VALUES', data
)
inserted = client.execute('SELECT * FROM test')
self.assertEqual(
inserted,
[([{'i': 0, 's': 'a'}, {'i': 1, 's': 'b'}],)]
)
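# Standalone sketch (assumes a ClickHouse server on localhost and the clickhouse-driver package;
# the table name "nested_demo" is made up): the same flatten_nested=0 round trip exercised by
# NestedTestCase.test_simple, but outside the test harness.
if __name__ == "__main__":
    from clickhouse_driver import Client
    client = Client("localhost", settings={"flatten_nested": 0})
    client.execute(
        "CREATE TABLE IF NOT EXISTS nested_demo (n Nested(i Int32, s String)) ENGINE = Memory"
    )
    client.execute("INSERT INTO nested_demo (n) VALUES", [([(0, "a"), (1, "b")],)])
    # Rows come back as tuples of (i, s) pairs, matching the assertions above.
    print(client.execute("SELECT * FROM nested_demo"))  # [([(0, 'a'), (1, 'b')],)]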
|
f07e550a1988b8d9733ce380e3c2b94e1b13067c
|
e4c5238c86c8a114d49b7ba3ecc5ef9d5157e152
|
/tests/test_state_opt/test_ppt_distinguishability.py
|
1ac7ccfca65f8403b33a88935ea4aca4a1e00e39
|
[
"MIT"
] |
permissive
|
vprusso/toqito
|
64a9963c02b73127836b76d886543a0642b93664
|
7e6869d783f98cb241579ea89e0f9ff61eff9d9b
|
refs/heads/master
| 2023-07-22T17:08:18.392204
| 2023-07-19T07:27:37
| 2023-07-19T07:27:37
| 235,493,396
| 116
| 53
|
MIT
| 2023-09-12T13:35:38
| 2020-01-22T03:47:16
|
Python
|
UTF-8
|
Python
| false
| false
| 9,162
|
py
|
test_ppt_distinguishability.py
|
"""Test ppt_distinguishability."""
import numpy as np
from toqito.perms import swap_operator
from toqito.state_opt import ppt_distinguishability
from toqito.states import basis, bell
def test_ppt_distinguishability_yyd_density_matrices():
"""
PPT distinguishing the YYD states from [1] should yield `7/8 ~ 0.875`
Feeding the input to the function as density matrices.
References:
[1]: Yu, Nengkun, Runyao Duan, and Mingsheng Ying.
"Four locally indistinguishable ququad-ququad orthogonal
maximally entangled states."
Physical review letters 109.2 (2012): 020506.
https://arxiv.org/abs/1107.3224
"""
psi_0 = bell(0)
psi_1 = bell(2)
psi_2 = bell(3)
psi_3 = bell(1)
x_1 = np.kron(psi_0, psi_0)
x_2 = np.kron(psi_1, psi_3)
x_3 = np.kron(psi_2, psi_3)
x_4 = np.kron(psi_3, psi_3)
rho_1 = x_1 * x_1.conj().T
rho_2 = x_2 * x_2.conj().T
rho_3 = x_3 * x_3.conj().T
rho_4 = x_4 * x_4.conj().T
states = [rho_1, rho_2, rho_3, rho_4]
probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]
# Min-error tests:
primal_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=True)
dual_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)
primal_res = ppt_distinguishability(
states, probs=probs, dist_method="unambiguous", strategy=True
)
dual_res = ppt_distinguishability(
states, probs=probs, dist_method="unambiguous", strategy=False
)
np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)
def test_ppt_distinguishability_yyd_vectors():
"""
PPT distinguishing the YYD states from [1] should yield `7/8 ~ 0.875`
Feeding the input to the function as state vectors.
References:
[1]: Yu, Nengkun, Runyao Duan, and Mingsheng Ying.
"Four locally indistinguishable ququad-ququad orthogonal
maximally entangled states."
Physical review letters 109.2 (2012): 020506.
https://arxiv.org/abs/1107.3224
"""
psi_0 = bell(0)
psi_1 = bell(2)
psi_2 = bell(3)
psi_3 = bell(1)
x_1 = np.kron(psi_0, psi_0)
x_2 = np.kron(psi_1, psi_3)
x_3 = np.kron(psi_2, psi_3)
x_4 = np.kron(psi_3, psi_3)
states = [x_1, x_2, x_3, x_4]
probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]
primal_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=True)
dual_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)
primal_res = ppt_distinguishability(
states, probs=probs, dist_method="unambiguous", strategy=True
)
dual_res = ppt_distinguishability(
states, probs=probs, dist_method="unambiguous", strategy=False
)
np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)
def test_ppt_distinguishability_yyd_states_no_probs():
"""
PPT distinguishing the YYD states from [1] should yield 7/8 ~ 0.875
If no probability vector is explicitly given, assume uniform
probabilities are given.
References:
[1]: Yu, Nengkun, Runyao Duan, and Mingsheng Ying.
"Four locally indistinguishable ququad-ququad orthogonal
maximally entangled states."
Physical review letters 109.2 (2012): 020506.
https://arxiv.org/abs/1107.3224
"""
psi_0 = bell(0)
psi_1 = bell(2)
psi_2 = bell(3)
psi_3 = bell(1)
x_1 = np.kron(psi_0, psi_0)
x_2 = np.kron(psi_1, psi_3)
x_3 = np.kron(psi_2, psi_3)
x_4 = np.kron(psi_3, psi_3)
rho_1 = x_1 * x_1.conj().T
rho_2 = x_2 * x_2.conj().T
rho_3 = x_3 * x_3.conj().T
rho_4 = x_4 * x_4.conj().T
states = [rho_1, rho_2, rho_3, rho_4]
primal_res = ppt_distinguishability(states, probs=None, dist_method="min-error", strategy=True)
dual_res = ppt_distinguishability(states, probs=None, dist_method="min-error", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)
primal_res = ppt_distinguishability(
states, probs=None, dist_method="unambiguous", strategy=True
)
dual_res = ppt_distinguishability(states, probs=None, dist_method="unambiguous", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)
def test_ppt_distinguishability_werner_hiding_pairs():
r"""
One quantum data hiding scheme involves the Werner hiding pair.
A Werner hiding pair is defined by
.. math::
\begin{equation}
\sigma_0^{(n)} = \frac{\mathbb{I} \otimes \mathbb{I} + W_n}{n(n+1)}
\quad \text{and} \quad
\sigma_1^{(n)} = \frac{\mathbb{I} \otimes \mathbb{I} - W_n}{n(n-1)}
\end{equation}
The optimal probability to distinguish the Werner hiding pair is known
to be upper bounded by the following equation
.. math::
\begin{equation}
\frac{1}{2} + \frac{1}{n+1}
\end{equation}
References:
[1]: Terhal, Barbara M., David P. DiVincenzo, and Debbie W. Leung.
"Hiding bits in Bell states."
Physical review letters 86.25 (2001): 5807.
https://arxiv.org/abs/quant-ph/0011042
[2]: Cosentino, Alessandro
"Quantum state local distinguishability via convex optimization".
University of Waterloo, Thesis
https://uwspace.uwaterloo.ca/handle/10012/9572
"""
dim = 2
sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))
sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))
states = [sigma_0, sigma_1]
expected_val = 1 / 2 + 1 / (dim + 1)
primal_res = ppt_distinguishability(states, probs=None, dist_method="min-error", strategy=True)
dual_res = ppt_distinguishability(states, probs=None, dist_method="min-error", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)
primal_res = ppt_distinguishability(
states, probs=None, dist_method="unambiguous", strategy=True
)
dual_res = ppt_distinguishability(states, probs=None, dist_method="unambiguous", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)
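# Worked numbers for the bound above with dim = 2, as used in this test:
#   min-error:   1/2 + 1/(dim + 1) = 1/2 + 1/3 = 5/6 ≈ 0.8333 (the `expected_val` asserted above)
#   unambiguous: the test checks 1/3 for the same Werner hiding pair.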
def test_ppt_distinguishability_four_bell_states():
r"""
PPT distinguishing the four Bell states.
There exists a closed form formula for the probability with which one
is able to distinguish one of the four Bell states given with equal
probability when Alice and Bob have access to a resource state [1].
The resource state is defined by
    .. math::
        |\tau_{\epsilon} \rangle = \sqrt{\frac{1+\epsilon}{2}} |0\rangle |0\rangle +
        \sqrt{\frac{1-\epsilon}{2}} |1\rangle |1\rangle
The closed form probability with which Alice and Bob can distinguish via
PPT measurements is given as follows
.. math::
\frac{1}{2} \left(1 + \sqrt{1 - \epsilon^2} \right).
This formula happens to be equal to LOCC and SEP as well for this case.
Refer to Theorem 5 in [1] for more details.
References:
[1]: Bandyopadhyay, Somshubhro, et al.
"Limitations on separable measurements by convex optimization."
IEEE Transactions on Information Theory 61.6 (2015): 3593-3604.
https://arxiv.org/abs/1408.6981
"""
rho_1 = bell(0) * bell(0).conj().T
rho_2 = bell(1) * bell(1).conj().T
rho_3 = bell(2) * bell(2).conj().T
rho_4 = bell(3) * bell(3).conj().T
e_0, e_1 = basis(2, 0), basis(2, 1)
e_00 = np.kron(e_0, e_0)
e_11 = np.kron(e_1, e_1)
eps = 0.5
resource_state = np.sqrt((1 + eps) / 2) * e_00 + np.sqrt((1 - eps) / 2) * e_11
resource_state = resource_state * resource_state.conj().T
states = [
np.kron(rho_1, resource_state),
np.kron(rho_2, resource_state),
np.kron(rho_3, resource_state),
np.kron(rho_4, resource_state),
]
probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]
exp_res = 1 / 2 * (1 + np.sqrt(1 - eps**2))
primal_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=True)
dual_res = ppt_distinguishability(states, probs=probs, dist_method="min-error", strategy=False)
np.testing.assert_equal(np.isclose(primal_res, exp_res, atol=0.001), True)
np.testing.assert_equal(np.isclose(dual_res, exp_res, atol=0.001), True)
if __name__ == "__main__":
np.testing.run_module_suite()
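# Worked value for the closed form in test_ppt_distinguishability_four_bell_states: with the
# eps = 0.5 resource state used there, 1/2 * (1 + sqrt(1 - 0.5**2)) = 1/2 * (1 + sqrt(0.75))
# ≈ 0.9330, which is the `exp_res` both the primal and dual SDP values are checked against.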
|
8fb5b76df5512078c51be7c0e1a10bf1ec5f3c73
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/helpers/dr_helpers_ui.py
|
9bc6638f6e2df80fd4743ae7f196f97c8fc54a73
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 18,182
|
py
|
dr_helpers_ui.py
|
"""
Helper functions specific to DR User Interface
"""
import logging
from selenium.common.exceptions import NoSuchElementException
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.ui.views import locators
from ocs_ci.ocs.ui.helpers_ui import format_locator
from ocs_ci.utility.utils import get_ocp_version
from ocs_ci.ocs.utils import get_non_acm_cluster_config
log = logging.getLogger(__name__)
def dr_submariner_validation_from_ui(acm_obj):
"""
This function is only applicable for Regional DR.
This function calls other function and does pre-checks on ACM UI
such as Submariner validation from ACM console for Regional DR.
Args:
acm_obj (AcmAddClusters): ACM Page Navigator Class
"""
multicluster_mode = config.MULTICLUSTER.get("multicluster_mode", None)
if multicluster_mode == constants.RDR_MODE:
# Add an arg to below function and pass the cluster_set_name created on your cluster
# when running the test locally.
acm_obj.submariner_validation_ui()
def check_cluster_status_on_acm_console(
acm_obj,
down_cluster_name=None,
cluster_names=None,
timeout=900,
expected_text=constants.STATUS_READY,
):
"""
This function checks the current status of imported clusters on the ACM console.
These clusters are the managed OCP clusters and the ACM Hub cluster.
Args:
acm_obj (AcmAddClusters): ACM Page Navigator Class
down_cluster_name (str): If Failover is performed when a cluster goes down, it waits and checks the updated
status of cluster unavailability on the ACM console.
It takes the cluster name which is down.
cluster_names (list): This is a list of cluster names involved in a DR setup. You can either pass the cluster
names as args in the form of list, but if not passed, it fetches the primary & secondary
cluster names passed at run time for context setting
(max. 3 for now including ACM Hub cluster).
ACM Hub cluster name is hard coded as "local-cluster" as the name is constant & isn't
expected to change.
timeout (int): Timeout to wait for certain elements to be found on the ACM UI
expected_text (str): Any particular string/status of the cluster to be checked on the ACM console.
Default is set to ready
"""
ocp_version = get_ocp_version()
acm_loc = locators[ocp_version]["acm_page"]
acm_obj.navigate_clusters_page()
if down_cluster_name:
log.info(
"Down cluster name is provided, checking it's updated status on ACM console"
)
acm_obj.do_click(format_locator(acm_loc["cluster_name"], down_cluster_name))
check_cluster_unavailability = acm_obj.wait_until_expected_text_is_found(
format_locator(acm_loc["cluster_status_check"], expected_text),
expected_text=expected_text,
timeout=timeout,
)
if check_cluster_unavailability:
log.info(f"Down cluster {down_cluster_name} is '{expected_text}'")
acm_obj.take_screenshot()
log.info("Navigate back to Clusters page")
acm_obj.do_click(acm_loc["clusters-page"])
return True
else:
check_cluster_availability = acm_obj.wait_until_expected_text_is_found(
format_locator(acm_loc["cluster_status_check"], "Ready"),
expected_text="Ready",
timeout=30,
)
assert check_cluster_availability, (
f"Down cluster {down_cluster_name} is still in {constants.STATUS_READY} state after {timeout} seconds,"
f"expected status is {expected_text}"
)
log.info(
f"Checking other expected statuses cluster {down_cluster_name} could be in on ACM UI "
f"due to Node shutdown"
)
# Overall cluster status should change when only a few nodes of the cluster are down
# as per BZ 2155203, hence the below code is written
# and can be further implemented depending upon the fix.
other_expected_status = ["Unavailable", "NotReady", "Offline", "Error"]
for status in other_expected_status:
check_cluster_unavailability_again = (
acm_obj.wait_until_expected_text_is_found(
format_locator(acm_loc["cluster_status_check"], status),
expected_text=status,
timeout=10,
)
)
if check_cluster_unavailability_again:
f"Cluster {down_cluster_name} is in {status} state on ACM UI"
acm_obj.take_screenshot()
log.info("Navigate back to Clusters page")
acm_obj.do_click(acm_loc["clusters-page"])
return True
log.error(f"Down cluster {down_cluster_name} status check failed")
acm_obj.take_screenshot()
return False
else:
if not cluster_names:
cluster_names = ["local-cluster"]
for cluster in get_non_acm_cluster_config():
cluster_names.append(cluster.ENV_DATA["cluster_name"])
for cluster in cluster_names:
log.info(f"Checking status of cluster {cluster} on ACM UI")
acm_obj.do_click(format_locator(acm_loc["cluster_name"], cluster))
cluster_status = acm_obj.get_element_text(
format_locator(acm_loc["cluster_status_check"], expected_text)
)
if cluster_status == expected_text:
log.info(f"Cluster {cluster} status is {cluster_status} on ACM UI")
log.info("Navigate back to Clusters page")
acm_obj.do_click(acm_loc["clusters-page"], enable_screenshot=True)
else:
wait_for_expected_status = acm_obj.wait_until_expected_text_is_found(
format_locator(acm_loc["cluster_status_check"], expected_text),
timeout=900,
)
if wait_for_expected_status:
log.info(f"Cluster {cluster} status is {expected_text} on ACM UI")
log.info("Navigate back to Clusters page")
acm_obj.do_click(acm_loc["clusters-page"], enable_screenshot=True)
else:
log.error(
f"Cluster {cluster} status is not {expected_text} on ACM UI"
)
log.info("Navigate back to Clusters page")
acm_obj.do_click(acm_loc["clusters-page"], enable_screenshot=True)
return False
def verify_drpolicy_ui(acm_obj, scheduling_interval):
"""
Function to verify DRPolicy status and replication policy on Data Policies page of ACM console
Args:
acm_obj (AcmAddClusters): ACM Page Navigator Class
scheduling_interval (int): Scheduling interval in the DRPolicy to be verified on ACM UI
"""
ocp_version = get_ocp_version()
acm_loc = locators[ocp_version]["acm_page"]
acm_obj.navigate_data_services()
log.info("Verify status of DRPolicy on ACM UI")
policy_status = acm_obj.wait_until_expected_text_is_found(
acm_loc["drpolicy-status"], expected_text="Validated"
)
if policy_status:
log.info(f"DRPolicy status on ACM UI is {constants.DRPOLICY_STATUS}")
else:
log.error(
f"DRPolicy status on ACM UI is not {constants.DRPOLICY_STATUS}, can not proceed"
)
raise NoSuchElementException
log.info("Verify Replication policy on ACM UI")
replication_policy = acm_obj.get_element_text(acm_loc["replication-policy"])
multicluster_mode = config.MULTICLUSTER.get("multicluster_mode", None)
if multicluster_mode == constants.RDR_MODE:
assert (
replication_policy
== f"{constants.RDR_REPLICATION_POLICY}, interval: {scheduling_interval}m"
), f"Replication policy on ACM UI is {replication_policy}, can not proceed"
log.info("DRPolicy successfully validated on ACM UI")
def failover_relocate_ui(
acm_obj,
scheduling_interval=0,
workload_to_move=None,
policy_name=None,
failover_or_preferred_cluster=None,
action=constants.ACTION_FAILOVER,
timeout=120,
move_workloads_to_same_cluster=False,
):
"""
Function to perform Failover/Relocate operations via ACM UI
Args:
acm_obj (AcmAddClusters): ACM Page Navigator Class
scheduling_interval (int): scheduling interval value from DRPolicy
workload_to_move (str): Name of running workloads on which action to be taken
policy_name (str): Name of the DR policy applied to the running workload
failover_or_preferred_cluster (str): Name of the failover cluster or preferred cluster to which workloads
will be moved
action (str): action could be "Failover" or "Relocate", "Failover" is set to default
timeout (int): timeout to wait for certain elements to be found on the ACM UI
move_workloads_to_same_cluster (bool): Bool condition to test negative failover/relocate scenarios to move
running workloads to same cluster
Returns:
bool: True if the action is triggered, raises Exception if any of the mandatory argument is not provided
"""
if workload_to_move and policy_name and failover_or_preferred_cluster:
ocp_version = get_ocp_version()
acm_loc = locators[ocp_version]["acm_page"]
verify_drpolicy_ui(acm_obj, scheduling_interval=scheduling_interval)
acm_obj.navigate_applications_page()
log.info("Click on search bar")
acm_obj.do_click(acm_loc["search-bar"])
log.info("Clear existing text from search bar if any")
acm_obj.do_clear(acm_loc["search-bar"])
log.info("Enter the workload to be searched")
acm_obj.do_send_keys(acm_loc["search-bar"], text=workload_to_move)
log.info("Click on kebab menu option")
acm_obj.do_click(acm_loc["kebab-action"], enable_screenshot=True)
if action == constants.ACTION_FAILOVER:
log.info("Selecting action as Failover from ACM UI")
acm_obj.do_click(
acm_loc["failover-app"], enable_screenshot=True, timeout=timeout
)
else:
log.info("Selecting action as Relocate from ACM UI")
acm_obj.do_click(
acm_loc["relocate-app"], enable_screenshot=True, timeout=timeout
)
log.info("Click on policy dropdown")
acm_obj.do_click(acm_loc["policy-dropdown"], enable_screenshot=True)
log.info("Select policy from policy dropdown")
acm_obj.do_click(
format_locator(acm_loc["select-policy"], policy_name),
enable_screenshot=True,
)
log.info("Click on target cluster dropdown")
acm_obj.do_click(acm_loc["target-cluster-dropdown"], enable_screenshot=True)
if move_workloads_to_same_cluster:
log.info("Select target cluster same as current primary cluster on ACM UI")
acm_obj.do_click(
format_locator(
acm_loc["failover-preferred-cluster-name"],
failover_or_preferred_cluster,
),
enable_screenshot=True,
)
else:
log.info("Select target cluster on ACM UI")
acm_obj.do_click(
format_locator(
acm_loc["failover-preferred-cluster-name"],
failover_or_preferred_cluster,
),
enable_screenshot=True,
)
log.info("Check operation readiness")
if action == constants.ACTION_FAILOVER:
if move_workloads_to_same_cluster:
assert not acm_obj.wait_until_expected_text_is_found(
locator=acm_loc["operation-readiness"],
expected_text=constants.STATUS_READY,
timeout=30,
), "Failover Operation readiness check failed"
log.info("Failover readiness is False as expected")
else:
assert acm_obj.wait_until_expected_text_is_found(
locator=acm_loc["operation-readiness"],
expected_text=constants.STATUS_READY,
), "Failover Operation readiness check failed"
else:
if move_workloads_to_same_cluster:
assert not acm_obj.wait_until_expected_text_is_found(
locator=acm_loc["operation-readiness"],
expected_text=constants.STATUS_READY,
timeout=30,
), "Relocate Operation readiness check failed"
log.info("Relocate readiness is False as expected")
else:
assert acm_obj.wait_until_expected_text_is_found(
locator=acm_loc["operation-readiness"],
expected_text=constants.STATUS_READY,
), "Relocate Operation readiness check failed"
initiate_btn = acm_obj.find_an_element_by_xpath(
"//button[@id='modal-intiate-action']"
)
aria_disabled = initiate_btn.get_attribute("aria-disabled")
if move_workloads_to_same_cluster:
if aria_disabled == "false":
log.error(
"Initiate button in enabled to failover/relocate on the same cluster"
)
acm_obj.take_screenshot()
return False
else:
log.info(
"As expected, initiate button is disabled to failover/relocate on the same cluster"
)
acm_obj.take_screenshot()
return True
log.info("Click on subscription dropdown")
acm_obj.do_click(acm_loc["subscription-dropdown"], enable_screenshot=True)
# TODO: Commented below lines due to Regression BZ2208637
# log.info("Check peer readiness")
# assert acm_obj.wait_until_expected_text_is_found(
# locator=acm_loc["peer-ready"],
# expected_text=constants.PEER_READY,
# ), f"Peer is not ready, can not initiate {action}"
acm_obj.take_screenshot()
if aria_disabled == "true":
log.error("Initiate button in not enabled to failover/relocate")
return False
else:
log.info("Click on Initiate button to failover/relocate")
acm_obj.do_click(acm_loc["initiate-action"], enable_screenshot=True)
if action == constants.ACTION_FAILOVER:
log.info("Failover trigerred from ACM UI")
else:
log.info("Relocate trigerred from ACM UI")
acm_obj.take_screenshot()
log.info("Close the action modal")
acm_obj.do_click(acm_loc["close-action-modal"], enable_screenshot=True)
return True
else:
log.error(
"Incorrect or missing params to perform Failover/Relocate operation from ACM UI"
)
raise NotImplementedError
def verify_failover_relocate_status_ui(
acm_obj, action=constants.ACTION_FAILOVER, timeout=120
):
"""
Function to verify current status of in progress Failover/Relocate operation on ACM UI
Args:
acm_obj (AcmAddClusters): ACM Page Navigator Class
action (str): action "Failover" or "Relocate" which was taken on the workloads,
"Failover" is set to default
timeout (int): timeout to wait for certain elements to be found on the ACM UI
"""
ocp_version = get_ocp_version()
acm_loc = locators[ocp_version]["acm_page"]
data_policy_hyperlink = acm_obj.wait_until_expected_text_is_found(
locator=acm_loc["data-policy-hyperlink"],
expected_text="1 policy",
timeout=timeout,
)
if data_policy_hyperlink:
log.info(
"Click on drpolicy hyperlink under Data policy column on Applications page"
)
acm_obj.do_click(acm_loc["data-policy-hyperlink"], enable_screenshot=True)
else:
log.error(
"drpolicy hyperlink under Data policy column on Applications page not found,"
"can not proceed with verification"
)
raise NoSuchElementException
log.info("Click on View more details")
acm_obj.do_click(acm_loc["view-more-details"], enable_screenshot=True)
log.info("Verifying failover/relocate status on ACM UI")
if action == constants.ACTION_FAILOVER:
action_status = acm_obj.wait_until_expected_text_is_found(
acm_loc["action-status-failover"],
expected_text="FailedOver",
timeout=timeout,
)
fetch_status = acm_obj.get_element_text(acm_loc["action-status-failover"])
assert action_status, "Failover verification from ACM UI failed"
log.info(f"{action} successfully verified on ACM UI, status is {fetch_status}")
else:
action_status = acm_obj.wait_until_expected_text_is_found(
acm_loc["action-status-relocate"],
expected_text="Relocated",
timeout=timeout,
)
fetch_status = acm_obj.get_element_text(acm_loc["action-status-relocate"])
assert action_status, "Relocate verification from ACM UI failed"
log.info(f"{action} successfully verified on ACM UI, status is {fetch_status}")
close_action_modal = acm_obj.wait_until_expected_text_is_found(
acm_loc["close-action-modal"], expected_text="Close", timeout=120
)
if close_action_modal:
log.info("Close button found")
acm_obj.do_click_by_xpath("//*[text()='Close']")
log.info("Data policy modal page closed")
|
438ca23fa7d617c6c84f7ed31aa49d68ca07a94c
|
554718851656376ad2bceb282de30459167ffeb2
|
/smdebug/mxnet/singleton_utils.py
|
4da2498b91197fa3482ce3a4aabb7b023b403f36
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
singleton_utils.py
|
"""
Easy-to-use methods for getting the singleton SessionHook.
This is abstracted into its own module to prevent circular import problems.
Sample usage (in AWS-MXNet repo):
import smdebug.mxnet as smd
hook = smd.hook()
"""
# First Party
import smdebug.core.singleton_utils as sutils
from smdebug.core.singleton_utils import del_hook, set_hook # noqa
def get_hook(json_config_path=None) -> "Hook":
from smdebug.mxnet.hook import Hook
return sutils.get_hook(
json_config_path=json_config_path, hook_class=Hook, create_if_not_exists=True
)
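# Minimal usage sketch (assumption: smdebug has already been configured, e.g.
# via a JSON file; the path below is a hypothetical placeholder). It simply
# retrieves the singleton hook defined above.
#
# from smdebug.mxnet.singleton_utils import get_hook
# hook = get_hook(json_config_path="/path/to/smdebug_config.json")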
|
e9e1e732d70afdca14dc799431ffeb2747537631
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractStarrynightnovelsWordpressCom.py
|
68bf15a4df9b07a611277c2ba4f98c543688e105
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
feed_parse_extractStarrynightnovelsWordpressCom.py
|
def extractStarrynightnovelsWordpressCom(item):
'''
Parser for 'starrynightnovels.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Shini Yasui Kōshaku Reijō to Shichi-nin no Kikōshi', 'Shini Yasui Kōshaku Reijō to Shichi-nin no Kikōshi', 'translated'),
('Stepbrother\'s Diary', 'Lean Tuna and Her Stepbrother’s Plan to Become a Fatty Tuna', 'translated'),
('MaguToro', 'Lean Tuna and Her Stepbrother’s Plan to Become a Fatty Tuna', 'translated'),
('Lewd Game', 'I Decided to Participate in a Lewd Game in My Dream', 'translated'),
('summoned hero', 'I Summoned the Hero, to the Present Age', 'translated'),
('Seven Nobles', 'Duke\'s Daughter who is Liable to Die and the Seven Nobles', 'translated'),
('Erica', 'Duke\'s Daughter who is Liable to Die and the Seven Nobles', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
chp_prefixes = [
('IDWBV – ', 'I Don’t Want to Become a Villainess, So I Aim at Becoming a Perfect Lady Together with the Prince!', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
edc72604308d853c0b5785dee3513f0f4cc30a0c
|
594555ce60da5dd32e34b87ea48f0731e19adc9d
|
/models/DeformableConvNets/lib/utils/show_boxes.py
|
42b7ec2105603bb42f395e5305dc57cc9fa398b4
|
[
"MIT"
] |
permissive
|
i-pan/kaggle-rsna18
|
97a4a016183ae19a3690f0c89454339fdfdaf2a9
|
2db498fe99615d935aa676f04847d0c562fd8e46
|
refs/heads/master
| 2022-02-09T16:39:53.996841
| 2021-12-23T23:49:23
| 2021-12-23T23:49:23
| 156,621,490
| 227
| 72
|
MIT
| 2021-12-23T23:49:45
| 2018-11-07T23:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
show_boxes.py
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yi Li, Haocheng Zhang
# --------------------------------------------------------
import matplotlib.pyplot as plt
from random import random as rand
def show_boxes(im, dets, classes, scale = 1.0):
plt.cla()
plt.axis("off")
plt.imshow(im)
for cls_idx, cls_name in enumerate(classes):
cls_dets = dets[cls_idx]
for det in cls_dets:
bbox = det[:4] * scale
color = (rand(), rand(), rand())
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=2.5)
plt.gca().add_patch(rect)
if cls_dets.shape[1] == 5:
score = det[-1]
plt.gca().text(bbox[0], bbox[1],
'{:s} {:.3f}'.format(cls_name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=9, color='white')
plt.show()
return im
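# Usage sketch (assumption, not from the original file): `dets` is a per-class
# list of N x 5 arrays [x1, y1, x2, y2, score], matching how the loop above
# indexes it. The image and box below are synthetic placeholders.
#
# import numpy as np
# im = np.zeros((300, 300, 3), dtype=np.uint8)
# dets = [np.array([[20.0, 30.0, 120.0, 160.0, 0.92]])]  # one class, one box
# show_boxes(im, dets, classes=["opacity"], scale=1.0)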
|
6a774b3c94e5daee6f8edbc9711451c85f99d85c
|
0339ff270893ee0787ed9818aab05dd56c185e92
|
/showyourwork/workflow/rules/preprocess.smk
|
3ad10bfda41472d35437660ee699ab4098262871
|
[
"MIT"
] |
permissive
|
showyourwork/showyourwork
|
27a985642c497079eaff43d494456510de46af8d
|
acd9a28f2a85c3a954f9a9c6dc99f3aa1d947c7d
|
refs/heads/main
| 2023-09-01T15:45:31.115438
| 2023-08-24T12:02:03
| 2023-08-24T12:02:03
| 383,577,518
| 247
| 31
|
MIT
| 2023-09-12T12:39:11
| 2021-07-06T19:25:57
|
TeX
|
UTF-8
|
Python
| false
| false
| 3,037
|
smk
|
preprocess.smk
|
"""
Defines the rule ``syw__preprocess`` to parse the config and build the
workflow graph.
Runs the script :doc:`preprocess` to generate the ``.showyourwork/config.json``
file containing metadata about the build and the workflow graph.
"""
from showyourwork import paths
# Allow user to define their own tectonic.yml file in their repo:
tectonic_yml = paths.user().repo / "tectonic.yml"
if not tectonic_yml.exists():
tectonic_yml = paths.showyourwork().envs / "tectonic.yml"
rule:
"""
Setup the temporary files for compilation.
"""
name:
"syw__preprocess_setup"
message:
"Preprocess: Setting up..."
input:
config["ms_tex"],
config["tex_files_in"],
"showyourwork.yml",
"zenodo.yml" if (paths.user().repo / "zenodo.yml").exists() else [],
stylesheet=(paths.showyourwork().resources / "styles" / "preprocess.tex").as_posix()
output:
temporary_tex_files(root=paths.user().preprocess),
compile_dir=directory(paths.user().preprocess.as_posix()),
params:
metadata=False
script:
"../scripts/compile_setup.py"
rule:
"""
Compile the manuscript to generate the XML tree of the document.
"""
name:
"syw__preprocess_xml"
message:
"Preprocess: Generating XML tree..."
input:
temporary_tex_files(root=paths.user().preprocess),
"showyourwork.yml",
compile_dir=paths.user().preprocess.as_posix()
output:
(paths.user().preprocess / "showyourwork.xml").as_posix()
conda:
tectonic_yml.as_posix()
params:
user_args=" ".join(config["user_args"])
shell:
"""
cd "{input.compile_dir}"
tectonic \\
--chatter minimal \\
--keep-logs \\
--keep-intermediates \\
-r 0 \\
{params.user_args} \\
"{input[0]}"
"""
rule:
"""
Generate a `config.json` file for the main build.
This rule builds the article using ``tectonic``, but re-defines ``figure``,
``caption``, and ``label`` commands to print XML tags to a special log file.
This way, we can use TeX to construct a full XML tree of the document for us,
without any need for parsing the TeX file ourselves. This XML tree is then
used to determine relationships between the figure scripts and the figure
files.
This rule also assembles information about the datasets and other script
dependencies, as well as metadata about the git repo. It then packages
all this up alongside the user's config settings into the file
`config.json`, which is used as input to the main `showyourwork`
workflow.
"""
name:
"syw__preprocess"
message:
"Preprocess: Setting up the workflow..."
input:
(paths.user().preprocess / "showyourwork.xml").as_posix()
output:
config["config_json"],
script:
"../scripts/preprocess.py"
|
d248ab0ed6ab8c9a84e7fe922b027bbe201a3091
|
3a61d74a32de232682867a1063944cad4daaa47f
|
/custom_components/elasticsearch/config_flow.py
|
2564ec1721eb747f2c8eabe12cf7888061d38d69
|
[
"MIT"
] |
permissive
|
legrego/homeassistant-elasticsearch
|
c131eacf125bb8008fa85dfb477375ec6abb185c
|
fbe6f8c48c62c9b9b4b559d372d38716afe8ebf1
|
refs/heads/main
| 2023-08-22T12:27:49.162089
| 2023-08-13T23:43:52
| 2023-08-13T23:43:52
| 132,661,981
| 115
| 29
|
MIT
| 2023-09-07T18:20:09
| 2018-05-08T20:35:58
|
Python
|
UTF-8
|
Python
| false
| false
| 15,242
|
py
|
config_flow.py
|
"""Config flow for Elastic."""
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_IGNORE, SOURCE_IMPORT
from homeassistant.const import (
CONF_ALIAS,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.selector import selector
from .const import (
CONF_EXCLUDED_DOMAINS,
CONF_EXCLUDED_ENTITIES,
CONF_HEALTH_SENSOR_ENABLED,
CONF_ILM_DELETE_AFTER,
CONF_ILM_ENABLED,
CONF_ILM_MAX_SIZE,
CONF_ILM_POLICY_NAME,
CONF_INCLUDED_DOMAINS,
CONF_INCLUDED_ENTITIES,
CONF_INDEX_FORMAT,
CONF_PUBLISH_ENABLED,
CONF_PUBLISH_FREQUENCY,
CONF_PUBLISH_MODE,
CONF_SSL_CA_PATH,
)
from .const import DOMAIN as ELASTIC_DOMAIN
from .const import (
ONE_MINUTE,
PUBLISH_MODE_ALL,
PUBLISH_MODE_ANY_CHANGES,
PUBLISH_MODE_STATE_CHANGES,
)
from .errors import (
AuthenticationRequired,
CannotConnect,
InsufficientPrivileges,
UnsupportedVersion,
UntrustedCertificate,
)
from .es_gateway import ElasticsearchGateway
from .logger import LOGGER
DEFAULT_URL = "http://localhost:9200"
DEFAULT_ALIAS = "active-hass-index"
DEFAULT_INDEX_FORMAT = "hass-events"
DEFAULT_PUBLISH_ENABLED = True
DEFAULT_PUBLISH_FREQUENCY = ONE_MINUTE
DEFAULT_PUBLISH_MODE = PUBLISH_MODE_ANY_CHANGES
DEFAULT_VERIFY_SSL = True
DEFAULT_TIMEOUT_SECONDS = 30
DEFAULT_ILM_ENABLED = True
DEFAULT_ILM_POLICY_NAME = "home-assistant"
DEFAULT_ILM_MAX_SIZE = "30gb"
DEFAULT_ILM_DELETE_AFTER = "365d"
class ElasticFlowHandler(config_entries.ConfigFlow, domain=ELASTIC_DOMAIN):
"""Handle an Elastic config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return ElasticOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Elastic flow."""
self.config = {}
self.tls_schema = {
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): bool,
vol.Optional(CONF_SSL_CA_PATH, default=""): str,
}
def build_setup_schema(self):
schema = {
vol.Required(
CONF_URL, default=self.config.get(CONF_URL, "http://localhost:9200")
): str,
vol.Optional(CONF_USERNAME): str,
vol.Optional(CONF_PASSWORD): str,
}
if self.show_advanced_options:
schema[
vol.Required(
CONF_TIMEOUT,
default=self.config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT_SECONDS),
)
] = int
return schema
def build_full_config(self, user_input={}):
return {
CONF_URL: user_input.get(CONF_URL, DEFAULT_URL),
CONF_USERNAME: user_input.get(CONF_USERNAME),
CONF_PASSWORD: user_input.get(CONF_PASSWORD),
CONF_TIMEOUT: user_input.get(CONF_TIMEOUT, DEFAULT_TIMEOUT_SECONDS),
CONF_VERIFY_SSL: user_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
CONF_SSL_CA_PATH: user_input.get(CONF_SSL_CA_PATH, None),
CONF_PUBLISH_ENABLED: user_input.get(
CONF_PUBLISH_ENABLED, DEFAULT_PUBLISH_ENABLED
),
CONF_PUBLISH_FREQUENCY: user_input.get(
CONF_PUBLISH_FREQUENCY, DEFAULT_PUBLISH_FREQUENCY
),
CONF_PUBLISH_MODE: user_input.get(CONF_PUBLISH_MODE, DEFAULT_PUBLISH_MODE),
CONF_ALIAS: user_input.get(CONF_ALIAS, DEFAULT_ALIAS),
CONF_INDEX_FORMAT: user_input.get(CONF_INDEX_FORMAT, DEFAULT_INDEX_FORMAT),
CONF_EXCLUDED_DOMAINS: user_input.get(CONF_EXCLUDED_DOMAINS, []),
CONF_EXCLUDED_ENTITIES: user_input.get(CONF_EXCLUDED_ENTITIES, []),
CONF_INCLUDED_DOMAINS: user_input.get(CONF_INCLUDED_DOMAINS, []),
CONF_INCLUDED_ENTITIES: user_input.get(CONF_INCLUDED_ENTITIES, []),
CONF_HEALTH_SENSOR_ENABLED: user_input.get(
CONF_HEALTH_SENSOR_ENABLED, True
),
CONF_ILM_ENABLED: user_input.get(CONF_ILM_ENABLED, DEFAULT_ILM_ENABLED),
CONF_ILM_POLICY_NAME: user_input.get(
CONF_ILM_POLICY_NAME, DEFAULT_ILM_POLICY_NAME
),
CONF_ILM_MAX_SIZE: user_input.get(CONF_ILM_MAX_SIZE, DEFAULT_ILM_MAX_SIZE),
CONF_ILM_DELETE_AFTER: user_input.get(
CONF_ILM_DELETE_AFTER, DEFAULT_ILM_DELETE_AFTER
),
}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(self.build_setup_schema()),
)
self.config = self.build_full_config(user_input)
return await self._async_elasticsearch_login()
async def async_step_tls(self, user_input=None):
"""Handle establishing a trusted connection to Elasticsearch."""
if user_input is None:
return self.async_show_form(
step_id="tls", data_schema=vol.Schema(self.tls_schema)
)
self.config[CONF_VERIFY_SSL] = user_input[CONF_VERIFY_SSL]
if len(user_input[CONF_SSL_CA_PATH]):
self.config[CONF_SSL_CA_PATH] = user_input[CONF_SSL_CA_PATH]
return await self._async_elasticsearch_login()
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
# Check if new config entry matches any existing config entries
entries = self.hass.config_entries.async_entries(ELASTIC_DOMAIN)
for entry in entries:
# If source is ignore bypass host check and continue through loop
if entry.source == SOURCE_IGNORE:
continue
if entry.data[CONF_URL] == import_config[CONF_URL]:
self.hass.config_entries.async_update_entry(
entry=entry,
data=self.build_full_config(import_config),
options=import_config,
)
return self.async_abort(reason="updated_entry")
if entries:
LOGGER.warning("Already configured. Only a single configuration possible.")
return self.async_abort(reason="single_instance_allowed")
return await self.async_step_user(user_input=import_config)
async def _async_elasticsearch_login(self):
"""Handle connection & authentication to Elasticsearch"""
errors = {}
try:
gateway = ElasticsearchGateway(self.config)
await gateway.check_connection(self.hass)
except UntrustedCertificate:
errors["base"] = "untrusted_connection"
return self.async_show_form(
step_id="tls", data_schema=vol.Schema(self.tls_schema), errors=errors
)
except AuthenticationRequired:
errors["base"] = "invalid_auth"
except InsufficientPrivileges:
errors["base"] = "insufficient_privileges"
except CannotConnect:
errors["base"] = "cannot_connect"
except UnsupportedVersion:
errors["base"] = "unsupported_version"
except Exception as ex: # pylint: disable=broad-except
LOGGER.error(
"Unknown error connecting with Elasticsearch cluster. %s",
ex,
)
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(self.build_setup_schema()),
errors=errors,
)
return await self._async_create_entry()
async def _async_create_entry(self):
"""Create the config entry."""
existing_entry = await self.async_set_unique_id(self.config[CONF_URL])
if existing_entry:
self.hass.config_entries.async_update_entry(
existing_entry, data=self.config
)
# Reload the config entry otherwise devices will remain unavailable
self.hass.async_create_task(
self.hass.config_entries.async_reload(existing_entry.entry_id)
)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(title=self.config[CONF_URL], data=self.config)
class ElasticOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Elastic options."""
def __init__(self, config_entry):
"""Initialize Elastic options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(self, user_input=None):
"""Manage the Elastic options."""
if self.config_entry.source == SOURCE_IMPORT:
return await self.async_step_yaml(user_input)
return await self.async_step_publish_options()
async def async_step_yaml(self, user_input=None):
"""No options for yaml managed entries."""
return self.async_abort(reason="configured_via_yaml")
async def async_step_publish_options(self, user_input=None):
"""Publish Options"""
if user_input is not None:
self.options.update(user_input)
return await self.async_step_ilm_options()
return self.async_show_form(
step_id="publish_options",
data_schema=vol.Schema(await self.async_build_publish_options_schema()),
)
async def async_step_ilm_options(self, user_input=None):
"""ILM Options"""
errors = {}
if user_input is not None:
self.options.update(user_input)
return await self.async_step_health_options()
return self.async_show_form(
step_id="ilm_options",
data_schema=vol.Schema(self._build_ilm_options_schema()),
errors=errors,
)
async def async_step_health_options(self, user_input=None):
""" Health Sensor Options"""
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
return self.async_show_form(
step_id="health_options",
data_schema=vol.Schema(self._build_health_options_schema()),
)
async def _update_options(self):
"""Update config entry options."""
return self.async_create_entry(title="", data=self.options)
def _get_config_value(self, key, default):
current = self.options.get(key, default)
if current is None:
return default
return current
async def async_build_publish_options_schema(self):
"""Builds the schema for publish options."""
domains, entities = await self._async_get_domains_and_entities()
schema = {
vol.Required(
CONF_PUBLISH_ENABLED,
default=self._get_config_value(
CONF_PUBLISH_ENABLED, DEFAULT_PUBLISH_ENABLED
),
): bool,
vol.Required(
CONF_PUBLISH_FREQUENCY,
default=self._get_config_value(
CONF_PUBLISH_FREQUENCY, DEFAULT_PUBLISH_FREQUENCY
),
): int,
vol.Required(
CONF_PUBLISH_MODE,
default=self._get_config_value(CONF_PUBLISH_MODE, DEFAULT_PUBLISH_MODE),
): selector(
{
"select": {
"options": [
{"label": "All entities", "value": PUBLISH_MODE_ALL},
{
"label": "Entities with state changes",
"value": PUBLISH_MODE_STATE_CHANGES,
},
{
"label": "Entities with state or attribute changes",
"value": PUBLISH_MODE_ANY_CHANGES,
},
]
}
}
),
vol.Required(
CONF_EXCLUDED_DOMAINS,
default=self._get_config_value(CONF_EXCLUDED_DOMAINS, []),
): cv.multi_select(domains),
vol.Required(
CONF_EXCLUDED_ENTITIES,
default=self._get_config_value(CONF_EXCLUDED_ENTITIES, []),
): cv.multi_select(entities),
vol.Required(
CONF_INCLUDED_DOMAINS,
default=self._get_config_value(CONF_INCLUDED_DOMAINS, []),
): cv.multi_select(domains),
vol.Required(
CONF_INCLUDED_ENTITIES,
default=self._get_config_value(CONF_INCLUDED_ENTITIES, []),
): cv.multi_select(entities),
}
if self.show_advanced_options:
schema[
vol.Required(
CONF_INDEX_FORMAT,
default=self._get_config_value(
CONF_INDEX_FORMAT, DEFAULT_INDEX_FORMAT
),
)
] = str
schema[
vol.Required(
CONF_ALIAS,
default=self._get_config_value(CONF_ALIAS, DEFAULT_ALIAS),
)
] = str
return schema
def _build_ilm_options_schema(self):
schema = {
vol.Required(
CONF_ILM_ENABLED, default=self._get_config_value(CONF_ILM_ENABLED, True)
): bool,
vol.Required(
CONF_ILM_POLICY_NAME,
default=self._get_config_value(
CONF_ILM_POLICY_NAME, DEFAULT_ILM_POLICY_NAME
),
): str,
vol.Required(
CONF_ILM_MAX_SIZE,
default=self._get_config_value(CONF_ILM_MAX_SIZE, DEFAULT_ILM_MAX_SIZE),
): str,
vol.Required(
CONF_ILM_DELETE_AFTER,
default=self._get_config_value(
CONF_ILM_DELETE_AFTER, DEFAULT_ILM_DELETE_AFTER
),
): str,
}
return schema
def _build_health_options_schema(self):
schema = {
vol.Required(
CONF_HEALTH_SENSOR_ENABLED,
default=self._get_config_value(CONF_HEALTH_SENSOR_ENABLED, True),
): bool,
}
return schema
@callback
async def _async_get_domains_and_entities(self):
states = self.hass.states.async_all()
domains = set()
entity_ids = []
for state in states:
entity_ids.append(state.entity_id)
domains.add(state.domain)
return sorted(list(domains)), sorted(entity_ids)
|
928a5d302d4fb598b1e7f8cbafc470bd55954372
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/tests/parsers/test_sat5_insights_properties.py
|
62d61bbdab8888a22870d566ff68f57b5cb0acc3
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
test_sat5_insights_properties.py
|
import doctest
import pytest
from insights.core.exceptions import SkipComponent
from insights.parsers import sat5_insights_properties
from insights.parsers.sat5_insights_properties import Sat5InsightsProperties
from insights.tests import context_wrap
INSIGHTS_PROPERTIES = """
portalurl = https://cert-api.access.redhat.com/r/insights
enabled = true
debug = true
rpmname = redhat-access-insights
""".strip()
def test_insights_properties():
result = Sat5InsightsProperties(context_wrap(INSIGHTS_PROPERTIES))
assert result["enabled"] == 'true'
assert result.enabled is True
assert result.get("debug") == 'true'
assert result.get("rpmname") == 'redhat-access-insights'
assert result["rpmname"] == 'redhat-access-insights'
def test_doc():
env = {
'insights_props': Sat5InsightsProperties(context_wrap(INSIGHTS_PROPERTIES)),
}
failed, total = doctest.testmod(sat5_insights_properties, globs=env)
assert failed == 0
def test_AB():
with pytest.raises(SkipComponent):
Sat5InsightsProperties(context_wrap(''))
|
b4dc07846e72265da382f281147657aaf71504a1
|
caeaa54aabffb74c7c72d24d246d7a1226950b10
|
/other/gif/loga.py
|
7d5b940bce9dbecc05102985a35084ada8b0a86f
|
[] |
no_license
|
DIYer22/boxx
|
883931b570c767baca9f8ada58034340c108d535
|
69b117ee983d8a9ee3ad176af8b096acc902f3ae
|
refs/heads/master
| 2023-09-01T01:13:38.515791
| 2023-08-24T07:37:34
| 2023-08-24T07:37:34
| 112,070,868
| 452
| 45
| null | 2023-04-24T07:48:32
| 2017-11-26T09:35:12
|
Python
|
UTF-8
|
Python
| false
| false
| 148
|
py
|
loga.py
|
import numpy as np
array = np.random.normal(size=(4,3, 244, 244))
# I want to know everything about `array`
array[0] = np.inf
array[1] = np.nan
|
261e88df72362667f3add64a263d007868fee028
|
4b484246502bd0607d9f9e25078adcf49bac1e84
|
/roles/openshift_node/callback_plugins/aa_version_requirement.py
|
00494b8e19677e75eb17e1b06af4d62736567422
|
[
"Apache-2.0"
] |
permissive
|
openshift/openshift-ansible
|
b94ca8004be5373bc78b33e7c2cc4909c5ab81ab
|
2ba66d98edb01fb1ff1250053c09fddae0441b12
|
refs/heads/master
| 2023-08-23T12:26:12.586513
| 2023-06-19T08:20:58
| 2023-06-19T08:20:58
| 24,109,199
| 2,386
| 3,107
|
Apache-2.0
| 2023-09-14T16:02:38
| 2014-09-16T17:07:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
aa_version_requirement.py
|
#!/usr/bin/python
"""
This callback plugin verifies the required minimum version of Ansible
is installed for proper operation of the OpenShift Ansible Installer.
The plugin is named with leading `aa_` to ensure this plugin is loaded
first (alphanumerically) by Ansible.
"""
import sys
from pkg_resources import parse_version
from ansible import __version__
from ansible.plugins.callback import CallbackBase
from ansible.utils.display import Display
def display(*args, **kwargs):
"""Set up display function for Ansible v2"""
display_instance = Display()
display_instance.display(*args, **kwargs)
# Set to minimum required Ansible version
REQUIRED_VERSION = '2.12.2'
DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
class CallbackModule(CallbackBase):
"""
Ansible callback plugin
"""
CALLBACK_VERSION = 1.0
CALLBACK_NAME = 'version_requirement'
def __init__(self):
"""
Version verification is performed in __init__ to catch the
requirement early in the execution of Ansible and fail gracefully
"""
super(CallbackModule, self).__init__()
if not parse_version(__version__) >= parse_version(REQUIRED_VERSION):
display(
'FATAL: Current Ansible version (%s) is not supported. %s'
% (__version__, DESCRIPTION), color='red')
sys.exit(1)
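# Illustration (not part of the plugin): parse_version gives semantic ordering,
# so multi-digit components compare correctly, unlike plain string comparison.
#
# from pkg_resources import parse_version
# assert parse_version("2.12.10") >= parse_version("2.12.2")      # newer patch release passes
# assert not parse_version("2.9.27") >= parse_version("2.12.2")   # 2.9.x is rejected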
|
9851ad120a86bb158e94962276b6831e5c4c5378
|
1236eb50df4b9cf63caf9d0c6ceabe34c323bd24
|
/tools/phising_attack.py
|
44513463cb2b17612bc61d8c66bc3e0107f6dac1
|
[
"MIT"
] |
permissive
|
Z4nzu/hackingtool
|
f444de56cca7446cecc0a35c48bdb2c44d5492e9
|
f5aff5547a4506cfc62008cced6055a354ac70b9
|
refs/heads/master
| 2023-08-22T06:53:58.418816
| 2023-07-23T05:20:23
| 2023-07-23T05:20:23
| 254,832,799
| 39,265
| 4,263
|
MIT
| 2023-09-13T19:08:34
| 2020-04-11T09:21:31
|
Python
|
UTF-8
|
Python
| false
| false
| 9,062
|
py
|
phising_attack.py
|
# coding=utf-8
import os
from core import HackingTool
from core import HackingToolsCollection
class autophisher(HackingTool):
TITLE = "Autophisher RK"
DESCRIPTION = "Automated Phishing Toolkit"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/CodingRanjith/autophisher.git",
"cd autophisher"
]
RUN_COMMANDS = ["cd autophisher;sudo bash autophisher.sh"]
PROJECT_URL = "https://github.com/CodingRanjith/autophisher"
class Pyphisher(HackingTool):
TITLE = "Pyphisher"
DESCRIPTION = "Easy to use phishing tool with 77 website templates"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/KasRoudra/PyPhisher",
"cd PyPhisher/files",
"pip3 install -r requirements.txt"
]
RUN_COMMANDS = ["cd PyPhisher;sudo python3 pyphisher.py"]
PROJECT_URL = "git clone https://github.com/KasRoudra/PyPhisher"
class AdvPhishing(HackingTool):
TITLE = "AdvPhishing"
DESCRIPTION = "This is Advance Phishing Tool ! OTP PHISHING"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/Ignitetch/AdvPhishing.git",
"cd AdvPhishing;chmod 777 *;bash Linux-Setup.sh"]
RUN_COMMANDS = ["cd AdvPhishing && sudo bash AdvPhishing.sh"]
PROJECT_URL = "https://github.com/Ignitetch/AdvPhishing"
class Setoolkit(HackingTool):
TITLE = "Setoolkit"
DESCRIPTION = "The Social-Engineer Toolkit is an open-source penetration\n" \
"testing framework designed for social engine"
INSTALL_COMMANDS = [
"git clone https://github.com/trustedsec/social-engineer-toolkit/",
"cd social-engineer-toolkit && sudo python3 setup.py"
]
RUN_COMMANDS = ["sudo setoolkit"]
PROJECT_URL = "https://github.com/trustedsec/social-engineer-toolkit"
class SocialFish(HackingTool):
TITLE = "SocialFish"
DESCRIPTION = "Automated Phishing Tool & Information Collector NOTE: username is 'root' and password is 'pass'"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/UndeadSec/SocialFish.git && sudo apt-get install python3 python3-pip python3-dev -y",
"cd SocialFish && sudo python3 -m pip install -r requirements.txt"
]
RUN_COMMANDS = ["cd SocialFish && sudo python3 SocialFish.py root pass"]
PROJECT_URL = "https://github.com/UndeadSec/SocialFish"
class HiddenEye(HackingTool):
TITLE = "HiddenEye"
DESCRIPTION = "Modern Phishing Tool With Advanced Functionality And " \
"Multiple Tunnelling Services \n" \
"\t [!]https://github.com/DarkSecDevelopers/HiddenEye"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/Morsmalleo/HiddenEye.git ;sudo chmod 777 HiddenEye",
"cd HiddenEye;sudo pip3 install -r requirements.txt;sudo pip3 install requests;pip3 install pyngrok"
]
RUN_COMMANDS = ["cd HiddenEye;sudo python3 HiddenEye.py"]
PROJECT_URL = "https://github.com/Morsmalleo/HiddenEye.git"
class Evilginx2(HackingTool):
TITLE = "Evilginx2"
DESCRIPTION = "evilginx2 is a man-in-the-middle attack framework used " \
"for phishing login credentials along with session cookies,\n" \
"which in turn allows to bypass 2-factor authentication protection.\n\n\t " \
"[+]Make sure you have installed GO of version at least 1.14.0 \n" \
"[+]After installation, add this to your ~/.profile, assuming that you installed GO in /usr/local/go\n\t " \
"[+]export GOPATH=$HOME/go \n " \
"[+]export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin \n" \
"[+]Then load it with source ~/.profiles."
INSTALL_COMMANDS = [
"sudo apt-get install git make;go get -u github.com/kgretzky/evilginx2",
"cd $GOPATH/src/github.com/kgretzky/evilginx2;make",
"sudo make install;sudo evilginx"
]
RUN_COMMANDS = ["sudo evilginx"]
PROJECT_URL = "https://github.com/kgretzky/evilginx2"
class ISeeYou(HackingTool):
TITLE = "I-See_You"
DESCRIPTION = "[!] ISeeYou is a tool to find Exact Location of Victom By" \
" User SocialEngineering or Phishing Engagement..\n" \
"[!] Users can expose their local servers to the Internet " \
"and decode the location coordinates by looking at the log file"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/Viralmaniar/I-See-You.git",
"cd I-See-You && sudo chmod u+x ISeeYou.sh"
]
RUN_COMMANDS = ["cd I-See-You && sudo bash ISeeYou.sh"]
PROJECT_URL = "https://github.com/Viralmaniar/I-See-You"
class SayCheese(HackingTool):
TITLE = "SayCheese"
DESCRIPTION = "Take webcam shots from target just sending a malicious link"
INSTALL_COMMANDS = ["sudo git clone https://github.com/hangetzzu/saycheese"]
RUN_COMMANDS = ["cd saycheese && sudo bash saycheese.sh"]
PROJECT_URL = "https://github.com/hangetzzu/saycheese"
class QRJacking(HackingTool):
TITLE = "QR Code Jacking"
DESCRIPTION = "QR Code Jacking (Any Website)"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/cryptedwolf/ohmyqr.git && sudo apt -y install scrot"]
RUN_COMMANDS = ["cd ohmyqr && sudo bash ohmyqr.sh"]
PROJECT_URL = "https://github.com/cryptedwolf/ohmyqr"
class WifiPhisher(HackingTool):
TITLE = "WifiPhisher"
DESCRIPTION = "The Rogue Access Point Framework"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/wifiphisher/wifiphisher.git",
"cd wifiphisher"]
RUN_COMMANDS = ["cd wifiphisher && sudo python setup.py"]
PROJECT_URL = "https://github.com/wifiphisher/wifiphisher"
class BlackEye(HackingTool):
TITLE = "BlackEye"
DESCRIPTION = "The ultimate phishing tool with 38 websites available!"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/thelinuxchoice/blackeye",
"cd blackeye "]
RUN_COMMANDS = ["cd blackeye && sudo bash blackeye.sh"]
PROJECT_URL = "https://github.com/An0nUD4Y/blackeye"
class ShellPhish(HackingTool):
TITLE = "ShellPhish"
DESCRIPTION = "Phishing Tool for 18 social media"
INSTALL_COMMANDS = ["git clone https://github.com/An0nUD4Y/shellphish.git"]
RUN_COMMANDS = ["cd shellphish;sudo bash shellphish.sh"]
PROJECT_URL = "https://github.com/An0nUD4Y/shellphish"
class Thanos(HackingTool):
TITLE = "Thanos"
DESCRIPTION = "Browser to Browser Phishingtoolkit"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/TridevReddy/Thanos.git",
"cd Thanos && sudo chmod -R 777 Thanos.sh"
]
RUN_COMMANDS = ["cd Thanos;sudo bash Thanos.sh"]
PROJECT_URL = "https://github.com/TridevReddy/Thanos"
class QRLJacking(HackingTool):
TITLE = "QRLJacking"
DESCRIPTION = "QRLJacking"
INSTALL_COMMANDS = [
"git clone https://github.com/OWASP/QRLJacking.git",
"cd QRLJacking",
"git clone https://github.com/mozilla/geckodriver.git",
"chmod +x geckodriver",
"sudo mv -f geckodriver /usr/local/share/geckodriver",
"sudo ln -s /usr/local/share/geckodriver /usr/local/bin/geckodriver",
"sudo ln -s /usr/local/share/geckodriver /usr/bin/geckodriver",
"cd QRLJacker;pip3 install -r requirements.txt"
]
RUN_COMMANDS = ["cd QRLJacking/QRLJacker;python3 QrlJacker.py"]
PROJECT_URL = "https://github.com/OWASP/QRLJacking"
class Maskphish(HackingTool):
TITLE = "Miskphish"
DESCRIPTION = "Hide phishing URL under a normal looking URL (google.com or facebook.com)"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/jaykali/maskphish.git",
"cd maskphish"]
RUN_COMMANDS = ["cd maskphish;sudo bash maskphish.sh"]
PROJECT_URL = "https://github.com/jaykali/maskphish"
class BlackPhish(HackingTool):
TITLE = "BlackPhish"
INSTALL_COMMANDS = [
"sudo git clone https://github.com/iinc0gnit0/BlackPhish.git",
"cd BlackPhish;sudo bash install.sh"
]
RUN_COMMANDS = ["cd BlackPhish;sudo python3 blackphish.py"]
PROJECT_URL = "https://github.com/iinc0gnit0/BlackPhish"
def __init__(self):
super(BlackPhish, self).__init__([('Update', self.update)])
def update(self):
os.system("cd BlackPhish;sudo bash update.sh")
class dnstwist(HackingTool):
TITLE = 'dnstwist'
INSTALL_COMMANDS = ['sudo git clone https://github.com/elceef/dnstwist.git', 'cd dnstwist']
RUN_COMMANDS = ['cd dnstwist;sudo python3 dnstwist.py']
PROJECT_URL = 'https://github.com/elceef/dnstwist'
class PhishingAttackTools(HackingToolsCollection):
TITLE = "Phishing attack tools"
TOOLS = [
autophisher(),
Pyphisher(),
AdvPhishing(),
Setoolkit(),
SocialFish(),
HiddenEye(),
Evilginx2(),
ISeeYou(),
SayCheese(),
QRJacking(),
BlackEye(),
ShellPhish(),
Thanos(),
QRLJacking(),
BlackPhish(),
Maskphish(),
dnstwist()
]
|
54d9df5e0d0dce4d5b8d1042f873e6e7abddb3de
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/sql/schema.py
|
008ae2c0059b722e8dbf302e3684fa09e4cb5c24
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 227,728
|
py
|
schema.py
|
# sql/schema.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :class:`~sqlalchemy.sql.expression.`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import annotations
from abc import ABC
import collections
from enum import Enum
import operator
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence as _typing_Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from . import coercions
from . import ddl
from . import roles
from . import type_api
from . import visitors
from .base import _DefaultDescriptionTuple
from .base import _NoneName
from .base import _SentinelColumnCharacterization
from .base import _SentinelDefaultCharacterization
from .base import DedupeColumnCollection
from .base import DialectKWArgs
from .base import Executable
from .base import SchemaEventTarget as SchemaEventTarget
from .coercions import _document_text_coercion
from .elements import ClauseElement
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import quoted_name
from .elements import TextClause
from .selectable import TableClause
from .type_api import to_instance
from .visitors import ExternallyTraversible
from .visitors import InternalTraversal
from .. import event
from .. import exc
from .. import inspection
from .. import util
from ..util import HasMemoized
from ..util.typing import Final
from ..util.typing import Literal
from ..util.typing import Protocol
from ..util.typing import Self
from ..util.typing import TypedDict
from ..util.typing import TypeGuard
if typing.TYPE_CHECKING:
from ._typing import _AutoIncrementType
from ._typing import _DDLColumnArgument
from ._typing import _InfoType
from ._typing import _TextCoercedExpressionArgument
from ._typing import _TypeEngineArgument
from .base import ReadOnlyColumnCollection
from .compiler import DDLCompiler
from .elements import BindParameter
from .functions import Function
from .type_api import TypeEngine
from .visitors import _TraverseInternalsType
from .visitors import anon_map
from ..engine import Connection
from ..engine import Engine
from ..engine.interfaces import _CoreMultiExecuteParams
from ..engine.interfaces import CoreExecuteOptionsParameter
from ..engine.interfaces import ExecutionContext
from ..engine.mock import MockConnection
from ..engine.reflection import _ReflectionInfo
from ..sql.selectable import FromClause
_T = TypeVar("_T", bound="Any")
_SI = TypeVar("_SI", bound="SchemaItem")
_TAB = TypeVar("_TAB", bound="Table")
_CreateDropBind = Union["Engine", "Connection", "MockConnection"]
_ConstraintNameArgument = Optional[Union[str, _NoneName]]
_ServerDefaultArgument = Union[
"FetchedValue", str, TextClause, ColumnElement[Any]
]
class SchemaConst(Enum):
RETAIN_SCHEMA = 1
"""Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence`
or in some cases a :class:`_schema.ForeignKey` object, in situations
where the object is being copied for a :meth:`.Table.to_metadata`
operation, should retain the schema name that it already has.
"""
BLANK_SCHEMA = 2
"""Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence`
should have 'None' for its schema, even if the parent
:class:`_schema.MetaData` has specified a schema.
.. seealso::
:paramref:`_schema.MetaData.schema`
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
"""
NULL_UNSPECIFIED = 3
"""Symbol indicating the "nullable" keyword was not passed to a Column.
This is used to distinguish between the use case of passing
``nullable=None`` to a :class:`.Column`, which has special meaning
on some backends such as SQL Server.
"""
RETAIN_SCHEMA: Final[
Literal[SchemaConst.RETAIN_SCHEMA]
] = SchemaConst.RETAIN_SCHEMA
BLANK_SCHEMA: Final[
Literal[SchemaConst.BLANK_SCHEMA]
] = SchemaConst.BLANK_SCHEMA
NULL_UNSPECIFIED: Final[
Literal[SchemaConst.NULL_UNSPECIFIED]
] = SchemaConst.NULL_UNSPECIFIED
def _get_table_key(name: str, schema: Optional[str]) -> str:
if schema is None:
return name
else:
return schema + "." + name
# this should really be in sql/util.py but we'd have to
# break an import cycle
def _copy_expression(
expression: ColumnElement[Any],
source_table: Optional[Table],
target_table: Optional[Table],
) -> ColumnElement[Any]:
if source_table is None or target_table is None:
return expression
fixed_source_table = source_table
fixed_target_table = target_table
def replace(
element: ExternallyTraversible, **kw: Any
) -> Optional[ExternallyTraversible]:
if (
isinstance(element, Column)
and element.table is fixed_source_table
and element.key in fixed_source_table.c
):
return fixed_target_table.c[element.key]
else:
return None
return cast(
ColumnElement[Any],
visitors.replacement_traverse(expression, {}, replace),
)
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = "schema_item"
create_drop_stringify_dialect = "default"
def _init_items(self, *args: SchemaItem, **kw: Any) -> None:
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
try:
spwd = item._set_parent_with_dispatch
except AttributeError as err:
raise exc.ArgumentError(
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got %r" % item
) from err
else:
spwd(self, **kw)
def __repr__(self) -> str:
return util.generic_repr(self, omit_kwarg=["info"])
@util.memoized_property
def info(self) -> _InfoType:
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`_schema.Table` and :class:`_schema.Column`.
"""
return {}
def _schema_item_copy(self, schema_item: _SI) -> _SI:
if "info" in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
_use_schema_map = True
class HasConditionalDDL:
"""define a class that includes the :meth:`.HasConditionalDDL.ddl_if`
method, allowing for conditional rendering of DDL.
Currently applies to constraints and indexes.
.. versionadded:: 2.0
"""
_ddl_if: Optional[ddl.DDLIf] = None
def ddl_if(
self,
dialect: Optional[str] = None,
callable_: Optional[ddl.DDLIfCallable] = None,
state: Optional[Any] = None,
) -> Self:
r"""apply a conditional DDL rule to this schema item.
These rules work in a similar manner to the
:meth:`.ExecutableDDLElement.execute_if` callable, with the added
feature that the criteria may be checked within the DDL compilation
phase for a construct such as :class:`.CreateTable`.
:meth:`.HasConditionalDDL.ddl_if` currently applies towards the
:class:`.Index` construct as well as all :class:`.Constraint`
constructs.
:param dialect: string name of a dialect, or a tuple of string names
to indicate multiple dialect types.
:param callable\_: a callable that is constructed using the same form
as that described in
:paramref:`.ExecutableDDLElement.execute_if.callable_`.
:param state: any arbitrary object that will be passed to the
callable, if present.
.. versionadded:: 2.0
.. seealso::
:ref:`schema_ddl_ddl_if` - background and usage examples
"""
self._ddl_if = ddl.DDLIf(dialect, callable_, state)
return self
class HasSchemaAttr(SchemaItem):
"""schema item that includes a top-level schema name"""
schema: Optional[str]
class Table(
DialectKWArgs, HasSchemaAttr, TableClause, inspection.Inspectable["Table"]
):
r"""Represent a table in a database.
e.g.::
mytable = Table(
"mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`_schema.Table`
object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`_schema.MetaData` object. Calling the :class:`_schema.Table`
constructor with the same name and same :class:`_schema.MetaData` argument
a second time will return the *same* :class:`_schema.Table`
object - in this way
the :class:`_schema.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "table"
if TYPE_CHECKING:
@util.ro_non_memoized_property
def primary_key(self) -> PrimaryKeyConstraint:
...
@util.ro_non_memoized_property
def foreign_keys(self) -> Set[ForeignKey]:
...
_columns: DedupeColumnCollection[Column[Any]]
_sentinel_column: Optional[Column[Any]]
constraints: Set[Constraint]
"""A collection of all :class:`_schema.Constraint` objects associated with
this :class:`_schema.Table`.
Includes :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`, :class:`_schema.UniqueConstraint`,
:class:`_schema.CheckConstraint`. A separate collection
:attr:`_schema.Table.foreign_key_constraints` refers to the collection
of all :class:`_schema.ForeignKeyConstraint` objects, and the
:attr:`_schema.Table.primary_key` attribute refers to the single
:class:`_schema.PrimaryKeyConstraint` associated with the
:class:`_schema.Table`.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.primary_key`
:attr:`_schema.Table.foreign_key_constraints`
:attr:`_schema.Table.indexes`
:class:`_reflection.Inspector`
"""
indexes: Set[Index]
"""A collection of all :class:`_schema.Index` objects associated with this
:class:`_schema.Table`.
.. seealso::
:meth:`_reflection.Inspector.get_indexes`
"""
_traverse_internals: _TraverseInternalsType = (
TableClause._traverse_internals
+ [("schema", InternalTraversal.dp_string)]
)
if TYPE_CHECKING:
@util.ro_non_memoized_property
def columns(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
@util.ro_non_memoized_property
def exported_columns(
self,
) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
@util.ro_non_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Tuple[Any, ...]:
if self._annotations:
return (self,) + self._annotations_cache_key
else:
return (self,)
if not typing.TYPE_CHECKING:
# typing tools seem to be inconsistent in how they handle
# __new__, so suggest this pattern for classes that use
# __new__. apply typing to the __init__ method normally
@util.deprecated_params(
mustexist=(
"1.4",
"Deprecated alias of :paramref:`_schema.Table.must_exist`",
),
)
def __new__(cls, *args: Any, **kw: Any) -> Any:
return cls._new(*args, **kw)
@classmethod
def _new(cls, *args: Any, **kw: Any) -> Any:
if not args and not kw:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError(
"Table() takes at least two positional-only "
"arguments 'name' and 'metadata'"
)
schema = kw.get("schema", None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.get("keep_existing", False)
extend_existing = kw.get("extend_existing", False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
must_exist = kw.pop("must_exist", kw.pop("mustexist", False))
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key
)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if must_exist:
raise exc.InvalidRequestError("Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table.__init__(name, metadata, *args, _no_init=False, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except Exception:
with util.safe_reraise():
metadata._remove_table(name, schema)
def __init__(
self,
name: str,
metadata: MetaData,
*args: SchemaItem,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
quote: Optional[bool] = None,
quote_schema: Optional[bool] = None,
autoload_with: Optional[Union[Engine, Connection]] = None,
autoload_replace: bool = True,
keep_existing: bool = False,
extend_existing: bool = False,
resolve_fks: bool = True,
include_columns: Optional[Collection[str]] = None,
implicit_returning: bool = True,
comment: Optional[str] = None,
info: Optional[Dict[Any, Any]] = None,
listeners: Optional[
_typing_Sequence[Tuple[str, Callable[..., Any]]]
] = None,
prefixes: Optional[_typing_Sequence[str]] = None,
# used internally in the metadata.reflect() process
_extend_on: Optional[Set[Table]] = None,
# used by __new__ to bypass __init__
_no_init: bool = True,
# dialect-specific keyword args
**kw: Any,
) -> None:
r"""Constructor for :class:`_schema.Table`.
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`_schema.Table`
within
the owning :class:`_schema.MetaData` collection.
Additional calls to :class:`_schema.Table` with the same name,
metadata,
and schema name will return the same :class:`_schema.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`_schema.MetaData`
object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connection` or :class:`.Engine`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`_schema.Column`
objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and
:class:`_schema.ForeignKeyConstraint`.
:param autoload_replace: Defaults to ``True``; when using
:paramref:`_schema.Table.autoload_with`
in conjunction with :paramref:`_schema.Table.extend_existing`,
indicates
that :class:`_schema.Column` objects present in the already-existing
:class:`_schema.Table`
object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`_schema.Column` objects
specified programmatically within the call to :class:`_schema.Table`
that
also is autoloading; those :class:`_schema.Column` objects will always
replace existing columns of the same name when
:paramref:`_schema.Table.extend_existing` is ``True``.
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.extend_existing`
:param autoload_with: An :class:`_engine.Engine` or
:class:`_engine.Connection` object,
or a :class:`_reflection.Inspector` object as returned by
:func:`_sa.inspect`
against one, with which this :class:`_schema.Table`
object will be reflected.
When set to a non-None value, the autoload process will take place
for this table against the given engine or connection.
.. seealso::
:ref:`metadata_reflection_toplevel`
:meth:`_events.DDLEvents.column_reflect`
:ref:`metadata_reflection_dbagnostic_types`
:param extend_existing: When ``True``, indicates that if this
:class:`_schema.Table` is already present in the given
:class:`_schema.MetaData`,
apply further arguments within the constructor to the existing
:class:`_schema.Table`.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
:paramref:`_schema.Table.extend_existing`
will also work in conjunction
with :paramref:`_schema.Table.autoload_with` to run a new reflection
operation against the database, even if a :class:`_schema.Table`
of the same name is already present in the target
:class:`_schema.MetaData`; newly reflected :class:`_schema.Column`
objects
and other options will be added into the state of the
:class:`_schema.Table`, potentially overwriting existing columns
and options of the same name.
As is always the case with :paramref:`_schema.Table.autoload_with`,
:class:`_schema.Column` objects can be specified in the same
:class:`_schema.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`_schema.Column`
objects
both reflected from the database, as well as the given
:class:`_schema.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload_with=engine
)
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.autoload_replace`
:paramref:`_schema.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used, typically by the ORM, in order to fetch
server-generated values such as primary key values and
server side defaults, on those backends which support RETURNING.
In modern SQLAlchemy there is generally no reason to alter this
setting, except for some backend specific cases
(see :ref:`mssql_triggers` in the SQL Server dialect documentation
for one such example).
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns that
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
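For example, a minimal reflection sketch that loads only two columns (the
engine, metadata and column names here are illustrative)::
    addresses = Table(
        "addresses",
        metadata_obj,
        autoload_with=engine,
        include_columns=["id", "email_address"],
    )
Columns not named in the list are simply absent from ``addresses.c``; they
are not altered or dropped in the database itself.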
:param resolve_fks: Whether or not to reflect :class:`_schema.Table`
objects
related to this one via :class:`_schema.ForeignKey` objects, when
:paramref:`_schema.Table.autoload_with` is
specified. Defaults to True. Set to False to disable reflection of
related tables as :class:`_schema.ForeignKey`
objects are encountered; may be
used either to save on SQL calls or to avoid issues with related tables
that can't be accessed. Note that if a related table is already present
in the :class:`_schema.MetaData` collection, or becomes present later,
a
:class:`_schema.ForeignKey` object associated with this
:class:`_schema.Table` will
resolve to that table normally.
.. versionadded:: 1.3
.. seealso::
:paramref:`.MetaData.reflect.resolve_fks`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`_schema.MetaData`, ignore
further arguments within the constructor to the existing
:class:`_schema.Table`, and return the :class:`_schema.Table`
object as
originally created. This is to allow a function that wishes
to define a new :class:`_schema.Table` on first call, but on
subsequent calls will return the same :class:`_schema.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
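For example, a helper function that may be called repeatedly without
re-applying column or constraint definitions is a typical use for
:paramref:`_schema.Table.keep_existing` (the table and helper names here
are illustrative)::
    def get_user_table(metadata_obj):
        return Table(
            "user",
            metadata_obj,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
            keep_existing=True,
        )
The first call registers the table with ``metadata_obj``; subsequent calls
return the already-registered :class:`_schema.Table` unchanged.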
.. seealso::
:paramref:`_schema.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`_schema.Table` before
the "autoload" process begins. Historically this has been intended
for use with the :meth:`.DDLEvents.column_reflect` event; however,
note that this event hook may now be associated with the
:class:`_schema.MetaData` object directly::
def listen_for_reflect(inspector, table, column_info):
    "handle the column reflection event"
    # receives each reflected column's information dictionary
    # ...
t = Table(
    'sometable',
    metadata,
    autoload_with=engine,
    listeners=[
        ('column_reflect', listen_for_reflect)
    ])
.. seealso::
:meth:`_events.DDLEvents.column_reflect`
:param must_exist: When ``True``, indicates that this Table must already
be present in the given :class:`_schema.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
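For example, a sketch that renders a ``CREATE TEMPORARY TABLE`` statement
(the table name here is illustrative)::
    scratch = Table(
        "scratch_data",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        prefixes=["TEMPORARY"],
    )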
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the table identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
.. note:: setting this flag to ``False`` will not provide
case-insensitive behavior for table reflection; table reflection
will always search for a mixed-case name in a case sensitive
fashion. Case insensitive names are specified in SQLAlchemy only
by stating the name with all lower case characters.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`_schema.MetaData` of this :class:`_schema.Table`
specifies its
own :paramref:`_schema.MetaData.schema` parameter,
then that schema name will
be applied to this :class:`_schema.Table`
if the schema parameter here is set
to ``None``. To set a blank schema name on a :class:`_schema.Table`
that
would otherwise use the schema set on the owning
:class:`_schema.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the schema
name, specify the flag ``quote_schema=True`` to the constructor, or use
the :class:`.quoted_name` construct to specify the name.
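For example, a sketch contrasting the schema inherited from the owning
:class:`_schema.MetaData` with an explicit blank schema (the schema and
table names here are illustrative)::
    metadata_obj = MetaData(schema="remote_banks")
    # uses the "remote_banks" schema from the MetaData
    financial_info = Table(
        "financial_info",
        metadata_obj,
        Column("id", Integer, primary_key=True),
    )
    # explicitly uses no schema at all
    local_scratch = Table(
        "local_scratch",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        schema=BLANK_SCHEMA,
    )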
:param comment: Optional string that will render an SQL comment on table
creation.
.. versionadded:: 1.2 Added the :paramref:`_schema.Table.comment`
parameter
to :class:`_schema.Table`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
""" # noqa: E501
if _no_init:
# don't run __init__ from __new__ by default;
# __new__ has a specific place that __init__ is called
return
super().__init__(quoted_name(name, quote))
self.metadata = metadata
if schema is None:
self.schema = metadata.schema
elif schema is BLANK_SCHEMA:
self.schema = None
else:
quote_schema = quote_schema
assert isinstance(schema, str)
self.schema = quoted_name(schema, quote_schema)
self._sentinel_column = None
self.indexes = set()
self.constraints = set()
PrimaryKeyConstraint(
_implicit_generated=True
)._set_parent_with_dispatch(self)
self.foreign_keys = set() # type: ignore
self._extra_dependencies: Set[Table] = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
self.implicit_returning = implicit_returning
_reflect_info = kw.pop("_reflect_info", None)
self.comment = comment
if info is not None:
self.info = info
if listeners is not None:
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = prefixes if prefixes else []
self._extra_kwargs(**kw)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload_with is not None:
self._autoload(
metadata,
autoload_with,
include_columns,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
resolve_fks=resolve_fks,
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(
*args,
allow_replacements=extend_existing
or keep_existing
or autoload_with,
all_names={},
)
def _autoload(
self,
metadata: MetaData,
autoload_with: Union[Engine, Connection],
include_columns: Optional[Collection[str]],
exclude_columns: Collection[str] = (),
resolve_fks: bool = True,
_extend_on: Optional[Set[Table]] = None,
_reflect_info: _ReflectionInfo | None = None,
) -> None:
insp = inspection.inspect(autoload_with)
with insp._inspection_context() as conn_insp:
conn_insp.reflect_table(
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
@property
def _sorted_constraints(self) -> List[Constraint]:
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self) -> Set[ForeignKeyConstraint]:
""":class:`_schema.ForeignKeyConstraint` objects referred to by this
:class:`_schema.Table`.
This list is produced from the collection of
:class:`_schema.ForeignKey`
objects currently associated.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.foreign_keys`
:attr:`_schema.Table.indexes`
"""
return {
fkc.constraint
for fkc in self.foreign_keys
if fkc.constraint is not None
}
def _init_existing(self, *args: Any, **kwargs: Any) -> None:
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
autoload_replace = kwargs.pop("autoload_replace", True)
schema = kwargs.pop("schema", None)
_extend_on = kwargs.pop("_extend_on", None)
_reflect_info = kwargs.pop("_reflect_info", None)
# these arguments are only used with _init()
extend_existing = kwargs.pop("extend_existing", False)
keep_existing = kwargs.pop("keep_existing", False)
assert extend_existing
assert not keep_existing
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table from '%s' to '%s'",
(self.schema, schema),
)
include_columns = kwargs.pop("include_columns", None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
resolve_fks = kwargs.pop("resolve_fks", True)
for key in ("quote", "quote_schema"):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments"
)
# update `self` with these kwargs, if provided
self.comment = kwargs.pop("comment", self.comment)
self.implicit_returning = kwargs.pop(
"implicit_returning", self.implicit_returning
)
self.info = kwargs.pop("info", self.info)
exclude_columns: _typing_Sequence[str]
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata,
autoload_with,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
all_names = {c.name: c for c in self.c}
self._extra_kwargs(**kwargs)
self._init_items(*args, allow_replacements=True, all_names=all_names)
def _extra_kwargs(self, **kwargs: Any) -> None:
self._validate_dialect_kwargs(kwargs)
def _init_collections(self) -> None:
pass
def _reset_exported(self) -> None:
pass
@util.ro_non_memoized_property
def _autoincrement_column(self) -> Optional[Column[int]]:
return self.primary_key._autoincrement_column
@util.ro_memoized_property
def _sentinel_column_characteristics(
self,
) -> _SentinelColumnCharacterization:
"""determine a candidate column (or columns, in case of a client
generated composite primary key) which can be used as an
"insert sentinel" for an INSERT statement.
The returned structure, :class:`_SentinelColumnCharacterization`,
includes all the details needed by :class:`.Dialect` and
:class:`.SQLCompiler` to determine if these column(s) can be used
as an INSERT..RETURNING sentinel for a particular database
dialect.
.. versionadded:: 2.0.10
"""
sentinel_is_explicit = False
sentinel_is_autoinc = False
the_sentinel: Optional[_typing_Sequence[Column[Any]]] = None
# see if a column was explicitly marked "insert_sentinel=True".
explicit_sentinel_col = self._sentinel_column
if explicit_sentinel_col is not None:
the_sentinel = (explicit_sentinel_col,)
sentinel_is_explicit = True
autoinc_col = self._autoincrement_column
if sentinel_is_explicit and explicit_sentinel_col is autoinc_col:
assert autoinc_col is not None
sentinel_is_autoinc = True
elif explicit_sentinel_col is None and autoinc_col is not None:
the_sentinel = (autoinc_col,)
sentinel_is_autoinc = True
default_characterization = _SentinelDefaultCharacterization.UNKNOWN
if the_sentinel:
the_sentinel_zero = the_sentinel[0]
if the_sentinel_zero.identity:
if the_sentinel_zero.identity._increment_is_negative:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
"Can't use IDENTITY default with negative "
"increment as an explicit sentinel column"
)
else:
if sentinel_is_autoinc:
autoinc_col = None
sentinel_is_autoinc = False
the_sentinel = None
else:
default_characterization = (
_SentinelDefaultCharacterization.IDENTITY
)
elif (
the_sentinel_zero.default is None
and the_sentinel_zero.server_default is None
):
if the_sentinel_zero.nullable:
raise exc.InvalidRequestError(
f"Column {the_sentinel_zero} has been marked as a "
"sentinel "
"column with no default generation function; it "
"at least needs to be marked nullable=False assuming "
"user-populated sentinel values will be used."
)
default_characterization = (
_SentinelDefaultCharacterization.NONE
)
elif the_sentinel_zero.default is not None:
if the_sentinel_zero.default.is_sentinel:
default_characterization = (
_SentinelDefaultCharacterization.SENTINEL_DEFAULT
)
elif default_is_sequence(the_sentinel_zero.default):
if the_sentinel_zero.default._increment_is_negative:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
"Can't use SEQUENCE default with negative "
"increment as an explicit sentinel column"
)
else:
if sentinel_is_autoinc:
autoinc_col = None
sentinel_is_autoinc = False
the_sentinel = None
default_characterization = (
_SentinelDefaultCharacterization.SEQUENCE
)
elif the_sentinel_zero.default.is_callable:
default_characterization = (
_SentinelDefaultCharacterization.CLIENTSIDE
)
elif the_sentinel_zero.server_default is not None:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
f"Column {the_sentinel[0]} can't be a sentinel column "
"because it uses an explicit server side default "
"that's not the Identity() default."
)
default_characterization = (
_SentinelDefaultCharacterization.SERVERSIDE
)
if the_sentinel is None and self.primary_key:
assert autoinc_col is None
# determine for non-autoincrement pk if all elements are
# client side
for _pkc in self.primary_key:
if _pkc.server_default is not None or (
_pkc.default and not _pkc.default.is_callable
):
break
else:
the_sentinel = tuple(self.primary_key)
default_characterization = (
_SentinelDefaultCharacterization.CLIENTSIDE
)
return _SentinelColumnCharacterization(
the_sentinel,
sentinel_is_explicit,
sentinel_is_autoinc,
default_characterization,
)
@property
def autoincrement_column(self) -> Optional[Column[int]]:
"""Returns the :class:`.Column` object which currently represents
the "auto increment" column, if any, else returns None.
This is based on the rules for :class:`.Column` as defined by the
:paramref:`.Column.autoincrement` parameter, which generally means the
column within a single integer column primary key constraint that is
not constrained by a foreign key. If the table does not have such
a primary key constraint, then there's no "autoincrement" column.
A :class:`.Table` may have only one column defined as the
"autoincrement" column.
.. versionadded:: 2.0.4
.. seealso::
:paramref:`.Column.autoincrement`
"""
return self._autoincrement_column
@property
def key(self) -> str:
"""Return the 'key' for this :class:`_schema.Table`.
This value is used as the dictionary key within the
:attr:`_schema.MetaData.tables` collection. It is typically the same
as that of :attr:`_schema.Table.name` for a table with no
:attr:`_schema.Table.schema`
set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
def __repr__(self) -> str:
return "Table(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.metadata)]
+ [repr(x) for x in self.columns]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in ["schema"]]
)
def __str__(self) -> str:
return _get_table_key(self.description, self.schema)
def add_is_dependent_on(self, table: Table) -> None:
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
def append_column(
self, column: ColumnClause[Any], replace_existing: bool = False
) -> None:
"""Append a :class:`_schema.Column` to this :class:`_schema.Table`.
The "key" of the newly added :class:`_schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`_schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`_schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
:param replace_existing: When ``True``, allows replacing existing
columns. When ``False``, the default, a :class:`.DuplicateColumnError`
is raised if a column with the same ``.key`` already exists.
.. versionadded:: 1.4.0
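A brief usage sketch (the table and column names here are illustrative)::
    metadata_obj = MetaData()
    my_table = Table(
        "mytable", metadata_obj, Column("id", Integer, primary_key=True)
    )
    my_table.append_column(Column("data", String(50)))
    # the new column is now part of the .c collection
    assert "data" in my_table.c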
"""
try:
column._set_parent_with_dispatch(
self,
allow_replacements=replace_existing,
all_names={c.name: c for c in self.c},
)
except exc.DuplicateColumnError as de:
raise exc.DuplicateColumnError(
f"{de.args[0]} Specify replace_existing=True to "
"Table.append_column() to replace an "
"existing column."
) from de
def append_constraint(self, constraint: Union[Index, Constraint]) -> None:
"""Append a :class:`_schema.Constraint` to this
:class:`_schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`_schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
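A brief usage sketch adding a unique constraint after construction (the
table, column and constraint names here are illustrative)::
    metadata_obj = MetaData()
    my_table = Table(
        "mytable",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        Column("email", String(100)),
    )
    my_table.append_constraint(
        UniqueConstraint("email", name="uq_mytable_email")
    )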
"""
constraint._set_parent_with_dispatch(self)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
metadata = parent
assert isinstance(metadata, MetaData)
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def create(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``CREATE`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``DROP`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
@util.deprecated(
"1.4",
":meth:`_schema.Table.tometadata` is renamed to "
":meth:`_schema.Table.to_metadata`",
)
def tometadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table`
associated with a different
:class:`_schema.MetaData`.
See :meth:`_schema.Table.to_metadata` for a full description.
"""
return self.to_metadata(
metadata,
schema=schema,
referred_schema_fn=referred_schema_fn,
name=name,
)
def to_metadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table` associated with a
different :class:`_schema.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.to_metadata(m2)
.. versionchanged:: 1.4 The :meth:`_schema.Table.to_metadata` function
was renamed from :meth:`_schema.Table.tometadata`.
:param metadata: Target :class:`_schema.MetaData` object,
into which the
new :class:`_schema.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`_schema.Table`. If set to a string name, the new
:class:`_schema.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`_schema.MetaData`, which is typically ``None`` as well,
unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.to_metadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.to_metadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`_schema.ForeignKeyConstraint`.
The callable accepts this parent :class:`_schema.Table`, the
target schema that we are changing to, the
:class:`_schema.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied. To reset the schema
to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no
change, return ``None`` or :data:`.RETAIN_SCHEMA`.
.. versionchanged:: 1.4.33 The ``referred_schema_fn`` function
may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA`
symbols.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.to_metadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`_schema.Table` to be copied to the same
:class:`_schema.MetaData` target
with a new name.
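For example, a sketch copying a table into its own
:class:`_schema.MetaData` under a different name (the names here are
illustrative)::
    user_archive = user.to_metadata(user.metadata, name="user_archive")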
"""
if name is None:
name = self.name
actual_schema: Optional[str]
if schema is RETAIN_SCHEMA:
actual_schema = self.schema
elif schema is None:
actual_schema = metadata.schema
else:
actual_schema = schema # type: ignore
key = _get_table_key(name, actual_schema)
if key in metadata.tables:
util.warn(
"Table '%s' already exists within the given "
"MetaData - not copying." % self.description
)
return metadata.tables[key]
args = []
for col in self.columns:
args.append(col._copy(schema=actual_schema))
table = Table(
name,
metadata,
schema=actual_schema,
comment=self.comment,
*args,
**self.kwargs,
)
for const in self.constraints:
if isinstance(const, ForeignKeyConstraint):
referred_schema = const._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, actual_schema, const, referred_schema
)
else:
fk_constraint_schema = (
actual_schema
if referred_schema == self.schema
else None
)
table.append_constraint(
const._copy(
schema=fk_constraint_schema, target_table=table
)
)
elif not const._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if const._column_flag:
continue
table.append_constraint(
const._copy(schema=actual_schema, target_table=table)
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if index._column_flag:
continue
Index(
index.name,
unique=index.unique,
*[
_copy_expression(expr, self, table)
for expr in index._table_bound_expressions
],
_table=table,
**index.kwargs,
)
return self._schema_item_copy(table)
class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
"""Represents a column in a database table."""
__visit_name__ = "column"
inherit_cache = True
key: str
server_default: Optional[FetchedValue]
def __init__(
self,
__name_pos: Optional[
Union[str, _TypeEngineArgument[_T], SchemaEventTarget]
] = None,
__type_pos: Optional[
Union[_TypeEngineArgument[_T], SchemaEventTarget]
] = None,
*args: SchemaEventTarget,
name: Optional[str] = None,
type_: Optional[_TypeEngineArgument[_T]] = None,
autoincrement: _AutoIncrementType = "auto",
default: Optional[Any] = None,
doc: Optional[str] = None,
key: Optional[str] = None,
index: Optional[bool] = None,
unique: Optional[bool] = None,
info: Optional[_InfoType] = None,
nullable: Optional[
Union[bool, Literal[SchemaConst.NULL_UNSPECIFIED]]
] = SchemaConst.NULL_UNSPECIFIED,
onupdate: Optional[Any] = None,
primary_key: bool = False,
server_default: Optional[_ServerDefaultArgument] = None,
server_onupdate: Optional[FetchedValue] = None,
quote: Optional[bool] = None,
system: bool = False,
comment: Optional[str] = None,
insert_sentinel: bool = False,
_omit_from_statements: bool = False,
_proxies: Optional[Any] = None,
**dialect_kwargs: Any,
):
r"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`_schema.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`_schema.Column` is made to refer to another column using
:class:`_schema.ForeignKey` and/or
:class:`_schema.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`_schema.Column` object.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`_schema.ForeignKey`,
:class:`.ColumnDefault`, :class:`.Sequence`, :class:`.Computed`,
and :class:`.Identity`. In some cases an
equivalent keyword argument is available such as ``server_default``,
``default`` and ``unique``.
:param autoincrement: Set up "auto increment" semantics for an
**integer primary key column with no foreign key dependencies**
(see later in this docstring for a more specific definition).
This may influence the :term:`DDL` that will be emitted for
this column during a table create, as well as how the column
will be considered when INSERT statements are compiled and
executed.
The default value is the string ``"auto"``,
which indicates that a single-column (i.e. non-composite) primary key
that is of an INTEGER type with no other client-side or server-side
default constructs indicated should receive auto increment semantics
automatically. Other values include ``True`` (force this column to
have auto-increment semantics for a :term:`composite primary key` as
well), ``False`` (this column should never have auto-increment
semantics), and the string ``"ignore_fk"`` (special-case for foreign
key columns, see below).
The term "auto increment semantics" refers both to the kind of DDL
that will be emitted for the column within a CREATE TABLE statement,
when methods such as :meth:`.MetaData.create_all` and
:meth:`.Table.create` are invoked, as well as how the column will be
considered when an INSERT statement is compiled and emitted to the
database:
* **DDL rendering** (i.e. :meth:`.MetaData.create_all`,
:meth:`.Table.create`): When used on a :class:`.Column` that has
no other
default-generating construct associated with it (such as a
:class:`.Sequence` or :class:`.Identity` construct), the parameter
will imply that database-specific keywords such as PostgreSQL
``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server
should also be rendered. Not every database backend has an
"implied" default generator available; for example the Oracle
backend always needs an explicit construct such as
:class:`.Identity` to be included with a :class:`.Column` in order
for the DDL rendered to include auto-generating constructs to also
be produced in the database.
* **INSERT semantics** (i.e. when a :func:`_sql.insert` construct is
compiled into a SQL string and is then executed on a database using
:meth:`_engine.Connection.execute` or equivalent): A single-row
INSERT statement will be known to produce a new integer primary key
value automatically for this column, which will be accessible
after the statement is invoked via the
:attr:`.CursorResult.inserted_primary_key` attribute upon the
:class:`_result.Result` object. This also applies towards use of the
ORM when ORM-mapped objects are persisted to the database,
indicating that a new integer primary key will be available to
become part of the :term:`identity key` for that object. This
behavior takes place regardless of what DDL constructs are
associated with the :class:`_schema.Column` and is independent
of the "DDL Rendering" behavior discussed in the previous note
above.
The parameter may be set to ``True`` to indicate that a column which
is part of a composite (i.e. multi-column) primary key should
have autoincrement semantics, though note that only one column
within a primary key may have this setting. It can also
be set to ``True`` to indicate autoincrement semantics on a
column that has a client-side or server-side default configured,
however note that not all dialects can accommodate all styles
of default as an "autoincrement". It can also be
set to ``False`` on a single-column primary key that has a
datatype of INTEGER in order to disable auto increment semantics
for that column.
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
* Not referring to another column via :class:`_schema.ForeignKey`,
unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled on a
column that refers to another via foreign key, as such a column is
required to refer to a value that originates from elsewhere.
The setting has these effects on columns that meet the
above criteria:
* DDL issued for the column, if the column does not already include
a default generating construct supported by the backend such as
:class:`.Identity`, will include database-specific
keywords intended to signify this column as an
"autoincrement" column for specific backends. Behavior for
primary SQLAlchemy dialects includes:
* AUTO_INCREMENT on MySQL and MariaDB
* SERIAL on PostgreSQL
* IDENTITY on MS-SQL - this occurs even without the
:class:`.Identity` construct as the
:paramref:`.Column.autoincrement` parameter pre-dates this
construct.
* SQLite - SQLite integer primary key columns are implicitly
"auto incrementing" and no additional keywords are rendered;
to render the special SQLite keyword ``AUTOINCREMENT``
is not included as this is unnecessary and not recommended
by the database vendor. See the section
:ref:`sqlite_autoincrement` for more background.
* Oracle - The Oracle dialect has no default "autoincrement"
feature available at this time, instead the :class:`.Identity`
construct is recommended to achieve this (the :class:`.Sequence`
construct may also be used).
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* When a single-row :func:`_sql.insert` construct is compiled and
executed, which does not set the :meth:`_sql.Insert.inline`
modifier, newly generated primary key values for this column
will be automatically retrieved upon statement execution
using a method specific to the database driver in use:
* MySQL, SQLite - accessing the ``cursor.lastrowid`` attribute
(see
`https://www.python.org/dev/peps/pep-0249/#lastrowid
<https://www.python.org/dev/peps/pep-0249/#lastrowid>`_)
* PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent
construct when rendering an INSERT statement, and then retrieving
the newly generated primary key values after execution
* PostgreSQL, Oracle for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
for a :class:`.Sequence` only, the :class:`.Sequence` is invoked
explicitly before the INSERT statement takes place so that the
newly generated primary key value is available to the client
* SQL Server for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
the ``SELECT scope_identity()`` construct is used after the
INSERT statement is invoked to retrieve the newly generated
primary key value.
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* For multiple-row :func:`_sql.insert` constructs invoked with
a list of parameters (i.e. "executemany" semantics), primary-key
retrieving behaviors are generally disabled, however there may
be special APIs that may be used to retrieve lists of new
primary key values for an "executemany", such as the psycopg2
"fast insertmany" feature. Such features are very new and
may not yet be well covered in documentation.
:param default: A scalar, Python callable, or
:class:`_expression.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
Contrast this argument to
:paramref:`_schema.Column.server_default`
which creates a default generator on the database side.
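For example, a sketch using a scalar and a Python callable as client-side
defaults (the column names here are illustrative)::
    from datetime import datetime
    Column("status", String(20), default="pending")
    Column("created_at", DateTime, default=datetime.utcnow)
Note that the callable is passed uninvoked; SQLAlchemy calls it at INSERT
time for each row that does not otherwise supply the value.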
.. seealso::
:ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes on the Python side. This attribute does
**not** render SQL comments; use the
:paramref:`_schema.Column.comment`
parameter for this purpose.
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`_schema.Table`.
When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
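For example, a sketch where the database column is named ``user_name``
while the application addresses it as ``username`` (the names here are
illustrative)::
    user = Table(
        "user",
        metadata_obj,
        Column("user_name", String(50), key="username"),
    )
    # addressed by key within the application
    stmt = select(user.c.username)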
:param index: When ``True``, indicates that a :class:`_schema.Index`
construct will be automatically generated for this
:class:`_schema.Column`, which will result in a "CREATE INDEX"
statement being emitted for the :class:`_schema.Table` when the DDL
create operation is invoked.
Using this flag is equivalent to making use of the
:class:`_schema.Index` construct explicitly at the level of the
:class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
Index("ix_some_table_x", "x")
)
To add the :paramref:`_schema.Index.unique` flag to the
:class:`_schema.Index`, set both the
:paramref:`_schema.Column.unique` and
:paramref:`_schema.Column.index` flags to True simultaneously,
which will have the effect of rendering the "CREATE UNIQUE INDEX"
DDL instruction instead of "CREATE INDEX".
The name of the index is generated using the
:ref:`default naming convention <constraint_default_naming_convention>`
which for the :class:`_schema.Index` construct is of the form
``ix_<tablename>_<columnname>``.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured index to a table
definition, explicit use of the :class:`_schema.Index` construct
should be preferred for most use cases, including composite indexes
that encompass more than one column, indexes with SQL expressions
or ordering, backend-specific index configuration options, and
indexes that use a specific name.
.. note:: the :attr:`_schema.Column.index` attribute on
:class:`_schema.Column`
**does not indicate** if this column is indexed or not, only
if this flag was explicitly set here. To view indexes on
a column, view the :attr:`_schema.Table.indexes` collection
or use :meth:`_reflection.Inspector.get_indexes`.
.. seealso::
:ref:`schema_indexes`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.unique`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: When set to ``False``, will cause the "NOT NULL"
phrase to be added when generating DDL for the column. When
``True``, will normally generate nothing (in SQL this defaults to
"NULL"), except in some very specific backend-specific edge cases
where "NULL" may render explicitly.
Defaults to ``True`` unless :paramref:`_schema.Column.primary_key`
is also ``True`` or the column specifies a :class:`_sql.Identity`,
in which case it defaults to ``False``.
This parameter is only used when issuing CREATE TABLE statements.
.. note::
When the column specifies a :class:`_sql.Identity` this
parameter is in general ignored by the DDL compiler. The
PostgreSQL database allows nullable identity column by
setting this parameter to ``True`` explicitly.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
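For example, a sketch that refreshes a timestamp column on each UPDATE
issued through SQLAlchemy (the column name here is illustrative)::
    from datetime import datetime
    Column(
        "updated_at",
        DateTime,
        default=datetime.utcnow,
        onupdate=datetime.utcnow,
    )
As with :paramref:`_schema.Column.default`, this applies only to statements
emitted by SQLAlchemy, not to updates issued outside the application.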
.. seealso::
:ref:`metadata_defaults` - complete discussion of onupdate
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`_schema.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
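For example, a sketch of a composite primary key declared using this flag
(the table and column names here are illustrative)::
    order_items = Table(
        "order_items",
        metadata_obj,
        Column("order_id", Integer, primary_key=True),
        Column("product_id", Integer, primary_key=True),
        Column("quantity", Integer, nullable=False),
    )
which is equivalent to supplying
``PrimaryKeyConstraint("order_id", "product_id")`` explicitly.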
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
This parameter can also accept complex combinations of contextually
valid SQLAlchemy expressions or constructs::
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, MetaData, ARRAY, Text
from sqlalchemy.dialects.postgresql import array
engine = create_engine(
'postgresql+psycopg2://scott:tiger@localhost/mydatabase'
)
metadata_obj = MetaData()
tbl = Table(
"foo",
metadata_obj,
Column("bar",
ARRAY(Text),
server_default=array(["biz", "bang", "bash"])
)
)
metadata_obj.create_all(engine)
The above results in a table created with the following SQL::
CREATE TABLE foo (
bar TEXT[] DEFAULT ARRAY['biz', 'bang', 'bash']
)
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
.. seealso::
:ref:`server_defaults` - complete discussion of server side
defaults
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function,
such as a trigger. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not actually
implement any kind of generation function within the database,
which instead must be specified separately.
.. warning:: This directive **does not** currently produce MySQL's
"ON UPDATE CURRENT_TIMESTAMP()" clause. See
:ref:`mysql_timestamp_onupdate` for background on how to
produce this clause.
.. seealso::
:ref:`triggered_columns`
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, and the :paramref:`_schema.Column.index`
parameter is left at its default value of ``False``,
indicates that a :class:`_schema.UniqueConstraint`
construct will be automatically generated for this
:class:`_schema.Column`,
which will result in a "UNIQUE CONSTRAINT" clause referring
to this column being included
in the ``CREATE TABLE`` statement emitted, when the DDL create
operation for the :class:`_schema.Table` object is invoked.
When this flag is ``True`` while the
:paramref:`_schema.Column.index` parameter is simultaneously
set to ``True``, the effect instead is that a
:class:`_schema.Index` construct which includes the
:paramref:`_schema.Index.unique` parameter set to ``True``
is generated. See the documentation for
:paramref:`_schema.Column.index` for additional detail.
Using this flag is equivalent to making use of the
:class:`_schema.UniqueConstraint` construct explicitly at the
level of the :class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
UniqueConstraint("x")
)
The :paramref:`_schema.UniqueConstraint.name` parameter
of the unique constraint object is left at its default value
of ``None``; in the absence of a :ref:`naming convention <constraint_naming_conventions>`
for the enclosing :class:`_schema.MetaData`, the UNIQUE CONSTRAINT
construct will be emitted as unnamed, which typically causes a
database-specific naming convention to be applied.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured unique constraint to a table
definition, explicit use of the :class:`_schema.UniqueConstraint` construct
should be preferred for most use cases, including composite constraints
that encompass more than one column, backend-specific index configuration options, and
constraints that use a specific name.
.. note:: the :attr:`_schema.Column.unique` attribute on
:class:`_schema.Column`
**does not indicate** if this column has a unique constraint or
not, only if this flag was explicitly set here. To view
indexes and unique constraints that may involve this column,
view the
:attr:`_schema.Table.indexes` and/or
:attr:`_schema.Table.constraints` collections or use
:meth:`_reflection.Inspector.get_indexes` and/or
:meth:`_reflection.Inspector.get_unique_constraints`
.. seealso::
:ref:`schema_unique_constraint`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.index`
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
:param comment: Optional string that will render an SQL comment on
table creation.
.. versionadded:: 1.2 Added the
:paramref:`_schema.Column.comment`
parameter to :class:`_schema.Column`.
:param insert_sentinel: Marks this :class:`_schema.Column` as an
:term:`insert sentinel` used for optimizing the performance of the
:term:`insertmanyvalues` feature for tables that don't
otherwise have qualifying primary key configurations.
.. versionadded:: 2.0.10
.. seealso::
:func:`_schema.insert_sentinel` - all in one helper for declaring
sentinel columns
:ref:`engine_insertmanyvalues`
:ref:`engine_insertmanyvalues_sentinel_columns`
""" # noqa: E501, RST201, RST202
l_args = [__name_pos, __type_pos] + list(args)
del args
if l_args:
if isinstance(l_args[0], str):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword."
)
name = l_args.pop(0) # type: ignore
elif l_args[0] is None:
l_args.pop(0)
if l_args:
coltype = l_args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword."
)
type_ = l_args.pop(0) # type: ignore
elif l_args[0] is None:
l_args.pop(0)
if name is not None:
name = quoted_name(name, quote)
elif quote is not None:
raise exc.ArgumentError(
"Explicit 'name' is required when " "sending 'quote' argument"
)
# name = None is expected to be an interim state
# note this use case is legacy now that ORM declarative has a
# dedicated "column" construct local to the ORM
super().__init__(name, type_) # type: ignore
self.key = key if key is not None else name # type: ignore
self.primary_key = primary_key
self._insert_sentinel = insert_sentinel
self._omit_from_statements = _omit_from_statements
self._user_defined_nullable = udn = nullable
if udn is not NULL_UNSPECIFIED:
self.nullable = udn
else:
self.nullable = not primary_key
# these default to None because .index and .unique is *not*
# an informational flag about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = index
self.unique = unique
self.system = system
self.doc = doc
self.autoincrement: _AutoIncrementType = autoincrement
self.constraints = set()
self.foreign_keys = set()
self.comment = comment
self.computed = None
self.identity = None
# check if this Column is proxying another column
if _proxies is not None:
self._proxies = _proxies
else:
# otherwise, add DDL-related events
self._set_type(self.type)
if default is not None:
if not isinstance(default, (ColumnDefault, Sequence)):
default = ColumnDefault(default)
self.default = default
l_args.append(default)
else:
self.default = None
if onupdate is not None:
if not isinstance(onupdate, (ColumnDefault, Sequence)):
onupdate = ColumnDefault(onupdate, for_update=True)
self.onupdate = onupdate
l_args.append(onupdate)
else:
self.onupdate = None
if server_default is not None:
if isinstance(server_default, FetchedValue):
server_default = server_default._as_for_update(False)
l_args.append(server_default)
else:
server_default = DefaultClause(server_default)
l_args.append(server_default)
self.server_default = server_default
if server_onupdate is not None:
if isinstance(server_onupdate, FetchedValue):
server_onupdate = server_onupdate._as_for_update(True)
l_args.append(server_onupdate)
else:
server_onupdate = DefaultClause(
server_onupdate, for_update=True
)
l_args.append(server_onupdate)
self.server_onupdate = server_onupdate
self._init_items(*cast(_typing_Sequence[SchemaItem], l_args))
util.set_creation_order(self)
if info is not None:
self.info = info
self._extra_kwargs(**dialect_kwargs)
table: Table
constraints: Set[Constraint]
foreign_keys: Set[ForeignKey]
"""A collection of all :class:`_schema.ForeignKey` marker objects
associated with this :class:`_schema.Column`.
Each object is a member of a :class:`_schema.Table`-wide
:class:`_schema.ForeignKeyConstraint`.
.. seealso::
:attr:`_schema.Table.foreign_keys`
"""
index: Optional[bool]
"""The value of the :paramref:`_schema.Column.index` parameter.
Does not indicate if this :class:`_schema.Column` is actually indexed
or not; use :attr:`_schema.Table.indexes`.
.. seealso::
:attr:`_schema.Table.indexes`
"""
unique: Optional[bool]
"""The value of the :paramref:`_schema.Column.unique` parameter.
Does not indicate if this :class:`_schema.Column` is actually subject to
a unique constraint or not; use :attr:`_schema.Table.indexes` and
:attr:`_schema.Table.constraints`.
.. seealso::
:attr:`_schema.Table.indexes`
:attr:`_schema.Table.constraints`.
"""
computed: Optional[Computed]
identity: Optional[Identity]
def _set_type(self, type_: TypeEngine[Any]) -> None:
self.type = type_
if isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
for impl in self.type._variant_mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(self)
@HasMemoized.memoized_attribute
def _default_description_tuple(self) -> _DefaultDescriptionTuple:
"""used by default.py -> _process_execute_defaults()"""
return _DefaultDescriptionTuple._from_column_default(self.default)
@HasMemoized.memoized_attribute
def _onupdate_description_tuple(self) -> _DefaultDescriptionTuple:
"""used by default.py -> _process_execute_defaults()"""
return _DefaultDescriptionTuple._from_column_default(self.onupdate)
@util.memoized_property
def _gen_static_annotations_cache_key(self) -> bool: # type: ignore
"""special attribute used by cache key gen, if true, we will
use a static cache key for the annotations dictionary, else we
will generate a new cache key for annotations each time.
Added for #8790
"""
return self.table is not None and self.table._is_table
def _extra_kwargs(self, **kwargs: Any) -> None:
self._validate_dialect_kwargs(kwargs)
def __str__(self) -> str:
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return self.table.description + "." + self.description
else:
return self.description
else:
return self.description
def references(self, column: Column[Any]) -> bool:
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk: ForeignKey) -> None:
fk._set_parent_with_dispatch(self)
def __repr__(self) -> str:
kwarg = []
if self.key != self.name:
kwarg.append("key")
if self.primary_key:
kwarg.append("primary_key")
if not self.nullable:
kwarg.append("nullable")
if self.onupdate:
kwarg.append("onupdate")
if self.default:
kwarg.append("default")
if self.server_default:
kwarg.append("server_default")
if self.comment:
kwarg.append("comment")
return "Column(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.type)]
+ [repr(x) for x in self.foreign_keys if x is not None]
+ [repr(x) for x in self.constraints]
+ [
(
self.table is not None
and "table=<%s>" % self.table.description
or "table=None"
)
]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]
)
def _set_parent( # type: ignore[override]
self,
parent: SchemaEventTarget,
*,
all_names: Dict[str, Column[Any]],
allow_replacements: bool,
**kw: Any,
) -> None:
table = parent
assert isinstance(table, Table)
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table."
)
self._reset_memoizations()
if self.key is None:
self.key = self.name
existing = getattr(self, "table", None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'"
% (self.key, existing.description)
)
extra_remove = None
existing_col = None
conflicts_on = ""
if self.key in table._columns:
existing_col = table._columns[self.key]
if self.key == self.name:
conflicts_on = "name"
else:
conflicts_on = "key"
elif self.name in all_names:
existing_col = all_names[self.name]
extra_remove = {existing_col}
conflicts_on = "name"
if existing_col is not None:
if existing_col is not self:
if not allow_replacements:
raise exc.DuplicateColumnError(
f"A column with {conflicts_on} "
f"""'{
self.key if conflicts_on == 'key' else self.name
}' """
f"is already present in table '{table.name}'."
)
for fk in existing_col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
if extra_remove and existing_col is not None and self.key == self.name:
util.warn(
f'Column with user-specified key "{existing_col.key}" is '
"being replaced with "
f'plain named column "{self.name}", '
f'key "{existing_col.key}" is being removed. If this is a '
"reflection operation, specify autoload_replace=False to "
"prevent this replacement."
)
table._columns.replace(self, extra_remove=extra_remove)
all_names[self.name] = self
self.table = table
if self._insert_sentinel:
if self.table._sentinel_column is not None:
raise exc.ArgumentError(
"a Table may have only one explicit sentinel column"
)
self.table._sentinel_column = self
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'"
% (self.key, table.fullname)
)
if self.index:
if isinstance(self.index, str):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table."
)
table.append_constraint(
Index(
None, self.key, unique=bool(self.unique), _column_flag=True
)
)
elif self.unique:
if isinstance(self.unique, str):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table."
)
table.append_constraint(
UniqueConstraint(self.key, _column_flag=True)
)
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
if self.identity and (
isinstance(self.default, Sequence)
or isinstance(self.onupdate, Sequence)
):
raise exc.ArgumentError(
"An column cannot specify both Identity and Sequence."
)
def _setup_on_memoized_fks(self, fn: Callable[..., Any]) -> None:
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn: Callable[..., Any]) -> None:
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, "after_parent_attach", fn)
@util.deprecated(
"1.4",
"The :meth:`_schema.Column.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Column[Any]:
return self._copy(**kw)
def _copy(self, **kw: Any) -> Column[Any]:
"""Create a copy of this ``Column``, uninitialized.
This is used in :meth:`_schema.Table.to_metadata` and by the ORM.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args: List[SchemaItem] = [
c._copy(**kw)
for c in self.constraints
if not c._type_bound # type: ignore
] + [
c._copy(**kw) # type: ignore
for c in self.foreign_keys
if not c.constraint
]
# ticket #5276
column_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
column_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
server_default = self.server_default
server_onupdate = self.server_onupdate
if isinstance(server_default, (Computed, Identity)):
# TODO: likely should be copied in all cases
args.append(server_default._copy(**kw))
server_default = server_onupdate = None
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
# TODO: DefaultGenerator is not copied here! it's just used again
# with _set_parent() pointing to the old column. see the new
# use of _copy() in the new _merge() method
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
unique=self.unique,
system=self.system,
# quote=self.quote, # disabled 2013-08-27 (commit 031ef080)
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=server_default,
onupdate=self.onupdate,
server_onupdate=server_onupdate,
doc=self.doc,
comment=self.comment,
_omit_from_statements=self._omit_from_statements,
insert_sentinel=self._insert_sentinel,
*args,
**column_kwargs,
)
        # copy the state of "nullable" exactly, to accommodate the
# ORM flipping the .nullable flag directly
c.nullable = self.nullable
c._user_defined_nullable = self._user_defined_nullable
return self._schema_item_copy(c)
def _merge(self, other: Column[Any]) -> None:
"""merge the elements of another column into this one.
this is used by ORM pep-593 merge and will likely need a lot
of fixes.
"""
if self.primary_key:
other.primary_key = True
type_ = self.type
if not type_._isnull and other.type._isnull:
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy()
other.type = type_
if isinstance(type_, SchemaEventTarget):
type_._set_parent_with_dispatch(other)
for impl in type_._variant_mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(other)
if (
self._user_defined_nullable is not NULL_UNSPECIFIED
and other._user_defined_nullable is NULL_UNSPECIFIED
):
other.nullable = self.nullable
other._user_defined_nullable = self._user_defined_nullable
if self.default is not None and other.default is None:
new_default = self.default._copy()
new_default._set_parent(other)
if self.server_default and other.server_default is None:
new_server_default = self.server_default
if isinstance(new_server_default, FetchedValue):
new_server_default = new_server_default._copy()
new_server_default._set_parent(other)
else:
other.server_default = new_server_default
if self.server_onupdate and other.server_onupdate is None:
new_server_onupdate = self.server_onupdate
new_server_onupdate = new_server_onupdate._copy()
new_server_onupdate._set_parent(other)
if self.onupdate and other.onupdate is None:
new_onupdate = self.onupdate._copy()
new_onupdate._set_parent(other)
if self.index and not other.index:
other.index = True
if self.unique and not other.unique:
other.unique = True
for const in self.constraints:
if not const._type_bound:
new_const = const._copy()
new_const._set_parent(other)
for fk in self.foreign_keys:
if not fk.constraint:
new_fk = fk._copy()
new_fk._set_parent(other)
def _make_proxy(
self,
selectable: FromClause,
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
compound_select_cols: Optional[
_typing_Sequence[ColumnElement[Any]]
] = None,
**kw: Any,
) -> Tuple[str, ColumnClause[_T]]:
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [
ForeignKey(
col if col is not None else f._colspec,
_unresolvable=col is None,
_constraint=f.constraint,
)
for f, col in [
(fk, fk._resolve_column(raiseerr=False))
for fk in self.foreign_keys
]
]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned."
)
try:
c = self._constructor(
coercions.expect(
roles.TruncatedLabelRole, name if name else self.name
)
if name_is_truncatable
else (name or self.name),
self.type,
# this may actually be ._proxy_key when the key is incoming
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=list(compound_select_cols)
if compound_select_cols
else [self],
*fk,
)
except TypeError as err:
raise TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__
) from err
c.table = selectable
c._propagate_attrs = selectable._propagate_attrs
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if self.primary_key:
selectable.primary_key.add(c) # type: ignore
if fk:
selectable.foreign_keys.update(fk) # type: ignore
return c.key, c
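# Minimal usage sketch (kept as a comment so nothing executes at import time)
# of how the ``index`` and ``unique`` flags handled in Column._set_parent()
# above turn into schema objects once the Column is attached to a Table.
# Identifiers such as ``metadata_obj`` are illustrative only.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, String
#
#     metadata_obj = MetaData()
#     user = Table(
#         "user",
#         metadata_obj,
#         Column("id", Integer, primary_key=True),
#         Column("email", String(100), index=True),   # emits an anonymous Index
#         Column("handle", String(30), unique=True),  # emits a UniqueConstraint
#     )
#     # len(user.indexes) == 1; the UniqueConstraint is in user.constraints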
def insert_sentinel(
name: Optional[str] = None,
type_: Optional[_TypeEngineArgument[_T]] = None,
*,
default: Optional[Any] = None,
omit_from_statements: bool = True,
) -> Column[Any]:
"""Provides a surrogate :class:`_schema.Column` that will act as a
dedicated insert :term:`sentinel` column, allowing efficient bulk
inserts with deterministic RETURNING sorting for tables that
don't otherwise have qualifying primary key configurations.
Adding this column to a :class:`.Table` object requires that a
corresponding database table actually has this column present, so if adding
it to an existing model, existing database tables would need to be migrated
(e.g. using ALTER TABLE or similar) to include this column.
For background on how this object is used, see the section
:ref:`engine_insertmanyvalues_sentinel_columns` as part of the
section :ref:`engine_insertmanyvalues`.
The :class:`_schema.Column` returned will be a nullable integer column by
default and make use of a sentinel-specific default generator used only in
"insertmanyvalues" operations.
.. seealso::
:func:`_orm.orm_insert_sentinel`
:paramref:`_schema.Column.insert_sentinel`
:ref:`engine_insertmanyvalues`
:ref:`engine_insertmanyvalues_sentinel_columns`
.. versionadded:: 2.0.10
"""
return Column(
name=name,
type_=type_api.INTEGERTYPE if type_ is None else type_,
default=default
if default is not None
else _InsertSentinelColumnDefault(),
_omit_from_statements=omit_from_statements,
insert_sentinel=True,
)
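# Minimal usage sketch (comment-only): attaching the sentinel column produced
# by insert_sentinel() above to a Table whose primary key is a client-generated
# UUID, so "insertmanyvalues" can sort RETURNING rows deterministically.
# Table and column names are illustrative; ``insert_sentinel`` is importable
# from the top-level ``sqlalchemy`` package as of 2.0.10.
#
#     from sqlalchemy import MetaData, Table, Column, String, insert_sentinel
#
#     metadata_obj = MetaData()
#     document = Table(
#         "document",
#         metadata_obj,
#         Column("uuid", String(36), primary_key=True),
#         insert_sentinel(),
#     )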
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`_schema.Column`
object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`_schema.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`_schema.Column` which
in turn is associated with a :class:`_schema.Table`. Conversely,
when :class:`_schema.ForeignKeyConstraint` is applied to a
:class:`_schema.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`_schema.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`_schema.ForeignKeyConstraint` object must be used, and applied
to the :class:`_schema.Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`_schema.Column`
object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key"
parent: Column[Any]
_table_column: Optional[Column[Any]]
def __init__(
self,
column: _DDLColumnArgument,
_constraint: Optional[ForeignKeyConstraint] = None,
use_alter: bool = False,
name: _ConstraintNameArgument = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
link_to_name: bool = False,
match: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_unresolvable: bool = False,
**dialect_kw: Any,
):
r"""
Construct a column-level FOREIGN KEY.
The :class:`_schema.ForeignKey` object when constructed generates a
:class:`_schema.ForeignKeyConstraint`
which is associated with the parent
:class:`_schema.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`_schema.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`_schema.ForeignKeyConstraint`
to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`_schema.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`_schema.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`_schema.ForeignKeyConstraint`.
See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self._colspec = coercions.expect(roles.DDLReferredColumnRole, column)
self._unresolvable = _unresolvable
if isinstance(self._colspec, str):
self._table_column = None
else:
self._table_column = self._colspec
if not isinstance(
self._table_column.table, (type(None), TableClause)
):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
# .parent is not Optional under normal use
self.parent = None # type: ignore
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
self.comment = comment
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self) -> str:
return "ForeignKey(%r)" % self._get_colspec()
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKey.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
return self._copy(schema=schema, **kw)
def _copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
"""Produce a copy of this :class:`_schema.ForeignKey` object.
The new :class:`_schema.ForeignKey` will not be bound
to any :class:`_schema.Column`.
This method is usually used by the internal
copy procedures of :class:`_schema.Column`, :class:`_schema.Table`,
and :class:`_schema.MetaData`.
:param schema: The returned :class:`_schema.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
return self._schema_item_copy(fk)
def _get_colspec(
self,
schema: Optional[
Union[
str,
Literal[SchemaConst.RETAIN_SCHEMA, SchemaConst.BLANK_SCHEMA],
]
] = None,
table_name: Optional[str] = None,
_is_copy: bool = False,
) -> str:
"""Return a string based 'column specification' for this
:class:`_schema.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema not in (None, RETAIN_SCHEMA):
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
if schema is BLANK_SCHEMA:
return "%s.%s" % (tname, colname)
else:
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
if self._table_column.table is None:
if _is_copy:
raise exc.InvalidRequestError(
f"Can't copy ForeignKey object which refers to "
f"non-table bound Column {self._table_column!r}"
)
else:
return self._table_column.key
return "%s.%s" % (
self._table_column.table.fullname,
self._table_column.key,
)
else:
assert isinstance(self._colspec, str)
return self._colspec
@property
def _referred_schema(self) -> Optional[str]:
return self._column_tokens[0]
def _table_key(self) -> Any:
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table: Table) -> bool:
"""Return True if the given :class:`_schema.Table`
is referenced by this
:class:`_schema.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table: FromClause) -> Optional[Column[Any]]:
"""Return the :class:`_schema.Column` in the given
:class:`_schema.Table` (or any :class:`.FromClause`)
referenced by this :class:`_schema.ForeignKey`.
Returns None if this :class:`_schema.ForeignKey`
does not reference the given
:class:`_schema.Table`.
"""
# our column is a Column, and any subquery etc. proxying us
# would be doing so via another Column, so that's what would
# be returned here
return table.columns.corresponding_column(self.column) # type: ignore
@util.memoized_property
def _column_tokens(self) -> Tuple[Optional[str], str, Optional[str]]:
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split(".")
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" % self._colspec
)
if len(m) == 1:
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
        # inter-database foreign keys. See ticket #1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if len(m) > 0:
schema = ".".join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self) -> Tuple[Table, str, Optional[str]]:
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it."
)
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table."
)
parenttable = self.parent.table
if self._unresolvable:
schema, tname, colname = self._column_tokens
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
# assertion
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(
self, parenttable: Table, table: Table, colname: Optional[str]
) -> Column[Any]:
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
# this use case wasn't working in later 1.x series
# as it had no test coverage; fixed in 2.0
parent = self.parent
assert parent is not None
key = parent.key
_column = table.c.get(key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'"
% (self._colspec, parenttable.name, table.name, key),
table.name,
key,
)
return _column
def _set_target_column(self, column: Column[Any]) -> None:
assert self.parent is not None
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
def set_type(fk: ForeignKey) -> None:
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column # type: ignore
@util.ro_memoized_property
def column(self) -> Column[Any]:
"""Return the target :class:`_schema.Column` referenced by this
:class:`_schema.ForeignKey`.
If no target column has been established, an exception
is raised.
"""
return self._resolve_column()
@overload
def _resolve_column(self, *, raiseerr: Literal[True] = ...) -> Column[Any]:
...
@overload
def _resolve_column(
self, *, raiseerr: bool = ...
) -> Optional[Column[Any]]:
...
def _resolve_column(
self, *, raiseerr: bool = True
) -> Optional[Column[Any]]:
_column: Column[Any]
if isinstance(self._colspec, str):
parenttable, tablekey, colname = self._resolve_col_tokens()
if self._unresolvable or tablekey not in parenttable.metadata:
if not raiseerr:
return None
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'"
% (self.parent, tablekey, colname),
tablekey,
)
elif parenttable.key not in parenttable.metadata:
if not raiseerr:
return None
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable
)
else:
table = parenttable.metadata.tables[tablekey]
return self._link_to_col_by_colstring(
parenttable, table, colname
)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if self.parent is not None and self.parent is not parent:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !"
)
self.parent = parent
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table: Table) -> None:
parenttable, _, colname = self._resolve_col_tokens()
_column = self._link_to_col_by_colstring(parenttable, table, colname)
self._set_target_column(_column)
assert self.constraint is not None
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata: MetaData) -> None:
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column: Column[Any], table: Table) -> None:
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
assert isinstance(table, Table)
if self.constraint is None:
self.constraint = ForeignKeyConstraint(
[],
[],
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, str):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
_column = self._link_to_col_by_colstring(
parenttable, table, colname
)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
else:
self._set_target_column(_column)
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
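# Minimal usage sketch (comment-only): the string form of the ``column``
# argument that ForeignKey._resolve_column() above resolves against the
# owning MetaData. Identifiers are illustrative only.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
#
#     metadata_obj = MetaData()
#     parent = Table(
#         "parent", metadata_obj, Column("id", Integer, primary_key=True)
#     )
#     child = Table(
#         "child",
#         metadata_obj,
#         Column("id", Integer, primary_key=True),
#         Column("parent_id", Integer, ForeignKey("parent.id", ondelete="CASCADE")),
#     )
#     # child.c.parent_id.foreign_keys holds the ForeignKey marker; the
#     # generated ForeignKeyConstraint lives in child.constraints.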
if TYPE_CHECKING:
def default_is_sequence(
obj: Optional[DefaultGenerator],
) -> TypeGuard[Sequence]:
...
def default_is_clause_element(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ColumnElementColumnDefault]:
...
def default_is_scalar(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ScalarElementColumnDefault]:
...
else:
default_is_sequence = operator.attrgetter("is_sequence")
default_is_clause_element = operator.attrgetter("is_clause_element")
default_is_scalar = operator.attrgetter("is_scalar")
class DefaultGenerator(Executable, SchemaItem):
"""Base class for column *default* values.
This object is only present on column.default or column.onupdate.
It's not valid as a server default.
"""
__visit_name__ = "default_generator"
_is_default_generator = True
is_sequence = False
is_identity = False
is_server_default = False
is_clause_element = False
is_callable = False
is_scalar = False
has_arg = False
is_sentinel = False
column: Optional[Column[Any]]
def __init__(self, for_update: bool = False) -> None:
self.for_update = for_update
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
if TYPE_CHECKING:
assert isinstance(parent, Column)
self.column = parent
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def _copy(self) -> DefaultGenerator:
raise NotImplementedError()
def _execute_on_connection(
self,
connection: Connection,
distilled_params: _CoreMultiExecuteParams,
execution_options: CoreExecuteOptionsParameter,
) -> Any:
util.warn_deprecated(
"Using the .execute() method to invoke a "
"DefaultGenerator object is deprecated; please use "
"the .scalar() method.",
"2.0",
)
return self._execute_on_scalar(
connection, distilled_params, execution_options
)
def _execute_on_scalar(
self,
connection: Connection,
distilled_params: _CoreMultiExecuteParams,
execution_options: CoreExecuteOptionsParameter,
) -> Any:
return connection._execute_default(
self, distilled_params, execution_options
)
class ColumnDefault(DefaultGenerator, ABC):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
arg: Any
@overload
def __new__(
cls, arg: Callable[..., Any], for_update: bool = ...
) -> CallableColumnDefault:
...
@overload
def __new__(
cls, arg: ColumnElement[Any], for_update: bool = ...
) -> ColumnElementColumnDefault:
...
# if I return ScalarElementColumnDefault here, which is what's actually
# returned, mypy complains that
# overloads overlap w/ incompatible return types.
@overload
def __new__(cls, arg: object, for_update: bool = ...) -> ColumnDefault:
...
def __new__(
cls, arg: Any = None, for_update: bool = False
) -> ColumnDefault:
"""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`_expression.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`_engine.Connection` in use as well as the current
statement and parameters.
"""
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type."
)
elif callable(arg):
cls = CallableColumnDefault
elif isinstance(arg, ClauseElement):
cls = ColumnElementColumnDefault
elif arg is not None:
cls = ScalarElementColumnDefault
return object.__new__(cls)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.arg!r})"
class ScalarElementColumnDefault(ColumnDefault):
"""default generator for a fixed scalar Python value
    .. versionadded:: 2.0
"""
is_scalar = True
has_arg = True
def __init__(self, arg: Any, for_update: bool = False) -> None:
self.for_update = for_update
self.arg = arg
def _copy(self) -> ScalarElementColumnDefault:
return ScalarElementColumnDefault(
arg=self.arg, for_update=self.for_update
)
class _InsertSentinelColumnDefault(ColumnDefault):
"""Default generator that's specific to the use of a "sentinel" column
when using the insertmanyvalues feature.
This default is used as part of the :func:`_schema.insert_sentinel`
construct.
"""
is_sentinel = True
for_update = False
arg = None
def __new__(cls) -> _InsertSentinelColumnDefault:
return object.__new__(cls)
def __init__(self) -> None:
pass
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
col = cast("Column[Any]", parent)
if not col._insert_sentinel:
raise exc.ArgumentError(
"The _InsertSentinelColumnDefault may only be applied to a "
"Column marked as insert_sentinel=True"
)
elif not col.nullable:
raise exc.ArgumentError(
"The _InsertSentinelColumnDefault may only be applied to a "
"Column that is nullable"
)
super()._set_parent(parent, **kw)
def _copy(self) -> _InsertSentinelColumnDefault:
return _InsertSentinelColumnDefault()
_SQLExprDefault = Union["ColumnElement[Any]", "TextClause"]
class ColumnElementColumnDefault(ColumnDefault):
"""default generator for a SQL expression
.. versionadded:: 2.0
"""
is_clause_element = True
has_arg = True
arg: _SQLExprDefault
def __init__(
self,
arg: _SQLExprDefault,
for_update: bool = False,
) -> None:
self.for_update = for_update
self.arg = arg
def _copy(self) -> ColumnElementColumnDefault:
return ColumnElementColumnDefault(
arg=self.arg, for_update=self.for_update
)
@util.memoized_property
@util.preload_module("sqlalchemy.sql.sqltypes")
def _arg_is_typed(self) -> bool:
sqltypes = util.preloaded.sql_sqltypes
return not isinstance(self.arg.type, sqltypes.NullType)
class _CallableColumnDefaultProtocol(Protocol):
def __call__(self, context: ExecutionContext) -> Any:
...
class CallableColumnDefault(ColumnDefault):
"""default generator for a callable Python function
.. versionadded:: 2.0
"""
is_callable = True
arg: _CallableColumnDefaultProtocol
has_arg = True
def __init__(
self,
arg: Union[_CallableColumnDefaultProtocol, Callable[[], Any]],
for_update: bool = False,
) -> None:
self.for_update = for_update
self.arg = self._maybe_wrap_callable(arg)
def _copy(self) -> CallableColumnDefault:
return CallableColumnDefault(arg=self.arg, for_update=self.for_update)
def _maybe_wrap_callable(
self, fn: Union[_CallableColumnDefaultProtocol, Callable[[], Any]]
) -> _CallableColumnDefaultProtocol:
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
        that aren't written to accept a context argument.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return util.wrap_callable(lambda ctx: fn(), fn) # type: ignore
        # number of trailing positional parameters that carry default values
        defaulted = argspec[3] is not None and len(argspec[3]) or 0
        positionals = len(argspec[0]) - defaulted
if positionals == 0:
return util.wrap_callable(lambda ctx: fn(), fn) # type: ignore
elif positionals == 1:
return fn # type: ignore
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments"
)
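# Minimal usage sketch (comment-only): the two callable signatures accepted by
# _maybe_wrap_callable() above -- zero positional arguments, or exactly one
# argument receiving the execution context. Function names are illustrative.
#
#     import uuid
#
#     def new_token():                  # zero-argument form, wrapped internally
#         return uuid.uuid4().hex
#
#     def mirror_name(context):         # one-argument form, used as-is
#         return context.get_current_parameters()["name"]
#
#     Column("token", String(32), default=new_token)
#     Column("name_copy", String(50), onupdate=mirror_name)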
class IdentityOptions:
"""Defines options for a named database sequence or an identity column.
.. versionadded:: 1.3.18
.. seealso::
:class:`.Sequence`
"""
def __init__(
self,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
) -> None:
"""Construct a :class:`.IdentityOptions` object.
See the :class:`.Sequence` documentation for a complete description
of the parameters.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword.
"""
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
self.order = order
@property
def _increment_is_negative(self) -> bool:
return self.increment is not None and self.increment < 0
class Sequence(HasSchemaAttr, IdentityOptions, DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`_engine.Engine`
or :class:`_engine.Connection`,
rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq', start=1),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:ref:`defaults_sequences`
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = "sequence"
is_sequence = True
column: Optional[Column[Any]]
data_type: Optional[TypeEngine[int]]
def __init__(
self,
name: str,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
data_type: Optional[_TypeEngineArgument[int]] = None,
optional: bool = False,
quote: Optional[bool] = None,
metadata: Optional[MetaData] = None,
quote_schema: Optional[bool] = None,
for_update: bool = False,
) -> None:
"""Construct a :class:`.Sequence` object.
:param name: the name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
.. versionchanged:: 2.0 The :paramref:`.Sequence.start` parameter
is required in order to have DDL emit "START WITH". This is a
reversal of a change made in version 1.4 which would implicitly
render "START WITH 1" if the :paramref:`.Sequence.start` were
not included. See :ref:`change_7211` for more detail.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
respectively. If cycle=False (the default) any calls to nextval
after the sequence has reached its maximum value will return an
error.
:param schema: optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`_schema.MetaData`
is also present are the same
as that of :paramref:`_schema.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
:param data_type: The type to be returned by the sequence, for
dialects that allow us to choose between INTEGER, BIGINT, etc.
(e.g., mssql).
.. versionadded:: 1.4.0
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the :paramref:`_schema.Sequence.name` on or off.
When left at its default of ``None``, normal quoting rules based
on casing and reserved words take place.
:param quote_schema: Set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`_schema.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`_schema.MetaData`
gains the following
capabilities:
* The :class:`.Sequence` will inherit the
:paramref:`_schema.MetaData.schema`
parameter specified to the target :class:`_schema.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`_schema.MetaData`
object, if any.
* The :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`_schema.Table` / :class:`_schema.Column`
that's a member of this
:class:`_schema.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`_schema.MetaData`
via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`_schema.Column`,
should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
DefaultGenerator.__init__(self, for_update=for_update)
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.column = None
self.name = quoted_name(name, quote)
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name.construct(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
if data_type is not None:
self.data_type = to_instance(data_type)
else:
self.data_type = None
@util.preload_module("sqlalchemy.sql.functions")
def next_value(self) -> Function[int]:
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
"""
return util.preloaded.sql_functions.func.next_value(self)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
column = parent
assert isinstance(column, Column)
super()._set_parent(column)
column._on_table_attach(self._set_table)
def _copy(self) -> Sequence:
return Sequence(
name=self.name,
start=self.start,
increment=self.increment,
minvalue=self.minvalue,
maxvalue=self.maxvalue,
nominvalue=self.nominvalue,
nomaxvalue=self.nomaxvalue,
cycle=self.cycle,
schema=self.schema,
cache=self.cache,
order=self.order,
data_type=self.data_type,
optional=self.optional,
metadata=self.metadata,
for_update=self.for_update,
)
def _set_table(self, column: Column[Any], table: Table) -> None:
self._set_metadata(table.metadata)
def _set_metadata(self, metadata: MetaData) -> None:
self.metadata = metadata
self.metadata._sequences[self._key] = self
def create(self, bind: _CreateDropBind, checkfirst: bool = True) -> None:
"""Creates this sequence in the database."""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = True) -> None:
"""Drops this sequence from the database."""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def _not_a_column_expr(self) -> NoReturn:
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element." % self.__class__.__name__
)
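# Minimal usage sketch (comment-only): a Sequence associated with a MetaData,
# used both as a primary-key default and via next_value() as documented above.
# Identifiers are illustrative only.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, Sequence, select
#
#     metadata_obj = MetaData()
#     order_id_seq = Sequence("order_id_seq", start=1, metadata=metadata_obj)
#     orders = Table(
#         "orders",
#         metadata_obj,
#         Column("id", Integer, order_id_seq, primary_key=True),
#     )
#     # standalone "next value" expression:
#     stmt = select(order_id_seq.next_value())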
@inspection._self_inspects
class FetchedValue(SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
is_clause_element = False
is_identity = False
column: Optional[Column[Any]]
def __init__(self, for_update: bool = False) -> None:
self.for_update = for_update
def _as_for_update(self, for_update: bool) -> FetchedValue:
if for_update == self.for_update:
return self
else:
return self._clone(for_update) # type: ignore
def _copy(self) -> FetchedValue:
return FetchedValue(self.for_update)
def _clone(self, for_update: bool) -> Self:
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop("column", None)
n.for_update = for_update
return n
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
column = parent
assert isinstance(column, Column)
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self) -> str:
return util.generic_repr(self)
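# Minimal usage sketch (comment-only): FetchedValue marking columns whose
# values are produced by the database itself (e.g. a trigger), so that they
# are fetched back after INSERT or UPDATE. Names are illustrative only.
#
#     from sqlalchemy import (
#         MetaData, Table, Column, Integer, DateTime, FetchedValue,
#     )
#
#     metadata_obj = MetaData()
#     audit = Table(
#         "audit",
#         metadata_obj,
#         Column("id", Integer, primary_key=True),
#         Column(
#             "updated_at",
#             DateTime,
#             server_default=FetchedValue(),
#             server_onupdate=FetchedValue(),
#         ),
#     )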
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(
self,
arg: Union[str, ClauseElement, TextClause],
for_update: bool = False,
_reflected: bool = False,
) -> None:
util.assert_arg_type(arg, (str, ClauseElement, TextClause), "arg")
super().__init__(for_update)
self.arg = arg
self.reflected = _reflected
def _copy(self) -> DefaultClause:
return DefaultClause(
arg=self.arg, for_update=self.for_update, _reflected=self.reflected
)
def __repr__(self) -> str:
return "DefaultClause(%r, for_update=%r)" % (self.arg, self.for_update)
class Constraint(DialectKWArgs, HasConditionalDDL, SchemaItem):
"""A table-level SQL constraint.
:class:`_schema.Constraint` serves as the base class for the series of
constraint objects that can be associated with :class:`_schema.Table`
objects, including :class:`_schema.PrimaryKeyConstraint`,
    :class:`_schema.ForeignKeyConstraint`,
:class:`_schema.UniqueConstraint`, and
:class:`_schema.CheckConstraint`.
"""
__visit_name__ = "constraint"
_creation_order: int
_column_flag: bool
def __init__(
self,
name: _ConstraintNameArgument = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_create_rule: Optional[Any] = None,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
:param _create_rule:
used internally by some datatypes that also create constraints.
:param _type_bound:
used internally to indicate that this constraint is associated with
a specific datatype.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
self.comment = comment
def _should_create_for_compiler(
self, compiler: DDLCompiler, **kw: Any
) -> bool:
if self._create_rule is not None and not self._create_rule(compiler):
return False
elif self._ddl_if is not None:
return self._ddl_if._should_execute(
ddl.CreateConstraint(self), self, None, compiler=compiler, **kw
)
else:
return True
@property
def table(self) -> Table:
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?"
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Table, Column))
self.parent = parent
parent.constraints.add(self)
@util.deprecated(
"1.4",
"The :meth:`_schema.Constraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Self:
return self._copy(**kw) # type: ignore
def _copy(self, **kw: Any) -> Self:
raise NotImplementedError()
class ColumnCollectionMixin:
"""A :class:`_expression.ColumnCollection` of :class:`_schema.Column`
objects.
This collection represents the columns which are referred to by
this object.
"""
_columns: DedupeColumnCollection[Column[Any]]
_allow_multiple_tables = False
_pending_colargs: List[Optional[Union[str, Column[Any]]]]
if TYPE_CHECKING:
def _set_parent_with_dispatch(
self, parent: SchemaEventTarget, **kw: Any
) -> None:
...
def __init__(
self,
*columns: _DDLColumnArgument,
_autoattach: bool = True,
_column_flag: bool = False,
_gather_expressions: Optional[
List[Union[str, ColumnElement[Any]]]
] = None,
) -> None:
self._column_flag = _column_flag
self._columns = DedupeColumnCollection()
processed_expressions: Optional[
List[Union[ColumnElement[Any], str]]
] = _gather_expressions
if processed_expressions is not None:
self._pending_colargs = []
for (
expr,
_,
_,
add_element,
) in coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, columns
):
self._pending_colargs.append(add_element)
processed_expressions.append(expr)
else:
self._pending_colargs = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in columns
]
if _autoattach and self._pending_colargs:
self._check_attach()
def _check_attach(self, evt: bool = False) -> None:
col_objs = [c for c in self._pending_colargs if isinstance(c, Column)]
cols_w_table = [c for c in col_objs if isinstance(c.table, Table)]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
# feature #3341 - place event listeners for Column objects
# such that when all those cols are attached, we autoattach.
assert not evt, "Should not reach here on event call"
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
has_string_cols = {
c for c in self._pending_colargs if c is not None
}.difference(col_objs)
if not has_string_cols:
def _col_attached(column: Column[Any], table: Table) -> None:
# this isinstance() corresponds with the
# isinstance() above; only want to count Table-bound
# columns
if isinstance(table, Table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = {c.table for c in columns}
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'."
% (
", ".join("'%s'" % c for c in others),
table.description,
)
)
@util.ro_memoized_property
def columns(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
return self._columns.as_readonly()
@util.ro_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
return self._columns.as_readonly()
def _col_expressions(
self, parent: Union[Table, Column[Any]]
) -> List[Optional[Column[Any]]]:
if isinstance(parent, Column):
result: List[Optional[Column[Any]]] = [
c for c in self._pending_colargs if isinstance(c, Column)
]
assert len(result) == len(self._pending_colargs)
return result
else:
try:
return [
parent.c[col] if isinstance(col, str) else col
for col in self._pending_colargs
]
except KeyError as ke:
raise exc.ConstraintColumnNotFoundError(
f"Can't create {self.__class__.__name__} "
f"on table '{parent.description}': no column "
f"named '{ke.args[0]}' is present."
) from ke
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Table, Column))
for col in self._col_expressions(parent):
if col is not None:
self._columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(
self,
*columns: _DDLColumnArgument,
name: _ConstraintNameArgument = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
_autoattach: bool = True,
_column_flag: bool = False,
_gather_expressions: Optional[List[_DDLColumnArgument]] = None,
**dialect_kw: Any,
) -> None:
r"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**dialect_kw: other keyword arguments including
dialect-specific arguments are propagated to the :class:`.Constraint`
superclass.
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw,
)
ColumnCollectionMixin.__init__(
self, *columns, _autoattach=_autoattach, _column_flag=_column_flag
)
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Column, Table))
Constraint._set_parent(self, parent)
ColumnCollectionMixin._set_parent(self, parent)
def __contains__(self, x: Any) -> bool:
return x in self._columns
@util.deprecated(
"1.4",
"The :meth:`_schema.ColumnCollectionConstraint.copy` method "
"is deprecated and will be removed in a future release.",
)
def copy(
self,
*,
target_table: Optional[Table] = None,
**kw: Any,
) -> ColumnCollectionConstraint:
return self._copy(target_table=target_table, **kw)
def _copy(
self,
*,
target_table: Optional[Table] = None,
**kw: Any,
) -> ColumnCollectionConstraint:
# ticket #5276
constraint_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
constraint_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
assert isinstance(self.parent, Table)
c = self.__class__(
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
*[
_copy_expression(expr, self.parent, target_table)
for expr in self._columns
],
comment=self.comment,
**constraint_kwargs,
)
return self._schema_item_copy(c)
def contains_column(self, col: Column[Any]) -> bool:
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`_expression.ColumnCollection` of
:class:`_schema.Column` objects.
"""
return self._columns.contains_column(col)
def __iter__(self) -> Iterator[Column[Any]]:
return iter(self._columns)
def __len__(self) -> int:
return len(self._columns)
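# Minimal usage sketch (comment-only): a column-collection constraint such as
# UniqueConstraint, named either by column name or Column object, resolved by
# ColumnCollectionMixin above. Identifiers are illustrative only.
#
#     from sqlalchemy import (
#         MetaData, Table, Column, Integer, String, UniqueConstraint,
#     )
#
#     metadata_obj = MetaData()
#     account = Table(
#         "account",
#         metadata_obj,
#         Column("id", Integer, primary_key=True),
#         Column("org", String(30)),
#         Column("slug", String(30)),
#         UniqueConstraint("org", "slug", name="uq_account_org_slug"),
#     )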
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
_allow_multiple_tables = True
__visit_name__ = "table_or_column_check_constraint"
@_document_text_coercion(
"sqltext",
":class:`.CheckConstraint`",
":paramref:`.CheckConstraint.sqltext`",
)
def __init__(
self,
sqltext: _TextCoercedExpressionArgument[Any],
name: _ConstraintNameArgument = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
_create_rule: Optional[Any] = None,
_autoattach: bool = True,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :func:`_expression.text` object.
If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
columns: List[Column[Any]] = []
visitors.traverse(self.sqltext, {}, {"column": columns.append})
super().__init__(
name=name,
deferrable=deferrable,
initially=initially,
_create_rule=_create_rule,
info=info,
_type_bound=_type_bound,
_autoattach=_autoattach,
*columns,
**dialect_kw,
)
if table is not None:
self._set_parent_with_dispatch(table)
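    # Usage sketch (illustrative, not from the original module): how a CHECK
    # constraint is typically declared, both column-bound and table-level.
    # The table and column names below are hypothetical.
    #
    #   from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table
    #
    #   metadata_obj = MetaData()
    #   account = Table(
    #       "account",
    #       metadata_obj,
    #       Column("balance", Integer, CheckConstraint("balance >= 0")),
    #       CheckConstraint("balance <= 1000000", name="ck_account_balance_cap"),
    #   )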
@property
def is_column_level(self) -> bool:
return not isinstance(self.parent, Table)
@util.deprecated(
"1.4",
"The :meth:`_schema.CheckConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
return self._copy(target_table=target_table, **kw)
def _copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
if target_table is not None:
# note that target_table is None for the copy process of
# a column-bound CheckConstraint, so this path is not reached
# in that case.
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
c = CheckConstraint(
sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
comment=self.comment,
_autoattach=False,
_type_bound=self._type_bound,
)
return self._schema_item_copy(c)
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`_schema.ForeignKey` to the definition of a :class:`_schema.Column`
is a
shorthand equivalent for an unnamed, single column
:class:`_schema.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key_constraint"
def __init__(
self,
columns: _typing_Sequence[_DDLColumnArgument],
refcolumns: _typing_Sequence[_DDLColumnArgument],
name: _ConstraintNameArgument = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
use_alter: bool = False,
link_to_name: bool = False,
match: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
**dialect_kw: Any,
) -> None:
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`_schema.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will perform this resolution
automatically, so the flag is normally not needed.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
comment=comment,
**dialect_kw,
)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing
# https://www.postgresql.org/docs/current/static/ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns."
)
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs,
)
for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
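    # Usage sketch (illustrative, not from the original module): a composite
    # FOREIGN KEY declared at table level; the table and column names below
    # are hypothetical.
    #
    #   from sqlalchemy import Column, ForeignKeyConstraint, Integer, MetaData, Table
    #
    #   metadata_obj = MetaData()
    #   parent = Table(
    #       "parent", metadata_obj,
    #       Column("x", Integer, primary_key=True),
    #       Column("y", Integer, primary_key=True),
    #   )
    #   child = Table(
    #       "child", metadata_obj,
    #       Column("x", Integer),
    #       Column("y", Integer),
    #       ForeignKeyConstraint(
    #           ["x", "y"], ["parent.x", "parent.y"],
    #           name="fk_child_parent", ondelete="CASCADE",
    #       ),
    #   )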
def _append_element(self, column: Column[Any], fk: ForeignKey) -> None:
self._columns.add(column)
self.elements.append(fk)
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
elements: List[ForeignKey]
"""A sequence of :class:`_schema.ForeignKey` objects.
Each :class:`_schema.ForeignKey`
represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self) -> util.OrderedDict[str, ForeignKey]:
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(zip(self.column_keys, self.elements))
@property
def _referred_schema(self) -> Optional[str]:
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self) -> Table:
"""The :class:`_schema.Table` object to which this
:class:`_schema.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
"""
return self.elements[0].column.table
def _validate_dest_table(self, table: Table) -> None:
table_keys = {elem._table_key() for elem in self.elements}
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
"ForeignKeyConstraint on %s(%s) refers to "
"multiple remote tables: %s and %s"
% (table.fullname, self._col_description, elem0, elem1)
)
@property
def column_keys(self) -> _typing_Sequence[str]:
"""Return a list of string keys representing the local
columns in this :class:`_schema.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`_schema.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`_schema.Column`
objects, is the string ``.key`` of each element.
"""
if hasattr(self, "parent"):
return self._columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement) else str(col)
for col in self._pending_colargs
]
@property
def _col_description(self) -> str:
return ", ".join(self.column_keys)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
Constraint._set_parent(self, table)
ColumnCollectionConstraint._set_parent(self, table)
for col, fk in zip(self._columns, self.elements):
if not hasattr(fk, "parent") or fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKeyConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
return self._copy(schema=schema, target_table=target_table, **kw)
def _copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[
x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None,
_is_copy=True,
)
for x in self.elements
],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
)
for self_fk, other_fk in zip(self.elements, fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`_schema.Table` object; it is assigned a set of
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`_schema.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column-specification should generally not be mixed.
    A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
"""
__visit_name__ = "primary_key_constraint"
def __init__(
self,
*columns: _DDLColumnArgument,
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
_implicit_generated: bool = False,
**dialect_kw: Any,
) -> None:
self._implicit_generated = _implicit_generated
super().__init__(
*columns,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw,
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
super()._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self # type: ignore
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if (
self._columns
and table_pks
and set(table_pks) != set(self._columns)
):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release"
% (
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self._columns),
", ".join("'%s'" % c.name for c in self._columns),
)
)
table_pks[:] = []
for c in self._columns:
c.primary_key = True
if c._user_defined_nullable is NULL_UNSPECIFIED:
c.nullable = False
if table_pks:
self._columns.extend(table_pks)
def _reload(self, columns: Iterable[Column[Any]]) -> None:
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`_schema.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self._columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._set_parent_with_dispatch(self.table)
def _replace(self, col: Column[Any]) -> None:
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._columns.replace(col)
self.dispatch._sa_event_column_added_to_pk_constraint(self, col)
@property
def columns_autoinc_first(self) -> List[Column[Any]]:
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self._columns if c is not autoinc]
else:
return list(self._columns)
@util.ro_memoized_property
def _autoincrement_column(self) -> Optional[Column[int]]:
def _validate_autoinc(col: Column[Any], autoinc_true: bool) -> bool:
if col.type._type_affinity is None or not issubclass(
col.type._type_affinity,
(
type_api.INTEGERTYPE._type_affinity,
type_api.NUMERICTYPE._type_affinity,
),
):
if autoinc_true:
raise exc.ArgumentError(
"Column type %s on column '%s' is not "
"compatible with autoincrement=True" % (col.type, col)
)
else:
return False
elif (
not isinstance(col.default, (type(None), Sequence))
and not autoinc_true
):
return False
elif (
col.server_default is not None
and not isinstance(col.server_default, Identity)
and not autoinc_true
):
return False
elif col.foreign_keys and col.autoincrement not in (
True,
"ignore_fk",
):
return False
return True
if len(self._columns) == 1:
col = list(self._columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif col.autoincrement in (
"auto",
"ignore_fk",
) and _validate_autoinc(col, False):
return col
else:
return None
else:
autoinc = None
for col in self._columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
"Only one Column may be marked "
"autoincrement=True, found both %s and %s."
% (col.name, autoinc.name)
)
else:
autoinc = col
return autoinc
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
"""
__visit_name__ = "unique_constraint"
class Index(
DialectKWArgs, ColumnCollectionMixin, HasConditionalDDL, SchemaItem
):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`_schema.Column` objects::
Index("some_index", func.lower(sometable.c.name))
An :class:`.Index` can also be manually associated with a
:class:`_schema.Table`,
either through inline declaration or using
:meth:`_schema.Table.append_constraint`. When this approach is used,
the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = "index"
table: Optional[Table]
expressions: _typing_Sequence[Union[str, ColumnElement[Any]]]
_table_bound_expressions: _typing_Sequence[ColumnElement[Any]]
def __init__(
self,
name: Optional[str],
*expressions: _DDLColumnArgument,
unique: bool = False,
quote: Optional[bool] = None,
info: Optional[_InfoType] = None,
_table: Optional[Table] = None,
_column_flag: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`_schema.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`_schema.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`_schema.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
:param \**dialect_kw: Additional keyword arguments not mentioned above
are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
self.name = quoted_name.construct(name, quote)
self.unique = unique
if info is not None:
self.info = info
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if _table is not None:
table = _table
self._validate_dialect_kwargs(dialect_kw)
self.expressions = []
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self,
*expressions,
_column_flag=_column_flag,
_gather_expressions=self.expressions,
)
if table is not None:
self._set_parent(table)
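    # Usage sketch (illustrative, not from the original module): a unique,
    # expression-based index against a hypothetical table.
    #
    #   from sqlalchemy import Column, Index, MetaData, String, Table, func
    #
    #   metadata_obj = MetaData()
    #   user = Table("user_account", metadata_obj, Column("email", String(100)))
    #   Index("uq_user_email_lower", func.lower(user.c.email), unique=True)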
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
exprs = []
for expr, colexpr in zip(expressions, col_expressions):
if isinstance(expr, ClauseElement):
exprs.append(expr)
elif colexpr is not None:
exprs.append(colexpr)
else:
assert False
self.expressions = self._table_bound_expressions = exprs
def create(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given
        :class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def __repr__(self) -> str:
exprs: _typing_Sequence[Any] # noqa: F842
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
_AllConstraints = Union[
Index,
UniqueConstraint,
CheckConstraint,
ForeignKeyConstraint,
PrimaryKeyConstraint,
]
_NamingSchemaCallable = Callable[[_AllConstraints, Table], str]
class _NamingSchemaTD(TypedDict, total=False):
fk: Union[str, _NamingSchemaCallable]
pk: Union[str, _NamingSchemaCallable]
ix: Union[str, _NamingSchemaCallable]
ck: Union[str, _NamingSchemaCallable]
uq: Union[str, _NamingSchemaCallable]
_NamingSchemaParameter = Union[
_NamingSchemaTD,
Mapping[
Union[Type[_AllConstraints], str], Union[str, _NamingSchemaCallable]
],
]
DEFAULT_NAMING_CONVENTION: _NamingSchemaParameter = util.immutabledict(
{"ix": "ix_%(column_0_label)s"} # type: ignore[arg-type]
)
class MetaData(HasSchemaAttr):
"""A collection of :class:`_schema.Table`
objects and their associated schema
constructs.
Holds a collection of :class:`_schema.Table` objects as well as
an optional binding to an :class:`_engine.Engine` or
:class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`_schema.Table` objects themselves are stored in the
:attr:`_schema.MetaData.tables` dictionary.
:class:`_schema.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`_schema.MetaData`
object,
either explicitly or via reflection, may not be completely thread-safe.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "metadata"
def __init__(
self,
schema: Optional[str] = None,
quote_schema: Optional[bool] = None,
naming_convention: Optional[_NamingSchemaParameter] = None,
info: Optional[_InfoType] = None,
) -> None:
"""Create a new MetaData object.
:param schema:
The default schema to use for the :class:`_schema.Table`,
:class:`.Sequence`, and potentially other objects associated with
this :class:`_schema.MetaData`. Defaults to ``None``.
.. seealso::
:ref:`schema_metadata_schema_name` - details on how the
:paramref:`_schema.MetaData.schema` parameter is used.
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`_schema.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`_schema.Table`
object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the
:class:`_schema.Table`
object associated with the referencing target of a
:class:`_schema.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
at
index position "0" within the constraint.
* ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
objects in order within the constraint, joined without a
separator.
* ``%(column_0_N_name)s`` - the name of all
:class:`_schema.Column`
objects in order within the constraint, joined with an
underscore as a separator.
* ``%(column_0_label)s``, ``%(column_0N_label)s``,
``%(column_0_N_label)s`` - the label of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(column_0_key)s``, ``%(column_0N_key)s``,
``%(column_0_N_key)s`` - the key of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``
``%(referred_column_0_N_name)s``, ``%(referred_column_0_key)s``,
``%(referred_column_0N_key)s``, ... column tokens which
render the names/keys/labels of columns that are referenced
by a :class:`_schema.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
.. versionadded:: 1.3.0 - added new ``%(column_0N_name)s``,
``%(column_0_N_name)s``, and related tokens that produce
concatenations of names, keys, or labels for all columns referred
to by a given constraint.
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
"""
if schema is not None and not isinstance(schema, str):
raise exc.ArgumentError(
"expected schema argument to be a string, "
f"got {type(schema)}."
)
self.tables = util.FacadeDict()
self.schema = quoted_name.construct(schema, quote_schema)
self.naming_convention = (
naming_convention
if naming_convention
else DEFAULT_NAMING_CONVENTION
)
if info:
self.info = info
self._schemas: Set[str] = set()
self._sequences: Dict[str, Sequence] = {}
self._fk_memos: Dict[
Tuple[str, Optional[str]], List[ForeignKey]
] = collections.defaultdict(list)
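    # Usage sketch (illustrative, not from the original module): a naming
    # convention covering the documented mnemonics; the template strings are
    # illustrative choices, not requirements.
    #
    #   convention = {
    #       "ix": "ix_%(column_0_label)s",
    #       "uq": "uq_%(table_name)s_%(column_0_name)s",
    #       "ck": "ck_%(table_name)s_%(constraint_name)s",
    #       "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    #       "pk": "pk_%(table_name)s",
    #   }
    #   metadata_obj = MetaData(naming_convention=convention)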
tables: util.FacadeDict[str, Table]
"""A dictionary of :class:`_schema.Table`
objects keyed to their name or "table key".
The exact key is that determined by the :attr:`_schema.Table.key`
attribute;
for a table with no :attr:`_schema.Table.schema` attribute,
this is the same
as :attr:`_schema.Table.name`. For a table with a schema,
it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`_schema.MetaData.sorted_tables`
"""
def __repr__(self) -> str:
return "MetaData()"
def __contains__(self, table_or_key: Union[str, Table]) -> bool:
if not isinstance(table_or_key, str):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(
self, name: str, schema: Optional[str], table: Table
) -> None:
key = _get_table_key(name, schema)
self.tables._insert_item(key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name: str, schema: Optional[str]) -> None:
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None) # type: ignore
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = {
t.schema for t in self.tables.values() if t.schema is not None
}
def __getstate__(self) -> Dict[str, Any]:
return {
"tables": self.tables,
"schema": self.schema,
"schemas": self._schemas,
"sequences": self._sequences,
"fk_memos": self._fk_memos,
"naming_convention": self.naming_convention,
}
def __setstate__(self, state: Dict[str, Any]) -> None:
self.tables = state["tables"]
self.schema = state["schema"]
self.naming_convention = state["naming_convention"]
self._sequences = state["sequences"]
self._schemas = state["schemas"]
self._fk_memos = state["fk_memos"]
def clear(self) -> None:
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables) # type: ignore
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table: Table) -> None:
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self) -> List[Table]:
"""Returns a list of :class:`_schema.Table` objects sorted in order of
foreign key dependency.
        The sorting will place the :class:`_schema.Table` objects that other
        tables depend upon first, before the tables that depend on them,
        representing the order in which they can be created. To get the order
        in which the tables would be dropped, use the ``reversed()`` Python
        built-in.
.. warning::
The :attr:`.MetaData.sorted_tables` attribute cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
            A warning is emitted when this condition occurs, which will become
            an exception in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:attr:`.MetaData.sorted_tables` cannot perform a proper sort
due to cyclical dependencies. This will be an exception in a
future release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order which
was not the case previously.
.. seealso::
:func:`_schema.sort_tables`
:func:`_schema.sort_tables_and_constraints`
:attr:`_schema.MetaData.tables`
:meth:`_reflection.Inspector.get_table_names`
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(
sorted(self.tables.values(), key=lambda t: t.key) # type: ignore
)
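    # Usage sketch (illustrative, not from the original module): iterate in
    # creation order, or reverse the list for drop order, on a hypothetical
    # ``metadata_obj``.
    #
    #   for table in metadata_obj.sorted_tables:
    #       print("create:", table.name)
    #   for table in reversed(metadata_obj.sorted_tables):
    #       print("drop:", table.name)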
@util.preload_module("sqlalchemy.engine.reflection")
def reflect(
self,
bind: Union[Engine, Connection],
schema: Optional[str] = None,
views: bool = False,
only: Optional[_typing_Sequence[str]] = None,
extend_existing: bool = False,
autoload_replace: bool = True,
resolve_fks: bool = True,
**dialect_kwargs: Any,
) -> None:
r"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`_schema.MetaData`
is used, if any.
:param views:
If True, also reflect views (materialized and plain).
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
:param extend_existing: Passed along to each :class:`_schema.Table` as
:paramref:`_schema.Table.extend_existing`.
:param autoload_replace: Passed along to each :class:`_schema.Table`
as
:paramref:`_schema.Table.autoload_replace`.
:param resolve_fks: if True, reflect :class:`_schema.Table`
objects linked
to :class:`_schema.ForeignKey` objects located in each
:class:`_schema.Table`.
For :meth:`_schema.MetaData.reflect`,
this has the effect of reflecting
related tables that might otherwise not be in the list of tables
being reflected, for example if the referenced table is in a
different schema or is omitted via the
:paramref:`.MetaData.reflect.only` parameter. When False,
:class:`_schema.ForeignKey` objects are not followed to the
:class:`_schema.Table`
in which they link, however if the related table is also part of the
list of tables that would be reflected in any case, the
:class:`_schema.ForeignKey` object will still resolve to its related
:class:`_schema.Table` after the :meth:`_schema.MetaData.reflect`
operation is
complete. Defaults to True.
.. versionadded:: 1.3.0
.. seealso::
:paramref:`_schema.Table.resolve_fks`
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. seealso::
:ref:`metadata_reflection_toplevel`
:meth:`_events.DDLEvents.column_reflect` - Event used to customize
the reflected columns. Usually used to generalize the types using
:meth:`_types.TypeEngine.as_generic`
:ref:`metadata_reflection_dbagnostic_types` - describes how to
reflect tables using general types.
"""
with inspection.inspect(bind)._inspection_context() as insp:
reflect_opts: Any = {
"autoload_with": insp,
"extend_existing": extend_existing,
"autoload_replace": autoload_replace,
"resolve_fks": resolve_fks,
"_extend_on": set(),
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts["schema"] = schema
kind = util.preloaded.engine_reflection.ObjectKind.TABLE
available: util.OrderedSet[str] = util.OrderedSet(
insp.get_table_names(schema)
)
if views:
kind = util.preloaded.engine_reflection.ObjectKind.ANY
available.update(insp.get_view_names(schema))
try:
available.update(insp.get_materialized_view_names(schema))
except NotImplementedError:
pass
if schema is not None:
available_w_schema: util.OrderedSet[str] = util.OrderedSet(
[f"{schema}.{name}" for name in available]
)
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [
name
for name, schname in zip(available, available_w_schema)
if extend_existing or schname not in current
]
elif callable(only):
load = [
name
for name, schname in zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)
]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ""
raise exc.InvalidRequestError(
"Could not reflect: requested table(s) not available "
"in %r%s: (%s)" % (bind.engine, s, ", ".join(missing))
)
load = [
name
for name in only
if extend_existing or name not in current
]
# pass the available tables so the inspector can
# choose to ignore the filter_names
_reflect_info = insp._get_reflection_info(
schema=schema,
filter_names=load,
available=available,
kind=kind,
scope=util.preloaded.engine_reflection.ObjectScope.ANY,
**dialect_kwargs,
)
reflect_opts["_reflect_info"] = _reflect_info
for name in load:
try:
Table(name, self, **reflect_opts)
except exc.UnreflectableTableError as uerr:
util.warn("Skipping table %s: %s" % (name, uerr))
def create_all(
self,
bind: _CreateDropBind,
tables: Optional[_typing_Sequence[Table]] = None,
checkfirst: bool = True,
) -> None:
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
bind._run_ddl_visitor(
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
)
def drop_all(
self,
bind: _CreateDropBind,
tables: Optional[_typing_Sequence[Table]] = None,
checkfirst: bool = True,
) -> None:
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
bind._run_ddl_visitor(
ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables
)
class Computed(FetchedValue, SchemaItem):
"""Defines a generated column, i.e. "GENERATED ALWAYS AS" syntax.
The :class:`.Computed` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Computed
Table('square', metadata_obj,
Column('side', Float, nullable=False),
Column('area', Float, Computed('side * side'))
)
See the linked documentation below for complete details.
.. versionadded:: 1.3.11
.. seealso::
:ref:`computed_ddl`
"""
__visit_name__ = "computed_column"
column: Optional[Column[Any]]
@_document_text_coercion(
"sqltext", ":class:`.Computed`", ":paramref:`.Computed.sqltext`"
)
def __init__(
self, sqltext: _DDLColumnArgument, persisted: Optional[bool] = None
) -> None:
"""Construct a GENERATED ALWAYS AS DDL construct to accompany a
:class:`_schema.Column`.
:param sqltext:
A string containing the column generation expression, which will be
used verbatim, or a SQL expression construct, such as a
:func:`_expression.text`
object. If given as a string, the object is converted to a
:func:`_expression.text` object.
:param persisted:
Optional, controls how this column should be persisted by the
database. Possible values are:
* ``None``, the default, it will use the default persistence
defined by the database.
* ``True``, will render ``GENERATED ALWAYS AS ... STORED``, or the
equivalent for the target database if supported.
* ``False``, will render ``GENERATED ALWAYS AS ... VIRTUAL``, or
the equivalent for the target database if supported.
Specifying ``True`` or ``False`` may raise an error when the DDL
is emitted to the target database if the database does not support
that persistence option. Leaving this parameter at its default
of ``None`` is guaranteed to succeed for all databases that support
``GENERATED ALWAYS AS``.
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
self.persisted = persisted
self.column = None
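    # Usage sketch (illustrative, not from the original module): a stored
    # generated column on a hypothetical table.
    #
    #   from sqlalchemy import Column, Computed, Integer, MetaData, Table
    #
    #   metadata_obj = MetaData()
    #   square = Table(
    #       "square", metadata_obj,
    #       Column("side", Integer, nullable=False),
    #       Column("area", Integer, Computed("side * side", persisted=True)),
    #   )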
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if not isinstance(
parent.server_default, (type(None), Computed)
) or not isinstance(parent.server_onupdate, (type(None), Computed)):
raise exc.ArgumentError(
"A generated column cannot specify a server_default or a "
"server_onupdate argument"
)
self.column = parent
parent.computed = self
self.column.server_onupdate = self
self.column.server_default = self
def _as_for_update(self, for_update: bool) -> FetchedValue:
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Computed.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> Computed:
return self._copy(target_table=target_table, **kw)
def _copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> Computed:
sqltext = _copy_expression(
self.sqltext,
self.column.table if self.column is not None else None,
target_table,
)
g = Computed(sqltext, persisted=self.persisted)
return self._schema_item_copy(g)
class Identity(IdentityOptions, FetchedValue, SchemaItem):
"""Defines an identity column, i.e. "GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY" syntax.
The :class:`.Identity` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Identity
Table('foo', metadata_obj,
            Column('id', Integer, Identity()),
Column('description', Text),
)
See the linked documentation below for complete details.
.. versionadded:: 1.4
.. seealso::
:ref:`identity_ddl`
"""
__visit_name__ = "identity_column"
is_identity = True
def __init__(
self,
always: bool = False,
on_null: Optional[bool] = None,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
) -> None:
"""Construct a GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY DDL
construct to accompany a :class:`_schema.Column`.
See the :class:`.Sequence` documentation for a complete description
of most parameters.
.. note::
MSSQL supports this construct as the preferred alternative to
            generate an IDENTITY on a column, but it uses non-standard
            syntax that supports only :paramref:`_schema.Identity.start`
and :paramref:`_schema.Identity.increment`.
All other parameters are ignored.
:param always:
          A boolean that indicates the type of identity column.
If ``False`` is specified, the default, then the user-specified
value takes precedence.
If ``True`` is specified, a user-specified value is not accepted (
on some backends, like PostgreSQL, OVERRIDING SYSTEM VALUE, or
similar, may be specified in an INSERT to override the sequence
value).
Some backends also have a default value for this parameter,
``None`` can be used to omit rendering this part in the DDL. It
will be treated as ``False`` if a backend does not have a default
value.
:param on_null:
Set to ``True`` to specify ON NULL in conjunction with a
``always=False`` identity column. This option is only supported on
some backends, like Oracle.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if true, renders the
ORDER keyword.
"""
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.always = always
self.on_null = on_null
self.column = None
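    # Usage sketch (illustrative, not from the original module): a
    # GENERATED ALWAYS identity column with explicit start/increment values
    # on a hypothetical table.
    #
    #   from sqlalchemy import Column, Identity, Integer, MetaData, Table, Text
    #
    #   metadata_obj = MetaData()
    #   foo = Table(
    #       "foo", metadata_obj,
    #       Column("id", Integer, Identity(always=True, start=100, increment=10)),
    #       Column("description", Text),
    #   )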
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if not isinstance(
parent.server_default, (type(None), Identity)
) or not isinstance(parent.server_onupdate, type(None)):
raise exc.ArgumentError(
"A column with an Identity object cannot specify a "
"server_default or a server_onupdate argument"
)
if parent.autoincrement is False:
raise exc.ArgumentError(
"A column with an Identity object cannot specify "
"autoincrement=False"
)
self.column = parent
parent.identity = self
if parent._user_defined_nullable is NULL_UNSPECIFIED:
parent.nullable = False
parent.server_default = self
def _as_for_update(self, for_update: bool) -> FetchedValue:
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Identity.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Identity:
return self._copy(**kw)
def _copy(self, **kw: Any) -> Identity:
i = Identity(
always=self.always,
on_null=self.on_null,
start=self.start,
increment=self.increment,
minvalue=self.minvalue,
maxvalue=self.maxvalue,
nominvalue=self.nominvalue,
nomaxvalue=self.nomaxvalue,
cycle=self.cycle,
cache=self.cache,
order=self.order,
)
return self._schema_item_copy(i)
|
ccc085ca36fa5838f96149a7503e5546d5cd3b27
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-mrs/huaweicloudsdkmrs/v2/model/node_group_v2.py
|
db6ebf25b0994182ab69e25498608caa0505c6d1
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 15,679
|
py
|
node_group_v2.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NodeGroupV2:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'group_name': 'str',
'node_num': 'int',
'node_size': 'str',
'root_volume': 'Volume',
'data_volume': 'Volume',
'data_volume_count': 'int',
'charge_info': 'ChargeInfo',
'auto_scaling_policy': 'AutoScalingPolicy',
'assigned_roles': 'list[str]'
}
attribute_map = {
'group_name': 'group_name',
'node_num': 'node_num',
'node_size': 'node_size',
'root_volume': 'root_volume',
'data_volume': 'data_volume',
'data_volume_count': 'data_volume_count',
'charge_info': 'charge_info',
'auto_scaling_policy': 'auto_scaling_policy',
'assigned_roles': 'assigned_roles'
}
def __init__(self, group_name=None, node_num=None, node_size=None, root_volume=None, data_volume=None, data_volume_count=None, charge_info=None, auto_scaling_policy=None, assigned_roles=None):
"""NodeGroupV2
The model defined in huaweicloud sdk
        :param group_name: Node group name. Up to 64 characters; upper- and lower-case letters, digits and underscores ("_") are supported. Node group configuration rules: - master_node_default_group: Master node group, required for all cluster types. - core_node_analysis_group: analysis Core node group, required for analysis and hybrid clusters. - core_node_streaming_group: streaming Core node group, required for streaming and hybrid clusters. - task_node_analysis_group: analysis Task node group, optional for analysis and hybrid clusters. - task_node_streaming_group: streaming Task node group, optional for streaming and hybrid clusters. - node_group{x}: custom cluster node group; add as many as needed, up to 9 such groups.
        :type group_name: str
        :param node_num: Number of nodes, in the range 0-500. The total number of Core and Task nodes cannot exceed 500.
        :type node_num: int
        :param node_size: Instance flavor of the node, for example c3.4xlarge.2.linux.bigdata. For flavor details see [ECS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9006.html) and [BMS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9001.html). It is recommended to obtain the flavors supported by the target region and version from the cluster creation page of the MRS console.
        :type node_size: str
        :param root_volume:
        :type root_volume: :class:`huaweicloudsdkmrs.v2.Volume`
        :param data_volume:
        :type data_volume: :class:`huaweicloudsdkmrs.v2.Volume`
        :param data_volume_count: Number of data disks on the node, in the range 0-10.
        :type data_volume_count: int
        :param charge_info:
        :type charge_info: :class:`huaweicloudsdkmrs.v2.ChargeInfo`
        :param auto_scaling_policy:
        :type auto_scaling_policy: :class:`huaweicloudsdkmrs.v2.AutoScalingPolicy`
        :param assigned_roles: Mandatory when the cluster type is CUSTOM. Specifies the roles deployed in the node group as an array of strings, each string being a role expression. Role expression definitions: - role deployed on every node of the group: {role name}, e.g. "DataNode". - role deployed on specific nodes of the group: {role name}:{index1},{index2},...,{indexN}, e.g. "NameNode:1,2" (indexes start at 1). - some roles support multi-instance deployment (several instances of the same role on one node): {role name}[{instance count}], e.g. "EsNode[9]". For available roles see the [mapping between roles and components supported by MRS](https://support.huaweicloud.com/api-mrs/mrs_02_0106.html).
        :type assigned_roles: list[str]
"""
self._group_name = None
self._node_num = None
self._node_size = None
self._root_volume = None
self._data_volume = None
self._data_volume_count = None
self._charge_info = None
self._auto_scaling_policy = None
self._assigned_roles = None
self.discriminator = None
self.group_name = group_name
self.node_num = node_num
self.node_size = node_size
if root_volume is not None:
self.root_volume = root_volume
if data_volume is not None:
self.data_volume = data_volume
if data_volume_count is not None:
self.data_volume_count = data_volume_count
if charge_info is not None:
self.charge_info = charge_info
if auto_scaling_policy is not None:
self.auto_scaling_policy = auto_scaling_policy
if assigned_roles is not None:
self.assigned_roles = assigned_roles
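    # Usage sketch (illustrative, not from the original SDK file): constructing
    # a node group for a CUSTOM cluster. The flavor, volume fields and role
    # expressions are hypothetical values; in particular the ``Volume`` keyword
    # arguments are assumptions, not verified against the SDK model.
    #
    #   from huaweicloudsdkmrs.v2 import NodeGroupV2, Volume
    #
    #   group = NodeGroupV2(
    #       group_name="node_group_1",
    #       node_num=3,
    #       node_size="c3.4xlarge.2.linux.bigdata",
    #       root_volume=Volume(type="SAS", size=480),
    #       data_volume=Volume(type="SAS", size=600),
    #       data_volume_count=1,
    #       assigned_roles=["DataNode", "NodeManager", "NameNode:1,2"],
    #   )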
@property
def group_name(self):
"""Gets the group_name of this NodeGroupV2.
        Node group name. Up to 64 characters; upper- and lower-case letters, digits and underscores ("_") are supported. Node group configuration rules: - master_node_default_group: Master node group, required for all cluster types. - core_node_analysis_group: analysis Core node group, required for analysis and hybrid clusters. - core_node_streaming_group: streaming Core node group, required for streaming and hybrid clusters. - task_node_analysis_group: analysis Task node group, optional for analysis and hybrid clusters. - task_node_streaming_group: streaming Task node group, optional for streaming and hybrid clusters. - node_group{x}: custom cluster node group; add as many as needed, up to 9 such groups.
:return: The group_name of this NodeGroupV2.
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this NodeGroupV2.
        Node group name. Up to 64 characters; upper- and lower-case letters, digits and underscores ("_") are supported. Node group configuration rules: - master_node_default_group: Master node group, required for all cluster types. - core_node_analysis_group: analysis Core node group, required for analysis and hybrid clusters. - core_node_streaming_group: streaming Core node group, required for streaming and hybrid clusters. - task_node_analysis_group: analysis Task node group, optional for analysis and hybrid clusters. - task_node_streaming_group: streaming Task node group, optional for streaming and hybrid clusters. - node_group{x}: custom cluster node group; add as many as needed, up to 9 such groups.
:param group_name: The group_name of this NodeGroupV2.
:type group_name: str
"""
self._group_name = group_name
@property
def node_num(self):
"""Gets the node_num of this NodeGroupV2.
        Number of nodes, in the range 0-500. The total number of Core and Task nodes cannot exceed 500.
:return: The node_num of this NodeGroupV2.
:rtype: int
"""
return self._node_num
@node_num.setter
def node_num(self, node_num):
"""Sets the node_num of this NodeGroupV2.
        Number of nodes, in the range 0-500. The total number of Core and Task nodes cannot exceed 500.
:param node_num: The node_num of this NodeGroupV2.
:type node_num: int
"""
self._node_num = node_num
@property
def node_size(self):
"""Gets the node_size of this NodeGroupV2.
        Instance flavor of the node, for example c3.4xlarge.2.linux.bigdata. For flavor details see [ECS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9006.html) and [BMS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9001.html). It is recommended to obtain the flavors supported by the target region and version from the cluster creation page of the MRS console.
:return: The node_size of this NodeGroupV2.
:rtype: str
"""
return self._node_size
@node_size.setter
def node_size(self, node_size):
"""Sets the node_size of this NodeGroupV2.
        Instance flavor of the node, for example c3.4xlarge.2.linux.bigdata. For flavor details see [ECS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9006.html) and [BMS specifications used by MRS](https://support.huaweicloud.com/api-mrs/mrs_01_9001.html). It is recommended to obtain the flavors supported by the target region and version from the cluster creation page of the MRS console.
:param node_size: The node_size of this NodeGroupV2.
:type node_size: str
"""
self._node_size = node_size
@property
def root_volume(self):
"""Gets the root_volume of this NodeGroupV2.
:return: The root_volume of this NodeGroupV2.
:rtype: :class:`huaweicloudsdkmrs.v2.Volume`
"""
return self._root_volume
@root_volume.setter
def root_volume(self, root_volume):
"""Sets the root_volume of this NodeGroupV2.
:param root_volume: The root_volume of this NodeGroupV2.
:type root_volume: :class:`huaweicloudsdkmrs.v2.Volume`
"""
self._root_volume = root_volume
@property
def data_volume(self):
"""Gets the data_volume of this NodeGroupV2.
:return: The data_volume of this NodeGroupV2.
:rtype: :class:`huaweicloudsdkmrs.v2.Volume`
"""
return self._data_volume
@data_volume.setter
def data_volume(self, data_volume):
"""Sets the data_volume of this NodeGroupV2.
:param data_volume: The data_volume of this NodeGroupV2.
:type data_volume: :class:`huaweicloudsdkmrs.v2.Volume`
"""
self._data_volume = data_volume
@property
def data_volume_count(self):
"""Gets the data_volume_count of this NodeGroupV2.
        Number of data disks on the node, in the range 0-10.
:return: The data_volume_count of this NodeGroupV2.
:rtype: int
"""
return self._data_volume_count
@data_volume_count.setter
def data_volume_count(self, data_volume_count):
"""Sets the data_volume_count of this NodeGroupV2.
        Number of data disks on the node, in the range 0-10.
:param data_volume_count: The data_volume_count of this NodeGroupV2.
:type data_volume_count: int
"""
self._data_volume_count = data_volume_count
@property
def charge_info(self):
"""Gets the charge_info of this NodeGroupV2.
:return: The charge_info of this NodeGroupV2.
:rtype: :class:`huaweicloudsdkmrs.v2.ChargeInfo`
"""
return self._charge_info
@charge_info.setter
def charge_info(self, charge_info):
"""Sets the charge_info of this NodeGroupV2.
:param charge_info: The charge_info of this NodeGroupV2.
:type charge_info: :class:`huaweicloudsdkmrs.v2.ChargeInfo`
"""
self._charge_info = charge_info
@property
def auto_scaling_policy(self):
"""Gets the auto_scaling_policy of this NodeGroupV2.
:return: The auto_scaling_policy of this NodeGroupV2.
:rtype: :class:`huaweicloudsdkmrs.v2.AutoScalingPolicy`
"""
return self._auto_scaling_policy
@auto_scaling_policy.setter
def auto_scaling_policy(self, auto_scaling_policy):
"""Sets the auto_scaling_policy of this NodeGroupV2.
:param auto_scaling_policy: The auto_scaling_policy of this NodeGroupV2.
:type auto_scaling_policy: :class:`huaweicloudsdkmrs.v2.AutoScalingPolicy`
"""
self._auto_scaling_policy = auto_scaling_policy
@property
def assigned_roles(self):
"""Gets the assigned_roles of this NodeGroupV2.
        Mandatory when the cluster type is CUSTOM. Specifies the roles deployed in the node group as an array of strings, each string being a role expression. Role expression definitions: - role deployed on every node of the group: {role name}, e.g. "DataNode". - role deployed on specific nodes of the group: {role name}:{index1},{index2},...,{indexN}, e.g. "NameNode:1,2" (indexes start at 1). - some roles support multi-instance deployment (several instances of the same role on one node): {role name}[{instance count}], e.g. "EsNode[9]". For available roles see the [mapping between roles and components supported by MRS](https://support.huaweicloud.com/api-mrs/mrs_02_0106.html).
:return: The assigned_roles of this NodeGroupV2.
:rtype: list[str]
"""
return self._assigned_roles
@assigned_roles.setter
def assigned_roles(self, assigned_roles):
"""Sets the assigned_roles of this NodeGroupV2.
        Mandatory when the cluster type is CUSTOM. Specifies the roles deployed in the node group as an array of strings, each string being a role expression. Role expression definitions: - role deployed on every node of the group: {role name}, e.g. "DataNode". - role deployed on specific nodes of the group: {role name}:{index1},{index2},...,{indexN}, e.g. "NameNode:1,2" (indexes start at 1). - some roles support multi-instance deployment (several instances of the same role on one node): {role name}[{instance count}], e.g. "EsNode[9]". For available roles see the [mapping between roles and components supported by MRS](https://support.huaweicloud.com/api-mrs/mrs_02_0106.html).
:param assigned_roles: The assigned_roles of this NodeGroupV2.
:type assigned_roles: list[str]
"""
self._assigned_roles = assigned_roles
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NodeGroupV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
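# Illustrative sketch (not part of the generated SDK model): example values for
# `assigned_roles`, using the role-expression syntax described in its docstring
# above; all three expressions are taken from that docstring.
_example_assigned_roles = [
    "DataNode",      # role deployed on every node in the group
    "NameNode:1,2",  # role deployed only on nodes 1 and 2 (indexes are 1-based)
    "EsNode[9]",     # 9 instances of the role on each node
]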
|
f615574c8b534e5a0ea72fd46c6829a3955062c9
|
de499dbec27ff53d8163b816cafcd62b3a00d438
|
/psql_database_helper.py
|
3dd1524d8cd50127d2dc782ca5a722951771e59d
|
[
"MIT"
] |
permissive
|
TonicAI/condenser
|
0f93720bade91ca22cfd6790598b842fcb9b1475
|
14ff40774ce7f6a3a6167df4cac64b10490c5487
|
refs/heads/master
| 2023-05-26T11:04:18.590206
| 2023-05-18T14:48:45
| 2023-05-18T14:48:45
| 143,047,034
| 285
| 46
|
MIT
| 2023-05-18T14:48:47
| 2018-07-31T17:39:50
|
Python
|
UTF-8
|
Python
| false
| false
| 8,903
|
py
|
psql_database_helper.py
|
import os, uuid, csv
import config_reader
from pathlib import Path
from psycopg2.extras import execute_values, register_default_json, register_default_jsonb
from subset_utils import columns_joined, columns_tupled, schema_name, table_name, fully_qualified_table, redact_relationships, quoter
register_default_json(loads=lambda x: str(x))
register_default_jsonb(loads=lambda x: str(x))
def prep_temp_dbs(_, __):
pass
def unprep_temp_dbs(_, __):
pass
def turn_off_constraints(connection):
# can't be done in postgres
pass
def copy_rows(source, destination, query, destination_table):
datatypes = get_table_datatypes(table_name(destination_table), schema_name(destination_table), destination)
non_generated_columns = [(dt[0], dt[1]) for i, dt in enumerate(datatypes) if dt[2] != 's']
generated_columns_positions = [i for i, dt in enumerate(datatypes) if 's' in dt[2]]
always_generated_id = any([dt[3] == 'a' for dt in datatypes])
def template_piece(dt):
if dt == '_json':
return '%s::json[]'
elif dt == '_jsonb':
return '%s::jsonb[]'
else:
return '%s'
template = '(' + ','.join([template_piece(dt[1]) for dt in non_generated_columns]) + ')'
columns = '("' + '","'.join([dt[0] for dt in non_generated_columns]) + '")'
cursor_name='table_cursor_'+str(uuid.uuid4()).replace('-','')
cursor = source.cursor(name=cursor_name)
cursor.execute(query)
fetch_row_count = 100000
while True:
rows = cursor.fetchmany(fetch_row_count)
if len(rows) == 0:
break
# using the inner_cursor means we don't log all the noise
destination_cursor = destination.cursor().inner_cursor
insert_query = 'INSERT INTO {} {} VALUES %s'.format(fully_qualified_table(destination_table), columns)
if (always_generated_id):
insert_query = 'INSERT INTO {} {} OVERRIDING SYSTEM VALUE VALUES %s'.format(fully_qualified_table(destination_table), columns)
updated_rows = [tuple(val for i, val in enumerate(row) if i not in generated_columns_positions) for row in rows]
execute_values(destination_cursor, insert_query, updated_rows, template)
destination_cursor.close()
cursor.close()
destination.commit()
def source_db_temp_table(target_table):
return 'tonic_subset_' + schema_name(target_table) + '_' + table_name(target_table)
def create_id_temp_table(conn, number_of_columns):
table_name = 'tonic_subset_' + str(uuid.uuid4())
cursor = conn.cursor()
column_defs = ',\n'.join([' col' + str(aye) + ' varchar' for aye in range(number_of_columns)])
q = 'CREATE TEMPORARY TABLE "{}" (\n {} \n)'.format(table_name, column_defs)
cursor.execute(q)
cursor.close()
return table_name
def copy_to_temp_table(conn, query, target_table, pk_columns = None):
temp_table = fully_qualified_table(source_db_temp_table(target_table))
with conn.cursor() as cur:
cur.execute('CREATE TEMPORARY TABLE IF NOT EXISTS ' + temp_table + ' AS ' + query + ' LIMIT 0')
if pk_columns:
query = query + ' WHERE {} NOT IN (SELECT {} FROM {})'.format(columns_tupled(pk_columns), columns_joined(pk_columns), temp_table)
cur.execute('INSERT INTO ' + temp_table + ' ' + query)
conn.commit()
def clean_temp_table_cells(fk_table, fk_columns, target_table, target_columns, conn):
fk_alias = 'tonic_subset_398dhjr23_fk'
target_alias = 'tonic_subset_398dhjr23_target'
fk_table = fully_qualified_table(source_db_temp_table(fk_table))
target_table = fully_qualified_table(source_db_temp_table(target_table))
assignment_list = ','.join(['{} = NULL'.format(quoter(c)) for c in fk_columns])
column_matching = ' AND '.join(['{}.{} = {}.{}'.format(fk_alias, quoter(fc), target_alias, quoter(tc)) for fc, tc in zip(fk_columns, target_columns)])
q = 'UPDATE {} {} SET {} WHERE NOT EXISTS (SELECT 1 FROM {} {} WHERE {})'.format(fk_table, fk_alias, assignment_list, target_table, target_alias, column_matching)
run_query(q, conn)
def get_redacted_table_references(table_name, tables, conn):
relationships = get_unredacted_fk_relationships(tables, conn)
redacted = redact_relationships(relationships)
return [r for r in redacted if r['target_table']==table_name]
def get_unredacted_fk_relationships(tables, conn):
cur = conn.cursor()
q = '''
SELECT fk_nsp.nspname || '.' || fk_table AS fk_table, array_agg(fk_att.attname ORDER BY fk_att.attnum) AS fk_columns, tar_nsp.nspname || '.' || target_table AS target_table, array_agg(tar_att.attname ORDER BY fk_att.attnum) AS target_columns
FROM (
SELECT
fk.oid AS fk_table_id,
fk.relnamespace AS fk_schema_id,
fk.relname AS fk_table,
unnest(con.conkey) as fk_column_id,
tar.oid AS target_table_id,
tar.relnamespace AS target_schema_id,
tar.relname AS target_table,
unnest(con.confkey) as target_column_id,
con.connamespace AS constraint_nsp,
con.conname AS constraint_name
FROM pg_constraint con
JOIN pg_class fk ON con.conrelid = fk.oid
JOIN pg_class tar ON con.confrelid = tar.oid
WHERE con.contype = 'f'
) sub
JOIN pg_attribute fk_att ON fk_att.attrelid = fk_table_id AND fk_att.attnum = fk_column_id
JOIN pg_attribute tar_att ON tar_att.attrelid = target_table_id AND tar_att.attnum = target_column_id
JOIN pg_namespace fk_nsp ON fk_schema_id = fk_nsp.oid
JOIN pg_namespace tar_nsp ON target_schema_id = tar_nsp.oid
GROUP BY 1, 3, sub.constraint_nsp, sub.constraint_name;
'''
cur.execute(q)
relationships = list()
for row in cur.fetchall():
d = dict()
d['fk_table'] = row[0]
d['fk_columns'] = row[1]
d['target_table'] = row[2]
d['target_columns'] = row[3]
if d['fk_table'] in tables and d['target_table'] in tables:
relationships.append( d )
cur.close()
for augment in config_reader.get_fk_augmentation():
not_present = True
for r in relationships:
not_present = not_present and not all([r[key] == augment[key] for key in r.keys()])
if not not_present:
break
if augment['fk_table'] in tables and augment['target_table'] in tables and not_present:
relationships.append(augment)
return relationships
def run_query(query, conn, commit=True):
with conn.cursor() as cur:
cur.execute(query)
if commit:
conn.commit()
def get_table_count_estimate(table_name, schema, conn):
with conn.cursor() as cur:
cur.execute('SELECT reltuples::BIGINT AS count FROM pg_class WHERE oid=\'"{}"."{}"\'::regclass'.format(schema, table_name))
return cur.fetchone()[0]
def get_table_columns(table, schema, conn):
with conn.cursor() as cur:
cur.execute('SELECT attname FROM pg_attribute WHERE attrelid=\'"{}"."{}"\'::regclass AND attnum > 0 AND NOT attisdropped ORDER BY attnum;'.format(schema, table))
return [r[0] for r in cur.fetchall()]
def list_all_user_schemas(conn):
with conn.cursor() as cur:
cur.execute("SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname NOT LIKE 'pg\_%' and nspname != 'information_schema';")
return [r[0] for r in cur.fetchall()]
def list_all_tables(db_connect):
conn = db_connect.get_db_connection()
with conn.cursor() as cur:
cur.execute("""SELECT concat(concat(nsp.nspname,'.'),cls.relname)
FROM pg_class cls
JOIN pg_namespace nsp ON nsp.oid = cls.relnamespace
WHERE nsp.nspname NOT IN ('information_schema', 'pg_catalog') AND cls.relkind = 'r';""")
return [r[0] for r in cur.fetchall()]
def get_table_datatypes(table, schema, conn):
if not schema:
table_clause = "cl.relname = '{}'".format(table)
else:
table_clause = "cl.relname = '{}' AND ns.nspname = '{}'".format(table, schema)
with conn.cursor() as cur:
cur.execute("""SELECT att.attname, ty.typname, att.attgenerated, att.attidentity
FROM pg_attribute att
JOIN pg_class cl ON cl.oid = att.attrelid
JOIN pg_type ty ON ty.oid = att.atttypid
JOIN pg_namespace ns ON ns.oid = cl.relnamespace
WHERE {} AND att.attnum > 0 AND
NOT att.attisdropped
ORDER BY att.attnum;
""".format(table_clause))
return [(r[0], r[1], r[2], r[3]) for r in cur.fetchall()]
def truncate_table(target_table, conn):
with conn.cursor() as cur:
cur.execute("TRUNCATE TABLE {}".format(target_table))
conn.commit()
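# Illustrative sketch (not part of the helper): the shape of one relationship
# record built by get_unredacted_fk_relationships(). Table names are
# schema-qualified and the two column lists are aligned pairwise; the concrete
# table and column names below are hypothetical.
_example_relationship = {
    "fk_table": "public.orders",
    "fk_columns": ["customer_id"],
    "target_table": "public.customers",
    "target_columns": ["id"],
}
# get_redacted_table_references() filters such records (after redaction via
# redact_relationships) down to those whose target_table matches the table
# being subset.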
|
44963c83618614e1a82e508572dbd79f00596ed8
|
cd954f06232e3b9fe008f9a6291689e75f179a88
|
/GPLT_Python/L1-035.py
|
8c8d8a5f94ee052fe38fef92f557d8dfd28e6ba8
|
[
"MIT"
] |
permissive
|
upupming/algorithm
|
35446f4b15f3a505041ac65c1dc6f825951d8e99
|
a3807ba05960b9025e55d668ef95b3375ae71895
|
refs/heads/master
| 2023-08-09T03:07:18.047084
| 2023-08-01T05:57:13
| 2023-08-01T05:57:13
| 217,478,998
| 239
| 34
|
MIT
| 2021-08-13T05:42:26
| 2019-10-25T07:41:19
|
C++
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
L1-035.py
|
a = []
while True:
N = input()
if N != '.':
a.append(N)
else:
break
if len(a) < 2:
print('Momo... No one is for you ...')
elif 2 <= len(a) < 14:
print(a[1] + ' is the only one for you...')
else:
print(a[1] + ' and ' + a[13] + ' are inviting you to dinner...')
|
3d93c847ea5cd9d3ee089cf12c1d3a49e4b1e531
|
16fcf54e753704a08888a235419ef19fcb49b793
|
/Widgets/Skins/PreviewWidget.py
|
00e24dbbd5655575a9781665a753f29eac0d752f
|
[] |
no_license
|
PyQt5/PyQtClient
|
08ca93f7af79c201fbb3d232a6063741f900a019
|
f86e4e5038f9d9b1626c0a25f3aa59d33c3e4393
|
refs/heads/master
| 2022-09-03T21:43:42.371070
| 2022-09-01T19:20:44
| 2022-09-01T19:20:44
| 120,252,826
| 285
| 97
| null | 2019-02-02T09:53:59
| 2018-02-05T04:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,908
|
py
|
PreviewWidget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Jan 30, 2019
@author: Irony
@site: https://pyqt.site https://github.com/PyQt5
@email: 892768447@qq.com
@file: Widgets.Skins.PreviewWidget
@description: Theme preview
"""
import os
from PyQt5.QtCore import Qt, QTimer, pyqtSlot
from PyQt5.QtGui import QColor, QPixmap
from PyQt5.QtWidgets import QGraphicsDropShadowEffect, QWidget
from UiFiles.Ui_MainWindow import Ui_FormMainWindow
from UiFiles.Ui_PreviewWidget import Ui_FormPreviewWidget
from Utils.CommonUtil import Setting
from Utils.GradientUtils import GradientUtils
from Utils.ThemeManager import ThemeManager
__Author__ = 'Irony'
__Copyright__ = 'Copyright (c) 2019'
class PreviewWidget(QWidget, Ui_FormPreviewWidget):
Theme = 0
Color = 1
Picture = 2
def __init__(self, *args, **kwargs):
super(PreviewWidget, self).__init__(*args, **kwargs)
self.setupUi(self)
self.setAttribute(Qt.WA_StyledBackground, True)  # enable stylesheet support
# Drop-shadow effect around the preview image
effect = QGraphicsDropShadowEffect(self.labelPreviewImage)
effect.setBlurRadius(40)
effect.setOffset(0, 0)
effect.setColor(Qt.gray)
self.labelPreviewImage.setGraphicsEffect(effect)
# Cursor styles
ThemeManager.loadCursor(self, ThemeManager.CursorDefault)
ThemeManager.loadCursor(self.buttonPreviewApply,
ThemeManager.CursorPointer)
ThemeManager.loadCursor(self.buttonPreviewClose,
ThemeManager.CursorPointer)
ThemeManager.loadCursor(self.buttonPreviewNext,
ThemeManager.CursorPointer)
ThemeManager.loadCursor(self.buttonPreviewPrevious,
ThemeManager.CursorPointer)
def setTitle(self, title):
"""设置标题
:param title:
"""
self.labelPreviewTitle.setText(title)
self.setWindowTitle(title)
def setPixmap(self, which, poc):
"""设置图片
:param which: Theme = 0,Color = 1,Picture = 2
:param poc: color or path
"""
self._which = which
self._poc = poc
if not hasattr(self, '_UiMainWindow'):
# Create a hidden main window used for rendering the preview
self._UiMainWindow = QWidget()
ui = Ui_FormMainWindow()
ui.setupUi(self._UiMainWindow)
# Rename the widget so it is not affected by the app-level stylesheet
ui.widgetMain.setObjectName('widgetMain1')
self._UiMainWindow.setAttribute(Qt.WA_TranslucentBackground, True)
self._UiMainWindow.setWindowFlags(self.windowFlags() |
Qt.FramelessWindowHint)
self._UiMainWindow.hide()
if which == self.Theme:
self.labelPreviewImage.setPixmap(
QPixmap(poc).scaledToWidth(400, Qt.SmoothTransformation))
return
elif which == self.Color:
ThemeManager.loadColourfulTheme(poc, self._UiMainWindow,
{'widgetMain': 'widgetMain1'})
elif which == self.Picture:
ThemeManager.loadPictureTheme(poc, self._UiMainWindow,
{'widgetMain': 'widgetMain1'})
# Take a screenshot of the hidden window
# The delay is needed because the UI may not have refreshed yet right after the style is applied
self._UiMainWindow.repaint()
QTimer.singleShot(100, self._updatePixmap)
def _updatePixmap(self):
poc = self._UiMainWindow.grab().scaledToWidth(400,
Qt.SmoothTransformation)
self.labelPreviewImage.setPixmap(poc)
@pyqtSlot()
def on_buttonPreviewClose_clicked(self):
"""隐藏自己
"""
self.setVisible(False)
@pyqtSlot()
def on_buttonPreviewApply_clicked(self):
"""设置主题
"""
if self._which == self.Theme:
ThemeManager.loadUserTheme(
os.path.basename(os.path.dirname(self._poc)))
Setting.setValue('picture', None)
Setting.setValue('colourful', None)
elif self._which == self.Color:
ThemeManager.loadColourfulTheme(self._poc)
if isinstance(self._poc, QColor):
Setting.setValue('colourful', self._poc)
else:
# Gradients need to be converted to dict data
Setting.setValue('colourful', GradientUtils.toJson(self._poc))
Setting.setValue('picture', None)
elif self._which == self.Picture:
ThemeManager.loadPictureTheme(self._poc)
Setting.setValue('colourful', None)
Setting.setValue('picture', self._poc.replace('\\', '/'))
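# Illustrative usage sketch (not part of the original module): drive the
# preview widget stand-alone. It assumes the repository's UI files and theme
# assets are available; the colour value is hypothetical.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    preview = PreviewWidget()
    preview.setTitle('Colour preview')
    # Preview a solid colour (which=Color); a str path would use which=Picture.
    preview.setPixmap(PreviewWidget.Color, QColor(53, 120, 200))
    preview.show()
    sys.exit(app.exec_())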
|
58a4c108d3bd0fc36fb5531675f408633450e25a
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/android/junit/src/org/chromium/chrome/browser/contextmenu/DEPS
|
78b9a20ec80974b771dcfe8fdec0493cac43a0a2
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 112
|
DEPS
|
include_rules = [
"+components/embedder_support/android/java/src/org/chromium/components/embedder_support",
]
|
|
0c8e7e8414a3bf3e232f16ea6fac8a30fdd2ca65
|
3c6b36eb1f4f9760c52903f6d0ec4a501f948c90
|
/osp/test/citations/jstor_record/test_surname.py
|
cbd01a29611b86b68df4fc84af0d59d1fff4f1d3
|
[
"Apache-2.0"
] |
permissive
|
davidmcclure/open-syllabus-project
|
38444249af845013e3f281a7a713dca83159c56e
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
refs/heads/master
| 2021-06-30T21:47:07.636558
| 2021-06-27T15:15:35
| 2021-06-27T15:15:35
| 50,152,020
| 220
| 14
|
Apache-2.0
| 2021-06-27T15:11:15
| 2016-01-22T02:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
test_surname.py
|
import pytest
from osp.citations.jstor_record import JSTOR_Record
@pytest.mark.parametrize('inputs,surname', [
# Single author.
(
[
('David', 'McClure'),
],
'McClure'
),
# Multiple authors.
(
[
('David', 'McClure'),
('Joe', 'Karaganis'),
('Dennis', 'Tenen'),
],
'McClure'
),
# No authors.
(
[],
None
),
])
def test_surname(inputs, surname, mock_jstor):
path = mock_jstor.add_article(author=inputs)
assert JSTOR_Record(path).surname == surname
|
6e7356dbfcdeac3dcb5e897f5031dbf849fcef60
|
c5fd80ede07f0972a9b99d0c65a0df40e6d487fa
|
/pyocd/target/builtin/target_MAX32660.py
|
60585b456911b092109083c94fb288a84fdd0908
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pyocd/pyOCD
|
46330f3a10c9be381293d220cc025e0e347513ce
|
9253740baf46ebf4eacbce6bf3369150c5fb8ee0
|
refs/heads/main
| 2023-08-18T07:56:54.205305
| 2023-08-13T19:11:01
| 2023-08-13T19:11:01
| 13,862,423
| 507
| 204
|
Apache-2.0
| 2023-09-09T20:13:57
| 2013-10-25T14:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,036
|
py
|
target_MAX32660.py
|
# pyOCD debugger
# Copyright (c) 2017-2021 Maxim Integrated (Part of Analog Devices)
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO = {
'load_address' : 0x20000000,
# Flash algorithm as a hex string
'instructions': [
0xe00abe00,
0x20604989, 0x6a4a6048, 0x44484888, 0x22006082, 0x688a624a, 0x68426042, 0x4270f022, 0x68426042,
0x5200f042, 0x68406042, 0x47706088, 0x688a497e, 0x4448487e, 0x68426042, 0x4270f022, 0x68426042,
0x6880608a, 0x47706248, 0x47702000, 0x47702000, 0xf7ffb500, 0x4b74ffd5, 0x48746899, 0x60414448,
0xf4216841, 0x6041417f, 0xf4416841, 0x6041412a, 0x60996841, 0xf0416841, 0x60410102, 0x60986840,
0x01c06898, 0xf7ffd4fc, 0x6a58ffd1, 0xf04f0780, 0xd5010000, 0x20016258, 0xb500bd00, 0x035b0b43,
0xffaef7ff, 0x600b4960, 0x4860688a, 0x60424448, 0xf4226842, 0x6042427f, 0xf4426842, 0x604242aa,
0x608a6842, 0xf0426842, 0x60420204, 0x60886840, 0x01c06888, 0x6a48d4fc, 0xd5050780, 0x62482000,
0xffa4f7ff, 0xbd002001, 0xffa0f7ff, 0xbd002000, 0x4613b5f8, 0x4605460c, 0xff82f7ff, 0x6881484a,
0x444a4a4a, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041, 0x68516051, 0xe00e6081, 0x68196005,
0x68516301, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x1f241d1b, 0x2c041d2d, 0x06e9d301,
0x6811d1ec, 0xd1202980, 0xd31e2c10, 0x60516881, 0xf0216851, 0x60510110, 0x60816851, 0x68196005,
0x68596301, 0x68996341, 0x68d96381, 0x685163c1, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9,
0x3c103310, 0x2c103510, 0x2c04d2e8, 0x6881d31c, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041,
0x68516051, 0x60056081, 0x63016819, 0xf0416851, 0x60510101, 0x60816851, 0x01c96881, 0x1d1bd4fc,
0x1d2d1f24, 0xd2ee2c04, 0xa119b314, 0x91006809, 0x21006886, 0x68566056, 0x6600f026, 0x68566056,
0x0610f046, 0x68566056, 0x466e6086, 0x7b01f813, 0x1c495477, 0xd1f91e64, 0x99006005, 0x68516301,
0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x07806a40, 0xf7ffd503, 0x2001ff09, 0xf7ffbdf8,
0x2000ff05, 0x0000bdf8, 0x40029000, 0x00000004, 0xffffffff, 0x00000000, 0x00000020, 0x00000000,
0x00000000
],
# Relative function addresses
'pc_init': 0x2000004d,
'pc_unInit': 0x20000051,
'pc_program_page': 0x200000f5,
'pc_erase_sector': 0x2000009f,
'pc_eraseAll': 0x20000055,
'static_base' : 0x20000000 + 0x00000004 + 0x00000234,
'begin_stack' : 0x20000448,
'begin_data' : 0x20000000 + 0x1000,
'page_size' : 0x400,
'analyzer_supported' : False,
'analyzer_address' : 0x00000000,
'page_buffers' : [0x20001000, 0x20002000], # Enable double buffering
'min_program_length' : 0x400,
# Relative region addresses and sizes
'ro_start': 0x0,
'ro_size': 0x234,
'rw_start': 0x234,
'rw_size': 0x10,
'zi_start': 0x244,
'zi_size': 0x0,
# Flash information
'flash_start': 0x0,
'flash_size': 0x40000,
'sector_sizes': (
(0x0, 0x2000),
)
}
class MAX32660(CoreSightTarget):
VENDOR = "Maxim"
MEMORY_MAP = MemoryMap(
FlashRegion( start=0, length=0x40000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
RamRegion( start=0x20000000, length=0x18000),
)
def __init__(self, session):
super().__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("max32660.svd")
|
2f41476b06c06442d68a3259b59e4454c6bee2b2
|
187414dcb264fb49d82507a099fd5fdca6e55e38
|
/python/pyspark/mllib/tests/test_util.py
|
28a53af0aa8ae8558047d0c57bde5798bd150347
|
[
"BSD-3-Clause",
"CC0-1.0",
"CDDL-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/spark
|
8aeba2d80465a262acc95781ede105a5b5886f6d
|
60d8fc49bec5dae1b8cf39a0670cb640b430f520
|
refs/heads/master
| 2023-09-04T04:33:36.058199
| 2023-09-04T03:48:52
| 2023-09-04T03:48:52
| 17,165,658
| 39,983
| 32,449
|
Apache-2.0
| 2023-09-14T19:46:24
| 2014-02-25T08:00:08
|
Scala
|
UTF-8
|
Python
| false
| false
| 3,929
|
py
|
test_util.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import unittest
from pyspark.mllib.common import _to_java_object_rdd
from pyspark.mllib.util import LinearDataGenerator
from pyspark.mllib.util import MLUtils
from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors
from pyspark.mllib.random import RandomRDDs
from pyspark.testing.mllibutils import MLlibTestCase
class MLUtilsTests(MLlibTestCase):
def test_append_bias(self):
data = [2.0, 2.0, 2.0]
ret = MLUtils.appendBias(data)
self.assertEqual(ret[3], 1.0)
self.assertEqual(type(ret), DenseVector)
def test_append_bias_with_vector(self):
data = Vectors.dense([2.0, 2.0, 2.0])
ret = MLUtils.appendBias(data)
self.assertEqual(ret[3], 1.0)
self.assertEqual(type(ret), DenseVector)
def test_append_bias_with_sp_vector(self):
data = Vectors.sparse(3, {0: 2.0, 2: 2.0})
expected = Vectors.sparse(4, {0: 2.0, 2: 2.0, 3: 1.0})
# Returned value must be SparseVector
ret = MLUtils.appendBias(data)
self.assertEqual(ret, expected)
self.assertEqual(type(ret), SparseVector)
def test_load_vectors(self):
import shutil
data = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
temp_dir = tempfile.mkdtemp()
load_vectors_path = os.path.join(temp_dir, "test_load_vectors")
try:
self.sc.parallelize(data).saveAsTextFile(load_vectors_path)
ret_rdd = MLUtils.loadVectors(self.sc, load_vectors_path)
ret = ret_rdd.collect()
self.assertEqual(len(ret), 2)
self.assertEqual(ret[0], DenseVector([1.0, 2.0, 3.0]))
self.assertEqual(ret[1], DenseVector([1.0, 2.0, 3.0]))
except BaseException:
self.fail()
finally:
shutil.rmtree(load_vectors_path)
class LinearDataGeneratorTests(MLlibTestCase):
def test_dim(self):
linear_data = LinearDataGenerator.generateLinearInput(
intercept=0.0,
weights=[0.0, 0.0, 0.0],
xMean=[0.0, 0.0, 0.0],
xVariance=[0.33, 0.33, 0.33],
nPoints=4,
seed=0,
eps=0.1,
)
self.assertEqual(len(linear_data), 4)
for point in linear_data:
self.assertEqual(len(point.features), 3)
linear_data = LinearDataGenerator.generateLinearRDD(
sc=self.sc, nexamples=6, nfeatures=2, eps=0.1, nParts=2, intercept=0.0
).collect()
self.assertEqual(len(linear_data), 6)
for point in linear_data:
self.assertEqual(len(point.features), 2)
class SerDeTest(MLlibTestCase):
def test_to_java_object_rdd(self): # SPARK-6660
data = RandomRDDs.uniformRDD(self.sc, 10, 5, seed=0)
self.assertEqual(_to_java_object_rdd(data).count(), 10)
if __name__ == "__main__":
from pyspark.mllib.tests.test_util import * # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
79e35eafa3d12bf27d1381346345636be664833e
|
8cc3498e311d15c9a4394aaa341ef489b482dbe6
|
/test/language/expressions/python/StringTypeTest.py
|
215bf470a8bb924c20b91768fed07a1c79c87224
|
[
"BSD-3-Clause"
] |
permissive
|
ndsev/zserio
|
3e55c064f72e86219a6da297f116d3dbb565a9a9
|
c540c4a97fee4e08bfc6669a2cec0d2b8282d8f6
|
refs/heads/master
| 2023-08-24T14:56:10.750155
| 2023-08-11T19:36:54
| 2023-08-11T19:36:54
| 141,550,444
| 113
| 23
|
BSD-3-Clause
| 2023-08-30T11:14:47
| 2018-07-19T08:44:23
|
Java
|
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
StringTypeTest.py
|
import unittest
from testutils import getZserioApi
class StringTypeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "expressions.zs").string_type
def testReturnValue(self):
stringTypeExpression = self._createStringTypeExpression(True)
self.assertEqual(self.VALUE, stringTypeExpression.return_value())
def testReturnDefaultValue(self):
stringTypeExpression = self._createStringTypeExpression(True)
self.assertEqual(self.api.STRING_CONSTANT if self.api.CHOOSER
else self.FALSE + self.SPACE + self.api.STRING_CONSTANT,
stringTypeExpression.return_default_value())
def testReturnDefaultChosen(self):
stringTypeExpression = self._createStringTypeExpression(True)
self.assertEqual(self.CHOSEN + self.SPACE + self.api.STRING_CONSTANT if self.api.CHOOSER
else "", stringTypeExpression.return_default_chosen())
def testAppendix(self):
stringTypeExpression = self._createStringTypeExpression(False)
self.assertEqual(self.APPEND + self.IX_LITERAL, stringTypeExpression.appendix())
def testAppendToConst(self):
stringTypeExpression = self._createStringTypeExpression(False)
self.assertEqual(self.api.STRING_CONSTANT + self.UNDERSCORE + self.APPEND + self.IX_LITERAL,
stringTypeExpression.append_to_const())
def testValueOrLiteral(self):
stringTypeExpression1 = self._createStringTypeExpression(True)
self.assertEqual(self.VALUE, stringTypeExpression1.value_or_literal())
stringTypeExpression2 = self._createStringTypeExpression(False)
self.assertEqual(self.LITERAL, stringTypeExpression2.value_or_literal())
def testValueOrLiteralExpression(self):
stringTypeExpression1 = self._createStringTypeExpression(True)
self.assertEqual(self.VALUE, stringTypeExpression1.value_or_literal_expression())
stringTypeExpression2 = self._createStringTypeExpression(False)
self.assertEqual(self.LITERAL + self.SPACE + self.EXPRESSION,
stringTypeExpression2.value_or_literal_expression())
def testValueOrConst(self):
stringTypeExpression1 = self._createStringTypeExpression(True)
self.assertEqual(self.VALUE, stringTypeExpression1.value_or_const())
stringTypeExpression2 = self._createStringTypeExpression(False)
self.assertEqual(self.api.STRING_CONSTANT, stringTypeExpression2.value_or_const())
def testValueOrConstExpression(self):
stringTypeExpression1 = self._createStringTypeExpression(True)
self.assertEqual(self.VALUE, stringTypeExpression1.value_or_const_expression())
stringTypeExpression2 = self._createStringTypeExpression(False)
self.assertEqual(self.api.STRING_CONSTANT + self.SPACE + self.EXPRESSION,
stringTypeExpression2.value_or_const_expression())
def _createStringTypeExpression(self, hasValue):
return self.api.StringTypeExpression(hasValue, self.VALUE if hasValue else None)
VALUE = "value"
APPEND = "append"
IX_LITERAL = "ix"
LITERAL = "literal"
EXPRESSION = "expression"
FALSE = "false"
CHOSEN = "chosen"
SPACE = " "
UNDERSCORE = "_"
|
3e5f8b1dfc050e759d7a862d665c88f3b5f69b8c
|
3c2ee998c99a693b3b04d44f8c5af0fc5fb2c49d
|
/migrations/versions/c40e1fdf6b70_.py
|
93d64de31e128451e8b7334320ae8dbe680872a5
|
[
"BSD-2-Clause"
] |
permissive
|
hotosm/tasking-manager
|
4520a56b31b35ebfc82a337bc7e676f1f8bc946a
|
45bf3937c74902226096aee5b49e7abea62df524
|
refs/heads/develop
| 2023-09-01T02:43:43.875659
| 2023-08-16T21:26:02
| 2023-08-29T13:15:52
| 80,733,077
| 526
| 316
|
BSD-2-Clause
| 2023-09-14T10:15:55
| 2017-02-02T14:31:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,029
|
py
|
c40e1fdf6b70_.py
|
"""empty message
Revision ID: c40e1fdf6b70
Revises: 84c793a951b2
Create Date: 2020-02-04 22:23:22.457001
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "c40e1fdf6b70"
down_revision = "84c793a951b2"
branch_labels = None
depends_on = None
class Determiner:
def determine_mapping_permission(self, val, reverse=False):
# restrict_mapping_level_to_project=True => LEVEL = 1
# restrict_mapping_level_to_project=False => ANY = 0
permissions = {True: 1, False: 0}
if reverse:
return list(permissions.keys())[list(permissions.values()).index(val)]
return permissions.get(val)
def determine_validation_permission(self, val, reverse=False):
# (restrict_validation_role=True, restrict_validation_level_intermediate=True) => TEAMS_LEVEL = 3
# (restrict_validation_role=True, restrict_validation_level_intermediate=False) => TEAMS = 2
# (restrict_validation_role=False, restrict_validation_level_intermediate=True) => LEVEL = 1
# (restrict_validation_role=False, restrict_validation_level_intermediate=False) => ANY = 0
permissions = {
"True,True": 3,
"True,False": 2,
"False,True": 1,
"False,False": 0,
}
if reverse:
return list(permissions.keys())[list(permissions.values()).index(val)]
return permissions.get(val)
def upgrade():
conn = op.get_bind()
conn.execute(sa.text("ALTER TABLE projects ADD mapping_permission Integer;"))
conn.execute(sa.text("ALTER TABLE projects ADD validation_permission Integer;"))
fetch_all_projects = "select id, restrict_mapping_level_to_project, \
restrict_validation_role, restrict_validation_level_intermediate from projects;"
all_projects = conn.execute(sa.text(fetch_all_projects))
for project in all_projects:
mapping_permission = None
validation_permission = None
project_id = project[0]
mapping_restriction = project[1]
validation_role_restriction = project[2]
validation_level_restriction = project[3]
# Map existing restrictions to V4 permission integers
d = Determiner()
mapping_permission = d.determine_mapping_permission(mapping_restriction)
validation_restriction = (
str(validation_role_restriction) + "," + str(validation_level_restriction)
)
validation_permission = d.determine_validation_permission(
validation_restriction
)
update_query = (
"update projects set mapping_permission = '"
+ str(mapping_permission)
+ "', validation_permission = '"
+ str(validation_permission)
+ "' where id = "
+ str(project_id)
)
op.execute(update_query)
op.drop_column("projects", "restrict_mapping_level_to_project")
op.drop_column("projects", "restrict_validation_role")
op.drop_column("projects", "restrict_validation_level_intermediate")
def downgrade():
conn = op.get_bind()
conn.execute(
sa.text("ALTER TABLE projects ADD restrict_mapping_level_to_project boolean;")
)
conn.execute(sa.text("ALTER TABLE projects ADD restrict_validation_role boolean;"))
conn.execute(
sa.text(
"ALTER TABLE projects ADD restrict_validation_level_intermediate boolean;"
)
)
fetch_all_projects = (
"select id, mapping_permission, validation_permission from projects;"
)
all_projects = conn.execute(sa.text(fetch_all_projects))
for project in all_projects:
project_id = project[0]
mapping_permission = project[1]
validation_permission = project[2]
mapping_restriction = False
validation_role_restriction = None
validation_level_restriction = None
# Reverse map V4 permission integers to V3 restrictions
d = Determiner()
try:
mapping_restriction = d.determine_mapping_permission(
mapping_permission, True
)
except Exception:
mapping_restriction = False
validation_restriction = d.determine_validation_permission(
validation_permission, True
).split(",")
validation_role_restriction = validation_restriction[0]
validation_level_restriction = validation_restriction[1]
update_query = (
"update projects set restrict_mapping_level_to_project = '"
+ str(mapping_restriction)
+ "', restrict_validation_role = '"
+ str(validation_role_restriction)
+ "', restrict_validation_level_intermediate = '"
+ str(validation_level_restriction)
+ "' where id = "
+ str(project_id)
)
op.execute(update_query)
op.drop_column("projects", "mapping_permission")
op.drop_column("projects", "validation_permission")
|
b61443c8afeaf79ab1629940b61316f6bf26afe2
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/i18n/patterns/urls/wrong.py
|
b4a2e0e16ba4cf879b48c2f5051508f9421a23bb
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
wrong.py
|
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, re_path
from django.utils.translation import gettext_lazy as _
urlpatterns = i18n_patterns(
re_path(
_(r"^account/"),
include("i18n.patterns.urls.wrong_namespace", namespace="account"),
),
)
|
01e8e1eba44795626e398b6bc694f85c22033d61
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/virtual-network-tap/azext_vnettap/commands.py
|
eff3d854c6b5bbde93a658736bf8e7905cf46248
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
commands.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_virtual_network_taps, cf_nic_tap_config
# pylint: disable=too-many-locals, too-many-statements
def load_command_table(self, _):
network_vnet_tap_sdk = CliCommandType(
operations_tmpl='azext_vnettap.vendored_sdks.operations.virtual_network_taps_operations#VirtualNetworkTapsOperations.{}',
client_factory=cf_virtual_network_taps,
min_api='2018-08-01'
)
network_nic_tap_config_sdk = CliCommandType(
operations_tmpl='azext_vnettap.vendored_sdks.operations.network_interface_tap_configurations_operations#NetworkInterfaceTapConfigurationsOperations.{}',
client_factory=cf_nic_tap_config,
min_api='2018-08-01'
)
with self.command_group('network vnet tap', network_vnet_tap_sdk) as g:
g.custom_command('create', 'create_vnet_tap')
g.command('delete', 'delete')
g.custom_command('list', 'list_vnet_taps')
g.show_command('show', 'get')
g.generic_update_command('update')
with self.command_group('network nic vtap-config', network_nic_tap_config_sdk) as g:
g.custom_command('create', 'create_vtap_config')
g.command('delete', 'delete')
g.command('list', 'list')
g.show_command('show', 'get')
|
3941444e0aab79e6b4da555aef5a1e3649bb459a
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scipy/py3/scipy/integrate/_ivp/base.py
|
ada0589dfa6a8f274d1eb4a8733c6bef7295b4c7
|
[
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Qhull",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 9,550
|
py
|
base.py
|
import numpy as np
def check_arguments(fun, y0, support_complex):
"""Helper function for checking arguments common to all solvers."""
y0 = np.asarray(y0)
if np.issubdtype(y0.dtype, np.complexfloating):
if not support_complex:
raise ValueError("`y0` is complex, but the chosen solver does "
"not support integration in a complex domain.")
dtype = complex
else:
dtype = float
y0 = y0.astype(dtype, copy=False)
if y0.ndim != 1:
raise ValueError("`y0` must be 1-dimensional.")
def fun_wrapped(t, y):
return np.asarray(fun(t, y), dtype=dtype)
return fun_wrapped, y0
class OdeSolver:
"""Base class for ODE solvers.
In order to implement a new solver you need to follow the guidelines:
1. A constructor must accept parameters presented in the base class
(listed below) along with any other parameters specific to a solver.
2. A constructor must accept arbitrary extraneous arguments
``**extraneous``, but warn that these arguments are irrelevant
using `common.warn_extraneous` function. Do not pass these
arguments to the base class.
3. A solver must implement a private method `_step_impl(self)` which
propagates a solver one step further. It must return tuple
``(success, message)``, where ``success`` is a boolean indicating
whether a step was successful, and ``message`` is a string
containing description of a failure if a step failed or None
otherwise.
4. A solver must implement a private method `_dense_output_impl(self)`,
which returns a `DenseOutput` object covering the last successful
step.
5. A solver must have attributes listed below in Attributes section.
Note that ``t_old`` and ``step_size`` are updated automatically.
6. Use `fun(self, t, y)` method for the system rhs evaluation, this
way the number of function evaluations (`nfev`) will be tracked
automatically.
7. For convenience, a base class provides `fun_single(self, t, y)` and
`fun_vectorized(self, t, y)` for evaluating the rhs in
non-vectorized and vectorized fashions respectively (regardless of
how `fun` from the constructor is implemented). These calls don't
increment `nfev`.
8. If a solver uses a Jacobian matrix and LU decompositions, it should
track the number of Jacobian evaluations (`njev`) and the number of
LU decompositions (`nlu`).
9. By convention, the function evaluations used to compute a finite
difference approximation of the Jacobian should not be counted in
`nfev`, thus use `fun_single(self, t, y)` or
`fun_vectorized(self, t, y)` when computing a finite difference
approximation of the Jacobian.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar and there are two options for ndarray ``y``.
It can either have shape (n,), then ``fun`` must return array_like with
shape (n,). Or, alternatively, it can have shape (n, n_points), then
``fun`` must return array_like with shape (n, n_points) (each column
corresponds to a single column in ``y``). The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time --- the integration won't continue beyond it. It also
determines the direction of the integration.
vectorized : bool
Whether `fun` is implemented in a vectorized fashion.
support_complex : bool, optional
Whether integration in a complex domain should be supported.
Generally determined by a derived solver class capabilities.
Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of the system's rhs evaluations.
njev : int
Number of the Jacobian evaluations.
nlu : int
Number of LU decompositions.
"""
TOO_SMALL_STEP = "Required step size is less than spacing between numbers."
def __init__(self, fun, t0, y0, t_bound, vectorized,
support_complex=False):
self.t_old = None
self.t = t0
self._fun, self.y = check_arguments(fun, y0, support_complex)
self.t_bound = t_bound
self.vectorized = vectorized
if vectorized:
def fun_single(t, y):
return self._fun(t, y[:, None]).ravel()
fun_vectorized = self._fun
else:
fun_single = self._fun
def fun_vectorized(t, y):
f = np.empty_like(y)
for i, yi in enumerate(y.T):
f[:, i] = self._fun(t, yi)
return f
def fun(t, y):
self.nfev += 1
return self.fun_single(t, y)
self.fun = fun
self.fun_single = fun_single
self.fun_vectorized = fun_vectorized
self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
self.n = self.y.size
self.status = 'running'
self.nfev = 0
self.njev = 0
self.nlu = 0
@property
def step_size(self):
if self.t_old is None:
return None
else:
return np.abs(self.t - self.t_old)
def step(self):
"""Perform one integration step.
Returns
-------
message : string or None
Report from the solver. Typically a reason for a failure if
`self.status` is 'failed' after the step was taken or None
otherwise.
"""
if self.status != 'running':
raise RuntimeError("Attempt to step on a failed or finished "
"solver.")
if self.n == 0 or self.t == self.t_bound:
# Handle corner cases of empty solver or no integration.
self.t_old = self.t
self.t = self.t_bound
message = None
self.status = 'finished'
else:
t = self.t
success, message = self._step_impl()
if not success:
self.status = 'failed'
else:
self.t_old = t
if self.direction * (self.t - self.t_bound) >= 0:
self.status = 'finished'
return message
def dense_output(self):
"""Compute a local interpolant over the last successful step.
Returns
-------
sol : `DenseOutput`
Local interpolant over the last successful step.
"""
if self.t_old is None:
raise RuntimeError("Dense output is available after a successful "
"step was made.")
if self.n == 0 or self.t == self.t_old:
# Handle corner cases of empty solver and no integration.
return ConstantDenseOutput(self.t_old, self.t, self.y)
else:
return self._dense_output_impl()
def _step_impl(self):
raise NotImplementedError
def _dense_output_impl(self):
raise NotImplementedError
class DenseOutput:
"""Base class for local interpolant over step made by an ODE solver.
It interpolates between `t_min` and `t_max` (see Attributes below).
Evaluation outside this interval is not forbidden, but the accuracy is not
guaranteed.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
def __init__(self, t_old, t):
self.t_old = t_old
self.t = t
self.t_min = min(t, t_old)
self.t_max = max(t, t_old)
def __call__(self, t):
"""Evaluate the interpolant.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate the solution at.
Returns
-------
y : ndarray, shape (n,) or (n, n_points)
Computed values. Shape depends on whether `t` was a scalar or a
1-D array.
"""
t = np.asarray(t)
if t.ndim > 1:
raise ValueError("`t` must be a float or a 1-D array.")
return self._call_impl(t)
def _call_impl(self, t):
raise NotImplementedError
class ConstantDenseOutput(DenseOutput):
"""Constant value interpolator.
This class used for degenerate integration cases: equal integration limits
or a system with 0 equations.
"""
def __init__(self, t_old, t, value):
super().__init__(t_old, t)
self.value = value
def _call_impl(self, t):
if t.ndim == 0:
return self.value
else:
ret = np.empty((self.value.shape[0], t.shape[0]))
ret[:] = self.value[:, None]
return ret
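# Illustrative sketch (not part of SciPy): a minimal fixed-step forward-Euler
# solver written against the guidelines in the OdeSolver docstring. The step
# size `h` is a made-up extra parameter, and dense output is approximated
# crudely with ConstantDenseOutput; a real solver would warn about extraneous
# kwargs and provide a proper interpolant.
class _EulerSketch(OdeSolver):
    def __init__(self, fun, t0, y0, t_bound, h=1e-2, vectorized=False):
        super().__init__(fun, t0, y0, t_bound, vectorized)
        self.h_abs = abs(h)

    def _step_impl(self):
        t = self.t
        # Never step past t_bound; honour the integration direction.
        h = self.direction * min(self.h_abs, abs(self.t_bound - t))
        self.y = self.y + h * self.fun(t, self.y)  # self.fun() tracks nfev
        self.t = t + h
        return True, None

    def _dense_output_impl(self):
        # Piecewise-constant interpolant over the last step (crude but valid).
        return ConstantDenseOutput(self.t_old, self.t, self.y)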
|
c24982b8326befabe53b0df06c41789e8cf664c7
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/tests/integration/conftest.py
|
b0a9f2427a32a3ed394babf8adef0851f6067274
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
conftest.py
|
"""Pytest configuration, fixtures, and plugins."""
# pylint: disable=redefined-outer-name
from __future__ import annotations
import shutil
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable
import pytest
if TYPE_CHECKING:
from _pytest.config import Config
TEST_ROOT = Path(__file__).parent
CpConfigTypeDef = Callable[[str, Path], Path]
# pylint: disable=unused-argument
def pytest_ignore_collect(path: Any, config: Config) -> bool:
"""Determine if this directory should have its tests collected."""
if config.option.functional:
return True
if config.option.markexpr and "wip" in config.option.markexpr:
return False # collect when looking for markers
return not (config.option.integration or config.option.integration_only)
@pytest.fixture
def configs() -> Path:
"""Path to Runway config fixtures."""
return TEST_ROOT.parent / "fixtures" / "configs"
@pytest.fixture
def cp_config(configs: Path) -> Callable[[str, Path], Path]:
"""Copy a config file."""
def copy_config(config_name: str, dest_path: Path) -> Path:
"""Copy a config file by name to a destination directory.
The resulting config will be named runway.yml.
"""
runway_yml = dest_path / "runway.yml"
if not config_name.endswith(".yml"):
config_name += ".yml"
shutil.copy(configs / config_name, runway_yml)
return runway_yml
return copy_config
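# Illustrative sketch (not part of the suite): how an integration test module
# might consume the cp_config fixture above; the config name "min_required"
# is hypothetical.
def _example_cp_config_usage(cp_config: CpConfigTypeDef, tmp_path: Path) -> None:
    """Copy a named fixture config into a temp dir as runway.yml."""
    runway_yml = cp_config("min_required", tmp_path)
    assert runway_yml.is_file()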
|
202d252fda5c41bebdab30a6f4603e7378890fc5
|
9c8e06301f6559a106b805dfe0b372ad2e6bc4d8
|
/scripts/audit_frontend_licenses.py
|
76c813b8ab1a8a3e878b8fe4deee79a6d7adc3bb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Zlib",
"BSD-3-Clause",
"WTFPL",
"MIT",
"MPL-2.0",
"MPL-1.1",
"CC0-1.0",
"GPL-2.0-only",
"AFL-2.1",
"CC-BY-3.0",
"Unlicense"
] |
permissive
|
streamlit/streamlit
|
beecb89300d6f219f3a43ed328f22d3656243f26
|
4f45c18a4323a796440d651ba77b5eb29409cb2b
|
refs/heads/develop
| 2023-09-06T06:22:40.853489
| 2023-09-04T13:53:20
| 2023-09-04T13:53:20
| 204,086,862
| 27,877
| 2,739
|
Apache-2.0
| 2023-09-14T19:08:39
| 2019-08-24T00:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 7,038
|
py
|
audit_frontend_licenses.py
|
#!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audit the licenses of all our frontend dependencies (as defined by our
`yarn.lock` file). If any dependency has an unacceptable license, print it
out and exit with an error code. If all dependencies have acceptable licenses,
exit normally.
"""
import json
import subprocess
import sys
from pathlib import Path
from typing import NoReturn, Set, Tuple, cast
from typing_extensions import TypeAlias
PackageInfo: TypeAlias = Tuple[str, str, str, str, str, str]
SCRIPT_DIR = Path(__file__).resolve().parent
FRONTEND_DIR_LIB = SCRIPT_DIR.parent / "frontend/lib"
FRONTEND_DIR_APP = SCRIPT_DIR.parent / "frontend/app"
# Set of acceptable licenses. If a library uses one of these licenses,
# we can include it as a dependency.
ACCEPTABLE_LICENSES = {
"MIT", # https://opensource.org/licenses/MIT
"Apache-2.0", # https://opensource.org/licenses/Apache-2.0
"Apache-2.0 WITH LLVM-exception", # https://spdx.org/licenses/LLVM-exception.html
"0BSD", # https://opensource.org/licenses/0BSD
"BSD-2-Clause", # https://opensource.org/licenses/BSD-2-Clause
"BSD-3-Clause", # https://opensource.org/licenses/BSD-3-Clause
"ISC", # https://opensource.org/licenses/ISC
"CC0-1.0", # https://creativecommons.org/publicdomain/zero/1.0/
"CC-BY-3.0", # https://creativecommons.org/licenses/by/3.0/
"CC-BY-4.0", # https://creativecommons.org/licenses/by/4.0/
"Python-2.0", # https://www.python.org/download/releases/2.0/license/
"Zlib", # https://opensource.org/licenses/Zlib
"Unlicense", # https://unlicense.org/
"WTFPL", # http://www.wtfpl.net/about/
# Dual-licenses are acceptable if at least one of the two licenses is
# acceptable.
"(MIT OR Apache-2.0)",
"(MPL-2.0 OR Apache-2.0)",
"(MIT OR CC0-1.0)",
"(Apache-2.0 OR MPL-1.1)",
"(BSD-3-Clause OR GPL-2.0)",
"(MIT AND BSD-3-Clause)",
"(MIT AND Zlib)",
"(WTFPL OR MIT)",
"(AFL-2.1 OR BSD-3-Clause)",
}
# Some of our dependencies have licenses that yarn fails to parse, but that
# are still acceptable. This set contains all those exceptions. Each entry
# should include a comment about why it's an exception.
PACKAGE_EXCEPTIONS: Set[PackageInfo] = {
(
# MIT license: https://github.com/mapbox/jsonlint
"@mapbox/jsonlint-lines-primitives",
"2.0.2",
"UNKNOWN",
"git://github.com/mapbox/jsonlint.git",
"http://zaa.ch",
"Zach Carter",
),
(
# Apache 2.0 license: https://github.com/google/flatbuffers
"flatbuffers",
"2.0.4",
"SEE LICENSE IN LICENSE.txt",
"git+https://github.com/google/flatbuffers.git",
"https://google.github.io/flatbuffers/",
"The FlatBuffers project",
),
(
# Mapbox Web SDK license: https://github.com/mapbox/mapbox-gl-js/blob/main/LICENSE.txt
"mapbox-gl",
"1.13.3",
"SEE LICENSE IN LICENSE.txt",
"git://github.com/mapbox/mapbox-gl-js.git",
"Unknown",
"Unknown",
),
(
# Mapbox Web SDK license: https://github.com/mapbox/mapbox-gl-js/blob/main/LICENSE.txt
"mapbox-gl",
"1.10.1",
"SEE LICENSE IN LICENSE.txt",
"git://github.com/mapbox/mapbox-gl-js.git",
"Unknown",
"Unknown",
),
(
# MIT license: https://github.com/dy/image-palette
"image-palette",
"2.1.0",
"MIT*",
"https://github.com/dy/image-palette.git",
"Unknown",
"Dmitry Yv",
),
(
# CC-BY-3.0 license: https://github.com/cartodb/cartocolor#licensing
"cartocolor",
"4.0.2",
"UNKNOWN",
"https://github.com/cartodb/cartocolor",
"http://carto.com/",
"Unknown",
),
(
# Apache-2.0 license: https://github.com/saikocat/colorbrewer/blob/master/LICENSE.txt
"colorbrewer",
"1.0.0",
"Apache*",
"https://github.com/saikocat/colorbrewer",
"http://colorbrewer2.org/",
"Cynthia Brewer",
),
}
def get_license_type(package: PackageInfo) -> str:
"""Return the license type string for a dependency entry."""
return package[2]
def check_licenses(licenses) -> NoReturn:
# `yarn licenses` outputs a bunch of lines.
# The last line contains the JSON object we care about
licenses_json = json.loads(licenses[len(licenses) - 1])
assert licenses_json["type"] == "table"
# Pull out the list of package infos from the JSON.
packages = [
cast(PackageInfo, tuple(package)) for package in licenses_json["data"]["body"]
]
# Discover dependency exceptions that are no longer used and can be
# jettisoned, and print them out with a warning.
unused_exceptions = PACKAGE_EXCEPTIONS.difference(set(packages))
if len(unused_exceptions) > 0:
for exception in sorted(list(unused_exceptions)):
print(f"Unused package exception, please remove: {exception}")
# Discover packages that don't have an acceptable license, and that don't
# have an explicit exception. If we have any, we print them out and exit
# with an error.
bad_packages = [
package
for package in packages
if (get_license_type(package) not in ACCEPTABLE_LICENSES)
and (package not in PACKAGE_EXCEPTIONS)
# workspace aggregator is yarn workspaces
and "workspace-aggregator" not in package[0]
]
if len(bad_packages) > 0:
for package in bad_packages:
print(f"Unacceptable license: '{get_license_type(package)}' (in {package})")
print(f"{len(bad_packages)} unacceptable licenses")
sys.exit(1)
print(f"No unacceptable licenses")
sys.exit(0)
def main() -> NoReturn:
# Run `yarn licenses` for lib.
licenses_output = (
subprocess.check_output(
["yarn", "licenses", "list", "--json", "--production", "--ignore-platform"],
cwd=str(FRONTEND_DIR_LIB),
)
.decode()
.splitlines()
)
# Run `yarn licenses` for app.
licenses_output = licenses_output + (
subprocess.check_output(
["yarn", "licenses", "list", "--json", "--production", "--ignore-platform"],
cwd=str(FRONTEND_DIR_APP),
)
.decode()
.splitlines()
)
check_licenses(licenses_output)
if __name__ == "__main__":
main()
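# Illustrative sketch (not part of the script): the minimal shape of the JSON
# table that check_licenses() reads from the last line of
# `yarn licenses list --json` output. The package row below is hypothetical.
#
#     fake_output = [
#         '... progress lines ...',
#         '{"type": "table", "data": {"body": '
#         '[["left-pad", "1.3.0", "MIT", "git://...", "http://...", "someone"]]}}',
#     ]
#     check_licenses(fake_output)  # prints a summary and calls sys.exit(0 or 1)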
|
511b918c86ed9c52d3b8d6d7e7dd0ccd2dca2c40
|
833ef1cc5cbd5cf76da144d10d393e30976d9185
|
/froide/problem/migrations/0005_problemreport_escalated.py
|
af20d954d8c30d307458c0d7dabad2835323bb4b
|
[
"MIT"
] |
permissive
|
okfde/froide
|
d022407ec30bf018e6ca587ae9df0b73a8625edf
|
16e3c69b333fc82cb1e52378fc003ddf071152a7
|
refs/heads/main
| 2023-08-31T08:02:23.343743
| 2023-08-29T07:01:03
| 2023-08-29T07:01:03
| 1,700,944
| 230
| 48
|
MIT
| 2023-09-13T09:10:40
| 2011-05-04T12:20:51
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
0005_problemreport_escalated.py
|
# Generated by Django 3.0.8 on 2020-07-15 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("problem", "0004_auto_20200710_1148"),
]
operations = [
migrations.AddField(
model_name="problemreport",
name="escalated",
field=models.DateTimeField(blank=True, null=True),
),
]
|
18bc0cffafaec5172f27e32ea9bb707c07af4686
|
08ea46c0a9fb71ef222cf6afa2e9094f5663dcfb
|
/tests/test_pvacfuse_generate_protein_fasta.py
|
d2b58dd9e29f8f0c638a5e50044d42bcb266ba95
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
griffithlab/pVACtools
|
e358919eee76100f79dbe8d40d02b3fce8b227ac
|
3317d2c18e82edb5ea183ae09820beb68c39d256
|
refs/heads/master
| 2023-08-09T15:42:06.725426
| 2023-08-09T14:28:44
| 2023-08-09T14:28:44
| 102,625,109
| 124
| 64
|
BSD-3-Clause-Clear
| 2023-09-08T14:17:22
| 2017-09-06T15:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
test_pvacfuse_generate_protein_fasta.py
|
import unittest
import os
import sys
import tempfile
from subprocess import call
from filecmp import cmp
import py_compile
from pvactools.tools.pvacfuse import generate_protein_fasta
from tests.utils import *
class GenerateFastaTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.python = sys.executable
cls.executable_dir = os.path.join(pvactools_directory(), 'pvactools', 'tools', 'pvacfuse')
cls.executable = os.path.join(cls.executable_dir, 'generate_protein_fasta.py')
cls.test_data_dir = os.path.join(pvactools_directory(), 'tests', 'test_data', 'pvacfuse_generate_protein_fasta')
cls.flanking_sequence_length = '10'
def test_source_compiles(self):
self.assertTrue(py_compile.compile(self.executable))
def test_agfusion_input_file_generates_expected_file(self):
generate_protein_fasta_input_file = os.path.join(self.test_data_dir, 'agfusion')
generate_protein_fasta_output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
generate_protein_fasta_input_file,
self.flanking_sequence_length,
generate_protein_fasta_output_file.name,
'-d', 'full',
], shell=False))
expected_output_file = os.path.join(self.test_data_dir, 'output_agfusion.fasta')
os.unlink("{}.manufacturability.tsv".format(generate_protein_fasta_output_file.name))
self.assertTrue(cmp(generate_protein_fasta_output_file.name, expected_output_file))
def test_input_tsv(self):
generate_protein_fasta_input_file = os.path.join(self.test_data_dir, 'agfusion')
generate_protein_fasta_input_tsv = os.path.join(self.test_data_dir, 'input.tsv')
generate_protein_fasta_output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
generate_protein_fasta_input_file,
self.flanking_sequence_length,
generate_protein_fasta_output_file.name,
'-d', 'full',
'--input-tsv', generate_protein_fasta_input_tsv,
], shell=False))
expected_output_file = os.path.join(self.test_data_dir, 'output_with_tsv.fasta')
os.unlink("{}.manufacturability.tsv".format(generate_protein_fasta_output_file.name))
self.assertTrue(cmp(generate_protein_fasta_output_file.name, expected_output_file))
def test_arriba_tsv_with_invalid_character(self):
generate_protein_fasta_input_file = os.path.join(self.test_data_dir, 'input_with_invalid_character.tsv')
generate_protein_fasta_output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
generate_protein_fasta_input_file,
self.flanking_sequence_length,
generate_protein_fasta_output_file.name,
'-d', 'full'
], shell=False))
os.unlink("{}.manufacturability.tsv".format(generate_protein_fasta_output_file.name))
self.assertEqual(os.path.getsize(generate_protein_fasta_output_file.name), 0)
|