Dataset columns (one record per source file; values are the viewer's min/max lengths, ranges, or class counts):

| Column | Type / range |
|---|---|
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 4-721 |
| content_id | string, length 40 |
| detected_licenses | list, length 0-57 |
| license_type | string, 2 classes |
| repo_name | string, length 5-91 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, 321 classes |
| visit_date | timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64, 426 to 681M |
| star_events_count | int64, 101 to 243k |
| fork_events_count | int64, 0 to 110k |
| gha_license_id | string, 23 classes |
| gha_event_created_at | timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable |
| gha_language | string, 147 classes |
| src_encoding | string, 26 classes |
| language | string, 2 classes |
| is_vendor | bool |
| is_generated | bool |
| length_bytes | int64, 6 to 10.2M |
| extension | string, 115 classes |
| filename | string, length 3-113 |
| content | string, length 6-10.2M |
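Each record below lists these fields as `field: value` pairs, followed by the raw file content. As a minimal sketch of how a dump with this schema could be consumed programmatically (the dataset identifier "org/code-dataset" is a placeholder, not the actual source of this export), the `datasets` library can stream the rows and filter on the columns above:

from datasets import load_dataset

# Hedged sketch: stream rows and keep only small Python files.
ds = load_dataset("org/code-dataset", split="train", streaming=True)
for row in ds:
    if row["language"] == "Python" and row["length_bytes"] < 10_000:
        print(row["repo_name"], row["path"], len(row["content"]))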
---
blob_id: f242ef8f18dc2245fad91441c12af27ab8d4ce43
directory_id: cd4be8b6bee2964d063b332c0c8784ab6c89c8e5
path: /setup.py
content_id: 6609105b6ac52f007c4dfbd88d3cb3ea8f62134b
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: pytorch/opacus
snapshot_id: d55f9c3627943a3c067528849401663cfaf7d622
revision_id: 79bdfac28afb526430a938d38513c46936f8670a
branch_name: refs/heads/main
visit_date: 2023-09-04T01:03:50.533043
revision_date: 2023-08-01T19:37:56
committer_date: 2023-08-01T19:37:56
github_id: 226,441,159
star_events_count: 1,358
fork_events_count: 291
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-11T13:29:37
gha_created_at: 2019-12-07T01:58:09
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,951
extension: py
filename: setup.py
content:
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
REQUIRED_MICRO = 5
version = {}
with open("opacus/version.py") as fp:
exec(fp.read(), version)
__version__ = version["__version__"]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR, REQUIRED_MICRO):
error = (
"Your version of python ({major}.{minor}.{micro}) is too old. You need "
"python >= {required_major}.{required_minor}.{required_micro}"
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
micro=sys.version_info.micro,
required_major=REQUIRED_MAJOR,
required_minor=REQUIRED_MINOR,
required_micro=REQUIRED_MICRO,
)
sys.exit(error)
src_dir = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
requirements_txt = os.path.join(src_dir, "requirements.txt")
with open("requirements.txt", encoding="utf8") as f:
required = f.read().splitlines()
with open("dev_requirements.txt", encoding="utf8") as f:
dev_required = f.read().splitlines()
setup(
name="opacus",
version=__version__,
author="The Opacus Team",
description="Train PyTorch models with Differential Privacy",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://opacus.ai",
project_urls={
"Documentation": "https://opacus.ai/api",
"Source": "https://github.com/pytorch/opacus",
},
license="Apache-2.0",
install_requires=required,
extras_require={"dev": dev_required},
packages=find_packages(),
keywords=[
"PyTorch",
"Differential Privacy",
"DP-SGD",
"DP SGD",
"Privacy Preserving Machine Learning",
"PPML",
"PPAI",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
],
python_requires=f">={REQUIRED_MAJOR}.{REQUIRED_MINOR}.{REQUIRED_MICRO}",
)
---
blob_id: 955c3ef2db7c1438255caeba646b0f98d21ce064
directory_id: aa1865ff1b47a2ed0f43dc993e7e0916969d3a9e
path: /sentence_transformers/losses/BatchSemiHardTripletLoss.py
content_id: 2d543e96d379de4b52603ae3322f7469e9ca045c
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: UKPLab/sentence-transformers
snapshot_id: a1c0e055d96478b9b2c7718a299fc268e9449a63
revision_id: a458ce79c40fef93d5ecc66931b446ea65fdd017
branch_name: refs/heads/master
visit_date: 2023-08-31T22:01:24.008337
revision_date: 2023-08-08T07:06:22
committer_date: 2023-08-08T07:06:22
github_id: 198,616,978
star_events_count: 11,967
fork_events_count: 2,260
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-08T14:54:52
gha_created_at: 2019-07-24T10:53:51
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,586
extension: py
filename: BatchSemiHardTripletLoss.py
content:
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchSemiHardTripletLoss(nn.Module):
"""
BatchSemiHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then mines
the semi-hard negatives.
The labels must be integers, with the same label indicating sentences from the same class. Your train dataset
must contain at least 2 examples per label class. The margin is controlled by the `margin` parameter (default 5).
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class BatchHardTripletLossDistanceFunction contains pre-defined metrics that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchSemiHardTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
super(BatchSemiHardTripletLoss, self).__init__()
self.sentence_embedder = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.sentence_embedder(sentence_features[0])['sentence_embedding']
return self.batch_semi_hard_triplet_loss(labels, rep)
# Semi-Hard Triplet Loss
# Based on: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/losses/triplet.py#L71
# Paper: FaceNet: A Unified Embedding for Face Recognition and Clustering: https://arxiv.org/pdf/1503.03832.pdf
def batch_semi_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
We generate all the valid triplets and average the loss over the positive ones.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
The margin and the distance metric are not passed as arguments; they are
taken from the instance attributes self.margin and self.distance_metric.
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
labels = labels.unsqueeze(1)
pdist_matrix = self.distance_metric(embeddings)
adjacency = labels == labels.t()
adjacency_not = ~adjacency
batch_size = torch.numel(labels)
pdist_matrix_tile = pdist_matrix.repeat([batch_size, 1])
mask = adjacency_not.repeat([batch_size, 1]) & (pdist_matrix_tile > torch.reshape(pdist_matrix.t(), [-1, 1]))
mask_final = torch.reshape(torch.sum(mask, 1, keepdims=True) > 0.0, [batch_size, batch_size])
mask_final = mask_final.t()
negatives_outside = torch.reshape(BatchSemiHardTripletLoss._masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = negatives_outside.t()
negatives_inside = BatchSemiHardTripletLoss._masked_maximum(pdist_matrix, adjacency_not)
negatives_inside = negatives_inside.repeat([1, batch_size])
semi_hard_negatives = torch.where(mask_final, negatives_outside, negatives_inside)
loss_mat = (pdist_matrix - semi_hard_negatives) + self.margin
mask_positives = adjacency.float().to(labels.device) - torch.eye(batch_size, device=labels.device)
mask_positives = mask_positives.to(labels.device)
num_positives = torch.sum(mask_positives)
triplet_loss = torch.sum(torch.max(loss_mat * mask_positives, torch.tensor([0.0], device=labels.device))) / num_positives
return triplet_loss
@staticmethod
def _masked_minimum(data, mask, dim=1):
axis_maximums, _ = data.max(dim, keepdims=True)
masked_minimums = (data - axis_maximums) * mask
masked_minimums, _ = masked_minimums.min(dim, keepdims=True)
masked_minimums += axis_maximums
return masked_minimums
@staticmethod
def _masked_maximum(data, mask, dim=1):
axis_minimums, _ = data.min(dim, keepdims=True)
masked_maximums = (data - axis_minimums) * mask
masked_maximums, _ = masked_maximums.max(dim, keepdims=True)
masked_maximums += axis_minimums
return masked_maximums
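# Hedged usage sketch (not part of the upstream module): a toy check of the
# masking helpers above. `_masked_minimum` returns, per row, the smallest entry
# among the positions where `mask` is 1; `_masked_maximum` the largest one.
if __name__ == "__main__":
    _data = torch.tensor([[1.0, 5.0, 3.0],
                          [4.0, 2.0, 6.0]])
    _mask = torch.tensor([[0.0, 1.0, 1.0],
                          [1.0, 0.0, 1.0]])
    print(BatchSemiHardTripletLoss._masked_minimum(_data, _mask))  # [[3.], [4.]]
    print(BatchSemiHardTripletLoss._masked_maximum(_data, _mask))  # [[5.], [6.]]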
---
blob_id: 41c8b8c2a31c0c652bbe0da84bcb81c08fcb9e47
directory_id: ab2c71406829cbc40ef96b8990b67b33744479c9
path: /examples/localhost-ssh-example/dag.py
content_id: 86df694a70774a3565740485f76c5efc8e870f02
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: godatadriven/whirl
snapshot_id: 39fba96c0250f93f5f0511cd48711cc8fad197c6
revision_id: f8c217dae5504de56fda37b49cb38b88ad394727
branch_name: refs/heads/master
visit_date: 2023-08-14T00:56:03.880426
revision_date: 2023-07-15T06:58:09
committer_date: 2023-07-15T06:58:09
github_id: 173,707,232
star_events_count: 183
fork_events_count: 13
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-21T08:03:30
gha_created_at: 2019-03-04T08:46:05
gha_language: Shell
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 649
extension: py
filename: dag.py
content:
from datetime import timedelta, datetime
from airflow import DAG
from airflow.providers.ssh.operators.ssh import SSHOperator
default_args = {
'owner': 'whirl',
'depends_on_past': False,
'start_date': datetime.now() - timedelta(minutes=20),
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG(dag_id='whirl-local-ssh-example',
default_args=default_args,
schedule_interval='@once',
dagrun_timeout=timedelta(seconds=120))
ssh_copy = SSHOperator(
ssh_conn_id='local_ssh',
task_id='test_ssh_operator',
command="cp /opt/airflow/airflow.cfg /tmp/copied_airflow.cfg",
dag=dag)
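# Hedged note (not part of the upstream example, and assuming Airflow >= 2.5):
# the DAG above can be exercised locally, without a scheduler, by calling
# `dag.test()` from an `if __name__ == "__main__":` block.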
---
blob_id: 933df067d2b7cb522ce2417babae65df0498963a
directory_id: 25df83bdcecde0b8a5d57afc1e5e9bedded1a5ab
path: /spikeextractors/extractors/cellexplorersortingextractor/__init__.py
content_id: 25a41d1b9cbcf0879d871677b2d2b6221f8b23be
detected_licenses: ["MIT"]
license_type: permissive
repo_name: SpikeInterface/spikeextractors
snapshot_id: 38313020db49fcc5692eb3d4e5b65c6fa83034ee
revision_id: d24335cc2fa6e407469b790dc8fea2fdadb2b2a3
branch_name: refs/heads/master
visit_date: 2022-12-04T23:04:51.998828
revision_date: 2022-11-22T12:00:25
committer_date: 2022-11-22T12:00:25
github_id: 150,145,518
star_events_count: 149
fork_events_count: 67
gha_license_id: MIT
gha_event_created_at: 2022-11-22T12:00:26
gha_created_at: 2018-09-24T17:58:51
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 71
extension: py
filename: __init__.py
content:
from .cellexplorersortingextractor import CellExplorerSortingExtractor
---
blob_id: d57c3d319a899b306a20e1e3043376fa219e15db
directory_id: ef4bd152c448cc4304e292bfbe38c08f0e9ed685
path: /src/torchmetrics/functional/audio/__init__.py
content_id: a0982d3443d39cdbc112036386a14b2b2ba95c07
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-proprietary-license"]
license_type: permissive
repo_name: Lightning-AI/torchmetrics
snapshot_id: 05aef74af7a167bdbf0f6f605454656a4031e26a
revision_id: 66f1859c5fefffcb37de4f430a6fbf71fb6c8c6b
branch_name: refs/heads/master
visit_date: 2023-08-31T20:50:41.184700
revision_date: 2023-08-31T16:17:12
committer_date: 2023-08-31T16:17:12
github_id: 323,721,661
star_events_count: 295
fork_events_count: 59
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T14:58:53
gha_created_at: 2020-12-22T20:02:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,135
extension: py
filename: __init__.py
content:
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.audio.pit import permutation_invariant_training, pit_permutate
from torchmetrics.functional.audio.sdr import (
scale_invariant_signal_distortion_ratio,
signal_distortion_ratio,
source_aggregated_signal_distortion_ratio,
)
from torchmetrics.functional.audio.snr import (
complex_scale_invariant_signal_noise_ratio,
scale_invariant_signal_noise_ratio,
signal_noise_ratio,
)
from torchmetrics.utilities.imports import (
_GAMMATONE_AVAILABEL,
_PESQ_AVAILABLE,
_PYSTOI_AVAILABLE,
_TORCHAUDIO_AVAILABEL,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
__all__ = [
"permutation_invariant_training",
"pit_permutate",
"scale_invariant_signal_distortion_ratio",
"source_aggregated_signal_distortion_ratio",
"signal_distortion_ratio",
"scale_invariant_signal_noise_ratio",
"signal_noise_ratio",
"complex_scale_invariant_signal_noise_ratio",
]
if _PESQ_AVAILABLE:
from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality # noqa: F401
__all__.append("perceptual_evaluation_speech_quality")
if _PYSTOI_AVAILABLE:
from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility # noqa: F401
__all__.append("short_time_objective_intelligibility")
if _GAMMATONE_AVAILABEL and _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio # noqa: F401
__all__.append("speech_reverberation_modulation_energy_ratio")
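# Hedged usage sketch (not part of the upstream module): the unconditionally
# exported functional metrics above take (preds, target) waveform tensors of
# matching shape and return the metric value in dB.
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    _target = torch.randn(8000)
    _preds = _target + 0.1 * torch.randn(8000)
    print(signal_noise_ratio(_preds, _target))
    print(scale_invariant_signal_distortion_ratio(_preds, _target))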
---
blob_id: 995bcf26829693041073c8214f304870c985f1c9
directory_id: e50503e4fc186c7bf8ad7e08fda896c53dae3f98
path: /scripts/rename_item_pngs.py
content_id: 2dffc08890cfa21d11d70cfd585648f28dc3fbad
detected_licenses: ["BSD-2-Clause"]
license_type: permissive
repo_name: Hyphen-ated/RebirthItemTracker
snapshot_id: 2e54fa3214ad751b9c6aaa9d3844e37cffae654f
revision_id: fdaff33c0789ba9dc4f43b166ebe88d48d8f1848
branch_name: refs/heads/master
visit_date: 2021-06-10T21:43:35.113154
revision_date: 2021-04-03T02:00:12
committer_date: 2021-04-03T02:00:12
github_id: 30,712,308
star_events_count: 152
fork_events_count: 75
gha_license_id: BSD-2-Clause
gha_event_created_at: 2021-04-02T20:01:35
gha_created_at: 2015-02-12T16:43:08
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 804
extension: py
filename: rename_item_pngs.py
content:
# this is a script to add new item pngs to the repository in the form the tracker expects
# basically we're going from "collectibles_440_kidneystone.png" to "collectibles_440.png"
# this is not part of the tracker itself
import os, re, shutil
files = []
incoming_files_directory = 'brand_new_images'
outgoing_files_directory = 'renamed_images'
for f in os.listdir(incoming_files_directory):
file = os.path.join(incoming_files_directory, f)
if os.path.isfile(file):
m = re.match(r'collectibles_(\d\d\d)_.*\.png', f)
if m:
itemid = m.group(1)
finalname = 'collectibles_' + itemid + '.png'
finalpath = os.path.join(outgoing_files_directory, finalname)
shutil.copy(file, finalpath)
print(file + " copied to " + finalpath)
---
blob_id: 7469015dce1162f7c22b0d601d71c2fa750dad5d
directory_id: b4cfd4949cab5dc5bd27fb028596a9fc02f4e1db
path: /skfda/exploratory/outliers/_envelopes.py
content_id: e14d397aedb42f0bbecbc6f8aeece7daa9e2fe0f
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: GAA-UAM/scikit-fda
snapshot_id: dabfd995f2c82efb0d44fa1d2005b2a8ca67442b
revision_id: dfbce35cc9e67d93306dddf0edf4f95aaacd8aff
branch_name: refs/heads/develop
visit_date: 2023-08-31T09:11:31.407423
revision_date: 2023-08-18T08:19:21
committer_date: 2023-08-18T08:19:21
github_id: 96,133,420
star_events_count: 231
fork_events_count: 55
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-08-18T08:19:22
gha_created_at: 2017-07-03T17:06:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,978
extension: py
filename: _envelopes.py
content:
from __future__ import annotations
import math
from typing import Tuple
import numpy as np
from ...representation import FDataGrid
from ...typing._numpy import NDArrayBool, NDArrayFloat, NDArrayInt
def compute_region(
fdatagrid: FDataGrid,
indices_descending_depth: NDArrayInt,
prob: float,
) -> FDataGrid:
"""Compute central region of a given quantile."""
indices_samples = indices_descending_depth[
:math.ceil(fdatagrid.n_samples * prob)
]
return fdatagrid[indices_samples]
def compute_envelope(region: FDataGrid) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Compute curves comprising a region."""
max_envelope = np.max(region.data_matrix, axis=0)
min_envelope = np.min(region.data_matrix, axis=0)
return min_envelope, max_envelope
def predict_outliers(
fdatagrid: FDataGrid,
non_outlying_threshold: Tuple[NDArrayFloat, NDArrayFloat],
) -> NDArrayBool:
"""
Predict outliers given a threshold.
A functional datum is considered an outlier if it has ANY point
in ANY dimension outside the envelope for inliers.
"""
min_threshold, max_threshold = non_outlying_threshold
or_axes = tuple(i for i in range(1, fdatagrid.data_matrix.ndim))
below_outliers: NDArrayBool = np.any(
fdatagrid.data_matrix < min_threshold,
axis=or_axes,
)
above_outliers: NDArrayBool = np.any(
fdatagrid.data_matrix > max_threshold,
axis=or_axes,
)
return below_outliers | above_outliers
def non_outlying_threshold(
central_envelope: Tuple[NDArrayFloat, NDArrayFloat],
factor: float,
) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Compute a non outlying threshold."""
iqr = central_envelope[1] - central_envelope[0]
non_outlying_threshold_max = central_envelope[1] + iqr * factor
non_outlying_threshold_min = central_envelope[0] - iqr * factor
return (
non_outlying_threshold_min,
non_outlying_threshold_max,
)
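# Hedged sketch (not part of the upstream module): a toy numeric check of the
# inflation performed by non_outlying_threshold. With a central envelope of
# (1, 3) and factor 1.5, the iqr is 2, so the thresholds become (-2, 6).
if __name__ == "__main__":
    _central = (np.array([1.0]), np.array([3.0]))
    print(non_outlying_threshold(_central, 1.5))  # (array([-2.]), array([6.]))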
---
blob_id: 74f98a0100087e85c0a3128e8d1b8e8caf98f9ca
directory_id: 2ead2b29aa5cd0de97d3bf57ffbdca5114dd9ad1
path: /latest_devices.py
content_id: 047fbeef757d09100bc09fa8bec3396f133de848
detected_licenses: ["MIT"]
license_type: permissive
repo_name: meraki/automation-scripts
snapshot_id: c9d036cdd98063776bcffa3794987de75167cbc9
revision_id: c6939a9bb9df5d87ac15a333f70d50fb2728a30a
branch_name: refs/heads/master
visit_date: 2023-06-09T13:18:40.674543
revision_date: 2023-06-06T06:41:50
committer_date: 2023-06-06T06:41:50
github_id: 96,457,083
star_events_count: 348
fork_events_count: 190
gha_license_id: MIT
gha_event_created_at: 2023-05-22T23:30:44
gha_created_at: 2017-07-06T17:49:01
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 16,838
extension: py
filename: latest_devices.py
content:
readMe = '''
Exports CSV of all in-use devices claimed after a specific date. Can include info
for one or more organizations. The script works by fetching the devices in the inventory
of the organizations being processed and creates a report for those that match the following
criteria:
* They are part of a network
* Their claim date is equal to or later than the start date parameter
* Their claim date is equal to or earlier than the end date parameter (if defined)
Note that the script assumes all organizations in scope have unique names.
Syntax, Windows:
python latest_devices.py [-k <api_key>] -o <org_name> -s <start_date> [-e <end_date>]
Syntax, Linux and Mac:
python3 latest_devices.py [-k <api_key>] -o <org_name> -s <start_date> [-e <end_date>]
Mandatory parameters:
-s <start_date> The start date of the reporting period. Must be in ISO format (eg. 2023-03-02)
-o <org_name> The name of the organization you want to run the report for, or one of the
following keywords:
/all Run report for all organizations
/ea Run report for all EA-enabled organizations. EA-organizations
are detected by checking if they have exactly 10000 MR licenses
Optional parameters:
-k <api_key> Your Meraki Dashboard API key. If omitted, one will be loaded from
environment variable MERAKI_DASHBOARD_API_KEY
-e <end_date> The end date of the reporting period. If omitted, the present date will be used
The CSV file exported will have the following name format:
devices_s_<start_date>_e_<end_date>_ts_<timestamp>.csv
Example:
Create CSV with all devices added into EA organizations since 1 January 2022:
python latest_devices.py -k 1234 -o /ea -s 2022-01-01
Required packages:
requests
To install required packages enter the following commands:
Windows:
pip install requests
Linux and Mac:
pip3 install requests
'''
#### START #### AUTO-GENERATED CODE TO INTERACT WITH MERAKI DASHBOARD ####
# Code generated using: https://github.com/mpapazog/rogue_meraki_python_sdk
import time
from urllib.parse import urlencode
from requests import Session, utils
class NoRebuildAuthSession(Session):
def rebuild_auth(self, prepared_request, response):
"""
This method is intentionally empty. Needed to prevent auth header stripping on redirect. More info:
https://stackoverflow.com/questions/60358216/python-requests-post-request-dropping-authorization-header
"""
API_MAX_RETRIES = 3
API_CONNECT_TIMEOUT = 60
API_TRANSMIT_TIMEOUT = 60
API_STATUS_RATE_LIMIT = 429
API_RETRY_DEFAULT_WAIT = 3
#Set to True or False to enable/disable console logging of sent API requests
FLAG_REQUEST_VERBOSE = True
API_BASE_URL = "https://api.meraki.com/api/v1"
def merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders=None, p_queryItems=None,
p_requestBody=None, p_verbose=False, p_retry=0):
#returns success, errors, responseHeaders, responseBody
if p_retry > API_MAX_RETRIES:
if(p_verbose):
print("ERROR: Reached max retries")
return False, None, None, None
bearerString = "Bearer " + str(p_apiKey)
headers = {"Authorization": bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ""
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix["%s[]" % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = "?" + urlencode(qArrayFix, True)
url = API_BASE_URL + p_endpoint + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {
'DELETE' : { 'function': session.delete, 'hasBody': False },
'GET' : { 'function': session.get, 'hasBody': False },
'POST' : { 'function': session.post, 'hasBody': True },
'PUT' : { 'function': session.put, 'hasBody': True }
}
try:
if(p_verbose):
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and not p_requestBody is None:
r = verbs[verb]['function'](
url,
headers = headers,
json = p_requestBody,
timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
)
else:
r = verbs[verb]['function'](
url,
headers = headers,
timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
)
else:
return False, None, None, None
except:
return False, None, None, None
if(p_verbose):
print(r.status_code)
success = r.status_code in range (200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if "Retry-After" in r.headers:
retryInterval = r.headers["Retry-After"]
if "retry-after" in r.headers:
retryInterval = r.headers["retry-after"]
if(p_verbose):
print("INFO: Hit max request rate. Retrying %s after %s seconds" % (p_retry+1, retryInterval))
time.sleep(int(retryInterval))
success, errors, responseHeaders, responseBody = merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders,
p_queryItems, p_requestBody, p_verbose, p_retry+1)
return success, errors, responseHeaders, responseBody
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if "errors" in rjson:
errors = rjson["errors"]
if(p_verbose):
print(errors)
else:
responseBody = rjson
if "Link" in r.headers:
parsedLinks = utils.parse_header_links(r.headers["Link"])
for link in parsedLinks:
if link["rel"] == "next":
if(p_verbose):
print("Next page:", link["url"])
splitLink = link["url"].split("/api/v1")
success, errors, responseHeaders, nextBody = merakiRequest(p_apiKey, p_httpVerb, splitLink[1],
p_additionalHeaders=p_additionalHeaders,
p_requestBody=p_requestBody,
p_verbose=p_verbose)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
return success, errors, responseHeaders, responseBody
# getOrganizations
#
# Description: List the organizations that the user has privileges on
# Endpoint: GET /organizations
#
# Endpoint documentation: https://developer.cisco.com/meraki/api-v1/#!get-organizations
def getOrganizations(apiKey):
url = "/organizations"
success, errors, headers, response = merakiRequest(apiKey, "get", url, p_verbose=FLAG_REQUEST_VERBOSE)
return success, errors, response
# getOrganizationInventoryDevices
#
# Description: Return the device inventory for an organization
# Endpoint: GET /organizations/{organizationId}/inventory/devices
#
# Endpoint documentation: https://developer.cisco.com/meraki/api-v1/#!get-organization-inventory-devices
#
# Query parameters:
# perPage: Integer. The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
# startingAfter: String. A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
# endingBefore: String. A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
# usedState: String. Filter results by used or unused inventory. Accepted values are 'used' or 'unused'.
# search: String. Search for devices in inventory based on serial number, mac address, or model.
# macs: Array. Search for devices in inventory based on mac addresses.
# networkIds: Array. Search for devices in inventory based on network ids.
# serials: Array. Search for devices in inventory based on serials.
# models: Array. Search for devices in inventory based on model.
# tags: Array. Filter devices by tags. The filtering is case-sensitive. If tags are included, 'tagsFilterType' should also be included (see below).
# tagsFilterType: String. To use with 'tags' parameter, to filter devices which contain ANY or ALL given tags. Accepted values are 'withAnyTags' or 'withAllTags', default is 'withAnyTags'.
# productTypes: Array. Filter devices by product type. Accepted values are appliance, camera, cellularGateway, sensor, switch, systemsManager, and wireless.
# licenseExpirationDate: String. Filter devices by license expiration date, ISO 8601 format. To filter with a range of dates, use 'licenseExpirationDate[<option>]=?' in the request. Accepted options include lt, gt, lte, gte.
def getOrganizationInventoryDevices(apiKey, organizationId, query=None):
url = "/organizations/" + str(organizationId) + "/inventory/devices"
success, errors, headers, response = merakiRequest(apiKey, "get", url, p_queryItems=query, p_verbose=FLAG_REQUEST_VERBOSE)
return success, errors, response
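# Hedged usage sketch (not part of the upstream script): the optional `query`
# dict is urlencoded as-is, so the filters documented above can be passed
# directly, for example:
#   success, errors, devices = getOrganizationInventoryDevices(
#       apiKey, organizationId, query={"usedState": "used", "productTypes": ["wireless"]})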
# getOrganizationLicensesOverview
#
# Description: Return an overview of the license state for an organization
# Endpoint: GET /organizations/{organizationId}/licenses/overview
#
# Endpoint documentation: https://developer.cisco.com/meraki/api-v1/#!get-organization-licenses-overview
def getOrganizationLicensesOverview(apiKey, organizationId):
url = "/organizations/" + str(organizationId) + "/licenses/overview"
success, errors, headers, response = merakiRequest(apiKey, "get", url, p_verbose=FLAG_REQUEST_VERBOSE)
return success, errors, response
#### END #### AUTO-GENERATED CODE TO INTERACT WITH MERAKI DASHBOARD ####
import sys, getopt, os, datetime, re
# https://stackoverflow.com/questions/41129921/validate-an-iso-8601-datetime-string-in-python
regex = r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])$'
matchIsoDateFormat = re.compile(regex).match
def checkIsoDate(str_val):
try:
if matchIsoDateFormat( str_val ) is not None:
return True
except:
pass
return False
def log(text, filePath=None):
logString = "%s -- %s" % (str(datetime.datetime.now())[:19], text)
print(logString)
if not filePath is None:
try:
with open(filePath, "a") as logFile:
logFile.write("%s\n" % logString)
except:
log("ERROR: Unable to append to log file")
def killScript(reason=None):
if reason is None:
print(readMe)
sys.exit()
else:
log("ERROR: %s" % reason)
sys.exit()
def getApiKey(argument):
if not argument is None:
return str(argument)
return os.environ.get("MERAKI_DASHBOARD_API_KEY", None)
def main(argv):
# [-k <api_key>] [-o <org_name>] -s <start_date> [-e <end_date>]
arg_apiKey = None
arg_orgName = None
arg_startDate = None
arg_endDate = None
try:
opts, args = getopt.getopt(argv, 'k:o:s:e:h')
except getopt.GetoptError:
killScript()
for opt, arg in opts:
if opt == '-k':
arg_apiKey = str(arg)
if opt == '-o':
arg_orgName = str(arg)
if opt == '-s':
arg_startDate = str(arg)
if opt == '-e':
arg_endDate = str(arg)
if opt == '-h':
killScript()
apiKey = getApiKey(arg_apiKey)
if apiKey is None:
log("ERROR: API key not found")
killScript()
if arg_orgName is None:
log("ERROR: No organization name or scope provided")
killScript()
if not checkIsoDate(arg_startDate):
killScript("Invalid or missing start date (must be YYYY-MM-DD)")
isoNow = datetime.datetime.now().isoformat()
isoToday = isoNow[:10]
if not arg_endDate is None:
if not checkIsoDate(arg_endDate):
killScript("Invalid end date (must be YYYY-MM-DD)")
else:
arg_endDate = isoToday
nowTimestamp = str(isoNow)[:19].replace('T','_').replace(':', '.')
fileName = "devices_s_%s_e_%s_ts_%s.csv" % (arg_startDate, arg_endDate, nowTimestamp)
log('Organization scope : "%s"' % arg_orgName)
log("Report start date : %s" % arg_startDate)
log("Report end date : %s" % arg_endDate)
log("Report filename : %s" % fileName)
success, errors, rawOrgs = getOrganizations(apiKey)
if rawOrgs == None:
killScript('Unable to fetch organizations')
organizations = []
if arg_orgName == '/all':
organizations = rawOrgs
elif arg_orgName == '/ea':
for org in rawOrgs:
if org['licensing']['model'] == 'co-term':
success, errors, licenseState = getOrganizationLicensesOverview(apiKey, org['id'])
if licenseState == None:
continue
if 'wireless' in licenseState['licensedDeviceCounts'] and licenseState['licensedDeviceCounts']['wireless'] == 10000:
organizations.append(org)
else:
for org in rawOrgs:
if org['name'] == arg_orgName:
organizations.append(org)
break
for org in organizations:
org['deviceCounts'] = {}
deviceCounts = {}
success, errors, rawDevices = getOrganizationInventoryDevices(apiKey, org['id'])
if rawDevices == None:
continue
for device in rawDevices:
claimDate = device['claimedAt'][:10]
if device['networkId'] != None and claimDate >= arg_startDate and claimDate <= arg_endDate:
if not device['model'] in deviceCounts:
deviceCounts[device['model']] = 0
deviceCounts[device['model']] += 1
org['deviceCounts'] = deviceCounts
models = []
orgNames = []
for org in organizations:
if org['deviceCounts'] != {}:
orgNames.append(org['name'])
for model in org['deviceCounts']:
if not model in models:
models.append(model)
models.sort()
orgNames.sort()
headersArray = ['orgName', 'orgId']
for model in models:
headersArray.append(model)
headersString = ','.join(headersArray)
reportLines = []
for orgName in orgNames:
for org in organizations:
if org['name'] == orgName:
line = [org['name'], org['id']]
for model in models:
amountStr = '0'
if model in org['deviceCounts']:
amountStr = str(org['deviceCounts'][model])
line.append(amountStr)
reportLines.append(','.join(line))
break
try:
f = open(fileName, 'w')
f.write("%s\n" % headersString)
for line in reportLines:
f.write("%s\n" % line)
f.close()
except:
killScript('Unable to write to file "%s"' % fileName)
log('File "%s" written' % fileName)
log('End of script.')
if __name__ == '__main__':
main(sys.argv[1:])
---
blob_id: d645795c73581a0b97e0139f68def2733aab2807
directory_id: 75699bf130edd9317dfc11338b18fb8750e4f95a
path: /source/todo/data_access.py
content_id: bfcc8ddf1ee3c05e18124e3db17f72fde091c787
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: foobuzz/todo
snapshot_id: a9fef55827c9aff20dd0f6dec99034b26b137282
revision_id: 8d21c51d401845d164ae38dfb30059233bc873e5
branch_name: refs/heads/master
visit_date: 2023-08-25T07:04:21.721162
revision_date: 2022-06-25T16:59:26
committer_date: 2022-06-25T17:03:44
github_id: 51,653,533
star_events_count: 451
fork_events_count: 65
gha_license_id: MIT
gha_event_created_at: 2022-12-27T16:36:01
gha_created_at: 2016-02-13T16:17:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 22,747
extension: py
filename: data_access.py
content:
import sqlite3, json, os
import os.path as op
from datetime import datetime
from . import utils, init_db
from .utils import DATA_DIR, DB_PATH, DATAFILE_NAME, DATA_CTX_NAME
DATETIME_MIN = '0001-01-01 00:00:00'
END_OF_JSON = '2.1'
def setup_data_access(current_version):
"""
Prepare the sqlite database so that it's ready to be used by the
application. It's supposed to work in any environment (new installation,
old version, etc) and will make any necessary conversion between different
versions (for example converting the json datafile from v2.2- into a
sqlite database)
"""
if not op.exists(DATA_DIR):
os.makedirs(DATA_DIR)
init_db.update_database(DB_PATH, current_version)
if current_version is not None \
and utils.compare_versions(current_version, END_OF_JSON) <= 0:
json_path = op.join(DATA_DIR, DATAFILE_NAME)
with open(json_path) as datafile:
data = json.load(datafile)
connection = sqlite3.connect(DB_PATH)
transfer_data(connection, data)
def transfer_data(connection, data):
""" Transfer all data from a v2.2- JSON datafile held in the `data`
dictionary into a sqlite database connected to with `connection`."""
daccess = DataAccess(connection)
for ctx, props in data['contexts'].items():
ctx = dbfy_context(ctx)
options = []
if 'p' in props:
options.append(('priority', props['p']))
if 'v' in props and props['v'] == 'hidden':
options.append(('visibility', 'hidden'))
daccess.get_or_create_context(ctx, options)
for task in data['tasks']:
title = task['content']
if 'context' in task:
ctx = dbfy_context(task['context'])
daccess.get_or_create_context(ctx)
else:
ctx = ''
options = []
if 'created' in task:
created = iso2sqlite(task['created'])
else:
created = DATETIME_MIN
options.append(('created', created))
for prop in ['start', 'deadline']:
if prop in task:
options.append((prop, iso2sqlite(task[prop])))
if 'done' in task and task['done']:
options.append(('done', '1'))
if 'priority' in task:
options.append(('priority', task['priority']))
daccess.add_task(title, None, ctx, options)
daccess.exit()
# In the database, contexts all descend from the root context ('') and any
# non-root context therefore starts with a dot, as the dot separates
# different hierarchical levels (<empty string> <dot> <subcontext name>).
# For convenience, we allow the user to type a context's path without the
# starting dot. To do this, we manually add a dot at the beginning of any path
# that doesn't already start with a dot, unless the path is empty, in which
# case it explicitly designates the root context.
#
# dbfy_context and userify_context perform the necessary conversions between
# user-typed context paths and their DB representation.
def dbfy_context(ctx):
if ctx == '' or ctx.startswith('.'):
return ctx
else:
return '.'+ctx
def userify_context(ctx):
if ctx == '':
return ''
else:
return ctx[1:]
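# Worked example (illustrative) of the convention described above:
#   dbfy_context('work.emails') -> '.work.emails'
#   dbfy_context('')            -> ''              (root context)
#   userify_context('.work')    -> 'work'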
# This one is used by transfer_data
def iso2sqlite(iso_date):
dt = datetime.strptime(iso_date, utils.ISO_DATE)
return dt.strftime(utils.SQLITE_DT_FORMAT)
def get_insert_components(options):
""" Takes a list of 2-tuple in the form (option, value) and returns a
triplet (colnames, placeholders, values) that permits making a database
query as follows: c.execute('INSERT INTO Table ({colnames}) VALUES
{placeholders}', values). """
col_names = ','.join(opt[0] for opt in options)
placeholders = ','.join('?' for i in range(len(options)))
if len(col_names) > 0:
col_names = ',' + col_names
if len(placeholders) > 0:
placeholders = ',' + placeholders
values = tuple(opt[1] for opt in options)
return col_names, placeholders, values
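# Worked example (illustrative):
#   get_insert_components([('priority', 3), ('done', '1')])
#   -> (',priority,done', ',?,?', (3, '1'))
# which is how add_task extends its base "INSERT INTO Task (title, content, context ...)" query.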
def get_update_components(options):
""" Same as get_insert_components but for update queries. Returns a tuple
in the form (placeholders, values) to be used as follows:
c.execute('UPDATE Table SET {placeholders}', values)"""
placeholders = ','.join(
'{}=?'.format(opt[0])
for opt in options
)
values = tuple(opt[1] for opt in options)
return placeholders, values
def check_options(options, allowed_options):
for option, val in options:
if option not in allowed_options:
raise ValueError('Illegal option: {}'.format(option))
def rename_context(path, name):
""" Returns a string which is what the path of the context would be if the
context pointed to by `path` would be renamed with `name`."""
return '.'.join(path.split('.')[:-1]+[name])
TASK_OPTIONS = {
'title',
'content',
'created',
'deadline',
'start',
'priority',
'context',
'done'
}
CONTEXT_OPTIONS = {
'priority',
'visibility'
}
class DataAccess():
""" Wrap SQL operations into an methods-based interface. An instance of
DataAccess is created thanks to a DB-API connection object that is
connected to a sqlite database setup for todo.
Some methods return instances of Row objects from the database. Such Row
objects support the mapping protocol. More specifically, when the
documentation mentions Row-task objects, it's a mapping which represents a
task with the following keys: id, title, created, deadline, start,
priority, done, context, ctx_path where context is the context ID the task
belongs to and ctx_path is the path of the same context.
Row-context objects represent a context with the following keys: id, path,
priority, visibility, own_tasks, total_tasks where own_tasks is the number
of tasks which directly belong to the context and total_tasks is the
number of tasks which belong to the context or one of the contexts in its
descendance.
The following list sums up the types and formats that must be used when
calling methods and that are used in the returned values from the methods:
* Task IDs are integers (type int). The hexadecimal representation is
dealt with by the calling application.
* Context paths are fully dotted (non-root contexts start with a dot since
it's <empty string (name of the root context)> <dot> <name of the
subcontext>). The starting dot is removed by the calling application
for user convenience.
* Datetimes are strings in the %Y-%m-%d %H:%M:%S format (see Python
datetimes formatting reference)
When a method accepts an `options` argument, it expects a list of 2-tuples
in the form (column name, value).
"""
def __init__(self, connection):
self.connection = connection
self.case_sensitive_like = False
self.set_case_sensitive_like(True)
c = self.connection.cursor()
c.execute('PRAGMA foreign_keys = ON;')
c.execute('PRAGMA synchronous = OFF;')
self.connection.row_factory = sqlite3.Row
self.changed_contexts = False
def set_case_sensitive_like(self, switch=True):
if self.case_sensitive_like == switch:
return
value = 'ON' if switch else 'OFF'
c = self.connection.cursor()
c.execute('PRAGMA case_sensitive_like = {};'.format(value))
self.case_sensitive_like = switch
def add_task(self, title, content, context='', options=[]):
""" Add a task titled `title` and associated to the given `context`,
with the given `options`. The context is created if not already
existing."""
check_options(options, TASK_OPTIONS)
cid = self.get_or_create_context(context)
query_tmp = """
INSERT INTO Task (title, content, context {})
VALUES (?, ?, ? {})
"""
col_names, placeholders, values = get_insert_components(options)
values = (title, content, cid) + values
query = query_tmp.format(col_names, placeholders)
c = self.connection.cursor()
c.execute(query, values)
return c.lastrowid
def update_task(self, tid, context=None, options=None):
""" Update the task identified by ID `tid` (int) with the given
`options`. If the context of the task needs to be updated as well,
then `context` should be passed the dotted path of the new context.
Otherwise it should be None."""
if options is None:
options = []
if context is None and not options:
raise ValueError(
"update_task cannot be called without any context of options"
)
check_options(options, TASK_OPTIONS)
if context is not None:
cid = self.get_or_create_context(context)
options = options.copy()
options.append(('context', cid))
query_tmp = """
UPDATE Task SET {}
WHERE id = ?
"""
placeholders, values = get_update_components(options)
values += (tid,)
query = query_tmp.format(placeholders)
c = self.connection.cursor()
c.execute(query, values)
return c.rowcount
def task_exists(self, tid):
c = self.connection.cursor()
c.execute("""
SELECT 1 FROM Task
WHERE id = ?
""", (tid,))
return c.fetchone() is not None
def get_task(self, tid):
""" Get the task identified by ID `tid` (int). Return a Row-Task object."""
query = """
SELECT t.*, c.path as ctx_path
FROM Task t JOIN Context c
ON t.context = c.id
WHERE t.id = ?
"""
c = self.connection.cursor()
c.execute(query, (tid,))
row = c.fetchone()
if row is None:
return None
return row
def set_task_dependencies(self, tid, dependencies):
# Remove any existing dependencies
c = self.connection.cursor()
c.execute("""
DELETE FROM TaskDependency
WHERE task_id = ?
""", (tid,))
# Add the new dependencies
unexisting_dependencies = []
for dependency_id in dependencies:
# Check that the dependency actually exists
c.execute("""
SELECT 1 FROM Task
WHERE id = ?
""", (dependency_id,))
if c.fetchone() is None:
unexisting_dependencies.append(dependency_id)
continue
# Add it
c.execute("""
INSERT INTO TaskDependency (task_id, dependency_id)
VALUES (?, ?)
""", (tid, dependency_id))
return unexisting_dependencies
def do_many(self, function, tids):
""" Call the method `function` for each task ID in the `tids` list.
`function` should accept only one positional argument (in addition to
self) which is a task ID."""
missing = []
for tid in tids:
updated = getattr(self, function)(tid)
if updated == 0:
missing.append(tid)
return missing
def set_done(self, tid):
c = self.connection.cursor()
c.execute("""
UPDATE Task SET done = datetime('now')
WHERE id = ?
AND done IS NULL
""", (tid,))
return c.rowcount
def set_undone(self, tid):
c = self.connection.cursor()
c.execute("""
UPDATE Task SET done = null
WHERE id = ?
AND done IS NOT NULL
""", (tid,))
return c.rowcount
def remove(self, tid):
c = self.connection.cursor()
c.execute("""
DELETE FROM Task
WHERE id = ?
""", (tid,))
return c.rowcount
def ping(self, tid):
c = self.connection.cursor()
c.execute("""
UPDATE Task
SET ping = ping + 1
WHERE id = ?
""", (tid,))
return c.rowcount
def get_or_create_context(self, path, options=[]):
""" Get the context whose path is `path`. If the context
doesn't exist, it is created, as well as all necessary
intermediary contexts. If the context is created, then `options` are
applied to the newly created context.
Return the ID of the context."""
check_options(options, CONTEXT_OPTIONS)
ctxs = path.split('.')[1:-1]
path_so_far = ''
c = self.connection.cursor()
for ctx in ctxs:
path_so_far += '.'+ctx
try:
c.execute("""
INSERT INTO Context (path)
VALUES (?)
""", (path_so_far,))
except sqlite3.IntegrityError: # Already exists
continue
else:
self.changed_contexts = True
query_tmp = """
INSERT INTO Context (path {})
VALUES (? {})
"""
col_names, placeholders, values = get_insert_components(options)
query = query_tmp.format(col_names, placeholders)
try:
c.execute(query, (path,) + values)
except sqlite3.IntegrityError as e:
c2 = self.connection.cursor()
c2.execute("""
SELECT id FROM Context
WHERE path = ?
""", (path,))
row = c2.fetchone()
return row[0]
else:
self.changed_contexts = True
return c.lastrowid
def context_exists(self, path):
""" Return a boolean indicating whether the context pointed to by the
dotted path `path` exists."""
c = self.connection.cursor()
c.execute("""
SELECT 1 FROM Context
WHERE path = ?
""", (path,))
row = c.fetchone()
return row is not None
def get_basic_context_tally(self, path):
""" Returns the number of (direct) tasks a context contains and the
number of direct subcontexts it contains, in the form of a
2-tuple."""
c = self.connection.cursor()
c.execute("""
SELECT COUNT(t.id)
FROM Task t
JOIN Context c
ON t.context = c.id
WHERE t.done IS NULL
AND c.path = ?
UNION ALL
SELECT COUNT(id)
FROM Context
WHERE path LIKE ?
""", (path, '{}_%'.format(path)))
result = c.fetchall()
return result[0][0], result[1][0]
def set_context(self, path, options=[]):
""" Set the context pointed to by `path` to have the given `options`.
If the context doesn't already exist, it's created, which is why this
method is called set_context and not update_context.
Return the number of rows affected."""
check_options(options, CONTEXT_OPTIONS)
cid = self.get_or_create_context(path)
query_tmp = """
UPDATE Context SET {}
WHERE id = ?
"""
placeholders, values = get_update_components(options)
query = query_tmp.format(placeholders)
c = self.connection.cursor()
c.execute(query, values + (cid,))
return c.rowcount
def move(self, ctx1, ctx2):
""" Move all the direct tasks from ctx1 to ctx2 (dotted paths).
Doesn't affect subcontexts (and subtasks) of ctx1."""
cid = self.get_or_create_context(ctx2)
c = self.connection.cursor()
c.execute("""
UPDATE Task
SET context = ?
WHERE context = (
SELECT id FROM Context
WHERE path = ?
)
""", (cid, ctx1))
def move_all(self, ctx1, ctx2):
""" Same as `move` but move tasks of subcontexts as well. (any
necessary context is created at the destination context."""
for ctx in self.get_descendants(ctx1):
dest = ctx2 + ctx['path'][len(ctx1):]
self.move(ctx['path'], dest)
def remove_context(self, path):
""" Remove the context (and all subcontexts and tasks/subtasks)
pointed to by `path`."""
# We only have to remove the context and its subcontexts, as the foreign
# key on tasks is set up to cascade the delete
c = self.connection.cursor()
c.execute("""
DELETE FROM Context
WHERE path LIKE ?
""", ('{}%'.format(path),))
self.changed_contexts = True
return c.rowcount
def rename_context(self, path, name):
"""Rename context with given path with name. Returns None if new name
already exists, number of row affected otherwise. `name` must NOT contain a dot.
"""
assert '.' not in name
renamed = rename_context(path, name)
# Lock the db at the first select to avoid race conditions
self.connection.isolation_level = 'IMMEDIATE'
# If the context (after renaming) already exists, abort
c = self.connection.cursor()
c.execute("""
SELECT 1 FROM Context WHERE path = ?
""", (renamed,))
if c.fetchone() is not None:
return None
# We need to rename all the subcontexts as well. Can't do it in one
# UPDATE query because sqlite's REPLACE would replace all occurrences
# and we risk renaming other things (contexts whose same structure
# repeats at a sub-level)
c = self.connection.cursor()
c.execute("""
SELECT id, path FROM Context WHERE path LIKE ?
""", ('{}%'.format(path),))
new = ((renamed + row['path'][len(path):], row['id']) for row in c)
c2 = self.connection.cursor()
c2.executemany("""
UPDATE Context
SET path = ?
WHERE ID = ?
""", new)
self.changed_contexts = True
return c2.rowcount
def todo(self, path='', recursive=False):
""" Return a list of Row-tasks which belong the the context pointed to
by `path`. If `recursive` is False, then the list only contains tasks
that *directly* belong to the context. Otherwise it contains tasks
from descendance as well. In the list, tasks are sorted by:
* priority, descending
* remaining time (before deadline, infinity if no deadline),
ascending
* datetime created, ascending
"""
if recursive:
operator, value = 'LIKE', '{}%'.format(path)
else:
operator, value = '=', path
c = self.connection.cursor()
c.execute("""
SELECT t.*, c.path as ctx_path
FROM Task t
JOIN Context c
ON t.context = c.id
WHERE c.path {} ?
AND t.done IS NULL
AND (c.path = ? OR c.visibility = 'normal')
AND (datetime('now')) >= datetime(t.start)
AND NOT EXISTS (
SELECT * FROM Task as Dependency
JOIN TaskDependency ON Dependency.id = TaskDependency.dependency_id
WHERE TaskDependency.task_id = t.id
AND Dependency.done is NULL
)
ORDER BY
priority DESC,
COALESCE(
julianday(deadline),
julianday('9999-12-31 23:59:59')
) - julianday('now') ASC,
ping DESC,
created ASC
""".format(operator), (value, path))
return c.fetchall()
def get_subcontexts(self, path='', get_empty=True):
"""
Return a list of Row-contexts that are direct children of the context
pointed to by `path`. The list doesn't contain contexts that have a
"hidden" visibility. If `get_empty` is False, then contexts that have
0 total tasks are excluded from the list. Tasks in hidden contexts are
not counted. In the list, contexts are sorted by:
* priority, descending
* total number of tasks (including tasks in descendance), ascending
"""
if get_empty:
add_condition = ''
else:
add_condition = 'AND total_tasks > 0'
c = self.connection.cursor()
c.execute("""
SELECT c.*, COUNT(own.id) as own_tasks, (
SELECT COUNT(t.id)
FROM Context c1
LEFT JOIN Task t
ON t.context = c1.id
AND t.start <= (datetime('now'))
AND t.done IS NULL
WHERE c1.path LIKE c.path||'%'
AND c1.visibility = 'normal'
) as total_tasks
FROM Context c
LEFT JOIN Task own
ON own.context = c.id
AND own.start <= (datetime('now'))
AND own.done IS NULL
WHERE path LIKE ?
AND path NOT LIKE ?
AND visibility = 'normal'
{}
GROUP BY c.id
ORDER BY
priority DESC,
total_tasks DESC
""".format(add_condition), (
'{}.%'.format(path),
'{}.%.%'.format(path),
)
)
return c.fetchall()
def get_descendants(self, path=''):
""" Return an iterator over Row-contexts that are in the descendance
of the context pointed to by `path`. The iterator is agnostic of
visibility and the contexts are sorted by their path. The first
element of the iterator is the given context itself. """
c = self.connection.cursor()
c.execute("""
SELECT c.*, COUNT(own.id) as own_tasks, (
SELECT COUNT(t.id)
FROM Context c1
LEFT JOIN Task t
ON t.context = c1.id
AND t.start <= (datetime('now'))
AND t.done IS NULL
WHERE c1.path LIKE c.path||'%'
) as total_tasks
FROM Context c
LEFT JOIN Task own
ON own.context = c.id
AND own.start <= (datetime('now'))
AND own.done IS NULL
WHERE path LIKE ?
GROUP BY c.id
ORDER BY
c.path
""", ('{}%'.format(path),))
return c
def history(self):
""" Return an iterator over Row-tasks which iterates over all the
tasks in existence, sorted by their date of creation."""
c = self.connection.cursor()
c.execute("""
SELECT t.*, c.path as ctx_path
FROM Task t JOIN Context c
ON t.context = c.id
ORDER BY t.created
""")
return c
def get_greatest_id(self):
""" Returns the greatest existing task ID, or None if there are no
task."""
c = self.connection.cursor()
c.execute("""
SELECT MAX(id)
FROM Task
""")
row = c.fetchone()
if row is None:
return None
else:
return row[0]
def purge(self, before):
""" Remove all done tasks that were created before `before`. Remove
all done tasks if `before` is None."""
c = self.connection.cursor()
query = """
DELETE FROM Task
WHERE done IS NOT NULL
"""
if before is not None:
query += """
AND created < ?
"""
values = (before,)
else:
values = ()
c.execute(query, values)
return c.rowcount
def search(self, term, ctx='', done=None, before=None, after=None,
case=False):
original = self.case_sensitive_like
self.set_case_sensitive_like(case)
c = self.connection.cursor()
query = """
SELECT t.*, c.path as ctx_path
FROM Task t JOIN Context c
ON t.context = c.id
WHERE t.title LIKE ?
AND c.path LIKE ?
"""
params = ('%{}%'.format(term), '{}%'.format(ctx))
if done is not None:
cond = 'IS NOT NULL' if done else 'IS NULL'
query += """
AND t.done {}
""".format(cond)
if before is not None:
query += """
AND t.created < ?
"""
params = params + (before,)
if after is not None:
query += """
AND t.created > ?
"""
params = params + (after,)
c.execute(query, params)
self.set_case_sensitive_like(original)
return c.fetchall()
def get_future_tasks(self):
c = self.connection.cursor()
now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
query = """
SELECT
t.*,
c.path as ctx_path,
group_concat(dependee.id, ', ') as dependencies_ids
FROM Task t
JOIN Context c ON t.context = c.id
LEFT JOIN TaskDependency ON TaskDependency.task_id = t.id
LEFT JOIN Task dependee ON TaskDependency.dependency_id = dependee.id
WHERE t.start > ?
OR dependee.id AND dependee.done IS NULL
OR dependee.id AND dependee.start > ?
GROUP BY t.id
ORDER BY t.created
"""
c.execute(query, (now, now))
return c.fetchall()
def take_editing_lock(self, tid):
"""
Set the column `editing` to 1 iff it's 0. Return True if the column was effectively set, False otherwise.
"""
c = self.connection.cursor()
c.execute("""
UPDATE Task
SET editing = 1
WHERE id = ?
AND editing = 0
""", (tid,))
taken = c.rowcount == 1
self.connection.commit()
return taken
def release_editing_lock(self, tid):
"""
Release the editing lock on a task. The caller is trusted to have the
lock and no verification is made.
"""
c = self.connection.cursor()
c.execute("""
UPDATE Task
SET editing = 0
WHERE id = ?
""", (tid,))
self.connection.commit()
def exit(self, save=True):
""" Close the database and save all operations done to it if `save` is
True. Write all context paths (NON fully-dotted) to the contexts file
if at least one context was created or removed during operations. The
contexts file exists for terminal auto-completion."""
if save:
self.connection.commit()
if self.changed_contexts:
c = self.connection.cursor()
c.execute("""
SELECT DISTINCT path FROM Context
ORDER BY path
""")
data_ctx = op.join(DATA_DIR, DATA_CTX_NAME)
with open(data_ctx, 'w') as ctx_file:
for row in c:
ctx = userify_context(row[0])
ctx_file.write(ctx + '\n')
self.connection.close()
---
blob_id: aefde3acd80e0ed767d2d5fddd85b79bb5441477
directory_id: 12ac38b4146253c5778d0e53eb4128dce2ae1860
path: /autosar/datatype.py
content_id: c93142b1c23c99b98dbf21fe1558dc0895bcfad9
detected_licenses: ["MIT"]
license_type: permissive
repo_name: cogu/autosar
snapshot_id: a75e12186877b4ace807ab5e14daa89c6fb42935
revision_id: 5db06cc46f27c1ef8edec92f6883850cfd9e61f1
branch_name: refs/heads/master
visit_date: 2023-08-30T19:59:59.277984
revision_date: 2023-05-08T19:10:21
committer_date: 2023-05-08T19:10:21
github_id: 63,766,974
star_events_count: 291
fork_events_count: 152
gha_license_id: MIT
gha_event_created_at: 2022-03-27T18:30:55
gha_created_at: 2016-07-20T09:05:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 32,190
extension: py
filename: datatype.py
content:
from autosar.element import Element
import autosar.base
import math
import json
import copy
import collections
class RecordTypeElement(Element):
"""
(AUTOSAR3)
Implementation of <RECORD-ELEMENT> (found inside <RECORD-TYPE>).
"""
def tag(self, version=None): return 'RECORD-ELEMENT'
def __init__(self, name, typeRef, parent = None, adminData = None):
super().__init__(name, parent, adminData)
self.typeRef=typeRef
def __eq__(self,other):
if self is other: return True
if type(self) == type(other):
if self.name == other.name:
lhs = None if self.typeRef is None else self.rootWS().find(self.typeRef)
rhs = None if other.typeRef is None else other.rootWS().find(other.typeRef)
if lhs != rhs:
print(self.name,self.typeRef)
return lhs==rhs
return False
class CompuScaleElement:
"""
Implementation of <COMPU-SCALE>
"""
def tag(self, version=None): return 'COMPU-SCALE'
def __init__(self, lowerLimit, upperLimit, lowerLimitType = 'CLOSED', upperLimitType = 'CLOSED', label=None, symbol=None, textValue = None, numerator = None, denominator = None, offset = None, mask = None, adminData=None):
self.lowerLimit = lowerLimit
self.upperLimit = upperLimit
self.lowerLimitType = lowerLimitType
self.upperLimitType = upperLimitType
self.symbol = symbol
self.label = label
self.adminData = adminData
self.textValue = textValue
self.offset = offset
self.numerator = numerator
self.denominator = denominator
self.mask = mask
class Unit(Element):
"""
Implementation of <UNIT>
"""
def tag(self, version=None): return 'UNIT'
def __init__(self, name, displayName, factor=None, offset=None, parent=None):
super().__init__(name, parent)
self.displayName=displayName
self.factor = factor #only supported in AUTOSAR 4 and above
self.offset = offset #only supported in AUTOSAR 4 and above
def __eq__(self,other):
if self is other: return True
if type(self) is type(other):
if (self.name==other.name) and (self.displayName == other.displayName) and (
self.factor == other.factor) and (self.offset == other.offset):
return True
return False
class DataType(Element):
"""
Base type for DataType (AUTOSAR3)
"""
def __init__(self, name, parent=None, adminData=None):
super().__init__(name, parent, adminData)
@property
def isComplexType(self):
return True if isinstance(self, (RecordDataType, ArrayDataType)) else False
class IntegerDataType(DataType):
"""
IntegerDataType (AUTOSAR3)
"""
def tag(self,version=None): return 'INTEGER-TYPE'
def __init__(self, name, minVal=0, maxVal=0, compuMethodRef=None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.minVal = int(minVal)
self.maxVal = int(maxVal)
self._minValType = 'CLOSED'
self._maxValType = 'CLOSED'
if isinstance(compuMethodRef,str):
self.compuMethodRef=compuMethodRef
elif hasattr(compuMethodRef,'ref'):
self.compuMethodRef=compuMethodRef.ref
else:
self.compuMethodRef=None
@property
def minValType(self):
return self._minValType
@minValType.setter
def minValType(self, value):
if (value != "CLOSED") and (value != "OPEN"):
raise ValueError('value must be either "CLOSED" or "OPEN"')
self._minValType=value
@property
def maxValType(self):
return self._maxValType
@maxValType.setter
def maxValType(self, value):
if (value != "CLOSED") and (value != "OPEN"):
raise ValueError('value must be either "CLOSED" or "OPEN"')
self._maxValType=value
def __eq__(self,other):
if self is other: return True
if type(other) is type(self):
if (self.name==other.name) and (self.minVal == other.minVal) and (self.maxVal==other.maxVal):
if (self.compuMethodRef is not None) and (other.compuMethodRef is not None):
return self.rootWS().find(self.compuMethodRef) == other.rootWS().find(other.compuMethodRef)
elif (self.compuMethodRef is None) and (other.compuMethodRef is None):
return True
def __deepcopy__(self,memo):
obj=type(self)(self.name,self.minVal,self.maxVal,self.compuMethodRef)
if self.adminData is not None: obj.adminData=copy.deepcopy(self.adminData,memo)
return obj
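# Illustrative sketch (not part of the original module): a plain 8-bit unsigned
# integer type, and one that references a CompuMethod by path (the reference
# string below is made up):
#
#   u8_type = IntegerDataType('UInt8', minVal=0, maxVal=255)
#   temp_type = IntegerDataType('Temp_T', 0, 255, compuMethodRef='/DataTypes/CompuMethods/Temp_T')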
class RecordDataType(DataType):
"""
RecordDataType (AUTOSAR3)
"""
def tag(self,version=None): return 'RECORD-TYPE'
def __init__(self, name, elements=None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.elements = []
if elements is not None:
for elem in elements:
if isinstance(elem, RecordTypeElement):
self.elements.append(elem)
elem.parent=self
else:
raise ValueError('Element must be an instance of RecordTypeElement')
def __eq__(self, other):
if self is other: return True
if (self.name == other.name) and (len(self.elements)==len(other.elements)):
for i in range(len(self.elements)):
if self.elements[i] != other.elements[i]: return False
return True
return False
def __deepcopy__(self,memo):
obj=type(self)(self.name)
if self.adminData is not None: obj.adminData=copy.deepcopy(self.adminData,memo)
for elem in self.elements:
obj.elements.append(RecordTypeElement(elem.name,elem.typeRef,self))
return obj
class ArrayDataType(DataType):
"""
ArrayDataType (AUTOSAR 3)
"""
def tag(self,version=None): return 'ARRAY-TYPE'
def __init__(self, name, typeRef, length, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.typeRef = typeRef
self.length = length
class BooleanDataType(DataType):
"""
BooleanDataType (AUTOSAR 3)
"""
def tag(self,version=None): return 'BOOLEAN-TYPE'
def __init__(self,name, parent=None, adminData=None):
super().__init__(name, parent, adminData)
class StringDataType(DataType):
"""
StringDataType (AUTOSAR 3)
"""
def tag(self,version=None): return 'STRING-TYPE'
def __init__(self,name,length,encoding, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.length=length
self.encoding=encoding
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name,'encoding':self.encoding,'length':self.length}
return data
def __eq__(self,other):
if self is other: return True
if type(self) == type(other):
if (self.name==other.name) and (self.length == other.length) and (self.encoding == other.encoding):
return True
return False
def __deepcopy__(self,memo):
obj=type(self)(self.name,self.length,self.encoding)
return obj
class RealDataType(DataType):
"""
RealDataType (AUTOSAR 3)
"""
def tag(self,version=None): return 'REAL-TYPE'
def __init__(self, name, minVal, maxVal, minValType='CLOSED', maxValType='CLOSED', hasNaN=False, encoding='SINGLE', parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.minVal=minVal
self.maxVal=maxVal
self.minValType = minValType
self.maxValType = maxValType
self.hasNaN=hasNaN
self.encoding=encoding
class Computation:
"""
Represents one computation (COMPU-INTERNAL-TO-PHYS or COMPU-PHYS-TO-INTERNAL).
Contains a list of CompuScaleElement objects as well as an optional defaultValue.
"""
def __init__(self, defaultValue = None):
self.elements = [] #list of CompuScaleElement
self.defaultValue = defaultValue
@property
def lowerLimit(self):
"""
Returns lowerLimit of first element
"""
if len(self.elements) > 0:
return self.elements[0].lowerLimit
else:
raise KeyError('No elements in Computation object')
@property
def upperLimit(self):
"""
Returns upperLimit of last element
"""
if len(self.elements) > 0:
return self.elements[-1].upperLimit
else:
raise KeyError('No elements in Computation object')
def createValueTable(self, elements, autoLabel = True):
"""
Creates a list of CompuScaleElements based on contents of the elements argument
When elements is a list of strings:
Creates one CompuScaleElement per list item and automatically calculates lower and upper limits
When elements is a list of tuples:
If 2-tuple: First element is both lowerLimit and upperLimit, second element is textValue.
If 3-tuple: First element is lowerLimit, second element is upperLimit, third element is textValue.
autoLabel: automatically creates a <SHORT-LABEL> based on the element.textValue (bool). Default=True
"""
lowerLimitType, upperLimitType = 'CLOSED', 'CLOSED'
for elem in elements:
if isinstance(elem, str):
limit = len(self.elements)
(lowerLimit, upperLimit, textValue) = (limit, limit, elem)
elif isinstance(elem, tuple):
if len(elem) == 2:
(limit, textValue) = elem
(lowerLimit, upperLimit, textValue) = (limit, limit, textValue)
elif len(elem) == 3:
lowerLimit, upperLimit, textValue = elem
else:
raise ValueError('invalid length: %d'%len(elem))
else:
raise ValueError('type not supported:%s'%str(type(elem)))
label = textValue if autoLabel else None
self.elements.append(CompuScaleElement(lowerLimit, upperLimit, lowerLimitType, upperLimitType, textValue = textValue, label = label))
def createRationalScaling(self, offset, numerator, denominator, lowerLimit, upperLimit, lowerLimitType = 'CLOSED', upperLimitType = 'CLOSED', label = None, symbol = None, adminData = None):
"""
Creates COMPU-SCALE based on rational scaling
"""
element = CompuScaleElement(lowerLimit, upperLimit, lowerLimitType, upperLimitType, label = label, symbol = symbol, offset = offset, numerator = numerator, denominator = denominator, adminData = adminData)
self.elements.append(element)
return element
def createBitMask(self, elements, autoLabel = True):
"""
When elements is a list of tuples:
If 2-tuple: First element is the bitmask (int), second element is the symbol (str)
"""
lowerLimitType, upperLimitType = 'CLOSED', 'CLOSED'
for elem in elements:
if isinstance(elem, tuple):
if len(elem) == 2:
(mask, symbol) = elem
(lowerLimit, upperLimit) = (mask, mask)
else:
raise ValueError('invalid length: %d'%len(elem))
else:
raise ValueError('type not supported:%s'%str(type(elem)))
label = symbol if autoLabel else None
self.elements.append(CompuScaleElement(lowerLimit, upperLimit, lowerLimitType, upperLimitType, symbol = symbol, label = label, mask = mask))
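# Illustrative sketch (not part of the original module) showing the three helper
# methods above; all values are made up:
#
#   comp = Computation()
#   comp.createValueTable(['OFF', 'ON'])                        # limits auto-assigned 0..0 and 1..1
#   comp.createValueTable([(3, 'ERROR'), (4, 7, 'RESERVED')])   # 2-tuples and 3-tuples
#   comp.createRationalScaling(offset=0, numerator=1, denominator=64, lowerLimit=0, upperLimit=65535)
#   comp.createBitMask([(0x01, 'FLAG_A'), (0x02, 'FLAG_B')])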
class CompuMethod(Element):
"""
CompuMethod class
"""
def tag(self,version=None): return 'COMPU-METHOD'
def __init__(self, name, useIntToPhys, usePhysToInt, unitRef = None, category = None, parent = None, adminData = None):
super().__init__(name, parent, adminData, category)
self.unitRef = unitRef
self.intToPhys = None
self.physToInt = None
if useIntToPhys:
self.intToPhys = Computation()
if usePhysToInt:
self.physToInt = Computation()
class ConstraintBase:
def __init__(self, lowerLimit, upperLimit, lowerLimitType, upperLimitType):
if lowerLimit is not None:
if isinstance(lowerLimit, str) and lowerLimit != '-INF':
raise ValueError('Unknown lowerLimit: '+lowerLimit)
self.lowerLimit = lowerLimit
if upperLimit is not None:
if isinstance(upperLimit, str) and upperLimit != 'INF':
raise ValueError('Unknown upperLimit: '+upperLimit)
self.upperLimit = upperLimit
if lowerLimitType == 'CLOSED' or lowerLimitType == 'OPEN':
self.lowerLimitType = lowerLimitType
else:
raise ValueError(lowerLimitType)
if upperLimitType == 'CLOSED' or upperLimitType == 'OPEN':
self.upperLimitType = upperLimitType
else:
raise ValueError(upperLimitType)
def check_value(self, value):
if ((self.lowerLimitType=='CLOSED') and (value<self.lowerLimit)) or ((self.lowerLimitType=='OPEN') and (value<=self.lowerLimit)) :
raise autosar.base.DataConstraintError('Value {} outside lower data constraint ({}) '.format(str(value), str(self.lowerLimit)))
if ((self.upperLimitType=='CLOSED') and (value>self.upperLimit)) or ((self.upperLimitType=='OPEN') and (value>=self.upperLimit)) :
raise autosar.base.DataConstraintError('Value {} outside upper data constraint ({}) '.format(str(value), str(self.upperLimit)))
class InternalConstraint(ConstraintBase):
def __init__(self, lowerLimit=None, upperLimit=None, lowerLimitType='CLOSED', upperLimitType='CLOSED'):
super().__init__(lowerLimit, upperLimit, lowerLimitType, upperLimitType)
class PhysicalConstraint(ConstraintBase):
def __init__(self, lowerLimit=None, upperLimit=None, lowerLimitType='CLOSED', upperLimitType='CLOSED'):
super().__init__(lowerLimit, upperLimit, lowerLimitType, upperLimitType)
class DataConstraint(Element):
def tag(self,version=None): return 'DATA-CONSTR'
def __init__(self, name, rules, constraintLevel=None, parent=None, adminData=None):
super().__init__(name, parent, adminData)
self.level = constraintLevel
self.rules = []
for rule in rules:
if rule['type'] == 'internalConstraint':
self.rules.append(InternalConstraint(lowerLimit=rule['lowerLimit'], upperLimit=rule['upperLimit'], lowerLimitType=rule['lowerLimitType'], upperLimitType=rule['upperLimitType']))
elif rule['type'] == 'physicalConstraint':
self.rules.append(PhysicalConstraint(lowerLimit=rule['lowerLimit'], upperLimit=rule['upperLimit'], lowerLimitType=rule['lowerLimitType'], upperLimitType=rule['upperLimitType']))
else:
raise NotImplementedError
@property
def constraintLevel(self):
if self.level is None or isinstance(self.level, int):
return self.level
else:
raise ValueError('Unknown constraintLevel: '+str(self.level))
@property
def lowerLimit(self):
if len(self.rules) == 1:
return self.rules[0].lowerLimit
else:
raise NotImplementedError('Only a single constraint rule supported')
@property
def upperLimit(self):
if len(self.rules) == 1:
return self.rules[0].upperLimit
else:
raise NotImplementedError('Only a single constraint rule supported')
@property
def lowerLimitType(self):
if len(self.rules) == 1:
return self.rules[0].lowerLimitType
else:
raise NotImplementedError('Only a single constraint rule supported')
@property
def upperLimitType(self):
if len(self.rules) == 1:
return self.rules[0].upperLimitType
else:
raise NotImplementedError('Only a single constraint rule supported')
def checkValue(self, v):
if len(self.rules) == 1:
self.rules[0].check_value(v)
else:
raise NotImplementedError('Only a single rule constraint supported')
def findByType(self, constraintType = 'internalConstraint'):
"""
Returns the first constraint of the given constraint type (internalConstraint or physicalConstraint)
"""
for rule in self.rules:
if (isinstance(rule, InternalConstraint) and constraintType == 'internalConstraint') or (isinstance(rule, PhysicalConstraint) and constraintType == 'physicalConstraint'):
return rule
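# Illustrative sketch (not part of the original module): the rules argument of
# DataConstraint is a list of dicts with the keys read in __init__ above; the
# values here are made up:
#
#   constr = DataConstraint('UInt8_DataConstr', [{'type': 'internalConstraint',
#       'lowerLimit': 0, 'upperLimit': 255,
#       'lowerLimitType': 'CLOSED', 'upperLimitType': 'CLOSED'}])
#   constr.checkValue(42)   # inside the limits; out-of-range values raise DataConstraintError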
class ImplementationDataType(Element):
def tag(self, version=None): return 'IMPLEMENTATION-DATA-TYPE'
def __init__(self, name, variantProps = None, dynamicArraySizeProfile = None, typeEmitter = None, category='VALUE', parent = None, adminData = None):
super().__init__(name, parent, adminData, category)
self.dynamicArraySizeProfile = dynamicArraySizeProfile
self.typeEmitter = typeEmitter
self.variantProps = []
self.subElements = []
self.symbolProps = None
if isinstance(variantProps, (autosar.base.SwDataDefPropsConditional, autosar.base.SwPointerTargetProps)):
self.variantProps.append(variantProps)
elif isinstance(variantProps, collections.abc.Iterable):
for elem in variantProps:
if isinstance(elem, (autosar.base.SwDataDefPropsConditional, autosar.base.SwPointerTargetProps)):
self.variantProps.append(elem)
else:
raise ValueError('Invalid type: ', type(elem))
def getArrayLength(self):
"""
Deprecated, use arraySize property instead
"""
return self.arraySize
def getTypeReference(self):
"""
Deprecated, use implementationTypeRef property instead
"""
return self.implementationTypeRef
@property
def arraySize(self):
if len(self.subElements)>0:
return self.subElements[0].arraySize
else:
return None
@property
def implementationTypeRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].implementationTypeRef
else:
raise RuntimeError('ImplementationDataType has no variantProps')
@property
def baseTypeRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].baseTypeRef
else:
raise RuntimeError('Element has no variantProps set')
@property
def dataConstraintRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].dataConstraintRef
else:
raise RuntimeError('Element has no variantProps set')
@property
def compuMethodRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].compuMethodRef
else:
raise RuntimeError('Element has no variantProps set')
def setSymbolProps(self, name, symbol):
"""
Sets SymbolProps for this data type
Arguments:
name: <SHORT-NAME> (str)
symbol: <SYMBOL> (str)
"""
self.symbolProps = autosar.base.SymbolProps( str(name), str(symbol))
class SwBaseType(Element):
def tag(self, version=None): return 'SW-BASE-TYPE'
def __init__(self, name, size = None, typeEncoding = None, nativeDeclaration = None, category='FIXED_LENGTH', parent = None, adminData = None):
super().__init__(name, parent, adminData, category)
self.size = None if size is None else int(size)
self.nativeDeclaration = nativeDeclaration
self.typeEncoding = typeEncoding
class ImplementationDataTypeElement(Element):
def tag(self, version=None): return 'IMPLEMENTATION-DATA-TYPE-ELEMENT'
def __init__(self, name, category = None, arraySize = None, arraySizeSemantics = None, variantProps = None, parent = None, adminData = None):
super().__init__(name, parent, adminData, category)
self.arraySize = arraySize
self.variantProps = []
if arraySize is not None:
if arraySizeSemantics is not None:
self.arraySizeSemantics = arraySizeSemantics
else:
self.arraySizeSemantics = 'FIXED-SIZE'
else:
self.arraySizeSemantics = None
if variantProps is not None:
if isinstance(variantProps, (autosar.base.SwDataDefPropsConditional, autosar.base.SwPointerTargetProps)):
self.variantProps.append(variantProps)
elif isinstance(variantProps, collections.abc.Iterable):
for elem in variantProps:
if isinstance(elem, (autosar.base.SwDataDefPropsConditional, autosar.base.SwPointerTargetProps)):
self.variantProps.append(elem)
else:
raise ValueError('Invalid type: ', type(elem))
class ApplicationDataType(Element):
"""
Base type for AUTOSAR application data types (AUTOSAR4)
Arguments:
name: <SHORT-NAME> (None or str)
variantProps: <SW-DATA-DEF-PROPS-VARIANTS> (instance (or list) of autosar.base.SwDataDefPropsConditional)
category: <CATEGORY> (None or str)
parent: parent object instance (usually the package it will belong to), (object)
adminData: <ADMIN-DATA> (instance of autosar.base.AdminData or dict)
"""
def __init__(self, name, variantProps=None, category=None, parent=None, adminData=None):
super().__init__(name, parent, adminData, category)
self.variantProps = []
if variantProps is not None:
if isinstance(variantProps, autosar.base.SwDataDefPropsConditional):
self.variantProps.append(variantProps)
else:
self.variantProps = list(variantProps)
@property
def compuMethodRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].compuMethodRef
else:
raise RuntimeError('Element has no variantProps')
@property
def dataConstraintRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].dataConstraintRef
else:
raise RuntimeError('Element has no variantProps')
@property
def unitRef(self):
if len(self.variantProps)>0:
return self.variantProps[0].unitRef
else:
raise RuntimeError('Element has no variantProps')
class ApplicationPrimitiveDataType(ApplicationDataType):
"""
Implementation of <APPLICATION-PRIMITIVE-DATA-TYPE> (AUTOSAR 4)
Arguments:
(see base class)
"""
def tag(self, version): return 'APPLICATION-PRIMITIVE-DATA-TYPE'
def __init__(self, name, variantProps=None, category=None, parent=None, adminData=None):
super().__init__(name, variantProps, category, parent, adminData)
class ApplicationArrayDataType(ApplicationDataType):
"""
Implementation of <APPLICATION-ARRAY-DATA-TYPE> (AUTOSAR 4)
Arguments:
element: <ELEMENT> (instance of ApplicationArrayElement)
"""
def tag(self, version): return 'APPLICATION-ARRAY-DATA-TYPE'
def __init__(self, name, element, variantProps = None, category = 'ARRAY', parent=None, adminData=None):
super().__init__(name, variantProps, category, parent, adminData)
if element is None or isinstance(element, ApplicationArrayElement):
self.element = element
if element is not None: element.parent = self
else:
raise ValueError("element argument must be None or instance of ApplicationArrayElement")
class ApplicationArrayElement(Element):
"""
An application array element (AUTOSAR 4).
This is to be used as the element property of ApplicationArrayDataType.
arguments:
name: <SHORT-NAME> (None or str)
typeRef: <TYPE-TREF> (None or str)
arraySize: <MAX-NUMBER-OF-ELEMENTS> (None or int)
sizeHandling: <ARRAY-SIZE-HANDLING> (None or str['ALL-INDICES-DIFFERENT-ARRAY-SIZE', 'ALL-INDICES-SAME-ARRAY-SIZE', 'INHERITED-FROM-ARRAY-ELEMENT-TYPE-SIZE', ])
sizeSemantics: <ARRAY-SIZE-SEMANTICS> (None or str['FIXED-SIZE', 'VARIABLE-SIZE']])
"""
def tag(self, version=None): return 'ELEMENT'
def __init__(self, name = None, typeRef = None, arraySize = None, sizeHandling = None, sizeSemantics = 'FIXED-SIZE', category = 'VALUE', parent = None, adminData = None):
super().__init__(name, parent, adminData, category)
self.typeRef = None if typeRef is None else str(typeRef)
self.arraySize = None if arraySize is None else int(arraySize)
self.sizeHandling = None if sizeHandling is None else str(sizeHandling)
self.sizeSemantics = None if sizeSemantics is None else str(sizeSemantics)
class ApplicationRecordDataType(ApplicationDataType):
"""
Implementation of <APPLICATION-RECORD-DATA-TYPE> (AUTOSAR 4)
Arguments:
elements: list of ApplicationRecordElement or None
"""
def tag(self, version): return 'APPLICATION-RECORD-DATA-TYPE'
def __init__(self, name, elements = None, variantProps = None, category = None, parent = None, adminData = None):
super().__init__(name, variantProps, category, parent, adminData)
if elements is None:
self.elements = []
else:
self.elements = list(elements)
def append(self, element):
"""
Append element to self.elements list
"""
if not isinstance(element, ApplicationRecordElement):
raise ValueError('element must be an instance of ApplicationRecordElement')
element.parent = self
self.elements.append(element)
def createElement(self, name, typeRef, category = 'VALUE', adminData = None):
"""
Creates a new instance of ApplicationRecordElement and appends it to internal elements list
"""
element = ApplicationRecordElement(name, typeRef, category, self, adminData)
self.elements.append(element)
class ApplicationRecordElement(Element):
"""
Implements <APPLICATION-RECORD-ELEMENT> (AUTOSAR4)
"""
def tag(self, version): return 'APPLICATION-RECORD-ELEMENT'
def __init__(self, name, typeRef, category = None, parent=None, adminData=None):
super().__init__(name, parent, adminData, category)
if not isinstance(typeRef, str):
raise autosar.base.InvalidDataTypeRef(typeRef)
self.typeRef = typeRef
class DataTypeMappingSet(Element):
def tag(self, version): return 'DATA-TYPE-MAPPING-SET'
def __init__(self, name, parent = None, adminData = None):
super().__init__(name, parent, adminData)
self.applicationTypeMap = {} #applicationDataTypeRef to implementationDataTypeRef dictionary
self.modeRequestMap = {} #modeDeclarationGroupRef to implementationDataTypeRef dictionary
def createDataTypeMapping(self, applicationDataTypeRef, implementationDataTypeRef):
self.applicationTypeMap[applicationDataTypeRef] = implementationDataTypeRef
return DataTypeMap(applicationDataTypeRef, implementationDataTypeRef)
def createModeRequestMapping(self, modeDeclarationGroupRef, implementationDataTypeRef):
self.modeRequestMap[modeDeclarationGroupRef] = implementationDataTypeRef
return ModeRequestTypeMap(modeDeclarationGroupRef, implementationDataTypeRef)
def add(self, item):
if isinstance(item, DataTypeMap):
self.createDataTypeMapping(item.applicationDataTypeRef, item.implementationDataTypeRef)
elif isinstance(item, ModeRequestTypeMap):
self.createModeRequestMapping(item.modeDeclarationGroupRef, item.implementationDataTypeRef)
else:
raise ValueError("Item is neither an instance of DataTypeMap nor ModeRequestTypeMap")
def getDataTypeMapping(self, applicationDataTypeRef):
"""
Returns an instance of DataTypeMap or None if not found.
"""
implementationDataTypeRef = self.applicationTypeMap.get(applicationDataTypeRef, None)
if implementationDataTypeRef is not None:
return DataTypeMap(applicationDataTypeRef, implementationDataTypeRef)
return None
def getModeRequestMapping(self, modeDeclarationGroupRef):
"""
Returns an instance of ModeRequestTypeMap or None if not found.
"""
implementationDataTypeRef = self.modeRequestMap.get(modeDeclarationGroupRef, None)
if implementationDataTypeRef is not None:
return ModeRequestTypeMap(modeDeclarationGroupRef, implementationDataTypeRef)
return None
def findMappedDataTypeRef(self, applicationDataTypeRef):
"""
Returns a reference (str) to the mapped implementation data type or None if not found.
"""
return self.applicationTypeMap.get(applicationDataTypeRef, None)
def findMappedDataType(self, applicationDataTypeRef):
"""
Returns the instance of the mapped implementation data type.
This requires that both the DataTypeMappingSet and the implementation data type reference are in the same AUTOSAR workspace.
"""
implementationDataTypeRef = self.applicationTypeMap.get(applicationDataTypeRef, None)
if implementationDataTypeRef is not None:
ws = self.rootWS()
if ws is None:
raise RuntimeError("Root workspace not found")
return ws.find(implementationDataTypeRef)
return None
def findMappedModeRequestRef(self, modeDeclarationGroupRef):
"""
Returns a reference (str) to the mapped implementation data type or None if not found.
"""
return self.modeRequestMap.get(modeDeclarationGroupRef, None)
def findMappedModeRequest(self, modeDeclarationGroupRef):
"""
Returns the instance of the mapped implementation data type.
This requires that both the DataTypeMappingSet and the implementation data type reference are in the same AUTOSAR workspace.
"""
implementationDataTypeRef = self.modeRequestMap.get(modeDeclarationGroupRef, None)
if implementationDataTypeRef is not None:
ws = self.rootWS()
if ws is None:
raise RuntimeError("Root workspace not found")
return ws.find(implementationDataTypeRef)
return None
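# Illustrative sketch (not part of the original module); the reference strings are
# made up:
#
#   mapping_set = DataTypeMappingSet('MappingSet')
#   mapping_set.createDataTypeMapping('/AppTypes/Temp_T', '/ImplTypes/Temp_T')
#   mapping_set.findMappedDataTypeRef('/AppTypes/Temp_T')   # -> '/ImplTypes/Temp_T'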
class DataTypeMap:
"""
Mapping from ApplicationDataType to ImplementationDataType.
"""
def __init__(self, applicationDataTypeRef, implementationDataTypeRef):
self.applicationDataTypeRef = applicationDataTypeRef
self.implementationDataTypeRef = implementationDataTypeRef
def tag(self, version): return 'DATA-TYPE-MAP'
class ModeRequestTypeMap:
"""
Mapping from ModeGroupDeclaration to ImplementationDataType.
"""
def __init__(self, modeDeclarationGroupRef, implementationDataTypeRef):
self.modeDeclarationGroupRef = modeDeclarationGroupRef
self.implementationDataTypeRef = implementationDataTypeRef
def tag(self, version): return 'MODE-REQUEST-TYPE-MAP'
|
76c24e6669ab4aec30c9dbd9121cbde28b32a355
|
7d60cde80c322ff41b233967456f9a5b9f938d16
|
/sqlalchemy/metadata-7-inspector.py
|
380b7332287be60c9358b85d7c5817957de633c0
|
[] |
no_license
|
besnik/tutorials
|
e8a9f9a0a6709f69746b51f5a5bdc3c7520c49d5
|
600494b7296854f873aac7b9ff3c4cc23be7d989
|
refs/heads/master
| 2020-05-21T20:28:21.944713
| 2020-03-28T07:25:14
| 2020-03-28T07:25:14
| 65,710,896
| 113
| 39
| null | 2017-03-07T19:13:13
| 2016-08-15T06:47:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
metadata-7-inspector.py
|
# define metadata for table
from sqlalchemy import MetaData
from sqlalchemy import Table, Column, ForeignKey
from sqlalchemy import Integer, String
metadata = MetaData() # metadata is a collection of tables and can be traversed like an XML DOM
city_table = Table("city", metadata,
Column("id", Integer, primary_key=True),
Column("name", String),
)
user_table = Table("user", metadata,
Column("id", Integer, primary_key=True),
Column("name", String),
#Column("city_id", Integer, ForeignKey("city.id")) # foreign key can be string "table.column"
Column("city_id", Integer, ForeignKey(city_table.columns.id)) # or it can be reference of column object
)
# init engine over database
from sqlalchemy import create_engine
engine = create_engine("sqlite://", echo=True)
# create table from metadata
metadata.create_all(engine)
# init inspector
from sqlalchemy import inspect
inspector = inspect(engine)
print("--- Inspector loaded ---")
# get info about tables in database
print(inspector.get_table_names()) # ['city', 'user']
print(inspector.get_columns("user")) # [{'type': INTEGER(), 'autoincrement': True, 'nullable': False, 'default': None, 'name': 'id', 'primary_key': 1}, {'type': VARCHAR(), 'autoincrement': True, 'nullable': True, 'default': None, 'name': 'name', 'primary_key': 0}]
print(inspector.get_foreign_keys("user")) # [{'name': None, 'referred_columns': ['id'], 'referred_table': 'city', 'referred_schema': None, 'constrained_columns': ['city_id']}]
print("--- Find tables with column named city_id ---")
for tname in inspector.get_table_names():
for column in inspector.get_columns(tname):
if (column["name"] == "city_id"):
print(tname)
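# Further inspection (illustrative addition, reusing the inspector above):
# primary key and index metadata can be queried the same way.
print("--- Primary key and index info for user ---")
print(inspector.get_pk_constraint("user")) # e.g. {'constrained_columns': ['id'], 'name': None}
print(inspector.get_indexes("user")) # [] (no indexes were defined above)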
|
b3621e61b6c073abaf42c99ee2a62c4539de9d0f
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/siprouting/_models.py
|
27d7f74153cc3388e20e1b3a65184fe6ce39bd0a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,035
|
py
|
_models.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class SipTrunk(object):
"""Represents a SIP trunk for routing calls. See RFC 4904.
:ivar fqdn: FQDN of the trunk.
:vartype fqdn: str
:ivar sip_signaling_port: Gets or sets SIP signaling port of the trunk.
:vartype sip_signaling_port: int
"""
_attribute_map = {
'fqdn': {'key': 'fqdn', 'type': 'str'},
'sip_signaling_port': {'key': 'sipSignalingPort', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
"""
:keyword fqdn: FQDN of the trunk.
:paramtype fqdn: str
:keyword sip_signaling_port: Gets or sets SIP signaling port of the trunk.
:paramtype sip_signaling_port: int
"""
self.fqdn = kwargs.get('fqdn', None)
self.sip_signaling_port = kwargs.get('sip_signaling_port', None)
class SipTrunkRoute(object):
"""Represents a trunk route for routing calls.
:ivar description: Gets or sets description of the route.
:vartype description: str
:ivar name: Gets or sets name of the route. Required.
:vartype name: str
:ivar number_pattern: Gets or sets regex number pattern for routing calls. .NET regex format is
supported.
The regex should match only digits with an optional '+' prefix without spaces.
I.e. "^+[1-9][0-9]{3,23}$". Required.
:vartype number_pattern: str
:ivar trunks: Gets or sets list of SIP trunks for routing calls. Trunks are represented as
FQDN.
:vartype trunks: list[str]
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'number_pattern': {'key': 'numberPattern', 'type': 'str'},
'trunks': {'key': 'trunks', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword description: Gets or sets description of the route.
:paramtype description: Optional[str]
:keyword name: Gets or sets name of the route. Required.
:paramtype name: str
:keyword number_pattern: Gets or sets regex number pattern for routing calls. .NET regex format
is supported.
The regex should match only digits with an optional '+' prefix without spaces.
I.e. "^+[1-9][0-9]{3,23}$". Required.
:paramtype number_pattern: str
:keyword trunks: Gets or sets list of SIP trunks for routing calls. Trunks are represented as
FQDN.
:paramtype trunks: Optional[List[str]]
"""
self.description = kwargs.get('description', None)
self.name = kwargs.get('name', None)
self.number_pattern = kwargs.get('number_pattern', None)
self.trunks = kwargs.get('trunks',None)
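# Illustrative usage sketch (not part of the SDK module); the FQDN, port and
# pattern below are made-up values:
#
#   trunk = SipTrunk(fqdn="sbc.contoso.com", sip_signaling_port=1234)
#   route = SipTrunkRoute(name="Default", number_pattern=r"^\+[1-9][0-9]{3,23}$", trunks=[trunk.fqdn])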
|
88989bf6721c86d498cfeb110accfab06616ba32
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/PageTemplateParamInfoDTO.py
|
e09617ee0d1876a5e7fb1d94e6a3bb6e0dc9167e
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,203
|
py
|
PageTemplateParamInfoDTO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PageTemplateParamInfoDTO(object):
def __init__(self):
self._allow_user_input = None
self._code = None
self._ext_info = None
self._front_render_type = None
self._label = None
self._required = None
self._type = None
self._value = None
@property
def allow_user_input(self):
return self._allow_user_input
@allow_user_input.setter
def allow_user_input(self, value):
self._allow_user_input = value
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def front_render_type(self):
return self._front_render_type
@front_render_type.setter
def front_render_type(self, value):
self._front_render_type = value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def required(self):
return self._required
@required.setter
def required(self, value):
self._required = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_alipay_dict(self):
params = dict()
if self.allow_user_input:
if hasattr(self.allow_user_input, 'to_alipay_dict'):
params['allow_user_input'] = self.allow_user_input.to_alipay_dict()
else:
params['allow_user_input'] = self.allow_user_input
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.front_render_type:
if hasattr(self.front_render_type, 'to_alipay_dict'):
params['front_render_type'] = self.front_render_type.to_alipay_dict()
else:
params['front_render_type'] = self.front_render_type
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.required:
if hasattr(self.required, 'to_alipay_dict'):
params['required'] = self.required.to_alipay_dict()
else:
params['required'] = self.required
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
if self.value:
if hasattr(self.value, 'to_alipay_dict'):
params['value'] = self.value.to_alipay_dict()
else:
params['value'] = self.value
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PageTemplateParamInfoDTO()
if 'allow_user_input' in d:
o.allow_user_input = d['allow_user_input']
if 'code' in d:
o.code = d['code']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'front_render_type' in d:
o.front_render_type = d['front_render_type']
if 'label' in d:
o.label = d['label']
if 'required' in d:
o.required = d['required']
if 'type' in d:
o.type = d['type']
if 'value' in d:
o.value = d['value']
return o
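# Illustrative round-trip sketch (not part of the generated SDK class); the values
# are made up:
#
#   dto = PageTemplateParamInfoDTO.from_alipay_dict({'code': 'demo_code', 'required': True})
#   dto.label = 'Demo label'
#   dto.to_alipay_dict()   # -> {'code': 'demo_code', 'label': 'Demo label', 'required': True}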
|
55d315bb7fe89d44a97f13a60487890bee093fad
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/modules/scsi.py
|
e405865806a29f89d82c73347a5ef013692ffb1a
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,724
|
py
|
scsi.py
|
"""
SCSI administration module
"""
import logging
import os.path
import salt.utils.path
log = logging.getLogger(__name__)
__func_alias__ = {"ls_": "ls"}
def ls_(get_size=True):
"""
List SCSI devices, with details
CLI Examples:
.. code-block:: bash
salt '*' scsi.ls
salt '*' scsi.ls get_size=False
get_size : True
Get the size information for scsi devices. This option
should be set to False for older OS distributions (RHEL6 and older)
due to lack of support for the '-s' option in lsscsi.
.. versionadded:: 2015.5.10
"""
if not salt.utils.path.which("lsscsi"):
__context__["retcode"] = 1
return "scsi.ls not available - lsscsi command not found"
if get_size:
cmd = "lsscsi -dLsv"
else:
cmd = "lsscsi -dLv"
ret = {}
res = __salt__["cmd.run_all"](cmd)
rc = res.get("retcode", -1)
if rc != 0:
__context__["retcode"] = rc
error = res.get("stderr", "").split("\n")[0]
if error == "lsscsi: invalid option -- 's'":
return "{} - try get_size=False".format(error)
return res.get("stderr", "").split("\n")[0]
data = res.get("stdout", "")
for line in data.splitlines():
if line.startswith("["):
size = None
major = None
minor = None
comps = line.strip().split()
key = comps[0]
if get_size:
size = comps.pop()
majmin = comps.pop()
if majmin.startswith("["):
major, minor = majmin.replace("[", "").replace("]", "").split(":")
device = comps.pop()
model = " ".join(comps[3:])
ret[key] = {
"lun": key.replace("[", "").replace("]", ""),
"size": size,
"major": major,
"minor": minor,
"device": device,
"model": model,
}
elif line.startswith(" "):
if line.strip().startswith("dir"):
comps = line.strip().split()
ret[key]["dir"] = [comps[1], comps[2].replace("[", "").replace("]", "")]
else:
comps = line.strip().split("=")
ret[key][comps[0]] = comps[1]
return ret
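# Shape of the returned mapping, for illustration only (device details are made up):
#   {'[1:0:0:0]': {'lun': '1:0:0:0', 'size': '500GB', 'major': '8', 'minor': '0',
#                  'device': '/dev/sda', 'model': 'ATA Samsung SSD 850', 'state': 'running'}}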
def rescan_all(host):
"""
Rescan all SCSI devices on the given host
CLI Example:
.. code-block:: bash
salt '*' scsi.rescan_all 0
"""
if os.path.isdir("/sys/class/scsi_host/host{}".format(host)):
cmd = 'echo "- - -" > /sys/class/scsi_host/host{}/scan'.format(host)
else:
return "Host {} does not exist".format(host)
return __salt__["cmd.run"](cmd).splitlines()
|
7e5eebc8d32bd40059d7c08045db0774c74b2f8c
|
90bb1316ae8efc82f25005de9a4dd98c302d4876
|
/examples/vibrancy.py
|
12ef85f5b0daa71e0abf2bd55e14d01883b89730
|
[
"BSD-3-Clause"
] |
permissive
|
r0x0r/pywebview
|
9b5f789430be56a1f709911c81f92e671f1ae527
|
46c5404b72ef8194454a63ff9981ab59c9c2e27e
|
refs/heads/master
| 2023-08-31T05:32:04.531754
| 2023-08-30T20:04:33
| 2023-08-30T20:04:33
| 26,908,699
| 4,094
| 680
|
BSD-3-Clause
| 2023-09-06T01:57:46
| 2014-11-20T11:05:46
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
vibrancy.py
|
"""
This example demonstrates how to set vibrancy (macOS).
The window is created as transparent and the HTML body background is set to transparent.
"""
import webview
def load_css(window):
window.load_css('body { background: transparent !important; }')
if __name__ == '__main__':
window = webview.create_window(
'Vibrancy example', 'https://pywebview.flowrl.com/hello', transparent=True, vibrancy=True
)
webview.start(load_css, window)
|
7b223f7686382a31eb19cc5e566eb83119659f89
|
22c47b4602212c54b022fd74e2f6c2c879d9699d
|
/chemml/chem/magpie_python/__init__.py
|
34eb7b3945b973910fb430b7770a9938ce1db0b8
|
[
"BSD-3-Clause"
] |
permissive
|
hachmannlab/chemml
|
3b3c295c5de8b5f9e8127b7e11aae62d6c3507ef
|
d511d91500f757de46162d2f8331e353a68de6a0
|
refs/heads/master
| 2023-06-08T22:21:07.063302
| 2023-05-25T22:49:41
| 2023-05-25T22:49:41
| 113,404,097
| 140
| 34
|
BSD-3-Clause
| 2023-05-25T22:49:42
| 2017-12-07T04:48:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,831
|
py
|
__init__.py
|
"""
This module is adapted entirely from Magpie (https://bitbucket.org/wolverton/magpie).
If you are using this module, please cite Magpie as:
L. Ward, A. Agrawal, A. Choudhary, and C. Wolverton, "A general-purpose machine learning framework for predicting properties of inorganic materials," npj Computational Materials, vol. 2, no. 1, Aug. 2016.
For more information regarding the python version of Magpie, please see https://github.com/ramv2/magpie_python.
The chemml.chem.magpie_python module includes (please click on links adjacent to function names for more information):
- APEAttributeGenerator: :func:`~chemml.chem.magpie_python.APEAttributeGenerator`
- ChargeDependentAttributeGenerator: :func:`~chemml.chem.magpie_python.ChargeDependentAttributeGenerator`
- ElementalPropertyAttributeGenerator: :func:`~chemml.chem.magpie_python.ElementalPropertyAttributeGenerator`
- ElementFractionAttributeGenerator: :func:`~chemml.chem.magpie_python.ElementFractionAttributeGenerator`
- ElementPairPropertyAttributeGenerator: :func:`~chemml.chem.magpie_python.ElementPairPropertyAttributeGenerator`
- GCLPAttributeGenerator: :func:`~chemml.chem.magpie_python.GCLPAttributeGenerator`
- IonicCompoundProximityAttributeGenerator: :func:`~chemml.chem.magpie_python.IonicCompoundProximityAttributeGenerator`
- IonicityAttributeGenerator: :func:`~chemml.chem.magpie_python.IonicityAttributeGenerator`
- MeredigAttributeGenerator: :func:`~chemml.chem.magpie_python.MeredigAttributeGenerator`
- StoichiometricAttributeGenerator: :func:`~chemml.chem.magpie_python.StoichiometricAttributeGenerator`
- ValenceShellAttributeGenerator: :func:`~chemml.chem.magpie_python.ValenceShellAttributeGenerator`
- YangOmegaAttributeGenerator: :func:`~chemml.chem.magpie_python.YangOmegaAttributeGenerator`
- APRDFAttributeGenerator: :func:`~chemml.chem.magpie_python.APRDFAttributeGenerator`
- ChemicalOrderingAttributeGenerator: :func:`~chemml.chem.magpie_python.ChemicalOrderingAttributeGenerator`
- CoordinationNumberAttributeGenerator: :func:`~chemml.chem.magpie_python.CoordinationNumberAttributeGenerator`
- CoulombMatrixAttributeGenerator: :func:`~chemml.chem.magpie_python.CoulombMatrixAttributeGenerator`
- EffectiveCoordinationNumberAttributeGenerator: :func:`~chemml.chem.magpie_python.EffectiveCoordinationNumberAttributeGenerator`
- LatticeSimilarityAttributeGenerator: :func:`~chemml.chem.magpie_python.LatticeSimilarityAttributeGenerator`
- LocalPropertyDifferenceAttributeGenerator: :func:`~chemml.chem.magpie_python.LocalPropertyDifferenceAttributeGenerator`
- LocalPropertyVarianceAttributeGenerator: :func:`~chemml.chem.magpie_python.LocalPropertyVarianceAttributeGenerator`
- PackingEfficiencyAttributeGenerator: :func:`~chemml.chem.magpie_python.PackingEfficiencyAttributeGenerator`
- PRDFAttributeGenerator: :func:`~chemml.chem.magpie_python.PRDFAttributeGenerator`
- StructuralHeterogeneityAttributeGenerator: :func:`~chemml.chem.magpie_python.StructuralHeterogeneityAttributeGenerator`
- CompositionEntry: :func:`~chemml.chem.magpie_python.CompositionEntry`
- CrystalStructureEntry: :func:`~chemml.chem.magpie_python.CrystalStructureEntry`
"""
__all__ = [
'APEAttributeGenerator',
'ChargeDependentAttributeGenerator',
'ElementalPropertyAttributeGenerator',
'ElementFractionAttributeGenerator',
'ElementPairPropertyAttributeGenerator',
'GCLPAttributeGenerator',
'IonicCompoundProximityAttributeGenerator',
'IonicityAttributeGenerator',
'MeredigAttributeGenerator',
'StoichiometricAttributeGenerator',
'ValenceShellAttributeGenerator',
'YangOmegaAttributeGenerator',
'APRDFAttributeGenerator',
'ChemicalOrderingAttributeGenerator',
'CoordinationNumberAttributeGenerator',
'CoulombMatrixAttributeGenerator',
'EffectiveCoordinationNumberAttributeGenerator',
'LatticeSimilarityAttributeGenerator',
'LocalPropertyDifferenceAttributeGenerator',
'LocalPropertyVarianceAttributeGenerator',
'PackingEfficiencyAttributeGenerator',
'PRDFAttributeGenerator',
'StructuralHeterogeneityAttributeGenerator',
'CompositionEntry',
'CrystalStructureEntry',
]
from chemml.chem.magpie_python.attributes.generators.composition.APEAttributeGenerator import \
APEAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.ChargeDependentAttributeGenerator \
import ChargeDependentAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.ElementalPropertyAttributeGenerator \
import ElementalPropertyAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.ElementFractionAttributeGenerator \
import ElementFractionAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.ElementPairPropertyAttributeGenerator \
import ElementPairPropertyAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.GCLPAttributeGenerator import \
GCLPAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition\
.IonicCompoundProximityAttributeGenerator import \
IonicCompoundProximityAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.IonicityAttributeGenerator import \
IonicityAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.MeredigAttributeGenerator import \
MeredigAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.StoichiometricAttributeGenerator \
import StoichiometricAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.ValenceShellAttributeGenerator import \
ValenceShellAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.composition.YangOmegaAttributeGenerator import \
YangOmegaAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.APRDFAttributeGenerator import \
APRDFAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.ChemicalOrderingAttributeGenerator import \
ChemicalOrderingAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.CoordinationNumberAttributeGenerator \
import CoordinationNumberAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.CoulombMatrixAttributeGenerator import \
CoulombMatrixAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal\
.EffectiveCoordinationNumberAttributeGenerator import \
EffectiveCoordinationNumberAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.LatticeSimilarityAttributeGenerator import\
LatticeSimilarityAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.LocalPropertyDifferenceAttributeGenerator \
import LocalPropertyDifferenceAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.LocalPropertyVarianceAttributeGenerator \
import LocalPropertyVarianceAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.PackingEfficiencyAttributeGenerator import\
PackingEfficiencyAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.PRDFAttributeGenerator import \
PRDFAttributeGenerator
from chemml.chem.magpie_python.attributes.generators.crystal.StructuralHeterogeneityAttributeGenerator \
import StructuralHeterogeneityAttributeGenerator
from chemml.chem.magpie_python.data.materials.CompositionEntry import CompositionEntry
from chemml.chem.magpie_python.data.materials.CrystalStructureEntry import CrystalStructureEntry
# __all__ = ["attributes", "data", "models", "test", "utility", "vassal"]
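# Illustrative note (not part of this module): any name listed in __all__ above can
# be imported directly from the package, e.g.
#   from chemml.chem.magpie_python import StoichiometricAttributeGenerator, CompositionEntry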
|
0a7775bf480d9c333260e5339a08d35a17e9c102
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/dashboard/dashboard/models/anomaly_config_test.py
|
51ca15065611cdedc693a89b4bf8d323e37c1ddc
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
anomaly_config_test.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
from google.appengine.ext import ndb
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly_config
class AnomalyConfigTest(testing_common.TestCase):
def testGetAnomalyConfigDict(self):
testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
test = utils.TestKey('M/b/foo/bar').get()
# The sample test has no overridden config.
self.assertEqual({}, anomaly_config.GetAnomalyConfigDict(test))
# Override the config for the test added above.
# The overridden config is set in the pre-put hook of the TestMetadata.
my_config = {
'_comment': 'Very particular segment sizes.',
'max_window_size': 721,
'min_segment_size': 123,
}
my_patterns = [test.test_path]
anomaly_config.AnomalyConfig(config=my_config, patterns=my_patterns).put()
test.UpdateSheriff()
test.put()
# The sample test now has an overridden config which is used.
# Extraneous "comment" keys are ignored.
expected = {
'max_window_size': 721,
'min_segment_size': 123,
}
self.assertEqual(expected, anomaly_config.GetAnomalyConfigDict(test))
@mock.patch('logging.warning')
def testGetAnomalyConfigDict_OverriddenConfigNotFound(self,
mock_logging_warning):
testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
test = utils.TestKey('M/b/foo/bar').get()
test.overridden_anomaly_config = ndb.Key('AnomalyConfig', 'Non-existent')
self.assertEqual({}, anomaly_config.GetAnomalyConfigDict(test))
mock_logging_warning.assert_called_once_with(
'No AnomalyConfig fetched from key %s for test %s',
ndb.Key('AnomalyConfig', 'Non-existent'), 'M/b/foo/bar')
self.assertIsNone(test.key.get().overridden_anomaly_config)
if __name__ == '__main__':
unittest.main()
|
c4670af39ce9add0a2193bfa901be3cdf0f7f5cf
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/telemetry/build/update_docs.py
|
d24f6733fadae7dbf3abdbf0df528f6b56201aad
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,771
|
py
|
update_docs.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
import optparse
import os
import pkgutil
import pydoc
import re
import sys
import telemetry
from telemetry.core import util
TELEMETRY_DIR = util.GetTelemetryDir()
DOCS_DIR = os.path.join(TELEMETRY_DIR, 'docs', 'pydoc')
def RemoveAllDocs():
for dirname, _, filenames in os.walk(DOCS_DIR):
for filename in filenames:
os.remove(os.path.join(dirname, filename))
def GenerateHTMLForModule(module):
html = pydoc.html.page(
pydoc.describe(module), pydoc.html.document(module, module.__name__))
# pydoc writes out html with links in a variety of funky ways. We need
# to fix them up.
assert not TELEMETRY_DIR.endswith(os.sep)
links = re.findall('(<a href="(.+?)">(.+?)</a>)', html)
for link_match in links:
link, href, link_text = link_match
if not href.startswith('file:'):
continue
new_href = href.replace('file:', '')
new_href = new_href.replace(TELEMETRY_DIR, '..')
new_href = new_href.replace(os.sep, '/')
new_link_text = link_text.replace(TELEMETRY_DIR + os.sep, '')
new_link = '<a href="%s">%s</a>' % (new_href, new_link_text)
html = html.replace(link, new_link)
# pydoc writes out html with absolute path file links. This is not suitable
# for checked in documentation. So, fix up the HTML after it is generated.
#html = re.sub('href="file:%s' % TELEMETRY_DIR, 'href="..', html)
#html = re.sub(TELEMETRY_DIR + os.sep, '', html)
return html
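# For illustration only (not part of the original script), assuming
# TELEMETRY_DIR == '/abs/path/telemetry', a pydoc link such as
#   <a href="file:/abs/path/telemetry/core/util.py">/abs/path/telemetry/core/util.py</a>
# comes out of the loop above as
#   <a href="../core/util.py">core/util.py</a>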
def WriteHTMLForModule(module):
page = GenerateHTMLForModule(module)
path = os.path.join(DOCS_DIR, '%s.html' % module.__name__)
with open(path, 'w') as f:
sys.stderr.write('Wrote %s\n' % os.path.relpath(path))
f.write(page)
def GetAllModulesToDocument(module):
modules = [module]
for _, modname, _ in pkgutil.walk_packages(module.__path__,
module.__name__ + '.'):
if modname.endswith('_unittest'):
logging.debug('skipping %s due to being a unittest', modname)
continue
module = __import__(modname, fromlist=[''])
name, _ = os.path.splitext(module.__file__)
if not os.path.exists(name + '.py'):
logging.info('skipping %s due to being an orphan .pyc', module.__file__)
continue
modules.append(module)
return modules
class AlreadyDocumentedModule(object):
def __init__(self, filename):
self.filename = filename
@property
def name(self):
basename = os.path.basename(self.filename)
return os.path.splitext(basename)[0]
@property
def contents(self):
with open(self.filename, 'r') as f:
return f.read()
def GetAlreadyDocumentedModules():
modules = []
for dirname, _, filenames in os.walk(DOCS_DIR):
for filename in filenames:
path = os.path.join(dirname, filename)
modules.append(AlreadyDocumentedModule(path))
return modules
def IsUpdateDocsNeeded():
already_documented_modules = GetAlreadyDocumentedModules()
already_documented_modules_by_name = dict(
(module.name, module) for module in already_documented_modules)
current_modules = GetAllModulesToDocument(telemetry)
# Quick check: if the names of modules have changed, we definitely need
# an update.
already_documented_module_names = set(m.name
for m in already_documented_modules)
current_module_names = {m.__name__ for m in current_modules}
if current_module_names != already_documented_module_names:
return True
# Generate the new docs and compare against the old. If changed, then
# an update is needed.
for current_module in current_modules:
already_documented_module = already_documented_modules_by_name[
current_module.__name__]
current_html = GenerateHTMLForModule(current_module)
if current_html != already_documented_module.contents:
return True
return False
def Main(args):
parser = optparse.OptionParser()
parser.add_option(
'-v',
'--verbose',
action='count',
dest='verbosity',
help='Increase verbosity level (repeat as needed)')
options, args = parser.parse_args(args)
if options.verbosity and options.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif options.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
assert os.path.isdir(DOCS_DIR), '%s does not exist' % DOCS_DIR
RemoveAllDocs()
old_cwd = os.getcwd()
try:
os.chdir(TELEMETRY_DIR)
for module in GetAllModulesToDocument(telemetry):
WriteHTMLForModule(module)
finally:
os.chdir(old_cwd)
|
52d2a4b703309e7a661dddef5c342c34109a775d
|
1bb8bea68052cb7a29b09494d0697f907bb7e6f3
|
/HomeAssistant_File/custom_components/blueiris/managers/home_assistant.py
|
bcb94270fd3458dd7d908e3caf356aa3cee185c0
|
[
"Unlicense"
] |
permissive
|
abeksis/My-HomeAssistant-Config
|
467e5d7df9b8daf785917bce620e3402315a44fd
|
3d9dd11f1a72f9ad44ea72e8476b1b2b95ed3e6a
|
refs/heads/master
| 2023-04-26T19:13:48.225988
| 2023-04-16T08:03:57
| 2023-04-16T08:03:57
| 199,162,968
| 107
| 22
| null | 2019-07-28T09:29:09
| 2019-07-27T12:37:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,059
|
py
|
home_assistant.py
|
"""
Support for Blue Iris.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/blueiris/
"""
from datetime import datetime
import logging
import sys
from typing import Optional
from cryptography.fernet import InvalidToken
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_registry import (
EntityRegistry,
async_get_registry as er_async_get_registry,
)
from homeassistant.helpers.event import async_call_later, async_track_time_interval
from ..api.blue_iris_api import BlueIrisApi
from ..helpers.advanced_configurations_generator import AdvancedConfigurationGenerator
from ..helpers.const import *
from ..models.config_data import ConfigData
from .configuration_manager import ConfigManager
from .device_manager import DeviceManager
from .entity_manager import EntityManager
from .password_manager import PasswordManager
from .storage_manager import StorageManager
_LOGGER = logging.getLogger(__name__)
class BlueIrisHomeAssistant:
def __init__(self, hass: HomeAssistant, password_manager: PasswordManager):
self._hass = hass
self._remove_async_track_time = None
self._is_initialized = False
self._is_updating = False
self._entity_registry = None
self._api = None
self._entity_manager = None
self._device_manager = None
self._storage_manager = None
self._config_generator: Optional[AdvancedConfigurationGenerator] = None
self._config_manager = ConfigManager(password_manager)
@property
def api(self) -> BlueIrisApi:
return self._api
@property
def entity_manager(self) -> EntityManager:
return self._entity_manager
@property
def device_manager(self) -> DeviceManager:
return self._device_manager
@property
def entity_registry(self) -> EntityRegistry:
return self._entity_registry
@property
def config_manager(self) -> ConfigManager:
return self._config_manager
@property
def storage_manager(self) -> StorageManager:
return self._storage_manager
@property
def config_data(self) -> Optional[ConfigData]:
if self._config_manager is not None:
return self._config_manager.data
return None
async def async_init(self, entry: ConfigEntry):
try:
self._storage_manager = StorageManager(self._hass)
await self._config_manager.update(entry)
self._api = BlueIrisApi(self._hass, self._config_manager)
self._entity_manager = EntityManager(self._hass, self)
self._device_manager = DeviceManager(self._hass, self)
self._config_generator = AdvancedConfigurationGenerator(self._hass, self)
self._entity_registry = await er_async_get_registry(self._hass)
self._hass.loop.create_task(self._async_init())
except InvalidToken:
error_message = "Encryption key got corrupted, please remove the integration and re-add it"
_LOGGER.error(error_message)
data = await self._storage_manager.async_load_from_store()
data.key = None
await self._storage_manager.async_save_to_store(data)
await self._hass.services.async_call(
"persistent_notification",
"create",
{"title": DEFAULT_NAME, "message": error_message},
)
except Exception as ex:
exc_type, exc_obj, tb = sys.exc_info()
line_number = tb.tb_lineno
_LOGGER.error(f"Failed to async_init, error: {ex}, line: {line_number}")
async def _async_init(self):
load = self._hass.config_entries.async_forward_entry_setup
for domain in SIGNALS:
await load(self._config_manager.config_entry, domain)
self._is_initialized = True
await self.async_update_entry()
def _update_entities(self, now):
self._hass.async_create_task(self.async_update(now))
async def async_update_entry(self, entry: ConfigEntry = None):
update_config_manager = entry is not None
if not update_config_manager:
entry = self._config_manager.config_entry
self._remove_async_track_time = async_track_time_interval(
self._hass, self._update_entities, SCAN_INTERVAL
)
if not self._is_initialized:
_LOGGER.info(
f"NOT INITIALIZED - Failed handling ConfigEntry change: {entry.as_dict()}"
)
return
_LOGGER.info(f"Handling ConfigEntry change: {entry.as_dict()}")
if update_config_manager:
await self._config_manager.update(entry)
await self._api.initialize()
await self.async_update(datetime.now())
data = await self.storage_manager.async_load_from_store()
integration_data = data.integrations.get(entry.title)
if update_config_manager and integration_data is not None:
if integration_data.generate_configuration_files:
async_call_later(self._hass, 5, self.generate_config_files)
integration_data.generate_configuration_files = False
await self.storage_manager.async_save_to_store(data)
async def async_remove(self, entry: ConfigEntry):
_LOGGER.info(f"Removing current integration - {entry.title}")
if self._remove_async_track_time is not None:
self._remove_async_track_time()
self._remove_async_track_time = None
unload = self._hass.config_entries.async_forward_entry_unload
for domain in SUPPORTED_DOMAINS:
await unload(entry, domain)
await self._device_manager.async_remove()
_LOGGER.info(f"Current integration ({entry.title}) removed")
async def async_update(self, event_time):
if not self._is_initialized:
_LOGGER.info(f"NOT INITIALIZED - Failed updating @{event_time}")
return
try:
if self._is_updating:
_LOGGER.debug(f"Skip updating @{event_time}")
return
_LOGGER.debug(f"Updating @{event_time}")
self._is_updating = True
await self._api.async_update()
self.device_manager.update()
self.entity_manager.update()
await self.dispatch_all()
except Exception as ex:
exc_type, exc_obj, tb = sys.exc_info()
line_number = tb.tb_lineno
_LOGGER.error(f"Failed to async_update, Error: {ex}, Line: {line_number}")
self._is_updating = False
async def delete_entity(self, domain, name):
try:
entity = self.entity_manager.get_entity(domain, name)
device_name = entity.device_name
unique_id = entity.unique_id
self.entity_manager.delete_entity(domain, name)
device_in_use = self.entity_manager.is_device_name_in_use(device_name)
entity_id = self.entity_registry.async_get_entity_id(
domain, DOMAIN, unique_id
)
self.entity_registry.async_remove(entity_id)
if not device_in_use:
await self.device_manager.delete_device(device_name)
except Exception as ex:
exc_type, exc_obj, tb = sys.exc_info()
line_number = tb.tb_lineno
_LOGGER.error(f"Failed to delete_entity, Error: {ex}, Line: {line_number}")
async def dispatch_all(self):
if not self._is_initialized:
_LOGGER.info("NOT INITIALIZED - Failed discovering components")
return
for domain in SUPPORTED_DOMAINS:
signal = SIGNALS.get(domain)
async_dispatcher_send(self._hass, signal)
def generate_config_files(self, now):
self._config_generator.generate()
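# Editor's note: a hedged sketch (not part of this integration) of the consumer side of
# dispatch_all() above. A platform module for one of the SUPPORTED_DOMAINS would
# typically subscribe to its signal during setup; handle_update is an illustrative name:
#   from homeassistant.helpers.dispatcher import async_dispatcher_connect
#
#   async def async_setup_entry(hass, entry, async_add_entities):
#       def handle_update():
#           ...  # rebuild this platform's entities from entity_manager
#       async_dispatcher_connect(hass, SIGNALS[domain], handle_update)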
| source: red-hat-storage/ocs-ci | path: /tests/ecosystem/upgrade/test_configuration.py | license: MIT (permissive) | language: Python (UTF-8) | size: 1,435 bytes |
import logging
import pytest
from ocs_ci.framework.pytest_customization.marks import (
pre_upgrade,
post_upgrade,
brown_squad,
)
from ocs_ci.ocs.resources import pod
log = logging.getLogger(__name__)
def get_crush_map():
"""
Get decompiled CRUSH map from ceph toolbox pod.
Returns:
str: Multiline string representing current Ceph CRUSH map
"""
ct_pod = pod.get_ceph_tools_pod()
file_comp = "/tmp/crush_comp"
file_decomp = "/tmp/crush_decomp"
ct_pod.exec_ceph_cmd(f"ceph osd getcrushmap -o {file_comp}")
ct_pod.exec_ceph_cmd(f"crushtool -d {file_comp} -o {file_decomp}")
return ct_pod.exec_sh_cmd_on_pod(f"cat {file_decomp}")
@pytest.fixture(scope="session")
def pre_upgrade_crush_map():
"""
Loads CRUSH map before upgrade by `test_load_crush_map` test case.
Returns:
str: String consisting of CRUSH map before upgrade
"""
crush_map = get_crush_map()
log.info(f"Pre upgrade CRUSH map: {crush_map}")
return crush_map
@pre_upgrade
@brown_squad
def test_load_crush_map(pre_upgrade_crush_map):
"""
Load CRUSH map.
"""
assert pre_upgrade_crush_map
@post_upgrade
@brown_squad
@pytest.mark.polarion_id("OCS-1936")
def test_crush_map_unchanged(pre_upgrade_crush_map):
"""
Test that CRUSH map loaded before upgrade is the same as CRUSH map after
upgrade.
"""
    assert pre_upgrade_crush_map == get_crush_map()
| source: python-visualization/folium | path: /setup.py | license: MIT (permissive) | language: Python (UTF-8) | size: 2,140 bytes |
import os
from setuptools import setup
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return open(os.path.join(rootpath, *parts)).read()
def walk_subpkg(name):
data_files = []
package_dir = "folium"
for parent, dirs, files in os.walk(os.path.join(package_dir, name)):
# Remove package_dir from the path.
sub_dir = os.sep.join(parent.split(os.sep)[1:])
for f in files:
data_files.append(os.path.join(sub_dir, f))
return data_files
package_data = {
"": [
"*.js",
"plugins/*.js",
"plugins/*.html",
"plugins/*.css",
"plugins/*.tpl",
"templates/*.html",
"templates/*.js",
"templates/*.txt",
]
+ walk_subpkg("templates/tiles")
}
packages = ["folium", "folium.plugins"]
# Dependencies.
with open("requirements.txt") as f:
tests_require = f.readlines()
install_requires = [t.strip() for t in tests_require]
setup(
name="folium",
description="Make beautiful maps with Leaflet.js & Python",
license="MIT",
long_description="{}".format(read("README.rst")),
long_description_content_type="text/x-rst",
author="Rob Story",
author_email="wrobstory@gmail.com",
url="https://github.com/python-visualization/folium",
keywords="data visualization",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
],
platforms="any",
packages=packages,
package_data=package_data,
python_requires=">=3.5",
extras_require={"testing": ["pytest"]},
install_requires=install_requires,
zip_safe=False,
use_scm_version={
"write_to": "folium/_version.py",
"write_to_template": '__version__ = "{version}"',
"tag_regex": r"^(?P<prefix>v)?(?P<version>[^\+]+)(?P<suffix>.*)?$",
},
)
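# Editor's note: illustrative only. walk_subpkg("templates/tiles") walks
# folium/templates/tiles/ and returns paths relative to the package directory, e.g.
# entries of the form "templates/tiles/<provider>/tiles.txt" (the concrete file names
# depend on the folium checkout); that relative form is what setuptools expects in
# package_data.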
| source: microsoft/MM-REACT | path: /langchain/serpapi.py | license: LicenseRef-scancode-generic-cla, MIT (permissive) | language: Python (UTF-8) | size: 119 bytes |
"""For backwards compatiblity."""
from langchain.utilities.serpapi import SerpAPIWrapper
__all__ = ["SerpAPIWrapper"]
| source: koxudaxi/datamodel-code-generator | path: /tests/data/expected/main/main_external_files_in_directory/output.py | license: MIT (permissive) | language: Python (UTF-8) | size: 1,737 bytes |
# generated by datamodel-codegen:
# filename: person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from enum import Enum
from typing import List, Optional, Union
from pydantic import BaseModel, Extra, Field, conint
class Fur(Enum):
Short_hair = 'Short hair'
Long_hair = 'Long hair'
class Noodle(Enum):
ramen = 'ramen'
spaghetti = 'spaghetti'
class Soup(Enum):
bean = 'bean'
mushroom = 'mushroom'
tomato = 'tomato'
class Coffee(Enum):
Black = 'Black'
Espresso = 'Espresso'
class Tea(Enum):
Oolong = 'Oolong'
Green = 'Green'
class Pet(BaseModel):
name: Optional[str] = None
age: Optional[int] = None
fur: Optional[Fur] = None
class Friend(BaseModel):
class Config:
extra = Extra.allow
name: str = Field(..., example='John Doe')
phone_number: Optional[str] = Field(None, example='(555) 555-1234')
food: Optional[List[Union[Noodle, Soup]]] = None
class Friends(BaseModel):
__root__: List[Friend] = Field(..., title='Friends')
class Person(BaseModel):
first_name: str = Field(..., description="The person's first name.")
last_name: str = Field(..., description="The person's last name.")
age: Optional[conint(ge=0)] = Field(None, description='Age in years.')
pets: Optional[List[Pet]] = None
friends: Optional[Friends] = None
robot: Optional[Robot] = None
comment: None = None
drink: Optional[List[Union[Coffee, Tea]]] = None
food: Optional[List[Union[Noodle, Soup]]] = None
class Robot(Pet):
friends: Optional[Person] = None
drink: Optional[Coffee] = None
food: Optional[Noodle] = None
pet: Optional[Pet] = None
Person.update_forward_refs()
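# Editor's note: illustrative usage of the generated models above (all values are made
# up); guarded so nothing runs when the module is imported.
if __name__ == "__main__":
    example = Person(
        first_name="Ada",
        last_name="Lovelace",
        age=36,
        pets=[Pet(name="Rex", age=3, fur=Fur.Short_hair)],
        drink=[Coffee.Espresso, Tea.Green],
    )
    print(example.json(exclude_none=True))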
| source: dropbox/nsot | path: /nsot/api/urls.py | license: Apache-2.0 (permissive) | language: Python (UTF-8) | size: 2,240 bytes |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.conf.urls import url, include
from django.conf import settings
from . import routers, views
# Register all endpoints as a top-level resource
router = routers.BulkRouter(trailing_slash=settings.APPEND_SLASH)
# Resources pinned to API index at /
router.register(r'sites', views.SiteViewSet)
router.register(r'attributes', views.AttributeViewSet)
router.register(r'changes', views.ChangeViewSet)
router.register(r'circuits', views.CircuitViewSet)
router.register(r'devices', views.DeviceViewSet)
router.register(r'interfaces', views.InterfaceViewSet)
router.register(r'networks', views.NetworkViewSet)
router.register(r'protocols', views.ProtocolViewSet)
router.register(r'protocol_types', views.ProtocolTypeViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'values', views.ValueViewSet)
# Nested router for resources under /sites
sites_router = routers.BulkNestedRouter(
router, r'sites', lookup='site', trailing_slash=settings.APPEND_SLASH
)
# Resources that are nested under /sites
sites_router.register(r'attributes', views.AttributeViewSet)
sites_router.register(r'changes', views.ChangeViewSet)
sites_router.register(r'circuits', views.CircuitViewSet)
sites_router.register(r'devices', views.DeviceViewSet)
sites_router.register(r'interfaces', views.InterfaceViewSet)
sites_router.register(r'networks', views.NetworkViewSet)
sites_router.register(r'protocols', views.ProtocolViewSet)
sites_router.register(r'protocol_types', views.ProtocolTypeViewSet)
sites_router.register(r'values', views.ValueViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
# API routes
url(r'^', include(router.urls)),
url(r'^', include(sites_router.urls)),
# Browsable API auth login
url(r'^auth/', include('rest_framework.urls',
namespace='rest_framework')),
# API auth_token login/verify (email/secret_key)
url(r'^authenticate/', views.AuthTokenLoginView.as_view(),
name='authenticate'),
url(r'^verify_token/', views.AuthTokenVerifyView.as_view(),
name='verify_token'),
]
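# Editor's note: a rough sketch of the URL space these routers generate (patterns
# abbreviated; exact regexes depend on the router implementation): the top-level router
# yields routes such as /sites/, /devices/ and /networks/, while the nested
# BulkNestedRouter registered with lookup='site' yields site-scoped routes such as
# /sites/{site_pk}/devices/ and /sites/{site_pk}/networks/.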
| source: mdn/django-diy-blog | path: /blog/tests/test_views.py | license: CC0-1.0 (permissive) | language: Python (UTF-8) | size: 1,549 bytes |
from django.test import TestCase
# Create your tests here.
from blog.models import Blog, BlogAuthor
from django.urls import reverse
from django.contrib.auth.models import User #Blog author or commenter
class BlogListView(TestCase):
@classmethod
def setUpTestData(cls):
test_user1 = User.objects.create_user(username='testuser1', password='12345')
test_user1.save()
blog_author = BlogAuthor.objects.create(user=test_user1, bio='This is a bio')
number_of_blogs = 13
for blog_num in range(number_of_blogs):
Blog.objects.create(name='Test Blog %s' % blog_num,author=blog_author,description='Test Blog %s Description' % blog_num)
def test_view_url_exists_at_desired_location(self):
resp = self.client.get('/blog/blogs/')
self.assertEqual(resp.status_code, 200)
def test_view_url_accessible_by_name(self):
resp = self.client.get(reverse('blogs'))
self.assertEqual(resp.status_code, 200)
def test_view_uses_correct_template(self):
resp = self.client.get(reverse('blogs'))
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'blog/blog_list.html')
def test_pagination_is_five(self):
resp = self.client.get(reverse('blogs'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('is_paginated' in resp.context)
self.assertTrue(resp.context['is_paginated'] == True)
self.assertEqual( len(resp.context['blog_list']), 5)
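    def test_lists_remaining_blogs_on_last_page(self):
        # Editor's sketch, not part of the original tutorial tests: with 13 blogs and
        # 5 per page, the third (last) page should hold the remaining 3 entries.
        resp = self.client.get(reverse('blogs') + '?page=3')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.context['blog_list']), 3)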
| source: PaddlePaddle/Paddle | path: /test/legacy_test/test_detection_map_op.py | license: Apache-2.0 (permissive) | language: Python (UTF-8) | size: 11,886 bytes |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import unittest
import numpy as np
from eager_op_test import OpTest
class TestDetectionMAPOp(OpTest):
def set_data(self):
self.class_num = 4
self.init_test_case()
self.mAP = [self.calc_map(self.tf_pos, self.tf_pos_lod)]
self.label = np.array(self.label).astype('float32')
self.detect = np.array(self.detect).astype('float32')
self.mAP = np.array(self.mAP).astype('float32')
if len(self.class_pos_count) > 0:
self.class_pos_count = np.array(self.class_pos_count).astype(
'int32'
)
self.true_pos = np.array(self.true_pos).astype('float32')
self.false_pos = np.array(self.false_pos).astype('float32')
self.has_state = np.array([1]).astype('int32')
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
'HasState': self.has_state,
'PosCount': self.class_pos_count,
'TruePos': (self.true_pos, self.true_pos_lod),
'FalsePos': (self.false_pos, self.false_pos_lod),
}
else:
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
}
self.attrs = {
'overlap_threshold': self.overlap_threshold,
'evaluate_difficult': self.evaluate_difficult,
'ap_type': self.ap_type,
'class_num': self.class_num,
}
self.out_class_pos_count = np.array(self.out_class_pos_count).astype(
'int'
)
self.out_true_pos = np.array(self.out_true_pos).astype('float32')
self.out_false_pos = np.array(self.out_false_pos).astype('float32')
self.outputs = {
'MAP': self.mAP,
'AccumPosCount': self.out_class_pos_count,
'AccumTruePos': (self.out_true_pos, self.out_true_pos_lod),
'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod),
}
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "integral"
self.label_lod = [[2, 2]]
# label difficult xmin ymin xmax ymax
self.label = [
[1, 0, 0.1, 0.1, 0.3, 0.3],
[1, 1, 0.6, 0.6, 0.8, 0.8],
[2, 0, 0.3, 0.3, 0.6, 0.5],
[1, 0, 0.7, 0.1, 0.9, 0.3],
]
# label score xmin ymin xmax ymax difficult
self.detect_lod = [[3, 4]]
self.detect = [
[1, 0.3, 0.1, 0.0, 0.4, 0.3],
[1, 0.7, 0.0, 0.1, 0.2, 0.3],
[1, 0.9, 0.7, 0.6, 0.8, 0.8],
[2, 0.8, 0.2, 0.1, 0.4, 0.4],
[2, 0.1, 0.4, 0.3, 0.7, 0.5],
[1, 0.2, 0.8, 0.1, 1.0, 0.3],
[3, 0.2, 0.8, 0.1, 1.0, 0.3],
]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [
[1, 0.9, 1, 0],
[1, 0.7, 1, 0],
[1, 0.3, 0, 1],
[1, 0.2, 1, 0],
[2, 0.8, 0, 1],
[2, 0.1, 1, 0],
[3, 0.2, 0, 1],
]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
def calc_map(self, tf_pos, tf_pos_lod):
mAP = 0.0
count = 0
def get_input_pos(
class_pos_count, true_pos, true_pos_lod, false_pos, false_pos_lod
):
class_pos_count_dict = collections.Counter()
true_pos_dict = collections.defaultdict(list)
false_pos_dict = collections.defaultdict(list)
for i, count in enumerate(class_pos_count):
class_pos_count_dict[i] = count
cur_pos = 0
for i in range(len(true_pos_lod[0])):
start = cur_pos
cur_pos += true_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
true_pos_dict[i].append(true_pos[j])
cur_pos = 0
for i in range(len(false_pos_lod[0])):
start = cur_pos
cur_pos += false_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
false_pos_dict[i].append(false_pos[j])
return class_pos_count_dict, true_pos_dict, false_pos_dict
def get_output_pos(label_count, true_pos, false_pos):
label_number = self.class_num
out_class_pos_count = []
out_true_pos_lod = []
out_true_pos = []
out_false_pos_lod = []
out_false_pos = []
for i in range(label_number):
out_class_pos_count.append([label_count[i]])
true_pos_list = true_pos[i]
out_true_pos += true_pos_list
out_true_pos_lod.append(len(true_pos_list))
false_pos_list = false_pos[i]
out_false_pos += false_pos_list
out_false_pos_lod.append(len(false_pos_list))
return (
out_class_pos_count,
out_true_pos,
[out_true_pos_lod],
out_false_pos,
[out_false_pos_lod],
)
def get_accumulation(pos_list):
sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True)
sum = 0
accu_list = []
for score, count in sorted_list:
sum += count
accu_list.append(sum)
return accu_list
label_count, true_pos, false_pos = get_input_pos(
self.class_pos_count,
self.true_pos,
self.true_pos_lod,
self.false_pos,
self.false_pos_lod,
)
for v in self.label:
label = v[0]
difficult = False if len(v) == 5 else v[1]
if self.evaluate_difficult:
label_count[label] += 1
elif not difficult:
label_count[label] += 1
for label, score, tp, fp in tf_pos:
true_pos[label].append([score, tp])
false_pos[label].append([score, fp])
for label, label_pos_num in label_count.items():
if label_pos_num == 0:
continue
if label not in true_pos:
count += 1
continue
label_true_pos = true_pos[label]
label_false_pos = false_pos[label]
accu_tp_sum = get_accumulation(label_true_pos)
accu_fp_sum = get_accumulation(label_false_pos)
precision = []
recall = []
for i in range(len(accu_tp_sum)):
precision.append(
float(accu_tp_sum[i])
/ float(accu_tp_sum[i] + accu_fp_sum[i])
)
recall.append(float(accu_tp_sum[i]) / label_pos_num)
if self.ap_type == "11point":
max_precisions = [0.0] * 11
start_idx = len(accu_tp_sum) - 1
for j in range(10, -1, -1):
for i in range(start_idx, -1, -1):
if recall[i] < float(j) / 10.0:
start_idx = i
if j > 0:
max_precisions[j - 1] = max_precisions[j]
break
else:
if max_precisions[j] < precision[i]:
max_precisions[j] = precision[i]
for j in range(10, -1, -1):
mAP += max_precisions[j] / 11
count += 1
elif self.ap_type == "integral":
average_precisions = 0.0
prev_recall = 0.0
for i in range(len(accu_tp_sum)):
if math.fabs(recall[i] - prev_recall) > 1e-6:
average_precisions += precision[i] * math.fabs(
recall[i] - prev_recall
)
prev_recall = recall[i]
mAP += average_precisions
count += 1
pcnt, tp, tp_lod, fp, fp_lod = get_output_pos(
label_count, true_pos, false_pos
)
self.out_class_pos_count = pcnt
self.out_true_pos = tp
self.out_true_pos_lod = tp_lod
self.out_false_pos = fp
self.out_false_pos_lod = fp_lod
if count != 0:
mAP /= count
return mAP
def setUp(self):
self.op_type = "detection_map"
self.set_data()
def test_check_output(self):
        # NOTE(yjjiang11): This op will be deprecated.
self.check_output(check_dygraph=False)
class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
def init_test_case(self):
super().init_test_case()
self.evaluate_difficult = False
self.tf_pos_lod = [[2, 4]]
# label score true_pos false_pos
self.tf_pos = [
[1, 0.7, 1, 0],
[1, 0.3, 0, 1],
[1, 0.2, 1, 0],
[2, 0.8, 0, 1],
[2, 0.1, 1, 0],
[3, 0.2, 0, 1],
]
class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp):
def init_test_case(self):
super().init_test_case()
# label xmin ymin xmax ymax
self.label = [
[1, 0.1, 0.1, 0.3, 0.3],
[1, 0.6, 0.6, 0.8, 0.8],
[2, 0.3, 0.3, 0.6, 0.5],
[1, 0.7, 0.1, 0.9, 0.3],
]
class TestDetectionMAPOp11Point(TestDetectionMAPOp):
def init_test_case(self):
super().init_test_case()
self.ap_type = "11point"
class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp):
def init_test_case(self):
super().init_test_case()
self.class_pos_count = [0, 2, 1, 0]
self.true_pos_lod = [[0, 3, 2]]
self.true_pos = [
[0.7, 1.0],
[0.3, 0.0],
[0.2, 1.0],
[0.8, 0.0],
[0.1, 1.0],
]
self.false_pos_lod = [[0, 3, 2]]
self.false_pos = [
[0.7, 0.0],
[0.3, 1.0],
[0.2, 0.0],
[0.8, 1.0],
[0.1, 0.0],
]
class TestDetectionMAPOp11PointWithClassNoTP(TestDetectionMAPOp):
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "11point"
self.label_lod = [[2]]
# label difficult xmin ymin xmax ymax
self.label = [[2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
# label score xmin ymin xmax ymax difficult
self.detect_lod = [[1]]
self.detect = [[1, 0.2, 0.8, 0.1, 1.0, 0.3]]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [[1, 0.2, 1, 0]]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
if __name__ == '__main__':
unittest.main()
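# Editor's note: a worked toy example of the "integral" AP branch in calc_map() above
# (numbers are illustrative). For one class with 2 ground-truth boxes and accumulated
# sums accu_tp_sum = [1, 2], accu_fp_sum = [1, 1]:
#   precision = [1/2, 2/3], recall = [1/2, 1]
#   AP = (1/2) * (1/2 - 0) + (2/3) * (1 - 1/2) = 0.25 + 0.333... ≈ 0.583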
| source: saltstack/salt | path: /tests/pytests/unit/output/test_nested.py | license: Apache-2.0, MIT, BSD-2-Clause (permissive) | language: Python (UTF-8) | size: 6,131 bytes |
"""
Unit tests for the Nested outputter
"""
import pytest
import salt.output.nested as nested
@pytest.fixture
def configure_loader_modules():
return {nested: {"__opts__": {"extension_modules": "", "color": True}}}
@pytest.fixture
def data():
# The example from the documentation for the test.arg execution function
# Same function from the highstate outputter
return {
"local": {
"args": (1, "two", 3.1),
"kwargs": {
"__pub_pid": 25938,
"wow": {"a": 1, "b": "hello"},
"__pub_fun": "test.arg",
"__pub_jid": "20171207105927331329",
"__pub_tgt": "salt-call",
"txt": "hello",
},
}
}
def test_output_with_colors(data):
# Should look exactly like that, with the default color scheme:
#
# local:
# ----------
# args:
# - 1
# - two
# - 3.1
# kwargs:
# ----------
# __pub_fun:
# test.arg
# __pub_jid:
# 20171207105927331329
# __pub_pid:
# 25938
# __pub_tgt:
# salt-call
# txt:
# hello
# wow:
# ----------
# a:
# 1
# b:
# hello
expected_output_str = (
"\x1b[0;36mlocal\x1b[0;0m:\n \x1b[0;36m----------\x1b[0;0m\n "
" \x1b[0;36margs\x1b[0;0m:\n \x1b[0;1;33m- 1\x1b[0;0m\n "
" \x1b[0;32m- two\x1b[0;0m\n \x1b[0;1;33m- 3.1\x1b[0;0m\n "
" \x1b[0;36mkwargs\x1b[0;0m:\n \x1b[0;36m----------\x1b[0;0m\n "
" \x1b[0;36m__pub_fun\x1b[0;0m:\n \x1b[0;32mtest.arg\x1b[0;0m\n"
" \x1b[0;36m__pub_jid\x1b[0;0m:\n "
" \x1b[0;32m20171207105927331329\x1b[0;0m\n "
" \x1b[0;36m__pub_pid\x1b[0;0m:\n \x1b[0;1;33m25938\x1b[0;0m\n "
" \x1b[0;36m__pub_tgt\x1b[0;0m:\n "
" \x1b[0;32msalt-call\x1b[0;0m\n \x1b[0;36mtxt\x1b[0;0m:\n "
" \x1b[0;32mhello\x1b[0;0m\n \x1b[0;36mwow\x1b[0;0m:\n "
" \x1b[0;36m----------\x1b[0;0m\n \x1b[0;36ma\x1b[0;0m:\n "
" \x1b[0;1;33m1\x1b[0;0m\n \x1b[0;36mb\x1b[0;0m:\n "
" \x1b[0;32mhello\x1b[0;0m"
)
ret = nested.output(data)
assert ret == expected_output_str
def test_output_with_retcode(data):
# Non-zero retcode should change the colors
# Same output format as above, just different colors
expected_output_str = (
"\x1b[0;31mlocal\x1b[0;0m:\n \x1b[0;31m----------\x1b[0;0m\n "
" \x1b[0;31margs\x1b[0;0m:\n \x1b[0;1;33m- 1\x1b[0;0m\n "
" \x1b[0;32m- two\x1b[0;0m\n \x1b[0;1;33m- 3.1\x1b[0;0m\n "
" \x1b[0;31mkwargs\x1b[0;0m:\n \x1b[0;31m----------\x1b[0;0m\n "
" \x1b[0;31m__pub_fun\x1b[0;0m:\n \x1b[0;32mtest.arg\x1b[0;0m\n"
" \x1b[0;31m__pub_jid\x1b[0;0m:\n "
" \x1b[0;32m20171207105927331329\x1b[0;0m\n "
" \x1b[0;31m__pub_pid\x1b[0;0m:\n \x1b[0;1;33m25938\x1b[0;0m\n "
" \x1b[0;31m__pub_tgt\x1b[0;0m:\n "
" \x1b[0;32msalt-call\x1b[0;0m\n \x1b[0;31mtxt\x1b[0;0m:\n "
" \x1b[0;32mhello\x1b[0;0m\n \x1b[0;31mwow\x1b[0;0m:\n "
" \x1b[0;31m----------\x1b[0;0m\n \x1b[0;31ma\x1b[0;0m:\n "
" \x1b[0;1;33m1\x1b[0;0m\n \x1b[0;31mb\x1b[0;0m:\n "
" \x1b[0;32mhello\x1b[0;0m"
)
    # Note that in test_output_with_colors the color code is \x1b[0;36m, i.e. CYAN,
    # while here the color code is \x1b[0;31m, i.e. RED (indicating failure)
ret = nested.output(data, _retcode=1)
assert ret == expected_output_str
def test_output_with_indent(data):
# Everything must be indented by exactly two spaces
# (using nested_indent=2 sent to nested.output as kwarg)
expected_output_str = (
" \x1b[0;36m----------\x1b[0;0m\n \x1b[0;36mlocal\x1b[0;0m:\n "
" \x1b[0;36m----------\x1b[0;0m\n \x1b[0;36margs\x1b[0;0m:\n "
" \x1b[0;1;33m- 1\x1b[0;0m\n \x1b[0;32m- two\x1b[0;0m\n "
" \x1b[0;1;33m- 3.1\x1b[0;0m\n \x1b[0;36mkwargs\x1b[0;0m:\n "
" \x1b[0;36m----------\x1b[0;0m\n \x1b[0;36m__pub_fun\x1b[0;0m:\n "
" \x1b[0;32mtest.arg\x1b[0;0m\n "
" \x1b[0;36m__pub_jid\x1b[0;0m:\n "
" \x1b[0;32m20171207105927331329\x1b[0;0m\n "
" \x1b[0;36m__pub_pid\x1b[0;0m:\n \x1b[0;1;33m25938\x1b[0;0m\n"
" \x1b[0;36m__pub_tgt\x1b[0;0m:\n "
" \x1b[0;32msalt-call\x1b[0;0m\n \x1b[0;36mtxt\x1b[0;0m:\n "
" \x1b[0;32mhello\x1b[0;0m\n \x1b[0;36mwow\x1b[0;0m:\n "
" \x1b[0;36m----------\x1b[0;0m\n "
" \x1b[0;36ma\x1b[0;0m:\n \x1b[0;1;33m1\x1b[0;0m\n "
" \x1b[0;36mb\x1b[0;0m:\n \x1b[0;32mhello\x1b[0;0m"
)
ret = nested.output(data, nested_indent=2)
assert ret == expected_output_str
def test_display_with_integer_keys():
"""
Test display output when ret contains a combination of integer and
string keys. See issue #56909
"""
nest = nested.NestDisplay(retcode=0)
test_dict = {1: "test int 1", 2: "test int 2", "three": "test text three"}
lines = nest.display(ret=test_dict, indent=2, prefix="", out=[])
expected = [
" \x1b[0;36m----------\x1b[0;0m",
" \x1b[0;36m1\x1b[0;0m:",
" \x1b[0;32mtest int 1\x1b[0;0m",
" \x1b[0;36m2\x1b[0;0m:",
" \x1b[0;32mtest int 2\x1b[0;0m",
" \x1b[0;36mthree\x1b[0;0m:",
" \x1b[0;32mtest text three\x1b[0;0m",
]
assert lines == expected
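# Editor's note: key to the ANSI escape sequences asserted above. Each colored token has
# the form "\x1b[0;<code>m<text>\x1b[0;0m"; the SGR codes used in these fixtures are
# 31 = red, 32 = green, 1;33 = bold yellow and 36 = cyan, and the trailing "\x1b[0;0m"
# resets the attributes.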
| source: pachyderm/pachyderm | path: /python-sdk/pachyderm_sdk/datum_batching.py | license: Apache-2.0, LicenseRef-scancode-generic-cla (permissive) | language: Python (UTF-8) | size: 1,570 bytes |
"""A high-level decorator for pipeline code that uses the datum-batching feature."""
from functools import wraps
from typing import Callable
from . import Client
PIPELINE_FUNC = Callable[..., None]
def batch_all_datums(user_code: PIPELINE_FUNC) -> PIPELINE_FUNC:
"""A decorator that will repeatedly call the wrapped function until
all datums have been processed. Before calling the wrapped function,
this decorator will call the NextDatum endpoint within the worker
and set any environment variables specified by the worker.
Any exceptions raised during the execution of the wrapped function
will be reported back to the worker. See the pachyderm documentation
for more information on how the datum batching feature works.
Note: This can only be used within a Pachyderm worker.
Examples
--------
>>> from pachyderm_sdk import batch_all_datums
>>>
>>> @batch_all_datums
>>> def pipeline():
>>> # process datums
>>> pass
>>>
>>> if __name__ == '__main__':
>>> # Perform an expensive computation here before
>>> # entering your datum processing function
>>> # i.e. initializing a model.
>>> pipeline()
    Check the following link for a more substantial example:
github.com/pachyderm/examples/tree/master/object-detection
"""
@wraps(user_code)
def wrapper(*args, **kwargs) -> None:
worker = Client().worker
while True:
with worker.batch_datum():
user_code(*args, **kwargs)
return wrapper
| source: MinaProtocol/mina | path: /dockerfiles/puppeteer-context/mina_daemon_puppeteer.py | license: Apache-2.0, LicenseRef-scancode-unknown-license-reference (permissive) | language: Python (UTF-8) | size: 6,183 bytes |
#!/usr/bin/env python3
# This is a temporary hack for the integration test framework to be able to stop
# and start nodes dynamically in a kubernetes environment. This script takes
# mina arguments and will start and monitor a mina process with those arguments.
# If a SIGUSR1 signal is sent, it will stop this process, and if a SIGUSR2 is
# sent, it will resume the process. Since this script is a hack, there are some
# shortcomings of the script. Most notably:
# - the script will stack overflow after a lot of restarts are issued
# - the script does not attempt to handle errors from the tail child process
import os
from pathlib import Path
import signal
import subprocess
import sys
import time
from socketserver import TCPServer
from http.server import HTTPServer, BaseHTTPRequestHandler
# all signals handled by this program
ALL_SIGNALS = [signal.SIGCHLD, signal.SIGUSR1, signal.SIGUSR2]
SCRIPT_LOG_OUTPUT_FILENAME = "mina_daemon_puppeteer.log"
script_log_output_file = None
active_daemon_request = False
inactive_daemon_request = False
tail_process = None
mina_process = None
daemon_args = sys.argv[1:] if len(sys.argv) > 1 else []
TCPServer.allow_reuse_address = True
HTTPServer.timeout = 1
def log(s):
line = "mina_daemon_puppeteer_script: "+s
script_log_output_file.write("%s\n" % line)
script_log_output_file.flush()
def log_event(puppeteer_event_type, message):
line = '{"puppeteer_script_event": true, "puppeteer_event_type": "' + puppeteer_event_type + '", "message":"'+ message+'"}'
script_log_output_file.write("%s\n" % line)
script_log_output_file.flush()
class MockRequestHandler(BaseHTTPRequestHandler):
def do_GET(s):
s.send_response(200)
s.send_header('Content-Type', 'text/html')
s.end_headers()
s.wfile.write(b'<html><body>The daemon is currently offline.<br/><i>This broadcast was brought to you by the puppeteer mock server</i></body></html>')
def handle_child_termination(signum, frame):
log("SIGCHLD received" )
os.waitpid(-1, os.WNOHANG)
def handle_start_request(signum, frame):
log("SIGUSR1 handle_start_request received, setting active_daemon_request to True" )
global active_daemon_request
active_daemon_request = True
def handle_stop_request(signum, frame):
log("SIGUSR2 handle_stop_request received, setting inactive_daemon_request to True" )
global inactive_daemon_request
inactive_daemon_request = True
def get_child_processes(pid):
result = subprocess.run(
['ps', '-o', 'pid=', '--ppid', str(pid)],
stdout=subprocess.PIPE
)
output = result.stdout.decode('ascii')
return list(map(int, filter(lambda s: len(s) > 0, output.split(' '))))
def pid_is_running(pid):
try:
os.kill(pid, 0)
except ProcessLookupError:
return False
return True
def wait_for_pid(pid):
while pid_is_running(pid):
time.sleep(0.25)
def start_daemon():
log("start_daemon called" )
global mina_process
with open('mina.log', 'a') as f:
mina_process = subprocess.Popen(
['mina'] + daemon_args,
stdout=f,
stderr=subprocess.STDOUT,
cwd="/root/.mina-config"
)
log("touching /root/daemon-active" )
Path('daemon-active').touch()
log("daemon fully started" )
# log_event("node_initialized", "node has been initialized")
def stop_daemon():
log("stop_daemon called" )
global mina_process
mina_process.send_signal(signal.SIGTERM)
child_pids = get_child_processes(mina_process.pid)
log("stop_daemon, child_pids: %s" % ', '.join([str(i) for i in child_pids]))
mina_process.wait()
for child_pid in child_pids:
log("waiting for child_pid: " + str(child_pid) )
wait_for_pid(child_pid)
log("done waiting for: " + str(child_pid) )
log("removing /root/daemon-active" )
Path('daemon-active').unlink()
mina_process = None
log("daemon fully stopped" )
log_event("node_offline", "daemon is being stopped by puppeteer script and is going offline")
# technically, doing the loops like this will eventually result in a stack overflow
# however, you would need to do a lot of starts and stops to hit this condition
def inactive_loop():
log("inactive_loop beginning" )
global active_daemon_request
server = None
try:
server = HTTPServer(('0.0.0.0', 3085), MockRequestHandler)
while True:
server.handle_request()
signal.sigtimedwait(ALL_SIGNALS, 0)
if active_daemon_request:
log("inactive_loop: active_daemon_request received, starting daemon" )
start_daemon()
active_daemon_request = False
break
except Exception as err:
log("inactive_loop experienced an error: ")
        log(str(err))
finally:
if server != None:
server.server_close()
log("inactive_loop terminating. mock server closed." )
active_loop()
def active_loop():
log("active_loop beginning" )
global mina_process, inactive_daemon_request
while True:
signal.pause()
status = mina_process.poll()
if status != None:
log("active_loop: status not None, cleaning up and exiting")
cleanup_and_exit(status)
elif inactive_daemon_request:
log("active_loop: inactive daemon request detected, stopping daemon")
stop_daemon()
inactive_daemon_request = False
break
inactive_loop()
def cleanup_and_exit(status):
time.sleep(5)
tail_process.terminate()
tail_process.wait()
sys.exit(status)
if __name__ == '__main__':
script_log_output_file = open(SCRIPT_LOG_OUTPUT_FILENAME, 'w')
log("starting...")
signal.signal(signal.SIGCHLD, handle_child_termination)
signal.signal(signal.SIGUSR1, handle_stop_request)
signal.signal(signal.SIGUSR2, handle_start_request)
Path('.mina-config').mkdir(exist_ok=True)
Path('mina.log').touch()
Path('.mina-config/mina-prover.log').touch()
Path('.mina-config/mina-verifier.log').touch()
Path('.mina-config/mina-best-tip.log').touch()
# currently does not handle tail process dying
tail_process = subprocess.Popen(
['tail', '-q', '-f', 'mina.log', '-f', '.mina-config/mina-prover.log', '-f', '.mina-config/mina-verifier.log', '-f' , '.mina-config/mina-best-tip.log', '-f', SCRIPT_LOG_OUTPUT_FILENAME]
)
inactive_loop()
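# Editor's note: operational sketch, consistent with the signal handlers registered in
# __main__ above. The test framework (or an operator) drives this puppeteer roughly via:
#   kill -USR1 <puppeteer-pid>   # ask for the mina daemon to be stopped
#   kill -USR2 <puppeteer-pid>   # ask for the mina daemon to be (re)started
# while the daemon-active marker file created by start_daemon() signals whether the
# daemon is currently up.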
| source: ipab-slmc/exotica | path: /exotica_examples/scripts/example_attach | license: BSD-3-Clause (permissive) | language: Python (UTF-8) | size: 1,806 bytes |
#!/usr/bin/env python
import pyexotica as exo
from pyexotica.publish_trajectory import *
from time import sleep
import signal
def handle_attaching(i, x, scene):
# Update state
scene.update(x)
if i == 0:
# Reset object pose
scene.attach_object_local(
'Item', '', exo.KDLFrame([0.61, -0.3, 0.5, 0, 0, 0, 1]))
if i == 1:
# Attach to end effector
scene.attach_object('Item', 'lwr_arm_6_link')
if i == 2:
# Detach
scene.detach_object('Item')
exo.Setup.init_ros()
ik = exo.Setup.load_solver(
'{exotica_examples}/resources/configs/example_manipulate_ik.xml')
ompl = exo.Setup.load_solver(
'{exotica_examples}/resources/configs/example_manipulate_ompl.xml')
# Plan 3 end poses
ik.get_problem().set_rho('Position1', 1e2)
ik.get_problem().set_rho('Position2', 0)
goal_pose = [ik.solve()[0].tolist()]
ik.get_problem().set_rho('Position1', 0)
ik.get_problem().set_rho('Position2', 1e2)
goal_pose.append(ik.solve()[0].tolist())
goal_pose.append([0]*len(goal_pose[0]))
start_pose = [[0]*len(goal_pose[0]), goal_pose[0], goal_pose[1]]
# Plan 3 trajectories
solution = []
for i in range(0, 3):
print("Plan OMPL", i)
ompl.get_problem().start_state = start_pose[i]
ompl.get_problem().goal_state = goal_pose[i]
handle_attaching(i, start_pose[i], ompl.get_problem().get_scene())
solution.append(ompl.solve().tolist())
# Playback
dt = 0.03
signal.signal(signal.SIGINT, sig_int_handler)
while True:
try:
for i in range(0, len(solution)):
handle_attaching(i, start_pose[i], ompl.get_problem().get_scene())
for t in range(0, len(solution[i])):
publish_pose(solution[i][t], ompl.get_problem())
sleep(dt)
except KeyboardInterrupt:
break
| source: projectacrn/acrn-hypervisor | path: /misc/config_tools/scenario_config/default_populator.py | license: BSD-3-Clause (permissive) | language: Python (UTF-8) | size: 4,154 bytes |
#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import argparse
import elementpath
from scenario_transformer import ScenarioTransformer
from pipeline import PipelineObject, PipelineStage, PipelineEngine
from schema_slicer import SlicingSchemaByVMTypeStage
class DefaultValuePopulator(ScenarioTransformer):
def get_default_value(self, xsd_element_node, xml_parent_node):
# The attribute @default of the xsd:element node
v = xsd_element_node.get("default")
if v is not None:
return v
# The acrn:defaults and acrn:unique-among annotations which define a set of default values that shall be unique
# among a collection of nodes
annot_node = self.get_node(xsd_element_node, "xs:annotation")
if annot_node is not None:
defaults = annot_node.get("{https://projectacrn.org}defaults")
unique_among = annot_node.get("{https://projectacrn.org}unique-among")
if defaults is not None and unique_among is not None:
try:
default_values = set(eval(defaults))
existing_values = set(elementpath.select(self.xml_etree, unique_among, variables={"parent": xml_parent_node}))
available_defaults = default_values - existing_values
return sorted(list(available_defaults))[0]
                except Exception:
pass
return None
def add_missing_nodes(self, xsd_element_node, xml_parent_node, new_node_index):
element_name = xsd_element_node.get("name")
default_value = self.get_default_value(xsd_element_node, xml_parent_node)
        # If the node is neither of a complex type (i.e. it does not have any child node) nor has a default value, do not
# create the node at all. Users are required to fill in proper values in such nodes, and missing any of them
# shall trigger a validation error.
if self.complex_type_of_element(xsd_element_node) is None and default_value is None:
return []
new_node = xml_parent_node.makeelement(element_name, {})
new_node.text = default_value
if new_node_index is not None:
xml_parent_node.insert(new_node_index, new_node)
else:
xml_parent_node.append(new_node)
return [new_node]
def fill_empty_node(self, xsd_element_node, xml_parent_node, xml_empty_node):
default_value = self.get_default_value(xsd_element_node, xml_parent_node)
if default_value is not None:
xml_empty_node.text = default_value
class DefaultValuePopulatingStage(PipelineStage):
uses = {"schema_etree", "scenario_etree"}
provides = {"scenario_etree"}
def run(self, obj):
populator = DefaultValuePopulator(obj.get("schema_etree"))
etree = obj.get("scenario_etree")
populator.transform(etree)
obj.set("scenario_etree", etree)
def main(args):
from xml_loader import XMLLoadStage
from lxml_loader import LXMLLoadStage
pipeline = PipelineEngine(["schema_path", "scenario_path"])
pipeline.add_stages([
LXMLLoadStage("schema"),
XMLLoadStage("scenario"),
SlicingSchemaByVMTypeStage(),
DefaultValuePopulatingStage(),
])
obj = PipelineObject(schema_path = args.schema, scenario_path = args.scenario)
pipeline.run(obj)
obj.get("scenario_etree").write(args.out)
if __name__ == "__main__":
config_tools_dir = os.path.join(os.path.dirname(__file__), "..")
schema_dir = os.path.join(config_tools_dir, "schema")
parser = argparse.ArgumentParser(description="Populate a given scenario XML with default values of nonexistent nodes")
parser.add_argument("scenario", help="Path to the scenario XML file from users")
parser.add_argument("out", nargs="?", default="out.xml", help="Path where the output is placed")
parser.add_argument("--schema", default=os.path.join(schema_dir, "config.xsd"), help="the XML schema that defines the syntax of scenario XMLs")
args = parser.parse_args()
main(args)
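# Editor's note: illustrative invocation based on the argparse definitions above (file
# names are placeholders):
#   python3 default_populator.py my_scenario.xml populated.xml --schema schema/config.xsd
# This fills missing nodes in my_scenario.xml with schema defaults and writes the result
# to populated.xml; when the output argument is omitted the result goes to out.xml.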
| source: alldatacenter/alldata | path: /dts/airbyte/airbyte-integrations/connectors/source-sentry/source_sentry/streams.py | license: Apache-2.0, BSD-3-Clause, MIT, Elastic-2.0 (permissive) | language: Python (UTF-8) | size: 7,191 bytes |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pendulum
import requests
from airbyte_cdk.sources.streams import IncrementalMixin
from airbyte_cdk.sources.streams.http import HttpStream
class SentryStream(HttpStream, ABC):
API_VERSION = "0"
URL_TEMPLATE = "https://{hostname}/api/{api_version}/"
primary_key = "id"
def __init__(self, hostname: str, **kwargs):
super().__init__(**kwargs)
self._url_base = self.URL_TEMPLATE.format(hostname=hostname, api_version=self.API_VERSION)
@property
def url_base(self) -> str:
return self._url_base
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
return {}
class SentryStreamPagination(SentryStream):
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""
        Expect the Link header field to always contain the values for `rel`, `results`, and `cursor`.
        If there actually is a next page, rel="next"; results="true"; cursor="<next-page-token>".
"""
if response.links["next"]["results"] == "true":
return {"cursor": response.links["next"]["cursor"]}
else:
return None
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
if next_page_token:
params.update(next_page_token)
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
yield from response.json()
class SentryIncremental(SentryStreamPagination, IncrementalMixin):
def __init__(self, *args, **kwargs):
super(SentryIncremental, self).__init__(*args, **kwargs)
self._cursor_value = None
def filter_by_state(self, stream_state: Mapping[str, Any] = None, record: Mapping[str, Any] = None) -> Iterable:
"""
        The endpoint does not provide query filtering params, but it does expose a
        cursor field in most cases, so that field is used for incremental filtering
        while parsing the response.
"""
start_date = "1900-01-01T00:00:00.0Z"
if pendulum.parse(record[self.cursor_field]) > pendulum.parse((stream_state or {}).get(self.cursor_field, start_date)):
# Persist state.
# There is a bug in state setter: because of self._cursor_value is not defined it raises Attribute error
# which is ignored in airbyte_cdk/sources/abstract_source.py:320 and we have an empty state in return
# See: https://github.com/airbytehq/oncall/issues/1317
self.state = record
yield record
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[MutableMapping]:
json_response = response.json() or []
for record in json_response:
yield from self.filter_by_state(stream_state=stream_state, record=record)
@property
def state(self) -> Mapping[str, Any]:
return {self.cursor_field: str(self._cursor_value)}
@state.setter
def state(self, value: Mapping[str, Any]):
"""
Define state as a max between given value and current state
"""
if not self._cursor_value:
self._cursor_value = value[self.cursor_field]
else:
self._cursor_value = max(value[self.cursor_field], self.state[self.cursor_field])
class Events(SentryIncremental):
"""
Docs: https://docs.sentry.io/api/events/list-a-projects-events/
"""
primary_key = "id"
cursor_field = "dateCreated"
def __init__(self, organization: str, project: str, **kwargs):
super().__init__(**kwargs)
self._organization = organization
self._project = project
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
return f"projects/{self._organization}/{self._project}/events/"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params.update({"full": "true"})
return params
class Issues(SentryIncremental):
"""
Docs: https://docs.sentry.io/api/events/list-a-projects-issues/
"""
primary_key = "id"
cursor_field = "lastSeen"
def __init__(self, organization: str, project: str, **kwargs):
super().__init__(**kwargs)
self._organization = organization
self._project = project
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
return f"projects/{self._organization}/{self._project}/issues/"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params.update({"statsPeriod": "", "query": ""})
return params
class Projects(SentryIncremental):
"""
Docs: https://docs.sentry.io/api/projects/list-your-projects/
"""
primary_key = "id"
cursor_field = "dateCreated"
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
return "projects/"
class ProjectDetail(SentryStream):
"""
Docs: https://docs.sentry.io/api/projects/retrieve-a-project/
"""
def __init__(self, organization: str, project: str, **kwargs):
super().__init__(**kwargs)
self._organization = organization
self._project = project
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
return f"projects/{self._organization}/{self._project}/"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
yield response.json()
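# Editor's note: a sketch of how SentryStreamPagination.next_page_token() above consumes
# the HTTP Link header (the cursor value is illustrative only). requests exposes the
# parsed header as response.links, e.g.
#   response.links["next"] == {"url": "...", "rel": "next", "results": "true", "cursor": "0:100:0"}
# in which case next_page_token() returns {"cursor": "0:100:0"} and request_params()
# merges it into the query string of the follow-up request.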
| source: opendr-eu/opendr | path: /src/opendr/perception/activity_recognition/datasets/utils/decoder.py | license: Apache-2.0 (permissive) | language: Python (UTF-8) | size: 14,700 bytes |
""" Adapted from: https://github.com/facebookresearch/SlowFast
"""
import math
import random
import numpy as np
import torch
import torchvision.io as io
from logging import getLogger
logger = getLogger(__name__)
def temporal_sampling(frames, start_idx, end_idx, num_samples):
"""
Given the start and end frame index, sample num_samples frames between
the start and end with equal interval.
Args:
frames (tensor): a tensor of video frames, dimension is
`num video frames` x `channel` x `height` x `width`.
start_idx (int): the index of the start frame.
end_idx (int): the index of the end frame.
num_samples (int): number of frames to sample.
Returns:
        frames (tensor): a tensor of temporally sampled video frames, dimension is
            `num clip frames` x `channel` x `height` x `width`.
"""
index = torch.linspace(start_idx, end_idx, num_samples)
index = torch.clamp(index, 0, frames.shape[0] - 1).long()
# int((end_idx - (frames.shape[0] - 1)) / ((end_idx - start_idx) / num_samples))
num_padded_frames = len(index) - len(set(index))
if num_padded_frames > 0:
logger.warning(
f"More frames were asked for than are available. Last frame is repeated {num_padded_frames} times as fill."
)
frames = torch.index_select(frames, 0, index)
return frames
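# Editor's note: a worked example of temporal_sampling() above. With start_idx=0,
# end_idx=9 and num_samples=4, torch.linspace(0, 9, 4) yields [0., 3., 6., 9.], so
# frames 0, 3, 6 and 9 are selected; indices beyond the clip are clamped to the last
# available frame and any resulting repetition is logged as padding.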
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips):
"""
Sample a clip of size clip_size from a video of size video_size and
return the indices of the first and last frame of the clip. If clip_idx is
-1, the clip is randomly sampled, otherwise uniformly split the video to
num_clips clips, and select the start and end index of clip_idx-th video
clip.
Args:
video_size (int): number of overall frames.
clip_size (int): size of the clip to sample from the frames.
clip_idx (int): if clip_idx is -1, perform random jitter sampling. If
clip_idx is larger than -1, uniformly split the video to num_clips
clips, and select the start and end index of the clip_idx-th video
clip.
num_clips (int): overall number of clips to uniformly sample from the
given video for testing.
Returns:
start_idx (int): the start frame index.
end_idx (int): the end frame index.
"""
delta = max(video_size - clip_size, 0)
if clip_idx == -1:
# Random temporal sampling.
start_idx = random.uniform(0, delta)
else:
# Uniformly sample the clip with the given index.
start_idx = delta * clip_idx / num_clips
end_idx = start_idx + clip_size - 1
return start_idx, end_idx
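# Editor's note: a worked example of get_start_end_idx() above. For a 250-frame video,
# a 64-frame clip, num_clips=10 and clip_idx=2: delta = 250 - 64 = 186, so
# start_idx = 186 * 2 / 10 = 37.2 and end_idx = 37.2 + 63 = 100.2 (the fractional
# indices are later discretized and clamped by temporal_sampling()).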
def get_start_end_idx_centered(video_size, clip_size, clip_idx, num_clips):
max_start_idx = max(video_size - clip_size, 0)
if clip_idx == -1:
# Random temporal sampling.
start_idx = random.uniform(0, max_start_idx)
else:
segment_size = video_size / num_clips
segment_center_index = clip_idx * segment_size + segment_size / 2
start_idx = segment_center_index - clip_size / 2
# Handle bound for start and end index
start_idx = max(0, start_idx)
end_idx = start_idx + clip_size - 1
return start_idx, end_idx
def pyav_decode_stream(
container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
"""
Decode the video with PyAV decoder.
Args:
container (container): PyAV container.
start_pts (int): the starting Presentation TimeStamp to fetch the
video frames.
end_pts (int): the ending Presentation TimeStamp of the decoded frames.
stream (stream): PyAV stream.
stream_name (dict): a dictionary of streams. For example, {"video": 0}
means video stream at stream index 0.
buffer_size (int): number of additional frames to decode beyond end_pts.
Returns:
result (list): list of frames decoded.
max_pts (int): max Presentation TimeStamp of the video sequence.
"""
    # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a
# margin pts.
margin = 1024
seek_offset = max(start_pts - margin, 0)
container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
frames = {}
buffer_count = 0
max_pts = 0
for frame in container.decode(**stream_name):
max_pts = max(max_pts, frame.pts)
if frame.pts < start_pts:
continue
if frame.pts <= end_pts:
frames[frame.pts] = frame
else:
buffer_count += 1
frames[frame.pts] = frame
if buffer_count >= buffer_size:
break
result = [frames[pts] for pts in sorted(frames)]
return result, max_pts
def torchvision_decode(
video_handle,
sampling_rate,
num_frames,
clip_idx,
video_meta,
num_clips=10,
target_fps=30,
modalities=("visual",),
max_spatial_scale=0,
):
"""
If video_meta is not empty, perform temporal selective decoding to sample a
clip from the video with TorchVision decoder. If video_meta is empty, decode
the entire video and update the video_meta.
Args:
video_handle (bytes): raw bytes of the video file.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal
sampling. If clip_idx is larger than -1, uniformly split the
video to num_clips clips, and select the clip_idx-th video clip.
        video_meta (dict): a dict that contains VideoMetaData. Details can be found
at `pytorch/vision/torchvision/io/_video_opt.py`.
num_clips (int): overall number of clips to uniformly sample from the
given video.
        target_fps (int): the input video may have a different fps; convert it to
the target video fps.
modalities (tuple): tuple of modalities to decode. Currently only
support `visual`, planning to support `acoustic` soon.
max_spatial_scale (int): the maximal resolution of the spatial shorter
edge size during decoding.
Returns:
frames (tensor): decoded frames from the video.
fps (float): the number of frames per second of the video.
decode_all_video (bool): if True, the entire video was decoded.
"""
# Convert the bytes to a tensor.
video_tensor = torch.from_numpy(np.frombuffer(video_handle, dtype=np.uint8))
decode_all_video = True
video_start_pts, video_end_pts = 0, -1
# The video_meta is empty, fetch the meta data from the raw video.
if len(video_meta) == 0:
# Tracking the meta info for selective decoding in the future.
meta = io._probe_video_from_memory(video_tensor)
# Using the information from video_meta to perform selective decoding.
video_meta["video_timebase"] = meta.video_timebase
video_meta["video_numerator"] = meta.video_timebase.numerator
video_meta["video_denominator"] = meta.video_timebase.denominator
video_meta["has_video"] = meta.has_video
video_meta["video_duration"] = meta.video_duration
video_meta["video_fps"] = meta.video_fps
video_meta["audio_timebas"] = meta.audio_timebase
video_meta["audio_numerator"] = meta.audio_timebase.numerator
video_meta["audio_denominator"] = meta.audio_timebase.denominator
video_meta["has_audio"] = meta.has_audio
video_meta["audio_duration"] = meta.audio_duration
video_meta["audio_sample_rate"] = meta.audio_sample_rate
if (
video_meta["has_video"] and
video_meta["video_denominator"] > 0 and
video_meta["video_duration"] > 0
):
decode_all_video = False
start_idx, end_idx = get_start_end_idx_centered(
video_meta["video_fps"] * video_meta["video_duration"],
sampling_rate * num_frames / target_fps * video_meta["video_fps"],
clip_idx,
num_clips,
)
# Convert frame index to pts.
pts_per_frame = video_meta["video_denominator"] / video_meta["video_fps"]
video_start_pts = int(start_idx * pts_per_frame)
video_end_pts = int(end_idx * pts_per_frame)
# Decode the raw video with the tv decoder.
v_frames, _ = io._read_video_from_memory(
video_tensor,
seek_frame_margin=1.0,
read_video_stream="visual" in modalities,
video_width=0,
video_height=0,
video_min_dimension=max_spatial_scale,
video_pts_range=(video_start_pts, video_end_pts),
video_timebase_numerator=video_meta["video_numerator"],
video_timebase_denominator=video_meta["video_denominator"],
)
return v_frames, video_meta["video_fps"], decode_all_video
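# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of driving torchvision_decode with raw bytes read from disk.
# The path "video.mp4" is hypothetical. `video_meta` starts empty and is filled
# in place, so a second call on the same video can use selective decoding.
def _example_torchvision_decode(path="video.mp4"):
    with open(path, "rb") as f:
        video_bytes = f.read()
    video_meta = {}
    frames, fps, decoded_all_video = torchvision_decode(
        video_bytes,
        sampling_rate=2,
        num_frames=8,
        clip_idx=-1,  # -1 => random temporal sampling
        video_meta=video_meta,
    )
    return frames, fps, decoded_all_video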
def pyav_decode(
container, sampling_rate, num_frames, clip_idx, num_clips=10, target_fps=30
):
"""
    Convert the video from its original fps to the target_fps. If the video
    supports selective decoding (i.e. contains decoding information in the video
    header), then perform temporal selective decoding and sample a clip from the
    video with the PyAV decoder. If the video does not support selective decoding,
    decode the entire video.
Args:
container (container): pyav container.
sampling_rate (int): frame sampling rate (interval between two sampled
            frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal sampling. If
clip_idx is larger than -1, uniformly split the video to num_clips
clips, and select the clip_idx-th video clip.
num_clips (int): overall number of clips to uniformly sample from the
given video.
        target_fps (int): the input video may have a different fps; convert it
            to the target video fps before frame sampling.
Returns:
        frames (tensor): decoded frames from the video. Returns None if no
            video stream was found.
fps (float): the number of frames per second of the video.
decode_all_video (bool): If True, the entire video was decoded.
"""
    # Try to fetch the decoding information from the video header. Some videos
    # do not support fetching the decoding information; in that case the
    # duration will be None.
fps = float(container.streams.video[0].average_rate)
frames_length = container.streams.video[0].frames
duration = container.streams.video[0].duration
if duration is None:
# If failed to fetch the decoding information, decode the entire video.
decode_all_video = True
video_start_pts, video_end_pts = 0, math.inf
else:
# Perform selective decoding.
decode_all_video = False
start_idx, end_idx = get_start_end_idx_centered(
frames_length,
sampling_rate * num_frames / target_fps * fps,
clip_idx,
num_clips,
)
timebase = duration / frames_length
video_start_pts = int(start_idx * timebase)
video_end_pts = int(end_idx * timebase)
frames = None
# If video stream was found, fetch video frames from the video.
if container.streams.video:
video_frames, max_pts = pyav_decode_stream(
container,
video_start_pts,
video_end_pts,
container.streams.video[0],
{"video": 0},
)
container.close()
frames = [frame.to_rgb().to_ndarray() for frame in video_frames]
frames = torch.as_tensor(np.stack(frames))
return frames, fps, decode_all_video
def decode(
container,
sampling_rate,
num_frames,
clip_idx=-1,
num_clips=10,
video_meta=None,
target_fps=30,
backend="pyav",
max_spatial_scale=0,
):
"""
Decode the video and perform temporal sampling.
Args:
container (container): pyav container.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal
sampling. If clip_idx is larger than -1, uniformly split the
video to num_clips clips, and select the
clip_idx-th video clip.
num_clips (int): overall number of clips to uniformly
sample from the given video.
        video_meta (dict): a dict containing VideoMetaData. Details can be found
            at `pytorch/vision/torchvision/io/_video_opt.py`.
target_fps (int): the input video may have different fps, convert it to
the target video fps before frame sampling.
backend (str): decoding backend includes `pyav` and `torchvision`. The
default one is `pyav`.
max_spatial_scale (int): keep the aspect ratio and resize the frame so
that shorter edge size is max_spatial_scale. Only used in
`torchvision` backend.
Returns:
frames (tensor): decoded frames from the video.
"""
# Currently support two decoders: 1) PyAV, and 2) TorchVision.
    assert clip_idx >= -1, "Invalid clip_idx {}".format(clip_idx)
try:
if backend == "pyav":
frames, fps, decode_all_video = pyav_decode(
container, sampling_rate, num_frames, clip_idx, num_clips, target_fps,
)
elif backend == "torchvision":
frames, fps, decode_all_video = torchvision_decode(
container,
sampling_rate,
num_frames,
clip_idx,
video_meta,
num_clips,
target_fps,
("visual",),
max_spatial_scale,
)
else:
raise NotImplementedError("Unknown decoding backend {}".format(backend))
except Exception as e:
logger.debug("Failed to decode by {} with exception: {}".format(backend, e))
return None
    # Return None if the frames were not decoded successfully.
if frames is None or frames.size(0) == 0:
return None
start_idx, end_idx = get_start_end_idx(
frames.shape[0],
num_frames * sampling_rate * fps / target_fps,
clip_idx if decode_all_video else 0,
num_clips if decode_all_video else 1,
)
# Perform temporal sampling from the decoded video.
frames = temporal_sampling(frames, start_idx, end_idx, num_frames)
return frames
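# --- Illustrative usage sketch (not part of the original module) ---
# End-to-end example with the PyAV backend. Assumes the `av` package is
# installed; "video.mp4" is a hypothetical path. On success `decode` returns
# a uint8 tensor of shape (num_frames, height, width, 3); on failure it
# returns None.
def _example_pyav_decode(path="video.mp4"):
    import av

    container = av.open(path)
    frames = decode(
        container,
        sampling_rate=2,
        num_frames=8,
        clip_idx=-1,  # -1 => random clip; >= 0 => the clip_idx-th of num_clips clips
        num_clips=10,
        backend="pyav",
    )
    return frames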
--- plotly/dash-bio :: dash_bio/component_factory/_volcano.py (Python, permissive, MIT) ---
from __future__ import absolute_import
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import plotly.graph_objects as go
from .utils import _get_hover_text
GENOMEWIDE_LINE_LABEL = 'genomewide_line'
EFFECT_SIZE_LINE_MIN_LABEL = 'effect size min line'
EFFECT_SIZE_LINE_MAX_LABEL = 'effect size max line'
def VolcanoPlot(
dataframe,
effect_size='EFFECTSIZE',
p='P',
snp='SNP',
gene='GENE',
annotation=None,
logp=True,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
col=None,
effect_size_line=None,
effect_size_line_color='grey',
effect_size_line_width=0.5,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='grey',
genomewideline_width=1,
highlight=True,
highlight_color="red",
**kwargs
):
"""Return a Dash Bio VolcanoPlot figure.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
least the following two columns:
- a numeric quantity to plot such as a p-value or zscore
- a numeric quantity measuring the strength of association,
typically an odds ratio, regression coefficient, or log fold
change. Here, it is referred to as `effect_size`.
- effect_size (string; default 'EFFECTSIZE'): A string denoting the
column name for the effect size. This column must be numeric and must
not contain missing nor NaN values.
- p (string; default 'P'): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. It does not have to be a p-value. It can be any
numeric quantity such as peak heights, Bayes factors, test
statistics. If it is not a p-value, make sure to set logp = False.
- snp (string; default 'SNP'): A string denoting the column name for
the SNP names (e.g., rs number). More generally, this column could
be anything that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS), this could be the
probe name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify it if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; default 'GENE'): A string denoting the column name for
the GENE names. More generally, this could be any annotation
information that should be included in the plot.
- annotation (string; optional): A string denoting the column to use
as annotations. This could be any annotation information that you
want to include in the plot (e.g., zscore, effect size, minor
allele frequency).
- logp (bool; default True): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, Bayes factors, test statistics, and other
"scores").
- xlabel (string; optional): Label of the x axis.
- ylabel (string; default '-log10(p)'): Label of the y axis.
- point_size (number; default 5): Size of the points of the Scatter
plot.
- col (string; optional): Color of the points of the Scatter plot. Can
be in any color format accepted by plotly.graph_objects.
- effect_size_line (bool | list; default [-1, 1]): A boolean which
must be either False to deactivate the option, or a list/array containing
the upper and lower bounds of the effect size values. Significant
data points will have lower values than the lower bound, or higher
values than the higher bound. Keeping the default value will
result in assigning the list [-1, 1] to the argument.
- effect_size_line_color (string; default 'grey'): Color of the effect
size lines.
    - effect_size_line_width (number; default 0.5): Width of the effect size
lines.
- genomewideline_value (bool | number; default -log10(5e-8)): A
boolean which must be either False to deactivate the option, or a
numerical value corresponding to the p-value above which the data
points are considered significant.
    - genomewideline_color (string; default 'grey'): Color of the
genome-wide line. Can be in any color format accepted by
plotly.graph_objects.
- genomewideline_width (number; default 1): Width of the genome-wide
line.
- highlight (bool; default True): Whether the data points considered
significant should be highlighted or not.
- highlight_color (string; default 'red'): Color of the data points
highlighted because considered significant. Can be in any color
format accepted by plotly.graph_objects.
# ...
Example 1: Random Volcano Plot
'''
dataframe = pd.DataFrame(
np.random.randint(0,100,size=(100, 2)),
columns=['P', 'EFFECTSIZE'])
    fig = VolcanoPlot(dataframe, title=dict(text='XYZ Volcano plot'))
plotly.offline.plot(fig, image='png')
'''
- Additional keys (misc.): Arbitrary arguments can be passed to modify the
Layout and styling of the graph. A full reference of acceptable args is
available [here](https://plotly.com/python-api-reference/generated/plotly.graph_objects
.Layout.html).
Some commonly used layout keys are:
- title (dict: optional): Dict with compatible properties for the title of
the figure layout.
- xaxis (dict: optional): Dict with compatible properties for the x-axis of
the figure layout.
- yaxis (dict: optional): Dict with compatible properties for the y-axis of
the figure layout.
- height (number; optional): Sets the plot's height (in px).
- width (number; optional): Sets the plot's width (in px).
- margin (dict | plotly.graph_objects.layout.Margin instance): A dict or Margin
instance that sets the separation between the main plotting space and
the outside of the figure.
- legend (dict | plotly.graph_objects.layout.Legend instance): A dict or Legend
instance with compatible properties.
"""
vp = _VolcanoPlot(
dataframe,
effect_size=effect_size,
p=p,
snp=snp,
gene=gene,
annotation=annotation,
logp=logp
)
return vp.figure(
xlabel=xlabel,
ylabel=ylabel,
point_size=point_size,
col=col,
effect_size_line=effect_size_line,
effect_size_line_color=effect_size_line_color,
effect_size_line_width=effect_size_line_width,
genomewideline_value=genomewideline_value,
genomewideline_color=genomewideline_color,
genomewideline_width=genomewideline_width,
highlight=highlight,
highlight_color=highlight_color,
**kwargs
)
class _VolcanoPlot():
def __init__(
self,
x,
effect_size='EFFECTSIZE',
p='P',
snp='SNP',
gene='GENE',
annotation=None,
logp=True
):
"""Return a Dash Bio VolcanoPlot object.
Keyword arguments:
- x (dataframe; required): A pandas dataframe which must contain at
least the following two columns:
- a numeric quantity to plot such as a p-value or zscore
- a numeric quantity measuring the strength of association,
typically an odds ratio, regression coefficient, or log fold
change. Here, it is referred to as `effect_size`.
- p (string; optional): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. It does not have to be a p-value. It can be any
numeric quantity such as peak heights, Bayes factors, test
statistics. If it is not a p-value, make sure to set logp =
False.
- effect_size (string; optional): A string denoting the column name
for the effect size. This column must be numeric and must not
contain missing nor NaN values.
- snp (string; optional): A string denoting the column name for the
SNP names (e.g. rs number). More generally, this column could be
anything that identifies each point being plotted. For example, in
an Epigenomewide association study (EWAS) this could be the probe
name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; optional): A string denoting the column name for the
GENE names. More generally, this could be any annotation
information that should be included in the plot.
- annotation (string; optional): A string denoting the column name for
an annotation. This could be any annotation information that you
want to include in the plot (e.g. zscore, effect size, minor
allele frequency).
- logp (bool; optional): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, bayes factors, test statistics, and other
"scores").
"""
# checking the validity of the arguments
# Make sure you have effect_size and p columns and that they are of
# numeric type
if effect_size not in x.columns.values:
raise KeyError("Column %s not found in 'x' data.frame"
% effect_size)
else:
if not is_numeric_dtype(x[effect_size].dtype):
raise TypeError("%s column should be numeric. Do you have "
"'X', 'Y', 'MT', etc? If so change to "
"numbers and try again." % effect_size)
if p not in x.columns.values:
raise KeyError("Column %s not found in 'x' data.frame" % p)
else:
if not is_numeric_dtype(x[p].dtype):
raise TypeError("%s column should be numeric type" % p)
else:
if (x[p] < 0).any():
raise ValueError("Negative p-values found."
" These must be removed.")
if (x[p] > 1).any():
raise ValueError("P-values greater than 1 found. "
"These must be removed.")
if np.isnan(x[p]).any():
raise ValueError("NaN p-values found. These must be "
"removed")
# Create a new DataFrame with columns named after effect_size and p.
self.data = pd.DataFrame(data=x[[effect_size, p]])
if snp is not None:
if snp not in x.columns.values:
# Warn if you don't have a snp column
raise KeyError(
"snp argument specified as %s but column not found in "
"'x' data.frame" % snp)
else:
# If the input DataFrame has a snp column, add it to the new
# DataFrame
self.data[snp] = x[snp]
if gene is not None:
if gene not in x.columns.values:
# Warn if you don't have a gene column
raise KeyError(
"gene argument specified as %s but column not found in "
"'x' data.frame" % gene)
else:
# If the input DataFrame has a gene column, add it to the new
# DataFrame
self.data[gene] = x[gene]
if annotation is not None:
if annotation not in x.columns.values:
# Warn if you don't have an annotation column
raise KeyError(
"annotation argument specified as %s but column not "
"found in 'x' data.frame" % annotation
)
else:
# If the input DataFrame has a gene column, add it to the new
# DataFrame
self.data[annotation] = x[annotation]
self.xlabel = "Effect Size"
self.ticks = []
self.ticksLabels = []
self.effectSize = effect_size
self.pName = p
self.snpName = snp
self.geneName = gene
self.annotationName = annotation
self.logp = logp
def figure(
self,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
col=None,
effect_size_line=None,
effect_size_line_color='grey',
effect_size_line_width=0.5,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='grey',
genomewideline_width=1,
highlight=True,
highlight_color='red',
**kwargs
):
"""Return a figure object compatible with plotly.graph_objects.
Keyword arguments:
- xlabel (string; optional): Label of the x-axis.
- ylabel (string; default '-log10(p)'): Label of the y-axis.
- point_size (number; default 5): Size of the points of the scatter
plot.
- col (string; optional): Color of the points of the Scatter plot. Can
be in any color format accepted by plotly.graph_objects.
- effect_size_line (bool | list; default [-1, 1]): A boolean which must be
either False to deactivate the option, or a list/array containing the
upper and lower bounds of the effect size values. Significant data
points will have lower values than the lower bound, or higher
values than the higher bound. Keeping the default value will
result in assigning the list [-1, 1] to the argument.
- effect_size_line_color (string; default 'grey'): Color of the
effect size lines.
        - effect_size_line_width (number; default 0.5): Width of the effect
size lines.
- genomewideline_value (bool | number; default -log10(5e-8)): A
boolean which must be either False to deactivate the option, or a
numerical value corresponding to the p-value above which the
data points are considered significant.
        - genomewideline_color (string; default 'grey'): Color of the genome-wide
line. Can be in any color format accepted by plotly.graph_objects.
- genomewideline_width (number; default 1): Width of the genome-wide
line.
- highlight (bool; default True): Whether the data points considered
significant should be highlighted or not.
- highlight_color (string; default 'red'): Color of the data points
highlighted because considered significant. Can be in any color
format accepted by plotly.graph_objects.
- Additional keys (misc.): Arbitrary arguments can be passed to modify the
Layout and styling of the graph. A full reference of acceptable args is
available [here](https://plotly.com/python-api-reference/generated/plotly.graph_objects
.Layout.html).
Some commonly used layout keys are:
- title (dict: optional): Dict with compatible properties for the title of
the figure layout.
- xaxis (dict: optional): Dict with compatible properties for the x-axis of
the figure layout.
- yaxis (dict: optional): Dict with compatible properties for the y-axis of
the figure layout.
- height (number; optional): Sets the plot's height (in px).
- width (number; optional): Sets the plot's width (in px).
- margin (dict | plotly.graph_objects.layout.Margin instance): A dict or Margin
instance that sets the separation between the main plotting space and
the outside of the figure.
- legend (dict | plotly.graph_objects.layout.Legend instance): A dict or Legend
instance with compatible properties.
"""
if xlabel is None:
xlabel = self.xlabel
if effect_size_line is None:
effect_size_line = [-1, 1]
if not effect_size_line and not isinstance(effect_size_line, bool):
raise ValueError("If effect_size_line is a logical, it must be "
"set to False")
if np.size(effect_size_line) > 2:
raise ValueError("The argument effect_size_line should be a "
"vector or a list of maximum two components")
# Initialize plot
xmin = min(self.data[self.effectSize].values)
xmax = max(self.data[self.effectSize].values)
# Taking 105% of the max value of data for x axis range
xlim = 1.05 * np.max(np.abs([xmin, xmax]))
if self.logp:
ymin = min(-np.log10(self.data[self.pName].values))
ymax = max(-np.log10(self.data[self.pName].values))
else:
ymin = min(self.data[self.pName].values)
ymax = max(self.data[self.pName].values)
if col is None:
col = '#2186f4'
layout = go.Layout(
title={'text': 'Volcano Plot',
'font': {'family': 'sans-serif', 'size': 20},
'x': 0.5,
'xanchor': 'right',
'yanchor': 'top'
},
hovermode='closest',
legend={'bgcolor': '#ebf1fa',
'yanchor': 'top',
'x': 1.01,
"font": {"family": "sans-serif"}
},
xaxis={
'title': xlabel,
'zeroline': False,
'range': [-xlim, xlim]
},
yaxis={
'title': ylabel,
'zeroline': False
}
)
layout.update(**kwargs)
data_to_plot = [] # To contain the data traces
tmp = pd.DataFrame() # Empty DataFrame to contain the highlighted data
if highlight:
if not isinstance(highlight, bool):
if self.snpName not in self.data.columns.values:
raise KeyError(
"snp argument specified for highlight as %s but "
"column not found in the data.frame" % self.snpName
)
else:
if not genomewideline_value:
raise Warning(
"The genomewideline_value you entered is not a "
"positive value, or False, you cannot set highlight "
"to True in that case.")
tmp = self.data
            # Keep only the p-values (or -log10(p-values)) above the line
if genomewideline_value:
if self.logp:
tmp = tmp.loc[-np.log10(tmp[self.pName])
> genomewideline_value]
else:
tmp = tmp.loc[tmp[self.pName] > genomewideline_value]
            # Keep only effect sizes beyond the positive/negative bounds
if effect_size_line:
lp = tmp.loc[tmp[self.effectSize] > max(effect_size_line)]
ln = tmp.loc[tmp[self.effectSize] < min(effect_size_line)]
tmp = pd.concat([lp, ln])
highlight_hover_text = _get_hover_text(
tmp,
snpname=self.snpName,
genename=self.geneName,
annotationname=self.annotationName
)
if not tmp.empty:
data_to_plot.append(
go.Scattergl(
x=tmp[self.effectSize],
y=-np.log10(tmp[self.pName].values) if self.logp
else tmp[self.pName].values,
mode='markers',
text=highlight_hover_text,
marker=dict(
color=highlight_color,
size=point_size),
name='Point(s) of interest'
)
)
# Remove the highlighted data from the DataFrame if not empty
if tmp.empty:
data = self.data
else:
data = self.data.drop(self.data.index[tmp.index])
hover_text = _get_hover_text(
data,
snpname=self.snpName,
genename=self.geneName,
annotationname=self.annotationName
)
data_to_plot.append(
go.Scattergl(
x=data[self.effectSize].values,
y=-np.log10(data[self.pName].values) if self.logp
else data[self.pName].values,
mode='markers',
marker={
'color': col,
'size': point_size,
# 'name': "chr%i" % self.data[self.chrName].unique()
},
text=hover_text,
name='Dataset'
)
)
# Draw the effect size lines
if effect_size_line:
lines = [
go.layout.Shape(
name=EFFECT_SIZE_LINE_MIN_LABEL,
type='line',
line=dict(
color=effect_size_line_color,
width=effect_size_line_width,
dash='dash'
),
x0=effect_size_line[0], x1=effect_size_line[0], xref='x',
y0=ymin, y1=ymax, yref='y'
),
go.layout.Shape(
name=EFFECT_SIZE_LINE_MAX_LABEL,
type='line',
line=dict(
color=effect_size_line_color,
width=effect_size_line_width,
dash='dash'
),
x0=effect_size_line[1], x1=effect_size_line[1], xref='x',
y0=ymin, y1=ymax, yref='y'
),
]
else:
lines = []
if genomewideline_value:
genomewideline = go.layout.Shape(
name=GENOMEWIDE_LINE_LABEL,
type='line',
line=dict(
color=genomewideline_color,
width=genomewideline_width,
dash='dash'
),
x0=-xlim, x1=xlim, xref='x',
y0=genomewideline_value, y1=genomewideline_value, yref='y'
)
lines.append(genomewideline)
layout.shapes = lines
return go.Figure(data=data_to_plot, layout=layout)
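# --- Illustrative usage sketch (not part of this module) ---
# Typical call through the public package namespace (assumes dash-bio is
# installed and importable as `dash_bio`); the data are random and purely
# hypothetical, with column names matching the documented defaults.
#
#     import numpy as np
#     import pandas as pd
#     import dash_bio
#
#     rng = np.random.default_rng(0)
#     df = pd.DataFrame({
#         'P': rng.uniform(1e-10, 1.0, size=500),        # p-values in (0, 1]
#         'EFFECTSIZE': rng.normal(0.0, 1.5, size=500),   # e.g. log fold change
#         'SNP': ['rs%d' % i for i in range(500)],
#         'GENE': ['gene%d' % i for i in range(500)],
#     })
#     fig = dash_bio.VolcanoPlot(
#         df,
#         effect_size_line=[-2, 2],
#         genomewideline_value=-np.log10(1e-4),
#         highlight_color='orange',
#     )
#     fig.show()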
--- makinacorpus/django-safedelete :: safedelete/models.py (Python, permissive, BSD-3-Clause) ---
import warnings
from collections import Counter, defaultdict
from functools import reduce
from itertools import chain
from operator import or_
from typing import Dict, Optional, Tuple, List
import django
from django.contrib.admin.utils import NestedObjects
from django.core.exceptions import ValidationError
from django.db import models, router
from django.db.models import UniqueConstraint
from django.db.models.deletion import ProtectedError
from django.utils import timezone
from .config import (
DELETED_BY_CASCADE_FIELD_NAME,
FIELD_NAME,
HARD_DELETE,
HARD_DELETE_NOCASCADE,
NO_DELETE,
SOFT_DELETE,
SOFT_DELETE_CASCADE,
)
from .managers import (
SafeDeleteAllManager,
SafeDeleteDeletedManager,
SafeDeleteManager,
)
from .signals import post_softdelete, post_undelete, pre_softdelete
from .utils import can_hard_delete, related_objects
def is_safedelete_cls(cls):
for base in cls.__bases__:
# This used to check if it startswith 'safedelete', but that masks
# the issue inside of a test. Other clients create models that are
# outside of the safedelete package.
if base.__module__.startswith('safedelete.models'):
return True
if is_safedelete_cls(base):
return True
return False
def is_safedelete(related):
warnings.warn(
'is_safedelete is deprecated in favor of is_safedelete_cls',
DeprecationWarning)
return is_safedelete_cls(related.__class__)
class SafeDeleteModel(models.Model):
"""Abstract safedelete-ready model.
.. note::
To create your safedelete-ready models, you have to make them inherit from this model.
:attribute deleted:
DateTimeField set to the moment the object was deleted. Is set to
``None`` if the object has not been deleted.
:attribute deleted_by_cascade:
        BooleanField set to True whenever the object is deleted due to a cascade operation
        triggered by the delete method of a parent model. Defaults to False. If the parent
        model later performs a cascading undelete, only child objects that were also deleted
        by a cascading operation (deleted_by_cascade equal to True) are restored; objects that
        were already deleted before their parent was deleted stay deleted when that parent is
        restored by the undelete method.
        If this behavior isn't desired, a class that inherits from SafeDeleteModel can override
        this attribute by setting it to None: the overriding model class won't have its
        ``deleted_by_cascade`` field and won't be restored by a cascading undelete even if it
        was deleted by a cascade operation.
>>> class MyModel(SafeDeleteModel):
... deleted_by_cascade = None
... my_field = models.TextField()
:attribute _safedelete_policy: define what happens when you delete an object.
It can be one of ``HARD_DELETE``, ``SOFT_DELETE``, ``SOFT_DELETE_CASCADE``, ``NO_DELETE`` and ``HARD_DELETE_NOCASCADE``.
Defaults to ``SOFT_DELETE``.
>>> class MyModel(SafeDeleteModel):
... _safedelete_policy = SOFT_DELETE
... my_field = models.TextField()
...
>>> # Now you have your model (with its ``deleted`` field, and custom manager and delete method)
:attribute objects:
The :class:`safedelete.managers.SafeDeleteManager` returns the non-deleted models.
:attribute all_objects:
The :class:`safedelete.managers.SafeDeleteAllManager` returns all the models (non-deleted and soft-deleted).
:attribute deleted_objects:
The :class:`safedelete.managers.SafeDeleteDeletedManager` returns the soft-deleted models.
"""
_safedelete_policy: int = SOFT_DELETE
objects = SafeDeleteManager()
all_objects = SafeDeleteAllManager()
deleted_objects = SafeDeleteDeletedManager()
class Meta:
abstract = True
def save(self, keep_deleted=False, **kwargs):
"""Save an object, un-deleting it if it was deleted.
Args:
keep_deleted: Do not undelete the model if soft-deleted. (default: {False})
kwargs: Passed onto :func:`save`.
.. note::
Undeletes soft-deleted models by default.
"""
# undelete signal has to happen here (and not in undelete)
# in order to catch the case where a deleted model becomes
# implicitly undeleted on-save. If someone manually nulls out
# deleted, it'll bypass this logic, which I think is fine, because
# otherwise we'd have to shadow field changes to handle that case.
was_undeleted = False
if not keep_deleted:
if getattr(self, FIELD_NAME) and self.pk:
was_undeleted = True
setattr(self, FIELD_NAME, None)
setattr(self, DELETED_BY_CASCADE_FIELD_NAME, False)
super(SafeDeleteModel, self).save(**kwargs)
if was_undeleted:
# send undelete signal
using = kwargs.get('using') or router.db_for_write(self.__class__, instance=self)
post_undelete.send(sender=self.__class__, instance=self, using=using)
def undelete(self, force_policy: Optional[int] = None, **kwargs) -> Tuple[int, Dict[str, int]]:
"""Undelete a soft-deleted model.
Args:
force_policy: Force a specific undelete policy. (default: {None})
kwargs: Passed onto :func:`save`.
.. note::
Will raise a :class:`AssertionError` if the model was not soft-deleted.
"""
current_policy = force_policy or self._safedelete_policy
assert getattr(self, FIELD_NAME)
self.save(keep_deleted=False, **kwargs)
undeleted_counter = Counter({self._meta.label: 1})
if current_policy == SOFT_DELETE_CASCADE:
for related in related_objects(self, only_deleted_by_cascade=True):
if is_safedelete_cls(related.__class__) and getattr(related, FIELD_NAME):
_, undelete_response = related.undelete(**kwargs)
undeleted_counter.update(undelete_response)
return sum(undeleted_counter.values()), dict(undeleted_counter)
def delete(self, force_policy=None, **kwargs):
# To know why we need to do that, see https://github.com/makinacorpus/django-safedelete/issues/117
return self._delete(force_policy, **kwargs)
def _delete(self, force_policy: Optional[int] = None, **kwargs) -> Tuple[int, Dict[str, int]]:
"""Overrides Django's delete behaviour based on the model's delete policy.
Args:
force_policy: Force a specific delete policy. (default: {None})
kwargs: Passed onto :func:`save` if soft deleted.
"""
current_policy = self._safedelete_policy if (force_policy is None) else force_policy
if current_policy == NO_DELETE:
return (0, {})
elif current_policy == SOFT_DELETE:
return self.soft_delete_policy_action(**kwargs)
elif current_policy == HARD_DELETE:
return self.hard_delete_policy_action(**kwargs)
elif current_policy == HARD_DELETE_NOCASCADE:
return self.hard_delete_cascade_policy_action(**kwargs)
elif current_policy == SOFT_DELETE_CASCADE:
return self.soft_delete_cascade_policy_action(**kwargs)
return (0, {})
def soft_delete_policy_action(self, **kwargs) -> Tuple[int, Dict[str, int]]:
# Only soft-delete the object, marking it as deleted.
setattr(self, FIELD_NAME, timezone.now())
# is_cascade shouldn't be in kwargs when calling save method.
if kwargs.pop('is_cascade', False):
setattr(self, DELETED_BY_CASCADE_FIELD_NAME, True)
using = kwargs.get('using') or router.db_for_write(self.__class__, instance=self)
# send pre_softdelete signal
pre_softdelete.send(sender=self.__class__, instance=self, using=using)
self.save(keep_deleted=True, **kwargs)
# send softdelete signal
post_softdelete.send(sender=self.__class__, instance=self, using=using)
return (1, {self._meta.label: 1})
def hard_delete_policy_action(self, **kwargs) -> Tuple[int, Dict[str, int]]:
# Normally hard-delete the object.
return super(SafeDeleteModel, self).delete()
def hard_delete_cascade_policy_action(self, **kwargs) -> Tuple[int, Dict[str, int]]:
# Hard-delete the object only if nothing would be deleted with it
if not can_hard_delete(self):
return self._delete(force_policy=SOFT_DELETE, **kwargs)
else:
return self._delete(force_policy=HARD_DELETE, **kwargs)
def soft_delete_cascade_policy_action(self, **kwargs) -> Tuple[int, Dict[str, int]]:
collector = NestedObjects(using=router.db_for_write(type(self)))
collector.collect([self])
        # Soft-delete cascade raises an exception when trying to delete an object
        # whose related objects are protected (on_delete=PROTECT).
protected_objects = defaultdict(list)
for obj in collector.protected:
if getattr(obj, FIELD_NAME, None) is None:
protected_objects[obj.__class__.__name__].append(obj)
if protected_objects:
raise ProtectedError(
'Cannot delete some instances of model %r because they are '
'referenced through protected foreign keys: %s.' % (
self.__class__.__name__,
', '.join(protected_objects),
),
set(chain.from_iterable(protected_objects.values())),
)
# Soft-delete on related objects before
deleted_counter: Counter = Counter()
for related in related_objects(self):
if is_safedelete_cls(related.__class__) and not getattr(related, FIELD_NAME):
_, delete_response = related.delete(force_policy=SOFT_DELETE, is_cascade=True, **kwargs)
deleted_counter.update(delete_response)
# soft-delete the object
_, delete_response = self._delete(force_policy=SOFT_DELETE, **kwargs)
deleted_counter.update(delete_response)
# update fields (SET, SET_DEFAULT or SET_NULL)
for model, instances_for_fieldvalues in collector.field_updates.items():
if django.VERSION[0] > 4 or (django.VERSION[0] == 4 and django.VERSION[1] >= 2):
# as of 4.2 field_updates values is a list rather than a dictionary
(field, value) = model
instances_list = instances_for_fieldvalues
model = instances_list[0].__class__
updates = []
objs = []
for instances in instances_list:
if isinstance(instances, models.QuerySet):
updates.append(instances)
else:
objs.extend(instances)
if updates:
combined_updates = reduce(or_, updates)
combined_updates.update(**{field.name: value})
if objs:
query = models.sql.UpdateQuery(model)
query.update_batch(
list({obj.pk for obj in instances_list}), {field.name: value}, collector.using
)
else:
for (field, value), instances in instances_for_fieldvalues.items():
query = models.sql.UpdateQuery(model)
query.update_batch(
[obj.pk for obj in instances],
{field.name: value},
collector.using,
)
return sum(deleted_counter.values()), dict(deleted_counter)
@classmethod
def has_unique_fields(cls) -> bool:
"""Checks if one of the fields of this model has a unique constraint set (unique=True).
It also checks if the model has sets of field names that, taken together, must be unique.
Args:
model: Model instance to check
"""
if cls._meta.unique_together:
return True
if django.VERSION[0] > 3 or (django.VERSION[0] == 3 and django.VERSION[1] >= 1):
if cls._meta.total_unique_constraints:
return True
else: # derived from total_unique_constraints in django >= 3.1
for constraint in cls._meta.constraints:
if isinstance(constraint, UniqueConstraint) and constraint.condition is None:
return True
for field in cls._meta.fields:
if field._unique: # type: ignore
return True
return False
# We need to overwrite this check to ensure uniqueness is also checked
# against "deleted" (but still in db) objects.
# FIXME: Better/cleaner way ?
def _perform_unique_checks(self, unique_checks) -> Dict[str, List[ValidationError]]:
errors: Dict[str, List[ValidationError]] = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname) # type: ignore
if lookup_value is None:
continue
if f.primary_key and not self._state.adding: # type: ignore
continue
lookup_kwargs[str(field_name)] = lookup_value
if len(unique_check) != len(lookup_kwargs):
continue
# This is the changed line
if hasattr(model_class, 'all_objects'):
qs = model_class.all_objects.filter(**lookup_kwargs)
else:
qs = model_class._default_manager.filter(**lookup_kwargs)
model_class_pk = self._get_pk_val(model_class._meta) # type: ignore
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = models.base.NON_FIELD_ERRORS # type: ignore
errors.setdefault(key, []).append(
self.unique_error_message(model_class, unique_check)
)
return errors
SafeDeleteModel.add_to_class(FIELD_NAME, models.DateTimeField(editable=False, null=True, db_index=True))
SafeDeleteModel.add_to_class(DELETED_BY_CASCADE_FIELD_NAME, models.BooleanField(editable=False, default=False))
class SafeDeleteMixin(SafeDeleteModel):
"""``SafeDeleteModel`` was previously named ``SafeDeleteMixin``.
.. deprecated:: 0.4.0
Use :class:`SafeDeleteModel` instead.
"""
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
warnings.warn('The SafeDeleteMixin class was renamed SafeDeleteModel',
DeprecationWarning)
SafeDeleteModel.__init__(self, *args, **kwargs)
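# --- Illustrative usage sketch (not part of this module) ---
# Hypothetical app models showing the soft-delete-cascade policy, the undelete
# flow, and a receiver for the post_softdelete signal sent above. Kept as a
# comment because Django models must live inside an installed app.
#
#     from django.db import models
#     from django.dispatch import receiver
#     from safedelete.config import SOFT_DELETE_CASCADE
#     from safedelete.models import SafeDeleteModel
#     from safedelete.signals import post_softdelete
#
#     class Author(SafeDeleteModel):
#         _safedelete_policy = SOFT_DELETE_CASCADE
#         name = models.CharField(max_length=100)
#
#     class Book(SafeDeleteModel):
#         author = models.ForeignKey(Author, on_delete=models.CASCADE)
#         title = models.CharField(max_length=200)
#
#     @receiver(post_softdelete)
#     def log_soft_delete(sender, instance, using, **kwargs):
#         print(f"{sender.__name__} #{instance.pk} soft-deleted (db: {using})")
#
#     author.delete()           # soft-deletes the author and cascades to its books
#     author.undelete()         # restores the author and the books deleted by that cascade
#     Author.objects.all()      # excludes soft-deleted rows
#     Author.all_objects.all()  # includes soft-deleted and live rows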
--- flatplanet/Intro-To-TKinter-Youtube-Course :: controls.py (Python, no license) ---
from tkinter import *
from tkinter import ttk
root = Tk()
root.title("Controls")
root.geometry("300x650")
root.iconbitmap('c:/guis/exe/codemy.ico')
# Create Launch Function
def launch():
global second
second = Toplevel()
second.geometry("200x200")
# Change the width:
def width_slide(x):
#int(width_slider.get())
#int(height_slider.get())
second.geometry(f"{int(width_slider.get())}x{int(height_slider.get())}")
# Change the height:
def height_slide(x):
second.geometry(f"{int(width_slider.get())}x{int(height_slider.get())}")
# Change the both:
def both_slide(x):
second.geometry(f"{int(both_slider.get())}x{int(both_slider.get())}")
# Create a launch button
launch_button = Button(root, text="Launch Window", command=launch)
launch_button.pack(pady=20)
# Create Some Label Frames
width_frame = LabelFrame(root, text="Width")
width_frame.pack(pady=20)
height_frame = LabelFrame(root, text="Height")
height_frame.pack(pady=20)
both_frame = LabelFrame(root, text="Both")
both_frame.pack(pady=20)
# Create Some Sliders
width_slider = ttk.Scale(width_frame, from_=100, to=500, orient=HORIZONTAL, length=200, command=width_slide, value=100)
width_slider.pack(pady=20, padx=20)
height_slider = ttk.Scale(height_frame, from_=100, to=500, orient=VERTICAL, length=200, command=height_slide, value=100)
height_slider.pack(pady=20, padx=20)
both_slider = ttk.Scale(both_frame, from_=100, to=500, orient=HORIZONTAL, length=200, command=both_slide, value=100)
both_slider.pack(pady=20, padx=20)
root.mainloop()
--- BenevolentAI/guacamol_baselines :: graph_mcts/analyze_dataset.py (Python, permissive, MIT) ---
import argparse
import collections
import datetime
import logging
import os
import pickle
from time import time
from typing import List, Tuple, Dict
import numpy as np
from guacamol.utils.helpers import setup_default_logger
from rdkit import Chem
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def chembl_problematic_case(key: str) -> bool:
"""
When generating the statistics on ChEMBL, some KeyError exceptions were generated if not checking for
this special case.
"""
allowed_key_beginnings = {'[#6R', '[#7R'}
tokens = key.split(']')
return '=' in key and tokens[0] not in allowed_key_beginnings
def read_file(file_name: str) -> List[str]:
"""
Args:
file_name: Text file with one SMILES per line
Returns: a list of SMILES strings
"""
with open(file_name, 'r') as f:
return [s.strip() for s in f]
def get_counts(smarts_list, smiles_list, ring=False) -> Tuple[int, Dict[str, int]]:
"""
Args:
        smarts_list: list of SMARTS of interest
smiles_list: a list of SMILES strings
ring: determines whether or not the matches are uniquified
Returns:
tot: sum of SMARTS counts
probs2: an OrderedDict of {SMART: counts}
"""
probs = collections.OrderedDict()
for smarts in smarts_list:
probs[smarts] = 0
# number_of_molecules = 0
# tot = 0
for smiles in smiles_list:
# print smiles
# number_of_molecules += 1
mol = Chem.MolFromSmiles(smiles)
Chem.Kekulize(mol)
for smarts in smarts_list:
matches = mol.GetSubstructMatches(Chem.MolFromSmarts(smarts), uniquify=ring)
num_bonds = len(matches) # not the number of bonds, but the number of matches
probs[smarts] += num_bonds
# tot += num_bonds
tot = 0
probs2 = collections.OrderedDict()
for key in probs:
if probs[key] > 0:
# print key, probs[key]
tot += probs[key]
probs2[key] = probs[key]
return tot, probs2
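# Illustrative call (hypothetical inputs; not part of the original script).
# Counts single- and double-bonded carbon pairs across two molecules; with the
# default ring=False the matches are not uniquified, so each symmetric pair of
# atoms is counted in both directions (match counts, not bond counts, as noted
# in the inline comment above).
#
#     tot, counts = get_counts(['[#6]-[#6]', '[#6]=[#6]'], ['CCO', 'C=CC'])
#     # `counts` maps each SMARTS to its total match count; `tot` is their sum.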
def clean_counts(probs):
"""
Removes counts for certain SMARTS
SMARTS are pairs of atoms
Used only to prepare input for get_rxn_smarts
Args:
probs: OrderedDict of {SMARTS: probability}
probability is actually a count of occurrences
SMARTS are permutations of pairs of atoms (joined by bonds) where one of these atoms is not in a ring
OrderedDict([('[#6]-[#6;!R]', 448), ('[#6]-[#7;!R]', 173)...])
Returns:
tot: sum of SMARTS counts
probs2: an OrderedDict of {SMART: counts}
"""
# Triple bondedN, Carbonyl, Fl, Cl, Br, I
exceptions = ['[#7]#', '[#8]=', '[#9]', '[#17]', '[#35]', '[#53]']
probs2 = collections.OrderedDict()
# for key in probs:
# skip = False
# for exception in exceptions:
# if exception in key:
# tokens = re.split('\[|\]|;',key)
# alt_key = '['+tokens[3]+']'+tokens[2]+'['+tokens[1]+';!R]'
# probs[alt_key] += probs[key]
for key in probs:
skip = False
for exception in exceptions:
if exception in key:
skip = True
if not skip:
probs2[key] = probs[key]
tot = 0
for key in probs2:
tot += probs2[key]
return tot, probs2
def get_probs(probs, tot, ignore_problematic=False):
"""
From counts to probabilities
Args:
probs: OrderedDict of {SMARTS: un-normalised probability}
ignore_problematic: for cases not supported by get_rxn_smarts_make_rings and get_rxn_smarts_rings, the corresponding smarts must also be ignored here.
Returns: OrderedDict of {SMARTS: normalised probability}
"""
p = []
# When ignoring some smarts, we must adapt the total count in order for the probabilities to be normalized to 1.0
ignored_count = 0
for key in probs:
if ignore_problematic and chembl_problematic_case(key):
logger.warning(f'Ignoring key {key} in get_probs to be consistent with other functions')
ignored_count += probs[key]
continue
p.append(float(probs[key]))
adapted_tot = tot - ignored_count
p = [prob / adapted_tot for prob in p]
return p
def get_rxn_smarts_make_rings(probs):
"""
Generate reaction smarts to form a three-membered ring from two atoms that are not in a ring already
SMARTS for 3 atom sequences in rings are transformed from XYZ to give ring forming reaction smarts XZ>>X1YZ1
Transformation CC >> C1CC1 will be performed by [#6;!R:1]=,-[#6;!R:2]>>[*:1]1-[#6R][*:2]1
Args:
probs: OrderedDict of {SMARTS: probability}
probability is actually a count of occurrences (not used)
SMARTS are permutations of 3 specific atoms in a ring
OrderedDict([('[#6R]-[#6R]-[#6R]', 296), ('[#6R]=[#6R]-[#6R]', 1237)...])
Returns: list of reaction SMARTS
"""
X = {'[#6R': 'X4', '[#7R': 'X3'}
rxn_smarts = []
for key in probs:
if chembl_problematic_case(key):
logger.warning(f'Ignoring unsupported key {key} in get_rxn_smarts_make_rings')
continue
tokens = key.split(']') # ['[#6R', '-[#6R', '-[#6R', '']
smarts = ''
if '=' in key:
smarts += tokens[0][:-1] + X[tokens[0]] + ';!R:1]' # [:-1] slice strips trailing R from smarts
else:
smarts += tokens[0][:-1] + ';!R:1]=,' # [:-1] slice strips trailing R from smarts
smarts += tokens[2][:-1] + ';!R:2]>>'
smarts += '[*:1]1' + tokens[1] + '][*:2]1'
# print(rxn_smarts)
# ['[#6;!R:1]=,-[#6;!R:2]>>[*:1]1-[#6R][*:2]1']
# ['[#6X4;!R:1]-[#6;!R:2]>>[*:1]1=[#6R][*:2]1'] (if '=' in key)
rxn_smarts.append(smarts)
return rxn_smarts
def get_rxn_smarts_rings(probs):
"""
Generate reaction smarts to insert one atom in a ring (will not touch 6 or 7-membered rings)
SMARTS matching 3 atom sequences in rings are transformed from XYZ to give reaction SMARTS XZ>>XYZ
Transformation C1CCC1 >> C1CCCC1 will be performed by [#6R;!r6;!r7;!R2:1]-[#6R;!r6;!r7:2]>>[*:1]-[#6R][*:2]
Args:
probs: OrderedDict of {SMARTS: probability}
probability is actually a count of occurrences (not used)
SMARTS are permutations of 3 specific atoms in a ring
OrderedDict([('[#6R]-[#6R]-[#6R]', 296), ('[#6R]=[#6R]-[#6R]', 1237)...])
Returns: list of reaction SMARTS
"""
X = {'[#6R': 'X4', '[#7R': 'X3'} # carbons should have four bonds / nitrogens should have three bonds
rxn_smarts = []
for key in probs:
if chembl_problematic_case(key):
logger.warning(f'Ignoring unsupported key {key} in get_rxn_smarts_rings')
continue
tokens = key.split(']') # ['[#6R', '-[#6R', '-[#6R', '']
smarts = ''
if '=' in key:
# This seems to be intended for aromatic ring systems
# [#6RX4;!r6;!r7;!R2:1] C in a ring where ring size != 6 or 7, with 4 total connections. Not in two rings
smarts += tokens[0] + X[tokens[0]] + ';!r6;!r7;!R2:1]'
else:
# This seems to be intended for aliphatic ring systems
# [#6R;!r6;!r7;!R2:1] C in a ring of size not 6 and not 7. Not in 2 rings
smarts += tokens[0] + ';!r6;!r7;!R2:1]'
smarts += tokens[2] + ';!r6;!r7:2]>>'
smarts += '[*:1]' + tokens[1] + '][*:2]'
# print(smarts)
# [#6R;!r6;!r7;!R2:1]-[#6R;!r6;!r7:2]>>[*:1]-[#6R][*:2]
# [#6RX4;!r6;!r7;!R2:1]-[#6R;!r6;!r7:2]>>[*:1]=[#6R][*:2] (if '=' in key)
rxn_smarts.append(smarts)
return rxn_smarts
def get_rxn_smarts(probs):
"""
Generate reaction smarts to add acyclic atoms
Args:
probs: OrderedDict of {SMARTS: probability}
probability is actually a count of occurrences (not used)
SMARTS are permutations of pairs of atoms (joined by bonds) where one of these atoms is not in a ring
Unlike other functions, here input has been "cleaned" to remove certain functional groups
OrderedDict([('[#6]-[#6;!R]', 448), ('[#6]-[#7;!R]', 173)...])
Returns: list of reaction SMARTS
"""
rxn_smarts = []
for key in probs: # key <-> smarts
# smarts = ''
tokens = key.split(']') # ['[#6', '-[#7;!R', '']
smarts = tokens[0]
if '-' in key and '#16' not in smarts: # check for sulfur
smarts += ';!H0:1]>>[*:1]' # make sure root atom has one or more hydrogens before adding single bond
if '=' in key and '#16' not in smarts: # check for sulfur
smarts += ';!H1;!H0:1]>>[*:1]' # make sure root atom has two or more hydrogens before adding double bond
if ']#[' in key:
smarts += ';H3:1]>>[*:1]' # 3 hydrogens are required on root atom in order to introduce a triple bond
if '#16' in smarts: # key <-> smarts
smarts += ':1]>>[*:1]' # if sulfur, do nothing
# e.g. [#6;!H0:1]>>[*:1]-[#6;!R] add carbon atom to root carbon if root carbon has one or more hydrogens
smarts += tokens[-2] + ']'
rxn_smarts.append(smarts)
return rxn_smarts
def get_mean_size(smiles_list):
"""
Calculates number of atoms `mean` and `std`
given a SMILES list
Args:
smiles_list: list of SMILES
Returns: mean, std
"""
size = []
for smiles in smiles_list:
mol = Chem.MolFromSmiles(smiles)
num_atoms = mol.GetNumAtoms()
size.append(num_atoms)
return np.mean(size), np.std(size)
def count_macro_cycles(smiles_list, smarts_list, tot, probs):
"""
Args:
smiles_list: list of SMILES
smarts_list: list of SMARTS
tot: counter of ... TODO: why is this passed?
probs: OrderedDict of {SMARTS: counts}
Returns:
"""
# probs = collections.OrderedDict()
for smarts in smarts_list:
probs[smarts] = 0
for smiles in smiles_list:
for smarts in smarts_list:
mol = Chem.MolFromSmiles(smiles)
Chem.Kekulize(mol)
matches = mol.GetSubstructMatches(Chem.MolFromSmarts(smarts), uniquify=True)
if len(matches) > 0:
probs[smarts] += 1
tot += 1
return tot, probs
class StatsCalculator:
"""
Contains code of the original main function for a more convenient calculation of statistics.
"""
# Use of '#6' notation conflates aromatic and aliphatic atoms
# ['B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'Se', 'Br', 'I']
elements = ['#5', '#6', '#7', '#8', '#9', '#14', '#15', '#16', '#17', '#34', '#35', '#53']
bonds = ['-', '=', '#']
def __init__(self, smiles_file: str):
self.smiles_list = read_file(smiles_file)
self.tot, self.probs = self.smarts_element_and_rings_probs()
def size_statistics(self) -> Tuple[float, float]:
size_mean, size_stdv = get_mean_size(self.smiles_list)
return size_mean.item(), size_stdv.item()
def atom_in_ring_probs(self) -> Tuple[int, Dict[str, int]]:
# SMARTS probabilities (atom in ring)
smarts = ['[*]', # all
'[R]', # atom in ring
'[!R]', # atom not in ring
'[R2]'] # atom in 2 rings
return get_counts(smarts, self.smiles_list)
def smarts_ring_probs(self) -> Tuple[int, Dict[str, int]]:
# SMARTS probabilities (rings)
smarts = ['[R]~[R]~[R]', # Any 3 ring atoms connected by any two bonds
'[R]-[R]-[R]', # Any 3 ring atoms connected by two single bonds
'[R]=[R]-[R]'] # Any 3 ring atoms connected by one single and one double bond
return get_counts(smarts, self.smiles_list, ring=True)
def smarts_element_and_element_in_ring_probs(self) -> Tuple[int, Dict[str, int]]:
# SMARTS probabilities (elements + elements in ring)
# smarts = []
# for element in self.elements:
# smarts.append('[' + element + ']') # elemental abundance
smarts = []
for element in self.elements:
            smarts.append('[' + element + 'R]')  # elemental abundance within a ring
return get_counts(smarts, self.smiles_list)
def smarts_element_and_rings_probs(self) -> Tuple[int, Dict[str, int]]:
tot_Ratoms, probs_Ratoms = self.smarts_element_and_element_in_ring_probs()
# TODO: rewrite
R_elements = []
for key in probs_Ratoms:
R_elements.append(key)
# Generate smarts for all permutations of 3 atoms in a ring (limited to atoms in 'elements') e.g. [#6R]=[#7R]-[#6R]
smarts = []
for i, e1 in enumerate(R_elements):
for e2 in R_elements:
for j, e3 in enumerate(R_elements):
if j >= i: # makes sure identical reversed smarts patterns aren't generated (C-N-O and O-N-C)
sm_s = e1 + '-' + e2 + '-' + e3
if sm_s not in smarts:
smarts.append(sm_s)
sm_d = e1 + '=' + e2 + '-' + e3
if sm_d not in smarts:
smarts.append(sm_d)
return get_counts(smarts, self.smiles_list, ring=True)
def rxn_smarts_rings(self):
return get_rxn_smarts_rings(self.probs)
def rxn_smarts_make_rings(self):
# Generate reaction smarts to grow rings (not 6 or 7-membered) by inserting one atom
return get_rxn_smarts_make_rings(self.probs)
def ring_probs(self):
return get_probs(self.probs, self.tot, ignore_problematic=True)
def smarts_pair_probs(self) -> Tuple[int, Dict[str, int]]:
smarts = []
for bond in self.bonds:
for element1 in self.elements:
for element2 in self.elements:
smarts.append('[' + element1 + ']' + bond + '[' + element2 + ';!R]')
tot, probs = get_counts(smarts, self.smiles_list)
return clean_counts(probs)
def pair_probs(self):
tot, probs = self.smarts_pair_probs()
# Normalise probs (which actually contains counts) to give probabilities
return get_probs(probs, tot)
def rxn_smarts(self):
# Generate reaction smarts to add atoms to a root atom with a specified bond type
tot, probs = self.smarts_pair_probs()
return get_rxn_smarts(probs)
def smarts_macrocycles_probs(self) -> Tuple[int, Dict[str, int]]:
# count aliphatic and aromatic rings of size 3-6
smarts_list = ['[*]1-[*]-[*]-1', '[*]1-[*]=[*]-1', '[*]1-[*]-[*]-[*]-1', '[*]1=[*]-[*]-[*]-1',
'[*]1=[*]-[*]=[*]-1',
'[*]1-[*]-[*]-[*]-[*]-1', '[*]1=[*]-[*]-[*]-[*]-1', '[*]1=[*]-[*]=[*]-[*]-1',
'[*]1-[*]-[*]-[*]-[*]-[*]-1', '[*]1=[*]-[*]-[*]-[*]-[*]-1', '[*]1=[*]-[*]=[*]-[*]-[*]-1',
'[*]1=[*]-[*]-[*]=[*]-[*]-1', '[*]1=[*]-[*]=[*]-[*]=[*]-1']
    # count occurrences of macrocycles of size 7-12
smarts_macro = ['[r;!r3;!r4;!r5;!r6;!r8;!r9;!r10;!r11;!r12]', '[r;!r3;!r4;!r5;!r6;!r7;!r9;!r10;!r11;!r12]',
'[r;!r3;!r4;!r5;!r6;!r7;!r8;!r10;!r11;!r12]', '[r;!r3;!r4;!r5;!r6;!r7;!r8;!r9;!r11;!r12]',
'[r;!r3;!r4;!r5;!r6;!r7;!r8;!r9;!r10;!r12]', '[r;!r3;!r4;!r5;!r6;!r7;!r8;!r9;!r10;!r11]']
tot, probs = get_counts(smarts_list, self.smiles_list, ring=True)
return count_macro_cycles(self.smiles_list, smarts_macro, tot, probs)
def number_rings(self) -> int:
tot, probs = self.smarts_macrocycles_probs()
num_rings = 0
for key in probs:
print(key, probs[key])
num_rings += probs[key]
return num_rings
def main():
setup_default_logger()
parser = argparse.ArgumentParser(description='Generate pickle files for the statistics of a training set for MCTS',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--smiles_file', default='data/guacamol_v1_all.smiles',
help='Full path to SMILES file from which to generate the distribution statistics')
parser.add_argument('--output_dir', default=None, help='Output directory for the pickle files')
args = parser.parse_args()
if args.output_dir is None:
args.output_dir = os.path.dirname(os.path.realpath(__file__))
logger.info('Generating probabilities for MCTS...')
t0 = time()
stats = StatsCalculator(args.smiles_file)
size_stats = stats.size_statistics()
rxn_smarts_rings = stats.rxn_smarts_rings()
rxn_smarts_make_rings = stats.rxn_smarts_make_rings()
p_rings = stats.ring_probs()
pickle.dump(size_stats, open(os.path.join(args.output_dir, 'size_stats.p'), 'wb'))
pickle.dump(p_rings, open(os.path.join(args.output_dir, 'p_ring.p'), 'wb'))
pickle.dump(rxn_smarts_rings, open(os.path.join(args.output_dir, 'rs_ring.p'), 'wb'))
pickle.dump(rxn_smarts_make_rings, open(os.path.join(args.output_dir, 'rs_make_ring.p'), 'wb'))
p = stats.pair_probs()
rxn_smarts = stats.rxn_smarts()
pickle.dump(p, open(os.path.join(args.output_dir, 'p1.p'), 'wb'))
pickle.dump(rxn_smarts, open(os.path.join(args.output_dir, 'r_s1.p'), 'wb'))
print(f'Total time: {str(datetime.timedelta(seconds=int(time() - t0)))} secs')
if __name__ == '__main__':
main()
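# --- Illustrative programmatic usage (not part of the original script) ---
# The same statistics can be computed without going through main(); the SMILES
# path below is the script's default and may not exist locally.
#
#     stats = StatsCalculator('data/guacamol_v1_all.smiles')
#     mean_size, std_size = stats.size_statistics()
#     rs_ring = stats.rxn_smarts_rings()            # insert one atom into small rings
#     rs_make_ring = stats.rxn_smarts_make_rings()  # close three-membered rings
#     p_ring = stats.ring_probs()                   # probabilities matching the ring SMARTS
#     rs_acyclic = stats.rxn_smarts()               # add acyclic atoms to a root atom
#     p_pair = stats.pair_probs()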
--- PyTables/PyTables :: examples/array1.py (Python, permissive, BSD-3-Clause) ---
import numpy as np
import tables as tb
# Open a new empty HDF5 file
fileh = tb.open_file("array1.h5", mode="w")
# Get the root group
root = fileh.root
# Create an Array
a = np.array([-1, 2, 4], np.int16)
# Save it on the HDF5 file
hdfarray = fileh.create_array(root, 'array_1', a, "Signed short array")
# Create a scalar Array
a = np.array(4, np.int16)
# Save it on the HDF5 file
hdfarray = fileh.create_array(root, 'array_s', a, "Scalar signed short array")
# Create a 3-d array of floats
a = np.arange(120, dtype=np.float64).reshape(20, 3, 2)
# Save it on the HDF5 file
hdfarray = fileh.create_array(root, 'array_f', a, "3-D float array")
# Close the file
fileh.close()
# Open the file for reading
fileh = tb.open_file("array1.h5", mode="r")
# Get the root group
root = fileh.root
a = root.array_1.read()
print("Signed byte array -->", repr(a), a.shape)
print("Testing iterator (works even over scalar arrays):", end=' ')
arr = root.array_s
for x in arr:
print("nrow-->", arr.nrow)
print("Element-->", repr(x))
# print "Testing getitem:"
# for i in range(root.array_1.nrows):
# print "array_1["+str(i)+"]", "-->", root.array_1[i]
print("array_f[:,2:3,2::2]", repr(root.array_f[:, 2:3, 2::2]))
print("array_f[1,2:]", repr(root.array_f[1, 2:]))
print("array_f[1]", repr(root.array_f[1]))
# Close the file
fileh.close()
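# --- Optional follow-up (not part of the original example) ---
# One way to inspect everything that was written, assuming array1.h5 was
# produced by the code above (`tb` is already imported at the top of this file).
with tb.open_file("array1.h5", mode="r") as f:
    for node in f.walk_nodes("/", classname="Array"):
        print(node)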
--- apache/airflow :: tests/api_connexion/schemas/test_xcom_schema.py (Python, permissive, Apache-2.0) ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import pickle
import pytest
from sqlalchemy import or_, select
from airflow.api_connexion.schemas.xcom_schema import (
XComCollection,
xcom_collection_item_schema,
xcom_collection_schema,
xcom_schema,
)
from airflow.models import DagRun, XCom
from airflow.utils.dates import parse_execution_date
from airflow.utils.session import create_session
@pytest.fixture(scope="module", autouse=True)
def clean_xcom():
"""Ensure there's no XCom littered by other modules."""
with create_session() as session:
session.query(XCom).delete()
def _compare_xcom_collections(collection1: dict, collection_2: dict):
assert collection1.get("total_entries") == collection_2.get("total_entries")
def sort_key(record):
return (
record.get("dag_id"),
record.get("task_id"),
record.get("execution_date"),
record.get("map_index"),
record.get("key"),
)
assert sorted(collection1.get("xcom_entries", []), key=sort_key) == sorted(
collection_2.get("xcom_entries", []), key=sort_key
)
@pytest.fixture()
def create_xcom(create_task_instance, session):
def maker(dag_id, task_id, execution_date, key, map_index=-1, value=None):
ti = create_task_instance(
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
session=session,
)
run: DagRun = ti.dag_run
xcom = XCom(
dag_run_id=run.id,
task_id=ti.task_id,
map_index=map_index,
key=key,
value=value,
timestamp=run.execution_date,
dag_id=run.dag_id,
run_id=run.run_id,
)
session.add(xcom)
session.commit()
return xcom
return maker
class TestXComCollectionItemSchema:
default_time = "2016-04-02T21:00:00+00:00"
default_time_parsed = parse_execution_date(default_time)
def test_serialize(self, create_xcom, session):
create_xcom(
dag_id="test_dag",
task_id="test_task_id",
execution_date=self.default_time_parsed,
key="test_key",
)
xcom_model = session.query(XCom).first()
deserialized_xcom = xcom_collection_item_schema.dump(xcom_model)
assert deserialized_xcom == {
"key": "test_key",
"timestamp": self.default_time,
"execution_date": self.default_time,
"task_id": "test_task_id",
"dag_id": "test_dag",
"map_index": -1,
}
def test_deserialize(self):
xcom_dump = {
"key": "test_key",
"timestamp": self.default_time,
"execution_date": self.default_time,
"task_id": "test_task_id",
"dag_id": "test_dag",
"map_index": 2,
}
result = xcom_collection_item_schema.load(xcom_dump)
assert result == {
"key": "test_key",
"timestamp": self.default_time_parsed,
"execution_date": self.default_time_parsed,
"task_id": "test_task_id",
"dag_id": "test_dag",
"map_index": 2,
}
class TestXComCollectionSchema:
default_time_1 = "2016-04-02T21:00:00+00:00"
default_time_2 = "2016-04-02T21:01:00+00:00"
time_1 = parse_execution_date(default_time_1)
time_2 = parse_execution_date(default_time_2)
def test_serialize(self, create_xcom, session):
create_xcom(
dag_id="test_dag_1",
task_id="test_task_id_1",
execution_date=self.time_1,
key="test_key_1",
)
create_xcom(
dag_id="test_dag_2",
task_id="test_task_id_2",
execution_date=self.time_2,
key="test_key_2",
)
xcom_models = session.scalars(
select(XCom)
.where(or_(XCom.execution_date == self.time_1, XCom.execution_date == self.time_2))
.order_by(XCom.dag_run_id)
).all()
deserialized_xcoms = xcom_collection_schema.dump(
XComCollection(
xcom_entries=xcom_models,
total_entries=len(xcom_models),
)
)
_compare_xcom_collections(
deserialized_xcoms,
{
"xcom_entries": [
{
"key": "test_key_1",
"timestamp": self.default_time_1,
"execution_date": self.default_time_1,
"task_id": "test_task_id_1",
"dag_id": "test_dag_1",
"map_index": -1,
},
{
"key": "test_key_2",
"timestamp": self.default_time_2,
"execution_date": self.default_time_2,
"task_id": "test_task_id_2",
"dag_id": "test_dag_2",
"map_index": -1,
},
],
"total_entries": 2,
},
)
class TestXComSchema:
default_time = "2016-04-02T21:00:00+00:00"
default_time_parsed = parse_execution_date(default_time)
def test_serialize(self, create_xcom, session):
create_xcom(
dag_id="test_dag",
task_id="test_task_id",
execution_date=self.default_time_parsed,
key="test_key",
value=pickle.dumps(b"test_binary"),
)
xcom_model = session.query(XCom).first()
deserialized_xcom = xcom_schema.dump(xcom_model)
assert deserialized_xcom == {
"key": "test_key",
"timestamp": self.default_time,
"execution_date": self.default_time,
"task_id": "test_task_id",
"dag_id": "test_dag",
"value": "test_binary",
"map_index": -1,
}
def test_deserialize(self):
xcom_dump = {
"key": "test_key",
"timestamp": self.default_time,
"execution_date": self.default_time,
"task_id": "test_task_id",
"dag_id": "test_dag",
"value": b"test_binary",
}
result = xcom_schema.load(xcom_dump)
assert result == {
"key": "test_key",
"timestamp": self.default_time_parsed,
"execution_date": self.default_time_parsed,
"task_id": "test_task_id",
"dag_id": "test_dag",
"value": "test_binary",
}
|
40213a9c281af4e7a798336f7727768b2819887a
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/tests/noseplugins/logfile.py
|
5c072e80145525455e7ffb5397591af74fb26318
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
logfile.py
|
"""A plugin to log test failures to a file
This is useful to preserve error output from a test run in a file in
addition to displaying the output in a terminal. It is also possible
to view errors (in the log file) as soon as they occur while running
very large test suites.
The log file will not be overwritten if the test run completes with no
errors or failures.
Usage:
./manage.py test --log-file=test-failures.log
"""
import datetime
import os
import sys
try:
from shlex import quote # py3
except ImportError:
from pipes import quote # py2
from unittest.runner import TextTestResult, _WritelnDecorator
from nose.plugins import Plugin
class LogFilePlugin(Plugin):
"""Log test failures to file"""
name = "log-file"
def options(self, parser, env):
# Do not call super to avoid adding a ``--with`` option for this plugin
parser.add_option('--log-file',
default=env.get('NOSE_LOG_FILE'),
help="File in which to log test failures. "
"[NOSE_LOG_FILE]")
def configure(self, options, conf):
if options.log_file:
self.enabled = True
self.log_path = os.path.expanduser(options.log_file)
self.log_file = None
self.argv = sys.argv
self.start = datetime.datetime.now()
def setup_log(self):
self.log_file = _WritelnDecorator(open(self.log_path, "w"))
self.log_file.writeln(" ".join(quote(a) for a in self.argv))
self.log_file.writeln(str(self.start))
self.result = TextTestResult(self.log_file, True, 0)
def log(self, label, test, err):
if self.log_file is None:
self.setup_log()
if isinstance(err[1], str):
# Turn value back into an Exception (required in Python 3.x).
# https://github.com/nose-devs/nose/blob/7c26ad1e6b/nose/proxy.py#L90-L95
value = type(err[0].__name__, (Exception,), {})(err[1])
err = (err[0], value, err[2])
err_string = self.result._exc_info_to_string(err, test)
self.result.printErrorList(label, [(test, err_string)])
self.log_file.flush()
def addError(self, test, err):
self.log("ERROR", test, err)
def addFailure(self, test, err):
self.log("FAIL", test, err)
def finalize(self, result):
if self.log_file is not None:
self.log_file.writeln(str(datetime.datetime.now()))
self.log_file.close()
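# Illustrative sketch (added, not part of the original module): the plugin can
# also be registered programmatically when invoking nose directly; the argv
# below mirrors the command-line usage shown in the module docstring.
#
#   import nose
#   nose.main(addplugins=[LogFilePlugin()],
#             argv=["nosetests", "--log-file=test-failures.log"])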
|
3e7d07573232e19963901f1e6518812770517f32
|
428823322eeae0e5441208a9ae32474015287195
|
/tests/test_select_csv_object.py
|
16065f89db9f589f17a1d4e8939cfe87ffc1776a
|
[
"MIT"
] |
permissive
|
aliyun/aliyun-oss-python-sdk
|
de65ed73ea46754d90045094bef0e26fe1f2af88
|
45d54c19483b4b6c873f97f153be19a11a9d60fb
|
refs/heads/master
| 2023-07-31T14:29:01.240869
| 2023-07-18T12:49:32
| 2023-07-18T12:52:47
| 47,454,460
| 965
| 460
|
MIT
| 2023-09-06T08:48:16
| 2015-12-05T12:09:57
|
Python
|
UTF-8
|
Python
| false
| false
| 23,204
|
py
|
test_select_csv_object.py
|
# -*- coding: utf-8 -*-
import requests
import filecmp
import calendar
import csv
import re
import sys
from oss2.exceptions import (ClientError, RequestError, NoSuchBucket,
NotFound, NoSuchKey, Conflict, PositionNotEqualToLength, ObjectNotAppendable, SelectOperationFailed, SelectOperationClientError)
from .common import *
from oss2.select_response import SelectResponseAdapter
if sys.version_info[0] > 2:
# py3k
def _open(file):
return open(file, 'r', encoding='utf-8')
else:
# py2
def _open(file):
return open(file, 'r')
def now():
return int(calendar.timegm(time.gmtime()))
class SelectCsvObjectTestHelper(object):
def __init__(self, bucket):
self.bucket = bucket
self.scannedSize = 0
def select_call_back(self, consumed_bytes, total_bytes = None):
self.scannedSize = consumed_bytes
def test_select_csv_object(self, testCase, sql, line_range = None):
key = "city_sample_data.csv"
result = self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
result = self.bucket.create_select_object_meta(key)
result = self.bucket.head_object(key)
file_size = result.content_length
input_format = {'CsvHeaderInfo' : 'Use'}
if (line_range is not None):
input_format['LineRange'] = line_range
SelectResponseAdapter._FRAMES_FOR_PROGRESS_UPDATE = 0
result = self.bucket.select_object(key, sql, self.select_call_back, input_format)
content = b''
for chunk in result:
content += chunk
print(result.request_id)
testCase.assertEqual(result.status, 206, result.request_id)
testCase.assertTrue(len(content) > 0)
if line_range is None:
testCase.assertEqual(self.scannedSize, file_size)
return content
def test_select_csv_object_invalid_request(self, testCase, sql, line_range = None):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
result = self.bucket.create_select_object_meta(key)
file_size = result.content_length
input_format = {'CsvHeaderInfo' : 'Use'}
if (line_range is not None):
input_format['Range'] = line_range
try:
result = self.bucket.select_object(key, sql, None, input_format)
testCase.assertEqual(result.status, 400)
except oss2.exceptions.ServerError as e:
testCase.assertEqual(e.status, 400)
class TestSelectCsvObject(OssTestCase):
def test_select_csv_object_not_empty_city(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select Year, StateAbbr, CityName, PopulationCount from ossobject where CityName != ''")
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
select_data = b''
for row in spamreader:
line = b''
if row['CityName'] != '':
line += row['Year'].encode('utf-8')
line += ','.encode('utf-8')
line += row['StateAbbr'].encode('utf-8')
line += ','.encode('utf-8')
line += row['CityName'].encode('utf-8')
line += ','.encode('utf-8')
line += row['PopulationCount'].encode('utf-8')
line += '\n'.encode('utf-8')
select_data += line
self.assertEqual(select_data, content)
def test_select_csv_object_like(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select Year, StateAbbr, CityName, Short_Question_Text from ossobject where Measure like '%blood pressure%Years'")
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
select_data = b''
matcher = re.compile('^.*blood pressure.*Years$')
for row in spamreader:
line = b''
if matcher.match(row['Measure']):
line += row['Year'].encode('utf-8')
line += ','.encode('utf-8')
line += row['StateAbbr'].encode('utf-8')
line += ','.encode('utf-8')
line += row['CityName'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Short_Question_Text'].encode('utf-8')
line += '\n'.encode('utf-8')
select_data += line
self.assertEqual(select_data, content)
def test_select_csv_object_line_range(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select Year,StateAbbr, CityName, Short_Question_Text from ossobject'", (0, 50))
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
select_data = b''
count = 0
for row in spamreader:
if count < 50:
line = b''
line += row['Year'].encode('utf-8')
line += ','.encode('utf-8')
line += row['StateAbbr'].encode('utf-8')
line += ','.encode('utf-8')
line += row['CityName'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Short_Question_Text'].encode('utf-8')
line += '\n'.encode('utf-8')
select_data += line
else:
break
count += 1
self.assertEqual(select_data, content)
def test_select_csv_object_int_aggregation(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select avg(cast(year as int)), max(cast(year as int)), min(cast(year as int)) from ossobject where year = 2015")
self.assertEqual(content, b'2015,2015,2015\n')
def test_select_csv_object_float_aggregation(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select avg(cast(data_value as double)), max(cast(data_value as double)), sum(cast(data_value as double)) from ossobject")
# select_data = b''
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
sum = 0.0
avg = 0.0
line_count = 0
max = 0.0
for row in spamreader:
if len(row['Data_Value']) > 0 :
val = float(row['Data_Value'])
if val > max:
max = val
sum += val
line_count += 1
avg = sum/line_count
# select_data = ("{0:.4f}".format(avg) + "," + str(max) + "," + "{0:.1f}".format(sum) + '\n').encode('utf-8')
aggre_results = content.split(b',')
avg_result = float(aggre_results[0])
max_result = float(aggre_results[1])
sum_result = float(aggre_results[2])
self.assertEqual(avg, avg_result)
self.assertEqual(max, max_result)
self.assertEqual(sum, sum_result)
def test_select_csv_object_concat(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select Year,StateAbbr, CityName, Short_Question_Text from ossobject where (data_value || data_value_unit) = '14.8%'")
select_data = b''
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
line = b''
if row['Data_Value_Unit'] == '%' and row['Data_Value'] == '14.8' :
line += row['Year'].encode('utf-8')
line += ','.encode('utf-8')
line += row['StateAbbr'].encode('utf-8')
line += ','.encode('utf-8')
line += row['CityName'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Short_Question_Text'].encode('utf-8')
line += '\n'.encode('utf-8')
select_data += line
self.assertEqual(select_data, content)
def test_select_csv_object_complicate_condition(self):
helper = SelectCsvObjectTestHelper(self.bucket)
content = helper.test_select_csv_object(self, "select Year,StateAbbr, CityName, Short_Question_Text, data_value, data_value_unit, category, high_confidence_limit from ossobject where data_value > 14.8 and data_value_unit = '%' or Measure like '%18 Years' and Category = 'Unhealthy Behaviors' or high_confidence_limit > 70.0 ")
select_data = b''
matcher = re.compile('^.*18 Years$')
with _open('tests/sample_data.csv') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
line = b''
if len(row['Data_Value']) > 0 and float(row['Data_Value']) > 14.8 and row['Data_Value_Unit'] == '%' or matcher.match(row['Measure']) and row['Category'] == 'Unhealthy Behaviors' or len(row['High_Confidence_Limit']) > 0 and float(row['High_Confidence_Limit']) > 70.0 :
line += row['Year'].encode('utf-8')
line += ','.encode('utf-8')
line += row['StateAbbr'].encode('utf-8')
line += ','.encode('utf-8')
line += row['CityName'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Short_Question_Text'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Data_Value'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Data_Value_Unit'].encode('utf-8')
line += ','.encode('utf-8')
line += row['Category'].encode('utf-8')
line += ','.encode('utf-8')
line += row['High_Confidence_Limit'].encode('utf-8')
line += '\n'.encode('utf-8')
select_data += line
self.assertEqual(select_data, content)
def test_select_csv_object_invalid_sql(self):
helper = SelectCsvObjectTestHelper(self.bucket)
helper.test_select_csv_object_invalid_request(self, "select * from ossobject where avg(cast(year as int)) > 2016")
helper.test_select_csv_object_invalid_request(self, "")
helper.test_select_csv_object_invalid_request(self, "select year || CityName from ossobject")
helper.test_select_csv_object_invalid_request(self, "select * from ossobject group by CityName")
helper.test_select_csv_object_invalid_request(self, "select * from ossobject order by _1")
helper.test_select_csv_object_invalid_request(self, "select * from ossobject oss join s3object s3 on oss.CityName = s3.CityName")
def test_select_csv_object_with_invalid_data(self):
key = "invalid_city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/invalid_sample_data.csv')
input_format = {'CsvHeaderInfo' : 'Use'}
result = self.bucket.select_object(key, "select _1 from ossobject", None, input_format)
content = b''
try:
for chunk in result:
content += chunk
self.assertFalse(True, "expect to raise exception")
except SelectOperationFailed:
print("Got the exception. Ok.")
def test_select_csv_object_into_file(self):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
input_format = {'CsvHeaderInfo' : 'None',
'CommentCharacter' : '#',
'RecordDelimiter' : '\n',
'FieldDelimiter' : ',',
'QuoteCharacter' : '"',
'SplitRange' : (0, None)
}
output_file = 'tests/sample_data_out.csv'
head_csv_params = {
'RecordDelimiter' : '\n',
'FieldDelimiter' : ',',
'QuoteCharacter' : '"',
'OverwriteIfExists': True}
self.bucket.create_select_object_meta(key, head_csv_params)
self.bucket.select_object_to_file(key, output_file, "select * from ossobject", None, input_format)
f1 = _open('tests/sample_data.csv')
content1 = f1.read()
f1.close()
f2 = _open(output_file)
content2 = f2.read()
f2.close()
os.remove(output_file)
self.assertEqual(content1, content2)
def test_select_gzip_csv_object_into_file(self):
key = "city_sample_data.csv.gz"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv.gz')
input_format = {'CsvHeaderInfo' : 'None',
'CommentCharacter' : '#',
'RecordDelimiter' : '\n',
'FieldDelimiter' : ',',
'QuoteCharacter' : '"',
'CompressionType' : 'GZIP'
}
output_file = 'tests/sample_data_out.csv'
self.bucket.select_object_to_file(key, output_file, "select * from ossobject", None, input_format)
f1 = _open('tests/sample_data.csv')
content1 = f1.read()
f1.close()
f2 = _open(output_file)
content2 = f2.read()
f2.close()
os.remove(output_file)
self.assertEqual(content1, content2)
def test_select_csv_object_none_range(self):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
self.bucket.create_select_object_meta(key)
input_formats = [
{ 'SplitRange' : (None, None) },
{ 'LineRange' : (None, None) },
{'SplitRange' : None },
{'LineRange' : None }
]
for input_format in input_formats:
result = self.bucket.select_object(key, "select * from ossobject", None, input_format)
content = b''
for chunk in result:
content += chunk
self.assertTrue(len(content) > 0)
def test_with_select_result(self):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
self.bucket.create_select_object_meta(key)
input_formats = [
{'SplitRange': (None, None)},
{'LineRange': (None, None)},
{'SplitRange': None},
{'LineRange': None}
]
for input_format in input_formats:
result1 = self.bucket.select_object(key, "select * from ossobject", None, input_format)
content1 = b''
for chunk in result1:
content1 += chunk
self.assertTrue(len(content1) > 0)
result2 = self.bucket.select_object(key, "select * from ossobject", None, input_format)
content2 = b''
with result2 as f:
for chunk in f:
content2 += chunk
self.assertEqual(content1, content2)
result3 = self.bucket.select_object(key, "select * from ossobject", None, input_format)
with result3 as f:
pass
content3 = result3.read()
self.assertEqual(0, len(content3))
def test_create_csv_object_meta_invalid_request(self):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
format = {'CompressionType':'GZIP'}
try:
self.bucket.create_select_object_meta(key, format)
self.assertFalse(True, "expected error did not occur")
        except oss2.exceptions.ServerError:
            print("expected error occurred")
def test_create_csv_object_meta_invalid_request2(self):
key = "city_sample_data.csv"
self.bucket.put_object_from_file(key, 'tests/sample_data.csv')
format = {'invalid_type':'value', 'CompressionType':'GZIP'}
try:
self.bucket.create_select_object_meta(key, format)
self.assertFalse(True, "expected error did not occur")
except SelectOperationClientError:
print("expected error occured")
def test_select_csv_object_with_output_delimiters(self):
key = "test_select_csv_object_with_output_delimiters"
content = "abc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'OutputRecordDelimiter':'\r\n', 'OutputFieldDelimiter':'|'}
result = self.bucket.select_object(key, "select _1, _2 from ossobject", None, select_params)
content = b''
for chunk in result:
content += chunk
self.assertEqual(content, 'abc|def\r\n'.encode('utf-8'))
def test_select_csv_object_with_crc(self):
key = "test_select_csv_object_with_crc"
content = "abc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'EnablePayloadCrc':True}
result = self.bucket.select_object(key, "select * from ossobject where true", None, select_params)
content = result.read()
self.assertEqual(content, 'abc,def\n'.encode('utf-8'))
def test_select_csv_object_with_skip_partial_data(self):
key = "test_select_csv_object_with_skip_partial_data"
content = "abc,def\nefg\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'SkipPartialDataRecord':'true'}
result = self.bucket.select_object(key, "select _1, _2 from ossobject", None, select_params)
content = b''
try:
for chunk in result:
content += chunk
self.assertFalse("expected error did not occur")
except SelectOperationFailed:
print("expected error occurs")
self.assertEqual(content, 'abc,def\n'.encode('utf-8'))
def test_select_csv_object_with_max_partial_data(self):
key = "test_select_csv_object_with_skip_partial_data"
content = "abc,def\nefg\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'SkipPartialDataRecord':'true', "MaxSkippedRecordsAllowed":100}
result = self.bucket.select_object(key, "select _1, _2 from ossobject", None, select_params)
content = b''
for chunk in result:
content += chunk
self.assertEqual(content, 'abc,def\n'.encode('utf-8'))
def test_select_csv_object_with_output_raw(self):
key = "test_select_csv_object_with_output_raw"
content = "abc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'OutputRawData':'true'}
result = self.bucket.select_object(key, "select _1 from ossobject", None, select_params)
content = b''
for chunk in result:
content += chunk
self.assertEqual(content, 'abc\n'.encode('utf-8'))
def test_select_csv_object_with_keep_columns(self):
key = "test_select_csv_object_with_keep_columns"
content = "abc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'KeepAllColumns':'true'}
result = self.bucket.select_object(key, "select _1 from ossobject", None, select_params)
content = b''
for chunk in result:
content += chunk
self.assertEqual(content, 'abc,\n'.encode('utf-8'))
def test_select_csv_object_with_output_header(self):
key = "test_select_csv_object_with_output_header"
content = "name,job\nabc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'OutputHeader':'true', 'CsvHeaderInfo':'Use'}
result = self.bucket.select_object(key, "select name from ossobject", None, select_params)
content = b''
for chunk in result:
content += chunk
self.assertEqual(content, 'name\nabc\n'.encode('utf-8'))
def test_select_csv_object_with_invalid_parameters(self):
key = "test_select_csv_object_with_invalid_parameters"
content = "name,job\nabc,def\n"
self.bucket.put_object(key, content.encode('utf_8'))
select_params = {'OutputHeader123':'true', 'CsvHeaderInfo':'Use'}
try:
result = self.bucket.select_object(key, "select name from ossobject", None, select_params)
            self.assertFalse(True, "expected error did not occur")
except SelectOperationClientError:
print("expected error")
def test_select_csv_object_with_bytes_range(self):
key = "test_select_csv_object_with_bytes_range"
content = "test\nabc\n"
self.bucket.put_object(key, content.encode('utf-8'))
select_params = {'AllowQuotedRecordDelimiter':False}
byte_range = [0,1]
result = self.bucket.select_object(key, "select * from ossobject", None, select_params, byte_range)
content = b''
for chunk in result:
content += chunk
self.assertEqual('test\n'.encode('utf-8'), content)
def test_select_csv_object_with_bytes_range_invalid(self):
key = "test_select_csv_object_with_bytes_range"
content = "test\nabc\n"
self.bucket.put_object(key, content.encode('utf-8'))
byte_range = [0,1]
try:
self.bucket.select_object(key, "select * from ossobject", None, None, byte_range)
            self.assertFalse(True, "expected error did not occur")
except ClientError:
print("expected error")
select_params = {'AllowQuotedRecordDelimiter':True}
try:
self.bucket.select_object(key, "select * from ossobject", None, select_params, byte_range)
            self.assertFalse(True, "expected error did not occur")
except ClientError:
print("expected error")
select_params = {'AllowQuotedRecordDelimiter':False, 'Json_Type':'LINES'}
try:
self.bucket.select_object(key, "select * from ossobject", None, select_params, byte_range)
            self.assertFalse(True, "expected error did not occur")
except SelectOperationClientError:
print("expected error")
if __name__ == '__main__':
unittest.main()
|
e152fe2e87e822672513d8e321a4c7f5b1de372f
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/pythran/tests/cases/matrix_class_distance.py
|
8460123345688632332097782370d6d4dd2a11b8
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
matrix_class_distance.py
|
#pythran export matrix_class_distance(float64[:,:], int[], float64[:,:], int)
#from https://stackoverflow.com/questions/59601987
#runas import numpy as np; n = 200;d = 10;iterations = 20;np.random.seed(42);dat = np.random.random((n, d));dat_filter = np.random.randint(0, n, size=n); dat_points = np.random.random((n, d)); matrix_class_distance(dat, dat_filter, dat_points, iterations)
import numpy as np
def matrix_class_distance(dat, dat_filter, dat_points, iterations):
aggregation = 0
for i in range(iterations):
aggregation += np.sum(np.linalg.norm(dat[dat_filter==i] - dat_points[i], axis=1))
return aggregation
|
fe93df2f95062839f19e4380685268f6f87c05f2
|
6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff
|
/inference/generativeai/llm-workshop/lab2-stable-diffusion/option3-triton-mme/models/sd_depth/1/model.py
|
7cbed281d0d542c7353cc35babac2f7b2d1937cd
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aws/amazon-sagemaker-examples
|
8359afe544e873662bda5b8d2b07399c437213c9
|
43dae4b28531cde167598f104f582168b0a4141f
|
refs/heads/main
| 2023-08-26T04:42:52.342776
| 2023-08-25T14:37:19
| 2023-08-25T14:37:19
| 107,937,815
| 4,797
| 3,519
|
Apache-2.0
| 2023-09-14T19:47:03
| 2017-10-23T05:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
model.py
|
import json
import numpy as np
import torch
import triton_python_backend_utils as pb_utils
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers import DDIMScheduler
from io import BytesIO
import base64
from PIL import Image
def decode_image(img):
buff = BytesIO(base64.b64decode(img.encode("utf8")))
image = Image.open(buff)
return image
def encode_images(images):
encoded_images = []
for image in images:
buffer = BytesIO()
image.save(buffer, format="JPEG")
img_str = base64.b64encode(buffer.getvalue())
encoded_images.append(img_str.decode("utf8"))
return encoded_images
class TritonPythonModel:
def initialize(self, args):
self.model_dir = args['model_repository']
self.model_ver = args['model_version']
device='cuda'
self.pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(f'{self.model_dir}/{self.model_ver}/checkpoint',
torch_dtype=torch.float16).to(device)
self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
self.pipe.unet.enable_xformers_memory_efficient_attention()
def execute(self, requests):
logger = pb_utils.Logger
responses = []
for request in requests:
prompt = pb_utils.get_input_tensor_by_name(request, "prompt").as_numpy().item().decode("utf-8")
negative_prompt = pb_utils.get_input_tensor_by_name(request, "negative_prompt")
image = pb_utils.get_input_tensor_by_name(request, "image").as_numpy().item().decode("utf-8")
gen_args = pb_utils.get_input_tensor_by_name(request, "gen_args")
image=decode_image(image)
input_args = dict(prompt=prompt, image=image)
if negative_prompt:
input_args["negative_prompt"] = negative_prompt.as_numpy().item().decode("utf-8")
if gen_args:
gen_args = json.loads(gen_args.as_numpy().item().decode("utf-8"))
input_args.update(gen_args)
images = self.pipe(**input_args).images
encoded_images = encode_images(images)
responses.append(pb_utils.InferenceResponse([pb_utils.Tensor("generated_image", np.array(encoded_images).astype(object))]))
return responses
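# Illustrative note (added): "gen_args" is an optional JSON string merged into the
# pipeline call, e.g. '{"num_inference_steps": 30, "strength": 0.8}' (hypothetical
# values), while the input image and the generated outputs travel as
# base64-encoded strings handled by decode_image()/encode_images().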
|
707c5f37070ab351b7fdeee68ccc92d7a3192bb1
|
e1cddfd754d952134e72dfd03522c5ea4fb6008e
|
/extras/deprecated/perfmon/intel_json_to_c.py
|
4389c86fc38715f56225e8fb746a96b6e882409d
|
[
"Apache-2.0"
] |
permissive
|
FDio/vpp
|
0ad30fa1bec2975ffa6b66b45c9f4f32163123b6
|
f234b0d4626d7e686422cc9dfd25958584f4931e
|
refs/heads/master
| 2023-08-31T16:09:04.068646
| 2022-03-14T09:49:15
| 2023-08-31T09:50:00
| 96,556,718
| 1,048
| 630
|
Apache-2.0
| 2023-06-21T05:39:17
| 2017-07-07T16:29:40
|
C
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
intel_json_to_c.py
|
#!/usr/bin/env python3
import json, argparse
p = argparse.ArgumentParser()
p.add_argument(
"-i", "--input", action="store", help="input JSON file name", required=True
)
p.add_argument(
"-o", "--output", action="store", help="output C file name", required=True
)
p.add_argument(
"-m",
"--model",
action="append",
help="CPU model in format: model[,stepping0]",
required=True,
)
r = p.parse_args()
with open(r.input, "r") as fp:
objects = json.load(fp)
c = open(r.output, "w")
c.write(
"""
#include <perfmon/perfmon_intel.h>
static perfmon_intel_pmc_cpu_model_t cpu_model_table[] = {
"""
)
for v in r.model:
if "," in v:
(m, s) = v.split(",")
m = int(m, 0)
s = int(s, 0)
c.write(" {}0x{:02X}, 0x{:02X}, 1{},\n".format("{", m, s, "}"))
else:
m = int(v, 0)
c.write(" {}0x{:02X}, 0x00, 0{},\n".format("{", m, "}"))
c.write(
"""
};
static perfmon_intel_pmc_event_t event_table[] = {
"""
)
for obj in objects:
MSRIndex = obj["MSRIndex"]
if MSRIndex != "0":
continue
EventCode = obj["EventCode"]
UMask = obj["UMask"]
EventName = obj["EventName"].lower()
if "," in EventCode:
continue
c.write(" {\n")
c.write(" .event_code = {}{}{},\n".format("{", EventCode, "}"))
c.write(" .umask = {},\n".format(UMask))
c.write(' .event_name = "{}",\n'.format(EventName))
c.write(" },\n")
c.write(
""" {
.event_name = 0,
},
};
PERFMON_REGISTER_INTEL_PMC (cpu_model_table, event_table);
"""
)
c.close()
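# Illustrative invocation (added; the file names and model values are hypothetical):
#
#   ./intel_json_to_c.py -i skylakex_core.json -o events.c -m 0x55,0x4 -m 0x55
#
# "-m 0x55,0x4" emits a cpu_model_table entry {0x55, 0x04, 1} (model plus stepping),
# while "-m 0x55" emits {0x55, 0x00, 0} (model only).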
|
e8743925d1c1e409a85bde6ae5fc34b94d3c1af8
|
896567f9ccb99b3d545c9499745fd24a673f1752
|
/tests/test_optimizealpha.py
|
32901ca69231c9536e5d5c4844ef2ef5122aa8a1
|
[
"MIT"
] |
permissive
|
bellockk/alphashape
|
372e3712c82af71c73e55c7aa3e88653caeab51a
|
671476ecfdf8c0461613d51b2fd9376d4107a03b
|
refs/heads/master
| 2023-07-23T23:57:08.191296
| 2023-04-01T21:44:47
| 2023-04-01T21:44:47
| 183,085,167
| 184
| 30
|
MIT
| 2023-07-06T22:14:17
| 2019-04-23T19:45:03
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
test_optimizealpha.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `alphashape` package."""
import unittest
from alphashape import optimizealpha
class TestOptimizeAlpha(unittest.TestCase):
"""Tests for `alphashape` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_given_a_point_return_a_point(self):
"""
        Optimizing alpha over this small set of points should yield a value between 3 and 3.5.
"""
alpha = optimizealpha(
[(0., 0.), (0., 1.), (1., 1.), (1., 0.),
(0.5, 0.25), (0.5, 0.75), (0.25, 0.5), (0.75, 0.5)])
assert alpha > 3. and alpha < 3.5
|
eb90663ecb8eafb9c353981de75b1139df7b53d1
|
3daf74bdadb46f4aa18918f1b6938c714b331723
|
/poco/drivers/unity3d/test/tutorial/wait_any_ui.py
|
a1eeb4efa1939ceaed179b47d3e16c9e858ab20e
|
[
"Apache-2.0"
] |
permissive
|
AirtestProject/Poco
|
d173b465edefbae72f02bb11d60edfa5af8d4ec4
|
65c2c5be0c0c1de680eedf34ac18ae065c5408ee
|
refs/heads/master
| 2023-08-15T23:00:11.805669
| 2023-03-29T08:58:41
| 2023-03-29T08:58:41
| 118,706,014
| 1,703
| 312
|
Apache-2.0
| 2023-08-08T10:30:21
| 2018-01-24T03:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
wait_any_ui.py
|
# coding=utf-8
import time
from poco.drivers.unity3d.test.tutorial.case import TutorialCase
class WaitAnyUITutorial(TutorialCase):
def runTest(self):
self.poco(text='wait UI').click()
bomb_count = 0
while True:
blue_fish = self.poco('fish_emitter').child('blue')
yellow_fish = self.poco('fish_emitter').child('yellow')
bomb = self.poco('fish_emitter').child('bomb')
fish = self.poco.wait_for_any([blue_fish, yellow_fish, bomb])
if fish is bomb:
bomb_count += 1
if bomb_count > 3:
return
else:
fish.click()
time.sleep(2.5)
if __name__ == '__main__':
import pocounit
pocounit.main()
|
ba50bd85a9301d36b0a96a7102c9b07d83f8c265
|
4db7f072a2283f67ab98d6971c1d8d51943de100
|
/LoanBroker/AwsStepFunctions/PubSub/GetAggregate.py
|
4d47a9da1fa9eb1ad5a903aa60aed9768c48e25e
|
[
"MIT"
] |
permissive
|
spac3lord/eip
|
5ec381bb33735f1022e2d7d52cbe569d25f712f9
|
09d1e9371fb13fae4684506c778e6ca6b196c5d0
|
refs/heads/master
| 2023-03-18T10:24:43.374748
| 2023-03-11T17:01:02
| 2023-03-11T17:01:02
| 77,563,775
| 127
| 26
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
GetAggregate.py
|
import boto3
import json
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamo = boto3.resource('dynamodb')
def lambda_handler(event, context):
logger.info(event)
key = event['Id']
table = dynamo.Table('MortgageQuotes')
record = table.get_item(Key={'Id': key }, ConsistentRead=True)
if 'Item' in record:
return {'Quotes' : record['Item']['Quotes'] }
else:
logger.info("No aggregate for key %s" % key)
return {'Quotes' : [] }
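# Illustrative invocation (added; the id and table contents are hypothetical):
#   lambda_handler({"Id": "quote-123"}, None)
# returns {"Quotes": [...]} when an aggregate item exists for the key,
# and {"Quotes": []} otherwise.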
|
86bf480a1646ee5ee8386cb2307da4042bc3bdc0
|
0b98732dcd3dd94a97555a8f3e8dd3524bb8ec86
|
/mmdet/datasets/__init__.py
|
4ed5439a0dcf70553d97c8ca677744c8d470bb32
|
[
"Apache-2.0"
] |
permissive
|
hasanirtiza/Pedestron
|
e89fea2ec676f150a7266f6b65963dd6c4ec35c9
|
8ab23ec38982cfaf0ae82c77c30f10b2fff62d12
|
refs/heads/master
| 2023-08-06T02:53:06.368937
| 2023-04-06T13:46:27
| 2023-04-06T13:46:27
| 247,410,025
| 723
| 161
|
Apache-2.0
| 2022-10-02T10:17:44
| 2020-03-15T05:52:52
|
Python
|
UTF-8
|
Python
| false
| false
| 829
|
py
|
__init__.py
|
from .custom import CustomDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
from .utils import to_tensor, random_scale, show_ann
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .extra_aug import ExtraAugmentation
from .registry import DATASETS
from .builder import build_dataset
from .coco_csp_ori import CocoCSPORIDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset',
'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'ConcatDataset', 'RepeatDataset', 'ExtraAugmentation',
'WIDERFaceDataset', 'DATASETS', 'build_dataset', 'CocoCSPORIDataset'
]
|
35e2d14e1e2a24a04cf12a6f4188c7a06e190bea
|
c9c68211f8d76470a14a7b40b28e7997b2e34d68
|
/renpy/text/extras.py
|
90b53fb12e10f71628228f416a317972474b85bd
|
[] |
no_license
|
renpy/renpy
|
16e0526b76714ba1fc08c05bdd7417db5f2eeb8c
|
517e12d3e5812b1849fb803f35727124d9cb75f1
|
refs/heads/master
| 2023-08-31T10:04:53.943394
| 2023-08-31T08:30:38
| 2023-08-31T08:30:38
| 4,815,490
| 4,094
| 940
| null | 2023-09-14T01:10:38
| 2012-06-28T01:21:22
|
Ren'Py
|
UTF-8
|
Python
| false
| false
| 13,925
|
py
|
extras.py
|
# Copyright 2004-2023 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Other text-related things.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import PY2, basestring, bchr, bord, chr, open, pystr, range, round, str, tobytes, unicode # *
import renpy
from renpy.text.textsupport import DISPLAYABLE, PARAGRAPH, TAG
import renpy.text.textsupport as textsupport
# A list of text tags, mapping from the text tag prefix to if it
# requires a closing tag.
text_tags = dict(
alpha=True,
alt=True,
art=True,
done=False,
image=False,
p=False,
w=False,
fast=False,
b=True,
i=True,
u=True,
a=True,
plain=True,
font=True,
color=True,
outlinecolor=True,
size=True,
noalt=True,
nw=False,
s=True,
rt=True,
rb=True,
k=True,
cps=True,
space=False,
vspace=False
)
text_tags[""] = True
# This checks the text tags in a string to be sure they are all matched, and
# properly nested. It returns an error message, or None if the line is okay.
def check_text_tags(s):
"""
:doc: lint
Checks the text tags in s for correctness. Returns an error string if there is
an error, or None if there is no error.
"""
all_tags = dict(text_tags)
custom_tags = renpy.config.custom_text_tags
if custom_tags:
all_tags.update(custom_tags)
self_closing_custom_tags = renpy.config.self_closing_custom_text_tags
if self_closing_custom_tags:
all_tags.update(dict.fromkeys(self_closing_custom_tags, False))
try:
tokens = textsupport.tokenize(str(s))
except Exception as e:
return e.args[0]
tag_stack = [ ]
for type, text in tokens: # @ReservedAssignment
if type != TAG:
continue
if text[0] == "#":
continue
# Strip off arguments for tags.
if text.find('=') != -1:
text = text[:text.find('=')]
# Closing tag.
if text and text[0] == '/':
if not tag_stack:
return "Close text tag '{%s}' does not match an open text tag." % text
if tag_stack[-1] != text[1:]:
return "Close text tag '{%s}' does not match open text tag '{%s}'." % (text, tag_stack[-1])
tag_stack.pop()
continue
if text not in all_tags:
return "Text tag '%s' is not known." % text
if all_tags[text]:
tag_stack.append(text)
return None
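# Illustrative examples (added, not part of the original module):
#   check_text_tags("{b}bold{/b} and {i}italic{/i}")   # -> None (well formed)
#   check_text_tags("{b}bold{/i}")                     # -> error string (mismatched close tag)
#   check_text_tags("{unknown}x{/unknown}")            # -> error string (unknown text tag)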
def filter_text_tags(s, allow=None, deny=None):
"""
:doc: text_utility
Returns a copy of `s` with the text tags filtered. Exactly one of the `allow` and `deny` keyword
arguments must be given.
`allow`
A set of tags that are allowed. If a tag is not in this list, it is removed.
`deny`
A set of tags that are denied. If a tag is not in this list, it is kept in the string.
"""
if (allow is None) and (deny is None):
raise Exception("Only one of the allow and deny keyword arguments should be given to filter_text_tags.")
if (allow is not None) and (deny is not None):
raise Exception("Only one of the allow and deny keyword arguments should be given to filter_text_tags.")
tokens = textsupport.tokenize(str(s))
rv = [ ]
for tokentype, text in tokens:
if tokentype == PARAGRAPH:
rv.append("\n")
elif tokentype == TAG:
kind = text.partition("=")[0]
if kind and (kind[0] == "/"):
kind = kind[1:]
if allow is not None:
if kind in allow:
rv.append("{" + text + "}")
else:
if kind not in deny:
rv.append("{" + text + "}")
else:
rv.append(text.replace("{", "{{"))
return "".join(rv)
def filter_alt_text(s):
"""
Returns a copy of `s` with the contents of text tags that shouldn't be in
alt text filtered. This returns just the text to say, with no text tags
at all in it.
"""
tokens = textsupport.tokenize(str(s))
if renpy.config.custom_text_tags or renpy.config.self_closing_custom_text_tags or (renpy.config.replace_text is not None):
tokens = renpy.text.text.Text.apply_custom_tags(tokens)
rv = [ ]
active = set()
for tokentype, text in tokens:
if tokentype == PARAGRAPH:
rv.append("\n")
elif tokentype == TAG:
kind = text.partition("=")[0]
if kind.startswith("/"):
kind = kind[1:]
end = True
else:
end = False
if kind in renpy.config.tts_filter_tags:
if end:
active.discard(kind)
else:
active.add(kind)
elif tokentype == DISPLAYABLE:
rv.append(text._tts())
else:
if not active:
rv.append(text)
return "".join(rv)
class ParameterizedText(object):
"""
:name: ParameterizedText
:doc: text
This is a displayable that can be shown with an additional string
parameter, which then shows that string as if it was an image.
This is usually used as part of the pre-defined ``text`` image.
For example, one can do::
show text "Hello, World" at truecenter
with dissolve
pause 1
hide text
with dissolve
You can use ParameterizedText directly to define similar images with
different style properties. For example, one can write::
image top_text = ParameterizedText(xalign=0.5, yalign=0.0)
label start:
show top_text "This text is shown at the center-top of the screen"
"""
def __init__(self, style='default', **properties):
self.style = style
self.properties = properties
_duplicatable = True
def _duplicate(self, args):
if args.lint:
return renpy.text.text.Text("", style=self.style, **self.properties)
if len(args.args) == 0:
raise Exception("'%s' takes a single string parameter." % ' '.join(args.name))
param = "".join(args.args)
string = renpy.python.py_eval(param)
return renpy.text.text.Text(string, style=self.style, **self.properties)
def textwrap(s, width=78, asian=False):
"""
Wraps the unicode string `s`, and returns a list of strings.
`width`
The number of half-width characters that fit on a line.
`asian`
True if we should make ambiguous width characters full-width, as is
done in Asian encodings.
"""
import unicodedata
glyphs = [ ]
for c in str(s):
eaw = unicodedata.east_asian_width(c)
if (eaw == "F") or (eaw == "W"):
gwidth = 20
elif (eaw == "A"):
if asian:
gwidth = 20
else:
gwidth = 10
else:
gwidth = 10
g = textsupport.Glyph()
g.character = ord(c)
g.ascent = 10
g.line_spacing = 10
g.width = gwidth
g.advance = gwidth
glyphs.append(g)
textsupport.annotate_unicode(glyphs, False, 2)
renpy.text.texwrap.linebreak_tex(glyphs, width * 10, width * 10, False)
return textsupport.linebreak_list(glyphs)
def thaic90(s):
"""
Reencodes `s` to the Thai C90 encoding, which is used by Thai-specific
    fonts to combine base characters, upper vowels, lower vowels, and tone marks
    into single precomposed characters in the Unicode private use area.
"""
# Copyright (c) 2021 SahabandhSthabara, Saamkhaih Kyakya
# MIT License.
# Taken from https://gitlab.com/sahabandha/renpy-thai-font-adjuster/-/blob/main/renpythaic90.py
# http://www.bakoma-tex.com/doc/fonts/enc/c90/c90.pdf
# ========== EXTENDED CHARACTER TABLE ==========
# F700: uni0E10.descless (base.descless)
# F701~04: uni0E34~37.left (upper.left)
# F705~09: uni0E48~4C.lowleft (top.lowleft)
# F70A~0E: uni0E48~4C.low (top.low)
# F70F: uni0E0D.descless (base.descless)
# F710~12: uni0E31,4D,47.left (upper.left)
# F713~17: uni0E48~4C.left (top.left)
# F718~1A: uni0E38~3A.low (lower.low)
# ==============================================
def isBase(c):
return (u'\u0E01' <= c <= u'\u0E30') or c == u"\u0E30" or c == u"\u0E40" or c == u"\u0E41"
def isBaseAsc(c):
return c == u'\u0E1B' or c == u'\u0E1D' or c == u'\u0E1F' or c == u'\u0E2C'
def isBaseDesc(c):
return c == u'\u0E0E' or c == u'\u0E0F'
def isTop(c):
# Tone Mark, THANTHAKHAT
if u"\u0E48" <= c <= u"\u0E4C":
return True
def isLower(c):
#SARA U, SARA UU, PHINTHU
return c >= u"\u0E38" and c <= u"\u0E3A"
def isUpper(c):
return c == u'\u0E31' or c == u'\u0E34' or c == u'\u0E35' or c == u'\u0E36' or c == u'\u0E37' or c == u'\u0E47' or c == u'\u0E4D'
rv = [ ]
# [sara am] -> [nikhahit] [sara aa]
s = s.replace(u"\u0E33", u"\u0E4D\u0E32")
s = s.replace(u"\u0E48\u0E4D", u"\u0E4D\u0E48")
s = s.replace(u"\u0E49\u0E4D", u"\u0E4D\u0E49")
s = s.replace(u"\u0E4A\u0E4D", u"\u0E4D\u0E4A")
s = s.replace(u"\u0E4B\u0E4D", u"\u0E4D\u0E4B")
s = s.replace(u"\u0E4C\u0E4D", u"\u0E4D\u0E4C")
length = len(s)
for z in range(length):
c = s[z]
# [base] ~ [top]
if isTop(c) and z > 0:
# [base] [top] -> [base] [top.low]
# [base] [lower] [top] -> [base] [lower] [top.low]
# [base.asc] [top] -> [base.asc] [top.lowleft]
# [base.asc] [lower] [top] -> [base.asc] [lower] [top.lowleft]
b = s[z - 1];
if isLower(b) and z > 0:
b = s[z -2]
if isBase(b):
Nikhahit = (z < length - 1 and (s[z + 1] == u'\u0E33' or s[z + 1] == u'\u0E4D'))
if isBaseAsc(b):
if Nikhahit:
# [base.asc] [nikhahit] [top] -> [base.asc] [nikhahit] [top.left]
choices = {
u'\u0E48': u'\uF713',
u'\u0E49': u'\uF714',
u'\u0E4A': u'\uF715',
u'\u0E4B': u'\uF716',
u'\u0E4C': u'\uF717'
}
c = choices.get(c, 'error')
else:
choices = {
u'\u0E48': u'\uF705',
u'\u0E49': u'\uF706',
u'\u0E4A': u'\uF707',
u'\u0E4B': u'\uF708',
u'\u0E4C': u'\uF709'
}
c = choices.get(c, 'error')
else:
if Nikhahit == False:
choices = {
u'\u0E48': u'\uF70A',
u'\u0E49': u'\uF70B',
u'\u0E4A': u'\uF70C',
u'\u0E4B': u'\uF70D',
u'\u0E4C': u'\uF70E'
}
c = choices.get(c, 'error')
# [base.asc] [upper] [top] -> [base.asc] [upper] [top.left]
if (z > 1 and isUpper(s[z -1]) and isBaseAsc(s[z - 2])):
choices = {
u'\u0E48': u'\uF713',
u'\u0E49': u'\uF714',
u'\u0E4A': u'\uF715',
u'\u0E4B': u'\uF716',
u'\u0E4C': u'\uF717'
}
c = choices.get(c, 'error')
# [base.asc] [upper] -> [base.asc] [upper-left]
elif (isUpper(c)and z > 0 and isBaseAsc(s[z -1])):
choices = {
u'\u0E31': u'\uF710',
u'\u0E34': u'\uF701',
u'\u0E35': u'\uF702',
u'\u0E36': u'\uF703',
u'\u0E37': u'\uF704',
u'\u0E4D': u'\uF711',
u'\u0E47': u'\uF712'
}
c = choices.get(c, 'error')
elif (isLower(c) and z > 0 and isBaseDesc(s[z -1])):
choices = {
u'\u0E38': u'\uF718',
u'\u0E39': u'\uF719',
u'\u0E3A': u'\uF71A'
}
c = choices.get(c, 'error')
elif (c == u'\u0E0D' and z < length -1 and isLower(s[z + 1])):
c = u'\uF70F'
elif (c == u'\u0E10' and z < length -1 and isLower(s[z + 1])):
c = u'\uF700'
else:
c = s[z]
rv.append(c)
return u''.join(rv)
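# Illustrative example (added): when no ascender/descender base is involved, only
# the SARA AM pre-processing applies, decomposing it and reordering the tone mark:
#   thaic90(u"\u0E19\u0E49\u0E33")   # -> u"\u0E19\u0E4D\u0E49\u0E32"
# (SARA AM becomes NIKHAHIT + SARA AA, and the tone mark moves after the NIKHAHIT).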
|
a51ac27ac078cd49f3939c219f3b5c1293e18c66
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/recover-a-tree-from-preorder-traversal.py
|
b7377bed56a790da73d6056b9002542f5c6e008a
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
recover-a-tree-from-preorder-traversal.py
|
# Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# iterative stack solution
class Solution(object):
def recoverFromPreorder(self, S):
"""
:type S: str
:rtype: TreeNode
"""
i = 0
stack = []
while i < len(S):
level = 0
while i < len(S) and S[i] == '-':
level += 1
i += 1
while len(stack) > level:
stack.pop()
val = []
while i < len(S) and S[i] != '-':
val.append(S[i])
i += 1
node = TreeNode(int("".join(val)))
if stack:
if stack[-1].left is None:
stack[-1].left = node
else:
stack[-1].right = node
stack.append(node)
return stack[0]
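# Illustrative check (added): the example from the problem statement,
# "1-2--3--4-5--6--7", decodes to the tree rooted at 1 with children 2 and 5
# and grandchildren (3, 4) and (6, 7):
#   root = Solution().recoverFromPreorder("1-2--3--4-5--6--7")
#   assert (root.val, root.left.val, root.right.val) == (1, 2, 5)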
# Time: O(n)
# Space: O(h)
# recursive solution
class Solution2(object):
def recoverFromPreorder(self, S):
"""
:type S: str
:rtype: TreeNode
"""
def recoverFromPreorderHelper(S, level, i):
j = i[0]
while j < len(S) and S[j] == '-':
j += 1
if level != j - i[0]:
return None
i[0] = j
while j < len(S) and S[j] != '-':
j += 1
node = TreeNode(int(S[i[0]:j]))
i[0] = j
node.left = recoverFromPreorderHelper(S, level+1, i)
node.right = recoverFromPreorderHelper(S, level+1, i)
return node
return recoverFromPreorderHelper(S, 0, [0])
|
b73e0e2290ba575d6c3a395cb8ce715694f382c1
|
f26d8a6b2a0dc9b29f944f15577586bfe3bb9b60
|
/cvnn/losses.py
|
ac03df93b9206d3328a53afd4459692d8763cfa0
|
[
"MIT"
] |
permissive
|
NEGU93/cvnn
|
4606ea6bc84621231781c61603dd7c9855a6ce64
|
c8c9b3cc696388c8ec6e7f23092e223e0fe88de4
|
refs/heads/master
| 2023-08-04T12:40:22.288311
| 2023-07-31T09:47:07
| 2023-07-31T09:47:07
| 296,050,056
| 109
| 24
|
MIT
| 2023-08-10T16:53:08
| 2020-09-16T14:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
losses.py
|
import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras.losses import Loss, categorical_crossentropy
class ComplexAverageCrossEntropy(Loss):
def call(self, y_true, y_pred):
real_loss = categorical_crossentropy(y_true, tf.math.real(y_pred))
if y_pred.dtype.is_complex:
imag_loss = categorical_crossentropy(y_true, tf.math.imag(y_pred))
else:
imag_loss = real_loss
return (real_loss + imag_loss) / 2.
class ComplexAverageCrossEntropyIgnoreUnlabeled(ComplexAverageCrossEntropy):
def call(self, y_true, y_pred):
mask = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)
y_true = tf.boolean_mask(y_true, mask)
y_pred = tf.boolean_mask(y_pred, mask)
return super(ComplexAverageCrossEntropyIgnoreUnlabeled, self).call(y_true, y_pred)
class ComplexMeanSquareError(Loss):
def call(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true)
if y_pred.dtype.is_complex and not y_true.dtype.is_complex: # Complex pred but real true
y_true = tf.complex(y_true, y_true)
y_true = tf.cast(y_true, y_pred.dtype)
return tf.cast(backend.mean(tf.math.square(tf.math.abs(y_true - y_pred)), axis=-1),
dtype=y_pred.dtype.real_dtype)
class ComplexWeightedAverageCrossEntropy(ComplexAverageCrossEntropy):
def __init__(self, weights, **kwargs):
self.class_weights = weights
super(ComplexWeightedAverageCrossEntropy, self).__init__(**kwargs)
def call(self, y_true, y_pred):
# https://stackoverflow.com/questions/44560549/unbalanced-data-and-weighted-cross-entropy
weights = tf.reduce_sum(self.class_weights * y_true, axis=-1)
unweighted_losses = super(ComplexWeightedAverageCrossEntropy, self).call(y_true, y_pred)
weighted_losses = unweighted_losses * tf.cast(weights, dtype=unweighted_losses.dtype)
return weighted_losses
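# Illustrative usage (added; the weight values are hypothetical):
#   loss_fn = ComplexWeightedAverageCrossEntropy(weights=[1.0, 2.0, 0.5])
# scales each sample's average cross-entropy by the weight of its true (one-hot) class.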
class ComplexWeightedAverageCrossEntropyIgnoreUnlabeled(ComplexAverageCrossEntropy):
def __init__(self, weights, **kwargs):
self.class_weights = weights
super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).__init__(**kwargs)
def call(self, y_true, y_pred):
mask = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)
y_true = tf.boolean_mask(y_true, mask)
y_pred = tf.boolean_mask(y_pred, mask)
return super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).call(y_true, y_pred)
if __name__ == "__main__":
import numpy as np
y_true = np.random.randint(0, 2, size=(2, 3)).astype("float32")
y_pred = tf.complex(np.random.random(size=(2, 3)).astype("float32"),
np.random.random(size=(2, 3)).astype("float32"))
loss = ComplexMeanSquareError().call(y_true, y_pred)
expected_loss = np.mean(np.square(np.abs(tf.complex(y_true, y_true) - y_pred)), axis=-1)
# import pdb; pdb.set_trace()
assert np.all(loss == expected_loss)
|
b25de04eecbdd2f9960d592c745f05a09270dbc1
|
2ed368cf8a045494759fa4f347564cb383e71a66
|
/setup.py
|
833e5de170a4d3c180dbbf4a38ce6693cecc7870
|
[
"MIT"
] |
permissive
|
better/convoys
|
5cabf13db64059de40b2c3c7bb3f10d53faa7e5b
|
c37536b582547408ad9276b99bc5622f456a69ba
|
refs/heads/master
| 2023-04-16T01:56:33.754660
| 2021-10-26T20:52:18
| 2021-10-26T20:52:18
| 114,835,637
| 249
| 43
|
MIT
| 2023-04-07T10:24:54
| 2017-12-20T02:53:00
|
Python
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
setup.py
|
from setuptools import setup, find_packages
long_description = '''
For more information, see
`the package documentation <https://better.engineering/convoys>`_
or
`the Github project page <https://github.com/better/convoys>`_.
.. image:: https://better.engineering/convoys/_images/dob-violations-combined.png
'''
setup(name='convoys',
version='0.2.1',
description='Fit machine learning models to predict conversion using Weibull and Gamma distributions',
long_description=long_description,
url='https://better.engineering/convoys',
license='MIT',
author='Erik Bernhardsson',
author_email='erikbern@better.com',
packages=find_packages(),
install_requires=[
'autograd',
'autograd-gamma>=0.2.0',
'deprecated',
'emcee>=3.0.0',
'matplotlib>=2.0.0',
'pandas>=0.24.0',
'progressbar2>=3.46.1',
'numpy',
'scipy',
])
|
c2215d0b5a1db2bde6c8280184cad325651ef1ca
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/common/tooltips/selected_rewards_tooltip_category_model.py
|
51ba9eb957cad50be8d7ea26ed3f050aab96af28
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
selected_rewards_tooltip_category_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/common/tooltips/selected_rewards_tooltip_category_model.py
from frameworks.wulf import Array
from frameworks.wulf import ViewModel
from gui.impl.gen.view_models.views.lobby.common.tooltips.selected_rewards_tooltip_reward_model import SelectedRewardsTooltipRewardModel
class SelectedRewardsTooltipCategoryModel(ViewModel):
__slots__ = ()
def __init__(self, properties=2, commands=0):
super(SelectedRewardsTooltipCategoryModel, self).__init__(properties=properties, commands=commands)
def getType(self):
return self._getString(0)
def setType(self, value):
self._setString(0, value)
def getRewards(self):
return self._getArray(1)
def setRewards(self, value):
self._setArray(1, value)
@staticmethod
def getRewardsType():
return SelectedRewardsTooltipRewardModel
def _initialize(self):
super(SelectedRewardsTooltipCategoryModel, self)._initialize()
self._addStringProperty('type', '')
self._addArrayProperty('rewards', Array())
|
af9de77b7a43ee6382d8f81bd7adf9d8c42f192a
|
8de1480d6511ac81c43ebb1fa50875adb1505c3b
|
/awxkit/awxkit/api/pages/teams.py
|
f5614a0c25f4cc1d0239e423c13b03ecd797ace2
|
[
"Apache-2.0"
] |
permissive
|
ansible/awx
|
bbbb0f3f43835a37fbb3eb3dcd7cfe98116fbbba
|
5e105c2cbd3fe828160540b3043cf6f605ed26be
|
refs/heads/devel
| 2023-08-31T11:45:01.446444
| 2023-08-31T04:58:57
| 2023-08-31T04:58:57
| 91,594,105
| 13,353
| 4,186
|
NOASSERTION
| 2023-09-14T20:20:07
| 2017-05-17T15:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
teams.py
|
from contextlib import suppress
from awxkit.api.mixins import HasCreate, DSAdapter
from awxkit.utils import random_title, PseudoNamespace
from awxkit.api.resources import resources
from awxkit.api.pages import Organization
from awxkit.exceptions import NoContent
from . import base
from . import page
class Team(HasCreate, base.Base):
dependencies = [Organization]
NATURAL_KEY = ('organization', 'name')
def add_user(self, user):
if isinstance(user, page.Page):
user = user.json
with suppress(NoContent):
self.related.users.post(user)
def payload(self, organization, **kwargs):
payload = PseudoNamespace(
name=kwargs.get('name') or 'Team - {}'.format(random_title()),
description=kwargs.get('description') or random_title(10),
organization=organization.id,
)
return payload
def create_payload(self, name='', description='', organization=Organization, **kwargs):
self.create_and_update_dependencies(organization)
payload = self.payload(organization=self.ds.organization, name=name, description=description, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, name='', description='', organization=Organization, **kwargs):
payload = self.create_payload(name=name, description=description, organization=organization, **kwargs)
return self.update_identity(Teams(self.connection).post(payload))
page.register_page([resources.team, (resources.teams, 'post')], Team)
class Teams(page.PageList, Team):
pass
page.register_page([resources.teams, resources.credential_owner_teams, resources.related_teams], Teams)
|
0ac7ca883245779533e3b2f605dd7abb7ef1a0fc
|
2cfe6bb4a509c33c32ffde9edbc0f992bc6218ad
|
/pyverilog/utils/identifiervisitor.py
|
2f906efdc14544c662d96cf991c179f0b3e288ac
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PyHDI/Pyverilog
|
7b0c642f58b809b106c689fbfa51f337fda956c2
|
81838bc463d17148ef6872af34eb27585ee349ba
|
refs/heads/develop
| 2023-08-05T23:09:47.258312
| 2022-12-23T14:16:24
| 2022-12-23T14:16:24
| 14,871,471
| 527
| 161
|
Apache-2.0
| 2023-05-15T12:05:26
| 2013-12-02T18:53:03
|
Python
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
identifiervisitor.py
|
# -------------------------------------------------------------------------------
# identifiervisitor.py
#
# Identifier list generator in a nested operator
#
# Copyright (C) 2015, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
# -------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
from pyverilog.dataflow.visit import NodeVisitor
def getIdentifiers(node):
v = IdentifierVisitor()
v.visit(node)
ids = v.getIdentifiers()
return ids
class IdentifierVisitor(NodeVisitor):
def __init__(self):
self.identifiers = []
def getIdentifiers(self):
return tuple(self.identifiers)
def reset(self):
self.identifiers = []
def visit_Identifier(self, node):
self.identifiers.append(node.name)
|
8d6ffbac0468692e0c64bc3693862a862eb1e89a
|
ea1845e05c86a85d3e51d6765a0efdd5003db553
|
/flags/tests/test_management_commands_enable_flag.py
|
f2d76527eec87d4415e9e573aa9ebc62b07113ae
|
[
"CC0-1.0"
] |
permissive
|
cfpb/django-flags
|
806a693ddc48ceacf3f3381faab278b965df837b
|
9cfbb6c6a3afd956b740e7d4c74ed589e6a38f5c
|
refs/heads/main
| 2023-08-09T11:30:43.375403
| 2023-07-27T13:00:38
| 2023-07-27T13:00:38
| 137,590,135
| 197
| 31
|
CC0-1.0
| 2023-07-27T13:00:39
| 2018-06-16T15:23:55
|
Python
|
UTF-8
|
Python
| false
| false
| 672
|
py
|
test_management_commands_enable_flag.py
|
from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from flags.state import flag_enabled
class EnableFlagTestCase(TestCase):
def test_enable_flag(self):
out = StringIO()
self.assertFalse(flag_enabled("DB_FLAG"))
call_command("enable_flag", "DB_FLAG", stdout=out)
self.assertTrue(flag_enabled("DB_FLAG"))
self.assertIn("Successfully enabled", out.getvalue())
def test_enable_flag_non_existent_flag(self):
with self.assertRaises(CommandError):
call_command("enable_flag", "FLAG_DOES_NOT_EXIST")
|
cbd580cc6104e498e1eaf4fe373abc4ba5971d88
|
aa409b9d02d6266cb53d293623640deb82f5ba99
|
/examples/task_image_caption.py
|
82215e94e0f28fa5ffdcf5f4b447064dbd5ff187
|
[
"Apache-2.0"
] |
permissive
|
bojone/bert4keras
|
b674fd42538904e33efa61908edae961daf8f15f
|
2072f06dd410ea885a9c6850ba539effce50b22b
|
refs/heads/master
| 2023-08-27T07:35:15.503757
| 2023-07-20T13:42:17
| 2023-07-20T13:42:17
| 204,388,414
| 5,421
| 1,060
|
Apache-2.0
| 2022-11-23T02:43:26
| 2019-08-26T03:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 8,047
|
py
|
task_image_caption.py
|
#! -*- coding: utf-8 -*-
# BERT for the image caption task on the COCO dataset
# Conditional information is injected via Conditional Layer Normalization
# Reference: https://kexue.fm/archives/7124
from __future__ import print_function
import json
import numpy as np
from bert4keras.backend import keras, K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, is_string
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder
from keras.models import Model
import cv2
# Model configuration
maxlen = 64
batch_size = 32
steps_per_epoch = 1000
epochs = 10000
# BERT configuration
config_path = '/root/kg/bert/uncased_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/uncased_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/uncased_L-12_H-768_A-12/vocab.txt'
# Load and simplify the vocabulary, then build the tokenizer
token_dict, keep_tokens = load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
def read_caption(f):
"""读取并整理COCO的Caption数据
"""
data = json.load(open(f))
images = {}
for img in data['images']:
images[img['id']] = {
'image_id': img['file_name'],
'caption': [],
'url': img['coco_url']
}
for caption in data['annotations']:
images[caption['image_id']]['caption'].append(caption['caption'])
return list(images.values())
def read_image(f):
"""单图读取函数(对非方形的图片进行白色填充,使其变为方形)
"""
img = cv2.imread(f)
height, width = img.shape[:2]
if height > width:
height, width = img_size, width * img_size // height
img = cv2.resize(img, (width, height))
delta = (height - width) // 2
img = cv2.copyMakeBorder(
img,
top=0,
bottom=0,
left=delta,
right=height - width - delta,
borderType=cv2.BORDER_CONSTANT,
value=[255, 255, 255]
)
else:
height, width = height * img_size // width, img_size
img = cv2.resize(img, (width, height))
delta = (width - height) // 2
img = cv2.copyMakeBorder(
img,
top=delta,
bottom=width - height - delta,
left=0,
right=0,
borderType=cv2.BORDER_CONSTANT,
value=[255, 255, 255]
)
img = img.astype('float32')
    return img[..., ::-1]  # cv2 reads images as BGR, but the keras model expects RGB
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_images, batch_token_ids, batch_segment_ids = [], [], []
for is_end, D in self.sample(random):
img = '/root/caption/coco/train2014/%s' % D['image_id']
caption = np.random.choice(D['caption'])
token_ids, segment_ids = tokenizer.encode(caption, maxlen=maxlen)
batch_images.append(read_image(img))
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
if len(batch_token_ids) == self.batch_size or is_end:
batch_images = np.array(batch_images)
batch_images = preprocess_input(batch_images)
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
yield [batch_token_ids, batch_segment_ids, batch_images], None
batch_images, batch_token_ids, batch_segment_ids = [], [], []
# Load the data
train_data = read_caption(
'/root/caption/coco/annotations/captions_train2014.json'
)
valid_data = read_caption(
'/root/caption/coco/annotations/captions_val2014.json'
)
class CrossEntropy(Loss):
"""交叉熵作为loss,并mask掉padding部分
"""
def compute_loss(self, inputs, mask=None):
y_true, y_pred = inputs
if mask[1] is None:
y_mask = 1.0
else:
y_mask = K.cast(mask[1], K.floatx())[:, 1:]
        y_true = y_true[:, 1:]  # target token_ids
        y_pred = y_pred[:, :-1]  # predicted sequence, shifted by one position
loss = K.sparse_categorical_crossentropy(y_true, y_pred)
loss = K.sum(loss * y_mask) / K.sum(y_mask)
return loss
# Image model
MobileNetV2 = keras.applications.mobilenet_v2.MobileNetV2
preprocess_input = keras.applications.mobilenet_v2.preprocess_input
image_model = MobileNetV2(include_top=False, pooling='avg')
img_size = 299
# BERT model
model = build_transformer_model(
config_path,
checkpoint_path,
application='lm',
    keep_tokens=keep_tokens,  # keep only the tokens in keep_tokens, trimming the original vocabulary
layer_norm_cond=image_model.output,
layer_norm_cond_hidden_size=128,
layer_norm_cond_hidden_act='swish',
additional_input_layers=image_model.input,
)
output = CrossEntropy(1)([model.inputs[0], model.outputs[0]])
model = Model(model.inputs, output)
model.compile(optimizer=Adam(1e-5))
model.summary()
class AutoCaption(AutoRegressiveDecoder):
"""img2seq解码器
"""
@AutoRegressiveDecoder.wraps(default_rtype='probas')
def predict(self, inputs, output_ids, states):
image = inputs[0]
token_ids = output_ids
segment_ids = np.zeros_like(token_ids)
return self.last_token(model).predict([token_ids, segment_ids, image])
def generate(self, image, topk=1):
if is_string(image):
image = read_image(image)
image = preprocess_input(image)
        output_ids = self.beam_search([image], topk=topk)  # beam search decoding
return tokenizer.decode(output_ids)
autocaption = AutoCaption(
start_id=tokenizer._token_start_id,
end_id=tokenizer._token_end_id,
maxlen=maxlen
)
def just_show():
samples = [valid_data[i] for i in np.random.choice(len(valid_data), 2)]
for D in samples:
img = '/root/caption/coco/val2014/%s' % D['image_id']
print(u'image_id:', D['image_id'])
print(u'url:', D['url'])
print(u'predict:', autocaption.generate(img))
print(u'references:', D['caption'])
print()
class Evaluator(keras.callbacks.Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, epoch, logs=None):
        # Save the best model
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
model.save_weights('./best_model.weights')
        # Show demo predictions
just_show()
if __name__ == '__main__':
evaluator = Evaluator()
train_generator = data_generator(train_data, batch_size)
model.fit(
train_generator.forfit(),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.weights')
"""
image_id: COCO_val2014_000000524611.jpg
url: http://images.cocodataset.org/val2014/COCO_val2014_000000524611.jpg
predict: a train that is sitting on the tracks.
references: [u'A train carrying chemical tanks traveling past a water tower.', u'Dual train tracks with a train on one of them and a water tower in the background.', u'a train some trees and a water tower ', u'Train on tracks with water tower for Davis Junction in the rear.', u'A train on a train track going through a bunch of trees.']
image_id: COCO_val2014_000000202923.jpg
url: http://images.cocodataset.org/val2014/COCO_val2014_000000202923.jpg
predict: a baseball game in progress with the batter up to plate.
references: [u'Batter, catcher, and umpire anticipating the next pitch.', u'A baseball player holding a baseball bat in the game.', u'A baseball player stands ready at the plate.', u'Baseball players on the field ready for the pitch.', u'A view from behind a mesh fence of a baseball game.']
"""
|
5d9802722d94da86e83404b964dda1667d9a8515
|
e22d4b37236a0327f26159856c7d151826bdd49d
|
/leonardo/conf/base.py
|
16a0e4a9f5c0e1763c0d6555e14ec86c77415fe9
|
[
"BSD-2-Clause"
] |
permissive
|
django-leonardo/django-leonardo
|
8fe1fc6299c5e3d3c0bff96cb7e80d48f7b7e1de
|
7d3f116830075f05a8c9a105ae6f7f80f7a6444c
|
refs/heads/master
| 2022-09-29T02:23:14.426984
| 2020-09-08T13:58:46
| 2020-09-08T13:58:46
| 33,533,052
| 108
| 64
|
BSD-3-Clause
| 2022-09-16T19:34:25
| 2015-04-07T09:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,162
|
py
|
base.py
|
import os
from app_loader.config import Config, MasterConfig
from app_loader.utils import merge
from leonardo.conf.spec import CONF_SPEC, DJANGO_CONF
from leonardo.utils.versions import get_versions
class ModuleConfig(Config):
"""Simple Module Config Object
encapsulation of dot access dictionary
use dictionary as constructor
"""
def get_value(self, key, values):
        '''Accept key of property and actual values'''
return merge(values, self.get_property(key))
def get_property(self, key):
"""Expect Django Conf property"""
_key = DJANGO_CONF[key]
return getattr(self, _key, CONF_SPEC[_key])
@property
def module_name(self):
"""Module name from module if is set"""
if hasattr(self, "module"):
return self.module.__name__
return None
@property
def name(self):
"""Distribution name from module if is set"""
if hasattr(self, "module"):
return self.module.__name__.replace('_', '-')
return None
@property
def version(self):
"""return module version"""
return get_versions([self.module_name]).get(self.module_name, None)
@property
def latest_version(self):
"""return latest version if is available"""
from leonardo_system.pip import check_versions
return check_versions(True).get(self.name, None).get('new', None)
@property
def needs_migrations(self):
"""Indicates whater module needs migrations"""
# TODO(majklk): also check models etc.
if len(self.widgets) > 0:
return True
return False
@property
def needs_sync(self):
"""Indicates whater module needs templates, static etc."""
affected_attributes = [
'css_files', 'js_files',
'scss_files', 'widgets']
for attr in affected_attributes:
if len(getattr(self, attr)) > 0:
return True
return False
def set_module(self, module):
"""Just setter for module"""
setattr(self, "module", module)
@property
def demo_paths(self):
"""returns collected demo paths excluding examples
TODO: call super which returns custom paths in descriptor
"""
base_path = os.path.join(self.module.__path__[0], 'demo')
paths = []
if os.path.isdir(base_path):
for item in os.listdir(base_path):
# TODO: support examples which is not auto-loaded
if not os.path.isdir(os.path.join(base_path, 'examples')):
paths.append(os.path.join(base_path, item))
return paths
class LeonardoConfig(MasterConfig):
def get_attr(self, name, default=None, fail_silently=True):
"""try extra context
"""
try:
return getattr(self, name)
except KeyError:
extra_context = getattr(self, "extra_context")
if name in extra_context:
value = extra_context[name]
if callable(value):
return value(request=None)
return default
|
1da250eaf5164ec84ebff6b00e5a8b3c6220f554
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/cc/DEPS
|
96a02f5605c9de60454d6918848520ae56914eb8
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 2,827
|
DEPS
|
include_rules = [
"+components/ukm/test_ukm_recorder.h",
"+components/viz/client",
"+components/viz/common",
"+gpu/GLES2",
"+gpu/command_buffer/client/context_support.h",
"+gpu/command_buffer/client/gpu_memory_buffer_manager.h",
"+gpu/command_buffer/client/raster_implementation_gles.h",
"+gpu/command_buffer/client/raster_interface.h",
"+gpu/command_buffer/client/shared_image_interface.h",
"+gpu/command_buffer/common/capabilities.h",
"+gpu/command_buffer/common/discardable_handle.h",
"+gpu/command_buffer/common/gl2_types.h",
"+gpu/command_buffer/common/gpu_memory_allocation.h",
"+gpu/command_buffer/common/gpu_memory_buffer_support.h",
"+gpu/command_buffer/common/mailbox.h",
"+gpu/command_buffer/common/mailbox_holder.h",
"+gpu/command_buffer/common/shared_image_trace_utils.h",
"+gpu/command_buffer/common/shared_image_usage.h",
"+gpu/command_buffer/common/sync_token.h",
"+gpu/config/gpu_feature_info.h",
"+gpu/config/gpu_finch_features.h",
"+gpu/config/gpu_info.h",
"+gpu/ipc/client/client_shared_image_interface.h",
"+gpu/vulkan",
"+media",
"+mojo/public/cpp/system/buffer.h",
"+mojo/public/cpp/system/platform_handle.h",
"+skia/buildflags.h",
"+skia/ext",
"+services/metrics/public/cpp",
"+services/tracing/public/cpp",
"+third_party/dawn/include",
"+third_party/khronos/GLES2/gl2.h",
"+third_party/khronos/GLES2/gl2ext.h",
"+third_party/libyuv",
"+third_party/skia/include",
"+third_party/skia/include/private/chromium/SkChromeRemoteGlyphCache.h",
"+third_party/skia/modules/skottie/include",
"+third_party/skia/modules/skresources/include",
"+third_party/perfetto/protos/perfetto/trace/track_event",
"+ui/base",
"+ui/events/types",
"+ui/latency",
"+ui/gfx",
"+ui/gl",
# Do not use mojo bindings in cc/. This library should be agnostic about how
# to communicate with viz.
"-mojo/public/cpp/bindings",
]
specific_include_rules = {
"in_process_context_provider\.cc": [
"+gpu/command_buffer/client",
"+gpu/command_buffer/common",
"+gpu/command_buffer/service",
"+gpu/ipc",
"+gpu/skia_bindings",
],
"in_process_context_provider\.h": [
"+gpu/command_buffer/common",
"+gpu/ipc",
],
".*_(unit|pixel|perf)test.*\.cc": [
"+components/viz/service/display",
"+components/viz/test",
"+gpu/command_buffer/client/gles2_interface.h",
"+gpu/command_buffer/common/command_buffer_id.h",
"+gpu/command_buffer/common/constants.h",
],
"oop_pixeltest\.cc" : [
"+gpu/command_buffer/client",
"+gpu/command_buffer/common",
"+gpu/config",
"+gpu/ipc",
"+gpu/skia_bindings",
],
"transfer_cache_unittest\.cc" : [
"+gpu/command_buffer/client",
"+gpu/command_buffer/common",
"+gpu/command_buffer/service",
"+gpu/config",
"+gpu/ipc",
],
}
|
|
3cef6ef8a843c815bca5b5d97eee74c457814a01
|
f1f21ba2236da38a49a8185ce33b3ce4a4424c1d
|
/apps/fewshot_molecular_property/data/splitdata.py
|
46ca4db340bb9abc0738fd788517cfcced68f611
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHelix
|
75a07c2f14475e56e72f4573b2cf82a91d1cbfda
|
e6ab0261eb719c21806bbadfd94001ecfe27de45
|
refs/heads/dev
| 2023-08-05T03:34:55.009355
| 2023-08-01T09:30:44
| 2023-08-01T09:30:44
| 314,704,349
| 771
| 197
|
Apache-2.0
| 2023-08-01T09:15:07
| 2020-11-21T00:53:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
splitdata.py
|
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
from itertools import compress
import random
import json
name = 'toxcast' # tox21
f = open(os.path.join(BASE_DIR, '{}/raw/{}.csv'.format(name,name)), 'r').readlines()[1:]
np.random.shuffle(f)
if __name__ == "__main__":
tasks = {}
# Below needs to be modified according to different original datasets
for index, line in enumerate(f):
line=line.strip()
l = line.split(",")
size=len(l)
if size<2:
continue
'''
toxcast, sider -> smi = l[0]; for i in range(1, size)
tox 21 -> smi = l[-1]; for i in range(12):
muv -> smi = l[-1]; for i in range(17):
'''
smi = l[0] # modify to data
for i in range(1, size):
cur_item = l[i].strip()
if i not in tasks:
tasks[i] = [[],[]]
if cur_item == "0.0" or cur_item == "0" or cur_item==0:
tasks[i][0].append(smi)
elif cur_item == "1.0" or cur_item == "1" or cur_item==1:
tasks[i][1].append(smi)
#until here
cnt_tasks=[]
for i in tasks:
root = name + "/new/" + str(i)
os.makedirs(root, exist_ok=True)
os.makedirs(root + "/raw", exist_ok=True)
os.makedirs(root + "/processed", exist_ok=True)
file = open(root + "/raw/" + name + ".json", "w")
file.write(json.dumps(tasks[i]))
file.close()
print('task:',i,len(tasks[i][0]), len(tasks[i][1]))
cnt_tasks.append([len(tasks[i][0]), len(tasks[i][1])])
print(cnt_tasks)
|
7831219f6f4cc95a739eab66247c2cb4eb9d3902
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/FWCore/ParameterSet/python/test/MessWithGeometry_cff.py
|
5ea9f82fb32bae6eb703d97a1afc4fc36afa0bf8
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 117
|
py
|
MessWithGeometry_cff.py
|
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.test.Geometry_cfi import *
geometry.bField = 0.0
|
63b09a8180932cdce26db73ec4febb9973509a05
|
5aeb5c768aed6008bc20c7c6a1d59b569590fad5
|
/athenacli/style.py
|
2b49fb1aa2573deeff165bf873c6c48f398b2d23
|
[
"BSD-3-Clause"
] |
permissive
|
dbcli/athenacli
|
f5a8627ce0a1f4323a73603d1dee9ed164c5ee81
|
c9e477447345a28b070badb014bfd2c6679cc902
|
refs/heads/main
| 2023-03-09T00:56:51.028337
| 2022-05-16T00:50:44
| 2022-05-16T00:50:44
| 145,800,435
| 203
| 38
|
BSD-3-Clause
| 2022-05-16T00:45:05
| 2018-08-23T04:30:53
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
style.py
|
from pygments.token import Token
from pygments.style import Style
from pygments.styles.default import DefaultStyle
class AthenaStyle(Style):
styles = {
Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
Token.Menu.Completions.ProgressButton: 'bg:#003333',
Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
Token.SelectedText: '#ffffff bg:#6666aa',
Token.IncrementalSearchMatch: '#ffffff bg:#4444aa',
Token.IncrementalSearchMatch.Current: '#ffffff bg:#44aa44',
}
styles.update(DefaultStyle.styles)
|
34833aeebd60bae65ed7e0e2b9e85ed52bfb67ac
|
5cf6a985ce1ba56ce65999be93c859482307967d
|
/algorithms/color/rgb-to-cmyk.py
|
f9061e68b35834b630f7c43a3853c08f7a50a23b
|
[] |
no_license
|
hacktoon/1001
|
5e1abdeb6313d8960370dbcff33f7359933e4686
|
9117eea9389de180a994fd51a3abc8befacaed2a
|
refs/heads/master
| 2023-08-15T21:44:46.231928
| 2017-10-30T18:09:12
| 2017-10-30T18:09:12
| 1,232,142
| 161
| 67
| null | 2017-10-30T18:09:13
| 2011-01-08T04:00:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
rgb-to-cmyk.py
|
# encoding: utf-8
'''
RGB to CMYK
Author:
?
Contributor:
Aurélio A. Heckert
Type:
color
Description:
Converts a color defined in RGB to CMYK.
RGB is an additive color system: it represents the mixing of light. Its components are (in order) red, green and blue.
CMYK is a subtractive color system: it represents the mixing of pigments. Its components are (in order) cyan, magenta, yellow and black. The black in CMYK comes from a practical need, since mixing the 3 pigments is costly, does not produce a true black, and overprinting would make misalignment more noticeable in dark details.
The algorithm in this example treats the components as floating-point values between 0 and 1, where 0 means no contribution and 1 means maximum contribution. Thus white would be (1,1,1) in RGB and (0,0,0,0) in CMYK, intense red would be (1,0,0) in RGB and (0,1,1,0) in CMYK, and orange would be (1,0.5,0) in RGB and (0,0.5,1,0) in CMYK. Representing the components as floats between 0 and 1 may look odd given our habit of seeing colors defined with 1 byte per channel, but this representation is very useful in many color-manipulation algorithms and also enables higher color depths (more than 1 byte per component).
Complexity:
O(1)
Difficulty:
easy
References:
http://en.wikipedia.org/wiki/RGB
http://en.wikipedia.org/wiki/CMYK
'''
def rgb2cmyk( red, green, blue ):
black = min( 1-red, 1-green, 1-blue )
nb = 1 - black # negative black
if black == 1:
cyan = 0
magenta = 0
yellow = 0
elif nb > 0:
cyan = ( nb - red ) / nb
magenta = ( nb - green ) / nb
yellow = ( nb - blue ) / nb
else:
cyan = 1 - red
magenta = 1 - green
yellow = 1 - blue
return "%.1f %.1f %.1f %.1f" % ( cyan, magenta, yellow, black )
print 'Black:\t\t\t', rgb2cmyk( 0.0, 0.0, 0.0 )
print 'Dark gray:\t\t', rgb2cmyk( 0.3, 0.3, 0.3 )
print 'Medium gray:\t\t', rgb2cmyk( 0.5, 0.5, 0.5 )
print 'Light gray:\t\t', rgb2cmyk( 0.7, 0.7, 0.7 )
print 'White:\t\t\t', rgb2cmyk( 1.0, 1.0, 1.0 )
print 'Bright red:\t\t', rgb2cmyk( 1.0, 0.0, 0.0 )
print 'Blood red:\t\t', rgb2cmyk( 0.7, 0.0, 0.0 )
print 'Orange:\t\t\t', rgb2cmyk( 1.0, 0.5, 0.0 )
print 'Moss green:\t\t', rgb2cmyk( 0.6, 0.7, 0.6 )
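# Illustrative check (not part of the original script): the orange example from the
# description above, (1, 0.5, 0) in RGB, should yield (0, 0.5, 1, 0) in CMYK.
assert rgb2cmyk( 1.0, 0.5, 0.0 ) == '0.0 0.5 1.0 0.0'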
|
761052df34e6cef70879d3af7a254bce3489c593
|
4da20cd59782f820bf0b7d23027f3784058406be
|
/django_pgschemas/management/commands/__init__.py
|
e8c0e4663e03a52aa5ea975b043b1ed05381130c
|
[
"MIT"
] |
permissive
|
lorinkoz/django-pgschemas
|
ee23ae58dee839ed4e5e3a0c7aa7320ab3bb211b
|
30901690aa63b57a7b984b7df97c6d0280a84ea7
|
refs/heads/master
| 2023-09-03T00:11:00.096898
| 2023-08-28T21:00:42
| 2023-08-29T06:28:13
| 162,996,334
| 114
| 18
|
MIT
| 2023-09-12T07:49:04
| 2018-12-24T14:04:14
|
Python
|
UTF-8
|
Python
| false
| false
| 11,120
|
py
|
__init__.py
|
import enum
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import CharField, Q, Value as V
from django.db.models.functions import Concat
from django.db.utils import ProgrammingError
from ...schema import get_current_schema
from ...utils import create_schema, dynamic_models_exist, get_clone_reference, get_tenant_model
from ._executors import parallel, sequential
class CommandScope(enum.Enum):
ALL = "all"
DYNAMIC = "dynamic"
STATIC = "static"
@classmethod
def allow_static(cls):
return [cls.ALL, cls.STATIC]
@classmethod
def allow_dynamic(cls):
return [cls.ALL, cls.DYNAMIC]
EXECUTORS = {
"sequential": sequential,
"parallel": parallel,
}
class WrappedSchemaOption:
scope = CommandScope.ALL
specific_schemas = None
allow_interactive = True
allow_wildcards = True
def add_arguments(self, parser):
if self.allow_interactive:
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"-s",
"--schema",
nargs="+",
dest="schemas",
help="Schema(s) to execute the current command",
)
parser.add_argument(
"-x",
"--exclude-schema",
nargs="+",
dest="excluded_schemas",
help="Schema(s) to exclude when executing the current command",
)
if self.allow_wildcards:
parser.add_argument(
"-as",
"--include-all-schemas",
action="store_true",
dest="all_schemas",
help="Include all schemas when executing the current command",
)
parser.add_argument(
"-ss",
"--include-static-schemas",
action="store_true",
dest="static_schemas",
help="Include all static schemas when executing the current command",
)
parser.add_argument(
"-ds",
"--include-dynamic-schemas",
action="store_true",
dest="dynamic_schemas",
help="Include all dynamic schemas when executing the current command",
)
parser.add_argument(
"-ts",
"--include-tenant-schemas",
action="store_true",
dest="tenant_schemas",
help="Include all tenant-like schemas when executing the current command",
)
parser.add_argument(
"--parallel",
dest="parallel",
action="store_true",
help="Run command in parallel mode",
)
parser.add_argument(
"--no-create-schemas",
dest="skip_schema_creation",
action="store_true",
help="Skip automatic creation of non-existing schemas",
)
def get_schemas_from_options(self, **options):
skip_schema_creation = options.get("skip_schema_creation", False)
try:
schemas = self._get_schemas_from_options(**options)
except ProgrammingError:
# This happens with unmigrated database.
# It can also happen when the tenant model contains unapplied migrations that break.
raise CommandError(
"Error while attempting to retrieve dynamic schemas. "
"Perhaps you need to migrate the 'public' schema first?"
)
if self.specific_schemas is not None:
schemas = [x for x in schemas if x in self.specific_schemas]
if not schemas:
raise CommandError("This command can only run in %s" % self.specific_schemas)
if not skip_schema_creation:
for schema in schemas:
create_schema(schema, check_if_exists=True, sync_schema=False, verbosity=0)
return schemas
def get_executor_from_options(self, **options):
return EXECUTORS["parallel"] if options.get("parallel") else EXECUTORS["sequential"]
def get_scope_display(self):
return "|".join(self.specific_schemas or []) or self.scope.value
def _get_schemas_from_options(self, **options):
schemas = options.get("schemas") or []
excluded_schemas = options.get("excluded_schemas") or []
include_all_schemas = options.get("all_schemas") or False
include_static_schemas = options.get("static_schemas") or False
include_dynamic_schemas = options.get("dynamic_schemas") or False
include_tenant_schemas = options.get("tenant_schemas") or False
dynamic_ready = dynamic_models_exist()
allow_static = self.scope in CommandScope.allow_static()
allow_dynamic = self.scope in CommandScope.allow_dynamic()
clone_reference = get_clone_reference()
if (
not schemas
and not include_all_schemas
and not include_static_schemas
and not include_dynamic_schemas
and not include_tenant_schemas
):
if not self.allow_interactive:
include_all_schemas = True
elif options.get("interactive", True):
schema = input(
"Enter schema to run command (leave blank for running on '%s' schemas): "
% self.get_scope_display()
).strip()
if schema:
schemas.append(schema)
else:
include_all_schemas = True
else:
raise CommandError("No schema provided")
TenantModel = get_tenant_model()
static_schemas = (
[x for x in settings.TENANTS.keys() if x != "default"] if allow_static else []
)
dynamic_schemas = (
TenantModel.objects.values_list("schema_name", flat=True)
if TenantModel is not None and dynamic_ready and allow_dynamic
else []
)
if clone_reference and allow_static:
static_schemas.append(clone_reference)
schemas_to_return = set()
if include_all_schemas:
if not allow_static and not allow_dynamic:
raise CommandError("Including all schemas is NOT allowed")
schemas_to_return = schemas_to_return.union(static_schemas + list(dynamic_schemas))
if include_static_schemas:
if not allow_static:
raise CommandError("Including static schemas is NOT allowed")
schemas_to_return = schemas_to_return.union(static_schemas)
if include_dynamic_schemas:
if not allow_dynamic:
raise CommandError("Including dynamic schemas is NOT allowed")
schemas_to_return = schemas_to_return.union(dynamic_schemas)
if include_tenant_schemas:
if not allow_dynamic:
raise CommandError("Including tenant-like schemas is NOT allowed")
schemas_to_return = schemas_to_return.union(dynamic_schemas)
if clone_reference:
schemas_to_return.add(clone_reference)
def find_schema_by_reference(reference, as_excluded=False):
if reference in settings.TENANTS and reference != "default" and allow_static:
return reference
elif reference == clone_reference:
return reference
elif (
TenantModel is not None
and dynamic_ready
and TenantModel.objects.filter(schema_name=reference).exists()
and allow_dynamic
):
return reference
else:
local = []
if allow_static:
local += [
schema_name
for schema_name, data in settings.TENANTS.items()
if schema_name not in ["public", "default"]
and any(x for x in data["DOMAINS"] if x.startswith(reference))
]
if TenantModel is not None and dynamic_ready and allow_dynamic:
local += (
TenantModel.objects.annotate(
route=Concat(
"domains__domain",
V("/"),
"domains__folder",
output_field=CharField(),
)
)
.filter(
Q(schema_name=reference)
| Q(domains__domain__istartswith=reference)
| Q(route=reference)
)
.distinct()
.values_list("schema_name", flat=True)
)
if not local:
message = (
"No schema found for '%s' (excluded)"
if as_excluded
else "No schema found for '%s'"
)
raise CommandError(message % reference)
if len(local) > 1:
message = (
"More than one tenant found for schema '%s' by domain (excluded), "
"please, narrow down the filter"
if as_excluded
else "More than one tenant found for schema '%s' by domain, please, narrow down the filter"
)
raise CommandError(message % reference)
return local[0]
for schema in schemas:
included = find_schema_by_reference(schema, as_excluded=False)
schemas_to_return.add(included)
for schema in excluded_schemas:
excluded = find_schema_by_reference(schema, as_excluded=True)
schemas_to_return -= {excluded}
return (
list(schemas_to_return)
if "public" not in schemas_to_return
else ["public"] + list(schemas_to_return - {"public"})
)
class TenantCommand(WrappedSchemaOption, BaseCommand):
def handle(self, *args, **options):
schemas = self.get_schemas_from_options(**options)
executor = self.get_executor_from_options(**options)
executor(schemas, self, "_raw_handle_tenant", args, options, pass_schema_in_kwargs=True)
def _raw_handle_tenant(self, *args, **kwargs):
kwargs.pop("schema_name")
self.handle_tenant(get_current_schema(), *args, **kwargs)
def handle_tenant(self, tenant, *args, **options):
pass
class StaticTenantCommand(TenantCommand):
scope = CommandScope.STATIC
class DynamicTenantCommand(TenantCommand):
scope = CommandScope.DYNAMIC
|
93ff696f9e2444aa54f7a6df08ce90db9e28cecd
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/assist_pipeline/test_ring_buffer.py
|
22185c3ad5b2096b4006e3e654c23be6ce4fa7b6
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
test_ring_buffer.py
|
"""Tests for audio ring buffer."""
from homeassistant.components.assist_pipeline.ring_buffer import RingBuffer
def test_ring_buffer_empty() -> None:
"""Test empty ring buffer."""
rb = RingBuffer(10)
assert rb.maxlen == 10
assert rb.pos == 0
assert rb.getvalue() == b""
def test_ring_buffer_put_1() -> None:
"""Test putting some data smaller than the maximum length."""
rb = RingBuffer(10)
rb.put(bytes([1, 2, 3, 4, 5]))
assert len(rb) == 5
assert rb.pos == 5
assert rb.getvalue() == bytes([1, 2, 3, 4, 5])
def test_ring_buffer_put_2() -> None:
"""Test putting some data past the end of the buffer."""
rb = RingBuffer(10)
rb.put(bytes([1, 2, 3, 4, 5]))
rb.put(bytes([6, 7, 8, 9, 10, 11, 12]))
assert len(rb) == 10
assert rb.pos == 2
assert rb.getvalue() == bytes([3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
def test_ring_buffer_put_too_large() -> None:
"""Test putting data too large for the buffer."""
rb = RingBuffer(10)
rb.put(bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))
assert len(rb) == 10
assert rb.pos == 2
assert rb.getvalue() == bytes([3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
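# Illustrative extra case (not part of the original test module), assuming the same
# RingBuffer semantics exercised above: streaming chunks keeps only the newest bytes.
def test_ring_buffer_streaming_window() -> None:
    """Test that streaming several chunks retains only the most recent maxlen bytes."""
    rb = RingBuffer(4)
    for chunk in (b"ab", b"cd", b"ef"):
        rb.put(chunk)
    assert rb.getvalue() == b"cdef"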
|
ec97e367927ccb0e9345a39eade52aab1e949ea3
|
d571d407cfda435fcab8b7ccadb1be812c7047c7
|
/guild/plugins/import_argparse_flags_main.py
|
ce84ed71eb1522cd2d42c67f1be0c60569fdfe03
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
guildai/guildai
|
2d8661a2a6bf0d1ced6334095c8bf5a8e391d8af
|
149055da49f57eaf4aec418f2e339c8905c1f02f
|
refs/heads/main
| 2023-08-25T10:09:58.560059
| 2023-08-12T20:19:05
| 2023-08-12T20:19:05
| 105,057,392
| 833
| 86
|
Apache-2.0
| 2023-08-07T19:34:27
| 2017-09-27T18:57:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,892
|
py
|
import_argparse_flags_main.py
|
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import sys
from guild import entry_point_util
from guild import flag_util
from guild import log as loglib
from guild import python_util
from guild import util
_action_importers = entry_point_util.EntryPointResources(
"guild.python.argparse_actions", "Argparse action importers"
)
class ArgparseActionFlagsImporter:
priority = 50
def __init__(self, ep):
self.ep = ep
def flag_attrs_for_argparse_action(self, action, flag_name):
"""Return a dict of flag config attrs for an argparse action."""
raise NotImplementedError()
flag_action_class_names = [
"_StoreAction",
"_StoreTrueAction",
"_StoreFalseAction",
"BooleanOptionalAction",
]
ignore_flag_action_class_names = [
"_HelpAction",
]
loglib.init_logging()
log = logging.getLogger("guild.plugins.import_argparse_flags_main")
def main():
args = _init_args()
_patch_argparse(args.output_path)
# Importing module has the side-effect of writing flag data due to
# patched argparse.
base_args = util.shlex_split(args.base_args)
_exec_module(args.mod_path, args.package, base_args)
def _init_args():
if len(sys.argv) != 5:
raise SystemExit(
"usage: import_argparse_flags_main.py "
"mod_path package base_args output_path"
)
return argparse.Namespace(
mod_path=sys.argv[1],
package=sys.argv[2],
base_args=sys.argv[3],
output_path=sys.argv[4],
)
def _patch_argparse(output_path):
handle_parse = lambda parse_args, *_args, **_kw: _write_flags(
parse_args, output_path
)
# parse_known_args is called by parse_args, so this handles both
# cases.
python_util.listen_method(argparse.ArgumentParser, "parse_known_args", handle_parse)
def _write_flags(parse_args_f, output_path):
parser = _wrapped_parser(parse_args_f)
flags = {}
for action in parser._actions:
_maybe_apply_flag(action, flags)
log.debug("writing flags to %s: %s", output_path, flags)
with open(output_path, "w") as f:
json.dump(flags, f)
def _wrapped_parser(f):
return _closure_parser(_f_closure(f))
def _f_closure(f):
try:
return f.__closure__
except AttributeError:
try:
return f.func_closure
except AttributeError:
assert False, (type(f), dir(f))
def _closure_parser(closure):
assert isinstance(closure, tuple), (type(closure), closure)
assert len(closure) == 2, closure
parser = closure[1].cell_contents
assert isinstance(parser, argparse.ArgumentParser), (type(parser), parser)
return parser
def _maybe_apply_flag(action, flags):
flag_name = _flag_name(action)
if not flag_name:
log.debug("skipping %s - not a flag action", action)
return
if action.__class__.__name__ not in flag_action_class_names:
if action.__class__.__name__ not in ignore_flag_action_class_names:
log.debug("skipping %s - not an action type", action)
return
attrs = _flag_attrs_for_action(action, flag_name)
if attrs is not None:
flags[flag_name] = attrs
log.debug("added flag %r: %r", flag_name, attrs)
else:
log.debug("unable to import %s flag for action %r", flag_name, action)
def _flag_attrs_for_action(action, flag_name):
for importer in _action_importers_by_priority():
attrs = importer.flag_attrs_for_argparse_action(action, flag_name)
if attrs is not None:
return attrs
return default_flag_attrs_for_argparse_action(action, flag_name)
def _action_importers_by_priority():
importers = [importer for _name, importer in _action_importers]
importers.sort(key=lambda x: x.priority)
return importers
def default_flag_attrs_for_argparse_action(
action, flag_name, ignore_unknown_type=False
):
attrs = {}
if action.help:
attrs["description"] = action.help
if action.default is not None:
attrs["default"] = _ensure_json_encodable(action.default, flag_name)
if action.choices:
attrs["choices"] = _ensure_json_encodable(action.choices, flag_name)
if action.required:
attrs["required"] = True
if action.type:
attrs["type"] = _flag_type_for_action(
action.type,
flag_name,
ignore_unknown_type,
)
if action.__class__.__name__ == "_StoreTrueAction":
_apply_store_true_flag_attrs(attrs)
elif action.__class__.__name__ == "_StoreFalseAction":
_apply_store_false_flag_attrs(attrs)
elif action.__class__.__name__ == "BooleanOptionalAction":
_apply_boolean_option_flag_attrs(action, attrs)
if _multi_arg(action):
attrs["arg-split"] = True
_maybe_encode_splittable_default(attrs)
return attrs
def _ensure_json_encodable(x, flag_name):
try:
json.dumps(x)
except TypeError:
log.warning(
"cannot serialize value %r for flag %s - coercing to string",
x,
flag_name,
)
return str(x)
else:
return x
def _flag_type_for_action(action_type, flag_name, ignore_unknown_type):
if action_type is str:
return "string"
if action_type is float:
return "float"
if action_type is int:
return "int"
if action_type is bool:
return "boolean"
if not ignore_unknown_type:
log.warning(
"unsupported flag type %s for flag %s - ignoring type setting",
action_type,
flag_name,
)
return None
def _apply_store_true_flag_attrs(attrs):
attrs["arg-switch"] = True
attrs["choices"] = [True, False]
def _apply_store_false_flag_attrs(attrs):
attrs["arg-switch"] = False
attrs["choices"] = [True, False]
def _apply_boolean_option_flag_attrs(action, attrs):
if (
len(action.option_strings) != 2 #
or action.option_strings[0][:2] != "--" #
or action.option_strings[1][:2] != "--"
):
log.debug("unexpected option_strings for action: %s", action)
return
attrs["choices"] = [True, False]
if action.default:
attrs["arg-name"] = action.option_strings[1][2:]
attrs["arg-switch"] = False
else:
attrs["arg-name"] = action.option_strings[0][2:]
attrs["arg-switch"] = True
def _multi_arg(action):
return action.nargs in ("+", "*") or (
isinstance(action.nargs, (int, float)) and action.nargs > 1
)
def _maybe_encode_splittable_default(flag_attrs):
default = flag_attrs.get("default")
if isinstance(default, list):
flag_attrs["default"] = flag_util.join_splittable_flag_vals(default)
def _flag_name(action):
for opt in action.option_strings:
if opt.startswith("--"):
return opt[2:]
return None
def _exec_module(mod_path, package, base_args):
assert mod_path.endswith(".py"), mod_path
sys.argv = [mod_path] + base_args + ["--help"]
log.debug("loading module from '%s'", mod_path)
python_util.exec_script(mod_path, mod_name=_exec_mod_name(package))
def _exec_mod_name(package):
if package:
return f"{package}.__main__"
return "__main__"
if __name__ == "__main__":
main()
|
b1ba4938e1d6924bdc6ee4b9e23696c63f95b0ad
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client_common/script_component/DynamicScriptComponent.py
|
bc46b49d7edb614102df0a3edbf3f67c5baf5476
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
DynamicScriptComponent.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client_common/script_component/DynamicScriptComponent.py
import logging
import BigWorld
from PlayerEvents import g_playerEvents
from shared_utils import nextTick
_logger = logging.getLogger(__name__)
class DynamicScriptComponent(BigWorld.DynamicScriptComponent):
def __init__(self, *_, **__):
BigWorld.DynamicScriptComponent.__init__(self)
if self._isAvatarReady:
nextTick(self._onAvatarReady)()
else:
g_playerEvents.onAvatarReady += self.__onAvatarReady
_logger.debug('%s.__init__. EntityID=%s', self.__class__.__name__, self.entity.id)
@property
def _isAvatarReady(self):
return BigWorld.player().userSeesWorld()
def onDestroy(self):
_logger.debug('%s.onDestroy. EntityID=%s', self.__class__.__name__, self.entity.id)
def onLeaveWorld(self):
self.onDestroy()
@property
def spaceID(self):
return self.entity.spaceID
@property
def keyName(self):
return next((name for name, value in self.entity.dynamicComponents.iteritems() if value == self))
def _onAvatarReady(self):
pass
def __onAvatarReady(self):
g_playerEvents.onAvatarReady -= self.__onAvatarReady
nextTick(self._onAvatarReady)()
|
c4b5079e086066288e16856b229dd161263264e7
|
53a83642c01a8828e3d7bd0b18e33c3b694c2b84
|
/Python/GeeksforGeeks/minimum-number-of-jumps.py
|
925175560656403e0275ba1d34adedec78f3ebc7
|
[] |
no_license
|
anantkaushik/Competitive_Programming
|
1dcd60a28b5b951c23024d6090942be081ad249f
|
6dba38fd7aa4e71b5196d01d64e81f9336d08b13
|
refs/heads/master
| 2022-03-06T15:36:23.797340
| 2022-02-21T12:00:37
| 2022-02-21T12:00:37
| 82,700,948
| 271
| 95
| null | 2020-10-27T17:34:39
| 2017-02-21T16:18:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
minimum-number-of-jumps.py
|
"""
Problem Link: https://practice.geeksforgeeks.org/problems/minimum-number-of-jumps-1587115620/1
Given an array of N integers arr[] where each element represents the max number of steps that can be made
forward from that element. Find the minimum number of jumps to reach the end of the array
(starting from the first element). If an element is 0, then you cannot move through that element.
Note: Return -1 if you can't reach the end of the array.
Example 1:
Input:
N = 11
arr[] = {1, 3, 5, 8, 9, 2, 6, 7, 6, 8, 9}
Output: 3
Explanation:
First jump from 1st element to 2nd
element with value 3. Now, from here
we jump to 5th element with value 9,
and from here we will jump to last.
Example 2:
Input :
N = 6
arr = {1, 4, 3, 2, 6, 7}
Output: 2
Explanation:
First we jump from the 1st to 2nd element
and then jump to the last element.
Your task:
You don't need to read input or print anything. Your task is to complete function minJumps()
which takes the array arr and its size N as input parameters and returns the minimum number of jumps.
If not possible returns -1.
Expected Time Complexity: O(N)
Expected Space Complexity: O(1)
Constraints:
1 ≤ N ≤ 10^7
0 ≤ arr[i] ≤ 10^7
"""
class Solution:
def minJumps(self, arr, n):
max_reach = cur_reach = jumps = 0
for index in range(len(arr)):
if index > max_reach:
return -1
max_reach = max(max_reach, index + arr[index])
if index == cur_reach and index != len(arr) - 1:
jumps += 1
cur_reach = max_reach
return jumps
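# Illustrative usage sketch (not part of the original solution file): the greedy
# jump counter applied to Example 1 from the problem statement above.
if __name__ == '__main__':
    arr = [1, 3, 5, 8, 9, 2, 6, 7, 6, 8, 9]
    print(Solution().minJumps(arr, len(arr)))  # expected output: 3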
|
8b56907c92ad47997941286a22e965bfa0a35de7
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/jedi/jedi/common/__init__.py
|
702a5e609985e10036b70b695e50566fc7097e98
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
__init__.py
|
from jedi.common.context import BaseContextSet, BaseContext
|
e7f60f9b3c4096d23e581e36e759027ea22da24a
|
4d6935a26f211987f54b980fe174971e4a1366e8
|
/ddparser/ernie/file_utils.py
|
1d4fd904d45c6234238e4725981b3e3042fcf857
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
baidu/DDParser
|
17e6d7c653172a93a30caa5aa52c01264067d41a
|
144c09bd058cea53810a45789812aa50aa0f711c
|
refs/heads/master
| 2023-08-29T00:02:21.543273
| 2023-02-05T03:05:09
| 2023-02-05T03:05:57
| 284,605,403
| 946
| 165
|
Apache-2.0
| 2022-10-29T16:12:08
| 2020-08-03T04:45:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
file_utils.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from tqdm import tqdm
log = logging.getLogger(__name__)
def _fetch_from_remote(url, force_download=False):
import hashlib, tempfile, requests, tarfile
sig = hashlib.md5(url.encode('utf8')).hexdigest()
cached_dir = os.path.join(tempfile.gettempdir(), sig)
if force_download or not os.path.exists(cached_dir):
with tempfile.NamedTemporaryFile() as f:
#url = 'https://ernie.bj.bcebos.com/ERNIE_stable.tgz'
r = requests.get(url, stream=True)
total_len = int(r.headers.get('content-length'))
for chunk in tqdm(r.iter_content(chunk_size=1024), total=total_len // 1024, desc='downloading %s' % url, unit='KB'):
if chunk:
f.write(chunk)
f.flush()
            log.debug('extracting... to %s' % f.name)
with tarfile.open(f.name) as tf:
tf.extractall(path=cached_dir)
log.debug('%s cached in %s' % (url, cached_dir))
return cached_dir
def add_docstring(doc):
def func(f):
        f.__doc__ += ('\n======other docs from super class ======\n%s' % doc)
return f
return func
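# Illustrative sketch (not part of the original module): add_docstring appends a
# parent method's docstring to an overriding method's own docstring. The classes
# below are hypothetical examples.
class _Base(object):
    def run(self):
        """Run the base job."""
        pass
class _Child(_Base):
    @add_docstring(_Base.run.__doc__)
    def run(self):
        """Run the child job."""
        pass
# _Child.run.__doc__ now ends with the marker line followed by the base docstring.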
|
99ab76134a854d96280e92a90d49a5b8d68fb8e9
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/compare-version-numbers.py
|
34c45443f115e85cfc9f6655cb679b566be5237c
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
compare-version-numbers.py
|
# Time: O(n)
# Space: O(1)
import itertools
class Solution(object):
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
n1, n2 = len(version1), len(version2)
i, j = 0, 0
while i < n1 or j < n2:
v1, v2 = 0, 0
while i < n1 and version1[i] != '.':
v1 = v1 * 10 + int(version1[i])
i += 1
while j < n2 and version2[j] != '.':
v2 = v2 * 10 + int(version2[j])
j += 1
if v1 != v2:
return 1 if v1 > v2 else -1
i += 1
j += 1
return 0
# Time: O(n)
# Space: O(n)
class Solution2(object):
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
v1, v2 = version1.split("."), version2.split(".")
if len(v1) > len(v2):
v2 += ['0' for _ in xrange(len(v1) - len(v2))]
elif len(v1) < len(v2):
v1 += ['0' for _ in xrange(len(v2) - len(v1))]
i = 0
while i < len(v1):
if int(v1[i]) > int(v2[i]):
return 1
elif int(v1[i]) < int(v2[i]):
return -1
else:
i += 1
return 0
def compareVersion2(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
v1 = [int(x) for x in version1.split('.')]
v2 = [int(x) for x in version2.split('.')]
while len(v1) != len(v2):
if len(v1) > len(v2):
v2.append(0)
else:
v1.append(0)
return cmp(v1, v2)
def compareVersion3(self, version1, version2):
splits = (map(int, v.split('.')) for v in (version1, version2))
return cmp(*zip(*itertools.izip_longest(*splits, fillvalue=0)))
def compareVersion4(self, version1, version2):
main1, _, rest1 = ('0' + version1).partition('.')
main2, _, rest2 = ('0' + version2).partition('.')
return cmp(int(main1), int(main2)) or len(rest1 + rest2) and self.compareVersion4(rest1, rest2)
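# Illustrative usage (not part of the original file); the later solutions above rely
# on Python 2 builtins (xrange/cmp), so run this demo under a Python 2 interpreter.
if __name__ == '__main__':
    print(Solution().compareVersion("1.01", "1.001"))  # 0
    print(Solution().compareVersion("1.0", "1.0.0"))   # 0
    print(Solution().compareVersion("0.1", "1.1"))     # -1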
|
8b7be4754488eb2ecaeec2d66a0157f21e098aaf
|
ce516866ef41b3301566a65cb2c729be62d70b54
|
/scripts/example_subdomains.py
|
8f10417c0c1dcba3d39e0bf7bd173107ae2c4cd6
|
[
"MIT"
] |
permissive
|
PMEAL/OpenPNM
|
3a504fb0e982b8c884c7e821f6f970417a1eb861
|
ab6df0001285b4006740095ba829bd1625867eae
|
refs/heads/dev
| 2023-09-01T04:17:01.561886
| 2023-08-31T08:26:00
| 2023-08-31T08:26:00
| 11,670,333
| 411
| 194
|
MIT
| 2023-09-06T04:28:35
| 2013-07-25T20:32:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
example_subdomains.py
|
import pytest
import openpnm as op
ws = op.Workspace()
proj = ws.new_project()
pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4, project=proj)
Ps = pn['pore.coords'][:, 0] < pn['pore.coords'][:, 0].mean()
Ts = pn.find_neighbor_throats(pores=Ps, mode='xnor')
geo1 = op.geometry.SpheresAndCylinders(network=pn, pores=Ps, throats=Ts)
Ps = pn['pore.coords'][:, 0] >= pn['pore.coords'][:, 0].mean()
Ts = pn.find_neighbor_throats(pores=Ps, mode='or')
geo2 = op.geometry.SpheresAndCylinders(network=pn, pores=Ps, throats=Ts)
pn['pore.foo'] = 1
# Can't create a subdict below foo
with pytest.raises(Exception):
pn['pore.foo.bar'] = 1
# Can create a subdict directly
pn['pore.baz.bar'] = 2
# Can't create a new item already used as subdict
with pytest.raises(Exception):
pn['pore.baz'] = 2
# Also works on subdomains
geo1['pore.blah'] = 1
with pytest.raises(Exception):
geo1['pore.blah.boo'] = 1
geo1['pore.bee.bop'] = 1
with pytest.raises(Exception):
geo1['pore.bee'] = 1
# Now start looking across objects
with pytest.raises(Exception):
geo1['pore.foo'] = 1 # Already exists on pn
with pytest.raises(Exception):
geo1['pore.foo.bar'] = 1 # pore.foo already exists on pn
with pytest.raises(Exception):
geo1['pore.baz'] = 1 # pore.baz.bar already exists on pn
# Now start looking across objects
geo2['pore.blah'] = 1
geo2['pore.bee.bop'] = 1
with pytest.raises(Exception):
geo1['pore.bee'] = 1
with pytest.raises(Exception):
pn['pore.bee'] = 1
with pytest.raises(Exception):
pn['pore.bee.bop'] = 1
|
fc66b867ba3ac539b2d4fa2b6f375a4a613b7139
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/c7n/filters/backup.py
|
f02412a6ceb8828c45f059f3525384616b87c8a1
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
backup.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .core import Filter
from datetime import datetime, timedelta
from c7n.utils import type_schema, local_session, chunks
from c7n.query import RetryPageIterator
class ConsecutiveAwsBackupsFilter(Filter):
"""Returns resources where number of consective backups (based on the
periodicity defined in the filter) is equal to/or greater than n units.
This filter supports the resources that use AWS Backup service for backups.
:example:
.. code-block:: yaml
policies:
- name: dynamodb-consecutive-aws-backup-count
resource: dynamodb-table
filters:
- type: consecutive-aws-backups
count: 7
period: days
status: 'COMPLETED'
"""
schema = type_schema('consecutive-aws-backups', count={'type': 'number', 'minimum': 1},
period={'enum': ['hours', 'days', 'weeks']},
status={'enum': ['COMPLETED', 'PARTIAL', 'DELETING', 'EXPIRED']},
required=['count', 'period', 'status'])
permissions = ('backup:ListRecoveryPointsByResource', )
annotation = 'c7n:AwsBackups'
def process_resource_set(self, resources):
arns = self.manager.get_arns(resources)
client = local_session(self.manager.session_factory).client('backup')
paginator = client.get_paginator('list_recovery_points_by_resource')
paginator.PAGE_ITERATOR_CLS = RetryPageIterator
for r, arn in zip(resources, arns):
r[self.annotation] = paginator.paginate(
ResourceArn=arn).build_full_result().get('RecoveryPoints', [])
def get_date(self, time):
period = self.data.get('period')
if period == 'weeks':
date = (datetime.utcnow() - timedelta(weeks=time)).strftime('%Y-%m-%d')
elif period == 'hours':
date = (datetime.utcnow() - timedelta(hours=time)).strftime('%Y-%m-%d-%H')
else:
date = (datetime.utcnow() - timedelta(days=time)).strftime('%Y-%m-%d')
return date
def process(self, resources, event=None):
results = []
retention = self.data.get('count')
expected_dates = set()
for time in range(1, retention + 1):
expected_dates.add(self.get_date(time))
for resource_set in chunks(
[r for r in resources if self.annotation not in r], 50):
self.process_resource_set(resource_set)
for r in resources:
backup_dates = set()
for backup in r[self.annotation]:
if backup['Status'] == self.data.get('status'):
if self.data.get('period') == 'hours':
backup_dates.add(backup['CreationDate'].strftime('%Y-%m-%d-%H'))
else:
backup_dates.add(backup['CreationDate'].strftime('%Y-%m-%d'))
if expected_dates.issubset(backup_dates):
results.append(r)
return results
|
55866fd3fd02009290202936dbf17d2f6059293f
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-hss/huaweicloudsdkhss/v5/model/associate_policy_group_request_info.py
|
48a057022867e18525cd4ee96280c16d930ee933
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,658
|
py
|
associate_policy_group_request_info.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AssociatePolicyGroupRequestInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'target_policy_group_id': 'str',
'operate_all': 'bool',
'host_id_list': 'list[str]'
}
attribute_map = {
'target_policy_group_id': 'target_policy_group_id',
'operate_all': 'operate_all',
'host_id_list': 'host_id_list'
}
def __init__(self, target_policy_group_id=None, operate_all=None, host_id_list=None):
"""AssociatePolicyGroupRequestInfo
The model defined in huaweicloud sdk
        :param target_policy_group_id: ID of the target policy group to deploy
        :type target_policy_group_id: str
        :param operate_all: Whether to deploy the policy to all hosts. If true, host_id_list is not required; if false, host_id_list must be provided
        :type operate_all: bool
        :param host_id_list: List of server IDs
:type host_id_list: list[str]
"""
self._target_policy_group_id = None
self._operate_all = None
self._host_id_list = None
self.discriminator = None
self.target_policy_group_id = target_policy_group_id
if operate_all is not None:
self.operate_all = operate_all
if host_id_list is not None:
self.host_id_list = host_id_list
@property
def target_policy_group_id(self):
"""Gets the target_policy_group_id of this AssociatePolicyGroupRequestInfo.
        ID of the target policy group to deploy
:return: The target_policy_group_id of this AssociatePolicyGroupRequestInfo.
:rtype: str
"""
return self._target_policy_group_id
@target_policy_group_id.setter
def target_policy_group_id(self, target_policy_group_id):
"""Sets the target_policy_group_id of this AssociatePolicyGroupRequestInfo.
        ID of the target policy group to deploy
:param target_policy_group_id: The target_policy_group_id of this AssociatePolicyGroupRequestInfo.
:type target_policy_group_id: str
"""
self._target_policy_group_id = target_policy_group_id
@property
def operate_all(self):
"""Gets the operate_all of this AssociatePolicyGroupRequestInfo.
        Whether to deploy the policy to all hosts. If true, host_id_list is not required; if false, host_id_list must be provided
:return: The operate_all of this AssociatePolicyGroupRequestInfo.
:rtype: bool
"""
return self._operate_all
@operate_all.setter
def operate_all(self, operate_all):
"""Sets the operate_all of this AssociatePolicyGroupRequestInfo.
        Whether to deploy the policy to all hosts; if true, host_id_list is not required, if false, host_id_list must be provided
:param operate_all: The operate_all of this AssociatePolicyGroupRequestInfo.
:type operate_all: bool
"""
self._operate_all = operate_all
@property
def host_id_list(self):
"""Gets the host_id_list of this AssociatePolicyGroupRequestInfo.
        List of server IDs
:return: The host_id_list of this AssociatePolicyGroupRequestInfo.
:rtype: list[str]
"""
return self._host_id_list
@host_id_list.setter
def host_id_list(self, host_id_list):
"""Sets the host_id_list of this AssociatePolicyGroupRequestInfo.
        List of server IDs
:param host_id_list: The host_id_list of this AssociatePolicyGroupRequestInfo.
:type host_id_list: list[str]
"""
self._host_id_list = host_id_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AssociatePolicyGroupRequestInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
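

# --- Usage sketch (added for clarity; not part of the generated SDK file) ----------
# A minimal sketch of how the request model above is typically populated and
# serialized; the IDs below are placeholder values, not real resource identifiers.
def _associate_policy_group_example():
    info = AssociatePolicyGroupRequestInfo(
        target_policy_group_id="example-policy-group-id",  # placeholder
        operate_all=False,
        host_id_list=["host-id-1", "host-id-2"],  # placeholders
    )
    # to_dict() returns the plain structure that is serialized into the request body:
    # {'target_policy_group_id': 'example-policy-group-id', 'operate_all': False,
    #  'host_id_list': ['host-id-1', 'host-id-2']}
    return info.to_dict()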
|
7e7eac013c88e9a9906af5986f7cc3770bb38cbb | bca3d1e208a5d0e3365a8a1766a0bfd09932367b | /polyfile/__main__.py | 2681fcae417f9f8a4baace2c248e21c23e4379d0 | ["Apache-2.0", "LicenseRef-scancode-public-domain"] | permissive | trailofbits/polyfile | 998f57d9ca015248019ec55c37f43fa743df8d6c | a116740e4baa5765699ca6b5049e9c41d262c325 | refs/heads/master | 2023-09-02T07:33:10.380904 | 2023-05-24T23:48:39 | 2023-05-24T23:48:39 | 193,975,534 | 303 | 18 | Apache-2.0 | 2023-09-05T08:27:51 | 2019-06-26T20:48:20 | Python | UTF-8 | Python | false | false | 17,685 | py | __main__.py |
import argparse
from contextlib import ExitStack
import json
import logging
import re
import signal
import sys
from textwrap import dedent
from typing import ContextManager, Optional, TextIO
from . import html
from . import logger
from .fileutils import PathOrStdin, PathOrStdout
from .magic import MagicMatcher
from .debugger import Debugger
from .polyfile import __version__, Analyzer
from .repl import ExitREPL
log = logger.getStatusLogger("polyfile")
class SIGTERMHandler:
def __init__(self):
self.terminated = False
signal.signal(signal.SIGTERM, self.sigterm_handler)
def sigterm_handler(self, signum, frame):
sys.stderr.flush()
sys.stderr.write('\n\nCaught SIGTERM. Exiting gracefully, and dumping partial results...\n')
self.terminated = True
class KeyboardInterruptHandler:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(exc_val, KeyboardInterrupt):
try:
sys.stdout.flush()
sys.stderr.flush()
sys.stderr.write("\n\nCaught keyboard interrupt.\n")
if not sys.stderr.isatty() or not sys.stdin.isatty():
sys.exit(128 + 15)
while True:
sys.stderr.write("Would you like PolyFile to output its current progress? [Yn] ")
result = input()
if not result or result.lower() == 'y':
return True
elif result and result.lower() == 'n':
sys.exit(0)
except KeyboardInterrupt:
sys.exit(128 + signal.SIGINT)
else:
return exc_type is None
class FormatOutput:
valid_formats = ("mime", "html", "json", "sbud", "explain")
default_format = "file"
def __init__(self, output_format: Optional[str] = None, output_path: Optional[str] = None):
if output_format is None:
output_format = self.default_format
self.output_format: str = output_format
self.output_path: Optional[str] = output_path
@property
def output_to_stdout(self) -> bool:
return self.output_path is None or self.output_path == "-"
@property
def output_stream(self) -> ContextManager[TextIO]:
if self.output_path is None:
return PathOrStdout("-")
else:
return PathOrStdout(self.output_path)
def __hash__(self):
return hash((self.output_format, self.output_path))
def __eq__(self, other):
return isinstance(other, FormatOutput) and other.output_format == self.output_format and other.output_path == self.output_path
def __str__(self):
return self.output_format
__repr__ = __str__
class ValidateOutput(argparse.Action):
@staticmethod
def add_output(args: argparse.Namespace, path: str):
if not hasattr(args, "format") or args.format is None:
setattr(args, "format", [])
if len(args.format) == 0:
args.format.append(FormatOutput(output_path=path))
return
existing_format: FormatOutput = args.format[-1]
if path != "-":
# make sure this path isn't already used
for output_format in args.format[:-1]:
if output_format.output_path == path:
raise ValueError(f"output path {path!r} cannot be used for both --format "
f"{output_format.output_format} and --format {existing_format.output_format}")
if existing_format.output_path is not None and existing_format.output_path != path:
args.format.append(FormatOutput(output_format=existing_format.output_format, output_path=path))
else:
existing_format.output_path = path
def __call__(self, parser, args, values, option_string=None):
ValidateOutput.add_output(args, values)
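

# --- Illustrative sketch (added for clarity; not part of the upstream module) ------
# A small sketch of how the --format/--output pairing above resolves: each --output
# attaches to the most recent --format, and a format with no --output falls back to
# STDOUT. The path and format names below are placeholders.
def _format_output_pairing_example():
    args = argparse.Namespace(format=[FormatOutput("html")])
    ValidateOutput.add_output(args, "page.html")   # pairs with the preceding "html"
    args.format.append(FormatOutput("sbud"))       # no --output -> implicitly STDOUT
    return [(f.output_format, f.output_path) for f in args.format]
    # -> [('html', 'page.html'), ('sbud', None)]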
def main(argv=None):
parser = argparse.ArgumentParser(description='A utility to recursively map the structure of a file.',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('FILE', nargs='?', default='-',
help='the file to analyze; pass \'-\' or omit to read from STDIN')
parser.add_argument('--format', '-r', type=FormatOutput, action="append", choices=[
        FormatOutput(f) for f in FormatOutput.valid_formats + ("file",)  # "file" is the default and must be an accepted choice
], help=dedent("""PolyFile's output format
Output formats are:
file ...... the detected formats associated with the file,
like the output of the `file` command
mime ...... the detected MIME types associated with the file,
like the output of the `file --mime-type` command
explain ... like 'mime', but adds a human-readable explanation
for why each MIME type matched
html ...... an interactive HTML-based hex viewer
json ...... a modified version of the SBUD format in JSON syntax
sbud ...... equivalent to 'json'
Multiple formats can be output at once:
polyfile INPUT_FILE -f mime -f json
Their output will be concatenated to STDOUT in the order that
they occur in the arguments.
To save each format to a separate file, see the `--output` argument.
If no format is specified, PolyFile defaults to `--format file`"""))
parser.add_argument('--output', '-o', action=ValidateOutput, type=str, # nargs=2,
# metavar=(f"{{{','.join(ValidateOutput.valid_outputs)}}}", "PATH"),
help=dedent("""an optional output path for `--format`
Each instance of `--output` applies to the previous instance
of the `--format` option.
For example:
polyfile INPUT_FILE --format html --output output.html \\
--format sbud --output output.json
will save HTML to `output.html` and SBUD to `output.json`.
No two outputs can be directed at the same file path.
The path can be '-' for STDOUT.
If an `--output` is omitted for a format,
then it will implicitly be printed to STDOUT.
"""))
parser.add_argument('--filetype', '-f', action='append',
help='explicitly match against the given filetype or filetype wildcard (default is to match '
'against all filetypes)')
group = parser.add_mutually_exclusive_group()
group.add_argument('--list', '-l', action='store_true',
help='list the supported filetypes for the `--filetype` argument and exit')
group.add_argument('--html', '-t', action="append",
                       help=dedent("""path to write an interactive HTML file for exploring the input file;
equivalent to `--format html --output HTML`"""))
    group.add_argument("--explain", action="store_true", help="equivalent to `--format explain`")
# parser.add_argument('--try-all-offsets', '-a', action='store_true',
# help='Search for a file match at every possible offset; this can be very slow for larger '
# 'files')
group.add_argument('--only-match-mime', '-I', action='store_true',
                       help=dedent("""just print out the matching MIME types for the file, one on each line;
equivalent to `--format mime`"""))
parser.add_argument('--only-match', '-m', action='store_true',
help='do not attempt to parse known filetypes; only match against file magic')
parser.add_argument('--require-match', action='store_true', help='if no matches are found, exit with code 127')
parser.add_argument('--max-matches', type=int, default=None,
help='stop scanning after having found this many matches')
parser.add_argument('--debugger', '-db', action='store_true', help='drop into an interactive debugger for libmagic '
'file definition matching and PolyFile parsing')
parser.add_argument('--eval-command', '-ex', type=str, action='append', help='execute the given debugger command')
parser.add_argument('--no-debug-python', action='store_true', help='by default, the `--debugger` option will break '
'on custom matchers and prompt to debug using '
'PDB. This option will suppress those prompts.')
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument('--quiet', '-q', action='store_true', help='suppress all log output')
verbosity_group.add_argument('--debug', '-d', action='store_true', help='print debug information')
verbosity_group.add_argument('--trace', '-dd', action='store_true', help='print extra verbose debug information')
parser.add_argument('--version', '-v', action='store_true', help='print PolyFile\'s version information to STDERR')
group.add_argument('-dumpversion', action='store_true',
help='print PolyFile\'s raw version information to STDOUT and exit')
if argv is None:
argv = sys.argv
try:
args = parser.parse_args(argv[1:])
except ValueError as e:
parser.print_usage()
sys.stderr.write(f"polyfile: error: {e!s}\n")
exit(1)
if args.dumpversion:
print(__version__)
exit(0)
if args.eval_command and not args.debugger:
parser.print_usage()
sys.stderr.write("polyfile: error: the `--eval-command` argument can only be used in conjunction with "
"`--debugger`\n")
exit(1)
if args.list:
for mimetype in sorted(MagicMatcher.DEFAULT_INSTANCE.mimetypes):
print(mimetype)
exit(0)
if args.version:
sys.stderr.write(f"PolyFile version {__version__}\n")
if args.FILE == '-' and sys.stdin.isatty():
# No file argument was provided and it doesn't look like anything was piped into STDIN,
# so instead of blocking on STDIN just exit
exit(0)
if not hasattr(args, "format") or args.format is None or len(args.format) == 0:
setattr(args, "format", [])
if hasattr(args, "html") and args.html is not None:
for html_path in args.html:
args.format.append(FormatOutput(output_format="html"))
ValidateOutput.add_output(args, html_path)
if hasattr(args, "explain") and args.explain:
args.format.append(FormatOutput(output_format="explain"))
if args.only_match_mime:
args.format.append(FormatOutput(output_format="mime"))
if not args.format:
args.format.append(FormatOutput())
if args.quiet:
logger.setLevel(logging.CRITICAL)
elif args.trace:
logger.setLevel(logger.TRACE)
elif args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logger.STATUS)
if args.filetype:
regex = r'|'.join(fr"({ f.replace('*', '.*').replace('?', '.?') })" for f in args.filetype)
matcher = re.compile(regex)
mimetypes = [mimetype for mimetype in MagicMatcher.DEFAULT_INSTANCE.mimetypes if matcher.fullmatch(mimetype)]
if not mimetypes:
log.error(f"Filetype argument(s) { args.filetype } did not match any known definitions!")
exit(1)
log.info(f"Only matching against these types: {', '.join(mimetypes)}")
magic_matcher: Optional[MagicMatcher] = MagicMatcher.DEFAULT_INSTANCE.only_match(mimetypes=mimetypes)
else:
magic_matcher = None
sigterm_handler = SIGTERMHandler()
try:
path_or_stdin = PathOrStdin(args.FILE)
except FileNotFoundError:
log.error(f"Cannot open {args.FILE!r} (No such file or directory)")
exit(1)
except KeyboardInterrupt:
        # this will happen if the user presses ^C while reading from STDIN
exit(1)
return # this is here because linters are dumb and will complain about the next line without it
with path_or_stdin as file_path, ExitStack() as stack:
if args.debugger:
debugger = Debugger(break_on_parsing=not args.no_debug_python)
if args.eval_command:
for ex in args.eval_command:
try:
debugger.before_prompt()
debugger.write(f"{debugger.repl_prompt}{ex}\n")
debugger.run_command(ex)
except KeyError:
exit(1)
except ExitREPL:
exit(0)
debugger.write("\n")
stack.enter_context(debugger)
elif args.no_debug_python:
log.warning("Ignoring `--no-debug-python`; it can only be used with the --debugger option.")
analyzer = Analyzer(file_path, parse=not args.only_match, magic_matcher=magic_matcher)
needs_sbud = any(output_format.output_format in {"html", "json", "sbud"} for output_format in args.format)
with KeyboardInterruptHandler():
# do we need to do a full match? if so, do that up front:
if needs_sbud:
if args.max_matches is None or args.max_matches > 0:
for match in analyzer.matches():
if sigterm_handler.terminated:
break
if match.parent is None:
if args.max_matches is not None and len(analyzer.matches_so_far) >= args.max_matches:
log.info(f"Found {args.max_matches} matches; stopping early")
break
if needs_sbud:
sbud = analyzer.sbud(matches=analyzer.matches_so_far)
if args.require_match and not analyzer.matches_so_far:
log.info("No matches found, exiting")
exit(127)
for output_format in args.format:
with output_format.output_stream as output:
if output_format.output_format == "file":
istty = sys.stderr.isatty() and output.isatty() and logging.root.level <= logging.INFO
lines = set()
with KeyboardInterruptHandler():
for match in analyzer.magic_matches():
line = str(match)
if line not in lines:
lines.add(line)
if istty:
log.clear_status()
output.write(f"{line}\n")
output.flush()
else:
output.write(f"{line}\n")
if istty:
log.clear_status()
elif output_format.output_format in ("mime", "explain"):
omm = sys.stderr.isatty() and output.isatty() and logging.root.level <= logging.INFO
if omm:
# figure out the longest MIME type so we can make sure the columns are aligned
longest_mimetype = max(len(mimetype) for mimetype in analyzer.magic_matcher.mimetypes)
found_match = False
with KeyboardInterruptHandler():
for mimetype, match in analyzer.mime_types():
found_match = True
if omm:
log.clear_status()
output.write(mimetype)
output.flush()
sys.stderr.write("." * (longest_mimetype - len(mimetype) + 1))
sys.stderr.write(str(match))
sys.stderr.flush()
output.write("\n")
output.flush()
else:
output.write(mimetype)
output.write("\n")
if output_format.output_format == "explain":
output.write(match.explain(ansi_color=output.isatty(), file=file_path))
if args.require_match and not found_match and not needs_sbud:
log.info("No matches found, exiting")
exit(127)
if omm:
log.clear_status()
elif not output_format.output_to_stdout:
log.info(f"Saved MIME output to {output_format.output_path}")
elif output_format.output_format == "json" or output_format.output_format == "sbud":
assert needs_sbud
json.dump(sbud, output)
if not output_format.output_to_stdout:
log.info(f"Saved {output_format.output_format.upper()} output to {output_format.output_path}")
elif output_format.output_format == "html":
assert needs_sbud
output.write(html.generate(file_path, sbud))
if not output_format.output_to_stdout:
log.info(f"Saved HTML output to {output_format.output_path}")
else:
# This should never happen because the output formats are constrained by argparse
raise NotImplementedError(f"TODO: Add support for output format {output_format!r}")
if sigterm_handler.terminated:
sys.exit(128 + signal.SIGTERM)
if __name__ == '__main__':
main()
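
# --- Usage notes (added for clarity; not part of the upstream module) --------------
# Typical invocations of the entry point above; the file names are placeholders:
#
#     polyfile sample.bin                                    # `file`-style output
#     polyfile sample.bin --format mime                      # one MIME type per line
#     polyfile sample.bin --format html --output report.html \
#                         --format sbud --output report.json
#
# The same entry point can also be driven programmatically with an argv list, e.g.
# main(["polyfile", "sample.bin", "--format", "mime"]).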
|
ba171471fa5c13562c49d6a52a3e5eabc9bd2618 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dataplex/google/cloud/dataplex_v1/services/metadata_service/async_client.py | 0ecd3486a7ea8e45e5e799d1d6b4e96c7e44d296 | ["Apache-2.0"] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 61,629 | py | async_client.py |
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core.client_options import ClientOptions
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dataplex_v1 import gapic_version as package_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.dataplex_v1.services.metadata_service import pagers
from google.cloud.dataplex_v1.types import metadata_
from .client import MetadataServiceClient
from .transports.base import DEFAULT_CLIENT_INFO, MetadataServiceTransport
from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport
class MetadataServiceAsyncClient:
"""Metadata service manages metadata resources such as tables,
filesets and partitions.
"""
_client: MetadataServiceClient
DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT
entity_path = staticmethod(MetadataServiceClient.entity_path)
parse_entity_path = staticmethod(MetadataServiceClient.parse_entity_path)
partition_path = staticmethod(MetadataServiceClient.partition_path)
parse_partition_path = staticmethod(MetadataServiceClient.parse_partition_path)
zone_path = staticmethod(MetadataServiceClient.zone_path)
parse_zone_path = staticmethod(MetadataServiceClient.parse_zone_path)
common_billing_account_path = staticmethod(
MetadataServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
MetadataServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(MetadataServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
MetadataServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
MetadataServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
MetadataServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(MetadataServiceClient.common_project_path)
parse_common_project_path = staticmethod(
MetadataServiceClient.parse_common_project_path
)
common_location_path = staticmethod(MetadataServiceClient.common_location_path)
parse_common_location_path = staticmethod(
MetadataServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MetadataServiceAsyncClient: The constructed client.
"""
return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return MetadataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
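
    # --- Illustrative sketch (added for clarity; not part of the generated client) -
    # With none of the environment variables above set, the resolution falls through
    # to the regular endpoint and no client certificate, e.g.:
    #
    #     endpoint, cert_source = (
    #         MetadataServiceAsyncClient.get_mtls_endpoint_and_cert_source())
    #     # endpoint == MetadataServiceAsyncClient.DEFAULT_ENDPOINT, cert_source is None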
@property
def transport(self) -> MetadataServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MetadataServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)
)
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MetadataServiceTransport] = "grpc_asyncio",
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the metadata service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MetadataServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = MetadataServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
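
    # --- Usage sketch (added for clarity; not part of the generated client) --------
    # Minimal ways to construct the async client configured above; the key-file path
    # is a placeholder, and Application Default Credentials are used when no explicit
    # credentials are supplied:
    #
    #     client = MetadataServiceAsyncClient()
    #     client = MetadataServiceAsyncClient.from_service_account_file(
    #         "service-account.json")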
async def create_entity(
self,
request: Optional[Union[metadata_.CreateEntityRequest, dict]] = None,
*,
parent: Optional[str] = None,
entity: Optional[metadata_.Entity] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Create a metadata entity.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_create_entity():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
entity = dataplex_v1.Entity()
entity.id = "id_value"
entity.type_ = "FILESET"
entity.asset = "asset_value"
entity.data_path = "data_path_value"
entity.system = "BIGQUERY"
entity.format_.mime_type = "mime_type_value"
entity.schema.user_managed = True
request = dataplex_v1.CreateEntityRequest(
parent="parent_value",
entity=entity,
)
# Make the request
response = await client.create_entity(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.CreateEntityRequest, dict]]):
The request object. Create a metadata entity request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity (:class:`google.cloud.dataplex_v1.types.Entity`):
Required. Entity resource.
This corresponds to the ``entity`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.CreateEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entity is not None:
request.entity = entity
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_entity(
self,
request: Optional[Union[metadata_.UpdateEntityRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Update a metadata entity. Only supports full resource
update.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_update_entity():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
entity = dataplex_v1.Entity()
entity.id = "id_value"
entity.type_ = "FILESET"
entity.asset = "asset_value"
entity.data_path = "data_path_value"
entity.system = "BIGQUERY"
entity.format_.mime_type = "mime_type_value"
entity.schema.user_managed = True
request = dataplex_v1.UpdateEntityRequest(
entity=entity,
)
# Make the request
response = await client.update_entity(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.UpdateEntityRequest, dict]]):
The request object. Update a metadata entity request.
                The existing entity will be fully
replaced by the entity in the request.
The entity ID is mutable. To modify the
ID, use the current entity ID in the
request URL and specify the new ID in
the request body.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
request = metadata_.UpdateEntityRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entity.name", request.entity.name),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_entity(
self,
request: Optional[Union[metadata_.DeleteEntityRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Delete a metadata entity.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_delete_entity():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.DeleteEntityRequest(
name="name_value",
etag="etag_value",
)
# Make the request
await client.delete_entity(request=request)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.DeleteEntityRequest, dict]]):
The request object. Delete a metadata entity request.
name (:class:`str`):
Required. The resource name of the entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.DeleteEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entity,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_entity(
self,
request: Optional[Union[metadata_.GetEntityRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Entity:
r"""Get a metadata entity.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_get_entity():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.GetEntityRequest(
name="name_value",
)
# Make the request
response = await client.get_entity(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.GetEntityRequest, dict]]):
The request object. Get metadata entity request.
name (:class:`str`):
Required. The resource name of the entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}.``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Entity:
Represents tables and fileset
metadata contained within a zone.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.GetEntityRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entity,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_entities(
self,
request: Optional[Union[metadata_.ListEntitiesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntitiesAsyncPager:
r"""List metadata entities in a zone.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_list_entities():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.ListEntitiesRequest(
parent="parent_value",
view="FILESETS",
)
# Make the request
page_result = client.list_entities(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.ListEntitiesRequest, dict]]):
The request object. List metadata entities request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListEntitiesAsyncPager:
List metadata entities response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.ListEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entities,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntitiesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def create_partition(
self,
request: Optional[Union[metadata_.CreatePartitionRequest, dict]] = None,
*,
parent: Optional[str] = None,
partition: Optional[metadata_.Partition] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Partition:
r"""Create a metadata partition.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_create_partition():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
partition = dataplex_v1.Partition()
partition.values = ['values_value1', 'values_value2']
partition.location = "location_value"
request = dataplex_v1.CreatePartitionRequest(
parent="parent_value",
partition=partition,
)
# Make the request
response = await client.create_partition(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.CreatePartitionRequest, dict]]):
The request object. Create metadata partition request.
parent (:class:`str`):
Required. The resource name of the parent zone:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
partition (:class:`google.cloud.dataplex_v1.types.Partition`):
Required. Partition resource.
This corresponds to the ``partition`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Partition:
Represents partition metadata
contained within entity instances.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, partition])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.CreatePartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if partition is not None:
request.partition = partition
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_partition,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_partition(
self,
request: Optional[Union[metadata_.DeletePartitionRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Delete a metadata partition.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_delete_partition():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.DeletePartitionRequest(
name="name_value",
)
# Make the request
await client.delete_partition(request=request)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.DeletePartitionRequest, dict]]):
The request object. Delete metadata partition request.
name (:class:`str`):
Required. The resource name of the partition. format:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
The {partition_value_path} segment consists of an
ordered sequence of partition values separated by "/".
All values must be provided.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.DeletePartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_partition,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_partition(
self,
request: Optional[Union[metadata_.GetPartitionRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> metadata_.Partition:
r"""Get a metadata partition of an entity.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_get_partition():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.GetPartitionRequest(
name="name_value",
)
# Make the request
response = await client.get_partition(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.GetPartitionRequest, dict]]):
The request object. Get metadata partition request.
name (:class:`str`):
Required. The resource name of the partition:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
The {partition_value_path} segment consists of an
ordered sequence of partition values separated by "/".
All values must be provided.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.types.Partition:
Represents partition metadata
contained within entity instances.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.GetPartitionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_partition,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_partitions(
self,
request: Optional[Union[metadata_.ListPartitionsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPartitionsAsyncPager:
r"""List metadata partitions of an entity.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_list_partitions():
# Create a client
client = dataplex_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.ListPartitionsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_partitions(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.dataplex_v1.types.ListPartitionsRequest, dict]]):
The request object. List metadata partitions request.
parent (:class:`str`):
Required. The resource name of the parent entity:
``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataplex_v1.services.metadata_service.pagers.ListPartitionsAsyncPager:
List metadata partitions response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = metadata_.ListPartitionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_partitions,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPartitionsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def list_operations(
self,
request: Optional[operations_pb2.ListOperationsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Lists operations that match the specified filter in the request.
Args:
request (:class:`~.operations_pb2.ListOperationsRequest`):
The request object. Request message for
`ListOperations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.ListOperationsResponse:
Response message for ``ListOperations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.ListOperationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.list_operations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_operation(
self,
request: Optional[operations_pb2.GetOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
Args:
request (:class:`~.operations_pb2.GetOperationRequest`):
The request object. Request message for
`GetOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.GetOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.get_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_operation(
self,
request: Optional[operations_pb2.DeleteOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a long-running operation.
This method indicates that the client is no longer interested
in the operation result. It does not cancel the operation.
If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.DeleteOperationRequest`):
The request object. Request message for
`DeleteOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.DeleteOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.delete_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def cancel_operation(
self,
request: Optional[operations_pb2.CancelOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success
is not guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.CancelOperationRequest`):
The request object. Request message for
`CancelOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.CancelOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.cancel_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_location(
self,
request: Optional[locations_pb2.GetLocationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Gets information about a location.
Args:
request (:class:`~.location_pb2.GetLocationRequest`):
The request object. Request message for
`GetLocation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.Location:
Location object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.GetLocationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.get_location,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_locations(
self,
request: Optional[locations_pb2.ListLocationsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Lists information about the supported locations for this service.
Args:
request (:class:`~.location_pb2.ListLocationsRequest`):
The request object. Request message for
`ListLocations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.ListLocationsResponse:
Response message for ``ListLocations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.ListLocationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._client._transport.list_locations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self) -> "MetadataServiceAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
__all__ = ("MetadataServiceAsyncClient",)
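# Illustrative usage sketch (not part of the generated surface): driving the
# client as an async context manager and listing long-running operations. The
# parent resource name below is a placeholder and Application Default
# Credentials are assumed to be configured; only names defined or imported in
# this module (MetadataServiceAsyncClient, operations_pb2) are relied upon.
# Run it with: asyncio.run(_example_list_operations())
async def _example_list_operations() -> None:
    async with MetadataServiceAsyncClient() as client:
        response = await client.list_operations(
            operations_pb2.ListOperationsRequest(
                name="projects/example-project/locations/us-central1"
            )
        )
        for operation in response.operations:
            print(operation.name)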
|
967f3b3d61b08717c2ffdeb62be971cc55628438
|
8af4e173ab3be9b9fc5cf1b61dbb5da80234d5c7
|
/tests/integration/build_invoke/java/test_java_8_al2.py
|
3a6c2dd3e2c64782add41fe537a177abfbea5c63
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-sam-cli-app-templates
|
d464da1665d84eda9f427f682b985538827d41b6
|
88380eb265d58c496ea80685d4a5701e3cfc13d2
|
refs/heads/master
| 2023-09-04T13:03:00.204479
| 2023-08-23T20:43:24
| 2023-08-23T20:43:24
| 211,362,544
| 354
| 230
|
Apache-2.0
| 2023-09-14T15:39:09
| 2019-09-27T16:42:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,488
|
py
|
test_java_8_al2.py
|
import pytest
from tests.integration.build_invoke.build_invoke_base import BuildInvokeBase
"""
For each template, it will test the following sam commands:
1. sam init
2. sam build --use-container (if self.use_container is False, --use-container will be omitted)
3. (if there are event JSONs) for each event JSON, check that the `sam local invoke` response is valid JSON
"""
class BuildInvoke_java8_al2_cookiecutter_aws_sam_hello_java_gradle(
BuildInvokeBase.HelloWorldWithLocationBuildInvokeBase
):
directory = "java8.al2/hello-gradle"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_hello_java_maven(
BuildInvokeBase.HelloWorldWithLocationBuildInvokeBase
):
directory = "java8.al2/hello-maven"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_gradle(
BuildInvokeBase.EventBridgeHelloWorldBuildInvokeBase
):
directory = "java8.al2/event-bridge-gradle"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_maven(
BuildInvokeBase.EventBridgeHelloWorldBuildInvokeBase
):
directory = "java8.al2/event-bridge-maven"
@pytest.mark.skip("eventbridge schema app requires credential to pull missing files, skip")
class BuildInvoke_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_gradle(BuildInvokeBase.BuildInvokeBase):
directory = "java8.al2/event-bridge-schema-gradle"
@pytest.mark.skip("eventbridge schema app requires credential to pull missing files, skip")
class BuildInvoke_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_maven(BuildInvokeBase.BuildInvokeBase):
directory = "java8.al2/event-bridge-schema-maven"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_powertools_hello_java_maven(BuildInvokeBase.BuildInvokeBase):
directory = "java8.al2/hello-pt-maven"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_gradle(BuildInvokeBase.BuildInvokeBase):
directory = "java8.al2/step-func-gradle"
class BuildInvoke_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_maven(BuildInvokeBase.BuildInvokeBase):
directory = "java8.al2/step-func-maven"
class BuildInvoke_image_java8_al2_cookiecutter_aws_sam_hello_java_gradle_lambda_image(
BuildInvokeBase.HelloWorldWithLocationBuildInvokeBase
):
directory = "java8.al2/hello-img-gradle"
class BuildInvoke_image_java8_al2_cookiecutter_aws_sam_hello_java_maven_lambda_image(
BuildInvokeBase.HelloWorldWithLocationBuildInvokeBase
):
directory = "java8.al2/hello-img-maven"
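# Illustrative sketch: covering an additional java8.al2 template only requires
# another subclass pointing at its directory; the shared base class then drives
# `sam init`, `sam build` and `sam local invoke` as described in the module
# docstring. The directory below is a made-up placeholder, so the class is skipped.
@pytest.mark.skip("illustrative placeholder - no such template directory")
class BuildInvoke_java8_al2_example_new_template(BuildInvokeBase.BuildInvokeBase):
    directory = "java8.al2/example-new-template"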
|
ec5ac3ebd9a27ea2ec4ab5ed31d09bb1938bd7eb
|
ca10e5645aa2e8152d6219d31ac77d3ed50096c0
|
/bindings/python/capstone/mos65xx_const.py
|
d9e0546a22b3ed4cba4f95ebd8e76af660725518
|
[
"BSD-3-Clause",
"NCSA"
] |
permissive
|
capstone-engine/capstone
|
fc4f1b14eded800818f2ed64eafaf342e6046f9b
|
f036d2dbb6a9f0d1e0dc9c14b4f44878aeed260a
|
refs/heads/next
| 2023-09-02T14:38:15.356818
| 2023-08-30T03:13:17
| 2023-08-30T03:13:17
| 14,735,429
| 1,390
| 292
|
NOASSERTION
| 2023-09-14T20:47:20
| 2013-11-27T02:32:11
|
C
|
UTF-8
|
Python
| false
| false
| 3,277
|
py
|
mos65xx_const.py
|
from . import CS_OP_INVALID, CS_OP_REG, CS_OP_IMM, CS_OP_FP, CS_OP_PRED, CS_OP_SPECIAL, CS_OP_MEM
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [mos65xx_const.py]
MOS65XX_REG_INVALID = 0
MOS65XX_REG_ACC = 1
MOS65XX_REG_X = 2
MOS65XX_REG_Y = 3
MOS65XX_REG_P = 4
MOS65XX_REG_SP = 5
MOS65XX_REG_DP = 6
MOS65XX_REG_B = 7
MOS65XX_REG_K = 8
MOS65XX_REG_ENDING = 9
MOS65XX_AM_NONE = 0
MOS65XX_AM_IMP = 1
MOS65XX_AM_ACC = 2
MOS65XX_AM_IMM = 3
MOS65XX_AM_REL = 4
MOS65XX_AM_INT = 5
MOS65XX_AM_BLOCK = 6
MOS65XX_AM_ZP = 7
MOS65XX_AM_ZP_X = 8
MOS65XX_AM_ZP_Y = 9
MOS65XX_AM_ZP_REL = 10
MOS65XX_AM_ZP_IND = 11
MOS65XX_AM_ZP_X_IND = 12
MOS65XX_AM_ZP_IND_Y = 13
MOS65XX_AM_ZP_IND_LONG = 14
MOS65XX_AM_ZP_IND_LONG_Y = 15
MOS65XX_AM_ABS = 16
MOS65XX_AM_ABS_X = 17
MOS65XX_AM_ABS_Y = 18
MOS65XX_AM_ABS_IND = 19
MOS65XX_AM_ABS_X_IND = 20
MOS65XX_AM_ABS_IND_LONG = 21
MOS65XX_AM_ABS_LONG = 22
MOS65XX_AM_ABS_LONG_X = 23
MOS65XX_AM_SR = 24
MOS65XX_AM_SR_IND_Y = 25
MOS65XX_INS_INVALID = 0
MOS65XX_INS_ADC = 1
MOS65XX_INS_AND = 2
MOS65XX_INS_ASL = 3
MOS65XX_INS_BBR = 4
MOS65XX_INS_BBS = 5
MOS65XX_INS_BCC = 6
MOS65XX_INS_BCS = 7
MOS65XX_INS_BEQ = 8
MOS65XX_INS_BIT = 9
MOS65XX_INS_BMI = 10
MOS65XX_INS_BNE = 11
MOS65XX_INS_BPL = 12
MOS65XX_INS_BRA = 13
MOS65XX_INS_BRK = 14
MOS65XX_INS_BRL = 15
MOS65XX_INS_BVC = 16
MOS65XX_INS_BVS = 17
MOS65XX_INS_CLC = 18
MOS65XX_INS_CLD = 19
MOS65XX_INS_CLI = 20
MOS65XX_INS_CLV = 21
MOS65XX_INS_CMP = 22
MOS65XX_INS_COP = 23
MOS65XX_INS_CPX = 24
MOS65XX_INS_CPY = 25
MOS65XX_INS_DEC = 26
MOS65XX_INS_DEX = 27
MOS65XX_INS_DEY = 28
MOS65XX_INS_EOR = 29
MOS65XX_INS_INC = 30
MOS65XX_INS_INX = 31
MOS65XX_INS_INY = 32
MOS65XX_INS_JML = 33
MOS65XX_INS_JMP = 34
MOS65XX_INS_JSL = 35
MOS65XX_INS_JSR = 36
MOS65XX_INS_LDA = 37
MOS65XX_INS_LDX = 38
MOS65XX_INS_LDY = 39
MOS65XX_INS_LSR = 40
MOS65XX_INS_MVN = 41
MOS65XX_INS_MVP = 42
MOS65XX_INS_NOP = 43
MOS65XX_INS_ORA = 44
MOS65XX_INS_PEA = 45
MOS65XX_INS_PEI = 46
MOS65XX_INS_PER = 47
MOS65XX_INS_PHA = 48
MOS65XX_INS_PHB = 49
MOS65XX_INS_PHD = 50
MOS65XX_INS_PHK = 51
MOS65XX_INS_PHP = 52
MOS65XX_INS_PHX = 53
MOS65XX_INS_PHY = 54
MOS65XX_INS_PLA = 55
MOS65XX_INS_PLB = 56
MOS65XX_INS_PLD = 57
MOS65XX_INS_PLP = 58
MOS65XX_INS_PLX = 59
MOS65XX_INS_PLY = 60
MOS65XX_INS_REP = 61
MOS65XX_INS_RMB = 62
MOS65XX_INS_ROL = 63
MOS65XX_INS_ROR = 64
MOS65XX_INS_RTI = 65
MOS65XX_INS_RTL = 66
MOS65XX_INS_RTS = 67
MOS65XX_INS_SBC = 68
MOS65XX_INS_SEC = 69
MOS65XX_INS_SED = 70
MOS65XX_INS_SEI = 71
MOS65XX_INS_SEP = 72
MOS65XX_INS_SMB = 73
MOS65XX_INS_STA = 74
MOS65XX_INS_STP = 75
MOS65XX_INS_STX = 76
MOS65XX_INS_STY = 77
MOS65XX_INS_STZ = 78
MOS65XX_INS_TAX = 79
MOS65XX_INS_TAY = 80
MOS65XX_INS_TCD = 81
MOS65XX_INS_TCS = 82
MOS65XX_INS_TDC = 83
MOS65XX_INS_TRB = 84
MOS65XX_INS_TSB = 85
MOS65XX_INS_TSC = 86
MOS65XX_INS_TSX = 87
MOS65XX_INS_TXA = 88
MOS65XX_INS_TXS = 89
MOS65XX_INS_TXY = 90
MOS65XX_INS_TYA = 91
MOS65XX_INS_TYX = 92
MOS65XX_INS_WAI = 93
MOS65XX_INS_WDM = 94
MOS65XX_INS_XBA = 95
MOS65XX_INS_XCE = 96
MOS65XX_INS_ENDING = 97
MOS65XX_GRP_INVALID = 0
MOS65XX_GRP_JUMP = 1
MOS65XX_GRP_CALL = 2
MOS65XX_GRP_RET = 3
MOS65XX_GRP_INT = 4
MOS65XX_GRP_IRET = 5
MOS65XX_GRP_BRANCH_RELATIVE = 6
MOS65XX_GRP_ENDING = 7
MOS65XX_OP_INVALID = 0
MOS65XX_OP_REG = 1
MOS65XX_OP_IMM = 2
MOS65XX_OP_MEM = 3
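# Illustrative sketch (appended for documentation, not part of the generated
# constants): disassembling two 6502 instructions (LDA #$01, then TAX) and
# matching the resulting instruction ids against the MOS65XX_INS_* values
# above. Assumes a Capstone build with MOS65XX support.
def _example_disasm():
    from capstone import Cs, CS_ARCH_MOS65XX, CS_MODE_LITTLE_ENDIAN
    md = Cs(CS_ARCH_MOS65XX, CS_MODE_LITTLE_ENDIAN)
    ids = [insn.id for insn in md.disasm(b"\xa9\x01\xaa", 0x1000)]
    # expected: [MOS65XX_INS_LDA, MOS65XX_INS_TAX]
    return ids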
|
e1af7e2917bc76b1e811b895ba95bd74276c9bb3
|
73dbe07000651827e2937d728d0c5acf903932e2
|
/farm/data_handler/utils.py
|
45406c7252ef80bc52c37bdf48ce03e02294e027
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/FARM
|
96a5c7a2b93dcf60f4bc208a6706be0cb07bcd43
|
5919538f721c7974ea951b322d30a3c0e84a1bc2
|
refs/heads/master
| 2023-08-21T23:50:50.414602
| 2022-08-31T09:45:24
| 2022-08-31T09:45:24
| 197,409,619
| 1,765
| 283
|
Apache-2.0
| 2023-08-12T04:20:09
| 2019-07-17T14:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 31,083
|
py
|
utils.py
|
import hashlib
import json
import logging
import os
import random
import tarfile
import tempfile
import string
import uuid
from contextlib import ExitStack
from itertools import islice
from pathlib import Path
import pandas as pd
from requests import get
from tqdm import tqdm
from typing import List
from farm.file_utils import http_get
from farm.modeling.tokenization import tokenize_with_metadata
logger = logging.getLogger(__name__)
DOWNSTREAM_TASK_MAP = {
"gnad": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/gnad.tar.gz",
"germeval14": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/germeval14.tar.gz",
# only has train.tsv and test.tsv dataset - no dev.tsv
"germeval18": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/germeval18.tar.gz",
"squad20": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/squad20.tar.gz",
"covidqa": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/covidqa.tar.gz",
"conll03detrain": "https://raw.githubusercontent.com/MaviccPRP/ger_ner_evals/master/corpora/conll2003/deu.train",
"conll03dedev": "https://raw.githubusercontent.com/MaviccPRP/ger_ner_evals/master/corpora/conll2003/deu.testa", #https://www.clips.uantwerpen.be/conll2003/ner/000README says testa is dev data
"conll03detest": "https://raw.githubusercontent.com/MaviccPRP/ger_ner_evals/master/corpora/conll2003/deu.testb",
"conll03entrain": "https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.train",
"conll03endev": "https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testa",
"conll03entest": "https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testb",
"cord_19": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/cord_19.tar.gz",
"lm_finetune_nips": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/lm_finetune_nips.tar.gz",
"toxic-comments": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/toxic-comments.tar.gz",
'cola': "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/cola.tar.gz",
"asnq_binary": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/asnq_binary.tar.gz",
"germeval17": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/germeval17.tar.gz",
"natural_questions": "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/natural_questions.tar.gz",
}
def read_tsv(filename, rename_columns, quotechar='"', delimiter="\t", skiprows=None, header=0, proxies=None, max_samples=None):
"""Reads a tab separated value file. Tries to download the data if filename is not found"""
# get remote dataset if needed
if not (os.path.exists(filename)):
logger.info(f" Couldn't find {filename} locally. Trying to download ...")
_download_extract_downstream_data(filename, proxies=proxies)
# read file into df - but only read those cols we need
columns_needed = list(rename_columns.keys())
df = pd.read_csv(
filename,
sep=delimiter,
encoding="utf-8",
quotechar=quotechar,
dtype=str,
skiprows=skiprows,
header=header,
usecols=columns_needed,
)
if max_samples:
df = df.sample(max_samples)
# let's rename our target columns to the default names FARM expects:
# "text": contains the text
# "text_classification_label": contains a label for text classification
df.rename(columns=rename_columns, inplace=True)
df.fillna("", inplace=True)
# convert df to one dict per row
raw_dict = df.to_dict(orient="records")
return raw_dict
def read_tsv_sentence_pair(filename, rename_columns, delimiter="\t", skiprows=None, header=0, proxies=None, max_samples=None):
"""Reads a tab separated value file. Tries to download the data if filename is not found"""
# get remote dataset if needed
if not (os.path.exists(filename)):
logger.info(f" Couldn't find {filename} locally. Trying to download ...")
_download_extract_downstream_data(filename, proxies=proxies)
# TODO quote_char was causing trouble for the asnq dataset so it has been removed - see if there's a better solution
df = pd.read_csv(
filename,
sep=delimiter,
encoding="utf-8",
dtype=str,
skiprows=skiprows,
header=header
)
if max_samples:
df = df.sample(max_samples)
# let's rename our target columns to the default names FARM expects:
# "text": contains the text
# "text_classification_label": contains a label for text classification
columns = ["text"] + ["text_b"] + list(rename_columns.keys())
df = df[columns]
for source_column, label_name in rename_columns.items():
df[label_name] = df[source_column].fillna("")
df.drop(columns=[source_column], inplace=True)
# convert df to one dict per row
raw_dict = df.to_dict(orient="records")
return raw_dict
def read_jsonl(file, proxies=None):
# get remote dataset if needed
if not (os.path.exists(file)):
logger.info(f" Couldn't find {file} locally. Trying to download ...")
_download_extract_downstream_data(file, proxies=proxies)
dicts = [json.loads(l) for l in open(file, encoding="utf-8")]
return dicts
def read_ner_file(filename, sep="\t", proxies=None):
"""
Read an NER dataset file.
Return format:
[ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ]
"""
# checks for correct separator
if "conll03-de" in str(filename):
if sep != " ":
logger.error(f"Separator {sep} for dataset German CONLL03 does not match the requirements. Setting separator to whitespace")
sep = " "
if "germeval14" in str(filename):
if sep != "\t":
logger.error(f"Separator {sep} for dataset GermEval14 de does not match the requirements. Setting separator to tab")
sep = "\t"
if not (os.path.exists(filename)):
logger.info(f" Couldn't find {filename} locally. Trying to download ...")
_download_extract_downstream_data(filename, proxies)
if "conll03-de" in str(filename):
f = open(filename, encoding='cp1252')
else:
f = open(filename, encoding='utf-8')
data = []
sentence = []
label = []
for line in f:
if line.startswith("#"):
continue
if len(line) == 0 or "-DOCSTART-" in line or line[0] == "\n":
if len(sentence) > 0:
if "conll03" in str(filename):
_convertIOB1_to_IOB2(label)
if "germeval14" in str(filename):
label = _convert_germeval14_labels(label)
data.append({"text": " ".join(sentence), "ner_label": label})
sentence = []
label = []
continue
splits = line.split(sep)
# adjusting to data format in Germeval14
# Germeval14 has two levels of annotation. E.g. "Universität Berlin" is both ORG and LOC. We only take the first level.
if "germeval14" in str(filename):
sentence.append(splits[1])
label.append(splits[-2])
else:
sentence.append(splits[0])
label.append(splits[-1][:-1])
# handling end of file, adding the last sentence to data
if len(sentence) > 0:
if(label[-1] == ""):
logger.error(f"The last NER label: '{splits[-1]}' in your dataset might have been converted incorrectly. Please insert a newline at the end of the file.")
label[-1] = "O"
if "conll03-de" in str(filename):
_convertIOB1_to_IOB2(label)
if "germeval14" in str(filename):
label = _convert_germeval14_labels(label)
data.append({"text": " ".join(sentence), "ner_label": label})
return data
def read_dpr_json(file, max_samples=None, proxies=None, num_hard_negatives=1, num_positives=1, shuffle_negatives=True, shuffle_positives=False):
"""
Reads a Dense Passage Retrieval (DPR) data file in json format and returns a list of dictionaries.
:param file: filename of DPR data in json format
Returns:
list of dictionaries: List[dict]
each dictionary: {
"query": str -> query_text
"passages": List[dictionaries] -> [{"text": document_text, "title": xxx, "label": "positive", "external_id": abb123},
{"text": document_text, "title": xxx, "label": "hard_negative", "external_id": abb134},
...]
}
example:
["query": 'who sings does he love me with reba'
"passages" : [{'title': 'Does He Love You',
'text': 'Does He Love You "Does He Love You" is a song written by Sandy Knox and Billy Stritch, and recorded as a duet by American country music artists Reba McEntire and Linda Davis. It was released in August 1993 as the first single from Reba\'s album "Greatest Hits Volume Two". It is one of country music\'s several songs about a love triangle. "Does He Love You" was written in 1982 by Billy Stritch. He recorded it with a trio in which he performed at the time, because he wanted a song that could be sung by the other two members',
'label': 'positive',
'external_id': '11828866'},
{'title': 'When the Nightingale Sings',
'text': "When the Nightingale Sings When The Nightingale Sings is a Middle English poem, author unknown, recorded in the British Library's Harley 2253 manuscript, verse 25. It is a love poem, extolling the beauty and lost love of an unknown maiden. When þe nyhtegale singes þe wodes waxen grene.<br> Lef ant gras ant blosme springes in aueryl y wene,<br> Ant love is to myn herte gon wiþ one spere so kene<br> Nyht ant day my blod hit drynkes myn herte deþ me tene. Ich have loved al þis er þat y may love namore,<br> Ich have siked moni syk lemmon for",
'label': 'hard_negative',
'external_id': '10891637'}]
]
"""
# get remote dataset if needed
if not (os.path.exists(file)):
logger.info(f" Couldn't find {file} locally. Trying to download ...")
_download_extract_downstream_data(file, proxies=proxies)
if file.suffix.lower() == ".jsonl":
dicts = []
with open(file, encoding='utf-8') as f:
for line in f:
dicts.append(json.loads(line))
else:
dicts = json.load(open(file, encoding='utf-8'))
if max_samples:
dicts = random.sample(dicts, min(max_samples, len(dicts)))
# convert DPR dictionary to standard dictionary
query_json_keys = ["question", "questions", "query"]
positive_context_json_keys = ["positive_contexts", "positive_ctxs", "positive_context", "positive_ctx"]
hard_negative_json_keys = ["hard_negative_contexts", "hard_negative_ctxs", "hard_negative_context", "hard_negative_ctx"]
standard_dicts = []
for dict in dicts:
sample = {}
passages = []
for key, val in dict.items():
if key in query_json_keys:
sample["query"] = val
elif key in positive_context_json_keys:
if shuffle_positives:
random.shuffle(val)
for passage in val[:num_positives]:
passages.append({
"title": passage["title"],
"text": passage["text"],
"label": "positive",
"external_id": passage.get("passage_id", uuid.uuid4().hex.upper()[0:8])
})
elif key in hard_negative_json_keys:
if shuffle_negatives:
random.shuffle(val)
for passage in val[:num_hard_negatives]:
passages.append({
"title": passage["title"],
"text": passage["text"],
"label": "hard_negative",
"external_id": passage.get("passage_id", uuid.uuid4().hex.upper()[0:8])
})
sample["passages"] = passages
standard_dicts.append(sample)
return standard_dicts
def _convert_germeval14_labels(tags: List[str]):
newtags = []
for tag in tags:
tag = tag.replace("part","")
tag = tag.replace("deriv","")
newtags.append(tag)
return newtags
def _convertIOB1_to_IOB2(tags: List[str]):
"""
script taken from: https://gist.github.com/allanj/b9bd448dc9b70d71eb7c2b6dd33fe4ef
IOB1: O I I B I
IOB2: O B I B I
Check that tags have a valid IOB format.
Tags in IOB1 format are converted to IOB2.
"""
for i, tag in enumerate(tags):
if tag == 'O':
continue
split = tag.split('-')
if len(split) != 2 or split[0] not in ['I', 'B']:
return False
if split[0] == 'B':
continue
elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
return True
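# Illustrative example (assumed tags, not from the FARM test suite): the helper
# above rewrites IOB1 tags to IOB2 in place and returns True when every tag is
# well formed, mirroring the "O I I B I -> O B I B I" pattern in its docstring.
def _example_convert_iob1_to_iob2():
    tags = ["O", "I-PER", "I-PER", "B-PER", "I-PER"]  # IOB1
    ok = _convertIOB1_to_IOB2(tags)  # converts in place
    assert ok and tags == ["O", "B-PER", "I-PER", "B-PER", "I-PER"]  # IOB2
    return tags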
def read_squad_file(filename, proxies=None):
"""Read a SQuAD json file"""
if not (os.path.exists(filename)):
logger.info(f" Couldn't find {filename} locally. Trying to download ...")
_download_extract_downstream_data(filename, proxies)
with open(filename, "r", encoding="utf-8") as reader:
input_data = json.load(reader)["data"]
return input_data
def write_squad_predictions(predictions, out_filename, predictions_filename=None):
predictions_json = {}
for x in predictions:
for p in x["predictions"]:
if p["answers"][0]["answer"] is not None:
predictions_json[p["question_id"]] = p["answers"][0]["answer"]
else:
predictions_json[p["question_id"]] = "" #convert No answer = None to format understood by the SQuAD eval script
if predictions_filename:
dev_labels = {}
temp = json.load(open(predictions_filename, "r"))
for d in temp["data"]:
for p in d["paragraphs"]:
for q in p["qas"]:
if q.get("is_impossible",False):
dev_labels[q["id"]] = "is_impossible"
else:
dev_labels[q["id"]] = q["answers"][0]["text"]
not_included = set(list(dev_labels.keys())) - set(list(predictions_json.keys()))
if len(not_included) > 0:
logger.info(f"There were missing predictions for question ids: {list(not_included)}")
for x in not_included:
predictions_json[x] = ""
# os.makedirs("model_output", exist_ok=True)
# filepath = Path("model_output") / out_filename
json.dump(predictions_json, open(out_filename, "w"))
logger.info(f"Written Squad predictions to: {out_filename}")
def _get_md5checksum(fname):
# solution from stackoverflow: https://stackoverflow.com/a/3431838
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _download_extract_downstream_data(input_file, proxies=None):
# download archive to temp dir and extract to correct position
full_path = Path(os.path.realpath(input_file))
directory = full_path.parent
taskname = directory.stem
datadir = directory.parent
logger.info(
"downloading and extracting file {} to dir {}".format(taskname, datadir)
)
if "conll03-" in taskname:
# conll03 is copyrighted, but luckily somebody put it on github. Kudos!
if not os.path.exists(directory):
os.makedirs(directory)
for dataset in ["train", "dev", "test"]:
if "de" in taskname:
_conll03get(dataset, directory, "de")
elif "en" in taskname:
_conll03get(dataset, directory, "en")
else:
logger.error("Cannot download {}. Unknown data source.".format(taskname))
elif taskname not in DOWNSTREAM_TASK_MAP:
logger.error("Cannot download {}. Unknown data source.".format(taskname))
else:
if os.name == "nt": # make use of NamedTemporaryFile compatible with Windows
delete_tmp_file = False
else:
delete_tmp_file = True
with tempfile.NamedTemporaryFile(delete=delete_tmp_file) as temp_file:
http_get(DOWNSTREAM_TASK_MAP[taskname], temp_file, proxies=proxies)
temp_file.flush()
temp_file.seek(0) # making tempfile accessible
# checking files for correctness with md5sum.
if("germeval14" in taskname):
if "2c9d5337d7a25b9a4bf6f5672dd091bc" != _get_md5checksum(temp_file.name):
logger.error(f"Someone has changed the file for {taskname}. Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
elif "germeval18" in taskname:
if "23244fa042dcc39e844635285c455205" != _get_md5checksum(temp_file.name):
logger.error(f"Someone has changed the file for {taskname}. Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
elif "gnad" in taskname:
if "ef62fe3f59c1ad54cf0271d8532b8f22" != _get_md5checksum(temp_file.name):
logger.error(f"Someone has changed the file for {taskname}. Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
elif "germeval17" in taskname:
if "f1bf67247dcfe7c3c919b7b20b3f736e" != _get_md5checksum(temp_file.name):
logger.error(f"Someone has changed the file for {taskname}. Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
tfile = tarfile.open(temp_file.name)
tfile.extractall(datadir)
# temp_file gets deleted here
def _conll03get(dataset, directory, language):
# open in binary mode
with open(directory / f"{dataset}.txt", "wb") as file:
# get request
response = get(DOWNSTREAM_TASK_MAP[f"conll03{language}{dataset}"])
# write to file
file.write(response.content)
# checking files for correctness with md5sum.
if f"conll03{language}{dataset}" == "conll03detrain":
if "ae4be68b11dc94e0001568a9095eb391" != _get_md5checksum(str(directory / f"{dataset}.txt")):
logger.error(
f"Someone has changed the file for conll03detrain. This data was collected from an external github repository.\n"
f"Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
elif f"conll03{language}{dataset}" == "conll03detest":
if "b8514f44366feae8f317e767cf425f28" != _get_md5checksum(str(directory / f"{dataset}.txt")):
logger.error(
f"Someone has changed the file for conll03detest. This data was collected from an external github repository.\n"
f"Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
elif f"conll03{language}{dataset}" == "conll03entrain":
if "11a942ce9db6cc64270372825e964d26" != _get_md5checksum(str(directory / f"{dataset}.txt")):
logger.error(
f"Someone has changed the file for conll03entrain. This data was collected from an external github repository.\n"
f"Please make sure the correct file is used and update the md5sum in farm/data_handler/utils.py")
def read_docs_from_txt(filename, delimiter="", encoding="utf-8", max_docs=None, proxies=None, disable_tqdm=True):
"""Reads a text file with one sentence per line and a delimiter between docs (default: empty lines)."""
if not (os.path.exists(filename)):
_download_extract_downstream_data(filename, proxies)
doc_count = 0
doc = []
prev_doc = None
corpus_lines = 0
with open(filename, "r", encoding=encoding) as f:
for line_num, line in enumerate(tqdm(f, desc="Loading Dataset", total=corpus_lines, disable=disable_tqdm)):
line = line.strip()
if line == delimiter:
if len(doc) > 0:
yield {"doc": doc}
doc_count += 1
prev_doc = doc
doc = []
if max_docs:
if doc_count >= max_docs:
logger.info(f"Reached number of max_docs ({max_docs}). Skipping rest of file ...")
break
else:
logger.warning(f"Found empty document in '{filename}' (line {line_num}). "
f"Make sure that you comply with the format: "
f"One sentence per line and exactly *one* empty line between docs. "
f"You might have multiple subsequent empty lines.")
else:
doc.append(line)
# if last row in file is not empty, we add the last parsed doc manually to all_docs
if len(doc) > 0:
if doc_count > 0:
if doc != prev_doc:
yield {"doc": doc}
doc_count += 1
else:
yield {"doc": doc}
doc_count += 1
if doc_count < 2:
raise ValueError(f"Found only {doc_count} docs in {filename}. You need at least 2! \n"
f"Make sure that you comply with the format: \n"
f"-> One sentence per line and exactly *one* empty line between docs. \n"
f"You might have a single block of text without empty lines in between.")
def pad(seq, max_seq_len, pad_token, pad_on_left=False):
ret = seq
n_required_pad = max_seq_len - len(seq)
for _ in range(n_required_pad):
if pad_on_left:
ret.insert(0, pad_token)
else:
ret.append(pad_token)
return ret
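# Illustrative example (not part of FARM): pad() extends a sequence to
# max_seq_len with pad_token, appending on the right by default or prepending
# when pad_on_left is set. Note that it mutates the passed-in list.
def _example_pad():
    assert pad([1, 2, 3], 5, 0) == [1, 2, 3, 0, 0]
    assert pad([1, 2, 3], 5, 0, pad_on_left=True) == [0, 0, 1, 2, 3]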
def expand_labels(labels_word, initial_mask, non_initial_token):
# For inference mode
if not labels_word:
return None
labels_token = []
word_index = 0
for im in initial_mask:
if im:
# i.e. if token is word initial
labels_token.append(labels_word[word_index])
word_index += 1
else:
# i.e. token is not the first in the word
labels_token.append(non_initial_token)
assert len(labels_token) == len(initial_mask)
return labels_token
def get_sentence_pair(doc, all_baskets, idx, prob_next_sentence=0.5):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param doc: The current document
:param all_baskets: SampleBaskets containing multiple other docs from which we can sample the second sentence
if we need a random one.
:param idx: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
sent_1, sent_2 = doc[idx], doc[idx + 1]
if random.random() > prob_next_sentence:
label = True
else:
sent_2 = _get_random_sentence(all_baskets, forbidden_doc=doc)
label = False
assert len(sent_1) > 0
assert len(sent_2) > 0
return sent_1, sent_2, label
def _get_random_sentence(all_baskets, forbidden_doc):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original BERT tf repo: This outer loop should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document we're processing.
sentence = None
for _ in range(100):
rand_doc_idx = random.randrange(len(all_baskets))
rand_doc = all_baskets[rand_doc_idx]
# check if our picked random doc is really different to our initial doc
if rand_doc != forbidden_doc:
rand_sent_idx = random.randrange(len(rand_doc))
sentence = rand_doc[rand_sent_idx]
break
if sentence is None:
raise Exception("Failed to pick out a suitable random substitute for next sentence")
return sentence
# return sequence_a, sequence_b, sample_in_clear_text, num_unused_segments
def _get_random_doc(all_baskets, forbidden_doc):
random_doc = None
for _ in range(100):
rand_doc_idx = random.randrange(len(all_baskets))
random_doc = all_baskets[rand_doc_idx]["doc"]
# check if random doc is different from initial doc
if random_doc != forbidden_doc:
break
if random_doc is None:
raise Exception("Failed to pick out a suitable random substitute for next sequence")
return random_doc
def join_sentences(sequence):
"""
Takes a list of subsequent, tokenized sentences and puts them together into one sequence.
:param sequence: List of tokenized sentences.
:type sequence: [dict]
:return: Tokenized sequence. (Dict with keys 'tokens', 'offsets' and 'start_of_word')
"""
sequence_joined = {
"tokens" : [],
"offsets" : [],
"start_of_word" : []
}
last_offset = 0
for sentence in sequence:
sequence_joined["tokens"].extend(sentence["tokens"])
sequence_joined["start_of_word"].extend(sentence["start_of_word"])
# get offsets right
current_offsets = [offset + last_offset for offset in sentence["offsets"]]
sequence_joined["offsets"].extend(current_offsets)
last_offset += sentence["offsets"][-1] + 2
return sequence_joined
def is_json(x):
if issubclass(type(x), Path):
return True
try:
json.dumps(x)
return True
except:
return False
def grouper(iterable, n, worker_id=0, total_workers=1):
"""
Split an iterable into a list of n-sized chunks. Each element in the chunk is a tuple of (index_num, element).
Example:
>>> list(grouper('ABCDEFG', 3))
[[(0, 'A'), (1, 'B'), (2, 'C')], [(3, 'D'), (4, 'E'), (5, 'F')], [(6, 'G')]]
Use with the StreamingDataSilo
When StreamingDataSilo is used with multiple PyTorch DataLoader workers, the generator
yielding dicts(that gets converted to datasets) is replicated across the workers.
To avoid duplicates, we split the dicts across workers by creating a new generator for
each worker using this method.
Input --> [dictA, dictB, dictC, dictD, dictE, ...] with total worker=3 and n=2
Output for worker 1: [(dictA, dictB), (dictG, dictH), ...]
Output for worker 2: [(dictC, dictD), (dictI, dictJ), ...]
Output for worker 3: [(dictE, dictF), (dictK, dictL), ...]
This method also adds an index number to every dict yielded.
:param iterable: a generator object that yields dicts
:type iterable: generator
:param n: the dicts are grouped in n-sized chunks that get converted to datasets
:type n: int
:param worker_id: the worker_id for the PyTorch DataLoader
:type worker_id: int
:param total_workers: total number of workers for the PyTorch DataLoader
:type total_workers: int
"""
# TODO make me comprehensible :)
def get_iter_start_pos(gen):
start_pos = worker_id * n
for i in gen:
if start_pos:
start_pos -= 1
continue
yield i
def filter_elements_per_worker(gen):
x = n
y = (total_workers - 1) * n
for i in gen:
if x:
yield i
x -= 1
else:
if y != 1:
y -= 1
continue
else:
x = n
y = (total_workers - 1) * n
iterable = iter(enumerate(iterable))
iterable = get_iter_start_pos(iterable)
if total_workers > 1:
iterable = filter_elements_per_worker(iterable)
return iter(lambda: list(islice(iterable, n)), [])
def split_file(filepath, output_dir, docs_per_file=1_000, delimiter="", encoding="utf-8"):
total_lines = sum(1 for line in open(filepath, encoding=encoding))
output_file_number = 1
doc_count = 0
lines_to_write = []
with ExitStack() as stack:
input_file = stack.enter_context(open(filepath, 'r', encoding=encoding))
for line_num, line in enumerate(tqdm(input_file, desc="Splitting file ...", total=total_lines)):
lines_to_write.append(line)
if line.strip() == delimiter:
doc_count += 1
if doc_count % docs_per_file == 0:
filename = output_dir / f"part_{output_file_number}"
os.makedirs(os.path.dirname(filename), exist_ok=True)
write_file = stack.enter_context(open(filename, 'w+', encoding=encoding, buffering=10 * 1024 * 1024))
write_file.writelines(lines_to_write)
write_file.close()
output_file_number += 1
lines_to_write = []
if lines_to_write:
filename = output_dir / f"part_{output_file_number}"
os.makedirs(os.path.dirname(filename), exist_ok=True)
write_file = stack.enter_context(open(filename, 'w+', encoding=encoding, buffering=10 * 1024 * 1024))
write_file.writelines(lines_to_write)
write_file.close()
logger.info(f"The input file {filepath} is split in {output_file_number} parts at {output_dir}.")
def generate_tok_to_ch_map(text):
""" Generates a mapping from token to character index when a string text is split using .split()
TODO e.g."""
map = [0]
follows_whitespace = False
for i, ch in enumerate(text):
if follows_whitespace:
if ch not in string.whitespace:
map.append(i)
follows_whitespace = False
else:
if ch in string.whitespace:
follows_whitespace = True
return map
def split_with_metadata(text):
""" Splits a string text by whitespace and also returns indexes, which is a mapping from token index
to character index"""
split_text = text.split()
indexes = generate_tok_to_ch_map(text)
assert len(split_text) == len(indexes)
return split_text, indexes
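# Illustrative example (not part of FARM): split_with_metadata() returns the
# whitespace-split tokens together with the character offset at which each
# token starts in the original string, i.e. the mapping generate_tok_to_ch_map
# builds above.
def _example_split_with_metadata():
    tokens, offsets = split_with_metadata("Berlin  is  big")
    assert tokens == ["Berlin", "is", "big"]
    assert offsets == [0, 8, 12]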
|
ba07ea021d3ac7d7d083d16f12854b81bbf4935a
|
022a1c6faec6face5d8dba35e67389db992b073b
|
/xrspatial/tests/test_terrain.py
|
00296fd11454b282479972c5e206005188145dcc
|
[
"MIT"
] |
permissive
|
makepath/xarray-spatial
|
698b34f58478eacd8e4f35d57193db4e7c71260f
|
27ab0c80b71faf706f75e00435dc071ba046224a
|
refs/heads/master
| 2023-07-19T19:20:30.614454
| 2023-07-10T09:09:54
| 2023-07-10T09:09:54
| 239,188,894
| 741
| 89
|
MIT
| 2023-07-10T09:09:56
| 2020-02-08T19:02:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
test_terrain.py
|
import dask.array as da
import numpy as np
import xarray as xr
from xrspatial import generate_terrain
from xrspatial.tests.general_checks import cuda_and_cupy_available
from xrspatial.utils import has_cuda_and_cupy
def create_test_arr(backend='numpy'):
W = 50
H = 50
data = np.zeros((H, W), dtype=np.float32)
raster = xr.DataArray(data, dims=['y', 'x'])
if has_cuda_and_cupy() and 'cupy' in backend:
import cupy
raster.data = cupy.asarray(raster.data)
if 'dask' in backend:
raster.data = da.from_array(raster.data, chunks=(10, 10))
return raster
def test_terrain_cpu():
# vanilla numpy version
data_numpy = create_test_arr()
terrain_numpy = generate_terrain(data_numpy)
# dask
data_dask = create_test_arr(backend='dask')
terrain_dask = generate_terrain(data_dask)
assert isinstance(terrain_dask.data, da.Array)
terrain_dask = terrain_dask.compute()
np.testing.assert_allclose(terrain_numpy.data, terrain_dask.data, rtol=1e-05, atol=1e-07)
@cuda_and_cupy_available
def test_terrain_gpu():
# vanilla numpy version
data_numpy = create_test_arr()
terrain_numpy = generate_terrain(data_numpy)
# cupy
data_cupy = create_test_arr(backend='cupy')
terrain_cupy = generate_terrain(data_cupy)
np.testing.assert_allclose(terrain_numpy.data, terrain_cupy.data.get(), rtol=1e-05, atol=1e-07)
|
8c26f45630b22353f33078fe7c8f00443e05f691
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/devel/py-ejson/files/patch-setup.py
|
7ff4d65860c97289ed9ea2b85d451531e24ebca2
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
patch-setup.py
|
--- setup.py.orig 2013-11-13 15:40:51 UTC
+++ setup.py
@@ -69,7 +69,7 @@ if __name__ == '__main__':
packages=find_packages(exclude=['*tests*']),
install_requires=install_requires,
dependency_links=dependency_links,
- classifiers=(
+ classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
@@ -78,5 +78,5 @@ if __name__ == '__main__':
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
- )
+ ]
)
|
53eeea8a34417a5fc271cf34ead7fcb671bf1e4a
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/dashboard/bq_export/bq_export/export_options.py
|
70124d327cb87443028c7c5acd0cad2761f545f5
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
export_options.py
|
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import pytz
from apache_beam.options.pipeline_options import PipelineOptions
def _YesterdayUTC():
return (datetime.datetime.utcnow() -
datetime.timedelta(days=1)).strftime('%Y%m%d')
class BqExportOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser): # pylint: disable=invalid-name
parser.add_value_provider_argument(
'--end_date',
help=('Last day of data to export in YYYYMMDD format, or special value '
'"yesterday". Default is yesterday. Timezone is always UTC.'),
default="yesterday")
parser.add_value_provider_argument(
'--num_days', help='Number of days data to export', type=int, default=1)
parser.add_argument(
'--table_suffix',
help='Suffix to add to table name (for dev purposes, e.g. "_test").',
default='')
parser.add_value_provider_argument(
'--dataset',
help='BigQuery dataset name. Overrideable for testing/dev purposes.',
default='chromeperf_dashboard_data')
def GetTimeRangeProvider(self):
"""Return an object with .Get() method that returns (start, end) tuple.
In other words, returns the time range specified by --end_date and
--num_days as a pair of datetime.datetime objects.
"""
return _TimeRangeProvider(self.end_date, self.num_days)
class _TimeRangeProvider(object):
"""A ValueProvider-like based on the end_date and num_days ValueProviders.
This class is a workaround for the lack of NestedValueProviders in Beam's
Python SDK.
"""
def __init__(self, end_date, num_days):
self._end_date = end_date
self._num_days = num_days
def Get(self):
return (self._StartTime(), self._EndTime())
def __str__(self):
return '_TimeRangeProvider({}, {})'.format(self._end_date, self._num_days)
def _EndAsDatetime(self):
# pylint: disable=access-member-before-definition
end_date = self._end_date.get()
if end_date == 'yesterday':
end_date = _YesterdayUTC()
return datetime.datetime.strptime(end_date,
'%Y%m%d').replace(tzinfo=pytz.UTC)
def _StartTime(self):
# pylint: disable=access-member-before-definition
return self._EndTime() - datetime.timedelta(days=self._num_days.get())
def _EndTime(self):
# We want to include all the timestamps during the given day, so return a
# timestamp at midnight of the _following_ day.
return self._EndAsDatetime() + datetime.timedelta(days=1)
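# Illustrative sketch (not part of the export pipeline): resolving the exported
# time range locally from flag-style arguments. The dates below are arbitrary
# examples; when the flags are supplied like this, Beam turns them into static
# value providers, so .Get() can be called immediately.
def _ExampleTimeRange():
  options = BqExportOptions(['--end_date=20230101', '--num_days=2'])
  # Start is midnight UTC two days before the end; the end is midnight after
  # the last exported day, so all of 2023-01-01 is included.
  return options.GetTimeRangeProvider().Get()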
|
edafd1a0f441e4c4e42bb7a78af3096242882d49
|
cfa35dc2ea93ee0eceb2399a9e6112e987579c09
|
/stonesoup/resampler/base.py
|
5b7874ce2ab937a15ece0d5357c587f768b63bf3
|
[
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] |
permissive
|
dstl/Stone-Soup
|
227e6a9e6fbdceca14af3f0259f311ec74095597
|
f24090cc919b3b590b84f965a3884ed1293d181d
|
refs/heads/main
| 2023-09-01T14:33:14.626428
| 2023-09-01T11:35:46
| 2023-09-01T11:35:46
| 98,420,803
| 315
| 126
|
MIT
| 2023-09-14T14:55:34
| 2017-07-26T12:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 80
|
py
|
base.py
|
from ..base import Base
class Resampler(Base):
"""Resampler base class"""
|
6236f876e2f572e594b35041ba35c0fd82f43b45
|
e0925c1a2c573c47ceea9bf3a91cea7609916ec7
|
/tests/tabular_output/test_vertical_table_adapter.py
|
359d9d971f7c5f31c59183ea8ef14d7ac7ee48ab
|
[
"BSD-3-Clause"
] |
permissive
|
dbcli/cli_helpers
|
15fcc079bc5b0c2e01f8ffe91ac66b9f1ce1f824
|
4e2c417f68bc07c72b508e107431569b0783c4ef
|
refs/heads/main
| 2022-11-22T05:19:56.874992
| 2022-10-08T14:44:09
| 2022-10-08T14:44:09
| 88,571,641
| 102
| 29
|
BSD-3-Clause
| 2022-10-22T07:45:27
| 2017-04-18T02:20:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
test_vertical_table_adapter.py
|
# -*- coding: utf-8 -*-
"""Test the vertical table formatter."""
from textwrap import dedent
from cli_helpers.compat import text_type
from cli_helpers.tabular_output import vertical_table_adapter
def test_vertical_table():
"""Test the default settings for vertical_table()."""
results = [("hello", text_type(123)), ("world", text_type(456))]
expected = dedent(
"""\
***************************[ 1. row ]***************************
name | hello
age | 123
***************************[ 2. row ]***************************
name | world
age | 456"""
)
assert expected == "\n".join(
vertical_table_adapter.adapter(results, ("name", "age"))
)
def test_vertical_table_customized():
"""Test customized settings for vertical_table()."""
results = [("john", text_type(47)), ("jill", text_type(50))]
expected = dedent(
"""\
-[ PERSON 1 ]-----
name | john
age | 47
-[ PERSON 2 ]-----
name | jill
age | 50"""
)
assert expected == "\n".join(
vertical_table_adapter.adapter(
results,
("name", "age"),
sep_title="PERSON {n}",
sep_character="-",
sep_length=(1, 5),
)
)
|
5703572366d54ec983f6ba9f50b642a27e812ef9
|
bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
|
/aries_cloudagent/protocols/discovery/v2_0/messages/disclosures.py
|
a48fcb003c41158618fcb1b77af5a868d6c73bf8
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
hyperledger/aries-cloudagent-python
|
f25d961e0717a4d703bf43df3e4b4bc8ec07b908
|
39cac36d8937ce84a9307ce100aaefb8bc05ec04
|
refs/heads/main
| 2023-09-01T15:37:05.353674
| 2023-08-31T14:13:06
| 2023-08-31T14:13:06
| 193,556,007
| 370
| 530
|
Apache-2.0
| 2023-09-14T17:59:34
| 2019-06-24T18:12:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
disclosures.py
|
"""Represents a feature discovery disclosure message."""
from typing import Mapping, Sequence
from marshmallow import EXCLUDE, Schema, ValidationError, fields
from .....messaging.agent_message import AgentMessage, AgentMessageSchema
from .....messaging.models.base import BaseModelError
from ..message_types import DISCLOSURES, PROTOCOL_PACKAGE
HANDLER_CLASS = f"{PROTOCOL_PACKAGE}.handlers.disclosures_handler.DisclosuresHandler"
class ProtocolOrGoalCodeDescriptorField(fields.Field):
"""ProtocolDescriptor or GoalCodeDescriptor for Marshmallow."""
def _serialize(self, value, attr, obj, **kwargs):
return value
def _deserialize(self, value, attr, data, **kwargs):
try:
GoalCodeDescriptorSchema().load(value)
return value
except ValidationError:
try:
ProtocolDescriptorSchema().load(value)
return value
except ValidationError:
raise BaseModelError(
"Field should be ProtocolDescriptor or GoalCodeDescriptor"
)
class ProtocolDescriptorSchema(Schema):
"""Schema for an entry in the protocols list."""
id = fields.Str(required=True)
feature_type = fields.Str(
required=True, data_key="feature-type", metadata={"description": "feature-type"}
)
roles = fields.List(
fields.Str(
metadata={
"description": "Role: requester or responder",
"example": "requester",
}
),
required=False,
allow_none=True,
metadata={"description": "List of roles"},
)
class GoalCodeDescriptorSchema(Schema):
"""Schema for an entry in the goal_code list."""
id = fields.Str(required=True)
feature_type = fields.Str(
required=True, data_key="feature-type", metadata={"description": "feature-type"}
)
class Disclosures(AgentMessage):
"""Represents a feature discovery disclosure, the response to a query message."""
class Meta:
"""Disclose metadata."""
handler_class = HANDLER_CLASS
message_type = DISCLOSURES
schema_class = "DisclosuresSchema"
def __init__(self, *, disclosures: Sequence[Mapping] = None, **kwargs):
"""Initialize disclose message object.
Args:
disclosures: A mapping of protocol names to a dictionary of properties
"""
super().__init__(**kwargs)
self.disclosures = list(disclosures) if disclosures else []
class DisclosuresSchema(AgentMessageSchema):
"""Disclose message schema used in serialization/deserialization."""
class Meta:
"""DiscloseSchema metadata."""
model_class = Disclosures
unknown = EXCLUDE
disclosures = fields.List(
ProtocolOrGoalCodeDescriptorField(),
required=True,
metadata={"description": "List of protocol or goal_code descriptors"},
)
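# Illustrative sketch (placeholder values, not from the aries-cloudagent test
# suite): the shape of a protocol descriptor entry accepted by
# ProtocolOrGoalCodeDescriptorField when building a disclosures message.
def _example_disclosures() -> Disclosures:
    return Disclosures(
        disclosures=[
            {
                "id": "https://didcomm.org/example-protocol/1.0",
                "feature-type": "protocol",
                "roles": [],
            }
        ]
    )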
|
9f699581e9b1ce39a150c4cc75fce8962f0adc88
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/tdslifeway.py
|
3623a68c8c4415a61f8410d6523ac55ff9d4d3de
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
tdslifeway.py
|
from .common import InfoExtractor
class TDSLifewayIE(InfoExtractor):
_VALID_URL = r'https?://tds\.lifeway\.com/v1/trainingdeliverysystem/courses/(?P<id>\d+)/index\.html'
_TEST = {
# From http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers
'url': 'http://tds.lifeway.com/v1/trainingdeliverysystem/courses/3453494717001/index.html?externalRegistration=AssetId%7C34F466F1-78F3-4619-B2AB-A8EFFA55E9E9%21InstanceId%7C0%21UserId%7Caaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa&grouping=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&activity_id=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&content_endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2Fcontent%2F&actor=%7B%22name%22%3A%5B%22Guest%20Guest%22%5D%2C%22account%22%3A%5B%7B%22accountServiceHomePage%22%3A%22http%3A%2F%2Fscorm.lifeway.com%2F%22%2C%22accountName%22%3A%22aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa%22%7D%5D%2C%22objectType%22%3A%22Agent%22%7D&content_token=462a50b2-b6f9-4970-99b1-930882c499fb®istration=93d6ec8e-7f7b-4ed3-bbc8-a857913c0b2a&externalConfiguration=access%7CFREE%21adLength%7C-1%21assignOrgId%7C4AE36F78-299A-425D-91EF-E14A899B725F%21assignOrgParentId%7C%21courseId%7C%21isAnonymous%7Cfalse%21previewAsset%7Cfalse%21previewLength%7C-1%21previewMode%7Cfalse%21royalty%7CFREE%21sessionId%7C671422F9-8E79-48D4-9C2C-4EE6111EA1CD%21trackId%7C&auth=Basic%20OjhmZjk5MDBmLTBlYTMtNDJhYS04YjFlLWE4MWQ3NGNkOGRjYw%3D%3D&endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2F',
'info_dict': {
'id': '3453494717001',
'ext': 'mp4',
'title': 'The Gospel by Numbers',
'thumbnail': r're:^https?://.*\.jpg',
'upload_date': '20140410',
'description': 'Coming soon from T4G 2014!',
'uploader_id': '2034960640001',
'timestamp': 1397145591,
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['BrightcoveNew'],
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2034960640001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
brightcove_id = self._match_id(url)
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
|
78f1c4594f0b7d1feb6beb9f54f287ac8fa9080c
|
2ad93a1cf25a580fe980482d2d17a657de3b2523
|
/django-stubs/test/signals.pyi
|
cad768176ba9325c4eb6834d16ebebdd8e7de025
|
[
"MIT"
] |
permissive
|
typeddjango/django-stubs
|
f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32
|
0117348c3c7713f25f96b46e53ebdeed7bdba544
|
refs/heads/master
| 2023-08-25T19:42:52.707151
| 2023-08-23T15:13:25
| 2023-08-23T15:13:25
| 142,779,680
| 1,133
| 376
|
MIT
| 2023-09-13T19:05:06
| 2018-07-29T17:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 987
|
pyi
|
signals.pyi
|
from typing import Any
from django.core.signals import setting_changed as setting_changed # noqa: F401
template_rendered: Any
COMPLEX_OVERRIDE_SETTINGS: Any
def clear_cache_handlers(**kwargs: Any) -> None: ...
def update_installed_apps(**kwargs: Any) -> None: ...
def update_connections_time_zone(**kwargs: Any) -> None: ...
def clear_routers_cache(**kwargs: Any) -> None: ...
def reset_template_engines(**kwargs: Any) -> None: ...
def clear_serializers_cache(**kwargs: Any) -> None: ...
def language_changed(**kwargs: Any) -> None: ...
def localize_settings_changed(**kwargs: Any) -> None: ...
def file_storage_changed(**kwargs: Any) -> None: ...
def complex_setting_changed(**kwargs: Any) -> None: ...
def root_urlconf_changed(**kwargs: Any) -> None: ...
def static_storage_changed(**kwargs: Any) -> None: ...
def static_finders_changed(**kwargs: Any) -> None: ...
def auth_password_validators_changed(**kwargs: Any) -> None: ...
def user_model_swapped(**kwargs: Any) -> None: ...
|
9a33b17ac78b247673625598c93ae7891bd00ac6
|
add160f872dccc7f326dc00196fd418c748a89b1
|
/docs-source/api-python/conf.py
|
45dd7e84fe8fe95481260795ab643f63af012b54
|
[] |
no_license
|
openmm/openmm
|
22f3d6ae2747f54acfaa92a5a6a5869049019dee
|
d2593f386a627d069b5ec17a3a2f4ecd40d85dd1
|
refs/heads/master
| 2023-08-22T18:29:54.807240
| 2023-08-18T19:09:05
| 2023-08-18T19:09:05
| 10,178,188
| 875
| 324
| null | 2023-09-12T23:40:17
| 2013-05-20T17:42:52
|
C++
|
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
conf.py
|
# -*- coding: utf-8 -*-
import os
import sys
import openmm.version
extensions = [
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.autosummary",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"process-docstring",
]
autosummary_generate = True
autodoc_default_options = {
"members": True,
"inherited-members": True,
"member-order": "bysource",
}
source_suffix = ".rst"
master_doc = "index"
project = u"OpenMM Python API"
copyright = u"2015, Stanford University and the Authors"
version = openmm.version.short_version
release = openmm.version.full_version
exclude_patterns = ["_build", "_templates"]
html_static_path = ["_static"]
templates_path = ["_templates"]
pygments_style = "sphinx"
html_theme = "alabaster"
html_theme_options = {
"github_button": False,
"github_user": "openmm",
"github_repo": "openmm",
"logo_name": True,
"logo": "logo.png",
"extra_nav_links": [
{
"title": "OpenMM.org",
"uri": "https://openmm.org",
"relative": False,
},
{
"title": "User's Manual",
"uri": "../userguide/",
"relative": True,
},
{
"title": "Developer Guide",
"uri": "../developerguide/",
"relative": True,
},
{
"title": "C++ API reference",
"uri": "../api-c++/",
"relative": True,
},
{
"title": "GitHub",
"uri": "https://github.com/openmm",
"relative": False,
},
],
"show_relbar_bottom": True,
}
html_sidebars = {
"**": [
"about.html",
"searchbox.html",
"navigation.html",
]
}
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
396ecd561e05ce7a80479c13cc1efc196a76b8fe
|
965efc4d7a83c2b5592417aa7e0d25a51f5a8108
|
/backend/metering_billing/migrations/0102_alter_invoicelineitem_associated_subscription_record.py
|
093d53104d8bd401184a0bb981ad52784a8c2934
|
[
"MIT"
] |
permissive
|
uselotus/lotus
|
f4ee23bb828605215f18aacd1d6fcff8e0986c53
|
c065fb33ee1a870d72bbd2adfddc08d50ca049b6
|
refs/heads/main
| 2023-08-17T03:38:35.770580
| 2023-07-26T18:50:17
| 2023-07-26T18:50:17
| 516,192,901
| 1,447
| 100
|
MIT
| 2023-06-25T22:53:06
| 2022-07-21T02:06:46
|
Python
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
0102_alter_invoicelineitem_associated_subscription_record.py
|
# Generated by Django 4.0.5 on 2022-12-05 12:03
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("metering_billing", "0101_historicalsubscriptionrecord_fully_billed_and_more"),
]
operations = [
migrations.AlterField(
model_name="invoicelineitem",
name="associated_subscription_record",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="line_items",
to="metering_billing.subscriptionrecord",
),
),
]
|
524dc5c7e29c9548eea3632fabbe309b8f76be9b
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/nemo/collections/asr/parts/numba/rnnt_loss/utils/cuda_utils/gpu_rnnt_kernel.py
|
4153af060941afeff1c7afb41c53ab1d53077f59
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 69,815
|
py
|
gpu_rnnt_kernel.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import rnnt_helper
GPU_RNNT_THREAD_SIZE = 256
INF = 10000.0
@cuda.jit(device=True, inline=True)
def logp(
denom: torch.Tensor, acts: torch.Tensor, maxT: int, maxU: int, alphabet_size: int, mb: int, t: int, u: int, v: int
):
"""
Compute the sum of log probability from the activation tensor and its denominator.
Args:
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
mb: Batch indexer.
t: Acoustic sequence timestep indexer.
u: Target sequence timestep indexer.
v: Vocabulary token indexer.
Returns:
The sum of logprobs[mb, t, u, v] + denom[mb, t, u]
"""
col = (mb * maxT + t) * maxU + u
return denom[col] + acts[col * alphabet_size + v]
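# Illustrative note (not part of the original file): a worked example of the
# flattened indexing used by logp() above. The conceptual 4-D tensor
# acts[b, t, u, v] of shape [B, maxT, maxU, alphabet_size] is stored as a flat
# buffer, so element (b, t, u, v) lives at
#     ((b * maxT + t) * maxU + u) * alphabet_size + v
# and denom[b, t, u] lives at (b * maxT + t) * maxU + u. Assuming, for the
# sake of the example, maxT = 3, maxU = 4 and alphabet_size = 5, the entry
# (b=1, t=2, u=3, v=4) maps to col = (1 * 3 + 2) * 4 + 3 = 23 and to flat
# index 23 * 5 + 4 = 119 in `acts`.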
@cuda.jit(device=True, inline=True)
def logp_duration(acts: torch.Tensor, maxT: int, maxU: int, num_durations: int, mb: int, t: int, u: int, v: int):
col = (mb * maxT + t) * maxU + u
return acts[col * num_durations + v]
@cuda.jit()
def compute_alphas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
):
"""
Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# alphas += offset # pointer offset, ignored since we explicitly add offset
# Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# for t in range(1, T) step to initialize alphas[b, t, 0]
if t > 0 and t < T:
alphas[offset + t * maxU + u] = alphas[offset + (t - 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_
)
elif u < U:
# for u in range(1, U) step to initialize alphas[b, 0, u]
if t == 0:
alphas[offset + u] = alphas[offset + u - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, 0, u - 1, labels[u - 1]
)
# for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
elif t > 0 and t < T:
no_emit = alphas[offset + (t - 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_
)
emit = alphas[offset + t * maxU + u - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1]
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank] + denom[b, T-1, U-1] gives
# log-likelihood of forward pass.
if u == 0:
loglike = alphas[offset + (T - 1) * maxU + U - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_
)
llForward[b] = loglike
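# Hedged usage sketch (illustrative only; the real call site lives in NeMo's
# GPU RNNT wrapper, not here): per the "launch B blocks, each block has U
# threads" comment above, a minimal Numba launch of this kernel would look
# roughly like
#
#     compute_alphas_kernel[B, maxU](
#         acts, denom, alphas, llForward, xlen, ylen, mlabels,
#         B, maxT, maxU, alphabet_size, blank_idx,
#     )
#
# where every tensor argument is a flattened CUDA device buffer as described
# in the docstring, and `blank_idx` (a hypothetical name) is the blank token
# index. The production wrapper may also pass an explicit CUDA stream in the
# launch configuration.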
@cuda.jit()
def compute_betas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
):
"""
Compute beta (backward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# betas += offset # pointer offset, ignored since we explicitly add offset
# Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == (U - 1):
# for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
if t >= 0 and t < (T - 1):
betas[offset + t * maxU + U - 1] = betas[offset + (t + 1) * maxU + U - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_
)
elif u < U:
if t == T - 1:
# for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
betas[offset + (T - 1) * maxU + u] = betas[offset + (T - 1) * maxU + u + 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]
)
elif (t >= 0) and (t < T - 1):
# for t in reversed(range(T - 1)) for u in reversed(range(U - 1)) step to compute betas[b, t, u]
no_emit = betas[offset + (t + 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_
)
emit = betas[offset + t * maxU + u + 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u]
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives
# log-likelihood of backward pass.
if u == 0:
llBackward[b] = betas[offset]
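# Sanity-check note (illustrative, not part of the original file): for the
# same inputs, llForward[b] from compute_alphas_kernel and llBackward[b] from
# compute_betas_kernel estimate the same log-likelihood log P(labels | acts),
# so they should agree up to floating-point error; a large mismatch usually
# points at inconsistent xlen/ylen values or a mis-flattened activation
# buffer.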
@cuda.jit()
def compute_grad_kernel(
grads: torch.Tensor,
acts: torch.Tensor,
denom: torch.Tensor,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients over the transduction step.
Args:
grads: Zero Tensor of shape [B, T, U, V+1]. Is updated by this kernel to contain the gradients
of this batch of samples.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
Updates:
Kernel inplace updates the following inputs:
- grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary size are covered in (V + 1 // thread_buffer) number of steps.
# As such, each thread will perform the while loop at least (V + 1 // thread_buffer) number of times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
# If FastEmit regularization is enabled, calculate the gradient of probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]]) # y_hat(t, u)
+ betas[col + 1] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# // grad to last blank transition
# grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - logll[b])
if (idx == blank_) and (t == T - 1) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - logll[mb])
# grad of blank across t < T;
# grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - logll[b] + betas[b, t + 1, u])
if (idx == blank_) and (t < T - 1):
grad -= math.exp(alphas[col] + logpk - logll[mb] + betas[col + maxU])
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - logll[b] + betas[b, t, u+1])
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if (u < U - 1) and (idx == labels[u]):
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
grad -= math.exp(math.log1p(fastemit_lambda) + alphas[col] + logpk - logll[mb] + betas[col + 1])
# update grads[b, t, u, v] = grad
grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
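# Worked example (illustrative, not part of the original file) of the
# thread-buffer striding in the while loop above: with
# GPU_RNNT_THREAD_SIZE = 256 and, say, alphabet_size = 1030, the thread with
# tid = 5 processes vocabulary indices v = 5, 261, 517, 773, 1029, advancing
# by the buffer size on each iteration until idx >= alphabet_size. All 1030
# entries are therefore covered by 256 threads in ceil(1030 / 256) = 5 passes.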
@cuda.jit()
def compute_multiblank_alphas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
):
"""
Compute alpha (forward variable) probabilities for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT standard blank token in the vocabulary.
big_blank_duration: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
# Note: because of the logit under-normalization, every time logp() is called,
# it is always followed by a `-sigma` term.
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# for t in range(1, T) step to initialize alphas[b, t, 0]
if t > 0 and t < T:
alphas[offset + t * maxU + u] = (
alphas[offset + (t - 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_)
- sigma
)
# Now add the weights for big blanks.
for i in range(num_big_blanks):
if t >= big_blank_duration[i]:
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u],
alphas[offset + (t - big_blank_duration[i]) * maxU + u]
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - big_blank_duration[i], 0, blank_ - 1 - i
)
- sigma,
)
elif u < U:
# for u in range(1, U) step to initialize alphas[b, 0, u]
if t == 0:
alphas[offset + u] = (
alphas[offset + u - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, 0, u - 1, labels[u - 1])
- sigma
)
# for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
elif t > 0 and t < T:
no_emit = (
alphas[offset + (t - 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_)
- sigma
)
emit = (
alphas[offset + t * maxU + u - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1])
- sigma
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# Now add the weights for big blanks.
for i in range(num_big_blanks):
if t >= big_blank_duration[i]:
# big-blank weight here is
# alpha(t - duration, u) * p(big-blank | t - duration, u) / exp(sigma), in log domain
# do this for all big-blanks if the above condition is met
big_blank_no_emit = (
alphas[offset + (t - big_blank_duration[i]) * maxU + u]
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - big_blank_duration[i], u, blank_ - 1 - i
)
- sigma
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u], big_blank_no_emit
)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank] + denom[b, T-1, U-1] gives
# log-likelihood of forward pass.
if u == 0:
loglike = (
alphas[offset + (T - 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
)
# Now add the weights for big blanks for the final weight computation.
for i in range(num_big_blanks):
if T >= big_blank_duration[i]:
big_blank_loglike = (
alphas[offset + (T - big_blank_duration[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - big_blank_duration[i], U - 1, blank_ - 1 - i)
- sigma
)
loglike = rnnt_helper.log_sum_exp(loglike, big_blank_loglike)
llForward[b] = loglike
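# Illustrative note (not part of the original file): throughout the
# multi-blank kernels the i-th big blank reads its scores from vocabulary
# index blank_ - 1 - i. For example, assuming blank_ = 1030 and two big
# blanks with big_blank_duration = [2, 4], the duration-2 blank uses index
# 1029 and the duration-4 blank uses index 1028, while the standard
# (duration-1) blank keeps index blank_ itself.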
@cuda.jit()
def compute_multiblank_betas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
):
"""
Compute beta (backward variable) probabilities for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
acts: Tensor of shape [B, T, U, V + 1 + num-big-blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT standard blank token in the vocabulary.
big_blank_duration: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# Note: just like the alphas, because of the logit under-normalization, every time
# logp() is called, it is always followed by a `-sigma` term.
# Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = (
logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_) - sigma
)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == (U - 1):
# for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
if t >= 0 and t < (T - 1):
# beta[t, U - 1] = beta[t + 1, U - 1] * p(blank | t, U - 1) / exp(sigma)
# this part is the same as regular RNN-T.
betas[offset + t * maxU + U - 1] = (
betas[offset + (t + 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_)
- sigma
)
# now add the weights from big blanks
for i in range(num_big_blanks):
if t + big_blank_duration[i] < T:
# adding to beta[t, U - 1] of weight (in log domain),
# beta[t + duration, U - 1] * p(big-blank | t, U - 1) / exp(sigma)
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
betas[offset + (t + big_blank_duration[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_ - 1 - i)
- sigma,
)
elif t + big_blank_duration[i] == T and big_blank_duration[i] != 1:
# adding to beta[T - duration, U - 1] of weight (in log domain),
# p(big-blank | T - duration, U - 1) / exp(sigma)
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_ - 1 - i) - sigma,
)
elif u < U:
if t == T - 1:
# for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
betas[offset + (T - 1) * maxU + u] = (
betas[offset + (T - 1) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u])
- sigma
)
elif (t >= 0) and (t < T - 1):
# for t in reversed(range(T - 1)) for u in reversed(range(U - 1)) step to compute betas[b, t, u]
no_emit = (
betas[offset + (t + 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_)
- sigma
)
emit = (
betas[offset + t * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u])
- sigma
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# now add the weights from big blanks
for i in range(num_big_blanks):
if t < T - big_blank_duration[i]:
# added weight for the big-blank,
# beta[t + duration, u] * p(big-blank | t, u) / exp(sigma)
big_blank_no_emit = (
betas[offset + (t + big_blank_duration[i]) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_ - 1 - i)
- sigma
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + u], big_blank_no_emit
)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives
# log-likelihood of backward pass.
if u == 0:
llBackward[b] = betas[offset]
@cuda.jit()
def compute_multiblank_grad_kernel(
grads: torch.Tensor,
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
grads: Zero Tensor of shape [B, T, U, V + 1 + num_big_blanks]. Is updated by this kernel to contain the gradients
of this batch of samples.
acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
big_blank_duration: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary size are covered in (V + 1 // thread_buffer) number of steps.
# As such, each thread will perform the while loop at least (V + 1 // thread_buffer) number of times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
# In all of the following computation, whenever logpk is used, we
# need to subtract sigma based on our derivation of the gradient of
# the logit under-normalization method.
# If FastEmit regularization is enabled, calculate the gradient of probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]])
+ betas[col + 1] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- sigma
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# grad to last blank transition
# grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b])
if (idx == blank_) and (t == T - 1) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])
else:
# this is one difference of the multi-blank gradient from standard RNN-T
# gradient -- basically, wherever the blank_ symbol is addressed in the
# original code, we need to do similar things to big blanks, and we need
# to change the if conditions to match the duration of the big-blank.
# grad[b, T-duration, U-1, v=big-blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b])
for i in range(num_big_blanks):
if (idx == blank_ - 1 - i) and (t == T - big_blank_duration[i]) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])
# grad of blank across t < T;
# grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t + 1, u])
if (idx == blank_) and (t < T - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb] + betas[col + maxU])
else:
# This is another difference between multi-blank and RNN-T gradients.
# Now we consider gradients for big-blanks.
# grad[b, t<T-duration, u, v=big-blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t + duration, u])
for i in range(num_big_blanks):
if (idx == blank_ - 1 - i) and (t < T - big_blank_duration[i]):
grad -= math.exp(
alphas[col] + logpk - sigma - logll[mb] + betas[col + big_blank_duration[i] * maxU]
)
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t, u+1])
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if (u < U - 1) and (idx == labels[u]):
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
grad -= math.exp(
math.log1p(fastemit_lambda) + alphas[col] + logpk - sigma - logll[mb] + betas[col + 1]
)
# update grads[b, t, u, v] = grad
grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
@cuda.jit()
def compute_tdt_alphas_kernel(
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
):
"""
Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for duration.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor for tokens.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the TDT blank token in the vocabulary. Must be the last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# alphas += offset # pointer offset, ignored since we explicitly add offset
# Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# when u == 0, we only consider blank emissions.
if t > 0 and t < T:
alphas[offset + t * maxU + u] = -INF
for i in range(1, num_durations): # skip 0 since blank emission has to advance by at least one
if t >= durations[i]:
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u], # the current alpha value
alphas[offset + (t - durations[i]) * maxU + u] # alpha(t - duration, u)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u, blank_
) # logp of blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u, i
), # logp of duration
)
else:
break # since durations are in ascending order, when we encounter a duration that is too large, then
# there is no need to check larger durations after that.
elif u < U:
# when t == 0, we only consider the non-blank emission.
if t == 0:
alphas[offset + u] = (
alphas[offset + u - 1] # alpha(t, u - 1)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1]
) # logp of token emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, u - 1, 0
) # t = 0, so it must be duration = 0. Therefore the last argument passed to logp_duration() is 0.
)
# now we have t != 0 and u != 0, and we need to consider both non-blank and blank emissions.
elif t > 0 and t < T:
no_emit = -INF # no_emit stores the score for all blank emissions.
for i in range(1, num_durations):
if t >= durations[i]:
no_emit = rnnt_helper.log_sum_exp(
no_emit, # current score
alphas[offset + (t - durations[i]) * maxU + u] # alpha(t - duration, u)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u, blank_
) # logp of blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u, i
), # logp of duration
)
else:
break # we can exit the loop early here, same as the case for u == 0 above.
emit = -INF # emit stores the score for non-blank emissions.
for i in range(0, num_durations):
if t >= durations[i]:
emit = rnnt_helper.log_sum_exp(
emit, # current score
alphas[offset + (t - durations[i]) * maxU + u - 1] # alpha(t - duration, u - 1)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u - 1, labels[u - 1]
) # logp of non-blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u - 1, i
), # logp of duration
)
else:
break # we can exit the loop early here, same as the case for u == 0 above.
# combining blank and non-blank emissions.
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, the forward log-likelihood can be computed as the summation of
# alpha(T - duration, U - 1) + logp(blank, duration | T - duration, U - 1), over different durations.
if u == 0:
# first we consider duration = 1
loglike = (
alphas[offset + (T - 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, U - 1, 1)
)
# then we add the scores for duration > 1, if such durations are possible given the audio lengths.
for i in range(2, num_durations):
if T >= durations[i]:
big_blank_loglike = (
alphas[offset + (T - durations[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - durations[i], U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - durations[i], U - 1, i)
)
loglike = rnnt_helper.log_sum_exp(loglike, big_blank_loglike)
else:
break
llForward[b] = loglike
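# Illustrative note (not part of the original file): the duration loops above
# rely on `durations` being sorted in ascending order (hence the early
# `break`) and on durations[0] == 0, since the token loop starts at index 0
# while the blank loop skips it. For example, with a hypothetical
# durations = [0, 1, 2, 4] and t = 3, the blank path combines
# alpha(t - 1, u) and alpha(t - 2, u), then breaks at duration 4 because
# 3 < 4; the token path additionally includes the duration-0 term
# alpha(t, u - 1).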
@cuda.jit()
def compute_tdt_betas_kernel(
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
):
"""
Compute beta (backward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for durations.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# betas += offset # pointer offset, ignored since we explicitly add offset
# Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = (
logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, U - 1, 1)
)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == U - 1:
# u == U - 1, we only consider blank emissions.
if t >= 0 and t + 1 < T:
betas[offset + t * maxU + U - 1] = -INF
for i in range(1, num_durations):
# although similar, the computation for betas is slightly more complex for boundary cases.
# the following two cases correspond to whether t is exactly certain duration away from T.
# and they have slightly different update rules.
if t + durations[i] < T:
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
betas[
offset + (t + durations[i]) * maxU + U - 1
] # beta[t, U - 1] depends on the value beta[t + duration, U - 1] here.
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_) # log prob of blank
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, U - 1, i
) # log prob of duration (durations[i])
- sigma, # for logit undernormalization
)
elif t + durations[i] == T:
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
# here we have one fewer term than the "if" block above. This could be seen as having "0" here since
# beta[t + duration, U - 1] isn't defined because t + duration is out of bound.
logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_) # log prob of blank
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, U - 1, i
) # log prob of duration (durations[i])
- sigma, # for logit undernormalization. Basically, every time sigma shows up it is because of logit undernormalization.
)
elif u < U - 1:
if t == T - 1:
# t == T - 1, so we only consider non-blank with duration 0. (Note, we can't have blank emissions with duration = 0)
betas[offset + (T - 1) * maxU + u] = (
betas[offset + (T - 1) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]) # non-blank log prob
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, u, 0) # log prob of duration 0
- sigma
)
elif t >= 0 and t < T - 1:
# now we need to consider both blank and non-blanks. Similar to alphas, we first compute them separately with no_emit and emit.
no_emit = -INF
for i in range(1, num_durations):
if t + durations[i] < T:
no_emit = rnnt_helper.log_sum_exp(
no_emit,
betas[offset + (t + durations[i]) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_)
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, t, u, i)
- sigma,
)
emit = -INF
for i in range(0, num_durations):
if t + durations[i] < T:
emit = rnnt_helper.log_sum_exp(
emit,
betas[offset + (t + durations[i]) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u])
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, t, u, i)
- sigma,
)
# combining all blank emissions and all non-blank emissions.
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives log-likelihood of backward pass, same with conventional Transducers.
if u == 0:
llBackward[b] = betas[offset]
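# Illustrative note (not part of the original file): the initialization of
# betas[b, T-1, U-1] above pairs the final blank emission with duration index
# 1 (see also the "first we consider duration = 1" branch in the alpha
# kernel), which implicitly assumes the configured duration set starts with
# [0, 1, ...] so that index 1 corresponds to a duration of exactly one frame.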
@cuda.jit()
def compute_tdt_grad_kernel(
label_grads: torch.Tensor,
duration_grads: torch.Tensor,
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients over the transduction step.
Args:
label_grads: Zero Tensor of shape [B, T, U, V] to store gradients for tokens.
duration_grads: Zero Tensor of shape [B, T, U, D] to store gradients for durations.
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for durations.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
Updates:
Kernel inplace updates the following inputs:
- label_grads and duration_grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
logpk_blank = (
denom[col] + acts[col * alphabet_size + blank_] - sigma
) # whenever sigma is used, it is for logit under-normalization.
if idx < num_durations:
grad = 0.0
if t + durations[idx] < T and u < U - 1: # for label
logpk_label = denom[col] + acts[col * alphabet_size + labels[u]] - sigma
grad -= math.exp(alphas[col] + betas[col + 1 + durations[idx] * maxU] + logpk_label - logll[mb])
if t + durations[idx] < T and idx > 0: # for blank in the middle
grad -= math.exp(alphas[col] + betas[col + durations[idx] * maxU] + logpk_blank - logll[mb])
if t + durations[idx] == T and idx >= 1 and u == U - 1: # for blank as the last symbol
grad -= math.exp(alphas[col] + logpk_blank - logll[mb])
grad = grad * math.exp(duration_acts[col * num_durations + idx])
duration_grads[col * num_durations + idx] = grad
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary size are covered in (V + 1 // thread_buffer) number of steps.
# As such, each thread will perform the while loop at least (V + 1 // thread_buffer) number of times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
# If FastEmit regularization is enabled, calculate the gradient of probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = 0.0
for i in range(0, num_durations):
if t + durations[i] < T:
fastemit_grad += fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]]) # log prob of token emission
+ duration_acts[col * num_durations + i] # duration log-prob
+ betas[col + 1 + durations[i] * maxU] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- sigma # for logit under-normalization
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# grad to last blank transition
# grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + logp(duration) for all possible non-zero durations.
if idx == blank_ and u == U - 1:
for i in range(1, num_durations):
if t == T - durations[i]:
grad -= math.exp(
alphas[col] + logpk - sigma - logll[mb] + duration_acts[col * num_durations + i]
)
# grad of blank across t < T;
# grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - sigma + logp_duration - logll[b] + betas[b, t + duration, u]) for all non-zero durations
if idx == blank_:
for i in range(1, num_durations):
if t < T - durations[i]:
grad -= math.exp(
alphas[col]
+ logpk
- sigma
- logll[mb]
+ betas[col + maxU * durations[i]]
+ duration_acts[col * num_durations + i]
)
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - sigma + logp_duration - logll[b] + betas[b, t + duration, u + 1]) for all blank durations.
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if u < U - 1 and idx == labels[u]:
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
for i in range(num_durations):
if t + durations[i] < T:
grad -= math.exp(
math.log1p(fastemit_lambda)
+ alphas[col]
+ logpk
- sigma
- logll[mb]
+ betas[col + 1 + maxU * durations[i]]
+ duration_acts[col * num_durations + i]
)
# update grads[b, t, u, v] = grad
label_grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = label_grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
label_grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
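# --- Hedged illustration (not part of the original kernel) ---
# A small pure-Python sketch of the two indexing tricks used above: decomposing the
# fused block index `col` back into (b, t, u), and covering a vocabulary larger than
# the thread buffer by striding `idx` in steps of the buffer size. All values below
# are assumptions chosen purely for illustration.
def _fused_index_and_stride_sketch(B=2, maxT=4, maxU=3, alphabet_size=10, thread_buffer=4):
    # 1) decompose and re-fuse the flat index exactly as the kernel does
    for col in range(B * maxT * maxU):
        u = col % maxU
        bt = (col - u) // maxU
        t = bt % maxT
        mb = (bt - t) // maxT
        assert col == (mb * maxT + t) * maxU + u
    # 2) each "thread" tid visits tid, tid + thread_buffer, tid + 2 * thread_buffer, ...
    covered = sorted(idx for tid in range(thread_buffer) for idx in range(tid, alphabet_size, thread_buffer))
    assert covered == list(range(alphabet_size))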
|
2e207e6fd352613be797d547dd80640bbd72772a
|
e848b142ec5d8f1fdb2fef4f8d550ebc3e15495d
|
/diffusers/src/diffusers/schedulers/scheduling_deis_multistep.py
|
f58284005deda5e40098d415d06623fdd76860bf
|
[
"CC0-1.0",
"Apache-2.0"
] |
permissive
|
drboog/Shifted_Diffusion
|
5d9d6959d53e519b7b59a1219ab9ab588d763855
|
1d9e863f852a2de3bc352f59944c1d738526e0fd
|
refs/heads/main
| 2023-05-23T00:59:30.891957
| 2023-05-18T16:53:37
| 2023-05-18T16:53:37
| 614,598,062
| 113
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,420
|
py
|
scheduling_deis_multistep.py
|
# Copyright 2023 FLAIR Lab and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: check https://arxiv.org/abs/2204.13902 and https://github.com/qsh-zh/deis for more info
# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
"""
    DEIS (https://arxiv.org/abs/2204.13902) is a fast high-order solver for diffusion ODEs. We slightly modify the
    polynomial fitting formula in log-rho space instead of the original linear t space used in the DEIS paper. The
    modification enjoys closed-form coefficients for exponential multistep updates instead of relying on a numerical
    solver. More variants of DEIS can be found in https://github.com/qsh-zh/deis.
    Currently, we support the log-rho multistep DEIS. We recommend using `solver_order=2` or `3`, while `solver_order=1`
    reduces to DDIM.
We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
diffusion models, you can set `thresholding=True` to use the dynamic thresholding.
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
[`~SchedulerMixin.from_pretrained`] functions.
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
solver_order (`int`, default `2`):
            the order of DEIS; can be `1`, `2`, or `3`. We recommend using `solver_order=2` for guided sampling, and
            `solver_order=3` for unconditional sampling.
prediction_type (`str`, default `epsilon`):
indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
or `v-prediction`.
thresholding (`bool`, default `False`):
whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
Note that the thresholding method is unsuitable for latent-space diffusion models (such as
stable-diffusion).
dynamic_thresholding_ratio (`float`, default `0.995`):
the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
(https://arxiv.org/abs/2205.11487).
sample_max_value (`float`, default `1.0`):
            the threshold value for dynamic thresholding. Only works when `thresholding=True`.
algorithm_type (`str`, default `deis`):
            the algorithm type for the solver. Currently we only support multistep DEIS; other variants of DEIS will
            be added in the future.
lower_order_final (`bool`, default `True`):
whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
find this trick can stabilize the sampling of DEIS for steps < 15, especially for steps <= 10.
"""
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
order = 1
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[np.ndarray] = None,
solver_order: int = 2,
prediction_type: str = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
sample_max_value: float = 1.0,
algorithm_type: str = "deis",
solver_type: str = "logrho",
lower_order_final: bool = True,
):
if trained_betas is not None:
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
# Currently we only support VP-type noise schedule
self.alpha_t = torch.sqrt(self.alphas_cumprod)
self.sigma_t = torch.sqrt(1 - self.alphas_cumprod)
self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t)
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# settings for DEIS
if algorithm_type not in ["deis"]:
if algorithm_type in ["dpmsolver", "dpmsolver++"]:
algorithm_type = "deis"
else:
                raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}")
if solver_type not in ["logrho"]:
if solver_type in ["midpoint", "heun", "bh1", "bh2"]:
solver_type = "logrho"
else:
                raise NotImplementedError(f"solver type {solver_type} is not implemented for {self.__class__}")
# setable values
self.num_inference_steps = None
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
self.timesteps = torch.from_numpy(timesteps)
self.model_outputs = [None] * solver_order
self.lower_order_nums = 0
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, optional):
the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
"""
self.num_inference_steps = num_inference_steps
timesteps = (
np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
.round()[::-1][:-1]
.copy()
.astype(np.int64)
)
self.timesteps = torch.from_numpy(timesteps).to(device)
self.model_outputs = [
None,
] * self.config.solver_order
self.lower_order_nums = 0
def convert_model_output(
self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor
) -> torch.FloatTensor:
"""
Convert the model output to the corresponding type that the algorithm DEIS needs.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
Returns:
`torch.FloatTensor`: the converted model output.
"""
if self.config.prediction_type == "epsilon":
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
x0_pred = (sample - sigma_t * model_output) / alpha_t
elif self.config.prediction_type == "sample":
x0_pred = model_output
elif self.config.prediction_type == "v_prediction":
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
x0_pred = alpha_t * sample - sigma_t * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
" `v_prediction` for the DEISMultistepScheduler."
)
if self.config.thresholding:
# Dynamic thresholding in https://arxiv.org/abs/2205.11487
orig_dtype = x0_pred.dtype
if orig_dtype not in [torch.float, torch.double]:
x0_pred = x0_pred.float()
dynamic_max_val = torch.quantile(
torch.abs(x0_pred).reshape((x0_pred.shape[0], -1)), self.config.dynamic_thresholding_ratio, dim=1
)
dynamic_max_val = torch.maximum(
dynamic_max_val,
self.config.sample_max_value * torch.ones_like(dynamic_max_val).to(dynamic_max_val.device),
)[(...,) + (None,) * (x0_pred.ndim - 1)]
x0_pred = torch.clamp(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
x0_pred = x0_pred.type(orig_dtype)
if self.config.algorithm_type == "deis":
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
return (sample - alpha_t * x0_pred) / sigma_t
else:
raise NotImplementedError("only support log-rho multistep deis now")
def deis_first_order_update(
self,
model_output: torch.FloatTensor,
timestep: int,
prev_timestep: int,
sample: torch.FloatTensor,
) -> torch.FloatTensor:
"""
One step for the first-order DEIS (equivalent to DDIM).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
Returns:
`torch.FloatTensor`: the sample tensor at the previous timestep.
"""
lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
sigma_t, _ = self.sigma_t[prev_timestep], self.sigma_t[timestep]
h = lambda_t - lambda_s
if self.config.algorithm_type == "deis":
x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output
else:
raise NotImplementedError("only support log-rho multistep deis now")
return x_t
def multistep_deis_second_order_update(
self,
model_output_list: List[torch.FloatTensor],
timestep_list: List[int],
prev_timestep: int,
sample: torch.FloatTensor,
) -> torch.FloatTensor:
"""
One step for the second-order multistep DEIS.
Args:
model_output_list (`List[torch.FloatTensor]`):
                direct outputs from the learned diffusion model at the current and previous timesteps.
            timestep_list (`List[int]`): current and previous discrete timesteps in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
Returns:
`torch.FloatTensor`: the sample tensor at the previous timestep.
"""
t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
m0, m1 = model_output_list[-1], model_output_list[-2]
alpha_t, alpha_s0, alpha_s1 = self.alpha_t[t], self.alpha_t[s0], self.alpha_t[s1]
sigma_t, sigma_s0, sigma_s1 = self.sigma_t[t], self.sigma_t[s0], self.sigma_t[s1]
rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1
if self.config.algorithm_type == "deis":
def ind_fn(t, b, c):
# Integrate[(log(t) - log(c)) / (log(b) - log(c)), {t}]
return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c))
coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1)
coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0)
x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1)
return x_t
else:
raise NotImplementedError("only support log-rho multistep deis now")
def multistep_deis_third_order_update(
self,
model_output_list: List[torch.FloatTensor],
timestep_list: List[int],
prev_timestep: int,
sample: torch.FloatTensor,
) -> torch.FloatTensor:
"""
One step for the third-order multistep DEIS.
Args:
model_output_list (`List[torch.FloatTensor]`):
                direct outputs from the learned diffusion model at the current and previous timesteps.
            timestep_list (`List[int]`): current and previous discrete timesteps in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
Returns:
`torch.FloatTensor`: the sample tensor at the previous timestep.
"""
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
alpha_t, alpha_s0, alpha_s1, alpha_s2 = self.alpha_t[t], self.alpha_t[s0], self.alpha_t[s1], self.alpha_t[s2]
        sigma_t, sigma_s0, sigma_s1, sigma_s2 = self.sigma_t[t], self.sigma_t[s0], self.sigma_t[s1], self.sigma_t[s2]
        rho_t, rho_s0, rho_s1, rho_s2 = (
            sigma_t / alpha_t,
            sigma_s0 / alpha_s0,
            sigma_s1 / alpha_s1,
            sigma_s2 / alpha_s2,
        )
if self.config.algorithm_type == "deis":
def ind_fn(t, b, c, d):
# Integrate[(log(t) - log(c))(log(t) - log(d)) / (log(b) - log(c))(log(b) - log(d)), {t}]
numerator = t * (
np.log(c) * (np.log(d) - np.log(t) + 1)
- np.log(d) * np.log(t)
+ np.log(d)
+ np.log(t) ** 2
- 2 * np.log(t)
+ 2
)
denominator = (np.log(b) - np.log(c)) * (np.log(b) - np.log(d))
return numerator / denominator
coef1 = ind_fn(rho_t, rho_s0, rho_s1, rho_s2) - ind_fn(rho_s0, rho_s0, rho_s1, rho_s2)
coef2 = ind_fn(rho_t, rho_s1, rho_s2, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s2, rho_s0)
coef3 = ind_fn(rho_t, rho_s2, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s2, rho_s0, rho_s1)
x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1 + coef3 * m2)
return x_t
else:
raise NotImplementedError("only support log-rho multistep deis now")
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
Step function propagating the sample with the multistep DEIS.
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
Returns:
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
step_index = (self.timesteps == timestep).nonzero()
if len(step_index) == 0:
step_index = len(self.timesteps) - 1
else:
step_index = step_index.item()
prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
lower_order_final = (
(step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15
)
lower_order_second = (
(step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
)
model_output = self.convert_model_output(model_output, timestep, sample)
for i in range(self.config.solver_order - 1):
self.model_outputs[i] = self.model_outputs[i + 1]
self.model_outputs[-1] = model_output
if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
prev_sample = self.deis_first_order_update(model_output, timestep, prev_timestep, sample)
elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
timestep_list = [self.timesteps[step_index - 1], timestep]
prev_sample = self.multistep_deis_second_order_update(
self.model_outputs, timestep_list, prev_timestep, sample
)
else:
timestep_list = [self.timesteps[step_index - 2], self.timesteps[step_index - 1], timestep]
prev_sample = self.multistep_deis_third_order_update(
self.model_outputs, timestep_list, prev_timestep, sample
)
if self.lower_order_nums < self.config.solver_order:
self.lower_order_nums += 1
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.FloatTensor`): input sample
Returns:
`torch.FloatTensor`: scaled input sample
"""
return sample
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.IntTensor,
) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
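# --- Hedged usage sketch (not part of the original scheduler file) ---
# A minimal illustration of how a multistep scheduler such as this one is typically
# driven during sampling: `set_timesteps` is called once, then `step` is called once
# per timestep while the scheduler keeps the last `solver_order` model outputs.
# `dummy_model` is an assumption used purely for illustration; a real pipeline would
# call a trained denoising network instead.
def _deis_sampling_sketch(num_inference_steps=10, shape=(1, 3, 8, 8)):
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(shape) * scheduler.init_noise_sigma
    def dummy_model(x, t):
        # stand-in for a trained epsilon-prediction network
        return torch.zeros_like(x)
    for t in scheduler.timesteps:
        model_output = dummy_model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample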
|
6764ec7de8895fc37665e39f496f60a735934933
|
8d67ceb772c5ac08ad152c0a5901be54ab98a527
|
/utils/generate-domains-blocklist/generate-domains-blocklist.py
|
a78a542d61979e9f896a373059065928b86961d3
|
[
"ISC"
] |
permissive
|
DNSCrypt/dnscrypt-proxy
|
f6a9338e99e78ea837ea14ebf180c1c9faf59f7d
|
d48c811ea98d28975f36eb36f68265a58338e166
|
refs/heads/master
| 2023-08-21T10:54:00.587616
| 2023-08-17T12:44:47
| 2023-08-17T12:44:47
| 116,739,828
| 6,924
| 834
|
ISC
| 2023-09-05T06:13:09
| 2018-01-08T23:21:21
|
Go
|
UTF-8
|
Python
| false
| false
| 10,245
|
py
|
generate-domains-blocklist.py
|
#! /usr/bin/env python3
# run with python generate-domains-blocklist.py > list.txt.tmp && mv -f list.txt.tmp list
from __future__ import print_function
import argparse
import re
import sys
import fnmatch
try:
import urllib2 as urllib
URLLIB_NEW = False
except (ImportError, ModuleNotFoundError):
import urllib.request as urllib
from urllib.request import Request
URLLIB_NEW = True
log_info = sys.stderr
log_err = sys.stderr
def parse_trusted_list(content):
rx_comment = re.compile(r"^(#|$)")
rx_inline_comment = re.compile(r"\s*#\s*[a-z0-9-].*$")
rx_trusted = re.compile(r"^([*a-z0-9.-]+)\s*(@\S+)?$")
rx_timed = re.compile(r".+\s*@\S+$")
names = set()
time_restrictions = {}
globs = set()
rx_set = [rx_trusted]
for line in content.splitlines():
line = str.lower(str.strip(line))
if rx_comment.match(line):
continue
line = str.strip(rx_inline_comment.sub("", line))
if is_glob(line) and not rx_timed.match(line):
globs.add(line)
names.add(line)
continue
for rx in rx_set:
matches = rx.match(line)
if not matches:
continue
name = matches.group(1)
names.add(name)
time_restriction = matches.group(2)
if time_restriction:
time_restrictions[name] = time_restriction
return names, time_restrictions, globs
def parse_list(content, trusted=False):
if trusted:
return parse_trusted_list(content)
rx_comment = re.compile(r"^(#|$)")
rx_inline_comment = re.compile(r"\s*#\s*[a-z0-9-].*$")
rx_u = re.compile(
r"^@*\|\|([a-z0-9][a-z0-9.-]*[.][a-z]{2,})\^?(\$(popup|third-party))?$"
)
rx_l = re.compile(r"^([a-z0-9][a-z0-9.-]*[.][a-z]{2,})$")
rx_lw = re.compile(r"^[*][.]([a-z0-9][a-z0-9.-]*[.][a-z]{2,})$")
rx_h = re.compile(
r"^[0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}\s+([a-z0-9][a-z0-9.-]*[.][a-z]{2,})$"
)
rx_mdl = re.compile(r'^"[^"]+","([a-z0-9][a-z0-9.-]*[.][a-z]{2,})",')
rx_b = re.compile(r"^([a-z0-9][a-z0-9.-]*[.][a-z]{2,}),.+,[0-9: /-]+,")
rx_dq = re.compile(r"^address=/([a-z0-9][a-z0-9.-]*[.][a-z]{2,})/.")
names = set()
time_restrictions = {}
globs = set()
rx_set = [rx_u, rx_l, rx_lw, rx_h, rx_mdl, rx_b, rx_dq]
for line in content.splitlines():
line = str.lower(str.strip(line))
if rx_comment.match(line):
continue
line = str.strip(rx_inline_comment.sub("", line))
for rx in rx_set:
matches = rx.match(line)
if not matches:
continue
name = matches.group(1)
names.add(name)
return names, time_restrictions, globs
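# Hedged illustration (not part of the original script): examples of source lines that
# parse_list() above would reduce to the single name "example.com". The sample lines are
# assumptions chosen purely to show which regex each format matches.
#
#   example.com                      -> rx_l   (plain domain)
#   *.example.com                    -> rx_lw  (wildcard hosts-style entry)
#   ||example.com^                   -> rx_u   (adblock-style entry)
#   0.0.0.0 example.com              -> rx_h   (hosts-file entry)
#   address=/example.com/0.0.0.0     -> rx_dq  (dnsmasq entry)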
def print_restricted_name(output_fd, name, time_restrictions):
if name in time_restrictions:
print("{}\t{}".format(
name, time_restrictions[name]), file=output_fd, end="\n")
else:
print(
"# ignored: [{}] was in the time-restricted list, "
"but without a time restriction label".format(name),
file=output_fd,
end="\n",
)
def load_from_url(url):
log_info.write("Loading data from [{}]\n".format(url))
req = urllib.Request(url=url, headers={"User-Agent": "dnscrypt-proxy"})
trusted = False
if URLLIB_NEW:
req_type = req.type
else:
req_type = req.get_type()
if req_type == "file":
trusted = True
response = None
try:
response = urllib.urlopen(req, timeout=int(args.timeout))
except urllib.URLError as err:
raise Exception("[{}] could not be loaded: {}\n".format(url, err))
if trusted is False and response.getcode() != 200:
raise Exception("[{}] returned HTTP code {}\n".format(
url, response.getcode()))
content = response.read()
if URLLIB_NEW:
content = content.decode("utf-8", errors="replace")
return content, trusted
def name_cmp(name):
parts = name.split(".")
parts.reverse()
return str.join(".", parts)
def is_glob(pattern):
maybe_glob = False
for i in range(len(pattern)):
c = pattern[i]
if c == "?" or c == "[":
maybe_glob = True
elif c == "*" and i != 0:
if i < len(pattern) - 1 or pattern[i - 1] == ".":
maybe_glob = True
if maybe_glob:
try:
fnmatch.fnmatch("example", pattern)
return True
except:
pass
return False
def covered_by_glob(globs, name):
if name in globs:
return False
for glob in globs:
try:
if fnmatch.fnmatch(name, glob):
return True
except:
pass
return False
def has_suffix(names, name):
parts = str.split(name, ".")
while parts:
parts = parts[1:]
if str.join(".", parts) in names:
return True
return False
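# Hedged examples of how the helpers above behave (values are assumptions, for illustration only):
#   name_cmp("ads.example.com")                      -> "com.example.ads"  (sort key: reversed labels)
#   is_glob("ads-*.example.com")                     -> True               ("*" not in the leading position)
#   is_glob("*.example.com")                         -> False              (plain "*." prefix, handled by rx_lw instead)
#   has_suffix({"example.com"}, "ads.example.com")   -> True               (a parent domain is already listed)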
def allowlist_from_url(url):
if not url:
return set()
content, trusted = load_from_url(url)
names, _time_restrictions, _globs = parse_list(content, trusted)
return names
def blocklists_from_config_file(
file, allowlist, time_restricted_url, ignore_retrieval_failure, output_file
):
blocklists = {}
allowed_names = set()
all_names = set()
unique_names = set()
all_globs = set()
# Load conf & blocklists
with open(file) as fd:
for line in fd:
line = str.strip(line)
if str.startswith(line, "#") or line == "":
continue
url = line
try:
content, trusted = load_from_url(url)
names, _time_restrictions, globs = parse_list(content, trusted)
blocklists[url] = names
all_names |= names
all_globs |= globs
except Exception as e:
log_err.write(str(e))
if not ignore_retrieval_failure:
exit(1)
# Time-based blocklist
if time_restricted_url and not re.match(r"^[a-z0-9]+:", time_restricted_url):
time_restricted_url = "file:" + time_restricted_url
output_fd = sys.stdout
if output_file:
output_fd = open(output_file, "w")
if time_restricted_url:
time_restricted_content, _trusted = load_from_url(time_restricted_url)
time_restricted_names, time_restrictions, _globs = parse_trusted_list(
time_restricted_content
)
if time_restricted_names:
print(
"########## Time-based blocklist ##########\n", file=output_fd, end="\n"
)
for name in time_restricted_names:
print_restricted_name(output_fd, name, time_restrictions)
        # Time-restricted names must also be allowed, otherwise they would always be blocked
allowed_names |= time_restricted_names
# Allowed list
if allowlist and not re.match(r"^[a-z0-9]+:", allowlist):
allowlist = "file:" + allowlist
allowed_names |= allowlist_from_url(allowlist)
# Process blocklists
for url, names in blocklists.items():
print(
"\n\n########## Blocklist from {} ##########\n".format(url),
file=output_fd,
end="\n",
)
ignored, glob_ignored, allowed = 0, 0, 0
list_names = list()
for name in names:
if covered_by_glob(all_globs, name):
glob_ignored = glob_ignored + 1
elif has_suffix(all_names, name) or name in unique_names:
ignored = ignored + 1
elif has_suffix(allowed_names, name) or name in allowed_names:
allowed = allowed + 1
else:
list_names.append(name)
unique_names.add(name)
list_names.sort(key=name_cmp)
if ignored:
print("# Ignored duplicates: {}".format(
ignored), file=output_fd, end="\n")
if glob_ignored:
print(
"# Ignored due to overlapping local patterns: {}".format(
glob_ignored),
file=output_fd,
end="\n",
)
if allowed:
print(
"# Ignored entries due to the allowlist: {}".format(allowed),
file=output_fd,
end="\n",
)
if ignored or glob_ignored or allowed:
print(file=output_fd, end="\n")
for name in list_names:
print(name, file=output_fd, end="\n")
output_fd.close()
argp = argparse.ArgumentParser(
description="Create a unified blocklist from a set of local and remote files"
)
argp.add_argument(
"-c",
"--config",
default="domains-blocklist.conf",
help="file containing blocklist sources",
)
argp.add_argument(
"-w",
"--whitelist",
help=argparse.SUPPRESS,
)
argp.add_argument(
"-a",
"--allowlist",
default="domains-allowlist.txt",
help="file containing a set of names to exclude from the blocklist",
)
argp.add_argument(
"-r",
"--time-restricted",
default="domains-time-restricted.txt",
help="file containing a set of names to be time restricted",
)
argp.add_argument(
"-i",
"--ignore-retrieval-failure",
action="store_true",
    help="generate the list even if some URLs couldn't be retrieved",
)
argp.add_argument(
"-o",
"--output-file",
default=None,
help="save generated blocklist to a text file with the provided file name",
)
argp.add_argument("-t", "--timeout", default=30, help="URL open timeout")
args = argp.parse_args()
whitelist = args.whitelist
if whitelist:
print(
"The option to provide a set of names to exclude from the blocklist has been changed from -w to -a\n"
)
argp.print_help()
exit(1)
conf = args.config
allowlist = args.allowlist
time_restricted = args.time_restricted
ignore_retrieval_failure = args.ignore_retrieval_failure
output_file = args.output_file
if output_file:
log_info = sys.stdout
blocklists_from_config_file(
conf, allowlist, time_restricted, ignore_retrieval_failure, output_file
)
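# Hedged usage note (not part of the original script): the file passed with -c is expected to
# contain one blocklist source per line, either an http(s) URL or a local path given as a
# "file:" URL so that it is parsed as a trusted list. The sample contents below are assumptions
# chosen purely for illustration.
#
#   # domains-blocklist.conf
#   file:domains-blocklist-local-additions.txt
#   https://example.com/hosts.txt
#
# Invocation follows the comment at the top of this file, or use `-o list.txt` to write to a
# file instead of stdout.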
|
c5bec9680e52dee6e7863c75b104308879fcfd8b
|
4f2b965bac8f0a205a38c7a1e9091065f5bc88b1
|
/pybb/views.py
|
2240dc7fd460db790a74051c04e1e512dcf0c2e5
|
[
"BSD-2-Clause"
] |
permissive
|
hovel/pybbm
|
92deabc528d490f3510c98cb02c7e2e7382a03fd
|
9481f57b8e67758b35f1180e143e04b3c2af6eb1
|
refs/heads/master
| 2023-02-18T01:38:25.606267
| 2023-01-26T16:09:18
| 2023-01-26T16:09:18
| 3,324,634
| 147
| 113
|
BSD-2-Clause
| 2023-01-26T16:09:36
| 2012-02-01T12:32:57
|
Python
|
UTF-8
|
Python
| false
| false
| 37,947
|
py
|
views.py
|
import math
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ValidationError
from django.urls import reverse
from django.contrib import messages
from django.db.models import F
from django.forms.utils import ErrorList
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest,\
HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic.edit import ModelFormMixin
from django.views.decorators.csrf import csrf_protect
from django.views import generic
from pybb import compat, defaults, util
from pybb.compat import get_atomic_func
from pybb.forms import PostForm, MovePostForm, AdminPostForm, AttachmentFormSet, \
PollAnswerFormSet, PollForm, ForumSubscriptionForm, ModeratorForm
from pybb.models import Category, Forum, ForumSubscription, Topic, Post, TopicReadTracker, \
ForumReadTracker, PollAnswerUser
from pybb.permissions import perms
from pybb.templatetags.pybb_tags import pybb_topic_poll_not_voted
User = compat.get_user_model()
username_field = compat.get_username_field()
Paginator, pure_pagination = compat.get_paginator_class()
class PaginatorMixin(object):
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs):
kwargs = {}
if pure_pagination:
kwargs['request'] = self.request
return Paginator(queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs)
class RedirectToLoginMixin(object):
    """ mixin which redirects to settings.LOGIN_URL if the view encounters a PermissionDenied exception
and the user is not authenticated. Views inheriting from this need to implement
get_login_redirect_url(), which returns the URL to redirect to after login (parameter "next")
"""
def dispatch(self, request, *args, **kwargs):
try:
return super(RedirectToLoginMixin, self).dispatch(request, *args, **kwargs)
except PermissionDenied:
if not request.user.is_authenticated:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(self.get_login_redirect_url())
else:
return HttpResponseForbidden()
def get_login_redirect_url(self):
""" get the url to which we redirect after the user logs in. subclasses should override this """
return '/'
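# Hedged illustration (not part of the original module): a minimal subclass of the mixin above,
# as described in its docstring. The template name is an assumption used purely for illustration.
#
# class ExampleProtectedView(RedirectToLoginMixin, generic.TemplateView):
#     template_name = 'pybb/example.html'
#
#     def get_login_redirect_url(self):
#         # after login, send the user back to the forum index
#         return reverse('pybb:index')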
class IndexView(generic.ListView):
template_name = 'pybb/index.html'
context_object_name = 'categories'
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
categories = ctx['categories']
for category in categories:
category.forums_accessed = perms.filter_forums(self.request.user, category.forums.filter(parent=None))
ctx['categories'] = categories
return ctx
def get_queryset(self):
return perms.filter_categories(self.request.user, Category.objects.all())
class CategoryView(RedirectToLoginMixin, generic.DetailView):
template_name = 'pybb/index.html'
context_object_name = 'category'
def get_login_redirect_url(self):
        # use super().get_object() here because CategoryView.get_object() performs the permission check itself
        # and would raise PermissionDenied before we could redirect to the login page
return super(CategoryView, self).get_object().get_absolute_url()
def get_queryset(self):
return Category.objects.all()
def get_object(self, queryset=None):
obj = super(CategoryView, self).get_object(queryset)
if not perms.may_view_category(self.request.user, obj):
raise PermissionDenied
return obj
def get_context_data(self, **kwargs):
ctx = super(CategoryView, self).get_context_data(**kwargs)
ctx['category'].forums_accessed = perms.filter_forums(self.request.user, ctx['category'].forums.filter(parent=None))
ctx['categories'] = [ctx['category']]
return ctx
def get(self, *args, **kwargs):
if defaults.PYBB_NICE_URL and (('id' in kwargs) or ('pk' in kwargs)):
return redirect(super(CategoryView, self).get_object(), permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
return super(CategoryView, self).get(*args, **kwargs)
class ForumView(RedirectToLoginMixin, PaginatorMixin, generic.ListView):
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
context_object_name = 'topic_list'
template_name = 'pybb/forum.html'
def dispatch(self, request, *args, **kwargs):
self.forum = self.get_forum(**kwargs)
return super(ForumView, self).dispatch(request, *args, **kwargs)
def get_login_redirect_url(self):
return self.forum.get_absolute_url()
def get_context_data(self, **kwargs):
ctx = super(ForumView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
if self.request.user.is_authenticated:
try:
ctx['subscription'] = ForumSubscription.objects.get(
user=self.request.user,
forum=self.forum
)
except ForumSubscription.DoesNotExist:
ctx['subscription'] = None
else:
ctx['subscription'] = None
ctx['forum'].forums_accessed = perms.filter_forums(self.request.user, self.forum.child_forums.all())
return ctx
def get_queryset(self):
if not perms.may_view_forum(self.request.user, self.forum):
raise PermissionDenied
qs = self.forum.topics.order_by('-sticky', '-updated', '-id').select_related()
qs = perms.filter_topics(self.request.user, qs)
return qs
def get_forum(self, **kwargs):
if 'pk' in kwargs:
forum = get_object_or_404(Forum.objects.all(), pk=kwargs['pk'])
        elif 'slug' in kwargs and 'category_slug' in kwargs:
forum = get_object_or_404(Forum, slug=kwargs['slug'], category__slug=kwargs['category_slug'])
else:
raise Http404(_('Forum does not exist'))
return forum
def get(self, *args, **kwargs):
if defaults.PYBB_NICE_URL and 'pk' in kwargs:
return redirect(self.forum, permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
return super(ForumView, self).get(*args, **kwargs)
class ForumSubscriptionView(RedirectToLoginMixin, generic.FormView):
template_name = 'pybb/forum_subscription.html'
form_class = ForumSubscriptionForm
def get_login_redirect_url(self):
return reverse('pybb:forum_subscription', args=(self.kwargs['pk'],))
def get_success_url(self):
return self.forum.get_absolute_url()
def get_form_kwargs(self):
kw = super(ForumSubscriptionView, self).get_form_kwargs()
self.get_objects()
kw['instance'] = self.forum_subscription
kw['user'] = self.request.user
kw['forum'] = self.forum
return kw
def get_context_data(self, **kwargs):
ctx = super(ForumSubscriptionView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
ctx['forum_subscription'] = self.forum_subscription
return ctx
def form_valid(self, form):
result = form.process()
if result == 'subscribe-all':
msg = _((
                'You subscribed to all existing topics on this forum '
                'and you will be auto-subscribed to all its new topics.'
))
elif result == 'delete':
msg = _((
'You won\'t be notified anymore each time a new topic '
'is posted on this forum.'
))
elif result == 'delete-all':
msg = _((
                'You have been subscribed to all current topics in this forum and you won\'t '
'be auto-subscribed anymore for each new topic posted on this forum.'
))
else:
msg = _((
'You will be notified each time a new topic is posted on this forum.'
))
messages.success(self.request, msg, fail_silently=True)
return super(ForumSubscriptionView, self).form_valid(form)
def get_objects(self):
if not self.request.user.is_authenticated:
raise PermissionDenied
self.forum = get_object_or_404(Forum.objects.all(), pk=self.kwargs['pk'])
try:
self.forum_subscription = ForumSubscription.objects.get(
user=self.request.user,
forum=self.forum
)
except ForumSubscription.DoesNotExist:
self.forum_subscription = None
class LatestTopicsView(PaginatorMixin, generic.ListView):
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
context_object_name = 'topic_list'
template_name = 'pybb/latest_topics.html'
def get_queryset(self):
qs = Topic.objects.all().select_related()
qs = perms.filter_topics(self.request.user, qs)
return qs.order_by('-updated', '-id')
class PybbFormsMixin(object):
post_form_class = PostForm
admin_post_form_class = AdminPostForm
attachment_formset_class = AttachmentFormSet
poll_form_class = PollForm
poll_answer_formset_class = PollAnswerFormSet
def get_post_form_class(self):
return self.post_form_class
def get_admin_post_form_class(self):
return self.admin_post_form_class
def get_attachment_formset_class(self):
return self.attachment_formset_class
def get_poll_form_class(self):
return self.poll_form_class
def get_poll_answer_formset_class(self):
return self.poll_answer_formset_class
class TopicView(RedirectToLoginMixin, PaginatorMixin, PybbFormsMixin, generic.ListView):
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
template_object_name = 'post_list'
template_name = 'pybb/topic.html'
def get(self, request, *args, **kwargs):
if defaults.PYBB_NICE_URL and 'pk' in kwargs:
return redirect(self.topic, permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
response = super(TopicView, self).get(request, *args, **kwargs)
self.mark_read()
return response
def get_login_redirect_url(self):
return self.topic.get_absolute_url()
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
self.topic = self.get_topic(**kwargs)
if request.GET.get('first-unread'):
if request.user.is_authenticated:
read_dates = []
try:
read_dates.append(TopicReadTracker.objects.get(user=request.user, topic=self.topic).time_stamp)
except TopicReadTracker.DoesNotExist:
pass
try:
read_dates.append(ForumReadTracker.objects.get(user=request.user, forum=self.topic.forum).time_stamp)
except ForumReadTracker.DoesNotExist:
pass
read_date = read_dates and max(read_dates)
if read_date:
try:
first_unread_topic = self.topic.posts.filter(created__gt=read_date).order_by('created', 'id')[0]
except IndexError:
first_unread_topic = self.topic.last_post
else:
first_unread_topic = self.topic.head
return HttpResponseRedirect(reverse('pybb:post', kwargs={'pk': first_unread_topic.id}))
return super(TopicView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
if not perms.may_view_topic(self.request.user, self.topic):
raise PermissionDenied
if self.request.user.is_authenticated or not defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER:
Topic.objects.filter(id=self.topic.id).update(views=F('views') + 1)
else:
cache_key = util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)
cache.add(cache_key, 0)
if cache.incr(cache_key) % defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER == 0:
Topic.objects.filter(id=self.topic.id).update(views=F('views') +
defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
cache.set(cache_key, 0)
qs = self.topic.posts.all().select_related('user')
if defaults.PYBB_PROFILE_RELATED_NAME:
qs = qs.select_related('user__%s' % defaults.PYBB_PROFILE_RELATED_NAME)
if not perms.may_moderate_topic(self.request.user, self.topic):
qs = perms.filter_posts(self.request.user, qs)
return qs
def get_context_data(self, **kwargs):
ctx = super(TopicView, self).get_context_data(**kwargs)
if self.request.user.is_authenticated:
self.request.user.is_moderator = perms.may_moderate_topic(self.request.user, self.topic)
self.request.user.is_subscribed = self.request.user in self.topic.subscribers.all()
if defaults.PYBB_ENABLE_ADMIN_POST_FORM and \
perms.may_post_as_admin(self.request.user):
ctx['form'] = self.get_admin_post_form_class()(
initial={'login': getattr(self.request.user, username_field)},
topic=self.topic)
else:
ctx['form'] = self.get_post_form_class()(topic=self.topic)
elif defaults.PYBB_ENABLE_ANONYMOUS_POST:
ctx['form'] = self.get_post_form_class()(topic=self.topic)
else:
ctx['form'] = None
ctx['next'] = self.get_login_redirect_url()
if perms.may_attach_files(self.request.user):
aformset = self.get_attachment_formset_class()()
ctx['aformset'] = aformset
ctx['attachment_max_size'] = defaults.PYBB_ATTACHMENT_SIZE_LIMIT
if defaults.PYBB_FREEZE_FIRST_POST:
ctx['first_post'] = self.topic.head
else:
ctx['first_post'] = None
ctx['topic'] = self.topic
if perms.may_vote_in_topic(self.request.user, self.topic) and \
pybb_topic_poll_not_voted(self.topic, self.request.user):
ctx['poll_form'] = self.get_poll_form_class()(self.topic)
return ctx
@method_decorator(get_atomic_func())
def mark_read(self):
if not self.request.user.is_authenticated:
return
try:
forum_mark = ForumReadTracker.objects.get(forum=self.topic.forum, user=self.request.user)
except ForumReadTracker.DoesNotExist:
forum_mark = None
if (forum_mark is None) or (forum_mark.time_stamp <= self.topic.updated):
topic_mark, topic_mark_new = TopicReadTracker.objects.get_or_create_tracker(topic=self.topic, user=self.request.user)
if not topic_mark_new:
# Bail early if we already read this thread.
if topic_mark.time_stamp >= self.topic.updated:
return
topic_mark.save() # update read time
            # Check if there are any unread topics in the forum
            read_trackers = TopicReadTracker.objects.filter(
                user=self.request.user, topic__forum=self.topic.forum, time_stamp__gte=F('topic__updated'))
            unread = self.topic.forum.topics.exclude(topicreadtracker__in=read_trackers)
if forum_mark is not None:
unread = unread.filter(updated__gte=forum_mark.time_stamp)
if not unread.exists():
# Clear all topic marks for this forum, mark forum as read
TopicReadTracker.objects.filter(user=self.request.user, topic__forum=self.topic.forum).delete()
forum_mark, forum_mark_new = ForumReadTracker.objects.get_or_create_tracker(
forum=self.topic.forum, user=self.request.user)
if not forum_mark_new:
forum_mark.save() # update read time
def get_topic(self, **kwargs):
if 'pk' in kwargs:
topic = get_object_or_404(Topic, pk=kwargs['pk'], post_count__gt=0)
        elif 'slug' in kwargs and 'forum_slug' in kwargs and 'category_slug' in kwargs:
topic = get_object_or_404(
Topic,
slug=kwargs['slug'],
forum__slug=kwargs['forum_slug'],
forum__category__slug=kwargs['category_slug'],
post_count__gt=0
)
else:
            raise Http404(_('This topic does not exist'))
return topic
class PostEditMixin(PybbFormsMixin):
@method_decorator(get_atomic_func())
def post(self, request, *args, **kwargs):
return super(PostEditMixin, self).post(request, *args, **kwargs)
def get_form_class(self):
if defaults.PYBB_ENABLE_ADMIN_POST_FORM and \
perms.may_post_as_admin(self.request.user):
return self.get_admin_post_form_class()
else:
return self.get_post_form_class()
def get_context_data(self, **kwargs):
ctx = super(PostEditMixin, self).get_context_data(**kwargs)
if perms.may_attach_files(self.request.user) and 'aformset' not in kwargs:
ctx['aformset'] = self.get_attachment_formset_class()(
instance=getattr(self, 'object', None)
)
if perms.may_create_poll(self.request.user) and 'pollformset' not in kwargs:
ctx['pollformset'] = self.get_poll_answer_formset_class()(
instance=self.object.topic if getattr(self, 'object', None) else None
)
return ctx
def form_valid(self, form):
success = True
save_attachments = False
save_poll_answers = False
self.object, topic = form.save(commit=False)
if perms.may_attach_files(self.request.user):
aformset = self.get_attachment_formset_class()(
self.request.POST, self.request.FILES, instance=self.object
)
if aformset.is_valid():
save_attachments = True
else:
success = False
else:
aformset = None
if perms.may_create_poll(self.request.user):
pollformset = self.get_poll_answer_formset_class()()
if getattr(self, 'forum', None) or topic.head == self.object:
if topic.poll_type != Topic.POLL_TYPE_NONE:
pollformset = self.get_poll_answer_formset_class()(
self.request.POST, instance=topic
)
if pollformset.is_valid():
save_poll_answers = True
else:
success = False
else:
topic.poll_question = None
topic.poll_answers.all().delete()
else:
pollformset = None
if success:
try:
topic.save()
except ValidationError as e:
success = False
errors = form._errors.setdefault('name', ErrorList())
errors += e.error_list
else:
self.object.topic = topic
self.object.save()
if save_attachments:
aformset.save()
if self.object.attachments.count():
# re-parse the body to replace attachment's references by URLs
self.object.save()
if save_poll_answers:
pollformset.save()
return HttpResponseRedirect(self.get_success_url())
return self.render_to_response(self.get_context_data(form=form,
aformset=aformset,
pollformset=pollformset))
class AddPostView(PostEditMixin, generic.CreateView):
template_name = 'pybb/add_post.html'
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
self.user = request.user
else:
if defaults.PYBB_ENABLE_ANONYMOUS_POST:
self.user, new = User.objects.get_or_create(**{username_field: defaults.PYBB_ANONYMOUS_USERNAME})
else:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
self.forum = None
self.topic = None
if 'forum_id' in kwargs:
self.forum = get_object_or_404(perms.filter_forums(request.user, Forum.objects.all()), pk=kwargs['forum_id'])
if not perms.may_create_topic(self.user, self.forum):
raise PermissionDenied
elif 'topic_id' in kwargs:
self.topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=kwargs['topic_id'])
if not perms.may_create_post(self.user, self.topic):
raise PermissionDenied
self.quote = ''
if 'quote_id' in request.GET:
try:
quote_id = int(request.GET.get('quote_id'))
except TypeError:
raise Http404
else:
post = get_object_or_404(Post, pk=quote_id)
if not perms.may_view_post(request.user, post):
raise PermissionDenied
profile = util.get_pybb_profile(post.user)
self.quote = util._get_markup_quoter(defaults.PYBB_MARKUP)(post.body, profile.get_display_name())
if self.quote and request.is_ajax():
return HttpResponse(self.quote)
return super(AddPostView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
ip = self.request.META.get('REMOTE_ADDR', '')
form_kwargs = super(AddPostView, self).get_form_kwargs()
form_kwargs.update(dict(topic=self.topic, forum=self.forum, user=self.user,
ip=ip, initial={}))
if getattr(self, 'quote', None):
form_kwargs['initial']['body'] = self.quote
if defaults.PYBB_ENABLE_ADMIN_POST_FORM and \
perms.may_post_as_admin(self.user):
form_kwargs['initial']['login'] = getattr(self.user, username_field)
form_kwargs['may_create_poll'] = perms.may_create_poll(self.user)
form_kwargs['may_edit_topic_slug'] = perms.may_edit_topic_slug(self.user)
return form_kwargs
def get_context_data(self, **kwargs):
ctx = super(AddPostView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
ctx['topic'] = self.topic
return ctx
def get_success_url(self):
if (not self.request.user.is_authenticated) and defaults.PYBB_PREMODERATION:
return reverse('pybb:index')
return self.object.get_absolute_url()
class EditPostView(PostEditMixin, generic.UpdateView):
model = Post
context_object_name = 'post'
template_name = 'pybb/edit_post.html'
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(EditPostView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
form_kwargs = super(EditPostView, self).get_form_kwargs()
form_kwargs['may_create_poll'] = perms.may_create_poll(self.request.user)
return form_kwargs
def get_object(self, queryset=None):
post = super(EditPostView, self).get_object(queryset)
if not perms.may_edit_post(self.request.user, post):
raise PermissionDenied
return post
class MovePostView(RedirectToLoginMixin, generic.UpdateView):
model = Post
form_class = MovePostForm
context_object_name = 'post'
template_name = 'pybb/move_post.html'
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(MovePostView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
form_kwargs = super(MovePostView, self).get_form_kwargs()
form_kwargs['user'] = self.request.user
return form_kwargs
def get_object(self, queryset=None):
post = super(MovePostView, self).get_object(queryset)
if not perms.may_moderate_topic(self.request.user, post.topic):
raise PermissionDenied
return post
def form_valid(self, *args, **kwargs):
from django.db.models.signals import post_save
from pybb.signals import topic_saved
# FIXME: we should have specific signals to send notifications to topic/forum subscribers
# but for now, we must connect / disconnect the callback
post_save.disconnect(topic_saved, sender=Topic)
response = super(MovePostView, self).form_valid(*args, **kwargs)
post_save.connect(topic_saved, sender=Topic)
return response
def get_success_url(self):
return self.object.topic.get_absolute_url()
class UserView(generic.DetailView):
model = User
template_name = 'pybb/user.html'
context_object_name = 'target_user'
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
return get_object_or_404(queryset, **{username_field: self.kwargs['username']})
def get_context_data(self, **kwargs):
ctx = super(UserView, self).get_context_data(**kwargs)
ctx['topic_count'] = Topic.objects.filter(user=ctx['target_user']).count()
return ctx
class UserPosts(PaginatorMixin, generic.ListView):
model = Post
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
template_name = 'pybb/user_posts.html'
def dispatch(self, request, *args, **kwargs):
username = kwargs.pop('username')
self.user = get_object_or_404(**{'klass': User, username_field: username})
return super(UserPosts, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(UserPosts, self).get_queryset()
qs = qs.filter(user=self.user)
qs = perms.filter_posts(self.request.user, qs).select_related('topic')
qs = qs.order_by('-created', '-updated', '-id')
return qs
def get_context_data(self, **kwargs):
context = super(UserPosts, self).get_context_data(**kwargs)
context['target_user'] = self.user
return context
class UserTopics(PaginatorMixin, generic.ListView):
model = Topic
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
template_name = 'pybb/user_topics.html'
def dispatch(self, request, *args, **kwargs):
username = kwargs.pop('username')
self.user = get_object_or_404(**{'klass': User, username_field: username})
return super(UserTopics, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(UserTopics, self).get_queryset()
qs = qs.filter(user=self.user)
qs = perms.filter_topics(self.request.user, qs)
qs = qs.order_by('-updated', '-created', '-id')
return qs
def get_context_data(self, **kwargs):
context = super(UserTopics, self).get_context_data(**kwargs)
context['target_user'] = self.user
return context
class PostView(RedirectToLoginMixin, generic.RedirectView):
permanent = False
def dispatch(self, request, *args, **kwargs):
self.post = self.get_post(**kwargs)
return super(PostView, self).dispatch(request, *args, **kwargs)
def get_login_redirect_url(self):
return self.post.get_absolute_url()
def get_redirect_url(self, **kwargs):
if not perms.may_view_post(self.request.user, self.post):
raise PermissionDenied
count = self.post.topic.posts.filter(created__lt=self.post.created).count() + 1
page = math.ceil(count / float(defaults.PYBB_TOPIC_PAGE_SIZE))
return '%s?page=%d#post-%d' % (self.post.topic.get_absolute_url(), page, self.post.id)
def get_post(self, **kwargs):
return get_object_or_404(Post, pk=kwargs['pk'])
class ModeratePost(generic.RedirectView):
permanent = False
def get_redirect_url(self, **kwargs):
post = get_object_or_404(Post, pk=self.kwargs['pk'])
if not perms.may_moderate_topic(self.request.user, post.topic):
raise PermissionDenied
post.on_moderation = False
post.save()
return post.get_absolute_url()
class ProfileEditView(generic.UpdateView):
template_name = 'pybb/edit_profile.html'
def get_object(self, queryset=None):
return util.get_pybb_profile(self.request.user)
def get_form_class(self):
if not self.form_class:
from pybb.forms import EditProfileForm
return EditProfileForm
else:
return super(ProfileEditView, self).get_form_class()
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(ProfileEditView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('pybb:edit_profile')
class DeletePostView(generic.DeleteView):
template_name = 'pybb/delete_post.html'
context_object_name = 'post'
def get_object(self, queryset=None):
post = get_object_or_404(Post.objects.select_related('topic', 'topic__forum'), pk=self.kwargs['pk'])
if not perms.may_delete_post(self.request.user, post):
raise PermissionDenied
self.topic = post.topic
self.forum = post.topic.forum
return post
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
redirect_url = self.get_success_url()
if not request.is_ajax():
return HttpResponseRedirect(redirect_url)
else:
return HttpResponse(redirect_url)
def get_success_url(self):
try:
Topic.objects.get(pk=self.topic.id)
except Topic.DoesNotExist:
return self.forum.get_absolute_url()
else:
if not self.request.is_ajax():
return self.topic.get_absolute_url()
else:
return ""
class TopicActionBaseView(generic.View):
def get_topic(self):
return get_object_or_404(Topic, pk=self.kwargs['pk'])
@method_decorator(login_required)
def get(self, *args, **kwargs):
self.topic = self.get_topic()
self.action(self.topic)
return HttpResponseRedirect(self.topic.get_absolute_url())
class StickTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_stick_topic(self.request.user, topic):
raise PermissionDenied
topic.sticky = True
topic.save()
class UnstickTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_unstick_topic(self.request.user, topic):
raise PermissionDenied
topic.sticky = False
topic.save()
class CloseTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_close_topic(self.request.user, topic):
raise PermissionDenied
topic.closed = True
topic.save()
class OpenTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_open_topic(self.request.user, topic):
raise PermissionDenied
topic.closed = False
topic.save()
class TopicPollVoteView(PybbFormsMixin, generic.UpdateView):
model = Topic
http_method_names = ['post', ]
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicPollVoteView, self).dispatch(request, *args, **kwargs)
def get_form_class(self):
return self.get_poll_form_class()
def get_form_kwargs(self):
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs['topic'] = self.object
return kwargs
def form_valid(self, form):
# already voted
if not perms.may_vote_in_topic(self.request.user, self.object) or \
not pybb_topic_poll_not_voted(self.object, self.request.user):
return HttpResponseForbidden()
answers = form.cleaned_data['answers']
for answer in answers:
# poll answer from another topic
if answer.topic != self.object:
return HttpResponseBadRequest()
PollAnswerUser.objects.create(poll_answer=answer, user=self.request.user)
return super(ModelFormMixin, self).form_valid(form)
def form_invalid(self, form):
return redirect(self.object)
def get_success_url(self):
return self.object.get_absolute_url()
@login_required
def topic_cancel_poll_vote(request, pk):
topic = get_object_or_404(Topic, pk=pk)
PollAnswerUser.objects.filter(user=request.user, poll_answer__topic_id=topic.id).delete()
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def delete_subscription(request, topic_id):
topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=topic_id)
topic.subscribers.remove(request.user)
msg = _('Subscription removed. You will not receive emails from this topic unless you subscribe or post again.')
messages.success(request, msg, fail_silently=True)
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def add_subscription(request, topic_id):
topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=topic_id)
if not perms.may_subscribe_topic(request.user, topic):
raise PermissionDenied
topic.subscribers.add(request.user)
msg = _('Subscription added. You will receive email notifications for replies to this topic.')
messages.success(request, msg, fail_silently=True)
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def post_ajax_preview(request):
content = request.POST.get('data')
html = util._get_markup_formatter()(content)
return render(request, 'pybb/_markitup_preview.html', {'html': html})
@login_required
def mark_all_as_read(request):
for forum in perms.filter_forums(request.user, Forum.objects.all()):
forum_mark, new = ForumReadTracker.objects.get_or_create_tracker(forum=forum, user=request.user)
forum_mark.save()
TopicReadTracker.objects.filter(user=request.user).delete()
msg = _('All forums marked as read')
messages.success(request, msg, fail_silently=True)
return redirect(reverse('pybb:index'))
@login_required
@require_POST
def block_user(request, username):
user = get_object_or_404(User, **{username_field: username})
if not perms.may_block_user(request.user, user):
raise PermissionDenied
user.is_active = False
user.save()
if 'block_and_delete_messages' in request.POST:
# delete the user's posts and topics, then recalculate the counters
# on every topic and forum affected by the deletions
posts = Post.objects.filter(user=user)
topics = posts.values('topic_id').distinct()
forums = posts.values('topic__forum_id').distinct()
posts.delete()
Topic.objects.filter(user=user).delete()
for t in topics:
try:
Topic.objects.get(id=t['topic_id']).update_counters()
except Topic.DoesNotExist:
pass
for f in forums:
try:
Forum.objects.get(id=f['topic__forum_id']).update_counters()
except Forum.DoesNotExist:
pass
msg = _('User successfully blocked')
messages.success(request, msg, fail_silently=True)
return redirect('pybb:index')
@login_required
@require_POST
def unblock_user(request, username):
user = get_object_or_404(User, **{username_field: username})
if not perms.may_block_user(request.user, user):
raise PermissionDenied
user.is_active = True
user.save()
msg = _('User successfully unblocked')
messages.success(request, msg, fail_silently=True)
return redirect('pybb:index')
class UserEditPrivilegesView(generic.edit.FormMixin, generic.edit.ProcessFormView, generic.DetailView):
template_name = 'pybb/edit_privileges.html'
form_class = ModeratorForm
model = User
slug_field = 'username'
slug_url_kwarg = 'username'
def get_success_url(self):
return reverse('pybb:edit_privileges', kwargs={'username': self.object.username})
def get_initial(self):
initial = super(UserEditPrivilegesView, self).get_initial()
categories = Category.objects.all()
for category in categories:
initial['cat_%d' % category.pk] = category.forums.filter(moderators=self.object.pk)
return initial
def get_form_kwargs(self):
form_kwargs = super(UserEditPrivilegesView, self).get_form_kwargs()
form_kwargs['user'] = self.request.user
return form_kwargs
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(UserEditPrivilegesView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(UserEditPrivilegesView, self).post(request, *args, **kwargs)
def form_valid(self, form):
form.process(self.object)
messages.success(self.request, _("Privileges updated"))
return super(UserEditPrivilegesView, self).form_valid(form)
|
2825643e3cc463ea28c78d3961c218e5e3bcb34b | ecaba173879f92f24e3c951866fda23c0a4fc426 | /perfkitbenchmarker/linux_benchmarks/dpb_terasort_benchmark.py | 56ba63c422798e5794db0f1f2f394525d990d564
["Classpath-exception-2.0", "BSD-3-Clause", "AGPL-3.0-only", "MIT", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause"] | permissive | GoogleCloudPlatform/PerfKitBenchmarker
2f4917fd796db4eb90822c557d8fa08a497fbd48 | d0699f32998898757b036704fba39e5471641f01 | refs/heads/master | 2023-09-02T08:14:54.110308 | 2023-09-01T20:28:01 | 2023-09-01T20:28:38 | 21,950,910 | 1,923 | 567
Apache-2.0 | 2023-09-13T22:37:42 | 2014-07-17T17:23:26 | Python | UTF-8 | Python | false | false | 8,507 | py | dpb_terasort_benchmark.py
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executes the 3 stages of Teasort stages on a Apache Hadoop MapReduce cluster.
TeraSort is a popular benchmark that measures the amount of time to sort a
configured amount of randomly distributed data on a given cluster. It is
commonly used to measure MapReduce performance of an Apache Hadoop cluster.
The following report compares performance of a YARN-scheduled TeraSort job on
A full TeraSort benchmark run consists of the following three steps:
* Generating the input data via TeraGen.
* Running the actual TeraSort on the input data.
* Validating the sorted output data via TeraValidate.
The benchmark reports the detailed latency of executing each stage.
"""
import logging
from typing import List
from absl import flags
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.dpb_service import BaseDpbService
BENCHMARK_NAME = 'dpb_terasort_benchmark'
BENCHMARK_CONFIG = """
dpb_terasort_benchmark:
description: Run terasort on dataproc and emr
dpb_service:
service_type: unmanaged_dpb_svc_yarn_cluster
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-4
AWS:
machine_type: m5.xlarge
Azure:
machine_type: Standard_F4s_v2
disk_spec:
GCP:
disk_size: 500
disk_type: pd-standard
mount_point: /scratch_ts
AWS:
disk_size: 500
disk_type: st1
mount_point: /scratch_ts
Azure:
disk_size: 500
disk_type: Standard_LRS
mount_point: /scratch_ts
worker_count: 2
"""
_FS_TYPE_EPHEMERAL = 'ephemeral'
_FS_TYPE_PERSISTENT = 'persistent'
flags.DEFINE_enum('dpb_terasort_storage_type', _FS_TYPE_PERSISTENT,
[_FS_TYPE_EPHEMERAL, _FS_TYPE_PERSISTENT],
'The type of storage for executing the Terasort benchmark')
flags.DEFINE_integer('dpb_terasort_num_records', 10000,
'Number of 100-byte rows to generate.')
flags.DEFINE_bool(
'dpb_terasort_pre_cleanup', False,
'Cleanup the terasort directories on the specified filesystem.')
flags.DEFINE_integer(
'dpb_terasort_block_size_mb', None,
'Virtual blocksize to use on the persistent file system. This controls '
'the parallelism of the map stages of terasort and teravalidate. Defaults '
'to cluster defined defaults. Does not support HDFS.')
FLAGS = flags.FLAGS
SUPPORTED_DPB_BACKENDS = [
dpb_service.DATAPROC, dpb_service.EMR,
dpb_service.UNMANAGED_DPB_SVC_YARN_CLUSTER
]
TERASORT_JAR = 'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
UNMANAGED_TERASORT_JAR = '/opt/pkb/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Config needed to run the terasort benchmark
Raises:
perfkitbenchmarker.errors.Config.InvalidValue: On encountering invalid
configuration.
"""
dpb_service_type = benchmark_config.dpb_service.service_type
if dpb_service_type not in SUPPORTED_DPB_BACKENDS:
raise errors.Config.InvalidValue(
'Invalid backend {} for terasort. Not in:{}'.format(
dpb_service_type, str(SUPPORTED_DPB_BACKENDS)))
if (FLAGS.dpb_terasort_block_size_mb and
FLAGS.dpb_terasort_storage_type != _FS_TYPE_PERSISTENT):
raise errors.Config.InvalidValue('You cannot set HDFS block size.')
def Prepare(spec: benchmark_spec.BenchmarkSpec):
del spec # unused
def Run(spec: benchmark_spec.BenchmarkSpec):
"""Runs the 3 stages of the terasort benchmark.
The following stages are executed based on the selected Job Type:
* Generating the input data via TeraGen.
* Running the actual TeraSort on the input data.
* Validating the sorted output data via TeraValidate.
The samples report the cumulative results along with the results for the
individual stages.
Args:
spec: Spec needed to run the terasort benchmark
Returns:
A list of samples, comprised of the detailed run times of individual stages.
The samples have associated metadata detailing the cluster details and used
filesystem.
Raises:
JobSubmissionError if any job fails.
"""
service = spec.dpb_service
if FLAGS.dpb_terasort_storage_type == _FS_TYPE_PERSISTENT:
base_dir = service.base_dir + '/'
else:
base_dir = '/'
metadata = {}
metadata.update(spec.dpb_service.GetResourceMetadata())
logging.info('metadata %s ', str(metadata))
results = []
# May not exist for preprovisioned cluster.
if service.resource_ready_time and service.create_start_time:
logging.info('Resource create_start_time %s ',
str(service.create_start_time))
logging.info('Resource resource_ready_time %s ',
str(service.resource_ready_time))
create_time = service.GetClusterCreateTime()
logging.info('Resource create_time %s ', str(create_time))
if create_time is not None:
results.append(
sample.Sample('dpb_cluster_create_time', create_time, 'seconds',
metadata.copy()))
metadata.update({'base_dir': base_dir})
metadata.update(
{'dpb_terasort_storage_type': FLAGS.dpb_terasort_storage_type})
metadata.update({'terasort_num_record': FLAGS.dpb_terasort_num_records})
storage_in_gb = (FLAGS.dpb_terasort_num_records * 100) // (1000 * 1000 * 1000)
metadata.update({'terasort_dataset_size_in_GB': storage_in_gb})
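# Sanity check on the size math (illustrative numbers): each TeraGen record is
# 100 bytes, so 10**10 records -> 10**12 bytes, i.e. 1000 GB under the decimal
# (1000**3 bytes per GB) convention used here; the default of 10,000 records
# rounds down to 0 GB.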
if FLAGS.dpb_terasort_block_size_mb:
# TODO(pclay): calculate default blocksize using configuration class?
metadata.update(
{'terasort_block_size_mb': FLAGS.dpb_terasort_block_size_mb})
service.metadata.update(metadata)
unsorted_dir = base_dir + 'unsorted'
sorted_dir = base_dir + 'sorted'
report_dir = base_dir + 'report'
stages = [('teragen', [str(FLAGS.dpb_terasort_num_records), unsorted_dir]),
('terasort', [unsorted_dir, sorted_dir]),
('teravalidate', [sorted_dir, report_dir])]
cumulative_runtime = 0
for (stage, args) in stages:
result = RunStage(spec, stage, args)
logging.info(result)
results.append(
sample.Sample(stage + '_run_time', result.run_time, 'seconds',
metadata))
results.append(
sample.Sample(stage + '_wall_time', result.wall_time, 'seconds',
metadata))
cumulative_runtime += result.run_time
results.append(
sample.Sample('cumulative_runtime', cumulative_runtime, 'seconds',
metadata))
return results
def Cleanup(spec: benchmark_spec.BenchmarkSpec):
"""Cleans up the terasort benchmark."""
del spec # unused
def RunStage(spec: benchmark_spec.BenchmarkSpec, stage: str,
stage_args: List[str]) -> dpb_service.JobResult:
"""Runs one of the 3 job stages of Terasort.
Args:
spec: BenchmarkSpec; the benchmark_spec
stage: str; name of the stage being executed
stage_args: List[str]; arguments for the stage.
Returns:
JobResult of running job.
Raises:
JobSubmissionError if job fails.
"""
service = spec.dpb_service
if service.dpb_service_type == dpb_service.UNMANAGED_DPB_SVC_YARN_CLUSTER:
jar = UNMANAGED_TERASORT_JAR
else:
jar = TERASORT_JAR
args = [stage]
if FLAGS.dpb_terasort_block_size_mb:
scheme = service.PERSISTENT_FS_PREFIX.strip(':/')
args.append('-Dfs.{}.block.size={}'.format(
scheme, FLAGS.dpb_terasort_block_size_mb * 1024 * 1024))
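# Illustration (assuming a GCS-backed cluster whose PERSISTENT_FS_PREFIX is
# 'gs://'): --dpb_terasort_block_size_mb=128 would append
# '-Dfs.gs.block.size=134217728' (128 * 1024 * 1024 bytes) to the job args.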
args += stage_args
return service.SubmitJob(
jarfile=jar, job_arguments=args, job_type=BaseDpbService.HADOOP_JOB_TYPE)
|
3dcd6bbf16251a9c49362999504596e5572fe2b9 | 8d5df43c1611a709ddf19d8b23b8763eb37b4e8f | /pika/adapters/utils/nbio_interface.py | 6662adf89e7924adf566afef6ac4f043a654dbc8
["BSD-3-Clause"] | permissive | pika/pika
86ed56bec6aa813ffd8a7037bbef756a9388533e | f4d8f8ff02a4da4653749c86161b7d52e53f73fe | refs/heads/main | 2023-09-03T18:19:30.231575 | 2023-07-28T23:01:02 | 2023-07-29T21:16:38 | 342,869 | 3,040 | 919
BSD-3-Clause | 2023-08-03T21:20:50 | 2009-10-19T23:22:02 | Python | UTF-8 | Python | false | false | 17,217 | py | nbio_interface.py
|
"""Non-blocking I/O interface for pika connection adapters.
I/O interface expected by `pika.adapters.base_connection.BaseConnection`
NOTE: This API is modeled after asyncio in python3 for a couple of reasons
1. It's a sensible API
2. To make it easy to implement at least on top of the built-in asyncio
Furthermore, the API caters to the needs of pika core and lack of generalization
is intentional for the sake of reducing complexity of the implementation and
testing and lessening the maintenance burden.
"""
import abc
import pika.compat
class AbstractIOServices(pika.compat.AbstractBase):
"""Interface to I/O services required by `pika.adapters.BaseConnection` and
related utilities.
NOTE: This is not a public API. Pika users should rely on the native I/O
loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
that corresponds to the chosen Connection adapter.
"""
@abc.abstractmethod
def get_native_ioloop(self):
"""Returns the native I/O loop instance, such as Twisted reactor,
asyncio's or tornado's event loop
"""
raise NotImplementedError
@abc.abstractmethod
def close(self):
"""Release IOLoop's resources.
the `close()` method is intended to be called by Pika's own test
code only after `start()` returns. After calling `close()`, no other
interaction with the closed instance of `IOLoop` should be performed.
NOTE: This method is provided for Pika's own test scripts that need to
be able to run I/O loops generically to test multiple Connection Adapter
implementations. Pika users should use the native I/O loop's API
instead.
"""
raise NotImplementedError
@abc.abstractmethod
def run(self):
"""Run the I/O loop. It will loop until requested to exit. See `stop()`.
NOTE: the outcome of restarting an instance that had been stopped is
UNDEFINED!
NOTE: This method is provided for Pika's own test scripts that need to
be able to run I/O loops generically to test multiple Connection Adapter
implementations (not all of the supported I/O Loop frameworks have
methods named start/stop). Pika users should use the native I/O loop's
API instead.
"""
raise NotImplementedError
@abc.abstractmethod
def stop(self):
"""Request exit from the ioloop. The loop is NOT guaranteed to
stop before this method returns.
NOTE: The outcome of calling `stop()` on a non-running instance is
UNDEFINED!
NOTE: This method is provided for Pika's own test scripts that need to
be able to run I/O loops generically to test multiple Connection Adapter
implementations (not all of the supported I/O Loop frameworks have
methods named start/stop). Pika users should use the native I/O loop's
API instead.
To invoke `stop()` safely from a thread other than this IOLoop's thread,
call it via `add_callback_threadsafe`; e.g.,
`ioloop.add_callback_threadsafe(ioloop.stop)`
"""
raise NotImplementedError
@abc.abstractmethod
def add_callback_threadsafe(self, callback):
"""Requests a call to the given function as soon as possible. It will be
called from this IOLoop's thread.
NOTE: This is the only thread-safe method offered by the IOLoop adapter.
All other manipulations of the IOLoop adapter and objects governed
by it must be performed from the IOLoop's thread.
NOTE: if you know that the requester is running on the same thread as
the connection it is more efficient to use the
`ioloop.call_later()` method with a delay of 0.
:param callable callback: The callback method; must be callable.
"""
raise NotImplementedError
@abc.abstractmethod
def call_later(self, delay, callback):
"""Add the callback to the IOLoop timer to be called after delay seconds
from the time of call on best-effort basis. Returns a handle to the
timeout.
If two are scheduled for the same time, it's undefined which one will
be called first.
:param float delay: The number of seconds to wait to call callback
:param callable callback: The callback method
:returns: A handle that can be used to cancel the request.
:rtype: AbstractTimerReference
"""
raise NotImplementedError
@abc.abstractmethod
def getaddrinfo(self,
host,
port,
on_done,
family=0,
socktype=0,
proto=0,
flags=0):
"""Perform the equivalent of `socket.getaddrinfo()` asynchronously.
See `socket.getaddrinfo()` for the standard args.
:param callable on_done: user callback that takes the return value of
`socket.getaddrinfo()` upon successful completion or exception upon
failure (check for `BaseException`) as its only arg. It will not be
called if the operation was cancelled.
:rtype: AbstractIOReference
"""
raise NotImplementedError
@abc.abstractmethod
def connect_socket(self, sock, resolved_addr, on_done):
"""Perform the equivalent of `socket.connect()` on a previously-resolved
address asynchronously.
IMPLEMENTATION NOTE: Pika's connection logic resolves the addresses
prior to making socket connections, so we don't need to burden the
implementations of this method with the extra logic of asynchronous
DNS resolution. Implementations can use `socket.inet_pton()` to
verify the address.
:param socket.socket sock: non-blocking socket that needs to be
connected via `socket.socket.connect()`
:param tuple resolved_addr: resolved destination address/port two-tuple
as per `socket.socket.connect()`, except that the first element must
be an actual IP address that's consistent with the given socket's
address family.
:param callable on_done: user callback that takes None upon successful
completion or exception (check for `BaseException`) upon error as
its only arg. It will not be called if the operation was cancelled.
:rtype: AbstractIOReference
:raises ValueError: if host portion of `resolved_addr` is not an IP
address or is inconsistent with the socket's address family as
validated via `socket.inet_pton()`
"""
raise NotImplementedError
@abc.abstractmethod
def create_streaming_connection(self,
protocol_factory,
sock,
on_done,
ssl_context=None,
server_hostname=None):
"""Perform SSL session establishment, if requested, on the already-
connected socket and link the streaming transport/protocol pair.
NOTE: This method takes ownership of the socket.
:param callable protocol_factory: called without args, returns an
instance with the `AbstractStreamProtocol` interface. The protocol's
`connection_made(transport)` method will be called to link it to
the transport after remaining connection activity (e.g., SSL session
establishment), if any, is completed successfully.
:param socket.socket sock: Already-connected, non-blocking
`socket.SOCK_STREAM` socket to be used by the transport. We take
ownership of this socket.
:param callable on_done: User callback
`on_done(BaseException | (transport, protocol))` to be notified when
the asynchronous operation completes. An exception arg indicates
failure (check for `BaseException`); otherwise the two-tuple will
contain the linked transport/protocol pair having
AbstractStreamTransport and AbstractStreamProtocol interfaces
respectively.
:param None | ssl.SSLContext ssl_context: if None, this will proceed as
a plaintext connection; otherwise, if not None, SSL session
establishment will be performed prior to linking the transport and
protocol.
:param str | None server_hostname: For use during SSL session
establishment to match against the target server's certificate. The
value `None` disables this check (which is a huge security risk)
:rtype: AbstractIOReference
"""
raise NotImplementedError
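# For illustration only (not part of this interface module): a minimal
# `on_done` callback passed to `create_streaming_connection()` could look
# like the sketch below; the logging call is an arbitrary placeholder.
#
#     def on_done(result):
#         if isinstance(result, BaseException):
#             logging.error('streaming connection failed: %r', result)
#         else:
#             transport, protocol = result  # linked pair, already connected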
class AbstractFileDescriptorServices(pika.compat.AbstractBase):
"""Interface definition of common non-blocking file descriptor services
required by some utility implementations.
NOTE: This is not a public API. Pika users should rely on the native I/O
loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
that corresponds to the chosen Connection adapter.
"""
@abc.abstractmethod
def set_reader(self, fd, on_readable):
"""Call the given callback when the file descriptor is readable.
Replace prior reader, if any, for the given file descriptor.
:param fd: file descriptor
:param callable on_readable: a callback taking no args to be notified
when fd becomes readable.
"""
raise NotImplementedError
@abc.abstractmethod
def remove_reader(self, fd):
"""Stop watching the given file descriptor for readability
:param fd: file descriptor
:returns: True if reader was removed; False if none was registered.
:rtype: bool
"""
raise NotImplementedError
@abc.abstractmethod
def set_writer(self, fd, on_writable):
"""Call the given callback whenever the file descriptor is writable.
Replace prior writer callback, if any, for the given file descriptor.
IMPLEMENTATION NOTE: For portability, implementations of
`set_writer()` should also watch for indication of error on the
socket and treat it as equivalent to the writable indication (e.g.,
also adding the socket to the `exceptfds` arg of `socket.select()`
and calling the `on_writable` callback if `select.select()`
indicates that the socket is in error state). Specifically, Windows
(unlike POSIX) only indicates error on the socket (but not writable)
when connection establishment fails.
:param fd: file descriptor
:param callable on_writable: a callback taking no args to be notified
when fd becomes writable.
"""
raise NotImplementedError
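# Illustration (a select()-based sketch, not how any particular adapter is
# implemented): honouring the portability note above means watching the fd
# in both the write and error sets, e.g.
#
#     import select
#     _, writable, errored = select.select([], [fd], [fd], timeout)
#     if writable or errored:
#         on_writable()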
@abc.abstractmethod
def remove_writer(self, fd):
"""Stop watching the given file descriptor for writability
:param fd: file descriptor
:returns: True if writer was removed; False if none was registered.
:rtype: bool
"""
raise NotImplementedError
class AbstractTimerReference(pika.compat.AbstractBase):
"""Reference to asynchronous operation"""
@abc.abstractmethod
def cancel(self):
"""Cancel callback. If already cancelled, has no affect.
"""
raise NotImplementedError
class AbstractIOReference(pika.compat.AbstractBase):
"""Reference to asynchronous I/O operation"""
@abc.abstractmethod
def cancel(self):
"""Cancel pending operation
:returns: False if was already done or cancelled; True otherwise
:rtype: bool
"""
raise NotImplementedError
class AbstractStreamProtocol(pika.compat.AbstractBase):
"""Stream protocol interface. It's compatible with a subset of
`asyncio.protocols.Protocol` for compatibility with asyncio-based
`AbstractIOServices` implementation.
"""
@abc.abstractmethod
def connection_made(self, transport):
"""Introduces transport to protocol after transport is connected.
:param AbstractStreamTransport transport:
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def connection_lost(self, error):
"""Called upon loss or closing of connection.
NOTE: `connection_made()` and `connection_lost()` are each called just
once and in that order. All other callbacks are called between them.
:param BaseException | None error: An exception (check for
`BaseException`) indicates connection failure. None indicates that
connection was closed on this side, such as when it's aborted or
when `AbstractStreamProtocol.eof_received()` returns a result that
doesn't evaluate to True.
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def eof_received(self):
"""Called after the remote peer shuts its write end of the connection.
:returns: A falsy value (including None) will cause the transport to
close itself, resulting in an eventual `connection_lost()` call
from the transport. If a truthy value is returned, it will be the
protocol's responsibility to close/abort the transport.
:rtype: falsy|truthy
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def data_received(self, data):
"""Called to deliver incoming data to the protocol.
:param data: Non-empty data bytes.
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
# pylint: disable=W0511
# TODO Undecided whether we need write flow-control yet, although it seems
# like a good idea.
# @abc.abstractmethod
# def pause_writing(self):
# """Called when the transport's write buffer size becomes greater than or
# equal to the transport's high-water mark. It won't be called again until
# the transport's write buffer gets back to its low-water mark and then
# returns to/past the high-water mark again.
# """
# raise NotImplementedError
#
# @abc.abstractmethod
# def resume_writing(self):
# """Called when the transport's write buffer size becomes less than or
# equal to the transport's low-water mark.
# """
# raise NotImplementedError
class AbstractStreamTransport(pika.compat.AbstractBase):
"""Stream transport interface. It's compatible with a subset of
`asyncio.transports.Transport` for compatibility with asyncio-based
`AbstractIOServices` implementation.
"""
@abc.abstractmethod
def abort(self):
"""Close connection abruptly without waiting for pending I/O to
complete. Will invoke the corresponding protocol's `connection_lost()`
method asynchronously (not in context of the abort() call).
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def get_protocol(self):
"""Return the protocol linked to this transport.
:rtype: AbstractStreamProtocol
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def write(self, data):
"""Buffer the given data until it can be sent asynchronously.
:param bytes data:
:raises ValueError: if called with empty data
:raises Exception: Exception-based exception on error
"""
raise NotImplementedError
@abc.abstractmethod
def get_write_buffer_size(self):
"""
:returns: Current size of output data buffered by the transport
:rtype: int
"""
raise NotImplementedError
# pylint: disable=W0511
# TODO Undecided whether we need write flow-control yet, although it seems
# like a good idea.
# @abc.abstractmethod
# def set_write_buffer_limits(self, high, low):
# """Set thresholds for calling the protocol's `pause_writing()`
# and `resume_writing()` methods. `low` must be less than or equal to
# `high`.
#
# NOTE The unintuitive order of the args is preserved to match the
# corresponding method in `asyncio.WriteTransport`. I would expect `low`
# to be the first arg, especially since
# `asyncio.WriteTransport.get_write_buffer_limits()` returns them in the
# opposite order. This seems error-prone.
#
# See `asyncio.WriteTransport.get_write_buffer_limits()` for more details
# about the args.
#
# :param int high: non-negative high-water mark.
# :param int low: non-negative low-water mark.
# """
# raise NotImplementedError
|
0301636a52999bd1657dad060d67d27f7c04b304 | 83b8b30ebb633eecd29ca0a7a20cc43a293c9333 | /tests/basics/compare_multi.py | 1abd18067db80c71ef1ddeb63abe14cf18b45035
["MIT", "GPL-1.0-or-later"] | permissive | adafruit/circuitpython
430ec895149d1eb814b505db39b4977a35ee88a7 | 506dca71b0cbb7af749bb51f86b01021db5483b3 | refs/heads/main | 2023-08-21T16:30:46.781068 | 2023-08-20T00:39:44 | 2023-08-20T00:39:44 | 66,166,069 | 3,806 | 1,560
MIT | 2023-09-14T19:23:51 | 2016-08-20T20:10:40 | C | UTF-8 | Python | false | false | 72 | py | compare_multi.py
|
print(1 < 2 < 3)
print(1 < 2 < 3 < 4)
print(1 > 2 < 3)
print(1 < 2 > 3)
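# Expected output, relying on Python's chained-comparison semantics
# (a < b < c is evaluated as (a < b) and (b < c)):
# True
# True
# False
# False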
|
0e0f1291684273bce9a9235125f138813fb4dfe9 | 12f0bd77926127cdacc2452d6f9cfed91806b2fe | /idaes/apps/caprese/examples/cstr_reduced.py | 676073d4d5c4f798aa95b2263b1008495607b892
["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | IDAES/idaes-pse
e03d2583ae1ba968a7099f9f439fd8c3efa12904 | deacf4c422bc9e50cb347e11a8cbfa0195bd4274 | refs/heads/main | 2023-08-16T19:13:00.355572 | 2023-08-04T04:19:29 | 2023-08-04T04:19:29 | 168,622,088 | 173 | 227
NOASSERTION | 2023-09-11T16:04:55 | 2019-02-01T01:12:51 | Python | UTF-8 | Python | false | false | 14,289 | py | cstr_reduced.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Example for Caprese's module for NMPC.
"""
import random
from idaes.apps.caprese.nmpc import NMPCSim
from idaes.apps.caprese.dynamic_block import DynamicBlock
from idaes.apps.caprese.controller import ControllerBlock
from idaes.apps.caprese.util import apply_noise_with_bounds
from idaes.apps.caprese.categorize import (
categorize_dae_variables_and_constraints,
VariableCategory,
ConstraintCategory,
)
from idaes.core.solvers import get_solver
VC = VariableCategory
CC = ConstraintCategory
import pyomo.environ as pyo
from pyomo.dae.flatten import flatten_dae_components
from pyomo.dae.initialization import solve_consistent_initial_conditions
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
from pyomo.contrib.incidence_analysis.interface import IncidenceGraphInterface
from pyomo.core.expr.calculus.derivatives import reverse_ad
import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_model import make_model
import numpy as np
import scipy.sparse as sps
import pandas as pd
import matplotlib.pyplot as plt
__author__ = "Robert Parker"
# See if ipopt is available and set up solver
if pyo.SolverFactory("ipopt").available():
solver = get_solver(
solver="ipopt",
options={
"tol": 1e-6,
"bound_push": 1e-8,
"halt_on_ampl_error": "yes",
"linear_solver": "ma57",
},
)
else:
solver = None
class PlotData(object):
def __init__(self, group, location, name=None, t_switch=None):
# Would really like a PlotData class that is constructed based on an
# NMPCVar object that contains necessary setpoint/reference
# information, instead of having to access that in the NMPCVarGroup
time = group.index_set
if t_switch is None:
t_switch = group.t0
self.name = name
var = group.varlist[location]
initial = group.reference[location]
setpoint = group.setpoint[location]
self.data_series = pd.Series(
[var[t].value for t in time], index=[t for t in time]
)
self.setpoint_series = pd.Series(
[initial if t < t_switch else setpoint for t in time]
)
def plot(self):
# fig, ax can be formatted to the user's liking
fig, ax = plt.subplots()
if self.name is not None:
self.data_series.plot(label=self.name)
else:
self.data_series.plot()
return fig, ax
def main(plot_switch=False):
# This tests the same model constructed in the test_nmpc_constructor_1 file
m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
sample_time = 0.5
m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
time_plant = m_plant.fs.time
solve_consistent_initial_conditions(m_plant, time_plant, solver)
#####
# Flatten and categorize controller model
#####
model = m_controller
time = model.fs.time
t0 = time.first()
t1 = time[2]
scalar_vars, dae_vars = flatten_dae_components(
model,
time,
pyo.Var,
)
scalar_cons, dae_cons = flatten_dae_components(
model,
time,
pyo.Constraint,
)
inputs = [
model.fs.mixer.S_inlet.flow_vol,
model.fs.mixer.E_inlet.flow_vol,
]
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "C"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "E"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "S"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "P"]),
model.fs.cstr.outlet.temperature,
]
model.fs.cstr.control_volume.material_holdup[:, "aq", "Solvent"].fix()
model.fs.cstr.total_flow_balance.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
controller = ControllerBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
controller.construct()
solve_consistent_initial_conditions(m_controller, time, solver)
controller.initialize_to_initial_conditions()
m_controller._dummy_obj = pyo.Objective(expr=0)
nlp = PyomoNLP(m_controller)
igraph = IncidenceGraphInterface(nlp)
m_controller.del_component(m_controller._dummy_obj)
diff_vars = [var[t1] for var in var_partition[VC.DIFFERENTIAL]]
alg_vars = [var[t1] for var in var_partition[VC.ALGEBRAIC]]
deriv_vars = [var[t1] for var in var_partition[VC.DERIVATIVE]]
diff_eqns = [con[t1] for con in con_partition[CC.DIFFERENTIAL]]
alg_eqns = [con[t1] for con in con_partition[CC.ALGEBRAIC]]
# Assemble and factorize "derivative Jacobian"
dfdz = nlp.extract_submatrix_jacobian(diff_vars, diff_eqns)
dfdy = nlp.extract_submatrix_jacobian(alg_vars, diff_eqns)
dgdz = nlp.extract_submatrix_jacobian(diff_vars, alg_eqns)
dgdy = nlp.extract_submatrix_jacobian(alg_vars, alg_eqns)
dfdzdot = nlp.extract_submatrix_jacobian(deriv_vars, diff_eqns)
fact = sps.linalg.splu(dgdy.tocsc())
dydz = fact.solve(dgdz.toarray())
deriv_jac = dfdz - dfdy.dot(dydz)
fact = sps.linalg.splu(dfdzdot.tocsc())
dzdotdz = -fact.solve(deriv_jac)
# Use some heuristic on the eigenvalues of the derivative Jacobian
# to identify fast states.
w, V = np.linalg.eig(dzdotdz)
w_max = np.max(np.abs(w))
(fast_modes,) = np.where(np.abs(w) > w_max / 2)
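# Illustrative numbers (not from an actual solve): if the eigenvalue
# magnitudes were [200.0, 90.0, 30.0], then w_max = 200.0 and only the
# first mode (|w| > 100.0) would be flagged as fast.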
fast_states = []
for idx in fast_modes:
evec = V[:, idx]
_fast_states, _ = np.where(np.abs(evec) > 0.5)
fast_states.extend(_fast_states)
fast_states = set(fast_states)
# Store components necessary for model reduction in a model-
# independent form.
fast_state_derivs = [
pyo.ComponentUID(var_partition[VC.DERIVATIVE][idx].referent, context=model)
for idx in fast_states
]
fast_state_diffs = [
pyo.ComponentUID(var_partition[VC.DIFFERENTIAL][idx].referent, context=model)
for idx in fast_states
]
fast_state_discs = [
pyo.ComponentUID(con_partition[CC.DISCRETIZATION][idx].referent, context=model)
for idx in fast_states
]
# Perform pseudo-steady state model reduction on the fast states
# and re-categorize
for cuid in fast_state_derivs:
var = cuid.find_component_on(m_controller)
var.fix(0.0)
for cuid in fast_state_diffs:
var = cuid.find_component_on(m_controller)
var[t0].unfix()
for cuid in fast_state_discs:
con = cuid.find_component_on(m_controller)
con.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
controller.del_component(model)
# Re-construct controller block with new categorization
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "C"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "E"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "S"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "P"]),
]
controller = ControllerBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
controller.construct()
#####
# Construct dynamic block for plant
#####
model = m_plant
time = model.fs.time
t0 = time.first()
t1 = time[2]
scalar_vars, dae_vars = flatten_dae_components(
model,
time,
pyo.Var,
)
scalar_cons, dae_cons = flatten_dae_components(
model,
time,
pyo.Constraint,
)
inputs = [
model.fs.mixer.S_inlet.flow_vol,
model.fs.mixer.E_inlet.flow_vol,
]
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "C"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "E"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "S"]),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, "P"]),
]
model.fs.cstr.control_volume.material_holdup[:, "aq", "Solvent"].fix()
model.fs.cstr.total_flow_balance.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
plant = DynamicBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
plant.construct()
p_t0 = plant.time.first()
c_t0 = controller.time.first()
p_ts = plant.sample_points[1]
c_ts = controller.sample_points[1]
controller.set_sample_time(sample_time)
plant.set_sample_time(sample_time)
# We now perform the "RTO" calculation: Find the optimal steady state
# to achieve the following setpoint
setpoint = [
(controller.mod.fs.cstr.outlet.conc_mol[0, "P"], 0.4),
# (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.01),
(controller.mod.fs.cstr.outlet.conc_mol[0, "S"], 0.1),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, "aq"], 300),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
(controller.mod.fs.cstr.volume[0], 1.0),
]
setpoint_weights = [
(controller.mod.fs.cstr.outlet.conc_mol[0, "P"], 1.0),
(controller.mod.fs.cstr.outlet.conc_mol[0, "S"], 1.0),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, "aq"], 1.0),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.0),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.0),
(controller.mod.fs.cstr.volume[0], 1.0),
]
# Some of the "differential variables" that have been fixed in the
# model file are different from the measurements listed above. We
# unfix them here so the RTO solve is not overconstrained.
# (The RTO solve will only automatically unfix inputs and measurements.)
controller.mod.fs.cstr.control_volume.material_holdup[0, ...].unfix()
controller.mod.fs.cstr.control_volume.energy_holdup[0, ...].unfix()
# controller.mod.fs.cstr.volume[0].unfix()
controller.mod.fs.cstr.control_volume.material_holdup[0, "aq", "Solvent"].fix()
controller.add_setpoint_objective(setpoint, setpoint_weights)
controller.solve_setpoint(solver)
# Now we are ready to construct the tracking NMPC problem
tracking_weights = [
*((v, 1.0) for v in controller.vectors.differential[:, 0]),
*((v, 1.0) for v in controller.vectors.input[:, 0]),
]
controller.add_tracking_objective(tracking_weights)
controller.constrain_control_inputs_piecewise_constant()
controller.initialize_to_initial_conditions()
# Solve the first control problem
controller.vectors.input[...].unfix()
controller.vectors.input[:, 0].fix()
solver.solve(controller, tee=True)
# For a proper NMPC simulation, we must have noise.
# We do this by treating inputs and measurements as Gaussian random
# variables with the following variances (and bounds).
cstr = controller.mod.fs.cstr
variance = [
(cstr.outlet.conc_mol[0.0, "S"], 0.01),
(cstr.outlet.conc_mol[0.0, "E"], 0.005),
(cstr.outlet.conc_mol[0.0, "C"], 0.01),
(cstr.outlet.conc_mol[0.0, "P"], 0.005),
(cstr.outlet.temperature[0.0], 1.0),
(cstr.volume[0.0], 0.05),
]
controller.set_variance(variance)
measurement_variance = [v.variance for v in controller.MEASUREMENT_BLOCK[:].var]
measurement_noise_bounds = [
(0.0, var[c_t0].ub) for var in controller.MEASUREMENT_BLOCK[:].var
]
mx = plant.mod.fs.mixer
variance = [
(mx.S_inlet_state[0.0].flow_vol, 0.02),
(mx.E_inlet_state[0.0].flow_vol, 0.001),
]
plant.set_variance(variance)
input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
input_noise_bounds = [(0.0, var[p_t0].ub) for var in plant.INPUT_BLOCK[:].var]
random.seed(100)
# Extract inputs from controller and inject them into plant
inputs = controller.generate_inputs_at_time(c_ts)
plant.inject_inputs(inputs)
# This "initialization" really simulates the plant with the new inputs.
plant.vectors.input[:, :].fix()
plant.initialize_by_solving_elements(solver)
plant.vectors.input[:, :].fix()
solver.solve(plant, tee=True)
for i in range(1, 11):
print("\nENTERING NMPC LOOP ITERATION %s\n" % i)
measured = plant.generate_measurements_at_time(p_ts)
plant.advance_one_sample()
plant.initialize_to_initial_conditions()
measured = apply_noise_with_bounds(
measured,
measurement_variance,
random.gauss,
measurement_noise_bounds,
)
controller.advance_one_sample()
controller.load_measurements(measured)
solver.solve(controller, tee=True)
inputs = controller.generate_inputs_at_time(c_ts)
inputs = apply_noise_with_bounds(
inputs,
input_variance,
random.gauss,
input_noise_bounds,
)
plant.inject_inputs(inputs)
plant.initialize_by_solving_elements(solver)
solver.solve(plant)
import pdb
pdb.set_trace()
if __name__ == "__main__":
main()
|
81eb7d6914cf04aebfb46a8e5b3cfa23e22441ba | 10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8 | /tests/unit/lib/remote_invoke/test_remote_invoke_executor_factory.py | 8f76be83031f946aff6bbed73967c6aa5b1cacc2
["Apache-2.0", "BSD-3-Clause", "MIT", "BSD-2-Clause"] | permissive | aws/aws-sam-cli
6d4411aacf7f861e75e5cf4882a32858797a276d | b297ff015f2b69d7c74059c2d42ece1c29ea73ee | refs/heads/develop | 2023-08-30T23:28:36.179932 | 2023-08-30T21:58:26 | 2023-08-30T21:58:26 | 92,205,085 | 1,402 | 470
Apache-2.0 | 2023-09-14T21:14:23 | 2017-05-23T18:16:23 | Python | UTF-8 | Python | false | false | 9,655 | py | test_remote_invoke_executor_factory.py
|
import itertools
from unittest import TestCase
from unittest.mock import patch, Mock
from parameterized import parameterized
from samcli.lib.remote_invoke.remote_invoke_executor_factory import RemoteInvokeExecutorFactory, AWS_LAMBDA_FUNCTION
from samcli.lib.remote_invoke.remote_invoke_executors import RemoteInvokeOutputFormat
class TestRemoteInvokeExecutorFactory(TestCase):
def setUp(self) -> None:
self.boto_client_provider_mock = Mock()
self.remote_invoke_executor_factory = RemoteInvokeExecutorFactory(self.boto_client_provider_mock)
def test_supported_resource_executors(self):
supported_executors = self.remote_invoke_executor_factory.REMOTE_INVOKE_EXECUTOR_MAPPING
self.assertEqual(1, len(supported_executors))
expected_executors = {AWS_LAMBDA_FUNCTION}
self.assertEqual(expected_executors, set(supported_executors.keys()))
@patch(
"samcli.lib.remote_invoke.remote_invoke_executor_factory.RemoteInvokeExecutorFactory.REMOTE_INVOKE_EXECUTOR_MAPPING"
)
def test_create_remote_invoke_executor(self, patched_executor_mapping):
given_executor_creator_method = Mock()
patched_executor_mapping.get.return_value = given_executor_creator_method
given_executor = Mock()
given_executor_creator_method.return_value = given_executor
given_cfn_resource_summary = Mock()
given_output_format = Mock()
given_response_consumer = Mock()
given_log_consumer = Mock()
executor = self.remote_invoke_executor_factory.create_remote_invoke_executor(
given_cfn_resource_summary, given_output_format, given_response_consumer, given_log_consumer
)
patched_executor_mapping.get.assert_called_with(given_cfn_resource_summary.resource_type)
given_executor_creator_method.assert_called_with(
self.remote_invoke_executor_factory,
given_cfn_resource_summary,
given_output_format,
given_response_consumer,
given_log_consumer,
)
self.assertEqual(executor, given_executor)
def test_failed_create_test_executor(self):
given_cfn_resource_summary = Mock()
executor = self.remote_invoke_executor_factory.create_remote_invoke_executor(
given_cfn_resource_summary, Mock(), Mock(), Mock()
)
self.assertIsNone(executor)
@parameterized.expand(
itertools.product([True, False], [RemoteInvokeOutputFormat.JSON, RemoteInvokeOutputFormat.TEXT])
)
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.LambdaInvokeExecutor")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.LambdaInvokeWithResponseStreamExecutor")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.DefaultConvertToJSON")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.LambdaResponseConverter")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.LambdaStreamResponseConverter")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.ResponseObjectToJsonStringMapper")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.RemoteInvokeExecutor")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory._is_function_invoke_mode_response_stream")
def test_create_lambda_test_executor(
self,
is_function_invoke_mode_response_stream,
remote_invoke_output_format,
patched_is_function_invoke_mode_response_stream,
patched_remote_invoke_executor,
patched_object_to_json_converter,
patched_stream_response_converter,
patched_response_converter,
patched_convert_to_default_json,
patched_lambda_invoke_with_response_stream_executor,
patched_lambda_invoke_executor,
):
patched_is_function_invoke_mode_response_stream.return_value = is_function_invoke_mode_response_stream
given_physical_resource_id = "physical_resource_id"
given_cfn_resource_summary = Mock(physical_resource_id=given_physical_resource_id)
given_lambda_client = Mock()
self.boto_client_provider_mock.return_value = given_lambda_client
given_remote_invoke_executor = Mock()
patched_remote_invoke_executor.return_value = given_remote_invoke_executor
given_response_consumer = Mock()
given_log_consumer = Mock()
lambda_executor = self.remote_invoke_executor_factory._create_lambda_boto_executor(
given_cfn_resource_summary, remote_invoke_output_format, given_response_consumer, given_log_consumer
)
self.assertEqual(lambda_executor, given_remote_invoke_executor)
self.boto_client_provider_mock.assert_called_with("lambda")
patched_convert_to_default_json.assert_called_once()
if is_function_invoke_mode_response_stream:
expected_mappers = []
if remote_invoke_output_format == RemoteInvokeOutputFormat.JSON:
patched_object_to_json_converter.assert_called_once()
patched_stream_response_converter.assert_called_once()
patched_lambda_invoke_with_response_stream_executor.assert_called_with(
given_lambda_client, given_physical_resource_id, remote_invoke_output_format
)
expected_mappers = [
patched_stream_response_converter(),
patched_object_to_json_converter(),
]
patched_remote_invoke_executor.assert_called_with(
request_mappers=[patched_convert_to_default_json()],
response_mappers=expected_mappers,
boto_action_executor=patched_lambda_invoke_with_response_stream_executor(),
response_consumer=given_response_consumer,
log_consumer=given_log_consumer,
)
else:
expected_mappers = []
if remote_invoke_output_format == RemoteInvokeOutputFormat.JSON:
patched_object_to_json_converter.assert_called_once()
patched_response_converter.assert_called_once()
patched_lambda_invoke_executor.assert_called_with(
given_lambda_client, given_physical_resource_id, remote_invoke_output_format
)
expected_mappers = [
patched_response_converter(),
patched_object_to_json_converter(),
]
patched_remote_invoke_executor.assert_called_with(
request_mappers=[patched_convert_to_default_json()],
response_mappers=expected_mappers,
boto_action_executor=patched_lambda_invoke_executor(),
response_consumer=given_response_consumer,
log_consumer=given_log_consumer,
)
@parameterized.expand(itertools.product([RemoteInvokeOutputFormat.JSON, RemoteInvokeOutputFormat.TEXT]))
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.StepFunctionsStartExecutionExecutor")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.SfnDescribeExecutionResponseConverter")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.DefaultConvertToJSON")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.ResponseObjectToJsonStringMapper")
@patch("samcli.lib.remote_invoke.remote_invoke_executor_factory.RemoteInvokeExecutor")
def test_create_stepfunctions_test_executor(
self,
remote_invoke_output_format,
patched_remote_invoke_executor,
patched_object_to_json_converter,
patched_convert_to_default_json,
patched_response_converter,
patched_stepfunctions_invoke_executor,
):
given_physical_resource_id = "physical_resource_id"
given_cfn_resource_summary = Mock(physical_resource_id=given_physical_resource_id)
given_stepfunctions_client = Mock()
self.boto_client_provider_mock.return_value = given_stepfunctions_client
given_remote_invoke_executor = Mock()
patched_remote_invoke_executor.return_value = given_remote_invoke_executor
given_response_consumer = Mock()
given_log_consumer = Mock()
stepfunctions_executor = self.remote_invoke_executor_factory._create_stepfunctions_boto_executor(
given_cfn_resource_summary, remote_invoke_output_format, given_response_consumer, given_log_consumer
)
self.assertEqual(stepfunctions_executor, given_remote_invoke_executor)
self.boto_client_provider_mock.assert_called_with("stepfunctions")
patched_convert_to_default_json.assert_called_once()
expected_mappers = []
if remote_invoke_output_format == RemoteInvokeOutputFormat.JSON:
patched_object_to_json_converter.assert_called_once()
patched_response_converter.assert_called_once()
patched_stepfunctions_invoke_executor.assert_called_with(
given_stepfunctions_client, given_physical_resource_id, remote_invoke_output_format
)
expected_mappers = [
patched_response_converter(),
patched_object_to_json_converter(),
]
patched_remote_invoke_executor.assert_called_with(
request_mappers=[patched_convert_to_default_json()],
response_mappers=expected_mappers,
boto_action_executor=patched_stepfunctions_invoke_executor(),
response_consumer=given_response_consumer,
log_consumer=given_log_consumer,
)
|
15e662d7707df92e5f34bb3670ffa241ddec4303 | bceec65d72798ce525b0c88138bb80d8348eff96 | /tests/test_objects.py | 75d18a35a0c72ed062b2dfd9a77b8f608495cfff
["MIT"] | permissive | brandondube/prysm
a53d7eb95600db7d3b2792cff01b5ab0f92a958f | af89c94d500a274eda664188ddb97fcae30c6ac5 | refs/heads/master | 2023-08-14T08:30:18.847011 | 2023-08-12T23:51:22 | 2023-08-12T23:51:22 | 121,337,323 | 192 | 38
MIT | 2023-09-10T20:11:29 | 2018-02-13T04:10:16 | Python | UTF-8 | Python | false | false | 1,354 | py | test_objects.py
|
"""Tests for object (target) synthesis routines."""
import pytest
import numpy as np
from prysm import objects, coordinates
@pytest.fixture
def xy():
x, y = coordinates.make_xy_grid(32, diameter=1)
return x, y
@pytest.fixture
def rt(xy):
x, y = xy
return coordinates.cart_to_polar(x, y)
@pytest.mark.parametrize(['wx', 'wy'], [
[None, .05],
[.05, None],
[.05, .05]])
def test_slit(xy, wx, wy):
x, y = xy
ary = objects.slit(x, y, wx, wy)
assert ary.any() # at least something white
def test_pinhole(rt):
r, _ = rt
assert objects.pinhole(1, r).any()
@pytest.mark.parametrize('bg', ['w', 'b'])
def test_siemensstar(rt, bg):
star = objects.siemensstar(*rt, 80, background=bg)
assert star.any()
@pytest.mark.parametrize('bg', ['w', 'b'])
def test_tiltedsquare(xy, bg):
sq = objects.tiltedsquare(*xy, background=bg)
assert sq.any()
@pytest.mark.parametrize('crossed', [True, False])
def test_slantededge(xy, crossed):
se = objects.slantededge(*xy, crossed=crossed)
assert se.any()
def test_pinhole_ft_functional(rt):
r, _ = rt
assert objects.pinhole_ft(1., r).any()
@pytest.mark.parametrize(['wx', 'wy'], [
[None, .05],
[.05, None],
[.05, .05]])
def test_slit_ft_functional(xy, wx, wy):
r, _ = xy
assert objects.slit_ft(wx, wy, *xy).any()
|
7f54a5c584261c4e78d6b1e4f66826ba51df4cad | 7bea5adf7d6284fbad0131d665e957d58adfe7c7 | /allauth/socialaccount/providers/saml/provider.py | 6dcbf537f5fbe2876e1b907b6545ed9d7f7e3afa
["MIT"] | permissive | pennersr/django-allauth
50c9e71c3666785368e92ed9e19ea0f6a5438cd2 | 6b8911a5ebbabda0d446f2743bd4d00d250ed500 | refs/heads/main | 2023-09-03T16:48:10.988418 | 2023-09-02T08:00:53 | 2023-09-02T08:00:53 | 976,994 | 7,719 | 3,481
MIT | 2023-09-14T15:06:57 | 2010-10-10T20:10:52 | Python | UTF-8 | Python | false | false | 2,880 | py | provider.py
|
from django.urls import reverse
from django.utils.http import urlencode
from allauth.socialaccount.providers.base import Provider, ProviderAccount
class SAMLAccount(ProviderAccount):
def to_str(self):
return super().to_str()
class SAMLProvider(Provider):
id = "saml"
name = "SAML"
account_class = SAMLAccount
default_attribute_mapping = {
"uid": [
"http://schemas.auth0.com/clientID",
"urn:oasis:names:tc:SAML:attribute:subject-id",
],
"email": [
"urn:oid:0.9.2342.19200300.100.1.3",
"http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress",
],
"email_verified": [
"http://schemas.auth0.com/email_verified",
],
"first_name": [
"http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname",
"urn:oid:2.5.4.42",
],
"last_name": [
"urn:oid:2.5.4.4",
],
"username": [
"http://schemas.auth0.com/nickname",
],
}
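# Illustration only (keys and IdP attribute names are hypothetical): the
# per-app `settings` dict read in `_extract()` below can override this
# default mapping, e.g.
#
#     app.settings = {
#         "attribute_mapping": {
#             "uid": ["urn:oid:0.9.2342.19200300.100.1.1"],
#             "email": ["mail"],
#         },
#     }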
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = self.app.name or self.app.client_id or self.name
def get_login_url(self, request, **kwargs):
url = reverse("saml_login", kwargs={"organization_slug": self.app.client_id})
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def extract_extra_data(self, data):
return data.get_attributes()
def extract_uid(self, data):
"""
The `uid` is not unique across different SAML IdP's. Therefore,
we're using a fully qualified ID: <uid>@<entity_id>.
"""
return self._extract(data)["uid"]
def extract_common_fields(self, data):
ret = self._extract(data)
ret.pop("uid", None)
return ret
def _extract(self, data):
provider_config = self.app.settings
raw_attributes = data.get_attributes()
attributes = {}
attribute_mapping = provider_config.get(
"attribute_mapping", self.default_attribute_mapping
)
# map configured provider attributes
for key, provider_keys in attribute_mapping.items():
if isinstance(provider_keys, str):
provider_keys = [provider_keys]
for provider_key in provider_keys:
attribute_list = raw_attributes.get(provider_key, [""])
if len(attribute_list) > 0:
attributes[key] = attribute_list[0]
break
email_verified = attributes.get("email_verified")
if email_verified:
email_verified = email_verified.lower() in ["true", "1", "t", "y", "yes"]
attributes["email_verified"] = email_verified
return attributes
provider_classes = [SAMLProvider]
|