| Column | Dtype | Range / values |
|---|---|---|
| blob_id | string | length 40-40 |
| directory_id | string | length 40-40 |
| path | string | length 4-721 |
| content_id | string | length 40-40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 5-91 |
| snapshot_id | string | length 40-40 |
| revision_id | string | length 40-40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes (nullable) |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes (nullable) |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3-113 |
| content | string | length 6-10.2M |

---

blob_id: a17a5d0ec7210603eb8cc0fe123717bd12c1db4f
directory_id: 811f4cdb25e26f3b27640aaa2e2bca93e660d2d7
path: /src/anomalib/models/reverse_distillation/lightning_model.py
content_id: 5489daab5b556a183c4b16a0a73773c3db8ca567
detected_licenses: ["CC-BY-SA-4.0", "CC-BY-SA-3.0", "CC-BY-NC-SA-4.0", "Python-2.0", "Apache-2.0", "MIT"]
license_type: permissive
repo_name: openvinotoolkit/anomalib
snapshot_id: 4467dfc392398845e816387267cdf979ff76fe15
revision_id: 4abfa93dcfcb98771bc768b334c929ff9a02ce8b
branch_name: refs/heads/main
visit_date: 2023-09-03T16:49:05.019269
revision_date: 2023-08-28T14:22:19
committer_date: 2023-08-28T14:22:19
github_id: 423,775,360
star_events_count: 2,325
fork_events_count: 454
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T11:21:33
gha_created_at: 2021-11-02T09:11:38
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,411
extension: py
filename: lightning_model.py
content:
"""Anomaly Detection via Reverse Distillation from One-Class Embedding.
https://arxiv.org/abs/2201.10703v2
"""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import Tensor, optim
from anomalib.models.components import AnomalyModule
from .anomaly_map import AnomalyMapGenerationMode
from .loss import ReverseDistillationLoss
from .torch_model import ReverseDistillationModel
class ReverseDistillation(AnomalyModule):
"""PL Lightning Module for Reverse Distillation Algorithm.
Args:
input_size (tuple[int, int]): Size of model input
backbone (str): Backbone of CNN network
layers (list[str]): Layers to extract features from the backbone CNN
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
"""
def __init__(
self,
input_size: tuple[int, int],
backbone: str,
layers: list[str],
anomaly_map_mode: AnomalyMapGenerationMode,
lr: float,
beta1: float,
beta2: float,
pre_trained: bool = True,
) -> None:
super().__init__()
self.model = ReverseDistillationModel(
backbone=backbone,
pre_trained=pre_trained,
layers=layers,
input_size=input_size,
anomaly_map_mode=anomaly_map_mode,
)
self.loss = ReverseDistillationLoss()
# TODO: LR should be part of optimizer in config.yaml! Since reverse distillation has custom
# optimizer this is to be addressed later.
self.learning_rate = lr
self.beta1 = beta1
self.beta2 = beta2
def configure_optimizers(self) -> optim.Adam:
"""Configures optimizers for decoder and bottleneck.
Note:
This method is used for the existing CLI.
When PL CLI is introduced, configure optimizers method will be
deprecated, and optimizers will be configured from either
config.yaml file or from CLI.
Returns:
Optimizer: Adam optimizer for each decoder
"""
return optim.Adam(
params=list(self.model.decoder.parameters()) + list(self.model.bottleneck.parameters()),
lr=self.learning_rate,
betas=(self.beta1, self.beta2),
)
def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP_OUTPUT:
"""Training Step of Reverse Distillation Model.
        Features are extracted from three layers of the encoder model, passed through the bottleneck layer,
        and then reconstructed by the decoder network. The loss is calculated from the cosine similarity
        between the encoder and decoder features.
Args:
batch (batch: dict[str, str | Tensor]): Input batch
Returns:
            Dictionary containing the training loss.
"""
del args, kwargs # These variables are not used.
loss = self.loss(*self.model(batch["image"]))
self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}
def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP_OUTPUT:
"""Validation Step of Reverse Distillation Model.
        Similar to the training step, encoder and decoder features are extracted from the CNN for each batch,
        and the anomaly map is computed.
Args:
batch (dict[str, str | Tensor]): Input batch
Returns:
Dictionary containing images, anomaly maps, true labels and masks.
These are required in `validation_epoch_end` for feature concatenation.
"""
del args, kwargs # These variables are not used.
batch["anomaly_maps"] = self.model(batch["image"])
return batch
class ReverseDistillationLightning(ReverseDistillation):
"""PL Lightning Module for Reverse Distillation Algorithm.
Args:
hparams(DictConfig | ListConfig): Model parameters
"""
def __init__(self, hparams: DictConfig | ListConfig) -> None:
super().__init__(
input_size=hparams.model.input_size,
backbone=hparams.model.backbone,
layers=hparams.model.layers,
pre_trained=hparams.model.pre_trained,
anomaly_map_mode=hparams.model.anomaly_map_mode,
lr=hparams.model.lr,
beta1=hparams.model.beta1,
beta2=hparams.model.beta2,
)
self.hparams: DictConfig | ListConfig # type: ignore
self.save_hyperparameters(hparams)
def configure_callbacks(self) -> list[EarlyStopping]:
"""Configure model-specific callbacks.
Note:
This method is used for the existing CLI.
When PL CLI is introduced, configure callback method will be
deprecated, and callbacks will be configured from either
config.yaml file or from CLI.
"""
early_stopping = EarlyStopping(
monitor=self.hparams.model.early_stopping.metric,
patience=self.hparams.model.early_stopping.patience,
mode=self.hparams.model.early_stopping.mode,
)
return [early_stopping]
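
For orientation, here is a minimal sketch of constructing this module directly, outside the config-driven CLI. The backbone name, layer names, input resolution, and optimizer hyper-parameters below are illustrative assumptions rather than values taken from an anomalib config, and `AnomalyMapGenerationMode.ADD` is assumed to be a valid member of the enum imported above.

```python
# Hedged usage sketch; all concrete values are assumptions for illustration.
from anomalib.models.reverse_distillation.anomaly_map import AnomalyMapGenerationMode
from anomalib.models.reverse_distillation.lightning_model import ReverseDistillation

model = ReverseDistillation(
    input_size=(256, 256),                  # assumed input resolution
    backbone="wide_resnet50_2",             # assumed backbone name
    layers=["layer1", "layer2", "layer3"],  # three encoder stages, per the docstring
    anomaly_map_mode=AnomalyMapGenerationMode.ADD,  # assumed enum member
    lr=0.005,
    beta1=0.5,
    beta2=0.999,
)
# A single Adam optimizer covers the trainable decoder and bottleneck;
# the pre-trained encoder is deliberately left out of the parameter list.
optimizer = model.configure_optimizers()
```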

---

blob_id: c61b4e7edec60800e7f19e6ed9b8b3cdc798d8eb
directory_id: 182bbadb0ee7f59f1abd154d06484e555a30c6d8
path: /api/tests/integration/tests/formats/smiles_x_smiles_conv.py
content_id: b9d1db5c61281f234102f98ffb4439494b6b915a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: epam/Indigo
snapshot_id: 08559861adf474122366b6e2e499ed3aa56272d1
revision_id: 8e473e69f393c3a57ff75b7728999c5fb4cbf1a3
branch_name: refs/heads/master
visit_date: 2023-09-02T10:14:46.843829
revision_date: 2023-08-25T08:39:24
committer_date: 2023-08-25T08:39:24
github_id: 37,536,320
star_events_count: 265
fork_events_count: 106
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T17:34:00
gha_created_at: 2015-06-16T14:45:56
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,085
extension: py
filename: smiles_x_smiles_conv.py
content:
import errno
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import * # noqa
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
indigo = Indigo()
def testMultipleSave(smifile, iterfunc, issmi):
print("TESTING " + relativePath(smifile))
sdfout = indigo.writeFile(joinPathPy("out/structures.sdf", __file__))
cmlout = indigo.writeFile(joinPathPy("out/structures.cml", __file__))
rdfout = indigo.writeFile(joinPathPy("out/structures.rdf", __file__))
smiout = indigo.writeFile(joinPathPy("out/structures.smi", __file__))
rdfout.rdfHeader()
cmlout.cmlHeader()
for item in iterfunc(smifile):
exc = False
try:
item.countAtoms()
item.smiles()
except IndigoException as e:
print("{0} : {1}".format(item.index(), getIndigoExceptionText(e)))
if issmi:
print(item.rawData())
exc = True
if not exc:
# item.clearCisTrans()
for bond in item.iterateBonds():
if bond.topology() == Indigo.RING and bond.bondOrder() == 2:
bond.resetStereo()
try:
item.markEitherCisTrans()
except IndigoException as e:
print(
"{0} (while markEitherCisTrans) : {1}".format(
item.index(), getIndigoExceptionText(e)
)
)
if issmi:
print(item.rawData())
continue
if issmi:
item.setName(
"structure-{0} {1}".format((item.index()), item.rawData())
)
else:
item.setName("structure-{0}".format(item.index()))
item.setProperty("NUMBER", str(item.index()))
cmlout.cmlAppend(item)
smiout.smilesAppend(item)
item.layout()
indigo.setOption("molfile-saving-mode", "2000")
sdfout.sdfAppend(item)
indigo.setOption("molfile-saving-mode", "3000")
rdfout.rdfAppend(item)
cmlout.cmlFooter()
sdfout.close()
cmlout.close()
rdfout.close()
smiout.close()
cmliter = indigo.iterateCMLFile(joinPathPy("out/structures.cml", __file__))
sdfiter = indigo.iterateSDFile(joinPathPy("out/structures.sdf", __file__))
rdfiter = indigo.iterateRDFile(joinPathPy("out/structures.rdf", __file__))
smiiter = indigo.iterateSmilesFile(
joinPathPy("out/structures.smi", __file__)
)
idx = 1
while sdfiter.hasNext():
cml = cmliter.next()
sdf = sdfiter.next()
rdf = rdfiter.next()
smi = smiiter.next()
print("{0} {1}".format(sdf.index(), sdf.name()))
sdf.resetSymmetricCisTrans()
rdf.resetSymmetricCisTrans()
try:
cs1 = sdf.canonicalSmiles()
cs2 = rdf.canonicalSmiles()
cs3 = smi.canonicalSmiles()
cs4 = cml.canonicalSmiles()
except IndigoException as e:
print(getIndigoExceptionText(e))
continue
print(cs1)
print(cs2)
print(cs3)
print(cs4)
if cs2 != cs1:
print("MISMATCH")
if cs3 != cs1:
print("MISMATCH")
if cs4 != cs1:
print("MISMATCH")
idx += 1
testMultipleSave(
joinPathPy("../../../../../data/molecules/basic/helma.smi", __file__),
indigo.iterateSmilesFile,
True,
)
testMultipleSave(
joinPathPy("molecules/chemical-structures.smi", __file__),
indigo.iterateSmilesFile,
True,
)
testMultipleSave(
joinPathPy("molecules/pubchem_7m_err.sdf", __file__),
indigo.iterateSDFile,
False,
)
testMultipleSave(
joinPathPy("molecules/acd2d_err.sdf", __file__),
indigo.iterateSDFile,
False,
)
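
The invariant this script probes can be distilled to a few lines. A hedged sketch using the `indigo` instance created above (`loadMolecule`, `smiles`, and `canonicalSmiles` are standard Indigo API calls); it is not part of the test itself:

```python
# Round-trip invariant: however a structure is serialized (SDF/CML/RDF/SMILES)
# and re-read, its canonical SMILES should come out identical.
mol = indigo.loadMolecule("C1=CC=CC=C1")  # benzene
roundtrip = indigo.loadMolecule(mol.smiles())
assert mol.canonicalSmiles() == roundtrip.canonicalSmiles()
```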

---

blob_id: d503dc6e43ef35a4491cdbb0dd0ab84f42fed98f
directory_id: c9ff14ff176600169b6e9f6490ab32f5c3af60e0
path: /jcvi/projects/pistachio.py
content_id: 83e1ca934720820589be0176108292311b5cbefb
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"]
license_type: permissive
repo_name: tanghaibao/jcvi
snapshot_id: c7a070692d53784a34378e19e435cb9a86d2cd2e
revision_id: 695bd2eee98b14118b54fc37e38cd0222ce6a5e9
branch_name: refs/heads/main
visit_date: 2023-09-01T01:22:04.353148
revision_date: 2023-08-30T01:59:11
committer_date: 2023-08-30T01:59:11
github_id: 1,130,393
star_events_count: 641
fork_events_count: 193
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-01T03:17:24
gha_created_at: 2010-12-01T23:18:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,367
extension: py
filename: pistachio.py
content:
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Functions related to processing of the pistachio genome.
"""
import sys
from jcvi.apps.base import OptionParser, ActionDispatcher
def main():
actions = (("agp", "convert from the table file to agp format"),)
p = ActionDispatcher(actions)
p.dispatch(globals())
def agp(args):
"""
%prog agp Siirt_Female_pistachio_23May2017_table.txt
The table file, as prepared by Dovetail Genomics, is not immediately useful
to convert gene model coordinates, as assumed by formats.chain.fromagp().
This is a quick script to do such conversion. The file structure of this
table file is described in the .manifest file shipped in the same package::
pistachio_b_23May2017_MeyIy.table.txt
Tab-delimited table describing positions of input assembly scaffolds
in the Hirise scaffolds. The table has the following format:
1. HiRise scaffold name
2. Input sequence name
3. Starting base (zero-based) of the input sequence
4. Ending base of the input sequence
5. Strand (- or +) of the input sequence in the scaffold
6. Starting base (zero-based) in the HiRise scaffold
7. Ending base in the HiRise scaffold
where '-' in the strand column indicates that the sequence is reverse
complemented relative to the input assembly.
CAUTION: This is NOT a proper AGP format since it does not have gaps in
them.
"""
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(tablefile,) = args
fp = open(tablefile)
for row in fp:
atoms = row.split()
hr = atoms[0]
scaf = atoms[1]
scaf_start = int(atoms[2]) + 1
scaf_end = int(atoms[3])
strand = atoms[4]
hr_start = int(atoms[5]) + 1
hr_end = int(atoms[6])
print(
"\t".join(
str(x)
for x in (
hr,
hr_start,
hr_end,
1,
"W",
scaf,
scaf_start,
scaf_end,
strand,
)
)
)
if __name__ == "__main__":
main()
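
A worked example of the coordinate shift may help: the Dovetail table is 0-based with half-open starts, AGP is 1-based, so only the two start columns move by one while ends and strand pass through unchanged. A sketch with a made-up row:

```python
# Illustration of the conversion performed by agp(); the input row is made up.
row = "ScpH1 scaffold_12 0 1500 + 2000 3500".split()
scaf_start, hr_start = int(row[2]) + 1, int(row[5]) + 1
print("\t".join(str(x) for x in (
    row[0], hr_start, int(row[6]), 1, "W", row[1], scaf_start, int(row[3]), row[4]
)))
# -> ScpH1  2001  3500  1  W  scaffold_12  1  1500  +
```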

---

blob_id: 231019cb7926cf98a871e90da107d83bd3c9dbe5
directory_id: 444a9480bce2035565332d4d4654244c0b5cd47b
path: /research/cv/resnet3d/eval_onnx.py
content_id: e879324762838633bfa4208397b7abb9b1401cb1
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license"]
license_type: permissive
repo_name: mindspore-ai/models
snapshot_id: 7ede9c6454e77e995e674628204e1c6e76bd7b27
revision_id: eab643f51336dbf7d711f02d27e6516e5affee59
branch_name: refs/heads/master
visit_date: 2023-07-20T01:49:34.614616
revision_date: 2023-07-17T11:43:18
committer_date: 2023-07-17T11:43:18
github_id: 417,393,380
star_events_count: 301
fork_events_count: 92
gha_license_id: Apache-2.0
gha_event_created_at: 2023-05-17T11:22:28
gha_created_at: 2021-10-15T06:38:37
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,485
extension: py
filename: eval_onnx.py
content:
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Eval.
"""
import time
import random
import json
from collections import defaultdict
import numpy as np
import onnxruntime
from mindspore import dataset as de
from mindspore.common import set_seed
from src.config import config as args_opt
from src.dataset import create_eval_dataset
from src.inference import (topk_, get_video_results, load_ground_truth, load_result,
remove_nonexistent_ground_truth, calculate_clip_acc)
from src.videodataset_multiclips import get_target_path
random.seed(1)
np.random.seed(1)
de.config.set_seed(1)
set_seed(1)
if __name__ == '__main__':
t1_ = time.time()
cfg = args_opt
print(cfg)
target = args_opt.device_target
if target == 'GPU':
providers = ['CUDAExecutionProvider']
elif target == 'CPU':
providers = ['CPUExecutionProvider']
else:
raise ValueError(
f'Unsupported target device {target}, '
f'Expected one of: "CPU", "GPU"'
)
session = onnxruntime.InferenceSession(args_opt.onnx_path, providers=providers)
predict_data = create_eval_dataset(
cfg.video_path, cfg.annotation_path, cfg)
size = predict_data.get_dataset_size()
total_target_path = get_target_path(cfg.annotation_path)
with total_target_path.open('r') as f:
total_target_data = json.load(f)
results = {'results': defaultdict(list)}
count = 0
for data in predict_data.create_dict_iterator(output_numpy=True):
t1 = time.time()
x, label = data['data'][0], data['label'].tolist()
video_ids, segments = zip(
*total_target_data['targets'][str(label[0])])
x_list = np.split(x, x.shape[0], axis=0)
outputs = []
for x in x_list:
inputs = {session.get_inputs()[0].name: x}
output = session.run(None, inputs)[0]
outputs.append(output)
outputs = np.concatenate(outputs, axis=0)
_, locs = topk_(outputs, K=1)
locs = locs.reshape(1, -1)
t2 = time.time()
print("[{} / {}] Net time: {} ms".format(count, size, (t2 - t1) * 1000))
for j in range(0, outputs.shape[0]):
results['results'][video_ids[j]].append({
'segment': segments[j],
'output': outputs[j]
})
count += 1
class_names = total_target_data['class_names']
inference_results = {'results': {}}
clips_inference_results = {'results': {}}
for video_id, video_results in results['results'].items():
video_outputs = [
segment_result['output'] for segment_result in video_results
]
video_outputs = np.stack(video_outputs, axis=0)
average_scores = np.mean(video_outputs, axis=0)
clips_inference_results['results'][video_id] = get_video_results(
average_scores, class_names, 5)
inference_results['results'][video_id] = []
for segment_result in video_results:
segment = segment_result['segment']
result = get_video_results(segment_result['output'],
class_names, 5)
inference_results['results'][video_id].append({
'segment': segment,
'result': result
})
# init context
print('load ground truth')
ground_truth, class_labels_map = load_ground_truth(
cfg.annotation_path, "validation")
print('number of ground truth: {}'.format(len(ground_truth)))
n_ground_truth_top_1 = len(ground_truth)
n_ground_truth_top_5 = len(ground_truth)
result_top1, result_top5 = load_result(
clips_inference_results, class_labels_map)
ground_truth_top1 = remove_nonexistent_ground_truth(
ground_truth, result_top1)
ground_truth_top5 = remove_nonexistent_ground_truth(
ground_truth, result_top5)
if cfg.ignore:
n_ground_truth_top_1 = len(ground_truth_top1)
n_ground_truth_top_5 = len(ground_truth_top5)
correct_top1 = [1 if line[1] in result_top1[line[0]]
else 0 for line in ground_truth_top1]
correct_top5 = [1 if line[1] in result_top5[line[0]]
else 0 for line in ground_truth_top5]
clip_acc = calculate_clip_acc(
inference_results, ground_truth, class_labels_map)
print(sum(correct_top1))
print(n_ground_truth_top_1)
print(sum(correct_top5))
print(n_ground_truth_top_5)
accuracy_top1 = float(sum(correct_top1)) / float(n_ground_truth_top_1)
accuracy_top5 = float(sum(correct_top5)) / float(n_ground_truth_top_5)
print('==================Accuracy=================\n'
' clip-acc : {} \ttop-1 : {} \ttop-5: {}'.format(clip_acc, accuracy_top1, accuracy_top5))
t2_ = time.time()
print("Total time : {} s".format(t2_ - t1_))

---

blob_id: 0fae8cbd76f6f3df964a4b902c5da9806bb9f57b
directory_id: 7ba2c56d8cfda999972d19b254a36e8d1a14e1c2
path: /2020-05-09-SpamAndFlags/ots/code.py
content_id: b98962188a201b1048772b4d504e016cfca64788
detected_licenses: []
license_type: no_license
repo_name: TFNS/writeups
snapshot_id: def1c9cb03d9d82b9ba08ab48fd21dbdf7d8f4fc
revision_id: 898564c9ec688f5b454f3c811a80a9c3f5efd7c5
branch_name: refs/heads/master
visit_date: 2023-07-08T19:30:18.393665
revision_date: 2023-06-27T00:17:30
committer_date: 2023-07-04T20:20:25
github_id: 238,048,794
star_events_count: 110
fork_events_count: 21
gha_license_id: null
gha_event_created_at: 2023-07-04T20:20:26
gha_created_at: 2020-02-03T19:53:01
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,203
extension: py
filename: code.py
content:
import hashlib
import hmac
import secrets

# A Winternitz-style hash-based one-time signature: 128 private 16-byte blocks,
# each iterated through MD5 up to 255 times; the 255-iteration digests form the
# public key.
class OTS:
def __init__(self):
self.key_len = 128
self.priv_key = secrets.token_bytes(16*self.key_len)
self.pub_key = b''.join([self.hash_iter(self.priv_key[16*i:16*(i+1)], 255) for i in range(self.key_len)]).hex()
def hash_iter(self, msg, n):
assert len(msg) == 16
for i in range(n):
msg = hashlib.md5(msg).digest()
return msg
def wrap(self, msg):
raw = msg.encode('utf-8')
assert len(raw) <= self.key_len - 16
raw = raw + b'\x00'*(self.key_len - 16 - len(raw))
raw = raw + hashlib.md5(raw).digest()
return raw
def sign(self, msg):
raw = self.wrap(msg)
signature = b''.join([self.hash_iter(self.priv_key[16*i:16*(i+1)], 255-raw[i]) for i in range(len(raw))]).hex()
self.verify(msg, signature)
return signature
def verify(self, msg, signature):
raw = self.wrap(msg)
signature = bytes.fromhex(signature)
assert len(signature) == self.key_len * 16
calc_pub_key = b''.join([self.hash_iter(signature[16*i:16*(i+1)], raw[i]) for i in range(len(raw))]).hex()
assert hmac.compare_digest(self.pub_key, calc_pub_key)
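
With the imports in place, exercising the scheme takes three lines; a hedged usage sketch:

```python
# Signing reveals each private block hashed 255 - raw[i] times; verification
# hashes raw[i] more times and must land on the 255-iteration public key.
ots = OTS()
signature = ots.sign("flag, please")   # hex string, 128 blocks of 16 bytes
ots.verify("flag, please", signature)  # asserts internally; no exception means valid
```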

---

blob_id: b79f276c9e3c88196fd851a0ef87f5c4bcbab177
directory_id: 97b5bdc26c0f06e3224363755c228cdf6844da1f
path: /custom_components/wiser/cover.py
content_id: ebd7472a5c7329c5699482f0bd66cb7e804f22bf
detected_licenses: ["MIT"]
license_type: permissive
repo_name: asantaga/wiserHomeAssistantPlatform
snapshot_id: 7ee1fc2ac4630967849fce127e24c31cd1b81fad
revision_id: c6687b17bb2e0e630ba0f778964a7cb6b8a8a8c7
branch_name: refs/heads/master
visit_date: 2023-08-31T23:35:09.790710
revision_date: 2023-08-26T17:52:24
committer_date: 2023-08-26T17:52:24
github_id: 159,080,189
star_events_count: 192
fork_events_count: 44
gha_license_id: MIT
gha_event_created_at: 2023-08-26T17:52:25
gha_created_at: 2018-11-25T22:22:07
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,325
extension: py
filename: cover.py
content:
"""
Cover Platform Device for Wiser.
https://github.com/asantaga/wiserHomeAssistantPlatform
Angelosantagata@gmail.com
"""
import asyncio
from homeassistant.components.cover import (
ATTR_POSITION,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.core import callback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .schedules import WiserScheduleEntity
from .const import (
DATA,
DOMAIN,
MANUFACTURER,
)
from .helpers import get_device_name, get_identifier
import logging
# TODO: Set this based on model of hub
MANUFACTURER = "Schneider Electric"
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = (
CoverEntityFeature.OPEN | CoverEntityFeature.CLOSE | CoverEntityFeature.SET_POSITION | CoverEntityFeature.STOP
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Wiser shutter device."""
data = hass.data[DOMAIN][config_entry.entry_id][DATA]
wiser_shutters = []
if data.wiserhub.devices.shutters:
_LOGGER.debug("Setting up shutter entities")
for shutter in data.wiserhub.devices.shutters.all:
if shutter.product_type == "Shutter":
wiser_shutters.append(WiserShutter(data, shutter.id))
async_add_entities(wiser_shutters, True)
class WiserShutter(CoordinatorEntity, CoverEntity, WiserScheduleEntity):
"""Wisershutter ClientEntity Object."""
def __init__(self, coordinator, shutter_id):
"""Initialize the sensor."""
super().__init__(coordinator)
self._data = coordinator
self._device_id = shutter_id
self._device = self._data.wiserhub.devices.shutters.get_by_id(self._device_id)
self._schedule = self._device.schedule
_LOGGER.debug(f"{self._data.wiserhub.system.name} {self.name} initialise")
async def async_force_update(self, delay: int = 0):
_LOGGER.debug(f"Hub update initiated by {self.name}")
if delay:
            await asyncio.sleep(delay)
await self._data.async_refresh()
@callback
def _handle_coordinator_update(self) -> None:
_LOGGER.debug(f"{self.name} updating")
self._device = self._data.wiserhub.devices.shutters.get_by_id(self._device_id)
self._schedule = self._device.schedule
self.async_write_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_FLAGS
# TODO: What is this for?
@property
def scheduled_position(self):
"""Return scheduled position from data."""
return self._device.scheduled_lift
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": get_device_name(self._data, self._device_id),
"identifiers": {(DOMAIN, get_identifier(self._data, self._device_id))},
"manufacturer": MANUFACTURER,
"model": self._data.wiserhub.devices.get_by_id(self._device_id).model,
"via_device": (DOMAIN, self._data.wiserhub.system.name),
}
@property
def icon(self):
"""Return icon to show if shutter is closed or Open."""
return "mdi:window-shutter" if self.is_closed else "mdi:window-shutter-open"
@property
def name(self):
"""Return Name of device"""
return f"{get_device_name(self._data, self._device_id)} Control"
@property
def current_cover_position(self):
"""Return current position from data."""
return self._device.current_lift
@property
def is_closed(self):
return self._device.is_closed
@property
def is_opening(self):
return self._device.is_opening
@property
def is_closing(self):
return self._device.is_closing
@property
def unique_id(self):
"""Return unique Id."""
return f"{self._data.wiserhub.system.name}-Wisershutter-{self._device_id}-{self.name}"
@property
def extra_state_attributes(self):
"""Return state attributes."""
# Generic attributes
attrs = super().state_attributes
# Shutter Identification
attrs["name"] = self._device.name
attrs["model"] = self._device.model
attrs["product_type"] = self._device.product_type
attrs["product_identifier"] = self._device.product_identifier
attrs["product_model"] = self._device.product_model
attrs["serial_number"] = self._device.serial_number
attrs["firmware"] = self._device.firmware_version
# Room
if self._data.wiserhub.rooms.get_by_id(self._device.room_id) is not None:
attrs["room"] = self._data.wiserhub.rooms.get_by_id(self._device.room_id).name
else:
attrs["room"] = "Unassigned"
# Settings
attrs["shutter_id"] = self._device_id
attrs["away_mode_action"] = self._device.away_mode_action
attrs["mode"] = self._device.mode
attrs["lift_open_time"] = self._device.drive_config.open_time
attrs["lift_close_time"] = self._device.drive_config.close_time
# Command state
attrs["control_source"] = self._device.control_source
# Status
attrs["is_open"] = self._device.is_open
attrs["is_closed"] = self._device.is_closed
if self._device.is_open:
attrs["current_state"] = "Open"
elif self._device.is_closed:
attrs["current_state"] = "Closed"
        elif not self._device.is_open and not self._device.is_closed:
attrs["current_state"] = "Middle"
attrs["lift_movement"] = self._device.lift_movement
# Positions
attrs["current_lift"] = self._device.current_lift
attrs["manual_lift"] = self._device.manual_lift
attrs["target_lift"] = self._device.target_lift
attrs["scheduled_lift"] = self._device.scheduled_lift
# Schedule
attrs["schedule_id"] = self._device.schedule_id
if self._device.schedule:
attrs["schedule_name"] = self._device.schedule.name
attrs["next_day_change"] = str(self._device.schedule.next.day)
attrs["next_schedule_change"] = str(self._device.schedule.next.time)
attrs["next_schedule_datetime"] = str(self._device.schedule.next.datetime)
attrs["next_schedule_state"] = self._device.schedule.next.setting
return attrs
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs[ATTR_POSITION]
_LOGGER.debug(f"Setting cover position for {self.name} to {position}")
await self._device.open(position)
await self.async_force_update()
async def async_close_cover(self, **kwargs):
"""Close shutter"""
_LOGGER.debug(f"Closing {self.name}")
await self._device.close()
await self.async_force_update()
async def async_open_cover(self, **kwargs):
"""Close shutter"""
_LOGGER.debug(f"Opening {self.name}")
await self._device.open()
await self.async_force_update()
async def async_stop_cover(self, **kwargs):
"""Stop shutter"""
_LOGGER.debug(f"Stopping {self.name}")
await self._device.stop()
await self.async_force_update()

---

blob_id: f993880501479c9c367ca3ce21217ddb0daa1fb5
directory_id: 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
path: /Python/modify-graph-edge-weights.py
content_id: 65dbebcc05c1d765e9e5c430cf7e287c3d6cc86c
detected_licenses: ["MIT"]
license_type: permissive
repo_name: kamyu104/LeetCode-Solutions
snapshot_id: f54822059405ef4df737d2e9898b024f051fd525
revision_id: 4dc4e6642dc92f1983c13564cc0fd99917cab358
branch_name: refs/heads/master
visit_date: 2023-09-02T13:48:26.830566
revision_date: 2023-08-28T10:11:12
committer_date: 2023-08-28T10:11:12
github_id: 152,631,182
star_events_count: 4,549
fork_events_count: 1,651
gha_license_id: MIT
gha_event_created_at: 2023-05-31T06:10:33
gha_created_at: 2018-10-11T17:38:35
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,621
extension: py
filename: modify-graph-edge-weights.py
content:
# Time: O((|E| + |V|) * log|V|) = O(|E| * log|V|) by using binary heap,
# if we can further to use Fibonacci heap, it would be O(|E| + |V| * log|V|)
# Space: O(|E| + |V|) = O(|E|)
import heapq
# dijkstra's algorithm
class Solution(object):
def modifiedGraphEdges(self, n, edges, source, destination, target):
"""
:type n: int
:type edges: List[List[int]]
:type source: int
:type destination: int
:type target: int
:rtype: List[List[int]]
"""
def dijkstra(start, x):
best = [target+1]*len(adj)
best[start] = 0
min_heap = [(0, start)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr > best[u]:
continue
for v, w in adj[u]:
if w == -1:
w = x
if curr+w >= best[v]:
continue
best[v] = curr+w
heapq.heappush(min_heap, (best[v], v))
return best
        adj = [[] for _ in range(n)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
left = dijkstra(source, 1)
if not (left[destination] <= target):
return []
        right = dijkstra(destination, target+1)
if not (right[source] >= target):
return []
for e in edges:
if e[2] == -1:
e[2] = max(target-left[e[0]]-right[e[1]], target-left[e[1]]-right[e[0]], 1)
return edges
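
A hedged driver for the method above; the input is loosely based on a published example for this problem (LeetCode 2699), and any assignment of the `-1` weights that makes the shortest 0 -> 1 path exactly 5 is acceptable:

```python
# Any valid reweighting is accepted; an empty list means no assignment exists.
sol = Solution()
print(sol.modifiedGraphEdges(
    5, [[4, 1, -1], [2, 0, -1], [0, 3, -1], [4, 3, -1]], 0, 1, 5
))
```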

---

blob_id: cfcfcd6215ed85af010f90c81fe5f5e5a073dea2
directory_id: 3395a234e7c80d011607e79c49cd48bf516f256b
path: /dependencies/jedi/third_party/typeshed/third_party/2and3/flask/globals.pyi
content_id: 8ce1caf1653afc2c2201d185692783813bb455d1
detected_licenses: ["MIT", "Apache-2.0"]
license_type: permissive
repo_name: srusskih/SublimeJEDI
snapshot_id: 67329b72e184bc9584843968dcc534a002c797a1
revision_id: 95c185d778425c04536d53517b0e3fe6dedf8e59
branch_name: refs/heads/master
visit_date: 2023-08-24T11:30:37.801834
revision_date: 2022-08-30T09:04:17
committer_date: 2022-08-30T09:04:17
github_id: 6,241,108
star_events_count: 669
fork_events_count: 125
gha_license_id: MIT
gha_event_created_at: 2022-08-30T09:04:18
gha_created_at: 2012-10-16T08:23:57
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 438
extension: pyi
filename: globals.pyi
content:
# Stubs for flask.globals (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from .app import Flask
from .wrappers import Request
from typing import Any
from werkzeug.local import LocalStack
class _FlaskLocalProxy(Flask):
def _get_current_object(self) -> Flask: ...
_request_ctx_stack: LocalStack
_app_ctx_stack: LocalStack
current_app: _FlaskLocalProxy
request: Request
session: Any
g: Any
|
d241d3599b300a7fd71322d2a7713437eca3cf09
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/android_minimum_operating_system.py
|
824a232c8abbbbae851e0be7e6a4f04879d5412c
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 7,402
|
py
|
android_minimum_operating_system.py
|
from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
@dataclass
class AndroidMinimumOperatingSystem(AdditionalDataHolder, BackedModel, Parsable):
"""
Contains properties for the minimum operating system required for an Android mobile app.
"""
# Stores model information.
backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additional_data: Dict[str, Any] = field(default_factory=dict)
# The OdataType property
odata_type: Optional[str] = None
# When TRUE, only Version 10.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v10_0: Optional[bool] = None
# When TRUE, only Version 11.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v11_0: Optional[bool] = None
# When TRUE, only Version 4.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_0: Optional[bool] = None
# When TRUE, only Version 4.0.3 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_0_3: Optional[bool] = None
# When TRUE, only Version 4.1 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_1: Optional[bool] = None
# When TRUE, only Version 4.2 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_2: Optional[bool] = None
# When TRUE, only Version 4.3 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_3: Optional[bool] = None
# When TRUE, only Version 4.4 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v4_4: Optional[bool] = None
# When TRUE, only Version 5.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v5_0: Optional[bool] = None
# When TRUE, only Version 5.1 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v5_1: Optional[bool] = None
# When TRUE, only Version 6.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v6_0: Optional[bool] = None
# When TRUE, only Version 7.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v7_0: Optional[bool] = None
# When TRUE, only Version 7.1 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v7_1: Optional[bool] = None
# When TRUE, only Version 8.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v8_0: Optional[bool] = None
# When TRUE, only Version 8.1 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v8_1: Optional[bool] = None
# When TRUE, only Version 9.0 or later is supported. Default value is FALSE. Exactly one of the minimum operating system boolean values will be TRUE.
v9_0: Optional[bool] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> AndroidMinimumOperatingSystem:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: AndroidMinimumOperatingSystem
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return AndroidMinimumOperatingSystem()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"@odata.type": lambda n : setattr(self, 'odata_type', n.get_str_value()),
"v10_0": lambda n : setattr(self, 'v10_0', n.get_bool_value()),
"v11_0": lambda n : setattr(self, 'v11_0', n.get_bool_value()),
"v4_0": lambda n : setattr(self, 'v4_0', n.get_bool_value()),
"v4_0_3": lambda n : setattr(self, 'v4_0_3', n.get_bool_value()),
"v4_1": lambda n : setattr(self, 'v4_1', n.get_bool_value()),
"v4_2": lambda n : setattr(self, 'v4_2', n.get_bool_value()),
"v4_3": lambda n : setattr(self, 'v4_3', n.get_bool_value()),
"v4_4": lambda n : setattr(self, 'v4_4', n.get_bool_value()),
"v5_0": lambda n : setattr(self, 'v5_0', n.get_bool_value()),
"v5_1": lambda n : setattr(self, 'v5_1', n.get_bool_value()),
"v6_0": lambda n : setattr(self, 'v6_0', n.get_bool_value()),
"v7_0": lambda n : setattr(self, 'v7_0', n.get_bool_value()),
"v7_1": lambda n : setattr(self, 'v7_1', n.get_bool_value()),
"v8_0": lambda n : setattr(self, 'v8_0', n.get_bool_value()),
"v8_1": lambda n : setattr(self, 'v8_1', n.get_bool_value()),
"v9_0": lambda n : setattr(self, 'v9_0', n.get_bool_value()),
}
return fields
    def serialize(self, writer: SerializationWriter) -> None:
        """
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
writer.write_str_value("@odata.type", self.odata_type)
writer.write_bool_value("v10_0", self.v10_0)
writer.write_bool_value("v11_0", self.v11_0)
writer.write_bool_value("v4_0", self.v4_0)
writer.write_bool_value("v4_0_3", self.v4_0_3)
writer.write_bool_value("v4_1", self.v4_1)
writer.write_bool_value("v4_2", self.v4_2)
writer.write_bool_value("v4_3", self.v4_3)
writer.write_bool_value("v4_4", self.v4_4)
writer.write_bool_value("v5_0", self.v5_0)
writer.write_bool_value("v5_1", self.v5_1)
writer.write_bool_value("v6_0", self.v6_0)
writer.write_bool_value("v7_0", self.v7_0)
writer.write_bool_value("v7_1", self.v7_1)
writer.write_bool_value("v8_0", self.v8_0)
writer.write_bool_value("v8_1", self.v8_1)
writer.write_bool_value("v9_0", self.v9_0)
writer.write_additional_data_value(self.additional_data)

---

blob_id: b98b3a45d969de68b67fd70463b39834a7ece7fb
directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda
path: /ash/host/DEPS
content_id: dba3bec9afa7c1c34af09c726785e9e47a84e13d
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: chromium/chromium
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
branch_name: refs/heads/main
visit_date: 2023-08-24T00:35:12.585945
revision_date: 2023-08-23T22:01:11
committer_date: 2023-08-23T22:01:11
github_id: 120,360,765
star_events_count: 17,408
fork_events_count: 7,102
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-10T23:44:27
gha_created_at: 2018-02-05T20:55:32
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 338
extension: DEPS
filename: DEPS
content:
include_rules = [
"-ash",
"+ash/host",
"+ash/ash_export.h",
"+ash/constants",
]
specific_include_rules = {
"ash_window_tree_host_platform_unittest.cc" : [
"+ash/test/ash_test_base.h",
],
"ash_window_tree_host_unified_unittest.cc" : [
"+ash/display/mirror_window_test_api.h",
"+ash/test/ash_test_base.h",
],
}

---

blob_id: 7acb95de82f532682b8cbaa69df5321bc8f63551
directory_id: 069c2295076c482afadfe6351da5ae02be8e18e6
path: /tests/generic_views/test_base.py
content_id: add485245a871b75a9a580664826c163f9996b51
detected_licenses: ["LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "GPL-1.0-or-later", "Python-2.0.1", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-permissive", "Python-2.0"]
license_type: permissive
repo_name: django/django
snapshot_id: 5eb557f57053631cd4f566f451e43197309dbeeb
revision_id: c74a6fad5475495756a5bdb18b2cab2b68d429bc
branch_name: refs/heads/main
visit_date: 2023-09-01T03:43:44.033530
revision_date: 2023-08-31T08:27:32
committer_date: 2023-08-31T08:27:32
github_id: 4,164,482
star_events_count: 73,530
fork_events_count: 38,187
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T20:03:48
gha_created_at: 2012-04-28T02:47:18
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 22,529
extension: py
filename: test_base.py
content:
import time
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.urls import resolve
from django.views.generic import RedirectView, TemplateView, View
from . import views
class SimpleView(View):
"""
A simple view with a docstring.
"""
def get(self, request):
return HttpResponse("This is a simple view")
class SimplePostView(SimpleView):
post = SimpleView.get
class PostOnlyView(View):
def post(self, request):
return HttpResponse("This view only accepts POST")
class CustomizableView(SimpleView):
parameter = {}
def decorator(view):
view.is_decorated = True
return view
class DecoratedDispatchView(SimpleView):
@decorator
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
def get(self, request):
return self.render_to_response({})
def get_template_names(self):
return ["generic_views/about.html"]
class AboutTemplateAttributeView(TemplateView):
template_name = "generic_views/about.html"
def get(self, request):
return self.render_to_response(context={})
class InstanceView(View):
def get(self, request):
return self
class ViewTest(SimpleTestCase):
rf = RequestFactory()
def _assert_simple(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"This is a simple view")
def test_no_init_kwargs(self):
"""
A view can't be accidentally instantiated before deployment
"""
msg = "This method is available only on the class, not on instances."
with self.assertRaisesMessage(AttributeError, msg):
SimpleView(key="value").as_view()
def test_no_init_args(self):
"""
A view can't be accidentally instantiated before deployment
"""
msg = "as_view() takes 1 positional argument but 2 were given"
with self.assertRaisesMessage(TypeError, msg):
SimpleView.as_view("value")
def test_pathological_http_method(self):
"""
The edge case of an HTTP request that spoofs an existing method name is
caught.
"""
self.assertEqual(
SimpleView.as_view()(
self.rf.get("/", REQUEST_METHOD="DISPATCH")
).status_code,
405,
)
def test_get_only(self):
"""
Test a view which only allows GET doesn't allow other methods.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get("/")))
self.assertEqual(SimpleView.as_view()(self.rf.post("/")).status_code, 405)
self.assertEqual(
SimpleView.as_view()(self.rf.get("/", REQUEST_METHOD="FAKE")).status_code,
405,
)
def test_get_and_head(self):
"""
Test a view which supplies a GET method also responds correctly to HEAD.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get("/")))
response = SimpleView.as_view()(self.rf.head("/"))
self.assertEqual(response.status_code, 200)
def test_setup_get_and_head(self):
view_instance = SimpleView()
self.assertFalse(hasattr(view_instance, "head"))
view_instance.setup(self.rf.get("/"))
self.assertTrue(hasattr(view_instance, "head"))
self.assertEqual(view_instance.head, view_instance.get)
def test_head_no_get(self):
"""
Test a view which supplies no GET method responds to HEAD with HTTP 405.
"""
response = PostOnlyView.as_view()(self.rf.head("/"))
self.assertEqual(response.status_code, 405)
def test_get_and_post(self):
"""
        Test a view which allows both GET and POST.
"""
self._assert_simple(SimplePostView.as_view()(self.rf.get("/")))
self._assert_simple(SimplePostView.as_view()(self.rf.post("/")))
self.assertEqual(
SimplePostView.as_view()(
self.rf.get("/", REQUEST_METHOD="FAKE")
).status_code,
405,
)
def test_invalid_keyword_argument(self):
"""
View arguments must be predefined on the class and can't
be named like an HTTP method.
"""
msg = (
"The method name %s is not accepted as a keyword argument to "
"SimpleView()."
)
# Check each of the allowed method names
for method in SimpleView.http_method_names:
with self.assertRaisesMessage(TypeError, msg % method):
SimpleView.as_view(**{method: "value"})
# Check the case view argument is ok if predefined on the class...
CustomizableView.as_view(parameter="value")
# ...but raises errors otherwise.
msg = (
"CustomizableView() received an invalid keyword 'foobar'. "
"as_view only accepts arguments that are already attributes of "
"the class."
)
with self.assertRaisesMessage(TypeError, msg):
CustomizableView.as_view(foobar="value")
def test_calling_more_than_once(self):
"""
        Test that each call to a view callable uses a fresh view instance.
"""
request = self.rf.get("/")
view = InstanceView.as_view()
self.assertNotEqual(view(request), view(request))
def test_class_attributes(self):
"""
The callable returned from as_view() has proper special attributes.
"""
cls = SimpleView
view = cls.as_view()
self.assertEqual(view.__doc__, cls.__doc__)
self.assertEqual(view.__name__, "view")
self.assertEqual(view.__module__, cls.__module__)
self.assertEqual(view.__qualname__, f"{cls.as_view.__qualname__}.<locals>.view")
self.assertEqual(view.__annotations__, cls.dispatch.__annotations__)
self.assertFalse(hasattr(view, "__wrapped__"))
def test_dispatch_decoration(self):
"""
Attributes set by decorators on the dispatch method
are also present on the closure.
"""
self.assertTrue(DecoratedDispatchView.as_view().is_decorated)
def test_options(self):
"""
Views respond to HTTP OPTIONS requests with an Allow header
appropriate for the methods implemented by the view class.
"""
request = self.rf.options("/")
view = SimpleView.as_view()
response = view(request)
self.assertEqual(200, response.status_code)
self.assertTrue(response.headers["Allow"])
def test_options_for_get_view(self):
"""
A view implementing GET allows GET and HEAD.
"""
request = self.rf.options("/")
view = SimpleView.as_view()
response = view(request)
self._assert_allows(response, "GET", "HEAD")
def test_options_for_get_and_post_view(self):
"""
A view implementing GET and POST allows GET, HEAD, and POST.
"""
request = self.rf.options("/")
view = SimplePostView.as_view()
response = view(request)
self._assert_allows(response, "GET", "HEAD", "POST")
def test_options_for_post_view(self):
"""
A view implementing POST allows POST.
"""
request = self.rf.options("/")
view = PostOnlyView.as_view()
response = view(request)
self._assert_allows(response, "POST")
def _assert_allows(self, response, *expected_methods):
"Assert allowed HTTP methods reported in the Allow response header"
response_allows = set(response.headers["Allow"].split(", "))
self.assertEqual(set(expected_methods + ("OPTIONS",)), response_allows)
def test_args_kwargs_request_on_self(self):
"""
Test a view only has args, kwargs & request once `as_view`
has been called.
"""
bare_view = InstanceView()
view = InstanceView.as_view()(self.rf.get("/"))
for attribute in ("args", "kwargs", "request"):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
def test_overridden_setup(self):
class SetAttributeMixin:
def setup(self, request, *args, **kwargs):
self.attr = True
super().setup(request, *args, **kwargs)
class CheckSetupView(SetAttributeMixin, SimpleView):
def dispatch(self, request, *args, **kwargs):
assert hasattr(self, "attr")
return super().dispatch(request, *args, **kwargs)
response = CheckSetupView.as_view()(self.rf.get("/"))
self.assertEqual(response.status_code, 200)
def test_not_calling_parent_setup_error(self):
class TestView(View):
def setup(self, request, *args, **kwargs):
pass # Not calling super().setup()
msg = (
"TestView instance has no 'request' attribute. Did you override "
"setup() and forget to call super()?"
)
with self.assertRaisesMessage(AttributeError, msg):
TestView.as_view()(self.rf.get("/"))
def test_setup_adds_args_kwargs_request(self):
request = self.rf.get("/")
args = ("arg 1", "arg 2")
kwargs = {"kwarg_1": 1, "kwarg_2": "year"}
view = View()
view.setup(request, *args, **kwargs)
self.assertEqual(request, view.request)
self.assertEqual(args, view.args)
self.assertEqual(kwargs, view.kwargs)
def test_direct_instantiation(self):
"""
It should be possible to use the view by directly instantiating it
without going through .as_view() (#21564).
"""
view = PostOnlyView()
response = view.dispatch(self.rf.head("/"))
self.assertEqual(response.status_code, 405)
@override_settings(ROOT_URLCONF="generic_views.urls")
class TemplateViewTest(SimpleTestCase):
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertContains(response, "<h1>About</h1>")
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get("/about/")))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head("/about/"))
self.assertEqual(response.status_code, 200)
def test_get_template_attribute(self):
"""
Test a view that renders a template on GET with the template name as
an attribute on the class.
"""
self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get("/about/")))
def test_get_generic_template(self):
"""
Test a completely generic view that renders a template on GET
with the template name as an argument at instantiation.
"""
self._assert_about(
TemplateView.as_view(template_name="generic_views/about.html")(
self.rf.get("/about/")
)
)
def test_template_name_required(self):
"""
A template view must provide a template name.
"""
msg = (
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get("/template/no_template/")
@require_jinja2
def test_template_engine(self):
"""
A template view may provide a template engine.
"""
request = self.rf.get("/using/")
view = TemplateView.as_view(template_name="generic_views/using.html")
self.assertEqual(view(request).render().content, b"DTL\n")
view = TemplateView.as_view(
template_name="generic_views/using.html", template_engine="django"
)
self.assertEqual(view(request).render().content, b"DTL\n")
view = TemplateView.as_view(
template_name="generic_views/using.html", template_engine="jinja2"
)
self.assertEqual(view(request).render().content, b"Jinja2\n")
def test_template_params(self):
"""
A generic template view passes kwargs as context.
"""
response = self.client.get("/template/simple/bar/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["foo"], "bar")
self.assertIsInstance(response.context["view"], View)
def test_extra_template_params(self):
"""
A template view can be customized to return extra context.
"""
response = self.client.get("/template/custom/bar/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["foo"], "bar")
self.assertEqual(response.context["key"], "value")
self.assertIsInstance(response.context["view"], View)
def test_cached_views(self):
"""
A template view can be cached
"""
response = self.client.get("/template/cached/bar/")
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get("/template/cached/bar/")
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get("/template/cached/bar/")
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
def test_content_type(self):
response = self.client.get("/template/content_type/")
self.assertEqual(response.headers["Content-Type"], "text/plain")
def test_resolve_view(self):
match = resolve("/template/content_type/")
self.assertIs(match.func.view_class, TemplateView)
self.assertEqual(match.func.view_initkwargs["content_type"], "text/plain")
def test_resolve_login_required_view(self):
match = resolve("/template/login_required/")
self.assertIs(match.func.view_class, TemplateView)
def test_extra_context(self):
response = self.client.get("/template/extra_context/")
self.assertEqual(response.context["title"], "Title")
@override_settings(ROOT_URLCONF="generic_views.urls")
class RedirectViewTest(SimpleTestCase):
rf = RequestFactory()
def test_no_url(self):
"Without any configuration, returns HTTP 410 GONE"
response = RedirectView.as_view()(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 410)
def test_default_redirect(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_permanent_redirect(self):
"Permanent redirects are an option"
response = RedirectView.as_view(url="/bar/", permanent=True)(
self.rf.get("/foo/")
)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/bar/")
def test_temporary_redirect(self):
"Temporary redirects are an option"
response = RedirectView.as_view(url="/bar/", permanent=False)(
self.rf.get("/foo/")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_include_args(self):
"GET arguments can be included in the redirected URL"
response = RedirectView.as_view(url="/bar/")(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
response = RedirectView.as_view(url="/bar/", query_string=True)(
self.rf.get("/foo/?pork=spam")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/?pork=spam")
def test_include_urlencoded_args(self):
"GET arguments can be URL-encoded when included in the redirected URL"
response = RedirectView.as_view(url="/bar/", query_string=True)(
self.rf.get("/foo/?unicode=%E2%9C%93")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/?unicode=%E2%9C%93")
def test_parameter_substitution(self):
"Redirection URLs can be parameterized"
response = RedirectView.as_view(url="/bar/%(object_id)d/")(
self.rf.get("/foo/42/"), object_id=42
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/42/")
def test_named_url_pattern(self):
"Named pattern parameter should reverse to the matching pattern"
response = RedirectView.as_view(pattern_name="artist_detail")(
self.rf.get("/foo/"), pk=1
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers["Location"], "/detail/artist/1/")
def test_named_url_pattern_using_args(self):
response = RedirectView.as_view(pattern_name="artist_detail")(
self.rf.get("/foo/"), 1
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers["Location"], "/detail/artist/1/")
def test_redirect_POST(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.post("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_HEAD(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.head("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_OPTIONS(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.options("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_PUT(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.put("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_PATCH(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.patch("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_DELETE(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.delete("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_when_meta_contains_no_query_string(self):
"regression for #16705"
# we can't use self.rf.get because it always sets QUERY_STRING
response = RedirectView.as_view(url="/bar/")(self.rf.request(PATH_INFO="/foo/"))
self.assertEqual(response.status_code, 302)
def test_direct_instantiation(self):
"""
It should be possible to use the view without going through .as_view()
(#21564).
"""
view = RedirectView()
response = view.dispatch(self.rf.head("/foo/"))
self.assertEqual(response.status_code, 410)
class GetContextDataTest(SimpleTestCase):
def test_get_context_data_super(self):
test_view = views.CustomContextView()
context = test_view.get_context_data(kwarg_test="kwarg_value")
        # the test_name key is inserted by the test class's parent
self.assertIn("test_name", context)
self.assertEqual(context["kwarg_test"], "kwarg_value")
self.assertEqual(context["custom_key"], "custom_value")
# test that kwarg overrides values assigned higher up
context = test_view.get_context_data(test_name="test_value")
self.assertEqual(context["test_name"], "test_value")
def test_object_at_custom_name_in_context_data(self):
        # Checks 'pony' key presence in dict returned by get_context_data
test_view = views.CustomSingleObjectView()
test_view.context_object_name = "pony"
context = test_view.get_context_data()
self.assertEqual(context["pony"], test_view.object)
def test_object_in_get_context_data(self):
        # Checks 'object' key presence in dict returned by get_context_data #20234
test_view = views.CustomSingleObjectView()
context = test_view.get_context_data()
self.assertEqual(context["object"], test_view.object)
class UseMultipleObjectMixinTest(SimpleTestCase):
rf = RequestFactory()
def test_use_queryset_from_view(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get("/"))
# Don't pass queryset as argument
context = test_view.get_context_data()
self.assertEqual(context["object_list"], test_view.queryset)
def test_overwrite_queryset(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get("/"))
queryset = [{"name": "Lennon"}, {"name": "Ono"}]
self.assertNotEqual(test_view.queryset, queryset)
# Overwrite the view's queryset with queryset from kwarg
context = test_view.get_context_data(object_list=queryset)
self.assertEqual(context["object_list"], queryset)
class SingleObjectTemplateResponseMixinTest(SimpleTestCase):
def test_template_mixin_without_template(self):
"""
        We want to make sure that if you use a template mixin, but forget the
template, it still tells you it's ImproperlyConfigured instead of
TemplateDoesNotExist.
"""
view = views.TemplateResponseWithoutTemplate()
msg = (
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
view.get_template_names()

---

blob_id: 3ea9345ecee6a0405d8b88f4e4a9068f159aee75
directory_id: 61b95ee2aefbcfbd6c4abf9511d976d0b9d0e100
path: /faker/providers/lorem/pl_PL/__init__.py
content_id: 41dd8865afa77a7487f30db95d0a40e81e3801f8
detected_licenses: ["MIT"]
license_type: permissive
repo_name: joke2k/faker
snapshot_id: fed7472580ced2bce326fe4ea0c3d1c810853d5e
revision_id: 33e36b1b6cc9c6f039fe387988853771bab60624
branch_name: refs/heads/master
visit_date: 2023-09-04T00:43:33.599705
revision_date: 2023-08-31T16:15:04
committer_date: 2023-08-31T16:15:04
github_id: 6,662,075
star_events_count: 14,544
fork_events_count: 2,215
gha_license_id: MIT
gha_event_created_at: 2023-09-11T16:06:14
gha_created_at: 2012-11-12T23:00:09
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 38,494
extension: py
filename: __init__.py
content:
from typing import Dict
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""Implement lorem provider for ``pl_PL`` locale.
Source: https://pl.wiktionary.org/wiki/Indeks%3APolski_-_Najpopularniejsze_s%C5%82owa_1-2000
"""
word_list = (
"w",
"z",
"być",
"na",
"i",
"do",
"nie",
"który",
"lub",
"to",
"się",
"o",
"mieć",
"coś",
"ten",
"dotyczyć",
"on",
"od",
"co",
"język",
"po",
"że",
"ktoś",
"przez",
"osoba",
"miasto",
"jeden",
"jak",
"za",
"ja",
"rok",
"a",
"bardzo",
"swój",
"dla",
"taki",
"człowiek",
"cecha",
"kobieta",
"mój",
"część",
"związany",
"móc",
"dwa",
"ona",
"związać",
"ze",
"mały",
"jakiś",
"miejsce",
"inny",
"duży",
"bez",
"czas",
"ale",
"czy",
"jako",
"sposób",
"rodzaj",
"Polska",
"rodzina",
"tylko",
"mieszkaniec",
"dzień",
"praca",
"przed",
"dom",
"dziecko",
"ty",
"pod",
"tak",
"woda",
"np.",
"już",
"rzeka",
"zostać",
"dobry",
"życie",
"państwo",
"mówić",
"pierwszy",
"nasz",
"cały",
"nad",
"wiele",
"zwierzę",
"przy",
"roślina",
"ta",
"u",
"jego",
"gatunek",
"nowy",
"chcieć",
"sobie",
"wielki",
"często",
"trzy",
"kolor",
"używać",
"musieć",
"kraj",
"robić",
"strona",
"każdy",
"wysoki",
"nazwa",
"mężczyzna",
"grupa",
"my",
"stary",
"sam",
"stan",
"drugi",
"zrobić",
"iść",
"oraz",
"polski",
"litera",
"kto",
"prawo",
"drzewo",
"ptak",
"książka",
"świat",
"samochód",
"rzecz",
"stolica",
"między",
"droga",
"należeć",
"mieszkanka",
"słowo",
"gdy",
"głowa",
"pies",
"młody",
"symbol",
"oni",
"bo",
"ziemia",
"aby",
"owoc",
"liczba",
"wiek",
"nie-",
"kilka",
"zły",
"środek",
"znajdować się",
"raz",
"dobrze",
"pan",
"kiedy",
"okres",
"pochodzić",
"ojciec",
"długi",
"ręka",
"itp.",
"odnosić się",
"dużo",
"podczas",
"biały",
"albo",
"ruch",
"jaki",
"przedmiot",
"służyć",
"matka",
"we",
"znak",
"ci",
"siebie",
"liczba atomowa",
"jeszcze",
"niż",
"cztery",
"wszystko",
"widzieć",
"żona",
"koń",
"szkoła",
"ciało",
"stać",
"kupić",
"zawsze",
"forma",
"sprawa",
"Rosja",
"wieś",
"góra",
"wyspa",
"oko",
"działanie",
"twój",
"występować",
"koniec",
"rząd",
"pięć",
"pokój",
"nauka",
"gdzie",
"kwiat",
"choroba",
"zwykle",
"powiedzieć",
"mieszkać",
"wiedzieć",
"imię",
"prowadzić",
"element",
"dać",
"godzina",
"żyć",
"ryba",
"wszyscy",
"zawierać",
"pracować",
"by",
"alfabet",
"członek",
"syn",
"jednostka",
"herb",
"brat",
"las",
"urządzenie",
"miesiąc",
"dziewczyna",
"obszar",
"grać",
"różny",
"teren",
"piękny",
"jeść",
"nic",
"brak",
"żeby",
"lubić",
"dany",
"budynek",
"położyć",
"czerwony",
"cel",
"stopień",
"siła",
"światło",
"leżeć",
"dawać",
"gra",
"sztuka",
"czarny",
"one",
"jej",
"wino",
"chodzić",
"statek",
"krótki",
"śmierć",
"wartość",
"dźwięk",
"sytuacja",
"teraz",
"główny",
"zajmować się",
"wykonywać",
"związek",
"ważny",
"ostatni",
"1000",
"tam",
"noc",
"dziś",
"pierwiastek chemiczny",
"wojna",
"noga",
"sklep",
"skóra",
"pani",
"własny",
"materiał",
"niektóry",
"tworzyć",
"system",
"znany",
"także",
"wykonać",
"niebo",
"święty",
"władza",
"wczoraj",
"film",
"twarz",
"flaga",
"morze",
"nawet",
"mięso",
"głos",
"Europa",
"?",
"pieniądz",
"powierzchnia",
"proces",
"tydzień",
"posiadać",
"ilość",
"obwód",
"działać",
"północny",
"region",
"jeśli",
"trwać",
"szybko",
"Bóg",
"silny",
"!",
"lecz",
"zielony",
"określony",
"król",
"pole",
"przyjaciel",
"1",
"dwadzieścia",
"serce",
"sześć",
"słońce",
"pisać",
"kot",
"drzwi",
"znać",
"początek",
"tysiąc",
"mleko",
"południowy",
"obraz",
"nosić",
"wiatr",
"niski",
"tekst",
"pić",
"zmiana",
"dawny",
"ulica",
"kierunek",
"linia",
"jechać",
"wyraz",
"stanowić",
"charakterystyczny",
"składać się",
"tu",
"uważać",
"siedem",
"miłość",
"podobny",
"więc",
"żołnierz",
"siostra",
"córka",
"też",
"chleb",
"zacząć",
"koło",
"granica",
"powietrze",
"pewien",
"włos",
"charakter",
"punkt",
"dzisiaj",
"ludzie",
"mało",
"liść",
"(…)",
"znaleźć",
"kościół",
"badanie",
"niewielki",
"wziąć",
"prosty",
"krew",
"mąż",
"–",
"wolny",
"kawa",
"problem",
"pójść",
"powodować",
"czyjś",
"drewno",
"kształt",
"stać się",
"właściwy",
"trzeci",
"znaczenie",
"brzeg",
"historia",
"ich",
"zasada",
"brać",
"dziesięć",
"powinien",
"żaden",
"jezioro",
"okno",
"kultura",
"niemiecki",
"ostry",
"but",
"stosować",
"ogień",
"nigdy",
"zbiór",
"samolot",
"ból",
"osiem",
"można",
"gwiazda",
"walka",
"Ukraina",
"prawdziwy",
"ciężki",
"zespół",
"drogi",
"pracownik",
"Francja",
"myśleć",
"zachowanie",
"polegać",
"uwaga",
"pomoc",
"przypominać",
"grecki",
"Niemcy",
"ząb",
"ile",
"informacja",
"chwila",
"deszcz",
"istnieć",
"nauczyciel",
"żółty",
"chory",
"piwo",
"według",
"dostać",
"uczeń",
"jedzenie",
"śnieg",
"jednak",
"również",
"ani",
"zwłaszcza",
"utwór",
"czysty",
"firma",
"siedzieć",
"francuski",
"łączyć",
"południe",
"zbyt",
"trudny",
"urząd",
"stół",
"lekarz",
"muzyka",
"czynność",
"układ okresowy",
"pociąg",
"jasny",
"klasa",
"męski",
"kamień",
"pierwiastek",
"ubranie",
"ściana",
"postać",
"pełny",
"organizm",
"5",
"księżyc",
"gmina",
"rosnąć",
"w celu",
"wydawać",
"źródło",
"funkcja",
"położenie",
"typ",
"starożytny",
"jutro",
"dziewięć",
"trzeba",
"społeczny",
"prawy",
"program",
"pojazd",
"może",
"historyczny",
"2",
"substancja",
"wszystkie",
"piec",
"układ",
"bóg",
"polityczny",
"chłopiec",
"cena",
"słaby",
"głupi",
"ludzki",
"trzymać",
"zupa",
"około",
"mieszkanie",
"zdanie",
"naczynie",
"uprawiać",
"północ",
"kraina",
"numer",
"para",
"dokument",
"uczucie",
"prawda",
"złoty",
"za pomocą",
"elektryczny",
"dziedzina",
"zachodni",
"alkohol",
"trochę",
"prowincja",
"prosić",
"list",
"bliski",
"komputer",
"towar",
"szybki",
"spać",
"niebieski",
"aż",
"przypadek",
"organizacja",
"herbata",
"szeroki",
"kawałek",
"czytać",
"obejmować",
"wojskowy",
"narzędzie",
"przyjść",
"myśl",
"ogród",
"Włochy",
"całość",
"wieczór",
"lód",
"wiedza",
"powiat",
"połowa",
"angielski",
"głównie",
"zjawisko",
"chłopak",
"wpływ",
"mowa",
"naturalny",
"morski",
"produkt",
"lewy",
"prawie",
"lek",
"miejscowość",
"napój",
"wschodni",
"księga",
"stopa",
"drobny",
"ciasto",
"kuchnia",
"plan",
"powstać",
"pełen",
"wokół",
"kochać",
"palec",
"zobaczyć",
"poprzez",
"maszyna",
"dziadek",
"wielkość",
"nos",
"złoto",
"pewny",
"partia",
"większość",
"obiekt",
"publiczny",
"pismo",
"wybitny",
"wszystek",
"błąd",
"broń",
"sen",
"trzydzieści",
"gruby",
"spotkanie",
"tkanina",
"smak",
"gość",
"potrawa",
"pytanie",
"produkcja",
"wy",
"razem",
"obywatel",
"jajko",
"3",
"zima",
"nazywać",
"policja",
"nikt",
"słodki",
"dopływ",
"butelka",
"energia",
"składać",
"łóżko",
"urodzenie",
"zdrowie",
"odmiana",
"zdjęcie",
"mocny",
"poza",
"4",
"lekki",
"czynić",
"przeciwny",
"duch",
"sąd",
"przeznaczyć",
"zapach",
"stały",
"Afryka",
"styl",
"karta",
"wypadek",
"babcia",
"wojsko",
"wodny",
"równy",
"rola",
"rejon",
"wybrzeże",
"naród",
"wiadomość",
"kość",
"tytuł",
"cukier",
"barwa",
"żywy",
"szczyt",
"rozwój",
"sieć",
"30",
"ponad",
"lato",
"warstwa",
"jabłko",
"wyrażać",
"bogaty",
"odbywać się",
"podstawowy",
"cześć",
"z powodu",
"ponieważ",
"wyjść",
"poziom",
"wyglądać",
"śpiewać",
"oznaczać",
"rozmowa",
"ciemny",
"papier",
"900",
"palić",
"lud",
"długość",
"usta",
"ucho",
"urodzić",
"wewnątrz",
"wśród",
"przedstawiciel",
"środkowy",
"obok",
"dzieło",
"arabski",
"krowa",
"taniec",
"rano",
"grzyb",
"długo",
"wydarzenie",
"pięćdziesiąt",
"włoski",
"słuchać",
"ser",
"właśnie",
"stanowisko",
"odpowiedni",
"korona",
"rower",
"święto",
"czekać",
"szukać",
"100",
"religia",
"piłka",
"opinia",
"wynik",
"pozycja",
"pochodzenie",
"metoda",
"ciepły",
"potem",
"udział",
"Hiszpania",
"rozumieć",
"6",
"wspólny",
"środowisko",
"całkowicie",
"budowa",
"ramię",
"gazeta",
"zabawa",
"nie ma",
"szczęście",
"pomieszczenie",
"strach",
"fala",
"patrzeć",
"odcień",
"temperatura",
"warunek",
"zdolność",
"sól",
"rosyjski",
"podróż",
"wykorzystywać",
"Ziemia",
"religijny",
"centrum",
"zbierać",
"zupełnie",
"przestrzeń",
"pas",
"połączenie",
"wobec",
"stawać się",
"potrzeba",
"narodowy",
"liczyć",
"otwarty",
"wejść",
"pozbawić",
"masa",
"głęboki",
"ono",
"wywoływać",
"zachód",
"wschód",
"powód",
"Azja",
"administracyjny",
"temat",
"odpowiadać",
"szpital",
"zajmować",
"czterdzieści",
"sto",
"sobą",
"pogląd",
"chronić",
"wysokość",
"słownik",
"rodzic",
"świnia",
"zaczynać",
"moneta",
"możliwość",
"mama",
"gdzieś",
"egzamin",
"pogoda",
"chemiczny",
"gorący",
"zadanie",
"więzienie",
"zakład",
"ofiara",
"obiad",
"wąski",
"zamek",
"moc",
"stosunek",
"natura",
"8",
"zazwyczaj",
"założyć",
"skrzydło",
"otrzymać",
"oficjalny",
"chmura",
"ten sam",
"złożyć",
"wewnętrzny",
"wspaniały",
"przyczyna",
"miły",
"dziki",
"kara",
"listopad",
"komórka",
"instytucja",
"skała",
"ogromny",
"wygląd",
"sześćdziesiąt",
"możliwy",
"wąż",
"umrzeć",
"określać",
"amerykański",
"płynąć",
"walczyć",
"nóż",
"nagle",
"instrument",
"20",
"rynek",
"Grecja",
"umowa",
"niedziela",
"szczęśliwy",
"tutaj",
"zmieniać",
"węgiel",
"sylaba",
"Warszawa",
"ładny",
"europejski",
"czwarty",
"styczeń",
"hiszpański",
"posługiwać się",
"papieros",
"fizyczny",
"dach",
"zimny",
"ogon",
"trawa",
"telefon",
"płyn",
"przedstawiać",
"metal",
"dlaczego",
"próbować",
"10",
"7",
"sportowy",
"oddział",
"obecnie",
"9",
"miara",
"prezydent",
"pierś",
"rodowity",
"stworzyć",
"dział",
"dusza",
"wierzyć",
"domowy",
"właściciel",
"wyrób",
"autobus",
"ponownie",
"gaz",
"właściwość",
"rada",
"rzymski",
"bieg",
"zgoda",
"obowiązek",
"owca",
"zamieszkiwać",
"przyjąć",
"muzyczny",
"przyrząd",
"piąty",
"szczególnie",
"kupować",
"istota",
"stracić",
"artykuł",
"ochrona",
"te",
"napisać",
"specjalista",
"ku",
"górski",
"należy",
"określenie",
"pomiędzy",
"Rzym",
"ssak",
"zwolennik",
"odpowiedź",
"działalność",
"miejski",
"wcześnie",
"zdobyć",
"górny",
"uniwersytet",
"bić",
"wymagać",
"miękki",
"źle",
"40",
"państwowy",
"ludność",
"minuta",
"cierpieć",
"ogół",
"naprawdę",
"blisko",
"surowy",
"dodatek",
"radość",
"akcja",
"w kształcie",
"polityka",
"obcy",
"ziemniak",
"podstawa",
"przemysł",
"udać się",
"brzuch",
"suchy",
"krzew",
"terytorium",
"wolność",
"czyli",
"klucz",
"Jan",
"kolejny",
"uczyć się",
"postępowanie",
"sok",
"50",
"łatwo",
"jeździć",
"decyzja",
"naukowy",
"szanowny",
"warzywo",
"nadzieja",
"wrzesień",
"kierować",
"student",
"kąt",
"seksualny",
"piasek",
"drewniany",
"obchodzić",
"wróg",
"przeciwko",
"żeński",
"potrafić",
"pamięć",
"teatr",
"dwudziesty",
"znowu",
"potrzebować",
"owad",
"cienki",
"ziarno",
"moment",
"wiosna",
"wydać",
"literatura",
"tradycyjny",
"leczenie",
"poważny",
"siedemdziesiąt",
"silnik",
"spokój",
"luty",
"biedny",
"czuć",
"drużyna",
"dialekt",
"dzięki",
"grudzień",
"jedyny",
"pragnienie",
"siedziba",
"służba",
"wiara",
"pióro",
"wzrost",
"proszę",
"osiemdziesiąt",
"społeczeństwo",
"dokładnie",
"przykład",
"szacunek",
"marzec",
"róg",
"połączyć",
"uderzenie",
"zwyczaj",
"podawać",
"mocno",
"zwykły",
"kolega",
"międzynarodowy",
"sala",
"nadawać",
"tamten",
"szósty",
"lekcja",
"pomagać",
"republika",
"zjeść",
"typowy",
"modlitwa",
"dół",
"dlatego",
"rasa",
"użycie",
"dziewięćdziesiąt",
"bok",
"zatoka",
"wiersz",
"Szwecja",
"japoński",
"gałąź",
"wrogi",
"przyjmować",
"więcej",
"łatwy",
"atak",
"wychodzić",
"wtedy",
"płyta",
"milion",
"padać",
"kanał",
"poniedziałek",
"wzór",
"twardy",
"podatek",
"rzucać",
"świeży",
"bilet",
"zakładać",
"złapać",
"przyszłość",
"przyjęcie",
"zewnętrzny",
"zamknąć",
"przynosić",
"obecny",
"strój",
"popularny",
"późno",
"płaski",
"struktura",
"pieniądze",
"projekt",
"doświadczenie",
"szyja",
"rozmawiać",
"literacki",
"okolica",
"mur",
"małżeństwo",
"bitwa",
"kwiecień",
"maj",
"specjalny",
"poruszać się",
"sąsiad",
"organ",
"pamiętać",
"uczyć",
"termin",
"bank",
"pusty",
"pół",
"wchodzić",
"czyn",
"Japonia",
"przeciw",
"wczesny",
"wejście",
"ciągle",
"bać się",
"Stany Zjednoczone",
"delikatny",
"wilk",
"kula",
"r.",
"wnętrze",
"prąd",
"sprzedawać",
"port",
"spokojny",
"waga",
"sztuczny",
"Polak",
"jajo",
"dym",
"pszczoła",
"technika",
"współczesny",
"widoczny",
"krok",
"próba",
"gęsty",
"miód",
"ciepło",
"mapa",
"kapelusz",
"otwór",
"lew",
"osioł",
"gwałtowny",
"siódmy",
"budować",
"los",
"telewizja",
"spowodować",
"dłoń",
"akt",
"mysz",
"jesień",
"składnik",
"słyszeć",
"zgodnie",
"zdrowy",
"masło",
"gwiazdozbiór",
"kino",
"podać",
"zmienić",
"przechodzić",
"fabryka",
"dość",
"daleko",
"z krwi i kości",
"płeć",
"chiński",
"sędzia",
"pokryć",
"lotniczy",
"nazwisko",
"bądź",
"Słowacja",
"umieć",
"majątek",
"ocena",
"pływać",
"komputerowy",
"dziać się",
"ósmy",
"autor",
"sierpień",
"łagodny",
"zakres",
"wybrać",
"następny",
"odległość",
"most",
"policjant",
"panować",
"zawód",
"zwrot",
"wybór",
"Chiny",
"internetowy",
"wytwarzać",
"lipiec",
"bohater",
"prasa",
"penis",
"Czechy",
"80",
"fakt",
"piosenka",
"mąka",
"badać",
"sobota",
"piątek",
"znajdować",
"straszny",
"waluta",
"pojechać",
"otwierać",
"umiejętność",
"ślub",
"restauracja",
"przedsiębiorstwo",
"towarzystwo",
"model",
"no",
"prywatny",
"reakcja",
"okazja",
"porządek",
"opowiadać",
"przeciwnik",
"mięsień",
"zysk",
"sprzedaż",
"zabić",
"różnica",
"klasztor",
"osiągnąć",
"niebezpieczny",
"pisarz",
"wrócić",
"skład",
"m.in.",
"sprawiać",
"chrześcijański",
"zapomnieć",
"gniew",
"planeta",
"postawić",
"przejść",
"kurs",
"przygotować",
"dzielić",
"dzielnica",
"kierowca",
"własność",
"królowa",
"korzeń",
"artysta",
"stawiać",
"jakość",
"przyjemność",
"średni",
"ludowy",
"całkowity",
"Dania",
"biblioteka",
"dopiero",
"zero",
"gniazdo",
"pieśń",
"urzędnik",
"przestać",
"dziura",
"Anglia",
"mózg",
"liczny",
"uderzać",
"efekt",
"rozmiar",
"przyjemny",
"norma",
"pozwalać",
"rana",
"korzyść",
"tańczyć",
"kosztować",
"Słońce",
"podział",
"samica",
"przepis",
"hotel",
"rzadko",
"wykonanie",
"brzydki",
"otworzyć",
"armia",
"kiedyś",
"brązowy",
"rzeczywistość",
"prędkość",
"szef",
"ciecz",
"kaczka",
"szkolny",
"dokonywać",
"fałszywy",
"koszula",
"tyle",
"rzeczownik",
"złożony",
"zawodnik",
"-",
"tradycja",
"śniadanie",
"usługa",
"skończyć",
"Białoruś",
"znów",
"handel",
"mieć na imię",
"królestwo",
"jądro",
"powstawać",
"okrągły",
"spodnie",
"powoli",
"godny",
"jeżeli",
"ślad",
"przedstawienie",
"olej",
"jazda",
"dyskusja",
"wyrażenie",
"daleki",
"sądzić",
"Ameryka",
"tracić",
"znosić",
"profesor",
"świątynia",
"szary",
"piłka nożna",
"zboże",
"uderzyć",
"wola",
"srebro",
"dolina",
"w postaci",
"różowy",
"zamykać",
"wrogość",
"Indie",
"dziwny",
"czasem",
"temu",
"wtorek",
"oglądać",
"sport",
"małpa",
"spotkać",
"zdarzenie",
"wódka",
"wrażenie",
"kalendarz",
"pomysł",
"odczuwać",
"koszt",
"plemię",
"bydło",
"strumień",
"skutek",
"książę",
"całkiem",
"papież",
"dodawać",
"brudny",
"przyszły",
"mecz",
"scena",
"wolno",
"klient",
"opisać",
"szereg",
"ciąża",
"coraz",
"złodziej",
"Izrael",
"głód",
"otaczać",
"władca",
"transport",
"w formie",
"niebezpieczeństwo",
"słoneczny",
"figura",
"wszelki",
"wysiłek",
"kolano",
"niech",
"tłuszcz",
"zakończenie",
"mi",
"ksiądz",
"żelazo",
"łuk",
"mebel",
"Afganistan",
"nieszczęście",
"wskazywać",
"plaża",
"fragment",
"zaś",
"metr",
"kościelny",
"samochodowy",
"zachowywać się",
"obrona",
"danie",
"wierny",
"amharski",
"lista",
"żart",
"ogólny",
"kontrola",
"budzić",
"90",
"tłum",
"naj-",
"kontakt",
"czasownik",
"gotowy",
"Jezus",
"koza",
"zbiornik",
"obserwować",
"grób",
"stacja",
"robotnik",
"czerwiec",
"październik",
"konstrukcja",
"choć",
"wyjście",
"minerał",
"kosz",
"60",
"cebula",
"samiec",
"sos",
"zmarły",
"ojczyzna",
"bycie",
"szkoda",
"niszczyć",
"majuskuła",
"przejaw",
"zniszczyć",
"niedźwiedź",
"pokazywać",
"gospodarka",
"zbudować",
"dodatkowy",
"park",
"opłata",
"wysoko",
"Egipt",
"zegar",
"wujek",
"dawno",
"studia",
"cesarz",
"wizyta",
"przyprawa",
"łódź",
"powszechny",
"robota",
"metalowy",
"biec",
"dobro",
"dzisiejszy",
"obóz",
"żydowski",
"USA",
"Chrystus",
"oddawać",
"widok",
"marka",
"pojęcie",
"miecz",
"krzyż",
"tajemnica",
"chłop",
"Austria",
"lecieć",
"bezpieczeństwo",
"królewski",
"śmiech",
"postawa",
"sukces",
"zgodny",
"płaszcz",
"Turcja",
"przeszkoda",
"prostytutka",
"operacja",
"wywołać",
"narząd",
"futro",
"świeca",
"Australia",
"prawny",
"wciąż",
"Szwajcaria",
"powieść",
"gotować",
"szczególny",
"rozwiązanie",
"relacja",
"studiować",
"stado",
"w czasie",
"kontynent",
"przychodzić",
"lis",
"strefa",
"70",
"wypowiedź",
"dziewiąty",
"idea",
"kura",
"grunt",
"farba",
"wóz",
"epoka",
"lęk",
"smutny",
"kolejowy",
"dodać",
"uchodzić",
"przygotowywać",
"przynieść",
"umysł",
"suma",
"interes",
"produkować",
"Boże Narodzenie",
"wieża",
"handlowy",
"gdyby",
"Kraków",
"utrzymywać",
"urodziny",
"natychmiast",
"uciekać",
"chociaż",
"słoń",
"prezent",
"odwaga",
"ciężar",
"płacić",
"podłoga",
"atmosfera",
"wspólnota",
"zwycięstwo",
"treść",
"zainteresowanie",
"zamiast",
"tor",
"artystyczny",
"dwanaście",
"zdolny",
"pojedynczy",
"przejście",
"moralny",
"reguła",
"naukowiec",
"osobisty",
"mnóstwo",
"wybory",
"jedynie",
"wada",
"sygnał",
"wykonywanie",
"wybierać",
"umieszczać",
"mistrz",
"nagły",
"dno",
"pomarańczowy",
"telewizyjny",
"radio",
"przerwa",
"matematyka",
"klub",
"środa",
"muzeum",
"finansowy",
"malować",
"opieka",
"Żyd",
"ośrodek",
"krzesło",
"ukraiński",
"kolej",
"kłopot",
"ryż",
"cień",
"szwedzki",
"usuwać",
"katolicki",
"cierpienie",
"znaczny",
"umożliwiać",
"Rumunia",
"poznać",
"wynosić",
"pijany",
"zakończyć",
"intensywny",
"kostka",
"świadczyć",
"wydawać się",
"godność",
"Unia Europejska",
"orzeł",
"burza",
"chrześcijaństwo",
"błoto",
"biskup",
"gardło",
"szkło",
"polityk",
"umieścić",
"pozostać",
"czwartek",
"piętro",
"odkryć",
"powstanie",
"zakon",
"oddech",
"nastrój",
"teoria",
"doskonały",
"dolny",
"spadek",
"zawartość",
"zatrzymać",
"aktor",
"grzech",
"otrzymywać",
"anioł",
"szklanka",
"ciekawy",
"pomóc",
"pomidor",
"smutek",
"Wielka Brytania",
"pora",
"śmiać się",
"abugida",
"odcinek",
"nasiono",
"pokarm",
"zimno",
"wieczorem",
"wracać",
"azjatycki",
"wysłać",
"sprzęt",
"posiłek",
"ozdobny",
"impreza",
"potrzebny",
"znaczyć",
"łyżka",
"narkotyk",
"biuro",
"parlament",
"obywatelka",
"babka",
"zabawka",
"dorosły",
"ćwiczenie",
"ocean",
"nadmierny",
"niezwykły",
"bieda",
"użytkownik",
"polować",
"dyrektor",
"procent",
"ziemski",
"spór",
"żaba",
"starać się",
"w wyniku",
"pacjent",
"Litwa",
"wycieczka",
"istotny",
"lampa",
"mgła",
"Węgry",
"późny",
"dziewczynka",
"lina",
"w ciągu",
"mocz",
"motyl",
"półwysep",
"staw",
"przybyć",
"duński",
"nieprzyjemny",
"wakacje",
"przestępstwo",
"centralny",
"odzież",
"głośny",
"wysyłać",
"wina",
"pożar",
"pasek",
"przyjaźń",
"koncert",
"zarówno",
"turecki",
"na zewnątrz",
"kilometr",
"zapalenie",
"tani",
"pytać",
"św.",
"dane",
"poeta",
"łąka",
"trudność",
"ciotka",
"seks",
"bar",
"pasmo",
"zaraz",
"ubogi",
"po prostu",
"igła",
"cmentarz",
"dziób",
"róża",
"pozostawać",
"zawodowy",
"tablica",
"klimat",
"cisza",
"okropny",
"włosy",
"wzdłuż",
"medycyna",
"bawić się",
"wzrok",
"w.",
"bogini",
"wioska",
"letni",
"chyba",
"poczta",
"deska",
"hodować",
"wreszcie",
"przyjechać",
"filmowy",
"kończyć",
"psychiczny",
"uzyskać",
"rachunek",
"minister",
"dowód",
"lata",
"mrówka",
"radiowy",
"średniowieczny",
"mądry",
"przeprowadzać",
"kolacja",
"jakby",
"pragnąć",
"sądowy",
"ustawa",
"zaufanie",
"wojenny",
"obowiązywać",
"promień",
"Kościół",
"dać się",
"kult",
"traktować",
"czapka",
"ciągnąć",
"paliwo",
"—",
"diabeł",
"Holandia",
"broda",
"w końcu",
"powolny",
"muzyk",
"korzystać",
"sowa",
"dokładny",
"czoło",
"zając",
"na przykład",
"płakać",
"podnieść",
"wybuch",
"spaść",
"byk",
"budowla",
"zgromadzenie",
"odważny",
"czynnik",
"zeszły",
"wesoły",
"pająk",
"opuścić",
"ciemność",
"kij",
"pałac",
"archipelag",
"pojawiać się",
"panna",
"gęś",
"nauczycielka",
"zajęcie",
"trudno",
"pustynia",
"kieszeń",
"fotografia",
"tytoń",
"upadek",
"wyrok",
"istnienie",
"zanim",
"wyścig",
"chęć",
"świecić",
"częściowo",
"dokonać",
"żywność",
"sukienka",
"obrót",
"toponim",
"wpaść",
"podróżować",
"kolumna",
"rodzinny",
"poprzedni",
"Niemiec",
"pisanie",
"oddać",
"rzadki",
"bułgarski",
"otoczenie",
"kobiecy",
"kolorowy",
"kartka",
"urodzić się",
"piętnaście",
"uznawać",
"okręt",
"trzydziesty",
"wniosek",
"głupiec",
"strata",
"większy",
"podnosić",
"nocny",
"wywodzić się",
"filozofia",
"inaczej",
"Pan",
"ozdoba",
"uciec",
"martwy",
"hałas",
"lotnisko",
"tył",
"łaciński",
"położony",
"pełnić",
"kwestia",
"tarcza",
"0",
"skłonność",
"go",
"talerz",
"wygrać",
"Morze Śródziemne",
"minuskuła",
"szlachetny",
"poruszać",
"jadalny",
"jedenaście",
"nieść",
"szkodliwy",
"użyć",
"lot",
"wystawa",
"pokonać",
"przebywać",
"przeszłość",
"adres",
"wisieć",
"oś",
"zmęczony",
"katastrofa",
"zamiar",
"bogactwo",
"niechęć",
"poduszka",
"rak",
"jednocześnie",
"dziecięcy",
"wstyd",
"białoruski",
"rozpocząć",
"rzucić",
"ulegać",
"policzek",
"wzgórze",
"hasło",
"lustro",
"wkrótce",
"narodowość",
"pojawić się",
"skala",
"zapis",
"stowarzyszenie",
"zgadzać się",
"rezultat",
"oba",
"przecież",
"czeski",
"tłumaczyć",
"rysunek",
"kłaść",
"aktywny",
"gołąb",
"praktyka",
"okoliczność",
"trwały",
"oczekiwać",
"ryzyko",
"dostęp",
"wyłącznie",
"czekolada",
"oczywiście",
"dalej",
"dar",
"włożyć",
"zrozumieć",
"postępować",
"srebrny",
"doprowadzić",
"analiza",
"mierzyć",
"banknot",
"głupota",
"głupek",
"słowacki",
"plama",
"uśmiech",
"konflikt",
"gleba",
"gospodarczy",
"plecy",
"następować",
"zaburzenie",
"blady",
"spadać",
"plac",
"cichy",
"alkoholowy",
"pomarańcza",
"bajka",
"wprowadzać",
"żołądek",
"latać",
"niewolnik",
"rolnik",
"wspomnienie",
"zająć",
"nasienie",
"Belgia",
"wątpliwość",
"bezpośrednio",
"graniczyć",
"gorączka",
"bronić",
"rządzić",
"drapieżny",
"pojemnik",
"Piotr",
)
parts_of_speech: Dict[str, tuple] = {}
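# parts_of_speech is left empty, presumably meaning no part-of-speech-specific
# word sets are defined for this locale.
#
# A minimal usage sketch (an assumption based on the standard Faker API, not
# part of this module):
#
#     from faker import Faker
#     fake = Faker("pl_PL")
#     fake.word()      # one entry drawn from word_list above
#     fake.sentence()  # several word_list entries joined into a sentence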
|
2bfb3856e2c27208008da619485e38b676313449
|
4b64dbbcd51ac0ff67c1ef9e56383f92630f9dc0
|
/smr/test/integrated/test_test.py
|
ab559cbd10399d99539e7640cc33c2583998f4a6
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
naver/nbase-arc
|
e181a57344566a042014b67c9a054c0683c87fad
|
5cc17be3f423e7ae897ad2c2939278b2584487d8
|
refs/heads/master
| 2023-08-24T15:28:13.223923
| 2023-04-11T02:48:42
| 2023-04-11T02:48:42
| 46,257,023
| 185
| 67
|
Apache-2.0
| 2023-04-11T02:48:43
| 2015-11-16T06:47:07
|
C
|
UTF-8
|
Python
| false
| false
| 6,775
|
py
|
test_test.py
|
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
import Conf, Cm, Pg, Pgs, Smr, Be, Util, Conn
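# The import row above pulls in local test-harness helpers; their roles, as
# inferred from usage below (not from any documentation): Cm manages a scratch
# workspace, Pg/Pgs model a partition group and its server process, Smr
# exposes replicator roles (NONE/LCONN/MASTER/SLAVE), Be wraps the backend
# client, and Conn is a raw request/response connection.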
class TestTest(unittest.TestCase):
def test_all(self):
self._test_cm()
self._test_pgs()
self._test_smr()
self._test_be()
self._test_pg()
self._test_client()
    def _test_cm(self):
        cm = None
        try:
            cm = Cm.CM("test_cm")
            assert cm is not None
            cm.create_workspace()
            workspace_dir = cm.dir
            assert os.path.exists(workspace_dir)
            cm.remove_workspace()
            assert cm.dir is None
            assert not os.path.exists(workspace_dir)
            cm = None
        finally:
            if cm is not None:
                cm.remove_workspace()
def _test_pgs(self):
cm = None
pgs = None
try:
cm = Cm.CM("test_pgs")
assert cm is not None
cm.create_workspace()
            assert cm.dir is not None
pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
assert pgs is not None
pgs.start_smr()
assert pgs.smr != None
role = pgs.smr.get_role()
assert role == Smr.SMR.NONE
pgs.start_be()
assert pgs.be != None
pgs.smr.wait_role(Smr.SMR.LCONN)
except:
#Util.tstop('Exception Occurred')
raise
finally:
if pgs is not None:
pgs.kill_be()
pgs.kill_smr()
if cm is not None:
cm.remove_workspace()
def _test_smr_singleton(self):
c1 = None
c2 = None
try:
c1 = Conn.Conn('localhost', 1903)
c2 = Conn.Conn('localhost', 1903)
# set
r1 = c1.do_request('singleton a')
assert r1[0] == '+OK'
# override with same value
r1 = c1.do_request('singleton a')
assert r1[0] == '+OK'
# override with other value
r1 = c1.do_request('singleton b')
assert r1[0] == '+OK'
# bad command
r2 = c2.do_request('singleton a b c')
assert r2[0].startswith('-ERR')
# set with already set singleton value
r2 = c2.do_request('singleton b')
assert r2[0] == '+OK'
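            # Presumably, c2 taking over the singleton invalidates c1's
            # session, so c1's next request is expected to fail (inferred
            # from the assertions below, not from harness documentation).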
try:
c1.do_request('ping')
# -- must not be reached
assert False
except:
c1.disconnect() # clear
c1 = Conn.Conn('localhost', 1903)
        finally:
            if c1 is not None:
                c1.disconnect()
            if c2 is not None:
                c2.disconnect()
def _test_smr(self):
cm = None
pgs = None
try:
cm = Cm.CM("test_pgs")
assert cm is not None
cm.create_workspace()
            assert cm.dir is not None
pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
assert pgs is not None
pgs.start_smr()
assert pgs.smr != None
# send confset with empty arguments
self.assertRaisesRegexp(Exception, '-ERR bad number of token:0',
pgs.smr.confset, '', '')
# other command specific tests
self._test_smr_singleton()
finally:
if pgs is not None:
pgs.kill_smr()
if cm is not None:
cm.remove_workspace()
def _test_be(self):
cm = None
pgs = None
try:
cm = Cm.CM("test_be")
cm.create_workspace()
pg = Pg.PG(0)
# pgs --> master
pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
pg.join(pgs, start=True)
pgs.smr.wait_role(Smr.SMR.MASTER)
# Test basic op
old = pgs.be.set(0, '100')
assert old >= 0
r = pgs.be.reset()
assert r == 0
new = pgs.be.set(0, '100')
assert old == new
r = pgs.be.ping()
assert r == 0
# do checkpoint
r = pgs.be.ckpt()
assert r == 0
# restart
pg.leave(pgs.id, kill=True)
pg.join(pgs, start=True)
pgs.smr.wait_role(Smr.SMR.MASTER)
# check crc of key = 0
new = pgs.be.get(0)
assert old == new
except:
#Util.tstop('Exception Occurred')
raise
finally:
if pgs is not None:
pgs.kill_smr()
pgs.kill_be()
if cm is not None:
cm.remove_workspace()
def _test_pg(self):
cm = None
pgs1 = None
pgs2 = None
try:
cm = Cm.CM("test_pgs")
cm.create_workspace()
pg = Pg.PG(0)
# pgs1 --> master
pgs1 = Pgs.PGS(0, 'localhost', 1900, cm.dir)
pg.join(pgs1, start=True)
pgs1.smr.wait_role(Smr.SMR.MASTER)
# pgs2 --> slave
pgs2 = Pgs.PGS(1, 'localhost', 1910, cm.dir)
pg.join(pgs2, start=True)
pgs2.smr.wait_role(Smr.SMR.SLAVE)
            # kill pgs2, check quorum
            pg.leave(pgs2.id, kill=True)
            assert pgs1.smr.getquorum() == 0
# join pgs2
pg.join(pgs2, start=True)
pgs2.smr.wait_role(Smr.SMR.SLAVE)
            # kill pgs1; pgs2 should take over as master
            pg.leave(pgs1.id, kill=True)
pgs2.smr.wait_role(Smr.SMR.MASTER)
assert pgs2.smr.getquorum() == 0
# join pgs1
pg.join(pgs1, start=True)
pgs1.smr.wait_role(Smr.SMR.SLAVE)
finally:
if pgs1 is not None:
pgs1.kill_smr()
pgs1.kill_be()
if pgs2 is not None:
pgs2.kill_smr()
pgs2.kill_be()
if cm is not None:
cm.remove_workspace()
def _test_client(self):
# TODO implement smr-client test code
pass
if __name__ == '__main__':
unittest.main()
|
a9bb8fc02d430a1f7146cc3280d6c1e09a492eb0
|
21b1ebd3c489b99d834f08c63387b045116a01ed
|
/tests/unit/lib/test_elasticbeanstalk.py
|
3c771043d2d79e54f69586abf842165361ef68d6
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
aws/aws-elastic-beanstalk-cli
|
8d1f34fbec3e89164d8b607666c0800c28e334f7
|
252101641a7b6acb5de17fafd6adf8b96418426f
|
refs/heads/master
| 2023-09-03T15:04:58.036979
| 2023-08-31T17:26:43
| 2023-08-31T17:26:43
| 175,470,423
| 149
| 84
|
Apache-2.0
| 2023-09-12T14:53:42
| 2019-03-13T17:42:43
|
Python
|
UTF-8
|
Python
| false
| false
| 67,934
|
py
|
test_elasticbeanstalk.py
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
from dateutil import tz
import mock
import unittest
from ebcli.lib import elasticbeanstalk
from ebcli.objects.requests import CreateEnvironmentRequest, CloneEnvironmentRequest
from ebcli.objects.platform import PlatformVersion
from ebcli.objects.buildconfiguration import BuildConfiguration
from .. import mock_responses
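# Pattern used throughout this suite: each test patches ebcli's
# aws.make_api_call (or the module-private _make_api_call), feeds it a canned
# payload from mock_responses, and then asserts on the exact keyword arguments
# the library builds for the underlying service call.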
class TestElasticbeanstalk(unittest.TestCase):
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application_version(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_APPLICATION_VERSION_RESPONSE
self.assertEqual(
mock_responses.CREATE_APPLICATION_VERSION_RESPONSE,
elasticbeanstalk.create_application_version(
'my-application',
'v1',
'MyAppv1',
'my-bucket',
'sample-war'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_application_version',
ApplicationName='my-application',
Description='MyAppv1',
Process=False,
SourceBundle={
'S3Bucket': 'my-bucket',
'S3Key': 'sample-war'
},
VersionLabel='v1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application_version_with_codecommit(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_APPLICATION_VERSION_RESPONSE__WITH_CODECOMMIT
self.assertEqual(
mock_responses.CREATE_APPLICATION_VERSION_RESPONSE__WITH_CODECOMMIT,
elasticbeanstalk.create_application_version(
'my-application',
'v1',
'MyAppversion',
None,
None,
repository='my-repository',
commit_id='532452452eeaadcbf532452452eeaadcbf'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application_version_with_codebuild(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_APPLICATION_VERSION_RESPONSE__WITH_CODECOMMIT
self.assertEqual(
mock_responses.CREATE_APPLICATION_VERSION_RESPONSE__WITH_CODECOMMIT,
elasticbeanstalk.create_application_version(
'my-application',
'v1',
'MyAppversion',
'my-bucket',
'sample-war',
build_configuration=BuildConfiguration(
service_role='CodeBuildServiceRole',
image='Java 8 Image',
compute_type='t2.micro',
timeout='5',
)
)
)
make_api_call_mock.assert_called_with(
'elasticbeanstalk',
'create_application_version',
ApplicationName='my-application',
BuildConfiguration={
'CodeBuildServiceRole': 'CodeBuildServiceRole',
'Image': 'Java 8 Image',
'ComputeType': 't2.micro',
'TimeoutInMinutes': '5'
},
Description='MyAppversion',
Process=True,
SourceBuildInformation={
'SourceType': 'Zip',
'SourceRepository': 'S3',
'SourceLocation': 'my-bucket/sample-war'
},
VersionLabel='v1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environments__attempting_to_match_single_env__match_found(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
environments = elasticbeanstalk.get_environments(['environment-1'])
self.assertEqual('Environment', environments[0].__class__.__name__)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environments__attempting_to_match_single_env__match_not_found(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'Environments': []
}
with self.assertRaises(elasticbeanstalk.NotFoundError) as context_manager:
elasticbeanstalk.get_environments(['my-environment'])
self.assertEqual(
'Could not find any environments from the list: my-environment',
str(context_manager.exception)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environments__attempting_to_match_multiple_env__match_not_found(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'Environments': []
}
with self.assertRaises(elasticbeanstalk.NotFoundError) as context_manager:
elasticbeanstalk.get_environments(
[
'my-absent-environment-1',
'my-absent-environment-2'
]
)
self.assertEqual(
'Could not find any environments from the list: my-absent-environment-1, my-absent-environment-2',
str(context_manager.exception)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environments__attempting_to_match_multiple_env__partial_match_found(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
environments = elasticbeanstalk.get_environments(
[
'environment-1',
'my-absent-environment'
]
)
self.assertEqual(4, len(environments))
self.assertEqual('Environment', environments[0].__class__.__name__)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_app_version_labels(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATION_VERSIONS_RESPONSE
version_labels = elasticbeanstalk.get_app_version_labels('my-application')
self.assertEqual(
[
'version-label-1',
'version-label-2',
],
version_labels
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_app_version_labels__no_version_labels(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'ApplicationVersions': []
}
version_labels = elasticbeanstalk.get_app_version_labels('my-application')
self.assertEqual(
[],
version_labels
)
@mock.patch('ebcli.lib.elasticbeanstalk._make_api_call')
def test_application_version_exists(
self,
_make_api_call_mock
):
_make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATION_VERSIONS_RESPONSE
application_version = elasticbeanstalk.application_version_exists('my-application', 'version-label-1')
self.assertEqual(
{
"ApplicationName": "my-application",
"VersionLabel": "version-label-1",
"Description": "update cover page",
"DateCreated": "2015-07-23T01:32:26.079Z",
"DateUpdated": "2015-07-23T01:32:26.079Z",
"SourceBundle": {
"S3Bucket": "elasticbeanstalk-us-west-2-123123123123",
"S3Key": "my-app/9112-stage-150723_224258.war"
}
},
application_version
)
@mock.patch('ebcli.lib.elasticbeanstalk._make_api_call')
def test_application_version_exists__application_version_not_found(
self,
_make_api_call_mock
):
_make_api_call_mock.return_value = {
'ApplicationVersions': []
}
application_version = elasticbeanstalk.application_version_exists('my-application', 'version-label-1')
self.assertIsNone(application_version)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_delete_platform(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DELETE_PLATFORM_VERSION_RESPONSE
self.assertEqual(
mock_responses.DELETE_PLATFORM_VERSION_RESPONSE,
elasticbeanstalk.delete_platform(
'arn:aws:elasticbeanstalk:us-west-2:123123123123:platform/zqozvhohaq-custom-platform/1.0.0'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_list_platform_versions(
self,
make_api_call_mock
):
make_api_call_mock.side_effect = [
mock_responses.LIST_CUSTOM_PLATFOM_VERSIONS_RESPONSE__WITH_NEXT_TOKEN,
mock_responses.LIST_CUSTOM_PLATFOM_VERSIONS_RESPONSE
]
self.assertEqual(
(
mock_responses.LIST_CUSTOM_PLATFOM_VERSIONS_RESPONSE['PlatformSummaryList']
+
mock_responses.LIST_CUSTOM_PLATFOM_VERSIONS_RESPONSE__WITH_NEXT_TOKEN['PlatformSummaryList']
),
elasticbeanstalk.list_platform_versions(
filters=[
{
'Type': 'PlatformOwner',
'Operator': '=',
'Values': 'self'
}
]
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_platform_version(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CUSTOM_PLATFORM_VERSION_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_CUSTOM_PLATFORM_VERSION_RESPONSE['PlatformDescription'],
elasticbeanstalk.describe_platform_version(
"arn:aws:elasticbeanstalk:us-west-2:123123123123:platform/xutrezqmqw-custom-platform/1.0.0"
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_APPLICATION_RESPONSE
self.assertEqual(
mock_responses.CREATE_APPLICATION_RESPONSE,
elasticbeanstalk.create_application(
'my-application',
'my-application'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application__invalid_parameter_value_error_encountered(
self,
make_api_call_mock
):
make_api_call_mock.side_effect = elasticbeanstalk.aws.InvalidParameterValueError(
message='Application existing-application already exists.'
)
with self.assertRaises(elasticbeanstalk.AlreadyExistsError):
elasticbeanstalk.create_application(
'existing-application',
'my-application'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_application__general_exception(
self,
make_api_call_mock
):
make_api_call_mock.side_effect = Exception('could not create app')
with self.assertRaises(Exception) as context_manager:
elasticbeanstalk.create_application(
'existing-application',
'my-application'
)
self.assertEqual(
'could not create app',
str(context_manager.exception)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_platform_version(
self,
make_api_call_mock
):
elasticbeanstalk.create_platform_version(
'my-custom-platform',
'1.2.3',
'my-bucket',
'my-key',
'my-instance-profile',
'my-ec2-key-name',
't2.micro',
tags=[
{'Key': 'a', 'Value': 'value1'},
{'Key': 'b', 'Value': 'value2'}
],
vpc={
'id': 'vpc-id',
'subnets': 'subnet-id-1,subnet-id-2',
'publicip': 'true',
}
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_platform_version',
OptionSettings=[
{
'Namespace': 'aws:autoscaling:launchconfiguration',
'OptionName': 'IamInstanceProfile',
'Value': 'my-instance-profile'
},
{
'Namespace': 'aws:autoscaling:launchconfiguration',
'OptionName': 'EC2KeyName',
'Value': 'my-ec2-key-name'
},
{
'Namespace': 'aws:autoscaling:launchconfiguration',
'OptionName': 'InstanceType',
'Value': 't2.micro'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'VPCId',
'Value': 'vpc-id'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'Subnets',
'Value': 'subnet-id-1,subnet-id-2'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'AssociatePublicIpAddress',
'Value': 'true'
},
{
'Namespace': 'aws:elasticbeanstalk:healthreporting:system',
'OptionName': 'SystemType',
'Value': 'enhanced'
},
{
'Namespace': 'aws:elasticbeanstalk:environment',
'OptionName': 'ServiceRole',
'Value': 'aws-elasticbeanstalk-service-role'
}
],
PlatformDefinitionBundle={
'S3Bucket': 'my-bucket',
'S3Key': 'my-key'
},
PlatformName='my-custom-platform',
PlatformVersion='1.2.3',
Tags=[
{'Key': 'a', 'Value': 'value1'},
{'Key': 'b', 'Value': 'value2'}
],
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
@mock.patch('ebcli.lib.elasticbeanstalk.aws.get_region_name')
@mock.patch('ebcli.lib.ec2.has_default_vpc')
def test_create_environment(
self,
has_default_vpc_mock,
get_region_name_mock,
make_api_call_mock
):
self.maxDiff = None
has_default_vpc_mock.return_value = True
get_region_name_mock.return_value = 'us-east-1'
make_api_call_mock.return_value = mock_responses.CREATE_ENVIRONMENT_RESPONSE
environment_request = CreateEnvironmentRequest(
app_name='my-application',
env_name='environment-1',
platform=PlatformVersion("arn:aws:elasticbeanstalk:us-west-2::platform/Docker running on 64bit Amazon Linux/2.1.0"),
database={
'username': 'root',
'password': 'password',
'engine': 'mysql',
'size': '10',
'instance': 'db.t2.micro',
'version': '5.6.35'
},
vpc={
'id': 'my-vpc-id',
'ec2subnets': 'subnet-1,subnet-2,subnet-3',
'elbsubnets': 'subnet-1,subnet-2,subnet-3',
'elbscheme': 'public',
'publicip': 'true',
'securitygroups': 'security-group-1,security-group-2',
'dbsubnets': 'subnet-1,subnet-2,subnet-3',
}
)
elasticbeanstalk.create_environment(environment_request)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_environment',
ApplicationName='my-application',
Description='Environment created from the EB CLI using "eb create"',
EnvironmentName='environment-1',
OptionSettings=[
{
'Namespace': 'aws:elasticbeanstalk:healthreporting:system',
'OptionName': 'SystemType',
'Value': 'enhanced'
},
{
'Namespace': 'aws:elasticbeanstalk:command',
'OptionName': 'BatchSize',
'Value': '30'
},
{
'Namespace': 'aws:elasticbeanstalk:command',
'OptionName': 'BatchSizeType',
'Value': 'Percentage'
},
{
'Namespace': 'aws:elb:policies',
'OptionName': 'ConnectionDrainingEnabled',
'Value': 'true'
},
{
'Namespace': 'aws:elb:loadbalancer',
'OptionName': 'CrossZone',
'Value': 'true'
},
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateEnabled',
'Value': 'true'
},
{
'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
'OptionName': 'RollingUpdateType',
'Value': 'Health'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBPassword',
'Value': 'password'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBUser',
'Value': 'root'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBInstanceClass',
'Value': 'db.t2.micro'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBAllocatedStorage',
'Value': '10'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBEngine',
'Value': 'mysql'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBEngineVersion',
'Value': '5.6.35'
},
{
'Namespace': 'aws:rds:dbinstance',
'OptionName': 'DBDeletionPolicy',
'Value': 'Snapshot'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'VPCId',
'Value': 'my-vpc-id'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'AssociatePublicIpAddress',
'Value': 'true'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'ELBScheme',
'Value': 'public'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'ELBSubnets',
'Value': 'subnet-1,subnet-2,subnet-3'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'Subnets',
'Value': 'subnet-1,subnet-2,subnet-3'
},
{
'Namespace': 'aws:autoscaling:launchconfiguration',
'OptionName': 'SecurityGroups',
'Value': 'security-group-1,security-group-2'
},
{
'Namespace': 'aws:ec2:vpc',
'OptionName': 'DBSubnets',
'Value': 'subnet-1,subnet-2,subnet-3'
}
],
PlatformArn='arn:aws:elasticbeanstalk:us-west-2::platform/Docker running on 64bit Amazon Linux/2.1.0',
TemplateSpecification={
'TemplateSnippets': [
{
'SnippetName': 'RdsExtensionEB',
'Order': 10000,
'SourceUrl': 'https://s3.amazonaws.com/elasticbeanstalk-env-resources-us-east-1/eb_snippets/rds/rds.json'
}
]
}
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
@mock.patch('ebcli.lib.elasticbeanstalk.aws.get_region_name')
@mock.patch('ebcli.lib.ec2.has_default_vpc')
def test_clone_environment(
self,
has_default_vpc_mock,
get_region_name_mock,
make_api_call_mock
):
self.maxDiff = None
has_default_vpc_mock.return_value = True
get_region_name_mock.return_value = 'us-east-1'
make_api_call_mock.return_value = mock_responses.CREATE_ENVIRONMENT_RESPONSE
environment_request = CloneEnvironmentRequest(
app_name='my-application',
env_name='environment-1-clone',
original_name='environment-1',
platform=PlatformVersion("arn:aws:elasticbeanstalk:us-west-2::platform/Docker running on 64bit Amazon Linux/2.1.0"),
)
elasticbeanstalk.clone_environment(environment_request)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_environment',
ApplicationName='my-application',
Description='Environment cloned from environment-1-clone from the EB CLI using "eb clone"',
EnvironmentName='environment-1-clone',
OptionSettings=[],
PlatformArn='arn:aws:elasticbeanstalk:us-west-2::platform/Docker running on 64bit Amazon Linux/2.1.0',
TemplateSpecification={
'TemplateSource': {
'EnvironmentName': 'environment-1'
}
}
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_delete_application(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'ResponseMetadata': {
'RequestId': '123123123123'
}
}
elasticbeanstalk.delete_application('my-application')
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'delete_application',
ApplicationName='my-application'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_delete_application_version(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'ResponseMetadata': {
'RequestId': '123123123123'
}
}
elasticbeanstalk.delete_application_version(
'my-application',
'v1'
)
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'delete_application_version',
ApplicationName='my-application',
DeleteSourceBundle=True,
VersionLabel='v1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_delete_application_and_envs(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'ResponseMetadata': {
'RequestId': '123123123123'
}
}
elasticbeanstalk.delete_application_and_envs(
'my-application'
)
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'delete_application',
ApplicationName='my-application',
TerminateEnvByForce=True
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_application(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'Applications': [
{
'ApplicationName': 'my-application'
}
],
'ResponseMetadata': {
'RequestId': 'd88449fe-feef-4d28-afdb-c8a34e99f757',
'HTTPStatusCode': 200,
'date': 'Wed, 16 May 2018 00:43:52 GMT',
'RetryAttempts': 0
}
}
self.assertEqual(
{'ApplicationName': 'my-application'},
elasticbeanstalk.describe_application(
'my-application'
)
)
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_applications',
ApplicationNames=['my-application']
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_application__not_found(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'Applications': [],
'ResponseMetadata': {
'RequestId': 'd88449fe-feef-4d28-afdb-c8a34e99f757',
'HTTPStatusCode': 200,
'date': 'Wed, 16 May 2018 00:43:52 GMT',
'RetryAttempts': 0
}
}
with self.assertRaises(elasticbeanstalk.NotFoundError):
elasticbeanstalk.describe_application(
'absent-application'
)
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_applications',
ApplicationNames=['absent-application']
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_is_cname_available(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CHECK_DNS_AVAILABILITY_RESPONSE
self.assertTrue(
elasticbeanstalk.is_cname_available(
"my-cname.elasticbeanstalk.com"
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_swap_environment_cnames(
self,
mock_api_call_mock
):
mock_api_call_mock.return_value = {
'Applications': [],
'ResponseMetadata': {
'RequestId': 'd88449fe-feef-4d28-afdb-c8a34e99f757',
'HTTPStatusCode': 200,
'date': 'Wed, 16 May 2018 00:43:52 GMT',
'RetryAttempts': 0
}
}
elasticbeanstalk.swap_environment_cnames(
'source-environment',
'dest-environment'
)
mock_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'swap_environment_cnames',
DestinationEnvironmentName='dest-environment',
SourceEnvironmentName='source-environment'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_applications(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATIONS_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_APPLICATIONS_RESPONSE['Applications'],
elasticbeanstalk.describe_applications()
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_application_exist(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATION_RESPONSE
self.assertTrue(elasticbeanstalk.application_exist('my-application'))
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_application_exist__false(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'Applications': []
}
self.assertFalse(elasticbeanstalk.application_exist('my-application'))
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_configuration_settings(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0],
elasticbeanstalk.describe_configuration_settings(
'my-application',
'environment-1'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_application_names(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATIONS_RESPONSE
self.assertEqual(
['my-application', 'my-application-2', 'my-application-3'],
elasticbeanstalk.get_application_names()
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_option_setting_from_environment(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE
self.assertEqual(
'20',
elasticbeanstalk.get_option_setting_from_environment(
'my-application',
'environment-1',
'aws:elb:policies',
'ConnectionDrainingTimeout'
)
)
def test_get_option_setting(self):
self.assertEqual(
'20',
elasticbeanstalk.get_option_setting(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0]['OptionSettings'],
'aws:elb:policies',
'ConnectionDrainingTimeout'
)
)
def test_get_option_setting__option_not_found(self):
self.assertIsNone(
elasticbeanstalk.get_option_setting(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0]['OptionSettings'],
'aws:elb:policies21313213',
'ConnectionDrainingTimeout'
)
)
def test_create_option_setting(self):
self.assertEqual(
{
'Namespace': 'aws:elb:policies',
'OptionName': 'ConnectionDrainingTimeout',
'Value': '20'
},
elasticbeanstalk.create_option_setting(
'aws:elb:policies',
'ConnectionDrainingTimeout',
'20'
)
)
def test_get_specific_configuration(self):
self.assertEqual(
'20',
elasticbeanstalk.get_specific_configuration(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0],
'aws:elb:policies',
'ConnectionDrainingTimeout'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_specific_configuration_for_env(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE
self.assertEqual(
'20',
elasticbeanstalk.get_specific_configuration_for_env(
'my-application',
'environment-1',
'aws:elb:policies',
'ConnectionDrainingTimeout'
)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_available_solution_stacks__none_available(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'SolutionStacks': []
}
with self.assertRaises(elasticbeanstalk.NotFoundError) as context_manager:
elasticbeanstalk.get_available_solution_stacks()
self.assertEqual(
'Elastic Beanstalk could not find any platforms. '
'Ensure you have the necessary permissions to access Elastic Beanstalk.',
str(context_manager.exception)
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_available_solution_stacks(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.LIST_AVAILABLE_SOLUTION_STACKS
self.assertEqual(
len(mock_responses.LIST_AVAILABLE_SOLUTION_STACKS['SolutionStacks']),
len(elasticbeanstalk.get_available_solution_stacks())
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_application_versions(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATION_VERSIONS_RESPONSE
elasticbeanstalk.get_application_versions(
'my-application',
version_labels=['v1', 'v2'],
max_records=10,
next_token='asdfadfasdf'
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_application_versions',
ApplicationName='my-application',
MaxRecords=10,
NextToken='asdfadfasdf',
VersionLabels=['v1', 'v2']
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_applications(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATIONS_RESPONSE
applications = elasticbeanstalk.get_all_applications()
self.assertEqual(
{'my-application-2', 'my-application', 'my-application-3'},
set([application.name for application in applications]),
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_applications'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_raw_app_environments(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE['Environments'],
elasticbeanstalk.get_raw_app_environments(
'my-application',
include_deleted=True,
deleted_back_to="2015-08-13T23:30:07Z"
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
ApplicationName='my-application',
IncludeDeleted=True,
IncludedDeletedBackTo='2015-08-13T23:30:07Z'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_app_environments(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
self.assertEqual(
len(mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE['Environments']),
len(
elasticbeanstalk.get_app_environments(
'my-application',
include_deleted=True,
deleted_back_to="2015-08-13T23:30:07Z"
)
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
ApplicationName='my-application',
IncludeDeleted=True,
IncludedDeletedBackTo='2015-08-13T23:30:07Z'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_all_environment_names(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
self.assertEqual(
{'environment-1', 'environment-2', 'environment-3', 'environment-4'},
set(elasticbeanstalk.get_all_environment_names())
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_all_environments(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
self.assertEqual(
4,
len(elasticbeanstalk.get_all_environment_names())
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE__SINGLE_ENVIRONMENT
environment = elasticbeanstalk.get_environment(
app_name='my-application',
env_name='environment-1',
env_id='e-sfsdfsfasdads',
include_deleted=True,
deleted_back_to='2015-08-13T23:30:07Z',
)
self.assertEqual('environment-1', environment.name)
self.assertEqual('e-sfsdfsfasdads', environment.id)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
ApplicationName='my-application',
EnvironmentIds=['e-sfsdfsfasdads'],
EnvironmentNames=['environment-1'],
IncludeDeleted=True,
IncludedDeletedBackTo='2015-08-13T23:30:07Z'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment__environment_not_found(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'Environments': []
}
with self.assertRaises(elasticbeanstalk.NotFoundError) as context_manager:
elasticbeanstalk.get_environment(
app_name='my-application',
env_name='environment-1',
env_id='e-sfsdfsfasdads',
include_deleted=True,
deleted_back_to='2015-08-13T23:30:07Z',
)
self.assertEqual(
'Environment "environment-1" not Found.',
str(context_manager.exception)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
ApplicationName='my-application',
EnvironmentIds=['e-sfsdfsfasdads'],
EnvironmentNames=['environment-1'],
IncludeDeleted=True,
IncludedDeletedBackTo='2015-08-13T23:30:07Z'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_names(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE
self.assertEqual(
{'environment-2', 'environment-3', 'environment-1', 'environment-4'},
set(elasticbeanstalk.get_environment_names('my-application'))
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
ApplicationName='my-application',
IncludeDeleted=False
)
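    # Note: the next method redefines test_get_app_version_labels from earlier
    # in this class, so Python keeps only this later definition and the
    # earlier test body never runs.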
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_app_version_labels(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_APPLICATION_VERSIONS_RESPONSE
self.assertEqual(
{'version-label-2', 'version-label-1'},
set(elasticbeanstalk.get_app_version_labels('my-application'))
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_application_versions',
ApplicationName='my-application'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_settings(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE
environment = elasticbeanstalk.get_environment_settings(
'my-application',
'environment-1'
)
self.assertEqual('environment-1', environment.name)
self.assertEqual(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0]['OptionSettings'],
environment.option_settings
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_configuration_settings',
ApplicationName='my-application',
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_resources(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENT_RESOURCES_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_ENVIRONMENT_RESOURCES_RESPONSE,
elasticbeanstalk.get_environment_resources('environment-1')
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environment_resources',
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_new_events__no_new_events(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'Events': []
}
self.assertEqual(
[],
elasticbeanstalk.get_new_events(
'my-application',
'environment-1',
'1341234123412341234134',
last_event_time=datetime.datetime(2018, 3, 27, 23, 47, 41, 830000, tzinfo=tz.tzutc()),
version_label='v1',
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.0 running on 64bit Amazon Linux/2.7.0'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_events',
ApplicationName='my-application',
EnvironmentName='environment-1',
PlatformArn='arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.0 running on 64bit Amazon Linux/2.7.0',
RequestId='1341234123412341234134',
StartTime='2018-03-27 23:47:41.831000+00:00',
VersionLabel='v1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_new_events(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_EVENTS_RESPONSE
self.assertEqual(
set(elasticbeanstalk.Event.json_to_event_objects(mock_responses.DESCRIBE_EVENTS_RESPONSE['Events'])),
set(elasticbeanstalk.get_new_events(
'my-application',
'environment-1',
'1341234123412341234134',
last_event_time=datetime.datetime(2018, 3, 27, 23, 47, 41, 830000, tzinfo=tz.tzutc()),
version_label='v1',
platform_arn='arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.0 running on 64bit Amazon Linux/2.7.0'
))
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_events',
ApplicationName='my-application',
EnvironmentName='environment-1',
PlatformArn='arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.0 running on 64bit Amazon Linux/2.7.0',
RequestId='1341234123412341234134',
StartTime='2018-03-27 23:47:41.831000+00:00',
VersionLabel='v1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_storage_location(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_STORAGE_LOCATION_RESPONSE
self.assertEqual(
'elasticbeanstalk-us-west-2-0123456789012',
elasticbeanstalk.get_storage_location()
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_storage_location'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_abort_environment_update(
self,
make_api_call_mock
):
elasticbeanstalk.abort_environment_update('environment-1')
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'abort_environment_update',
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_create_configuration_template(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.CREATE_CONFIGURATION_TEMPLATE_RESPONSE
self.assertEqual(
mock_responses.CREATE_CONFIGURATION_TEMPLATE_RESPONSE,
elasticbeanstalk.create_configuration_template(
'my-application',
'environment-1',
'my-template',
'my configuration template',
'mytag=tag-value'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'create_configuration_template',
ApplicationName='my-application',
Description='my configuration template',
TemplateName='my-template',
TemplateSpecification={
'TemplateSource': {
'EnvironmentName': 'environment-1'
}
},
Tags='mytag=tag-value'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_delete_configuration_template(
self,
make_api_call_mock
):
elasticbeanstalk.delete_configuration_template(
'my-application',
'environment-1'
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'delete_configuration_template',
ApplicationName='my-application',
TemplateName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_validate_template__template_is_valid(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__VALID
self.assertEqual(
mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__VALID,
elasticbeanstalk.validate_template(
'my-application',
'my-template',
'64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'validate_configuration_settings',
ApplicationName='my-application',
TemplateName='my-template',
TemplateSpecification={
'TemplateSource': {
'SolutionStackName': '64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)'
}
}
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_validate_template__template_is_invalid(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__INVALID
self.assertEqual(
mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__INVALID,
elasticbeanstalk.validate_template(
'my-application',
'my-template',
'64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'validate_configuration_settings',
ApplicationName='my-application',
TemplateName='my-template',
TemplateSpecification={
'TemplateSource': {
'SolutionStackName': '64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker)'
}
}
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_validate_template__platform_arn_is_specified(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__VALID
self.assertEqual(
mock_responses.VALIDATE_CONFIGURATION_SETTINGS_RESPONSE__VALID,
elasticbeanstalk.validate_template(
'my-application',
'my-template',
'arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.1 running on 64bit Amazon Linux/2.6.5'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'validate_configuration_settings',
ApplicationName='my-application',
TemplateName='my-template',
TemplateSpecification={
'TemplateSource': {
'PlatformArn': 'arn:aws:elasticbeanstalk:us-west-2::platform/PHP 7.1 running on 64bit Amazon Linux/2.6.5'
}
}
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_describe_template(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_CONFIGURATION_SETTINGS_RESPONSE['ConfigurationSettings'][0],
elasticbeanstalk.describe_template('my-application', 'my-template')
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_configuration_settings',
ApplicationName='my-application',
TemplateName='my-template'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_health(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENT_HEALTH_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_ENVIRONMENT_HEALTH_RESPONSE,
elasticbeanstalk.get_environment_health('environment-1')
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environment_health',
AttributeNames=[
'HealthStatus',
'Status',
'Color',
'Causes',
'ApplicationMetrics',
'InstancesHealth',
'RefreshedAt'
],
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_health__attributes_are_specified(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'HealthStatus': 'Ok'
}
self.assertEqual(
{
'HealthStatus': 'Ok'
},
elasticbeanstalk.get_environment_health('environment-1', attributes=['HealthStatus'])
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environment_health',
AttributeNames=[
'HealthStatus'
],
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_instance_health(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_INSTANCES_HEALTH_RESPONSE
self.assertEqual(
mock_responses.DESCRIBE_INSTANCES_HEALTH_RESPONSE,
elasticbeanstalk.get_instance_health(
'environment-1',
next_token='1234123412341234'
)
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_instances_health',
AttributeNames=[
'HealthStatus',
'Color',
'Causes',
'ApplicationMetrics',
'RefreshedAt',
'LaunchedAt',
'System',
'Deployment',
'AvailabilityZone',
'InstanceType'
],
EnvironmentName='environment-1',
NextToken='1234123412341234'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_compose_environments(
self,
make_api_call_mock
):
elasticbeanstalk.compose_environments(
'my-application',
[
'v1',
'v2'
],
group_name='dev'
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'compose_environments',
ApplicationName='my-application',
GroupName='dev',
VersionLabels=['v1', 'v2']
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
    def test_rebuild_environment(
self,
make_api_call_mock
):
elasticbeanstalk.rebuild_environment(
env_id='e-1234123434',
env_name='environment-1'
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'rebuild_environment',
EnvironmentId='e-1234123434',
EnvironmentName='environment-1'
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_get_environment_arn(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_ENVIRONMENTS_RESPONSE__SINGLE_ENVIRONMENT
self.assertEqual(
'arn:aws:elasticbeanstalk:us-west-2:123123123123:environment/my-application/environment-1',
elasticbeanstalk.get_environment_arn('environment-1')
)
make_api_call_mock.assert_called_once_with(
'elasticbeanstalk',
'describe_environments',
EnvironmentNames=['environment-1'],
IncludeDeleted=False
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_list_tags_for_resource(
self,
make_api_call_mock
):
make_api_call_mock.side_effect = [
mock_responses.LIST_TAGS_FOR_RESOURCE_RESPONSE,
]
self.assertEqual(
[
{
'Key': 'Name',
'Value': 'environment-1'
},
{
'Key': 'elasticbeanstalk:environment-id',
'Value': 'e-cnpdgh26cm'
},
{
'Key': 'elasticbeanstalk:environment-name',
'Value': 'environment-1'
}
],
elasticbeanstalk.list_tags_for_resource('arn:aws:elasticbeanstalk:us-west-2:123123123123:environment/my-application/environment-1')
)
make_api_call_mock.assert_has_calls(
[
mock.call(
'elasticbeanstalk',
'list_tags_for_resource',
ResourceArn='arn:aws:elasticbeanstalk:us-west-2:123123123123:environment/my-application/environment-1'
)
]
)
@mock.patch('ebcli.lib.elasticbeanstalk.aws.make_api_call')
def test_update_tags_for_resource(
self,
make_api_call_mock
):
elasticbeanstalk.update_tags_for_resource(
'arn:aws:elasticbeanstalk:us-west-2:123123123123:environment/my-application/environment-1',
tags_to_add='KEY1=VALUE1,KEY2=VALUE2',
tags_to_remove='KEY3'
)
make_api_call_mock.assert_has_calls(
[
mock.call(
'elasticbeanstalk',
'update_tags_for_resource',
ResourceArn='arn:aws:elasticbeanstalk:us-west-2:123123123123:environment/my-application/environment-1',
TagsToAdd='KEY1=VALUE1,KEY2=VALUE2',
TagsToRemove='KEY3'
)
]
)
@mock.patch('ebcli.lib.elasticbeanstalk._make_api_call')
def test_list_platform_branches__no_filters(
self,
_make_api_call_mock
):
api_response = {
'PlatformBranchSummaryList': [
{'PlatformName': 'Python', 'BranchName': 'Python 3.6 running on 64bit Amazon Linux', 'LifecycleState': 'Supported'}
],
}
_make_api_call_mock.return_value = api_response
expected_result = api_response['PlatformBranchSummaryList']
result = elasticbeanstalk.list_platform_branches()
_make_api_call_mock.assert_called_once_with('list_platform_branches')
self.assertEqual(result, expected_result)
@mock.patch('ebcli.lib.elasticbeanstalk._make_api_call')
def test_list_platform_branches__with_filters(
self,
_make_api_call_mock
):
api_response = {
'PlatformBranchSummaryList': [
{'PlatformName': 'Python', 'BranchName': 'Python 3.6 running on 64bit Amazon Linux', 'LifecycleState': 'Supported'}
],
}
filters = [
{'Attribute': 'PlatformName', 'Operator': '=', 'Values': ['Python']}
]
_make_api_call_mock.return_value = api_response
expected_result = api_response['PlatformBranchSummaryList']
result = elasticbeanstalk.list_platform_branches(filters=filters)
_make_api_call_mock.assert_called_once_with('list_platform_branches', Filters=filters)
self.assertEqual(result, expected_result)
@mock.patch('ebcli.lib.elasticbeanstalk._make_api_call')
def test_list_platform_branches__pagination(
self,
_make_api_call_mock
):
api_responses = [
{
'PlatformBranchSummaryList': [
{'PlatformName': 'Python', 'BranchName': 'Python 3.6 running on 64bit Amazon Linux', 'LifecycleState': 'Supported'},
{'PlatformName': 'Python', 'BranchName': 'Python 3.4 running on 64bit Amazon Linux', 'LifecycleState': 'Deprecated'}
],
'NextToken': 's91T7CbPGkFOIOXjkrEMYzEjMSNxNDY5aTY='
},
{
'PlatformBranchSummaryList': [
{'PlatformName': 'Python', 'BranchName': 'Python 2.7 running on 64bit Amazon Linux', 'LifecycleState': 'Deprecated'},
{'PlatformName': 'Python', 'BranchName': 'Python 2.7 running on 32bit Amazon Linux', 'LifecycleState': 'Retired'}
],
'NextToken': 'TL+36+iG/RNeofj8Jle/szIjMSNxNDY5anY='
},
{
'PlatformBranchSummaryList': [
{'PlatformName': 'Python', 'BranchName': 'Python 2.6 running on 32bit Amazon Linux', 'LifecycleState': 'Retired'},
{'PlatformName': 'Python', 'BranchName': 'Python 2.6 running on 64bit Amazon Linux', 'LifecycleState': 'Deprecated'}
],
}
]
        expected_result = (
            api_responses[0]['PlatformBranchSummaryList']
            + api_responses[1]['PlatformBranchSummaryList']
            + api_responses[2]['PlatformBranchSummaryList']
        )
_make_api_call_mock.side_effect = api_responses
result = elasticbeanstalk.list_platform_branches()
_make_api_call_mock.assert_has_calls(
[
mock.call('list_platform_branches'),
mock.call('list_platform_branches', NextToken=api_responses[0]['NextToken']),
mock.call('list_platform_branches', NextToken=api_responses[1]['NextToken']),
],
any_order=False
)
self.assertEqual(result, expected_result)
@mock.patch('ebcli.lib.elasticbeanstalk.describe_configuration_options')
def test_list_application_load_balancers__no_vpc(
self,
describe_configuration_options_mock
):
        platform_arn = "arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12"
vpc = None
api_response = {
'SolutionStackName': '64bit Amazon Linux 2018.03 v2.9.12 running Python 3.6',
'PlatformArn': 'arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12',
'Tier': {'Name': 'WebServer', 'Type': 'Standard', 'Version': '1.0'},
'Options': [
{
'Namespace': 'aws:elbv2:loadbalancer',
'Name': 'SharedLoadBalancer',
'ChangeSeverity': 'Unknown',
'UserDefined': False,
'ValueType': 'Scalar',
'ValueOptions': [
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-1/72074d479748b405',
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-2/5a957e362e1339a9',
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-3/3dfc9ab663f79319',
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-4/5791574adb5d39c4'],
'Description': 'The arn of an existing load balancer to use for environment Load Balancer.'
},
{
'Namespace': 'aws:elbv2:listener',
'Name': 'Rules',
'ChangeSeverity': 'Unknown',
'UserDefined': False,
'ValueType': 'List',
'Description': 'List of rules to apply for the listener. These rules are defined in aws:elbv2:listenerrule namespace.'
}
],
'ResponseMetadata': {'RequestId': '0538eaa9-5dc2-4976-81e0-c485da2f9234', 'HTTPStatusCode': 200, 'date': 'Tue, 07 Jul 2020 18:52:17 GMT', 'RetryAttempts': 0}
}
kwargs = {
'OptionSettings': [
{'Namespace': 'aws:elasticbeanstalk:environment', 'OptionName': 'LoadBalancerType', 'Value': 'application'},
{'Namespace': 'aws:elasticbeanstalk:environment', 'OptionName': 'LoadBalancerIsShared', 'Value': 'true'}
],
'Options': [
{'Namespace': 'aws:elbv2:loadbalancer', 'OptionName': 'SharedLoadBalancer'}
],
'PlatformArn': 'arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12'}
describe_configuration_options_mock.return_value = api_response
expected_result = api_response['Options'][0]['ValueOptions']
        result = elasticbeanstalk.list_application_load_balancers(platform_arn, vpc)
describe_configuration_options_mock.assert_called_once_with(**kwargs)
self.assertEqual(
expected_result, result
)
@mock.patch('ebcli.lib.elasticbeanstalk.describe_configuration_options')
def test_list_application_load_balancers__with_vpc(
self,
describe_configuration_options_mock
):
        platform_arn = "arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12"
vpc = {'id': 'vpc-00252f9da55164b47', 'ec2subnets': 'subnet-018b695a5badc7ec7,subnet-07ce18248accbe5c9'}
api_response = {
'SolutionStackName': '64bit Amazon Linux 2018.03 v2.9.12 running Python 3.6',
'PlatformArn': 'arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12',
'Tier': {'Name': 'WebServer', 'Type': 'Standard', 'Version': '1.0'},
'Options': [
{
'Namespace': 'aws:elbv2:loadbalancer',
'Name': 'SharedLoadBalancer',
'ChangeSeverity': 'Unknown',
'UserDefined': False,
'ValueType': 'Scalar',
'ValueOptions': [
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-vpc1/a2f730eefb8aab29',
'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-vpc2/43ca57d4b9462ba6'],
'Description': 'The arn of an existing load balancer to use for environment Load Balancer.'
},
{
'Namespace': 'aws:elbv2:listener',
'Name': 'Rules',
'ChangeSeverity': 'Unknown',
'UserDefined': False,
'ValueType': 'List',
'Description': 'List of rules to apply for the listener. These rules are defined in aws:elbv2:listenerrule namespace.'
}
],
'ResponseMetadata': {'RequestId': '6a823882-f6af-46b4-8fa3-ccc8004766c8', 'HTTPStatusCode': 200, 'date': 'Tue, 07 Jul 2020 20:26:26 GMT', 'RetryAttempts': 0}
}
kwargs = {
'OptionSettings': [
{'Namespace': 'aws:elasticbeanstalk:environment', 'OptionName': 'LoadBalancerType', 'Value': 'application'},
{'Namespace': 'aws:elasticbeanstalk:environment', 'OptionName': 'LoadBalancerIsShared', 'Value': 'true'},
{'Namespace': 'aws:ec2:vpc', 'OptionName': 'VPCId', 'Value': 'vpc-00252f9da55164b47'},
{'Namespace': 'aws:ec2:vpc', 'OptionName': 'Subnets', 'Value': 'subnet-018b695a5badc7ec7,subnet-07ce18248accbe5c9'}
],
'Options': [
{'Namespace': 'aws:elbv2:loadbalancer', 'OptionName': 'SharedLoadBalancer'}
],
'PlatformArn': 'arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.12'}
describe_configuration_options_mock.return_value = api_response
expected_result = api_response['Options'][0]['ValueOptions']
        result = elasticbeanstalk.list_application_load_balancers(platform_arn, vpc)
describe_configuration_options_mock.assert_called_once_with(**kwargs)
self.assertEqual(
expected_result, result
)
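# --- Editor's note: hedged sketch, not part of the original test module. ---
# The pagination test for list_platform_branches above implies an accumulator
# that follows NextToken across pages. A minimal stand-alone sketch of that
# pattern, assuming a `make_api_call(operation, **kwargs)` helper returning the
# raw response dict (names here are illustrative, not the library's own):
def _list_platform_branches_sketch(make_api_call, filters=None):
    kwargs = {'Filters': filters} if filters is not None else {}
    branches = []
    while True:
        response = make_api_call('list_platform_branches', **kwargs)
        branches.extend(response.get('PlatformBranchSummaryList', []))
        if 'NextToken' not in response:
            return branches
        kwargs['NextToken'] = response['NextToken']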
|
b705c2a687cade4f4d552192ec4c00f275ef5cc0
|
e7f2a8c466c14b9821e59740ed0407107e1254a4
|
/rasa/shared/importers/rasa.py
|
53ba35b5ee80e865a79bff460c315d21ac3f6b64
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
RasaHQ/rasa
|
4a31134308a9a4d8824fe7faef02526accdd0f19
|
50857610bdf0c26dc61f3203a6cbb4bcf193768c
|
refs/heads/main
| 2023-08-28T01:53:56.981600
| 2023-08-25T10:20:49
| 2023-08-25T10:20:49
| 70,908,208
| 13,167
| 3,739
|
Apache-2.0
| 2023-09-14T09:54:40
| 2016-10-14T12:27:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
rasa.py
|
import logging
import os
from typing import Dict, List, Optional, Text, Union
import rasa.shared.data
import rasa.shared.utils.common
import rasa.shared.utils.io
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.importers import utils
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.core.domain import InvalidDomain, Domain
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
logger = logging.getLogger(__name__)
class RasaFileImporter(TrainingDataImporter):
"""Default `TrainingFileImporter` implementation."""
def __init__(
self,
config_file: Optional[Text] = None,
domain_path: Optional[Text] = None,
training_data_paths: Optional[Union[List[Text], Text]] = None,
):
self._domain_path = domain_path
self._nlu_files = rasa.shared.data.get_data_files(
training_data_paths, rasa.shared.data.is_nlu_file
)
self._story_files = rasa.shared.data.get_data_files(
training_data_paths, YAMLStoryReader.is_stories_file
)
self._conversation_test_files = rasa.shared.data.get_data_files(
training_data_paths, YAMLStoryReader.is_test_stories_file
)
self.config_file = config_file
def get_config(self) -> Dict:
"""Retrieves model config (see parent class for full docstring)."""
if not self.config_file or not os.path.exists(self.config_file):
logger.debug("No configuration file was provided to the RasaFileImporter.")
return {}
config = rasa.shared.utils.io.read_model_configuration(self.config_file)
return config
@rasa.shared.utils.common.cached_method
def get_config_file_for_auto_config(self) -> Optional[Text]:
"""Returns config file path for auto-config only if there is a single one."""
return self.config_file
def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph:
"""Retrieves training stories / rules (see parent class for full docstring)."""
return utils.story_graph_from_paths(
self._story_files, self.get_domain(), exclusion_percentage
)
def get_conversation_tests(self) -> StoryGraph:
"""Retrieves conversation test stories (see parent class for full docstring)."""
return utils.story_graph_from_paths(
self._conversation_test_files, self.get_domain()
)
def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
"""Retrieves NLU training data (see parent class for full docstring)."""
return utils.training_data_from_paths(self._nlu_files, language)
def get_domain(self) -> Domain:
"""Retrieves model domain (see parent class for full docstring)."""
domain = Domain.empty()
# If domain path is None, return an empty domain
if not self._domain_path:
return domain
try:
domain = Domain.load(self._domain_path)
except InvalidDomain as e:
rasa.shared.utils.io.raise_warning(
f"Loading domain from '{self._domain_path}' failed. Using "
f"empty domain. Error: '{e}'"
)
return domain
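# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Typical construction of the importer; the file paths below are hypothetical
# placeholders for a project's config, domain and training-data locations.
def _rasa_file_importer_sketch() -> Domain:
    importer = RasaFileImporter(
        config_file="config.yml",        # hypothetical path
        domain_path="domain.yml",        # hypothetical path
        training_data_paths=["data/"],   # hypothetical path
    )
    # get_domain() falls back to Domain.empty() and warns on InvalidDomain
    return importer.get_domain()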
|
0836644cc9cc786f39de4ff25645ba60fa8feb48
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_sweep/search_space/uniform.py
|
2eb1d98f35161c6fcd7a14036861b34501bc372d
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
uniform.py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument
from marshmallow import ValidationError, fields, post_load, pre_dump
from azure.ai.ml._schema._sweep._constants import BASE_ERROR_MESSAGE
from azure.ai.ml._schema.core.fields import DumpableIntegerField, StringTransformedEnum, UnionField
from azure.ai.ml._schema.core.schema import PatchedSchemaMeta
from azure.ai.ml.constants._common import TYPE
from azure.ai.ml.constants._job.sweep import SearchSpace
class UniformSchema(metaclass=PatchedSchemaMeta):
type = StringTransformedEnum(required=True, allowed_values=SearchSpace.UNIFORM_LOGUNIFORM)
min_value = UnionField([DumpableIntegerField(strict=True), fields.Float()], required=True)
max_value = UnionField([DumpableIntegerField(strict=True), fields.Float()], required=True)
@pre_dump
def predump(self, data, **kwargs):
from azure.ai.ml.sweep import LogUniform, Uniform
if not isinstance(data, (Uniform, LogUniform)):
raise ValidationError("Cannot dump non-Uniform or non-LogUniform object into UniformSchema")
if data.type.lower() not in SearchSpace.UNIFORM_LOGUNIFORM:
raise ValidationError(BASE_ERROR_MESSAGE + str(SearchSpace.UNIFORM_LOGUNIFORM))
return data
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.sweep import LogUniform, Uniform
return Uniform(**data) if data[TYPE] == SearchSpace.UNIFORM else LogUniform(**data)
class QUniformSchema(metaclass=PatchedSchemaMeta):
type = StringTransformedEnum(required=True, allowed_values=SearchSpace.QUNIFORM_QLOGUNIFORM)
min_value = UnionField([DumpableIntegerField(strict=True), fields.Float()], required=True)
max_value = UnionField([DumpableIntegerField(strict=True), fields.Float()], required=True)
q = UnionField([DumpableIntegerField(strict=True), fields.Float()], required=True)
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.sweep import QLogUniform, QUniform
return QUniform(**data) if data[TYPE] == SearchSpace.QUNIFORM else QLogUniform(**data)
@pre_dump
def predump(self, data, **kwargs):
from azure.ai.ml.sweep import QLogUniform, QUniform
if not isinstance(data, (QUniform, QLogUniform)):
raise ValidationError("Cannot dump non-QUniform or non-QLogUniform object into UniformSchema")
return data
class IntegerQUniformSchema(QUniformSchema):
min_value = DumpableIntegerField(strict=True, required=True)
max_value = DumpableIntegerField(strict=True, required=True)
q = DumpableIntegerField(strict=True, required=True)
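# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Loading a dict through UniformSchema dispatches on the "type" key via the
# @post_load hook above; this assumes the schema is instantiable like any
# marshmallow schema and that "uniform" matches SearchSpace.UNIFORM.
def _uniform_schema_sketch():
    schema = UniformSchema()
    dist = schema.load({"type": "uniform", "min_value": 0.0, "max_value": 1.0})
    return schema.dump(dist)  # round-trips back to the {"type": ...} dict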
|
c3f030f6fa179d017e345138950819b1ee7ebe41
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Learning_TF_Hope/06__word_embeddings_and_rnns/GRU_pretrained_GloVe.py
|
9b2ecb0cd35688e5d7ef7e7a5d185ca2fc249759
|
[
"MIT"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,578
|
py
|
GRU_pretrained_GloVe.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 1 12:18:27 2017
@author: tomhope
"""
import zipfile
import numpy as np
import tensorflow as tf
path_to_glove = "c:\\tmp\\data\\glove.840B.300d.zip"
PRE_TRAINED = True
GLOVE_SIZE = 300
batch_size = 128
embedding_dimension = 64
num_classes = 2
hidden_layer_size = 32
times_steps = 6
digit_to_word_map = {1:"One",2:"Two", 3:"Three", 4:"Four", 5:"Five",
6:"Six",7:"Seven",8:"Eight",9:"Nine"}
digit_to_word_map[0]="PAD_TOKEN"
even_sentences = []
odd_sentences = []
seqlens = []
for i in range(10000):
rand_seq_len = np.random.choice(range(3,7))
seqlens.append(rand_seq_len)
rand_odd_ints = np.random.choice(range(1,10,2),
rand_seq_len)
rand_even_ints = np.random.choice(range(2,10,2),
rand_seq_len)
if rand_seq_len<6:
rand_odd_ints = np.append(rand_odd_ints,
[0]*(6-rand_seq_len))
rand_even_ints = np.append(rand_even_ints,
[0]*(6-rand_seq_len))
    even_sentences.append(" ".join([digit_to_word_map[r] for
                                    r in rand_even_ints]))
    odd_sentences.append(" ".join([digit_to_word_map[r] for
                                   r in rand_odd_ints]))
data = even_sentences+odd_sentences
#same seq lengths for even, odd sentences
seqlens*=2
labels = [1]*10000 + [0]*10000
for i in range(len(labels)):
label = labels[i]
one_hot_encoding = [0]*2
one_hot_encoding[label] = 1
labels[i] = one_hot_encoding
word2index_map ={}
index=0
for sent in data:
for word in sent.split():
if word not in word2index_map:
word2index_map[word] = index
index+=1
index2word_map = {index: word for word, index in word2index_map.items()}
vocabulary_size = len(index2word_map)
def get_glove(path_to_glove,word2index_map):
embedding_weights = {}
count_all_words = 0
with zipfile.ZipFile(path_to_glove) as z:
with z.open("glove.840B.300d.txt") as f:
for line in f:
vals = line.split()
word = str(vals[0].decode("utf-8"))
if word in word2index_map:
print(word)
count_all_words+=1
coefs = np.asarray(vals[1:], dtype='float32')
coefs/=np.linalg.norm(coefs)
embedding_weights[word] = coefs
if count_all_words==len(word2index_map)-1:
break
return embedding_weights
word2embedding_dict = get_glove(path_to_glove,word2index_map)
embedding_matrix = np.zeros((vocabulary_size ,GLOVE_SIZE))
for word,index in word2index_map.items():
if not word == "PAD_TOKEN":
word_embedding = word2embedding_dict[word]
embedding_matrix[index,:] = word_embedding
data_indices = list(range(len(data)))
np.random.shuffle(data_indices)
data = np.array(data)[data_indices]
labels = np.array(labels)[data_indices]
seqlens = np.array(seqlens)[data_indices]
train_x = data[:10000]
train_y = labels[:10000]
train_seqlens = seqlens[:10000]
test_x = data[10000:]
test_y = labels[10000:]
test_seqlens = seqlens[10000:]
def get_sentence_batch(batch_size,data_x,
data_y,data_seqlens):
instance_indices = list(range(len(data_x)))
np.random.shuffle(instance_indices)
batch = instance_indices[:batch_size]
x = [[word2index_map[word] for word in data_x[i].split()]
for i in batch]
y = [data_y[i] for i in batch]
seqlens = [data_seqlens[i] for i in batch]
return x,y,seqlens
_inputs = tf.placeholder(tf.int32, shape=[batch_size,times_steps])
embedding_placeholder = tf.placeholder(tf.float32, [vocabulary_size,
GLOVE_SIZE])
_labels = tf.placeholder(tf.float32, shape=[batch_size, num_classes])
_seqlens = tf.placeholder(tf.int32, shape=[batch_size])
if PRE_TRAINED:
embeddings = tf.Variable(tf.constant(0.0, shape=[vocabulary_size, GLOVE_SIZE]),
trainable=True)
#if using pre-trained embeddings, assign them to the embeddings variable
embedding_init = embeddings.assign(embedding_placeholder)
embed = tf.nn.embedding_lookup(embeddings, _inputs)
else:
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size,
embedding_dimension],
-1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, _inputs)
with tf.name_scope("biGRU"):
with tf.variable_scope('forward'):
gru_fw_cell = tf.contrib.rnn.GRUCell(hidden_layer_size)
gru_fw_cell = tf.contrib.rnn.DropoutWrapper(gru_fw_cell)
with tf.variable_scope('backward'):
gru_bw_cell = tf.contrib.rnn.GRUCell(hidden_layer_size)
gru_bw_cell = tf.contrib.rnn.DropoutWrapper(gru_bw_cell)
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=gru_fw_cell,
cell_bw=gru_bw_cell,
inputs=embed,
sequence_length=
_seqlens,
dtype=tf.float32,
scope="biGRU")
states = tf.concat(values=states, axis=1)
weights = {
'linear_layer': tf.Variable(tf.truncated_normal([2*hidden_layer_size,
num_classes],
mean=0,stddev=.01))
}
biases = {
'linear_layer':tf.Variable(tf.truncated_normal([num_classes],
mean=0,stddev=.01))
}
#extract the final state and use in a linear layer
final_output = tf.matmul(states,
weights["linear_layer"]) + biases["linear_layer"]
softmax = tf.nn.softmax_cross_entropy_with_logits(logits=final_output,
labels=_labels)
cross_entropy = tf.reduce_mean(softmax)
train_step = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(_labels,1),
tf.argmax(final_output,1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction,
tf.float32)))*100
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(embedding_init, feed_dict=
{embedding_placeholder: embedding_matrix})
for step in range(1000):
x_batch, y_batch,seqlen_batch = get_sentence_batch(batch_size,
train_x,train_y,
train_seqlens)
sess.run(train_step,feed_dict={_inputs:x_batch, _labels:y_batch,
_seqlens:seqlen_batch})
if step % 100 == 0:
acc = sess.run(accuracy,feed_dict={_inputs:x_batch,
_labels:y_batch,
_seqlens:seqlen_batch})
print("Accuracy at %d: %.5f" % (step, acc))
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings),
1, keep_dims=True))
normalized_embeddings = embeddings / norm
normalized_embeddings_matrix = sess.run(normalized_embeddings)
for test_batch in range(5):
x_test, y_test,seqlen_test = get_sentence_batch(batch_size,
test_x,test_y,
test_seqlens)
batch_pred,batch_acc = sess.run([tf.argmax(final_output,1),
accuracy],
feed_dict={_inputs:x_test,
_labels:y_test,
_seqlens:seqlen_test})
print("Test batch accuracy %d: %.5f" % (test_batch, batch_acc))
ref_word = normalized_embeddings_matrix[word2index_map["Three"]]
cosine_dists = np.dot(normalized_embeddings_matrix,ref_word)
ff = np.argsort(cosine_dists)[::-1][1:10]
for f in ff:
print(index2word_map[f])
print(cosine_dists[f])
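# --- Editor's note: hedged clarification, not part of the original script. ---
# Why the linear layer is shaped [2*hidden_layer_size, num_classes]: the
# bidirectional GRU returns a (forward_state, backward_state) pair, each of
# shape [batch_size, hidden_layer_size]; tf.concat(values=states, axis=1)
# therefore yields [batch_size, 2*hidden_layer_size] feeding the projection.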
|
be9cfacfb7b8dd4048dc708ce101c2478975bd0d
|
97e78e8f9b1510eae91f00ee6abb06b235f0f5dc
|
/experiments/lidc/_init_paths.py
|
0e03a6c9a89fb78c73ef2655fed5f2485959fdb7
|
[
"Apache-2.0"
] |
permissive
|
M3DV/ACSConv
|
7fc5c01751b005be8cb39dea39258457c7cd3cb4
|
95dc860a77e309f010a3d8be1f675e77c7dfeda4
|
refs/heads/master
| 2023-08-16T23:48:37.571616
| 2023-08-08T01:39:38
| 2023-08-08T01:39:38
| 223,711,776
| 156
| 22
|
Apache-2.0
| 2023-08-08T01:39:39
| 2019-11-24T08:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
_init_paths.py
|
""" setup python path """
import sys
import os
def add_path(path):
if path not in sys.path:
sys.path.insert(-1, path)
add_path(os.path.join(sys.path[0], '../'))
add_path(os.path.join(sys.path[0], '../../'))
print("add code root path (with `mylib`, 'acsconv').")
|
e8a08eb88677c06e36afddb540c7770be29338bc
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/network/v20190901/_inputs.py
|
dde1db84bd3176af8ccd9a3f33de82627c6bf13d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,684
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ConnectionMonitorDestinationArgs',
'ConnectionMonitorSourceArgs',
]
@pulumi.input_type
class ConnectionMonitorDestinationArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
resource_id: Optional[pulumi.Input[str]] = None):
"""
Describes the destination of connection monitor.
:param pulumi.Input[str] address: Address of the connection monitor destination (IP or domain name).
:param pulumi.Input[int] port: The destination port used by connection monitor.
:param pulumi.Input[str] resource_id: The ID of the resource used as the destination by connection monitor.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if port is not None:
pulumi.set(__self__, "port", port)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Address of the connection monitor destination (IP or domain name).
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The destination port used by connection monitor.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the resource used as the destination by connection monitor.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@pulumi.input_type
class ConnectionMonitorSourceArgs:
def __init__(__self__, *,
resource_id: pulumi.Input[str],
port: Optional[pulumi.Input[int]] = None):
"""
Describes the source of connection monitor.
:param pulumi.Input[str] resource_id: The ID of the resource used as the source by connection monitor.
:param pulumi.Input[int] port: The source port used by connection monitor.
"""
pulumi.set(__self__, "resource_id", resource_id)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Input[str]:
"""
The ID of the resource used as the source by connection monitor.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The source port used by connection monitor.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
|
c183746f7f8f0004a2c17299cc8c151958555853
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/TrackingTools/GeomPropagators/python/BeamHaloPropagatorOpposite_cfi.py
|
e68d6a07b90d5d910d84647acf53336191e7b8af
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
BeamHaloPropagatorOpposite_cfi.py
|
import FWCore.ParameterSet.Config as cms
BeamHaloPropagatorOpposite = cms.ESProducer("BeamHaloPropagatorESProducer",
ComponentName = cms.string('BeamHaloPropagatorOpposite'),
CrossingTrackerPropagator = cms.string('BeamHaloSHPropagatorOpposite'),
PropagationDirection = cms.string('oppositeToMomentum'),
EndCapTrackerPropagator = cms.string('BeamHaloMPropagatorOpposite')
)
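# --- Editor's note: hedged usage sketch, not part of the original cfi. ---
# In a CMSSW configuration this fragment is normally pulled in with
# process.load, after which the ESProducer can be cloned or overridden:
#
#   process.load("TrackingTools.GeomPropagators.BeamHaloPropagatorOpposite_cfi")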
|
036f5fca6870f6365c8b49013f6a30d1c36ffd40
|
8b57ca58722bdd3b9335b10ead2ce578d67a636f
|
/test/controllers_juju/test_controllers_clouds_tui.py
|
c278f35b51a5be3cc68c47ff7c72ce47d006a0db
|
[
"MIT"
] |
permissive
|
conjure-up/conjure-up
|
5257d1937961c13babb83cdb396701ff69aabcc4
|
d2bf8ab8e71ff01321d0e691a8d3e3833a047678
|
refs/heads/master
| 2023-09-03T11:56:43.476146
| 2021-04-12T14:27:43
| 2021-04-12T14:27:43
| 45,847,491
| 473
| 78
|
MIT
| 2021-04-12T14:27:43
| 2015-11-09T15:36:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,115
|
py
|
test_controllers_clouds_tui.py
|
#!/usr/bin/env python
#
# tests controllers/clouds/tui.py
#
# Copyright 2016 Canonical, Ltd.
import unittest
from unittest.mock import MagicMock, call, patch
from conjureup import events
from conjureup.controllers.juju.clouds.tui import CloudsController
class CloudsTUIRenderTestCase(unittest.TestCase):
def setUp(self):
self.controller = CloudsController()
self.utils_patcher = patch(
'conjureup.controllers.juju.clouds.tui.utils')
self.mock_utils = self.utils_patcher.start()
self.finish_patcher = patch(
'conjureup.controllers.juju.clouds.tui.CloudsController.finish')
self.mock_finish = self.finish_patcher.start()
self.app_patcher = patch(
'conjureup.controllers.juju.clouds.tui.app')
self.mock_app = self.app_patcher.start()
self.mock_app.ui = MagicMock(name="app.ui")
self.ev_app_patcher = patch(
'conjureup.events.app', self.mock_app)
self.ev_app_patcher.start()
self.juju_patcher = patch(
'conjureup.controllers.juju.clouds.tui.juju')
self.mock_juju = self.juju_patcher.start()
events.Shutdown.clear()
def tearDown(self):
self.utils_patcher.stop()
self.finish_patcher.stop()
self.app_patcher.stop()
self.ev_app_patcher.stop()
self.juju_patcher.stop()
def test_render(self):
"Rendering with a known cloud should call finish"
self.controller._check_lxd_compat = MagicMock()
self.mock_app.provider.cloud = "aws"
t = ['aws']
self.mock_juju.get_clouds.return_value.keys.return_value = t
self.controller.render()
assert self.mock_app.loop.create_task.called
class CloudsTUIFinishTestCase(unittest.TestCase):
def setUp(self):
self.controller = CloudsController()
self.controllers_patcher = patch(
'conjureup.controllers.juju.clouds.tui.controllers')
self.mock_controllers = self.controllers_patcher.start()
self.utils_patcher = patch(
'conjureup.controllers.juju.clouds.tui.utils')
self.mock_utils = self.utils_patcher.start()
self.render_patcher = patch(
'conjureup.controllers.juju.clouds.tui.CloudsController.render')
self.mock_render = self.render_patcher.start()
self.app_patcher = patch(
'conjureup.controllers.juju.clouds.tui.app')
self.mock_app = self.app_patcher.start()
self.mock_app.ui = MagicMock(name="app.ui")
self.mock_app.conjurefile = {}
self.ev_app_patcher = patch(
'conjureup.events.app', self.mock_app)
self.ev_app_patcher.start()
self.juju_patcher = patch(
'conjureup.controllers.juju.clouds.tui.juju')
self.mock_juju = self.juju_patcher.start()
self.gcc_patcher = patch(
'conjureup.controllers.juju.clouds.'
'tui.juju.get_controller_in_cloud')
self.mock_gcc = self.gcc_patcher.start()
def tearDown(self):
self.controllers_patcher.stop()
self.utils_patcher.stop()
self.render_patcher.stop()
self.app_patcher.stop()
self.ev_app_patcher.stop()
self.juju_patcher.stop()
self.gcc_patcher.stop()
def test_finish_w_model(self):
"clouds.finish with an existing controller"
self.mock_gcc.return_value = 'testcontroller'
self.mock_app.conjurefile['model'] = 'testmodel'
self.mock_app.provider.cloud = 'cloud'
self.controller.finish()
self.mock_controllers.use.assert_has_calls([
call('credentials'), call().render()])
def test_finish_no_model(self):
"clouds.finish without existing controller"
self.mock_gcc.return_value = None
self.mock_app.conjurefile['cloud'] = 'testcloud'
self.mock_app.conjurefile['controller'] = None
self.mock_app.conjurefile['model'] = None
self.controller.finish()
self.mock_controllers.use.assert_has_calls([
call('credentials'), call().render()])
|
6b84d7cc4170fe8c090428de8d5a91c9a53a3eec
|
152b74ed7d60d75a9d70f6637c107fff9b064ff9
|
/Chapter08/Testing Adversarial-Robustness of Neural Networks/abs_models/nets.py
|
84c17cb1ad43860f2b063adaa4a488e6f37ecb3b
|
[
"MIT"
] |
permissive
|
PacktPublishing/Machine-Learning-for-Cybersecurity-Cookbook
|
1d7a50fb79b5da8c411eda9dc9cface4d0f78125
|
19b9757020cbcb09d9bb4249605fbb9c7322d92b
|
refs/heads/master
| 2023-05-12T08:29:13.569598
| 2023-01-18T10:19:07
| 2023-01-18T10:19:07
| 222,411,828
| 250
| 164
|
MIT
| 2023-05-01T20:11:44
| 2019-11-18T09:33:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,406
|
py
|
nets.py
|
import torch
import numpy as np
from torch import nn
from abs_models import utils as u
class Architectures(nn.Module):
def __init__(self, input_size=None):
super(Architectures, self).__init__()
self.c = input_size
self.iters = 0
def forward(self, input):
for module in self._modules.values():
input = module(input)
return input
class ConvAE(Architectures):
def __init__(self, EncArgs, DecArgs):
super().__init__(input_size=None)
self.latent = None
self.Encoder = ConvEncoder(**EncArgs)
self.Decoder = ConvDecoder(**DecArgs)
def forward(self, x):
self.latent = self.Encoder.forward(x)
return self.Decoder.forward(self.latent)
class VariationalAutoEncoder(ConvAE):
def __init__(self, EncArgs, DecArgs, latent_act_fct=nn.Tanh):
self.fac = 2
# Decoder must match encoder
EncArgs['feat_maps'][-1] = int(EncArgs['feat_maps'][-1] * self.fac)
self.n_latent = int(EncArgs['feat_maps'][-1])
self.depth = len(EncArgs['feat_maps'])
if 'act_fcts' not in EncArgs.keys():
EncArgs['act_fcts'] = self.depth * [torch.nn.ELU]
EncArgs['act_fcts'][-1] = None
# half amount of layers (half mu, half sigma)
DecArgs['input_sizes'] = [int(EncArgs['feat_maps'][-1] / self.fac)]
super().__init__(EncArgs, DecArgs)
EncArgs['feat_maps'][-1] = int(EncArgs['feat_maps'][-1] / self.fac)
self.std = None
self.mu = None
self.logvar = None
self.latent_act_fct = latent_act_fct()
def reparameterize(self, inp):
self.mu = self.latent_act_fct(
inp[:, :int(self.n_latent / self.fac), :, :])
if self.training:
# std
self.logvar = inp[:, int(self.n_latent / 2):, :, :]
self.std = self.logvar.mul(0.5).exp_()
# reparam of mu
eps = torch.empty_like(self.mu.data).normal_()
self.latent = eps.mul(self.std).add_(self.mu)
else: # test
self.latent = self.mu
self.logvar = inp[:, int(self.n_latent / 2):, :, :]
self.std = self.logvar.mul(0.5).exp_()
def forward(self, x):
prelatent = self.Encoder.forward(x)
self.reparameterize(prelatent)
out = self.Decoder(self.latent)
return out
class ConvEncoder(nn.Sequential):
def __init__(self, feat_maps=(256, 128, 128), input_sizes=(1, 28, 28),
kernels=(5, 3, 3),
BNs=None, act_fcts=None, dilations=None, strides=None):
super().__init__()
self.latent = None
self.depth = len(feat_maps)
if BNs is None:
BNs = self.depth * [True]
BNs[-1] = False
if act_fcts is None:
act_fcts = self.depth * [nn.ELU]
act_fcts[-1] = nn.Tanh
if dilations is None:
dilations = self.depth * [1]
if strides is None:
strides = self.depth * [1]
# check
args = [feat_maps, kernels, dilations, strides]
for i, it in enumerate(args):
if len(it) != self.depth:
                raise Exception('argument %d has wrong length: %s (expected %d entries)' % (i, it, self.depth))
feat_maps = [input_sizes[0]] + list(feat_maps)
# build net
for i, (BN, act_fct, kx, dil, stride) in enumerate(
zip(BNs, act_fcts, kernels, dilations, strides)):
self.add_module('conv_%i' % i, nn.Conv2d(
feat_maps[i], feat_maps[i + 1], kx,
stride=stride, dilation=dil))
if BN:
self.add_module('bn_%i' % i, nn.BatchNorm2d(feat_maps[i + 1]))
if act_fct is not None:
self.add_module('nl_%i' % i, act_fct())
def forward(self, input):
for module in self._modules.values():
input = module(input)
self.latent = input
return input
class ConvDecoder(nn.Sequential):
def __init__(self, feat_maps=(32, 32, 1), input_sizes=(2, 1, 1),
kernels=(3, 3, 3),
BNs=None, act_fcts=None, dilations=None, strides=(1, 1, 1),
conv_fct=None):
super().__init__()
self.depth = len(feat_maps)
if BNs is None:
BNs = self.depth * [True]
BNs[-1] = False
if act_fcts is None:
act_fcts = self.depth * [nn.ELU]
act_fcts[-1] = u.LinearActFct
if dilations is None:
dilations = self.depth * [1]
# check
args = [feat_maps, kernels, dilations, strides]
for i, it in enumerate(args):
if len(it) != self.depth:
                raise Exception('argument %d has wrong length: %s (expected %d entries)' % (i, it, self.depth))
feat_maps = [input_sizes[0]] + list(feat_maps)
if conv_fct is None:
conv_fct = nn.ConvTranspose2d
# build net
for i, (BN, act_fct, kx, dil, stride) in enumerate(
zip(BNs, act_fcts, kernels, dilations, strides)):
self.add_module('conv_%i' % i, conv_fct(
feat_maps[i], feat_maps[i + 1], kx, stride=stride))
if BN:
self.add_module('bn_%i' % i, nn.BatchNorm2d(feat_maps[i + 1]))
self.add_module('nl_%i' % i, act_fct())
# Other models
# ------------
class NN(Architectures):
def __init__(self, feat_maps=(16, 16, 8), input_sizes=(1, 28, 28),
kernels=(5, 3, 3), strides=None,
BNs=None, act_fcts=None):
super().__init__(input_size=input_sizes)
self.depth = len(feat_maps)
ad_feat_maps = [input_sizes[0]] + list(feat_maps)
if strides is None:
strides = self.depth * [1]
if BNs is None:
BNs = self.depth * [True]
BNs[-1] = False
if act_fcts is None:
act_fcts = self.depth * [nn.ELU]
act_fcts[-1] = None
net_builder(self, BNs, act_fcts=act_fcts, feat_maps=ad_feat_maps,
kernel_sizes=kernels, strides=strides)
class View(nn.Module):
def __init__(self, *shape):
super(View, self).__init__()
self.shape = shape
def forward(self, input):
bs = input.size()[0]
return input.view((bs,) + self.shape)
class NearestNeighbor(nn.Module):
def __init__(self, samples, classes, n_classes):
"""
:param samples: 4D: (n_samples, nchannels, nx, ny)
:param classes: 1D: (2, 3, 4, 1, ...) (n_samples)
"""
super().__init__()
self.samples = samples[None, ...] # (1, n_samples, nch, x, y)
self.classes = classes
self.n_classes = n_classes
self.max_bs = 20
def forward(self, input_batch, return_more=True):
assert len(input_batch.size()) == 4
assert input_batch.size()[-1] == self.samples.size()[-1]
assert input_batch.size()[-2] == self.samples.size()[-2]
assert input_batch.size()[-3] == self.samples.size()[-3]
bs = input_batch.shape[0]
input_batch = input_batch[:, None, ...].to(u.dev()) # (bs, 1, nch, x, y)
def calc_dist(input_batch):
dists = u.L2(self.samples, input_batch, axes=[2, 3, 4])
l2, best_ind_classes = torch.min(dists, 1)
return l2, best_ind_classes
l2s, best_ind_classes = u.auto_batch(self.max_bs, calc_dist, input_batch)
# boring bookkeeping
pred = self.get_classes(bs, input_batch, best_ind_classes)
imgs = self.samples[0, best_ind_classes]
        # print(pred, imgs, l2s)
if return_more:
return pred, imgs, l2s
else:
return pred
def get_classes(self, bs, input_batch, best_ind_classes):
pred = torch.zeros(bs, self.n_classes).to(u.dev())
pred[range(bs), self.classes[best_ind_classes]] = 1.
return pred
class NearestNeighborLogits(NearestNeighbor):
def __init__(self, samples, classes, n_classes):
"""
:param samples: 4D: (n_samples, nchannels, nx, ny)
:param classes: 1D: (2, 3, 4, 1, ...) (n_samples)
"""
        super().__init__(samples, classes, n_classes=n_classes)
self.samples = None
self.all_samples = samples
self.class_samples = [self.all_samples[self.classes == i] for i in range(n_classes)]
self.max_bs = 40
def forward(self, input_batch, return_more=True):
bs, nch, nx, ny = input_batch.shape
all_imgs, all_l2s = [], []
for i, samples in enumerate(self.class_samples):
self.samples = samples[None, ...]
_, imgs, l2s = super().forward(input_batch, return_more=True)
all_imgs.append(imgs)
all_l2s.append(l2s)
all_l2s = torch.cat(all_l2s).view(self.n_classes, -1).transpose(0, 1)
if return_more:
all_imgs = torch.cat(all_imgs).view(self.n_classes, -1, nch, nx, ny).transpose(0, 1)
return -all_l2s, all_imgs, all_l2s
else:
return -all_l2s
def get_classes(self, *args, **kwargs):
return None
def net_builder(net, BNs, act_fcts, feat_maps, kernel_sizes, strides):
# build net
for i, (BN, act_fct, kx, stride) in enumerate(
zip(BNs, act_fcts, kernel_sizes, strides)):
net.add_module('conv_%i' % i, nn.Conv2d(
feat_maps[i], feat_maps[i + 1], kx, stride=stride))
if BN:
net.add_module('bn_%i' % i, nn.BatchNorm2d(feat_maps[i + 1]))
if act_fct is not None:
net.add_module('nl_%i' % i, act_fct())
def calc_fov(x, kernels, paddings=None, dilations=None, strides=None):
l_x = x
n_layer = len(kernels)
if paddings is None:
paddings = [0.] * n_layer
if dilations is None:
dilations = [1.] * n_layer
if strides is None:
strides = [1.] * n_layer
for p, d, k, s in zip(paddings, dilations, kernels, strides):
l_x = calc_fov_layer(l_x, k, p, d, s)
return l_x
def calc_fov_layer(x, kernel, padding=0, dilation=1, stride=1):
p, d, k, s = padding, dilation, kernel, float(stride)
    print('s', s, 'p', p, 'd', d, 'k', k)
if np.floor((x + 2. * p - d * (k - 1.) - 1.) / s + 1.) != (x + 2. * p - d * (k - 1.) - 1.) / s + 1.: # noqa: E501
print('boundary problems')
return np.floor((x + 2. * p - d * (k - 1.) - 1.) / s + 1.)
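# --- Editor's note: hedged worked example, not part of the original module. ---
# calc_fov applies x_out = floor((x + 2p - d*(k - 1) - 1)/s + 1) per layer. For
# the ConvEncoder defaults (28x28 input, kernels (5, 3, 3), unit stride and
# dilation, no padding) the spatial size shrinks 28 -> 24 -> 22 -> 20:
def _calc_fov_example():
    return calc_fov(28, kernels=[5, 3, 3])  # -> 20.0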
|
8d4c4477f989207784ba5a5a010557a84160fed8
|
b6496c1fd252fd71119ef09e78458547a9fc2d28
|
/src/webargs/multidictproxy.py
|
a2771781adad4d9c3432a0a7b10b05a7ddb8458e
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
marshmallow-code/webargs
|
65a1488758d3b7cd155eeacfafb95e666c8110d6
|
fafa9414ae9db0af8e1714c022ad228ceb6f1102
|
refs/heads/dev
| 2023-08-29T14:18:33.276655
| 2023-08-29T01:17:01
| 2023-08-29T06:52:40
| 16,899,445
| 673
| 86
|
MIT
| 2023-09-14T01:11:41
| 2014-02-17T02:16:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,080
|
py
|
multidictproxy.py
|
from collections.abc import Mapping
import typing
import marshmallow as ma
class MultiDictProxy(Mapping):
"""
    A proxy object which wraps multidict types along with a matching schema.
Whenever a value is looked up, it is checked against the schema to see if
there is a matching field where `is_multiple` is True. If there is, then
the data should be loaded as a list or tuple.
In all other cases, __getitem__ proxies directly to the input multidict.
"""
def __init__(
self,
multidict,
schema: ma.Schema,
known_multi_fields: typing.Tuple[typing.Type, ...] = (
ma.fields.List,
ma.fields.Tuple,
),
):
self.data = multidict
self.known_multi_fields = known_multi_fields
self.multiple_keys = self._collect_multiple_keys(schema)
def _is_multiple(self, field: ma.fields.Field) -> bool:
"""Return whether or not `field` handles repeated/multi-value arguments."""
# fields which set `is_multiple = True/False` will have the value selected,
# otherwise, we check for explicit criteria
is_multiple_attr = getattr(field, "is_multiple", None)
if is_multiple_attr is not None:
return is_multiple_attr
return isinstance(field, self.known_multi_fields)
def _collect_multiple_keys(self, schema: ma.Schema):
result = set()
for name, field in schema.fields.items():
if not self._is_multiple(field):
continue
result.add(field.data_key if field.data_key is not None else name)
return result
def __getitem__(self, key):
val = self.data.get(key, ma.missing)
if val is ma.missing or key not in self.multiple_keys:
return val
if hasattr(self.data, "getlist"):
return self.data.getlist(key)
if hasattr(self.data, "getall"):
return self.data.getall(key)
if isinstance(val, (list, tuple)):
return val
if val is None:
return None
return [val]
def __str__(self): # str(proxy) proxies to str(proxy.data)
return str(self.data)
def __repr__(self):
return "MultiDictProxy(data={!r}, multiple_keys={!r})".format(
self.data, self.multiple_keys
)
def __delitem__(self, key):
del self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __getattr__(self, name):
return getattr(self.data, name)
def __iter__(self):
for x in iter(self.data):
# special case for header dicts which produce an iterator of tuples
# instead of an iterator of strings
if isinstance(x, tuple):
yield x[0]
else:
yield x
def __contains__(self, x):
return x in self.data
def __len__(self):
return len(self.data)
def __eq__(self, other):
return self.data == other
def __ne__(self, other):
return self.data != other
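# --- Editor's note: hedged usage sketch, not part of the original module. ---
# With a List field in the schema, keys registered in multiple_keys come back
# wrapped as lists, while scalar keys proxy straight through; a plain dict
# stands in for a real multidict here.
def _multidictproxy_sketch():
    class _Schema(ma.Schema):
        tags = ma.fields.List(ma.fields.Str())
        name = ma.fields.Str()
    proxy = MultiDictProxy({"tags": "a", "name": "x"}, _Schema())
    assert proxy["tags"] == ["a"]  # "tags" is a multiple-value field
    assert proxy["name"] == "x"    # passed through unchanged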
|
846ba0f3cc70d5d9bf6f86975d59340f706e04bc
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/golem.py
|
c33d950191a0edb0cec75fbfabc4df2a69a55ecb
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
golem.py
|
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
)
class GolemIE(InfoExtractor):
_VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
_TEST = {
'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
'info_dict': {
'id': '14095',
'format_id': 'high',
'ext': 'mp4',
'title': 'iPhone 6 und 6 Plus - Test',
'duration': 300.44,
'filesize': 65309548,
}
}
_PREFIX = 'http://video.golem.de'
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
info = {
'id': video_id,
'title': config.findtext('./title', 'golem'),
'duration': self._float(config.findtext('./playtime'), 'duration'),
}
formats = []
for e in config:
url = e.findtext('./url')
if not url:
continue
formats.append({
'format_id': compat_str(e.tag),
'url': compat_urlparse.urljoin(self._PREFIX, url),
'height': self._int(e.get('height'), 'height'),
'width': self._int(e.get('width'), 'width'),
                'filesize': self._int(e.findtext('./filesize'), 'filesize'),
'ext': determine_ext(e.findtext('./filename')),
})
info['formats'] = formats
thumbnails = []
for e in config.findall('.//teaser'):
url = e.findtext('./url')
if not url:
continue
thumbnails.append({
'url': compat_urlparse.urljoin(self._PREFIX, url),
'width': self._int(e.get('width'), 'thumbnail width'),
'height': self._int(e.get('height'), 'thumbnail height'),
})
info['thumbnails'] = thumbnails
return info
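# --- Editor's note: hedged usage sketch, not part of the original extractor. ---
# Extractors like GolemIE are normally driven through yt_dlp.YoutubeDL rather
# than instantiated directly; the URL is the one from _TEST above.
#
#   import yt_dlp
#   with yt_dlp.YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
#           download=False)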
|
ae107f6258d1fbc1faae480eb9f8a68a776cbed1
|
50177ddaa15d7a6c04d5669130f43fec383bf7f4
|
/tests/utils.py
|
744443dad8d3fd5ea7f3fc909faf3f2cd3759269
|
[
"Apache-2.0"
] |
permissive
|
Hipo/drf-extra-fields
|
a1b561ec3c0f7ae9ab5e9c52ef016ac5c163cc5c
|
8c18a7542c8a38fe3dccd1874a74a38410aa3a7f
|
refs/heads/master
| 2023-08-21T18:05:30.125318
| 2023-08-08T18:13:21
| 2023-08-08T18:13:21
| 21,973,580
| 635
| 123
|
Apache-2.0
| 2023-08-08T18:13:22
| 2014-07-18T08:35:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
utils.py
|
# Copied from django-rest-framework/tests/utils.py
from django.core.exceptions import ObjectDoesNotExist
class MockObject:
def __init__(self, **kwargs):
self._kwargs = kwargs
for key, val in kwargs.items():
setattr(self, key, val)
def __str__(self):
kwargs_str = ', '.join([
f'{key}={value}'
for key, value in sorted(self._kwargs.items())
])
return '<MockObject %s>' % kwargs_str
@property
def foo_property(self):
return MockQueryset(
[
MockObject(pk=3, name="foo"),
MockObject(pk=1, name="bar"),
MockObject(pk=2, name="baz"),
]
)
def foo_function(self):
return self.foo_property
@property
def bar_property(self):
return MockObject(pk=3, name="foo")
class MockQueryset:
def __init__(self, iterable):
self.items = iterable
def get(self, **lookup):
for item in self.items:
if all([
getattr(item, key, None) == value
for key, value in lookup.items()
]):
return item
raise ObjectDoesNotExist()
def __iter__(self):
return MockIterator(self.items)
class MockIterator:
def __init__(self, items):
self.items = items
self.index = 0
def __next__(self):
if self.index >= len(self.items):
raise StopIteration
self.index += 1
return self.items[self.index - 1]
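# Illustrative usage (not part of the original file):
#   qs = MockQueryset([MockObject(pk=1, name="a"), MockObject(pk=2, name="b")])
#   qs.get(pk=2)                # -> the second MockObject
#   [obj.name for obj in qs]    # -> ['a', 'b'], iteration goes through MockIterator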
|
blob_id: c7c3a90173aa9cd7ca05bc153897fd0c896a61c5
directory_id: 154d2907648416fcecefed51deac0f5d021edabb
path: /tests/file/conftest.py
content_id: 836025ee7673430da71d4ac83dc1174339c5b5d2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: mopidy/mopidy
snapshot_id: dbeb376d9c8f85fc0008195a963877b5e0192e18
revision_id: 3e8c978d6ffe22fb581a0fec7a47c489ae9bafd4
branch_name: refs/heads/develop
visit_date: 2023-08-30T02:09:54.224844
revision_date: 2023-08-21T16:29:02
committer_date: 2023-08-21T16:29:02
github_id: 447,036
star_events_count: 7,271
fork_events_count: 783
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-21T16:29:03
gha_created_at: 2009-12-23T14:25:36
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 702
extension: py
filename: conftest.py
content:
from unittest import mock
import pytest
from mopidy.file import backend
from tests import path_to_data_dir
@pytest.fixture()
def media_dirs():
return [str(path_to_data_dir(""))]
@pytest.fixture()
def follow_symlinks():
return False
@pytest.fixture()
def config(media_dirs, follow_symlinks):
return {
"proxy": {},
"file": {
"show_dotfiles": False,
"media_dirs": media_dirs,
"excluded_file_extensions": [".conf"],
"follow_symlinks": follow_symlinks,
"metadata_timeout": 1000,
},
}
@pytest.fixture()
def provider(config):
return backend.FileBackend(audio=mock.Mock(), config=config).library
|
blob_id: 61eef177692a5ca54e9c76a67a1e09028db9a841
directory_id: d668209e9951d249020765c011a836f193004c01
path: /tools/pnnx/tests/test_torch_gt.py
content_id: b88c7dec44caf719e9be414694d70cb5eb574239
detected_licenses: ["BSD-3-Clause", "Zlib", "BSD-2-Clause"]
license_type: permissive
repo_name: Tencent/ncnn
snapshot_id: d8371746c00439304c279041647362a723330a79
revision_id: 14b000d2b739bd0f169a9ccfeb042da06fa0a84a
branch_name: refs/heads/master
visit_date: 2023-08-31T14:04:36.635201
revision_date: 2023-08-31T04:19:23
committer_date: 2023-08-31T04:19:23
github_id: 95,879,426
star_events_count: 18,818
fork_events_count: 4,491
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T15:44:56
gha_created_at: 2017-06-30T10:55:37
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,681
extension: py
filename: test_torch_gt.py
content:
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x, y, z):
out0 = torch.gt(x, y)
out1 = torch.gt(y, y)
out2 = torch.gt(z, 1)
return out0, out1, out2
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(3, 16)
y = torch.rand(3, 16)
z = torch.rand(5, 9, 3)
a0, a1, a2 = net(x, y, z)
# export torchscript
mod = torch.jit.trace(net, (x, y, z))
mod.save("test_torch_gt.pt")
# torchscript to pnnx
import os
os.system("../src/pnnx test_torch_gt.pt inputshape=[3,16],[3,16],[5,9,3]")
# pnnx inference
import test_torch_gt_pnnx
b0, b1, b2 = test_torch_gt_pnnx.test_inference()
return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
blob_id: ad6380fa3ddc96af23f1938bc6b644d1d645edb7
directory_id: 9c87c7ddaf5011cc475ec6f4343cc1c7ff290b33
path: /tests/example/test_regular_examples.py
content_id: effc47a6dd0a5031a96601a6a43e62cdd6501883
detected_licenses: ["MIT"]
license_type: permissive
repo_name: earthobservations/wetterdienst
snapshot_id: c74d80bbb4ce178f3e42936ca7364f9bee66d83b
revision_id: 448fbd56b67978cf8f4215dedc02a11b89f66b01
branch_name: refs/heads/main
visit_date: 2023-08-08T12:02:45.115090
revision_date: 2023-07-30T16:13:41
committer_date: 2023-07-30T16:37:09
github_id: 160,953,150
star_events_count: 283
fork_events_count: 42
gha_license_id: MIT
gha_event_created_at: 2023-09-10T22:36:33
gha_created_at: 2018-12-08T15:39:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,025
extension: py
filename: test_regular_examples.py
content:
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from pathlib import Path
import pytest
from tests.conftest import ENSURE_ECCODES_PDBUFR, IS_CI, IS_LINUX
HERE = Path(__name__).parent.absolute()
EXAMPLES_DIR = HERE.parent.parent / "example"
@pytest.mark.cflake
def test_regular_examples():
from example import (
dwd_describe_fields,
mosmix_forecasts,
observations_sql,
observations_stations,
)
assert dwd_describe_fields.main() is None
assert mosmix_forecasts.main() is None
assert observations_sql.main() is None
assert observations_stations.main() is None
@pytest.mark.skipif(not ENSURE_ECCODES_PDBUFR, reason="eccodes and pdbufr required")
def test_pdbufr_examples():
from example import dwd_road_weather
assert dwd_road_weather.main() is None
@pytest.mark.skipif(IS_CI and IS_LINUX, reason="stalls on Mac/Windows in CI")
@pytest.mark.cflake
def test_gaussian_example():
from example import observations_station_gaussian_model
assert observations_station_gaussian_model.main() is None
# @pytest.mark.skipif(IS_CI, reason="radar examples not working in CI")
@pytest.mark.cflake
def test_radar_examples():
pytest.importorskip("wradlib")
from example.radar import (
radar_composite_rw,
radar_radolan_cdc,
radar_radolan_rw,
radar_site_dx,
radar_sweep_hdf5,
)
assert radar_composite_rw.main() is None
assert radar_radolan_cdc.main() is None
assert radar_radolan_rw.main() is None
assert radar_site_dx.main() is None
assert radar_sweep_hdf5.main() is None
@pytest.mark.skipif(IS_CI, reason="radar scans cause segfault in ci")
@pytest.mark.cflake
def test_radar_examples_failing():
pytest.importorskip("wradlib")
from example.radar import radar_scan_precip, radar_scan_volume
assert radar_scan_precip.main() is None
assert radar_scan_volume.main() is None
|
blob_id: 1a0f26cb911d8b8c40cce4943f5137597a7a47a4
directory_id: 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
path: /Python/longest-cycle-in-a-graph.py
content_id: 3ed4024bf384b08ce57149651485e38dc4f01f71
detected_licenses: ["MIT"]
license_type: permissive
repo_name: kamyu104/LeetCode-Solutions
snapshot_id: f54822059405ef4df737d2e9898b024f051fd525
revision_id: 4dc4e6642dc92f1983c13564cc0fd99917cab358
branch_name: refs/heads/master
visit_date: 2023-09-02T13:48:26.830566
revision_date: 2023-08-28T10:11:12
committer_date: 2023-08-28T10:11:12
github_id: 152,631,182
star_events_count: 4,549
fork_events_count: 1,651
gha_license_id: MIT
gha_event_created_at: 2023-05-31T06:10:33
gha_created_at: 2018-10-11T17:38:35
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 654
extension: py
filename: longest-cycle-in-a-graph.py
content:
# Time: O(n)
# Space: O(n)
# graph
class Solution(object):
def longestCycle(self, edges):
"""
:type edges: List[int]
:rtype: int
"""
result = -1
lookup = [-1]*len(edges)
idx = 0
for i in xrange(len(edges)):
if lookup[i] != -1:
continue
start = idx
while i != -1:
if lookup[i] != -1:
break
lookup[i] = idx
idx += 1
i = edges[i]
if i != -1 and lookup[i] >= start:
result = max(result, idx-lookup[i])
return result
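# How it works: every visited node gets a global timestamp in lookup; when a walk
# re-enters a node stamped during the current walk (lookup[i] >= start), the cycle
# length is idx - lookup[i].
# Illustrative check (Python 2, since the solution uses xrange):
#   Solution().longestCycle([3, 3, 4, 2, 3])  # -> 3 (cycle 2 -> 4 -> 3 -> 2)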
|
blob_id: 3151912b0e097323e0c4c4403353b0e445ea3248
directory_id: 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
path: /Python/array-of-doubled-pairs.py
content_id: cb77733a9ff80c53c3b37e3681a5f3fca463267a
detected_licenses: ["MIT"]
license_type: permissive
repo_name: kamyu104/LeetCode-Solutions
snapshot_id: f54822059405ef4df737d2e9898b024f051fd525
revision_id: 4dc4e6642dc92f1983c13564cc0fd99917cab358
branch_name: refs/heads/master
visit_date: 2023-09-02T13:48:26.830566
revision_date: 2023-08-28T10:11:12
committer_date: 2023-08-28T10:11:12
github_id: 152,631,182
star_events_count: 4,549
fork_events_count: 1,651
gha_license_id: MIT
gha_event_created_at: 2023-05-31T06:10:33
gha_created_at: 2018-10-11T17:38:35
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 392
extension: py
filename: array-of-doubled-pairs.py
content:
# Time: O(n + klogk)
# Space: O(k)
import collections
class Solution(object):
def canReorderDoubled(self, A):
"""
:type A: List[int]
:rtype: bool
"""
count = collections.Counter(A)
for x in sorted(count, key=abs):
if count[x] > count[2*x]:
return False
count[2*x] -= count[x]
return True
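# How it works: scanning values in order of absolute value guarantees each x is
# matched against 2*x before 2*x could be consumed by a larger pair.
# Illustrative checks:
#   Solution().canReorderDoubled([4, -2, 2, -4])  # -> True, pairs (-2, -4), (2, 4)
#   Solution().canReorderDoubled([3, 1, 3, 6])    # -> False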
|
blob_id: d9fa7b90bbef844ba12c15794e9c419ff64b28e1
directory_id: 2dd26e031162e75f37ecb1f7dd7f675eeb634c63
path: /nemo/collections/nlp/losses/sgd_loss.py
content_id: 0a109a609d5c403ec7b5d561dda05059c0af88d1
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: NVIDIA/NeMo
snapshot_id: 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
revision_id: c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
branch_name: refs/heads/main
visit_date: 2023-08-21T15:28:04.447838
revision_date: 2023-08-21T00:49:36
committer_date: 2023-08-21T00:49:36
github_id: 200,722,670
star_events_count: 7,957
fork_events_count: 1,986
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T18:49:54
gha_created_at: 2019-08-05T20:16:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,084
extension: py
filename: sgd_loss.py
content:
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py
'''
import torch
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import ChannelType, LabelsType, LogitsType, LossType, NeuralType
from nemo.utils import logging
__all__ = ['SGDDialogueStateLoss']
class SGDDialogueStateLoss(Loss, Typing):
"""
Neural module which implements loss for SGD model.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
logit_intent_status: Output of SGD model
intent_status: intent label
logit_req_slot_status: Output of SGD model
requested_slot_status: Takes value 1 if the corresponding slot is requested, 0 otherwise
logit_cat_slot_status: Output of SGD model
categorical_slot_status: The status of each categorical slot in the service
logit_cat_slot_value_status: Output of SGD model
categorical_slot_value_status: Takes value 1 if the corresponding slot value is correct, 0 otherwise
logit_noncat_slot_status: Output of SGD model
            noncategorical_slot_status: The status of each noncategorical slot in the service
logit_spans: Output of SGD model
noncategorical_slot_value_start: The index of the starting subword corresponding to the slot span for a non-categorical slot value
noncategorical_slot_value_end: The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value
task_mask: Mask contains 1 if its the current task, 0 otherwise
"""
return {
"logit_intent_status": NeuralType(('B', 'T'), LogitsType()),
"intent_status": NeuralType(('B'), LabelsType()),
"logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()),
"requested_slot_status": NeuralType(('B'), LabelsType()),
"logit_cat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"categorical_slot_status": NeuralType(('B'), LabelsType()),
"logit_cat_slot_value_status": NeuralType(('B', 'T'), LogitsType()),
"categorical_slot_value_status": NeuralType(('B'), LabelsType()),
"logit_noncat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"noncategorical_slot_status": NeuralType(('B'), LabelsType()),
"logit_spans": NeuralType(('B', 'T', 'D'), LogitsType()),
"noncategorical_slot_value_start": NeuralType(('B'), LabelsType()),
"noncategorical_slot_value_end": NeuralType(('B'), LabelsType()),
"task_mask": NeuralType(('B', 'T'), ChannelType()),
}
@property
def output_types(self):
"""
Returns definitions of module output ports.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, reduction: str = 'mean'):
"""
Args:
reduction: specifies the reduction to apply to the final loss, choose 'mean' or 'sum'
"""
super().__init__()
if reduction not in ['mean', 'sum']:
logging.warning(f'{reduction} reduction is not supported. Setting reduction to "mean"')
reduction = 'mean'
self.reduction = reduction
self._cross_entropy = torch.nn.CrossEntropyLoss(reduction=self.reduction)
self._cross_entropy_bin = torch.nn.BCEWithLogitsLoss(reduction=self.reduction)
def _helper(self, logits, labels, loss_mask=None):
"""
        flattens logits and labels according to the loss mask
Args:
logits: logits
labels: labels
loss_mask: loss mask
Returns:
logits_flatten: flattened logits where loss mask is true
labels_flatten: flattened labels where loss mask is true
"""
logits_flatten = torch.flatten(logits, start_dim=0, end_dim=-2)
labels_flatten = torch.flatten(labels, start_dim=0, end_dim=-1)
if loss_mask is not None:
if loss_mask.dtype is not torch.bool:
loss_mask = loss_mask > 0.5
loss_mask_flatten = torch.flatten(loss_mask, start_dim=0, end_dim=-1)
logits_flatten = logits_flatten[loss_mask_flatten]
labels_flatten = labels_flatten[loss_mask_flatten]
return logits_flatten, labels_flatten
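    # Illustrative shapes for _helper (not from the original file): logits of
    # shape (B, T, C) flatten to (B*T, C) and labels (B, T) to (B*T,); a boolean
    # mask of shape (B, T) then keeps only the masked rows, yielding (N, C)
    # logits and (N,) labels with N = mask.sum().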
@typecheck()
def forward(
self,
logit_intent_status,
intent_status,
logit_req_slot_status,
requested_slot_status,
logit_cat_slot_status,
categorical_slot_status,
logit_cat_slot_value_status,
categorical_slot_value_status,
logit_noncat_slot_status,
noncategorical_slot_status,
logit_spans,
noncategorical_slot_value_start,
noncategorical_slot_value_end,
task_mask,
):
# Intent loss
old_logit_intent_status = logit_intent_status
logit_intent_status, intent_status = self._helper(logit_intent_status, intent_status, task_mask[:, 0])
if len(intent_status) == 0:
intent_loss = torch.clamp(torch.max(old_logit_intent_status.view(-1)), 0, 0)
else:
intent_loss = self._cross_entropy_bin(logit_intent_status.squeeze(dim=-1), intent_status)
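        # Note: torch.clamp(torch.max(...), 0, 0) above yields a zero-valued loss
        # that is still attached to the autograd graph, so a task with no examples
        # in the current batch contributes nothing without breaking backpropagation.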
old_logit_req_slot_status = logit_req_slot_status
logit_req_slot_status, requested_slot_status = self._helper(
logit_req_slot_status, requested_slot_status, task_mask[:, 1]
)
if len(requested_slot_status) == 0:
requested_slot_loss = torch.clamp(torch.max(old_logit_req_slot_status.view(-1)), 0, 0)
else:
requested_slot_loss = self._cross_entropy_bin(logit_req_slot_status.squeeze(dim=-1), requested_slot_status)
old_logit_cat_slot_status = logit_cat_slot_status
logit_cat_slot_status, categorical_slot_status = self._helper(
logit_cat_slot_status, categorical_slot_status, task_mask[:, 2]
)
if len(categorical_slot_status) == 0:
cat_slot_status_loss = torch.clamp(torch.max(old_logit_cat_slot_status.view(-1)), 0, 0)
else:
cat_slot_status_loss = self._cross_entropy(logit_cat_slot_status, categorical_slot_status,)
old_logit_cat_slot_value_status = logit_cat_slot_value_status
logit_cat_slot_value_status, categorical_slot_value_status = self._helper(
logit_cat_slot_value_status, categorical_slot_value_status, task_mask[:, 3]
)
if len(categorical_slot_value_status) == 0:
cat_slot_value_status_loss = torch.clamp(torch.max(old_logit_cat_slot_value_status.view(-1)), 0, 0)
else:
cat_slot_value_status_loss = self._cross_entropy_bin(
logit_cat_slot_value_status.squeeze(dim=-1), categorical_slot_value_status
)
old_logit_noncat_slot_status = logit_noncat_slot_status
logit_noncat_slot_status, noncategorical_slot_status = self._helper(
logit_noncat_slot_status, noncategorical_slot_status, task_mask[:, 4]
)
if len(noncategorical_slot_status) == 0:
noncat_slot_status_loss = torch.clamp(torch.max(old_logit_noncat_slot_status.view(-1)), 0, 0)
else:
noncat_slot_status_loss = self._cross_entropy(logit_noncat_slot_status, noncategorical_slot_status,)
logit_noncat_slot_start, logit_noncat_slot_end = torch.unbind(logit_spans, dim=-1)
_, max_num_tokens = logit_noncat_slot_start.size()
old_logit_noncat_slot_start = logit_noncat_slot_start
logit_noncat_slot_start, noncategorical_slot_value_start = self._helper(
logit_noncat_slot_start, noncategorical_slot_value_start, task_mask[:, 5]
)
if len(noncategorical_slot_value_start) == 0:
span_start_loss = torch.clamp(torch.max(old_logit_noncat_slot_start.view(-1)), 0, 0)
else:
span_start_loss = self._cross_entropy(logit_noncat_slot_start, noncategorical_slot_value_start)
old_logit_noncat_slot_end = logit_noncat_slot_end
logit_noncat_slot_end, noncategorical_slot_value_end = self._helper(
logit_noncat_slot_end, noncategorical_slot_value_end, task_mask[:, 5]
)
if len(noncategorical_slot_value_end) == 0:
span_end_loss = torch.clamp(torch.max(old_logit_noncat_slot_end.view(-1)), 0, 0)
else:
span_end_loss = self._cross_entropy(logit_noncat_slot_end, noncategorical_slot_value_end)
losses = {
"intent_loss": intent_loss,
"requested_slot_loss": requested_slot_loss,
"cat_slot_status_loss": cat_slot_status_loss,
"cat_slot_value_status_loss": cat_slot_value_status_loss,
"noncat_slot_status_loss": noncat_slot_status_loss,
"span_start_loss": span_start_loss,
"span_end_loss": span_end_loss,
}
total_loss = sum(losses.values())
if self.reduction == 'mean':
total_loss = total_loss / len(losses)
else:
batch_size = logit_intent_status.shape[0]
total_loss = total_loss / batch_size
return total_loss
|
blob_id: 6049469e94789a0a2bfae6444babc7f46535cbff
directory_id: 7eb606a7957e5500f163c93dc4b19418cf9cf335
path: /ludwig/features/vector_feature.py
content_id: b3ebf2e7649074e2ad38893595f8c43c039d94c4
detected_licenses: ["Apache-2.0", "MIT"]
license_type: permissive
repo_name: ludwig-ai/ludwig
snapshot_id: 024f74da86567a57ec8e30efcb4600f0c52333a1
revision_id: e1d023e41606c9b76b35e1d231c2f13368a30eca
branch_name: refs/heads/master
visit_date: 2023-09-03T08:07:32.978301
revision_date: 2023-09-01T19:39:32
committer_date: 2023-09-01T19:39:32
github_id: 163,346,054
star_events_count: 2,567
fork_events_count: 285
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:34:52
gha_created_at: 2018-12-27T23:58:12
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,929
extension: py
filename: vector_feature.py
content:
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from typing import Dict, List, Union
import numpy as np
import torch
from ludwig.constants import COLUMN, HIDDEN, LOGITS, NAME, PREDICTIONS, PROC_COLUMN, VECTOR
from ludwig.features.base_feature import InputFeature, OutputFeature, PredictModule
from ludwig.schema.features.vector_feature import VectorInputFeatureConfig, VectorOutputFeatureConfig
from ludwig.types import (
FeatureMetadataDict,
FeaturePostProcessingOutputDict,
PreprocessingConfigDict,
TrainingSetMetadataDict,
)
from ludwig.utils import output_feature_utils
from ludwig.utils.types import TorchscriptPreprocessingInput
logger = logging.getLogger(__name__)
class _VectorPreprocessing(torch.nn.Module):
def forward(self, v: TorchscriptPreprocessingInput) -> torch.Tensor:
if torch.jit.isinstance(v, torch.Tensor):
out = v
elif torch.jit.isinstance(v, List[torch.Tensor]):
out = torch.stack(v)
elif torch.jit.isinstance(v, List[str]):
vectors = []
for sample in v:
vector = torch.tensor([float(x) for x in sample.split()], dtype=torch.float32)
vectors.append(vector)
out = torch.stack(vectors)
else:
raise ValueError(f"Unsupported input: {v}")
if out.isnan().any():
raise ValueError("Scripted NaN handling not implemented for Vector feature")
return out
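# Illustrative inputs accepted by _VectorPreprocessing.forward (for 3-dim vectors):
#   torch.rand(2, 3)                 -> returned unchanged
#   [torch.rand(3), torch.rand(3)]   -> stacked into shape (2, 3)
#   ["1 2 3", "4 5 6"]               -> parsed per sample and stacked into (2, 3)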
class _VectorPostprocessing(torch.nn.Module):
def __init__(self):
super().__init__()
self.predictions_key = PREDICTIONS
self.logits_key = LOGITS
def forward(self, preds: Dict[str, torch.Tensor], feature_name: str) -> FeaturePostProcessingOutputDict:
predictions = output_feature_utils.get_output_feature_tensor(preds, feature_name, self.predictions_key)
logits = output_feature_utils.get_output_feature_tensor(preds, feature_name, self.logits_key)
return {self.predictions_key: predictions, self.logits_key: logits}
class _VectorPredict(PredictModule):
def forward(self, inputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:
logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, self.logits_key)
return {self.predictions_key: logits, self.logits_key: logits}
class VectorFeatureMixin:
@staticmethod
def type():
return VECTOR
@staticmethod
def cast_column(column, backend):
return column
@staticmethod
def get_feature_meta(
column, preprocessing_parameters: PreprocessingConfigDict, backend, is_input_feature: bool
) -> FeatureMetadataDict:
return {"preprocessing": preprocessing_parameters}
@staticmethod
def add_feature_data(
feature_config,
input_df,
proc_df,
metadata,
preprocessing_parameters: PreprocessingConfigDict,
backend,
skip_save_processed_input,
):
"""Expects all the vectors to be of the same size.
The vectors need to be whitespace delimited strings. Missing values are not handled.
"""
if len(input_df[feature_config[COLUMN]]) == 0:
raise ValueError("There are no vectors in the dataset provided")
# Convert the string of features into a numpy array
try:
proc_df[feature_config[PROC_COLUMN]] = backend.df_engine.map_objects(
input_df[feature_config[COLUMN]], lambda x: np.array(x.split(), dtype=np.float32)
)
except ValueError:
logger.error(
"Unable to read the vector data. Make sure that all the vectors"
" are of the same size and do not have missing/null values."
)
raise
# Determine vector size
vector_size = backend.df_engine.compute(proc_df[feature_config[PROC_COLUMN]].map(len).max())
vector_size_param = preprocessing_parameters.get("vector_size")
if vector_size_param is not None:
# TODO(travis): do we even need a user param for vector size if we're going to auto-infer it in all
# cases? Is this only useful as a sanity check for the user to make sure their data conforms to
# expectations?
if vector_size != vector_size_param:
raise ValueError(
"The user provided value for vector size ({}) does not "
"match the value observed in the data: {}".format(preprocessing_parameters, vector_size)
)
else:
logger.debug(f"Detected vector size: {vector_size}")
metadata[feature_config[NAME]]["vector_size"] = vector_size
return proc_df
class VectorInputFeature(VectorFeatureMixin, InputFeature):
def __init__(self, input_feature_config: VectorInputFeatureConfig, encoder_obj=None, **kwargs):
super().__init__(input_feature_config, **kwargs)
# input_feature_config.encoder.input_size = input_feature_config.encoder.vector_size
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(input_feature_config.encoder)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
assert isinstance(inputs, torch.Tensor)
assert inputs.dtype in [torch.float32, torch.float64]
assert len(inputs.shape) == 2
inputs_encoded = self.encoder_obj(inputs)
return inputs_encoded
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.encoder_obj.config.input_size])
@property
def output_shape(self) -> torch.Size:
return self.encoder_obj.output_shape
@staticmethod
def update_config_with_metadata(feature_config, feature_metadata, *args, **kwargs):
feature_config.encoder.input_size = feature_metadata["vector_size"]
@staticmethod
def create_preproc_module(metadata: TrainingSetMetadataDict) -> torch.nn.Module:
return _VectorPreprocessing()
@staticmethod
def get_schema_cls():
return VectorInputFeatureConfig
class VectorOutputFeature(VectorFeatureMixin, OutputFeature):
def __init__(
self,
output_feature_config: Union[VectorOutputFeatureConfig, Dict],
output_features: Dict[str, OutputFeature],
**kwargs,
):
self.vector_size = output_feature_config.vector_size
super().__init__(output_feature_config, output_features, **kwargs)
output_feature_config.decoder.output_size = self.vector_size
self.decoder_obj = self.initialize_decoder(output_feature_config.decoder)
self._setup_loss()
self._setup_metrics()
def logits(self, inputs, **kwargs): # hidden
hidden = inputs[HIDDEN]
return self.decoder_obj(hidden)
def metric_kwargs(self):
return dict(num_outputs=self.output_shape[0])
def create_predict_module(self) -> PredictModule:
return _VectorPredict()
def get_prediction_set(self):
return {PREDICTIONS, LOGITS}
@classmethod
def get_output_dtype(cls):
return torch.float32
@property
def output_shape(self) -> torch.Size:
return torch.Size([self.vector_size])
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.input_size])
@staticmethod
def update_config_with_metadata(feature_config, feature_metadata, *args, **kwargs):
feature_config.vector_size = feature_metadata["vector_size"]
@staticmethod
def calculate_overall_stats(predictions, targets, train_set_metadata):
# no overall stats, just return empty dictionary
return {}
def postprocess_predictions(
self,
result,
metadata,
):
predictions_col = f"{self.feature_name}_{PREDICTIONS}"
if predictions_col in result:
result[predictions_col] = result[predictions_col].map(lambda pred: pred.tolist())
return result
@staticmethod
def create_postproc_module(metadata: TrainingSetMetadataDict) -> torch.nn.Module:
return _VectorPostprocessing()
@staticmethod
def get_schema_cls():
return VectorOutputFeatureConfig
|
blob_id: 1d108f8c1ef76afe05f9c61147aec0c643692eea
directory_id: 8a51a96f61699f0318315ccc89cef39f6866f2b5
path: /src/tools/rcgen
content_id: 0c84772163cd0d092733580e51958bb0a8a28970
detected_licenses: ["PostgreSQL"]
license_type: permissive
repo_name: postgres/postgres
snapshot_id: 979febf2b41c00090d1256228f768f33e7ef3b6f
revision_id: b5934bfd6071fed3a38cea0cfaa93afda63d9c0c
branch_name: refs/heads/master
visit_date: 2023-08-31T00:10:01.373472
revision_date: 2023-08-30T23:07:48
committer_date: 2023-08-30T23:07:48
github_id: 927,442
star_events_count: 13,691
fork_events_count: 4,807
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-09T13:59:15
gha_created_at: 2010-09-21T11:35:45
gha_language: C
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,431
extension: rcgen
filename: rcgen
content:
#!/usr/bin/env python3
# Helper for building resource files when building for windows. Always
# generates a .rc from the input .rc file. When building with msvc we
# additionally generate a .res file with 'rc', when building with gcc, we use
# windres to directly generate a .o. Additionally we generate basic
# dependencies if depfile is specified.
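# A hypothetical invocation (flags come from the argparse definitions below; all
# paths and names are illustrative only):
#   rcgen --srcdir . --builddir build --windres /usr/bin/x86_64-w64-mingw32-windres \
#         --input src/port/win32ver.rc --rcout build/foo.rc --out build/foo.o \
#         --NAME foo --VFT_TYPE VFT_APP --FILEENDING exe --FILEDESC "foo binary"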
import argparse
import os
import subprocess
import sys
parser = argparse.ArgumentParser(description='generate PostgreSQL rc file')
parser.add_argument('--srcdir', type=os.path.abspath,
required=True)
parser.add_argument('--builddir', type=os.path.abspath,
required=True)
binaries = parser.add_argument_group('binaries')
binaries.add_argument('--windres', type=os.path.abspath)
binaries.add_argument('--rc', type=os.path.abspath)
inout = parser.add_argument_group('inout')
inout.add_argument('--depfile', type=argparse.FileType('w'))
inout.add_argument('--input', type=argparse.FileType('r'),
required=True)
inout.add_argument('--rcout', type=argparse.FileType('w'),
required=True)
inout.add_argument('--out', type=str,
required=True)
replacements = parser.add_argument_group('replacements')
replacements.add_argument('--FILEDESC', type=str)
replacements.add_argument('--NAME', type=str, required=True)
replacements.add_argument('--VFT_TYPE', type=str, required=True)
replacements.add_argument('--FILEENDING', type=str, required=True)
replacements.add_argument('--ICO', type=str)
args = parser.parse_args()
# determine replacement strings
internal_name = '"{0}"'.format(args.NAME)
original_name = '"{0}.{1}"'.format(args.NAME, args.FILEENDING)
# if no description is passed in, generate one based on the name
if args.FILEDESC:
filedesc = args.FILEDESC
elif args.NAME:
if args.VFT_TYPE == 'VFT_DLL':
filedesc = 'PostgreSQL {0} library'.format(args.NAME)
else:
filedesc = 'PostgreSQL {0} binary'.format(args.NAME)
filedesc = '"{0}"'.format(filedesc)
if args.ICO:
ico = 'IDI_ICON ICON "{0}"'.format(args.ICO)
if args.depfile:
args.depfile.write("{0} : {1}\n".format(args.rcout.name, args.ICO))
else:
ico = ''
data = args.input.read()
data = data.replace('VFT_APP', args.VFT_TYPE)
data = data.replace('_INTERNAL_NAME_', internal_name)
data = data.replace('_ORIGINAL_NAME_', original_name)
data = data.replace('FILEDESC', filedesc)
data = data.replace("_ICO_", ico)
args.rcout.write(data)
args.rcout.close()
if args.windres:
cmd = [
args.windres,
'-I{0}/src/include/'.format(args.builddir),
'-I{0}/src/include/'.format(args.srcdir),
'-o', args.out, '-i', args.rcout.name,
]
elif args.rc:
cmd = [
args.rc, '/nologo',
'-I{0}/src/include/'.format(args.builddir),
'-I{0}/src/include/'.format(args.srcdir),
'/fo', args.out, args.rcout.name,
]
else:
sys.exit('either --windres or --rc needs to be specified')
sp = subprocess.run(cmd)
if sp.returncode != 0:
sys.exit(sp.returncode)
# It'd be nicer if we could generate correct dependencies here, but 'rc'
# doesn't support doing so. It's unlikely we'll ever need more, so...
if args.depfile:
args.depfile.write("{0} : {1}\n".format(
args.rcout.name, args.input.name))
args.depfile.write("{0} : {1}/{2}\n".format(
args.out, args.builddir, 'src/include/pg_config.h'))
|
blob_id: ac4c1451173567abf61af005110fb77f7d7299e3
directory_id: c26304a54824faa7c1b34bb7882ee7a335a8e7fb
path: /flink-python/pyflink/shell.py
content_id: ce81574c2194885b328967ed235bfb17824b3733
detected_licenses: ["BSD-3-Clause", "OFL-1.1", "ISC", "MIT", "Apache-2.0"]
license_type: permissive
repo_name: apache/flink
snapshot_id: 905e0709de6389fc9212a7c48a82669706c70b4a
revision_id: fbef3c22757a2352145599487beb84e02aaeb389
branch_name: refs/heads/master
visit_date: 2023-09-04T08:11:07.253750
revision_date: 2023-09-04T01:33:25
committer_date: 2023-09-04T01:33:25
github_id: 20,587,599
star_events_count: 23,573
fork_events_count: 14,781
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:49:04
gha_created_at: 2014-06-07T07:00:10
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,093
extension: py
filename: shell.py
content:
#!/usr/bin/env python
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import platform
import sys
from pyflink.common import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
from pyflink.metrics import *
utf8_out = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
welcome_msg = u'''
\u2592\u2593\u2588\u2588\u2593\u2588\u2588\u2592
\u2593\u2588\u2588\u2588\u2588\u2592\u2592\u2588\u2593\u2592\u2593\u2588\u2588\u2588\u2593\u2592
\u2593\u2588\u2588\u2588\u2593\u2591\u2591 \u2592\u2592\u2592\u2593\u2588\u2588\u2592 \u2592
\u2591\u2588\u2588\u2592 \u2592\u2592\u2593\u2593\u2588\u2593\u2593\u2592\u2591 \u2592\u2588\u2588\u2588\u2588
\u2588\u2588\u2592 \u2591\u2592\u2593\u2588\u2588\u2588\u2592 \u2592\u2588\u2592\u2588\u2592
\u2591\u2593\u2588 \u2588\u2588\u2588 \u2593\u2591\u2592\u2588\u2588
\u2593\u2588 \u2592\u2592\u2592\u2592\u2592\u2593\u2588\u2588\u2593\u2591\u2592\u2591\u2593\u2593\u2588
\u2588\u2591 \u2588 \u2592\u2592\u2591 \u2588\u2588\u2588\u2593\u2593\u2588 \u2592\u2588\u2592\u2592\u2592
\u2588\u2588\u2588\u2588\u2591 \u2592\u2593\u2588\u2593 \u2588\u2588\u2592\u2592\u2592 \u2593\u2588\u2588\u2588\u2592
\u2591\u2592\u2588\u2593\u2593\u2588\u2588 \u2593\u2588\u2592 \u2593\u2588\u2592\u2593\u2588\u2588\u2593 \u2591\u2588\u2591
\u2593\u2591\u2592\u2593\u2588\u2588\u2588\u2588\u2592 \u2588\u2588 \u2592\u2588 \u2588\u2593\u2591\u2592\u2588\u2592\u2591\u2592\u2588\u2592
\u2588\u2588\u2588\u2593\u2591\u2588\u2588\u2593 \u2593\u2588 \u2588 \u2588\u2593 \u2592\u2593\u2588\u2593\u2593\u2588\u2592
\u2591\u2588\u2588\u2593 \u2591\u2588\u2591 \u2588 \u2588\u2592 \u2592\u2588\u2588\u2588\u2588\u2588\u2593\u2592 \u2588\u2588\u2593\u2591\u2592
\u2588\u2588\u2588\u2591 \u2591 \u2588\u2591 \u2593 \u2591\u2588 \u2588\u2588\u2588\u2588\u2588\u2592\u2591\u2591 \u2591\u2588\u2591\u2593 \u2593\u2591
\u2588\u2588\u2593\u2588 \u2592\u2592\u2593\u2592 \u2593\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2593\u2591 \u2592\u2588\u2592 \u2592\u2593 \u2593\u2588\u2588\u2593
\u2592\u2588\u2588\u2593 \u2593\u2588 \u2588\u2593\u2588 \u2591\u2592\u2588\u2588\u2588\u2588\u2588\u2593\u2593\u2592\u2591 \u2588\u2588\u2592\u2592 \u2588 \u2592 \u2593\u2588\u2592
\u2593\u2588\u2593 \u2593\u2588 \u2588\u2588\u2593 \u2591\u2593\u2593\u2593\u2593\u2593\u2593\u2593\u2592 \u2592\u2588\u2588\u2593 \u2591\u2588\u2592
\u2593\u2588 \u2588 \u2593\u2588\u2588\u2588\u2593\u2592\u2591 \u2591\u2593\u2593\u2593\u2588\u2588\u2588\u2593 \u2591\u2592\u2591 \u2593\u2588
\u2588\u2588\u2593 \u2588\u2588\u2592 \u2591\u2592\u2593\u2593\u2588\u2588\u2588\u2593\u2593\u2593\u2593\u2593\u2588\u2588\u2588\u2588\u2588\u2588\u2593\u2592 \u2593\u2588\u2588\u2588 \u2588
\u2593\u2588\u2588\u2588\u2592 \u2588\u2588\u2588 \u2591\u2593\u2593\u2592\u2591\u2591 \u2591\u2593\u2588\u2588\u2588\u2588\u2593\u2591 \u2591\u2592\u2593\u2592 \u2588\u2593
\u2588\u2593\u2592\u2592\u2593\u2593\u2588\u2588 \u2591\u2592\u2592\u2591\u2591\u2591\u2592\u2592\u2592\u2592\u2593\u2588\u2588\u2593\u2591 \u2588\u2593
\u2588\u2588 \u2593\u2591\u2592\u2588 \u2593\u2593\u2593\u2593\u2592\u2591\u2591 \u2592\u2588\u2593 \u2592\u2593\u2593\u2588\u2588\u2593 \u2593\u2592 \u2592\u2592\u2593
\u2593\u2588\u2593 \u2593\u2592\u2588 \u2588\u2593\u2591 \u2591\u2592\u2593\u2593\u2588\u2588\u2592 \u2591\u2593\u2588\u2592 \u2592\u2592\u2592\u2591\u2592\u2592\u2593\u2588\u2588\u2588\u2588\u2588\u2592
\u2588\u2588\u2591 \u2593\u2588\u2592\u2588\u2592 \u2592\u2593\u2593\u2592 \u2593\u2588 \u2588\u2591 \u2591\u2591\u2591\u2591 \u2591\u2588\u2592
\u2593\u2588 \u2592\u2588\u2593 \u2591 \u2588\u2591 \u2592\u2588 \u2588\u2593
\u2588\u2593 \u2588\u2588 \u2588\u2591 \u2593\u2593 \u2592\u2588\u2593\u2593\u2593\u2592\u2588\u2591
\u2588\u2593 \u2591\u2593\u2588\u2588\u2591 \u2593\u2592 \u2593\u2588\u2593\u2592\u2591\u2591\u2591\u2592\u2593\u2588\u2591 \u2592\u2588
\u2588\u2588 \u2593\u2588\u2593\u2591 \u2592 \u2591\u2592\u2588\u2592\u2588\u2588\u2592 \u2593\u2593
\u2593\u2588\u2592 \u2592\u2588\u2593\u2592\u2591 \u2592\u2592 \u2588\u2592\u2588\u2593\u2592\u2592\u2591\u2591\u2592\u2588\u2588
\u2591\u2588\u2588\u2592 \u2592\u2593\u2593\u2592 \u2593\u2588\u2588\u2593\u2592\u2588\u2592 \u2591\u2593\u2593\u2593\u2593\u2592\u2588\u2593
\u2591\u2593\u2588\u2588\u2592 \u2593\u2591 \u2592\u2588\u2593\u2588 \u2591\u2591\u2592\u2592\u2592
\u2592\u2593\u2593\u2593\u2593\u2593\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2591\u2591\u2593\u2593 \u2593\u2591\u2592\u2588\u2591
F L I N K - P Y T H O N - S H E L L
NOTE: Use the prebound Table Environment to implement batch or streaming Table programs.
Streaming - Use 's_env' and 'st_env' variables
```
import os
import shutil
import tempfile
sink_path = tempfile.gettempdir() + '/streaming.csv'
if os.path.exists(sink_path):
if os.path.isfile(sink_path):
os.remove(sink_path)
else:
shutil.rmtree(sink_path)
s_env.set_parallelism(1)
t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
st_env.create_temporary_table("stream_sink", TableDescriptor.for_connector("filesystem")
.schema(Schema.new_builder()
.column("a", DataTypes.BIGINT())
.column("b", DataTypes.STRING())
.column("c", DataTypes.STRING())
.build())
.option("path", sink_path)
.format(FormatDescriptor.for_format("csv")
.option("field-delimiter", ",")
.build())
.build())
t.select(col('a') + 1, col('b'), col('c')).insert_into("stream_sink")
st_env.execute("stream_job")
# show the results
with open(os.path.join(sink_path, os.listdir(sink_path)[0]), 'r') as f:
print(f.read())
```
'''
utf8_out.write(welcome_msg)
s_env = StreamExecutionEnvironment.get_execution_environment()
st_env = StreamTableEnvironment.create(s_env)
|
blob_id: 10918e1927f0ef53ff7a40a7e0d784ef1e850624
directory_id: dee9d197c6adfbdb49cd9e33bd3f8614b7d98f06
path: /mt-bluebert/mt_bluebert/blue_exp_def.py
content_id: 02fd843527942c6acfdd3e9a85f0424e79870ebb
detected_licenses: ["LicenseRef-scancode-us-govt-public-domain"]
license_type: permissive
repo_name: ncbi-nlp/bluebert
snapshot_id: 2ccd72a19283f8a206e30426375322ec6831398d
revision_id: f4b8af9db9f8c4503d62d0c205de7256f38c5890
branch_name: refs/heads/master
visit_date: 2023-05-30T11:13:51.757657
revision_date: 2022-04-11T01:44:30
committer_date: 2022-04-11T01:44:30
github_id: 190,591,634
star_events_count: 422
fork_events_count: 73
gha_license_id: NOASSERTION
gha_event_created_at: 2023-03-25T01:21:44
gha_created_at: 2019-06-06T14:02:54
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,287
extension: py
filename: blue_exp_def.py
content:
from typing import Dict, Set
import yaml
from mt_bluebert.data_utils.task_def import TaskType, DataFormat, EncoderModelType
from mt_bluebert.data_utils.vocab import Vocabulary
from mt_bluebert.blue_metrics import BlueMetric
class BlueTaskDefs:
def __init__(self, task_def_path):
with open(task_def_path) as fp:
self.task_def_dic = yaml.load(fp, yaml.FullLoader)
self.label_mapper_map = {} # type: Dict[str, Vocabulary]
self.n_class_map = {} # type: Dict[str, int]
self.data_format_map = {}
self.task_type_map = {}
self.metric_meta_map = {}
self.enable_san_map = {}
self.dropout_p_map = {}
self.split_names_map = {}
self.encoder_type = None
for task, task_def in self.task_def_dic.items():
assert "_" not in task, "task name should not contain '_', current task name: %s" % task
self.n_class_map[task] = task_def["n_class"]
self.data_format_map[task] = DataFormat[task_def["data_format"]]
self.task_type_map[task] = TaskType[task_def["task_type"]]
self.metric_meta_map[task] = tuple(BlueMetric[metric_name] for metric_name in task_def["metric_meta"])
self.enable_san_map[task] = task_def["enable_san"]
if self.encoder_type is None:
self.encoder_type = EncoderModelType[task_def["encoder_type"]]
else:
if self.encoder_type != EncoderModelType[task_def["encoder_type"]]:
raise ValueError('The shared encoder has to be the same.')
if "labels" in task_def:
label_mapper = Vocabulary(True)
for label in task_def["labels"]:
label_mapper.add(label)
self.label_mapper_map[task] = label_mapper
else:
self.label_mapper_map[task] = None
if "dropout_p" in task_def:
self.dropout_p_map[task] = task_def["dropout_p"]
if 'split_names' in task_def:
self.split_names_map[task] = task_def['split_names']
else:
self.split_names_map[task] = ["train", "dev", "test"]
@property
def tasks(self) -> Set[str]:
return self.task_def_dic.keys()
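# A hypothetical task_def YAML entry consumed by BlueTaskDefs (field names come
# from the parser above; the task name and concrete values are illustrative only):
#   clinicalsts:
#     n_class: 1
#     data_format: PremiseAndOneHypothesis
#     task_type: Regression
#     metric_meta: [Pearson]
#     enable_san: false
#     encoder_type: BERT
#     dropout_p: 0.1
#     split_names: [train, dev, test]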
|
blob_id: 92f7cdcf98b48cfcacb626e16a1d2cbeba9e2453
directory_id: 99bdb3251fecee538e0630f15f6574054dfc1468
path: /bsp/stm32/libraries/STM32L4xx_HAL/SConscript
content_id: ebc7c4a01c87c2da154cbcea746a7f4c54f0fdda
detected_licenses: ["Apache-2.0", "Zlib", "LicenseRef-scancode-proprietary-license", "MIT", "BSD-3-Clause", "X11", "BSD-4-Clause-UC", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: RT-Thread/rt-thread
snapshot_id: 03a7c52c2aeb1b06a544143b0e803d72f47d1ece
revision_id: 3602f891211904a27dcbd51e5ba72fefce7326b2
branch_name: refs/heads/master
visit_date: 2023-09-01T04:10:20.295801
revision_date: 2023-08-31T16:20:55
committer_date: 2023-08-31T16:20:55
github_id: 7,408,108
star_events_count: 9,599
fork_events_count: 5,805
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:37:26
gha_created_at: 2013-01-02T14:49:21
gha_language: C
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,651
extension: SConscript
filename: SConscript
content:
import rtconfig
from building import *
# get current directory
cwd = GetCurrentDir()
# The set of source files associated with this SConscript file.
src = Split('''
CMSIS/Device/ST/STM32L4xx/Source/Templates/system_stm32l4xx.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_comp.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_cortex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_crc.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_crc_ex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_cryp.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_cryp_ex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dma.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dma_ex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_exti.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_pwr.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_pwr_ex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_rcc.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_rcc_ex.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_rng.c
STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_gpio.c
''')
if GetDepend(['RT_USING_SERIAL']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_uart.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_uart_ex.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_usart.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_usart_ex.c']
if GetDepend(['RT_USING_I2C']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_i2c.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_i2c_ex.c']
if GetDepend(['RT_USING_SPI']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_spi.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_spi_ex.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_qspi.c']
if GetDepend(['RT_USING_USB']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_hcd.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_pcd.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_pcd_ex.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_ll_usb.c']
if GetDepend(['RT_USING_CAN']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_can.c']
if GetDepend(['RT_USING_HWTIMER']) or GetDepend(['RT_USING_PWM']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_lptim.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_tim.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_tim_ex.c']
if GetDepend(['RT_USING_ADC']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_adc.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_adc_ex.c']
if GetDepend(['RT_USING_DAC']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dac.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dac_ex.c']
if GetDepend(['RT_USING_RTC']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_rtc.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_rtc_ex.c']
if GetDepend(['RT_USING_WDT']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_iwdg.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_wwdg.c']
if GetDepend(['RT_USING_SDIO']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_sd.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_sd_ex.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_ll_sdmmc.c']
if GetDepend(['RT_USING_AUDIO']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_sai.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_sai_ex.c']
if GetDepend(['RT_USING_MTD_NOR']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_nor.c']
if GetDepend(['RT_USING_MTD_NAND']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_nand.c']
if GetDepend(['RT_USING_PM']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_lptim.c']
if GetDepend(['BSP_USING_ON_CHIP_FLASH']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_flash.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_flash_ex.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_flash_ramfunc.c']
if GetDepend(['BSP_USING_FMC']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_ll_fmc.c']
if GetDepend(['BSP_USING_GFXMMU']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_gfxmmu.c']
if GetDepend(['BSP_USING_DSI']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dsi.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_dma2d.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_ll_dma2d.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_ltdc.c']
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_ltdc_ex.c']
if GetDepend(['BSP_USING_SRAM']):
src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_sram.c']
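# Enabling another peripheral follows the same pattern, e.g. (illustrative;
# BSP_USING_OPAMP is a hypothetical config flag):
# if GetDepend(['BSP_USING_OPAMP']):
#     src += ['STM32L4xx_HAL_Driver/Src/stm32l4xx_hal_opamp.c']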
path = [cwd + '/STM32L4xx_HAL_Driver/Inc',
cwd + '/CMSIS/Device/ST/STM32L4xx/Include']
CPPDEFINES = ['USE_HAL_DRIVER']
group = DefineGroup('Libraries', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES)
Return('group')
|
blob_id: 7f5840597ecbf5b38b850798c65bd1f3a6e16286
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/testData/override/classmethodNewStyle_after.py
content_id: 98f09cfcf529c9be668c3ddf4e5c0fc325d175e2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 153
extension: py
filename: classmethodNewStyle_after.py
content:
class A(object):
@classmethod
def m(cls):
pass
class B(A):
@classmethod
def m(cls):
<selection>super().m()</selection>
|
blob_id: 682bf23c8a83245a7f64cb9dd5973d8754392f0f
directory_id: 7f49e3702ef49c17491c1e7758ff8ce931c68e96
path: /CreatorUtils/imageCompress.py
content_id: 679763126ca25945e12ac12049c328f1640afb72
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Leo501/CocosCreatorTutorial
snapshot_id: 6bec42de0f48a95cf8d550d88222a1aa1702782b
revision_id: 00c8568bb8683b2956a183ef80c4d3e30781fd23
branch_name: refs/heads/master
visit_date: 2022-06-29T15:13:53.190484
revision_date: 2022-01-22T11:30:55
committer_date: 2022-01-22T11:30:55
github_id: 125,846,008
star_events_count: 625
fork_events_count: 317
gha_license_id: Apache-2.0
gha_event_created_at: 2019-06-29T02:57:44
gha_created_at: 2018-03-19T11:15:49
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,958
extension: py
filename: imageCompress.py
content:
# -*- coding: utf8 -*-
'''
This is a convenient utility for compressing png images via the tinypng API. Hook up your server to TinyJPG and TinyPNG and compress all your images on the fly.
The API uses the compression engine that powers the web services, so you can expect exactly the same results.
Enter your name and email address on the website below to retrieve your key and get started.
Tinypng official website: https://tinypng.com
API reference: https://tinypng.com/developers/reference/python
Reference: https://hellokugo.github.io/2016/09/10/python%E8%84%9A%E6%9C%AC%E5%AE%9E%E7%8E%B0%E5%9B%BE%E7%89%87%E5%8E%8B%E7%BC%A9/
'''
import subprocess
import os
import shutil
from PIL import Image
import sys
import tinify
# Put one or more of your own tinify API keys in this list; replace them before use
tinify_keys = ["KPnTnm9wcrX7SYJ3hJBLxBwgWTSnjZb6"]
def CompressByPillow(fromFile, out_dir):
print("do CompressByPillow..")
try:
for root, dir, files in os.walk(fromFile):
print(
"****************************************************************************************")
print("root dir:"+root)
print("dir:"+str(dir))
for file in files:
current_file = os.path.join(root, file)
dirName = os.path.basename(root)
                # If no output path is given, fall back to overwriting the source files in place
if not out_dir:
out_dir = fromFile
targetDir = os.path.join(out_dir, dirName)
if not os.path.exists(targetDir):
os.makedirs(targetDir)
                # .9 images and non-image files are not processed here; they are copied as-is
                if not file.endswith(".9.png") and (file.endswith(".png") or file.endswith(".jpg")):
                    print(
                        "--------------------------------------------------------------------------------------------")
                    print("current file:"+current_file)
im = Image.open(current_file)
origin_size = os.path.getsize(current_file)
if file.endswith(".png"):
im = im.convert('P')
im.save(os.path.join(targetDir, file), optimize=True)
target_file = os.path.join(targetDir, file)
compress_size = os.path.getsize(target_file)
print('%.2f' % ((origin_size - compress_size) / origin_size))
else:
if not out_dir or out_dir == fromFile:
continue
shutil.copy(current_file, os.path.join(targetDir, file))
    except Exception as e:
        print(e)
def CompressByTinypng(fromFile, out_dir):
print("do CompressByTinypng..")
try:
for root, dir, files in os.walk(fromFile):
print(
"****************************************************************************************")
print("root dir:"+root)
print("dir:"+str(dir))
for file in files:
current_file = os.path.join(root, file)
dirName = os.path.basename(root)
                # If no output path is given, fall back to overwriting the source files in place
if not out_dir:
out_dir = fromFile
targetDir = os.path.join(out_dir, dirName)
if not os.path.exists(targetDir):
os.makedirs(targetDir)
                # .9 images and non-image files are not processed here; they are copied as-is
if not file.endswith(".9.png") and (file.endswith(".png") or file.endswith(".jpg")):
print(
"--------------------------------------------------------------------------------------------")
                    # for key in tinify_keys:
                    # Check whether the current API key is still usable; if not, switch to the next account
                    tinify.key = tinify_keys[0]
try:
valid = tinify.validate()
if valid:
print("currrent file:"+current_file)
origin_size = os.path.getsize(current_file)
source = tinify.from_file(current_file)
target_file = os.path.join(targetDir, file)
source.to_file(target_file)
compress_size = os.path.getsize(target_file)
print(
'%.2f' % ((origin_size - compress_size) / origin_size))
else:
continue
                    except Exception as e:
                        # Something else went wrong, unrelated to the Tinify API.
                        print("error while compressing png image: " + str(e))
continue
else:
if not out_dir or out_dir == fromFile:
continue
shutil.copy(current_file, os.path.join(targetDir, file))
    except Exception as e:
        print(e)
'''
Usage:
Run from the command line:
python imgCompress.py <file or directory to compress (required)> <output directory (optional; overwrites the source directory if omitted)>
'''
if __name__ == "__main__":
command = sys.argv
if len(command) < 2:
print("command line params must have 2 parameters at least")
raise ValueError("command line params must have 2 parameters at least")
src_dir = command[1]
print(src_dir)
out_dir = None
try:
if os.path.isdir(command[2]):
out_dir = command[2]
except:
out_dir = None
pass
# Tinypng
CompressByTinypng(src_dir, out_dir)
    # Pillow (alternative, currently disabled)
    # CompressByPillow(src_dir, out_dir)
|
blob_id: 0d01dec8a35a2e57b7b41201432bc11df1d17269
directory_id: 2a1b8a671aceda6bc446f8ce26400aa84fa444a6
path: /Packs/Exabeam/Integrations/Exabeam/test_data/response_incidents.py
content_id: cc9aaba05da67cfd9ac370e27708a5f1254bb8e0
detected_licenses: ["MIT"]
license_type: permissive
repo_name: demisto/content
snapshot_id: 6d4722d46f0ff0beea2748e9f7de585bf91a78b4
revision_id: 890def5a0e0ae8d6eaa538148249ddbc851dbb6b
branch_name: refs/heads/master
visit_date: 2023-09-04T00:02:25.618032
revision_date: 2023-09-03T21:56:22
committer_date: 2023-09-03T21:56:22
github_id: 60,525,392
star_events_count: 1,023
fork_events_count: 1,921
gha_license_id: MIT
gha_event_created_at: 2023-09-14T20:55:24
gha_created_at: 2016-06-06T12:17:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 13,900
extension: py
filename: response_incidents.py
content:
INCIDENTS = {
"totalCount": 23,
"offset": 0,
"count": 23,
"maxCount": 10000,
"incidents": [
{
"incidentId": "SOC-402",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670420803000,
"createdAt": 1670421189876,
"createdBy": "admin",
"updatedAt": 1670421199904,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-403",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670421787000,
"createdAt": 1670422094457,
"createdBy": "admin",
"updatedAt": 1670422101646,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-404",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670422364000,
"createdAt": 1670422689455,
"createdBy": "admin",
"updatedAt": 1670422697329,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-405",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670422846000,
"createdAt": 1670423294446,
"createdBy": "admin",
"updatedAt": 1670423301789,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-406",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670486326000,
"createdAt": 1670486609451,
"createdBy": "admin",
"updatedAt": 1670486615032,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-407",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670487741000,
"createdAt": 1670488109448,
"createdBy": "admin",
"updatedAt": 1670488119700,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-408",
"name": "Exabeam Alert Create a new TestService using cmdexe found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670487755000,
"createdAt": 1670488109588,
"createdBy": "admin",
"updatedAt": 1670488120487,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-409",
"name": "Exabeam Alert ENCODEDECODE A FILE USING CERTUTIL TOOL found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670487753000,
"createdAt": 1670488109713,
"createdBy": "admin",
"updatedAt": 1670488121356,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-410",
"name": "Exabeam Alert User Create or Delete found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670488527000,
"createdAt": 1670488709459,
"createdBy": "admin",
"updatedAt": 1670488727692,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-411",
"name": "Exabeam Alert Active Service Discovery via Net Tool found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670488515000,
"createdAt": 1670488709610,
"createdBy": "admin",
"updatedAt": 1670488728528,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-412",
"name": "Exabeam Alert EXECUTE POWERSHELL DOWNLOADSTRING METHOD found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670488526000,
"createdAt": 1670488709736,
"createdBy": "admin",
"updatedAt": 1670488729348,
"updatedBy": "system",
},
},
{
"incidentId": "SOC-413",
"name": "Exabeam Alert GATHER CREDENTIALS USING MIMIKATZ TOOL found",
"baseFields": {
"incidentType": ["ueba"],
"owner": "unassigned",
"queue": "1",
"priority": "medium",
"status": "new",
"startedDate": 1670488535000,
"createdAt": 1670488709864,
"createdBy": "admin",
"updatedAt": 1670488730162,
"updatedBy": "system",
},
},
],
}
EXPECTED_INCIDENTS = {
'first_fetch': [
{
'incidentId': 'SOC-402',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-07T13:46:43Z',
'createdAt': '2022-12-07T13:53:09Z',
'createdBy': 'admin',
'updatedAt': '2022-12-07T13:53:19Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-403',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-07T14:03:07Z',
'createdAt': '2022-12-07T14:08:14Z',
'createdBy': 'admin',
'updatedAt': '2022-12-07T14:08:21Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-404',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-07T14:12:44Z',
'createdAt': '2022-12-07T14:18:09Z',
'createdBy': 'admin',
'updatedAt': '2022-12-07T14:18:17Z',
'updatedBy': 'system'
}
}],
'second_fetch': [
{
'incidentId': 'SOC-405',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-07T14:20:46Z',
'createdAt': '2022-12-07T14:28:14Z',
'createdBy': 'admin',
'updatedAt': '2022-12-07T14:28:21Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-406',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-08T07:58:46Z',
'createdAt': '2022-12-08T08:03:29Z',
'createdBy': 'admin',
'updatedAt': '2022-12-08T08:03:35Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-407',
'name': 'Exabeam Alert Active Service Discovery via Net Tool found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-08T08:22:21Z',
'createdAt': '2022-12-08T08:28:29Z',
'createdBy': 'admin',
'updatedAt': '2022-12-08T08:28:39Z',
'updatedBy': 'system'
}
}],
'third_fetch': [
{
'incidentId': 'SOC-408',
'name': 'Exabeam Alert Create a new TestService using cmdexe found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-08T08:22:35Z',
'createdAt': '2022-12-08T08:28:29Z',
'createdBy': 'admin',
'updatedAt': '2022-12-08T08:28:40Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-409',
'name': 'Exabeam Alert ENCODEDECODE A FILE USING CERTUTIL TOOL found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-08T08:22:33Z',
'createdAt': '2022-12-08T08:28:29Z',
'createdBy': 'admin',
'updatedAt': '2022-12-08T08:28:41Z',
'updatedBy': 'system'
}
},
{
'incidentId': 'SOC-410',
'name': 'Exabeam Alert User Create or Delete found',
'baseFields': {
'incidentType': [
'ueba'
],
'owner': 'unassigned',
'queue': '1',
'priority': 'medium',
'status': 'new',
'startedDate': '2022-12-08T08:35:27Z',
'createdAt': '2022-12-08T08:38:29Z',
'createdBy': 'admin',
'updatedAt': '2022-12-08T08:38:47Z',
'updatedBy': 'system'
}
}],
}
EXPECTED_LAST_RUN = {
'first_fetch': {
'limit': 6,
'time': '2022-12-07T14:18:09.456000',
'found_incident_ids': {
'SOC-402': '',
'SOC-403': '',
'SOC-404': '',
}
},
'second_fetch': {
'limit': 9,
'time': '2022-12-08T08:28:29.449000',
'found_incident_ids': {
'SOC-405': '',
'SOC-406': '',
'SOC-407': '',
}
},
'third_fetch': {
'limit': 12,
'time': '2022-12-08T08:38:29.460000',
'found_incident_ids': {
'SOC-408': '',
'SOC-409': '',
'SOC-410': '',
}
},
}
EXPECTED_CALL_ARGS = {
'queryMap': {
'status': ['new'],
'incidentType': [
'generic', 'abnormalAuth'
],
'priority': [
'medium'
],
'createdAt': [
'1671717185195', '1671976385145'
]
},
'sortBy': 'createdAt',
'sortOrder': 'asc',
'idOnly': False,
'offset': 0,
'length': 3
}
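# --- Hedged illustration (editor's sketch, not part of the test suite) ---
# The mock responses above carry epoch-millisecond timestamps, while the
# expected incidents carry ISO-8601 strings. The conversion presumably
# performed by the integration under test looks like this:
from datetime import datetime, timezone
def _ms_to_iso(ms: int) -> str:
    """Convert epoch milliseconds to the 'YYYY-MM-DDTHH:MM:SSZ' form used above."""
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
assert _ms_to_iso(1670488515000) == '2022-12-08T08:35:15Z'  # startedDate of SOC-411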
|
6a9407701c707687830c825d03e0d82911e2bb95
|
463d49f20a5c0c0851c53d5e16514c265f8910aa
|
/datausa/onet/models.py
|
ef0d2671dfd4ae0018c55e2a9b76e8ea631a7d22
|
[] |
no_license
|
DataUSA/datausa-api
|
f6c503680f66b470c77a4ab9f0e7a4643659252f
|
7288dede082eda07b61e11cf6dc801fe692f6334
|
refs/heads/master
| 2022-02-14T00:03:13.241210
| 2022-01-31T17:16:27
| 2022-01-31T17:16:27
| 37,325,775
| 251
| 50
| null | 2022-01-13T13:00:27
| 2015-06-12T14:13:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
models.py
|
from datausa.database import db
from datausa.attrs.models import Skill
from datausa.ipeds.abstract_models import CipId
from datausa.core.models import BaseModel
from sqlalchemy.ext.declarative import declared_attr
class BaseOnet(db.Model, BaseModel):
__abstract__ = True
__table_args__ = {"schema": "onet"}
supported_levels = {}
source_title = 'O*NET'
source_link = 'http://www.onetonline.org/'
source_org = 'Department of Labor'
class SkillId(object):
@declared_attr
def skill(cls):
return db.Column(db.String(), db.ForeignKey(Skill.id), primary_key=True)
@classmethod
def get_supported_levels(cls):
return {"cip": ["2", "4", "6"]}
class SkillByCip(BaseOnet, SkillId, CipId):
__tablename__ = "skills_by_cip"
median_moe = 1
value = db.Column(db.Float)
value_rca = db.Column(db.Float)
@classmethod
def get_supported_levels(cls):
return {"cip": ["2", "4", "6", "all"], "skill": ["all"]}
class SkillBySoc(BaseOnet, SkillId):
__tablename__ = "skills_by_soc"
median_moe = 1
soc = db.Column(db.String(), primary_key=True)
value = db.Column(db.Float)
value_rca = db.Column(db.Float)
@classmethod
def get_supported_levels(cls):
return {"soc": ["all"], "skill": ["all"]}
|
afaa8cdec4d948f43f62d88d684c1f5b09b73c83
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/datadog_checks_dev/datadog_checks/dev/spec.py
|
9fa4b832d52a8bafb717ec8d6746978f7e6bf3d9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 933
|
py
|
spec.py
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import yaml
from .utils import file_exists, path_join, read_file
def load_spec(check_root):
spec_path = get_spec_path(check_root)
return yaml.safe_load(read_file(spec_path))
def get_spec_path(check_root):
manifest = json.loads(read_file(path_join(check_root, 'manifest.json')))
assets = manifest.get('assets', {})
if 'integration' in assets:
relative_spec_path = assets['integration'].get('configuration', {}).get('spec', '')
else:
relative_spec_path = assets.get('configuration', {}).get('spec', '')
if not relative_spec_path:
raise ValueError('No config spec defined')
spec_path = path_join(check_root, *relative_spec_path.split('/'))
if not file_exists(spec_path):
raise ValueError('No config spec found')
return spec_path
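# --- Hedged example (hypothetical manifest contents, not a shipped file) ---
# The two manifest shapes get_spec_path understands:
_V2_MANIFEST = {"assets": {"integration": {"configuration": {"spec": "assets/configuration/spec.yaml"}}}}
_V1_MANIFEST = {"assets": {"configuration": {"spec": "assets/configuration/spec.yaml"}}}
# Both resolve to <check_root>/assets/configuration/spec.yaml, which must exist
# on disk or ValueError('No config spec found') is raised.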
|
60ff9443ea9476289f9841c3c166356668a5c58c
|
b095173b2dbc77c8ad61c42403258c76169b7a63
|
/tests/integ/test_tf_efs_fsx.py
|
a51b18acb62a399863e77ed815975f224dbe0045
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-python-sdk
|
666665e717cfb76698ba3ea7563b45344634264d
|
8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85
|
refs/heads/master
| 2023-09-04T01:00:20.663626
| 2023-08-31T15:29:19
| 2023-08-31T15:29:19
| 110,621,895
| 2,050
| 1,255
|
Apache-2.0
| 2023-09-14T17:37:15
| 2017-11-14T01:03:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,477
|
py
|
test_tf_efs_fsx.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import time
import pytest
from sagemaker.inputs import FileSystemInput
from sagemaker.parameter import IntegerParameter
from sagemaker.tensorflow import TensorFlow
from sagemaker.tuner import HyperparameterTuner
from sagemaker.utils import unique_name_from_base
import tests
from tests.integ import TRAINING_DEFAULT_TIMEOUT_MINUTES, TUNING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.file_system_input_utils import tear_down, set_up_efs_fsx
from tests.integ.s3_utils import assert_s3_file_patterns_exist
from tests.integ.timeout import timeout
RESOURCE_PATH = os.path.join(os.path.dirname(__file__), "..", "data")
MNIST_RESOURCE_PATH = os.path.join(RESOURCE_PATH, "tensorflow_mnist")
SCRIPT = os.path.join(MNIST_RESOURCE_PATH, "mnist.py")
TFS_RESOURCE_PATH = os.path.join(RESOURCE_PATH, "tfs", "tfs-test-entrypoint-with-handler")
EFS_DIR_PATH = "/tensorflow"
FSX_DIR_PATH = "/fsx/tensorflow"
MAX_JOBS = 2
MAX_PARALLEL_JOBS = 2
PY_VERSION = "py37"
@pytest.fixture(scope="module")
def efs_fsx_setup(sagemaker_session, ec2_instance_type):
fs_resources = None
try:
fs_resources = set_up_efs_fsx(sagemaker_session, ec2_instance_type)
yield fs_resources
finally:
if fs_resources:
tear_down(sagemaker_session, fs_resources)
@pytest.mark.skipif(
tests.integ.test_region() not in tests.integ.EFS_TEST_ENABLED_REGION,
reason="EFS integration tests need to be fixed before running in all regions.",
)
def test_mnist_efs(
efs_fsx_setup,
sagemaker_session,
cpu_instance_type,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
):
role = efs_fsx_setup["role_name"]
subnets = [efs_fsx_setup["subnet_id"]]
security_group_ids = efs_fsx_setup["security_group_ids"]
estimator = TensorFlow(
entry_point=SCRIPT,
role=role,
instance_count=1,
instance_type=cpu_instance_type,
sagemaker_session=sagemaker_session,
framework_version=tensorflow_training_latest_version,
py_version=tensorflow_training_latest_py_version,
subnets=subnets,
security_group_ids=security_group_ids,
)
file_system_efs_id = efs_fsx_setup["file_system_efs_id"]
content_type = "application/json"
file_system_input = FileSystemInput(
file_system_id=file_system_efs_id,
file_system_type="EFS",
directory_path=EFS_DIR_PATH,
content_type=content_type,
)
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
estimator.fit(inputs=file_system_input, job_name=unique_name_from_base("test-mnist-efs"))
assert_s3_file_patterns_exist(
sagemaker_session,
estimator.model_dir,
[r"model\.ckpt-\d+\.index", r"checkpoint"],
)
@pytest.mark.skipif(
tests.integ.test_region() not in tests.integ.EFS_TEST_ENABLED_REGION,
reason="EFS integration tests need to be fixed before running in all regions.",
)
def test_mnist_lustre(
efs_fsx_setup,
sagemaker_session,
cpu_instance_type,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
):
role = efs_fsx_setup["role_name"]
subnets = [efs_fsx_setup["subnet_id"]]
security_group_ids = efs_fsx_setup["security_group_ids"]
estimator = TensorFlow(
entry_point=SCRIPT,
role=role,
instance_count=1,
instance_type=cpu_instance_type,
sagemaker_session=sagemaker_session,
framework_version=tensorflow_training_latest_version,
py_version=tensorflow_training_latest_py_version,
subnets=subnets,
security_group_ids=security_group_ids,
)
file_system_fsx_id = efs_fsx_setup["file_system_fsx_id"]
file_system_input = FileSystemInput(
file_system_id=file_system_fsx_id, file_system_type="FSxLustre", directory_path=FSX_DIR_PATH
)
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
estimator.fit(inputs=file_system_input, job_name=unique_name_from_base("test-mnist-lustre"))
assert_s3_file_patterns_exist(
sagemaker_session,
estimator.model_dir,
[r"model\.ckpt-\d+\.index", r"checkpoint"],
)
@pytest.mark.skipif(
tests.integ.test_region() not in tests.integ.EFS_TEST_ENABLED_REGION,
reason="EFS integration tests need to be fixed before running in all regions.",
)
def test_tuning_tf_efs(
efs_fsx_setup,
sagemaker_session,
cpu_instance_type,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
):
role = efs_fsx_setup["role_name"]
subnets = [efs_fsx_setup["subnet_id"]]
security_group_ids = efs_fsx_setup["security_group_ids"]
estimator = TensorFlow(
entry_point=SCRIPT,
role=role,
instance_count=1,
instance_type=cpu_instance_type,
sagemaker_session=sagemaker_session,
framework_version=tensorflow_training_latest_version,
py_version=tensorflow_training_latest_py_version,
subnets=subnets,
security_group_ids=security_group_ids,
)
hyperparameter_ranges = {"epochs": IntegerParameter(1, 2)}
objective_metric_name = "accuracy"
metric_definitions = [{"Name": objective_metric_name, "Regex": "accuracy = ([0-9\\.]+)"}]
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
max_jobs=MAX_JOBS,
max_parallel_jobs=MAX_PARALLEL_JOBS,
)
file_system_efs_id = efs_fsx_setup["file_system_efs_id"]
file_system_input = FileSystemInput(
file_system_id=file_system_efs_id, file_system_type="EFS", directory_path=EFS_DIR_PATH
)
with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):
tuning_job_name = unique_name_from_base("test-tuning-tf-script-mode-efs", max_length=32)
tuner.fit(file_system_input, job_name=tuning_job_name)
time.sleep(15)
tuner.wait()
best_training_job = tuner.best_training_job()
assert best_training_job
@pytest.mark.skipif(
tests.integ.test_region() not in tests.integ.EFS_TEST_ENABLED_REGION,
reason="EFS integration tests need to be fixed before running in all regions.",
)
def test_tuning_tf_lustre(
efs_fsx_setup,
sagemaker_session,
cpu_instance_type,
tensorflow_training_latest_version,
tensorflow_training_latest_py_version,
):
role = efs_fsx_setup["role_name"]
subnets = [efs_fsx_setup["subnet_id"]]
security_group_ids = efs_fsx_setup["security_group_ids"]
estimator = TensorFlow(
entry_point=SCRIPT,
role=role,
instance_count=1,
instance_type=cpu_instance_type,
sagemaker_session=sagemaker_session,
framework_version=tensorflow_training_latest_version,
py_version=tensorflow_training_latest_py_version,
subnets=subnets,
security_group_ids=security_group_ids,
)
hyperparameter_ranges = {"epochs": IntegerParameter(1, 2)}
objective_metric_name = "accuracy"
metric_definitions = [{"Name": objective_metric_name, "Regex": "accuracy = ([0-9\\.]+)"}]
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
max_jobs=MAX_JOBS,
max_parallel_jobs=MAX_PARALLEL_JOBS,
)
file_system_fsx_id = efs_fsx_setup["file_system_fsx_id"]
file_system_input = FileSystemInput(
file_system_id=file_system_fsx_id, file_system_type="FSxLustre", directory_path=FSX_DIR_PATH
)
with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):
tuning_job_name = unique_name_from_base("test-tuning-tf-script-mode-lustre", max_length=32)
tuner.fit(file_system_input, job_name=tuning_job_name)
time.sleep(15)
tuner.wait()
best_training_job = tuner.best_training_job()
assert best_training_job
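# Hedged summary of the shared pattern above (editor's note, not extra test code):
# each test builds a TensorFlow estimator inside the fixture-provided VPC
# (subnets + security_group_ids), wraps the file system as
#   FileSystemInput(file_system_id=..., file_system_type="EFS" or "FSxLustre",
#                   directory_path=...)
# and hands it to estimator.fit() or tuner.fit() under a timeout guard.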
|
5a2a400f58964195458e6563f878d96d2a612ef4
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/nemo/utils/exp_manager.py
|
63775f4058c53f303a1faecf03e646aec172b693
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 46,436
|
py
|
exp_manager.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
import time
import warnings
from dataclasses import dataclass
from datetime import timedelta
from pathlib import Path
from shutil import copy, move
from typing import Any, Dict, List, Optional, Tuple, Union
import pytorch_lightning
import torch
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.timer import Interval, Timer
from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger, WandbLogger
from pytorch_lightning.loops import _TrainingEpochLoop
from pytorch_lightning.strategies.ddp import DDPStrategy
from nemo.collections.common.callbacks import EMA
from nemo.constants import NEMO_ENV_VARNAME_TESTING, NEMO_ENV_VARNAME_VERSION
from nemo.utils import logging, timers
from nemo.utils.app_state import AppState
from nemo.utils.callbacks import NeMoModelCheckpoint, PreemptionCallback
from nemo.utils.env_var_parsing import get_envbool
from nemo.utils.exceptions import NeMoBaseException
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger
from nemo.utils.loggers import ClearMLLogger, ClearMLParams, DLLogger, DLLoggerParams, MLFlowParams
from nemo.utils.model_utils import uninject_model_parallel_rank
class NotFoundError(NeMoBaseException):
""" Raised when a file or folder is not found"""
class LoggerMisconfigurationError(NeMoBaseException):
""" Raised when a mismatch between trainer.logger and exp_manager occurs"""
def __init__(self, message):
message = (
message
+ " You can disable lighning's trainer from creating a logger by passing logger=False to its constructor."
)
super().__init__(message)
class CheckpointMisconfigurationError(NeMoBaseException):
""" Raised when a mismatch between trainer.callbacks and exp_manager occurs"""
@dataclass
class EarlyStoppingParams:
monitor: str = "val_loss" # The metric that early stopping should consider.
mode: str = "min" # inform early stopping whether to look for increase or decrease in monitor.
min_delta: float = 0.001 # smallest change to consider as improvement.
patience: int = 10 # how many (continuous) validation cycles to wait with no improvement and stopping training.
verbose: bool = True
strict: bool = True
check_finite: bool = True
stopping_threshold: Optional[float] = None
divergence_threshold: Optional[float] = None
check_on_train_epoch_end: Optional[bool] = None
log_rank_zero_only: bool = False
@dataclass
class CallbackParams:
filepath: Optional[str] = None # Deprecated
dirpath: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
filename: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
monitor: Optional[str] = "val_loss"
verbose: Optional[bool] = True
save_last: Optional[bool] = True
save_top_k: Optional[int] = 3
save_weights_only: Optional[bool] = False
mode: Optional[str] = "min"
auto_insert_metric_name: bool = True
every_n_epochs: Optional[int] = 1
every_n_train_steps: Optional[int] = None
train_time_interval: Optional[str] = None
prefix: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
postfix: str = ".nemo"
save_best_model: bool = False
always_save_nemo: bool = False
    save_nemo_on_train_end: Optional[bool] = True # Whether to automatically save the .nemo file during the on_train_end hook
model_parallel_size: Optional[int] = None # tensor parallel size * pipeline parallel size
    save_on_train_epoch_end: Optional[bool] = False # Save checkpoints at the end of the training epoch rather than after validation
@dataclass
class StepTimingParams:
reduction: Optional[str] = "mean"
# if True torch.cuda.synchronize() is called on start/stop
sync_cuda: Optional[bool] = False
# if positive, defines the size of a sliding window for computing mean
buffer_size: Optional[int] = 1
@dataclass
class EMAParams:
enable: Optional[bool] = False
decay: Optional[float] = 0.999
cpu_offload: Optional[bool] = False
validate_original_weights: Optional[bool] = False
every_n_steps: int = 1
@dataclass
class ExpManagerConfig:
"""Experiment Manager config for validation of passed arguments.
"""
# Log dir creation parameters
explicit_log_dir: Optional[str] = None
exp_dir: Optional[str] = None
name: Optional[str] = None
version: Optional[str] = None
use_datetime_version: Optional[bool] = True
resume_if_exists: Optional[bool] = False
resume_past_end: Optional[bool] = False
resume_ignore_no_checkpoint: Optional[bool] = False
resume_from_checkpoint: Optional[str] = None
# Logging parameters
create_tensorboard_logger: Optional[bool] = True
summary_writer_kwargs: Optional[Dict[Any, Any]] = None
create_wandb_logger: Optional[bool] = False
wandb_logger_kwargs: Optional[Dict[Any, Any]] = None
create_mlflow_logger: Optional[bool] = False
mlflow_logger_kwargs: Optional[MLFlowParams] = MLFlowParams()
create_dllogger_logger: Optional[bool] = False
dllogger_logger_kwargs: Optional[DLLoggerParams] = DLLoggerParams()
create_clearml_logger: Optional[bool] = False
clearml_logger_kwargs: Optional[ClearMLParams] = ClearMLParams()
# Checkpointing parameters
create_checkpoint_callback: Optional[bool] = True
checkpoint_callback_params: Optional[CallbackParams] = CallbackParams()
create_early_stopping_callback: Optional[bool] = False
early_stopping_callback_params: Optional[EarlyStoppingParams] = EarlyStoppingParams()
create_preemption_callback: Optional[bool] = True
# Additional exp_manager arguments
files_to_copy: Optional[List[str]] = None
# logs timing of train/val/test steps
log_step_timing: Optional[bool] = True
step_timing_kwargs: Optional[StepTimingParams] = StepTimingParams()
# Configures creation of log files for different ranks
log_local_rank_0_only: Optional[bool] = False
log_global_rank_0_only: Optional[bool] = False
# disable initial validation when resuming from a checkpoint saved during validation
disable_validation_on_resume: Optional[bool] = True
ema: Optional[EMAParams] = EMAParams()
# Wall clock time limit
max_time_per_run: Optional[str] = None
class TimingCallback(Callback):
"""
Logs execution time of train/val/test steps
"""
def __init__(self, timer_kwargs={}):
self.timer = timers.NamedTimer(**timer_kwargs)
def _on_batch_start(self, name):
# reset only if we do not return mean of a sliding window
if self.timer.buffer_size <= 0:
self.timer.reset(name)
self.timer.start(name)
def _on_batch_end(self, name, pl_module):
self.timer.stop(name)
# Set the `batch_size=1` as WAR for `dataloader_iter`, which is not used for any metric
pl_module.log(name, self.timer[name], on_step=True, on_epoch=False, batch_size=1)
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
self._on_batch_start("train_step_timing")
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
self._on_batch_end("train_step_timing", pl_module)
def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx=0):
self._on_batch_start("validation_step_timing")
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
self._on_batch_end("validation_step_timing", pl_module)
def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx=0):
self._on_batch_start("test_step_timing")
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
self._on_batch_end("test_step_timing", pl_module)
def on_before_backward(self, trainer, pl_module, loss):
self._on_batch_start("train_backward_timing")
def on_after_backward(self, trainer, pl_module):
self._on_batch_end("train_backward_timing", pl_module)
def exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None) -> Optional[Path]:
"""
exp_manager is a helper function used to manage folders for experiments. It follows the pytorch lightning paradigm
of exp_dir/model_or_experiment_name/version. If the lightning trainer has a logger, exp_manager will get exp_dir,
name, and version from the logger. Otherwise it will use the exp_dir and name arguments to create the logging
directory. exp_manager also allows for explicit folder creation via explicit_log_dir.
    The version can be a datetime string or an integer. The datetime version can be disabled if use_datetime_version is set
to False. It optionally creates TensorBoardLogger, WandBLogger, DLLogger, MLFlowLogger, ClearMLLogger,
ModelCheckpoint objects from pytorch lightning.
It copies sys.argv, and git information if available to the logging directory. It creates a log file for each
process to log their output into.
    exp_manager additionally has a resume feature (resume_if_exists) which can be used to continue training from
    the constructed log_dir. When training must be continued repeatedly (e.g. on a cluster that requires multiple
    consecutive jobs), the version folders must not be created. Therefore, from v1.0.0, when resume_if_exists is
    set to True, creation of the version folders is skipped.
Args:
trainer (pytorch_lightning.Trainer): The lightning trainer.
cfg (DictConfig, dict): Can have the following keys:
- explicit_log_dir (str, Path): Can be used to override exp_dir/name/version folder creation. Defaults to
None, which will use exp_dir, name, and version to construct the logging directory.
- exp_dir (str, Path): The base directory to create the logging directory. Defaults to None, which logs to
./nemo_experiments.
- name (str): The name of the experiment. Defaults to None which turns into "default" via name = name or
"default".
- version (str): The version of the experiment. Defaults to None which uses either a datetime string or
lightning's TensorboardLogger system of using version_{int}.
- use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True.
- resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets
trainer._checkpoint_connector._ckpt_path so that the trainer should auto-resume. exp_manager will move files
under log_dir to log_dir/run_{int}. Defaults to False. From v1.0.0, when resume_if_exists is True,
we would not create version folders to make it easier to find the log folder for next runs.
        - resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching
            ``*end.ckpt`` (indicating a previous training run fully completed) is found. This behaviour can be disabled,
            in which case the ``*end.ckpt`` will be loaded, by setting resume_past_end to True. Defaults to False.
- resume_ignore_no_checkpoint (bool): exp_manager errors out if resume_if_exists is True and no checkpoint
could be found. This behaviour can be disabled, in which case exp_manager will print a message and
continue without restoring, by setting resume_ignore_no_checkpoint to True. Defaults to False.
- resume_from_checkpoint (str): Can be used to specify a path to a specific checkpoint file to load from. This will
override any checkpoint found when resume_if_exists is True. Defaults to None.
- create_tensorboard_logger (bool): Whether to create a tensorboard logger and attach it to the pytorch
lightning trainer. Defaults to True.
- summary_writer_kwargs (dict): A dictionary of kwargs that can be passed to lightning's TensorboardLogger
class. Note that log_dir is passed by exp_manager and cannot exist in this dict. Defaults to None.
        - create_wandb_logger (bool): Whether to create a Weights and Biases logger and attach it to the pytorch
lightning trainer. Defaults to False.
- wandb_logger_kwargs (dict): A dictionary of kwargs that can be passed to lightning's WandBLogger
class. Note that name and project are required parameters if create_wandb_logger is True.
Defaults to None.
        - create_mlflow_logger (bool): Whether to create an MLFlow logger and attach it to the pytorch lightning
            trainer. Defaults to False.
        - mlflow_logger_kwargs (dict): optional parameters for the MLFlow logger
        - create_dllogger_logger (bool): Whether to create a DLLogger logger and attach it to the pytorch lightning
            trainer. Defaults to False.
        - dllogger_logger_kwargs (dict): optional parameters for the DLLogger logger
        - create_clearml_logger (bool): Whether to create a ClearML logger and attach it to the pytorch lightning
            trainer. Defaults to False.
- clearml_logger_kwargs (dict): optional parameters for the ClearML logger
- create_checkpoint_callback (bool): Whether to create a ModelCheckpoint callback and attach it to the
pytorch lightning trainer. The ModelCheckpoint saves the top 3 models with the best "val_loss", the most
recent checkpoint under ``*last.ckpt``, and the final checkpoint after training completes under ``*end.ckpt``.
Defaults to True.
- create_early_stopping_callback (bool): Flag to decide if early stopping should be used to stop training. Default is False.
See EarlyStoppingParams dataclass above.
- create_preemption_callback (bool): Flag to decide whether to enable preemption callback to save checkpoints and exit training
immediately upon preemption. Default is True.
- files_to_copy (list): A list of files to copy to the experiment logging directory. Defaults to None which
copies no files.
- log_local_rank_0_only (bool): Whether to only create log files for local rank 0. Defaults to False.
Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir.
- log_global_rank_0_only (bool): Whether to only create log files for global rank 0. Defaults to False.
Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir.
        - max_time_per_run (str): The maximum wall clock time *per run*. This is intended to be used on clusters where you want
a checkpoint to be saved after this specified time and be able to resume from that checkpoint. Defaults to None.
returns:
log_dir (Path): The final logging directory where logging files are saved. Usually the concatenation of
exp_dir, name, and version.
"""
# Add rank information to logger
# Note: trainer.global_rank and trainer.is_global_zero are not set until trainer.fit, so have to hack around it
local_rank = int(os.environ.get("LOCAL_RANK", 0))
global_rank = trainer.node_rank * trainer.num_devices + local_rank
logging.rank = global_rank
if cfg is None:
logging.error("exp_manager did not receive a cfg argument. It will be disabled.")
return
if trainer.fast_dev_run:
logging.info("Trainer was called with fast_dev_run. exp_manager will return without any functionality.")
return
# Ensure passed cfg is compliant with ExpManagerConfig
schema = OmegaConf.structured(ExpManagerConfig)
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
cfg = OmegaConf.merge(schema, cfg)
error_checks(trainer, cfg) # Ensures that trainer options are compliant with NeMo and exp_manager arguments
log_dir, exp_dir, name, version = get_log_dir(
trainer=trainer,
exp_dir=cfg.exp_dir,
name=cfg.name,
version=cfg.version,
explicit_log_dir=cfg.explicit_log_dir,
use_datetime_version=cfg.use_datetime_version,
resume_if_exists=cfg.resume_if_exists,
)
if cfg.resume_if_exists:
# Check for existing checkpoints in `dirpath` if it's specified, use <log_dir>/checkpoints otherwise
if cfg.checkpoint_callback_params.dirpath:
check_resume(
trainer,
log_dir,
cfg.resume_past_end,
cfg.resume_ignore_no_checkpoint,
cfg.checkpoint_callback_params.dirpath,
)
else:
check_resume(trainer, log_dir, cfg.resume_past_end, cfg.resume_ignore_no_checkpoint)
# TODO: this behavior is undesirable, need ckpts in exp_dir to take priority if present over resume_from_checkpoint
# if cfg.resume_from_checkpoint is not None:
# trainer.ckpt_path = cfg.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
checkpoint_name = name
# If name returned from get_log_dir is "", use cfg.name for checkpointing
if checkpoint_name is None or checkpoint_name == '':
checkpoint_name = cfg.name or "default"
# Set mlflow name if it's not set, before the main name is erased
if cfg.create_mlflow_logger and (not cfg.mlflow_logger_kwargs.get("experiment_name", None)):
cfg.mlflow_logger_kwargs.experiment_name = cfg.name
logging.warning(
'mlflow logger specified but no experiment name set. Using the same as Tensorboard: %s',
cfg.mlflow_logger_kwargs.experiment_name,
)
cfg.name = name # Used for configure_loggers so that the log_dir is properly set even if name is ""
cfg.version = version
# update app_state with log_dir, exp_dir, etc
app_state = AppState()
app_state.log_dir = log_dir
app_state.exp_dir = exp_dir
app_state.name = name
app_state.version = version
app_state.checkpoint_name = checkpoint_name
app_state.create_checkpoint_callback = cfg.create_checkpoint_callback
app_state.checkpoint_callback_params = cfg.checkpoint_callback_params
# Create the logging directory if it does not exist
os.makedirs(log_dir, exist_ok=True) # Cannot limit creation to global zero as all ranks write to own log file
logging.info(f'Experiments will be logged at {log_dir}')
trainer._default_root_dir = log_dir
if cfg.log_local_rank_0_only is True and cfg.log_global_rank_0_only is True:
raise ValueError(
f"Cannot set both log_local_rank_0_only and log_global_rank_0_only to True. Please set either one or neither."
)
# This is set if the env var NEMO_TESTING is set to True.
nemo_testing = get_envbool(NEMO_ENV_VARNAME_TESTING, False)
# Handle logging to file
log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt'
if cfg.log_local_rank_0_only is True and not nemo_testing:
if local_rank == 0:
logging.add_file_handler(log_file)
elif cfg.log_global_rank_0_only is True and not nemo_testing:
if global_rank == 0:
logging.add_file_handler(log_file)
else:
# Logs on all ranks.
logging.add_file_handler(log_file)
# For some reason, LearningRateLogger requires trainer to have a logger. Safer to create logger on all ranks
# not just global rank 0.
if (
cfg.create_tensorboard_logger
or cfg.create_wandb_logger
or cfg.create_mlflow_logger
or cfg.create_dllogger_logger
or cfg.create_clearml_logger
):
configure_loggers(
trainer,
exp_dir,
log_dir,
cfg.name,
cfg.version,
cfg.checkpoint_callback_params,
cfg.create_tensorboard_logger,
cfg.summary_writer_kwargs,
cfg.create_wandb_logger,
cfg.wandb_logger_kwargs,
cfg.create_mlflow_logger,
cfg.mlflow_logger_kwargs,
cfg.create_dllogger_logger,
cfg.dllogger_logger_kwargs,
cfg.create_clearml_logger,
cfg.clearml_logger_kwargs,
)
# add loggers timing callbacks
if cfg.log_step_timing:
timing_callback = TimingCallback(timer_kwargs=cfg.step_timing_kwargs or {})
trainer.callbacks.insert(0, timing_callback)
if cfg.ema.enable:
ema_callback = EMA(
decay=cfg.ema.decay,
validate_original_weights=cfg.ema.validate_original_weights,
cpu_offload=cfg.ema.cpu_offload,
every_n_steps=cfg.ema.every_n_steps,
)
trainer.callbacks.append(ema_callback)
if cfg.create_early_stopping_callback:
early_stop_callback = EarlyStopping(**cfg.early_stopping_callback_params)
trainer.callbacks.append(early_stop_callback)
if cfg.create_checkpoint_callback:
configure_checkpointing(
trainer,
log_dir,
checkpoint_name,
cfg.resume_if_exists,
cfg.checkpoint_callback_params,
cfg.create_preemption_callback,
)
if cfg.disable_validation_on_resume:
# extend training loop to skip initial validation when resuming from checkpoint
configure_no_restart_validation_training_loop(trainer)
# Setup a stateless timer for use on clusters.
if cfg.max_time_per_run is not None:
found_ptl_timer = False
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
# NOTE: PTL does not expose a `trainer.max_time`. By the time we are in this function, PTL has already setup a timer if the user specifies `trainer.max_time` so best we can do is replace that.
# Working: If only `trainer.max_time` is set - it behaves as a normal PTL timer. If only `exp_manager.max_time_per_run` is set - it behaves as a StateLessTimer. If both are set, it also behaves as a StateLessTimer.
logging.warning(
f'Found a PTL Timer callback, replacing with a StatelessTimer callback. This will happen if you set trainer.max_time as well as exp_manager.max_time_per_run.'
)
trainer.callbacks[idx] = StatelessTimer(cfg.max_time_per_run)
found_ptl_timer = True
break
if not found_ptl_timer:
trainer.max_time = cfg.max_time_per_run
trainer.callbacks.append(StatelessTimer(cfg.max_time_per_run))
if is_global_rank_zero():
# Move files_to_copy to folder and add git information if present
if cfg.files_to_copy:
for _file in cfg.files_to_copy:
copy(Path(_file), log_dir)
# Create files for cmd args and git info
with open(log_dir / 'cmd-args.log', 'w', encoding='utf-8') as _file:
_file.write(" ".join(sys.argv))
# Try to get git hash
git_repo, git_hash = get_git_hash()
if git_repo:
with open(log_dir / 'git-info.log', 'w', encoding='utf-8') as _file:
_file.write(f'commit hash: {git_hash}')
_file.write(get_git_diff())
# Add err_file logging to global_rank zero
logging.add_err_file_handler(log_dir / 'nemo_error_log.txt')
# Add lightning file logging to global_rank zero
add_filehandlers_to_pl_logger(log_dir / 'lightning_logs.txt', log_dir / 'nemo_error_log.txt')
return log_dir
def error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None):
"""
Checks that the passed trainer is compliant with NeMo and exp_manager's passed configuration. Checks that:
- Throws error when hydra has changed the working directory. This causes issues with lightning's DDP
- Throws error when trainer has loggers defined but create_tensorboard_logger or create_wandB_logger
or create_mlflow_logger or create_dllogger_logger is True
- Prints error messages when 1) run on multi-node and not Slurm, and 2) run on multi-gpu without DDP
"""
if HydraConfig.initialized() and get_original_cwd() != os.getcwd():
raise ValueError(
"Hydra changed the working directory. This interferes with ExpManger's functionality. Please pass "
"hydra.run.dir=. to your python script."
)
if trainer.logger is not None and (
cfg.create_tensorboard_logger or cfg.create_wandb_logger or cfg.create_mlflow_logger
):
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, and either "
f"create_tensorboard_logger: {cfg.create_tensorboard_logger} or create_wandb_logger: "
f"{cfg.create_wandb_logger} or create_mlflow_logger: {cfg.create_mlflow_logger}"
f"or create_dllogger_logger: {cfg.create_mlflow_logger} was set to True. "
"These can only be used if trainer does not already have a logger."
)
if trainer.num_nodes > 1 and not check_slurm(trainer):
logging.error(
"You are running multi-node training without SLURM handling the processes."
" Please note that this is not tested in NeMo and could result in errors."
)
if trainer.num_devices > 1 and not isinstance(trainer.strategy, DDPStrategy):
logging.error(
"You are running multi-gpu without ddp.Please note that this is not tested in NeMo and could result in "
"errors."
)
def check_resume(
trainer: 'pytorch_lightning.Trainer',
log_dir: str,
resume_past_end: bool = False,
resume_ignore_no_checkpoint: bool = False,
dirpath: str = None,
):
"""Checks that resume=True was used correctly with the arguments pass to exp_manager. Sets
trainer._checkpoint_connector._ckpt_path as necessary.
Returns:
log_dir (Path): The log_dir
exp_dir (str): The base exp_dir without name nor version
name (str): The name of the experiment
version (str): The version of the experiment
Raises:
NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.
ValueError: If resume is True, and there were more than 1 checkpoint could found.
"""
if not log_dir:
raise ValueError(f"Resuming requires the log_dir {log_dir} to be passed to exp_manager")
# Use <log_dir>/checkpoints/ unless `dirpath` is set
checkpoint_dir = Path(dirpath) if dirpath else Path(Path(log_dir) / "checkpoints")
checkpoint = None
end_checkpoints = list(checkpoint_dir.rglob("*end.ckpt"))
last_checkpoints = list(checkpoint_dir.rglob("*last.ckpt"))
if not checkpoint_dir.exists():
if resume_ignore_no_checkpoint:
logging.warning(
f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Training from scratch."
)
return
else:
raise NotFoundError(f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Cannot resume.")
elif len(end_checkpoints) > 0:
if resume_past_end:
if len(end_checkpoints) > 1:
if 'mp_rank' in str(end_checkpoints[0]):
checkpoint = end_checkpoints[0]
else:
raise ValueError(f"Multiple checkpoints {end_checkpoints} that matches *end.ckpt.")
logging.info(f"Resuming from {end_checkpoints[0]}")
else:
raise ValueError(
f"Found {end_checkpoints[0]} indicating that the last training run has already completed."
)
elif not len(last_checkpoints) > 0:
if resume_ignore_no_checkpoint:
logging.warning(f"There were no checkpoints found in {checkpoint_dir}. Training from scratch.")
return
else:
raise NotFoundError(f"There were no checkpoints found in {checkpoint_dir}. Cannot resume.")
elif len(last_checkpoints) > 1:
if 'mp_rank' in str(last_checkpoints[0]) or 'tp_rank' in str(last_checkpoints[0]):
checkpoint = last_checkpoints[0]
checkpoint = uninject_model_parallel_rank(checkpoint)
else:
raise ValueError(f"Multiple checkpoints {last_checkpoints} that matches *last.ckpt.")
else:
logging.info(f"Resuming from {last_checkpoints[0]}")
checkpoint = last_checkpoints[0]
# PTL 2.0 supports ckpt_path instead of resume_from_checkpoint as the trainer flag
trainer.ckpt_path = str(checkpoint)
if is_global_rank_zero():
# Check to see if any files exist that need to be moved
files_to_move = []
for child in Path(log_dir).iterdir():
if child.is_file():
files_to_move.append(child)
if len(files_to_move) > 0:
# Move old files to a new folder
other_run_dirs = Path(log_dir).glob("run_*")
run_count = 0
for fold in other_run_dirs:
if fold.is_dir():
run_count += 1
new_run_dir = Path(Path(log_dir) / f"run_{run_count}")
new_run_dir.mkdir()
for _file in files_to_move:
move(str(_file), str(new_run_dir))
def check_explicit_log_dir(
trainer: 'pytorch_lightning.Trainer', explicit_log_dir: Union[Path, str], exp_dir: str, name: str, version: str
) -> Tuple[Path, str, str, str]:
""" Checks that the passed arguments are compatible with explicit_log_dir.
Returns:
log_dir (Path): the log_dir
exp_dir (str): the base exp_dir without name nor version
name (str): The name of the experiment
version (str): The version of the experiment
    Raises:
LoggerMisconfigurationError
"""
if trainer.logger is not None:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger and explicit_log_dir: "
f"{explicit_log_dir} was pass to exp_manager. Please remove the logger from the lightning trainer."
)
# Checking only (explicit_log_dir) vs (exp_dir and version).
# The `name` will be used as the actual name of checkpoint/archive.
if exp_dir or version:
logging.error(
f"exp_manager received explicit_log_dir: {explicit_log_dir} and at least one of exp_dir: {exp_dir}, "
f"or version: {version}. Please note that exp_dir, name, and version will be ignored."
)
if is_global_rank_zero() and Path(explicit_log_dir).exists():
logging.warning(f"Exp_manager is logging to {explicit_log_dir}, but it already exists.")
return Path(explicit_log_dir), str(explicit_log_dir), "", ""
def get_log_dir(
trainer: 'pytorch_lightning.Trainer',
exp_dir: str = None,
name: str = None,
version: str = None,
explicit_log_dir: str = None,
use_datetime_version: bool = True,
resume_if_exists: bool = False,
) -> Tuple[Path, str, str, str]:
"""
    Obtains the log_dir used for exp_manager.
    Args:
        explicit_log_dir (str): The explicit path to the log folder. Defaults to None.
        use_datetime_version (bool): Whether to use date and time as the version of the log folder. Defaults to True.
        resume_if_exists (bool): whether resume_if_exists is enabled in the exp_manager config. When enabled, the
            version folders are not created.
    Returns:
        log_dir (Path): the log_dir
        exp_dir (str): the base exp_dir without name nor version
        name (str): The name of the experiment
        version (str): The version of the experiment
    Raises:
        LoggerMisconfigurationError: If trainer is incompatible with arguments
        NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.
        ValueError: If resume is True and more than one checkpoint was found.
    """
if explicit_log_dir: # If explicit log_dir was passed, short circuit
return check_explicit_log_dir(trainer, explicit_log_dir, exp_dir, name, version)
# Default exp_dir to ./nemo_experiments if None was passed
_exp_dir = exp_dir
if exp_dir is None:
_exp_dir = str(Path.cwd() / 'nemo_experiments')
# If the user has already defined a logger for the trainer, use the logger defaults for logging directory
if trainer.logger is not None:
if trainer.logger.save_dir:
if exp_dir:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, the logger's "
f"save_dir was not None, and exp_dir ({exp_dir}) was not None. If trainer.logger.save_dir "
"exists, exp_manager will use trainer.logger.save_dir as the logging directory and exp_dir "
"must be None."
)
_exp_dir = trainer.logger.save_dir
if name:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, and name: "
f"{name} was also passed to exp_manager. If the trainer contains a "
"logger, exp_manager will use trainer.logger.name, and name passed to exp_manager must be None."
)
name = trainer.logger.name
version = f"version_{trainer.logger.version}"
# Use user-defined exp_dir, project_name, exp_name, and versioning options
else:
name = name or "default"
version = version or os.environ.get(NEMO_ENV_VARNAME_VERSION, None)
if not version:
if resume_if_exists:
logging.warning(
"No version folders would be created under the log folder as 'resume_if_exists' is enabled."
)
version = None
elif is_global_rank_zero():
if use_datetime_version:
version = time.strftime('%Y-%m-%d_%H-%M-%S')
else:
tensorboard_logger = TensorBoardLogger(save_dir=Path(_exp_dir), name=name, version=version)
version = f"version_{tensorboard_logger.version}"
os.environ[NEMO_ENV_VARNAME_VERSION] = "" if version is None else version
log_dir = Path(_exp_dir) / Path(str(name)) / Path("" if version is None else str(version))
return log_dir, str(_exp_dir), name, version
def get_git_hash():
"""
Helper function that tries to get the commit hash if running inside a git folder
returns:
Bool: Whether the git subprocess ran without error
str: git subprocess output or error message
"""
try:
return (
True,
subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT).decode(),
)
except subprocess.CalledProcessError as err:
return False, "{}\n".format(err.output.decode("utf-8"))
def get_git_diff():
"""
Helper function that tries to get the git diff if running inside a git folder
    returns:
        str: git subprocess output or error message
"""
try:
return subprocess.check_output(['git', 'diff'], stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
return "{}\n".format(err.output.decode("utf-8"))
def configure_loggers(
trainer: 'pytorch_lightning.Trainer',
    exp_dir: Union[Path, str],
    log_dir: Union[Path, str],
name: str,
version: str,
checkpoint_callback_params: dict,
create_tensorboard_logger: bool,
summary_writer_kwargs: dict,
create_wandb_logger: bool,
wandb_kwargs: dict,
create_mlflow_logger: bool,
mlflow_kwargs: dict,
create_dllogger_logger: bool,
dllogger_kwargs: dict,
create_clearml_logger: bool,
clearml_kwargs: dict,
):
"""
Creates TensorboardLogger and/or WandBLogger / MLFlowLogger / DLlogger / ClearMLLogger and attach them to trainer.
Raises ValueError if summary_writer_kwargs or wandb_kwargs are misconfigured.
"""
# Potentially create tensorboard logger and/or WandBLogger / MLFlowLogger / DLLogger
logger_list = []
if create_tensorboard_logger:
if summary_writer_kwargs is None:
summary_writer_kwargs = {}
elif "log_dir" in summary_writer_kwargs:
raise ValueError(
"You cannot pass `log_dir` as part of `summary_writer_kwargs`. `log_dir` is handled by lightning's "
"TensorBoardLogger logger."
)
tensorboard_logger = TensorBoardLogger(save_dir=exp_dir, name=name, version=version, **summary_writer_kwargs)
logger_list.append(tensorboard_logger)
logging.info("TensorboardLogger has been set up")
if create_wandb_logger:
if wandb_kwargs is None:
wandb_kwargs = {}
if "name" not in wandb_kwargs and "project" not in wandb_kwargs:
raise ValueError("name and project are required for wandb_logger")
# Update the wandb save_dir
if wandb_kwargs.get('save_dir', None) is None:
wandb_kwargs['save_dir'] = exp_dir
os.makedirs(wandb_kwargs['save_dir'], exist_ok=True)
wandb_logger = WandbLogger(version=version, **wandb_kwargs)
logger_list.append(wandb_logger)
logging.info("WandBLogger has been set up")
if create_mlflow_logger:
mlflow_logger = MLFlowLogger(run_name=version, **mlflow_kwargs)
logger_list.append(mlflow_logger)
logging.info("MLFlowLogger has been set up")
if create_dllogger_logger:
dllogger_logger = DLLogger(**dllogger_kwargs)
logger_list.append(dllogger_logger)
logging.info("DLLogger has been set up")
if create_clearml_logger:
clearml_logger = ClearMLLogger(
clearml_cfg=clearml_kwargs,
log_dir=log_dir,
prefix=name,
save_best_model=checkpoint_callback_params.save_best_model,
)
logger_list.append(clearml_logger)
logging.info("ClearMLLogger has been set up")
trainer._logger_connector.configure_logger(logger_list)
def configure_checkpointing(
trainer: 'pytorch_lightning.Trainer',
log_dir: Path,
name: str,
resume: bool,
params: 'DictConfig',
create_preemption_callback: bool,
):
""" Adds ModelCheckpoint to trainer. Raises CheckpointMisconfigurationError if trainer already has a ModelCheckpoint
callback
"""
for callback in trainer.callbacks:
if isinstance(callback, ModelCheckpoint):
raise CheckpointMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a ModelCheckpoint "
"and create_checkpoint_callback was set to True. Please either set create_checkpoint_callback "
"to False, or remove ModelCheckpoint from the lightning trainer"
)
# Create the callback and attach it to trainer
if "filepath" in params:
if params.filepath is not None:
logging.warning("filepath is deprecated. Please switch to dirpath and filename instead")
if params.dirpath is None:
params.dirpath = Path(params.filepath).parent
if params.filename is None:
params.filename = Path(params.filepath).name
with open_dict(params):
del params["filepath"]
if params.dirpath is None:
params.dirpath = Path(log_dir / 'checkpoints')
if params.filename is None:
params.filename = f'{name}--{{{params.monitor}:.4f}}-{{epoch}}'
if params.prefix is None:
params.prefix = name
NeMoModelCheckpoint.CHECKPOINT_NAME_LAST = params.filename + '-last'
logging.debug(params.dirpath)
logging.debug(params.filename)
logging.debug(params.prefix)
if "val" in params.monitor:
if (
trainer.max_epochs is not None
and trainer.max_epochs != -1
and trainer.max_epochs < trainer.check_val_every_n_epoch
):
logging.error(
"The checkpoint callback was told to monitor a validation value but trainer.max_epochs("
f"{trainer.max_epochs}) was less than trainer.check_val_every_n_epoch({trainer.check_val_every_n_epoch}"
f"). It is very likely this run will fail with ModelCheckpoint(monitor='{params.monitor}') not found "
"in the returned metrics. Please ensure that validation is run within trainer.max_epochs."
)
elif trainer.max_steps is not None and trainer.max_steps != -1:
logging.warning(
"The checkpoint callback was told to monitor a validation value and trainer's max_steps was set to "
f"{trainer.max_steps}. Please ensure that max_steps will run for at least "
f"{trainer.check_val_every_n_epoch} epochs to ensure that checkpointing will not error out."
)
checkpoint_callback = NeMoModelCheckpoint(n_resume=resume, **params)
checkpoint_callback.last_model_path = trainer.ckpt_path or ""
if 'mp_rank' in checkpoint_callback.last_model_path or 'tp_rank' in checkpoint_callback.last_model_path:
checkpoint_callback.last_model_path = uninject_model_parallel_rank(checkpoint_callback.last_model_path)
trainer.callbacks.append(checkpoint_callback)
if create_preemption_callback:
        # Check if CUDA is available, as preemption is supported only on GPUs
if torch.cuda.is_available():
## By default PreemptionCallback handles SIGTERM. To handle other signals pass the signal in the call as below:
## PreemptionCallback(checkpoint_callback, signal.SIGCHLD)
preemption_callback = PreemptionCallback(checkpoint_callback)
trainer.callbacks.append(preemption_callback)
else:
logging.info("Preemption is supported only on GPUs, disabling preemption")
def check_slurm(trainer):
try:
return trainer.accelerator_connector.is_slurm_managing_tasks
except AttributeError:
return False
class StatelessTimer(Timer):
"""Extension of PTL timers to be per run."""
def __init__(self, duration: timedelta = None, interval: str = Interval.step, verbose: bool = True,) -> None:
super().__init__(duration, interval, verbose)
# Override PTL Timer's state dict to not store elapsed time information so that we can restore and continue training.
def state_dict(self) -> Dict[str, Any]:
return {}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
return
def configure_no_restart_validation_training_loop(trainer: pytorch_lightning.Trainer) -> None:
    if type(trainer.fit_loop.epoch_loop) is not _TrainingEpochLoop:
warnings.warn("Detected custom epoch loop. Skipping no validation on restart support.", UserWarning)
return
## Pass trainer object to avoid trainer getting overwritten as None
loop = SkipResumeTrainingValidationLoop(trainer, trainer.min_steps, trainer.max_steps)
trainer.fit_loop.epoch_loop = loop
class SkipResumeTrainingValidationLoop(_TrainingEpochLoop):
"""
Extend the PTL Epoch loop to skip validating when resuming.
    This happens when resuming from a checkpoint that has already run validation, but loading restores
the training state before validation has run.
"""
def _should_check_val_fx(self) -> bool:
if self.restarting and self.global_step % self.trainer.val_check_batch == 0:
return False
return super()._should_check_val_fx()
def clean_exp_ckpt(exp_log_dir: Union[str, Path], remove_ckpt: bool = True, remove_nemo: bool = False):
"""
Helper method that removes Pytorch Lightning .ckpt files or NeMo .nemo files from the checkpoint directory
Args:
exp_log_dir: str path to the root directory of the current experiment.
remove_ckpt: bool, whether to remove all *.ckpt files in the checkpoints directory.
remove_nemo: bool, whether to remove all *.nemo files in the checkpoints directory.
"""
exp_log_dir = str(exp_log_dir)
if remove_ckpt:
logging.info("Deleting *.ckpt files ...")
ckpt_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.ckpt"))
for filepath in ckpt_files:
os.remove(filepath)
logging.info(f"Deleted file : {filepath}")
if remove_nemo:
logging.info("Deleting *.nemo files ...")
nemo_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.nemo"))
for filepath in nemo_files:
os.remove(filepath)
logging.info(f"Deleted file : {filepath}")
|
b3bbf1cb4881c5863089be23850b5abfea1e3f5f
|
0d543b6f877114fc7ff7f5c2485230f606f6d98d
|
/2022/5.py
|
ddd3325ac0f0d272080310656724d3fb26c56202
|
[] |
no_license
|
jonathanpaulson/AdventOfCode
|
eca9d1732ec80dd640d6eed01b3a18d3b3ee455b
|
215f18d7d5b9761ec181954d2e62b6fed3bd12f5
|
refs/heads/master
| 2023-01-08T00:25:09.651009
| 2022-12-25T05:39:11
| 2022-12-25T05:39:11
| 321,228,487
| 227
| 103
| null | 2022-12-01T09:31:36
| 2020-12-14T04:03:53
|
Python
|
UTF-8
|
Python
| false
| false
| 986
|
py
|
5.py
|
#!/usr/bin/python3
import sys
from copy import deepcopy
infile = sys.argv[1] if len(sys.argv)>1 else '5.in'
data = open(infile).read()
lines = [x for x in data.split('\n')]
S = []
cmds = []
for line in lines:
if line == '':
break
sz = (len(line)+1)//4
while len(S) < sz:
S.append([])
for i in range(len(S)):
ch = line[1+4*i]
if ch != ' ' and 'A'<=ch<='Z':
S[i].append(ch)
#print(S)
S1 = deepcopy(S)
S2 = deepcopy(S)
found = False
for cmd in lines:
if cmd == '':
found = True
continue
if not found:
continue
words = cmd.split()
qty = int(words[1])
from_ = int(words[3])-1
to_ = int(words[5])-1
for (ST, do_rev) in [(S1, True), (S2, False)]:
MOVE = ST[from_][:qty]
ST[from_] = ST[from_][qty:]
ST[to_] = (list(reversed(MOVE)) if do_rev else MOVE) + ST[to_]
print(''.join([s[0] for s in S1 if len(s)>0]))
print(''.join([s[0] for s in S2 if len(s)>0]))
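# Hedged note on the input this parser expects (the published AoC 2022 day 5
# sample, not the author's puzzle input): crate columns are 4 characters wide,
# so stack i's letter sits at index 1 + 4*i:
#     [D]
# [N] [C]
# [Z] [M] [P]
#  1   2   3
#
# move 1 from 2 to 1
# S1 moves crates one at a time (hence the reversal), S2 moves each slice intact.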
|
fb81846ee89cfcd9af0c5f299e4778058835e1a0
|
fe131d9715049e3339d1ab14f3e9a0c97a47c5db
|
/link_tracker_usability/__manifest__.py
|
cd5ab778653ef5d2f10a382ab70349b9c784e363
|
[] |
no_license
|
akretion/odoo-usability
|
12f5412a27dda2de2436a282d36222f596d219bc
|
5f731d18f1d9016a2faf6ef439be6caf2597aa16
|
refs/heads/16.0
| 2023-07-21T14:41:54.002291
| 2023-07-15T13:40:48
| 2023-07-15T13:40:48
| 28,053,269
| 115
| 183
| null | 2023-08-31T14:04:39
| 2014-12-15T19:51:41
|
Python
|
UTF-8
|
Python
| false
| false
| 770
|
py
|
__manifest__.py
|
# Copyright 2019-2021 Akretion France (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Link Tracker Usability',
'version': '14.0.1.0.0',
'category': 'Marketing',
'license': 'AGPL-3',
'summary': 'Improve usability for link tracker',
'description': """
Link Tracker Usability
======================
Several small usability improvements.
This module has been written by Alexis de Lattre from Akretion
<alexis.delattre@akretion.com>.
""",
'author': 'Akretion',
'website': 'http://www.akretion.com',
'depends': ['link_tracker'],
'data': [
'views/link_tracker_click.xml',
],
'installable': False,
}
|
887069fddf7656a2ebcd12ecda80ff4e68a3fcde
|
636849fc7edd9dcb095cf3410a121ab37de69f02
|
/SoftLayer/shell/cmd_env.py
|
30f1681ab30099627513769ee5fa3cb3d63ada65
|
[
"MIT"
] |
permissive
|
softlayer/softlayer-python
|
bcb09306c3367fdbd2f1407f770c4959729b074c
|
5798373055d9f34dfd531d81638a64d0a7901a13
|
refs/heads/master
| 2023-08-23T19:32:36.990701
| 2023-08-21T03:29:44
| 2023-08-21T03:29:44
| 622,291
| 126
| 182
|
MIT
| 2023-09-14T15:04:48
| 2010-04-21T20:36:31
|
Python
|
UTF-8
|
Python
| false
| false
| 461
|
py
|
cmd_env.py
|
"""Print environment variables."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command()
@environment.pass_env
def cli(env):
"""Print environment variables."""
    filtered_vars = {k: v for k, v in env.vars.items()
                     if not k.startswith('_')}
env.fout(formatting.iter_to_table(filtered_vars))
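# Hedged illustration: if env.vars were {'_private': 'hidden', 'timeout': '30'},
# filtered_vars would be {'timeout': '30'} -- underscore-prefixed keys are
# treated as internal and excluded from the printed table.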
|
7215452f39d7b368ec0994e739e502c3549af71f
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/matchmaker/active_test_confirm_view_model.py
|
8fdfb608a6066aa8eaac991b645efeddd3818332
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
active_test_confirm_view_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/matchmaker/active_test_confirm_view_model.py
from gui.impl.gen.view_models.windows.full_screen_dialog_window_model import FullScreenDialogWindowModel
class ActiveTestConfirmViewModel(FullScreenDialogWindowModel):
__slots__ = ('onOpenPortalClicked',)
def __init__(self, properties=14, commands=4):
super(ActiveTestConfirmViewModel, self).__init__(properties=properties, commands=commands)
def getClusterName(self):
return self._getString(11)
def setClusterName(self, value):
self._setString(11, value)
def getTimeRangeStart(self):
return self._getNumber(12)
def setTimeRangeStart(self, value):
self._setNumber(12, value)
def getTimeRangeEnd(self):
return self._getNumber(13)
def setTimeRangeEnd(self, value):
self._setNumber(13, value)
def _initialize(self):
super(ActiveTestConfirmViewModel, self)._initialize()
self._addStringProperty('clusterName', '')
self._addNumberProperty('timeRangeStart', 0)
self._addNumberProperty('timeRangeEnd', 0)
self.onOpenPortalClicked = self._addCommand('onOpenPortalClicked')
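# Hypothetical usage sketch (the surrounding game framework is assumed):
# vm = ActiveTestConfirmViewModel()
# vm.setClusterName('EU1')          # string property in slot 11
# vm.setTimeRangeStart(1663826400)  # number property in slot 12
# vm.setTimeRangeEnd(1663830000)    # number property in slot 13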
|
e475c1ef6448cd84c5004575f32ca364491d0d5a
|
554718851656376ad2bceb282de30459167ffeb2
|
/tests/mxnet/test_hook_save_all.py
|
4c10d8f927960135e14f677ec97bd8e398c5efa5
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
test_hook_save_all.py
|
# Standard Library
import shutil
from datetime import datetime
# First Party
from smdebug.mxnet import SaveConfig
from smdebug.mxnet.hook import Hook as t_hook
from smdebug.trials import create_trial
# Local
from .mnist_gluon_model import run_mnist_gluon_model
def test_save_all(hook=None, out_dir=None):
hook_created = False
if hook is None:
hook_created = True
save_config = SaveConfig(save_steps=[0, 1, 2, 3])
run_id = "trial_" + datetime.now().strftime("%Y%m%d-%H%M%S%f")
out_dir = "/tmp/" + run_id
print("Registering the hook with out_dir {}".format(out_dir))
hook = t_hook(out_dir=out_dir, save_config=save_config, save_all=True)
run_mnist_gluon_model(hook=hook, num_steps_train=7, num_steps_eval=5)
# assert for steps and tensor_names
print("Created the trial with out_dir {}".format(out_dir))
tr = create_trial(out_dir)
tensor_list = tr.tensor_names()
assert tr
assert len(tr.steps()) == 4
    # Some tensor names, like inputs and outputs, can't be retrieved from the
    # training session, so only the tensor count is asserted here.
    # The expected count of 46 comes from the index file; if no assertion
    # fails, the script saved all tensors.
assert len(tensor_list) == 46
if hook_created:
shutil.rmtree(out_dir)
def test_save_all_hook_from_json():
from smdebug.core.json_config import CONFIG_FILE_PATH_ENV_STR
import os
out_dir = "/tmp/newlogsRunTest2/test_hook_save_all_hook_from_json"
shutil.rmtree(out_dir, True)
os.environ[
CONFIG_FILE_PATH_ENV_STR
] = "tests/mxnet/test_json_configs/test_hook_save_all_hook.json"
hook = t_hook.create_from_json_file()
test_save_all(hook, out_dir)
# delete output
shutil.rmtree(out_dir, True)
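# Note for clarity: SaveConfig(save_steps=[0, 1, 2, 3]) persists tensors at
# exactly those four global steps, which is why test_save_all asserts
# len(tr.steps()) == 4 on the created trial.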
|
463c591767a62922699eec13d558de64e6cdfc1d
|
22c1865e6768e0ffae2a32db8044c155e245b3d7
|
/tests/gems/arch/test_pacman.py
|
283746f39af109ef0a00190343d0520b64be5cda
|
[
"Zlib"
] |
permissive
|
vinifmor/bauh
|
d33ffbe40c18c8db9f8fe053a26846420cae30f9
|
d80b23a952808c883045759270776847fcd7c4ec
|
refs/heads/master
| 2023-08-18T20:59:36.763518
| 2023-03-07T11:06:50
| 2023-03-07T11:06:50
| 201,946,404
| 815
| 89
|
Zlib
| 2023-09-09T15:16:23
| 2019-08-12T14:27:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,193
|
py
|
test_pacman.py
|
import os
import warnings
from unittest import TestCase
from unittest.mock import patch, Mock
from bauh import __app_name__
from bauh.gems.arch import pacman
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
class PacmanTest(TestCase):
@classmethod
def setUpClass(cls):
warnings.filterwarnings('ignore', category=DeprecationWarning)
def test_list_ignored_packages(self):
ignored = pacman.list_ignored_packages(FILE_DIR + '/resources/pacman_ign_pkgs.conf')
self.assertIsNotNone(ignored)
self.assertEqual(2, len(ignored))
self.assertIn('google-chrome', ignored)
self.assertIn('firefox', ignored)
def test_list_ignored_packages__no_ignored_packages(self):
ignored = pacman.list_ignored_packages(FILE_DIR + '/resources/pacman.conf')
self.assertIsNotNone(ignored)
self.assertEqual(0, len(ignored))
@patch(f'{__app_name__}.gems.arch.pacman.run_cmd', return_value="""
Name : package-test
Version : 3.4.4-1
Description : Test
Depends On : embree freetype2 libglvnd
Optional Deps : lib32-vulkan-icd-loader: Vulkan support [installed]
Required By : None
""")
def test_map_optional_deps__no_remote_and_not_installed__only_one_installed_with_description(self, run_cmd: Mock):
res = pacman.map_optional_deps(('package-test',), remote=False, not_installed=True)
run_cmd.assert_called_once_with('pacman -Qi package-test')
self.assertEqual({'package-test': {}}, res)
@patch(f'{__app_name__}.gems.arch.pacman.run_cmd', return_value="""
Name : package-test
Version : 3.4.4-1
Description : Test
Depends On : embree freetype2 libglvnd
Optional Deps : lib32-vulkan-icd-loader: Vulkan support
Required By : None
""")
def test_map_optional_deps__no_remote_and_not_installed__only_one_not_installed_with_description(self, run_cmd: Mock):
res = pacman.map_optional_deps(('package-test',), remote=False, not_installed=True)
run_cmd.assert_called_once_with('pacman -Qi package-test')
self.assertEqual({'package-test': {'lib32-vulkan-icd-loader': 'Vulkan support'}}, res)
@patch(f'{__app_name__}.gems.arch.pacman.run_cmd', return_value="""
Name : package-test
Version : 3.4.4-1
Description : Test
Depends On : embree freetype2 libglvnd
Optional Deps : pipewire-alsa
Required By : None
""")
def test_map_optional_deps__no_remote_and_not_installed__only_one_not_installed_no_description(self, run_cmd: Mock):
res = pacman.map_optional_deps(('package-test',), remote=False, not_installed=True)
run_cmd.assert_called_once_with('pacman -Qi package-test')
self.assertEqual({'package-test': {'pipewire-alsa': ''}}, res)
@patch(f'{__app_name__}.gems.arch.pacman.run_cmd', return_value="""
Name : package-test
Version : 3.4.4-1
Description : Test
Depends On : embree freetype2 libglvnd
Optional Deps : pipewire-alsa [installed]
Required By : None
""")
def test_map_optional_deps__no_remote_and_not_installed__only_one_installed_no_description(self, run_cmd: Mock):
res = pacman.map_optional_deps(('package-test',), remote=False, not_installed=True)
run_cmd.assert_called_once_with('pacman -Qi package-test')
self.assertEqual({'package-test': {}}, res)
@patch(f'{__app_name__}.gems.arch.pacman.run_cmd', return_value="""
Name : package-test
Version : 3.4.4-1
Description : Test
Depends On : embree freetype2 libglvnd libtheora
Optional Deps : pipewire-alsa
pipewire-pulse [installed]
pipewire
lib32-vulkan-icd-loader: Vulkan support [installed]
Required By : None
""")
def test_map_optional_deps__no_remote_and_not_installed__several(self, run_cmd: Mock):
res = pacman.map_optional_deps(('package-test',), remote=False, not_installed=True)
run_cmd.assert_called_once_with('pacman -Qi package-test')
self.assertEqual({'package-test': {'pipewire-alsa': '', 'pipewire': ''}}, res)
|
6edbaf64c4b140ca2e246a78c46d464ff5419f5f
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/keras_cv/losses/simclr_loss.py
|
ad48d8c44171f1efa29125521b0d0726761e432f
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,809
|
py
|
simclr_loss.py
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
LARGE_NUM = 1e9
def l2_normalize(x, axis):
epsilon = keras.backend.epsilon()
power_sum = ops.sum(ops.square(x), axis=axis, keepdims=True)
norm = ops.reciprocal(ops.sqrt(ops.maximum(power_sum, epsilon)))
return ops.multiply(x, norm)
@keras_cv_export("keras_cv.losses.SimCLRLoss")
class SimCLRLoss(keras.losses.Loss):
"""Implements SimCLR Cosine Similarity loss.
SimCLR loss is used for contrastive self-supervised learning.
Args:
temperature: a float value between 0 and 1, used as a scaling factor for
cosine similarity.
References:
- [SimCLR paper](https://arxiv.org/pdf/2002.05709)
"""
def __init__(self, temperature, **kwargs):
super().__init__(**kwargs)
self.temperature = temperature
def call(self, projections_1, projections_2):
"""Computes SimCLR loss for a pair of projections in a contrastive
learning trainer.
Note that unlike most loss functions, this should not be called with
y_true and y_pred, but with two unlabeled projections. It can otherwise
be treated as a normal loss function.
Args:
projections_1: a tensor with the output of the first projection
model in a contrastive learning trainer
projections_2: a tensor with the output of the second projection
model in a contrastive learning trainer
Returns:
A tensor with the SimCLR loss computed from the input projections
"""
# Normalize the projections
projections_1 = l2_normalize(projections_1, axis=1)
projections_2 = l2_normalize(projections_2, axis=1)
# Produce artificial labels, 1 for each image in the batch.
batch_size = ops.shape(projections_1)[0]
labels = ops.one_hot(ops.arange(batch_size), batch_size * 2)
masks = ops.one_hot(ops.arange(batch_size), batch_size)
# Compute logits
logits_11 = (
ops.matmul(projections_1, ops.transpose(projections_1))
/ self.temperature
)
logits_11 = logits_11 - ops.cast(masks * LARGE_NUM, logits_11.dtype)
logits_22 = (
ops.matmul(projections_2, ops.transpose(projections_2))
/ self.temperature
)
logits_22 = logits_22 - ops.cast(masks * LARGE_NUM, logits_22.dtype)
logits_12 = (
ops.matmul(projections_1, ops.transpose(projections_2))
/ self.temperature
)
logits_21 = (
ops.matmul(projections_2, ops.transpose(projections_1))
/ self.temperature
)
loss_a = keras.losses.categorical_crossentropy(
labels, ops.concatenate([logits_12, logits_11], 1), from_logits=True
)
loss_b = keras.losses.categorical_crossentropy(
labels, ops.concatenate([logits_21, logits_22], 1), from_logits=True
)
return loss_a + loss_b
def get_config(self):
config = super().get_config()
config.update({"temperature": self.temperature})
return config
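# Hypothetical usage sketch; shapes and values are illustrative and numpy is
# assumed to be available alongside keras.
if __name__ == "__main__":
    import numpy as np
    loss_fn = SimCLRLoss(temperature=0.5)
    # Two projection batches of the same 8 images under different augmentations.
    z1 = np.random.normal(size=(8, 128)).astype("float32")
    z2 = np.random.normal(size=(8, 128)).astype("float32")
    # __call__ applies the standard keras loss reduction over the batch.
    print(float(loss_fn(z1, z2)))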
|
0d1eed98c0f771d9d2a7d8c7d6c3fce41b1f5051
|
6c3989a6de8521ae478edcd6f457f54baa57f289
|
/plugin/save_command.py
|
170712df861d0b9a7718078506645df0aff7f4a9
|
[
"MIT"
] |
permissive
|
sublimelsp/LSP
|
18ba4b72ad390ee4da713d9b383869112c6d2d98
|
e6bbc8ffecd9d705c884c69160132265294c6430
|
refs/heads/main
| 2023-08-31T13:29:36.341484
| 2023-08-18T16:07:57
| 2023-08-18T16:07:57
| 87,645,313
| 909
| 138
|
MIT
| 2023-09-13T19:55:06
| 2017-04-08T15:51:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,459
|
py
|
save_command.py
|
from .core.registry import LspTextCommand
from .core.settings import userprefs
from .core.typing import Callable, List, Type
from abc import ABCMeta, abstractmethod
import sublime
import sublime_plugin
class SaveTask(metaclass=ABCMeta):
"""
Base class for tasks that run on save.
Note: The whole task runs on the async thread.
"""
@classmethod
@abstractmethod
def is_applicable(cls, view: sublime.View) -> bool:
pass
def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):
self._task_runner = task_runner
self._on_done = on_done
self._completed = False
self._cancelled = False
self._status_key = type(self).__name__
def run_async(self) -> None:
self._erase_view_status()
sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)
def _on_timeout(self) -> None:
if not self._completed and not self._cancelled:
self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))
self._cancelled = True
self._on_done()
def cancel(self) -> None:
self._cancelled = True
def _set_view_status(self, text: str) -> None:
self._task_runner.view.set_status(self._status_key, text)
sublime.set_timeout_async(self._erase_view_status, 5000)
def _erase_view_status(self) -> None:
self._task_runner.view.erase_status(self._status_key)
def _on_complete(self) -> None:
assert not self._completed
self._completed = True
if not self._cancelled:
self._on_done()
def _purge_changes_async(self) -> None:
# Supermassive hack that will go away later.
listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])
for listener in listeners:
if listener.__class__.__name__ == 'DocumentSyncListener':
listener.purge_changes_async() # type: ignore
break
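class ExampleNoopSaveTask(SaveTask):
    """Hypothetical sketch (not one of the plugin's real tasks): a minimal
    SaveTask subclass showing the expected lifecycle -- applicability check,
    async work, then signalling completion."""
    @classmethod
    def is_applicable(cls, view: sublime.View) -> bool:
        # Illustrative gate: only run for views backed by a file on disk.
        return view.file_name() is not None
    def run_async(self) -> None:
        super().run_async()  # arms the shared timeout handling
        # ... the actual on-save work would go here ...
        sublime.set_timeout_async(self._on_complete)
# Registration would look like: LspSaveCommand.register_task(ExampleNoopSaveTask)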
class LspSaveCommand(LspTextCommand):
"""
    A command used as a substitute for the native save command. Runs code actions
    and document formatting before triggering the native save command.
"""
_tasks = [] # type: List[Type[SaveTask]]
@classmethod
def register_task(cls, task: Type[SaveTask]) -> None:
assert task not in cls._tasks
cls._tasks.append(task)
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self._pending_tasks = [] # type: List[SaveTask]
def run(self, edit: sublime.Edit) -> None:
if self._pending_tasks:
for task in self._pending_tasks:
task.cancel()
self._pending_tasks = []
sublime.set_timeout_async(self._trigger_on_pre_save_async)
for Task in self._tasks:
if Task.is_applicable(self.view):
self._pending_tasks.append(Task(self, self._on_task_completed_async))
if self._pending_tasks:
sublime.set_timeout_async(self._run_next_task_async)
else:
self._trigger_native_save()
def _trigger_on_pre_save_async(self) -> None:
# Supermassive hack that will go away later.
listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
for listener in listeners:
if listener.__class__.__name__ == 'DocumentSyncListener':
listener.trigger_on_pre_save_async() # type: ignore
break
def _run_next_task_async(self) -> None:
current_task = self._pending_tasks[0]
current_task.run_async()
def _on_task_completed_async(self) -> None:
self._pending_tasks.pop(0)
if self._pending_tasks:
self._run_next_task_async()
else:
self._trigger_native_save()
def _trigger_native_save(self) -> None:
# Triggered from set_timeout to preserve original semantics of on_pre_save handling
sublime.set_timeout(lambda: self.view.run_command('save', {"async": True}))
class LspSaveAllCommand(sublime_plugin.WindowCommand):
def run(self) -> None:
done = set()
for view in self.window.views():
buffer_id = view.buffer_id()
if buffer_id in done:
continue
if not view.is_dirty():
continue
done.add(buffer_id)
view.run_command("lsp_save", None)
|
3cc8915156bda55ed77da7123137d5d76b7fe58b
|
23c0a6071860971616326ffeeac0b56135c5c6ee
|
/tests/test_resources.py
|
3721134fa9c5045917908fdd1d9cf07966207e5b
|
[
"MIT"
] |
permissive
|
sissaschool/xmlschema
|
36d74acb2a36459512855ea0264cc4d1ebbef8f5
|
6bf6d8e6d19cfc0ba151effb25cc57c3789d16fd
|
refs/heads/master
| 2023-08-31T11:07:35.750326
| 2023-08-07T09:47:09
| 2023-08-07T09:47:09
| 70,905,710
| 272
| 55
|
MIT
| 2023-08-25T20:03:20
| 2016-10-14T11:52:54
|
Python
|
UTF-8
|
Python
| false
| false
| 70,655
|
py
|
test_resources.py
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""Tests concerning XML resources"""
import unittest
import os
import pathlib
import platform
import warnings
from io import StringIO, BytesIO
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import urlsplit, uses_relative
from pathlib import Path, PurePath, PureWindowsPath, PurePosixPath
from unittest.mock import patch, MagicMock
from xml.etree import ElementTree
try:
import lxml.etree as lxml_etree
except ImportError:
lxml_etree = None
from elementpath.etree import PyElementTree, is_etree_element
from xmlschema import fetch_namespaces, fetch_resource, normalize_url, \
fetch_schema, fetch_schema_locations, XMLResource, XMLResourceError, XMLSchema
from xmlschema.names import XSD_NAMESPACE
import xmlschema.resources
from xmlschema.resources import is_url, is_local_url, is_remote_url, \
url_path_is_file, normalize_locations
from xmlschema.testing import SKIP_REMOTE_TESTS
TEST_CASES_DIR = str(pathlib.Path(__file__).absolute().parent.joinpath('test_cases'))
DRIVE_REGEX = '(/[a-zA-Z]:|/)' if platform.system() == 'Windows' else ''
XML_WITH_NAMESPACES = '<pfa:root xmlns:pfa="http://xmlschema.test/nsa">\n' \
' <pfb:elem xmlns:pfb="http://xmlschema.test/nsb"/>\n' \
'</pfa:root>'
def casepath(relative_path):
return str(pathlib.Path(TEST_CASES_DIR).joinpath(relative_path))
def is_windows_path(path):
"""Checks if the path argument is a Windows platform path."""
return '\\' in path or ':' in path or '|' in path
def add_leading_slash(path):
return '/' + path if path and path[0] not in ('/', '\\') else path
def filter_windows_path(path):
if path.startswith('/\\'):
return path[1:]
elif path and path[0] not in ('/', '\\'):
return '/' + path
else:
return path
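# Hedged illustration: filter_windows_path('/\\host\\share') returns
# '\\host\\share', filter_windows_path('c:/dir') returns '/c:/dir', and an
# already-rooted posix path such as '/tmp/x' is returned unchanged.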
class TestResources(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.vh_dir = casepath('examples/vehicles')
cls.vh_xsd_file = casepath('examples/vehicles/vehicles.xsd')
cls.vh_xml_file = casepath('examples/vehicles/vehicles.xml')
cls.col_dir = casepath('examples/collection')
cls.col_xsd_file = casepath('examples/collection/collection.xsd')
cls.col_xml_file = casepath('examples/collection/collection.xml')
def check_url(self, url, expected):
url_parts = urlsplit(url)
if urlsplit(expected).scheme not in uses_relative:
expected = add_leading_slash(expected)
expected_parts = urlsplit(expected, scheme='file')
self.assertEqual(url_parts.scheme, expected_parts.scheme,
"%r: Schemes differ." % url)
self.assertEqual(url_parts.netloc, expected_parts.netloc,
"%r: Netloc parts differ." % url)
self.assertEqual(url_parts.query, expected_parts.query,
"%r: Query parts differ." % url)
self.assertEqual(url_parts.fragment, expected_parts.fragment,
"%r: Fragment parts differ." % url)
if is_windows_path(url_parts.path) or is_windows_path(expected_parts.path):
path = PureWindowsPath(filter_windows_path(url_parts.path))
expected_path = PureWindowsPath(filter_windows_path(expected_parts.path))
else:
path = PurePath(url_parts.path)
expected_path = PurePath(expected_parts.path)
self.assertEqual(path, expected_path, "%r: Paths differ." % url)
def test_path_from_uri(self):
_PurePath = xmlschema.resources._PurePath
_PosixPurePath = xmlschema.resources._PurePosixPath
_WindowsPurePath = xmlschema.resources._PureWindowsPath
with self.assertRaises(ValueError) as ec:
_PurePath.from_uri('')
self.assertEqual(str(ec.exception), 'Empty URI provided!')
path = _PurePath.from_uri('https://example.com/names/?name=foo')
self.assertIsInstance(path, _PosixPurePath)
self.assertEqual(str(path), '/names')
path = _PosixPurePath.from_uri('file:///home/foo/names/?name=foo')
self.assertIsInstance(path, _PosixPurePath)
self.assertEqual(str(path), '/home/foo/names')
path = _PosixPurePath.from_uri('file:///home/foo/names#foo')
self.assertIsInstance(path, _PosixPurePath)
self.assertEqual(str(path), '/home/foo/names')
path = _PosixPurePath.from_uri('file:///home\\foo\\names#foo')
self.assertIsInstance(path, _WindowsPurePath)
self.assertTrue(path.as_posix().endswith('/home/foo/names'))
path = _PosixPurePath.from_uri('file:///c:/home/foo/names/')
self.assertIsInstance(path, _WindowsPurePath)
self.assertEqual(str(path), r'c:\home\foo\names')
self.assertEqual(path.as_uri(), 'file:///c:/home/foo/names')
path = _PosixPurePath.from_uri('file:c:/home/foo/names/')
self.assertIsInstance(path, _WindowsPurePath)
self.assertEqual(str(path), r'c:\home\foo\names')
self.assertEqual(path.as_uri(), 'file:///c:/home/foo/names')
with self.assertRaises(ValueError) as ec:
_PurePath.from_uri('file://c:/home/foo/names/')
self.assertEqual(str(ec.exception), "Invalid URI 'file://c:/home/foo/names/'")
@unittest.skipIf(platform.system() == 'Windows', "Run only on posix systems")
def test_normalize_url_posix(self):
url1 = "https://example.com/xsd/other_schema.xsd"
self.check_url(normalize_url(url1, base_url="/path_my_schema/schema.xsd"), url1)
parent_dir = os.path.dirname(os.getcwd())
self.check_url(normalize_url('../dir1/./dir2'), os.path.join(parent_dir, 'dir1/dir2'))
self.check_url(normalize_url('../dir1/./dir2', '/home', keep_relative=True),
'file:///dir1/dir2')
self.check_url(normalize_url('../dir1/./dir2', 'file:///home'), 'file:///dir1/dir2')
self.check_url(normalize_url('other.xsd', 'file:///home'), 'file:///home/other.xsd')
self.check_url(normalize_url('other.xsd', 'file:///home/'), 'file:///home/other.xsd')
self.check_url(normalize_url('file:other.xsd', 'file:///home'), 'file:///home/other.xsd')
cwd = os.getcwd()
cwd_url = 'file://{}/'.format(cwd) if cwd.startswith('/') else 'file:///{}/'.format(cwd)
self.check_url(normalize_url('other.xsd', keep_relative=True), 'file:other.xsd')
self.check_url(normalize_url('file:other.xsd', keep_relative=True), 'file:other.xsd')
self.check_url(normalize_url('file:other.xsd'), cwd_url + 'other.xsd')
self.check_url(normalize_url('file:other.xsd', 'https://site/base', True), 'file:other.xsd')
self.check_url(normalize_url('file:other.xsd', 'http://site/base'), cwd_url + 'other.xsd')
self.check_url(normalize_url('dummy path.xsd'), cwd_url + 'dummy%20path.xsd')
self.check_url(normalize_url('dummy path.xsd', 'http://site/base'),
'http://site/base/dummy%20path.xsd')
self.check_url(normalize_url('dummy path.xsd', 'file://host/home/'),
PurePath('//host/home/dummy path.xsd').as_uri())
url = "file:///c:/Downloads/file.xsd"
self.check_url(normalize_url(url, base_url="file:///d:/Temp/"), url)
def test_normalize_url_windows(self):
win_abs_path1 = 'z:\\Dir_1_0\\Dir2-0\\schemas/XSD_1.0/XMLSchema.xsd'
win_abs_path2 = 'z:\\Dir-1.0\\Dir-2_0\\'
self.check_url(normalize_url(win_abs_path1), win_abs_path1)
self.check_url(normalize_url('k:\\Dir3\\schema.xsd', win_abs_path1),
'file:///k:/Dir3/schema.xsd')
self.check_url(normalize_url('k:\\Dir3\\schema.xsd', win_abs_path2),
'file:///k:/Dir3/schema.xsd')
self.check_url(normalize_url('schema.xsd', win_abs_path2),
'file:///z:/Dir-1.0/Dir-2_0/schema.xsd')
self.check_url(normalize_url('xsd1.0/schema.xsd', win_abs_path2),
'file:///z:/Dir-1.0/Dir-2_0/xsd1.0/schema.xsd')
with self.assertRaises(ValueError) as ec:
normalize_url('file:///\\k:\\Dir A\\schema.xsd')
self.assertIn("Invalid URI", str(ec.exception))
def test_normalize_url_unc_paths__issue_246(self):
url = PureWindowsPath(r'\\host\share\file.xsd').as_uri()
self.assertNotEqual(normalize_url(r'\\host\share\file.xsd'),
url) # file://host/share/file.xsd
self.assertEqual(normalize_url(r'\\host\share\file.xsd'),
url.replace('file://', 'file:////'))
    def test_normalize_url_unc_paths__issue_268(self):
unc_path = r'\\filer01\MY_HOME\dev\XMLSCHEMA\test.xsd'
url = PureWindowsPath(unc_path).as_uri()
self.assertEqual(str(PureWindowsPath(unc_path)), unc_path)
self.assertEqual(url, 'file://filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
        # Same UNC path as URI, with the host inserted into the path part.
url_host_in_path = url.replace('file://', 'file:////')
self.assertEqual(url_host_in_path, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
self.assertEqual(normalize_url(unc_path), url_host_in_path)
with patch.object(os, 'name', 'nt'):
self.assertEqual(os.name, 'nt')
path = PurePath(unc_path)
self.assertIs(path.__class__, PureWindowsPath)
self.assertEqual(path.as_uri(), url)
self.assertEqual(xmlschema.resources.os.name, 'nt')
path = xmlschema.resources._PurePath(unc_path)
self.assertIs(path.__class__, xmlschema.resources._PureWindowsPath)
self.assertEqual(path.as_uri(), url_host_in_path)
self.assertEqual(normalize_url(unc_path), url_host_in_path)
with patch.object(os, 'name', 'posix'):
self.assertEqual(os.name, 'posix')
path = PurePath(unc_path)
self.assertIs(path.__class__, PurePosixPath)
self.assertEqual(str(path), unc_path)
self.assertRaises(ValueError, path.as_uri) # Not recognized as UNC path
self.assertEqual(xmlschema.resources.os.name, 'posix')
path = xmlschema.resources._PurePath(unc_path)
self.assertIs(path.__class__, xmlschema.resources._PurePosixPath)
self.assertEqual(str(path), unc_path)
self.assertNotEqual(path.as_uri(), url)
self.assertEqual(normalize_url(unc_path), url_host_in_path)
    def test_normalize_url_with_base_unc_path(self):
base_unc_path = '\\\\filer01\\MY_HOME\\'
base_url = PureWindowsPath(base_unc_path).as_uri()
self.assertEqual(str(PureWindowsPath(base_unc_path)), base_unc_path)
self.assertEqual(base_url, 'file://filer01/MY_HOME/')
        # Same UNC path as URI, with the host inserted into the path part.
base_url_host_in_path = base_url.replace('file://', 'file:////')
self.assertEqual(base_url_host_in_path, 'file:////filer01/MY_HOME/')
self.assertEqual(normalize_url(base_unc_path), base_url_host_in_path)
with patch.object(os, 'name', 'nt'):
self.assertEqual(os.name, 'nt')
path = PurePath('dir/file')
self.assertIs(path.__class__, PureWindowsPath)
url = normalize_url(r'dev\XMLSCHEMA\test.xsd', base_url=base_unc_path)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
url = normalize_url(r'dev\XMLSCHEMA\test.xsd', base_url=base_url)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
url = normalize_url(r'dev\XMLSCHEMA\test.xsd', base_url=base_url_host_in_path)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
with patch.object(os, 'name', 'posix'):
self.assertEqual(os.name, 'posix')
path = PurePath('dir/file')
self.assertIs(path.__class__, PurePosixPath)
url = normalize_url(r'dev\XMLSCHEMA\test.xsd', base_url=base_unc_path)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
url = normalize_url(r'dev/XMLSCHEMA/test.xsd', base_url=base_url)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
url = normalize_url(r'dev/XMLSCHEMA/test.xsd', base_url=base_url_host_in_path)
self.assertEqual(url, 'file:////filer01/MY_HOME/dev/XMLSCHEMA/test.xsd')
def test_normalize_url_slashes(self):
# Issue #116
url = '//anaconda/envs/testenv/lib/python3.6/site-packages/xmlschema/validators/schemas/'
if os.name == 'posix':
self.assertEqual(normalize_url(url), pathlib.PurePath(url).as_uri())
else:
# On Windows // is interpreted as a network share UNC path
self.assertEqual(os.name, 'nt')
self.assertEqual(normalize_url(url),
pathlib.PurePath(url).as_uri().replace('file://', 'file:////'))
self.assertRegex(normalize_url('/root/dir1/schema.xsd'),
f'file://{DRIVE_REGEX}/root/dir1/schema.xsd')
self.assertRegex(normalize_url('////root/dir1/schema.xsd'),
f'file://{DRIVE_REGEX}/root/dir1/schema.xsd')
self.assertRegex(normalize_url('dir2/schema.xsd', '////root/dir1'),
f'file://{DRIVE_REGEX}/root/dir1/dir2/schema.xsd')
self.assertEqual(normalize_url('//root/dir1/schema.xsd'),
'file:////root/dir1/schema.xsd')
self.assertEqual(normalize_url('dir2/schema.xsd', '//root/dir1/'),
'file:////root/dir1/dir2/schema.xsd')
self.assertEqual(normalize_url('dir2/schema.xsd', '//root/dir1'),
'file:////root/dir1/dir2/schema.xsd')
def test_normalize_url_hash_character(self):
url = normalize_url('issue #000.xml', 'file:///dir1/dir2/')
self.assertRegex(url, f'file://{DRIVE_REGEX}/dir1/dir2/issue%20%23000.xml')
url = normalize_url('data.xml', 'file:///dir1/dir2/issue%20001')
self.assertRegex(url, f'file://{DRIVE_REGEX}/dir1/dir2/issue%20001/data.xml')
url = normalize_url('data.xml', '/dir1/dir2/issue #002')
self.assertRegex(url, f'{DRIVE_REGEX}/dir1/dir2/issue%20%23002/data.xml')
def test_is_url_function(self):
self.assertTrue(is_url(self.col_xsd_file))
self.assertFalse(is_url('http://example.com['))
self.assertTrue(is_url(b'http://example.com'))
self.assertFalse(is_url(' \t<root/>'))
self.assertFalse(is_url(b' <root/>'))
self.assertFalse(is_url('line1\nline2'))
self.assertFalse(is_url(None))
def test_is_local_url_function(self):
self.assertTrue(is_local_url(self.col_xsd_file))
self.assertTrue(is_local_url(Path(self.col_xsd_file)))
self.assertTrue(is_local_url('/home/user/'))
self.assertFalse(is_local_url('<home/>'))
self.assertTrue(is_local_url('/home/user/schema.xsd'))
self.assertTrue(is_local_url(' /home/user/schema.xsd '))
self.assertTrue(is_local_url('C:\\Users\\foo\\schema.xsd'))
self.assertTrue(is_local_url(' file:///home/user/schema.xsd'))
self.assertFalse(is_local_url('http://example.com/schema.xsd'))
self.assertTrue(is_local_url(b'/home/user/'))
self.assertFalse(is_local_url(b'<home/>'))
self.assertTrue(is_local_url(b'/home/user/schema.xsd'))
self.assertTrue(is_local_url(b' /home/user/schema.xsd '))
self.assertTrue(is_local_url(b'C:\\Users\\foo\\schema.xsd'))
self.assertTrue(is_local_url(b' file:///home/user/schema.xsd'))
self.assertFalse(is_local_url(b'http://example.com/schema.xsd'))
def test_is_remote_url_function(self):
self.assertFalse(is_remote_url(self.col_xsd_file))
self.assertFalse(is_remote_url('/home/user/'))
self.assertFalse(is_remote_url('<home/>'))
self.assertFalse(is_remote_url('/home/user/schema.xsd'))
self.assertFalse(is_remote_url(' file:///home/user/schema.xsd'))
self.assertTrue(is_remote_url(' http://example.com/schema.xsd'))
self.assertFalse(is_remote_url(b'/home/user/'))
self.assertFalse(is_remote_url(b'<home/>'))
self.assertFalse(is_remote_url(b'/home/user/schema.xsd'))
self.assertFalse(is_remote_url(b' file:///home/user/schema.xsd'))
self.assertTrue(is_remote_url(b' http://example.com/schema.xsd'))
def test_url_path_is_file_function(self):
self.assertTrue(url_path_is_file(self.col_xml_file))
self.assertTrue(url_path_is_file(normalize_url(self.col_xml_file)))
self.assertFalse(url_path_is_file(self.col_dir))
self.assertFalse(url_path_is_file('http://example.com/'))
with patch('platform.system', MagicMock(return_value="Windows")):
self.assertFalse(url_path_is_file('file:///c:/Windows/unknown'))
def test_normalize_locations_function(self):
locations = normalize_locations(
[('tns0', 'alpha'), ('tns1', 'http://example.com/beta')], base_url='/home/user'
)
self.assertEqual(locations[0][0], 'tns0')
self.assertRegex(locations[0][1], f'file://{DRIVE_REGEX}/home/user/alpha')
self.assertEqual(locations[1][0], 'tns1')
self.assertEqual(locations[1][1], 'http://example.com/beta')
locations = normalize_locations(
{'tns0': 'alpha', 'tns1': 'http://example.com/beta'}, base_url='/home/user'
)
self.assertEqual(locations[0][0], 'tns0')
self.assertRegex(locations[0][1], f'file://{DRIVE_REGEX}/home/user/alpha')
self.assertEqual(locations[1][0], 'tns1')
self.assertEqual(locations[1][1], 'http://example.com/beta')
locations = normalize_locations(
{'tns0': ['alpha', 'beta'], 'tns1': 'http://example.com/beta'}, base_url='/home/user'
)
self.assertEqual(locations[0][0], 'tns0')
self.assertRegex(locations[0][1], f'file://{DRIVE_REGEX}/home/user/alpha')
self.assertEqual(locations[1][0], 'tns0')
self.assertRegex(locations[1][1], f'file://{DRIVE_REGEX}/home/user/beta')
self.assertEqual(locations[2][0], 'tns1')
self.assertEqual(locations[2][1], 'http://example.com/beta')
locations = normalize_locations(
{'tns0': 'alpha', 'tns1': 'http://example.com/beta'}, keep_relative=True
)
self.assertListEqual(locations, [('tns0', 'file:alpha'),
('tns1', 'http://example.com/beta')])
def test_fetch_resource_function(self):
with self.assertRaises(ValueError) as ctx:
fetch_resource('')
self.assertIn('argument must contain a not empty string', str(ctx.exception))
wrong_path = casepath('resources/dummy_file.txt')
self.assertRaises(XMLResourceError, fetch_resource, wrong_path)
wrong_path = casepath('/home/dummy_file.txt')
self.assertRaises(XMLResourceError, fetch_resource, wrong_path)
right_path = casepath('resources/dummy file.txt')
self.assertTrue(fetch_resource(right_path).endswith('dummy%20file.txt'))
right_path = Path(casepath('resources/dummy file.txt')).relative_to(os.getcwd())
self.assertTrue(fetch_resource(str(right_path), '/home').endswith('dummy%20file.txt'))
with self.assertRaises(XMLResourceError):
fetch_resource(str(right_path.parent.joinpath('dummy_file.txt')), '/home')
ambiguous_path = casepath('resources/dummy file #2.txt')
self.assertTrue(fetch_resource(ambiguous_path).endswith('dummy%20file%20%232.txt'))
with urlopen(fetch_resource(ambiguous_path)) as res:
self.assertEqual(res.read(), b'DUMMY CONTENT')
def test_fetch_namespaces_function(self):
self.assertFalse(fetch_namespaces(casepath('resources/malformed.xml')))
def test_fetch_schema_locations(self):
locations = fetch_schema_locations(self.col_xml_file)
self.check_url(locations[0], self.col_xsd_file)
self.assertEqual(locations[1][0][0], 'http://example.com/ns/collection')
self.check_url(locations[1][0][1], self.col_xsd_file)
self.check_url(fetch_schema(self.vh_xml_file), self.vh_xsd_file)
with self.assertRaises(ValueError) as ctx:
fetch_schema_locations('<empty/>')
self.assertIn('does not contain any schema location hint', str(ctx.exception))
# Tests on XMLResource instances
def test_xml_resource_representation(self):
resource = XMLResource(self.vh_xml_file)
self.assertTrue(str(resource).startswith(
"XMLResource(root=<Element '{http://example.com/vehicles}vehicles'"
))
def test_xml_resource_from_url(self):
resource = XMLResource(self.vh_xml_file, lazy=True)
self.assertEqual(resource.source, self.vh_xml_file)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.check_url(resource.url, self.vh_xml_file)
self.assertTrue(resource.filepath.endswith('vehicles.xml'))
self.assertIsNone(resource.text)
with self.assertRaises(XMLResourceError) as ctx:
resource.load()
self.assertIn('cannot load a lazy XML resource', str(ctx.exception))
self.assertIsNone(resource.text)
resource = XMLResource(self.vh_xml_file, lazy=False)
self.assertEqual(resource.source, self.vh_xml_file)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.check_url(resource.url, self.vh_xml_file)
self.assertIsNone(resource.text)
resource.load()
self.assertTrue(resource.text.startswith('<?xml'))
resource = XMLResource(self.vh_xml_file, lazy=False)
resource._url = resource._url[:-12] + 'unknown.xml'
with self.assertRaises(XMLResourceError):
resource.load()
def test_xml_resource_from_url_in_bytes(self):
resource = XMLResource(self.vh_xml_file.encode('utf-8'), lazy=False)
self.assertEqual(resource.source, self.vh_xml_file.encode('utf-8'))
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.check_url(resource.url, self.vh_xml_file)
self.assertIsNone(resource.text)
resource.load()
self.assertTrue(resource.text.startswith('<?xml'))
def test_xml_resource_from_path(self):
path = Path(self.vh_xml_file)
resource = XMLResource(path, lazy=True)
self.assertIs(resource.source, path)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.check_url(resource.url, path.as_uri())
self.assertTrue(resource.filepath.endswith('vehicles.xml'))
self.assertIsNone(resource.text)
with self.assertRaises(XMLResourceError) as ctx:
resource.load()
self.assertIn('cannot load a lazy XML resource', str(ctx.exception))
self.assertIsNone(resource.text)
resource = XMLResource(path, lazy=False)
self.assertEqual(resource.source, path)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.check_url(resource.url, path.as_uri())
self.assertIsNone(resource.text)
resource.load()
self.assertTrue(resource.text.startswith('<?xml'))
resource = XMLResource(path, lazy=False)
resource._url = resource._url[:-12] + 'unknown.xml'
with self.assertRaises(XMLResourceError):
resource.load()
def test_xml_resource_from_element_tree(self):
vh_etree = ElementTree.parse(self.vh_xml_file)
vh_root = vh_etree.getroot()
resource = XMLResource(vh_etree)
self.assertEqual(resource.source, vh_etree)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.assertIsNone(resource.url)
self.assertIsNone(resource.filepath)
self.assertIsNone(resource.text)
resource.load()
self.assertIsNone(resource.text)
resource = XMLResource(vh_root)
self.assertEqual(resource.source, vh_root)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.assertIsNone(resource.url)
self.assertIsNone(resource.filepath)
self.assertIsNone(resource.text)
resource.load()
self.assertIsNone(resource.text)
@unittest.skipIf(lxml_etree is None, "Skip: lxml is not available.")
def test_xml_resource_from_lxml(self):
vh_etree = lxml_etree.parse(self.vh_xml_file)
vh_root = vh_etree.getroot()
resource = XMLResource(vh_etree)
self.assertEqual(resource.source, vh_etree)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.assertIsNone(resource.url)
self.assertIsNone(resource.filepath)
self.assertIsNone(resource.text)
resource.load()
self.assertIsNone(resource.text)
resource = XMLResource(vh_root)
self.assertEqual(resource.source, vh_root)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.assertIsNone(resource.url)
self.assertIsNone(resource.filepath)
self.assertIsNone(resource.text)
resource.load()
self.assertIsNone(resource.text)
xml_text = resource.get_text()
self.assertIn('<vh:vehicles ', xml_text)
self.assertIn('<!-- Comment -->', xml_text)
self.assertIn('</vh:vehicles>', xml_text)
def test_xml_resource_from_resource(self):
xml_file = urlopen('file://{}'.format(add_leading_slash(self.vh_xml_file)))
try:
resource = XMLResource(xml_file, lazy=False)
self.assertEqual(resource.source, xml_file)
self.assertEqual(resource.root.tag, '{http://example.com/vehicles}vehicles')
self.assertIsNone(resource.url)
self.assertIsNone(resource.text)
resource.load()
self.assertTrue(resource.text.startswith('<?xml'))
self.assertFalse(xml_file.closed)
finally:
xml_file.close()
with open(self.vh_xml_file) as fp:
resource = XMLResource(fp)
self.assertIsNone(resource.text)
with self.assertRaises(XMLResourceError):
resource.load()
def test_xml_resource_from_file(self):
with open(self.vh_xsd_file) as schema_file:
resource = XMLResource(schema_file, lazy=False)
self.assertEqual(resource.source, schema_file)
self.assertEqual(resource.root.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(resource.url)
self.assertIsNone(resource.text)
resource.load()
self.assertTrue(resource.text.startswith('<xs:schema'))
self.assertFalse(schema_file.closed)
for _ in resource.iter():
pass
self.assertFalse(schema_file.closed)
for _ in resource.iter_depth():
pass
self.assertFalse(schema_file.closed)
with open(self.vh_xsd_file) as schema_file:
resource = XMLResource(schema_file, lazy=True)
self.assertEqual(resource.source, schema_file)
self.assertEqual(resource.root.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(resource.url)
self.assertIsNone(resource.text)
with self.assertRaises(XMLResourceError) as ctx:
resource.load()
self.assertEqual("cannot load a lazy XML resource", str(ctx.exception))
self.assertFalse(schema_file.closed)
for _ in resource.iter():
pass
self.assertFalse(schema_file.closed)
for _ in resource.iter_depth():
pass
self.assertFalse(schema_file.closed)
def test_xml_resource_from_string(self):
with open(self.vh_xsd_file) as schema_file:
schema_text = schema_file.read()
resource = XMLResource(schema_text, lazy=False)
self.assertEqual(resource.source, schema_text)
self.assertEqual(resource.root.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(resource.url)
self.assertTrue(resource.text.startswith('<xs:schema'))
invalid_xml = '<tns0:root>missing namespace declaration</tns0:root>'
with self.assertRaises(ElementTree.ParseError) as ctx:
XMLResource(invalid_xml)
self.assertEqual(str(ctx.exception), 'unbound prefix: line 1, column 0')
def test_xml_resource_from_string_io(self):
with open(self.vh_xsd_file) as schema_file:
schema_text = schema_file.read()
schema_file = StringIO(schema_text)
resource = XMLResource(schema_file)
self.assertEqual(resource.source, schema_file)
self.assertEqual(resource.root.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(resource.url)
self.assertTrue(resource.text.startswith('<xs:schema'))
schema_file = StringIO(schema_text)
resource = XMLResource(schema_file, lazy=False)
self.assertEqual(resource.source, schema_file)
self.assertEqual(resource.root.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(resource.url)
self.assertTrue(resource.text.startswith('<xs:schema'))
def test_xml_resource_from_bytes_io(self):
source = '<?xml version="1.0" encoding="iso-8859-1"?>\n<a>ç</a>'
resource = XMLResource(BytesIO(source.encode('iso-8859-1')))
self.assertIsNone(resource.text)
resource.load()
self.assertEqual(resource.text, source)
def test_xml_resource_from_malformed_source(self):
# related to issue #224
malformed_xml_file = casepath('resources/malformed.xml')
with self.assertRaises(ElementTree.ParseError):
XMLResource(malformed_xml_file)
with self.assertRaises(ElementTree.ParseError):
XMLResource(malformed_xml_file, defuse='always')
        # the incremental parser does not find the incomplete root before the end
resource = XMLResource(malformed_xml_file, lazy=True)
self.assertEqual(resource.root.tag, 'malformed_xml_file')
resource = XMLResource('<malformed_xml_file>>', lazy=True)
self.assertEqual(resource.root.tag, 'malformed_xml_file')
with self.assertRaises(ElementTree.ParseError):
XMLResource('<malformed_xml_file<>', lazy=True)
def test_xml_resource_from_wrong_arguments(self):
self.assertRaises(TypeError, XMLResource, [b'<UNSUPPORTED_DATA_TYPE/>'])
with self.assertRaises(TypeError) as ctx:
XMLResource('<root/>', base_url=[b'/home'])
self.assertIn(' ', str(ctx.exception))
def test_xml_resource_namespace(self):
resource = XMLResource(self.vh_xml_file)
self.assertEqual(resource.namespace, 'http://example.com/vehicles')
resource = XMLResource(self.vh_xsd_file)
self.assertEqual(resource.namespace, 'http://www.w3.org/2001/XMLSchema')
resource = XMLResource(self.col_xml_file)
self.assertEqual(resource.namespace, 'http://example.com/ns/collection')
self.assertEqual(XMLResource('<A/>').namespace, '')
def test_xml_resource_update_nsmap_method(self):
resource = XMLResource(self.vh_xml_file)
nsmap = {}
resource._update_nsmap(nsmap, 'xs', XSD_NAMESPACE)
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE})
resource._update_nsmap(nsmap, 'xs', XSD_NAMESPACE)
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE})
resource._update_nsmap(nsmap, 'tns0', 'http://example.com/ns')
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE, 'tns0': 'http://example.com/ns'})
resource._update_nsmap(nsmap, 'xs', 'http://example.com/ns')
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE,
'xs0': 'http://example.com/ns',
'tns0': 'http://example.com/ns'})
resource._update_nsmap(nsmap, 'xs', 'http://example.com/ns')
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE,
'xs0': 'http://example.com/ns',
'tns0': 'http://example.com/ns'})
resource._update_nsmap(nsmap, 'xs', 'http://example.com/ns2')
self.assertEqual(nsmap, {'xs': XSD_NAMESPACE,
'xs0': 'http://example.com/ns',
'xs1': 'http://example.com/ns2',
'tns0': 'http://example.com/ns'})
def test_xml_resource_access(self):
resource = XMLResource(self.vh_xml_file)
base_url = resource.base_url
XMLResource(self.vh_xml_file, allow='local')
XMLResource(
self.vh_xml_file, base_url=os.path.dirname(self.vh_xml_file), allow='sandbox'
)
with self.assertRaises(XMLResourceError) as ctx:
XMLResource(self.vh_xml_file, allow='remote')
self.assertTrue(str(ctx.exception).startswith("block access to local resource"))
with self.assertRaises(URLError):
XMLResource("https://xmlschema.test/vehicles.xsd", allow='remote')
with self.assertRaises(XMLResourceError) as ctx:
XMLResource("https://xmlschema.test/vehicles.xsd", allow='local')
self.assertEqual(str(ctx.exception),
"block access to remote resource https://xmlschema.test/vehicles.xsd")
with self.assertRaises(XMLResourceError) as ctx:
XMLResource("https://xmlschema.test/vehicles.xsd", allow='sandbox')
self.assertEqual(str(ctx.exception),
"block access to files out of sandbox requires 'base_url' to be set")
with self.assertRaises(XMLResourceError) as ctx:
XMLResource("/tmp/vehicles.xsd", allow='sandbox')
self.assertEqual(
str(ctx.exception),
"block access to files out of sandbox requires 'base_url' to be set",
)
source = "/tmp/vehicles.xsd"
with self.assertRaises(XMLResourceError) as ctx:
XMLResource(source, base_url=base_url, allow='sandbox')
self.assertEqual(
str(ctx.exception),
"block access to out of sandbox file {}".format(normalize_url(source)),
)
with self.assertRaises(TypeError) as ctx:
XMLResource("https://xmlschema.test/vehicles.xsd", allow=None)
self.assertEqual(str(ctx.exception),
"invalid type <class 'NoneType'> for argument 'allow'")
with self.assertRaises(ValueError) as ctx:
XMLResource("https://xmlschema.test/vehicles.xsd", allow='any')
self.assertEqual(str(ctx.exception),
"'allow' argument: 'any' is not a security mode")
with self.assertRaises(XMLResourceError) as ctx:
XMLResource(self.vh_xml_file, allow='none')
self.assertTrue(str(ctx.exception).startswith('block access to resource'))
self.assertTrue(str(ctx.exception).endswith('vehicles.xml'))
with open(self.vh_xml_file) as fp:
resource = XMLResource(fp, allow='none')
self.assertIsInstance(resource, XMLResource)
self.assertIsNone(resource.url)
with open(self.vh_xml_file) as fp:
resource = XMLResource(fp.read(), allow='none')
self.assertIsInstance(resource, XMLResource)
self.assertIsNone(resource.url)
with open(self.vh_xml_file) as fp:
resource = XMLResource(StringIO(fp.read()), allow='none')
self.assertIsInstance(resource, XMLResource)
self.assertIsNone(resource.url)
def test_xml_resource_defuse(self):
resource = XMLResource(self.vh_xml_file, defuse='never', lazy=True)
self.assertEqual(resource.defuse, 'never')
self.assertRaises(ValueError, XMLResource, self.vh_xml_file, defuse='all')
self.assertRaises(TypeError, XMLResource, self.vh_xml_file, defuse=None)
self.assertIsInstance(resource.root, ElementTree.Element)
resource = XMLResource(self.vh_xml_file, defuse='always', lazy=True)
self.assertIsInstance(resource.root, PyElementTree.Element)
xml_file = casepath('resources/with_entity.xml')
self.assertIsInstance(XMLResource(xml_file, lazy=True), XMLResource)
with self.assertRaises(ElementTree.ParseError):
XMLResource(xml_file, defuse='always', lazy=True)
xml_file = casepath('resources/unused_external_entity.xml')
self.assertIsInstance(XMLResource(xml_file, lazy=True), XMLResource)
with self.assertRaises(ElementTree.ParseError):
XMLResource(xml_file, defuse='always', lazy=True)
def test_xml_resource_defuse_other_source_types(self):
xml_file = casepath('resources/external_entity.xml')
self.assertIsInstance(XMLResource(xml_file, lazy=True), XMLResource)
with self.assertRaises(ElementTree.ParseError):
XMLResource(xml_file, defuse='always', lazy=True)
with self.assertRaises(ElementTree.ParseError):
XMLResource(xml_file, defuse='always', lazy=False)
with self.assertRaises(ElementTree.ParseError):
XMLResource(xml_file, defuse='always', lazy=True)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(fp, defuse='always', lazy=False)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(fp.read(), defuse='always', lazy=False)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(StringIO(fp.read()), defuse='always', lazy=False)
def test_xml_resource_defuse_nonlocal(self):
xml_file = casepath('resources/external_entity.xml')
resource = XMLResource(xml_file, defuse='nonlocal', lazy=True)
self.assertIsInstance(resource, XMLResource)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(fp, defuse='nonlocal', lazy=True)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(fp.read(), defuse='nonlocal', lazy=True)
with self.assertRaises(ElementTree.ParseError):
with open(xml_file) as fp:
XMLResource(StringIO(fp.read()), defuse='nonlocal', lazy=True)
def test_xml_resource_timeout(self):
resource = XMLResource(self.vh_xml_file, timeout=30)
self.assertEqual(resource.timeout, 30)
self.assertRaises(TypeError, XMLResource, self.vh_xml_file, timeout='100')
self.assertRaises(ValueError, XMLResource, self.vh_xml_file, timeout=0)
def test_xml_resource_laziness(self):
resource = XMLResource(self.vh_xml_file, lazy=True)
self.assertTrue(resource.is_lazy())
resource = XMLResource(self.vh_xml_file, lazy=False)
self.assertFalse(resource.is_lazy())
resource = XMLResource(self.vh_xml_file, lazy=1)
self.assertTrue(resource.is_lazy())
resource = XMLResource(self.vh_xml_file, lazy=2)
self.assertTrue(resource.is_lazy())
resource = XMLResource(self.vh_xml_file, lazy=0)
self.assertFalse(resource.is_lazy())
with self.assertRaises(ValueError):
XMLResource(self.vh_xml_file, lazy=-1)
with self.assertRaises(TypeError):
XMLResource(self.vh_xml_file, lazy='1')
def test_xml_resource_base_url(self):
resource = XMLResource(self.vh_xml_file)
base_url = resource.base_url
self.assertEqual(base_url, XMLResource(self.vh_xml_file, '/other').base_url)
with open(self.vh_xml_file) as fp:
self.assertIsNone(XMLResource(fp.read()).base_url)
with open(self.vh_xml_file) as fp:
resource = XMLResource(fp.read(), base_url='/foo')
self.assertEqual(resource.base_url, '/foo')
base_url = Path(self.vh_xml_file).parent
resource = XMLResource('vehicles.xml', base_url)
self.assertEqual(resource.base_url, base_url.as_uri())
resource = XMLResource('vehicles.xml', str(base_url))
self.assertEqual(resource.base_url, base_url.as_uri())
resource = XMLResource('vehicles.xml', str(base_url).encode())
self.assertEqual(resource.base_url, base_url.as_uri())
self.assertEqual(resource.base_url, base_url.as_uri())
with self.assertRaises(TypeError):
XMLResource(self.vh_xml_file, base_url=False)
with self.assertRaises(ValueError):
XMLResource(self.vh_xml_file, base_url='<root/>')
with self.assertRaises(ValueError):
XMLResource(self.vh_xml_file, base_url=b'<root/>')
def test_xml_resource_is_local(self):
resource = XMLResource(self.vh_xml_file)
self.assertTrue(resource.is_local())
def test_xml_resource_is_remote(self):
resource = XMLResource(self.vh_xml_file)
self.assertFalse(resource.is_remote())
def test_xml_resource_is_loaded(self):
resource = XMLResource(self.vh_xml_file, lazy=False)
self.assertFalse(resource.is_loaded())
resource.load()
self.assertTrue(resource.is_loaded())
def test_xml_resource__lazy_iterparse(self):
resource = XMLResource(self.vh_xml_file, lazy=True)
self.assertEqual(resource.defuse, 'remote')
for _, elem in resource._lazy_iterparse(self.col_xml_file):
self.assertTrue(is_etree_element(elem))
nsmap = []
for _, elem in resource._lazy_iterparse(self.col_xml_file, nsmap=nsmap):
self.assertTrue(is_etree_element(elem))
self.assertListEqual(
nsmap, [('col', 'http://example.com/ns/collection'),
('xsi', 'http://www.w3.org/2001/XMLSchema-instance')])
resource._defuse = 'always'
for _, elem in resource._lazy_iterparse(self.col_xml_file):
self.assertTrue(is_etree_element(elem))
def test_xml_resource__iterparse(self):
resource = XMLResource(self.vh_xml_file, lazy=False)
self.assertEqual(resource.defuse, 'remote')
with open(self.col_xml_file) as fp:
resource._parse(fp)
self.assertTrue(is_etree_element(resource.root))
resource._defuse = 'always'
with open(self.col_xml_file) as fp:
resource._parse(fp)
self.assertTrue(is_etree_element(resource.root))
with urlopen(resource.url) as fp:
resource._parse(fp)
self.assertTrue(is_etree_element(resource.root))
def test_xml_resource_tostring(self):
resource = XMLResource(self.vh_xml_file)
self.assertTrue(resource.tostring().startswith('<vh:vehicles'))
resource = XMLResource(self.vh_xml_file, lazy=True)
with self.assertRaises(XMLResourceError) as ctx:
resource.tostring()
self.assertEqual("cannot serialize a lazy XML resource", str(ctx.exception))
resource = XMLResource(XML_WITH_NAMESPACES)
result = resource.tostring()
self.assertNotEqual(result, XML_WITH_NAMESPACES)
# With xml.etree.ElementTree namespace declarations are serialized
# with a loss of information (all collapsed into the root element).
self.assertEqual(result, '<pfa:root xmlns:pfa="http://xmlschema.test/nsa" '
'xmlns:pfb="http://xmlschema.test/nsb">\n'
' <pfb:elem />\n</pfa:root>')
if lxml_etree is not None:
root = lxml_etree.XML(XML_WITH_NAMESPACES)
resource = XMLResource(root)
# With lxml.etree there is no information loss.
self.assertEqual(resource.tostring(), XML_WITH_NAMESPACES)
def test_xml_resource_open(self):
resource = XMLResource(self.vh_xml_file)
xml_file = resource.open()
self.assertIsNot(xml_file, resource.source)
data = xml_file.read().decode('utf-8')
self.assertTrue(data.startswith('<?xml '))
xml_file.close()
resource._url = 'file:not-a-file'
with self.assertRaises(XMLResourceError):
resource.open()
resource = XMLResource('<A/>')
self.assertRaises(XMLResourceError, resource.open)
resource = XMLResource(source=open(self.vh_xml_file))
xml_file = resource.open()
self.assertIs(xml_file, resource.source)
xml_file.close()
def test_xml_resource_seek(self):
resource = XMLResource(self.vh_xml_file)
self.assertIsNone(resource.seek(0))
self.assertIsNone(resource.seek(1))
xml_file = open(self.vh_xml_file)
resource = XMLResource(source=xml_file)
self.assertEqual(resource.seek(0), 0)
self.assertEqual(resource.seek(1), 1)
xml_file.close()
def test_xml_resource_close(self):
resource = XMLResource(self.vh_xml_file)
resource.close()
xml_file = resource.open()
try:
self.assertTrue(callable(xml_file.read))
finally:
resource.close()
with open(self.vh_xml_file) as xml_file:
resource = XMLResource(source=xml_file)
resource.close()
with self.assertRaises(XMLResourceError):
resource.open()
with open(self.vh_xml_file) as xml_file:
resource = XMLResource(xml_file)
with self.assertRaises(XMLResourceError):
resource.load() # I/O operation on closed file
def test_xml_resource_iter(self):
resource = XMLResource(XMLSchema.meta_schema.source.url)
self.assertFalse(resource.is_lazy())
lazy_resource = XMLResource(XMLSchema.meta_schema.source.url, lazy=True)
self.assertTrue(lazy_resource.is_lazy())
tags = [x.tag for x in resource.iter()]
self.assertEqual(len(tags), 1390)
self.assertEqual(tags[0], '{%s}schema' % XSD_NAMESPACE)
lazy_tags = [x.tag for x in lazy_resource.iter()]
self.assertEqual(len(lazy_tags), 1390)
self.assertEqual(lazy_tags[-1], '{%s}schema' % XSD_NAMESPACE)
self.assertNotEqual(tags, lazy_tags)
tags = [x.tag for x in resource.iter('{%s}complexType' % XSD_NAMESPACE)]
self.assertEqual(len(tags), 56)
self.assertEqual(tags[0], '{%s}complexType' % XSD_NAMESPACE)
self.assertListEqual(
tags, [x.tag for x in lazy_resource.iter('{%s}complexType' % XSD_NAMESPACE)]
)
def test_xml_resource_iter_depth(self):
resource = XMLResource(XMLSchema.meta_schema.source.url)
self.assertFalse(resource.is_lazy())
lazy_resource = XMLResource(XMLSchema.meta_schema.source.url, lazy=True)
self.assertTrue(lazy_resource.is_lazy())
        # Note: elements change with a lazy resource, so compare only tags
nsmap = []
tags = [x.tag for x in resource.iter_depth(nsmap=nsmap)]
self.assertEqual(len(tags), 1)
self.assertEqual(tags[0], '{%s}schema' % XSD_NAMESPACE)
self.assertListEqual(
nsmap, [('xs', 'http://www.w3.org/2001/XMLSchema'),
('hfp', 'http://www.w3.org/2001/XMLSchema-hasFacetAndProperty')])
lazy_tags = [x.tag for x in lazy_resource.iter_depth()]
self.assertEqual(len(lazy_tags), 156)
self.assertEqual(lazy_tags[0], '{%s}annotation' % XSD_NAMESPACE)
self.assertEqual(lazy_tags[-1], '{%s}element' % XSD_NAMESPACE)
lazy_tags = [x.tag for x in lazy_resource.iter_depth(mode=2)]
self.assertListEqual(tags, lazy_tags)
lazy_tags = [x.tag for x in lazy_resource.iter_depth(mode=1)]
self.assertEqual(len(lazy_tags), 156)
lazy_tags = [x.tag for x in lazy_resource.iter_depth(mode=3)]
self.assertEqual(len(lazy_tags), 157)
self.assertEqual(tags[0], lazy_tags[-1])
lazy_tags = [x.tag for x in lazy_resource.iter_depth(mode=4)]
self.assertEqual(len(lazy_tags), 158)
self.assertEqual(tags[0], lazy_tags[0])
self.assertEqual(tags[0], lazy_tags[-1])
with self.assertRaises(ValueError) as ctx:
_ = [x.tag for x in lazy_resource.iter_depth(mode=5)]
self.assertEqual("invalid argument mode=5", str(ctx.exception))
source = StringIO('<a xmlns:tns0="http://example.com/ns0"><b1>'
' <c1 xmlns:tns1="http://example.com/ns1"/>'
' <c2 xmlns:tns2="http://example.com/ns2" x="2"/>'
'</b1><b2><c3><d1/></c3></b2></a>')
resource = XMLResource(source, lazy=3)
nsmap = []
ancestors = []
self.assertIs(next(resource.iter_depth(nsmap=nsmap, ancestors=ancestors)),
resource.root[1][0][0])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0')])
self.assertListEqual(ancestors, [resource.root, resource.root[1], resource.root[1][0]])
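# Summary of the lazy iter_depth() modes exercised above, derived from the
# assertions themselves rather than from external documentation: the default
# mode (equivalent to mode=1) yields only the subtree roots at the lazy depth,
# mode=2 yields only the pruned document root after parsing completes, mode=3
# yields the subtree roots followed by the root, and mode=4 yields the root
# first, then the subtree roots, then the root again.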
def test_xml_resource_iterfind(self):
namespaces = {'xs': XSD_NAMESPACE}
resource = XMLResource(XMLSchema.meta_schema.source.url)
self.assertFalse(resource.is_lazy())
lazy_resource = XMLResource(XMLSchema.meta_schema.source.url, lazy=True)
self.assertTrue(lazy_resource.is_lazy())
tags = [x.tag for x in resource.iterfind(path='.')]
self.assertEqual(len(tags), 1)
self.assertEqual(tags[0], '{%s}schema' % XSD_NAMESPACE)
lazy_tags = [x.tag for x in lazy_resource.iterfind(path='.')]
self.assertListEqual(tags, lazy_tags)
tags = [x.tag for x in resource.iterfind(path='*')]
self.assertEqual(len(tags), 156)
self.assertEqual(tags[0], '{%s}annotation' % XSD_NAMESPACE)
lazy_tags = [x.tag for x in lazy_resource.iterfind(path='*')]
self.assertListEqual(tags, lazy_tags)
tags = [x.tag for x in resource.iterfind('xs:complexType', namespaces)]
self.assertEqual(len(tags), 35)
self.assertTrue(all(t == '{%s}complexType' % XSD_NAMESPACE for t in tags))
lazy_tags = [x.tag for x in lazy_resource.iterfind('xs:complexType', namespaces)]
self.assertListEqual(tags, lazy_tags)
tags = [x.tag for x in resource.iterfind('. /. / xs:complexType', namespaces)]
self.assertEqual(len(tags), 35)
self.assertTrue(all(t == '{%s}complexType' % XSD_NAMESPACE for t in tags))
lazy_tags = [
x.tag for x in lazy_resource.iterfind('. /. / xs:complexType', namespaces)
]
self.assertListEqual(tags, lazy_tags)
def test_xml_resource_find(self):
root = ElementTree.XML('<a><b1><c1/><c2 x="2"/></b1><b2/></a>')
resource = XMLResource(root)
self.assertIs(resource.find('*/c2'), root[0][1])
self.assertIsNone(resource.find('*/c3'))
resource = XMLResource('<a><b1>'
' <c1 xmlns:tns1="http://example.com/ns1"/>'
' <c2 xmlns:tns2="http://example.com/ns2" x="2"/>'
'</b1><b2/></a>')
nsmap = []
self.assertIs(resource.find('*/c2', nsmap=nsmap), resource.root[0][1])
self.assertListEqual(nsmap, [('tns2', 'http://example.com/ns2')])
nsmap = []
ancestors = []
self.assertIs(resource.find('*/c2', nsmap=nsmap, ancestors=ancestors),
resource.root[0][1])
self.assertListEqual(nsmap, [('tns2', 'http://example.com/ns2')])
self.assertListEqual(ancestors, [resource.root, resource.root[0]])
nsmap = []
ancestors = []
self.assertIs(resource.find('.', nsmap=nsmap, ancestors=ancestors),
resource.root)
self.assertListEqual(nsmap, [])
self.assertListEqual(ancestors, [])
nsmap = []
ancestors = []
self.assertIsNone(resource.find('b3', nsmap=nsmap, ancestors=ancestors))
self.assertListEqual(nsmap, [])
self.assertListEqual(ancestors, [])
def test_xml_resource_lazy_find(self):
source = StringIO('<a><b1><c1/><c2 x="2"/></b1><b2/></a>')
resource = XMLResource(source, lazy=True)
self.assertIs(resource.find('*/c2'), resource.root[0][1])
source = StringIO('<a xmlns:tns0="http://example.com/ns0"><b1>'
' <c1 xmlns:tns1="http://example.com/ns1"/>'
' <c2 xmlns:tns2="http://example.com/ns2" x="2"/>'
'</b1><b2><c3><d1/></c3></b2></a>')
resource = XMLResource(source, lazy=True)
nsmap = []
ancestors = []
self.assertIs(resource.find('*/c2', nsmap=nsmap, ancestors=ancestors),
resource.root[0][1])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0'),
('tns2', 'http://example.com/ns2')])
self.assertListEqual(ancestors, [resource.root, resource.root[0]])
nsmap = []
ancestors = []
self.assertIs(resource.find('*/c3', nsmap=nsmap, ancestors=ancestors),
resource.root[1][0])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0')])
self.assertListEqual(ancestors, [resource.root, resource.root[1]])
nsmap = []
ancestors = []
self.assertIs(resource.find('*/c3/d1', nsmap=nsmap, ancestors=ancestors),
resource.root[1][0][0])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0')])
self.assertListEqual(ancestors,
[resource.root, resource.root[1], resource.root[1][0]])
nsmap = []
ancestors = []
self.assertIs(resource.find('*', nsmap=nsmap, ancestors=ancestors),
resource.root[0])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0')])
self.assertListEqual(ancestors, [resource.root])
nsmap = []
ancestors = []
self.assertIsNone(resource.find('/b1', nsmap=nsmap, ancestors=ancestors))
self.assertListEqual(nsmap, [])
self.assertListEqual(ancestors, [])
source.seek(0)
resource = XMLResource(source, lazy=2)
nsmap = []
ancestors = []
self.assertIs(resource.find('*/c2', nsmap=nsmap, ancestors=ancestors),
resource.root[0][1])
self.assertListEqual(nsmap, [('tns0', 'http://example.com/ns0'),
('tns2', 'http://example.com/ns2')])
self.assertListEqual(ancestors, [resource.root, resource.root[0]])
def test_xml_resource_findall(self):
root = ElementTree.XML('<a><b1><c1/><c2/></b1><b2/></a>')
resource = XMLResource(root)
self.assertListEqual(resource.findall('*/*'), root[0][:])
self.assertListEqual(resource.findall('*/c3'), [])
def test_xml_resource_nsmap_tracking(self):
xsd_file = casepath('examples/collection/collection4.xsd')
resource = XMLResource(xsd_file)
root = resource.root
nsmap = []
for elem in resource.iter(nsmap=nsmap):
if elem is root[2][0] or elem in root[2][0]:
self.assertEqual(dict(nsmap), {'xs': 'http://www.w3.org/2001/XMLSchema',
'': 'http://www.w3.org/2001/XMLSchema'})
else:
self.assertEqual(dict(nsmap), {'xs': 'http://www.w3.org/2001/XMLSchema',
'': 'http://example.com/ns/collection'})
nsmap.clear()
resource._nsmap.clear()
resource._nsmap[resource._root] = []
for _ in resource.iter(nsmap=nsmap):
self.assertEqual(nsmap, [])
nsmap.clear()
if lxml_etree is not None:
tree = lxml_etree.parse(xsd_file)
resource = XMLResource(tree)
root = resource.root
for elem in resource.iter(nsmap=nsmap):
if callable(elem.tag):
continue
if elem is root[2][0] or elem in root[2][0]:
self.assertEqual(dict(nsmap), {'xs': 'http://www.w3.org/2001/XMLSchema',
'': 'http://www.w3.org/2001/XMLSchema'})
else:
self.assertEqual(dict(nsmap), {'xs': 'http://www.w3.org/2001/XMLSchema',
'': 'http://example.com/ns/collection'})
nsmap = {}
resource = XMLResource(xsd_file, lazy=True)
root = elem = resource.root
for elem in resource.iter(nsmap=nsmap):
try:
if elem is resource.root[2][0] or elem in resource.root[2][0]:
self.assertEqual(nsmap['default'], 'http://www.w3.org/2001/XMLSchema')
self.assertEqual(nsmap[''], 'http://example.com/ns/collection')
except IndexError:
self.assertEqual(nsmap[''], 'http://example.com/ns/collection')
self.assertIs(elem, resource.root)
self.assertIsNot(root, resource.root)
def test_xml_resource_get_namespaces(self):
with open(self.vh_xml_file) as schema_file:
resource = XMLResource(schema_file)
self.assertIsNone(resource.url)
self.assertEqual(set(resource.get_namespaces().keys()), {'vh', 'xsi'})
self.assertFalse(schema_file.closed)
with open(self.vh_xsd_file) as schema_file:
resource = XMLResource(schema_file)
self.assertIsNone(resource.url)
self.assertEqual(set(resource.get_namespaces().keys()), {'xs', 'vh'})
self.assertFalse(schema_file.closed)
resource = XMLResource(self.col_xml_file)
self.assertEqual(resource.url, normalize_url(self.col_xml_file))
self.assertEqual(set(resource.get_namespaces().keys()), {'col', 'xsi'})
resource = XMLResource(self.col_xsd_file)
self.assertEqual(resource.url, normalize_url(self.col_xsd_file))
self.assertEqual(set(resource.get_namespaces().keys()), {'', 'xs'})
resource = XMLResource("""<?xml version="1.0" ?>
<root xmlns="tns1">
<tns:elem1 xmlns:tns="tns1" xmlns="unknown"/>
</root>""", lazy=False)
self.assertEqual(set(resource.get_namespaces(root_only=False).keys()),
{'', 'tns', 'default'})
resource = XMLResource("""<?xml version="1.0" ?>
<root xmlns:tns="tns1">
<tns:elem1 xmlns:tns="tns1" xmlns="unknown"/>
</root>""", lazy=False)
self.assertEqual(set(resource.get_namespaces(root_only=False).keys()), {'default', 'tns'})
self.assertEqual(resource.get_namespaces(root_only=True).keys(), {'tns'})
resource = XMLResource("""<?xml version="1.0" ?>
<root xmlns:tns="tns1">
<tns:elem1 xmlns:tns="tns3" xmlns="unknown"/>
</root>""", lazy=False)
self.assertEqual(set(resource.get_namespaces(root_only=False).keys()),
{'default', 'tns', 'tns0'})
resource = XMLResource('<root/>')
with self.assertRaises(ValueError) as ctx:
resource.get_namespaces(namespaces={'xml': "http://example.com/ne"})
self.assertIn("reserved prefix 'xml'", str(ctx.exception))
def test_xml_resource_get_locations(self):
resource = XMLResource(self.col_xml_file)
self.check_url(resource.url, normalize_url(self.col_xml_file))
locations = resource.get_locations([('ns', 'other.xsd')])
self.assertEqual(len(locations), 2)
self.check_url(locations[0][1], os.path.join(self.col_dir, 'other.xsd'))
self.check_url(locations[1][1], normalize_url(self.col_xsd_file))
source = StringIO('<a xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
' xsi:schemaLocation="http://example.com/ns1 /loc1"><b1>'
' <c1 xsi:schemaLocation="http://example.com/ns2 /loc2"/>'
' <c2 xmlns:tns2="http://example.com/ns2" x="2"/>'
'</b1></a>')
resource = XMLResource(source)
locations = resource.get_locations()
self.assertEqual(len(locations), 2)
self.assertEqual(locations[0][0], 'http://example.com/ns1')
self.assertRegex(locations[0][1], f'file://{DRIVE_REGEX}/loc1')
self.assertEqual(locations[1][0], 'http://example.com/ns2')
self.assertRegex(locations[1][1], f'file://{DRIVE_REGEX}/loc2')
locations = resource.get_locations(root_only=True)
self.assertEqual(len(locations), 1)
self.assertEqual(locations[0][0], 'http://example.com/ns1')
self.assertRegex(locations[0][1], f'file://{DRIVE_REGEX}/loc1')
@unittest.skipIf(SKIP_REMOTE_TESTS or platform.system() == 'Windows',
                 "Remote networks are not accessible, or skipped to avoid an "
                 "SSL verification error on Windows.")
def test_remote_resource_loading(self):
url = "https://raw.githubusercontent.com/brunato/xmlschema/master/" \
"tests/test_cases/examples/collection/collection.xsd"
with urlopen(url) as rh:
col_xsd_resource = XMLResource(rh)
self.assertEqual(col_xsd_resource.url, url)
self.assertIsNone(col_xsd_resource.filepath)
self.assertEqual(col_xsd_resource.namespace, XSD_NAMESPACE)
self.assertIsNone(col_xsd_resource.seek(0))
col_xsd_resource.load()
col_schema = XMLSchema(col_xsd_resource.get_text())
self.assertTrue(isinstance(col_schema, XMLSchema))
vh_schema = XMLSchema("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/vehicles/vehicles.xsd")
self.assertTrue(isinstance(vh_schema, XMLSchema))
self.assertTrue(vh_schema.source.is_remote())
def test_schema_defuse(self):
vh_schema = XMLSchema(self.vh_xsd_file, defuse='always')
self.assertIsInstance(vh_schema.root, ElementTree.Element)
for schema in vh_schema.maps.iter_schemas():
self.assertIsInstance(schema.root, ElementTree.Element)
def test_schema_resource_access(self):
vh_schema = XMLSchema(self.vh_xsd_file, allow='sandbox')
self.assertTrue(isinstance(vh_schema, XMLSchema))
xsd_source = """
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:vh="http://example.com/vehicles">
<xs:import namespace="http://example.com/vehicles" schemaLocation="{}"/>
</xs:schema>""".format(self.vh_xsd_file)
schema = XMLSchema(xsd_source, allow='all')
self.assertTrue(isinstance(schema, XMLSchema))
self.assertIn("http://example.com/vehicles", schema.maps.namespaces)
self.assertEqual(len(schema.maps.namespaces["http://example.com/vehicles"]), 4)
with warnings.catch_warnings(record=True) as ctx:
warnings.simplefilter("always")
XMLSchema(xsd_source, allow='remote')
self.assertEqual(len(ctx), 1, "Expected one import warning")
self.assertIn("block access to local resource", str(ctx[0].message))
schema = XMLSchema(xsd_source, allow='local')
self.assertTrue(isinstance(schema, XMLSchema))
self.assertIn("http://example.com/vehicles", schema.maps.namespaces)
self.assertEqual(len(schema.maps.namespaces["http://example.com/vehicles"]), 4)
with self.assertRaises(XMLResourceError) as ctx:
XMLSchema(xsd_source, allow='sandbox')
self.assertIn("block access to files out of sandbox", str(ctx.exception))
schema = XMLSchema(
xsd_source, base_url=os.path.dirname(self.vh_xsd_file), allow='all'
)
self.assertTrue(isinstance(schema, XMLSchema))
self.assertIn("http://example.com/vehicles", schema.maps.namespaces)
self.assertEqual(len(schema.maps.namespaces["http://example.com/vehicles"]), 4)
with warnings.catch_warnings(record=True) as ctx:
warnings.simplefilter("always")
XMLSchema(xsd_source, base_url='/improbable', allow='sandbox')
self.assertEqual(len(ctx), 1, "Expected one import warning")
self.assertIn("block access to out of sandbox", str(ctx[0].message))
def test_fid_with_name_attr(self):
"""XMLResource gets correct data when passed a file like object
with a name attribute that isn't on disk.
These file descriptors appear when working with the contents from a
zip using the zipfile module and with Django files in some
instances.
"""
class FileProxy(object):
def __init__(self, fid, fake_name):
self._fid = fid
self.name = fake_name
def __getattr__(self, attr):
try:
return self.__dict__[attr]
except (KeyError, AttributeError):
return getattr(self.__dict__["_fid"], attr)
with open(self.vh_xml_file) as xml_file:
resource = XMLResource(FileProxy(xml_file, fake_name="not__on____disk.xml"))
self.assertIsNone(resource.url)
self.assertEqual(set(resource.get_namespaces().keys()), {'vh', 'xsi'})
self.assertFalse(xml_file.closed)
def test_parent_map(self):
root = ElementTree.XML('<a><b1><c1/><c2/></b1><b2/></a>')
resource = XMLResource(root)
self.assertIsNone(resource.parent_map[root])
self.assertIs(resource.parent_map[root[0]], root)
self.assertIs(resource.parent_map[root[1]], root)
self.assertIs(resource.parent_map[root[0][0]], root[0])
self.assertIs(resource.parent_map[root[0][1]], root[0])
resource = XMLResource(StringIO('<a><b1><c1/><c2/></b1><b2/></a>'), lazy=True)
with self.assertRaises(XMLResourceError) as ctx:
_ = resource.parent_map
self.assertEqual("cannot create the parent map of a lazy XML resource",
str(ctx.exception))
def test_get_nsmap(self):
source = '<a xmlns="uri1"><b1 xmlns:x="uri2"><c1/><c2/></b1><b2 xmlns="uri3"/></a>'
alien_elem = ElementTree.XML('<a/>')
root = ElementTree.XML(source)
resource = XMLResource(root)
self.assertListEqual(resource.get_nsmap(root), [])
self.assertListEqual(resource.get_nsmap(root[1]), [])
self.assertListEqual(resource.get_nsmap(alien_elem), [])
if lxml_etree is not None:
root = lxml_etree.XML(source)
resource = XMLResource(root)
self.assertListEqual(resource.get_nsmap(root), [('', 'uri1')])
self.assertListEqual(resource.get_nsmap(root[0]), [('x', 'uri2'), ('', 'uri1')])
self.assertListEqual(resource.get_nsmap(root[1]), [('', 'uri3')])
self.assertListEqual(resource.get_nsmap(alien_elem), [])
resource = XMLResource(source)
root = resource.root
self.assertListEqual(resource.get_nsmap(root), [('', 'uri1')])
self.assertListEqual(resource.get_nsmap(root[0]), [('', 'uri1'), ('x', 'uri2')])
self.assertListEqual(resource.get_nsmap(root[1]), [('', 'uri1'), ('', 'uri3')])
self.assertListEqual(resource.get_nsmap(alien_elem), [])
resource = XMLResource(StringIO(source), lazy=True)
root = resource.root
self.assertTrue(resource.is_lazy())
self.assertListEqual(resource.get_nsmap(root), [('', 'uri1')])
self.assertListEqual(resource.get_nsmap(root[0]), [])
self.assertListEqual(resource.get_nsmap(root[1]), [])
self.assertListEqual(resource.get_nsmap(alien_elem), [])
def test_xml_subresource(self):
resource = XMLResource(self.vh_xml_file, lazy=True)
with self.assertRaises(XMLResourceError) as ctx:
resource.subresource(resource.root)
self.assertEqual("cannot create a subresource from a lazy XML resource",
str(ctx.exception))
resource = XMLResource(self.vh_xml_file)
root = resource.root
subresource = resource.subresource(root[0])
self.assertIs(subresource.root, resource.root[0])
with self.assertRaises(XMLResourceError) as ctx:
resource.subresource(None)
self.assertEqual("None is not an element or the XML resource tree", str(ctx.exception))
if lxml_etree is not None:
resource = XMLResource(lxml_etree.parse(self.vh_xml_file).getroot())
root = resource.root
subresource = resource.subresource(root[0])
self.assertIs(subresource.root, resource.root[0])
xml_text = '<a><b1 xmlns:x="tns0"><c1 xmlns:y="tns1"/><c2/></b1><b2/></a>'
resource = XMLResource(xml_text)
root = resource.root
subresource = resource.subresource(root[0])
self.assertIs(subresource.root, resource.root[0])
def test_loading_from_unrelated_dirs__issue_237(self):
relative_path = str(pathlib.Path(__file__).parent.joinpath(
'test_cases/issues/issue_237/dir1/issue_237.xsd'
))
schema = XMLSchema(relative_path)
self.assertEqual(schema.maps.namespaces[''][1].name, 'issue_237a.xsd')
self.assertEqual(schema.maps.namespaces[''][2].name, 'issue_237b.xsd')
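# Illustrative sketch (not part of the test suite above): the lazy-iteration
# behaviour these tests rely on. With lazy=True the resource parses the
# document incrementally and prunes visited subtrees, which is why several
# tests compare tags rather than element identities. The file name and tag
# below are hypothetical.
def _lazy_iteration_example():  # pragma: no cover - documentation only
    resource = XMLResource('large-document.xml', lazy=True)
    for elem in resource.iter('{http://example.com/ns}record'):
        pass  # each subtree is discarded once it has been visited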
if __name__ == '__main__':
header_template = "Test xmlschema's XML resources with Python {} on platform {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
# --- Azure/azure-cli (MIT, permissive) ---
# /src/azure-cli/azure/cli/command_modules/keyvault/vendored_sdks/azure_keyvault_t1/key_vault_client.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure import AzureConfiguration
from msrestazure.azure_active_directory import BasicTokenAuthentication
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from .version import VERSION
from . import KeyVaultAuthentication
from .v7_2.version import VERSION as v7_2_VERSION
class KeyVaultClientConfiguration(AzureConfiguration):
"""Configuration for KeyVaultClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
"""
def __init__(
self, credentials):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = '{vaultBaseUrl}'
super(KeyVaultClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-keyvault/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
class KeyVaultClient(MultiApiClientMixin):
"""The key vault client performs cryptographic key operations and vault operations against the Key Vault service.
    Implementation depends on the API version; this vendored copy ships a single version:
         * 7.2: :class:`v7_2.KeyVaultClient<azure.keyvault.v7_2.KeyVaultClient>`
:ivar config: Configuration for client.
:vartype config: KeyVaultClientConfiguration
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param str api_version: API version to use if no profile is provided, or if
missing in profile.
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
"""
DEFAULT_API_VERSION = '7.2'
_PROFILE_TAG = "azure.keyvault.KeyVaultClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION
}},
_PROFILE_TAG + " latest"
)
_init_complete = False
def __init__(self, credentials, api_version=None, profile=KnownProfiles.default):
self.config = KeyVaultClientConfiguration(credentials)
self._client_impls = {}
self._entered = False
super(KeyVaultClient, self).__init__(
api_version=api_version,
profile=profile
)
# if the supplied credentials instance is not derived from KeyVaultAuthBase but is an AAD credential type
if not isinstance(credentials, KeyVaultAuthentication) and isinstance(credentials, BasicTokenAuthentication):
# wrap the supplied credentials with a KeyVaultAuthentication instance. Use that for the credentials
# supplied to the base client
credentials = KeyVaultAuthentication(credentials=credentials)
self._credentials = credentials
self._init_complete = True
@property
def models(self):
api_version = self._get_api_version(None)
if api_version == v7_2_VERSION:
from .v7_2 import models as implModels
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return implModels
def _get_client_impl(self):
"""
Get the versioned client implementation corresponding to the current profile.
:return: The versioned client implementation.
"""
api_version = self._get_api_version(None)
if api_version not in self._client_impls:
self._create_client_impl(api_version)
return self._client_impls[api_version]
def _create_client_impl(self, api_version):
"""
        Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return:
"""
if api_version == v7_2_VERSION:
from .v7_2 import KeyVaultClient as ImplClient
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
impl = ImplClient(credentials=self._credentials)
impl.config = self.config
# if __enter__ has previously been called and the impl client has __enter__ defined we need to call it
if self._entered and hasattr(impl, '__enter__'):
impl.__enter__()
self._client_impls[api_version] = impl
return impl
def __enter__(self, *args, **kwargs):
"""
Calls __enter__ on all client implementations which support it
:param args: positional arguments to relay to client implementations of __enter__
:param kwargs: keyword arguments to relay to client implementations of __enter__
:return: returns the current KeyVaultClient instance
"""
for _, impl in self._client_impls.items():
if hasattr(impl, '__enter__'):
impl.__enter__(*args, **kwargs)
# mark the current KeyVaultClient as _entered so that client implementations instantiated
# subsequently will also have __enter__ called on them as appropriate
self._entered = True
return self
def __exit__(self, *args, **kwargs):
"""
Calls __exit__ on all client implementations which support it
        :param args: positional arguments to relay to client implementations of __exit__
        :param kwargs: keyword arguments to relay to client implementations of __exit__
:return: returns the current KeyVaultClient instance
"""
for _, impl in self._client_impls.items():
if hasattr(impl, '__exit__'):
impl.__exit__(*args, **kwargs)
return self
def __getattr__(self, name):
"""
        If the attribute is not defined on the custom KeyVaultClient, attempt to get
        it from the versioned client implementation corresponding to the current profile.
        :param name: Name of the attribute to retrieve from the current versioned client implementation
:return: The value of the specified attribute on the current client implementation.
"""
impl = self._get_client_impl()
return getattr(impl, name)
def __setattr__(self, name, attr):
"""
Sets the specified attribute either on the custom KeyVaultClient or the current underlying implementation.
:param name: Name of the attribute to set
:param attr: Value of the attribute to set
:return: None
"""
if self._init_complete and not hasattr(self, name):
impl = self._get_client_impl()
setattr(impl, name, attr)
else:
super(KeyVaultClient, self).__setattr__(name, attr)
def get_pending_certificate_signing_request(self, vault_base_url, certificate_name, custom_headers=None, raw=False, **operation_config):
"""Gets the Base64 pending certificate signing request (PKCS-10).
:param vault_base_url: The vault name, e.g.
https://myvault.vault.azure.net
:type vault_base_url: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: Base64 encoded pending certificate signing request (PKCS-10).
        :rtype: str or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
            if raw=true
"""
# Construct URL
url = '/certificates/{certificate-name}/pending'
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/pkcs10'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=True, **operation_config)
if response.status_code not in [200]:
raise self.models.KeyVaultErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = response.body() if hasattr(response, 'body') else response.content
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
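# Illustrative sketch (not part of the generated client): constructing the
# multi-API client. Any AAD credential derived from BasicTokenAuthentication
# is wrapped in KeyVaultAuthentication by __init__; the vault URL and
# certificate name below are hypothetical.
def _client_usage_example(credentials):  # pragma: no cover - documentation only
    client = KeyVaultClient(credentials, api_version='7.2')
    csr = client.get_pending_certificate_signing_request(
        'https://myvault.vault.azure.net', 'my-cert')
    return csr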
# --- fabioz/PyDev.Debugger (Apache-2.0 / EPL-1.0 / MIT, permissive) ---
# /_pydevd_frame_eval/vendored/bytecode/tests/test_bytecode.py
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
import sys
import textwrap
import unittest
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, FreeVar, Bytecode, SetLineno, ConcreteInstr
from _pydevd_frame_eval.vendored.bytecode.tests import TestCase, get_code
class BytecodeTests(TestCase):
maxDiff = 80 * 100
def test_constructor(self):
code = Bytecode()
self.assertEqual(code.name, "<module>")
self.assertEqual(code.filename, "<string>")
self.assertEqual(code.flags, 0)
self.assertEqual(code, [])
def test_invalid_types(self):
code = Bytecode()
code.append(123)
with self.assertRaises(ValueError):
list(code)
with self.assertRaises(ValueError):
code.legalize()
with self.assertRaises(ValueError):
Bytecode([123])
def test_legalize(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y"),
Label(),
SetLineno(5),
Instr("LOAD_CONST", 9, lineno=6),
Instr("STORE_NAME", "z"),
]
)
code.legalize()
self.assertListEqual(
code,
[
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y", lineno=4),
Label(),
Instr("LOAD_CONST", 9, lineno=5),
Instr("STORE_NAME", "z", lineno=5),
],
)
def test_slice(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
sliced_code = code[:]
self.assertEqual(code, sliced_code)
for name in (
"argcount",
"posonlyargcount",
"kwonlyargcount",
"first_lineno",
"name",
"filename",
"docstring",
"cellvars",
"freevars",
"argnames",
):
self.assertEqual(
getattr(code, name, None), getattr(sliced_code, name, None)
)
def test_copy(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
copy_code = code.copy()
self.assertEqual(code, copy_code)
for name in (
"argcount",
"posonlyargcount",
"kwonlyargcount",
"first_lineno",
"name",
"filename",
"docstring",
"cellvars",
"freevars",
"argnames",
):
self.assertEqual(getattr(code, name, None), getattr(copy_code, name, None))
def test_from_code(self):
code = get_code(
"""
if test:
x = 1
else:
x = 2
"""
)
bytecode = Bytecode.from_code(code)
label_else = Label()
label_exit = Label()
if sys.version_info < (3, 10):
self.assertEqual(
bytecode,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label_else, lineno=1),
Instr("LOAD_CONST", 1, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", label_exit, lineno=2),
label_else,
Instr("LOAD_CONST", 2, lineno=4),
Instr("STORE_NAME", "x", lineno=4),
label_exit,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
],
)
# Control flow handling appears to have changed under Python 3.10
else:
self.assertEqual(
bytecode,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label_else, lineno=1),
Instr("LOAD_CONST", 1, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("LOAD_CONST", None, lineno=2),
Instr("RETURN_VALUE", lineno=2),
label_else,
Instr("LOAD_CONST", 2, lineno=4),
Instr("STORE_NAME", "x", lineno=4),
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
],
)
def test_from_code_freevars(self):
ns = {}
exec(
textwrap.dedent(
"""
def create_func():
x = 1
def func():
return x
return func
func = create_func()
"""
),
ns,
ns,
)
code = ns["func"].__code__
bytecode = Bytecode.from_code(code)
self.assertEqual(
bytecode,
[
Instr("LOAD_DEREF", FreeVar("x"), lineno=5),
Instr("RETURN_VALUE", lineno=5),
],
)
def test_from_code_load_fast(self):
code = get_code(
"""
def func():
x = 33
y = x
""",
function=True,
)
code = Bytecode.from_code(code)
self.assertEqual(
code,
[
Instr("LOAD_CONST", 33, lineno=2),
Instr("STORE_FAST", "x", lineno=2),
Instr("LOAD_FAST", "x", lineno=3),
Instr("STORE_FAST", "y", lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
],
)
def test_setlineno(self):
# x = 7
# y = 8
# z = 9
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
concrete = code.to_concrete_bytecode()
self.assertEqual(concrete.consts, [7, 8, 9])
self.assertEqual(concrete.names, ["x", "y", "z"])
self.assertListEqual(
list(concrete),
[
ConcreteInstr("LOAD_CONST", 0, lineno=3),
ConcreteInstr("STORE_NAME", 0, lineno=3),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1, lineno=4),
ConcreteInstr("LOAD_CONST", 2, lineno=5),
ConcreteInstr("STORE_NAME", 2, lineno=5),
],
)
def test_to_code(self):
code = Bytecode()
code.first_lineno = 50
code.extend(
[
Instr("LOAD_NAME", "print"),
Instr("LOAD_CONST", "%s"),
Instr("LOAD_GLOBAL", "a"),
Instr("BINARY_MODULO"),
Instr("CALL_FUNCTION", 1),
Instr("RETURN_VALUE"),
]
)
co = code.to_code()
# hopefully this is obvious from inspection? :-)
self.assertEqual(co.co_stacksize, 3)
co = code.to_code(stacksize=42)
self.assertEqual(co.co_stacksize, 42)
def test_negative_size_unary(self):
opnames = (
"UNARY_POSITIVE",
"UNARY_NEGATIVE",
"UNARY_NOT",
"UNARY_INVERT",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_unary_with_disable_check_of_pre_and_post(self):
opnames = (
"UNARY_POSITIVE",
"UNARY_NEGATIVE",
"UNARY_NOT",
"UNARY_INVERT",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 0)
def test_negative_size_binary(self):
opnames = (
"BINARY_POWER",
"BINARY_MULTIPLY",
"BINARY_MATRIX_MULTIPLY",
"BINARY_FLOOR_DIVIDE",
"BINARY_TRUE_DIVIDE",
"BINARY_MODULO",
"BINARY_ADD",
"BINARY_SUBTRACT",
"BINARY_SUBSCR",
"BINARY_LSHIFT",
"BINARY_RSHIFT",
"BINARY_AND",
"BINARY_XOR",
"BINARY_OR",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_binary_with_disable_check_of_pre_and_post(self):
opnames = (
"BINARY_POWER",
"BINARY_MULTIPLY",
"BINARY_MATRIX_MULTIPLY",
"BINARY_FLOOR_DIVIDE",
"BINARY_TRUE_DIVIDE",
"BINARY_MODULO",
"BINARY_ADD",
"BINARY_SUBTRACT",
"BINARY_SUBSCR",
"BINARY_LSHIFT",
"BINARY_RSHIFT",
"BINARY_AND",
"BINARY_XOR",
"BINARY_OR",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_negative_size_call(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("CALL_FUNCTION", 0)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_unpack(self):
opnames = (
"UNPACK_SEQUENCE",
"UNPACK_EX",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname, 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build(self):
opnames = (
"BUILD_TUPLE",
"BUILD_LIST",
"BUILD_SET",
)
if sys.version_info >= (3, 6):
opnames = (*opnames, "BUILD_STRING")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname, 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build_map(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("BUILD_MAP", 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build_map_with_disable_check_of_pre_and_post(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("BUILD_MAP", 1)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
    @unittest.skipIf(sys.version_info < (3, 6), "Nonexistent opcode")
def test_negative_size_build_const_map(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", ("a",)), Instr("BUILD_CONST_KEY_MAP", 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
    @unittest.skipIf(sys.version_info < (3, 6), "Nonexistent opcode")
def test_negative_size_build_const_map_with_disable_check_of_pre_and_post(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", ("a",)), Instr("BUILD_CONST_KEY_MAP", 1)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_empty_dup(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("DUP_TOP")])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_dup(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("DUP_TOP_TWO")])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_rot(self):
opnames = ["ROT_TWO", "ROT_THREE"]
if sys.version_info >= (3, 8):
opnames.append("ROT_FOUR")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_rot_with_disable_check_of_pre_and_post(self):
opnames = ["ROT_TWO", "ROT_THREE"]
if sys.version_info >= (3, 8):
opnames.append("ROT_FOUR")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_for_iter_stack_effect_computation(self):
with self.subTest():
code = Bytecode()
code.first_lineno = 1
lab1 = Label()
lab2 = Label()
code.extend(
[
lab1,
Instr("FOR_ITER", lab2),
Instr("STORE_FAST", "i"),
Instr("JUMP_ABSOLUTE", lab1),
lab2,
]
)
with self.assertRaises(RuntimeError):
# Use compute_stacksize since the code is so broken that conversion
# to from concrete is actually broken
code.compute_stacksize(check_pre_and_post=False)
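# Illustrative sketch (not part of the test suite above): the round-trip these
# tests exercise - build abstract bytecode, compile it to a code object, and
# execute it.
def _bytecode_roundtrip_example():  # pragma: no cover - documentation only
    code = Bytecode([
        Instr("LOAD_CONST", 42),
        Instr("STORE_NAME", "x"),
        Instr("LOAD_CONST", None),
        Instr("RETURN_VALUE"),
    ])
    namespace = {}
    exec(code.to_code(), namespace)
    assert namespace["x"] == 42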
if __name__ == "__main__":
unittest.main() # pragma: no cover
# --- yuki-koyama/mathtoolbox (MIT, permissive) ---
# /python-examples/rbf-interpolation-polynomial.py
import pymathtoolbox
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import seaborn as sns
# Define constants
FIG_SIZE = (8, 4)
Y_RANGE = (-0.5, 3.0)
IMAGE_FORMAT = "png"
DPI = 200
# Generate data points
large_x = np.array([[0.1, 0.4, 0.7, 0.9]])
small_y = np.array([1.0, 1.6, 0.8, 1.5])
# Set up the plot design
sns.set()
sns.set_context()
plt.rcParams['font.sans-serif'] = ["Linux Biolinum O", "Linux Biolinum"]
# Prepare a figure object
fig = plt.figure(figsize=FIG_SIZE, dpi=DPI)
# Define interpolation settings
conditions = [
{
"use_polynomial_term": False,
},
{
"use_polynomial_term": True,
},
]
regularization_weight = 1e-08
for index, condition in enumerate(conditions):
use_polynomial_term = condition["use_polynomial_term"]
rbf_kernels = [
(pymathtoolbox.GaussianRbfKernel(10.0), "Gaussian"),
(pymathtoolbox.ThinPlateSplineRbfKernel(), "thin plate spline"),
]
# Begin to draw the plot
ax = fig.add_subplot(1, 2, index + 1)
ax.set_ylim(Y_RANGE)
for i, rbf_kernel in enumerate(rbf_kernels):
# Instantiate the interpolator
interpolator = pymathtoolbox.RbfInterpolator(rbf_kernel[0],
use_polynomial_term)
# Prepare interpolator
interpolator.set_data(large_x, small_y)
interpolator.calc_weights(True, regularization_weight)
# Calculate sequences of interpolated values
x_samples = np.arange(-0.8, 1.8, 0.001)
vec_func = np.vectorize(
lambda x: interpolator.calc_value(np.array([x])))
values = vec_func(x_samples)
# Plot the interpolated values
ax.plot(x_samples,
values,
label="Interpolation ({})".format(rbf_kernel[1]),
color=sns.color_palette()[1 + i])
# Plot the observed sampling points
ax.plot(np.transpose(large_x),
small_y,
marker="o",
linewidth=0.0,
markersize=4.0,
label="Observed data",
color=sns.color_palette()[0])
# Show legends
ax.legend(loc="upper left")
# Set title
title = "RBF Interpolation " + ("with" if use_polynomial_term else
"without") + " Polynomial Term"
ax.set_title(title)
# Tighten the layout
fig.tight_layout()
# Export the figure as an image file
output_path = "./rbf-interpolation-out." + IMAGE_FORMAT
fig.savefig(output_path)
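# Note (added commentary, not in the original script): the polynomial term
# augments the RBF system with a low-order polynomial basis so the interpolant
# can reproduce constant and linear trends; for conditionally positive
# definite kernels such as the thin plate spline this augmentation is what
# makes the underlying linear system well posed.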
# --- pymedphys/pymedphys (Apache-2.0, permissive) ---
# /lib/pymedphys/tests/experimental/paulking/test_coll/test_equivalent_fs.py
# Copyright (C) 2018 Paul King
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymedphys._imports import numpy as np
from pymedphys._utilities.constants import A_LEAF_TYPE, AGILITY
from pymedphys._experimental.paulking.collequivalent import (
mlc_equivalent_square_fs as equivalent_square,
)
# import pytest
def test_equivalent_mlc():
"""Compare effective field size for known pattern against benchmark."""
mlc_segments = [(0.0, 0.0)] * 14
mlc_segments += [
(3.03, 2.47),
(2.88, 2.46),
(3.08, 2.51),
(2.86, 2.46),
(2.88, 2.46),
(2.91, 5.04),
(2.5, 5.04),
(2.55, 4.87),
(2.38, 4.61),
(2.38, 7.04),
(2.61, 7.46),
(2.48, 6.55),
(3.02, 6.52),
(3.9, 7.2),
(4.5, 7.5),
(4.5, 7.5),
(4.5, 7.5),
(4.5, 7.5),
(4.45, 7.5),
(4.0, 7.5),
(3.5, 7.5),
(3.49, 7.5),
(3.0, 7.5),
(3.0, 7.5),
(3.0, 7.5),
(2.5, 7.5),
(2.5, 7.5),
(2.49, 6.52),
]
mlc_segments += [(0.0, 0.0)] * 18
mlc_segments = np.array(mlc_segments) * 10 # convert to mm
assert abs(equivalent_square(mlc_segments, A_LEAF_TYPE) - 107.25) < 0.05
def an_equivalent_square(square_size):
open_leaves = square_size // 5
# Make sure evenly divides
assert open_leaves == square_size / 5
num_remaining_leaves = 80 - open_leaves
leaves_on_top = num_remaining_leaves // 2
leaves_on_bottom = num_remaining_leaves - leaves_on_top
mlc_segments = (
[(0, 0)] * leaves_on_top
+ [(square_size / 2, square_size / 2)] * open_leaves
+ [(0, 0)] * leaves_on_bottom
)
assert equivalent_square(mlc_segments, AGILITY) == square_size
# @pytest.mark.xfail
def test_equivalent_squares():
sizes_to_test = (10, 20, 50, 100, 200, 400)
for square_size in sizes_to_test:
an_equivalent_square(square_size)
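# Illustrative sketch (not part of the test module above): for a plain a x b
# rectangular field the classic Sterling approximation gives the equivalent
# square side as 4 * Area / Perimeter = 2ab / (a + b); the MLC-based routine
# under test generalises this idea to irregular leaf patterns.
def _sterling_equivalent_square(a, b):  # pragma: no cover - documentation only
    return 2 * a * b / (a + b)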
# --- CiscoDevNet/ydk-gen (Apache-2.0, permissive) ---
# /sdk/python/gnmi/samples/test_utils.py
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_utils.py
Utility functions for test cases.
"""
import re
import logging
from ydk.entity_utils import get_data_node_from_entity
from ydk.errors import YCoreError
def assert_with_error(pattern, ErrorClass):
def assert_with_pattern(func):
def helper(self, *args, **kwargs):
try:
func(self)
except ErrorClass as error:
res = re.match(pattern, error.message.strip())
self.assertEqual(res is not None, True)
return helper
return assert_with_pattern
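# Usage sketch (illustrative, not part of the original module): the decorator
# wraps a test method, catches the expected error class, and asserts that the
# error message matches the given pattern. The test method below is
# hypothetical:
#
#     @assert_with_error(pattern="Invalid path.*", ErrorClass=YCoreError)
#     def test_invalid_path(self):
#         ...  # code expected to raise YCoreError("Invalid path: ...")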
def datanode_to_str(dn, indent = ''):
try:
s = dn.get_schema_node().get_statement()
if s.keyword == "leaf" or s.keyword == "leaf-list" or s.keyword == "anyxml":
out = indent + "<" + s.arg + ">" + dn.get_value() + "</" + s.arg + ">\n"
else:
out = indent + "<" + s.arg + ">\n"
child_indent = indent + " "
for child in dn.get_children():
out += datanode_to_str(child, child_indent)
out += indent + "</" + s.arg + ">\n"
return out
except YCoreError as ex:
print(ex.message)
def print_data_node(dn):
try:
print("\n=====> Printing DataNode: '{}'".format(dn.get_path()))
print(datanode_to_str(dn))
    except YCoreError as ex:
print(ex.message)
def print_entity(entity, root_schema):
    dn = get_data_node_from_entity(entity, root_schema)
print_data_node(dn)
def enable_logging(level):
log = logging.getLogger('ydk')
log.setLevel(level)
handler = logging.StreamHandler()
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
# --- pyscaffold/pyscaffold (MIT / 0BSD, permissive) ---
# /src/pyscaffold/file_system.py
"""Internal library that encapsulate file system manipulation.
Examples include: creating/removing files and directories, changing permissions, etc.
Functions in this library usually extend the behaviour of Python's standard lib by
providing proper error handling or adequate logging/control flow in the context of
PyScaffold (an example of adequate control flow logic is dealing with the ``pretend``
flag).
"""
import errno
import os
import shutil
import stat
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from tempfile import mkstemp
from typing import Callable, Optional, Union
from .log import logger
from .shell import IS_WINDOWS
PathLike = Union[str, os.PathLike]
@contextmanager
def tmpfile(**kwargs):
"""Context manager that yields a temporary :obj:`Path`"""
fp, path = mkstemp(**kwargs)
os.close(fp) # we don't need the low level file handler
file = Path(path)
try:
yield file
finally:
file.unlink()
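# Illustrative sketch (not part of the original module): typical use of
# ``tmpfile``; the temporary file is unlinked automatically when the block
# exits.
def _tmpfile_usage_example():  # pragma: no cover - documentation only
    with tmpfile(suffix=".cfg") as file:
        file.write_text("key = value", encoding="utf-8")
        assert file.exists()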
@contextmanager
def chdir(path: PathLike, **kwargs):
"""Contextmanager to change into a directory
Args:
path : path to change current working directory to
Keyword Args:
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
should_pretend = kwargs.get("pretend")
# ^ When pretending, automatically output logs
# (after all, this is the primary purpose of pretending)
curr_dir = os.getcwd()
try:
logger.report("chdir", path)
with logger.indent():
if not should_pretend:
os.chdir(path)
yield
finally:
os.chdir(curr_dir)
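# Illustrative sketch (not part of the original module): ``chdir`` always
# restores the previous working directory, and with ``pretend=True`` it only
# logs the change without performing it.
def _chdir_usage_example():  # pragma: no cover - documentation only
    before = os.getcwd()
    with chdir(".."):
        pass  # operations relative to the parent directory go here
    assert os.getcwd() == before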
def move(*src: PathLike, target: PathLike, **kwargs):
"""Move files or directories to (into) a new location
Args:
*src (PathLike): one or more files/directories to be moved
Keyword Args:
target (PathLike): if target is a directory, ``src`` will be
moved inside it. Otherwise, it will be the new path (note that it
may be overwritten)
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
should_pretend = kwargs.get("pretend")
for path in src:
if not should_pretend:
shutil.move(str(path), str(target))
logger.report("move", path, target=target)
def create_file(path: PathLike, content: str, pretend=False, encoding="utf-8"):
"""Create a file in the given path.
This function reports the operation in the logs.
Args:
path: path in the file system where contents will be written.
content: what will be written.
pretend (bool): false by default. File is not written when pretending,
but operation is logged.
Returns:
Path: given path
"""
path = Path(path)
if not pretend:
path.write_text(content, encoding=encoding)
logger.report("create", path)
return path
def create_directory(path: PathLike, update=False, pretend=False) -> Optional[Path]:
"""Create a directory in the given path.
This function reports the operation in the logs.
Args:
path: path in the file system where contents will be written.
update (bool): false by default. A :obj:`OSError` can be raised
when update is false and the directory already exists.
pretend (bool): false by default. Directory is not created when
pretending, but operation is logged.
"""
path = Path(path)
if path.is_dir() and update:
logger.report("skip", path)
return None
if not pretend:
try:
path.mkdir(parents=True, exist_ok=True)
except OSError:
if not update:
raise
return path # Do not log if not created
logger.report("create", path)
return path
def chmod(path: PathLike, mode: int, pretend=False) -> Path:
"""Change the permissions of file in the given path.
This function reports the operation in the logs.
Args:
path: path in the file system whose permissions will be changed
mode: new permissions, should be a combination of
            :obj:`stat.S_* <stat.S_IXUSR>` (see :obj:`os.chmod`).
pretend (bool): false by default. File is not changed when pretending,
but operation is logged.
"""
path = Path(path)
mode = stat.S_IMODE(mode)
if not pretend:
path.chmod(mode)
logger.report(f"chmod {mode:03o}", path)
return path
def localize_path(path_string: str) -> str:
"""Localize path for Windows, Unix, i.e. / or \\
Args:
path_string (str): path using /
Returns:
str: path depending on OS
"""
return str(Path(path_string))
#: Windows-specific error code indicating an invalid pathname.
ERROR_INVALID_NAME = 123
def is_pathname_valid(pathname: str) -> bool:
"""Check if a pathname is valid
Code by Cecil Curry from StackOverflow
Args:
pathname (str): string to validate
Returns:
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get("HOMEDRIVE", "C:") if IS_WINDOWS else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, "winerror"):
if exc.winerror == ERROR_INVALID_NAME: # type: ignore
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True
# If any other exception was raised, this is an unrelated fatal issue
# (e.g., a bug). Permit this exception to unwind the call stack.
#
# Did we mention this should be shipped with Python already?
def on_ro_error(func, path, exc_info):
"""Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
    Usage: ``shutil.rmtree(path, onerror=on_ro_error)``
Args:
func (callable): function which raised the exception
path (str): path passed to `func`
exc_info (tuple of str): exception info returned by sys.exc_info()
"""
    from time import sleep
    # Sometimes the OS is just asynchronously slow, but it does remove the file
sleep(0.5)
if not Path(path).exists():
return
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
return func(path)
raise
def rm_rf(path: PathLike, pretend=False):
"""Remove ``path`` by all means like ``rm -rf`` in Linux"""
target = Path(path)
if not target.exists():
return None
if target.is_dir():
remove: Callable = partial(shutil.rmtree, onerror=on_ro_error)
else:
remove = Path.unlink
if not pretend:
remove(target)
logger.report("remove", target)
return path
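# Illustrative sketch (not part of the original module): ``rm_rf`` silently
# ignores missing paths and removes files or whole directory trees otherwise.
def _rm_rf_usage_example():  # pragma: no cover - documentation only
    leftover = Path("build-artifacts")
    rm_rf(leftover)  # no-op if the path does not exist
    assert not leftover.exists()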
# --- microsoft/archai (MIT, permissive) ---
# /scripts/supergraph/download_datasets/torchvision_ds.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import dataset_utils
import torchvision
from torchvision import transforms
if __name__ == "__main__":
dataroot = dataset_utils.get_dataroot()
torchvision.datasets.STL10(
root=dataroot,
split="train",
# train=True,
download=True,
transform=transforms.Compose([]),
)
torchvision.datasets.STL10(
root=dataroot,
split="test",
# train=False,
download=True,
transform=transforms.Compose([]),
)
print("done")
|
9b67de63f09f3a3926bcd3d5dd8760a5f5b952aa
|
2551b9f15913b1f28d6323b2b868b85e9f408dad
|
/cities/migrations/0007_add_currency_and_postal_code_fields_to_country_model.py
|
4ea08ecc231d72a3a39c953f3846025fca22fde0
|
[
"MIT"
] |
permissive
|
coderholic/django-cities
|
f4b98a6926a62e342de40a0b0ebbaf523fa13af7
|
3a40381498e4b3aae7557cdd8d7ecae673b945af
|
refs/heads/master
| 2023-08-03T00:05:53.509925
| 2023-02-27T06:32:46
| 2023-02-27T06:32:46
| 1,068,056
| 711
| 259
|
MIT
| 2023-02-27T06:32:47
| 2010-11-10T11:32:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
0007_add_currency_and_postal_code_fields_to_country_model.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 18:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cities', '0006_typify_alt_names_and_add_is_historic'),
]
operations = [
migrations.AddField(
model_name='country',
name='currency_symbol',
field=models.CharField(blank=True, max_length=31, null=True),
),
migrations.AddField(
model_name='country',
name='postal_code_format',
field=models.CharField(default='', max_length=127),
preserve_default=False,
),
migrations.AddField(
model_name='country',
name='postal_code_regex',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='country',
name='currency_name',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
e84fbe47c66a92e2ee58f91a397c003f4f93e2c6
|
8730cd656903205527da131c3308579f1491cd04
|
/vue/decorators/method.py
|
b04519fa09fc7e26d4ce28de8774b3e883363ec6
|
[
"MIT"
] |
permissive
|
stefanhoelzl/vue.py
|
9509cff8a8fbd1b4a835b9f2e362935e76bf39ba
|
581e764d57e2b476e700034bc11000cd07f176df
|
refs/heads/master
| 2023-02-21T18:13:21.969549
| 2023-02-15T14:37:17
| 2023-02-15T14:43:10
| 139,488,690
| 323
| 21
|
MIT
| 2023-02-15T12:02:26
| 2018-07-02T20:05:06
|
Python
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
method.py
|
from .base import pyjs_bridge, VueDecorator
class Method(VueDecorator):
def __init__(self, fn):
if hasattr(fn, "__coroutinefunction__"):
fn = coroutine(fn)
self.__value__ = pyjs_bridge(fn, inject_vue_instance=True)
self.__key__ = f"methods.{fn.__name__}"
def coroutine(_coroutine):
def wrapper(*args, **kwargs):
import asyncio
return asyncio.ensure_future(_coroutine(*args, **kwargs))
wrapper.__name__ = _coroutine.__name__
return wrapper
def method(_method):
if hasattr(_method, "__coroutinefunction__"):
_method = coroutine(_method)
return Method(_method)
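# Hedged usage sketch (not part of the original module): inside a vue.py
# component, plain and async methods are registered the same way. The
# `Counter` component, its `count` field, and the VueComponent base class are
# illustrative assumptions.
#
#     class Counter(VueComponent):
#         count = 0
#
#         @method
#         def increment(self):
#             self.count += 1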
|
7f1f89e4d078e5d09a7dcca6fed5e36947a07f92
|
a40a5748b599ca6b080b986343e41b9a9b551705
|
/setup.py
|
712204c383efa11a55d50408e11b37ea509f00a4
|
[
"BSD-2-Clause"
] |
permissive
|
miracle2k/tarsnapper
|
36a1d5131fbfa7b5a83d4dcfdd86cde337ad8141
|
e6cc0331cf1396d38df43629c60733f5efc66381
|
refs/heads/master
| 2023-06-28T04:53:38.609210
| 2021-08-27T09:47:50
| 2021-08-27T09:47:50
| 710,922
| 162
| 35
|
BSD-2-Clause
| 2023-06-18T13:05:33
| 2010-06-09T04:49:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
setup.py
|
#!/usr/bin/env python
# encoding: utf8
"""Adapted from virtualenv's setup.py.
"""
import sys, os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
kw = {'scripts': ['scripts/tarsnapper']}
else:
kw = {'entry_points':
"""[console_scripts]\ntarsnapper = tarsnapper.script:run\n""",
'zip_safe': False}
import re
here = os.path.dirname(os.path.abspath(__file__))
# Figure out the version
version_re = re.compile(
r'__version__ = (\(.*?\))')
fp = open(os.path.join(here, 'tarsnapper/__init__.py'))
version = None
for line in fp:
match = version_re.search(line)
if match:
exec("version = %s" % match.group(1))
version = ".".join(map(str, version))
break
else:
raise Exception("Cannot find version in __init__.py")
fp.close()
setup(name='tarsnapper',
version=version,
description="Manages tarsnap backups",
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
],
author='Michael Elsdoerfer',
author_email='michael@elsdoerfer.com',
url='http://github.com/miracle2k/tarsnapper',
license='BSD',
packages=['tarsnapper'],
install_requires = ['argparse>=1.1', 'pyyaml>=3.09', 'python-dateutil>=2.4.0', 'pexpect>=3.1'],
**kw
)
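# Hedged illustration (not part of the original script): the version regex
# above captures the literal tuple, which is then evaluated and joined into a
# dotted string. The sample line below is an illustrative assumption.
#
#     >>> m = version_re.search("__version__ = (0, 4, 0)")
#     >>> ".".join(map(str, eval(m.group(1))))
#     '0.4.0'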
|
d881a0a2589068593ac3123c3c84c6b0e3b8725f
|
2b7e3535fdf055643d07499a5eff12c6e9e29e2f
|
/sopel/modules/isup.py
|
7982975fbb616b58f137be0c9cf7199db0377eee
|
[
"EFL-2.0"
] |
permissive
|
sopel-irc/sopel
|
fb9669d82df137a322f38d9a9b38911dd2fdb5c2
|
bc688b4eaee25a1be4fef66477f016bc21ea61d8
|
refs/heads/master
| 2023-09-03T20:10:40.233784
| 2023-08-16T20:55:45
| 2023-08-16T20:55:45
| 3,035,586
| 598
| 422
|
NOASSERTION
| 2023-09-04T06:59:41
| 2011-12-22T17:59:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,414
|
py
|
isup.py
|
"""
isup.py - Sopel Website Status Check Plugin
Copyright 2011, Elsie Powell http://embolalia.com
Licensed under the Eiffel Forum License 2.
https://sopel.chat
"""
from __future__ import annotations
import requests
from sopel import plugin
PLUGIN_OUTPUT_PREFIX = '[isup] '
def get_site_url(site):
"""Get a ``site`` URL
:param str site: the site to get URL for
:return: a valid site URL
:raise ValueError: when site is empty, or isn't well formatted
The ``site`` argument is checked: its scheme must be ``http`` or ``https``,
or a :exc:`ValueError` is raised.
If the ``site`` does not have a scheme, ``http`` is used. If it doesn't
have a TLD, a :exc:`ValueError` is raised.
"""
site = site.strip() if site else ''
if not site:
raise ValueError('What site do you want to check?')
if not site.startswith(('http://', 'https://')):
if '://' in site:
protocol = site.split('://')[0] + '://'
raise ValueError('Try it again without the %s' % protocol)
site = 'http://' + site
domain = site.split('/')[2].split(':')[0]
if '.' not in domain:
raise ValueError('I need a fully qualified domain name (with a dot).')
if domain.endswith(('.local', '.example', '.test', '.invalid', '.localhost')):
raise ValueError("I can't check LAN-local or invalid domains.")
return site
def handle_isup(bot, trigger, secure=True):
"""Handle the ``bot`` command from ``trigger``
:param bot: Sopel instance
:type bot: :class:`sopel.bot.SopelWrapper`
:param trigger: Command's trigger instance
:type trigger: :class:`sopel.trigger.Trigger`
:param bool secure: Check SSL error if ``True`` (the default)
"""
try:
site = get_site_url(trigger.group(2))
except ValueError as error:
bot.reply(str(error))
return
try:
response = requests.head(site, verify=secure, timeout=(10.0, 5.0))
response.raise_for_status()
except requests.exceptions.SSLError:
bot.say(
'{} looks down to me (SSL error). Try using `{}isupinsecure`.'
.format(site, bot.config.core.help_prefix))
except requests.HTTPError:
bot.say(
'{} looks down to me (HTTP {} "{}").'
.format(site, response.status_code, response.reason))
except requests.ConnectTimeout:
bot.say(
'{} looks down to me (timed out while connecting).'
.format(site))
except requests.ReadTimeout:
bot.say(
'{} looks down to me (timed out waiting for reply).'
.format(site))
except requests.ConnectionError:
bot.say(
'{} looks down to me (connection error).'
.format(site))
except ValueError:
bot.reply('"{}" is not a valid URL.'.format(site))
else:
# If no exception happened, the request must have succeeded.
bot.say(site + ' looks fine to me.')
@plugin.command('isupinsecure')
@plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
def isup_insecure(bot, trigger):
"""Check if a website is up (without verifying HTTPS)."""
handle_isup(bot, trigger, secure=False)
@plugin.command('isup')
@plugin.example('.isup google.com')
@plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
def isup(bot, trigger):
"""Check if a website is up or not."""
handle_isup(bot, trigger, secure=True)
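# Hedged examples (not part of the original plugin) of get_site_url's
# normalization rules, derived from the checks above:
#
#     get_site_url("example.com")          -> "http://example.com"
#     get_site_url("https://example.com")  -> "https://example.com"
#     get_site_url("ftp://example.com")    -> ValueError ("Try it again without the ftp://")
#     get_site_url("localhost")            -> ValueError (needs a fully qualified domain name)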
|
d4a9fddda0017dfdcc2e67050596f75612b800de
|
e4c5238c86c8a114d49b7ba3ecc5ef9d5157e152
|
/toqito/channel_ops/partial_channel.py
|
d3235a7f30236a371e86afc6b415426eedf76017
|
[
"MIT"
] |
permissive
|
vprusso/toqito
|
64a9963c02b73127836b76d886543a0642b93664
|
7e6869d783f98cb241579ea89e0f9ff61eff9d9b
|
refs/heads/master
| 2023-07-22T17:08:18.392204
| 2023-07-19T07:27:37
| 2023-07-19T07:27:37
| 235,493,396
| 116
| 53
|
MIT
| 2023-09-12T13:35:38
| 2020-01-22T03:47:16
|
Python
|
UTF-8
|
Python
| false
| false
| 6,753
|
py
|
partial_channel.py
|
"""Apply channel a subsystem of an operator."""
from __future__ import annotations
import numpy as np
import itertools
from toqito.channel_ops import apply_channel
from toqito.states import max_entangled
from toqito.perms import permute_systems
def partial_channel(
rho: np.ndarray,
phi_map: np.ndarray | list[list[np.ndarray]],
sys: int = 2,
dim: list[int] | np.ndarray = None,
) -> np.ndarray:
r"""Apply channel to a subsystem of an operator [WatPMap18]_.
Applies the operator
.. math::
\left(\mathbb{I} \otimes \Phi \right) \left(\rho \right).
In other words, it is the result of applying the channel :math:`\Phi` to the second subsystem
of :math:`\rho`, which is assumed to act on two subsystems of equal dimension.
The input :code:`phi_map` should be provided as a Choi matrix.
This function is adapted from the QETLAB package.
Examples
==========
The following applies the completely depolarizing channel to the second
subsystem of a random density matrix.
    >>> import numpy as np
    >>> from toqito.channel_ops import partial_channel
    >>> from toqito.channels import depolarizing
    >>> rho = np.array([[0.3101, -0.0220-0.0219*1j, -0.0671-0.0030*1j, -0.0170-0.0694*1j],
    ...                 [-0.0220+0.0219*1j, 0.1008, -0.0775+0.0492*1j, -0.0613+0.0529*1j],
    ...                 [-0.0671+0.0030*1j, -0.0775-0.0492*1j, 0.1361, 0.0602 + 0.0062*1j],
    ...                 [-0.0170+0.0694*1j, -0.0613-0.0529*1j, 0.0602-0.0062*1j, 0.4530]])
    >>> partial_channel(rho, depolarizing(2))
[[ 0.20545+0.j 0. +0.j -0.0642 +0.02495j 0. +0.j ]
[ 0. +0.j 0.20545+0.j 0. +0.j -0.0642 +0.02495j]
[-0.0642 -0.02495j 0. +0.j 0.29455+0.j 0. +0.j ]
[ 0. +0.j -0.0642 -0.02495j 0. +0.j 0.29455+0.j ]]
The following applies the completely depolarizing channel to the first
subsystem.
    >>> from toqito.channel_ops import partial_channel
    >>> from toqito.channels import depolarizing
    >>> rho = np.array([[0.3101, -0.0220-0.0219*1j, -0.0671-0.0030*1j, -0.0170-0.0694*1j],
    ...                 [-0.0220+0.0219*1j, 0.1008, -0.0775+0.0492*1j, -0.0613+0.0529*1j],
    ...                 [-0.0671+0.0030*1j, -0.0775-0.0492*1j, 0.1361, 0.0602 + 0.0062*1j],
    ...                 [-0.0170+0.0694*1j, -0.0613-0.0529*1j, 0.0602-0.0062*1j, 0.4530]])
    >>> partial_channel(rho, depolarizing(2), 1)
[[0.2231+0.j 0.0191-0.00785j 0. +0.j 0. +0.j ]
[0.0191+0.00785j 0.2769+0.j 0. +0.j 0. +0.j ]
[0. +0.j 0. +0.j 0.2231+0.j 0.0191-0.00785j]
[0. +0.j 0. +0.j 0.0191+0.00785j 0.2769+0.j ]]
References
==========
.. [WatPMap18] Watrous, John.
The theory of quantum information.
Cambridge University Press, 2018.
:raises ValueError: If Phi map is not provided as a Choi matrix or Kraus
operators.
:param rho: A matrix.
:param phi_map: The map to partially apply.
:param sys: Scalar or vector specifying the size of the subsystems.
:param dim: Dimension of the subsystems. If :code:`None`, all dimensions
are assumed to be equal.
:return: The partial map :code:`phi_map` applied to matrix :code:`rho`.
"""
if dim is None:
dim = np.round(np.sqrt(list(rho.shape))).conj().T * np.ones(2)
if isinstance(dim, list):
dim = np.array(dim)
# Force dim to be a row vector.
if dim.ndim == 1:
dim = dim.T.flatten()
dim = np.array([dim, dim])
prod_dim_r1 = int(np.prod(dim[0, : sys - 1]))
prod_dim_c1 = int(np.prod(dim[1, : sys - 1]))
prod_dim_r2 = int(np.prod(dim[0, sys:]))
prod_dim_c2 = int(np.prod(dim[1, sys:]))
if isinstance(phi_map, list):
# Compute the Kraus operators on the full system.
s_phi_1, s_phi_2 = len(phi_map), len(phi_map[0])
phi_list = []
# Map is completely positive if input is given as:
# 1. [K1, K2, .. Kr]
# 2. [[K1], [K2], .. [Kr]]
# 3. [[K1, K2, .. Kr]] and r > 2
if isinstance(phi_map[0], np.ndarray):
phi_list = phi_map
        elif s_phi_2 == 1 or (s_phi_1 == 1 and s_phi_2 > 2):
phi_list = list(itertools.chain(*phi_map))
if phi_list:
phi = []
for m in phi_list:
phi.append(
np.kron(
np.kron(np.identity(prod_dim_r1), m),
np.identity(prod_dim_r2),
)
)
phi_x = apply_channel(rho, phi)
else:
phi_1 = []
for m in phi_map:
phi_1.append(
np.kron(
np.kron(np.identity(prod_dim_r1), m[0]),
np.identity(prod_dim_r2),
)
)
phi_2 = []
for m in phi_map:
phi_2.append(
np.kron(
np.kron(np.identity(prod_dim_c1), m[1]),
np.identity(prod_dim_c2),
)
)
phi_x = [list(l) for l in zip(phi_1, phi_2)]
phi_x = apply_channel(rho, phi_x)
return phi_x
# The `phi_map` variable is provided as a Choi matrix.
if isinstance(phi_map, np.ndarray):
dim_phi = phi_map.shape
dim = np.array(
[
[
prod_dim_r1,
prod_dim_r1,
int(dim[0, sys - 1]),
int(dim_phi[0] / dim[0, sys - 1]),
prod_dim_r2,
prod_dim_r2,
],
[
prod_dim_c1,
prod_dim_c1,
int(dim[1, sys - 1]),
int(dim_phi[1] / dim[1, sys - 1]),
prod_dim_c2,
prod_dim_c2,
],
]
)
psi_r1 = max_entangled(prod_dim_r1, False, False)
psi_c1 = max_entangled(prod_dim_c1, False, False)
psi_r2 = max_entangled(prod_dim_r2, False, False)
psi_c2 = max_entangled(prod_dim_c2, False, False)
phi_map = permute_systems(
np.kron(np.kron(psi_r1 * psi_c1.conj().T, phi_map), psi_r2 * psi_c2.conj().T),
[1, 3, 5, 2, 4, 6],
dim,
)
phi_x = apply_channel(rho, phi_map)
return phi_x
raise ValueError(
"The `phi_map` variable is assumed to be provided as "
"either a Choi matrix or a list of Kraus operators."
)
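# Hedged usage sketch (not part of the original module): the same function
# accepts a channel given as a list of Kraus operators. The single identity
# Kraus operator below is an illustrative assumption (it leaves `rho`
# unchanged).
#
#     >>> import numpy as np
#     >>> from toqito.channel_ops import partial_channel
#     >>> rho = np.eye(4) / 4
#     >>> np.allclose(partial_channel(rho, [np.eye(2)]), rho)
#     True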
|
ed5a7f09d264beccf7284a78562a6d766bd5e3b2
|
d2e75f67fbb9815a63b82082fafc173d8ae78ea3
|
/tools/kitti_object_eval_python/evaluate.py
|
e56bfe5b81f5d502dac588831e94b2cf63d28cd8
|
[
"MIT"
] |
permissive
|
happinesslz/EPNet
|
adb931478de38c4e9eed837fc34922e55b5953c9
|
0123c341243846aa3b412addcb9e2c07fd305237
|
refs/heads/master
| 2022-12-18T00:15:23.719957
| 2020-08-25T09:49:46
| 2020-08-25T09:49:46
| 276,774,529
| 231
| 43
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
evaluate.py
|
import time
import fire
import tools.kitti_object_eval_python.kitti_common as kitti
from tools.kitti_object_eval_python.eval import get_official_eval_result, get_coco_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
result_path,
label_split_file,
current_class = 0,
coco = False,
score_thresh = -1):
dt_annos = kitti.get_label_annos(result_path)
if score_thresh > 0:
dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
val_image_ids = _read_imageset_file(label_split_file)
gt_annos = kitti.get_label_annos(label_path, val_image_ids)
if coco:
return get_coco_eval_result(gt_annos, dt_annos, current_class)
else:
return get_official_eval_result(gt_annos, dt_annos, current_class)
if __name__ == '__main__':
fire.Fire()
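# Hedged CLI sketch (not part of the original file): fire.Fire() exposes the
# module's callables, so the evaluation is typically launched as below. The
# concrete paths are illustrative assumptions.
#
#     python evaluate.py evaluate --label_path=data/object/label_2 \
#         --result_path=output/final_result/data \
#         --label_split_file=data/ImageSets/val.txt --current_class=0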
|
714a4ac20fc78c5a8f9f57546f1028d9140e44f4
|
3aab11d445011f4a0de1376886dd3899aba44e68
|
/opps/channels/templatetags/channels_tags.py
|
bb3432ee1b92f0b126dfa8355b71fd57e0b9ae81
|
[
"MIT"
] |
permissive
|
opps/opps
|
4ba6a08ac5aa31be48c245b2e8f9d9a714a5e473
|
5552924fa34ea40d24febeac5046bd59f62e0e4f
|
refs/heads/master
| 2023-08-24T21:09:23.489540
| 2023-05-22T20:07:33
| 2023-05-22T20:07:33
| 7,712,379
| 166
| 76
|
MIT
| 2022-01-06T22:53:23
| 2013-01-20T03:56:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
channels_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from django.core.cache import cache
from opps.channels.models import Channel
register = template.Library()
@register.assignment_tag
def get_channel(slug):
"""
Usage:
{% get_channel "videos" %}
"""
try:
return Channel.objects.get(site=settings.SITE_ID, slug=slug,
published=True)
except Channel.DoesNotExist:
return Channel.objects.none()
@register.assignment_tag
def get_channels_by(**filters):
"""Return a list of channels filtered by given args"""
filters['site'] = settings.SITE_ID
filters['published'] = True
cache_key = u'getchannelsby-{0}'.format(hash(frozenset(filters.items())))
cache_timeout = getattr(settings, 'OPPS_CACHE_EXPIRE', 0)
if cache_timeout and cache.get(cache_key):
return cache.get(cache_key)
channels = Channel.objects.filter(**filters)
if cache_timeout:
cache.set(cache_key, channels, cache_timeout)
return channels
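# Hedged template usage sketch (not part of the original module): assignment
# tags store their result with ``as``; the slug and filter values below are
# illustrative assumptions.
#
#     {% load channels_tags %}
#     {% get_channel "videos" as channel %}
#     {% get_channels_by parent=None as root_channels %}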
|
4ff82f402b84ac723bb9526910aca9c0b2e5a81e
|
48cd6a93fe538693fec65aaa81306e6b69b642ad
|
/dask/dataframe/tests/test_ufunc.py
|
e23bfe402527dbf3a4a92df87ae485acdbf053e1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dask/dask
|
0138cc2fb9aad27287643fe5ee240b8b09f2300d
|
18098d35298bad21c878c339d73de784612566c7
|
refs/heads/main
| 2023-09-04T02:39:37.886054
| 2023-09-01T19:02:00
| 2023-09-01T19:02:00
| 28,782,747
| 11,423
| 2,116
|
BSD-3-Clause
| 2023-09-14T17:36:04
| 2015-01-04T18:50:00
|
Python
|
UTF-8
|
Python
| false
| false
| 16,595
|
py
|
test_ufunc.py
|
from __future__ import annotations
import warnings
import pytest
pd = pytest.importorskip("pandas")
import numpy as np
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq
_BASE_UFUNCS = [
"conj",
"exp",
"log",
"log2",
"log10",
"log1p",
"expm1",
"sqrt",
"square",
"sin",
"cos",
"tan",
"arcsin",
"arccos",
"arctan",
"sinh",
"cosh",
"tanh",
"arcsinh",
"arccosh",
"arctanh",
"deg2rad",
"rad2deg",
"isfinite",
"isinf",
"isnan",
"signbit",
"degrees",
"radians",
"rint",
"fabs",
"sign",
"absolute",
"floor",
"ceil",
"trunc",
"logical_not",
"cbrt",
"exp2",
"negative",
"reciprocal",
"spacing",
]
@pytest.mark.parametrize(
"pandas_input",
[
pd.Series(np.random.randint(1, 100, size=20)),
pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
}
),
pd.Series(
np.random.randint(1, 100, size=20), index=list("abcdefghijklmnopqrst")
),
pd.Series(np.abs(np.random.randn(20)), index=list("abcdefghijklmnopqrst")),
pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
},
index=list("abcdefghijklmnopqrst"),
),
],
)
@pytest.mark.parametrize("ufunc", _BASE_UFUNCS)
def test_ufunc(pandas_input, ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
dask_input = dd.from_pandas(pandas_input, 3)
pandas_type = pandas_input.__class__
dask_type = dask_input.__class__
# applying Dask ufunc doesn't trigger computation
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Some cause warnings (arcsine)
assert isinstance(dafunc(dask_input), dask_type)
assert_eq(dafunc(dask_input), npfunc(pandas_input))
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc):
assert isinstance(npfunc(dask_input), dask_type)
else:
assert isinstance(npfunc(dask_input), pandas_type)
assert_eq(npfunc(dask_input), npfunc(pandas_input))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(pandas_input), pandas_type)
    assert_eq(dafunc(pandas_input), npfunc(pandas_input))
# Index
if pandas_input.index.dtype in [object, str]:
return
if ufunc in ("logical_not", "signbit", "isnan", "isinf", "isfinite"):
return
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert isinstance(dafunc(dask_input.index), dd.Index)
assert_eq(
dafunc(dask_input.index),
npfunc(pandas_input.index),
check_divisions=ufunc != "spacing",
)
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc):
assert isinstance(npfunc(dask_input.index), dd.Index)
else:
assert isinstance(npfunc(dask_input.index), pd.Index)
assert_eq(
            npfunc(dask_input.index),
            npfunc(pandas_input.index),
check_divisions=ufunc != "spacing",
)
# applying Dask ufunc to normal Series triggers computation
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# some (da.log) cause warnings
assert isinstance(dafunc(pandas_input.index), pd.Index)
        assert_eq(dafunc(pandas_input.index), npfunc(pandas_input.index))
@pytest.mark.parametrize(
"ufunc",
[
"isreal",
"iscomplex",
"real",
"imag",
"angle",
"fix",
"i0",
"sinc",
"nan_to_num",
],
)
def test_ufunc_wrapped(ufunc):
"""
some np.ufuncs doesn't call __array_wrap__
(or __array_ufunc__ starting from numpy v.1.13.0), it should work as below
- da.ufunc(dd.Series) => da.Array
- da.ufunc(pd.Series) => np.ndarray
- np.ufunc(dd.Series) => np.ndarray
- np.ufunc(pd.Series) => np.ndarray
"""
from dask.array.utils import assert_eq as da_assert_eq
if ufunc == "fix":
pytest.skip("fix calls floor in a way that we do not yet support")
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s = pd.Series(
np.random.randint(1, 100, size=20), index=list("abcdefghijklmnopqrst")
)
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), da.Array)
da_assert_eq(dafunc(ds), npfunc(s))
assert isinstance(npfunc(ds), np.ndarray)
np.testing.assert_equal(npfunc(ds), npfunc(s))
assert isinstance(dafunc(s), np.ndarray)
np.testing.assert_array_equal(dafunc(s), npfunc(s))
df = pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
},
index=list("abcdefghijklmnopqrst"),
)
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf), da.Array)
da_assert_eq(dafunc(ddf), npfunc(df))
assert isinstance(npfunc(ddf), np.ndarray)
np.testing.assert_array_equal(npfunc(ddf), npfunc(df))
assert isinstance(dafunc(df), np.ndarray)
np.testing.assert_array_equal(dafunc(df), npfunc(df))
def test_ufunc_wrapped_not_implemented():
s = pd.Series(
np.random.randint(1, 100, size=20), index=list("abcdefghijklmnopqrst")
)
ds = dd.from_pandas(s, 3)
with pytest.raises(NotImplementedError, match="`repeat` is not implemented"):
np.repeat(ds, 10)
df = pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
},
index=list("abcdefghijklmnopqrst"),
)
ddf = dd.from_pandas(df, 3)
with pytest.raises(NotImplementedError, match="`repeat` is not implemented"):
np.repeat(ddf, 10)
_UFUNCS_2ARG = [
"logaddexp",
"logaddexp2",
"arctan2",
"hypot",
"copysign",
"nextafter",
pytest.param("ldexp", marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")]),
pytest.param("fmod", marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")]),
"logical_and",
"logical_or",
"logical_xor",
"maximum",
"minimum",
"fmax",
"fmin",
"greater",
"greater_equal",
"less",
"less_equal",
"not_equal",
"equal",
"logical_or",
"logical_and",
"logical_xor",
]
@pytest.mark.parametrize("ufunc", _UFUNCS_2ARG)
@pytest.mark.parametrize(
"make_pandas_input",
[
lambda: pd.Series(np.random.randint(1, 100, size=20)),
lambda: pd.DataFrame(
np.random.randint(1, 100, size=(20, 2)), columns=["A", "B"]
),
],
)
def test_ufunc_with_2args(ufunc, make_pandas_input):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
pandas1 = make_pandas_input()
pandas2 = make_pandas_input()
dask1 = dd.from_pandas(pandas1, 3)
dask2 = dd.from_pandas(pandas2, 4)
pandas_type = pandas1.__class__
dask_type = dask1.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask1, dask2), dask_type)
assert_eq(dafunc(dask1, dask2), npfunc(pandas1, pandas2))
# should be fine with pandas as a second arg, too
assert isinstance(dafunc(dask1, pandas2), dask_type)
assert_eq(dafunc(dask1, pandas2), npfunc(pandas1, pandas2))
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc):
assert isinstance(npfunc(dask1, dask2), dask_type)
assert isinstance(npfunc(dask1, pandas2), dask_type)
else:
assert isinstance(npfunc(dask1, dask2), pandas_type)
assert isinstance(npfunc(dask1, pandas2), pandas_type)
assert_eq(npfunc(dask1, dask2), npfunc(pandas1, pandas2))
assert_eq(npfunc(dask1, pandas2), npfunc(pandas1, pandas2))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(pandas1, pandas2), pandas_type)
assert_eq(dafunc(pandas1, pandas2), npfunc(pandas1, pandas2))
@pytest.mark.parametrize(
"pandas,min,max",
[
(pd.Series(np.random.randint(1, 100, size=20)), 5, 50),
(
pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=["A", "B"]),
5.5,
40.5,
),
],
)
def test_clip(pandas, min, max):
dask = dd.from_pandas(pandas, 3)
pandas_type = pandas.__class__
dask_type = dask.__class__
# clip internally calls dd.Series.clip
# applying Dask ufunc doesn't trigger computation
assert isinstance(da.clip(dask, min, max), dask_type)
assert_eq(da.clip(dask, min, max), np.clip(pandas, min, max))
# applying Numpy ufunc doesn't trigger computation
assert isinstance(np.clip(dask, min, max), dask_type)
assert_eq(np.clip(dask, min, max), np.clip(pandas, min, max))
# applying Dask ufunc to normal pandas objects triggers computation
assert isinstance(da.clip(pandas, min, max), pandas_type)
assert_eq(da.clip(pandas, min, max), np.clip(pandas, min, max))
@pytest.mark.parametrize("ufunc", _BASE_UFUNCS)
def test_frame_ufunc_out(ufunc):
npfunc = getattr(np, ufunc)
dafunc = getattr(da, ufunc)
input_matrix = np.random.randint(1, 100, size=(20, 2))
df = pd.DataFrame(input_matrix, columns=["A", "B"])
ddf = dd.from_pandas(df, 3)
df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=["Y", "Z"])
ddf_out_np = dd.from_pandas(df_out, 3)
ddf_out_da = dd.from_pandas(df_out, 3)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
npfunc(ddf, out=ddf_out_np)
dafunc(ddf, out=ddf_out_da)
assert_eq(ddf_out_np, ddf_out_da)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = pd.DataFrame(npfunc(input_matrix), columns=["A", "B"])
assert_eq(ddf_out_np, expected)
def test_frame_2ufunc_out():
input_matrix = np.random.randint(1, 100, size=(20, 2))
df = pd.DataFrame(input_matrix, columns=["A", "B"])
ddf = dd.from_pandas(df, 3)
# column number mismatch
df_out = pd.DataFrame(
np.random.randint(1, 100, size=(20, 3)), columns=["X", "Y", "Z"]
)
ddf_out = dd.from_pandas(df_out, 3)
with pytest.raises(ValueError):
np.sin(ddf, out=ddf_out)
# types mismatch
ddf_out = dd.from_pandas(pd.Series([0]), 1)
with pytest.raises(TypeError):
np.sin(ddf, out=ddf_out)
df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=["X", "Y"])
ddf_out = dd.from_pandas(df_out, 3)
np.sin(ddf, out=ddf_out)
np.add(ddf_out, 10, out=ddf_out)
expected = pd.DataFrame(np.sin(input_matrix) + 10, columns=["A", "B"])
assert_eq(ddf_out, expected)
@pytest.mark.parametrize(
"arg1",
[
pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
}
),
],
)
@pytest.mark.parametrize("arg2", [2, dd.from_pandas(pd.Series([0]), 1).sum()])
@pytest.mark.parametrize("ufunc", _UFUNCS_2ARG)
def test_mixed_types(ufunc, arg1, arg2):
npfunc = getattr(np, ufunc)
dafunc = getattr(da, ufunc)
dask = dd.from_pandas(arg1, 3)
pandas_type = arg1.__class__
dask_type = dask.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask, arg2), dask_type)
    assert_eq(dafunc(dask, arg2), npfunc(arg1, arg2))
# applying NumPy ufunc is lazy
assert isinstance(npfunc(dask, arg2), dask_type)
assert_eq(npfunc(dask, arg2), npfunc(arg1, arg2))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(arg1, arg2), pandas_type)
assert_eq(dafunc(arg1, arg2), npfunc(arg1, arg2))
# swapping arguments
# first parameter of ldexp should be array-like
if ufunc == "ldexp":
return
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(arg2, dask), dask_type)
    assert_eq(dafunc(arg2, dask), npfunc(arg2, arg1))
# applying NumPy ufunc is lazy
assert isinstance(npfunc(arg2, dask), dask_type)
    assert_eq(npfunc(arg2, dask), npfunc(arg2, arg1))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(arg2, arg1), pandas_type)
assert_eq(dafunc(arg2, arg1), npfunc(arg2, arg1))
@pytest.mark.parametrize("ufunc", _UFUNCS_2ARG)
@pytest.mark.parametrize(
"pandas,darray",
[
(
pd.Series(np.random.randint(1, 100, size=(100,))),
da.from_array(np.random.randint(1, 100, size=(100,)), chunks=(50,)),
),
(
pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=["A", "B"]),
da.from_array(np.random.randint(1, 100, size=(20, 2)), chunks=(10, 2)),
),
],
)
def test_2args_with_array(ufunc, pandas, darray):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
dask = dd.from_pandas(pandas, 2)
dask_type = dask.__class__
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(dask, darray), dask_type)
assert isinstance(dafunc(darray, dask), dask_type)
np.testing.assert_array_equal(
dafunc(dask, darray).compute().values, npfunc(pandas.values, darray).compute()
)
# applying NumPy ufunc is lazy
assert isinstance(npfunc(dask, darray), dask_type)
assert isinstance(npfunc(darray, dask), dask_type)
np.testing.assert_array_equal(
npfunc(dask, darray).compute().values, npfunc(pandas.values, darray.compute())
)
np.testing.assert_array_equal(
npfunc(darray, dask).compute().values, npfunc(darray.compute(), pandas.values)
)
@pytest.mark.parametrize("redfunc", ["sum", "prod", "min", "max", "mean"])
@pytest.mark.parametrize("ufunc", _BASE_UFUNCS)
@pytest.mark.parametrize(
"pandas",
[
pd.Series(np.abs(np.random.randn(100))),
pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
}
),
],
)
def test_ufunc_with_reduction(redfunc, ufunc, pandas):
dask = dd.from_pandas(pandas, 3)
np_redfunc = getattr(np, redfunc)
np_ufunc = getattr(np, ufunc)
if (
redfunc == "prod"
and ufunc in ["conj", "square", "negative", "absolute"]
and isinstance(pandas, pd.DataFrame)
):
# TODO(pandas) follow pandas behaviour?
# starting with pandas 1.2.0, the ufunc is applied column-wise, and therefore
# applied on the integer columns separately, overflowing for those columns
# (instead of being applied on 2D ndarray that was converted to float)
pytest.xfail("'prod' overflowing with integer columns in pandas 1.2.0")
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
warnings.simplefilter("ignore", FutureWarning)
assert isinstance(np_redfunc(dask), (dd.DataFrame, dd.Series, dd.core.Scalar))
assert_eq(np_redfunc(np_ufunc(dask)), np_redfunc(np_ufunc(pandas)))
@pytest.mark.parametrize(
"pandas",
[
pd.Series(np.random.randint(1, 100, size=100)),
pd.DataFrame(
{
"A": np.random.randint(1, 100, size=20),
"B": np.random.randint(1, 100, size=20),
"C": np.abs(np.random.randn(20)),
}
),
],
)
@pytest.mark.parametrize("scalar", [15, 16.4, np.int64(15), np.float64(16.4)])
def test_ufunc_numpy_scalar_comparison(pandas, scalar):
# Regression test for issue #3392
dask_compare = scalar >= dd.from_pandas(pandas, npartitions=3)
pandas_compare = scalar >= pandas
assert_eq(dask_compare, pandas_compare)
|
4e4b256dad2964346ffc5ed4c81e5bad0fd28491
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/dialects/postgresql/ext.py
|
ad1267750bb61d7aa28ea742b55e85e04ef89e92
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 16,253
|
py
|
ext.py
|
# postgresql/ext.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations
from typing import Any
from typing import TYPE_CHECKING
from typing import TypeVar
from . import types
from .array import ARRAY
from ...sql import coercions
from ...sql import elements
from ...sql import expression
from ...sql import functions
from ...sql import roles
from ...sql import schema
from ...sql.schema import ColumnCollectionConstraint
from ...sql.sqltypes import TEXT
from ...sql.visitors import InternalTraversal
_T = TypeVar("_T", bound=Any)
if TYPE_CHECKING:
from ...sql.visitors import _TraverseInternalsType
class aggregate_order_by(expression.ColumnElement):
"""Represent a PostgreSQL aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select(expr)
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select(expr)
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms
.. seealso::
:class:`_functions.array_agg`
"""
__visit_name__ = "aggregate_order_by"
stringify_dialect = "postgresql"
_traverse_internals: _TraverseInternalsType = [
("target", InternalTraversal.dp_clauseelement),
("type", InternalTraversal.dp_type),
("order_by", InternalTraversal.dp_clauseelement),
]
def __init__(self, target, *order_by):
self.target = coercions.expect(roles.ExpressionElementRole, target)
self.type = self.target.type
_lob = len(order_by)
if _lob == 0:
raise TypeError("at least one ORDER BY element is required")
elif _lob == 1:
self.order_by = coercions.expect(
roles.ExpressionElementRole, order_by[0]
)
else:
self.order_by = elements.ClauseList(
*order_by, _literal_as_text_role=roles.ExpressionElementRole
)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `PostgreSQL
documentation`__.
__ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
""" # noqa
__visit_name__ = "exclude_constraint"
where = None
inherit_cache = False
create_drop_stringify_dialect = "postgresql"
@elements._document_text_coercion(
"where",
":class:`.ExcludeConstraint`",
":paramref:`.ExcludeConstraint.where`",
)
def __init__(self, *elements, **kw):
r"""
Create an :class:`.ExcludeConstraint` object.
E.g.::
const = ExcludeConstraint(
(Column('period'), '&&'),
(Column('group'), '='),
where=(Column('group') != 'some group'),
ops={'group': 'my_operator_class'}
)
The constraint is normally embedded into the :class:`_schema.Table`
construct
directly, or added later using :meth:`.append_constraint`::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('period', TSRANGE()),
Column('group', String)
)
some_table.append_constraint(
ExcludeConstraint(
(some_table.c.period, '&&'),
(some_table.c.group, '='),
where=some_table.c.group != 'some group',
name='some_table_excl_const',
ops={'group': 'my_operator_class'}
)
)
        The exclude constraint defined in this example requires the
        ``btree_gist`` extension, which can be created using the
        command ``CREATE EXTENSION btree_gist;``.
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
"column" is either a :class:`_schema.Column` object, or a SQL
expression element (e.g. ``func.int8range(table.from, table.to)``)
or the name of a column as string, and "operator" is a string
containing the operator to use (e.g. `"&&"` or `"="`).
In order to specify a column name when a :class:`_schema.Column`
object is not available, while ensuring
that any necessary quoting rules take effect, an ad-hoc
:class:`_schema.Column` or :func:`_expression.column`
object should be used.
The ``column`` may also be a string SQL expression when
passed as :func:`_expression.literal_column` or
:func:`_expression.text`
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional SQL expression construct or literal SQL string.
If set, emit WHERE <predicate> when issuing DDL
for this constraint.
:param ops:
Optional dictionary. Used to define operator classes for the
elements; works the same way as that of the
:ref:`postgresql_ops <postgresql_operator_classes>`
parameter specified to the :class:`_schema.Index` construct.
.. versionadded:: 1.3.21
.. seealso::
:ref:`postgresql_operator_classes` - general description of how
PostgreSQL operator classes are specified.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, expressions
),
operators,
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
render_exprs.append((expr, name, operator))
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get("name"),
deferrable=kw.get("deferrable"),
initially=kw.get("initially"),
)
self.using = kw.get("using", "gist")
where = kw.get("where")
if where is not None:
self.where = coercions.expect(roles.StatementOptionRole, where)
self.ops = kw.get("ops", {})
def _set_parent(self, table, **kw):
super()._set_parent(table)
self._render_exprs = [
(
expr if not isinstance(expr, str) else table.c[expr],
name,
operator,
)
for expr, name, operator in (self._render_exprs)
]
def _copy(self, target_table=None, **kw):
elements = [
(
schema._copy_expression(expr, self.parent, target_table),
operator,
)
for expr, _, operator in self._render_exprs
]
c = self.__class__(
*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
where=self.where,
using=self.using,
)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""PostgreSQL-specific form of :class:`_functions.array_agg`, ensures
return type is :class:`_postgresql.ARRAY` and not
the plain :class:`_types.ARRAY`, unless an explicit ``type_``
is passed.
"""
kw["_default_array_type"] = ARRAY
return functions.func.array_agg(*arg, **kw)
class _regconfig_fn(functions.GenericFunction[_T]):
inherit_cache = True
def __init__(self, *args, **kwargs):
args = list(args)
if len(args) > 1:
initial_arg = coercions.expect(
roles.ExpressionElementRole,
args.pop(0),
name=getattr(self, "name", None),
apply_propagate_attrs=self,
type_=types.REGCONFIG,
)
initial_arg = [initial_arg]
else:
initial_arg = []
addtl_args = [
coercions.expect(
roles.ExpressionElementRole,
c,
name=getattr(self, "name", None),
apply_propagate_attrs=self,
)
for c in args
]
super().__init__(*(initial_arg + addtl_args), **kwargs)
class to_tsvector(_regconfig_fn):
"""The PostgreSQL ``to_tsvector`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSVECTOR`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.to_tsvector` will be used automatically when invoking
``sqlalchemy.func.to_tsvector()``, ensuring the correct argument and return
type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSVECTOR
class to_tsquery(_regconfig_fn):
"""The PostgreSQL ``to_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.to_tsquery` will be used automatically when invoking
``sqlalchemy.func.to_tsquery()``, ensuring the correct argument and return
type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
class plainto_tsquery(_regconfig_fn):
"""The PostgreSQL ``plainto_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.plainto_tsquery` will be used automatically when
invoking ``sqlalchemy.func.plainto_tsquery()``, ensuring the correct
argument and return type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
class phraseto_tsquery(_regconfig_fn):
"""The PostgreSQL ``phraseto_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.phraseto_tsquery` will be used automatically when
invoking ``sqlalchemy.func.phraseto_tsquery()``, ensuring the correct
argument and return type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
class websearch_to_tsquery(_regconfig_fn):
"""The PostgreSQL ``websearch_to_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.websearch_to_tsquery` will be used automatically when
invoking ``sqlalchemy.func.websearch_to_tsquery()``, ensuring the correct
argument and return type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
class ts_headline(_regconfig_fn):
"""The PostgreSQL ``ts_headline`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_types.TEXT`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.ts_headline` will be used automatically when invoking
``sqlalchemy.func.ts_headline()``, ensuring the correct argument and return
type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = TEXT
def __init__(self, *args, **kwargs):
args = list(args)
# parse types according to
# https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-HEADLINE
if len(args) < 2:
# invalid args; don't do anything
has_regconfig = False
elif (
isinstance(args[1], elements.ColumnElement)
and args[1].type._type_affinity is types.TSQUERY
):
# tsquery is second argument, no regconfig argument
has_regconfig = False
else:
has_regconfig = True
if has_regconfig:
initial_arg = coercions.expect(
roles.ExpressionElementRole,
args.pop(0),
apply_propagate_attrs=self,
name=getattr(self, "name", None),
type_=types.REGCONFIG,
)
initial_arg = [initial_arg]
else:
initial_arg = []
addtl_args = [
coercions.expect(
roles.ExpressionElementRole,
c,
name=getattr(self, "name", None),
apply_propagate_attrs=self,
)
for c in args
]
super().__init__(*(initial_arg + addtl_args), **kwargs)
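# Hedged usage sketch (not part of the original module): once the PostgreSQL
# dialect is imported, ``func.to_tsvector`` resolves to the REGCONFIG-aware
# class above, as its docstring describes. The table/column names are
# illustrative assumptions.
#
#     from sqlalchemy import select, func, column
#     stmt = select(func.to_tsvector("english", column("body")))
#     # "english" is coerced to REGCONFIG; the result type is TSVECTOR.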
|
989d783ffb7945512b1ed422a55f6941b9b058ae
|
1f48b50d3cf179ed0892b4c29752a2ea9c9eac89
|
/ahrs/utils/core.py
|
cc07b5ed7d8bd016846006d3c644181609544410
|
[
"MIT"
] |
permissive
|
Mayitzin/ahrs
|
f8b13b6e3b2bb319a0cd0e51eac39b6e6ff10572
|
6ef1c1bfc519c4a7d405780a9445a7b06ebeba2c
|
refs/heads/master
| 2023-09-01T20:53:55.955637
| 2023-08-22T12:10:30
| 2023-08-22T12:10:30
| 183,612,062
| 421
| 75
|
MIT
| 2022-12-04T23:41:15
| 2019-04-26T10:50:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
core.py
|
"""
Contains the core utilities for the proper use of AHRS: assertions, data
handling, etc.
These functions have no other goal in this package than to be used by other
modules. They are not meant to be used by the user.
This module is private. All functions and objects are available in the main
``ahrs`` namespace, or its corresponding submodule - use that instead.
"""
import numpy as np
def _assert_valid_array_type(item, item_name: str = 'iterable'):
"""Assert it is a list, tuple, or numpy array"""
# NOTE: This could be changed to a more pythonic solution looking for the
# dunder method __iter__(), but that yields strings too.
if not isinstance(item, (list, tuple, np.ndarray)):
raise TypeError(f"{item_name} must be given as an array. Got {type(item)}")
def _assert_numerical_iterable(item, item_name: str = 'iterable'):
"""Assert it is a list, tuple, or numpy array, and that it has numerical values"""
_assert_valid_array_type(item, item_name)
item_copy = np.copy(item)
    if item_copy.dtype not in (np.dtype(int), np.dtype(float)):
raise TypeError(f"{item_name} must have numerical values. Got {item_copy.dtype.name}")
|
047ae15640e29cc661c00f852124b2b19879c1e6
|
c9a5115edaee9bebae02c880113c32e0199375cd
|
/formulaic/utils/sparse.py
|
65ded255115f4f4880c0e03570985e5b57a99996
|
[
"MIT"
] |
permissive
|
matthewwardrop/formulaic
|
ceeb0d7ac4d450da1e7a99c244f03096194ea675
|
b3d2d92b5a3ac3e8a72553b9f6be1ab582771fda
|
refs/heads/main
| 2023-07-12T03:25:14.391092
| 2023-07-11T04:26:32
| 2023-07-11T04:26:32
| 205,769,591
| 236
| 14
|
MIT
| 2023-07-11T04:19:24
| 2019-09-02T03:23:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,684
|
py
|
sparse.py
|
from typing import Iterable, Optional, Tuple, List
import numpy
import pandas
import scipy.sparse as spsparse
def categorical_encode_series_to_sparse_csc_matrix(
series: Iterable, levels: Optional[Iterable[str]] = None, drop_first: bool = False
) -> Tuple[List, spsparse.csc_matrix]:
"""
Categorically encode (via dummy encoding) a `series` as a sparse matrix.
Args:
series: The iterable which should be sparse encoded.
levels: The levels for which to generate dummies (if not specified, a
dummy variable is generated for every level in `series`).
drop_first: Whether to omit the first column in order to avoid
structural collinearity.
Returns:
A tuple of form `(levels, sparse_matrix)`, where `levels` contains the
levels that were used to generate dummies, and `sparse_matrix` is the
sparse (column-major) matrix representation of the series dummy
encoding.
"""
series = pandas.Categorical(series, levels)
levels = list(levels or series.categories)
if not levels:
return levels, spsparse.csc_matrix((series.shape[0], 0))
if drop_first:
series = series.remove_categories(levels[0])
levels = levels[1:]
codes = series.codes
non_null_code_indices = codes != -1
indices = numpy.arange(series.shape[0])[non_null_code_indices]
codes = codes[non_null_code_indices]
sparse_matrix = spsparse.csc_matrix(
(
numpy.ones(codes.shape[0], dtype=float), # data
            (indices, codes),  # (row indices, column indices)
),
shape=(series.shape[0], len(levels)),
)
return levels, sparse_matrix
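# Hedged usage sketch (not part of the original module): dummy-encoding a
# small series; the sample data and the helper name are illustrative
# assumptions. The function is never called, so it has no import-time effect.
def _demo_sparse_encoding():
    levels, matrix = categorical_encode_series_to_sparse_csc_matrix(
        ["a", "b", "a", None], drop_first=False
    )
    # levels == ["a", "b"]; matrix is 4x2, with an all-zero row for the null.
    return levels, matrix.toarray()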
|
1bebb3c04da1ceab93fc5b4d4f379d1ecf55bd2f
|
b8a25cba3c725bda12e78454910bfac6658283cd
|
/nyaggle/util/traits.py
|
1e18a7f345f5761941f2abb37faec7062552abd0
|
[
"MIT"
] |
permissive
|
nyanp/nyaggle
|
e3f125fbba816c77aefb21fef5e220bd7ee36949
|
86a9db4375d4d4974a71692a756d1c4818e15122
|
refs/heads/master
| 2023-08-16T13:01:43.565349
| 2023-07-22T14:15:41
| 2023-07-22T14:15:41
| 228,955,139
| 286
| 41
|
MIT
| 2023-09-10T04:09:54
| 2019-12-19T02:01:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,571
|
py
|
traits.py
|
# Original work of safe_instance:
# https://github.com/slundberg/shap/blob/master/shap/common.py
# -----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018 Scott Lundberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import importlib.util
from typing import List, Tuple, Union
def is_instance(obj, class_path_str: Union[str, List, Tuple]) -> bool:
"""
    Acts as a safe version of isinstance without having to explicitly
    import packages which may not exist in the user's environment.
Checks if obj is an instance of type specified by class_path_str.
Parameters
----------
obj: Any
Some object you want to test against
class_path_str: str or list
A string or list of strings specifying full class paths
Example: `sklearn.ensemble.RandomForestRegressor`
Returns
--------
bool: True if isinstance is true and the package exists, False otherwise
"""
if isinstance(class_path_str, str):
class_path_strs = [class_path_str]
elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):
class_path_strs = class_path_str
else:
class_path_strs = ['']
# try each module path in order
for class_path_str in class_path_strs:
if "." not in class_path_str:
raise ValueError("class_path_str must be a string or list of strings specifying a full \
module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'")
        # Split on the last occurrence of "."
module_name, class_name = class_path_str.rsplit(".", 1)
# Check module exists
        try:
            spec = importlib.util.find_spec(module_name)
        except Exception:  # malformed module path or unimportable parent package
            spec = None
if spec is None:
continue
module = importlib.import_module(module_name)
# Get class
_class = getattr(module, class_name, None)
if _class is None:
continue
if isinstance(obj, _class):
return True
return False
def is_gbdt_instance(obj, algorithm_type: Union[str, Tuple]) -> bool:
if isinstance(algorithm_type, str):
algorithm_type = (algorithm_type,)
gbdt_instance_name = {
'lgbm': 'lightgbm.sklearn.LGBMModel',
'xgb': 'xgboost.sklearn.XGBModel',
'cat': 'catboost.core.CatBoost'
}
return is_instance(obj, tuple(gbdt_instance_name[t] for t in algorithm_type))
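# Hedged usage sketch (not part of the original module): the checks degrade
# gracefully when a library is absent. `model` is an illustrative assumption.
#
#     if is_instance(model, "sklearn.ensemble.RandomForestRegressor"):
#         ...  # sklearn-specific handling
#     if is_gbdt_instance(model, ("lgbm", "xgb", "cat")):
#         ...  # GBDT-specific handling (False if none of the libraries is installed)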
|
c5fba06321b6254689d2205d9f501dceaf069b26
|
d13d938d16058b1260e1c4e89c5a0ecfb559dc01
|
/ImageProcessing_5/topAndBlackHat_practice.py
|
35ce3c6fba3326ded7c0bd9264dbc8fcc9769bb0
|
[] |
no_license
|
LeBron-Jian/ComputerVisionPractice
|
61ecb70d7938236355eb1653c88b5fda8eeda0e7
|
f60f4d4954f35b08ea356f5eb1a4acfb18d05d47
|
refs/heads/master
| 2023-09-01T01:20:35.937692
| 2023-08-25T02:17:54
| 2023-08-25T02:17:54
| 298,447,700
| 279
| 86
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
topAndBlackHat_practice.py
|
import cv2
def hat_algorithm(img_path):
    original_img0 = cv2.imread(img_path)  # read the image in color
    original_img = cv2.imread(img_path, 0)  # read the image as grayscale
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # define a rectangular structuring element
    TOPHAT_img = cv2.morphologyEx(original_img, cv2.MORPH_TOPHAT, kernel)  # top-hat operation
    BLACKHAT_img = cv2.morphologyEx(original_img, cv2.MORPH_BLACKHAT, kernel)  # black-hat operation
    # display the images
cv2.imshow("original_img0", original_img0)
cv2.imshow("original_img", original_img)
cv2.imshow("TOPHAT_img", TOPHAT_img)
cv2.imshow("BLACKHAT_img", BLACKHAT_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
img_path = 'butterfly_Gaussian.jpg'
hat_algorithm(img_path)
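# Hedged note (not part of the original script): morphologically, the top-hat
# equals the source minus its opening, and the black-hat equals the closing
# minus the source; the lines below would reproduce the results explicitly.
#
#     opened = cv2.morphologyEx(original_img, cv2.MORPH_OPEN, kernel)
#     closed = cv2.morphologyEx(original_img, cv2.MORPH_CLOSE, kernel)
#     tophat_manual = cv2.subtract(original_img, opened)
#     blackhat_manual = cv2.subtract(closed, original_img)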
|
4c1fd51378231507d3c879e805a641dec3d9c4eb
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/runners/interactive/user_pipeline_tracker.py
|
53ee54ac8a35ed3bec5bb7812ecfd1de5029e5d3
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 5,735
|
py
|
user_pipeline_tracker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Class that tracks derived/pipeline fragments from user pipelines.
For internal use only; no backwards-compatibility guarantees.
In the InteractiveRunner the design is to keep the user pipeline unchanged,
create a copy of the user pipeline, and modify the copy. When the derived
pipeline runs, there should only be per-user pipeline state. This makes sure
that derived pipelines can link back to the parent user pipeline.
"""
import shutil
from typing import Iterator
from typing import Optional
import apache_beam as beam # type: ignore
class UserPipelineTracker:
"""Tracks user pipelines from derived pipelines.
This data structure is similar to a disjoint set data structure. A derived
pipeline can only have one parent user pipeline. A user pipeline can have many
derived pipelines.
"""
def __init__(self):
self._user_pipelines: dict[beam.Pipeline, list[beam.Pipeline]] = {}
    self._derived_pipelines: dict[beam.Pipeline, beam.Pipeline] = {}
    self._pid_to_pipelines: dict[str, beam.Pipeline] = {}
def __iter__(self) -> Iterator[beam.Pipeline]:
"""Iterates through all the user pipelines."""
for p in self._user_pipelines:
yield p
def _key(self, pipeline: beam.Pipeline) -> str:
return str(id(pipeline))
def evict(self, pipeline: beam.Pipeline) -> None:
"""Evicts the pipeline.
Removes the given pipeline and derived pipelines if a user pipeline.
Otherwise, removes the given derived pipeline.
"""
user_pipeline = self.get_user_pipeline(pipeline)
if user_pipeline:
for d in self._user_pipelines[user_pipeline]:
del self._derived_pipelines[d]
del self._user_pipelines[user_pipeline]
elif pipeline in self._derived_pipelines:
del self._derived_pipelines[pipeline]
def clear(self) -> None:
"""Clears the tracker of all user and derived pipelines."""
# Remove all local_tempdir of created pipelines.
for p in self._pid_to_pipelines.values():
shutil.rmtree(p.local_tempdir, ignore_errors=True)
self._user_pipelines.clear()
self._derived_pipelines.clear()
self._pid_to_pipelines.clear()
def get_pipeline(self, pid: str) -> Optional[beam.Pipeline]:
"""Returns the pipeline corresponding to the given pipeline id."""
return self._pid_to_pipelines.get(pid, None)
def add_user_pipeline(self, p: beam.Pipeline) -> beam.Pipeline:
"""Adds a user pipeline with an empty set of derived pipelines."""
self._memoize_pipeline(p)
# Create a new node for the user pipeline if it doesn't exist already.
user_pipeline = self.get_user_pipeline(p)
if not user_pipeline:
user_pipeline = p
self._user_pipelines[p] = []
return user_pipeline
def _memoize_pipeline(self, p: beam.Pipeline) -> None:
"""Memoizes the pid of the pipeline to the pipeline object."""
pid = self._key(p)
if pid not in self._pid_to_pipelines:
self._pid_to_pipelines[pid] = p
def add_derived_pipeline(
self, maybe_user_pipeline: beam.Pipeline,
derived_pipeline: beam.Pipeline) -> None:
"""Adds a derived pipeline with the user pipeline.
If the `maybe_user_pipeline` is a user pipeline, then the derived pipeline
will be added to its set. Otherwise, the derived pipeline will be added to
the user pipeline of the `maybe_user_pipeline`.
By doing the above one can do:
p = beam.Pipeline()
derived1 = beam.Pipeline()
derived2 = beam.Pipeline()
ut = UserPipelineTracker()
ut.add_derived_pipeline(p, derived1)
ut.add_derived_pipeline(derived1, derived2)
# Returns p.
ut.get_user_pipeline(derived2)
"""
self._memoize_pipeline(maybe_user_pipeline)
self._memoize_pipeline(derived_pipeline)
# Cannot add a derived pipeline twice.
assert derived_pipeline not in self._derived_pipelines
# Get the "true" user pipeline. This allows for the user to derive a
# pipeline from another derived pipeline, use both as arguments, and this
# method will still get the correct user pipeline.
user = self.add_user_pipeline(maybe_user_pipeline)
# Map the derived pipeline to the user pipeline.
self._derived_pipelines[derived_pipeline] = user
self._user_pipelines[user].append(derived_pipeline)
def get_user_pipeline(self, p: beam.Pipeline) -> Optional[beam.Pipeline]:
"""Returns the user pipeline of the given pipeline.
If the given pipeline has no user pipeline, i.e. not added to this tracker,
then this returns None. If the given pipeline is a user pipeline then this
returns the same pipeline. If the given pipeline is a derived pipeline then
this returns the user pipeline.
"""
# If `p` is a user pipeline then return it.
if p in self._user_pipelines:
return p
# If `p` exists then return its user pipeline.
if p in self._derived_pipelines:
return self._derived_pipelines[p]
# Otherwise, `p` is not in this tracker.
return None
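# A minimal, hedged usage sketch (illustrative only; not part of the original
# module). It shows how a derived pipeline links back to its user pipeline and
# how `evict` removes the whole family:
if __name__ == '__main__':
    user = beam.Pipeline()
    derived = beam.Pipeline()
    tracker = UserPipelineTracker()
    tracker.add_derived_pipeline(user, derived)
    assert tracker.get_user_pipeline(derived) is user
    tracker.evict(user)  # drops `user` and every pipeline derived from it
    assert tracker.get_user_pipeline(derived) is None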
|
f56747769c4539272d5147588c27c39e965e2595
|
767dae79df18f9868855774464d08864a1d8629b
|
/protonfixes/gamefixes/91100.py
|
0a0c579c2b319e03b689c5f9970dc2ca1ea4c401
|
[
"BSD-2-Clause"
] |
permissive
|
simons-public/protonfixes
|
05cd9c2c37c35ce56ec4c3cdcdba375c6eadf530
|
681411ba8ceb5d2d790e674eb7a5b98951d426e6
|
refs/heads/master
| 2022-11-16T04:16:32.764931
| 2022-11-15T00:24:24
| 2022-11-15T00:24:24
| 150,211,569
| 245
| 75
|
NOASSERTION
| 2022-11-15T00:24:25
| 2018-09-25T05:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
91100.py
|
""" Game fix for SkyDrift
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Use d9vk to avoid texture glitches
"""
util.set_environment('PROTON_USE_D9VK', '1')
|
b3351006228e37c2e22fb25cf1fc9f24e165ee31
|
e8b04bef9aa1ac8e2c109dd315f133c8f4d28ae6
|
/tests/default/controllers/test_suite_supervisor/test_suite_supervisor.py
|
fafbd52126f53379079794494f68cf6e69754d47
|
[
"Apache-2.0"
] |
permissive
|
cyberbotics/webots
|
f075dacf4067e8dcebbfd89e8690df8525f6d745
|
8aba6eaae76989facf3442305c8089d3cc366bcf
|
refs/heads/master
| 2023-08-31T09:41:13.205940
| 2023-08-18T10:48:30
| 2023-08-18T10:48:30
| 156,228,018
| 2,495
| 1,525
|
Apache-2.0
| 2023-08-28T16:30:33
| 2018-11-05T14:09:10
|
C++
|
UTF-8
|
Python
| false
| false
| 10,385
|
py
|
test_suite_supervisor.py
|
# Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supervisor used by the test suite."""
import sys
import shlex
import os.path
import time
from controller import Supervisor
class FileManager:
"""Encapsulate a file."""
def __init__(self, filename):
"""Constructor. Check the validity of the file."""
self.filename = filename
if not os.path.isfile(self.filename):
self.fatalError('File does not exist')
def fatalError(self, msg):
"""Exit the controller with an error message."""
sys.exit(self.__class__.__name__ + ' Error: ' + msg)
class IndexFileManager(FileManager):
"""Manage the index file."""
def readIndex(self):
"""Get the current index."""
file = open(self.filename)
firstLine = file.readline().strip()
index = int(firstLine)
file.close()
return index
def incrementIndex(self):
"""Increment the index."""
newIndex = self.readIndex() + 1
assert newIndex >= 0
file = open(self.filename, 'w')
file.write(str(newIndex) + '\n')
file.close()
return newIndex
class SimulationFileManager(FileManager):
"""Manage the simulation file."""
def countSimulations(self):
"""Get the simulation counter."""
# count only lines having some content
file = open(self.filename)
counter = 0
for line in file:
if line.strip():
counter += 1
file.close()
return counter
def filenameAtLine(self, index):
"""Get the filename at a given line."""
file = open(self.filename)
lines = file.readlines()
file.close()
line = lines[index].strip()
relativeFilename = line
return relativeFilename
class OutputFileManager(FileManager):
"""Manage the output file."""
def write(self, txt):
"""Write a result."""
file = open(self.filename, 'a')
file.write(txt)
file.close()
class StdFileManager(FileManager):
"""Manage the stdout or stderr text redirector to a file."""
def reset(self):
"""Empty the stdout or stderr file."""
file = open(self.filename, 'w')
file.close()
def isEmpty(self):
"""Check if the file is empty."""
return os.stat(self.filename).st_size == 0
def contains(self, expectedString):
"""Check if the file contains the expected string."""
file = open(self.filename)
found = False
line = file.readline()
while line:
if expectedString in line:
found = True
break
line = file.readline()
file.close()
return found
def dump(self):
"""Dumps the contents of the file."""
file = open(self.filename)
line = file.readline()
string = ''
while line:
string = string + line
line = file.readline()
file.close()
return string
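# Hedged usage sketch for the FileManager helpers above (comment only; the
# file names below are hypothetical):
#
#   output = OutputFileManager('output.txt')    # exits if the file is missing
#   output.write('OK: my_test\n')               # append one test result
#   stdout = StdFileManager('webots_stdout.txt')
#   if not stdout.isEmpty() and stdout.contains('ERROR'):
#       print(stdout.dump())                    # show the captured output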
class TestSuite (Supervisor):
"""Supervisor class."""
if 'WEBOTS_TEST_SUITE' not in os.environ:
sys.exit(0)
# file path based on api, proto, parser, etc. folders
indexFilename = 'worlds_index.txt'
simulationFilename = 'worlds.txt'
outputFilename = '../output.txt'
tempWorldCounterFilename = '../world_counter.txt'
def __init__(self, *args, **kwargs):
"""Supervisor constructor."""
self.lastSimulation = False
Supervisor.__init__(self, *args, **kwargs)
self.isParserTest = "parser" in self.getCustomData()
isDefaultWorld = "empty" in self.getCustomData()
if isDefaultWorld:
self.cwdPrefix = os.path.join('..', '..', '..', 'parser')
else:
self.cwdPrefix = os.path.join('..', '..')
self.lastSimulation = False
# prepare the next simulation
self.indexFileManager = IndexFileManager(os.path.join(self.cwdPrefix, self.indexFilename))
self.simulationFileManager = SimulationFileManager(os.path.join(self.cwdPrefix, self.simulationFilename))
self.outputFileManager = OutputFileManager(os.path.join(self.cwdPrefix, self.outputFilename))
currentIndex = self.indexFileManager.readIndex()
self.currentSimulationFilename = self.simulationFileManager.filenameAtLine(currentIndex)
print('RUN: ' + self.currentSimulationFilename)
nSimulations = self.simulationFileManager.countSimulations()
if currentIndex + 1 >= nSimulations:
self.lastSimulation = True
def initParserTest(self):
"""Prepare for the parser tests."""
self.stdoutFileManager = StdFileManager(os.path.join(self.cwdPrefix, '..', 'webots_stdout.txt'))
self.stderrFileManager = StdFileManager(os.path.join(self.cwdPrefix, '..', 'webots_stderr.txt'))
with open(os.path.join(self.cwdPrefix, 'expected_results.txt'), 'r') as expectedStringFile:
content = expectedStringFile.readlines()
self.expectedString = ""
found = False
localWorldPath = os.path.normpath(
self.currentSimulationFilename.replace(os.path.dirname(os.path.abspath(self.cwdPrefix)) + os.sep, ''))
for line in content:
line = line.strip()
if line:
[world, expected] = shlex.split(line)
if os.path.normpath(world) == localWorldPath:
found = True
if expected != 'VOID':
self.expectedString = expected
break
if not found:
self.outputFileManager.write(
'FAILURE with ' + self.currentSimulationFilename +
': world name not found in \'expected_results.txt\'\n'
)
return found
def assessParserTest(self):
success = False
if self.expectedString:
success = self.stderrFileManager.contains(self.expectedString) or \
self.stdoutFileManager.contains(self.expectedString)
if not success:
self.outputFileManager.write(
'FAILURE with ' + self.currentSimulationFilename +
': Expected message not found \"' + self.expectedString + '\"\n'
)
else:
success = self.stderrFileManager.isEmpty()
if not success:
self.outputFileManager.write(
'FAILURE with ' + self.currentSimulationFilename +
': Some error messages detected:\n' + self.stderrFileManager.dump() + '\n'
)
self.parserTestStepCount = 0
if success:
self.outputFileManager.write(
'OK: ' + os.path.splitext(os.path.basename(self.currentSimulationFilename))[0] + '\n'
)
def loadNextWorld(self):
IndexFileManager(os.path.join(self.cwdPrefix, self.tempWorldCounterFilename)).incrementIndex()
if self.lastSimulation:
self.simulationQuit(0)
else:
newIndex = self.indexFileManager.incrementIndex()
self.worldLoad(self.simulationFileManager.filenameAtLine(newIndex))
def run(self):
"""Supervisor run function."""
basicTimeStep = int(self.getBasicTimeStep())
if self.isParserTest:
if not self.initParserTest():
self.loadNextWorld()
return
else:
receiver = self.getDevice("ts_receiver")
receiver.enable(basicTimeStep)
# 30 seconds before executing the next world, 60 seconds for the robot_window_html test
delay = 60 if self.currentSimulationFilename.endswith('/robot_window_html.wbt') else 30
timeout = time.time() + delay
running_controllers_pid = []
test_started = False
self.parserTestStepCount = 0
while self.step(basicTimeStep) != -1:
testCompleted = False
if self.isParserTest:
self.parserTestStepCount = self.parserTestStepCount + 1
if self.parserTestStepCount > 5:
self.assessParserTest()
testCompleted = True
print("testCompleted " + str(testCompleted))
else:
# wait for controllers start or termination messages
while receiver.getQueueLength() > 0:
data = receiver.getString()
dataList = data.split(' ')
if dataList[0] == 'ts':
if dataList[1] == '1':
if dataList[2] not in running_controllers_pid:
running_controllers_pid.append(dataList[2])
test_started = True
elif dataList[2] in running_controllers_pid:
running_controllers_pid.remove(dataList[2])
receiver.nextPacket()
testCompleted = test_started and \
len(running_controllers_pid) == 0
if testCompleted:
self.loadNextWorld()
if self.isParserTest:
self.stdoutFileManager.reset()
self.stderrFileManager.reset()
return
elif time.time() > timeout:
self.outputFileManager.write(
'FAILURE with ' + self.currentSimulationFilename +
': Timeout: the results file has not been written ' +
'quickly enough\n'
)
self.loadNextWorld()
return
controller = TestSuite()
controller.run()
|
7647fbd2c5d134c46dce409dd8cd65955646d0ea
|
5ff0b54063046dee4d64e8ab408c553c6c686aae
|
/example_project/urls.py
|
fef369ee99b8a2189d252ca91e505b45b363000b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mixcloud/django-experiments
|
dbf56a3058173db2bf74e5215fe8a1c9a7aa0733
|
a0762b51596ffd3fd59ac40b6b2a5a8f898e9081
|
refs/heads/master
| 2023-09-02T20:17:43.316780
| 2023-07-25T10:13:27
| 2023-07-25T10:13:27
| 3,918,582
| 258
| 83
|
MIT
| 2022-11-03T21:21:21
| 2012-04-03T14:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
urls.py
|
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = [
path('experiments/', include('experiments.urls')),
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name="test_page.html"), name="test_page"),
path('goal/', TemplateView.as_view(template_name="goal.html"), name="goal"),
]
|
120369ef14c7fe0dae43ad1016aeb5294c3a748d
|
fbebc09f50a6ac0749e68d5dcab20afd009de71f
|
/snippets/ch11/mcpi.py
|
853f6b51e84d1c7ac3400cb2cdcd8f9976f0cbe1
|
[
"Apache-2.0"
] |
permissive
|
foxbook/atap
|
7e9adbaa1f64ff4e7a2e58a5d9b1717150f063ba
|
43fd3317b641e0830905a734226afad3a0ea19f6
|
refs/heads/master
| 2023-08-17T06:37:34.571614
| 2022-12-01T14:41:35
| 2022-12-01T14:41:35
| 88,289,875
| 401
| 316
|
Apache-2.0
| 2020-08-06T12:54:54
| 2017-04-14T17:48:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
mcpi.py
|
import time
import random
import multiprocessing as mp
from functools import wraps
def timeit(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
return wrapper
def mcpi_samples(n):
"""
Compute the number of points in the unit circle out of n points.
"""
count = 0
for i in range(n):
x, y = random.random(), random.random()
if x*x + y*y <= 1:
count += 1
return count
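# Hedged note: x and y are uniform on [0, 1), so the fraction of samples with
# x*x + y*y <= 1 estimates the area of the quarter unit circle, pi / 4; the
# callers below therefore scale count / N by 4 to estimate pi.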
@timeit
def mcpi_sequential(N):
count = mcpi_samples(N)
return count / N * 4
@timeit
def mcpi_parallel(N):
procs = mp.cpu_count()
pool = mp.Pool(processes=procs)
parts = [N // procs] * procs
parts[0] += N % procs  # distribute the remainder so exactly N samples are drawn
count = sum(pool.map(mcpi_samples, parts))
return count / N * 4
if __name__ == '__main__':
N = 10000000
pi, delta = mcpi_sequential(N)
print("sequential pi: {} in {:0.2f} seconds".format(pi, delta))
pi, delta = mcpi_parallel(N)
print("parallel pi: {} in {:0.2f} seconds".format(pi, delta))
|
75284748c24245d7645cfd06c8fe546b47f17cfd
|
38bed8ec0229b2d42ebdb33e09930ba8ee6ba5b7
|
/torchvision/prototype/datasets/_builtin/stanford_cars.py
|
e6bd6c0cf2b090453c469d0198c8650e6fd9fbe1
|
[
"BSD-3-Clause",
"CC-BY-NC-4.0"
] |
permissive
|
pytorch/vision
|
10443ac1eddf7a32ecb288fe8f58e28cab2a60a1
|
1f94320d8db8d102214a7dc02c22fa65ee9ac58a
|
refs/heads/main
| 2023-09-06T03:48:02.303020
| 2023-09-04T18:25:36
| 2023-09-04T18:25:36
| 73,328,905
| 15,620
| 8,564
|
BSD-3-Clause
| 2023-09-14T17:52:49
| 2016-11-09T23:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,401
|
py
|
stanford_cars.py
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
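# Hedged usage sketch (assumes the torchvision prototype datasets API and a
# hypothetical data root; illustrative only):
#
#   dataset = StanfordCars("~/datasets", split="train")
#   sample = next(iter(dataset))
#   sample["image"], sample["label"], sample["bounding_boxes"]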
|
3c6c924462fad2c1b1316a726ae0e86f93138447
|
1b32a80362ce9c2d8f0eb1948637c6599d85aa99
|
/torchlayers/_name.py
|
2938407a2ae50870471bdeaba68e56fc513e428e
|
[
"MIT"
] |
permissive
|
szymonmaszke/torchlayers
|
4492c628a49f4db30a76a17b5d38591a85109964
|
1eff7c55fdb3733e0acc180be79354ed35e4167c
|
refs/heads/master
| 2022-07-06T18:02:48.567112
| 2021-05-25T13:58:50
| 2022-06-13T19:09:28
| 201,987,932
| 599
| 47
|
MIT
| 2022-06-13T19:09:29
| 2019-08-12T18:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 22
|
py
|
_name.py
|
_name = "torchlayers"
|
073d629b93066fd3b57c4c5f5ac5ecf3f06eabef
|
29f9af1b48c3e1c25cec5dc3fc8d780b24331e3c
|
/Sniffer-v2.0/Toolbox/__init__.py
|
96da9c7ec84d20fe279e25b5c3138658f3e90618
|
[
"MIT"
] |
permissive
|
Macr0phag3/Sniffer
|
845da6fdd7d87b765b687cebf747adfb8f74d317
|
60e11f1b611abbbb7cb441dc169487bc9e1edbbe
|
refs/heads/master
| 2022-01-25T23:12:36.047051
| 2021-12-24T08:19:12
| 2021-12-24T08:19:12
| 123,878,049
| 165
| 39
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16
|
py
|
__init__.py
|
#encoding: utf8
|
611eba150dc98881c14a527730e30c0ecde70cae
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/responsible_ai/utils/data_process.py
|
26198155b287b25cce7e725c2b073cf467782b78
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 9,471
|
py
|
data_process.py
|
# Copyright 2023 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
class DataPreprocessing:
def __init__(self, df, categorical_features,
protected_attribute, privileged_class, target_variable, favorable_class, selected_features=[],
excluded_features=[], specific_data_preparation=None):
"""
The DataPreprocessing class preprocesses data by handling missing values, selecting features, and
encoding categorical and protected attributes.
Args:
df (pandas dataframe): input dataframe
categorical_features (list) : list of categorical features
protected_attribute (list): list of the protected attributes
privileged_class (list): list of privileged class for the protected attribute
target_variable : the name of the target variable
favorable_class : the favourable class of the target variable
selected_features (list): list of selected features to include in the preprocessed dataset
excluded_features (list): list of excluded features to exclude from the preprocessed dataset
specific_data_preparation : specific data preparation function used to preprocess the input dataframe
"""
if specific_data_preparation:
df = specific_data_preparation(df)
self.protected_attribute = protected_attribute
self.target_variable = target_variable
categorical_features = sorted(
set(categorical_features) - set(protected_attribute) -
set(target_variable) - set(excluded_features),
key=df.columns.get_loc)
selected_features = selected_features or df.columns.tolist()
selected = (set(selected_features) | set(self.protected_attribute)
| set(categorical_features) | set(self.target_variable))
df = df[sorted(selected - set(excluded_features),
key=df.columns.get_loc)]
self.data = df.dropna()
self.data = pd.get_dummies(
self.data, columns=categorical_features, prefix_sep='__')
self.data, self.privileged_classes, self.unprivileged_classes = self.preprocess_protected_attributes(
self.data, protected_attribute, privileged_class)
self.data, self.favorable_label, self.unfavorable_label = self.map_favorable_class(self.data,
target_variable,
favorable_class)
def map_favorable_class(self, df, label_name, favorable_class, favorable_label=1, unfavorable_label=0):
"""
This function maps a given label column in a pandas dataframe to a binary label column based on a given
favorable class.
If the favorable_class argument is callable, the function applies it to the label column.
Otherwise, it assumes that the label column contains categorical labels and determines the favorable and
unfavorable classes based on the favorable_class argument.
Args:
df (pandas dataframe): input dataframe
label_name (str) : name of the label column
favorable_class : the favorable class mapped to the positive label. If a callable is provided, it is applied to the
label column.
favorable_label (int) : the label value to use for the favourable class. Default is 1
unfavorable_label (int) : the label value to use for the unfavourable class. Default is 0
Returns:
df (pandas dataframe): modified dataframe with the label column mapped to binary labels based on the favourable
class
favorable_label : the label used for the favorable class
unfavorable_label : the label used for the unfavorable class
"""
if callable(favorable_class):
df[label_name] = df[label_name].apply(favorable_class)
else:
unique_labels = set(df[label_name])
if len(unique_labels) == 2:
favorable_label = favorable_class[0]
unfavorable_label = unique_labels.difference(
favorable_class).pop()
else:
pos = np.isin(df[label_name].to_numpy(), favorable_class)
df[label_name] = np.where(
pos, favorable_label, unfavorable_label)
return df, favorable_label, unfavorable_label
def preprocess_protected_attributes(self, df, protected_attribute_names, privileged_classes, privileged_values=1,
unprivileged_values=0):
"""
Preprocess protected attribute columns in a pandas dataframe by identifying the privileged and unprivileged
classes and replacing their values with the privileged and unprivileged values, respectively.
Args:
df (pandas.DataFrame): Dataframe containing the protected attribute columns to be processed
protected_attribute_names (list) : list of strings representing the names of the protected attribute columns
privileged_classes(list or callable) : list of privileged classes, or callable that maps the protected
attribute values to privileged class.
privileged_values (float) : value to be assigned to rows with privileged class. Default is 1
unprivileged_values (float) : value to be assigned to rows with unprivileged class. Default is 0
Returns:
df (pandas dataframe): modified dataframe
privileged_attributes : privileged values
unprivileged_attributes : unprivileged values
"""
privileged_attributes = []
unprivileged_attributes = []
for attr, vals in zip(protected_attribute_names, privileged_classes):
if callable(vals):
df[attr] = df[attr].apply(vals)
elif np.issubdtype(df[attr].dtype, np.number):
privileged_values = np.asarray(vals, dtype=np.float64)
unprivileged_values = np.asarray(
list(set(df[attr]).difference(vals)), dtype=np.float64)
else:
priv = np.isin(df[attr], vals)
df.loc[priv, attr] = privileged_values
df.loc[~priv, attr] = unprivileged_values
privileged_attributes.append(privileged_values)
unprivileged_attributes.append(unprivileged_values)
return df, privileged_attributes, unprivileged_attributes
def train_test_split(self, test_size=0.2, random_state=None, shuffle=True):
"""
Split the input data into training and testing sets.
Parameters:
test_size (float or int): If float, should be between 0.0 and 1.0 and represent the proportion of the data
to include in the test split.If int, represents the absolute number of test samples.
random_state (int or None): The seed value for the random number generator (default None).
shuffle (bool): Whether or not to shuffle the data before splitting (default True).
Returns:
X_train (pandas.Dataframe): features set of the training dataset
X_test (pandas.Dataframe): features set of the testing dataset
Y_train (pandas.Dataframe): target variable set of the training dataset
Y_test (pandas.Dataframe): target variable set of the testing dataset
Z_train (pandas.Dataframe): protected attribute set of the training dataset
Z_test (pandas.Dataframe): protected attribute set of the testing dataset
"""
if isinstance(test_size, float) and (test_size <= 0.0 or test_size >= 1.0):
raise ValueError('Test size should be between 0.0 and 1.0')
if isinstance(test_size, float):
n_test = int(test_size * len(self.data))
elif isinstance(test_size, int):
n_test = test_size
# Set random seed if provided
if random_state is not None:
np.random.seed(random_state)
# Shuffle data if necessary
if shuffle:
indices = np.random.permutation(len(self.data))
self.data = self.data.iloc[indices, :]
sensitive_attribs = self.data.loc[:, self.protected_attribute]
target = self.data.loc[:, self.target_variable]
features = self.data.drop(columns=self.target_variable)
# Split data
X_train, X_test = features.iloc[n_test:, :], features.iloc[:n_test, :]
Y_train, Y_test = target.iloc[n_test:, :], target.iloc[:n_test, :]
Z_train, Z_test = sensitive_attribs.iloc[n_test:,
:], sensitive_attribs.iloc[:n_test, :]
return X_train, X_test, Y_train, Y_test, Z_train, Z_test
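# Hedged usage sketch (the column names and class values below are
# hypothetical; illustrative only):
#
#   prep = DataPreprocessing(
#       df,
#       categorical_features=['workclass'],
#       protected_attribute=['sex'],
#       privileged_class=[['Male']],
#       target_variable=['income'],
#       favorable_class=['>50K'],
#   )
#   X_train, X_test, Y_train, Y_test, Z_train, Z_test = \
#       prep.train_test_split(test_size=0.2, random_state=0)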
|
ee44863ab0cdabb6d774b03ade496a47a5124363
|
839ccbc4259cadab261616c4fb17bded7dc65599
|
/ndscheduler/corescheduler/core/base_test.py
|
18cddea87214335da26f58e4db60ad4b6c73ab66
|
[
"BSD-2-Clause"
] |
permissive
|
Nextdoor/ndscheduler
|
870d7d608c7635a53a9a20ea1eca66b218bebab1
|
d31016aaca480e38a69d75a66a9978a937c6a0b0
|
refs/heads/master
| 2023-06-28T03:46:58.272157
| 2020-01-03T16:56:06
| 2020-01-03T16:56:06
| 39,963,912
| 1,116
| 235
|
BSD-2-Clause
| 2023-05-22T14:02:24
| 2015-07-30T17:41:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
base_test.py
|
"""Unit tests for BaseScheduler class."""
import unittest
import mock
from ndscheduler.corescheduler.core.base import BaseScheduler
from ndscheduler.corescheduler.datastore.providers.sqlite import DatastoreSqlite
class BaseSchedulerTest(unittest.TestCase):
def test_is_okay_to_run(self):
with mock.patch(('ndscheduler.corescheduler.core.base.'
'BaseScheduler.is_okay_to_run')) as mock_should_run:
mock_should_run.return_value = True
job_stores = {'default': DatastoreSqlite.get_instance()}
dcp = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
sched = BaseScheduler(dcp, jobstores=job_stores)
self.assertNotEqual(sched._process_jobs(), sched.DEFAULT_WAIT_SECONDS)
def test_is_not_okay_to_run(self):
with mock.patch(('ndscheduler.corescheduler.core.base.'
'BaseScheduler.is_okay_to_run')) as mock_should_run:
mock_should_run.return_value = False
job_stores = {'default': DatastoreSqlite.get_instance()}
dcp = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
sched = BaseScheduler(dcp, jobstores=job_stores)
self.assertEqual(sched._process_jobs(), sched.DEFAULT_WAIT_SECONDS)
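# Hedged note (illustrative): mock.patch swaps out is_okay_to_run for the
# duration of each `with` block, so _process_jobs can be driven down both
# branches against an in-memory sqlite datastore, without scheduling real jobs.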
|
1c93de370e3cf5acfe25db0134bdab86dcb04960
|
607dc8df19fc5248f6289cdda97857b5d58ca16f
|
/benchmark/src/wrappers/v20.py
|
9f2be56dcb7a4b27ce3e343ed2049eb50f0ab469
|
[
"BSD-3-Clause",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
automl/SMAC3
|
7dce243a33023c52d6819deff966f7b502e90ed0
|
541ee7e0383b491b86d1a23dcff669f2efad616d
|
refs/heads/main
| 2023-08-31T17:36:06.067579
| 2023-08-01T13:02:51
| 2023-08-01T13:02:51
| 65,900,469
| 943
| 259
|
NOASSERTION
| 2023-09-11T02:36:57
| 2016-08-17T10:58:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,091
|
py
|
v20.py
|
from __future__ import annotations
from src.tasks.task import Task
from src.utils.exceptions import NotSupportedError
from src.wrappers.wrapper import Wrapper
class Version20(Wrapper):
supported_versions: list[str] = ["2.0.2"]
def __init__(self, task: Task, seed: int) -> None:
super().__init__(task, seed)
from smac.facade import AbstractFacade
self._smac: AbstractFacade | None = None
def run(self) -> None:
from smac import (
BlackBoxFacade,
HyperparameterOptimizationFacade,
MultiFidelityFacade,
Scenario,
AlgorithmConfigurationFacade
)
# Get instances
instances = None
instance_features = None
if self.task.use_instances:
instances = self.task.model.dataset.get_instances()
instance_features = self.task.model.dataset.get_instance_features()
# Create scenario
scenario = Scenario(
self.model.configspace,
n_trials=self.task.n_trials,
walltime_limit=self.task.walltime_limit,
deterministic=self.task.deterministic,
instances=instances,
instance_features=instance_features,
min_budget=self.task.min_budget,
max_budget=self.task.max_budget,
n_workers=self.task.n_workers,
seed=self.seed,
)
intensifier_kwargs = {}
# Create facade
if self.task.optimization_type == "bb":
facade_object = BlackBoxFacade
intensifier_kwargs["max_config_calls"] = self.task.max_config_calls
elif self.task.optimization_type == "hpo":
facade_object = HyperparameterOptimizationFacade
intensifier_kwargs["max_config_calls"] = self.task.max_config_calls
elif self.task.optimization_type == "mf":
facade_object = MultiFidelityFacade
intensifier_kwargs["n_seeds"] = self.task.n_seeds
intensifier_kwargs["incumbent_selection"] = self.task.incumbent_selection
elif self.task.optimization_type == "ac":
facade_object = AlgorithmConfigurationFacade
intensifier_kwargs["max_config_calls"] = self.task.max_config_calls
else:
raise RuntimeError("Unknown optimization type.")
if self.task.intensifier is None:
intensifier = facade_object.get_intensifier(scenario, **intensifier_kwargs)
else:
if self.task.intensifier == "successive_halving":
from smac.intensifier.successive_halving import SuccessiveHalving
intensifier = SuccessiveHalving(scenario, **intensifier_kwargs)
else:
raise RuntimeError("Unsupported intensifier.")
config_selector = facade_object.get_config_selector(scenario, retrain_after=self.task.retrain_after)
smac = facade_object(
scenario,
self.task.model.train,
intensifier=intensifier,
config_selector=config_selector,
logging_level=99999,
overwrite=True,
)
smac.optimize()
self._smac = smac
def get_trajectory(self, sort_by: str = "trials") -> tuple[list[float], list[float]]:
if len(self.task.objectives) > 1:
raise NotSupportedError
assert self._smac is not None
rh = self._smac.runhistory
trajectory = self._smac.intensifier.trajectory
X: list[int | float] = []
Y: list[float] = []
for traj in trajectory:
assert len(traj.config_ids) == 1
config_id = traj.config_ids[0]
config = rh.get_config(config_id)
cost = rh.get_cost(config)
if cost > 1e6:
continue
if sort_by == "trials":
X.append(traj.trial)
elif sort_by == "walltime":
X.append(traj.walltime)
else:
raise RuntimeError("Unknown sort_by.")
Y.append(cost)
return X, Y
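# Hedged usage sketch (Task construction is project-specific and omitted here;
# illustrative only):
#
#   wrapper = Version20(task, seed=0)
#   wrapper.run()                       # builds a Scenario + facade, optimizes
#   X, Y = wrapper.get_trajectory(sort_by="walltime")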
|
bb9f1f510b3ce2d5c95e04e0a2ab054d440b45c6
|
a5a36aa7200b0be6ea11ad669ba0534ee1b896a6
|
/packages/vaex-core/setup.py
|
b5c98c18ca533e4a749ff63d17e39a207fc7b3d8
|
[
"MIT",
"MPL-2.0"
] |
permissive
|
vaexio/vaex
|
ec42919f272a723f884fece3c83975112e7a6f30
|
15245cf4332d4423ac58bd737aee27d911a1b252
|
refs/heads/master
| 2023-08-11T08:03:33.248943
| 2023-07-21T10:40:58
| 2023-07-21T10:40:58
| 24,528,468
| 7,892
| 686
|
MIT
| 2023-09-04T05:07:11
| 2014-09-27T09:44:42
|
Python
|
UTF-8
|
Python
| false
| false
| 9,155
|
py
|
setup.py
|
from setuptools import setup
import sys
import os
import imp
from setuptools import Extension
import platform
use_skbuild = len(os.environ.get('VAEX_BUILD_SKBUILD', '')) > 0
if use_skbuild:
from skbuild import setup
import skbuild.command.build_ext
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
dirname = os.path.dirname(__file__)
path_version = os.path.join(dirname, "vaex/core/_version.py")
version = imp.load_source('version', path_version)
name = 'vaex'
author = "Maarten A. Breddels"
author_email = "maartenbreddels@gmail.com"
license = 'MIT'
version = version.__version__
url = 'https://www.github.com/maartenbreddels/vaex'
# TODO: can we do without requests and progressbar2?
# TODO: after python2 support is dropped, future and futures can also be dropped
# TODO: would be nice to have astropy only as dep in vaex-astro
install_requires_core = ["numpy>=1.16", "aplus", "tabulate>=0.8.3",
"future>=0.15.2", "pyyaml", "progressbar2",
"requests", "six", "cloudpickle", "pandas", "dask!=2022.4.0",
"nest-asyncio>=1.3.3", "pyarrow>=5.0.0", "frozendict!=2.2.0",
"blake3", "filelock", "pydantic>=1.8.0", "rich",
]
if sys.version_info[0] == 2:
install_requires_core.append("futures>=2.2.0")
install_requires_viz = ["matplotlib>=1.3.1", ]
install_requires_astro = ["kapteyn"]
if "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.9"
extra_dev_options = []
# MB: I like these options during development, the second is for ccache
# extra_dev_options = ['-fmax-errors=4', '-fdiagnostics-color', '-pedantic-errors']
class get_numpy_include(object):
"""Helper class to determine the numpy include path
The purpose of this class is to postpone importing numpy
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self):
pass
def __str__(self):
import numpy as np
return np.get_include()
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
# this trick does not work anymore it seems, we now just vendor it
# import pybind11
# return pybind11.get_include(self.user)
return 'vendor/pybind11/include'
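# Hedged note: setuptools only calls str() on these include helper objects when
# it actually compiles the extensions, which defers `import numpy` (and the
# vendored pybind11 path lookup) until after setup_requires has been installed.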
USE_ABSL = False
USE_TSL = True
define_macros = []
if USE_ABSL:
define_macros += [('VAEX_USE_ABSL', None)]
if USE_TSL:
define_macros += [('VAEX_USE_TSL', None)]
dll_files = []
if platform.system().lower() == 'windows':
extra_compile_args = ["/EHsc"]
dll_files = ['pcre.dll', 'pcrecpp.dll', 'vcruntime140_1.dll']
else:
# TODO: maybe enable these flags for non-wheel/conda builds? ["-mtune=native", "-march=native"]
extra_compile_args = ["-std=c++11", "-O3", "-funroll-loops", "-Werror=return-type", "-Wno-unused-parameter"]
extra_compile_args.append("-g")
extra_compile_args += extra_dev_options
if sys.platform == 'darwin':
extra_compile_args.append("-mmacosx-version-min=10.9")
# on windows (Conda-forge builds), the dirname is an absolute path
extension_vaexfast = Extension("vaex.vaexfast", [os.path.relpath(os.path.join(dirname, "src/vaexfast.cpp"))],
include_dirs=[get_numpy_include()],
extra_compile_args=extra_compile_args)
extension_strings = Extension("vaex.superstrings", [
os.path.relpath(os.path.join(dirname, "src/strings.cpp")),
os.path.relpath(os.path.join(dirname, "src/string_utils.cpp")),
],
include_dirs=[
get_numpy_include(),
get_pybind_include(),
get_pybind_include(user=True),
'vendor/string-view-lite/include',
'vendor/boost',
os.path.join(sys.prefix, 'include'),
os.path.join(sys.prefix, 'Library', 'include'), # windows
os.path.join(dirname, 'vendor', 'pcre', 'Library', 'include') # windows pcre from conda-forge
],
library_dirs=[
os.path.join(sys.prefix, 'lib'),
os.path.join(sys.prefix, 'Library', 'lib'), # windows
os.path.join(dirname, 'vendor', 'pcre', 'Library', 'lib'), # windows pcre from conda-forge
],
extra_compile_args=extra_compile_args,
libraries=['pcre', 'pcrecpp']
)
extension_superutils = Extension("vaex.superutils", [
os.path.relpath(os.path.join(dirname, "src/hash_string.cpp")),
os.path.relpath(os.path.join(dirname, "src/hash_primitives_pot.cpp")),
os.path.relpath(os.path.join(dirname, "src/hash_object.cpp")),
os.path.relpath(os.path.join(dirname, "src/hash_primitives_prime.cpp")),
os.path.relpath(os.path.join(dirname, "src/superutils.cpp")),
os.path.relpath(os.path.join(dirname, "src/string_utils.cpp")),
] + ([os.path.relpath(os.path.join(dirname, "vendor/abseil-cpp/absl/container/internal/raw_hash_set.cc"))] if USE_ABSL else []),
include_dirs=[
get_numpy_include(), get_pybind_include(),
get_pybind_include(user=True),
'vendor/abseil-cpp',
'vendor/flat_hash_map',
'vendor/sparse-map/include',
'vendor/hopscotch-map/include',
'vendor/string-view-lite/include',
],
extra_compile_args=extra_compile_args,
define_macros=define_macros,
)
extension_superagg = Extension("vaex.superagg", [
os.path.relpath(os.path.join(dirname, "src/agg_nunique_string.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_minmax.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_nunique.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_sum.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_first.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_list.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg_count.cpp")),
os.path.relpath(os.path.join(dirname, "src/agg.cpp")),
os.path.relpath(os.path.join(dirname, "src/binner_combined.cpp")),
os.path.relpath(os.path.join(dirname, "src/binner_ordinal.cpp")),
os.path.relpath(os.path.join(dirname, "src/binner_hash.cpp")),
os.path.relpath(os.path.join(dirname, "src/binners.cpp")),
os.path.relpath(os.path.join(dirname, "src/string_utils.cpp")),
],
include_dirs=[
get_numpy_include(), get_pybind_include(),
get_pybind_include(user=True),
'vendor/flat_hash_map',
'vendor/sparse-map/include',
'vendor/hopscotch-map/include',
'vendor/string-view-lite/include'
],
extra_compile_args=extra_compile_args,
define_macros=define_macros,
)
setup(name=name + '-core',
version=version,
description='Core of vaex',
url=url,
author=author,
author_email=author_email,
setup_requires=['numpy'],
install_requires=install_requires_core,
license=license,
package_data={'vaex': dll_files + ['test/files/*.fits', 'test/files/*.vot', 'test/files/*.hdf5']},
packages=['vaex', 'vaex.arrow', 'vaex.core', 'vaex.file', 'vaex.test', 'vaex.ext', 'vaex.misc', 'vaex.datasets'],
include_package_data=True,
ext_modules=([extension_vaexfast] if on_rtd else [extension_vaexfast, extension_strings, extension_superutils, extension_superagg]) if not use_skbuild else [],
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
extras_require={
'all': ["gcsfs>=0.6.2", "s3fs"]
},
entry_points={
'console_scripts': ['vaex = vaex.__main__:main'],
'gui_scripts': ['vaexgui = vaex.__main__:main'], # sometimes in osx, you need to run with this
'vaex.dataframe.accessor': [
'geo = vaex.geo:DataFrameAccessorGeo',
'struct = vaex.struct:DataFrameAccessorStruct',
],
'vaex.dataset.opener': [
'csv = vaex.csv:DatasetCsvLazy',
'arrow = vaex.arrow.opener:ArrowOpener',
'parquet = vaex.arrow.opener:ParquetOpener',
'feather = vaex.arrow.opener:FeatherOpener',
],
'vaex.memory.tracker': [
'default = vaex.memory:MemoryTracker'
],
'vaex.progressbar': [
'vaex = vaex.progress:simple',
'simple = vaex.progress:simple',
'widget = vaex.progress:widget',
'rich = vaex.progress:rich',
],
'vaex.file.scheme': [
's3 = vaex.file.s3',
'fsspec+s3 = vaex.file.s3fs',
'arrow+s3 = vaex.file.s3arrow',
'gs = vaex.file.gcs',
'fsspec+gs = vaex.file.gcs',
]
}
)
|
990e433b56bc0d0216ce1ecc974d49b8f5370a33
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/nest/test_config_flow.py
|
7ab4a6dafc13a437fb5b9e8ba9d67c9337ccc553
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 25,198
|
py
|
test_config_flow.py
|
"""Test the Google Nest Device Access config flow."""
from __future__ import annotations
from typing import Any
from unittest.mock import patch
from google_nest_sdm.exceptions import (
AuthException,
ConfigurationException,
SubscriberException,
)
from google_nest_sdm.structure import Structure
import pytest
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.components.nest.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .common import (
CLIENT_ID,
CLOUD_PROJECT_ID,
PROJECT_ID,
SUBSCRIBER_ID,
TEST_CONFIG_APP_CREDS,
TEST_CONFIGFLOW_APP_CREDS,
NestTestConfig,
)
from tests.common import MockConfigEntry
WEB_REDIRECT_URL = "https://example.com/auth/external/callback"
APP_REDIRECT_URL = "urn:ietf:wg:oauth:2.0:oob"
FAKE_DHCP_DATA = dhcp.DhcpServiceInfo(
ip="127.0.0.2", macaddress="00:11:22:33:44:55", hostname="fake_hostname"
)
@pytest.fixture
def nest_test_config(request) -> NestTestConfig:
"""Fixture with empty configuration and no existing config entry."""
return TEST_CONFIGFLOW_APP_CREDS
class OAuthFixture:
"""Simulate the oauth flow used by the config flow."""
def __init__(self, hass, hass_client_no_auth, aioclient_mock):
"""Initialize OAuthFixture."""
self.hass = hass
self.hass_client = hass_client_no_auth
self.aioclient_mock = aioclient_mock
async def async_app_creds_flow(
self,
result: dict,
cloud_project_id: str = CLOUD_PROJECT_ID,
project_id: str = PROJECT_ID,
) -> None:
"""Invoke multiple steps in the app credentials based flow."""
assert result.get("type") == "form"
assert result.get("step_id") == "cloud_project"
result = await self.async_configure(
result, {"cloud_project_id": CLOUD_PROJECT_ID}
)
assert result.get("type") == "form"
assert result.get("step_id") == "device_project"
result = await self.async_configure(result, {"project_id": project_id})
await self.async_oauth_web_flow(result, project_id=project_id)
async def async_oauth_web_flow(self, result: dict, project_id=PROJECT_ID) -> None:
"""Invoke the oauth flow for Web Auth with fake responses."""
state = self.create_state(result, WEB_REDIRECT_URL)
assert result["type"] == "external"
assert result["url"] == self.authorize_url(
state,
WEB_REDIRECT_URL,
CLIENT_ID,
project_id,
)
# Simulate user redirect back with auth code
client = await self.hass_client()
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
await self.async_mock_refresh(result)
async def async_reauth(self, config_entry: ConfigEntry) -> dict:
"""Initiate a reuath flow."""
config_entry.async_start_reauth(self.hass)
await self.hass.async_block_till_done()
# Advance through the reauth flow
result = self.async_progress()
assert result["step_id"] == "reauth_confirm"
# Advance to the oauth flow
return await self.hass.config_entries.flow.async_configure(
result["flow_id"], {}
)
def async_progress(self) -> FlowResult:
"""Return the current step of the config flow."""
flows = self.hass.config_entries.flow.async_progress()
assert len(flows) == 1
return flows[0]
def create_state(self, result: dict, redirect_url: str) -> str:
"""Create state object based on redirect url."""
return config_entry_oauth2_flow._encode_jwt(
self.hass,
{
"flow_id": result["flow_id"],
"redirect_uri": redirect_url,
},
)
def authorize_url(
self, state: str, redirect_url: str, client_id: str, project_id: str
) -> str:
"""Generate the expected authorization url."""
oauth_authorize = OAUTH2_AUTHORIZE.format(project_id=project_id)
return (
f"{oauth_authorize}?response_type=code&client_id={client_id}"
f"&redirect_uri={redirect_url}"
f"&state={state}&scope=https://www.googleapis.com/auth/sdm.service"
"+https://www.googleapis.com/auth/pubsub"
"&access_type=offline&prompt=consent"
)
async def async_mock_refresh(self, result, user_input: dict = None) -> None:
"""Finish the OAuth flow exchanging auth token for refresh token."""
self.aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
async def async_finish_setup(
self, result: dict, user_input: dict = None
) -> ConfigEntry:
"""Finish the OAuth flow exchanging auth token for refresh token."""
with patch(
"homeassistant.components.nest.async_setup_entry", return_value=True
) as mock_setup:
await self.async_configure(result, user_input)
assert len(mock_setup.mock_calls) == 1
await self.hass.async_block_till_done()
return self.get_config_entry()
async def async_configure(
self, result: dict[str, Any], user_input: dict[str, Any]
) -> dict:
"""Advance to the next step in the config flow."""
return await self.hass.config_entries.flow.async_configure(
result["flow_id"],
user_input,
)
async def async_pubsub_flow(self, result: dict, cloud_project_id="") -> None:
"""Verify the pubsub creation step."""
# Render form with a link to get an auth token
assert result["type"] == "form"
assert result["step_id"] == "pubsub"
assert "description_placeholders" in result
assert "url" in result["description_placeholders"]
assert result["data_schema"]({}) == {"cloud_project_id": cloud_project_id}
def get_config_entry(self) -> ConfigEntry:
"""Get the config entry."""
entries = self.hass.config_entries.async_entries(DOMAIN)
assert len(entries) >= 1
return entries[0]
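# Hedged summary (illustrative): the tests below typically drive the fixture as
#
#   result = await hass.config_entries.flow.async_init(DOMAIN, context=...)
#   await oauth.async_app_creds_flow(result)
#   entry = await oauth.async_finish_setup(result)
#
# mirroring the real cloud_project -> device_project -> OAuth redirect steps.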
@pytest.fixture
async def oauth(hass, hass_client_no_auth, aioclient_mock, current_request_with_host):
"""Create the simulated oauth flow."""
return OAuthFixture(hass, hass_client_no_auth, aioclient_mock)
async def test_app_credentials(
hass: HomeAssistant, oauth, subscriber, setup_platform
) -> None:
"""Check full flow."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
entry = await oauth.async_finish_setup(result)
data = dict(entry.data)
assert "token" in data
data["token"].pop("expires_in")
data["token"].pop("expires_at")
assert "subscriber_id" in data
assert f"projects/{CLOUD_PROJECT_ID}/subscriptions" in data["subscriber_id"]
data.pop("subscriber_id")
assert data == {
"sdm": {},
"auth_implementation": "imported-cred",
"cloud_project_id": CLOUD_PROJECT_ID,
"project_id": PROJECT_ID,
"token": {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
},
}
async def test_config_flow_restart(
hass: HomeAssistant, oauth, subscriber, setup_platform
) -> None:
"""Check with auth implementation is re-initialized when aborting the flow."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
# At this point, we should have a valid auth implementation configured.
# Simulate aborting the flow and starting over to ensure we get prompted
# again to configure everything.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result.get("type") == "form"
assert result.get("step_id") == "cloud_project"
# Change the values to show they are reflected below
result = await oauth.async_configure(
result, {"cloud_project_id": "new-cloud-project-id"}
)
assert result.get("type") == "form"
assert result.get("step_id") == "device_project"
result = await oauth.async_configure(result, {"project_id": "new-project-id"})
await oauth.async_oauth_web_flow(result, "new-project-id")
entry = await oauth.async_finish_setup(result, {"code": "1234"})
data = dict(entry.data)
assert "token" in data
data["token"].pop("expires_in")
data["token"].pop("expires_at")
assert "subscriber_id" in data
assert "projects/new-cloud-project-id/subscriptions" in data["subscriber_id"]
data.pop("subscriber_id")
assert data == {
"sdm": {},
"auth_implementation": "imported-cred",
"cloud_project_id": "new-cloud-project-id",
"project_id": "new-project-id",
"token": {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
},
}
async def test_config_flow_wrong_project_id(
hass: HomeAssistant, oauth, subscriber, setup_platform
) -> None:
"""Check the case where the wrong project ids are entered."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result.get("type") == "form"
assert result.get("step_id") == "cloud_project"
result = await oauth.async_configure(result, {"cloud_project_id": CLOUD_PROJECT_ID})
assert result.get("type") == "form"
assert result.get("step_id") == "device_project"
# Enter the cloud project id instead of the device access project id (really we just
# check that the two values are the same, which is never correct)
result = await oauth.async_configure(result, {"project_id": CLOUD_PROJECT_ID})
assert result["type"] == "form"
assert "errors" in result
assert "project_id" in result["errors"]
assert result["errors"]["project_id"] == "wrong_project_id"
# Fix with a correct value and complete the rest of the flow
result = await oauth.async_configure(result, {"project_id": PROJECT_ID})
await oauth.async_oauth_web_flow(result)
await hass.async_block_till_done()
entry = await oauth.async_finish_setup(result, {"code": "1234"})
data = dict(entry.data)
assert "token" in data
data["token"].pop("expires_in")
data["token"].pop("expires_at")
assert "subscriber_id" in data
assert f"projects/{CLOUD_PROJECT_ID}/subscriptions" in data["subscriber_id"]
data.pop("subscriber_id")
assert data == {
"sdm": {},
"auth_implementation": "imported-cred",
"cloud_project_id": CLOUD_PROJECT_ID,
"project_id": PROJECT_ID,
"token": {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
},
}
async def test_config_flow_pubsub_configuration_error(
hass: HomeAssistant,
oauth,
setup_platform,
mock_subscriber,
) -> None:
"""Check full flow fails with configuration error."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
mock_subscriber.create_subscription.side_effect = ConfigurationException
result = await oauth.async_configure(result, {"code": "1234"})
assert result["type"] == "form"
assert "errors" in result
assert "cloud_project_id" in result["errors"]
assert result["errors"]["cloud_project_id"] == "bad_project_id"
async def test_config_flow_pubsub_subscriber_error(
hass: HomeAssistant, oauth, setup_platform, mock_subscriber
) -> None:
"""Check full flow with a subscriber error."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
mock_subscriber.create_subscription.side_effect = SubscriberException()
result = await oauth.async_configure(result, {"code": "1234"})
assert result["type"] == "form"
assert "errors" in result
assert "cloud_project_id" in result["errors"]
assert result["errors"]["cloud_project_id"] == "subscriber_error"
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_APP_CREDS])
async def test_multiple_config_entries(
hass: HomeAssistant, oauth, setup_platform
) -> None:
"""Verify config flow can be started when existing config entry exists."""
await setup_platform()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result, project_id="project-id-2")
entry = await oauth.async_finish_setup(result)
assert entry.title == "Mock Title"
assert "token" in entry.data
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 2
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_APP_CREDS])
async def test_duplicate_config_entries(
hass: HomeAssistant, oauth, setup_platform
) -> None:
"""Verify that config entries must be for unique projects."""
await setup_platform()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result.get("type") == "form"
assert result.get("step_id") == "cloud_project"
result = await oauth.async_configure(result, {"cloud_project_id": CLOUD_PROJECT_ID})
assert result.get("type") == "form"
assert result.get("step_id") == "device_project"
result = await oauth.async_configure(result, {"project_id": PROJECT_ID})
assert result.get("type") == "abort"
assert result.get("reason") == "already_configured"
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_APP_CREDS])
async def test_reauth_multiple_config_entries(
hass: HomeAssistant, oauth, setup_platform, config_entry
) -> None:
"""Test Nest reauthentication with multiple existing config entries."""
await setup_platform()
old_entry = MockConfigEntry(
domain=DOMAIN,
data={
**config_entry.data,
"extra_data": True,
},
)
old_entry.add_to_hass(hass)
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 2
orig_subscriber_id = config_entry.data.get("subscriber_id")
# Invoke the reauth flow
result = await oauth.async_reauth(config_entry)
await oauth.async_oauth_web_flow(result)
await oauth.async_finish_setup(result)
# Only reauth entry was updated, the other entry is preserved
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 2
entry = entries[0]
assert entry.unique_id == PROJECT_ID
entry.data["token"].pop("expires_at")
assert entry.data["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
assert entry.data.get("subscriber_id") == orig_subscriber_id # Not updated
assert not entry.data.get("extra_data")
# Other entry was not refreshed
entry = entries[1]
entry.data["token"].pop("expires_at")
assert entry.data.get("token", {}).get("access_token") == "some-token"
assert entry.data.get("extra_data")
async def test_pubsub_subscription_strip_whitespace(
hass: HomeAssistant, oauth, subscriber, setup_platform
) -> None:
"""Check that project id has whitespace stripped on entry."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(
result, cloud_project_id=" " + CLOUD_PROJECT_ID + " "
)
entry = await oauth.async_finish_setup(result, {"code": "1234"})
assert entry.title == "Import from configuration.yaml"
assert "token" in entry.data
entry.data["token"].pop("expires_at")
assert entry.unique_id == PROJECT_ID
assert entry.data["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
assert "subscriber_id" in entry.data
assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
async def test_pubsub_subscription_auth_failure(
hass: HomeAssistant, oauth, setup_platform, mock_subscriber
) -> None:
"""Check flow that creates a pub/sub subscription."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_subscriber.create_subscription.side_effect = AuthException()
await oauth.async_app_creds_flow(result)
result = await oauth.async_configure(result, {"code": "1234"})
assert result["type"] == "abort"
assert result["reason"] == "invalid_access_token"
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_APP_CREDS])
async def test_pubsub_subscriber_config_entry_reauth(
hass: HomeAssistant,
oauth,
setup_platform,
subscriber,
config_entry,
) -> None:
"""Test the pubsub subscriber id is preserved during reauth."""
await setup_platform()
result = await oauth.async_reauth(config_entry)
await oauth.async_oauth_web_flow(result)
# Entering an updated access token refreshes the config entry.
entry = await oauth.async_finish_setup(result, {"code": "1234"})
entry.data["token"].pop("expires_at")
assert entry.unique_id == PROJECT_ID
assert entry.data["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
assert entry.data["auth_implementation"] == "imported-cred"
assert entry.data["subscriber_id"] == SUBSCRIBER_ID
assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
async def test_config_entry_title_from_home(
hass: HomeAssistant, oauth, setup_platform, subscriber
) -> None:
"""Test that the Google Home name is used for the config entry title."""
device_manager = await subscriber.async_get_device_manager()
device_manager.add_structure(
Structure.MakeStructure(
{
"name": f"enterprise/{PROJECT_ID}/structures/some-structure-id",
"traits": {
"sdm.structures.traits.Info": {
"customName": "Example Home",
},
},
}
)
)
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
entry = await oauth.async_finish_setup(result, {"code": "1234"})
assert entry.title == "Example Home"
assert "token" in entry.data
assert "subscriber_id" in entry.data
assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
async def test_config_entry_title_multiple_homes(
hass: HomeAssistant, oauth, setup_platform, subscriber
) -> None:
"""Test handling of multiple Google Homes authorized."""
device_manager = await subscriber.async_get_device_manager()
device_manager.add_structure(
Structure.MakeStructure(
{
"name": f"enterprise/{PROJECT_ID}/structures/id-1",
"traits": {
"sdm.structures.traits.Info": {
"customName": "Example Home #1",
},
},
}
)
)
device_manager.add_structure(
Structure.MakeStructure(
{
"name": f"enterprise/{PROJECT_ID}/structures/id-2",
"traits": {
"sdm.structures.traits.Info": {
"customName": "Example Home #2",
},
},
}
)
)
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
entry = await oauth.async_finish_setup(result, {"code": "1234"})
assert entry.title == "Example Home #1, Example Home #2"
async def test_title_failure_fallback(
hass: HomeAssistant, oauth, setup_platform, mock_subscriber
) -> None:
"""Test exception handling when determining the structure names."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
mock_subscriber.async_get_device_manager.side_effect = AuthException()
entry = await oauth.async_finish_setup(result, {"code": "1234"})
assert entry.title == "Import from configuration.yaml"
assert "token" in entry.data
assert "subscriber_id" in entry.data
assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
async def test_structure_missing_trait(
hass: HomeAssistant, oauth, setup_platform, subscriber
) -> None:
"""Test handling the case where a structure has no name set."""
device_manager = await subscriber.async_get_device_manager()
device_manager.add_structure(
Structure.MakeStructure(
{
"name": f"enterprise/{PROJECT_ID}/structures/id-1",
# Missing Info trait
"traits": {},
}
)
)
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await oauth.async_app_creds_flow(result)
entry = await oauth.async_finish_setup(result, {"code": "1234"})
# Fallback to default name
assert entry.title == "Import from configuration.yaml"
@pytest.mark.parametrize("nest_test_config", [NestTestConfig()])
async def test_dhcp_discovery(
hass: HomeAssistant, oauth: OAuthFixture, nest_test_config: NestTestConfig
) -> None:
"""Exercise discovery dhcp starts the config flow and kicks user to frontend creds flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=FAKE_DHCP_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "create_cloud_project"
result = await oauth.async_configure(result, {})
assert result.get("type") == "abort"
assert result.get("reason") == "missing_credentials"
async def test_dhcp_discovery_with_creds(
hass: HomeAssistant, oauth, subscriber, setup_platform
) -> None:
"""Exercise discovery dhcp with no config present (can't run)."""
await setup_platform()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=FAKE_DHCP_DATA,
)
await hass.async_block_till_done()
assert result.get("type") == "form"
assert result.get("step_id") == "cloud_project"
result = await oauth.async_configure(result, {"cloud_project_id": CLOUD_PROJECT_ID})
assert result.get("type") == "form"
assert result.get("step_id") == "device_project"
result = await oauth.async_configure(result, {"project_id": PROJECT_ID})
await oauth.async_oauth_web_flow(result)
entry = await oauth.async_finish_setup(result, {"code": "1234"})
await hass.async_block_till_done()
data = dict(entry.data)
assert "token" in data
data["token"].pop("expires_in")
data["token"].pop("expires_at")
assert "subscriber_id" in data
assert f"projects/{CLOUD_PROJECT_ID}/subscriptions" in data["subscriber_id"]
data.pop("subscriber_id")
assert data == {
"sdm": {},
"auth_implementation": "imported-cred",
"cloud_project_id": CLOUD_PROJECT_ID,
"project_id": PROJECT_ID,
"token": {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
},
}
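# Hedged note (not part of the Home Assistant source): config-flow tests like
# the ones above are run with pytest against the component's test module, e.g.
# ``pytest tests/components/nest/test_config_flow.py`` (path is illustrative).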
|
e11849608e82842415112bc9d19867d4e2dc31b1
|
767b09cdf51803d533ebb5906042ed1f92f91a7c
|
/allennlp_models/rc/modules/__init__.py
|
8aaf9fb3879e8bf43fc9b31ed8dc328e2d33cdea
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp-models
|
e93bb3b084e99e211d5ebb515b765de117e41970
|
b1f372248c17ad12684d344955fbcd98e957e77e
|
refs/heads/main
| 2023-09-05T01:57:37.434101
| 2022-11-24T00:06:05
| 2022-11-24T00:06:05
| 246,170,605
| 520
| 172
|
Apache-2.0
| 2022-11-24T00:06:06
| 2020-03-10T00:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
__init__.py
|
# flake8: noqa: F403
from allennlp_models.rc.modules.seq2seq_encoders import *
|
058a11d279d2e50d68942b84c6b36817ee1187a6
|
ea0a28a1759bdd2292c53fe202f1c66e219a2453
|
/src/zhon/zhuyin.py
|
540bebeedf1794d17c19d1c2eb566c78a8c4c24e
|
[
"MIT"
] |
permissive
|
tsroten/zhon
|
6412ffaef6f9cb7205c2f27b6e68b8838d54038b
|
c22db568f86b4519db94de8e7dc86c007cba18d1
|
refs/heads/main
| 2023-08-05T11:34:10.434476
| 2023-07-23T10:25:32
| 2023-07-23T10:25:32
| 9,836,043
| 316
| 45
|
MIT
| 2023-06-27T10:41:50
| 2013-05-03T13:03:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
zhuyin.py
|
# -*- coding: utf-8 -*-
"""Constants for working with Zhuyin (Bopomofo)."""
#: A string containing all Zhuyin characters.
characters = "ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙ" "ㄚㄛㄝㄜㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩㄭ"
#: A string containing all Zhuyin tone marks.
marks = (
"\u02C7" # Caron
"\u02CA" # Modifier letter accute accent
"\u02CB" # Modifier letter grave accent
"\u02D9" # Dot above
)
#: A regular expression pattern for a Zhuyin syllable.
syl = syllable = (
"(?:"
"[ㄇㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄜ|"
"[ㄅㄆㄇㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄗㄘㄙㄧ]?ㄞ|"
"[ㄅㄆㄇㄈㄉㄋㄌㄍㄏㄓㄕㄗ]?ㄟ|"
"[ㄅㄆㄇㄈㄋㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄣ|"
"[ㄉㄌㄐㄑㄒ]?ㄧㄚ|"
"[ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄗㄘㄙ]?ㄚ|"
"[ㄅㄆㄇㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄠ|"
"[ㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄡ|"
"[ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄢ|"
"[ㄇㄉㄋㄌㄐㄑㄒ]?ㄧㄡ|"
"[ㄅㄆㄇㄋㄌㄐㄑㄒ]?ㄧㄣ|"
"[ㄐㄑㄒ]?ㄩ[ㄢㄥ]|"
"[ㄌㄐㄑㄒ]?ㄩㄣ|"
"[ㄋㄌㄐㄑㄒ]?(?:ㄩㄝ?|ㄧㄤ)|"
"[ㄅㄆㄇㄈㄌㄧ]?ㄛ|"
"[ㄅㄆㄇㄉㄊㄋㄌㄐㄑㄒ]?ㄧ[ㄝㄠㄢㄥ]?|"
"[ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?[ㄤㄥ]|"
"[ㄍㄎㄏㄓㄔㄕ]?ㄨ[ㄚㄞㄤ]|"
"[ㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄨㄛ|"
"[ㄉㄊㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄨㄟ|"
"[ㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄨㄢ|"
"[ㄉㄊㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄨㄣ|"
"[ㄉㄊㄋㄌㄍㄎㄏㄓㄔㄖㄗㄘㄙ]?ㄨㄥ|"
"[ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄓㄔㄕㄖㄗㄘㄙ]?ㄨ|"
"[ㄓㄔㄕㄖㄗㄘㄙㄝㄦㄧ]"
")[{marks}]?"
).format(marks=marks)
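# Hedged usage sketch (not part of zhon itself): matching Zhuyin syllables
# with the ``syllable`` pattern defined above. The tone mark used below is
# U+02C7 (caron, third tone).
if __name__ == '__main__':
    import re
    text = 'ㄋㄧ\u02c7ㄏㄠ\u02c7'  # "ni hao" written in Zhuyin
    print(re.findall(syllable, text))  # expected: ['ㄋㄧˇ', 'ㄏㄠˇ']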
|
72729c4b76f18e8f8be5b6606f2021c120b7709c
|
2713a0416f4be097f9d4f82094f75bed482b8db2
|
/models/mtcnn/mtcnn_pytorch/src/first_stage.py
|
300cf851ee0e410b700d6b65bce49fa3f3d004a0
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
eladrich/pixel2style2pixel
|
602bf7c1d533309a6e626b1a92435c295ca9e307
|
5cfff385beb7b95fbce775662b48fcc80081928d
|
refs/heads/master
| 2022-10-09T19:55:07.136339
| 2022-10-01T11:23:39
| 2022-10-01T11:23:39
| 300,247,371
| 3,083
| 581
|
MIT
| 2022-10-01T11:23:39
| 2020-10-01T11:01:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
first_stage.py
|
import math
import numpy as np
import torch
from PIL import Image
from .box_utils import nms, _preprocess
# device is hardcoded here; swap in the commented line below to fall back to
# CPU when CUDA is unavailable.
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = 'cuda:0'
def run_first_stage(image, net, scale, threshold):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
image: an instance of PIL.Image.
net: an instance of pytorch's nn.Module, P-Net.
scale: a float number,
scale width and height of the image by this number.
threshold: a float number,
threshold on the probability of a face when generating
bounding boxes from predictions of the net.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
# scale the image and convert it to a float array
width, height = image.size
sw, sh = math.ceil(width * scale), math.ceil(height * scale)
img = image.resize((sw, sh), Image.BILINEAR)
img = np.asarray(img, 'float32')
img = torch.FloatTensor(_preprocess(img)).to(device)
with torch.no_grad():
output = net(img)
probs = output[1].cpu().data.numpy()[0, 1, :, :]
offsets = output[0].cpu().data.numpy()
# probs: probability of a face at each sliding window
# offsets: transformations to true bounding boxes
boxes = _generate_bboxes(probs, offsets, scale, threshold)
if len(boxes) == 0:
return None
keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
return boxes[keep]
def _generate_bboxes(probs, offsets, scale, threshold):
"""Generate bounding boxes at places
where there is probably a face.
Arguments:
probs: a float numpy array of shape [n, m].
offsets: a float numpy array of shape [1, 4, n, m].
scale: a float number,
width and height of the image were scaled by this number.
threshold: a float number.
Returns:
a float numpy array of shape [n_boxes, 9]
"""
# applying P-Net is equivalent, in some sense, to
# moving 12x12 window with stride 2
stride = 2
cell_size = 12
# indices of boxes where there is probably a face
inds = np.where(probs > threshold)
if inds[0].size == 0:
return np.array([])
# transformations of bounding boxes
tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
# they are defined as:
# w = x2 - x1 + 1
# h = y2 - y1 + 1
# x1_true = x1 + tx1*w
# x2_true = x2 + tx2*w
# y1_true = y1 + ty1*h
# y2_true = y2 + ty2*h
offsets = np.array([tx1, ty1, tx2, ty2])
score = probs[inds[0], inds[1]]
# P-Net is applied to scaled images
# so we need to rescale bounding boxes back
bounding_boxes = np.vstack([
np.round((stride * inds[1] + 1.0) / scale),
np.round((stride * inds[0] + 1.0) / scale),
np.round((stride * inds[1] + 1.0 + cell_size) / scale),
np.round((stride * inds[0] + 1.0 + cell_size) / scale),
score, offsets
])
    # the extra 1.0 offsets mirror the 1-based pixel indexing of the original
    # (MATLAB) MTCNN implementation
    return bounding_boxes.T
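# Hedged usage sketch (not part of the original file): running the first
# stage over an image pyramid, the way MTCNN pipelines typically drive
# ``run_first_stage``. ``image`` is a PIL.Image and ``net`` is a trained
# P-Net living on ``device``; both are assumptions here.
def example_pyramid(image, net, min_face_size=20.0, factor=0.707, threshold=0.6):
    boxes = []
    scale = 12.0 / min_face_size  # P-Net operates on 12x12 cells
    while min(image.size) * scale > 12.0:
        out = run_first_stage(image, net, scale=scale, threshold=threshold)
        if out is not None:
            boxes.append(out)
        scale *= factor
    return np.vstack(boxes) if boxes else np.empty((0, 9))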
|
f554efdbb2cfc3b31207f9a1d3c0860c1fcd5468
|
856e9a8afcb81ae66dd998b0d2cc3556c9f315ea
|
/dexy/parser.py
|
7ad3c08a043b8d910b07ad3fbb74d05cb1398855
|
[
"MIT"
] |
permissive
|
dexy/dexy
|
1d5c999830de4663c05a09f4cd00b1628dfc8d46
|
323c1806e51f75435e11d2265703e68f46c8aef3
|
refs/heads/develop
| 2023-06-10T08:02:45.076551
| 2021-02-28T22:40:41
| 2021-02-28T22:40:41
| 1,506,989
| 141
| 34
|
MIT
| 2020-06-15T17:44:50
| 2011-03-21T14:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,457
|
py
|
parser.py
|
import copy
import dexy.doc
import dexy.exceptions
import dexy.node  # needed for dexy.node.Node.create_instance in walk()
import dexy.plugin
import posixpath
class AbstractSyntaxTree():
def __init__(self, wrapper):
self.wrapper = wrapper
self.root_nodes_ordered = False
self.lookup_table = {}
self.tree = []
# Lists of (directory, settings) tuples
self.default_args_for_directory = []
self.environment_for_directory = []
def all_inputs(self):
"""
Returns a set of all node keys identified as inputs of some other
element.
"""
all_inputs = set()
for kwargs in list(self.lookup_table.values()):
inputs = kwargs['inputs']
all_inputs.update(inputs)
return all_inputs
def clean_tree(self):
"""
Removes tasks which are already represented as inputs (tree should
only contain root nodes).
"""
treecopy = copy.deepcopy(self.tree)
all_inputs = self.all_inputs()
for task in treecopy:
if task in all_inputs:
self.tree.remove(task)
def add_node(self, node_key, **kwargs):
"""
Adds the node and its kwargs to the tree and lookup table
"""
node_key = self.wrapper.standardize_key(node_key)
if not node_key in self.tree:
self.tree.append(node_key)
if not node_key in self.lookup_table:
self.lookup_table[node_key] = {}
self.lookup_table[node_key].update(kwargs)
if not 'inputs' in self.lookup_table[node_key]:
self.lookup_table[node_key]['inputs'] = []
self.clean_tree()
return node_key
def add_dependency(self, node_key, input_node_key):
"""
Adds input_node_key to list of inputs for node_key (both nodes are
also added to tree).
"""
node_key = self.add_node(node_key)
input_node_key = self.add_node(input_node_key)
if not node_key == input_node_key:
self.lookup_table[node_key]['inputs'].append(input_node_key)
self.clean_tree()
def args_for_node(self, node_key):
"""
Returns the dict of kw args for a node
"""
node_key = self.wrapper.standardize_key(node_key)
args = copy.deepcopy(self.lookup_table[node_key])
del args['inputs']
return args
def inputs_for_node(self, node_key):
"""
Returns the list of inputs for a node
"""
node_key = self.wrapper.standardize_key(node_key)
return self.lookup_table[node_key]['inputs']
def calculate_default_args_for_directory(self, path):
dir_path = posixpath.dirname(posixpath.abspath(path))
default_kwargs = {}
for d, args in self.default_args_for_directory:
if posixpath.abspath(d) in dir_path:
default_kwargs.update(args)
return default_kwargs
def calculate_environment_for_directory(self, path):
dir_path = posixpath.dirname(posixpath.abspath(path))
env = {}
for d, args in self.environment_for_directory:
if posixpath.abspath(d) in dir_path:
env.update(args)
return env
def walk(self):
"""
Creates Node objects for all elements in tree. Returns a list of root
nodes and a dict of all nodes referenced by qualified keys.
"""
        if self.wrapper.nodes:
            self.wrapper.log.warning("nodes are not empty: %s" % ", ".join(self.wrapper.nodes))
        if self.wrapper.roots:
            self.wrapper.log.warning("roots are not empty: %s" % ", ".join(self.wrapper.roots))
def create_dexy_node(key, *inputs, **kwargs):
"""
Stores already created nodes in nodes dict, if called more than
once for the same key, returns already created node.
"""
if not key in self.wrapper.nodes:
alias, pattern = self.wrapper.qualify_key(key)
node_environment = self.calculate_environment_for_directory(pattern)
kwargs_with_defaults = self.calculate_default_args_for_directory(pattern)
kwargs_with_defaults.update(kwargs)
kwargs_with_defaults.update({'environment' : node_environment })
self.wrapper.log.debug("creating node %s" % alias)
node = dexy.node.Node.create_instance(
alias,
pattern,
self.wrapper,
inputs,
**kwargs_with_defaults)
if node.inputs:
self.wrapper.log.debug("inputs are %s" % ", ".join(i.key for i in node.inputs))
self.wrapper.nodes[key] = node
for child in node.children:
self.wrapper.nodes[child.key_with_class()] = child
return self.wrapper.nodes[key]
def parse_item(key):
inputs = self.inputs_for_node(key)
kwargs = self.args_for_node(key)
self.wrapper.log.debug("parsing item %s" % key)
self.wrapper.log.debug(" inputs: %s" % ", ".join("%r" % inpt for inpt in inputs))
self.wrapper.log.debug(" kwargs: %s" % ", ".join("%s: %r" % (k, v) for k, v in kwargs.items()))
if kwargs.get('inactive') or kwargs.get('disabled'):
return
matches_target = self.wrapper.target and key.startswith(self.wrapper.target)
if not kwargs.get('default', True) and not self.wrapper.full and not matches_target:
return
input_nodes = [parse_item(i) for i in inputs if i]
input_nodes = [i for i in input_nodes if i]
return create_dexy_node(key, *input_nodes, **kwargs)
for node_key in self.tree:
root_node = parse_item(node_key)
if root_node:
self.wrapper.roots.append(root_node)
class Parser(dexy.plugin.Plugin, metaclass=dexy.plugin.PluginMeta):
"""
Parse various types of config file.
"""
aliases = []
_settings = {}
def __init__(self, wrapper, ast):
self.wrapper = wrapper
self.ast = ast
def file_exists(self, directory, filename):
filepath = self.wrapper.join_dir(directory, filename)
return self.wrapper.file_available(filepath)
def parse(self, directory, input_text):
pass
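# Hedged sketch (not part of dexy itself): a minimal Parser subclass showing
# how a concrete parser feeds the AbstractSyntaxTree -- one document key per
# line, with an optional "key <- input" dependency syntax invented here for
# illustration.
class LineParser(Parser):
    """
    Toy parser: one node key per line, '<-' marks a dependency.
    """
    aliases = ["lines"]
    def parse(self, directory, input_text):
        for line in input_text.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if "<-" in line:
                key, input_key = (part.strip() for part in line.split("<-", 1))
                self.ast.add_dependency(key, input_key)
            else:
                self.ast.add_node(line)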
|
9e5686bbcd116c2045d16d0c55f131bc315d11c3
|
786f9ae62cb62d2b8f193350d8beb695c47d2e98
|
/pyatv/protocols/mrp/protobuf/UpdateContentItemMessage_pb2.py
|
0a5e6205eaa1c96ddadb307a1d7a2b18757b4983
|
[
"MIT"
] |
permissive
|
postlund/pyatv
|
a4213957d4d9b557c16310450bfd444cc715e17c
|
05ca46d2a8bbc8e725ad63794d14b2d1fb9913fa
|
refs/heads/master
| 2023-09-01T20:11:48.374857
| 2023-08-29T04:23:22
| 2023-08-29T11:13:17
| 80,614,028
| 749
| 120
|
MIT
| 2023-09-14T04:54:01
| 2017-02-01T11:30:30
|
Python
|
UTF-8
|
Python
| false
| true
| 1,997
|
py
|
UpdateContentItemMessage_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/UpdateContentItemMessage.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
from pyatv.protocols.mrp.protobuf import ContentItem_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ContentItem__pb2
from pyatv.protocols.mrp.protobuf import PlayerPath_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_PlayerPath__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n;pyatv/protocols/mrp/protobuf/UpdateContentItemMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\x1a.pyatv/protocols/mrp/protobuf/ContentItem.proto\x1a-pyatv/protocols/mrp/protobuf/PlayerPath.proto\"_\n\x18UpdateContentItemMessage\x12\"\n\x0c\x63ontentItems\x18\x01 \x03(\x0b\x32\x0c.ContentItem\x12\x1f\n\nplayerPath\x18\x02 \x01(\x0b\x32\x0b.PlayerPath:M\n\x18updateContentItemMessage\x12\x10.ProtocolMessage\x18< \x01(\x0b\x32\x19.UpdateContentItemMessage')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'pyatv.protocols.mrp.protobuf.UpdateContentItemMessage_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(updateContentItemMessage)
DESCRIPTOR._options = None
_globals['_UPDATECONTENTITEMMESSAGE']._serialized_start=210
_globals['_UPDATECONTENTITEMMESSAGE']._serialized_end=305
# @@protoc_insertion_point(module_scope)
|
de5b5256f85e530099fd89b23a9970bba99804fd
|
39241620c6271758ea97d92fa4cf9f45aa7d2510
|
/src/python/aim/_sdk/function.py
|
434f4306c00fc3482db6c156e85de0bf5ad87ec1
|
[
"Apache-2.0"
] |
permissive
|
aimhubio/aim
|
4618e5ebdaf69d216cbe61c6187b005d443839a9
|
34e5c2c29abe9b26699760074adcadfe8fd4cfe0
|
refs/heads/main
| 2023-09-01T04:23:22.276343
| 2023-09-01T03:47:13
| 2023-09-01T03:47:13
| 189,640,071
| 4,091
| 276
|
Apache-2.0
| 2023-09-14T15:27:03
| 2019-05-31T18:25:07
|
Python
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
function.py
|
import inspect
from typing import Callable, Dict
class Function:
registry: Dict[str, 'Function'] = {}
def __init__(self, func: Callable, name: str):
self._validate_function(func)
self._func = func
self._name = f'{name}.{func.__name__}'
@staticmethod
def _validate_function(func: Callable):
signature = inspect.signature(func)
for parameter in signature.parameters.values():
if parameter.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.VAR_POSITIONAL):
raise ValueError(f'Aim function {func.__name__} must accept keyword arguments.')
@property
def name(self) -> str:
return self._name
@property
def is_generator(self) -> bool:
return inspect.isgeneratorfunction(self._func)
def execute(self, **kwargs):
return self._func(**kwargs)
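# Hedged usage sketch (not part of the aim source above): wrapping a plain
# keyword-only callable and executing it through the Function API.
if __name__ == '__main__':
    def add(*, x, y):
        return x + y
    fn = Function(add, 'demo')
    assert fn.name == 'demo.add'
    assert not fn.is_generator
    assert fn.execute(x=1, y=2) == 3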
|
25880d6e515e9d11fbb9e233f27ef49042df2a49
|
b18330180bc2dddf483e2eb81160934dcd818ef8
|
/assemblyHub/prepareHubFiles.py
|
fd3e92de42dae2d8fc74c4dc85f860d1342d30bd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ComparativeGenomicsToolkit/hal
|
3e5cc48c09bd0083d8d2eb849401ac5ed0bf8b06
|
e9e70b6c4a46a82aacf36632e1761804595d7f66
|
refs/heads/master
| 2023-08-31T06:32:09.551768
| 2023-06-22T23:55:36
| 2023-06-22T23:55:36
| 3,778,244
| 110
| 42
|
NOASSERTION
| 2023-08-31T15:13:38
| 2012-03-20T17:58:31
|
C++
|
UTF-8
|
Python
| false
| false
| 7,416
|
py
|
prepareHubFiles.py
|
#!/usr/bin/env python3
#Copyright (C) 2013 by Ngan Nguyen
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
"""
Make "hub.txt", "groups.txt", files that are required by AssemblyHub
Also prepare description.html files
"""
import os, sys
from sonLib.bioio import system
from optparse import OptionGroup
from hal.assemblyHub.assemblyHubCommon import getProperName
from Bio import Phylo
from hal.assemblyHub.treeCommon import isBinaryTree
def writeDescriptionFile(genome, outdir):
filename = os.path.join(outdir, "description.html")
f = open(filename, 'w')
f.write("%s\n" %genome)
f.close()
return
def writeTrackDb_composite_html(file, treeFile):
f = open(file, 'w')
#HACK:
#huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/ecoli/hub/TEST2"
huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/birds/birds2"
basename = os.path.basename(treeFile)
f.write("<img src=\"%s/%s\">\n" %(huburl, basename))
f.close()
def writeTrackDb_compositeStart(f, shortLabel, longLabel, bbdirs, bwdirs, genomes, properName, url, img):
#Composite track includes all annotations in BED & WIGGLE formats, their lifted-over tracks, and Snake tracks
f.write("track hubCentral\n")
f.write("compositeTrack on\n")
f.write("shortLabel %s\n" %shortLabel)
f.write("longLabel %s\n" %longLabel)
f.write("group comphub\n")
bedtracktypes = [os.path.basename(b.rstrip('/')) for b in bbdirs]
bedstr = " ".join(["%s=%s" %(item, item) for item in bedtracktypes])
wigtracktypes = [os.path.basename(b.rstrip('/')) for b in bwdirs]
wigstr = " ".join(["%s=%s" %(item, item) for item in wigtracktypes])
f.write("subGroup1 view Track_Type Snake=Alignments %s %s\n" %(bedstr, wigstr))
genomeStr = " ".join(["%s=%s" %(g, getProperName(g, properName)) for g in genomes])
f.write("subGroup2 orgs Organisms %s\n" %genomeStr)
f.write("dragAndDrop subTracks\n")
f.write("#allButtonPair on\n")
#f.write("sortOrder view=+ orgs=+\n")
f.write("dimensions dimensionX=view dimensionY=orgs\n")
f.write("noInherit on\n")
f.write("priority 0\n")
f.write("centerLabelsDense on\n")
f.write("visibility full\n")
f.write("html ../documentation/hubCentral\n")
if url and img:
imgurl = os.path.join(url, os.path.basename(img))
f.write("treeImage %s\n" %imgurl)
f.write("type bigBed 3\n")
f.write("\n")
def writeTrackDb_compositeSubTrack(f, name, visibility):
f.write("\ttrack hubCentral%s\n" %name)
f.write("\tshortLabel %s\n" %name)
f.write("\tview %s\n" %name)
f.write("\tvisibility %s\n" %visibility)
f.write("\tsubTrack hubCentral\n")
f.write("\n")
def writeGroupFile(outdir, hubLabel, annotations):
filename = os.path.join(outdir, "groups.txt")
f = open(filename, 'w')
f.write("name user\n")
f.write("label Custom\n")
f.write("priority 1\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.write("name map\n")
f.write("label Mapping\n")
f.write("priority 2\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
f.write("name comphub\n")
f.write("label %s\n" % hubLabel)
f.write("priority 3\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
f.write("name snake\n")
f.write("label Alignment Snakes\n")
f.write("priority 3\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
for annotation in annotations:
f.write("name annotation%s\n" %annotation)
f.write("label %s Annotations\n" % annotation.capitalize() )
f.write("priority 3\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.write("name exp\n")
f.write("label Experimental\n")
f.write("priority 4\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.close()
def writeHubFile(outdir, options):
hubfile = os.path.join(outdir, "hub.txt")
f = open(hubfile, "w")
f.write("hub %s\n" %options.hubLabel)
f.write("shortLabel %s\n" %options.shortLabel)
f.write("longLabel %s\n" %options.longLabel)
f.write("genomesFile genomes.txt\n")
f.write("email %s\n" %options.email)
f.close()
#=========== READ FILES ===========
def readList(file):
items = []
f = open(file, 'r')
for line in f:
items.append(line.strip())
f.close()
return items
def readRename(file):
name2new = {}
f = open(file, 'r')
for line in f:
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
items = line.split('\t')
if len(items) >=2:
name2new[items[0]] = items[1]
f.close()
return name2new
#=========== OPTIONS =============
def addHubOptions(parser):
group = parser.add_argument_group("HUB INFORMATION")
group.add_argument('--hub', dest='hubLabel', default='myHub', help='a single-word name of the directory containing the track hub files. Not displayed to hub users. ')
group.add_argument('--shortLabel', dest='shortLabel', default='my hub', help='the short name for the track hub. Suggested maximum length is 17 characters. Displayed as the hub name on the Track Hubs page and the track group name on the browser tracks page. ')
group.add_argument('--longLabel', dest='longLabel', default='my hub', help='a longer descriptive label for the track hub. Suggested maximum length is 80 characters. Displayed in the description field on the Track Hubs page. ')
group.add_argument('--email', dest='email', default='NoEmail', help='the contact to whom questions regarding the track hub should be directed. ')
group.add_argument('--genomes', dest='genomes', help='File specified list of genomes to make browser for. If specified, only create browsers for these genomes in the order provided by the list. Otherwise create browsers for all genomes in the input hal file')
group.add_argument('--rename', dest='rename', help='File that maps halfile genomeNames to names displayed on the browser. Format: <halGenomeName>\\t<genomeNameToDisplayOnBrowser>. ')
group.add_argument('--tree', dest='treeFile', help='Newick binary tree. The order of the tracks and the default track layout will be based on this tree if option "genomes" is not specified. If not specified, try to extract the newick tree from the input halfile.')
group.add_argument('--url', dest='url', help='Public url of the hub location')
group.add_argument('--twobitdir', dest='twobitdir', help='Optional. Directory containing the 2bit files of each genomes. Default: extract from the input hal file.')
def checkHubOptions(parser, options):
if options.genomes:
options.genomes = readList(options.genomes)
options.properName = {}
if options.rename and os.path.exists(options.rename):
options.properName = readRename(options.rename)
options.treeFig = None
options.leaves = None
options.tree = None
    if options.treeFile and not os.path.exists(options.treeFile):
        parser.error("The tree file %s does not exist.\n" % options.treeFile)
    elif options.treeFile:
        tree = Phylo.read(options.treeFile, 'newick')
        if isBinaryTree(tree):
            options.tree = tree
        else:
            sys.stderr.write("Warning: tree %s is not a binary tree. Will be ignored!\n" % options.treeFile)
|
702e6ca7169a1f0cbf92ef799e418e6d3fa1b416
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/cache/etcd_cache.py
|
c1a111bd4f9c46bdeee97a7692fda40ec7b7d0b6
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,747
|
py
|
etcd_cache.py
|
"""
Minion data cache plugin for Etcd key/value data store.
.. versionadded:: 2018.3.0
.. versionchanged:: 3005
It is up to the system administrator to set up and configure the Etcd
infrastructure. All that is needed for this plugin is a working Etcd agent
with read-write access to the key-value store.
The related documentation can be found in the `Etcd documentation`_.
To enable this cache plugin, the master will need the python client for
Etcd installed. This can be easily installed with pip:
.. code-block:: bash
pip install python-etcd
.. note::
While etcd API v3 has been implemented in other places within salt,
etcd_cache does not support it at this time due to fundamental differences in
how the versions are designed and v3 not being compatible with the cache API.
Optionally, depending on the Etcd agent configuration, the following values
could be set in the master config. These are the defaults:
.. code-block:: yaml
etcd.host: 127.0.0.1
etcd.port: 2379
etcd.protocol: http
etcd.allow_reconnect: True
etcd.allow_redirect: False
etcd.srv_domain: None
etcd.read_timeout: 60
etcd.username: None
etcd.password: None
etcd.cert: None
etcd.ca_cert: None
Related docs can be found in the `python-etcd documentation`_.
To use the etcd as a minion data cache backend, set the master ``cache`` config
value to ``etcd``:
.. code-block:: yaml
cache: etcd
In Phosphorus, ls/list was changed to always return the final name in the path.
This should only make a difference if you were directly using ``ls`` on paths
that were more or less nested than, for example: ``1/2/3/4``.
.. _`Etcd documentation`: https://github.com/coreos/etcd
.. _`python-etcd documentation`: http://python-etcd.readthedocs.io/en/latest/
"""
import base64
import logging
import time
import salt.payload
from salt.exceptions import SaltCacheError
try:
import etcd
HAS_ETCD = True
except ImportError:
HAS_ETCD = False
_DEFAULT_PATH_PREFIX = "/salt_cache"
if HAS_ETCD:
# The client logging tries to decode('ascii') binary data
# and is too verbose
etcd._log.setLevel(logging.INFO) # pylint: disable=W0212
log = logging.getLogger(__name__)
client = None
path_prefix = None
_tstamp_suffix = ".tstamp"
# Module properties
__virtualname__ = "etcd"
__func_alias__ = {"ls": "list"}
def __virtual__():
"""
Confirm that python-etcd package is installed.
"""
if not HAS_ETCD:
return (
False,
"Please install python-etcd package to use etcd data cache driver",
)
return __virtualname__
def _init_client():
"""Setup client and init datastore."""
global client, path_prefix, _tstamp_suffix
if client is not None:
return
etcd_kwargs = {
"host": __opts__.get("etcd.host", "127.0.0.1"),
"port": __opts__.get("etcd.port", 2379),
"protocol": __opts__.get("etcd.protocol", "http"),
"allow_reconnect": __opts__.get("etcd.allow_reconnect", True),
"allow_redirect": __opts__.get("etcd.allow_redirect", False),
"srv_domain": __opts__.get("etcd.srv_domain", None),
"read_timeout": __opts__.get("etcd.read_timeout", 60),
"username": __opts__.get("etcd.username", None),
"password": __opts__.get("etcd.password", None),
"cert": __opts__.get("etcd.cert", None),
"ca_cert": __opts__.get("etcd.ca_cert", None),
}
_tstamp_suffix = __opts__.get("etcd.timestamp_suffix", _tstamp_suffix)
path_prefix = __opts__.get("etcd.path_prefix", _DEFAULT_PATH_PREFIX)
if path_prefix != "":
path_prefix = "/{}".format(path_prefix.strip("/"))
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
client = etcd.Client(**etcd_kwargs)
try:
client.read(path_prefix)
except etcd.EtcdKeyNotFound:
log.info("etcd: Creating dir %r", path_prefix)
client.write(path_prefix, None, dir=True)
def store(bank, key, data):
"""
Store a key value.
"""
_init_client()
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
etcd_tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
try:
value = salt.payload.dumps(data)
client.write(etcd_key, base64.b64encode(value))
client.write(etcd_tstamp_key, int(time.time()))
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error writing the key, {}: {}".format(etcd_key, exc)
)
def fetch(bank, key):
"""
Fetch a key value.
"""
_init_client()
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
try:
value = client.read(etcd_key).value
return salt.payload.loads(base64.b64decode(value))
except etcd.EtcdKeyNotFound:
return {}
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(etcd_key, exc)
)
def flush(bank, key=None):
"""
Remove the key from the cache bank with all the key content.
"""
_init_client()
if key is None:
etcd_key = "{}/{}".format(path_prefix, bank)
tstamp_key = None
else:
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
try:
client.read(etcd_key)
except etcd.EtcdKeyNotFound:
return # nothing to flush
try:
if tstamp_key:
client.delete(tstamp_key)
client.delete(etcd_key, recursive=True)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error removing the key, {}: {}".format(etcd_key, exc)
)
def _walk(r):
"""
Recursively walk dirs. Return flattened list of keys.
r: etcd.EtcdResult
"""
if not r.dir:
if r.key.endswith(_tstamp_suffix):
return []
else:
return [r.key.rsplit("/", 1)[-1]]
keys = []
for c in client.read(r.key).children:
keys.extend(_walk(c))
return keys
def ls(bank):
"""
Return an iterable object containing all entries stored in the specified
bank.
"""
_init_client()
path = "{}/{}".format(path_prefix, bank)
try:
return _walk(client.read(path))
except etcd.EtcdKeyNotFound:
return []
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
'There was an error getting the key "{}": {}'.format(bank, exc)
) from exc
def contains(bank, key):
"""
Checks if the specified bank contains the specified key.
"""
_init_client()
etcd_key = "{}/{}/{}".format(path_prefix, bank, key or "")
try:
r = client.read(etcd_key)
# return True for keys, not dirs, unless key is None
return r.dir if key is None else r.dir is False
except etcd.EtcdKeyNotFound:
return False
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error getting the key, {}: {}".format(etcd_key, exc)
)
def updated(bank, key):
"""
Return Unix Epoch based timestamp of when the bank/key was updated.
"""
_init_client()
tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
try:
value = client.read(tstamp_key).value
return int(value)
except etcd.EtcdKeyNotFound:
return None
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(tstamp_key, exc)
)
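# Hedged usage sketch (comments only -- in practice the Salt loader injects
# ``__opts__`` and drives this cache API itself). Round trip against a local
# etcd v2 agent on the default 127.0.0.1:2379:
#
#   store("minions/minion1", "data", {"grains": {"os": "Linux"}})
#   assert contains("minions/minion1", "data")
#   assert "data" in ls("minions/minion1")
#   fetched = fetch("minions/minion1", "data")
#   flush("minions/minion1", "data")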
|
a9eaf6537a8f9a69451640048cca7c3c9eefc2bd
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/common/recipes-rest/rest-api/files/node_identify.py
|
876dc603e3c075361929a0f712c4c2fb0ef0862e
|
[] |
no_license
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
node_identify.py
|
#!/usr/bin/env python
from typing import Any, Dict, Optional
from common_utils import async_exec
from kv import FPERSIST, kv_get
from node import node
from rest_pal_legacy import pal_get_platform_name
identify_name = {"FBTTN": "identify_slot1", "Grand Canyon": "system_identify_server"}
class identifyNode(node):
def __init__(self, name, info=None, actions=None):
self.name = name
        self.info = {} if info is None else info
        self.actions = [] if actions is None else actions
async def getInformation(self, param: Optional[Dict[Any, Any]] = None):
# Get Platform Name
plat_name = pal_get_platform_name()
if plat_name in identify_name:
identify_status = kv_get(identify_name[plat_name], FPERSIST)
else:
identify_status = kv_get("identify_slot1", FPERSIST)
info = {"Status of identify LED": identify_status}
return info
async def doAction(self, data, param: Optional[Dict[Any, Any]] = None):
if data["action"] == "on":
cmd = "/usr/bin/fpc-util --identify on"
_, stdout, _ = await async_exec(cmd, shell=True)
if stdout.startswith("Usage"):
res = "failure"
else:
res = "success"
elif data["action"] == "off":
cmd = "/usr/bin/fpc-util --identify off"
_, stdout, _ = await async_exec(cmd, shell=True)
if stdout.startswith("Usage"):
res = "failure"
else:
res = "success"
else:
res = "not support this action"
result = {"result": res}
return result
def get_node_identify(name):
actions = ["on", "off"]
return identifyNode(name=name, actions=actions)
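# Hedged usage sketch (assumes a BMC environment where the kv store and
# /usr/bin/fpc-util actually exist): drive the identify LED node from an
# asyncio event loop.
#
#   import asyncio
#   node = get_node_identify("identify")
#   asyncio.run(node.doAction({"action": "on"}))
#   print(asyncio.run(node.getInformation()))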
|
2496550ba488f95ed7588f147830da3d1870eca8
|
095540173f0af89217be7a4ae4b3177c4e32af6f
|
/tests/perf/simple.py
|
b24733930268dd8fe5c708dbefb54b4ada165520
|
[
"BSD-3-Clause"
] |
permissive
|
riga/tfdeploy
|
c2cf534cff82aac716c76146a3d10072a619386d
|
22aea652fe12f081be43414e0f1f76c7d9aaf53c
|
refs/heads/master
| 2022-06-24T05:55:08.391340
| 2021-01-08T09:52:49
| 2021-01-08T09:52:49
| 53,326,300
| 394
| 50
| null | 2017-05-15T09:16:32
| 2016-03-07T13:08:21
|
Python
|
UTF-8
|
Python
| false
| false
| 749
|
py
|
simple.py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import tfdeploy as td
import numpy as np
# setup tf graph
sess = tf.Session()
x = tf.placeholder("float", shape=[None, 784], name="input")
W = tf.Variable(tf.truncated_normal([784, 100], stddev=0.05))
b = tf.Variable(tf.zeros([100]))
y = tf.nn.softmax(tf.matmul(x, W) + b, name="output")
if td._tf_version[:3] < (0, 12, 0):
sess.run(tf.initialize_all_variables())
else:
sess.run(tf.global_variables_initializer())
# setup td model
model = td.Model()
model.add(y, sess)
inp, outp = model.get("input", "output")
# testing code
batch = np.random.rand(10000, 784)
def test_tf():
return y.eval(session=sess, feed_dict={x: batch})
def test_td():
return outp.eval({inp: batch})
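# Hedged benchmarking note (assumes this file is importable as ``simple``):
# the two eval paths are typically compared with timeit from the shell, e.g.
#
#   python -m timeit -s "import simple" "simple.test_tf()"
#   python -m timeit -s "import simple" "simple.test_td()"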
|
6602d1c18f87b47b69954dd0c2c964064498fdf0
|
ce32e0e1b9568c710a3168abc3c638d6f9f6c31b
|
/vnpy/app/spread_trading/ui/__init__.py
|
c7639754be682a65f99cbddc36f4ecf88dba281f
|
[
"MIT"
] |
permissive
|
msincenselee/vnpy
|
55ae76ca32cae47369a66bd2d6589c13d7a0bdd4
|
7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7
|
refs/heads/vnpy2
| 2022-05-19T10:06:55.504408
| 2022-03-19T15:26:01
| 2022-03-19T15:26:01
| 38,525,806
| 359
| 158
|
MIT
| 2020-09-09T00:09:12
| 2015-07-04T07:27:46
|
C++
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
__init__.py
|
from .widget import SpreadManager
|
4cea26510d2cb7d13d0eb9679887add032be6a12
|
5c00bd92979f6e20038926ec45068fe8e6a61565
|
/mushroom_rl/utils/callbacks/collect_max_q.py
|
9ab33dd389ca4064d372bbf669d7e9eeef7238a0
|
[
"MIT"
] |
permissive
|
MushroomRL/mushroom-rl
|
2bf34ce38664a114ad37dc0468e1721e048359ab
|
2decae31459a3481130afe1263bc0a5ba7954a99
|
refs/heads/dev
| 2023-08-30T16:33:56.100589
| 2023-08-05T15:24:13
| 2023-08-05T15:24:13
| 83,158,675
| 477
| 128
|
MIT
| 2023-08-27T20:30:43
| 2017-02-25T19:59:57
|
Python
|
UTF-8
|
Python
| false
| false
| 712
|
py
|
collect_max_q.py
|
from mushroom_rl.utils.callbacks.callback import Callback
import numpy as np
class CollectMaxQ(Callback):
"""
This callback can be used to collect the maximum action value in a given
state at each call.
"""
def __init__(self, approximator, state):
"""
Constructor.
Args:
approximator ([Table, EnsembleTable]): the approximator to use;
state (np.ndarray): the state to consider.
"""
self._approximator = approximator
self._state = state
super().__init__()
def __call__(self, dataset):
q = self._approximator.predict(self._state)
max_q = np.max(q)
self._data_list.append(max_q)
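# Hedged usage sketch (not part of mushroom_rl): attach the callback during
# fitting and read back the collected values. ``agent``, ``mdp`` and
# ``approximator`` are assumed to exist; Core is mushroom_rl.core.Core.
#
#   callback = CollectMaxQ(approximator, np.array([0]))
#   core = Core(agent, mdp, callbacks_fit=[callback])
#   core.learn(n_steps=1000, n_steps_per_fit=1)
#   max_qs = callback.get()  # list of max Q-values, one per fit call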
|
5db2f589dfdf1d4731581ed82ded7a54df55494e
|
827b0c8c48407a1c4a4f89bacd4afcbe1be1dc83
|
/flaskbb/cli/utils.py
|
0b39bed9974eeffa4e874ec1130d64868f5f31bf
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
flaskbb/flaskbb
|
154ffa6476d094594c77f2fca28447b264b34779
|
bc999f1b9baf129dc06126940880a01ac94ba405
|
refs/heads/master
| 2023-08-22T19:47:34.877376
| 2023-03-13T06:51:20
| 2023-03-13T06:51:20
| 12,751,570
| 1,443
| 448
|
NOASSERTION
| 2023-07-26T22:36:21
| 2013-09-11T08:39:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,366
|
py
|
utils.py
|
# -*- coding: utf-8 -*-
"""
flaskbb.cli.utils
~~~~~~~~~~~~~~~~~
This module contains some utility helpers that are used across
commands.
:copyright: (c) 2016 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
import click
from flask import current_app, __version__ as flask_version
from flask_themes2 import get_theme
from flaskbb import __version__
from flaskbb.utils.populate import create_user, update_user
_email_regex = r"[^@]+@[^@]+\.[^@]+"
class FlaskBBCLIError(click.ClickException):
"""An exception that signals a usage error including color support.
This aborts any further handling.
:param styles: The style kwargs which should be forwarded to click.secho.
"""
def __init__(self, message, **styles):
click.ClickException.__init__(self, message)
self.styles = styles
def show(self, file=None):
if file is None:
file = click._compat.get_text_stderr()
click.secho("error: %s" % self.format_message(), file=file,
**self.styles)
class EmailType(click.ParamType):
"""The choice type allows a value to be checked against a fixed set of
supported values. All of these values have to be strings.
See :ref:`choice-opts` for an example.
"""
name = "email"
def convert(self, value, param, ctx):
        # lightweight sanity check, not a full RFC 5322 validation
if re.match(_email_regex, value):
return value
else:
self.fail(("invalid email: %s" % value), param, ctx)
def __repr__(self):
return "email"
def validate_plugin(plugin):
"""Checks if a plugin is installed.
TODO: Figure out how to use this in a callback. Doesn't work because
the appcontext can't be found and using with_appcontext doesn't
help either.
"""
# list_name holds all plugin names, also the disabled ones (they won't do
# anything as they are set as 'blocked' on pluggy)
if plugin not in current_app.pluggy.list_name():
raise FlaskBBCLIError("Plugin {} not found.".format(plugin), fg="red")
return True
def validate_theme(theme):
"""Checks if a theme is installed."""
try:
get_theme(theme)
except KeyError:
raise FlaskBBCLIError("Theme {} not found.".format(theme), fg="red")
def get_cookiecutter():
cookiecutter_available = False
try:
from cookiecutter.main import cookiecutter # noqa
cookiecutter_available = True
except ImportError:
pass
if not cookiecutter_available:
raise FlaskBBCLIError(
"Can't continue because cookiecutter is not installed. "
"You can install it with 'pip install cookiecutter'.", fg="red"
)
return cookiecutter
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = ("FlaskBB %(version)s using Flask %(flask_version)s on "
"Python %(python_version)s")
click.echo(message % {
'version': __version__,
'flask_version': flask_version,
'python_version': sys.version.split("\n")[0]
}, color=ctx.color)
ctx.exit()
def prompt_save_user(username, email, password, group, only_update=False):
if not username:
username = click.prompt(
click.style("Username", fg="magenta"), type=str,
default=os.environ.get("USER", "")
)
if not email:
email = click.prompt(
click.style("Email address", fg="magenta"), type=EmailType()
)
if not password:
password = click.prompt(
click.style("Password", fg="magenta"), hide_input=True,
confirmation_prompt=True
)
if not group:
group = click.prompt(
click.style("Group", fg="magenta"),
type=click.Choice(["admin", "super_mod", "mod", "member"]),
default="admin"
)
if only_update:
return update_user(username, password, email, group)
return create_user(username, password, email, group)
def prompt_config_path(config_path):
"""Asks for a config path. If the path exists it will ask the user
for a new path until a he enters a path that doesn't exist.
:param config_path: The path to the configuration.
"""
click.secho("The path to save this configuration file.", fg="cyan")
while True:
if os.path.exists(config_path) and click.confirm(click.style(
"Config {cfg} exists. Do you want to overwrite it?"
.format(cfg=config_path), fg="magenta")
):
break
config_path = click.prompt(
click.style("Save to", fg="magenta"),
default=config_path)
if not os.path.exists(config_path):
break
return config_path
def write_config(config, config_template, config_path):
"""Writes a new config file based upon the config template.
:param config: A dict containing all the key/value pairs which should be
used for the new configuration file.
:param config_template: The config (jinja2-)template.
:param config_path: The place to write the new config file.
"""
with open(config_path, 'wb') as cfg_file:
cfg_file.write(
config_template.render(**config).encode("utf-8")
)
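# Hedged usage sketch (not part of flaskbb): EmailType plugs into click like
# any other ParamType.
#
#   @click.command()
#   @click.option("--email", type=EmailType())
#   def invite(email):
#       click.echo("inviting %s" % email)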
|
1155548d1434879ca961de573765099d9ad2e042
|
74ac893aefb80fcc297b1dc899cd2a8124bd3c75
|
/test/python/echo_text.py
|
04e69d086c8853c631f4ae9fda4f8e2e1159f944
|
[
"MIT"
] |
permissive
|
extrabacon/python-shell
|
cfa12695c96df507ffbac1bfffc95382be507116
|
304da70b57b586bafdeb55dfada02431efb0c49e
|
refs/heads/master
| 2023-08-16T05:59:36.950698
| 2023-02-11T05:43:26
| 2023-02-11T05:43:26
| 17,671,651
| 2,132
| 262
| null | 2023-08-30T02:38:32
| 2014-03-12T15:22:28
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 86
|
py
|
echo_text.py
|
import sys
# simple text echo script: repeat each stdin line, dropping the trailing newline
for line in sys.stdin:
print(line[:-1])
|
db556945948e86181cb709dbd4aec7814a91eac3
|
c0eeaae1a689a349a86b1e0cf672f7b90fa32886
|
/solara/website/pages/api/use_exception.py
|
32dbeb4f959153d87e2e3a8e020aa2b80926220b
|
[
"MIT"
] |
permissive
|
widgetti/solara
|
a624b9e6408b080cb9845c46d9a3dd56da61a1b7
|
baa36623c3eb7db50672d8eb3d3cdab9220a50a6
|
refs/heads/master
| 2023-08-31T06:59:51.637969
| 2023-07-31T19:04:29
| 2023-07-31T19:04:29
| 467,834,772
| 959
| 68
|
MIT
| 2023-09-13T19:33:42
| 2022-03-09T08:12:01
|
Python
|
UTF-8
|
Python
| false
| false
| 898
|
py
|
use_exception.py
|
import solara
title = "use_exception"
set_fail = None
clear = None
@solara.component
def UnstableComponent(number: int):
if number == 3:
raise Exception("I do not like 3")
return solara.Text(f"You picked {number}")
@solara.component
def Page():
value, set_value = solara.use_state(1)
value_previous = solara.use_previous(value)
exception, clear_exception = solara.use_exception()
# print(exception)
with solara.VBox() as main:
if exception:
def reset():
set_value(value_previous)
clear_exception()
solara.Text("Exception: " + str(exception))
solara.Button(label="Go to previous state", on_click=reset)
else:
solara.IntSlider(value=value, min=0, max=10, on_value=set_value, label="Pick a number, except 3")
UnstableComponent(value)
return main
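# Hedged usage note: pages like this are typically served with the solara CLI,
# e.g. ``solara run use_exception.py``, which renders Page() in the browser.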
|