id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
# Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Python demo showing how to use the MLPerf Inference LoadGen over the Network bindings.
This programs runs in the LON Node side.
It runs the demo in MLPerf server mode over the network.
It communicates over the network with a Network SUT node,
which is running the Network SUT demo based on a flask server, implemented in SUT_over_network.py
"""
import threading
import requests
import array
import time
from absl import app
from absl import flags
import mlperf_loadgen
FLAGS = flags.FLAGS
flags.DEFINE_list('sut_server', 'http://localhost:8000',
'Address of the server(s) under test.')
class QSL:
    """Demo QuerySampleLibrary backed by synthetic (dummy) features."""

    def __init__(self, total_sample_count, performance_sample_count):
        # Pre-generate one dummy feature string per sample id.
        self.eval_features = {}
        for sample_id in range(total_sample_count):
            self.eval_features[sample_id] = f"what_is_my_dummy_feature_{sample_id}?"
        self.qsl = mlperf_loadgen.ConstructQSL(
            total_sample_count,
            performance_sample_count,
            self.load_samples_to_ram,
            self.unload_samples_from_ram,
        )

    def get_features(self, sample_id):
        """Return the feature associated with *sample_id*."""
        return self.eval_features[sample_id]

    def load_samples_to_ram(self, query_samples):
        """LoadGen callback; unused since every feature already lives in RAM."""
        del query_samples

    def unload_samples_from_ram(self, query_samples):
        """LoadGen callback; unused since every feature already lives in RAM."""
        del query_samples

    def __del__(self):
        mlperf_loadgen.DestroyQSL(self.qsl)
class QDL:
    """QDL acting as a proxy to the SUT.

    This QDL communicates with the SUT via HTTP.
    It uses two endpoints to communicate with the SUT:
    - /predict/ : Send a query to the SUT and get a response.
    - /getname/ : Get the name of the SUT. Send a getname to the SUT and get a response.
    """

    def __init__(self, qsl: "QSL", sut_server_addr: list):
        """
        Constructor for the QDL.

        Args:
            qsl: The QSL to use.
            sut_server_addr: A list of addresses of the SUT.
        """
        self.qsl = qsl
        # Construct QDL from the python binding
        self.qdl = mlperf_loadgen.ConstructQDL(
            self.issue_query, self.flush_queries, self.client_get_name)
        self.sut_server_addr = sut_server_addr
        self.num_nodes = len(sut_server_addr)
        # Round-robin state over the SUT nodes; guarded by a lock because
        # LoadGen may issue queries from multiple threads.
        self.next_sut_id = 0
        self.lock = threading.Lock()

    def issue_query(self, query_samples):
        """Process the query to send to the SUT"""
        threading.Thread(target=self.process_query_async,
                         args=[query_samples]).start()

    def flush_queries(self):
        """Flush the queries. Dummy implementation."""

    def process_query_async(self, query_samples):
        """
        This function is called by the Loadgen in a separate thread.
        It is responsible for
            1. Creating a query for the SUT, by reading the features from the QSL.
            2. Sending the query to the SUT.
            3. Waiting for the response from the SUT.
            4. Deserializing the response.
            5. Calling mlperf_loadgen.QuerySamplesComplete(query_samples, response)

        Args:
            query_samples: A list of QuerySample objects.
        """
        responses = []
        # Keep the response buffers alive until QuerySamplesComplete has been
        # called: QuerySampleResponse stores only a raw pointer/length, so a
        # per-iteration array could be garbage-collected before LoadGen reads it.
        response_arrays = []
        for s in query_samples:
            # QDL builds a real-world query and sends it to the SUT; the SUT
            # processes it and sends the result back to the QDL.
            features = self.qsl.get_features(s.index)
            time.sleep(.001)  # Ensure a maximal rate of queries to the SUT
            # Send the query to a SUT (round robin) and wait for a response.
            sut_result = self.METHOD_NAME(features, s.index)
            response_array = array.array('B', sut_result.encode('utf-8'))
            response_arrays.append(response_array)
            bi = response_array.buffer_info()
            responses.append(mlperf_loadgen.QuerySampleResponse(
                s.id, bi[0], bi[1]))
        mlperf_loadgen.QuerySamplesComplete(responses)

    def get_sut_id_round_robin(self):
        """Get the SUT id in round robin."""
        with self.lock:
            res = self.next_sut_id
            self.next_sut_id = (self.next_sut_id + 1) % self.num_nodes
        return res

    def METHOD_NAME(self, query, id):
        """Serialize the query, send it to the SUT in round robin, and return the deserialized response."""
        url = '{}/predict/'.format(self.sut_server_addr[self.get_sut_id_round_robin()])
        # Fixed: the payload previously used the *builtin* `id` function as the
        # dict key ({'query': query, id: id}); a non-string key is not JSON
        # serializable and crashed the request.
        response = requests.post(url, json={'query': query, 'id': id})
        return response.json()['result']

    def client_get_name(self):
        """Get the name of the SUT from ALL the SUTS."""
        if len(self.sut_server_addr) == 1:
            return requests.post(f'{self.sut_server_addr[0]}/getname/').json()['name']
        sut_names = [requests.post(f'{addr}/getname/').json()['name'] for addr in self.sut_server_addr]
        return "Multi-node SUT: " + ', '.join(sut_names)

    def __del__(self):
        mlperf_loadgen.DestroyQDL(self.qdl)
def main(argv):
    """Configure LoadGen for the Server scenario and run the over-the-network test."""
    del argv  # Unused.
    settings = mlperf_loadgen.TestSettings()
    settings.scenario = mlperf_loadgen.TestScenario.Server
    settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
    # Target 100 queries/second with a 100 ms server latency bound.
    settings.server_target_qps = 100
    settings.server_target_latency_ns = 100000000
    # Run at least 100 queries and at least 10 seconds of traffic.
    settings.min_query_count = 100
    settings.min_duration_ms = 10000
    # QDL and QSL: 1024 total samples, 128 in the performance set.
    qsl = QSL(1024, 128)
    qdl = QDL(qsl, sut_server_addr=FLAGS.sut_server)
    mlperf_loadgen.StartTest(qdl.qdl, qsl.qsl, settings)
if __name__ == "__main__":
app.run(main) |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.optim.amp_optimizer import AMPOptimizer
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from .data.speech_to_text_dataset_with_domain import SpeechToTextDatasetCreatorWithDomain
from .loss.attention_head_selection import HeadSelectionLoss
@register_task("speech_to_text_head_selection")
class SpeechToTextHeadSelectionTask(SpeechToTextTask):
    """Speech-to-text task with per-task (language or domain) attention-head
    selection, plus a KL regularizer pushing head usage toward a uniform prior.
    """

    @classmethod
    def add_args(cls, parser):
        """Add head-selection options on top of the base S2T arguments."""
        SpeechToTextTask.add_args(parser)
        parser.add_argument(
            "--task-type",
            type=str,
            default="lang",
            help="task type for head selection, lang or domain"
        )
        parser.add_argument(
            "--kl-weight",
            type=float,
            default=0.0,
            help="the weight of KL loss"
        )

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.task_type = args.task_type
        assert self.task_type in ["lang", "domain"], "invalid task_type: {}, should be either lang or domain".format(self.task_type)
        self.METHOD_NAME(args.train_subset)
        # Uniform prior = selected heads / total heads for each side.
        # NOTE(review): the original computed the encoder prior from the
        # *decoder* head counts and vice versa; fixed so each prior uses its
        # own side's counts.
        self.encoder_head_prior = float(args.encoder_attention_heads) / args.total_encoder_attention_heads
        self.decoder_head_prior = float(args.decoder_attention_heads) / args.total_decoder_attention_heads
        self.kl_loss = HeadSelectionLoss(args)

    def METHOD_NAME(self, train_subset):
        """Build src-lang/tgt-lang/domain -> id maps from the train subset names
        and derive the number of encoder/decoder tasks."""
        src_lang_set, tgt_lang_set, domain_set = set(), set(), set()
        for split in train_subset.split(","):
            seq = split.split("_")
            assert len(seq) == 4, "subset {} should be in the format of train_src_tgt_domain".format(split)
            _, src_lang, tgt_lang, domain = seq
            src_lang_set.add(src_lang)
            tgt_lang_set.add(tgt_lang)
            domain_set.add(domain)
        # Sort for a deterministic id assignment across runs.
        src_langs = sorted(src_lang_set)
        tgt_langs = sorted(tgt_lang_set)
        domains = sorted(domain_set)
        self.src_lang_map = {src_lang: i for (i, src_lang) in enumerate(src_langs)}
        self.tgt_lang_map = {tgt_lang: i for (i, tgt_lang) in enumerate(tgt_langs)}
        self.domain_map = {domain: i for (i, domain) in enumerate(domains)}
        if self.task_type == "lang":
            self.encoder_tasks = len(self.src_lang_map)
            self.decoder_tasks = len(self.tgt_lang_map)
        elif self.task_type == "domain":
            self.encoder_tasks = len(self.domain_map)
            self.decoder_tasks = len(self.domain_map)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load one S2T split, annotated with language/domain ids."""
        is_train_split = split.startswith("train")
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreatorWithDomain.from_tsv(
            self.args.data,
            self.data_cfg,
            split,
            self.tgt_dict,
            pre_tokenizer,
            bpe_tokenizer,
            is_train_split=is_train_split,
            epoch=epoch,
            seed=self.args.seed,
            src_lang_map=self.src_lang_map,
            tgt_lang_map=self.tgt_lang_map,
            domain_map=self.domain_map,
            speaker_to_id=self.speaker_to_id
        )

    def build_model(self, args):
        """Expose the per-side task counts to the model before construction."""
        args.encoder_tasks = self.encoder_tasks
        args.decoder_tasks = self.decoder_tasks
        return super(SpeechToTextHeadSelectionTask, self).build_model(args)

    def get_sample_sizes(self, sample, task_ids, num_tasks):
        """Return per-task token counts for the batch.

        task_ids: (bsz,) tensor assigning each sample to a task.
        """
        bsz = task_ids.size(0)
        # One-hot assignment matrix of shape (num_tasks, bsz).
        mat = torch.zeros((num_tasks, bsz), device=task_ids.device)
        mat[task_ids, torch.arange(bsz)] = 1.0
        # Count non-pad target tokens per sample; assumes pad index 1
        # (fairseq's default) — TODO confirm against the dictionary.
        ntokens = torch.sum(sample['target'] != 1, dim=-1)
        sample_sizes = torch.matmul(mat, ntokens.float())
        return sample_sizes

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """Forward/backward with the extra head-selection KL regularizers."""
        model.train()
        model.set_num_updates(update_num)
        # Route each sample to its encoder/decoder task id.
        if self.task_type == "lang":
            encoder_task_ids = sample["src_lang_ids"]
            decoder_task_ids = sample["tgt_lang_ids"]
        elif self.task_type == "domain":
            encoder_task_ids = sample["domain_ids"]
            decoder_task_ids = sample["domain_ids"]
        model.encoder.set_task_ids(encoder_task_ids)
        model.decoder.set_task_ids(decoder_task_ids)
        with torch.autograd.profiler.record_function("forward"):
            with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
                loss, sample_size, logging_output = criterion(model, sample)
        # KL loss terms, one per enabled head-selection module.
        if self.args.encoder_attn_head_select:
            sample_sizes = self.get_sample_sizes(sample, encoder_task_ids, self.encoder_tasks)
            loss += self.kl_loss(
                model.encoder.attn_head_selector.head_samples,
                sample_sizes,
                self.encoder_head_prior
            )
        if self.args.decoder_self_attn_head_select:
            sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
            loss += self.kl_loss(
                model.decoder.self_attn_head_selector.head_samples,
                sample_sizes,
                self.decoder_head_prior
            )
        if self.args.dec_enc_attn_head_select:
            sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
            # Fixed typo: was `head_sampes`, which raised AttributeError
            # whenever --dec-enc-attn-head-select was enabled (the sibling
            # branches above both use `head_samples`).
            loss += self.kl_loss(
                model.decoder.enc_attn_head_selector.head_samples,
                sample_sizes,
                self.decoder_head_prior
            )
        if ignore_grad:
            loss *= 0
        with torch.autograd.profiler.record_function("backward"):
            optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        """Evaluation forward pass with task ids set (no KL terms)."""
        model.eval()
        if self.task_type == "lang":
            encoder_task_ids = sample["src_lang_ids"]
            decoder_task_ids = sample["tgt_lang_ids"]
        elif self.task_type == "domain":
            encoder_task_ids = sample["domain_ids"]
            decoder_task_ids = sample["domain_ids"]
        model.encoder.set_task_ids(encoder_task_ids)
        model.decoder.set_task_ids(decoder_task_ids)
        with torch.no_grad():
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output

    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None
    ):
        """Generation with task ids set on every model in the ensemble."""
        with torch.no_grad():
            # A single task id per batch is assumed at inference; take the
            # first element of each id tensor.
            if self.task_type == "lang":
                encoder_task_ids = sample["src_lang_ids"][:1]
                decoder_task_ids = sample["tgt_lang_ids"][:1]
            elif self.task_type == "domain":
                encoder_task_ids = sample["domain_ids"][:1]
                decoder_task_ids = sample["domain_ids"][:1]
            for model in models:
                model.encoder.set_task_ids(encoder_task_ids)
                model.decoder.set_task_ids(decoder_task_ids)
            return generator.generate(
                models, sample, prefix_tokens=prefix_tokens, constraints=constraints
            )
import pytest
from linkml_runtime.linkml_model import SchemaDefinition
from linkml_runtime.loaders import yaml_loader
from linkml.validator.plugins.recommended_slots_plugin import RecommendedSlotsPlugin
from linkml.validator.validation_context import ValidationContext
SCHEMA = """id: https://w3id.org/test/recommended_slots
name: recommended_slots
prefixes:
xsd: http://www.w3.org/2001/XMLSchema#
default_range: string
types:
string:
uri: xsd:string
base: str
description: A character string
exact_mappings:
- schema:Text
classes:
Inlined:
attributes:
id:
identifier: true
value1:
recommended: true
value2:
Object:
attributes:
rec:
recommended: true
nonrec:
nested:
range: Object
nested_inline:
range: Inlined
multivalued: true
inlined: true
nested_inline_list:
range: Inlined
multivalued: true
inlined_as_list: true
"""
@pytest.fixture(scope="module")
def validation_context():
    """Module-scoped ValidationContext for SCHEMA, rooted at class 'Object'."""
    schema = yaml_loader.load(SCHEMA, SchemaDefinition)
    return ValidationContext(schema, "Object")
def test_valid_instance(validation_context):
    """Valid data should not yield any results."""
    results = RecommendedSlotsPlugin().process({"rec": "foo"}, validation_context)
    # The result iterator must be empty for valid data.
    with pytest.raises(StopIteration):
        next(results)
def test_missing_recommended_on_target_class(validation_context):
    """Data missing a recommended slot on the root object should yield a result."""
    results = RecommendedSlotsPlugin().process({"nonrec": "foo"}, validation_context)
    first = next(results)
    assert first.message == "Slot 'rec' is recommended on class 'Object' in /"
    # Exactly one result is expected.
    with pytest.raises(StopIteration):
        next(results)
def METHOD_NAME(validation_context):
"""Data missing a recommended slot on a nested object should yield a result"""
plugin = RecommendedSlotsPlugin()
instance = {"rec": "foo", "nested": {"nonrec": "foo"}}
result_iter = plugin.process(instance, validation_context)
assert next(result_iter).message == "Slot 'rec' is recommended on class 'Object' in /nested"
with pytest.raises(StopIteration):
next(result_iter)
def test_incorrect_type_in_slot(validation_context):
    """Data with an incorrect type in a slot should not yield results.

    Type checking is not the responsibility of this plugin, but the plugin
    must not implicitly assume it always receives correct types.
    """
    instance = {"rec": "foo", "nested": "this is the wrong type"}
    results = RecommendedSlotsPlugin().process(instance, validation_context)
    with pytest.raises(StopIteration):
        next(results)
def test_missing_recommended_inlined(validation_context):
    """Data missing a recommended slot on an object in an inlined collection should yield a result."""
    instance = {
        "rec": "foo",
        "nested_inline": {"a": {"value1": "1"}, "b": {"value1": "2"}, "c": {"value2": "3"}},
    }
    results = RecommendedSlotsPlugin().process(instance, validation_context)
    # Only entry "c" is missing 'value1'.
    message = next(results).message
    assert message == "Slot 'value1' is recommended on class 'Inlined' in /nested_inline/c"
    with pytest.raises(StopIteration):
        next(results)
def test_missing_recommended_inlined_as_list(validation_context):
    """Data missing a recommended slot on an object in an inlined list should yield a result."""
    instance = {
        "rec": "foo",
        "nested_inline_list": [
            {"id": "a", "value1": "1"},
            {"id": "b", "value1": "2"},
            {"id": "c", "value2": "3"},
        ],
    }
    results = RecommendedSlotsPlugin().process(instance, validation_context)
    # Only the list element at index 2 is missing 'value1'.
    message = next(results).message
    assert message == "Slot 'value1' is recommended on class 'Inlined' in /nested_inline_list/2"
    with pytest.raises(StopIteration):
        next(results)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for _helpers."""
from absl.testing import absltest
from tink import aead
from tink import hybrid
from tink import mac
from tink.proto import tink_pb2
from tink_config import _helpers
from util import test_keys
def METHOD_NAME():
aead.register()
mac.register()
class HelpersTest(absltest.TestCase):
  """Tests for the tink_config._helpers lookup functions."""

  def test_get_all_key_types(self):
    self.assertNotEmpty(_helpers.all_key_types())

  def test_get_aead_key_types(self):
    self.assertNotEmpty(_helpers.key_types_for_primitive(aead.Aead))

  def test_key_type_from_type_url(self):
    self.assertEqual(
        _helpers.key_type_from_type_url(
            'type.googleapis.com/google.crypto.tink.AesGcmKey'), 'AesGcmKey')

  def test_key_type_from_type_url_wrong_prefix_throws(self):
    with self.assertRaises(ValueError):
      _helpers.key_type_from_type_url(
          'type.googleapis.com/google.crypto.tinkAesGcmKey')

  def test_key_type_from_type_url_wrong_key_type_throws(self):
    with self.assertRaises(ValueError):
      _helpers.key_type_from_type_url(
          'type.googleapis.com/google.crypto.tink.InvalidKeyType29981')

  def test_supported_languages_for_key_type(self):
    self.assertCountEqual(
        _helpers.supported_languages_for_key_type('AesGcmKey'),
        ['cc', 'java', 'go', 'python'])

  def test_supported_languages_for_key_type_invalid(self):
    with self.assertRaises(ValueError):
      _helpers.supported_languages_for_key_type('InvalidKeyType21b9a1')

  def test_supported_languages_for_primitive(self):
    self.assertCountEqual(
        _helpers.supported_languages_for_primitive(aead.Aead),
        ['cc', 'java', 'go', 'python'])

  def test_supported_languages_for_primitive_invalid(self):
    with self.assertRaises(KeyError):
      _helpers.supported_languages_for_primitive('not a primitive, a string')

  def test_all_primitives(self):
    # Fixed: the original listed hybrid.HybridEncrypt twice; the intent is
    # clearly to cover both halves of the hybrid primitive pair.
    self.assertContainsSubset(
        [aead.Aead, hybrid.HybridEncrypt, hybrid.HybridDecrypt],
        _helpers.all_primitives())

  def test_primitive_for_keytype(self):
    self.assertEqual(_helpers.primitive_for_keytype('AesGcmKey'), aead.Aead)

  def test_primitive_for_keytype_throws_invalid(self):
    with self.assertRaises(ValueError):
      _helpers.primitive_for_keytype('InvalidKeyType776611')

  def test_is_asymmetric_public_key_primitive(self):
    self.assertFalse(_helpers.is_asymmetric_public_key_primitive(aead.Aead))
    self.assertFalse(
        _helpers.is_asymmetric_public_key_primitive(hybrid.HybridDecrypt))
    self.assertTrue(
        _helpers.is_asymmetric_public_key_primitive(hybrid.HybridEncrypt))

  def test_get_private_key_primitive(self):
    self.assertEqual(
        _helpers.get_private_key_primitive(hybrid.HybridEncrypt),
        hybrid.HybridDecrypt)

  def test_keyset_supported_true(self):
    keyset = test_keys.some_keyset_for_primitive(aead.Aead)
    self.assertTrue(_helpers.keyset_supported(keyset, aead.Aead, 'python'))

  def test_keyset_supported_keyset_wrong_primitive_false(self):
    keyset = test_keys.some_keyset_for_primitive(aead.Aead)
    self.assertFalse(_helpers.keyset_supported(keyset, mac.Mac, 'python'))

  def test_keyset_supported_keyset_wrong_language_false(self):
    keyset = test_keys.some_keyset_for_primitive(aead.Aead)
    self.assertFalse(
        _helpers.keyset_supported(keyset, aead.Aead, 'non-existing-language'))

  def test_keyset_two_keys_supported_true(self):
    # Duplicate the single key under a new key id to get a two-key keyset.
    keyset = test_keys.some_keyset_for_primitive(aead.Aead)
    parsed_keyset = tink_pb2.Keyset.FromString(keyset)
    key0 = parsed_keyset.key[0]
    parsed_keyset.key.append(key0)
    parsed_keyset.key[1].key_id += 1
    self.assertTrue(
        _helpers.keyset_supported(parsed_keyset.SerializeToString(), aead.Aead,
                                  'python'))

  def test_keyset_two_keys_unsupported_false(self):
    # Mixing in a MAC key must make the keyset unsupported for Aead.
    keyset0 = test_keys.some_keyset_for_primitive(aead.Aead)
    keyset1 = test_keys.some_keyset_for_primitive(mac.Mac)
    parsed_keyset0 = tink_pb2.Keyset.FromString(keyset0)
    parsed_keyset1 = tink_pb2.Keyset.FromString(keyset1)
    parsed_keyset0.key.append(parsed_keyset1.key[0])
    self.assertFalse(
        _helpers.keyset_supported(parsed_keyset0.SerializeToString(), aead.Aead,
                                  'python'))
if __name__ == '__main__':
absltest.main() |
import tkinter as tk
from tkinter import font, ttk
# Name of the custom dark ttk theme registered by this module.
THEME_DARK: str = "black"
# Default horizontal/vertical padding (leading, trailing) in pixels.
PADX: tuple[int, int] = (0, 5)
PADY: tuple[int, int] = (0, 5)
# Padding applied to frames and dialogs, in pixels.
FRAME_PAD: int = 5
DIALOG_PAD: int = 5
class Styles:
    """ttk style names used throughout the application."""

    tooltip: str = "Tooltip.TLabel"
    tooltip_frame: str = "Tooltip.TFrame"
    service_checkbutton: str = "Service.TCheckbutton"
    picker_button: str = "Picker.TButton"
    # Alert-button styles; the color encodes alert severity.
    no_alert: str = "NAlert.TButton"
    green_alert: str = "GAlert.TButton"
    red_alert: str = "RAlert.TButton"
    yellow_alert: str = "YAlert.TButton"
class Colors:
    """Color palette for the dark theme."""

    disabledfg: str = "DarkGrey"
    frame: str = "#424242"
    dark: str = "#222222"
    darker: str = "#121212"
    darkest: str = "black"
    lighter: str = "#626262"
    lightest: str = "#ffffff"
    # Selection background/foreground.
    selectbg: str = "#4a6984"
    selectfg: str = "#ffffff"
    white: str = "white"
    black: str = "black"
    # Default listbox background (matches stock light widgets).
    listboxbg: str = "#f2f1f0"
def METHOD_NAME(style: ttk.Style) -> None:
    """Create and register the custom dark ttk theme, derived from "clam".

    Args:
        style: the application's ttk.Style instance to register the theme on.
    """
    style.theme_create(
        THEME_DARK,
        "clam",
        {
            # Root element: defaults that every widget inherits.
            ".": {
                "configure": {
                    "background": Colors.frame,
                    "foreground": Colors.white,
                    "bordercolor": Colors.darkest,
                    "darkcolor": Colors.dark,
                    "lightcolor": Colors.lighter,
                    "troughcolor": Colors.darker,
                    "selectbackground": Colors.selectbg,
                    "selectforeground": Colors.selectfg,
                    "selectborderwidth": 0,
                    "font": "TkDefaultFont",
                },
                "map": {
                    "background": [
                        ("disabled", Colors.frame),
                        ("active", Colors.lighter),
                    ],
                    "foreground": [("disabled", Colors.disabledfg)],
                    "selectbackground": [("!focus", Colors.darkest)],
                    "selectforeground": [("!focus", Colors.white)],
                },
            },
            "TButton": {
                "configure": {
                    "width": 8,
                    "padding": (5, 1),
                    "relief": tk.RAISED,
                    "anchor": tk.CENTER,
                },
                "map": {
                    "relief": [("pressed", tk.SUNKEN)],
                    "shiftrelief": [("pressed", 1)],
                },
            },
            "TMenubutton": {"configure": {"padding": (5, 1), "relief": tk.RAISED}},
            # Check/radio buttons keep a light indicator well for contrast.
            "TCheckbutton": {
                "configure": {
                    "indicatorbackground": Colors.white,
                    "indicatormargin": (1, 1, 4, 1),
                }
            },
            "TRadiobutton": {
                "configure": {
                    "indicatorbackground": Colors.white,
                    "indicatormargin": (1, 1, 4, 1),
                }
            },
            # Text-entry widgets keep a light field with dark text.
            "TEntry": {
                "configure": {
                    "fieldbackground": Colors.white,
                    "foreground": Colors.black,
                    "padding": (2, 0),
                },
                "map": {"fieldbackground": [("disabled", Colors.frame)]},
            },
            "TSpinbox": {
                "configure": {
                    "fieldbackground": Colors.white,
                    "foreground": Colors.black,
                    "padding": (2, 0),
                },
                "map": {"fieldbackground": [("disabled", Colors.frame)]},
            },
            "TCombobox": {
                "configure": {
                    "fieldbackground": Colors.white,
                    "foreground": Colors.black,
                    "padding": (2, 0),
                }
            },
            "TLabelframe": {"configure": {"relief": tk.GROOVE}},
            "TNotebook.Tab": {
                "configure": {"padding": (6, 2, 6, 2)},
                "map": {"background": [("selected", Colors.lighter)]},
            },
            "Treeview": {
                "configure": {
                    "fieldbackground": Colors.white,
                    "background": Colors.white,
                    "foreground": Colors.black,
                },
                "map": {
                    "background": [("selected", Colors.selectbg)],
                    "foreground": [("selected", Colors.selectfg)],
                },
            },
            # Application-specific styles defined in Styles above.
            Styles.tooltip: {
                "configure": {"justify": tk.LEFT, "relief": tk.SOLID, "borderwidth": 0}
            },
            Styles.tooltip_frame: {"configure": {}},
            Styles.service_checkbutton: {
                "configure": {
                    "background": Colors.listboxbg,
                    "foreground": Colors.black,
                }
            },
        },
    )
def theme_change_menu(event: tk.Event) -> None:
    """Restyle a menu widget after a theme change; ignore non-menu widgets."""
    widget = event.widget
    if isinstance(widget, tk.Menu):
        style_menu(widget)
def style_menu(widget: tk.Widget) -> None:
    """Apply the current ttk theme colors to a plain tk menu widget."""
    style = ttk.Style()
    bg = style.lookup(".", "background")
    fg = style.lookup(".", "foreground")
    # Fall back to the plain background when the theme defines no lightcolor.
    abg = style.lookup(".", "lightcolor") or bg
    widget.config(
        background=bg,
        foreground=fg,
        activebackground=abg,
        activeforeground=fg,
        bd=0,
    )
def style_listbox(widget: tk.Widget) -> None:
    """Apply the current ttk theme colors to a plain tk listbox widget."""
    style = ttk.Style()
    bg = style.lookup(".", "background")
    fg = style.lookup(".", "foreground")
    # Fall back to black when the theme defines no bordercolor.
    border = style.lookup(".", "bordercolor") or "black"
    widget.config(
        background=bg,
        foreground=fg,
        highlightthickness=1,
        highlightcolor=border,
        highlightbackground=border,
        bd=0,
    )
def _alert_style(style: ttk.Style, name: str, background: str):
    """Configure one colored alert-button style (background turns white when active)."""
    options = dict(
        background=background,
        padding=0,
        relief=tk.RIDGE,
        borderwidth=1,
        font="TkDefaultFont",
        foreground="black",
        highlightbackground="white",
    )
    style.configure(name, **options)
    style.map(name, background=[("!active", background), ("active", "white")])
def theme_change(event: tk.Event) -> None:
    """Re-apply the custom button styles after the ttk theme changes."""
    style = ttk.Style()
    style.configure(Styles.picker_button, font="TkSmallCaptionFont")
    style.configure(
        Styles.no_alert, padding=0, relief=tk.RIDGE, borderwidth=1, font="TkDefaultFont"
    )
    # Colored alert variants share a single configuration helper.
    for style_name, color in (
        (Styles.green_alert, "green"),
        (Styles.yellow_alert, "yellow"),
        (Styles.red_alert, "red"),
    ):
        _alert_style(style, style_name, color)
def scale_fonts(fonts_size: dict[str, int], scale: float) -> None:
    """Resize every named Tk font found in *fonts_size* by *scale*.

    TkSmallCaptionFont is kept slightly smaller (8/9) than the others.
    """
    for name in font.names():
        if name not in fonts_size:
            continue
        if name == "TkSmallCaptionFont":
            new_size = int(fonts_size[name] * scale * 8 / 9)
        else:
            new_size = int(fonts_size[name] * scale)
        font.nametofont(name).config(size=new_size)
# -*- coding: utf-8 -*-
# This file is part of Tautulli.
#
# Tautulli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tautulli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from future.builtins import str
import requests
import threading
import plexpy
if plexpy.PYTHON2:
import database
import helpers
import logger
else:
from plexpy import database
from plexpy import helpers
from plexpy import logger
# OneSignal application id used by the official Tautulli Remote app.
_ONESIGNAL_APP_ID = '3b4b666a-d557-4b92-acdf-e2c8c4b95357'
# Sentinel stored instead of a OneSignal id when push notifications are disabled.
_ONESIGNAL_DISABLED = 'onesignal-disabled'
# device_token -> threading.Timer (pending verification) or True (verified).
TEMP_DEVICE_TOKENS = {}
def METHOD_NAME(token=None, remove=False, add=False, success=False):
    """Track a temporary device token during mobile-app registration.

    One flag is expected per call:
      success: mark a pending token as verified (cancel its expiry timer).
      remove:  drop the token, cancelling any pending expiry timer.
      add:     register a new token with a 5-minute self-expiry timer.
    """
    global TEMP_DEVICE_TOKENS

    if token in TEMP_DEVICE_TOKENS and success:
        # Token verified: cancel the pending expiry timer and flag as done.
        if isinstance(TEMP_DEVICE_TOKENS[token], threading.Timer):
            TEMP_DEVICE_TOKENS[token].cancel()
        TEMP_DEVICE_TOKENS[token] = True
    elif token in TEMP_DEVICE_TOKENS and remove:
        if isinstance(TEMP_DEVICE_TOKENS[token], threading.Timer):
            TEMP_DEVICE_TOKENS[token].cancel()
        del TEMP_DEVICE_TOKENS[token]
    elif token not in TEMP_DEVICE_TOKENS and add:
        invalidate_time = 5 * 60  # 5 minutes
        # The timer calls back into this function with remove=True so an
        # unverified token cleans itself up after the timeout.
        TEMP_DEVICE_TOKENS[token] = threading.Timer(invalidate_time, METHOD_NAME, args=[token, True])
        TEMP_DEVICE_TOKENS[token].start()
        # Keep the raw token out of the log output.
        logger._BLACKLIST_WORDS.add(token)
def get_temp_device_token(token=None):
    """Return the temp-registration state for *token*.

    A threading.Timer means pending verification, True means verified,
    None means unknown or expired.
    """
    return TEMP_DEVICE_TOKENS.get(token)
def get_mobile_devices(device_id=None, device_token=None):
    """Fetch mobile_devices rows, optionally filtered by device id and/or token."""
    clauses = []
    args = []
    if device_id:
        clauses.append("device_id = ?")
        args.append(device_id)
    if device_token:
        clauses.append("device_token = ?")
        args.append(device_token)

    where = "WHERE " + " AND ".join(clauses) if clauses else ""
    db = database.MonitorDatabase()
    return db.select("SELECT * FROM mobile_devices %s" % where, args=args)
def get_mobile_device_by_token(device_token=None):
    """Look up devices by token; returns None when no token is supplied."""
    if device_token:
        return get_mobile_devices(device_token=device_token)
    return None
def add_mobile_device(device_id=None, device_name=None, device_token=None,
                      platform=None, version=None, friendly_name=None, onesignal_id=None):
    """Register (or re-register) a mobile device keyed by device_id.

    Returns True on success, or None when the database write fails.
    """
    db = database.MonitorDatabase()

    keys = {'device_id': device_id}
    values = {'device_name': device_name,
              'device_token': device_token,
              'platform': platform,
              'version': version,
              'onesignal_id': onesignal_id}
    # Only overwrite the friendly name when one was supplied.
    if friendly_name:
        values['friendly_name'] = friendly_name

    try:
        result = db.upsert(table_name='mobile_devices', key_dict=keys, value_dict=values)
        # Re-blacklist device tokens so they stay masked in the logs.
        blacklist_logger()
    except Exception as e:
        logger.warn("Tautulli MobileApp :: Failed to register mobile device in the database: %s." % e)
        return

    if result == 'insert':
        logger.info("Tautulli MobileApp :: Registered mobile device '%s' in the database." % device_name)
    else:
        logger.info("Tautulli MobileApp :: Re-registered mobile device '%s' in the database." % device_name)

    set_last_seen(device_token=device_token)
    # Validate the OneSignal id in the background; it performs a network call.
    threading.Thread(target=set_official, args=[device_id, onesignal_id]).start()

    return True
def get_mobile_device_config(mobile_device_id=None):
    """Return the mobile_devices row for *mobile_device_id*, or None for an invalid id."""
    if str(mobile_device_id).isdigit():
        mobile_device_id = int(mobile_device_id)
    else:
        logger.error("Tautulli MobileApp :: Unable to retrieve mobile device config: invalid mobile_device_id %s." % mobile_device_id)
        return None

    db = database.MonitorDatabase()
    result = db.select_single("SELECT * FROM mobile_devices WHERE id = ?",
                              args=[mobile_device_id])
    # Hide the internal "disabled" sentinel from callers/UI.
    if result['onesignal_id'] == _ONESIGNAL_DISABLED:
        result['onesignal_id'] = ''
    return result
def set_mobile_device_config(mobile_device_id=None, **kwargs):
    """Update the configurable fields (friendly name) of an existing device.

    Returns True on success, False for an invalid id or database error.
    """
    if str(mobile_device_id).isdigit():
        mobile_device_id = int(mobile_device_id)
    else:
        # Fixed log-message typo: "exisiting" -> "existing".
        logger.error("Tautulli MobileApp :: Unable to set existing mobile device: invalid mobile_device_id %s." % mobile_device_id)
        return False

    keys = {'id': mobile_device_id}
    values = {'friendly_name': kwargs.get('friendly_name', '')}

    db = database.MonitorDatabase()
    try:
        db.upsert(table_name='mobile_devices', key_dict=keys, value_dict=values)
        logger.info("Tautulli MobileApp :: Updated mobile device agent: mobile_device_id %s." % mobile_device_id)
        # Refresh the log blacklist in case token values changed.
        blacklist_logger()
        return True
    except Exception as e:
        logger.warn("Tautulli MobileApp :: Unable to update mobile device: %s." % e)
        return False
def delete_mobile_device(mobile_device_id=None, device_id=None):
    """Delete a device row by internal id or by device_id; True when a delete ran."""
    db = database.MonitorDatabase()

    if mobile_device_id:
        logger.debug("Tautulli MobileApp :: Deleting mobile_device_id %s from the database." % mobile_device_id)
        db.action("DELETE FROM mobile_devices WHERE id = ?", args=[mobile_device_id])
        return True
    if device_id:
        logger.debug("Tautulli MobileApp :: Deleting device_id %s from the database." % device_id)
        db.action("DELETE FROM mobile_devices WHERE device_id = ?", args=[device_id])
        return True
    return False
def set_official(device_id, onesignal_id):
    """Validate the device's OneSignal id and persist the 'official' flag.

    official values come from validate_onesignal_id: 1 validated, 0 not
    validated / no id, 2 notifications disabled, -1 request failed.
    """
    db = database.MonitorDatabase()
    official = validate_onesignal_id(onesignal_id=onesignal_id)
    # A positively validated OneSignal id implies the official Android app;
    # only fill platform when it is not already set (coalesce).
    platform = 'android' if official > 0 else None

    try:
        result = db.action("UPDATE mobile_devices "
                           "SET official = ?, platform = coalesce(platform, ?) "
                           "WHERE device_id = ?",
                           args=[official, platform, device_id])
    except Exception as e:
        logger.warn("Tautulli MobileApp :: Failed to set official flag for device: %s." % e)
        return
def set_last_seen(device_token=None):
    """Stamp the current time on the device row matching *device_token*."""
    now = helpers.timestamp()
    db = database.MonitorDatabase()
    try:
        db.action("UPDATE mobile_devices SET last_seen = ? WHERE device_token = ?",
                  args=[now, device_token])
    except Exception as e:
        logger.warn("Tautulli MobileApp :: Failed to set last_seen time for device: %s." % e)
def validate_onesignal_id(onesignal_id):
    """Check a OneSignal player id against the OneSignal API.

    Returns:
        1 if the id exists (HTTP 200), 0 if no id or the lookup did not
        return 200, 2 if notifications are disabled (sentinel id), and
        -1 when the request itself failed.
    """
    if onesignal_id is None:
        return 0
    elif onesignal_id == _ONESIGNAL_DISABLED:
        return 2

    headers = {'Content-Type': 'application/json'}
    params = {'app_id': _ONESIGNAL_APP_ID}

    logger.info("Tautulli MobileApp :: Validating OneSignal ID")
    try:
        # Timeout added so a stalled request cannot hang the calling thread
        # (this runs in background threads during device registration).
        r = requests.get('https://onesignal.com/api/v1/players/{}'.format(onesignal_id),
                         headers=headers, params=params, timeout=30)
        status_code = r.status_code
        logger.info("Tautulli MobileApp :: OneSignal ID validation returned status code %s", status_code)
        return int(status_code == 200)
    except Exception as e:
        logger.warn("Tautulli MobileApp :: Failed to validate OneSignal ID: %s." % e)
        return -1
def revalidate_onesignal_ids():
    """Re-run OneSignal validation for every registered device."""
    for device in get_mobile_devices():
        set_official(device['device_id'], device['onesignal_id'])
def blacklist_logger():
    """Add every registered device's config values to the logger blacklist."""
    for device in get_mobile_devices():
        logger.blacklist_config(device)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.speech_commands import freeze
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
  """Tests that freeze.create_inference_graph builds a valid graph for each
  preprocessing mode and exposes the expected input/output tensors."""

  @test_util.run_deprecated_v1
  def testCreateInferenceGraphWithMfcc(self):
    """MFCC preprocessing should add exactly one Mfcc op to the graph."""
    with self.cached_session() as sess:
      freeze.create_inference_graph(
          wanted_words='a,b,c,d',
          sample_rate=16000,
          clip_duration_ms=1000.0,
          clip_stride_ms=30.0,
          window_size_ms=30.0,
          window_stride_ms=10.0,
          feature_bin_count=40,
          model_architecture='conv',
          preprocess='mfcc')
      self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
      self.assertIsNotNone(
          sess.graph.get_tensor_by_name('decoded_sample_data:0'))
      self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
      ops = [node.op for node in sess.graph_def.node]
      self.assertEqual(1, ops.count('Mfcc'))

  @test_util.run_deprecated_v1
  def testCreateInferenceGraphWithoutMfcc(self):
    """'average' preprocessing must not introduce any Mfcc op."""
    with self.cached_session() as sess:
      freeze.create_inference_graph(
          wanted_words='a,b,c,d',
          sample_rate=16000,
          clip_duration_ms=1000.0,
          clip_stride_ms=30.0,
          window_size_ms=30.0,
          window_stride_ms=10.0,
          feature_bin_count=40,
          model_architecture='conv',
          preprocess='average')
      self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
      self.assertIsNotNone(
          sess.graph.get_tensor_by_name('decoded_sample_data:0'))
      self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
      ops = [node.op for node in sess.graph_def.node]
      self.assertEqual(0, ops.count('Mfcc'))

  @test_util.run_deprecated_v1
  def testCreateInferenceGraphWithMicro(self):
    """'micro' preprocessing should still expose the standard I/O tensors."""
    with self.cached_session() as sess:
      freeze.create_inference_graph(
          wanted_words='a,b,c,d',
          sample_rate=16000,
          clip_duration_ms=1000.0,
          clip_stride_ms=30.0,
          window_size_ms=30.0,
          window_stride_ms=10.0,
          feature_bin_count=40,
          model_architecture='conv',
          preprocess='micro')
      self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
      self.assertIsNotNone(
          sess.graph.get_tensor_by_name('decoded_sample_data:0'))
      self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))

  @test_util.run_deprecated_v1
  def testFeatureBinCount(self):
    """A non-default feature_bin_count must still produce a valid graph."""
    with self.cached_session() as sess:
      freeze.create_inference_graph(
          wanted_words='a,b,c,d',
          sample_rate=16000,
          clip_duration_ms=1000.0,
          clip_stride_ms=30.0,
          window_size_ms=30.0,
          window_stride_ms=10.0,
          feature_bin_count=80,
          model_architecture='conv',
          preprocess='average')
      self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
      self.assertIsNotNone(
          sess.graph.get_tensor_by_name('decoded_sample_data:0'))
      self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
      ops = [node.op for node in sess.graph_def.node]
      self.assertEqual(0, ops.count('Mfcc'))
# Allow running this test module directly.
if __name__ == '__main__':
  test.main()
7,507 | error | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frederic Rodrigo 2013 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
import bz2
from .IssuesFile_PolygonFilter import PolygonFilter
class IssuesFile:
    """Base writer for analyser issue output.

    `dst` is either a filename (".bz2" names are written compressed) or an
    already-open file-like object. When `polygon_id` is given, issues whose
    geometry the polygon filter rejects are dropped.
    """

    def __init__(self, dst, version=None, polygon_id=None):
        self.dst = dst
        self.version = version
        self.filter = None
        if polygon_id:
            try:
                self.filter = PolygonFilter(polygon_id)
            except Exception as e:
                # Best effort: a broken/missing polygon only disables filtering.
                print(e)
                pass

    def begin(self):
        """Open the destination for writing and return the file object."""
        if isinstance(self.dst, str):
            if self.dst.endswith(".bz2"):
                self.output = bz2.open(self.dst, "wt")
            else:
                self.output = open(self.dst, "w")
        else:
            self.output = self.dst
        return self.output

    def end(self):
        """Close the destination, but only when we opened it ourselves."""
        if isinstance(self.dst, str):
            self.output.close()

    def analyser(self, timestamp, analyser_version, change=False):
        pass

    def analyser_end(self):
        pass

    def classs(self, id, item, level, tags, title, detail=None, fix=None, trap=None, example=None, source=None, resource=None):
        pass

    def error(self, classs, subclass, text, ids, types, fix, geom, allow_override=False):
        """Record one issue; drops it when the polygon filter rejects it."""
        if self.filter and not self.filter.apply(classs, subclass, geom):
            return

    def delete(self, t, id):
        pass

    # Maps diff action symbols to their human-readable names.
    FixTable = {'~': 'modify', '+': 'create', '-': 'delete'}

    def fixdiff(self, fixes):
        """
        Normalise a fix into its canonical nested form.
        Normal form is [[{'+':{'k1':'v1', 'k2', 'v2'}, '-':{'k3':'v3'}, '~':{'k4','v4'}}, {...}]]
        Array of alternative ways to fix -> Array of fix for objects part of error -> Dict for diff actions -> Dict for tags
        """
        if not isinstance(fixes, list):
            fixes = [[fixes]]
        elif not isinstance(fixes[0], list):
            # Default one level array is different way of fix
            fixes = list(map(lambda x: [x], fixes))
        return list(map(lambda fix:
            list(map(lambda f:
                # A dict without an explicit action key is an implicit modify.
                None if f is None else (f if '~' in f or '-' in f or '+' in f else {'~': f}),
                fix)),
            fixes))

    def filterfix(self, ids, types, fixes, geom):
        """Drop alternative fixes whose '+' actions would override tags that
        already exist on the corresponding target objects."""
        ret_fixes = []
        for fix in fixes:
            i = 0
            for f in fix:
                if f is not None and i < len(types):
                    osm_obj = next((x for x in geom[types[i]] if x['id'] == ids[i]), None)
                    if osm_obj:
                        fix_tags = f['+'].keys() if '+' in f else []
                        if len(set(osm_obj['tag'].keys()).intersection(fix_tags)) > 0:
                            # Fix try to override existing tag in object, drop the fix
                            # (i reset to 0 marks the whole alternative as rejected).
                            i = 0
                            break
                i += 1
            if i > 0:
                ret_fixes.append(fix)
        return ret_fixes
################################################################################
import unittest
class Test(unittest.TestCase):
    """Unit tests for IssuesFile.fixdiff normalisation."""

    def setUp(self):
        self.a = IssuesFile(None)

    def check(self, b, c):
        """Assert that fixdiff(b) produces exactly the normal form c."""
        import pprint
        d = self.a.fixdiff(b)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(d)
        # "Excepted" typo fixed to "Expected" in the failure message.
        self.assertEqual(c, d, "fixdiff Expected %s to %s but get %s" % (b, c, d))

    def test(self):
        self.check([[None]], [[None]])
        self.check({"t": "v"}, [[{"~": {"t": "v"}}]])
        self.check({"~": {"t": "v"}}, [[{"~": {"t": "v"}}]])
        self.check({"~": {"t": "v"}, "+": {"t": "v"}}, [[{"~": {"t": "v"}, "+": {"t": "v"}}]])
        self.check([{"~": {"t": "v"}, "+": {"t": "v"}}], [[{"~": {"t": "v"}, "+": {"t": "v"}}]])
        self.check([{"~": {"t": "v"}}, {"+": {"t": "v"}}], [[{"~": {"t": "v"}}], [{"+": {"t": "v"}}]])
        self.check([[{"t": "v"}], [{"t": "v"}]], [[{"~": {"t": "v"}}], [{"~": {"t": "v"}}]])
        self.check([[None, {"t": "v"}]], [[None, {"~": {"t": "v"}}]])
7,508 | pre operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vm perform-maintenance",
)
class PerformMaintenance(AAZCommand):
    """The operation to perform maintenance on a virtual machine.
    """

    _aaz_info = {
        "version": "2017-12-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/performmaintenance", "2017-12-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.vm_name = AAZStrArg(
            options=["-n", "--name", "--vm-name"],
            help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
            required=True,
            id_part="name",
            configured_default="vm",
        )
        return cls._args_schema

    def _execute_operations(self):
        # Callback hooks bracket the long-running HTTP operation.
        self.pre_operations()
        yield self.VirtualMachinesPerformMaintenance(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension hook invoked before the HTTP operation is issued.
        pass

    @register_callback
    def post_operations(self):
        # Extension hook invoked after the HTTP operation completes.
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class VirtualMachinesPerformMaintenance(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Both 200 and 202 start LRO polling against the async operation.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmName", self.ctx.args.vm_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-12-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.end_time = AAZStrType(
                serialized_name="endTime",
                flags={"read_only": True},
            )
            _schema_on_200.error = AAZObjectType()
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.start_time = AAZStrType(
                serialized_name="startTime",
                flags={"read_only": True},
            )
            _schema_on_200.status = AAZStrType(
                flags={"read_only": True},
            )

            error = cls._schema_on_200.error
            error.code = AAZStrType()
            error.details = AAZListType()
            error.innererror = AAZObjectType()
            error.message = AAZStrType()
            error.target = AAZStrType()

            details = cls._schema_on_200.error.details
            details.Element = AAZObjectType()

            _element = cls._schema_on_200.error.details.Element
            _element.code = AAZStrType()
            _element.message = AAZStrType()
            _element.target = AAZStrType()

            innererror = cls._schema_on_200.error.innererror
            innererror.errordetail = AAZStrType()
            innererror.exceptiontype = AAZStrType()

            return cls._schema_on_200
class _PerformMaintenanceHelper:
    """Helper class for PerformMaintenance"""


# Only the command class is part of this generated module's public API.
__all__ = ["PerformMaintenance"]
7,509 | test develop canonicalize path no args | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import pytest
import llnl.util.filesystem as fs
import spack.environment as ev
import spack.spec
from spack.main import SpackCommand
# CLI entry points under test, invoked as plain callables.
develop = SpackCommand("develop")
env = SpackCommand("env")

# These tests exercise POSIX-only paths/git behavior.
pytestmark = pytest.mark.not_on_windows("does not run on windows")
@pytest.mark.usefixtures("mutable_mock_env_path", "mock_packages", "mock_fetch", "config")
class TestDevelop:
    """Tests for `spack develop` against a mock environment."""

    def check_develop(self, env, spec, path=None):
        """Assert that *spec* is registered as a dev spec in *env*, both in
        memory and in the environment's YAML manifest."""
        path = path or spec.name

        # check in memory representation
        assert spec.name in env.dev_specs
        dev_specs_entry = env.dev_specs[spec.name]
        assert dev_specs_entry["path"] == path
        assert dev_specs_entry["spec"] == str(spec)

        # check yaml representation
        yaml = env.manifest[ev.TOP_LEVEL_KEY]
        assert spec.name in yaml["develop"]
        yaml_entry = yaml["develop"][spec.name]
        assert yaml_entry["spec"] == str(spec)
        if path == spec.name:
            # default paths aren't written out
            assert "path" not in yaml_entry
        else:
            assert yaml_entry["path"] == path

    def test_develop_no_path_no_clone(self):
        """--no-clone with a pre-existing default path registers the spec."""
        env("create", "test")
        with ev.read("test") as e:
            # develop checks that the path exists
            fs.mkdirp(os.path.join(e.path, "mpich"))
            develop("--no-clone", "mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"))

    def test_develop_no_clone(self, tmpdir):
        """--no-clone with an explicit path records that path."""
        env("create", "test")
        with ev.read("test") as e:
            develop("--no-clone", "-p", str(tmpdir), "mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"), str(tmpdir))

    def test_develop(self):
        """Plain develop clones into the default path."""
        env("create", "test")
        with ev.read("test") as e:
            develop("mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"))

    def test_develop_no_args(self):
        """`spack develop` with no args re-stages all registered dev specs."""
        env("create", "test")
        with ev.read("test") as e:
            # develop and remove it
            develop("mpich@1.0")
            shutil.rmtree(os.path.join(e.path, "mpich"))

            # test develop with no args
            develop()
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"))

    def test_develop_twice(self):
        """Running develop twice for the same spec is idempotent."""
        env("create", "test")
        with ev.read("test") as e:
            develop("mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"))

            develop("mpich@1.0")
            # disk representation isn't updated unless we write
            # second develop command doesn't change it, so we don't write
            # but we check disk representation
            e.write()
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"))
            assert len(e.dev_specs) == 1

    def test_develop_update_path(self, tmpdir):
        """Re-developing with a new -p path updates the recorded path."""
        env("create", "test")
        with ev.read("test") as e:
            develop("mpich@1.0")
            develop("-p", str(tmpdir), "mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"), str(tmpdir))
            assert len(e.dev_specs) == 1

    def test_develop_update_spec(self):
        """Re-developing with a new version updates the recorded spec."""
        env("create", "test")
        with ev.read("test") as e:
            develop("mpich@1.0")
            develop("mpich@2.0")
            self.check_develop(e, spack.spec.Spec("mpich@=2.0"))
            assert len(e.dev_specs) == 1

    def test_develop_canonicalize_path(self, monkeypatch, config):
        """Paths with variables (e.g. $user) are canonicalized before staging."""
        env("create", "test")
        with ev.read("test") as e:
            path = "../$user"
            abspath = spack.util.path.canonicalize_path(path, e.path)

            def check_path(stage, dest):
                assert dest == abspath

            monkeypatch.setattr(spack.stage.Stage, "steal_source", check_path)

            develop("-p", path, "mpich@1.0")
            self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path)

            # Check modifications actually worked
            assert spack.spec.Spec("mpich@1.0").concretized().satisfies("dev_path=%s" % abspath)

    def test_develop_canonicalize_path_no_args(self, monkeypatch, config):
        """Variable paths are also canonicalized when develop runs with no args."""
        env("create", "test")
        with ev.read("test") as e:
            path = "$user"
            abspath = spack.util.path.canonicalize_path(path, e.path)

            def check_path(stage, dest):
                assert dest == abspath

            monkeypatch.setattr(spack.stage.Stage, "steal_source", check_path)

            # Defensive check to ensure canonicalization failures don't pollute FS
            assert abspath.startswith(e.path)

            # Create path to allow develop to modify env
            fs.mkdirp(abspath)
            develop("--no-clone", "-p", path, "mpich@1.0")

            # Remove path to ensure develop with no args runs staging code
            os.rmdir(abspath)
            develop()

            self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path)

            # Check modifications actually worked
            assert spack.spec.Spec("mpich@1.0").concretized().satisfies("dev_path=%s" % abspath)
def _git_commit_list(git_repo_dir):
    """Return up to the 20 most recent short commit hashes in *git_repo_dir*."""
    git = spack.util.git.git()
    with fs.working_dir(git_repo_dir):
        log_output = git("log", "--pretty=format:%h", "-n", "20", output=str)
    return log_output.strip().split()
def test_develop_full_git_repo(
    mutable_mock_env_path,
    mock_git_version_info,
    install_mockery,
    mock_packages,
    monkeypatch,
    tmpdir,
    mutable_config,
    request,
):
    """A normal stage fetches a single commit; a develop stage must fetch the
    full branch history so the user can work with the repository."""
    repo_path, filename, commits = mock_git_version_info
    # Point the mock package's git URL at the local fixture repository.
    # NOTE(review): relies on spack.package_base / spack.util.git being
    # importable via the `spack` package — confirm against the file's imports.
    monkeypatch.setattr(
        spack.package_base.PackageBase, "git", "file://%s" % repo_path, raising=False
    )

    spec = spack.spec.Spec("git-test-commit@1.2").concretized()
    try:
        spec.package.do_stage()
        commits = _git_commit_list(spec.package.stage[0].source_path)
        # Outside of "spack develop" Spack will only pull exactly the commit it
        # needs, with no additional history
        assert len(commits) == 1
    finally:
        # Always clean the stage, even if the assertion above fails.
        spec.package.do_clean()

    # Now use "spack develop": look at the resulting stage directory and make
    # sure the git repo pulled includes the full branch history (or rather,
    # more than just one commit).
    env("create", "test")
    with ev.read("test"):
        develop("git-test-commit@1.2")

        location = SpackCommand("location")
        develop_stage_dir = location("git-test-commit").strip()
        commits = _git_commit_list(develop_stage_dir)
        assert len(commits) > 1
7,510 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetModelVersionResult',
    'AwaitableGetModelVersionResult',
    'get_model_version',
    'get_model_version_output',
]
@pulumi.output_type
class GetModelVersionResult:
    """
    Azure Resource Manager resource envelope.
    """
    def __init__(__self__, id=None, model_version_properties=None, name=None, system_data=None, type=None):
        # Each field is type-checked and stored via pulumi.set so the
        # @pulumi.output_type machinery can expose it through the getters.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if model_version_properties and not isinstance(model_version_properties, dict):
            raise TypeError("Expected argument 'model_version_properties' to be a dict")
        pulumi.set(__self__, "model_version_properties", model_version_properties)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="modelVersionProperties")
    def model_version_properties(self) -> 'outputs.ModelVersionResponse':
        """
        [Required] Additional attributes of the entity.
        """
        return pulumi.get(self, "model_version_properties")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")


class AwaitableGetModelVersionResult(GetModelVersionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetModelVersionResult(
            id=self.id,
            model_version_properties=self.model_version_properties,
            name=self.name,
            system_data=self.system_data,
            type=self.type)


def get_model_version(name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      version: Optional[str] = None,
                      workspace_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetModelVersionResult:
    """
    Azure Resource Manager resource envelope.


    :param str name: Container name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['version'] = version
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230601preview:getModelVersion', __args__, opts=opts, typ=GetModelVersionResult).value

    return AwaitableGetModelVersionResult(
        id=pulumi.get(__ret__, 'id'),
        model_version_properties=pulumi.get(__ret__, 'model_version_properties'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))


@_utilities.lift_output_func(get_model_version)
def get_model_version_output(name: Optional[pulumi.Input[str]] = None,
                             resource_group_name: Optional[pulumi.Input[str]] = None,
                             version: Optional[pulumi.Input[str]] = None,
                             workspace_name: Optional[pulumi.Input[str]] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetModelVersionResult]:
    """
    Azure Resource Manager resource envelope.


    :param str name: Container name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    ...
7,511 | run | #!/usr/bin/env python3
'''
Main Script to run all SiStrip DAQ O2Os at the same time.
@author: Huilin Qu
'''
import os
import sys
import atexit
import logging
import argparse
import subprocess
import traceback
import json
from functools import partial
import CondTools.SiStrip.o2o_helper as helper
# Environment variable that overrides the default log directory (/tmp).
logDirVar = 'O2O_LOG_FOLDER'

def run(args):
    """Launch one `o2o run` subprocess per analyzer and collect the results.

    Returns (is_ok, status): is_ok is False if any job failed; status maps
    each job name to flags for job success, upload success, fast-O2O use,
    and whether the payload changed.
    """
    logging.debug(args)
    is_ok = True
    status = {}
    processes = {}

    for analyzer in args.analyzers:
        o2ocmd = 'SiStripDAQPopCon.py {analyzer} {since} {cfgfile}'.format(
            analyzer=analyzer, since=args.since, cfgfile=args.cfgfile)
        # Placeholders below are filled in by the o2o job runner itself.
        o2ocmd += ' --destTags {destTags}'
        o2ocmd += ' --destDb {destDb}'
        o2ocmd += ' --inputTag {inputTag}'
        o2ocmd += ' --condDbRead {condDbRead}'
        o2ocmd += ' --hashmapDb {hashmapDb}'
        if args.skiplistFile:
            o2ocmd += ' --skiplistFile %s' % args.skiplistFile
        if args.whitelistFile:
            o2ocmd += ' --whitelistFile %s' % args.whitelistFile
        if args.debug:
            o2ocmd += ' --debug'
        jobname = analyzer.replace('O2O', '')
        cmd = 'o2o --db {db} -v run -n {jobname} "{o2ocmd}"'.format(db=args.db, jobname=jobname, o2ocmd=o2ocmd)
        logging.info('Start running command:\n %s' % cmd)
        processes[jobname] = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Make sure children are killed if this script dies unexpectedly.
        atexit.register(partial(helper.kill_subproc_noexcept, processes[jobname]))

    for jobname in processes:
        status[jobname] = {'job': None, 'upload': None, 'fast': None, 'changed': None}
        p = processes[jobname]
        log = p.communicate()[0].decode()
        logging.debug('=== log from o2o run ===\n%s' % log)
        if p.returncode == 0:
            logging.info('Job for %s finished successfully!' % jobname)
            status[jobname]['job'] = True
            status[jobname]['upload'] = True
            # Parse the @@@-tagged markers emitted by the job.
            for line in log.split('\n'):
                if '@@@' not in line:
                    continue
                if 'FastO2O' in line:
                    status[jobname]['fast'] = ('true' in line)
                if 'PayloadChange' in line:
                    status[jobname]['changed'] = ('true' in line)
        else:
            logging.error('Job %s FAILED!' % jobname)
            status[jobname]['job'] = '@@@CMSSW job return code = 0@@@' in log
            status[jobname]['upload'] = '@@@Upload return code = 0@@@' in log
            is_ok = False

    return is_ok, status

def summary(args, is_ok, status, logfile):
    """Log the overall result and email both a summary and the full log."""
    summary = json.dumps(status, sort_keys=True, indent=2)
    if is_ok:
        logging.info('O2O finished successfully! Summary: %s' % summary)
    else:
        logging.error('O2O FAILED! Summary: %s' % summary)
    debugLabel = '[TEST] ' if args.debug else ''
    # send the summary email
    helper.send_mail(subject='%sNew O2O, IOV: %s' % (debugLabel, args.since),
                     message=summary,
                     send_to=args.mail_to,
                     send_from=args.mail_from)
    # send the detailed log
    with open(logfile, 'r') as log:
        helper.send_mail(subject='%sNew O2O Log, IOV: %s' % (debugLabel, args.since),
                         message=log.read(),
                         send_to=args.mail_log_to,
                         send_from=args.mail_from)

def main():
    """Parse arguments, configure logging, run all O2O jobs and report."""
    parser = argparse.ArgumentParser(description='Run all SiStrip DAQ O2Os at the same time.')
    parser.add_argument('since', metavar='SINCE', type=str, help='Run number.')
    parser.add_argument('cfgfile', metavar='CFGLINES', help='File containing configuration lines.')
    parser.add_argument('--skiplistFile', default='', help='File containing the devices to be skipped in G1 O2O.')
    parser.add_argument('--whitelistFile', default='', help='File of the whitelisted devices in G1 O2O.')
    parser.add_argument('--analyzers',
                        default='SiStripO2OBadStrip,SiStripO2OFedCabling,SiStripO2OLatency,SiStripO2ONoises,SiStripO2OPedestals,SiStripO2OThreshold',
                        help='Which EDAnalyzers to run.')
    parser.add_argument('--mail-from', default='trk.o2o@cern.ch', help='Account to send email notification.')
    parser.add_argument('--mail-to', default='cms-tracker-o2o-notification@cern.ch', help='List of O2O notification recipients.')
    parser.add_argument('--mail-log-to', default='trk.o2o@cern.ch', help='List of O2O log recipients.')
    parser.add_argument('--db', default='pro', help='The database for o2o job management: pro ( for prod ) or dev ( for prep ). Default: %(default)s.')
    parser.add_argument('--debug', action="store_true", default=False, help='Switch on debug mode. Default: %(default)s.')
    args = parser.parse_args()

    if args.debug:
        args.mail_to = args.mail_log_to
    args.analyzers = args.analyzers.strip().split(',')
    args.mail_to = args.mail_to.strip().split(',')
    args.mail_log_to = args.mail_log_to.strip().split(',')

    # Should NOT use logging before it's set up
    try:
        logdir = os.environ[logDirVar] if logDirVar in os.environ else '/tmp'
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        logfile = os.path.join(logdir, 'SiStripsO2O_Run%s.log' % str(args.since))
        loglevel = logging.DEBUG if args.debug else logging.INFO
        helper.configLogger(logfile, loglevel)
    except Exception:
        # in case we failed before logging is set up
        # print the error, send an email, and exit
        helper.send_mail('O2O Failure, IOV: %s' % args.since, traceback.format_exc(), args.mail_to, args.mail_from)
        raise

    try:
        is_ok, status = run(args)
        summary(args, is_ok, status, logfile)
    except Exception:
        # in case we failed before logging is set up
        # print the error, send an email, and exit
        helper.send_mail('O2O Failure, IOV: %s' % args.since, traceback.format_exc(), args.mail_to, args.mail_from)
        raise

    if not is_ok:
        return ' --- O2O FAILED! ---'

if __name__ == '__main__':
    sys.exit(main())
7,512 | test without imports | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import lief
import random
from utils import get_sample
def test_without_imports():
    """
    By convention if a binary hasn't import, imphash is '0'
    """
    binary = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    assert int(lief.PE.get_imphash(binary), 16) == 0
def test_casse():
    """
    The imphash must be case-insensitive: mixed-case library and entry
    names hash identically to their canonical spelling.
    """
    upper = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    mixed = lief.PE.Binary(lief.PE.PE_TYPE.PE32)

    upper.add_library("KERNEL32.dll").add_entry("CreateMutexA")
    mixed.add_library("kernel32.dll").add_entry("CrEatEMutExa")

    assert lief.PE.get_imphash(upper) == lief.PE.get_imphash(mixed)
def test_order():
    """
    Shuffling the order in which entries are added must not change the hash.
    """
    functions = ["GetStringTypeW", "LCMapStringW", "GetCommandLineA", "TerminateProcess"]

    def build():
        binary = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
        lib = binary.add_library("kernel32.dll")
        random.shuffle(functions)
        for name in functions:
            lib.add_entry(name)
        print(lib)
        return binary

    assert lief.PE.get_imphash(build()) == lief.PE.get_imphash(build())
def test_ordinal():
    """
    Importing a function by ordinal must hash the same as importing it by name.
    """
    by_name = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    by_ordinal = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    functions = [
        "GetStringTypeW",
        "LCMapStringW",
        "GetCommandLineA",
        "TerminateProcess",
        "Beep",
        "CheckRemoteDebuggerPresent",
    ]

    lib_name = by_name.add_library("kernel32.dll")
    for function in functions:
        lib_name.add_entry(function)

    lib_ordinal = by_ordinal.add_library("kernel32.dll")
    for function in functions:
        if function == "Beep":
            # 0x8000001d: ordinal-import flag combined with Beep's ordinal.
            lib_ordinal.add_entry(lief.PE.ImportEntry(0x8000001d))
        else:
            lib_ordinal.add_entry(function)

    assert lief.PE.get_imphash(by_name) == lief.PE.get_imphash(by_ordinal)
def test_order_2():
    """
    Import order must not change the hash, even across multiple libraries.
    """
    libraries = {
        'KERNEL32.dll': [
            "GetStringTypeW",
            "LCMapStringW",
            "GetCommandLineA",
            "TerminateProcess",
            "Beep",
            "CheckRemoteDebuggerPresent",
        ],
        "ntdll.dll": [
            "NtWriteVirtualMemory",
            "NtYieldExecution",
            "PfxFindPrefix",
            "PfxInitialize",
            "PfxInsertPrefix",
            "PfxRemovePrefix",
            "PropertyLengthAsVariant",
            "RtlAbortRXact",
        ]
    }

    def build(binary):
        # Add every library and entry in a freshly shuffled order.
        lib_names = list(libraries.keys())
        random.shuffle(lib_names)
        for lib_name in lib_names:
            lib = binary.add_library(lib_name)
            entries = libraries[lib_name]
            random.shuffle(entries)
            for entry in entries:
                lib.add_entry(entry)

    binary_lhs = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    binary_rhs = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    build(binary_lhs)
    build(binary_rhs)

    assert lief.PE.get_imphash(binary_lhs) == lief.PE.get_imphash(binary_rhs)
def test_different():
    """
    Binaries with different import sets must hash differently.
    """
    libraries = {
        'KERNEL32.dll': [
            "GetStringTypeW",
            "LCMapStringW",
            "GetCommandLineA",
            "TerminateProcess",
            "Beep",
            "CheckRemoteDebuggerPresent",
        ],
        "ntdll.dll": [
            "NtWriteVirtualMemory",
            "NtYieldExecution",
            "PfxFindPrefix",
            "PfxInitialize",
            "PfxInsertPrefix",
            "PfxRemovePrefix",
            "PropertyLengthAsVariant",
            "RtlAbortRXact",
        ]
    }

    def build(binary, keep=lambda entry: True):
        # Add libraries/entries in shuffled order, restricted to `keep`.
        lib_names = list(libraries.keys())
        random.shuffle(lib_names)
        for lib_name in lib_names:
            lib = binary.add_library(lib_name)
            entries = libraries[lib_name]
            random.shuffle(entries)
            for entry in entries:
                if keep(entry):
                    lib.add_entry(entry)

    binary_lhs = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    binary_rhs = lief.PE.Binary(lief.PE.PE_TYPE.PE32)
    build(binary_lhs)
    # rhs only gets entries with even-length names -> different import set.
    build(binary_rhs, keep=lambda entry: len(entry) % 2 == 0)

    assert lief.PE.get_imphash(binary_lhs) != lief.PE.get_imphash(binary_rhs)
def test_pefile():
    """
    Our PEFILE imphash mode must reproduce pefile's published values.
    """
    expected = {
        "PE/PE64_x86-64_binary_notepad.exe": "38934ee4aaaaa8dab7c73508bc6715ca",
        "PE/PE32_x86_binary_PGO-PGI.exe": "4d7ac2eefa8a35d9c445d71412e8e71c",
    }
    for sample, digest in expected.items():
        binary = lief.parse(get_sample(sample))
        assert lief.PE.get_imphash(binary, lief.PE.IMPHASH_MODE.PEFILE) == digest
|
7,513 | view column lineage count | # Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test MySql connector with CLI
"""
from typing import List
from .common.test_cli_db import CliCommonDB
from .common_e2e_sqa_mixins import SQACommonMethods
class MysqlCliTest(CliCommonDB.TestSuite, SQACommonMethods):
    """E2E CLI metadata-ingestion test configuration for the MySQL connector.

    Purely declarative: ``CliCommonDB.TestSuite`` drives the workflow and reads
    the SQL snippets and expected counts declared here.  All numeric
    expectations are coupled to the seeded ``openmetadata_db`` test database.
    """

    # DDL exercising a wide spread of MySQL column types so the connector's
    # type mapping is covered end to end.
    create_table_query: str = """
        CREATE TABLE IF NOT EXISTS persons (
            id INT NOT NULL AUTO_INCREMENT,
            varchar_col VARCHAR(255),
            text_col TEXT,
            tinyint_col TINYINT,
            smallint_col SMALLINT,
            mediumint_col MEDIUMINT,
            int_col INT,
            bigint_col BIGINT,
            float_col FLOAT(5,2),
            double_col DOUBLE(5,2),
            decimal_col DECIMAL(5,2),
            date_col DATE,
            datetime_col DATETIME,
            timestamp_col TIMESTAMP,
            time_col TIME,
            year_col YEAR,
            binary_col BINARY(3),
            varbinary_col VARBINARY(3),
            blob_col BLOB(3),
            text2_col TEXT(3),
            enum_col ENUM('value1','value2'),
            set_col SET('value1','value2'),
            PRIMARY KEY (id)
        );
    """

    # View over the table above — used to exercise view/column lineage.
    create_view_query: str = """
        CREATE VIEW view_persons AS
            SELECT *
            FROM openmetadata_db.persons;
    """

    # Two seeded rows; ``inserted_rows_count`` derives its expectation from
    # the length of this list.
    insert_data_queries: List[str] = [
        """
    INSERT INTO persons (id, varchar_col, text_col, tinyint_col, smallint_col, mediumint_col, int_col, bigint_col, float_col, double_col, decimal_col, date_col, datetime_col, timestamp_col, time_col, year_col, binary_col,varbinary_col,blob_col,text2_col,enum_col,set_col) VALUES
    (1,'value1','text1',1,2,3,4,5,6.1,7.2,'8.3', '2023-07-13', '2023-07-13 06:04:45', '2023-07-13 06:04:45', '06:06:45', 2023,X'010203',X'010203',X'010203','text2', 'value1','value1,value2')""",
        """
    INSERT INTO persons (id, varchar_col, text_col, tinyint_col, smallint_col, mediumint_col, int_col, bigint_col, float_col, double_col, decimal_col, date_col, datetime_col, timestamp_col, time_col, year_col, binary_col,varbinary_col,blob_col,text2_col,enum_col,set_col) VALUES
    (2,'value2','text2',11,-12,-13,-14,-15,-16.1,-17.2,'18.3', '2023-09-13', '2023-09-13 06:04:45', '2023-09-13 06:10:45', '06:04:45', 2023,X'040506',X'040506',X'040506','text3', 'value2','value1');
    """,
    ]

    drop_table_query: str = """
        DROP TABLE IF EXISTS openmetadata_db.persons;
    """

    drop_view_query: str = """
        DROP VIEW  IF EXISTS openmetadata_db.view_persons;
    """

    @staticmethod
    def get_connector_name() -> str:
        """Connector identifier consumed by the common test suite."""
        return "mysql"

    def create_table_and_view(self) -> None:
        # Delegate to the shared SQLAlchemy helpers (mixin methods).
        SQACommonMethods.create_table_and_view(self)

    def delete_table_and_view(self) -> None:
        SQACommonMethods.delete_table_and_view(self)

    @staticmethod
    def expected_tables() -> int:
        # Tables the ingestion should discover in the seeded database —
        # TODO confirm against the current openmetadata_db seed data.
        return 49

    def inserted_rows_count(self) -> int:
        """Number of rows seeded into ``persons`` (one per insert query)."""
        return len(self.insert_data_queries)

    def METHOD_NAME(self) -> int:
        # NOTE(review): METHOD_NAME looks like a masked identifier — likely
        # ``view_column_lineage_count``: 22 matches the number of columns in
        # ``persons`` (and therefore ``view_persons``). Confirm before use.
        return 22

    @staticmethod
    def fqn_created_table() -> str:
        """Fully-qualified name of the fixture table inside OpenMetadata."""
        return "local_mysql.default.openmetadata_db.persons"

    @staticmethod
    def get_includes_schemas() -> List[str]:
        # Schema include-filter pattern used by the filtered-ingestion tests.
        return ["openmetadata_db.*"]

    @staticmethod
    def get_includes_tables() -> List[str]:
        return ["entity_*"]

    @staticmethod
    def get_excludes_tables() -> List[str]:
        return [".*bot.*"]

    @staticmethod
    def expected_filtered_schema_includes() -> int:
        return 0

    @staticmethod
    def expected_filtered_schema_excludes() -> int:
        return 1

    @staticmethod
    def expected_filtered_table_includes() -> int:
        return 59

    @staticmethod
    def expected_filtered_table_excludes() -> int:
        return 4

    @staticmethod
    def expected_filtered_mix() -> int:
        return 59
7,514 | user music dir | """macOS."""
from __future__ import annotations
import os.path
from .api import PlatformDirsABC
class MacOS(PlatformDirsABC):
    """
    Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
    <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
    Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`,
    `version <platformdirs.api.PlatformDirsABC.version>`,
    `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
    """

    def _versioned_home_dir(self, base: str) -> str:
        """Expand a ``~``-rooted *base* path and append ``$appname/$version``."""
        return self._append_app_name_and_version(os.path.expanduser(base))  # noqa: PTH111

    @property
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
        return self._versioned_home_dir("~/Library/Application Support")

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
        return self._append_app_name_and_version("/Library/Application Support")

    @property
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, same as `site_data_dir`"""
        return self.site_data_dir

    @property
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
        return self._versioned_home_dir("~/Library/Caches")

    @property
    def site_cache_dir(self) -> str:
        """:return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``"""
        return self._append_app_name_and_version("/Library/Caches")

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
        return self._versioned_home_dir("~/Library/Logs")

    @property
    def user_documents_dir(self) -> str:
        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
        return os.path.expanduser("~/Documents")  # noqa: PTH111

    @property
    def user_downloads_dir(self) -> str:
        """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""
        return os.path.expanduser("~/Downloads")  # noqa: PTH111

    @property
    def user_pictures_dir(self) -> str:
        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
        return os.path.expanduser("~/Pictures")  # noqa: PTH111

    @property
    def user_videos_dir(self) -> str:
        """:return: videos directory tied to the user, e.g. ``~/Movies``"""
        return os.path.expanduser("~/Movies")  # noqa: PTH111

    @property
    def METHOD_NAME(self) -> str:
        # NOTE(review): METHOD_NAME appears masked — by analogy with the
        # sibling properties this is presumably ``user_music_dir``.
        """:return: music directory tied to the user, e.g. ``~/Music``"""
        return os.path.expanduser("~/Music")  # noqa: PTH111

    @property
    def user_desktop_dir(self) -> str:
        """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""
        return os.path.expanduser("~/Desktop")  # noqa: PTH111

    @property
    def user_runtime_dir(self) -> str:
        """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
        return self._versioned_home_dir("~/Library/Caches/TemporaryItems")

    @property
    def site_runtime_dir(self) -> str:
        """:return: runtime directory shared by users, same as `user_runtime_dir`"""
        return self.user_runtime_dir


__all__ = [
    "MacOS",
]
7,515 | register | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """Mark *funcobj* as abstract and return it unchanged.

    Only meaningful on classes whose metaclass is (derived from) ABCMeta:
    such a class cannot be instantiated while any method marked this way
    remains unoverridden.  The abstract method itself stays callable via
    the usual ``super`` mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # The flag is all ABCMeta.__new__ looks for when collecting abstracts.
    funcobj.__isabstractmethod__ = True
    return funcobj
class abstractproperty(property):
    """A ``property`` subclass whose presence marks the property abstract.

    Only meaningful on classes whose metaclass is (derived from) ABCMeta:
    such a class cannot be instantiated until every abstract property is
    overridden.  The abstract property stays reachable through the usual
    ``super`` mechanisms.

    Read-only usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    For a read-write abstract property use the long property form:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """

    # Same flag abstractmethod() sets on plain functions.
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).

    NOTE(review): this is the Python 2 implementation (it handles
    old-style classes via ``types.ClassType`` and uses ``print >>``).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything. It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                        for name, value in namespace.items()
                        if getattr(value, "__isabstractmethod__", False))
        # Names inherited from bases stay abstract unless this class (or an
        # earlier base in the MRO) overrides them with a concrete attribute.
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry: virtual subclasses plus positive and
        # negative isinstance/issubclass caches (all weak, so registration
        # never keeps classes alive).
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def METHOD_NAME(cls, subclass):
        """Register a virtual subclass of an ABC."""
        # NOTE(review): METHOD_NAME looks like a masked identifier — the
        # docstring and X.register(X) comment below imply it is ``register``.
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances: all share type(instance) == instance type
        # object, so fall back to __class__ for the real class.
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        # __class__ and type(...) may disagree (e.g. proxies); accept either.
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
7,516 | get | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import os
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
import pathlib
except ImportError:
pathlib = None
from io import open
import sys
try:
from thread import get_ident
except ImportError:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
__all__ = ['UserDict', 'OrderedDict', 'open']
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
native_str = str
str = type('str')
def from_none(exc):
    """raise from_none(ValueError('a')) == raise ValueError('a') from None

    Emulates Python 3's ``raise ... from None`` on interpreters without the
    syntax: clearing ``__cause__`` and setting ``__suppress_context__`` hides
    the currently-handled exception from the printed traceback.
    """
    exc.__suppress_context__ = True
    exc.__cause__ = None
    return exc
# from reprlib 3.2.1
def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'
    def decorating_function(user_function):
        # Keys currently being repr'd, per (object, thread) so that
        # concurrent reprs of the same object in different threads both work.
        active = set()

        def wrapper(self):
            marker = id(self), get_ident()
            if marker in active:
                return fillvalue
            active.add(marker)
            try:
                return user_function(self)
            finally:
                active.discard(marker)

        # Can't use functools.wraps() here because of bootstrap issues
        wrapper.__module__ = getattr(user_function, '__module__')
        wrapper.__doc__ = getattr(user_function, '__doc__')
        wrapper.__name__ = getattr(user_function, '__name__')
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
# from collections 3.2.1
class _ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and can
    accessed or updated using the *maps* attribute.  There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.

    NOTE: backport of ``collections.ChainMap`` — keep semantics aligned with
    the stdlib implementation.
    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.

        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        # Hook for subclasses, mirroring dict.__missing__.
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                # can't use 'key in mapping' with defaultdict
                return mapping[key]
            except KeyError:
                pass
        # support subclasses that define __missing__
        return self.__missing__(key)

    def METHOD_NAME(self, key, default=None):
        # dict.get() semantics — METHOD_NAME is presumably the masked ``get``.
        # Note this costs two chain walks (``in`` then ``[]``).
        return self[key] if key in self else default

    def __len__(self):
        # O(total keys): builds the union of all maps.
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    @recursive_repr()
    def __repr__(self):
        # recursive_repr guards maps that (indirectly) contain this ChainMap.
        return '{0.__class__.__name__}({1})'.format(
            self, ', '.join(map(repr, self.maps))
        )

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        """
        New ChainMap or subclass with a new copy of
            maps[0] and refs to maps[1:]
        """
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        # Writes always target the first mapping only.
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        """
        Remove and return an item pair from maps[0].
        Raise KeyError is maps[0] is empty.
        """
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        """
        Remove *key* from maps[0] and return its value.
        Raise KeyError if *key* not in maps[0].
        """
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
try:
from collections import ChainMap
except ImportError:
ChainMap = _ChainMap
_ABC = getattr(
abc,
'ABC',
# Python 3.3 compatibility
abc.ABCMeta(native_str('__ABC'), (object,), dict(__metaclass__=abc.ABCMeta)),
)
class _PathLike(_ABC):
    """Abstract base class for implementing the file system path protocol."""

    @abc.abstractmethod
    def __fspath__(self):
        """Return the file system path representation of the object."""
        raise NotImplementedError

    @classmethod
    def __subclasshook__(cls, subclass):
        # Anything exposing __fspath__ counts as path-like.
        if hasattr(subclass, '__fspath__'):
            return True
        # Python 3.5's pathlib.Path predates __fspath__; special-case it.
        return bool(pathlib and issubclass(subclass, pathlib.Path))
PathLike = getattr(os, 'PathLike', _PathLike)
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
if not hasattr(path, '__fspath__') and isinstance(path, pathlib.Path):
# workaround for Python 3.5
return str(path)
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError(
"expected str, bytes or os.PathLike object, "
"not " + path_type.__name__
)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError(
"expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__, type(path_repr).__name__)
)
fspath = getattr(os, 'fspath', _fspath) |
7,517 | test absent | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.boto_route53 as boto53mod
import salt.modules.boto_vpc as botovpcmod
import salt.states.boto_route53 as boto_route53
from tests.support.mock import MagicMock, create_autospec, patch
@pytest.fixture
def configure_loader_modules():
    """Give the ``boto_route53`` state module an empty loader configuration."""
    return {boto_route53: {}}
@pytest.fixture
def patch_botomod_hosted_zones():
    """Patch ``__salt__`` with autospec'd boto execution-module mocks.

    ``create_autospec`` keeps the mock signatures in sync with the real
    ``boto_route53``/``boto_vpc`` functions, so calls with wrong arguments
    fail instead of silently passing.
    """
    with patch.dict(
        boto_route53.__salt__,
        {
            "boto_route53.describe_hosted_zones": create_autospec(
                boto53mod.describe_hosted_zones
            ),
            "boto_vpc.describe_vpcs": create_autospec(botovpcmod.describe_vpcs),
        },
    ):
        yield
@pytest.fixture
def fake_single_vpc(patch_botomod_hosted_zones):
    """Hosted-zone response shape where ``VPCs.VPC`` is a single dict."""
    boto_route53.__salt__["boto_vpc.describe_vpcs"].return_value = {
        "vpcs": [{"region": "fnordland", "id": "fnord"}],
    }
    boto_route53.__salt__["boto_route53.describe_hosted_zones"].return_value = {
        "HostedZone": {"Config": {"PrivateZone": "true"}},
        # Single-VPC form: a dict, not a list.
        "VPCs": {"VPC": {"VPCId": "fnord", "VPCRegion": "fnordland"}},
    }
@pytest.fixture
def fake_multiple_vpcs(patch_botomod_hosted_zones):
    """Hosted-zone response shape where ``VPCs`` is a list of dicts."""
    boto_route53.__salt__["boto_vpc.describe_vpcs"].return_value = {
        "vpcs": [{"region": "fnordland", "id": "fnord"}],
    }
    boto_route53.__salt__["boto_route53.describe_hosted_zones"].return_value = {
        "HostedZone": {"Config": {"PrivateZone": "true"}},
        # Multi-VPC form: a list, exercising the other branch of the state.
        "VPCs": [
            {"VPCId": "fnord", "VPCRegion": "fnordland"},
            {"VPCId": "fnord part 2", "VPCRegion": "fnordlandia"},
        ],
    }
def test_present():
    """
    Test to ensure the Route53 record is present.
    """
    name = "test.example.com."
    value = "1.1.1.1"
    zone = "example.com."
    record_type = "A"
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    # get_record answers, consumed one per present() call below (order matters):
    #   {}            -> record missing; test=False path, add_record fails
    #   {}            -> record missing; test=True, "set to be added"
    #   {"value": ""} -> record exists with different value, "set to be updated"
    #   False         -> presumably "lookup returned nothing to do" — the
    #                    state succeeds with no comment; TODO confirm intent.
    mock = MagicMock(side_effect=[{}, {}, {"value": ""}, False])
    mock_bool = MagicMock(return_value=False)
    with patch.dict(
        boto_route53.__salt__,
        {"boto_route53.get_record": mock, "boto_route53.add_record": mock_bool},
    ):
        with patch.dict(boto_route53.__opts__, {"test": False}):
            comt = "Failed to add {} Route53 record.".format(name)
            ret.update({"comment": comt})
            assert boto_route53.present(name, value, zone, record_type) == ret

        with patch.dict(boto_route53.__opts__, {"test": True}):
            comt = "Route53 record {} set to be added.".format(name)
            ret.update({"comment": comt, "result": None})
            assert boto_route53.present(name, value, zone, record_type) == ret

            comt = "Route53 record {} set to be updated.".format(name)
            ret.update({"comment": comt})
            assert boto_route53.present(name, value, zone, record_type) == ret

            ret.update({"comment": "", "result": True})
            assert boto_route53.present(name, value, zone, record_type) == ret
def METHOD_NAME():
    """
    Test to ensure the Route53 record is deleted.
    """
    # NOTE(review): METHOD_NAME looks like a masked identifier — this
    # exercises boto_route53.absent(), so presumably ``test_absent``.
    name = "test.example.com."
    zone = "example.com."
    record_type = "A"
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    # get_record answers in order: False -> record already gone;
    # True -> record exists (test=True, "set to be deleted").
    mock = MagicMock(side_effect=[False, True])
    with patch.dict(boto_route53.__salt__, {"boto_route53.get_record": mock}):
        comt = "{} does not exist.".format(name)
        ret.update({"comment": comt})
        assert boto_route53.absent(name, zone, record_type) == ret

        with patch.dict(boto_route53.__opts__, {"test": True}):
            comt = "Route53 record {} set to be deleted.".format(name)
            ret.update({"comment": comt, "result": None})
            assert boto_route53.absent(name, zone, record_type) == ret
def test_hosted_zone_present_should_not_fail_when_one_vpc_in_deets(fake_single_vpc):
    """Smoke test: a single-VPC (dict-shaped) response must not raise."""
    boto_route53.hosted_zone_present(
        name="fnord", private_zone=True, vpc_region="fnordland", vpc_name="fnord"
    )
def test_hosted_zone_present_should_not_fail_with_multiple_vpcs_in_deets(
    fake_multiple_vpcs,
):
    """Smoke test: a multi-VPC (list-shaped) response must not raise."""
    boto_route53.hosted_zone_present(
        name="fnord", private_zone=True, vpc_region="fnordland", vpc_name="fnord"
    )
7,518 | open configs | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Plan class to hold subgraph scheduling information."""
from typing import Dict, FrozenSet
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
from .graph import Tensor, Part
from .tensor_config import TensorConfig, MemoryRegion
@tvm._ffi.register_object("contrib.ethosu.cascader.Plan")
class Plan(Object):
    """
    A class which describes how to schedule a subgraph of Parts together.

    A Plan takes the form of a subgraph of connected Parts (recorded in part_group) with
    TensorConfigs for all of the required Tensors (recorded in tensor_configs). This information
    can be used to produce a Tensor Expression schedule with inter-operator scheduling. A Plan is
    necessarily single-output such that all non-output Parts are 'computed_at'ed the scope of the
    output Part. This is what achieves the technique referred to as 'cascading'. A Plan also has
    an interior memory region which specifies the region of memory into which all the Plans
    intermediate buffers should be allocated.

    Additionally, a Plan contains some other information used during the Plan generation and
    selection algorithms. Both the memory and cycles required to run the Plan are accounted for so
    that Plans can be ranked and Pareto-culled on these metrics. Furthermore, the TensorConfigs
    which are 'open' is recorded indicating that these are valid points to merge with another Plan.
    A Plan can only be turned into a schedule if it has no 'open' TensorConfigs - at which point
    the Plan is said to be 'closed'.

    Attributes
    ----------
    tensor_configs : Dict[Tensor, TensorConfig]
        The TensorConfigs specified by the Plan.
    open_configs : FrozenSet[TensorConfig]
        The TensorConfigs which are 'open' meaning they are a Plan input/output but have
        'interior' state.
    output_config : TensorConfig
        The TensorConfig of the Plan's output tensor.
    part_group : FrozenSet[Part]
        The Parts which are covered by the Plan.
    interior_region : MemoryRegion
        The MemoryRegion in which to store 'interior' Plan buffers.
    memory_usage : int
        The interior memory used by the Plan in bytes.
    cycles : int
        The cycles taken to execute the Plan.

    """

    # NOTE(review): METHOD_NAME below looks like a masked identifier — the
    # ``_open_configs`` field and the class docstring imply ``open_configs``.
    def __init__(
        self,
        tensor_configs: Dict[Tensor, TensorConfig],
        METHOD_NAME: FrozenSet[TensorConfig],
        output_config: TensorConfig,
        part_group: FrozenSet[Part],
        interior_region: MemoryRegion,
        memory_usage: int,
        cycles: int,
    ):
        # The FFI constructor takes plain lists, so flatten the dict values
        # and the frozen sets before crossing into C++.
        self.__init_handle_by_constructor__(
            _ffi_api.Plan,
            list(tensor_configs.values()),
            list(METHOD_NAME),
            output_config,
            list(part_group),
            interior_region,
            memory_usage,
            cycles,
        )

    def merge(self, other):
        """
        Merge two Plans with share an 'open' TensorConfig.

        The current Plan is referred to as the 'upper Plan' and the other Plan as the 'lower
        Plan'. The 'open' output config of the upper Plan must be an 'open' input config of the
        lower Plan. The Tensor referenced by these configs is the Tensor on which the two Plans
        will be merged. The merge process does the following:

        The tensor config maps will be merged with TensorConfigs from the upper Plan taking
        priority. The open configs will be merged with the TensorConfigs that are being merged
        having been removed. The output config will be that of the lower Plan. The part groups
        will be merged. The interior region is necessarily the same for both the upper and lower
        Plan. The cycles and memory usage will be summed.

        Parameters
        ----------
        other : Plan
            The Plan to merge with.

        Return
        ------
        Plan
            The merged Plan.

        """
        return _ffi_api.PlanMerge(self, other)

    @property
    def tensor_configs(self):
        """The TensorConfigs specified by the Plan."""
        # Rebuild the Tensor -> TensorConfig mapping from the FFI-side list.
        tensor_configs = {}
        for config in self._tensor_configs:
            tensor_configs[config.tensor] = config
        return tensor_configs

    @property
    def METHOD_NAME(self):
        """
        The TensorConfigs which are 'open' meaning they are a Plan input/output but have
        'interior' state.
        """
        return frozenset(self._open_configs)

    @property
    def output_config(self):
        """The TensorConfig of the Plan's output tensor."""
        return self._output_config

    @property
    def part_group(self):
        """The Parts which are covered by the Plan."""
        return frozenset(self._part_group)

    @property
    def interior_region(self):
        """The MemoryRegion in which to store 'interior' Plan buffers."""
        return self._interior_region

    @property
    def memory_usage(self):
        """The interior memory used by the Plan in bytes."""
        return self._memory_usage

    @property
    def cycles(self):
        """The cycles taken to execute the Plan."""
        return self._cycles

    def __repr__(self):
        # NOTE(review): the repr string ends with a trailing ", " and is never
        # closed with ")" — cosmetic, but worth fixing.
        return (
            f"Plan(tensor_configs={self.tensor_configs}, "
            f"open_configs={self.METHOD_NAME}, "
            f"output_config={self.output_config}, "
            f"part_group={self.part_group}, "
            f"interior_region={self.interior_region.name}, "
            f"memory_usage={self.memory_usage}, "
            f"cycles={self.cycles}, "
        )
7,519 | raw page | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import field_mask_pb2 # type: ignore
import proto # type: ignore
from google.cloud.talent_v4beta1.types import common
from google.cloud.talent_v4beta1.types import tenant as gct_tenant
__protobuf__ = proto.module(
package="google.cloud.talent.v4beta1",
manifest={
"CreateTenantRequest",
"GetTenantRequest",
"UpdateTenantRequest",
"DeleteTenantRequest",
"ListTenantsRequest",
"ListTenantsResponse",
},
)
class CreateTenantRequest(proto.Message):
    r"""The Request of the CreateTenant method.

    Attributes:
        parent (str):
            Required. Resource name of the project under which the
            tenant is created.

            The format is "projects/{project_id}", for example,
            "projects/foo".
        tenant (google.cloud.talent_v4beta1.types.Tenant):
            Required. The tenant to be created.
    """

    # Proto field numbers are part of the wire format — never renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    tenant: gct_tenant.Tenant = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gct_tenant.Tenant,
    )
class GetTenantRequest(proto.Message):
    r"""Request for getting a tenant by name.

    Attributes:
        name (str):
            Required. The resource name of the tenant to be retrieved.

            The format is "projects/{project_id}/tenants/{tenant_id}",
            for example, "projects/foo/tenants/bar".
    """

    # Proto field numbers are part of the wire format — never renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class UpdateTenantRequest(proto.Message):
    r"""Request for updating a specified tenant.

    Attributes:
        tenant (google.cloud.talent_v4beta1.types.Tenant):
            Required. The tenant resource to replace the
            current resource in the system.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Strongly recommended for the best service experience.

            If
            [update_mask][google.cloud.talent.v4beta1.UpdateTenantRequest.update_mask]
            is provided, only the specified fields in
            [tenant][google.cloud.talent.v4beta1.UpdateTenantRequest.tenant]
            are updated. Otherwise all the fields are updated.

            A field mask to specify the tenant fields to be updated.
            Only top level fields of
            [Tenant][google.cloud.talent.v4beta1.Tenant] are supported.
    """

    # Proto field numbers are part of the wire format — never renumber.
    tenant: gct_tenant.Tenant = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gct_tenant.Tenant,
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
class DeleteTenantRequest(proto.Message):
    r"""Request to delete a tenant.

    Attributes:
        name (str):
            Required. The resource name of the tenant to be deleted.

            The format is "projects/{project_id}/tenants/{tenant_id}",
            for example, "projects/foo/tenants/bar".
    """

    # Proto field numbers are part of the wire format — never renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class ListTenantsRequest(proto.Message):
    r"""List tenants for which the client has ACL visibility.

    Attributes:
        parent (str):
            Required. Resource name of the project under which the
            tenant is created.

            The format is "projects/{project_id}", for example,
            "projects/foo".
        page_token (str):
            The starting indicator from which to return
            results.
        page_size (int):
            The maximum number of tenants to be returned,
            at most 100. Default is 100 if a non-positive
            number is provided.
    """

    # Proto field numbers are part of the wire format — never renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=3,
    )
class ListTenantsResponse(proto.Message):
    r"""The List tenants response object.

    Attributes:
        tenants (MutableSequence[google.cloud.talent_v4beta1.types.Tenant]):
            Tenants for the current client.
        next_page_token (str):
            A token to retrieve the next page of results.
        metadata (google.cloud.talent_v4beta1.types.ResponseMetadata):
            Additional information for the API
            invocation, such as the request tracking id.
    """

    @property
    def METHOD_NAME(self):
        # The response itself acts as the page object for pagination helpers.
        # NOTE(review): METHOD_NAME looks masked — presumably ``raw_page``,
        # the hook google-api-core page iterators expect; confirm.
        return self

    # Proto field numbers are part of the wire format — never renumber.
    tenants: MutableSequence[gct_tenant.Tenant] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gct_tenant.Tenant,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    metadata: common.ResponseMetadata = proto.Field(
        proto.MESSAGE,
        number=3,
        message=common.ResponseMetadata,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
"""
1D Mesh
"""
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
from fipy.tools import numerix
from fipy.tools.dimensions.physicalField import PhysicalField
from fipy.tools import parallelComm
from fipy.meshes.nonUniformGrid1D import NonUniformGrid1D
__all__ = ["SphericalNonUniformGrid1D"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]
class SphericalNonUniformGrid1D(NonUniformGrid1D):
    """
    Creates a 1D spherical grid mesh.
    >>> mesh = SphericalNonUniformGrid1D(nx = 3)
    >>> print(mesh.cellCenters)
    [[ 0.5 1.5 2.5]]
    >>> mesh = SphericalNonUniformGrid1D(dx = (1, 2, 3))
    >>> print(mesh.cellCenters)
    [[ 0.5 2. 4.5]]
    >>> print(numerix.allclose(mesh.cellVolumes, (0.5, 13., 94.5))) # doctest: +PROCESSOR_0
    True
    >>> mesh = SphericalNonUniformGrid1D(nx = 2, dx = (1, 2, 3))
    Traceback (most recent call last):
    ...
    IndexError: nx != len(dx)
    >>> mesh = SphericalNonUniformGrid1D(nx=2, dx=(1., 2.)) + ((1.,),)
    >>> print(mesh.cellCenters)
    [[ 1.5 3. ]]
    >>> print(numerix.allclose(mesh.cellVolumes, (3.5, 28))) # doctest: +PROCESSOR_0
    True
    """
    def __init__(self, dx=1., nx=None, origin=(0,), overlap=2, communicator=parallelComm, *args, **kwargs):
        # Normalize ``origin`` to the same units as ``dx`` before the base
        # class builds the (dimensionless) grid.
        scale = PhysicalField(value=1, unit=PhysicalField(value=dx).unit)
        self.origin = PhysicalField(value=origin)
        self.origin /= scale
        super(SphericalNonUniformGrid1D, self).__init__(dx=dx,
                                                        nx=nx,
                                                        overlap=overlap,
                                                        communicator=communicator,
                                                        *args,
                                                        **kwargs)
        # Shift the vertices so the mesh starts at ``origin``.
        self.vertexCoords += origin
        self.args['origin'] = origin
    def _calcFaceCenters(self):
        # Base-class face centers, offset by the radial origin.
        faceCenters = super(SphericalNonUniformGrid1D, self)._calcFaceCenters()
        return faceCenters + self.origin
    def _calcFaceAreas(self):
        # Spherical face "area" scales as r**2.  Hoist the face-center
        # computation so it is performed once instead of twice.
        r = self._calcFaceCenters()[0]
        return r * r
    def METHOD_NAME(self):
        # Spherical correction to the base-class cell volumes.
        return super(SphericalNonUniformGrid1D, self).METHOD_NAME() / 2.
    def _translate(self, vector):
        # Translation rebuilds the mesh with a shifted origin.
        return SphericalNonUniformGrid1D(dx=self.args['dx'], nx=self.args['nx'],
                                         origin=numerix.array(self.args['origin']) + vector,
                                         overlap=self.args['overlap'])
    def __mul__(self, factor):
        # Uniform scaling of both the spacing and the origin.
        return SphericalNonUniformGrid1D(dx=self.args['dx'] * factor, nx=self.args['nx'],
                                         origin=numerix.array(self.args['origin']) * factor,
                                         overlap=self.args['overlap'])
    def _test(self):
        """
        These tests are not useful as documentation, but are here to ensure
        everything works as expected. Fixed a bug where the following throws
        an error on solve() when `nx` is a float.
        >>> from fipy import CellVariable, DiffusionTerm
        >>> mesh = SphericalNonUniformGrid1D(nx=3., dx=(1., 2., 3.))
        >>> var = CellVariable(mesh=mesh)
        >>> var.constrain(0., where=mesh.facesRight)
        >>> DiffusionTerm().solve(var)
        This test is for https://github.com/usnistgov/fipy/issues/372. Cell
        volumes were being returned as `binOps` rather than arrays.
        >>> m = SphericalNonUniformGrid1D(dx=(1., 2., 3., 4.), nx=4)
        >>> print(isinstance(m.cellVolumes, numerix.ndarray))
        True
        >>> print(isinstance(m._faceAreas, numerix.ndarray))
        True
        If the above types aren't correct, the divergence operator's value can be a `binOp`
        >>> print(isinstance(CellVariable(mesh=m).arithmeticFaceValue.divergence.value, numerix.ndarray))
        True
        """
def _test():
    """Run this module's doctests via fipy's doctest runner."""
    from fipy.tests import doctestPlus
    return doctestPlus.testmod()
if __name__ == "__main__":
    _test()
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stationary Stochastic Python Bandit environment with per-arm features."""
from typing import Callable, Optional, Sequence, Text
import gin
import numpy as np
from tf_agents.bandits.environments import bandit_py_environment
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import array_spec
from tf_agents.typing import types
GLOBAL_KEY = bandit_spec_utils.GLOBAL_FEATURE_KEY
PER_ARM_KEY = bandit_spec_utils.PER_ARM_FEATURE_KEY
NUM_ACTIONS_KEY = bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY
@gin.configurable
class StationaryStochasticPerArmPyEnvironment(
    bandit_py_environment.BanditPyEnvironment
):
  """Stationary Stochastic Bandit environment with per-arm features."""
  def __init__(
      self,
      global_context_sampling_fn: Callable[[], types.Array],
      arm_context_sampling_fn: Callable[[], types.Array],
      max_num_actions: int,
      reward_fn: Callable[[types.Array], Sequence[float]],
      num_actions_fn: Optional[Callable[[], int]] = None,
      batch_size: Optional[int] = 1,
      name: Optional[Text] = 'stationary_stochastic_per_arm',
  ):
    """Initializes the environment.
    In each round, global context is generated by global_context_sampling_fn,
    per-arm contexts are generated by arm_context_sampling_fn. The reward_fn
    function takes the concatenation of a global and a per-arm feature, and
    outputs a possibly random reward.
    In case `num_action_fn` is specified, the number of actions will be dynamic
    and a `num_actions` feature key indicates the number of actions in any given
    sample.
    Example:
      def global_context_sampling_fn():
        return np.random.randint(0, 10, [2])  # 2-dimensional global features.
      def arm_context_sampling_fn():
        return np.random.randint(-3, 4, [3])  # 3-dimensional arm features.
      def reward_fn(x):
        return sum(x)
      def num_actions_fn():
        return np.random.randint(2, 6)
      env = StationaryStochasticPerArmPyEnvironment(global_context_sampling_fn,
                                                    arm_context_sampling_fn,
                                                    5,
                                                    reward_fn,
                                                    num_actions_fn)
    Args:
      global_context_sampling_fn: A function that outputs a random 1d array or
        list of ints or floats. This output is the global context. Its shape and
        type must be consistent across calls.
      arm_context_sampling_fn: A function that outputs a random 1 array or list
        of ints or floats (same type as the output of
        `global_context_sampling_fn`). This output is the per-arm context. Its
        shape must be consistent across calls.
      max_num_actions: (int) the maximum number of actions in every sample. If
        `num_actions_fn` is not set, this many actions are available in every
        time step.
      reward_fn: A function that generates a reward when called with an
        observation.
      num_actions_fn: If set, it should be a function that outputs a single
        integer specifying the number of actions for a given time step. The
        value output by this function will be capped between 1 and
        `max_num_actions`. The number of actions will be encoded in the
        observation by the feature key `num_actions`.
      batch_size: The batch size.
      name: The name of this environment instance.
    """
    self._global_context_sampling_fn = global_context_sampling_fn
    self._arm_context_sampling_fn = arm_context_sampling_fn
    self._max_num_actions = max_num_actions
    self._reward_fn = reward_fn
    self._batch_size = batch_size
    self._num_actions_fn = num_actions_fn
    # Derive the observation spec from one sample of each sampling function;
    # per-arm features get an extra leading (max_num_actions,) dimension.
    observation_spec = {
        GLOBAL_KEY: array_spec.ArraySpec.from_array(
            global_context_sampling_fn()
        ),
        PER_ARM_KEY: array_spec.add_outer_dims_nest(
            array_spec.ArraySpec.from_array(arm_context_sampling_fn()),
            (max_num_actions,),
        ),
    }
    if self._num_actions_fn is not None:
      # Scalar spec whose dtype mirrors whatever integer type the
      # user-supplied ``num_actions_fn`` returns.
      num_actions_spec = array_spec.BoundedArraySpec(
          shape=(),
          dtype=np.dtype(type(self._num_actions_fn())),
          minimum=1,
          maximum=max_num_actions,
      )
      observation_spec.update({NUM_ACTIONS_KEY: num_actions_spec})
    action_spec = array_spec.BoundedArraySpec(
        shape=(),
        dtype=np.int32,
        minimum=0,
        maximum=max_num_actions - 1,
        name='action',
    )
    super(StationaryStochasticPerArmPyEnvironment, self).__init__(
        observation_spec, action_spec, name=name
    )
  def METHOD_NAME(self) -> bool:
    # This environment always produces batched observations and rewards.
    return True
  @property
  def batch_size(self) -> Optional[int]:
    return self._batch_size
  def _observe(self) -> types.NestedArray:
    # One fresh global context per batch member.
    global_obs = np.stack(
        [self._global_context_sampling_fn() for _ in range(self._batch_size)]
    )
    # batch_size * max_num_actions per-arm draws, reshaped to
    # (batch, max_num_actions, arm_feature_dim).
    arm_obs = np.reshape(
        [
            self._arm_context_sampling_fn()
            for _ in range(self._batch_size * self._max_num_actions)
        ],
        (self._batch_size, self._max_num_actions, -1),
    )
    self._observation = {GLOBAL_KEY: global_obs, PER_ARM_KEY: arm_obs}
    if self._num_actions_fn:
      # Truthiness check; functions are always truthy, so this is
      # equivalent to ``is not None`` as used in ``__init__``.
      num_actions = [self._num_actions_fn() for _ in range(self._batch_size)]
      # Clamp the user-provided counts into [1, max_num_actions].
      num_actions = np.maximum(num_actions, 1)
      num_actions = np.minimum(num_actions, self._max_num_actions)
      self._observation.update({NUM_ACTIONS_KEY: num_actions})
    return self._observation
  def _apply_action(self, action: np.ndarray) -> types.Array:
    if action.shape[0] != self.batch_size:
      raise ValueError('Number of actions must match batch size.')
    global_obs = self._observation[GLOBAL_KEY]  # pytype: disable=attribute-error  # trace-all-classes
    batch_size_range = range(self.batch_size)
    # Select the chosen arm's features for every batch member.
    arm_obs = self._observation[PER_ARM_KEY][batch_size_range, action, :]  # pytype: disable=attribute-error  # trace-all-classes
    # Reward is a function of the concatenated (global, chosen-arm) features.
    reward = np.stack(
        [
            self._reward_fn(np.concatenate((global_obs[b, :], arm_obs[b, :])))
            for b in batch_size_range
        ]
    )
    return reward
import sys
from _typeshed import StrOrBytesPath
from collections.abc import Callable, Hashable, Iterable, Sequence
from configparser import RawConfigParser
from re import Pattern
from threading import Thread
from typing import IO, Any, overload
from typing_extensions import Literal, SupportsIndex, TypeAlias, TypedDict
from . import Filter, Filterer, Formatter, Handler, Logger, _FilterType, _FormatStyle, _Level
# Default TCP port used by listen() below.
DEFAULT_LOGGING_CONFIG_PORT: int
# NOTE(review): presumably the errno reported by the config socket listener
# on connection reset — confirm against CPython's logging/config.py.
RESET_ERROR: int # undocumented
# Pattern used by valid_ident() to validate configuration identifiers.
IDENTIFIER: Pattern[str] # undocumented
if sys.version_info >= (3, 11):
    class _RootLoggerConfiguration(TypedDict, total=False):
        level: _Level
        # 3.11+ accepts filter instances and callables, not just ids.
        filters: Sequence[str | _FilterType]
        handlers: Sequence[str]
else:
    class _RootLoggerConfiguration(TypedDict, total=False):
        level: _Level
        filters: Sequence[str]
        handlers: Sequence[str]
class _LoggerConfiguration(_RootLoggerConfiguration, TypedDict, total=False):
    # Non-root loggers additionally accept ``propagate``.
    propagate: bool
# logging.Formatter gained the ``validate`` keyword in Python 3.8, so the
# "validate" key is only a valid formatter-config key on 3.8+ (the two
# branches below were previously swapped).
if sys.version_info >= (3, 8):
    _FormatterConfigurationTypedDict = TypedDict(
        "_FormatterConfigurationTypedDict",
        {"class": str, "format": str, "datefmt": str, "style": _FormatStyle, "validate": bool},
        total=False,
    )
else:
    _FormatterConfigurationTypedDict = TypedDict(
        "_FormatterConfigurationTypedDict", {"class": str, "format": str, "datefmt": str, "style": _FormatStyle}, total=False
    )
class _FilterConfigurationTypedDict(TypedDict):
    # A plain (non-factory) filter config requires only ``name``.
    name: str
# Formatter and filter configs can specify custom factories via the special `()` key.
# If that is the case, the dictionary can contain any additional keys
# https://docs.python.org/3/library/logging.config.html#user-defined-objects
_FormatterConfiguration: TypeAlias = _FormatterConfigurationTypedDict | dict[str, Any]
_FilterConfiguration: TypeAlias = _FilterConfigurationTypedDict | dict[str, Any]
# Handler config can have additional keys even when not providing a custom factory so we just use `dict`.
_HandlerConfiguration: TypeAlias = dict[str, Any]
class _OptionalDictConfigArgs(TypedDict, total=False):
    # Every dictConfig section except ``version`` is optional.
    formatters: dict[str, _FormatterConfiguration]
    filters: dict[str, _FilterConfiguration]
    handlers: dict[str, _HandlerConfiguration]
    loggers: dict[str, _LoggerConfiguration]
    root: _RootLoggerConfiguration | None
    incremental: bool
    disable_existing_loggers: bool
class _DictConfigArgs(_OptionalDictConfigArgs, TypedDict):
    # The schema version is required; only 1 is defined.
    version: Literal[1]
# Accept dict[str, Any] to avoid false positives if called with a dict
# type, since dict types are not compatible with TypedDicts.
#
# Also accept a TypedDict type, to allow callers to use TypedDict
# types, and for somewhat stricter type checking of dict literals.
def dictConfig(config: _DictConfigArgs | dict[str, Any]) -> None: ...
if sys.version_info >= (3, 10):
    # ``encoding`` parameter added in 3.10.
    def fileConfig(
        fname: StrOrBytesPath | IO[str] | RawConfigParser,
        defaults: dict[str, str] | None = None,
        disable_existing_loggers: bool = True,
        encoding: str | None = None,
    ) -> None: ...
else:
    def fileConfig(
        fname: StrOrBytesPath | IO[str] | RawConfigParser,
        defaults: dict[str, str] | None = None,
        disable_existing_loggers: bool = True,
    ) -> None: ...
def valid_ident(s: str) -> Literal[True]: ... # undocumented
def listen(port: int = 9030, verify: Callable[[bytes], bytes | None] | None = None) -> Thread: ...
def stopListening() -> None: ...
class ConvertingMixin: # undocumented
    # Shared helpers that lazily resolve ``ext://``/``cfg://`` style values
    # as container items are accessed.
    def convert_with_key(self, key: Any, value: Any, replace: bool = True) -> Any: ...
    def convert(self, value: Any) -> Any: ...
class ConvertingDict(dict[Hashable, Any], ConvertingMixin): # undocumented
    def __getitem__(self, key: Hashable) -> Any: ...
    def get(self, key: Hashable, default: Any = None) -> Any: ...
    def pop(self, key: Hashable, default: Any = None) -> Any: ...
class ConvertingList(list[Any], ConvertingMixin): # undocumented
    @overload
    def __getitem__(self, key: SupportsIndex) -> Any: ...
    @overload
    def __getitem__(self, key: slice) -> Any: ...
    def pop(self, idx: SupportsIndex = -1) -> Any: ...
class ConvertingTuple(tuple[Any, ...], ConvertingMixin): # undocumented
    @overload
    def __getitem__(self, key: SupportsIndex) -> Any: ...
    @overload
    def __getitem__(self, key: slice) -> Any: ...
class BaseConfigurator: # undocumented
    # Regexes used when parsing ``cfg://section.key[index]`` references.
    CONVERT_PATTERN: Pattern[str]
    WORD_PATTERN: Pattern[str]
    DOT_PATTERN: Pattern[str]
    INDEX_PATTERN: Pattern[str]
    DIGIT_PATTERN: Pattern[str]
    # Maps value prefixes (e.g. "ext", "cfg") to converter method names.
    value_converters: dict[str, str]
    importer: Callable[..., Any]
    def __init__(self, config: _DictConfigArgs | dict[str, Any]) -> None: ...
    def resolve(self, s: str) -> Any: ...
    def ext_convert(self, value: str) -> Any: ...
    def cfg_convert(self, value: str) -> Any: ...
    def convert(self, value: Any) -> Any: ...
    def configure_custom(self, config: dict[str, Any]) -> Any: ...
    def as_tuple(self, value: list[Any] | tuple[Any]) -> tuple[Any]: ...
class DictConfigurator(BaseConfigurator):
    # Drives dictConfig(): each configure_* method builds one kind of
    # logging object from its configuration sub-dict.
    def configure(self) -> None: ... # undocumented
    def configure_formatter(self, config: _FormatterConfiguration) -> Formatter | Any: ... # undocumented
    def METHOD_NAME(self, config: _FilterConfiguration) -> Filter | Any: ... # undocumented
    def add_filters(self, filterer: Filterer, filters: Iterable[_FilterType]) -> None: ... # undocumented
    def configure_handler(self, config: _HandlerConfiguration) -> Handler | Any: ... # undocumented
    def add_handlers(self, logger: Logger, handlers: Iterable[str]) -> None: ... # undocumented
    def common_logger_config(
        self, logger: Logger, config: _LoggerConfiguration, incremental: bool = False
    ) -> None: ... # undocumented
    def configure_logger(self, name: str, config: _LoggerConfiguration, incremental: bool = False) -> None: ... # undocumented
    def configure_root(self, config: _LoggerConfiguration, incremental: bool = False) -> None: ... # undocumented
# The class dictConfig() instantiates; replaceable by third parties.
dictConfigClass = DictConfigurator
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFirewallPolicyRuleCollectionGroupResult',
'AwaitableGetFirewallPolicyRuleCollectionGroupResult',
'get_firewall_policy_rule_collection_group',
'get_firewall_policy_rule_collection_group_output',
]
@pulumi.output_type
class GetFirewallPolicyRuleCollectionGroupResult:
    """
    Rule Collection Group resource.
    """
    def __init__(__self__, etag=None, id=None, name=None, METHOD_NAME=None, provisioning_state=None, rule_collections=None, type=None):
        # Defensive runtime checks: results are hydrated from raw invoke
        # output, so each non-None argument's type is validated before set.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if METHOD_NAME and not isinstance(METHOD_NAME, int):
            raise TypeError("Expected argument 'priority' to be a int")
        pulumi.set(__self__, "priority", METHOD_NAME)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if rule_collections and not isinstance(rule_collections, list):
            raise TypeError("Expected argument 'rule_collections' to be a list")
        pulumi.set(__self__, "rule_collections", rule_collections)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[int]:
        """
        Priority of the Firewall Policy Rule Collection Group resource.
        """
        return pulumi.get(self, "priority")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the firewall policy rule collection group resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="ruleCollections")
    def rule_collections(self) -> Optional[Sequence[Any]]:
        """
        Group of Firewall Policy rule collections.
        """
        return pulumi.get(self, "rule_collections")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Rule Group type.
        """
        return pulumi.get(self, "type")
class AwaitableGetFirewallPolicyRuleCollectionGroupResult(GetFirewallPolicyRuleCollectionGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, so an
        # already-resolved result can be awaited without ever suspending.
        if False:
            yield self
        return GetFirewallPolicyRuleCollectionGroupResult(
            etag=self.etag,
            id=self.id,
            name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            provisioning_state=self.provisioning_state,
            rule_collections=self.rule_collections,
            type=self.type)
def get_firewall_policy_rule_collection_group(firewall_policy_name: Optional[str] = None,
                                              resource_group_name: Optional[str] = None,
                                              rule_collection_group_name: Optional[str] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallPolicyRuleCollectionGroupResult:
    """
    Gets the specified FirewallPolicyRuleCollectionGroup.
    :param str firewall_policy_name: The name of the Firewall Policy.
    :param str resource_group_name: The name of the resource group.
    :param str rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
    """
    # Build the provider invoke arguments from the named parameters.
    __args__ = dict()
    __args__['firewallPolicyName'] = firewall_policy_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['ruleCollectionGroupName'] = rule_collection_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous, typed invoke against the Azure Native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20230401:getFirewallPolicyRuleCollectionGroup', __args__, opts=opts, typ=GetFirewallPolicyRuleCollectionGroupResult).value
    return AwaitableGetFirewallPolicyRuleCollectionGroupResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        METHOD_NAME=pulumi.get(__ret__, 'priority'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        rule_collections=pulumi.get(__ret__, 'rule_collections'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_firewall_policy_rule_collection_group)
def get_firewall_policy_rule_collection_group_output(firewall_policy_name: Optional[pulumi.Input[str]] = None,
                                                     resource_group_name: Optional[pulumi.Input[str]] = None,
                                                     rule_collection_group_name: Optional[pulumi.Input[str]] = None,
                                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFirewallPolicyRuleCollectionGroupResult]:
    """
    Gets the specified FirewallPolicyRuleCollectionGroup.
    :param str firewall_policy_name: The name of the Firewall Policy.
    :param str resource_group_name: The name of the resource group.
    :param str rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
    """
    # Body intentionally empty: the decorator forwards to the plain function
    # above and lifts its result into a pulumi.Output.
    ...
import numpy as np
from types import ModuleType
from fealpy.functionspace import LagrangeFiniteElementSpace
from ..quadrature import TriangleQuadrature
from .mesh_tools import unique_row, find_node, find_entity, show_mesh_2d
class SurfaceTriangleMesh():
    def __init__(self, mesh, surface, p=1, scale=None):
        """
        Initial a object of Surface Triangle Mesh.
        Parameters
        ----------
        self :
            Surface Triangle Mesh Object
        mesh :
            mesh object, represents a triangulation with flat triangle faces.
        surface :
            The continuous surface which was represented as a level set
            function.
        p : int
            The degree of the Lagrange space
        scale : float, optional
            Uniform scaling applied to the flat mesh before projecting the
            interpolation points onto the surface.
        Returns
        -------
        See Also
        --------
        Notes
        -----
        """
        self.mesh = mesh
        self.p = p
        self.space = LagrangeFiniteElementSpace(mesh, p)
        self.surface = surface
        self.ds = mesh.ds
        self.scale = scale
        if scale is not None:
            self.mesh.node *= scale
        # Project the Lagrange interpolation points onto the surface; when a
        # scale is given, project in unscaled coordinates and rescale after.
        if scale is None:
            self.node, d = self.surface.project(self.space.interpolation_points())
        else:
            self.node, d = self.surface.project(self.space.interpolation_points()/scale)
            self.node *= scale
        self.meshtype = 'stri'
        self.ftype = mesh.ftype
        self.itype = mesh.itype
        self.nodedata = {}
        self.celldata = {}
    def vtk_cell_type(self):
        # 69 corresponds to VTK's Lagrange triangle cell type.
        return 69
    def project(self, p):
        """Project points ``p`` onto the surface, honoring ``self.scale``."""
        if self.scale is None:
            return self.surface.project(p)
        else:
            p, d = self.surface.project(p/self.scale)
            return p*self.scale, d*self.scale
    def integrator(self, k, etype='cell'):
        # Quadrature rule of order ``k`` on the reference triangle.
        return TriangleQuadrature(k)
    def entity(self, etype=2):
        """Return the requested mesh entity array (cell, edge or node)."""
        if etype in {'cell', 2}:
            return self.ds.cell
        elif etype in {'edge', 1}:
            return self.ds.edge
        elif etype in {'node', 0}:
            return self.mesh.node
        else:
            raise ValueError("`entitytype` is wrong!")
    def entity_measure(self, etype=2):
        """Return curved cell areas or flat edge lengths."""
        p = self.p
        if etype in {'cell', 2}:
            return self.area(p+1)
        elif etype in {'edge', 'face', 1}:
            return self.mesh.entity_measure('edge')
        else:
            raise ValueError("`entitytype` is wrong!")
    def entity_barycenter(self, etype=2):
        # NOTE(review): ``p`` is unused here; barycenters are delegated to
        # the underlying flat mesh.
        p = self.p
        return self.mesh.entity_barycenter(etype=etype)
    def number_of_nodes(self):
        return self.node.shape[0]
    def number_of_edges(self):
        return self.mesh.ds.NE
    def number_of_cells(self):
        return self.mesh.ds.NC
    def geo_dimension(self):
        # Geometric (embedding) dimension of the surface nodes.
        return self.node.shape[1]
    def top_dimension(self):
        # Topological dimension of a triangle mesh.
        return 2
    def jacobi_matrix(self, bc, index=np.s_[:]):
        """Return the Jacobian of S_p w.r.t. K and the transformed gradients."""
        mesh = self.mesh
        cell2dof = self.space.dof.cell2dof
        grad = self.space.grad_basis(bc, index=index)
        # the transpose of the jacobi matrix between S_h and K
        Jh = mesh.jacobi_matrix(index=index)
        # the transpose of the jacobi matrix between S_p and S_h
        Jph = np.einsum(
                'ijm, ...ijk->...imk',
                self.node[cell2dof[index], :],
                grad)
        # the transpose of the jacobi matrix between S_p and K
        Jp = np.einsum('...ijk, imk->...imj', Jph, Jh)
        grad = np.einsum('ijk, ...imk->...imj', Jh, grad)
        return Jp, grad
    def METHOD_NAME(self, bc, index=None):
        """Return (unnormalized) surface normals and projected points at ``bc``.

        NOTE(review): the default ``index=None`` is forwarded down to
        ``jacobi_matrix``, whose own default is ``np.s_[:]`` — confirm the
        callees accept ``None`` (``bc_to_point`` guards for it explicitly).
        """
        Js, _, ps = self.surface_jacobi_matrix(bc, index=index)
        n = np.cross(Js[..., 0, :], Js[..., 1, :], axis=-1)
        return n, ps
    def surface_jacobi_matrix(self, bc, index=None):
        """Return the surface Jacobian, basis gradients and projected points."""
        Jp, grad = self.jacobi_matrix(bc, index=index)
        ps = self.bc_to_point(bc, index=index)
        Jsp = self.surface.jacobi_matrix(ps)
        Js = np.einsum('...ijk, ...imk->...imj', Jsp, Jp)
        return Js, grad, ps
    def bc_to_point(self, bc, index=None):
        """Map barycentric coordinates to points on the surface."""
        phi = self.space.basis(bc)
        cell2dof = self.space.dof.cell2dof
        if index is None:
            bcp = np.einsum('...ij, ijk->...ik', phi, self.node[cell2dof, :])
        else:
            bcp = np.einsum('...ij, ijk->...ik', phi, self.node[cell2dof[index], :])
        bcp, _ = self.project(bcp)
        return bcp
    def area(self, q=3):
        """Return per-cell areas of the curved cells using order-``q`` quadrature."""
        integrator = self.integrator(q)
        bcs, ws = integrator.quadpts, integrator.weights
        Jp, _ = self.jacobi_matrix(bcs)
        # Cross product of the two tangent columns gives the area element.
        n = np.cross(Jp[..., 0, :], Jp[..., 1, :], axis=-1)
        l = np.sqrt(np.sum(n**2, axis=-1))
        a = np.einsum('i, ij->j', ws, l)/2.0
        return a
    def add_plot(
            self, plot,
            nodecolor='w', edgecolor='k',
            cellcolor=[0.5, 0.9, 0.45], aspect='equal',
            linewidths=1, markersize=50,
            showaxis=False, showcolorbar=False, cmap='rainbow'):
        # Accept either a matplotlib module or an axes object.
        if isinstance(plot, ModuleType):
            fig = plot.figure()
            fig.set_facecolor('white')
            axes = fig.gca()
        else:
            axes = plot
        return show_mesh_2d(
                axes, self.mesh,
                nodecolor=nodecolor, edgecolor=edgecolor,
                cellcolor=cellcolor, aspect=aspect,
                linewidths=linewidths, markersize=markersize,
                showaxis=showaxis, showcolorbar=showcolorbar, cmap=cmap)
    def find_node(
            self, axes, node=None,
            index=None, showindex=False,
            color='r', markersize=100,
            fontsize=24, fontcolor='k'):
        # Defaults to the projected surface nodes.
        if node is None:
            node = self.node
        if (index is None) and (showindex is True):
            index = np.array(range(node.shape[0]))
        find_node(
                axes, node,
                index=index, showindex=showindex,
                color=color, markersize=markersize,
                fontsize=fontsize, fontcolor=fontcolor)
    def find_edge(
            self, axes,
            index=None, showindex=False,
            color='g', markersize=150,
            fontsize=24, fontcolor='k'):
        find_entity(
                axes, self.mesh, entity='edge',
                index=index, showindex=showindex,
                color=color, markersize=markersize,
                fontsize=fontsize, fontcolor=fontcolor)
    def find_cell(
            self, axes,
            index=None, showindex=False,
            color='y', markersize=200,
            fontsize=24, fontcolor='k'):
        find_entity(
                axes, self.mesh, entity='cell',
                index=index, showindex=showindex,
                color=color, markersize=markersize,
                fontsize=fontsize, fontcolor=fontcolor)
import datetime
from collections import namedtuple
from sqlalchemy import func as sa_func
from sqlalchemy.sql import and_, false, true
from CTFd.cache import cache
from CTFd.models import Challenges, Solves, Users, db
from CTFd.schemas.tags import TagSchema
from CTFd.utils import get_config
from CTFd.utils.dates import isoformat, unix_time_to_utc
from CTFd.utils.helpers.models import build_model_filters
from CTFd.utils.modes import generate_account_url, get_model
# Lightweight, cache-friendly read-only view of a challenge row, produced by
# get_all_challenges() below.
Challenge = namedtuple(
    "Challenge", ["id", "type", "name", "value", "category", "tags", "requirements"]
)
@cache.memoize(timeout=60)
def get_all_challenges(admin=False, field=None, q=None, **query_args):
    """Return challenges as ``Challenge`` namedtuples, memoized for 60s.

    Non-admin callers never see hidden or locked challenges. ``field``/``q``
    feed the generic model filter builder; extra kwargs become equality
    filters on ``Challenges``.
    """
    filters = build_model_filters(model=Challenges, query=q, field=field)
    chal_q = Challenges.query
    # Admins can see hidden and locked challenges in the admin view
    if admin is False:
        chal_q = chal_q.filter(
            and_(Challenges.state != "hidden", Challenges.state != "locked")
        )
    chal_q = (
        chal_q.filter_by(**query_args)
        .filter(*filters)
        .order_by(Challenges.value, Challenges.id)
    )
    # Serialize tags through the user-view schema.
    tag_schema = TagSchema(view="user", many=True)
    results = []
    for c in chal_q:
        ct = Challenge(
            id=c.id,
            type=c.type,
            name=c.name,
            value=c.value,
            category=c.category,
            requirements=c.requirements,
            tags=tag_schema.dump(c.tags).data,
        )
        results.append(ct)
    return results
@cache.memoize(timeout=60)
def METHOD_NAME(challenge_id, freeze=False):
    """Return the solves for a challenge as serializable dicts, memoized 60s.

    Solves by banned or hidden accounts are excluded. When ``freeze`` is true
    and a freeze time is configured, solves after the freeze are dropped.
    """
    Model = get_model()
    # Note that we specifically query for the Solves.account.name
    # attribute here because it is faster than having SQLAlchemy
    # query for the attribute directly and it's unknown what the
    # effects of changing the relationship lazy attribute would be
    solves = (
        Solves.query.add_columns(Model.name.label("account_name"))
        .join(Model, Solves.account_id == Model.id)
        .filter(
            Solves.challenge_id == challenge_id,
            Model.banned == False,
            Model.hidden == False,
        )
        .order_by(Solves.date.asc())
    )
    if freeze:
        freeze_time = get_config("freeze")
        if freeze_time:
            dt = datetime.datetime.utcfromtimestamp(freeze_time)
            solves = solves.filter(Solves.date < dt)
    results = []
    for solve in solves:
        # Separate out the account name and the Solve object from the SQLAlchemy tuple
        solve, account_name = solve
        results.append(
            {
                "account_id": solve.account_id,
                "name": account_name,
                "date": isoformat(solve.date),
                "account_url": generate_account_url(account_id=solve.account_id),
            }
        )
    return results
@cache.memoize(timeout=60)
def get_solve_ids_for_user_id(user_id):
    """Return the set of challenge ids solved by the given user's account."""
    user = Users.query.filter_by(id=user_id).first()
    rows = (
        Solves.query.with_entities(Solves.challenge_id)
        .filter(Solves.account_id == user.account_id)
        .all()
    )
    # Each row is a one-element tuple; unpack into a plain set of ids.
    return {challenge_id for (challenge_id,) in rows}
@cache.memoize(timeout=60)
def get_solve_counts_for_challenges(challenge_id=None, admin=False):
    """Return a mapping of challenge id -> solve count, memoized for 60s.

    Counts exclude banned/hidden accounts and, outside the admin view,
    solves made after the configured freeze time. With ``challenge_id=None``
    counts for all challenges are returned.
    """
    if challenge_id is None:
        challenge_id_filter = ()
    else:
        challenge_id_filter = (Solves.challenge_id == challenge_id,)
    AccountModel = get_model()
    freeze = get_config("freeze")
    if freeze and not admin:
        freeze_cond = Solves.date < unix_time_to_utc(freeze)
    else:
        freeze_cond = true()
    exclude_solves_cond = and_(
        AccountModel.banned == false(),
        AccountModel.hidden == false(),
    )
    # One grouped aggregate query instead of a count per challenge.
    solves_q = (
        db.session.query(
            Solves.challenge_id,
            sa_func.count(Solves.challenge_id),
        )
        .join(AccountModel)
        .filter(*challenge_id_filter, freeze_cond, exclude_solves_cond)
        .group_by(Solves.challenge_id)
    )
    solve_counts = {}
    for chal_id, solve_count in solves_q:
        solve_counts[chal_id] = solve_count
    return solve_counts
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import gzip
import sys
try:
import StringIO
except:
pass
from test.testlib.testcase import BaseTestCase
from unittest.mock import MagicMock, patch
import cfnlint.helpers
class TestGetUrlContent(BaseTestCase):
"""Test Get URL Content"""
@patch("cfnlint.helpers.urlopen")
def test_get_url_content_unzipped(self, mocked_urlopen):
"""Test success run"""
input_buffer = '{"key": "value"}'
cm = MagicMock()
cm.getcode.return_value = 200
cm.read.return_value = input_buffer.encode("utf-8")
cm.__enter__.return_value = cm
mocked_urlopen.return_value = cm
url = "http://foo.com"
result = cfnlint.helpers.get_url_content(url)
mocked_urlopen.assert_called_with(url)
self.assertEqual(result, '{"key": "value"}')
@patch("cfnlint.helpers.urlopen")
def test_get_url_content_zipped(self, mocked_urlopen):
"""Test success run"""
input_buffer = '{"key": "value"}'
cm = MagicMock()
cm.getcode.return_value = 200
cm.read.return_value = gzip.compress(input_buffer.encode("utf-8"))
cm.info.return_value = {"Content-Encoding": "gzip"}
cm.__enter__.return_value = cm
mocked_urlopen.return_value = cm
url = "http://foo.com"
result = cfnlint.helpers.get_url_content(url)
mocked_urlopen.assert_called_with(url)
self.assertEqual(result, '{"key": "value"}')
@patch("cfnlint.helpers.urlopen")
@patch("cfnlint.helpers.load_metadata")
@patch("cfnlint.helpers.save_metadata")
def test_get_url_content_zipped_cache_update(
self, mock_save_metadata, mock_load_metadata, mocked_urlopen
):
"""Test success run"""
input_buffer = '{"key": "value"}'
etag = "ETAG_ONE"
url = "http://foo.com"
mock_load_metadata.return_value = {}
cm = MagicMock()
cm.getcode.return_value = 200
cm.info.return_value = {"Content-Encoding": "gzip", "ETag": etag}
cm.read.return_value = gzip.compress(input_buffer.encode("utf-8"))
cm.__enter__.return_value = cm
mocked_urlopen.return_value = cm
result = cfnlint.helpers.get_url_content(url, caching=True)
mocked_urlopen.assert_called_with(url)
mock_load_metadata.assert_called_once()
mock_save_metadata.assert_called_once()
self.assertEqual(result, '{"key": "value"}')
@patch("cfnlint.helpers.urlopen")
@patch("cfnlint.helpers.load_metadata")
def METHOD_NAME(
self, mock_load_metadata, mocked_urlopen
):
"""Test success run"""
input_buffer = '{"key": "value"}'
etag = "ETAG_ONE"
url = "http://foo.com"
mock_load_metadata.return_value = {"etag": etag}
cm = MagicMock()
cm.getcode.return_value = 200
cm.info.return_value = {"Content-Encoding": "gzip", "ETag": etag}
cm.__enter__.return_value = cm
mocked_urlopen.return_value = cm
result = cfnlint.helpers.url_has_newer_version(url)
# Python2 does not support caching, so will always return true
self.assertFalse(result)
@patch("cfnlint.helpers.urlopen")
@patch("cfnlint.helpers.load_metadata")
def test_url_has_newer_version_negative(self, mock_load_metadata, mocked_urlopen):
    """url_has_newer_version is True when the server ETag differs from the cache."""
    etag = "ETAG_ONE"
    etag2 = "ETAG_TWO"
    url = "http://foo.com"
    # Cached ETag differs from the server's -> a newer version exists.
    mock_load_metadata.return_value = {"etag": etag}
    cm = MagicMock()
    cm.getcode.return_value = 200
    cm.info.return_value = {"Content-Encoding": "gzip", "ETag": etag2}
    cm.__enter__.return_value = cm  # urlopen is used as a context manager
    mocked_urlopen.return_value = cm
    result = cfnlint.helpers.url_has_newer_version(url)
    self.assertTrue(result)
# pylint: disable=attribute-defined-outside-init
import os
import time
import unittest
from tcms.core.contrib.linkreference.models import LinkReference
from tcms.issuetracker.types import Gitlab
from tcms.rpc.tests.utils import APITestCase
from tcms.testcases.models import BugSystem
from tcms.tests.factories import ComponentFactory, TestExecutionFactory
@unittest.skipUnless(
    os.getenv("TEST_BUGTRACKER_INTEGRATION"),
    "Bug tracker integration testing not enabled",
)
class TestGitlabIntegration(APITestCase):
    """Integration tests for the GitLab issue-tracker backend.

    Talks to a live GitLab instance at bugtracker.kiwitcms.org, so the
    whole class is gated behind the TEST_BUGTRACKER_INTEGRATION env var.
    """

    # Known-good issue used by the lookup/details tests below.
    existing_bug_id = 1
    existing_bug_url = "http://bugtracker.kiwitcms.org/root/kiwitcms/issues/1"
    existing_bug_url_in_group = (
        "http://bugtracker.kiwitcms.org/group/sub_group/kiwitcms_in_group/issues/1"
    )

    def _fixture_setup(self):
        """Create the test execution, component and BugSystem fixtures."""
        super()._fixture_setup()
        self.execution_1 = TestExecutionFactory()
        self.execution_1.case.text = "Given-When-Then"
        self.execution_1.case.save()  # will generate history object
        self.component = ComponentFactory(
            name="Gitlab integration", product=self.execution_1.run.plan.product
        )
        self.execution_1.case.add_component(self.component)
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="GitLab-EE for root/kiwitcms",
            tracker_type="tcms.issuetracker.types.Gitlab",
            base_url="http://bugtracker.kiwitcms.org/root/kiwitcms/",
            api_url="http://bugtracker.kiwitcms.org",
            api_password="ypCa3Dzb23o5nvsixwPA",
        )
        self.integration = Gitlab(bug_system, None)

    def METHOD_NAME(self):
        """Extract the numeric issue id from both GitLab URL styles."""
        result = self.integration.bug_id_from_url(self.existing_bug_url)
        self.assertEqual(self.existing_bug_id, result)
        # this is an alternative URL, with a dash
        result = self.integration.bug_id_from_url(
            "http://bugtracker.kiwitcms.org/root/kiwitcms/-/issues/1"
        )
        self.assertEqual(self.existing_bug_id, result)

    def test_bug_id_from_url_in_group(self):
        """Same extraction, but for a project nested inside a group/sub-group."""
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="GitLab-EE for group/sub_group/kiwitcms_in_group",
            tracker_type="tcms.issuetracker.types.Gitlab",
            base_url="http://bugtracker.kiwitcms.org/group/sub_group/kiwitcms_in_group/",
            api_url="http://bugtracker.kiwitcms.org",
            api_password="ypCa3Dzb23o5nvsixwPA",
        )
        integration = Gitlab(bug_system, None)
        result = integration.bug_id_from_url(self.existing_bug_url_in_group)
        self.assertEqual(self.existing_bug_id, result)
        # this is an alternative URL, with a dash
        result = integration.bug_id_from_url(
            "http://bugtracker.kiwitcms.org/group/sub_group/kiwitcms_in_group/-/issues/1"
        )
        self.assertEqual(self.existing_bug_id, result)

    def test_details_for_public_url(self):
        """details() fetches title/description of a public issue."""
        result = self.integration.details(self.existing_bug_url)
        self.assertEqual("Hello GitLab", result["title"])
        self.assertEqual("Created via CLI", result["description"])

    def test_details_for_public_url_in_group(self):
        """details() works for an issue in a group/sub-group project."""
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="GitLab-EE for group/sub_group/kiwitcms_in_group",
            tracker_type="tcms.issuetracker.types.Gitlab",
            base_url="http://bugtracker.kiwitcms.org/group/sub_group/kiwitcms_in_group/",
            api_url="http://bugtracker.kiwitcms.org",
            api_password="ypCa3Dzb23o5nvsixwPA",
        )
        integration = Gitlab(bug_system, None)
        result = integration.details(self.existing_bug_url_in_group)
        self.assertEqual("Hello GitLab Group", result["title"])
        self.assertEqual("Created via CLI", result["description"])

    def test_details_for_private_url(self):
        """details() can read a private issue via the configured API token."""
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="Private GitLab for root/katinar",
            tracker_type="tcms.issuetracker.types.Gitlab",
            base_url="http://bugtracker.kiwitcms.org/root/katinar/",
            api_url="http://bugtracker.kiwitcms.org",
            api_password="ypCa3Dzb23o5nvsixwPA",
        )
        integration = Gitlab(bug_system, None)
        result = integration.details(
            "http://bugtracker.kiwitcms.org/root/katinar/-/issues/1"
        )
        self.assertEqual("Hello Private Issue", result["title"])
        self.assertEqual("Created in secret via CLI", result["description"])

    def test_auto_update_bugtracker(self):
        """Linking a defect URL to a TE posts a confirmation comment on the issue."""
        repo_id = self.integration.repo_id
        gl_project = self.integration.rpc.projects.get(repo_id)
        gl_issue = gl_project.issues.get(self.existing_bug_id)
        # make sure there are no comments to confuse the test
        initial_comment_count = 0
        for comment in gl_issue.notes.list():
            initial_comment_count += 1
            self.assertNotIn("Confirmed via test execution", comment.body)
        # simulate user adding a new bug URL to a TE and clicking
        # 'Automatically update bug tracker'
        result = self.rpc_client.TestExecution.add_link(
            {
                "execution_id": self.execution_1.pk,
                "is_defect": True,
                "url": self.existing_bug_url,
            },
            True,
        )
        # making sure RPC above returned the same URL
        self.assertEqual(self.existing_bug_url, result["url"])
        # wait until comments have been refreshed b/c this seem to happen async
        retries = 0
        while len(gl_issue.notes.list()) <= initial_comment_count:
            time.sleep(1)
            retries += 1
            self.assertLess(retries, 20)
        # sort by id b/c the gitlab library returns newest comments first but
        # that may be depending on configuration !
        last_comment = sorted(gl_issue.notes.list(), key=lambda x: x.id)[-1]
        # assert that a comment has been added as the last one
        # and also verify its text
        for expected_string in [
            "Confirmed via test execution",
            f"TR-{self.execution_1.run_id}: {self.execution_1.run.summary}",
            self.execution_1.run.get_full_url(),
            f"TE-{self.execution_1.pk}: {self.execution_1.case.summary}",
        ]:
            self.assertIn(expected_string, last_comment.body)

    def test_report_issue_from_test_execution_1click_works(self):
        """The 1-click 'Report bug' flow opens a GitLab issue and links it back."""
        # simulate user clicking the 'Report bug' button in TE widget, TR page
        result = self.rpc_client.Bug.report(
            self.execution_1.pk, self.integration.bug_system.pk
        )
        self.assertEqual(result["rc"], 0)
        self.assertIn(self.integration.bug_system.base_url, result["response"])
        self.assertIn("/-/issues/", result["response"])
        # assert that the result looks like valid URL parameters
        new_issue_id = self.integration.bug_id_from_url(result["response"])
        repo_id = self.integration.repo_id
        gl_project = self.integration.rpc.projects.get(repo_id)
        issue = gl_project.issues.get(new_issue_id)
        self.assertEqual(f"Failed test: {self.execution_1.case.summary}", issue.title)
        for expected_string in [
            f"Filed from execution {self.execution_1.get_full_url()}",
            "Reporter",
            self.execution_1.run.plan.product.name,
            self.component.name,
            "Steps to reproduce",
            self.execution_1.case.text,
        ]:
            self.assertIn(expected_string, issue.description)
        # verify that LR has been added to TE
        self.assertTrue(
            LinkReference.objects.filter(
                execution=self.execution_1,
                url=result["response"],
                is_defect=True,
            ).exists()
        )
import io
import os
import shutil
from unittest.mock import MagicMock
import pandas as pd
import pytest
from ert.gui.tools.plot.plot_api import PlotApi
from ert.services import StorageService
class MockResponse:
    """Minimal stand-in for an HTTP response, as consumed by PlotApi."""

    def __init__(self, json_data, status_code, text="", url=""):
        # json_data doubles as both the .json() payload and raw .content.
        self.json_data = json_data
        self.status_code = status_code
        self.text = text
        self.url = url

    def json(self):
        """Return the stored payload, mirroring a real response's .json()."""
        return self.json_data

    @property
    def content(self):
        """Raw body; in this mock it is the same object as json_data."""
        return self.json_data

    def is_success(self):
        """True only for HTTP 200."""
        return self.status_code == 200
@pytest.fixture
def api(tmpdir, source_root, monkeypatch):
    """Yield a PlotApi whose storage session GETs are served by METHOD_NAME."""
    from contextlib import contextmanager

    @contextmanager
    def session():
        # Any .get() on the session is routed to the canned-response mock.
        yield MagicMock(get=METHOD_NAME)

    monkeypatch.setattr(StorageService, "session", session)
    with tmpdir.as_cwd():
        # Work inside a throwaway copy of the snake_oil test data set.
        test_data_root = source_root / "test-data"
        test_data_dir = os.path.join(test_data_root, "snake_oil")
        shutil.copytree(test_data_dir, "test_data")
        os.chdir("test_data")
        api = PlotApi()
        yield api
def METHOD_NAME(*args, **kwargs):
    """Fake storage-API GET: map args[0] (the request path) to canned data.

    Builds parquet blobs and dict payloads once per call, then dispatches on
    the requested path. Unknown paths get a 404 MockResponse.
    """
    # --- parquet payloads served for the /records/* endpoints ---
    summary_data = {
        "2010-01-20 00:00:00": [0.1, 0.2, 0.3, 0.4],
        "2010-02-20 00:00:00": [0.2, 0.21, 0.19, 0.18],
    }
    summary_df = pd.DataFrame(summary_data)
    summary_stream = io.BytesIO()
    summary_df.to_parquet(summary_stream)
    summary_parquet_data = summary_stream.getvalue()
    parameter_data = {"0": [0.1, 0.2, 0.3]}
    parameter_df = pd.DataFrame(parameter_data)
    parameter_stream = io.BytesIO()
    parameter_df.to_parquet(parameter_stream)
    parameter_parquet_data = parameter_stream.getvalue()
    gen_data = {
        "0": [0.1, 0.2, 0.3],
        "1": [0.1, 0.2, 0.3],
        "2": [0.1, 0.2, 0.3],
        "3": [0.1, 0.2, 0.3],
        "4": [0.1, 0.2, 0.3],
        "5": [0.1, 0.2, 0.3],
    }
    gen_df = pd.DataFrame(gen_data)
    gen_stream = io.BytesIO()
    gen_df.to_parquet(gen_stream)
    gen_parquet_data = gen_stream.getvalue()
    history_data = {
        "0": [1.0, 0.2, 1.0, 1.0, 1.0],
        "1": [1.1, 0.2, 1.1, 1.1, 1.1],
        "2": [1.2, 1.2, 1.2, 1.2, 1.3],
    }
    history_df = pd.DataFrame(history_data)
    history_stream = io.BytesIO()
    history_df.to_parquet(history_stream)
    history_parquet_data = history_stream.getvalue()
    # --- JSON payloads keyed by exact request path ---
    ensemble = {
        "/ensembles/ens_id_1": {"name": "ensemble_1"},
        "/ensembles/ens_id_2": {"name": ".ensemble_2"},
        "/ensembles/ens_id_3": {"name": "default_0"},
        "/ensembles/ens_id_4": {"name": "default_1"},
    }
    observations = {
        "/ensembles/ens_id_3/records/WOPR:OP1/observations": {
            "name": "WOPR:OP1",
            "errors": [0.05, 0.07],
            "values": [0.1, 0.7],
            "x_axis": ["2010-03-31T00:00:00", "2010-12-26T00:00:00"],
        },
        "/ensembles/ens_id_4/records/WOPR:OP1/observations": {
            "name": "WOPR:OP1",
            "errors": [0.05, 0.07],
            "values": [0.1, 0.7],
            "x_axis": ["2010-03-31T00:00:00", "2010-12-26T00:00:00"],
        },
        "/ensembles/ens_id_3/records/SNAKE_OIL_WPR_DIFF@199/observations": {
            "name": "SNAKE_OIL_WPR_DIFF",
            "errors": [0.05, 0.07, 0.05],
            "values": [0.1, 0.7, 0.5],
            "x_axis": [
                "2010-03-31T00:00:00",
                "2010-12-26T00:00:00",
                "2011-12-21T00:00:00",
            ],
        },
        "/ensembles/ens_id_4/records/SNAKE_OIL_WPR_DIFF@199/observations": {
            "name": "WOPR:OP1",
            "errors": [0.05, 0.07, 0.05],
            "values": [0.1, 0.7, 0.5],
            "x_axis": [
                "2010-03-31T00:00:00",
                "2010-12-26T00:00:00",
                "2011-12-21T00:00:00",
            ],
        },
        "/ensembles/ens_id_3/records/FOPR/observations": {
            "name": "FOPR",
            "errors": [0.05, 0.07],
            "values": [0.1, 0.7],
            "x_axis": ["2010-03-31T00:00:00", "2010-12-26T00:00:00"],
        },
    }
    parameters = {
        "/ensembles/ens_id_1/parameters": [
            {
                "name": "SNAKE_OIL_PARAM:BPR_138_PERSISTENCE",
                "labels": [],
                "userdata": {"data_origin": "GEN_KW"},
            },
            {
                "name": "SNAKE_OIL_PARAM:OP1_DIVERGENCE_SCALE",
                "labels": [],
                "userdata": {"data_origin": "GEN_KW"},
            },
        ],
        "/ensembles/ens_id_3/parameters": [
            {
                "name": "SNAKE_OIL_PARAM:BPR_138_PERSISTENCE",
                "labels": [],
                "userdata": {"data_origin": "GEN_KW"},
            },
            {
                "name": "I_AM_A_PARAM",
                "labels": [],
                "userdata": {"data_origin": "GEN_KW"},
            },
        ],
    }
    responses = {
        "/ensembles/ens_id_1/responses": {
            "BPR:1,3,8": {
                "name": "BPR:1,3,8",
                "id": "id_1",
                "userdata": {"data_origin": "Summary"},
                "has_observations": False,
            },
            "FOPR": {
                "name": "FOPR",
                "id": "id_999",
                "userdata": {"data_origin": "Summary"},
                "has_observations": True,
            },
            "SNAKE_OIL_WPR_DIFF@199": {
                "id": "id_88",
                "name": "SNAKE_OIL_WPR_DIFF@199",
                "userdata": {"data_origin": "GEN_DATA"},
                "has_observations": False,
            },
        },
        "/ensembles/ens_id_3/responses": {
            "BPR:1,3,8": {
                "name": "BPR:1,3,8",
                "id": "id_111111",
                "userdata": {"data_origin": "Summary"},
                "has_observations": False,
            },
            "WOPPER": {
                "name": "WOPPER",
                "id": "id_999",
                "userdata": {"data_origin": "Summary"},
                "has_observations": False,
            },
        },
    }
    ensembles = {
        "/experiments/exp_1/ensembles": [
            {"id": "ens_id_1", "userdata": {"name": "ensemble_1"}, "size": 25},
            {"id": "ens_id_3", "userdata": {"name": "default_0"}, "size": 99},
        ]
    }
    records = {
        "/ensembles/ens_id_3/records/FOPR": summary_parquet_data,
        "/ensembles/ens_id_3/records/BPR:1,3,8": summary_parquet_data,
        "/ensembles/ens_id_3/records/SNAKE_OIL_PARAM:BPR_138_PERSISTENCE": parameter_parquet_data,  # noqa
        "/ensembles/ens_id_3/records/SNAKE_OIL_PARAM:OP1_DIVERGENCE_SCALE": parameter_parquet_data,  # noqa
        "/ensembles/ens_id_3/records/SNAKE_OIL_WPR_DIFF@199": gen_parquet_data,
        "/ensembles/ens_id_3/records/FOPRH": history_parquet_data,
    }
    experiments = [
        {
            "name": "default",
            "id": "exp_1",
            "ensemble_ids": ["ens_id_1", "ens_id_2", "ens_id_3", "ens_id_4"],
            "priors": {},
            "userdata": {},
        }
    ]
    # Dispatch on exact path first; the "/experiments" substring match and
    # the 404 fallback come last, so order matters here.
    if args[0] in ensemble:
        return MockResponse({"userdata": ensemble[args[0]]}, 200)
    elif args[0] in observations:
        return MockResponse(
            [observations[args[0]]],
            200,
        )
    elif args[0] in ensembles:
        return MockResponse(ensembles[args[0]], 200)
    elif args[0] in parameters:
        return MockResponse(parameters[args[0]], 200)
    elif args[0] in responses:
        return MockResponse(responses[args[0]], 200)
    elif args[0] in records:
        return MockResponse(records[args[0]], 200)
    elif "/experiments" in args[0]:
        return MockResponse(experiments, 200)
    return MockResponse(None, 404, text="{'details': 'Not found'}", url=args[0])
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetPrivateLinkScopeResult',
    'AwaitableGetPrivateLinkScopeResult',
    'get_private_link_scope',
    'get_private_link_scope_output',
]
@pulumi.output_type
class GetPrivateLinkScopeResult:
    """
    An Azure Arc PrivateLinkScope definition.
    """
    # NOTE: generated code - each constructor argument is type-checked and
    # stored via pulumi.set; the getters read them back via pulumi.get.
    def __init__(__self__, id=None, location=None, name=None, properties=None, METHOD_NAME=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Azure resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Azure resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.HybridComputePrivateLinkScopePropertiesResponse':
        """
        Properties that define a Azure Arc PrivateLinkScope resource.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        The system meta data relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkScopeResult(GetPrivateLinkScopeResult):
    """Awaitable wrapper so the result can be used in async code unchanged."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Standard pulumi pattern: a no-op generator that immediately
        # resolves to a plain GetPrivateLinkScopeResult copy.
        if False:
            yield self
        return GetPrivateLinkScopeResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            METHOD_NAME=self.METHOD_NAME,
            tags=self.tags,
            type=self.type)
def get_private_link_scope(resource_group_name: Optional[str] = None,
                           scope_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkScopeResult:
    """
    Returns a Azure Arc PrivateLinkScope.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['scopeName'] = scope_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the 2023-06-20-preview hybridcompute API.
    __ret__ = pulumi.runtime.invoke('azure-native:hybridcompute/v20230620preview:getPrivateLinkScope', __args__, opts=opts, typ=GetPrivateLinkScopeResult).value
    return AwaitableGetPrivateLinkScopeResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_link_scope)
def get_private_link_scope_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                  scope_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkScopeResult]:
    """
    Returns a Azure Arc PrivateLinkScope.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
    """
    # Body intentionally empty: lift_output_func supplies the implementation.
    ...
import six
import attr
from attr import validators
__all__ = ['range_validator', 'param', 'percent_param', 'TaskParameters']
def _canonize_validator(current_validator):
"""
Convert current_validator to a new list and return it.
If current_validator is None return an empty list.
If current_validator is a list, return a copy of it.
If current_validator is another type of iterable, return a list version of it.
If current_validator is a single value, return a one-list containing it.
"""
if not current_validator:
return []
if isinstance(current_validator, (list, tuple)):
current_validator = list(current_validator)
else:
current_validator = [current_validator]
return current_validator
def range_validator(min_value, max_value):
    """Build an attrs-style validator enforcing an inclusive value range.

    :param min_value: Lower bound, inclusive; None disables the check.
    :param max_value: Upper bound, inclusive; None disables the check.
    :return: A callable (instance, attribute, value) raising ValueError
        when value falls outside [min_value, max_value].
    """
    def _range_validator(instance, attribute, value):
        too_small = (min_value is not None) and (value < min_value)
        too_large = (max_value is not None) and (value > max_value)
        if too_small or too_large:
            raise ValueError("{} must be in range [{}, {}]".format(attribute.name, min_value, max_value))
    return _range_validator
def param(
    validator=None,
    range=None,
    type=None,
    desc=None,
    metadata=None,
    *args,
    **kwargs
):
    """Declare one TaskParameters field backed by attr.ib.

    :param validator: A single validator or an iterable of them; anything
        from attr.validators is applicable.
    :param range: Optional (min, max) tuple of inclusive legal bounds;
        None for no limitation.
    :param type: Optional expected type (int, str, float supported);
        None places no limit on the type.
    :param desc: A string description of the parameter, for future use.
        NOTE: written into the metadata dict (mutated in place when one
        is supplied by the caller).
    :param metadata: A dictionary metadata of the parameter, for future use.
    :param args: Additional arguments forwarded to attr.attrib.
    :param kwargs: Additional keyword arguments forwarded to attr.attrib.
    :return: An attr.attrib instance for use with a TaskParameters class.

    Warning: Do not create an immutable param using args or kwargs. It will
    cause connect method of the TaskParameters class to fail.
    """
    meta = metadata if metadata else {}
    meta["desc"] = desc
    checks = _canonize_validator(validator)
    if type:
        checks.append(validators.optional(validators.instance_of(type)))
    if range:
        checks.append(range_validator(*range))
    return attr.ib(validator=checks, type=type, metadata=meta, *args, **kwargs)
def percent_param(*args, **kwargs):
    """Shorthand for a float param constrained to the inclusive range (0, 1)."""
    return param(type=float, range=(0, 1), *args, **kwargs)
class _AttrsMeta(type):
    """Metaclass that wraps every created class with attr.s.

    Lets TaskParameters subclasses declare param() fields without each
    needing an explicit @attr.s decorator.
    """
    def __new__(mcs, name, bases, dct):
        # Build the class normally, then hand it to attr.s so the declared
        # attr.ib fields generate __init__/__eq__/etc.
        new_class = super(_AttrsMeta, mcs).__new__(mcs, name, bases, dct)
        return attr.s(new_class)
@six.add_metaclass(_AttrsMeta)
class TaskParameters(object):
    """Base class for a set of task parameters.

    Subclass it and declare fields with param()/percent_param() to build a
    parameter set that can be connected to a task, e.g.::

        class MyParams(TaskParameters):
            iterations = param(
                type=int,
                desc="Number of iterations to run",
                range=(0, 100000),
            )
            target_accuracy = percent_param(
                desc="The target accuracy of the model",
            )
    """

    def to_dict(self):
        """Return a new dict mapping parameter names to their current values."""
        return attr.asdict(self)

    def METHOD_NAME(self, source_dict):
        """Update parameters in place from a dictionary.

        :param source_dict: One entry per parameter to update.
        :raises ValueError: On the first key that is not a declared parameter
            (earlier keys in iteration order are already applied by then).
        """
        for name, new_value in source_dict.items():
            if hasattr(self, name):
                setattr(self, name, new_value)
            else:
                raise ValueError("Unknown key {} in {} object".format(name, type(self).__name__))

    def connect(self, task):
        """Connect this parameter set to a task.

        Running locally, the task saves the parameters from self; running
        with a worker, self is updated from the task's saved parameters.

        :param task: The task to connect to.
        :type task: .Task
        """
        return task.connect(self)
import pytest
import string
from random import randrange, choice
from psycopg import sql, errors as e
from psycopg.pq import Format
from psycopg.adapt import PyFormat
from psycopg.types.numeric import Int4
from ..utils import eur, gc_collect, gc_count
from ..test_copy import sample_text, sample_binary # noqa
from ..test_copy import ensure_table, sample_records
from ..test_copy import sample_tabledef as sample_tabledef_pg
# CRDB int/serial are int8
# Rewrite the shared Postgres table definition so columns are int4 on CRDB.
sample_tabledef = sample_tabledef_pg.replace("int", "int4").replace("serial", "int4")
# Every test in this module runs only against CockroachDB.
pytestmark = pytest.mark.crdb
@pytest.mark.parametrize(
    "format, buffer",
    [(Format.TEXT, "sample_text"), (Format.BINARY, "sample_binary")],
)
def test_copy_in_buffers(conn, format, buffer):
    """COPY FROM STDIN accepts a pre-built buffer in either wire format."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with cursor.copy(f"copy copy_in from stdin {copyopt(format)}") as writer:
        # Look up the module-level sample buffer by name.
        writer.write(globals()[buffer])
    rows = cursor.execute("select * from copy_in order by 1").fetchall()
    assert rows == sample_records
def test_copy_in_buffers_pg_error(conn):
    """A server-side error (PK violation) surfaces and poisons the transaction."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with pytest.raises(e.UniqueViolation):
        with cursor.copy("copy copy_in from stdin") as writer:
            # Writing the same sample twice violates the primary key.
            writer.write(sample_text)
            writer.write(sample_text)
    assert conn.info.transaction_status == conn.TransactionStatus.INERROR
def METHOD_NAME(conn):
    """Text-format COPY also accepts str input, not just bytes."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with cursor.copy("copy copy_in from stdin") as writer:
        writer.write(sample_text.decode())
    rows = cursor.execute("select * from copy_in order by 1").fetchall()
    assert rows == sample_records
@pytest.mark.xfail(reason="bad sqlstate - CRDB #81559")
def test_copy_in_error(conn):
    """Pushing text into a binary COPY must be rejected with QueryCanceled."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with pytest.raises(e.QueryCanceled):
        with cursor.copy("copy copy_in from stdin with binary") as writer:
            writer.write(sample_text.decode())
    assert conn.info.transaction_status == conn.TransactionStatus.INERROR
@pytest.mark.parametrize("format", Format)
def test_copy_in_empty(conn, format):
    """An empty COPY completes cleanly and affects zero rows."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with cursor.copy(f"copy copy_in from stdin {copyopt(format)}"):
        pass
    assert conn.info.transaction_status == conn.TransactionStatus.INTRANS
    assert cursor.rowcount == 0
@pytest.mark.slow
def test_copy_big_size_record(conn):
    """A ~10MB single record round-trips through write_row unchanged."""
    cursor = conn.cursor()
    ensure_table(cursor, "id serial primary key, data text")
    payload = "".join(chr(randrange(1, 256)) for _ in range(10 * 1024 * 1024))
    with cursor.copy("copy copy_in (data) from stdin") as writer:
        writer.write_row([payload])
    cursor.execute("select data from copy_in limit 1")
    assert cursor.fetchone()[0] == payload
@pytest.mark.slow
def test_copy_big_size_block(conn):
    """A ~10MB raw text block round-trips through write() unchanged."""
    cursor = conn.cursor()
    ensure_table(cursor, "id serial primary key, data text")
    payload = "".join(choice(string.ascii_letters) for _ in range(10 * 1024 * 1024))
    with cursor.copy("copy copy_in (data) from stdin") as writer:
        # Raw COPY text format: the record needs its own terminating newline.
        writer.write(payload + "\n")
    cursor.execute("select data from copy_in limit 1")
    assert cursor.fetchone()[0] == payload
def test_copy_in_buffers_with_pg_error(conn):
    # NOTE(review): byte-for-byte duplicate of test_copy_in_buffers_pg_error
    # above - consider removing one of the two.
    cur = conn.cursor()
    ensure_table(cur, sample_tabledef)
    with pytest.raises(e.UniqueViolation):
        with cur.copy("copy copy_in from stdin") as copy:
            # Writing the same sample twice violates the primary key.
            copy.write(sample_text)
            copy.write(sample_text)
    assert conn.info.transaction_status == conn.TransactionStatus.INERROR
@pytest.mark.parametrize("format", Format)
def test_copy_in_records(conn, format):
    """Row-by-row COPY via write_row, in both text and binary format."""
    cur = conn.cursor()
    ensure_table(cur, sample_tabledef)
    with cur.copy(f"copy copy_in from stdin {copyopt(format)}") as copy:
        for row in sample_records:
            if format == Format.BINARY:
                # Binary COPY needs explicitly typed values: wrap plain
                # ints as Int4 so the adapter emits the right OID.
                row = tuple(
                    Int4(i) if isinstance(i, int) else i for i in row
                )  # type: ignore[assignment]
            copy.write_row(row)
    data = cur.execute("select * from copy_in order by 1").fetchall()
    assert data == sample_records
@pytest.mark.parametrize("format", Format)
def test_copy_in_records_set_types(conn, format):
    """set_types() lets write_row adapt plain Python values in any format."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with cursor.copy(f"copy copy_in from stdin {copyopt(format)}") as writer:
        writer.set_types(["int4", "int4", "text"])
        for record in sample_records:
            writer.write_row(record)
    rows = cursor.execute("select * from copy_in order by 1").fetchall()
    assert rows == sample_records
@pytest.mark.parametrize("format", Format)
def test_copy_in_records_binary(conn, format):
    """COPY into a column subset; omitted/None values land as NULL."""
    cursor = conn.cursor()
    ensure_table(cursor, "col1 serial primary key, col2 int4, data text")
    with cursor.copy(f"copy copy_in (col2, data) from stdin {copyopt(format)}") as writer:
        for record in sample_records:
            writer.write_row((None, record[2]))
    rows = cursor.execute("select col2, data from copy_in order by 2").fetchall()
    assert rows == [(None, "hello"), (None, "world")]
@pytest.mark.crdb_skip("copy canceled")
def test_copy_in_buffers_with_py_error(conn):
    """A Python exception inside the copy block cancels the COPY and taints the txn."""
    cursor = conn.cursor()
    ensure_table(cursor, sample_tabledef)
    with pytest.raises(e.QueryCanceled) as exc:
        with cursor.copy("copy copy_in from stdin") as writer:
            writer.write(sample_text)
            raise Exception("nuttengoggenio")
    # The original exception message is preserved in the QueryCanceled.
    assert "nuttengoggenio" in str(exc.value)
    assert conn.info.transaction_status == conn.TransactionStatus.INERROR
def test_copy_in_allchars(conn):
    """Code points 1-255 plus a multi-byte char all survive text-format COPY."""
    cur = conn.cursor()
    ensure_table(cur, "col1 int primary key, col2 int, data text")
    with cur.copy("copy copy_in from stdin") as copy:
        for i in range(1, 256):
            copy.write_row((i, None, chr(i)))
        copy.write_row((ord(eur), None, eur))  # euro sign: multi-byte in UTF-8
    data = cur.execute(
        """
select col1 = ascii(data), col2 is null, length(data), count(*)
from copy_in group by 1, 2, 3
"""
    ).fetchall()
    # A single aggregated group: every char matched its code point, every
    # col2 was NULL, and every value has length 1 - for all 256 rows.
    assert data == [(True, True, 1, 256)]
@pytest.mark.slow
@pytest.mark.parametrize(
    "fmt, set_types",
    [(Format.TEXT, True), (Format.TEXT, False), (Format.BINARY, True)],
)
@pytest.mark.crdb_skip("copy array")
def test_copy_from_leaks(conn_cls, dsn, faker, fmt, set_types):
    """Run a full COPY round-trip repeatedly and check no Python objects leak."""
    faker.format = PyFormat.from_pq(fmt)
    faker.choose_schema(ncols=20)
    faker.make_records(20)

    def work():
        # One complete cycle: fresh connection, table create, COPY in,
        # select back, verify - everything goes out of scope on return.
        with conn_cls.connect(dsn) as conn:
            with conn.cursor(binary=fmt) as cur:
                cur.execute(faker.drop_stmt)
                cur.execute(faker.create_stmt)
                stmt = sql.SQL("copy {} ({}) from stdin {}").format(
                    faker.table_name,
                    sql.SQL(", ").join(faker.fields_names),
                    sql.SQL("with binary" if fmt else ""),
                )
                with cur.copy(stmt) as copy:
                    if set_types:
                        copy.set_types(faker.types_names)
                    for row in faker.records:
                        copy.write_row(row)
                cur.execute(faker.select_stmt)
                recs = cur.fetchall()
                for got, want in zip(recs, faker.records):
                    faker.assert_record(got, want)

    # Object counts must be stable across iterations after a GC pass;
    # any growth means some object in the cycle above is being retained.
    gc_collect()
    n = []
    for i in range(3):
        work()
        gc_collect()
        n.append(gc_count())
    assert n[0] == n[1] == n[2], f"objects leaked: {n[1] - n[0]}, {n[2] - n[1]}"
def copyopt(format):
    """Return the COPY options clause for the given wire format."""
    if format == Format.BINARY:
        return "with binary"
    return ""
#!/usr/bin/python
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Andrew Hopkinson (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociCommon"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import base64
import jinja2
import os
import magic
import xml.etree.ElementTree as ET
import yaml
from contextlib import closing
import json
from common.okitLogging import getLogger
from common.okitLogging import getOkitHome
# Configure logging
# Module-wide logger shared by every helper below.
logger = getLogger()
def expandNestedVars(varsyaml):
    """Expand Jinja2 references between entries of a vars mapping.

    Repeatedly renders the YAML-serialised mapping against itself until a
    fixed point is reached (no template variables left), then parses the
    result back into Python data.

    :param varsyaml: Mapping of variables, possibly referencing each other.
    :return: The mapping with all nested references expanded.
    """
    varsstr = yaml.dump(varsyaml)
    while True:
        logger.debug("vars yaml : %s", yaml.dump(varsyaml))
        curr = jinja2.Template(varsstr).render(**varsyaml)
        if curr == varsstr:
            # Fixed point reached - nothing left to expand.
            # safe_load: bare yaml.load requires an explicit Loader in
            # PyYAML >= 6 and is unsafe on untrusted input; the rest of
            # this module already uses safe_load.
            return yaml.safe_load(varsstr)
        varsstr = curr
        varsyaml = yaml.safe_load(varsstr)
def parseJsonString(jsonstring):
    """Parse jsonstring as JSON; if it is not valid JSON, return it unchanged."""
    try:
        return json.loads(jsonstring)
    except json.decoder.JSONDecodeError as err:
        # Not JSON - silently hand the raw string back to the caller.
        logger.debug(err)
        return jsonstring
# Read JSON file
def readJsonFile(filename, varsyaml=None, templates='/pcma/templates'):
    """Read JSON from filename, optionally rendering it as a Jinja2 template.

    :param filename: File to read; with varsyaml it is resolved relative
        to the templates search path.
    :param varsyaml: Optional vars mapping; when given, the file is treated
        as a Jinja2 template and rendered with the (nested-expanded) vars.
    :param templates: Jinja2 template search path.
    :return: The parsed JSON data.
    :raises Exception: On parse failure or when the file does not exist.
    """
    jsonData = None
    logger.info('Reading Json File : {0!s:s}'.format(filename))
    logger.debug('Templates : {0!s:s}'.format(templates))
    try:
        if varsyaml is not None:
            # Template branch: expand nested vars, render, then parse.
            varsyaml = expandNestedVars(varsyaml)
            loader = jinja2.FileSystemLoader(searchpath=templates)
            env = jinja2.Environment(loader=loader, autoescape=True)
            jsontemplate = env.get_template(filename)
            rendered = jsontemplate.render(varsyaml)
            logger.debug("Rendered File ===>")
            logger.debug(rendered)
            jsonData = json.loads(rendered)
            logJson(jsonData)
        else:
            # Plain branch: read the file as literal JSON.
            with closing(open(str(filename))) as jsonFile:
                jsonData = json.load(jsonFile)
                logJson(jsonData)
    except (ValueError, TypeError) as err:
        msg = 'Failed to Read JSON File "{0:s}". {1:s}'.format(filename, str(err))
        logger.error('ValueError: %s', err)
        raise Exception(msg)
    except IOError as err:
        msg = 'JSON File "{0:s}" does not exist'.format(str(filename))
        logger.error('IOError: %s', err)
        raise Exception(msg)
    return jsonData
def writeJsonFile(jsonData, filename, sortKeys=True):
    """Serialise jsonData to filename as pretty-printed JSON.

    Parent directories are created as needed.

    :param jsonData: JSON-serialisable data to write.
    :param filename: Destination path.
    :param sortKeys: Sort object keys in the output (default True).
    """
    logger.info('Writing Json File : {0!s:s}'.format(filename))
    # Renamed from `dir` (shadowed the builtin); exist_ok avoids the
    # check-then-create race of the original code.
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with closing(open(filename, 'w')) as outfile:
        json.dump(jsonData, outfile, ensure_ascii=True, sort_keys=sortKeys, indent=2, separators=(',', ': '))
    logger.debug(jsonData)
    return
def logJson(jsonObj, sortKeys=True, indent=2):
    """Debug-log jsonObj as formatted JSON; no-op when jsonObj is None."""
    if jsonObj is None:
        return
    logger.debug(jsonToFormattedString(jsonObj, sortKeys=sortKeys, indent=indent))
def jsonToFormattedString(jsonObj, sortKeys=True, indent=2):
    """Return jsonObj serialised as human-readable, indented JSON text."""
    return json.dumps(
        jsonObj, sort_keys=sortKeys, indent=indent, separators=(',', ': ')
    )
def readYamlFile(filename):
    """Read a YAML file; return the parsed data, or None on parse failure.

    :param filename: Path of the YAML file to read.
    :return: Parsed YAML data, or None when the file cannot be parsed.
    """
    logger.info('Reading Yaml File : {0!s:s}'.format(filename))
    yamlData = None
    with closing(open(filename)) as stream:
        try:
            yamlData = yaml.safe_load(stream)
        except yaml.YAMLError:
            # logger.warn is a deprecated alias - use warning().
            logger.warning('Failed to Read YAML File %s', filename)
    return yamlData
def writeYamlFile(yamlData, filename, allowUnicode=True, defaultFlowStyle=False, defaultStyle='"'):
    """Write yamlData to filename via yaml.safe_dump."""
    logger.info('Writing Yaml File : {0!s:s}'.format(filename))
    with closing(open(filename, 'w')) as stream:
        dumped = yaml.safe_dump(
            yamlData,
            allow_unicode=allowUnicode,
            default_flow_style=defaultFlowStyle,
            default_style=defaultStyle,
        )
        stream.write(dumped)
    return
def logYaml(yamlObj, allowUnicode=True, defaultFlowStyle=False):
    """Debug-log yamlObj as YAML text; no-op when yamlObj is None."""
    if yamlObj is None:
        return
    logger.debug(yaml.safe_dump(yamlObj, allow_unicode=allowUnicode, default_flow_style=defaultFlowStyle))
def readXmlFile(filename):
    """Parse an XML file, returning the ElementTree or None on failure.

    Args:
        filename: Path of the XML file to read.

    Returns:
        xml.etree.ElementTree.ElementTree, or None when the file cannot be
        read or parsed.
    """
    logger.info('Reading Xml File : {0!s:s}'.format(filename))
    tree = None
    try:
        tree = ET.parse(filename)
    except (IOError, ET.ParseError) as e:
        # Also trap malformed XML so this helper matches readYamlFile's
        # "return None on bad input" contract; logger.warn is deprecated
        # and the error detail was previously dropped.
        logger.warning('Failed to Read XML File %s: %s', filename, e)
    return tree
def writeXmlFile(tree, filename):
    """Write an xml.etree.ElementTree.ElementTree out to filename."""
    logger.info('Writing Xml File : {0!s:s}'.format(filename))
    tree.write(filename)
def writeTerraformFile(terraform_file, contents):
    """Write Terraform resource text to terraform_file, one entry per line.

    contents may be a single string or a list of strings.
    """
    logger.info('Writing Terraform File: {0:s}'.format(terraform_file))
    resources = contents if isinstance(contents, list) else [contents]
    with closing(open(terraform_file, 'w')) as f:
        for resource in resources:
            f.write('{0:s}\n'.format(resource))
    return
def writeAnsibleFile(ansible_file, contents):
    """Write each entry of contents to ansible_file, one per line."""
    logger.info('Writing Ansible File: {0:s}'.format(ansible_file))
    with closing(open(ansible_file, 'w')) as out:
        for line in contents:
            out.write('{0:s}\n'.format(line))
    return
def writePythonFile(python_file, contents):
    """Write each entry of contents to python_file, one per line."""
    logger.info('Writing Python File: {0:s}'.format(python_file))
    with closing(open(python_file, 'w')) as out:
        for line in contents:
            out.write('{0:s}\n'.format(line))
    return
def writeMarkdownFile(md_file, contents):
    """Write each entry of contents to md_file, one per line.

    Entries are coerced to str via the !s format conversion.
    """
    logger.info('Writing Markdown File: {0:s}'.format(md_file))
    with closing(open(md_file, 'w')) as out:
        for line in contents:
            out.write('{0!s:s}\n'.format(line))
    return
def writeFile(filename, contents, overwrite=False):
    """Write contents (plus a trailing newline) to filename.

    Existing files are left untouched unless overwrite is True. Parent
    directories are created as needed.

    Args:
        filename: Destination file path.
        contents: String to write.
        overwrite: When True, replace an existing file.
    """
    logger.info('Writing File: {0:s}'.format(filename))
    if overwrite or not os.path.exists(filename):
        dirname = os.path.dirname(filename)
        # Guard against a bare filename: os.makedirs('') raises. exist_ok
        # also removes the check-then-create race of the old exists() test.
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        with closing(open(filename, 'w')) as f:
            f.write('{0:s}\n'.format(contents))
    else:
        logger.info('File Already Exists: {0:s}'.format(filename))
    return
def METHOD_NAME(json_data=None, from_char='.', to_char='-'):
    """Return json_data unchanged (placeholder for key-character renaming).

    The signature suggests keys containing from_char should be rewritten
    using to_char, but the original implementation is an identity
    pass-through; that behavior is preserved here.
    TODO(review): implement the renaming or remove the unused parameters.

    Args:
        json_data: Mapping to (eventually) transform; defaults to a new dict.
        from_char: Character to replace in keys (currently unused).
        to_char: Replacement character (currently unused).

    Returns:
        json_data itself, or a fresh empty dict when omitted.
    """
    # A mutable default argument ({}) is shared across calls and could leak
    # caller mutations; use the None-sentinel idiom instead.
    if json_data is None:
        json_data = {}
    return json_data
def userDataDecode(data):
    """Base64-decode user-data and decode the bytes using a sniffed charset.

    Args:
        data: Base64-encoded user-data payload.

    Returns:
        The decoded string, or '' if decoding fails for any reason.
    """
    try:
        # Decode the base64 payload once and reuse it for both the charset
        # sniff and the final text decode (the original decoded it twice).
        raw = base64.b64decode(data)
        m = magic.Magic(mime_encoding=True)
        encoding = m.from_buffer(raw)
        logger.info('<<<<<<<<<<<user-data encoding {0!s:s}>>>>>>>>>>>'.format(encoding))
        return raw.decode(encoding)
    except Exception as e:
        # Best-effort decode: any failure (bad base64, unknown charset,
        # undecodable bytes) is logged and reported as an empty string.
        logger.error(e)
        return ''
|
7,533 | test remove is idempotent | from unittest import mock
import pytest
from pyramid import httpexceptions
from h.views.admin.admins import admins_add, admins_index, admins_remove
@pytest.mark.usefixtures("routes")
class TestAdminsIndex:
    """Tests for the admins_index admin view."""

    def test_when_no_admins(self, pyramid_request):
        """With no admin users, the template context lists nobody."""
        context = admins_index(pyramid_request)
        assert context["admin_users"] == []

    @pytest.mark.usefixtures("users")
    def test_context_contains_admin_usernames(self, pyramid_request):
        """All admin userids (and only those) appear in the context."""
        context = admins_index(pyramid_request)
        assert set(context["admin_users"]) == {
            "acct:agnos@example.com",
            "acct:bojan@example.com",
            "acct:cristof@foo.org",
        }
@pytest.mark.usefixtures("users", "routes")
class TestAdminsAddRemove:
    """Tests for the admins_add / admins_remove admin views."""

    def test_add_makes_users_admins(self, pyramid_request, users):
        pyramid_request.params = {"add": "eva", "authority": "foo.org"}
        admins_add(pyramid_request)
        assert users["eva"].admin

    def test_add_is_idempotent(self, pyramid_request, users):
        pyramid_request.params = {
            "add": "agnos",
            "authority": pyramid_request.default_authority,
        }
        admins_add(pyramid_request)
        assert users["agnos"].admin

    def test_add_strips_spaces(self, pyramid_request, users):
        pyramid_request.params = {"add": " david ", "authority": " example.com "}
        admins_add(pyramid_request)
        assert users["david"].admin

    def test_add_redirects_to_index(self, pyramid_request):
        pyramid_request.params = {
            "add": "eva",
            "authority": pyramid_request.default_authority,
        }
        result = admins_add(pyramid_request)
        assert isinstance(result, httpexceptions.HTTPSeeOther)
        assert result.location == "/adm/admins"

    def test_add_redirects_to_index_when_user_not_found(self, pyramid_request):
        pyramid_request.params = {
            "add": "florp",
            "authority": pyramid_request.default_authority,
        }
        result = admins_add(pyramid_request)
        assert isinstance(result, httpexceptions.HTTPSeeOther)
        assert result.location == "/adm/admins"

    def test_add_flashes_when_user_not_found(self, pyramid_request):
        pyramid_request.params = {
            "add": "florp",
            "authority": pyramid_request.default_authority,
        }
        pyramid_request.session.flash = mock.Mock()
        admins_add(pyramid_request)
        assert pyramid_request.session.flash.call_count == 1

    def test_remove_makes_users_not_admins(self, pyramid_request, users):
        pyramid_request.params = {"remove": "acct:cristof@foo.org"}
        admins_remove(pyramid_request)
        assert not users["cristof"].admin

    # Renamed from the METHOD_NAME placeholder: without a test_ prefix
    # pytest would never collect this test.
    def test_remove_is_idempotent(self, pyramid_request, users):
        pyramid_request.params = {"remove": "acct:eva@example.com"}
        admins_remove(pyramid_request)
        assert not users["eva"].admin

    def test_remove_will_not_remove_last_admin(self, pyramid_request, users):
        pyramid_request.params = {"remove": "acct:cristof@foo.org"}
        admins_remove(pyramid_request)
        pyramid_request.params = {"remove": "acct:bojan@example.com"}
        admins_remove(pyramid_request)
        pyramid_request.params = {"remove": "acct:agnos@example.com"}
        admins_remove(pyramid_request)
        assert users["agnos"].admin

    def test_remove_redirects_to_index(self, pyramid_request):
        pyramid_request.params = {"remove": "acct:agnos@example.com"}
        result = admins_remove(pyramid_request)
        assert isinstance(result, httpexceptions.HTTPSeeOther)
        assert result.location == "/adm/admins"

    def test_remove_redirects_to_index_when_user_not_found(self, pyramid_request):
        pyramid_request.params = {"remove": "acct:florp@example.com"}
        result = admins_remove(pyramid_request)
        assert isinstance(result, httpexceptions.HTTPSeeOther)
        assert result.location == "/adm/admins"
@pytest.fixture
def routes(pyramid_config):
    """Register the admin.admins route that the views redirect to."""
    pyramid_config.add_route("admin.admins", "/adm/admins")
@pytest.fixture
def users(db_session, factories):
    """Create a mix of admin and non-admin users across two authorities."""
    specs = [
        ("agnos", "example.com", True),
        ("bojan", "example.com", True),
        ("cristof", "foo.org", True),
        ("david", "example.com", False),
        ("eva", "foo.org", False),
        ("flora", "foo.org", False),
    ]
    created = {
        username: factories.User(username=username, authority=authority, admin=admin)
        for username, authority, admin in specs
    }
    db_session.flush()
    return created
7,534 | readline | import _compression
import sys
from _compression import BaseStream
from _typeshed import ReadableBuffer, StrOrBytesPath, WriteableBuffer
from collections.abc import Iterable
from typing import IO, Any, Protocol, TextIO, overload
from typing_extensions import Literal, Self, SupportsIndex, TypeAlias, final
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", "open", "compress", "decompress"]
# The following attributes and methods are optional:
# def fileno(self) -> int: ...
# def close(self) -> object: ...
# Minimal readable file-object protocol accepted by open()/BZ2File in read mode.
class _ReadableFileobj(_compression._Reader, Protocol): ...

# Minimal writable file-object protocol accepted by open()/BZ2File in write mode.
class _WritableFileobj(Protocol):
    def write(self, __b: bytes) -> object: ...
    # The following attributes and methods are optional:
    # def fileno(self) -> int: ...
    # def close(self) -> object: ...
# Module-level one-shot (de)compression helpers.
def compress(data: ReadableBuffer, compresslevel: int = 9) -> bytes: ...
def decompress(data: ReadableBuffer) -> bytes: ...

# Mode-string groups used below to select the correct open() overload.
_ReadBinaryMode: TypeAlias = Literal["", "r", "rb"]
_WriteBinaryMode: TypeAlias = Literal["w", "wb", "x", "xb", "a", "ab"]
_ReadTextMode: TypeAlias = Literal["rt"]
_WriteTextMode: TypeAlias = Literal["wt", "xt", "at"]
# open() overloads: the return type (BZ2File for binary modes, TextIO for
# text modes) is determined by the mode string, for readable/writable file
# objects as well as filesystem paths; the final overload covers the case
# where the mode is not statically known.
@overload
def open(
    filename: _ReadableFileobj,
    mode: _ReadBinaryMode = "rb",
    compresslevel: int = 9,
    encoding: None = None,
    errors: None = None,
    newline: None = None,
) -> BZ2File: ...
@overload
def open(
    filename: _ReadableFileobj,
    mode: _ReadTextMode,
    compresslevel: int = 9,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
) -> TextIO: ...
@overload
def open(
    filename: _WritableFileobj,
    mode: _WriteBinaryMode,
    compresslevel: int = 9,
    encoding: None = None,
    errors: None = None,
    newline: None = None,
) -> BZ2File: ...
@overload
def open(
    filename: _WritableFileobj,
    mode: _WriteTextMode,
    compresslevel: int = 9,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
) -> TextIO: ...
@overload
def open(
    filename: StrOrBytesPath,
    mode: _ReadBinaryMode | _WriteBinaryMode = "rb",
    compresslevel: int = 9,
    encoding: None = None,
    errors: None = None,
    newline: None = None,
) -> BZ2File: ...
@overload
def open(
    filename: StrOrBytesPath,
    mode: _ReadTextMode | _WriteTextMode,
    compresslevel: int = 9,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
) -> TextIO: ...
@overload
def open(
    filename: StrOrBytesPath | _ReadableFileobj | _WritableFileobj,
    mode: str,
    compresslevel: int = 9,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
) -> BZ2File | TextIO: ...
class BZ2File(BaseStream, IO[bytes]):
    def __enter__(self) -> Self: ...
    # Keyword-only compresslevel since 3.9; positional 'buffering' before that.
    if sys.version_info >= (3, 9):
        @overload
        def __init__(self, filename: _WritableFileobj, mode: _WriteBinaryMode, *, compresslevel: int = 9) -> None: ...
        @overload
        def __init__(self, filename: _ReadableFileobj, mode: _ReadBinaryMode = "r", *, compresslevel: int = 9) -> None: ...
        @overload
        def __init__(
            self, filename: StrOrBytesPath, mode: _ReadBinaryMode | _WriteBinaryMode = "r", *, compresslevel: int = 9
        ) -> None: ...
    else:
        @overload
        def __init__(
            self, filename: _WritableFileobj, mode: _WriteBinaryMode, buffering: Any | None = None, compresslevel: int = 9
        ) -> None: ...
        @overload
        def __init__(
            self, filename: _ReadableFileobj, mode: _ReadBinaryMode = "r", buffering: Any | None = None, compresslevel: int = 9
        ) -> None: ...
        @overload
        def __init__(
            self,
            filename: StrOrBytesPath,
            mode: _ReadBinaryMode | _WriteBinaryMode = "r",
            buffering: Any | None = None,
            compresslevel: int = 9,
        ) -> None: ...
    def read(self, size: int | None = -1) -> bytes: ...
    def read1(self, size: int = -1) -> bytes: ...
    # Renamed from the METHOD_NAME placeholder: this is BZ2File.readline.
    def readline(self, size: SupportsIndex = -1) -> bytes: ...  # type: ignore[override]
    def readinto(self, b: WriteableBuffer) -> int: ...
    def readlines(self, size: SupportsIndex = -1) -> list[bytes]: ...
    def seek(self, offset: int, whence: int = 0) -> int: ...
    def write(self, data: ReadableBuffer) -> int: ...
    def writelines(self, seq: Iterable[ReadableBuffer]) -> None: ...
# Incremental compressor: feed chunks to compress() and finish with flush().
@final
class BZ2Compressor:
    def __init__(self, compresslevel: int = ...) -> None: ...
    def compress(self, __data: ReadableBuffer) -> bytes: ...
    def flush(self) -> bytes: ...
# Incremental decompressor: decompress() may buffer partial input
# (see needs_input) and exposes trailing bytes via unused_data.
@final
class BZ2Decompressor:
    def decompress(self, data: ReadableBuffer, max_length: int = -1) -> bytes: ...
    @property
    def eof(self) -> bool: ...
    @property
    def needs_input(self) -> bool: ...
    @property
    def unused_data(self) -> bytes: ...
7,535 | ndim | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities related to disk I/O."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.util.tf_export import keras_export
# h5py is an optional dependency; HDF5Matrix raises a clear ImportError at
# construction time when it is unavailable.
try:
  import h5py
except ImportError:
  h5py = None
@keras_export('keras.utils.HDF5Matrix')
class HDF5Matrix(object):
  """Representation of HDF5 dataset to be used instead of a Numpy array.

  Example:

  ```python
      x_data = HDF5Matrix('input/file.hdf5', 'data')
      model.predict(x_data)
  ```

  Providing `start` and `end` allows use of a slice of the dataset.

  Optionally, a normalizer function (or lambda) can be given. This will
  be called on every slice of data retrieved.

  Arguments:
      datapath: string, path to a HDF5 file
      dataset: string, name of the HDF5 dataset in the file specified
          in datapath
      start: int, start of desired slice of the specified dataset
      end: int, end of desired slice of the specified dataset
      normalizer: function to be called on data when retrieved

  Returns:
      An array-like HDF5 dataset.
  """
  # Cache of open h5py.File handles keyed by file path so that multiple
  # HDF5Matrix instances over the same file share one handle.
  refs = collections.defaultdict(int)

  def __init__(self, datapath, dataset, start=0, end=None, normalizer=None):
    if h5py is None:
      raise ImportError('The use of HDF5Matrix requires '
                        'HDF5 and h5py installed.')
    if datapath not in list(self.refs.keys()):
      # NOTE(review): h5py.File without an explicit mode defaults to
      # read/write in old h5py versions and is deprecated — confirm whether
      # 'r' should be passed here.
      f = h5py.File(datapath)
      self.refs[datapath] = f
    else:
      f = self.refs[datapath]
    self.data = f[dataset]
    self.start = start
    if end is None:
      self.end = self.data.shape[0]
    else:
      self.end = end
    self.normalizer = normalizer

  def __len__(self):
    return self.end - self.start

  def __getitem__(self, key):
    # Translate the caller's key into dataset coordinates by shifting with
    # self.start, rejecting anything past self.end.
    if isinstance(key, slice):
      start, stop = key.start, key.stop
      if start is None:
        start = 0
      if stop is None:
        stop = self.shape[0]
      if stop + self.start <= self.end:
        idx = slice(start + self.start, stop + self.start)
      else:
        raise IndexError
    elif isinstance(key, (int, np.integer)):
      if key + self.start < self.end:
        idx = key + self.start
      else:
        raise IndexError
    elif isinstance(key, np.ndarray):
      if np.max(key) + self.start < self.end:
        idx = (self.start + key).tolist()
      else:
        raise IndexError
    else:
      # Assume list/iterable
      if max(key) + self.start < self.end:
        idx = [x + self.start for x in key]
      else:
        raise IndexError
    if self.normalizer is not None:
      return self.normalizer(self.data[idx])
    else:
      return self.data[idx]

  @property
  def shape(self):
    """Gets a numpy-style shape tuple giving the dataset dimensions.

    Returns:
        A numpy-style shape tuple.
    """
    return (self.end - self.start,) + self.data.shape[1:]

  @property
  def dtype(self):
    """Gets the datatype of the dataset.

    Returns:
        A numpy dtype string.
    """
    return self.data.dtype

  @property
  def ndim(self):
    """Gets the number of dimensions (rank) of the dataset.

    Renamed from the METHOD_NAME placeholder: the numpy-style attribute is
    `ndim`, and the body delegates to the underlying dataset's `ndim`.

    Returns:
        An integer denoting the number of dimensions (rank) of the dataset.
    """
    return self.data.ndim

  @property
  def size(self):
    """Gets the total dataset size (number of elements).

    Returns:
        An integer denoting the number of elements in the dataset.
    """
    return np.prod(self.shape)

  @staticmethod
  def _to_type_spec(value):
    """Gets the Tensorflow TypeSpec corresponding to the passed dataset.

    Args:
      value: A HDF5Matrix object.

    Returns:
      A tf.TensorSpec.
    """
    if not isinstance(value, HDF5Matrix):
      raise TypeError('Expected value to be a HDF5Matrix, but saw: {}'.format(
          type(value)))
    return tensor_spec.TensorSpec(shape=value.shape, dtype=value.dtype)
# Allow HDF5Matrix values to be converted to a tf.TensorSpec automatically
# (e.g. when passed where TensorFlow expects a type spec).
type_spec.register_type_spec_from_value_converter(HDF5Matrix,
                                                  HDF5Matrix._to_type_spec)  # pylint: disable=protected-access
def ask_to_proceed_with_overwrite(filepath):
  """Produces a prompt asking about overwriting a file.

  Arguments:
      filepath: the path to the file to be overwritten.

  Returns:
      True if we can proceed with overwrite, False otherwise.
  """
  answer = six.moves.input('[WARNING] %s already exists - overwrite? '
                           '[y/n]' % (filepath)).strip().lower()
  # Keep re-prompting until the user gives an unambiguous answer.
  while answer not in ('y', 'n'):
    answer = six.moves.input('Enter "y" (overwrite) or "n" '
                             '(cancel).').strip().lower()
  if answer == 'y':
    print('[TIP] Next time specify overwrite=True!')
    return True
  return False
7,536 | update display properties | import abc
import typing
from nion.swift import Undo
from nion.swift.model import DisplayItem
from nion.swift.model import DocumentModel
from nion.swift.model import Graphics
from nion.swift.model import Persistence
from nion.ui import CanvasItem
from nion.ui import UserInterface
from nion.utils import Geometry
class DisplayCanvasItemDelegate(typing.Protocol):
    """Structural interface a display canvas item uses to talk back to its
    controller: mouse tracking, selection, undo commands, and graphic
    creation."""

    @property
    def tool_mode(self) -> str: raise NotImplementedError()

    @tool_mode.setter
    def tool_mode(self, value: str) -> None: ...

    def begin_mouse_tracking(self) -> None: ...

    def end_mouse_tracking(self, undo_command: typing.Optional[Undo.UndoableCommand]) -> None: ...

    def delete_key_pressed(self) -> None: ...

    def enter_key_pressed(self) -> None: ...

    def cursor_changed(self, pos: typing.Optional[typing.Tuple[int, ...]]) -> None: ...

    # Renamed from the METHOD_NAME placeholder, matching the sibling
    # update_display_data_channel_properties below.
    def update_display_properties(self, display_properties: Persistence.PersistentDictType) -> None: ...

    def update_display_data_channel_properties(self, display_data_channel_properties: Persistence.PersistentDictType) -> None: ...

    def create_change_display_command(self, *, command_id: typing.Optional[str] = None, is_mergeable: bool = False) -> Undo.UndoableCommand: ...

    def create_change_graphics_command(self) -> Undo.UndoableCommand: ...

    def create_insert_graphics_command(self, graphics: typing.Sequence[Graphics.Graphic]) -> Undo.UndoableCommand: ...

    def create_move_display_layer_command(self, display_item: DisplayItem.DisplayItem, src_index: int, target_index: int) -> Undo.UndoableCommand: ...

    def push_undo_command(self, command: Undo.UndoableCommand) -> None: ...

    def add_index_to_selection(self, index: int) -> None: ...

    def remove_index_from_selection(self, index: int) -> None: ...

    def set_selection(self, index: int) -> None: ...

    def clear_selection(self) -> None: ...

    def add_and_select_region(self, region: Graphics.Graphic) -> Undo.UndoableCommand: ...

    def nudge_selected_graphics(self, mapping: Graphics.CoordinateMappingLike, delta: Geometry.FloatSize) -> None: ...

    def nudge_slice(self, delta: int) -> None: ...

    def drag_graphics(self, graphics: typing.Sequence[Graphics.Graphic]) -> None: ...

    def adjust_graphics(self, widget_mapping: Graphics.CoordinateMappingLike, graphic_drag_items: typing.Sequence[Graphics.Graphic], graphic_drag_part: str, graphic_part_data: typing.Dict[int, Graphics.DragPartData], graphic_drag_start_pos: Geometry.FloatPoint, pos: Geometry.FloatPoint, modifiers: UserInterface.KeyboardModifiers) -> None: ...

    def display_clicked(self, modifiers: UserInterface.KeyboardModifiers) -> bool: ...

    def image_clicked(self, image_position: Geometry.FloatPoint, modifiers: UserInterface.KeyboardModifiers) -> bool: ...

    def image_mouse_pressed(self, image_position: Geometry.FloatPoint, modifiers: UserInterface.KeyboardModifiers) -> bool: ...

    def image_mouse_released(self, image_position: Geometry.FloatPoint, modifiers: UserInterface.KeyboardModifiers) -> bool: ...

    def image_mouse_position_changed(self, image_position: Geometry.FloatPoint, modifiers: UserInterface.KeyboardModifiers) -> bool: ...

    def show_display_context_menu(self, gx: int, gy: int) -> bool: ...

    def get_document_model(self) -> DocumentModel.DocumentModel: ...

    def create_rectangle(self, pos: Geometry.FloatPoint) -> Graphics.RectangleGraphic: ...

    def create_ellipse(self, pos: Geometry.FloatPoint) -> Graphics.EllipseGraphic: ...

    def create_line(self, pos: Geometry.FloatPoint) -> Graphics.LineGraphic: ...

    def create_point(self, pos: Geometry.FloatPoint) -> Graphics.PointGraphic: ...

    def create_line_profile(self, pos: Geometry.FloatPoint) -> Graphics.LineProfileGraphic: ...

    def create_spot(self, pos: Geometry.FloatPoint) -> Graphics.SpotGraphic: ...

    def create_wedge(self, angle: float) -> Graphics.WedgeGraphic: ...

    def create_ring(self, radius: float) -> Graphics.RingGraphic: ...

    def create_lattice(self, u_pos: Geometry.FloatSize) -> Graphics.LatticeGraphic: ...
class DisplayCanvasItem(CanvasItem.CanvasItemComposition):
    """Base canvas item for a display; subclasses implement the abstract
    hooks and override the no-op update_* notifications below."""

    @property
    def default_aspect_ratio(self) -> float:
        return 1.0

    @property
    def key_contexts(self) -> typing.Sequence[str]:
        """Return key contexts.
        Key contexts provide an ordered list of contexts that are used to determine
        which actions are valid at a given time. The contexts are checked in reverse
        order (i.e. last added have highest precedence).
        """
        return list()

    @property
    def mouse_mapping(self) -> Graphics.CoordinateMappingLike: raise NotImplementedError()

    @abc.abstractmethod
    def add_display_control(self, display_control_canvas_item: CanvasItem.AbstractCanvasItem, role: typing.Optional[str] = None) -> None: ...

    @abc.abstractmethod
    def handle_auto_display(self) -> bool: ...

    # The following notifications default to no-ops so that concrete display
    # canvas items only override the ones they care about.
    def update_display_properties_and_layers(self, display_calibration_info: DisplayItem.DisplayCalibrationInfo,
                                             display_properties: Persistence.PersistentDictType,
                                             display_layers: typing.Sequence[Persistence.PersistentDictType]) -> None:
        pass

    def update_display_values(self, display_values_list: typing.Sequence[typing.Optional[DisplayItem.DisplayValues]]) -> None:
        pass

    def update_graphics_coordinate_system(self, graphics: typing.Sequence[Graphics.Graphic],
                                          graphic_selection: DisplayItem.GraphicSelection,
                                          display_calibration_info: DisplayItem.DisplayCalibrationInfo) -> None:
        pass
7,537 | test l10n en us | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.estonia import Estonia, EE, EST
from tests.common import TestCase
class TestEstonia(TestCase):
    """Holiday tests for Estonia (fixed-date, Easter-relative and localized
    holiday names)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass(Estonia, years=range(1990, 2050))

    def test_country_aliases(self):
        self.assertCountryAliases(Estonia, EE, EST)

    def test_new_years(self):
        self.assertHolidayName("uusaasta", (f"{year}-01-01" for year in range(1990, 2050)))

    def test_independence_day(self):
        self.assertHolidayName("iseseisvuspäev", (f"{year}-02-24" for year in range(1990, 2050)))

    def test_good_friday(self):
        self.assertHolidayName(
            "suur reede",
            "2019-04-19",
            "2020-04-10",
            "2021-04-02",
            "2022-04-15",
            "2023-04-07",
        )

    def test_easter_sunday(self):
        self.assertHolidayName(
            "ülestõusmispühade 1. püha",
            "2019-04-21",
            "2020-04-12",
            "2021-04-04",
            "2022-04-17",
            "2023-04-09",
        )

    def test_spring_day(self):
        self.assertHolidayName("kevadpüha", (f"{year}-05-01" for year in range(1990, 2050)))

    def test_whit_sunday(self):
        self.assertHolidayName(
            "nelipühade 1. püha",
            "2019-06-09",
            "2020-05-31",
            "2021-05-23",
            "2022-06-05",
            "2023-05-28",
        )

    def test_victory_day(self):
        self.assertHolidayName("võidupüha", (f"{year}-06-23" for year in range(1990, 2050)))

    def test_midsummer_day(self):
        self.assertHolidayName("jaanipäev", (f"{year}-06-24" for year in range(1990, 2050)))

    def test_restoration_of_independence_day(self):
        # Observed only from 1998 onwards.
        name = "taasiseseisvumispäev"
        self.assertHolidayName(name, (f"{year}-08-20" for year in range(1998, 2050)))
        self.assertNoHoliday(f"{year}-08-20" for year in range(1990, 1998))
        self.assertNoHolidayName(name, range(1990, 1998))

    def test_christmas_eve(self):
        # Observed only from 2005 onwards.
        name = "jõululaupäev"
        self.assertHolidayName(name, (f"{year}-12-24" for year in range(2005, 2050)))
        self.assertNoHoliday(f"{year}-12-24" for year in range(1990, 2005))
        self.assertNoHolidayName(name, range(1990, 2005))

    def test_christmas_day(self):
        self.assertHolidayName(
            "esimene jõulupüha", (f"{year}-12-25" for year in range(1990, 2050))
        )

    def test_second_christmas_day(self):
        self.assertHolidayName("teine jõulupüha", (f"{year}-12-26" for year in range(1990, 2050)))

    def test_l10n_default(self):
        self.assertLocalizedHolidays(
            ("2022-01-01", "uusaasta"),
            ("2022-02-24", "iseseisvuspäev"),
            ("2022-04-15", "suur reede"),
            ("2022-04-17", "ülestõusmispühade 1. püha"),
            ("2022-05-01", "kevadpüha"),
            ("2022-06-05", "nelipühade 1. püha"),
            ("2022-06-23", "võidupüha"),
            ("2022-06-24", "jaanipäev"),
            ("2022-08-20", "taasiseseisvumispäev"),
            ("2022-12-24", "jõululaupäev"),
            ("2022-12-25", "esimene jõulupüha"),
            ("2022-12-26", "teine jõulupüha"),
        )

    # Renamed from the METHOD_NAME placeholder: without a test_ prefix this
    # localization check would never be collected by the test runner.
    def test_l10n_en_us(self):
        self.assertLocalizedHolidays(
            "en_US",
            ("2022-01-01", "New Year's Day"),
            ("2022-02-24", "Independence Day"),
            ("2022-04-15", "Good Friday"),
            ("2022-04-17", "Easter Sunday"),
            ("2022-05-01", "Spring Day"),
            ("2022-06-05", "Whit Sunday"),
            ("2022-06-23", "Victory Day"),
            ("2022-06-24", "Midsummer Day"),
            ("2022-08-20", "Independence Restoration Day"),
            ("2022-12-24", "Christmas Eve"),
            ("2022-12-25", "Christmas Day"),
            ("2022-12-26", "Second Day of Christmas"),
        )

    def test_l10n_uk(self):
        self.assertLocalizedHolidays(
            "uk",
            ("2022-01-01", "Новий рік"),
            ("2022-02-24", "День незалежності"),
            ("2022-04-15", "Страсна пʼятниця"),
            ("2022-04-17", "Великдень"),
            ("2022-05-01", "День весни"),
            ("2022-06-05", "Трійця"),
            ("2022-06-23", "День перемоги"),
            ("2022-06-24", "День літнього сонцестояння"),
            ("2022-08-20", "День відновлення незалежності"),
            ("2022-12-24", "Святий вечір"),
            ("2022-12-25", "Різдво Христове"),
            ("2022-12-26", "Другий день Різдва"),
        )
7,538 | save | import os
import logging
import time
from dataclasses import dataclass
from typing import List, Tuple, Optional, TypeVar, Type
from medcat.cdb import CDB
from medcat.utils.decorators import check_positive
T = TypeVar("T", bound="Checkpoint")
logger = logging.getLogger(__name__) # separate logger from the package-level one
class Checkpoint(object):
    """The base class of checkpoint objects

    Args:
        dir_path (str):
            The path to the parent directory of checkpoint files.
        steps (int):
            The number of processed sentences/documents before a checkpoint is saved
            (N.B.: A small number could result in error "no space left on device"),
        max_to_keep (int):
            The maximum number of checkpoints to keep
            (N.B.: A large number could result in error "no space left on device").
    """
    DEFAULT_STEP = 1000
    DEFAULT_MAX_TO_KEEP = 1

    @check_positive
    def __init__(self, dir_path: str, *, steps: int = DEFAULT_STEP, max_to_keep: int = DEFAULT_MAX_TO_KEEP) -> None:
        self._dir_path = os.path.abspath(dir_path)
        self._steps = steps
        self._max_to_keep = max_to_keep
        self._file_paths: List[str] = []
        self._count = 0
        os.makedirs(self._dir_path, exist_ok=True)

    @property
    def steps(self) -> int:
        return self._steps

    @steps.setter
    def steps(self, value: int) -> None:
        check_positive(lambda _: ...)(value)    # [https://github.com/python/mypy/issues/1362]
        self._steps = value

    @property
    def max_to_keep(self) -> int:
        return self._max_to_keep

    @max_to_keep.setter
    def max_to_keep(self, value: int) -> None:
        check_positive(lambda _: ...)(value)    # [https://github.com/python/mypy/issues/1362]
        self._max_to_keep = value

    @property
    def count(self) -> int:
        return self._count

    @property
    def dir_path(self) -> str:
        return self._dir_path

    @classmethod
    def from_latest(cls: Type[T], dir_path: str) -> T:
        """Retrieve the latest checkpoint from the parent directory.

        Args:
            dir_path (string):
                The path to the directory containing checkpoint files.

        Returns:
            T: A new checkpoint object.
        """
        if not os.path.isdir(dir_path):
            raise Exception("Checkpoints not found. You need to train from scratch.")
        ckpt_file_paths = cls._get_ckpt_file_paths(dir_path)
        if not ckpt_file_paths:
            raise Exception("Checkpoints not found. You need to train from scratch.")
        latest_ckpt = ckpt_file_paths[-1]
        steps, count = cls._get_steps_and_count(latest_ckpt)
        checkpoint = cls(dir_path, steps=steps)
        checkpoint._file_paths = ckpt_file_paths
        checkpoint._count = count
        # Lazy %-style formatting keeps the message unchanged while deferring
        # interpolation to the logging framework.
        logger.info("Checkpoint loaded from %s", latest_ckpt)
        return checkpoint

    def save(self, cdb: CDB, count: int) -> None:
        """Save the CDB as the latest checkpoint.

        Renamed from the METHOD_NAME placeholder; restore_latest_cdb is the
        inverse operation.

        Args:
            cdb (medcat.CDB):
                The MedCAT CDB object to be checkpointed.
            count (int):
                The number of the finished steps.
        """
        ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
        # Evict oldest checkpoints first so at most max_to_keep remain.
        while len(self._file_paths) >= self._max_to_keep:
            to_remove = self._file_paths.pop(0)
            os.remove(to_remove)
        cdb.save(ckpt_file_path)
        logger.debug("Checkpoint saved: %s", ckpt_file_path)
        self._file_paths.append(ckpt_file_path)
        self._count = count

    def restore_latest_cdb(self) -> CDB:
        """Restore the CDB from the latest checkpoint.

        Returns:
            cdb (medcat.CDB):
                The MedCAT CDB object.
        """
        if not os.path.isdir(self._dir_path):
            raise Exception("Checkpoints not found. You need to train from scratch.")
        ckpt_file_paths = self._get_ckpt_file_paths(self._dir_path)
        if not ckpt_file_paths:
            raise Exception("Checkpoints not found. You need to train from scratch.")
        latest_ckpt = ckpt_file_paths[-1]
        _, count = self._get_steps_and_count(latest_ckpt)
        self._file_paths = ckpt_file_paths
        self._count = count
        return CDB.load(self._file_paths[-1])

    @staticmethod
    def _get_ckpt_file_paths(dir_path: str) -> List[str]:
        # Checkpoint files are named "checkpoint-<steps>-<count>"; sort by
        # count so the last entry is the most recent.
        ckpt_file_paths = [os.path.abspath(os.path.join(dir_path, f)) for f in os.listdir(dir_path)]
        ckpt_file_paths = [f for f in ckpt_file_paths if os.path.isfile(f) and "checkpoint-" in f]
        if ckpt_file_paths:
            ckpt_file_paths.sort(key=lambda f: Checkpoint._get_steps_and_count(f)[1])
        return ckpt_file_paths

    @staticmethod
    def _get_steps_and_count(file_path) -> Tuple[int, int]:
        # Parse the (steps, count) pair out of a "checkpoint-<steps>-<count>"
        # file name.
        file_name_parts = os.path.basename(file_path).split('-')
        return int(file_name_parts[1]), int(file_name_parts[2])
@dataclass
class CheckpointConfig(object):
    """Configuration for checkpointing: output directory, save interval in
    steps, and how many checkpoint files to retain."""
    output_dir: str = "checkpoints"
    steps: int = Checkpoint.DEFAULT_STEP
    max_to_keep: int = Checkpoint.DEFAULT_MAX_TO_KEEP
class CheckpointManager(object):
    """The class for managing checkpoints of specific training type and their configuration

    Args:
        name (str):
            The name of the checkpoint manager (also used as the checkpoint base directory name).
        checkpoint_config (medcat.utils.checkpoint.CheckpointConfig):
            The checkpoint config object.
    """

    def __init__(self, name: str, checkpoint_config: CheckpointConfig) -> None:
        self.name = name
        self.checkpoint_config = checkpoint_config

    def create_checkpoint(self, dir_path: Optional[str] = None) -> "Checkpoint":
        """Create a new checkpoint inside the checkpoint base directory.

        Args:
            dir_path (str):
                The path to the checkpoint directory.

        Returns:
            CheckPoint: A checkpoint object.
        """
        # Default layout: <cwd>/<output_dir>/<name>/<unix-timestamp>, so each
        # training run gets its own directory.
        dir_path = dir_path or os.path.join(os.path.abspath(os.getcwd()), self.checkpoint_config.output_dir, self.name, str(int(time.time())))
        return Checkpoint(dir_path,
                          steps=self.checkpoint_config.steps,
                          max_to_keep=self.checkpoint_config.max_to_keep)

    def get_latest_checkpoint(self, base_dir_path: Optional[str] = None) -> "Checkpoint":
        """Retrieve the latest checkpoint from the checkpoint base directory.

        Args:
            base_dir_path (string):
                The path to the directory containing checkpoint files.

        Returns:
            CheckPoint: A checkpoint object
        """
        base_dir_path = base_dir_path or os.path.join(os.path.abspath(os.getcwd()), self.checkpoint_config.output_dir, self.name)
        ckpt_dir_path = self.get_latest_training_dir(base_dir_path=base_dir_path)
        checkpoint = Checkpoint.from_latest(dir_path=ckpt_dir_path)
        # The loaded checkpoint carries on with the manager's configuration,
        # not whatever was in effect when it was written.
        checkpoint.steps = self.checkpoint_config.steps
        checkpoint.max_to_keep = self.checkpoint_config.max_to_keep
        return checkpoint

    @classmethod
    def get_latest_training_dir(cls, base_dir_path: str) -> str:
        """Retrieve the latest training directory containing all checkpoints.

        Args:
            base_dir_path (string):
                The path to the directory containing all checkpointed trainings.

        Returns:
            str: The path to the latest training directory containing all checkpoints.
        """
        if not os.path.isdir(base_dir_path):
            raise ValueError(f"Checkpoint folder passed in does not exist: {base_dir_path}")
        ckpt_dir_paths = os.listdir(base_dir_path)
        if not ckpt_dir_paths:
            raise ValueError("No existing training found")
        # Directory names are unix timestamps, so a lexicographic sort puts
        # the most recent training last.
        ckpt_dir_paths.sort()
        ckpt_dir_path = os.path.abspath(os.path.join(base_dir_path, ckpt_dir_paths[-1]))
        return ckpt_dir_path
7,539 | request | import asyncio
import inspect
import os
from concurrent.futures import ThreadPoolExecutor
import pytest
import requests
from certipy import Certipy
from sqlalchemy import text
from tornado.httputil import url_concat
from jupyterhub import metrics, orm
from jupyterhub.objects import Server
from jupyterhub.roles import assign_default_roles, update_roles
from jupyterhub.utils import url_path_join as ujoin
class _AsyncRequests:
    """Proxy over ``requests`` whose methods return awaitable Futures.

    A single worker thread keeps blocking HTTP calls off the IOLoop thread.
    """

    def __init__(self):
        self.executor = ThreadPoolExecutor(1)
        blocking_submit = self.executor.submit

        def _submit(*args, **kwargs):
            # Wrap the concurrent.futures.Future so callers can await it.
            return asyncio.wrap_future(blocking_submit(*args, **kwargs))

        self.executor.submit = _submit

    def __getattr__(self, name):
        requests_method = getattr(requests, name)

        def _call(*args, **kwargs):
            return self.executor.submit(requests_method, *args, **kwargs)

        return _call
# async_requests.get = requests.get returning a Future, etc.
async_requests = _AsyncRequests()
class AsyncSession(requests.Session):
    """requests.Session object that runs in the background thread."""

    def METHOD_NAME(self, *args, **kwargs):
        # Delegate to the Session implementation, but execute it on the
        # shared background executor so the IOLoop is never blocked.
        return async_requests.executor.submit(super().METHOD_NAME, *args, **kwargs)
def ssl_setup(cert_dir, authority_name):
    """Create external certs signed by the same CA as the internal ones.

    Sharing one authority means certificate trust works regardless of the
    chosen endpoint.  Returns the certipy record for the external pair.
    """
    # Set up the external certs with the same authority as the internal
    # one so that certificate trust works regardless of chosen endpoint.
    certipy = Certipy(store_dir=cert_dir)
    alt_names = ["DNS:localhost", "IP:127.0.0.1"]
    internal_authority = certipy.create_ca(authority_name, overwrite=True)
    external_certs = certipy.create_signed_pair(
        "external", authority_name, overwrite=True, alt_names=alt_names
    )
    return external_certs
# Skip tests that don't work under internal-ssl when testing under internal-ssl.
# (Was a stray module-level string literal -- a no-op statement -- now a comment.)
# NOTE: any non-empty SSL_ENABLED value (even "0") is truthy and triggers the skip.
skip_if_ssl = pytest.mark.skipif(
    os.environ.get('SSL_ENABLED', False), reason="Does not use internal SSL"
)
def check_db_locks(func):
    """Decorator that verifies no locks are held on database upon exit.

    This decorator for test functions verifies no locks are held on the
    application's database upon exit by creating and dropping a dummy table.
    The decorator relies on an instance of JupyterHubApp being the first
    argument to the decorated function.

    Examples
    --------
    @check_db_locks
    def api_request(app, *api_path, **kwargs):
    """

    def new_func(app, *args, **kwargs):
        # The wrapped function may return a plain value (sync) or an
        # awaitable (async); both paths are handled below.
        maybe_future = func(app, *args, **kwargs)

        def _check(_=None):
            # DDL needs an exclusive lock, so this fails loudly if the
            # decorated function left a transaction open.
            temp_session = app.session_factory()
            try:
                temp_session.execute(text('CREATE TABLE dummy (foo INT)'))
                temp_session.execute(text('DROP TABLE dummy'))
            finally:
                temp_session.close()

        async def await_then_check():
            result = await maybe_future
            _check()
            return result

        # Defer the lock check until the coroutine actually finishes.
        if inspect.isawaitable(maybe_future):
            return await_then_check()
        else:
            _check()
            return maybe_future

    return new_func
def find_user(db, name, app=None):
    """Look up a user by name.

    Returns the orm.User record when *app* is None, otherwise the app's
    high-level user wrapper for the same record.
    """
    record = db.query(orm.User).filter(orm.User.name == name).first()
    return record if app is None else app.users[record.id]
def add_user(db, app=None, **kwargs):
    """Add a user to the database, or update an existing one in place.

    Any ``roles`` kwarg is applied after commit; otherwise default roles
    are assigned.  Returns the app's user wrapper when *app* is given,
    else the orm.User record.
    """
    orm_user = find_user(db, name=kwargs.get('name'))
    if orm_user is None:
        orm_user = orm.User(**kwargs)
        db.add(orm_user)
        metrics.TOTAL_USERS.inc()
    else:
        # Update-in-place for an existing user record.
        for attr, value in kwargs.items():
            setattr(orm_user, attr, value)
    db.commit()
    requested_roles = kwargs.get('roles')
    if requested_roles:
        update_roles(db, entity=orm_user, roles=requested_roles)
    else:
        assign_default_roles(db, entity=orm_user)
    if app:
        return app.users[orm_user.id]
    else:
        return orm_user
def auth_header(db, name):
    """Return header with user's API authorization token.

    Note: this mints a *new* API token for the user on every call.

    Raises:
        KeyError: If no user with this name exists.
    """
    user = find_user(db, name)
    if user is None:
        raise KeyError(f"No such user: {name}")
    token = user.new_api_token()
    return {'Authorization': 'token %s' % token}
@check_db_locks
async def api_request(
    app, *api_path, method='get', noauth=False, bypass_proxy=False, **kwargs
):
    """Make an API request to the Hub and sanity-check the response.

    Adds admin token auth by default (unless *noauth* is set or cookies
    are supplied), verifies CSP headers, and checks the JSON content-type.
    """
    if bypass_proxy:
        # make a direct request to the hub,
        # skipping the proxy
        base_url = app.hub.url
    else:
        base_url = public_url(app, path='hub')
    headers = kwargs.setdefault('headers', {})
    if 'Authorization' not in headers and not noauth and 'cookies' not in kwargs:
        # make a copy to avoid modifying arg in-place
        kwargs['headers'] = h = {}
        h.update(headers)
        h.update(auth_header(app.db, kwargs.pop('name', 'admin')))
    url = ujoin(base_url, 'api', *api_path)
    if 'cookies' in kwargs:
        # for cookie-authenticated requests,
        # add _xsrf to url params
        if "_xsrf" in kwargs['cookies'] and not noauth:
            url = url_concat(url, {"_xsrf": kwargs['cookies']['_xsrf']})
    f = getattr(async_requests, method)
    if app.internal_ssl:
        kwargs['cert'] = (app.internal_ssl_cert, app.internal_ssl_key)
        kwargs["verify"] = app.internal_ssl_ca
    resp = await f(url, **kwargs)
    # Every API response must carry the Hub's Content-Security-Policy.
    assert "frame-ancestors 'self'" in resp.headers['Content-Security-Policy']
    assert (
        ujoin(app.hub.base_url, "security/csp-report")
        in resp.headers['Content-Security-Policy']
    )
    assert 'http' not in resp.headers['Content-Security-Policy']
    if not kwargs.get('stream', False) and resp.content:
        assert resp.headers.get('content-type') == 'application/json'
    return resp
def get_page(path, app, hub=True, **kw):
    """Fetch a page under the Hub (or app base) prefix, returning a Future."""
    if "://" in path:
        raise ValueError(
            "Not a hub page path: %r. Did you mean async_requests.get?" % path
        )
    if hub:
        prefix = app.hub.base_url
    else:
        prefix = app.base_url
    base_url = ujoin(public_host(app), prefix)
    return async_requests.get(ujoin(base_url, path), **kw)
def public_host(app):
    """Return the public *host* (no URL prefix) of the given JupyterHub instance."""
    host = app.subdomain_host
    if not host:
        host = Server.from_url(app.proxy.public_url).host
    return host
def public_url(app, user_or_service=None, path=''):
    """Return the full, public base URL (including prefix) of the given JupyterHub instance."""
    if user_or_service:
        # With subdomains each user/service has its own host.
        host = user_or_service.host if app.subdomain_host else public_host(app)
        prefix = user_or_service.prefix
    else:
        host = public_host(app)
        prefix = Server.from_url(app.proxy.public_url).base_url
    return (host + ujoin(prefix, path)) if path else (host + prefix)
7,540 | super surface | #!/usr/bin/env python
# Searches for site overlap between two lattices
# All input between lines 72 - 79
# Requires the surface_points.py file, which defines the different surfaces
################################################################################
# Copyright Keith T Butler (2015) #
# #
# This file is part of SMACT: builder.py is free software: you can #
# redistribute it and/or modify it under the terms of the GNU General Public #
# License as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# You should have received a copy of the GNU General Public License along with #
# this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from optparse import OptionParser
import numpy as np
import surface_points
def find_max_csl(surfs_1, surfs_2, multiplicity1, multiplicity2):
    """
    Given surface points and multiplicities of the surfaces this returns the maximal overlap fraction of the sites

    Attr:
        surfs : lists of the surface points on each side of the interface.
        multiplicity : lists of the multiplicity of the lattice vectors of u and v for each side of the interface.

    Returns:
        max_csl : float, the maximum fraction overlap found.
    """
    csl_values = []
    for surface_1 in surfs_1:
        if len(surface_1) > 0:
            surf_1_super = METHOD_NAME(
                np.asarray(surface_1), np.asarray(multiplicity1)
            )
            for surface_2 in surfs_2:
                if len(surface_2) > 0:
                    surf_2_super = METHOD_NAME(
                        np.asarray(surface_2), np.asarray(multiplicity2)
                    )
                    # Scan a 10x10 grid of fractional translations of
                    # surface 2 and record the overlap for each shift.
                    # NOTE(review): translate() mutates surf_2_super in
                    # place, so the shifts accumulate across iterations
                    # rather than being applied to a fresh copy -- verify
                    # this is intended.
                    for i in np.arange(0, 1, 0.1):
                        for j in np.arange(0, 1, 0.1):
                            t_surf = translate(surf_2_super, [i, j])
                            csl_values.append(
                                csl(surf_1_super, t_surf, multiplicity1)
                            )
    return max(csl_values)
def METHOD_NAME(surface, multiplicity):
    """Makes a super cell out of the surface coordinates.

    Each fractional site (u, v) is replicated multiplicity[0] x
    multiplicity[1] times and rescaled back into the [0, 1) cell.
    """
    nu, nv = multiplicity[0], multiplicity[1]
    expanded = [
        [(site[0] + u) / nu, (site[1] + v) / nv]
        for site in surface
        for u in range(nu)
        for v in range(nv)
    ]
    return np.asarray(expanded)
def distance(a, b, mult):
    """Calculate separations, don't forget that we need to scale the separations by the multiplicity of the MAPI surface in each direction."""
    scaled = []
    for axis in (0, 1):
        sep = abs(a[axis] - b[axis])
        # Crude periodic wrap for separations beyond one cell.
        if sep > 1:
            sep = sep - 1
        scaled.append(sep * mult[axis])
    return np.sqrt(scaled[0] ** 2 + scaled[1] ** 2)
def csl(surface1, surface2, mult_a, tol=0.15):
    """Takes two surfaces and calculates the number of co-inciding sites (within a tolerance).

    Returns the coincidence count normalized by the mean site count of
    the two surfaces.
    """
    hits = sum(
        1.0
        for site_a in surface1
        for site_b in surface2
        if distance(site_a, site_b, mult_a) <= tol
    )
    return hits * 2 / (len(surface1) + len(surface2))
def wrapped(site):
    """Crude minimum image for this code.

    Folds each fractional coordinate back into [0, 1] by at most one
    cell; mutates *site* in place and returns it.
    """
    for axis in (0, 1):
        if site[axis] > 1:
            site[axis] -= 1
        elif site[axis] < 0:
            site[axis] += 1
    return site
def translate(surface, T):
    """Translate the positions of the ions by a given vector.

    Applies T to every site, wraps it back into the cell, and writes the
    result back into *surface* (mutated in place and returned).
    """
    for idx, position in enumerate(surface):
        surface[idx] = wrapped(position + T)
    return surface
def get_comma_separated_args(option, opt, value, parser):
    """optparse callback: store a comma-separated option value as a list."""
    parts = value.split(",")
    setattr(parser.values, option.dest, parts)
###### THESE ARE THE INPUT VARIABLES #######
"""
parser = OptionParser()
parser.add_option("-a", "--matera",
action="store", type="string", dest="mater1", default="perovskite",
help="The first material to consider")
parser.add_option("-b", "--materb",
action="store", type="string", dest="mater2", default="perovskite",
help="The second material to consider")
parser.add_option("-x", "--millera",
action="store", type=int, dest="milla", default="100",
help="The first materials miller index to consider, format : 100")
parser.add_option("-y", "--millerb",
action="store", type=int, dest="millb", default="100",
help="The second materials miller index to consider, format : 100 ")
parser.add_option("-u", "--multa",
type='string',action="callback", dest="multa",
callback=get_comma_separated_args,
help="The first materials multiplicity, format : 2,2")
parser.add_option("-v", "--multb",
type='string',action="callback", dest="multb",
callback=get_comma_separated_args,
help="The second materials multiplicity, format : 3,3")
(options, args) = parser.parse_args()
""" |
7,541 | parent path str | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Marker styling applied to all *decreasing* values of a waterfall trace."""

    # color
    # -----
    @property
    def color(self):
        """
        Sets the marker color of all decreasing values.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # line
    # ----
    @property
    def line(self):
        """
        The 'line' property is an instance of Line
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.waterfall.decreasing.marker.Line`
          - A dict of string/value properties that will be passed
            to the Line constructor

            Supported dict properties:

                color
                    Sets the line color of all decreasing values.
                width
                    Sets the line width of all decreasing values.

        Returns
        -------
        plotly.graph_objs.waterfall.decreasing.marker.Line
        """
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val

    # property parent name
    # --------------------
    @property
    def METHOD_NAME(self):
        # Dotted path of this object's parent in the figure hierarchy.
        return "waterfall.decreasing"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker color of all decreasing values.
        line
            :class:`plotly.graph_objects.waterfall.decreasing.marke
            r.Line` instance or dict with compatible properties
        """

    def __init__(self, arg=None, color=None, line=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.waterfall.decreasing.Marker`
        color
            Sets the marker color of all decreasing values.
        line
            :class:`plotly.graph_objects.waterfall.decreasing.marke
            r.Line` instance or dict with compatible properties

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.waterfall.decreasing.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.decreasing.Marker`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.waterfall.decreasing import marker as v_marker

        # Initialize validators
        # ---------------------
        self._validators["color"] = v_marker.ColorValidator()
        self._validators["line"] = v_marker.LineValidator()

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("line", None)
        self["line"] = line if line is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
__all__ = ["Marker", "marker"]
from plotly.graph_objs.waterfall.decreasing import marker |
7,542 | get proposal | import json
from moto.core.responses import BaseResponse
from .models import managedblockchain_backends, ManagedBlockchainBackend
from .utils import (
networkid_from_managedblockchain_url,
proposalid_from_managedblockchain_url,
invitationid_from_managedblockchain_url,
memberid_from_managedblockchain_request,
nodeid_from_managedblockchain_url,
)
class ManagedBlockchainResponse(BaseResponse):
    """HTTP dispatch layer for the mocked Managed Blockchain service.

    Each handler extracts ids from the request URL/body, delegates to the
    backend for the current account/region, and serializes the result as
    a JSON string (or an empty body for void operations).
    """

    def __init__(self) -> None:
        super().__init__(service_name="managedblockchain")

    @property
    def backend(self) -> ManagedBlockchainBackend:
        # Backend instance scoped to the requesting account and region.
        return managedblockchain_backends[self.current_account][self.region]

    def list_networks(self) -> str:
        networks = self.backend.list_networks()
        return json.dumps({"Networks": [network.to_dict() for network in networks]})

    def create_network(self) -> str:
        name = self._get_param("Name")
        framework = self._get_param("Framework")
        frameworkversion = self._get_param("FrameworkVersion")
        frameworkconfiguration = self._get_param("FrameworkConfiguration")
        voting_policy = self._get_param("VotingPolicy")
        member_configuration = self._get_param("MemberConfiguration")
        # Optional
        description = self._get_param("Description", None)
        response = self.backend.create_network(
            name,
            framework,
            frameworkversion,
            frameworkconfiguration,
            voting_policy,
            member_configuration,
            description,
        )
        return json.dumps(response)

    def get_network(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        mbcnetwork = self.backend.get_network(network_id)
        return json.dumps({"Network": mbcnetwork.get_format()})

    def list_proposals(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        proposals = self.backend.list_proposals(network_id)
        return json.dumps({"Proposals": [proposal.to_dict() for proposal in proposals]})

    def create_proposal(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        memberid = self._get_param("MemberId")
        actions = self._get_param("Actions")
        # Optional
        description = self._get_param("Description", None)
        response = self.backend.create_proposal(
            network_id, memberid, actions, description
        )
        return json.dumps(response)

    def METHOD_NAME(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        proposal_id = proposalid_from_managedblockchain_url(self.path)
        proposal = self.backend.METHOD_NAME(network_id, proposal_id)
        return json.dumps({"Proposal": proposal.get_format()})

    def list_proposal_votes(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        proposal_id = proposalid_from_managedblockchain_url(self.path)
        proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id)
        return json.dumps({"ProposalVotes": proposalvotes})

    def vote_on_proposal(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        proposal_id = proposalid_from_managedblockchain_url(self.path)
        votermemberid = self._get_param("VoterMemberId")
        vote = self._get_param("Vote")
        self.backend.vote_on_proposal(network_id, proposal_id, votermemberid, vote)
        return ""

    def list_invitations(self) -> str:
        invitations = self.backend.list_invitations()
        return json.dumps(
            {"Invitations": [invitation.to_dict() for invitation in invitations]}
        )

    def reject_invitation(self) -> str:
        invitation_id = invitationid_from_managedblockchain_url(self.path)
        self.backend.reject_invitation(invitation_id)
        return ""

    def list_members(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        members = self.backend.list_members(network_id)
        return json.dumps({"Members": [member.to_dict() for member in members]})

    def create_member(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        invitationid = self._get_param("InvitationId")
        member_configuration = self._get_param("MemberConfiguration")
        response = self.backend.create_member(
            invitationid, network_id, member_configuration
        )
        return json.dumps(response)

    def get_member(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        member = self.backend.get_member(network_id, member_id)
        return json.dumps({"Member": member.get_format()})

    def update_member(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        logpublishingconfiguration = self._get_param("LogPublishingConfiguration")
        self.backend.update_member(network_id, member_id, logpublishingconfiguration)
        return ""

    def delete_member(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        self.backend.delete_member(network_id, member_id)
        return ""

    def list_nodes(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        status = self._get_param("status")
        nodes = self.backend.list_nodes(network_id, member_id, status)
        return json.dumps({"Nodes": [node.to_dict() for node in nodes]})

    def create_node(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        instancetype = self._get_param("NodeConfiguration")["InstanceType"]
        availabilityzone = self._get_param("NodeConfiguration")["AvailabilityZone"]
        logpublishingconfiguration = self._get_param("NodeConfiguration")[
            "LogPublishingConfiguration"
        ]
        response = self.backend.create_node(
            network_id,
            member_id,
            availabilityzone,
            instancetype,
            logpublishingconfiguration,
        )
        return json.dumps(response)

    def get_node(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        node_id = nodeid_from_managedblockchain_url(self.path)
        node = self.backend.get_node(network_id, member_id, node_id)
        return json.dumps({"Node": node.get_format()})

    def update_node(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        node_id = nodeid_from_managedblockchain_url(self.path)
        # The raw request body is passed through as the new log config.
        self.backend.update_node(
            network_id, member_id, node_id, logpublishingconfiguration=self.body
        )
        return ""

    def delete_node(self) -> str:
        network_id = networkid_from_managedblockchain_url(self.path)
        member_id = memberid_from_managedblockchain_request(self.uri, self.body)
        node_id = nodeid_from_managedblockchain_url(self.path)
        self.backend.delete_node(network_id, member_id, node_id)
        return ""
7,543 | add metric | #!/usr/bin/env python3
#
# Plot the output of test/heapwatch/client_ram_report.py --csv
import csv
import random
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator, FuncFormatter
_meta_cols = {'when', 'dt', 'round'}
_metrics_cols = {'free', 'inuse', 'released', 'total'}
# see https://matplotlib.org/stable/gallery/lines_bars_and_markers/linestyles.html
plt_line_styles = [
'solid', 'dotted', 'dashed', 'dashdot',
(5, (10, 3)), # long dash with offset
(0, (3, 5, 1, 5)), # dashdotted
(0, (3, 10, 1, 10, 1, 10)), # loosely dashdotted
]
def smin(a, b):
    """min() where a None operand is ignored (None, None -> None)."""
    if a is not None and b is not None:
        return min(a, b)
    return a if b is None else b
def smax(a, b):
    """max() where a None operand is ignored (None, None -> None)."""
    if a is not None and b is not None:
        return max(a, b)
    return a if b is None else b
def METHOD_NAME(d, k, m, x, y):
    """d: {k: {m: [(x,y)]}} -- append point (x, y) to the series for (k, m),
    creating the intermediate dict/list on first use."""
    d.setdefault(k, {}).setdefault(m, []).append((x, y))
def format_mem(x, _):
    """Format a byte count as a human-readable axis tick label.

    Args:
        x: Tick value in bytes (may be negative for matplotlib margins).
        _: Tick position (unused; required by FuncFormatter's signature).

    Returns:
        A string such as "3.5 MB" (empty string for negative ticks).
    """
    if x < 0:
        return ""
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if x < 1024:
            return "%3.1f %s" % (x, unit)
        x /= 1024
    # Bug fix: values >= 1024 GB previously fell off the loop and
    # implicitly returned None; format them as TB instead.
    return "%3.1f TB" % x
def main():
    """Plot each CSV from client_ram_report.py --csv as <name>.svg/.png."""
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('files', nargs='+')
    args = ap.parse_args()
    for fname in args.files:
        # fvals: {node_nick: {metric: [(round, value)]}}
        fvals = {}
        minv = None
        maxv = None
        with open(fname) as fin:
            reader = csv.DictReader(fin)
            for rec in reader:
                xround = int(rec['round'])
                row_nick = None
                for k, v in rec.items():
                    if k in _meta_cols:
                        continue
                    v = float(v)
                    # Columns are "<node>#<metric>"; bare names fall back
                    # to using the column as both node and metric.
                    parts = k.split('#')
                    if len(parts) == 2:
                        row_nick = parts[0]
                        metric = parts[1]
                    else:
                        print(f"unknown column {k}")
                        row_nick = k
                        metric = k
                    METHOD_NAME(fvals, row_nick, metric, xround, v)
                    # Track global y-range across every series.
                    minv = smin(minv, v)
                    maxv = smax(maxv, v)
        if not fvals:
            print(f"{fname} empty")
            continue
        nodes = sorted(fvals.keys())
        print("{} found series {}".format(fname, nodes))
        fig, ax = plt.subplots()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.yaxis.set_major_formatter(FuncFormatter(format_mem))
        ax.set_ylabel('bytes')
        ax.set_xlabel('round')
        ax.set_ylim(minv, maxv)
        max_val_color = max(map(len, nodes)) * ord('z')
        for k in nodes:
            lc = None  # let matplotlib to pick a color if there is no standard nodes name pattern => probably because of a single local run
            if len(nodes) > 1:
                # if there are multiple nodes choose some color based on the node name
                s = sum(map(ord, k))
                lc = (s/max_val_color, s/max_val_color, s/max_val_color)
                if k.startswith('r'):
                    # blueish
                    lc = (0.3*random.random(), 0.3*random.random(), 0.7+(0.3*random.random()))
                elif k.startswith('npn'):
                    # greenish
                    lc = (0.3*random.random(), 0.7+(0.3*random.random()), 0.3*random.random())
                elif k.startswith('n'):
                    # reddish
                    lc = (0.7+(0.3*random.random()), 0.3*random.random(), 0.3*random.random())
            metrics = fvals[k]
            # One line per metric, cycling line styles within a node.
            for i, metric in enumerate(metrics.keys()):
                xy = metrics[metric]
                ax.plot([p[0] for p in xy], [p[1] for p in xy], label=f'{k}/{metric}', color=lc, linestyle=plt_line_styles[i % len(plt_line_styles)])
        ax.legend(loc='upper left', ncol=2)
        plt.savefig(fname + '.svg', format='svg')
        plt.savefig(fname + '.png', format='png')
        #plt.show()


if __name__ == '__main__':
    main()
7,544 | convert to new filename | #!/usr/bin/env python3
"""
Collection of CLI commands for an administrator to use
"""
import logging
import os
import re
import time
from argparse import ArgumentParser
from getpass import getpass
from sys import stderr
from szurubooru import config, db, errors, model
from szurubooru.func import files, images
from szurubooru.func import posts as postfuncs
from szurubooru.func import users as userfuncs
def reset_password(username: str) -> None:
    """Interactively set a new password for the given user name or email.

    Prompts twice and raises ValidationError if the entries differ.
    """
    user = userfuncs.get_user_by_name_or_email(username)
    new_password = getpass("Enter new password for '%s': " % user.name)
    check_password = getpass("Re-enter password: ")
    if check_password != new_password:
        raise errors.ValidationError("Passwords do not match")
    userfuncs.update_user_password(user, new_password)
    db.get_session().commit()
    # Fixed typo in the confirmation message ("Sucessfully").
    print("Successfully changed password for '%s'" % user.name)
def check_audio() -> None:
    """Audit the sound flag on all video posts.

    Reports discrepancies between the stored FLAG_SOUND flag and the
    actual audio data in the content; does not modify any posts.
    """
    post_list = (
        db.session.query(model.Post)
        .filter(model.Post.type == model.Post.TYPE_VIDEO)
        .order_by(model.Post.post_id)
        .all()
    )
    for post in post_list:
        print("Checking post %d ..." % post.post_id, end="\r", file=stderr)
        content = files.get(postfuncs.get_post_content_path(post))
        has_existing_flag = model.Post.FLAG_SOUND in post.flags
        try:
            has_sound_data = images.Image(content).check_for_sound()
        except errors.ProcessingError:
            print(
                "Post %d caused an error when checking for sound"
                % post.post_id
            )
            # Bug fix: without this `continue`, the checks below would use
            # has_sound_data from a previous iteration (stale result) or
            # raise UnboundLocalError if the first post fails.
            continue
        if has_sound_data and not has_existing_flag:
            print("Post %d has sound data but is not flagged" % post.post_id)
        if not has_sound_data and has_existing_flag:
            print("Post %d has no sound data but is flagged" % post.post_id)
def reset_filenames() -> None:
    """Rename post content/thumbnail files to match the current secret key.

    Useful after the server's secret key is lost or changed, which
    invalidates the security-hash component of every filename.
    """
    regex = re.compile(r"(\d+)_[0-9a-f]{16}\.(\S+)")

    def METHOD_NAME(old_name: str) -> str:
        # Map "<id>_<oldhash>.<ext>" -> "<id>_<newhash>.<ext>";
        # returns None for files that do not match the naming scheme.
        matches = regex.match(old_name)
        if not matches:
            return None
        post_id = int(matches.group(1))
        post_ext = matches.group(2)
        return "%d_%s.%s" % (
            post_id,
            postfuncs.get_post_security_hash(post_id),
            post_ext,
        )

    def rename_in_dir(dir: str) -> None:
        # Rename every matching file under data_dir + dir in place.
        for old_path in os.listdir(config.config["data_dir"] + dir):
            new_path = METHOD_NAME(old_path)
            if not new_path:
                continue
            if old_path != new_path:
                print("%s -> %s" % (dir + old_path, dir + new_path))
                os.rename(
                    config.config["data_dir"] + dir + old_path,
                    config.config["data_dir"] + dir + new_path,
                )

    rename_in_dir("posts/")
    rename_in_dir("generated-thumbnails/")
    rename_in_dir("posts/custom-thumbnails/")
def regenerate_thumbnails() -> None:
    """Regenerate the thumbnail for every post, reporting failures.

    Best-effort: a failing post is reported and skipped, matching the
    reporting style of check_audio().
    """
    for post in db.session.query(model.Post).all():
        # Fixed typo in the progress message ("tumbnail" -> "thumbnail").
        print("Generating thumbnail for post %d ..." % post.post_id, end="\r")
        try:
            postfuncs.generate_post_thumbnail(post)
        except Exception:
            # Keep going, but say which post failed instead of silently
            # swallowing the error.
            print("Failed to generate thumbnail for post %d" % post.post_id)
def main() -> None:
    """Parse the admin CLI arguments and dispatch to the chosen command."""
    parser_top = ArgumentParser(
        description="Collection of CLI commands for an administrator to use",
        epilog="Look at README.md for more info",
    )
    # Exactly one command must be chosen per invocation.
    parser = parser_top.add_mutually_exclusive_group(required=True)
    parser.add_argument(
        "--change-password",
        metavar="<username>",
        help="change the password of specified user",
    )
    parser.add_argument(
        "--check-all-audio",
        action="store_true",
        help="check the audio flags of all posts, "
        "noting discrepancies, without modifying posts",
    )
    parser.add_argument(
        "--reset-filenames",
        action="store_true",
        help="reset and rename the content and thumbnail "
        "filenames in case of a lost/changed secret key",
    )
    parser.add_argument(
        "--regenerate-thumbnails",
        action="store_true",
        help="regenerate the thumbnails for posts if the "
        "thumbnail files are missing",
    )
    command = parser_top.parse_args()
    try:
        if command.change_password:
            reset_password(command.change_password)
        elif command.check_all_audio:
            check_audio()
        elif command.reset_filenames:
            reset_filenames()
        elif command.regenerate_thumbnails:
            regenerate_thumbnails()
    except errors.BaseError as e:
        # Domain errors are reported cleanly rather than as tracebacks.
        print(e, file=stderr)


if __name__ == "__main__":
    main()
7,545 | setup | r"""
A role and directive to display mathtext in Sphinx
==================================================
.. warning::
In most cases, you will likely want to use one of `Sphinx's builtin Math
extensions
<https://www.sphinx-doc.org/en/master/usage/extensions/math.html>`__
instead of this one.
Mathtext may be included in two ways:
1. Inline, using the role::
This text uses inline math: :mathmpl:`\alpha > \beta`.
which produces:
This text uses inline math: :mathmpl:`\alpha > \beta`.
2. Standalone, using the directive::
Here is some standalone math:
.. mathmpl::
\alpha > \beta
which produces:
Here is some standalone math:
.. mathmpl::
\alpha > \beta
Options
-------
The ``mathmpl`` role and directive both support the following options:
fontset : str, default: 'cm'
The font set to use when displaying math. See :rc:`mathtext.fontset`.
fontsize : float
The font size, in points. Defaults to the value from the extension
configuration option defined below.
Configuration options
---------------------
The mathtext extension has the following configuration options:
mathmpl_fontsize : float, default: 10.0
Default font size, in points.
mathmpl_srcset : list of str, default: []
Additional image sizes to generate when embedding in HTML, to support
`responsive resolution images
<https://developer.mozilla.org/en-US/docs/Learn/HTML/Multimedia_and_embedding/Responsive_images>`__.
The list should contain additional x-descriptors (``'1.5x'``, ``'2x'``,
etc.) to generate (1x is the default and always included.)
"""
import hashlib
from pathlib import Path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import sphinx
from sphinx.errors import ConfigError, ExtensionError
import matplotlib as mpl
from matplotlib import _api, mathtext
from matplotlib.rcsetup import validate_float_or_None
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
    """Docutils node holding a raw LaTeX expression plus font options."""
    pass
def fontset_choice(arg):
    """Validate the role/directive ``fontset`` option against known font sets."""
    return directives.choice(arg, mathtext.MathTextParser._font_type_mapping)
def math_role(role, rawtext, text, lineno, inliner,
              options={}, content=[]):
    """The ``:mathmpl:`` role: build a latex_math node from the raw text."""
    # Extract the latex between the backticks of :mathmpl:`...`.
    i = rawtext.find('`')
    latex = rawtext[i+1:-1]
    node = latex_math(rawtext)
    node['latex'] = latex
    node['fontset'] = options.get('fontset', 'cm')
    node['fontsize'] = options.get('fontsize',
                                   METHOD_NAME.app.config.mathmpl_fontsize)
    return [node], []


# Option validators for the role, keyed by option name.
math_role.options = {'fontset': fontset_choice,
                     'fontsize': validate_float_or_None}
class MathDirective(Directive):
    """
    The ``.. mathmpl::`` directive, as documented in the module's docstring.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {'fontset': fontset_choice,
                   'fontsize': validate_float_or_None}

    def run(self):
        # Join the directive body lines into one LaTeX expression.
        latex = ''.join(self.content)
        node = latex_math(self.block_text)
        node['latex'] = latex
        node['fontset'] = self.options.get('fontset', 'cm')
        node['fontsize'] = self.options.get('fontsize',
                                            METHOD_NAME.app.config.mathmpl_fontsize)
        return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm', fontsize=10, dpi=100):
    """Render *latex* to a PNG file; return the math baseline depth.

    Returns 0 (after warning) if mathtext fails to render the expression.
    """
    with mpl.rc_context({'mathtext.fontset': fontset, 'font.size': fontsize}):
        try:
            depth = mathtext.math_to_image(
                f"${latex}$", filename, dpi=dpi, format="png")
        except Exception:
            _api.warn_external(f"Could not render math expression {latex}")
            depth = 0
    return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
    """Render *node*'s LaTeX to PNG file(s) and return an ``<img>`` tag.

    Writes the 1x image plus one extra image per entry of the
    ``mathmpl_srcset`` config value, and builds a matching ``srcset``
    attribute so hidpi browsers pick the sharper variant.
    """
    inline = isinstance(node.parent, nodes.TextElement)
    latex = node['latex']
    fontset = node['fontset']
    fontsize = node['fontsize']
    # Content-addressed name: identical expressions share one image file.
    name = 'math-{}'.format(
        hashlib.md5(f'{latex}{fontset}{fontsize}'.encode()).hexdigest()[-10:])
    destdir = Path(METHOD_NAME.app.builder.outdir, '_images', 'mathmpl')
    destdir.mkdir(parents=True, exist_ok=True)
    dest = destdir / f'{name}.png'
    depth = latex2png(latex, dest, fontset, fontsize=fontsize)
    srcset = []
    for size in METHOD_NAME.app.config.mathmpl_srcset:
        filename = f'{name}-{size.replace(".", "_")}.png'
        latex2png(latex, destdir / filename, fontset, fontsize=fontsize,
                  dpi=100 * float(size[:-1]))
        # BUG FIX: the srcset entry must point at the per-size file just
        # generated, not a placeholder.
        srcset.append(
            f'{METHOD_NAME.app.builder.imgpath}/mathmpl/{filename} {size}')
    if srcset:
        srcset = (f'srcset="{METHOD_NAME.app.builder.imgpath}/mathmpl/{name}.png, ' +
                  ', '.join(srcset) + '" ')
    if inline:
        cls = ''
    else:
        cls = 'class="center" '
    if inline and depth != 0:
        # Shift the image down so its math baseline lines up with the text.
        style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
    else:
        style = ''
    return (f'<img src="{METHOD_NAME.app.builder.imgpath}/mathmpl/{name}.png"'
            f' {srcset}{cls}{style}/>')
def _config_inited(app, config):
# Check for srcset hidpi images
for i, size in enumerate(app.config.mathmpl_srcset):
if size[-1] == 'x': # "2x" = "2.0"
try:
float(size[:-1])
except ValueError:
raise ConfigError(
f'Invalid value for mathmpl_srcset parameter: {size!r}. '
'Must be a list of strings with the multiplicative '
'factor followed by an "x". e.g. ["2.0x", "1.5x"]')
else:
raise ConfigError(
f'Invalid value for mathmpl_srcset parameter: {size!r}. '
'Must be a list of strings with the multiplicative '
'factor followed by an "x". e.g. ["2.0x", "1.5x"]')
def METHOD_NAME(app):
    """Sphinx extension entry point: register the mathmpl role/directive.

    Returns the extension metadata dict required by Sphinx.
    """
    # Stash the app so the role/directive/translators above can reach config.
    METHOD_NAME.app = app
    app.add_config_value('mathmpl_fontsize', 10.0, True)
    app.add_config_value('mathmpl_srcset', [], True)
    try:
        app.connect('config-inited', _config_inited)  # Sphinx 1.8+
    except ExtensionError:
        app.connect('env-updated', lambda app, env: _config_inited(app, None))

    # Add visit/depart methods to HTML-Translator:
    def visit_latex_math_html(self, node):
        source = self.document.attributes['source']
        self.body.append(latex2html(node, source))
    def depart_latex_math_html(self, node):
        pass

    # Add visit/depart methods to LaTeX-Translator:
    def visit_latex_math_latex(self, node):
        inline = isinstance(node.parent, nodes.TextElement)
        if inline:
            self.body.append('$%s$' % node['latex'])
        else:
            self.body.extend(['\\begin{equation}',
                              node['latex'],
                              '\\end{equation}'])
    def depart_latex_math_latex(self, node):
        pass

    app.add_node(latex_math,
                 html=(visit_latex_math_html, depart_latex_math_html),
                 latex=(visit_latex_math_latex, depart_latex_math_latex))
    app.add_role('mathmpl', math_role)
    app.add_directive('mathmpl', MathDirective)
    if sphinx.version_info < (1, 8):
        # Older Sphinx had no built-in math role/directive; provide one.
        app.add_role('math', math_role)
        app.add_directive('math', MathDirective)
    # BUG FIX: removed a stray trailing "|" after this return statement
    # that made the module unparsable.
    metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
    return metadata
from functools import update_wrapper
from functools import partial
from inspect import signature, Parameter
import logging
from parsl.app.errors import wrap_error
from parsl.app.app import AppBase
from parsl.dataflow.dflow import DataFlowKernelLoader
logger = logging.getLogger(__name__)
def remote_side_bash_executor(func, *args, **kwargs):
    """Executes the supplied function with *args and **kwargs to get a
    command-line to run, and then run that command-line using bash.

    Returns the bash process's exit code (0 on success); raises a
    parsl.app.errors exception on formatting failure, timeout, non-zero
    exit, or missing declared outputs.
    """
    # Imports are local because this function executes on the remote worker.
    import os
    import subprocess
    import parsl.app.errors as pe
    from parsl.utils import get_std_fname_mode
    if hasattr(func, '__name__'):
        func_name = func.__name__
    else:
        logger.warning('No name for the function. Potentially a result of parsl#2233')
        func_name = 'bash_app'
    executable = None
    # Try to run the func to compose the commandline
    try:
        # Execute the func to get the commandline
        executable = func(*args, **kwargs)
        if not isinstance(executable, str):
            raise ValueError(f"Expected a str for bash_app commandline, got {type(executable)}")
    except AttributeError as e:
        # If executable is set, the AttributeError came from string
        # formatting; otherwise the app returned None.
        if executable is not None:
            raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
        else:
            raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned None - with this exception: {}".format(func_name, e))
    except IndexError as e:
        raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
    except Exception as e:
        raise e
    # Updating stdout, stderr if values passed at call time.
    def METHOD_NAME(fdname):
        # fdname is 'stdout' or 'stderr'
        stdfspec = kwargs.get(fdname)  # spec is str name or tuple (name, mode)
        if stdfspec is None:
            return None
        fname, mode = get_std_fname_mode(fdname, stdfspec)
        try:
            # Create parent directories for the stream file if needed.
            if os.path.dirname(fname):
                os.makedirs(os.path.dirname(fname), exist_ok=True)
            fd = open(fname, mode)
        except Exception as e:
            raise pe.BadStdStreamFile(fname, e)
        return fd
    std_out = METHOD_NAME('stdout')
    std_err = METHOD_NAME('stderr')
    timeout = kwargs.get('walltime')
    if std_err is not None:
        # Echo the composed command line into stderr for debuggability.
        print('--> executable follows <--\n{}\n--> end executable <--'.format(executable), file=std_err, flush=True)
    returncode = None
    try:
        proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash', close_fds=False)
        proc.wait(timeout=timeout)
        returncode = proc.returncode
    except subprocess.TimeoutExpired:
        raise pe.AppTimeout("[{}] App exceeded walltime: {} seconds".format(func_name, timeout))
    except Exception as e:
        raise pe.AppException("[{}] App caught exception with returncode: {}".format(func_name, returncode), e)
    if returncode != 0:
        raise pe.BashExitFailure(func_name, proc.returncode)
    # TODO : Add support for globs here
    missing = []
    for outputfile in kwargs.get('outputs', []):
        fpath = outputfile.filepath
        if not os.path.exists(fpath):
            missing.extend([outputfile])
    if missing:
        raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
    return returncode
class BashApp(AppBase):
    """AppBase subclass whose wrapped function composes a bash command line
    that is executed remotely by remote_side_bash_executor."""
    def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=None):
        super().__init__(func, data_flow_kernel=data_flow_kernel, executors=executors, cache=cache, ignore_for_cache=ignore_for_cache)
        self.kwargs = {}
        # We duplicate the extraction of parameter defaults
        # to self.kwargs to ensure availability at point of
        # command string format. Refer: #349
        sig = signature(func)
        for s in sig.parameters:
            if sig.parameters[s].default is not Parameter.empty:
                self.kwargs[s] = sig.parameters[s].default
        # update_wrapper allows remote_side_bash_executor to masquerade as self.func
        # partial is used to attach the first arg the "func" to the remote_side_bash_executor
        # this is done to avoid passing a function type in the args which parsl.serializer
        # doesn't support
        remote_fn = partial(update_wrapper(remote_side_bash_executor, self.func), self.func)
        remote_fn.__name__ = self.func.__name__
        self.wrapped_remote_function = wrap_error(remote_fn)
    def __call__(self, *args, **kwargs):
        """Handle the call to a Bash app.
        Args:
            - Arbitrary
        Kwargs:
            - Arbitrary
        Returns:
            App_fut
        """
        # Call-time kwargs override the defaults captured in __init__.
        invocation_kwargs = {}
        invocation_kwargs.update(self.kwargs)
        invocation_kwargs.update(kwargs)
        if self.data_flow_kernel is None:
            dfk = DataFlowKernelLoader.dfk()
        else:
            dfk = self.data_flow_kernel
        app_fut = dfk.submit(self.wrapped_remote_function,
                             app_args=args,
                             executors=self.executors,
                             cache=self.cache,
                             ignore_for_cache=self.ignore_for_cache,
                             app_kwargs=invocation_kwargs)
        # BUG FIX: removed a stray trailing "|" artifact after this return.
        return app_fut
# Author: Stanislav Zidek
# See the LICENSE file for legal information regarding use of this file.
"""Abstract class for ECDSA."""
from .cryptomath import secureHash
class ECDSAKey(object):
    """This is an abstract base class for ECDSA keys.
    Particular implementations of ECDSA keys, such as
    :py:class:`~.python_ecdsakey.Python_ECDSAKey`
    ... more coming
    inherit from this.
    To create or parse an ECDSA key, don't use one of these classes
    directly. Instead, use the factory functions in
    :py:class:`~tlslite.utils.keyfactory`.
    """
    def __init__(self, public_key, private_key):
        """Create a new ECDSA key.
        If public_key or private_key are passed in, the new key
        will be initialized.
        :param public_key: ECDSA public key.
        :param private_key: ECDSA private key.
        """
        raise NotImplementedError()
    def __len__(self):
        """Return the size of the order of the curve of this key, in bits.
        :rtype: int
        """
        raise NotImplementedError()
    def hasPrivateKey(self):
        """Return whether or not this key has a private component.
        :rtype: bool
        """
        raise NotImplementedError()
    def _sign(self, data, hash_alg):
        """Backend hook: produce a DER-encoded signature over *data*."""
        raise NotImplementedError()
    def _hashAndSign(self, data, hAlg):
        """Backend hook: hash *data* with *hAlg* and sign the digest."""
        raise NotImplementedError()
    def _verify(self, signature, hash_bytes):
        """Backend hook: check *signature* against the digest *hash_bytes*."""
        raise NotImplementedError()
    def hashAndSign(self, bytes, rsaScheme=None, hAlg='sha1', sLen=None):
        """Hash and sign the passed-in bytes.
        This requires the key to have a private component. It performs
        a signature on the passed-in data with selected hash algorithm.
        :type bytes: bytes-like object
        :param bytes: The value which will be hashed and signed.
        :type rsaScheme: str
        :param rsaScheme: Ignored, present for API compatibility with RSA
        :type hAlg: str
        :param hAlg: The hash algorithm that will be used to hash data
        :type sLen: int
        :param sLen: Ignored, present for API compatibility with RSA
        :rtype: bytearray
        :returns: An ECDSA signature on the passed-in data.
        """
        hAlg = hAlg.lower()
        hashBytes = secureHash(bytearray(bytes), hAlg)
        return self.METHOD_NAME(hashBytes, padding=rsaScheme, hashAlg=hAlg,
                                saltLen=sLen)
    def hashAndVerify(self, sigBytes, bytes, rsaScheme=None, hAlg='sha1',
                      sLen=None):
        """Hash and verify the passed-in bytes with the signature.
        This verifies an ECDSA signature on the passed-in data
        with selected hash algorithm.
        :type sigBytes: bytearray
        :param sigBytes: An ECDSA signature, DER encoded.
        :type bytes: str or bytearray
        :param bytes: The value which will be hashed and verified.
        :type rsaScheme: str
        :param rsaScheme: Ignored, present for API compatibility with RSA
        :type hAlg: str
        :param hAlg: The hash algorithm that will be used
        :type sLen: int
        :param sLen: Ignored, present for API compatibility with RSA
        :rtype: bool
        :returns: Whether the signature matches the passed-in data.
        """
        hAlg = hAlg.lower()
        hashBytes = secureHash(bytearray(bytes), hAlg)
        return self.verify(sigBytes, hashBytes, rsaScheme, hAlg, sLen)
    def METHOD_NAME(self, bytes, padding=None, hashAlg="sha1", saltLen=None):
        """Sign the passed-in bytes.
        This requires the key to have a private component. It performs
        an ECDSA signature on the passed-in data.
        :type bytes: bytearray
        :param bytes: The value which will be signed (generally a binary
            encoding of hash output.
        :type padding: str
        :param padding: Ignored, present for API compatibility with RSA
        :type hashAlg: str
        :param hashAlg: name of hash that was used for calculating the bytes
        :type saltLen: int
        :param saltLen: Ignored, present for API compatibility with RSA
        :rtype: bytearray
        :returns: An ECDSA signature on the passed-in data.
        """
        sigBytes = self._sign(bytes, hashAlg)
        return sigBytes
    def verify(self, sigBytes, bytes, padding=None, hashAlg=None,
               saltLen=None):
        """Verify the passed-in bytes with the signature.
        This verifies a PKCS1 signature on the passed-in data.
        :type sigBytes: bytearray
        :param sigBytes: A PKCS1 signature.
        :type bytes: bytearray
        :param bytes: The value which will be verified.
        :type padding: str
        :param padding: Ignored, present for API compatibility with RSA
        :rtype: bool
        :returns: Whether the signature matches the passed-in data.
        """
        return self._verify(sigBytes, bytes)
    def acceptsPassword(self):
        """Return True if the write() method accepts a password for use
        in encrypting the private key.
        :rtype: bool
        """
        raise NotImplementedError()
    def write(self, password=None):
        """Return a string containing the key.
        :rtype: str
        :returns: A string describing the key, in whichever format (PEM)
            is native to the implementation.
        """
        raise NotImplementedError()
    @staticmethod
    def generate(bits):
        """Generate a new key with the specified curve.
        :rtype: ~tlslite.utils.ECDSAKey.ECDSAKey
        """
        # BUG FIX: removed a stray trailing "|" artifact after this raise.
        raise NotImplementedError()
# Data Parallel Control (dpctl)
#
# Copyright 2020-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Defines unit test cases for miscellaneous functions.
"""
import ctypes
import ctypes.util
import glob
import os
import os.path
import re
import subprocess
import sys
import pytest
import dpctl
def _get_mkl_version_if_present():
class MKLVersion(ctypes.Structure):
_fields_ = [
("MajorVersion", ctypes.c_int),
("MinorVersion", ctypes.c_int),
("UpdateVersion", ctypes.c_int),
("ProductStatus", ctypes.c_char_p),
("Build", ctypes.c_char_p),
("Processor", ctypes.c_char_p),
("Platform", ctypes.c_char_p),
]
lib = ctypes.util.find_library("mkl_rt")
if lib is None:
return None
try:
lib = ctypes.cdll.LoadLibrary(lib)
get_ver_fn = lib.mkl_get_version
except Exception:
return None
get_ver_fn.argtypes = []
get_ver_fn.restype = MKLVersion
mkl_ver = get_ver_fn()
return ".".join(
[
str(mkl_ver.MajorVersion),
str(mkl_ver.UpdateVersion),
str(mkl_ver.MinorVersion),
]
)
def test_get_include():
    """dpctl.get_include() yields a non-empty path to an existing dir."""
    include_dir = dpctl.get_include()
    assert type(include_dir) is str
    assert include_dir != ""
    assert os.path.isdir(include_dir)
def test_get_dpcppversion():
    """Intent of this test is to verify that libraries from dpcpp_cpp_rt
    conda package used at run-time are not from an older oneAPI. Since these
    libraries currently do not report the version, this test was using
    a proxy (version of Intel(R) Math Kernel Library).
    """
    incl_dir = dpctl.get_include()
    # The shared library lives one directory above the include dir.
    libs = glob.glob(os.path.join(incl_dir, "..", "*DPCTLSyclInterface*"))
    libs = sorted(libs)
    assert len(libs) > 0
    lib = ctypes.cdll.LoadLibrary(libs[0])
    fn = lib.DPCTLService_GetDPCPPVersion
    fn.restype = ctypes.c_char_p
    fn.argtypes = []
    dpcpp_ver = fn()
    assert len(dpcpp_ver) > 0
    dpcpp_ver = dpcpp_ver.decode("utf-8")
    mkl_ver = _get_mkl_version_if_present()
    if mkl_ver is not None:
        # NOTE(review): this is a lexicographic string comparison, not a
        # semantic version compare — presumably acceptable for the year-based
        # version strings involved, but worth confirming.
        if not mkl_ver >= dpcpp_ver:
            pytest.xfail(
                reason="Flaky test: Investigate Math Kernel Library "
                f"library version {mkl_ver} being older than "
                f"DPC++ version {dpcpp_ver} used to build dpctl"
            )
def test___version__():
    """dpctl.__version__ is a tagged, roughly-PEP-440 version string."""
    dpctl_ver = getattr(dpctl, "__version__", None)
    assert type(dpctl_ver) is str
    for marker in ("unknown", "untagged"):
        assert marker not in dpctl_ver
    # Reg expr from PEP-440, relaxed to allow for semantic variant
    # 0.9.0dev0 allowed, vs. PEP-440 compliant 0.9.0.dev0
    reg_expr = (
        r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))"
        r"*((a|b|rc)(0|[1-9][0-9]*))?(\.?post(0|[1-9][0-9]*))?(\.?dev("
        r"0|[1-9][0-9]*))?(\+.*)?$"
    )
    assert re.match(reg_expr, dpctl_ver) is not None
def test_dev_utils():
    """Exercise the syclinterface_diagnostics context manager: default and
    'error' verbosity work, bad verbosity and bad log_dir raise ValueError."""
    import tempfile
    import dpctl._diagnostics as dd
    ctx_mngr = dd.syclinterface_diagnostics
    try:
        device = dpctl.SyclDevice()
    except dpctl.SyclDeviceCreationError:
        pytest.skip("Default-constructed device could not be created")
    with ctx_mngr():
        device.parent_device
    with ctx_mngr(verbosity="error"):
        device.parent_device
    with pytest.raises(ValueError):
        # Unrecognized verbosity level must be rejected.
        with ctx_mngr(verbosity="blah"):
            device.parent_device
    with tempfile.TemporaryDirectory() as temp_dir:
        with ctx_mngr(log_dir=temp_dir):
            device.parent_device
    with pytest.raises(ValueError):
        # Nonexistent log directory must be rejected.
        with ctx_mngr(log_dir="/not_a_dir"):
            device.parent_device
def test_syclinterface():
    """Check per-platform layout of the installed DPCTLSyclInterface library:
    symlinked .so variants on Linux, .lib/.dll pair on Windows."""
    install_dir = os.path.dirname(os.path.abspath(dpctl.__file__))
    paths = glob.glob(os.path.join(install_dir, "*DPCTLSyclInterface*"))
    if "linux" in sys.platform:
        # Expect versioned .so files with at least one symlink among them.
        assert len(paths) > 1 and any(
            [os.path.islink(fn) for fn in paths]
        ), "All library instances are hard links"
    elif sys.platform in ["win32", "cygwin"]:
        exts = []
        for fn in paths:
            _, file_ext = os.path.splitext(fn)
            exts.append(file_ext.lower())
        assert (
            ".lib" in exts
        ), "Installation does not have DPCTLSyclInterface.lib"
        assert (
            ".dll" in exts
        ), "Installation does not have DPCTLSyclInterface.dll"
    else:
        raise RuntimeError("Unsupported system")
def METHOD_NAME():
    """`python -m dpctl --includes` exits cleanly and prints -I flags."""
    cmd = [sys.executable, "-m", "dpctl", "--includes"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    assert proc.stdout
    assert proc.stdout.decode("utf-8").startswith("-I")
def test_main_library():
    """`python -m dpctl --library` exits cleanly and prints -L flags."""
    cmd = [sys.executable, "-m", "dpctl", "--library"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    assert proc.stdout
    assert proc.stdout.decode("utf-8").startswith("-L")
def test_cmakedir():
    """`python -m dpctl --cmakedir` names a dir containing FindDpctl.cmake."""
    cmd = [sys.executable, "-m", "dpctl", "--cmakedir"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    assert proc.stdout
    cmake_dir = proc.stdout.decode("utf-8").strip()
    assert os.path.exists(os.path.join(cmake_dir, "FindDpctl.cmake"))
def test_main_full_list():
    """`python -m dpctl -f` succeeds; output expected when devices exist."""
    cmd = [sys.executable, "-m", "dpctl", "-f"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    if dpctl.get_num_devices() > 0:
        assert proc.stdout
        assert proc.stdout.decode("utf-8")
def test_main_long_list():
    """`python -m dpctl -l` succeeds; output expected when devices exist."""
    cmd = [sys.executable, "-m", "dpctl", "-l"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    if dpctl.get_num_devices() > 0:
        assert proc.stdout
        assert proc.stdout.decode("utf-8")
def test_main_summary():
    """`python -m dpctl -s` succeeds; output expected when devices exist."""
    cmd = [sys.executable, "-m", "dpctl", "-s"]
    proc = subprocess.run(cmd, capture_output=True)
    assert proc.returncode == 0
    if dpctl.get_num_devices() > 0:
        assert proc.stdout
        assert proc.stdout.decode("utf-8")
def test_main_warnings():
    """Combining -s with --includes/--cmakedir warns that the extra flags
    are ignored while still exiting successfully."""
    res = subprocess.run(
        [sys.executable, "-m", "dpctl", "-s", "--includes"], capture_output=True
    )
    assert res.returncode == 0
    assert res.stdout or dpctl.get_num_devices() == 0
    assert "UserWarning" in res.stderr.decode("utf-8")
    assert "is being ignored." in res.stderr.decode("utf-8")
    res = subprocess.run(
        [sys.executable, "-m", "dpctl", "-s", "--includes", "--cmakedir"],
        capture_output=True,
    )
    assert res.returncode == 0
    assert res.stdout or dpctl.get_num_devices() == 0
    assert "UserWarning" in res.stderr.decode("utf-8")
    # BUG FIX: removed a stray trailing "|" artifact after this assert.
    assert "are being ignored." in res.stderr.decode("utf-8")
from _typeshed import Incomplete
from _typeshed.wsgi import WSGIEnvironment
from collections.abc import ItemsView, Iterator, KeysView, MutableMapping, ValuesView
from datetime import date, datetime, timedelta
from typing import TypeVar, overload
from typing_extensions import Literal
from webob.descriptors import _AsymmetricProperty
_T = TypeVar("_T")
class RequestCookies(MutableMapping[str, str]):
    # Mutable mapping view over the cookies of a WSGI environ; mutations
    # write back into the environ's Cookie header.
    def __init__(self, environ: WSGIEnvironment) -> None: ...
    def __setitem__(self, name: str, value: str) -> None: ...
    def __getitem__(self, name: str) -> str: ...
    @overload
    def get(self, name: str, default: None = None) -> str | None: ...
    @overload
    def get(self, name: str, default: str | _T) -> str | _T: ...
    def __delitem__(self, name: str) -> None: ...
    def keys(self) -> KeysView[str]: ...
    def values(self) -> ValuesView[str]: ...
    def items(self) -> ItemsView[str, str]: ...
    def __contains__(self, name: object) -> bool: ...
    def __iter__(self) -> Iterator[str]: ...
    def __len__(self) -> int: ...
    def METHOD_NAME(self) -> None: ...
class Cookie(dict[str, Morsel]):
    # Parsed cookie collection mapping cookie name to its Morsel.
    def __init__(self, input: str | None = None) -> None: ...
    def load(self, data: str) -> None: ...
    def add(self, key: str | bytes, val: str | bytes) -> Morsel: ...
    def __setitem__(self, key: str | bytes, val: str | bytes) -> Morsel: ...  # type:ignore[override]
    def serialize(self, full: bool = True) -> str: ...
    def values(self) -> list[Morsel]: ...  # type:ignore[override]
    def __str__(self, full: bool = True) -> str: ...
class Morsel(dict[bytes, bytes | bool | None]):
    # A single cookie: its name/value plus Set-Cookie attributes exposed
    # as properties over the underlying bytes-keyed dict.
    name: bytes
    value: bytes
    def __init__(self, name: str | bytes, value: str | bytes) -> None: ...
    @property
    def path(self) -> bytes | None: ...
    @path.setter
    def path(self, v: bytes | None) -> None: ...
    @property
    def domain(self) -> bytes | None: ...
    @domain.setter
    def domain(self, v: bytes | None) -> None: ...
    @property
    def comment(self) -> bytes | None: ...
    @comment.setter
    def comment(self, v: bytes | None) -> None: ...
    # Asymmetric: setters accept rich types, getters return raw bytes.
    expires: _AsymmetricProperty[bytes | None, datetime | date | timedelta | int | str | bytes | None]
    max_age: _AsymmetricProperty[bytes | None, timedelta | int | str | bytes]
    @property
    def httponly(self) -> bool | None: ...
    @httponly.setter
    def httponly(self, v: bool) -> None: ...
    @property
    def secure(self) -> bool | None: ...
    @secure.setter
    def secure(self, v: bool) -> None: ...
    samesite: _AsymmetricProperty[bytes | None, Literal["strict", "lax", "none"] | None]
    def serialize(self, full: bool = True) -> str: ...
    def __str__(self, full: bool = True) -> str: ...
# Build a Set-Cookie header value from the given name/value and attributes.
def make_cookie(
    name: str | bytes,
    value: str | bytes | None,
    max_age: int | timedelta | None = None,
    path: str = "/",
    domain: str | None = None,
    secure: bool = False,
    httponly: bool = False,
    comment: str | None = None,
    samesite: Literal["strict", "lax", "none"] | None = None,
) -> str: ...
class JSONSerializer:
    # Serializes cookie payloads to/from JSON text.
    def dumps(self, appstruct): ...
    def loads(self, bstruct): ...
class Base64Serializer:
    # Wraps an inner serializer, base64-encoding its output.
    serializer: Incomplete
    def __init__(self, serializer: Incomplete | None = None) -> None: ...
    def dumps(self, appstruct): ...
    def loads(self, bstruct): ...
class SignedSerializer:
    # Wraps an inner serializer, appending/verifying an HMAC signature
    # derived from secret+salt with the configured hash algorithm.
    salt: Incomplete
    secret: Incomplete
    hashalg: Incomplete
    salted_secret: Incomplete
    digestmod: Incomplete
    digest_size: Incomplete
    serializer: Incomplete
    def __init__(self, secret, salt, hashalg: str = "sha512", serializer: Incomplete | None = None) -> None: ...
    def dumps(self, appstruct): ...
    def loads(self, bstruct): ...
class CookieProfile:
    # Reusable description of a cookie's attributes; bind() ties it to a
    # request so values can be read and Set-Cookie headers generated.
    cookie_name: Incomplete
    secure: Incomplete
    max_age: Incomplete
    httponly: Incomplete
    samesite: Incomplete
    path: Incomplete
    domains: Incomplete
    serializer: Incomplete
    request: Incomplete
    def __init__(
        self,
        cookie_name,
        secure: bool = False,
        max_age: Incomplete | None = None,
        httponly: Incomplete | None = None,
        samesite: Incomplete | None = None,
        path: str = "/",
        domains: Incomplete | None = None,
        serializer: Incomplete | None = None,
    ) -> None: ...
    def __call__(self, request): ...
    def bind(self, request): ...
    def get_value(self): ...
    def set_cookies(self, response, value, domains=..., max_age=..., path=..., secure=..., httponly=..., samesite=...): ...
    def get_headers(self, value, domains=..., max_age=..., path=..., secure=..., httponly=..., samesite=...): ...
class SignedCookieProfile(CookieProfile):
    # CookieProfile variant whose values are HMAC-signed with secret+salt.
    secret: Incomplete
    salt: Incomplete
    hashalg: Incomplete
    original_serializer: Incomplete
    def __init__(
        self,
        secret,
        salt,
        cookie_name,
        secure: bool = False,
        max_age: Incomplete | None = None,
        httponly: bool = False,
        samesite: Incomplete | None = None,
        path: str = "/",
        domains: Incomplete | None = None,
        hashalg: str = "sha512",
        serializer: Incomplete | None = None,
    ) -> None: ...
    # BUG FIX: removed a stray trailing "|" artifact after this stub line.
    def bind(self, request): ...
# Copyright 2023 Canonical Ltd.
# Licensed under the Apache V2, see LICENCE file for details.
import inspect
import subprocess
import uuid
from contextlib import contextmanager
from pathlib import Path
import pytest
from juju.client.jujudata import FileJujuData
from juju.controller import Controller
from juju.jasyncio import SingletonEventLoop
@pytest.fixture(scope="session")
def event_loop():
    """
    This fixture forces all the asyncio tests
    to use the same event loop for the whole session,
    closing it once the session ends.
    """
    loop = SingletonEventLoop().loop
    yield loop
    loop.close()
def is_bootstrapped():
    """Return True when `juju switch` succeeds and names a controller.

    Returns False when the juju binary is missing or reports no controller.
    """
    try:
        proc = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
        return (proc.returncode == 0
                and len(proc.stdout.decode().strip()) > 0)
    except FileNotFoundError:
        return False
# Skip marker for tests that need a live, bootstrapped Juju controller.
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
# Short random suffix so model names from this run don't collide with others.
test_run_nonce = uuid.uuid4().hex[-4:]
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.
    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # Created lazily on __aenter__.
        self._controller = None
    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller
    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
class CleanModel():
    """
    Context manager that automatically connects to the currently active
    controller, adds a fresh model, returns the connection to that model,
    and automatically disconnects and cleans up the model.
    The new model is also set as the current default for the controller
    connection.
    """
    def __init__(self, bakery_client=None):
        self._controller = None
        self._model = None
        self._model_uuid = None
        self._bakery_client = bakery_client
    async def __aenter__(self):
        # Model name embeds the run nonce, the calling test's name and a
        # per-model nonce so concurrent runs don't collide.
        model_nonce = uuid.uuid4().hex[-4:]
        frame = inspect.stack()[1]
        test_name = frame.function.replace('_', '-')
        jujudata = TestJujuData()
        self._controller = Controller(
            jujudata=jujudata,
            bakery_client=self._bakery_client,
        )
        controller_name = jujudata.current_controller()
        user_name = jujudata.accounts()[controller_name]['user']
        await self._controller.connect(controller_name)
        model_name = 'test-{}-{}-{}'.format(
            test_run_nonce,
            test_name,
            model_nonce,
        )
        self._model = await self._controller.add_model(model_name)
        # Change the JujuData instance so that it will return the new
        # model as the current model name, so that we'll connect
        # to it by default.
        jujudata.set_model(
            controller_name,
            user_name + "/" + model_name,
            self._model.info.uuid,
        )
        # save the model UUID in case test closes model
        self._model_uuid = self._model.info.uuid
        return self._model
    async def __aexit__(self, exc_type, exc, tb):
        await self._model.disconnect()
        # do not wait more than a minute for the model to be destroyed
        await self._controller.destroy_model(self._model_uuid, force=True, max_wait=60)
        await self._controller.disconnect()
class TestJujuData(FileJujuData):
    """FileJujuData that can be pointed at a temporary model so connections
    default to it without touching the on-disk Juju configuration."""
    def __init__(self):
        self.__controller_name = None
        self.__model_name = None
        self.__model_uuid = None
        super().__init__()
    def set_model(self, controller_name, model_name, model_uuid):
        # Override the "current" model reported to callers.
        self.__controller_name = controller_name
        self.__model_name = model_name
        self.__model_uuid = model_uuid
    def METHOD_NAME(self, *args, **kwargs):
        # Current-model lookup: prefer the override set via set_model().
        return self.__model_name or super().METHOD_NAME(*args, **kwargs)
    def models(self):
        # Splice the override model into the on-disk model listing.
        all_models = super().models()
        if self.__model_name is None:
            return all_models
        all_models.setdefault(self.__controller_name, {})
        all_models[self.__controller_name].setdefault('models', {})
        cmodels = all_models[self.__controller_name]['models']
        cmodels[self.__model_name] = {'uuid': self.__model_uuid}
        return all_models
@contextmanager
def patch_file(filename):
    """
    "Patch" a file so that its current contents are automatically restored
    when the context is exited.
    """
    filepath = Path(filename).expanduser()
    # Snapshot the bytes before the caller mutates the file.
    data = filepath.read_bytes()
    try:
        yield
    finally:
        # BUG FIX: removed a stray trailing "|" artifact after this call.
        # Restore even when the context body raised.
        filepath.write_bytes(data)
import numpy as np
import pyproj
from . import *
from datetime import datetime, timedelta
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_constant
from opendrift.models.physics_methods import wind_drift_factor_from_trajectory
from opendrift.models.oceandrift import OceanDrift
from opendrift.readers.basereader.variables import ReaderDomain
def test_get_variables_along_trajectory_and_wind_drift_factor_from_trajectory():
    """Sample currents/wind along a 3-point trajectory and derive the
    wind drift factor; expected values are regression constants."""
    o = OceanDrift(loglevel=50)
    o.add_readers_from_list([o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc',
        o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc'], lazy=False)
    # Three positions, three hours apart, starting at the reader start time.
    t = o.get_variables_along_trajectory(variables=['x_sea_water_velocity', 'y_sea_water_velocity', 'x_wind', 'y_wind'],
                                         lons=np.array([3.5, 4, 4.5]), lats=np.array([59.7, 60, 60.3]),
                                         times=[o.readers[list(o.readers)[0]].start_time+i*timedelta(hours=3) for i in range(3)])
    np.testing.assert_array_almost_equal(t['x_sea_water_velocity'], [-0.078685, -0.106489, -0.058386])
    np.testing.assert_array_almost_equal(t['x_wind'], [-8.308249, -13.063459, -11.09289])
    wdf, azimuth = wind_drift_factor_from_trajectory(t)
    np.testing.assert_array_almost_equal(wdf, [0.27189012, 0.20492421])
    np.testing.assert_array_almost_equal(azimuth, [73.0112213, 82.39749185])
def test_modulate_longitude_360():
    """Readers with a [0, 340] x-range report longitudes in [0, 360)."""
    class R360(ReaderDomain):
        # Minimal lon/lat reader stub spanning 0..340 degrees east.
        xmin = 0
        xmax = 340
        ymin = -80
        ymax = 80
        def __init__(self):
            self.proj4 = '+proj=lonlat +ellps=WGS84'
            self.crs = pyproj.CRS(self.proj4)
            self.proj = pyproj.Proj(self.proj4)
            super().__init__()
    r = R360()
    # In-range longitudes pass through unchanged.
    lons = np.linspace(0, 300, 100)
    assert (r.modulate_longitude(lons) == lons).all()
    # Negative longitudes wrap into the 0..360 convention.
    lons = np.array([-180, -90])
    assert (r.modulate_longitude(lons) == np.array([360-180, 360-90])).all()
    lons = np.array([0, 90])
    assert (r.modulate_longitude(lons) == np.array([0, 90])).all()
    lons = np.array([100, 180])
    assert (r.modulate_longitude(lons) == np.array([100, 180])).all()
    lons = np.array([240, 350])
    assert (r.modulate_longitude(lons) == np.array([240, 350])).all()
def test_modulate_longitude_180():
    """Readers with a [-150, 180] x-range report longitudes in [-180, 180]."""
    class R180(ReaderDomain):
        # Minimal lon/lat reader stub spanning -150..180 degrees.
        xmin = -150
        xmax = 180
        ymin = -80
        ymax = 80
        def __init__(self):
            self.proj4 = '+proj=lonlat +ellps=WGS84'
            self.crs = pyproj.CRS(self.proj4)
            self.proj = pyproj.Proj(self.proj4)
            super().__init__()
    r = R180()
    # In-range longitudes pass through unchanged.
    lons = np.linspace(-180, 150, 100)
    assert (r.modulate_longitude(lons) == lons).all()
    lons = np.array([-180, -90])
    assert (r.modulate_longitude(lons) == np.array([-180, -90])).all()
    lons = np.array([0, 90])
    assert (r.modulate_longitude(lons) == np.array([0, 90])).all()
    # Longitudes past 180 wrap into the negative half.
    lons = np.array([100, 180])
    assert (r.modulate_longitude(lons) == np.array([100, -180])).all()
    lons = np.array([240])
    assert (r.modulate_longitude(lons) == np.array([-120])).all()
def test_covers_positions(test_data):
    """Timeseries extraction at a fixed position from an AROME subset.

    NOTE(review): despite the name, this exercises
    get_timeseries_at_position rather than covers_positions — confirm
    intent with the original test suite.
    """
    reader_arome = reader_netCDF_CF_generic.Reader(
        test_data +
        '2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc')
    ts = reader_arome.get_timeseries_at_position(
        lon=12, lat=68, variables=['x_wind', 'y_wind'])
    # Regression constants for the bundled test file.
    assert len(ts['time']) == 49
    x_wind = ts['x_wind']
    assert len(x_wind) == 49
    np.testing.assert_almost_equal(x_wind[0], 2.836, 2)
    np.testing.assert_almost_equal(x_wind[-1], -0.667, 2)
def METHOD_NAME(test_data):
    """Drift responds to wind direction, and land_binary_mask can be
    derived from ocean depth via environment mapping."""
    # Wind from NE
    r = reader_constant.Reader({'wind_speed':5, 'wind_to_direction': 225,
                                'land_binary_mask': 0})
    o = OceanDrift(loglevel=50)
    o.set_config('general:use_auto_landmask', False)
    o.add_reader(r)
    o.seed_elements(lon=4, lat=60, time=datetime.now())
    o.run(steps=15)
    # Element drifts towards SW, as expected for wind blowing to 225 deg.
    np.testing.assert_almost_equal(o.elements.lon, 3.932, 3)
    np.testing.assert_almost_equal(o.elements.lat, 59.966, 3)
    # Wind from SW
    r = reader_constant.Reader({'wind_speed':5, 'wind_to_direction': 45,
                                'land_binary_mask': 0})
    o = OceanDrift(loglevel=50)
    o.set_config('general:use_auto_landmask', False)
    o.add_reader(r)
    o.seed_elements(lon=4, lat=60, time=datetime.now())
    o.run(steps=15)
    np.testing.assert_almost_equal(o.elements.lon, 4.068, 3)
    np.testing.assert_almost_equal(o.elements.lat, 60.034, 3)
    # land_binary_mask mapped from sea_floor_depth_below_sea_level
    r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
    assert 'land_binary_mask' not in r.derived_variables  # Disabled by default
    r.activate_environment_mapping('land_binary_mask_from_ocean_depth')
    assert 'land_binary_mask' in r.derived_variables
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class QCoroConan(ConanFile):
name = "qcoro"
license = "MIT"
homepage = "https://github.com/danvratil/qcoro"
url = "https://github.com/conan-io/conan-center-index"
description = "C++ Coroutines for Qt."
topics = ("coroutines", "qt")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"asan": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"asan": False,
}
generators = "cmake", "cmake_find_package_multi"
exports_sources = ["CMakeLists.txt"]
_cmake = None
@property
def METHOD_NAME(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
@property
def _compilers_minimum_version(self):
minimum_versions = {
"gcc": "10",
"Visual Studio": "17",
"msvc": "19.29",
"clang": "8",
"apple-clang": "13"
}
return minimum_versions
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def build_requirements(self):
self.build_requires("cmake/3.23.2")
def requirements(self):
self.requires("qt/6.3.1")
def validate(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 20)
def lazy_lt_semver(v1, v2):
lv1 = [int(v) for v in v1.split(".")]
lv2 = [int(v) for v in v2.split(".")]
min_length = min(len(lv1), len(lv2))
return lv1[:min_length] < lv2[:min_length]
#Special check for clang that can only be linked to libc++
if self.settings.compiler == "clang" and self.settings.compiler.libcxx != "libc++":
raise ConanInvalidConfiguration("imagl requires some C++20 features, which are available in libc++ for clang compiler.")
compiler_version = str(self.settings.compiler.version)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if not minimum_version:
self.output.warn("qcoro requires C++20. Your compiler is unknown. Assuming it supports C++20.")
elif lazy_lt_semver(compiler_version, minimum_version):
raise ConanInvalidConfiguration("qcoro requires some C++20 features, which your {} {} compiler does not support.".format(str(self.settings.compiler), compiler_version))
else:
print("Your compiler is {} {} and is compatible.".format(str(self.settings.compiler), compiler_version))
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self.METHOD_NAME, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["QCORO_BUILD_EXAMPLES"] = False
self._cmake.definitions["QCORO_ENABLE_ASAN"] = self.options.asan
self._cmake.definitions["BUILD_TESTING"] = False
self._cmake.definitions["QCORO_WITH_QTDBUS"] = self.options["qt"].with_dbus
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("*", dst="licenses", src=os.path.join(self.METHOD_NAME, "LICENSES"))
cmake = self._configure_cmake()
cmake.install()
for mask in ["Find*.cmake", "*Config*.cmake", "*-config.cmake", "*Targets*.cmake"]:
tools.remove_files_by_mask(self.package_folder, mask)
def package_info(self):
self.cpp_info.filenames["cmake_find_package"] = "QCoro6"
self.cpp_info.filenames["cmake_find_package_multi"] = "QCoro6"
self.cpp_info.set_property("cmake_file_name", "QCoro6")
self.cpp_info.names["cmake_find_package"] = "QCoro"
self.cpp_info.names["cmake_find_package_multi"] = "QCoro"
self.cpp_info.components["qcoro-core"].set_property("cmake_target_name", "QCoro::Core")
self.cpp_info.components["qcoro-core"].names["cmake_find_package"] = "Core"
self.cpp_info.components["qcoro-core"].names["cmake_find_package_multi"] = "Core"
self.cpp_info.components["qcoro-core"].libs = ["QCoro6Core"]
self.cpp_info.components["qcoro-core"].includedirs.append(os.path.join("include", "qcoro6", "qcoro"))
self.cpp_info.components["qcoro-core"].requires = ["qt::qtCore"]
self.cpp_info.components["qcoro-core"].build_modules["cmake_find_package"].append(os.path.join("lib", "cmake", "QCoro6Coro", "QCoroMacros.cmake"))
self.cpp_info.components["qcoro-core"].build_modules["cmake_find_package_multi"].append(os.path.join("lib", "cmake", "QCoro6Coro", "QCoroMacros.cmake"))
self.cpp_info.components["qcoro-core"].builddirs.append(os.path.join("lib", "cmake", "QCoro6Coro"))
self.cpp_info.components["qcoro-network"].set_property("cmake_target_name", "QCoro::Network")
self.cpp_info.components["qcoro-network"].names["cmake_find_package"] = "Network"
self.cpp_info.components["qcoro-network"].names["cmake_find_package_multi"] = "Network"
self.cpp_info.components["qcoro-network"].libs = ["QCoro6Network"]
self.cpp_info.components["qcoro-network"].requires = ["qt::qtNetwork"]
if self.options["qt"].with_dbus:
self.cpp_info.components["qcoro-dbus"].set_property("cmake_target_name", "QCoro::DBus")
self.cpp_info.components["qcoro-dbus"].names["cmake_find_package"] = "DBus"
self.cpp_info.components["qcoro-dbus"].names["cmake_find_package_multi"] = "DBus"
self.cpp_info.components["qcoro-dbus"].libs = ["QCoroDBus"]
self.cpp_info.components["qcoro-core"].requires = ["qt::qtDBus"] |
7,553 | builtin filters | """
FormES
--------
"""
from copy import copy
from datetime import datetime
from jsonobject.exceptions import BadValueError
from casexml.apps.case.exceptions import PhoneDateValueError
from casexml.apps.case.xml.parser import (
CaseGenerationException,
case_update_from_block,
)
from couchforms.geopoint import GeoPoint
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.es.mappings.const import NULL_VALUE
from . import filters
from .client import ElasticDocumentAdapter, create_document_adapter
from .const import (
HQ_FORMS_INDEX_CANONICAL_NAME,
HQ_FORMS_INDEX_NAME,
HQ_FORMS_SECONDARY_INDEX_NAME,
)
from .es_query import HQESQuery
from .index.settings import IndexSettingsKey
class FormES(HQESQuery):
    """Elasticsearch query builder for form documents."""
    index = HQ_FORMS_INDEX_CANONICAL_NAME
    default_filters = {
        'is_xform_instance': filters.term("doc_type", "xforminstance"),
        'has_xmlns': filters.exists("xmlns"),
        'has_user': filters.exists("form.meta.userID"),
        'has_domain': filters.exists("domain"),
    }

    @property
    def METHOD_NAME(self):
        """Form-specific filter helpers plus those inherited from HQESQuery."""
        form_filters = [
            form_ids,
            xmlns,
            app,
            submitted,
            completed,
            user_id,
            user_type,
            user_ids_handle_unknown,
            updating_cases,
        ]
        return form_filters + super(FormES, self).METHOD_NAME

    def user_aggregation(self):
        """Bucket matching forms by submitting user id."""
        return self.terms_aggregation('form.meta.userID', 'user')

    def domain_aggregation(self):
        """Bucket matching forms by domain."""
        return self.terms_aggregation('domain.exact', 'domain')

    def only_archived(self):
        """Include only archived forms, which are normally excluded"""
        without_default = self.remove_default_filter('is_xform_instance')
        return without_default.filter(filters.doc_type('xformarchived'))
class ElasticForm(ElasticDocumentAdapter):
    """Adapter that maps ``XFormInstance`` JSON into the Elastic forms index."""

    settings_key = IndexSettingsKey.FORMS
    canonical_name = HQ_FORMS_INDEX_CANONICAL_NAME

    @property
    def mapping(self):
        # Imported lazily to avoid import cycles at module load time.
        from .mappings.xform_mapping import XFORM_MAPPING
        return XFORM_MAPPING

    @property
    def model_cls(self):
        from corehq.form_processor.models.forms import XFormInstance
        return XFormInstance

    # Fix: first parameter is named ``cls`` and the method is called via
    # ``super()._from_dict`` like a classmethod, but the decorator was missing,
    # so a direct ``ElasticForm._from_dict(doc)`` call would bind ``doc`` to
    # ``cls``.  NOTE(review): confirm the base adapter declares this as a
    # classmethod as well.
    @classmethod
    def _from_dict(cls, xform_dict):
        """
        Takes in a xform dict and applies required transformation to make it suitable for ES.

        :param xform_dict: an instance of ``dict`` which is ``XFormInstance.to_json()``
        """
        from casexml.apps.case.xform import extract_case_blocks
        from corehq.apps.receiverwrapper.util import get_app_version_info
        from corehq.pillows.utils import format_form_meta_for_es, get_user_type
        from corehq.pillows.xform import is_valid_date

        # create shallow copy of form object and case objects
        # that will be modified in tranformation
        form = xform_dict['form'] = copy(xform_dict['form'])
        if 'case' in form:
            if isinstance(form['case'], dict):
                form['case'] = copy(form['case'])
            elif isinstance(form['case'], list):
                form['case'] = [copy(case) for case in form['case']]

        user_id = None
        if 'meta' in form:
            form_meta = form['meta'] = copy(form['meta'])
            # Normalize invalid timestamps to None so ES date parsing succeeds.
            if not is_valid_date(form_meta.get('timeEnd', None)):
                form_meta['timeEnd'] = None
            if not is_valid_date(form_meta.get('timeStart', None)):
                form_meta['timeStart'] = None
            # Some docs have their @xmlns and #text here
            if isinstance(form_meta.get('appVersion'), dict):
                # Fix: the reformatted meta was only kept in the local variable,
                # so the formatted appVersion (and the version fields set below)
                # never made it into the indexed document.
                form_meta = form['meta'] = format_form_meta_for_es(form_meta)
            app_version_info = get_app_version_info(
                xform_dict['domain'],
                xform_dict.get('build_id'),
                xform_dict.get('version'),
                form_meta,
            )
            form_meta['commcare_version'] = app_version_info.commcare_version
            form_meta['app_build_version'] = app_version_info.build_version
            user_id = form_meta.get('userID', None)
            try:
                geo_point = GeoPoint.from_string(xform_dict['form']['meta']['location'])
                form_meta['geo_point'] = geo_point.lat_lon
            except (KeyError, BadValueError):
                # Missing or malformed location -> no geo point for this form.
                form_meta['geo_point'] = None
        xform_dict['user_type'] = get_user_type(user_id)
        xform_dict['inserted_at'] = json_format_datetime(datetime.utcnow())

        try:
            case_blocks = extract_case_blocks(xform_dict)
        except PhoneDateValueError:
            # Unparseable phone dates: skip case-block post-processing entirely.
            pass
        else:
            for case_dict in case_blocks:
                for date_modified_key in ['date_modified', '@date_modified']:
                    if not is_valid_date(case_dict.get(date_modified_key, None)):
                        if case_dict.get(date_modified_key) == '':
                            case_dict[date_modified_key] = None
                        else:
                            case_dict.pop(date_modified_key, None)

                # convert all mapped dict properties to nulls if they are empty strings
                for object_key in ['index', 'attachment', 'create', 'update']:
                    if object_key in case_dict and not isinstance(case_dict[object_key], dict):
                        case_dict[object_key] = None

            try:
                xform_dict["__retrieved_case_ids"] = list(set(case_update_from_block(cb).id for cb in case_blocks))
            except CaseGenerationException:
                xform_dict["__retrieved_case_ids"] = []

        if 'backend_id' not in xform_dict:
            xform_dict['backend_id'] = 'sql'

        return super()._from_dict(xform_dict)
# Module-level adapter instance used throughout the app to read/write the
# forms index (primary plus optional secondary index for reindex migrations).
form_adapter = create_document_adapter(
    ElasticForm,
    HQ_FORMS_INDEX_NAME,
    "xform",
    secondary=HQ_FORMS_SECONDARY_INDEX_NAME,
)
def form_ids(form_ids):
    """Filter forms on the document ``_id`` field."""
    return filters.term('_id', form_ids)
def xmlns(xmlnss):
    """Filter forms on the exact ``xmlns`` value(s)."""
    return filters.term('xmlns.exact', xmlnss)
def app(app_ids):
    """Filter forms on ``app_id``."""
    return filters.term('app_id', app_ids)
def submitted(gt=None, gte=None, lt=None, lte=None):
    """Range-filter forms on server receipt time (``received_on``)."""
    return filters.date_range('received_on', gt, gte, lt, lte)
def completed(gt=None, gte=None, lt=None, lte=None):
    """Range-filter forms on device completion time (``form.meta.timeEnd``)."""
    return filters.date_range('form.meta.timeEnd', gt, gte, lt, lte)
def user_id(user_ids):
    """Filter on ``form.meta.userID``; ``None`` entries map to ``NULL_VALUE``."""
    if not isinstance(user_ids, (list, set, tuple)):
        user_ids = [user_ids]
    normalized = [NULL_VALUE if uid is None else uid for uid in user_ids]
    return filters.term('form.meta.userID', normalized)
def user_type(user_types):
    """Filter on the denormalized ``user_type`` field."""
    return filters.term("user_type", user_types)
def user_ids_handle_unknown(user_ids):
    """Like :func:`user_id`, but a ``None`` entry also matches forms whose
    ``form.meta.userID`` is missing entirely."""
    wants_missing = None in user_ids
    known_ids = [uid for uid in user_ids if uid]
    if not wants_missing:
        return user_id(known_ids)
    if known_ids:
        return filters.OR(
            user_id(known_ids),
            filters.missing('form.meta.userID'),
        )
    return filters.missing('form.meta.userID')
def updating_cases(case_ids):
    """return only those forms that have case blocks that touch the cases listed in `case_ids`

    ``__retrieved_case_ids`` is populated by ``ElasticForm._from_dict`` from
    the form's case blocks at index time.
    """
    return filters.term("__retrieved_case_ids", case_ids)
7,554 | setup default | import re
from avocado.utils import cpu as cpu_utils
from virttest import cpu
from virttest import utils_misc
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from provider.numa import numa_base
def METHOD_NAME(test_obj):
    """
    Default setup function for the test

    :param test_obj: NumaTest object
    """
    cpus_online = int(cpu_utils.online_count())
    # The configured guest topology needs at least 16 host cpus to back it.
    if cpus_online < 16:
        test_obj.test.cancel("The test needs 16 online "
                             "host cpus at least, but "
                             "only %d exist" % cpus_online)
    test_obj.setup()
    test_obj.test.log.debug("Step: setup is done")
def prepare_vm_xml(test_obj):
    """
    Customize the vm xml

    :param test_obj: NumaTest object
    :return: VMXML object updated
    """
    # NOTE(review): eval() on test parameters executes arbitrary expressions;
    # acceptable for trusted cartesian configs, but ast.literal_eval would be
    # safer for these literal lists/dicts.
    numa_cells = eval(test_obj.params.get('numa_cells', '[]'))
    vcpu_num = int(test_obj.params.get('vcpu'))
    cpu_topology = eval(test_obj.params.get('cpu_topology', '{}'))
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(test_obj.vm.name)
    if numa_cells:
        # Reuse an existing <cpu> element if present, dropping any stale
        # <topology> so the configured one below takes effect.
        if vmxml.xmltreefile.find('cpu'):
            cpuxml = vmxml.cpu
            if cpuxml.xmltreefile.find('topology'):
                del cpuxml.topology
        else:
            cpuxml = vm_xml.VMCPUXML()
        cpuxml.topology = cpu_topology
        cpuxml.numa_cell = numa_cells
        vmxml.cpu = cpuxml
        vmxml.vcpu = vcpu_num
    test_obj.test.log.debug("Step: vm xml before defining:\n%s", vmxml)
    return vmxml
def verify_vm_cpu_info(vm_session, test_obj, cpu_info_type, cmd):
    """
    Verify vm's cpu information

    :param vm_session: vm session
    :param test_obj: NumaTest object
    :param cpu_info_type: str, specified cpu info type, like 'cores', 'threads'
    :param cmd: str, the command used to check the cpu info
    """
    status, output = vm_session.cmd_status_output(cmd, timeout=300)
    if status:
        test_obj.test.error("Can't get cpu info with command '%s'" % cmd)
    cpu_info = eval(test_obj.params.get('cpu_topology'))[cpu_info_type]
    if cpu_info_type == 'cores':
        # The guest-side probe counts cores across all dies, so scale the
        # per-die 'cores' value by the number of dies before comparing.
        dies = eval(test_obj.params.get('cpu_topology'))['dies']
        cpu_info = str(int(cpu_info) * int(dies))
    if cpu_info != output.strip():
        test_obj.test.fail('Guest cpu %s is expected '
                           'to be %s, but found %s' % (cpu_info_type,
                                                       cpu_info,
                                                       output.strip()))
    else:
        test_obj.test.log.debug("Step: check vm cpu %s "
                                "to be '%s': PASS", cpu_info_type, cpu_info)
def verify_vm_numa_node(vm_session, test_obj):
    """
    Verify vm's numa node information

    :param vm_session: vm session
    :param test_obj: NumaTest object
    """
    numa_cell_0 = eval(test_obj.params.get('numa_cell_0'))
    numa_cell_1 = eval(test_obj.params.get('numa_cell_1'))
    vm_numainfo = utils_misc.NumaInfo(session=vm_session)
    vm_cpus = vm_numainfo.get_all_node_cpus()
    vm_mems = vm_numainfo.get_all_node_meminfo()
    numa_cell_list = [numa_cell_0, numa_cell_1]
    for node_num in [0, 1]:
        expected_cpu_list = cpu.cpus_parser(numa_cell_list[node_num]['cpus'])
        expected_cpu_list = ' '.join(['%d' % i for i in expected_cpu_list])
        if vm_cpus[node_num].strip() != expected_cpu_list:
            # Fix: ``.strip()`` was previously applied to the % argument tuple
            # (an AttributeError at failure time), not to the cpu string.
            test_obj.test.fail("Guest node %d cpus are "
                               "expected to be '%s', but found "
                               "'%s'" % (node_num,
                                         expected_cpu_list,
                                         vm_cpus[node_num].strip()))
        else:
            test_obj.test.log.debug("Step: check guest node "
                                    "%d cpus to be '%s': "
                                    "PASS",
                                    node_num,
                                    numa_cell_list[node_num]['cpus'])
        # Guest-visible MemTotal is at most the configured cell size
        # (kernel reserves some memory).
        if int(vm_mems[node_num]['MemTotal']) > int(numa_cell_list[node_num]['memory']):
            test_obj.test.fail("Guest node %d memory is "
                               "expected to be less than '%s', but found "
                               "'%s'" % (node_num,
                                         numa_cell_list[node_num]['memory'],
                                         vm_mems[node_num]['MemTotal']))
        else:
            test_obj.test.log.debug("Step: check guest node "
                                    "%d memory to be '%s': "
                                    "PASS",
                                    node_num,
                                    vm_mems[node_num]['MemTotal'])
def verify_dmesg_vm_numa_mem(vm_session, test_obj):
    """
    Verify dmesg for vm's numa memory

    :param vm_session: vm session
    :param test_obj: NumaTest object
    """
    cmd = test_obj.params.get('check_dmesg_cmd')
    search_patterns = [test_obj.params.get('pattern_dmesg_numa_node0'),
                       test_obj.params.get('pattern_dmesg_numa_node1')]
    status, output = vm_session.cmd_status_output(cmd, timeout=300)
    test_obj.test.log.debug("Output with command '%s': \n%s", cmd, output)
    if status:
        test_obj.test.error("Can't get results with command '%s'" % cmd)
    for search_pattern in search_patterns:
        if re.search(search_pattern, output):
            test_obj.test.log.debug("Step: Check vm numa "
                                    "node memory in dmesg "
                                    "with pattern '%s': "
                                    "PASS", search_pattern)
        else:
            test_obj.test.fail("The vm numa node memory in dmesg "
                               "is expected to match '%s', "
                               "but not found" % search_pattern)
def run_default(test_obj):
    """
    Default run function for the test

    :param test_obj: NumaTest object
    """
    test_obj.test.log.debug("Step: prepare vm xml")
    vmxml = prepare_vm_xml(test_obj)
    test_obj.test.log.debug("Step: define vm")
    virsh.define(vmxml.xml, **test_obj.virsh_dargs)
    if not test_obj.vm.is_alive():
        test_obj.test.log.debug("Step: start vm")
        test_obj.vm.start()
    test_obj.test.log.debug("After vm is started, vm xml:\n"
                            "%s", vm_xml.VMXML.new_from_dumpxml(test_obj.vm.name))
    vm_session = test_obj.vm.wait_for_login()
    # Guest-side shell probes, one per cpu-topology dimension.
    cmds = {'sockets': 'cat /proc/cpuinfo|grep "physical id"|sort -u|wc -l',
            'cores': 'cat /proc/cpuinfo|grep "core id"|sort|uniq|wc -l',
            'dies': 'cat /sys/devices/system/cpu/cpu*/topology/die_id|sort|uniq|wc -l',
            'threads': 'lscpu|grep Thread|cut -d":" -f2'}
    for cpu_info_type, cpu_cmd in cmds.items():
        verify_vm_cpu_info(vm_session, test_obj, cpu_info_type, cpu_cmd)
    verify_vm_numa_node(vm_session, test_obj)
    verify_dmesg_vm_numa_mem(vm_session, test_obj)
def teardown_default(test_obj):
    """
    Default teardown function for the test

    :param test_obj: NumaTest object
    """
    test_obj.teardown()
    test_obj.test.log.debug("Step: teardown is done")
def run(test, params, env):
    """
    Test numa topology together with cpu topology

    :param test: avocado test object
    :param params: test parameters
    :param env: test environment object holding the vm
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    numatest_obj = numa_base.NumaTest(vm, params, test)
    try:
        METHOD_NAME(numatest_obj)
        run_default(numatest_obj)
    finally:
        # Always clean up, even when setup/run fails.
        teardown_default(numatest_obj)
7,555 | start | #!/usr/bin/env python3
import stat
import rospy
import os
from subprocess import Popen, PIPE
# If DRI_NAME is not set by user, use card0
DRI_PATH = os.path.join("/dev/dri", os.environ.get("DRI_NAME", "card0"))
EXERCISE = "rescue_people_newmanager"
TIMEOUT = 30
MAX_ATTEMPT = 2
# Check if acceleration can be enabled
def check_device(device_path):
    """Return True if *device_path* exists and is a character device
    (i.e. a usable DRI render node for GPU acceleration)."""
    try:
        return stat.S_ISCHR(os.lstat(device_path)[stat.ST_MODE])
    except OSError:
        # Fix: previously a bare ``except:`` swallowed every exception;
        # only filesystem errors (missing/unreadable path) mean "no device".
        return False
# Spawn new process
def spawn_process(args, insert_vglrun=False):
    """Spawn *args* as a subprocess with line-buffered text stdout.

    :param args: argv list for the child process (not modified)
    :param insert_vglrun: prefix the command with ``vglrun`` for GPU accel
    :return: the ``Popen`` object
    """
    # Fix: previously mutated the caller's list via args.insert(0, "vglrun").
    argv = (["vglrun"] + list(args)) if insert_vglrun else list(args)
    process = Popen(argv, stdout=PIPE, bufsize=1, universal_newlines=True)
    return process
class Test():
    """Readiness probes for each piece of the simulator stack."""

    def gazebo(self):
        """Gazebo is up once its model-properties service answers within TIMEOUT."""
        rospy.logwarn("[GAZEBO] Launching")
        try:
            rospy.wait_for_service("/gazebo/get_model_properties", TIMEOUT)
            return True
        except rospy.ROSException:
            return False

    def px4(self):
        """Poll px4-commander until the prearm check passes or TIMEOUT elapses."""
        rospy.logwarn("[PX4-SITL] Launching")
        start_time = rospy.get_time()
        args = ["./PX4-Autopilot/build/px4_sitl_default/bin/px4-commander",
                "--instance", "0", "check"]
        while rospy.get_time() - start_time < TIMEOUT:
            process = spawn_process(args, insert_vglrun=False)
            with process.stdout:
                for line in iter(process.stdout.readline, ''):
                    if ("Prearm check: OK" in line):
                        return True
            # Retry after a short pause until the overall timeout expires.
            rospy.sleep(2)
        return False

    def mavros(self, ns=""):
        """MAVROS is up once its arming service answers within TIMEOUT.

        :param ns: optional ROS namespace prefix for the service
        """
        rospy.logwarn("[MAVROS] Launching")
        try:
            rospy.wait_for_service(ns + "/mavros/cmd/arming", TIMEOUT)
            return True
        except rospy.ROSException:
            return False
class Launch():
    """Brings up roscore, then Gazebo, PX4 and MAVROS with retry logic."""

    def __init__(self):
        self.test = Test()
        # Enable VirtualGL only when a DRI character device is available.
        self.acceleration_enabled = check_device(DRI_PATH)

        # Start roscore
        args = ["/opt/ros/noetic/bin/roscore"]
        spawn_process(args, insert_vglrun=False)

        rospy.init_node("launch", anonymous=True)

    def METHOD_NAME(self):
        """Launch each component in order, retrying up to MAX_ATTEMPT times;
        aborts (returns) on the first component that never becomes ready."""
        ######## LAUNCH GAZEBO ########
        args = ["/opt/ros/noetic/bin/roslaunch",
                "/RoboticsAcademy/exercises/static/exercises/" +
                EXERCISE + "/launch/ros1_noetic/gazebo.launch",
                "--wait",
                "--log"
                ]
        attempt = 1
        while True:
            spawn_process(args, insert_vglrun=self.acceleration_enabled)
            if self.test.gazebo() == True:
                break
            if attempt == MAX_ATTEMPT:
                rospy.logerr("[GAZEBO] Launch Failed")
                return
            attempt = attempt + 1

        ######## LAUNCH PX4 ########
        args = ["/opt/ros/noetic/bin/roslaunch",
                "/RoboticsAcademy/exercises/static/exercises/" +
                EXERCISE + "/launch/ros1_noetic/px4.launch",
                "--log"
                ]
        attempt = 1
        while True:
            spawn_process(args, insert_vglrun=self.acceleration_enabled)
            if self.test.px4() == True:
                break
            if attempt == MAX_ATTEMPT:
                rospy.logerr("[PX4] Launch Failed")
                return
            attempt = attempt + 1

        ######## LAUNCH MAVROS ########
        args = ["/opt/ros/noetic/bin/roslaunch",
                "/RoboticsAcademy/exercises/static/exercises/" +
                EXERCISE + "/launch/ros1_noetic/mavros.launch",
                "--log"
                ]
        attempt = 1
        while True:
            spawn_process(args, insert_vglrun=self.acceleration_enabled)
            if self.test.mavros() == True:
                break
            if attempt == MAX_ATTEMPT:
                rospy.logerr("[MAVROS] Launch Failed")
                return
            attempt = attempt + 1
if __name__ == "__main__":
launch = Launch()
launch.METHOD_NAME()
with open("/drones_launch.log", "w") as f:
f.write("success") |
7,556 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vmss rolling-upgrade start",
)
class Start(AAZCommand):
    """Start a rolling upgrade to move all virtual machine scale set instances to the latest available Platform Image OS version. Instances which are already running the latest available OS version are not affected.
    """

    # NOTE: generated by aaz-dev-tools; keep manual edits minimal and in sync
    # with the generator.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachinescalesets/{}/osrollingupgrade", "2022-11-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Long-running operation: return a poller with no result transform.
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
            required=True,
        )
        _args_schema.virtual_machine_scale_set_name = AAZStrArg(
            options=["-n", "--name", "--vm-scale-set-name", "--virtual-machine-scale-set-name"],
            help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`.",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualMachineScaleSetRollingUpgradesStartOSUpgrade(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualMachineScaleSetRollingUpgradesStartOSUpgrade(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # NOTE(review): the 202 and 200 branches below are identical; the
            # duplication comes from the code generator.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmScaleSetName", self.ctx.args.virtual_machine_scale_set_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # The operation returns no body on success.
            pass
class _StartHelper:
    """Helper class for Start"""
    # Generated placeholder; serialization helpers for Start would live here.


__all__ = ["Start"]
7,557 | fsym | import numpy as np
from molSimplify.Classes.mol3D import mol3D
from molSimplify.Scripts.geometry import distance
# xyzf = '/Users/tzuhsiungyang/Downloads/cuacetate1k2acetate1o-pyridylphenyl1_0_1_RIJCOSX-B3LYP-D3_BS_TS-ar-carboxylation.numfreq.xyz'
def fpriority(xyzf):
    """Compute a priority value for each atom bonded to the metal.

    The integer part of each priority is the bond-order-weighted atomic
    number of the first-sphere atom (built by *string repetition*, e.g.
    bond order 2 on atno 6 gives "66" -> 66); the fractional part is a
    digit string accumulated from the atoms further out, used to break
    ties between first-sphere atoms of the same element.

    :param xyzf: path of an xyz file readable by mol3D
    :return: list of float priorities, one per first-sphere atom
    """
    # setting properties
    xyz = mol3D()
    xyz.readfromxyz(xyzf)
    # setting up variables
    fpriority_list = []
    fidx_list = []
    sidx_list = []
    satno_list = []
    ref_list = []
    exit_signal = True
    # getting bond-order matrix
    xyz.convert2OBMol()
    BOMatrix = xyz.populateBOMatrix()
    # preping for the loop
    fidx_list.append(xyz.findMetal())
    for i in range(len(fidx_list)):
        for fidx in fidx_list[i]:
            for sidx in xyz.getBondedAtoms(fidx):
                sidx_list.append([sidx])
    for i in range(len(fidx_list)):
        for fidx in fidx_list[i]:
            for j in range(len(sidx_list)):
                for sidx in sidx_list[j]:
                    BO = int(BOMatrix[fidx][sidx])
                    if BO == 0:
                        BO = 1
                    satno_str = str(xyz.getAtom(sidx).atno)
                    satno_list.append(int(BO * satno_str))
    for satno in sorted(set(satno_list)):
        satnocount = satno_list.count(satno)
        if satnocount > 1:
            # Fix: was ``atno is satno`` -- identity comparison only works for
            # small cached ints and silently fails for larger values.
            # NOTE(review): when several atnos are duplicated, only the last
            # one's selection survives here (s_sel_list is overwritten each
            # iteration); behavior preserved as-is.
            s_sel_list = [i for i, atno in enumerate(
                satno_list) if atno == satno]
            exit_signal = False
    for i in range(len(fidx_list)):
        for fidx in fidx_list[i]:
            ref_list.append(fidx)
    # starting the loop: walk outward sphere by sphere until no new atoms
    tidx_list = []
    tatno_list = []
    for i in range(len(sidx_list)):
        tidx_list.append([])
        tatno_list.append([])
    while not exit_signal:
        for i in s_sel_list:
            t_list = []
            for sidx in sidx_list[i]:
                for tidx in xyz.getBondedAtoms(sidx):
                    if tidx not in ref_list:
                        t_list.append(tidx)
            tidx_list[i] = t_list
        for i in s_sel_list:
            for sidx in sidx_list[i]:
                atno_list = tatno_list[i]
                ls = []
                for j in s_sel_list:
                    for tidx in tidx_list[j]:
                        BO = int(BOMatrix[sidx][tidx])
                        tatno_str = str(xyz.getAtom(tidx).atno)
                        ls.append(BO * tatno_str)
                # Fix: the ``sorted(ls, reverse=True)`` return value was
                # discarded (a no-op); sort in place as clearly intended.
                ls.sort(reverse=True)
                for j in ls:
                    atno_list.append(j)
                a = ''.join(atno_list)
                tatno_list[i] = [a]
        sidx_list = []
        for i in range(len(tidx_list)):
            sidx_list.append(tidx_list[i])
        for i in s_sel_list:
            for sidx in sidx_list[i]:
                ref_list.append(sidx)
        test_list = []
        for i in range(len(sidx_list)):
            test_list.append([])
        if tidx_list == test_list:
            exit_signal = True
    # assemble "<satno>.<outer digits>" and convert to float
    for i in range(len(satno_list)):
        atno_list = []
        atno_list.append(str(satno_list[i]))
        if tatno_list[i] == []:
            atno_list.append('')
        else:
            atno_list.append(tatno_list[i][0])
        a = '.'.join(atno_list)
        fpriority_list.append(float(a))
    return fpriority_list
def METHOD_NAME(xyzf):
    """Return element symbols of the metal's first coordination sphere.

    :param xyzf: path of an xyz file readable by mol3D
    :return: list of element symbols, one per metal-bonded atom
    """
    mol = mol3D()
    mol.readfromxyz(xyzf)
    metal_idx = mol.findMetal()[0]  # monometallic complexes
    return [mol.getAtom(idx).sym for idx in mol.getBondedAtoms(metal_idx)]
def fvalency(xyzf):
    """Return the valency (bonded neighbors minus one, excluding the metal
    bond) of each first-coordination-sphere atom.

    :param xyzf: path of an xyz file readable by mol3D
    """
    mol = mol3D()
    mol.readfromxyz(xyzf)
    metal_idx = mol.findMetal()[0]  # monometallic complexes
    return [len(mol.getBondedAtoms(idx)) - 1
            for idx in mol.getBondedAtoms(metal_idx)]
def fcharge(xyzf, charge):
    """Return the partial charge of each first-coordination-sphere atom.

    :param xyzf: path of an xyz file readable by mol3D
    :param charge: total molecular charge passed to calccharges()
    """
    mol = mol3D()
    mol.readfromxyz(xyzf)
    mol.calccharges(charge)
    metal_idx = mol.findMetal()[0]  # monometallic complexes
    return [float(mol.partialcharges[idx])
            for idx in mol.getBondedAtoms(metal_idx)]
def scharge_ave(xyzf, charge):
    """Average partial charge over each first-sphere atom's neighbors
    (the metal itself is excluded from the sum).

    :param xyzf: path of an xyz file readable by mol3D
    :param charge: total molecular charge passed to calccharges()
    :return: list of averages, one per first-sphere atom
    """
    # setting properties
    xyz = mol3D()
    xyz.readfromxyz(xyzf)
    xyz.calccharges(charge)
    # getting idxs of interest
    midx = xyz.findMetal()[0]  # monometallic complexes
    # list of idx of the first-coord sphere
    fidx_list = xyz.getBondedAtoms(midx)
    sidx_list = [xyz.getBondedAtoms(fidx) for fidx in fidx_list]
    scharge_ave_list = []
    for i in range(len(sidx_list)):
        charge_sum = 0
        for j in range(len(sidx_list[i])):
            idx = sidx_list[i][j]
            # Fix: was ``idx is not midx`` (int identity, unreliable) and
            # ``charge = + x`` (assignment, never accumulated).
            if idx != midx:
                charge_sum += xyz.partialcharges[idx]
        # Denominator intentionally counts all neighbors, including the metal.
        charge_ave = charge_sum / len(sidx_list[i])
        scharge_ave_list.append(float(charge_ave))
    return scharge_ave_list
def fdistance(xyzf):
    """Return the metal-to-ligand distance for each first-sphere atom.

    :param xyzf: path of an xyz file readable by mol3D
    """
    mol = mol3D()
    mol.readfromxyz(xyzf)
    metal_idx = mol.findMetal()[0]  # monometallic complexes
    metal_xyz = mol.getAtom(metal_idx).coords()
    return [float(distance(metal_xyz, mol.getAtom(idx).coords()))
            for idx in mol.getBondedAtoms(metal_idx)]
def all_prop(xyzf, charge):
    """Collect all per-first-sphere-atom property lists in a fixed order:
    priority, partial charge, second-sphere average charge, valency, distance.

    :param xyzf: path of an xyz file readable by mol3D
    :param charge: total molecular charge
    """
    fprio_list = fpriority(xyzf)
    # fsym_list = fsym(xyzf)
    fva_list = fvalency(xyzf)
    fq_list = fcharge(xyzf, charge)
    sq_ave_list = scharge_ave(xyzf, charge)
    fd_list = fdistance(xyzf)
    prop_list = [fprio_list, fq_list, sq_ave_list, fva_list, fd_list]
    return prop_list
def features(xyzf, charge):
    """Build a flat feature vector: metal atomic number followed by each
    property row of :func:`all_prop`, with columns sorted by priority.

    :param xyzf: path of an xyz file readable by mol3D
    :param charge: total molecular charge
    """
    props = all_prop(xyzf, charge)
    mol = mol3D()
    mol.readfromxyz(xyzf)
    metal_atno = mol.getAtom(mol.findMetal()[0]).atno
    arr = np.array(props)
    # Reorder columns (atoms) by ascending priority (row 0).
    ordered = arr.T[arr[0].argsort()].T
    feature_rows = ordered.tolist()
    # Keep only the integer part of each priority value.
    feature_rows[0] = [int(str(v).split('.')[0]) for v in feature_rows[0]]
    feature = [metal_atno]
    for row in feature_rows:
        feature.extend(row)
    return feature
7,558 | lonwrong | from argparse import ArgumentParser
from datetime import datetime, time
import os
import pytest
import numpy as np
from test import TEST_DIR, pushd
SCENARIO = os.path.join(TEST_DIR, "scenario_4")
from RAiDER.cli import AttributeDict
from RAiDER.cli.validators import (
modelName2Module, getBufferedExtent, isOutside, isInside,
enforce_valid_dates as date_type, convert_time as time_type,
enforce_bbox, parse_dates, enforce_wm, get_los
)
@pytest.fixture
def parser():
    # Fresh ArgumentParser per test.
    return ArgumentParser()
@pytest.fixture
def llsimple():
    # Baseline lat/lon tuple pair.
    lats = (10, 12)
    lons = (-72, -74)
    return lats, lons
@pytest.fixture
def latwrong():
    # Same extent as llsimple but with lats in reversed order.
    lats = (12, 10)
    lons = (-72, -74)
    return lats, lons
@pytest.fixture
def METHOD_NAME():
    # Fix: lons were identical to the ``llsimple`` fixture, so the
    # reversed-lon case this fixture exists for was never exercised.
    # getBufferedExtent normalizes ordering, so the expected extent is
    # unchanged.
    lats = (10, 12)
    lons = (-74, -72)
    return lats, lons
@pytest.fixture
def llarray():
    # Dense lat/lon grids covering the same extent as llsimple.
    lats = np.arange(10, 12.1, 0.1)
    lons = np.arange(-74, -71.9, 0.2)
    return lats, lons
@pytest.fixture
def args1():
    # Minimal arg dict pointing at the scenario-4 LOS file, projected (non-raytrace).
    test_file = os.path.join(SCENARIO, 'los.rdr')
    args = AttributeDict({'los_file': test_file, 'los_convention': 'isce', 'ray_trace': False})
    return args
def test_enforce_wm():
    # Unknown weather-model names must be rejected.
    with pytest.raises(NotImplementedError):
        enforce_wm('notamodel', 'fakeaoi')
def test_get_los_ray(args1):
    # A LOS file without ray_trace should yield a projected LOS object.
    args = args1
    los = get_los(args)
    assert not los.ray_trace()
    assert los.is_Projected()
def test_date_type():
    # Both dashed and compact date strings parse; garbage raises.
    assert date_type("2020-10-1") == datetime(2020, 10, 1)
    assert date_type("2020101") == datetime(2020, 10, 1)
    with pytest.raises(ValueError):
        date_type("foobar")
@pytest.mark.parametrize("input,expected", (
    # Fix: the first parametrize row was duplicated verbatim; one copy removed.
    ("T23:00:01.000000", time(23, 0, 1)),
    ("T230001.000000", time(23, 0, 1)),
    ("230001.000000", time(23, 0, 1)),
    ("T23:00:01", time(23, 0, 1)),
    ("23:00:01", time(23, 0, 1)),
    ("T230001", time(23, 0, 1)),
    ("230001", time(23, 0, 1)),
    ("T23:00", time(23, 0, 0)),
    ("T2300", time(23, 0, 0)),
    ("23:00", time(23, 0, 0)),
    ("2300", time(23, 0, 0))
))
@pytest.mark.parametrize("timezone", ("", "z", "+0000"))
def test_time_type(input, timezone, expected):
    # ``input`` shadows the builtin, but renaming would change parametrize ids.
    assert time_type(input + timezone) == expected
def test_time_type_error():
    # Unparseable time strings raise ValueError.
    with pytest.raises(ValueError):
        time_type("foobar")
def test_date_list_action():
    # A lone start date yields a single-element list.
    date_list = {
        'date_start': '20200101',
    }
    assert date_type(date_list['date_start']) == datetime(2020, 1, 1)

    assert parse_dates(date_list) == [datetime(2020, 1, 1)]

    # Adding an end date expands to the inclusive daily range.
    date_list['date_end'] = '20200103'
    assert date_type(date_list['date_end']) == datetime(2020, 1, 3)
    assert parse_dates(date_list) == [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)]

    # A step (in days) thins the range.
    date_list['date_end'] = '20200112'
    date_list['date_step'] = '5'
    assert parse_dates(date_list) == [datetime(2020, 1, 1), datetime(2020, 1, 6), datetime(2020, 1, 11)]
def test_bbox_action():
    # Valid "S N W E" string parses to a 4-element list.
    bbox_str = "45 46 -72 -70"
    assert len(enforce_bbox(bbox_str)) == 4
    assert enforce_bbox(bbox_str) == [45, 46, -72, -70]

    # S == N is rejected.
    with pytest.raises(ValueError):
        enforce_bbox("20 20 30 30")
    # Latitude out of [-90, 90] is rejected.
    with pytest.raises(ValueError):
        enforce_bbox("30 100 20 40")
    # Longitude out of [-180, 180] is rejected.
    with pytest.raises(ValueError):
        enforce_bbox("10 30 40 190")
def test_ll1(llsimple):
    # Buffered extent is [lat_min, lat_max, lon_min, lon_max].
    lats, lons = llsimple
    assert np.allclose(getBufferedExtent(lats, lons), np.array([10, 12, -74, -72]))
def test_ll2(latwrong):
    # Reversed lat ordering yields the same normalized extent.
    lats, lons = latwrong
    assert np.allclose(getBufferedExtent(lats, lons), np.array([10, 12, -74, -72]))
def test_ll3(METHOD_NAME):
    # Reversed lon ordering yields the same normalized extent.
    lats, lons = METHOD_NAME
    assert np.allclose(getBufferedExtent(lats, lons), np.array([10, 12, -74, -72]))
def test_ll4(llarray):
    # Dense coordinate arrays yield the same extent as the tuple pair.
    lats, lons = llarray
    assert np.allclose(getBufferedExtent(lats, lons), np.array([10, 12, -74, -72]))
def test_isOutside1(llsimple):
    """An extent shifted by one degree lies outside the original."""
    extent = getBufferedExtent(*llsimple)
    assert isOutside(extent, extent + 1)
def test_isOutside2(llsimple):
    """An extent is never outside itself."""
    extent = getBufferedExtent(*llsimple)
    assert not isOutside(extent, extent)
def test_isInside(llsimple):
    """An extent contains itself but not a shifted copy."""
    extent = getBufferedExtent(*llsimple)
    assert isInside(extent, extent)
    assert not isInside(extent, extent + 1)
# -*- coding: UTF-8 -*-
import sys
import pytest
from mock.mock import patch
from pytest import raises
# don't even bother on 2.6
if sys.version_info >= (2, 7):
from insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError, getRevocationList, normalizeSnippet, loadPlaybookYaml # noqa
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
def test_vars_not_found_error():
    """A playbook without a vars field must fail verification."""
    playbook = [{'name': "test playbook"}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert 'VERIFICATION FAILED: Vars field not found' in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
def test_empty_vars_error():
    """A playbook whose vars field is None must fail verification."""
    playbook = [{'name': "test playbook", 'vars': None}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert 'VERIFICATION FAILED: Empty vars field' in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
def test_signature_not_found_error():
    """A vars dict without an insights_signature must fail verification."""
    playbook = [{'name': "test playbook", 'vars': {}}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert 'VERIFICATION FAILED: Signature not found' in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.PUBLIC_KEY_FOLDER', './testing')
def test_key_not_imported():
    """Pointing PUBLIC_KEY_FOLDER at a bogus directory makes the GPG key import fail."""
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert "PUBLIC KEY NOT IMPORTED: Public key import failed" in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.PUBLIC_KEY_FOLDER', None)
def test_key_import_error():
    """A missing PUBLIC_KEY_FOLDER surfaces as a key-file-not-found error."""
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert "PUBLIC KEY IMPORT ERROR: Public key file not found" in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=([], []))
@patch('insights.client.apps.ansible.playbook_verifier.getRevocationList', return_value=[])
def test_playbook_verification_error(_revlist_mock, _verify_mock):
    """An empty GPG verify result means the playbook signature is invalid."""
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert 'SIGNATURE NOT VALID: Template [name: test playbook] has invalid signature' in str(exc_info.value)
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.contrib.gnupg.GPG.verify_data')
def test_playbook_verification_success(mock_method):
    """With GPG verification mocked to succeed, verify returns the playbook unchanged."""
    mock_method.return_value = True
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    assert verify(playbook, skipVerify=False) == playbook
# getRevocationList wraps YAML loader failures in PlaybookVerificationError
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load', side_effect=Exception())
def test_revocation_list_not_found(mock_method):
    """A YAML parse failure while loading the revocation list is fatal."""
    with raises(PlaybookVerificationError) as exc_info:
        getRevocationList()
    assert 'VERIFICATION FAILED: Error loading revocation list' in str(exc_info.value)
# the revocation list itself is signed; a bad signature is fatal
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(None, 0xdeadbeef))
def test_revocation_list_signature_invalid(mock_method):
    """A falsy verification result for the revocation list raises."""
    with raises(PlaybookVerificationError) as exc_info:
        getRevocationList()
    assert 'VERIFICATION FAILED: Revocation list signature invalid' in str(exc_info.value)
# an empty (but validly signed) revocation list revokes nothing
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load', return_value=[{}])
@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(True, 0xdeadbeef))
def test_revocation_list_empty(_verify_mock, _load_mock):
    """Verification succeeds when the revocation list has no entries."""
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    assert verify(playbook, skipVerify=False) == playbook
# a playbook whose hash appears on the revoked list must be rejected
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above')
@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load',
       return_value=[{'revoked_playbooks': [{'name': 'banned book', 'hash': 'deadbeef'}]}])
@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(True, bytearray.fromhex(u'deadbeef')))
def METHOD_NAME(_verify_mock, _load_mock):
    """A validly signed playbook is still refused when its hash is revoked."""
    signed_vars = {
        'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
        'insights_signature_exclude': '/vars/insights_signature'
    }
    playbook = [{'name': "test playbook", 'vars': signed_vars}]
    with raises(PlaybookVerificationError) as exc_info:
        verify(playbook, skipVerify=False)
    assert 'REVOKED PLAYBOOK: Template is on the revoked list [name: banned book]' in str(exc_info.value)
@pytest.mark.skipif(sys.version_info[:2] != (2, 7), reason='normalizeSnippet is only run with Python 2.7')
def test_normalize_snippet():
    """normalizeSnippet converts a ruamel-loaded YAML node tree into plain
    python dicts/lists/strings."""
    # NOTE: the indentation inside this literal is significant YAML
    playbook = '''task:
  when:
    - '"pam" in ansible_facts.packages'
    - result_pam_file_present.stat.exists'''
    snippet = loadPlaybookYaml(playbook)
    want = {
        'task': {
            'when': [
                '"pam" in ansible_facts.packages',
                'result_pam_file_present.stat.exists'
            ]
        }
    }
    assert normalizeSnippet(snippet) == want
import json
import os
import random
import datetime
import pytest
from invoke import run
from invoke.context import Context
from retrying import retry
import test.test_utils.eks as eks_utils
import test.test_utils.ec2 as ec2_utils
from test.test_utils import is_pr_context, SKIP_PR_REASON
LOGGER = eks_utils.LOGGER
@pytest.mark.skipif(is_pr_context(), reason=SKIP_PR_REASON)
@pytest.mark.integration("horovod")
@pytest.mark.model("mnist")
@pytest.mark.multinode(3)
def test_eks_mxnet_multi_node_training_horovod_mnist(mxnet_training, example_only):
    """
    Run MXNet distributed training on EKS using docker images with MNIST dataset (horovod)
    """
    cluster_size = "3"
    instance_type = "p3.16xlarge"
    # one MPI worker per GPU on the chosen instance type
    gpus_per_worker = ec2_utils.get_instance_num_gpus(instance_type=instance_type)
    _run_eks_mxnet_multinode_training_horovod_mpijob(
        mxnet_training, cluster_size, gpus_per_worker
    )
def _run_eks_mxnet_multinode_training_horovod_mpijob(
    example_image_uri, cluster_size, eks_gpus_per_worker
):
    """Render the horovod MPIJob manifest for the MNIST example and run it on EKS.

    :param example_image_uri: docker image URI to run
    :param cluster_size: number of workers, as a string for template substitution
    :param eks_gpus_per_worker: GPUs requested per worker pod
    """
    LOGGER.info("Starting run_eks_mxnet_multi_node_training on MNIST dataset using horovod")
    LOGGER.info("The test will run on an example image %s", example_image_uri)
    user = Context().run("echo $USER").stdout.strip("\n")
    # seed with the image + timestamp so concurrent runs get distinct tags
    random.seed(f"{example_image_uri}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}")
    unique_tag = f"{user}-{random.randint(1, 10000)}"
    namespace = "mxnet"
    job_name = f"mxnet-mnist-horovod-job-{unique_tag}"
    LOGGER.debug(f"Namespace: {namespace}")
    local_template_file_path = os.path.join(
        "eks",
        "eks_manifest_templates",
        "mxnet",
        "training",
        "multi_node_training_horovod_mnist.yaml",
    )
    # fixed copy-paste bug: the rendered manifest was previously named
    # "tensorflow_multi_node_training_*" in this MXNet helper
    remote_yaml_file_path = os.path.join(
        os.sep, "tmp", f"mxnet_multi_node_training_horovod_{unique_tag}.yaml"
    )
    replace_dict = {
        "<JOB_NAME>": job_name,
        "<NUM_WORKERS>": cluster_size,
        "<CONTAINER_IMAGE>": example_image_uri,
        "<GPUS>": str(eks_gpus_per_worker),
    }
    eks_utils.write_eks_yaml_file_from_template(
        local_template_file_path, remote_yaml_file_path, replace_dict
    )
    _run_eks_multi_node_training_mpijob(namespace, job_name, remote_yaml_file_path)
@pytest.mark.skipif(is_pr_context(), reason=SKIP_PR_REASON)
@pytest.mark.integration("parameter server")
@pytest.mark.model("mnist")
@pytest.mark.multinode(3)
def METHOD_NAME(mxnet_training, example_only):
    """
    Run MXNet distributed training on EKS using docker images with MNIST dataset (parameter server)
    """
    # seed with the image + timestamp so concurrent runs get distinct ids
    random.seed(f"{mxnet_training}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}")
    unique_id = random.randint(1, 6000)
    namespace = "mxnet"
    job_name = f"kubeflow-mxnet-gpu-dist-job-{unique_id}"
    # TODO: This should either be dynamic or at least global variables
    manifest_values = {
        "<JOB_NAME>": job_name,
        "<NUM_SERVERS>": "2",
        "<NUM_WORKERS>": "3",
        "<CONTAINER_IMAGE>": mxnet_training,
        "<EPOCHS>": '"20"',
        "<LAYERS>": '"2"',
        "<GPUS>": '"0"',
        "<GPU_LIMIT>": "1",
    }
    template_path = os.path.join(
        "eks", "eks_manifest_templates", "mxnet", "training", "multi_node_gpu_training.yaml"
    )
    rendered_path = os.path.join(
        os.sep, "tmp", f"mxnet_multi_node_training_{unique_id}.yaml"
    )
    eks_utils.write_eks_yaml_file_from_template(
        template_path, rendered_path, manifest_values
    )
    assert _run_eks_mxnet_multi_node_training(
        namespace, job_name, rendered_path
    ), "EKS multinode training failed"
def _run_eks_mxnet_multi_node_training(namespace, job_name, remote_yaml_file_path):
    """Run MXNet distributed training on EKS using MXNet Operator

    Creates the namespace if needed, (re)creates the MXJob from the rendered
    manifest, polls for completion and always cleans up the job resources.

    Args:
        namespace, job_name, remote_yaml_file_path

    Returns:
        bool: True when the MXJob reached the Succeeded condition
    """
    training_result = False
    # Namespaces will allow parallel runs on the same cluster. Create namespace if it doesnt exist.
    does_namespace_exist = run(f"kubectl get namespace | grep {namespace}", warn=True)
    if does_namespace_exist.return_code != 0:
        run(f"kubectl create namespace {namespace}")
    try:
        # Delete old job with same name if exists
        run(f"kubectl delete -f {remote_yaml_file_path}", warn=True)
        run(f"kubectl create -f {remote_yaml_file_path} -n {namespace}")
        # blocks (via @retry on the callee) until success or retries exhausted
        if is_mxnet_eks_multinode_training_complete(job_name, namespace):
            training_result = True
    finally:
        # always tear down the job resources, even on failure
        eks_utils.eks_multinode_cleanup(remote_yaml_file_path, namespace)
    return training_result
@retry(
    stop_max_attempt_number=60, wait_fixed=12000, retry_on_exception=eks_utils.retry_if_value_error
)
def is_mxnet_eks_multinode_training_complete(job_name, namespace):
    """Function to check job and pod status for multinode training.
    A separate method is required because kubectl commands for logs and status are different with namespaces.

    Raising ValueError triggers the @retry decorator to poll again.

    Args:
        job_name: str, remote_yaml_file_path: str
    """
    run_out = run(f"kubectl get mxjobs -n {namespace} {job_name} -o json", warn=True)
    # Bug fix: the original guard used "or", which is always true (even when
    # stdout is None) and would crash json.loads(None) with a TypeError.
    # Keep retrying until kubectl actually returns output.
    if run_out.stdout is None or run_out.stdout == "":
        raise ValueError("Waiting for job status output...")
    job_info = json.loads(run_out.stdout)
    LOGGER.debug(f"Job info: {job_info}")
    if (
        "status" not in job_info
        or "conditions" not in job_info["status"]
        or len(job_info["status"]["conditions"]) == 0
    ):
        raise ValueError("Waiting for job to launch...")
    job_conditions = job_info["status"]["conditions"]
    job_condition_succeed = ["type" in c and c["type"] == "Succeeded" for c in job_conditions]
    if any(job_condition_succeed):
        return True
    raise ValueError("Waiting for job to be complete...")
def _run_eks_multi_node_training_mpijob(namespace, job_name, remote_yaml_file_path):
    """
    Function to run eks multinode training MPI job

    Creates the namespace if needed, starts the MPIJob, waits for the launcher
    pod and the job to complete, streams the launcher logs, and always cleans
    up the job resources.
    """
    does_namespace_exist = run(f"kubectl get namespace | grep {namespace}", warn=True)
    if does_namespace_exist.return_code != 0:
        run(f"kubectl create namespace {namespace}")
    try:
        training_job_start = run(
            f"kubectl create -f {remote_yaml_file_path} -n {namespace}", warn=True
        )
        # non-zero return code means the job could not even be created
        if training_job_start.return_code:
            raise RuntimeError(f"Failed to start {job_name}:\n{training_job_start.stderr}")
        LOGGER.info("Check pods")
        run(f"kubectl get pods -n {namespace} -o wide")
        # is_mpijob_launcher_pod_ready returns "pod/<name>"
        complete_pod_name = eks_utils.is_mpijob_launcher_pod_ready(namespace, job_name)
        _, pod_name = complete_pod_name.split("/")
        LOGGER.info(f"The Pods have been created and the name of the launcher pod is {pod_name}")
        LOGGER.info(f"Wait for the {job_name} job to complete")
        if eks_utils.is_eks_multinode_training_complete(
            remote_yaml_file_path, namespace, pod_name, job_name
        ):
            LOGGER.info(f"Wait for the {pod_name} pod to reach completion")
            distributed_out = run(f"kubectl logs -n {namespace} -f {complete_pod_name}").stdout
            LOGGER.info(distributed_out)
    finally:
        # always tear down the job resources, even on failure
        eks_utils.eks_multinode_cleanup(remote_yaml_file_path, namespace)
from __future__ import print_function
import base64
import os
import sys
import foreign.six as six
import core.exceptions as ex
from core.objects.data import DataMixin
from core.objects.svc import BaseSvc
from utilities.converters import print_size
from utilities.lazy import lazy
from utilities.naming import factory, split_path
from utilities.ssl import gen_cert, get_expire
from utilities.string import bdecode, bencode
# secret objects contribute no extra status groups
DEFAULT_STATUS_GROUPS = [
]
class Sec(DataMixin, BaseSvc):
    """Encrypted key/value store object ("secret").

    Values are stored in the object configuration as base64-encoded,
    cluster-encrypted blobs under data.<key>. The object can also generate
    and store x509 material (optionally signed by a ca sec object) and
    export it as pkcs12 or a full pem bundle.
    """
    kind = "sec"
    desc = "secret"
    # secrets are installed with restrictive permissions by default
    default_mode = 0o0600

    @lazy
    def kwstore(self):
        from .secdict import KEYS
        return KEYS

    @lazy
    def full_kwstore(self):
        from .secdict import KEYS
        return KEYS

    def on_create(self):
        # auto-generate a certificate when a cn is configured and no
        # certificate key exists yet
        if self.oget("DEFAULT", "cn") and "certificate" not in self.data_keys():
            self.gen_cert()

    def _add_key(self, key, data):
        """Encrypt <data> and store it under data.<key>, then refresh
        installed copies of the key."""
        if not key:
            raise ex.Error("secret key name can not be empty")
        if data is None:
            raise ex.Error("secret value can not be empty")
        data = "crypt:"+base64.urlsafe_b64encode(self.encrypt(data, cluster_name="join", encode=True)).decode()
        applied = self.set_multi(["data.%s=%s" % (key, data)])
        if len(applied) == 0:
            # no config change: nothing to log or refresh
            return
        did = "added" if self.running_action == "add" else "changed"
        self.log.info("secret key '%s' %s (%s)", key, did, print_size(len(data), compact=True, unit="b"))
        # refresh if in use
        self.postinstall(key)

    def _add_keys(self, data):
        """Encrypt and store multiple (key, value) pairs in a single
        configuration write, then refresh installed copies of each key."""
        if not data:
            return
        sdata = []
        for key, val in data:
            if not key:
                raise ex.Error("secret key name can not be empty")
            if val is None:
                raise ex.Error("secret value can not be empty")
            val = "crypt:"+base64.urlsafe_b64encode(self.encrypt(val, cluster_name="join", encode=True)).decode()
            sdata.append("data.%s=%s" % (key, val))
        self.set_multi(sdata)
        self.log.info("secret keys '%s' added", ",".join([k for k, v in data]))
        # refresh if in use.
        # bug fix: the original called postinstall() once with the loop
        # leftover variable, refreshing only the last added key.
        for key, _ in data:
            self.postinstall(key)

    def decode_key(self, key):
        """Return the decrypted value of data.<key>, raising when the key
        is missing or empty."""
        if not key:
            raise ex.Error("secret key name can not be empty")
        data = self.oget("data", key)
        if not data:
            raise ex.Error("secret %s key %s does not exist or has no value" % (self.path, key))
        if data.startswith("crypt:"):
            # strip the "crypt:" marker before base64/decrypt
            data = data[6:]
        return self.decrypt(base64.urlsafe_b64decode(data.encode("ascii")), structured=False)[2]

    def gen_cert(self):
        """Generate a private key and certificate from the DEFAULT section
        settings, optionally signed by the configured ca sec object, and
        store the resulting material as secret keys (private_key,
        certificate, certificate_signing_request, certificate_chain,
        fullpem)."""
        data = {}
        for key in ("cn", "c", "st", "l", "o", "ou", "email", "alt_names", "bits", "validity", "ca"):
            val = self.oget("DEFAULT", key)
            if val is not None:
                data[key] = val
        ca = data.get("ca")
        casec = None
        if ca is not None:
            # load the signing material from the referenced ca sec object
            casecname, canamespace, _ = split_path(ca)
            casec = factory("sec")(casecname, namespace=canamespace, log=self.log, volatile=True)
            if not casec.exists():
                raise ex.Error("ca secret %s does not exist" % ca)
        # temp files the openssl helper will write to
        for key in ("crt", "key", "csr"):
            data[key] = self.tempfilename()
        if "alt_names" in data:
            # an openssl config file is needed to carry subjectAltName entries
            data["cnf"] = self.tempfilename()
        try:
            add_data = []
            if casec:
                # dump the ca certificate/key to temp files for openssl
                for key, kw in (("cacrt", "certificate"), ("cakey", "private_key")):
                    if kw not in casec.data_keys():
                        continue
                    data[key] = self.tempfilename()
                    buff = bdecode(casec.decode_key(kw))
                    with open(data[key], "w") as ofile:
                        ofile.write(buff)
            gen_cert(log=self.log, **data)
            with open(data["key"], "r") as ofile:
                buff = ofile.read()
            fullpem = ""
            fullpem += buff
            add_data.append(("private_key", buff))
            if data.get("crt") is not None:
                with open(data["crt"], "r") as ofile:
                    buff = ofile.read()
                add_data.append(("certificate", buff))
            if data.get("csr") is not None:
                with open(data["csr"], "r") as ofile:
                    buff = ofile.read()
                add_data.append(("certificate_signing_request", buff))
            if data.get("cakey") is None:
                # self-signed: the chain is just the certificate
                with open(data["crt"], "r") as ofile:
                    buff = ofile.read()
                fullpem += buff
                add_data.append(("certificate_chain", buff))
            else:
                # merge cacrt and crt
                with open(data["crt"], "r") as ofile:
                    buff = ofile.read()
                with open(data["cacrt"], "r") as ofile:
                    buff += ofile.read()
                fullpem += buff
                add_data.append(("certificate_chain", buff))
            add_data.append(("fullpem", fullpem))
            self._add_keys(add_data)
        finally:
            # best-effort removal of all temp files
            for key in ("crt", "key", "cacrt", "cakey", "csr", "cnf"):
                if key not in data:
                    continue
                try:
                    os.unlink(data[key])
                except Exception:
                    pass

    def get_cert_expire(self):
        """Return the expiration info of the stored certificate."""
        buff = bdecode(self.decode_key("certificate"))
        return get_expire(buff)

    def METHOD_NAME(self):
        """Print the pkcs12 bundle to stdout (binary-safe on python3)."""
        if six.PY3:
            sys.stdout.buffer.write(self._pkcs12(self.options.password)) # pylint: disable=no-member
        else:
            print(self._pkcs12(self.options.password))

    def _pkcs12(self, password):
        """Build and return a pkcs12 bundle from the stored private key and
        certificate chain, generating them first if missing.

        The export password is taken from <password>, read from stdin when
        <password> is "-" or "/dev/stdin", or prompted interactively when
        None.
        """
        required = set(["private_key", "certificate_chain"])
        if required & set(self.data_keys()) != required:
            self.gen_cert()
        from subprocess import Popen, PIPE
        import tempfile
        # NOTE(review): only the generated names are reused after close,
        # which is racy on a hostile tmpdir -- kept as-is for compatibility
        _tmpcert = tempfile.NamedTemporaryFile()
        _tmpkey = tempfile.NamedTemporaryFile()
        tmpcert = _tmpcert.name
        tmpkey = _tmpkey.name
        _tmpcert.close()
        _tmpkey.close()
        if password is None:
            from getpass import getpass
            pwd = getpass("Password: ", stream=sys.stderr)
            if not pwd:
                pwd = "\n"
        elif password in ["/dev/stdin", "-"]:
            pwd = sys.stdin.readline()
        else:
            pwd = password+"\n"
        if six.PY3:
            pwd = bencode(pwd)
        try:
            with open(tmpkey, "w") as _tmpkey:
                os.chmod(tmpkey, 0o600)
                _tmpkey.write(bdecode(self.decode_key("private_key")))
            with open(tmpcert, "w") as _tmpcert:
                os.chmod(tmpcert, 0o600)
                _tmpcert.write(bdecode(self.decode_key("certificate_chain")))
            cmd = ["openssl", "pkcs12", "-export", "-in", tmpcert, "-inkey", tmpkey, "-passout", "stdin"]
            proc = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
            out, err = proc.communicate(input=pwd)
            if err:
                print(err, file=sys.stderr)
            return out
        finally:
            if os.path.exists(tmpcert):
                os.unlink(tmpcert)
            if os.path.exists(tmpkey):
                os.unlink(tmpkey)

    def fullpem(self):
        """Print the private key + certificate chain pem bundle."""
        print(self._fullpem())

    def _fullpem(self):
        """Return the private key + certificate chain concatenated as pem,
        generating the material first if missing."""
        required = set(["private_key", "certificate_chain"])
        if required & set(self.data_keys()) != required:
            self.gen_cert()
        buff = bdecode(self.decode_key("private_key"))
        buff += bdecode(self.decode_key("certificate_chain"))
        return buff
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Set
from nncf.api.compression import CompressionStage
from nncf.common.schedulers import StubCompressionScheduler
from nncf.common.statistics import NNCFStatistics
from nncf.common.tensor_statistics.collectors import ReductionShape
from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
from nncf.config import NNCFConfig
from nncf.torch.algo_selector import ZeroCompressionLoss
from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
from nncf.torch.compression_method_api import PTCompressionAlgorithmController
from nncf.torch.graph.transformations.commands import PTInsertionCommand
from nncf.torch.graph.transformations.commands import PTTargetPoint
from nncf.torch.graph.transformations.commands import TransformationPriority
from nncf.torch.graph.transformations.layout import PTTransformationLayout
from nncf.torch.nncf_network import NNCFNetwork
class TensorStatisticObservationPoint:
    """Hashable pairing of an insertion point with the reduction shapes to observe."""

    def __init__(self, target_point: PTTargetPoint, reduction_shapes: Set[ReductionShape] = None):
        self.target_point = target_point
        self.reduction_shapes = reduction_shapes

    def __hash__(self):
        # identity is defined by the target point alone; reduction shapes
        # deliberately do not participate
        return hash(self.target_point)

    def __eq__(self, other: "TensorStatisticObservationPoint"):
        return other.target_point == self.target_point
class TensorStatisticsCollectionBuilder(PTCompressionAlgorithmBuilder):
    """Builder that inserts tensor-statistic collection hooks into an NNCFNetwork."""

    def __init__(
        self,
        config: NNCFConfig,
        observation_points_vs_collectors: Dict[TensorStatisticObservationPoint, TensorStatisticCollectorBase],
    ):
        super().__init__(config)
        # NOTE(review): values are iterated with .values() in
        # _get_transformation_layout, so each entry appears to actually be a
        # mapping (reduction shape -> collector) rather than a bare
        # collector -- confirm against the declared type hint.
        self._observation_points_vs_collectors = observation_points_vs_collectors

    def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformationLayout:
        # Will it really suffice to use a single collector for all threads? After all, each of the threads
        # receives its own data, and should we use a thread-local collector, there would have to be a
        # separate thread reduction step involved. Still, is there a better option here than to rely on GIL?
        layout = PTTransformationLayout()
        for op, rs_vs_collector in self._observation_points_vs_collectors.items():
            for collector in rs_vs_collector.values():
                # the collector's register_input bound method is used
                # directly as the inserted hook
                hook_obj = collector.register_input
                command = PTInsertionCommand(
                    op.target_point, hook_obj, TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION
                )
                layout.register(command)
        return layout

    def _build_controller(self, model: NNCFNetwork) -> "TensorStatisticsCollectionController":
        # the controller keys on target points, not observation points
        return TensorStatisticsCollectionController(
            model, {k.target_point: v for k, v in self._observation_points_vs_collectors.items()}
        )

    def _handle_frozen_layers(self, target_model: NNCFNetwork):
        # statistics collection never freezes or unfreezes layers
        pass

    def METHOD_NAME(self, model: NNCFNetwork) -> None:
        # no model initialization needed before statistics collection
        pass

    def _get_algo_specific_config_section(self) -> Dict:
        # this algorithm has no config section of its own
        return {}
class TensorStatisticsCollectionController(PTCompressionAlgorithmController):
    """Controller that toggles the tensor-statistic collectors attached to a model."""

    def __init__(
        self, target_model: NNCFNetwork, ip_vs_collector_dict: Dict[PTTargetPoint, TensorStatisticCollectorBase]
    ):
        super().__init__(target_model)
        self.ip_vs_collector_dict = ip_vs_collector_dict
        self._loss = ZeroCompressionLoss("cpu")
        self._scheduler = StubCompressionScheduler()

    @property
    def loss(self) -> ZeroCompressionLoss:
        return self._loss

    @property
    def scheduler(self) -> StubCompressionScheduler:
        return self._scheduler

    def _each_collector(self):
        # helper: iterate every registered collector
        return self.ip_vs_collector_dict.values()

    def start_collection(self):
        for collector in self._each_collector():
            collector.enable()

    def stop_collection(self):
        for collector in self._each_collector():
            collector.disable()

    def compression_stage(self) -> CompressionStage:
        # statistics collection does not compress anything
        return CompressionStage.FULLY_COMPRESSED

    def statistics(self, quickly_collected_only: bool = False) -> NNCFStatistics:
        return NNCFStatistics()
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2023 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import json
import shutil
import pytest
from pygeoapi.provider.base import ProviderItemNotFoundError
from pygeoapi.provider.tinydb_ import TinyDBCatalogueProvider
from .util import get_test_file_path
path = get_test_file_path('tests/data/open.canada.ca/sample-records.tinydb')
@pytest.fixture()
def data():
    """A GeoJSON feature, serialized, with an explicit identifier property."""
    feature = {
        'type': 'Feature',
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[
                [100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                [100.0, 1.0], [100.0, 0.0]
            ]]
        },
        'properties': {
            'identifier': 123,
            'title': 'test item',
            'description': 'test item'
        }
    }
    return json.dumps(feature)
@pytest.fixture()
def data_no_id():
    """A GeoJSON feature, serialized, without any identifier property."""
    feature = {
        'type': 'Feature',
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[
                [100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                [100.0, 1.0], [100.0, 0.0]
            ]]
        },
        'properties': {
            'title': 'test item',
            'description': 'test item'
        }
    }
    return json.dumps(feature)
@pytest.fixture()
def config(tmp_path):
    """Provider configuration pointing at a scratch copy of the sample tinydb,
    so write tests never dirty the checked-in fixture."""
    scratch_db = tmp_path / 'sample-records.tinydb'
    shutil.copy(path, scratch_db)
    return {
        'name': 'TinyDBCatalogue',
        'type': 'feature',
        'data': scratch_db,
        'id_field': 'externalId',
        'time_field': 'created'
    }
def test_query(config):
    """Exercise field introspection, free-text search, paging, datetime and
    bbox filters, and sorting against the 10-record sample catalogue."""
    p = TinyDBCatalogueProvider(config)
    fields = p.get_fields()
    assert len(fields) == 9
    assert fields['created']['type'] == 'string'
    assert fields['title']['type'] == 'string'
    assert fields['q']['type'] == 'string'
    # unfiltered query returns the whole dataset
    results = p.query()
    assert len(results['features']) == 10
    assert results['numberMatched'] == 10
    assert results['numberReturned'] == 10
    assert results['features'][0]['id'] == 'e5a71860-827c-453f-990e-0e0ba0ee67bb' # noqa
    assert results['features'][0]['properties']['type'] == 'RI_622'
    # free-text search is case-insensitive
    for term in ['crops', 'Crops', 'CROPS', 'CrOpS', 'CROps', 'CRops']:
        results = p.query(q=term)
        assert len(results['features']) == 6
        assert results['numberMatched'] == 6
        assert results['numberReturned'] == 6
    # multiple terms are ANDed
    results = p.query(q='crops barley')
    assert len(results['features']) == 2
    assert results['numberMatched'] == 2
    assert results['numberReturned'] == 2
    results = p.query(limit=1)
    assert len(results['features']) == 1
    assert results['features'][0]['id'] == 'e5a71860-827c-453f-990e-0e0ba0ee67bb' # noqa
    # open-ended and bounded datetime intervals
    results = p.query(datetime_='2020/..')
    assert len(results['features']) == 6
    assert results['features'][0]['id'] == '64e70d29-57a3-44a8-b55c-d465639d1e2e' # noqa
    results = p.query(datetime_='../2020')
    assert len(results['features']) == 4
    assert results['features'][0]['id'] == 'e5a71860-827c-453f-990e-0e0ba0ee67bb' # noqa
    results = p.query(datetime_='2020-09-17/2020-12-01')
    assert len(results['features']) == 6
    assert results['features'][0]['id'] == '64e70d29-57a3-44a8-b55c-d465639d1e2e' # noqa
    # bbox covering all records
    results = p.query(bbox=[-154, 42, -52, 84])
    assert len(results['features']) == 10
    assert results['features'][0]['id'] == 'e5a71860-827c-453f-990e-0e0ba0ee67bb' # noqa
    # paging via offset/limit
    results = p.query(offset=1, limit=1)
    assert len(results['features']) == 1
    assert results['features'][0]['id'] == '64e70d29-57a3-44a8-b55c-d465639d1e2e' # noqa
    results = p.query(offset=2, limit=2)
    assert len(results['features']) == 2
    assert results['features'][0]['id'] == 'd3028ad0-b0d0-47ff-bcc3-d383881e17cd' # noqa
    # ascending and descending sort on title
    results = p.query(sortby=[{'property': 'title', 'order': '+'}])
    assert results['features'][0]['id'] == '1687cac6-ee13-4866-ab8a-114c2ede7b13' # noqa
    results = p.query(sortby=[{'property': 'title', 'order': '-'}])
    assert results['features'][0]['id'] == '8a09413a-0a01-4aab-8925-720d987deb20' # noqa
def test_get(config):
    """Fetching a record by id returns the matching catalogue entry."""
    provider = TinyDBCatalogueProvider(config)
    record = provider.get('caeb0592-8c95-4461-b9a5-5fde7f2ccbb3')
    assert record['id'] == 'caeb0592-8c95-4461-b9a5-5fde7f2ccbb3'
    assert record['properties']['title'] == 'Probability of Ice freeze days (herbaceous crops) during non-growing season (<-5°C)' # noqa
def test_get_not_existing_item_raise_exception(config):
    """Testing query for a not existing object"""
    provider = TinyDBCatalogueProvider(config)
    with pytest.raises(ProviderItemNotFoundError):
        provider.get('404')
def test_transactions_create(config, data):
    """Create, update and delete succeed for a feature with an explicit id."""
    provider = TinyDBCatalogueProvider(config)
    created_id = provider.create(data)
    # the provider honours the identifier property from the payload
    assert created_id == 123
    assert provider.update(123, data)
    assert provider.delete(123)
def METHOD_NAME(config, data_no_id):
    """Testing transactional capabilities with incoming feature without ID"""
    provider = TinyDBCatalogueProvider(config)
    created_id = provider.create(data_no_id)
    # the provider must mint an identifier of its own
    assert created_id is not None
    stored = provider.get(created_id)
    assert stored['id'] == created_id
    assert stored['geometry'] == json.loads(data_no_id)['geometry']
    assert provider.update(created_id, json.dumps(stored))
    assert provider.delete(created_id)
try:
from . import generic as g
except BaseException:
import generic as g
class TextureTest(g.unittest.TestCase):

    def test_uv_to_color(self):
        """UV coordinates should sample the expected texel colors."""
        try:
            import PIL.Image
        except ImportError:
            # PIL is optional; silently skip if unavailable
            return
        # two UV sample positions inside the unit square
        uv = g.np.array([[0.25, 0.2], [0.4, 0.5]], dtype=float)
        # an 8x4 RGB texture with sequential byte values so every texel differs
        texture = g.np.arange(96, dtype=g.np.uint8).reshape(8, 4, 3)
        colors = g.trimesh.visual.uv_to_color(
            uv, PIL.Image.fromarray(texture))
        # expected RGBA for the two sample points
        colors_expected = [[75, 76, 77, 255], [51, 52, 53, 255]]
        assert (colors == colors_expected).all()

    def test_bad_uv(self):
        """Malformed (truncated) UV arrays should not break vertex merging."""
        # get a textured OBJ
        m = g.get_mesh('fuze.obj', force='mesh')
        # add malformed UV coordinates
        m.visual.uv = m.visual.uv[:100]
        # should not raise despite the truncated UV array
        m.merge_vertices()

    def test_order_kwarg(self):
        """maintain_order=True should preserve file-order vertices exactly."""
        for file_name in ['ico4.obj', 'ico4uv.obj']:
            # get the location of the model file
            file_path = g.get_path(file_name)
            with open(file_path, 'r') as f:
                # get the raw ordered vertices from the file with
                # basic string ops
                v_raw = g.np.array(
                    [line[2:].split() for line in f if line.startswith('v ')],
                    dtype=g.np.float64)
            # load them with vertex order maintained
            a = g.trimesh.load(file_path, process=False, maintain_order=True)
            # see if we have the same vertices
            assert g.np.allclose(a.vertices, v_raw)
            # load them without process but also without maintaining order
            a = g.trimesh.load(file_path, process=False, maintain_order=False)
            # vertex shape should not be the same
            assert a.vertices.shape != v_raw.shape

    def test_fuze(self):
        """The textured fuze bottle should load from every source and format."""
        # create a local web server to test remote assets
        with g.serve_meshes() as address:
            # see if web resolvers work
            tex = g.trimesh.exchange.load.load_remote(
                url=address + '/fuze.obj', process=False)
            g.check_fuze(tex)
            # see if web + zip resolvers work
            scene = g.trimesh.exchange.load.load_remote(
                url=address + '/fuze.zip', process=False)
            # zip files get loaded into a scene
            assert len(scene.geometry) == 1
            # scene should just be a fuze bottle
            g.check_fuze(next(iter(scene.geometry.values())))
        # obj with texture, assets should be loaded
        # through a FilePathResolver
        m = g.get_mesh('fuze.obj', process=False)
        # fixed: previously this re-checked `tex` from the remote load,
        # leaving the freshly loaded `m` entirely unverified
        g.check_fuze(m)
        # obj with texture, assets should be loaded
        # through a ZipResolver into a scene
        scene = g.get_mesh('fuze.zip', process=False)
        # zip files get loaded into a scene
        assert len(scene.geometry) == 1
        m = next(iter(scene.geometry.values()))
        g.check_fuze(m)
        # the PLY should have textures defined
        m = g.get_mesh('fuze.ply', process=False)
        g.check_fuze(m)
        # ASCII PLY should have textures defined
        m = g.get_mesh('fuze_ascii.ply', process=False)
        g.check_fuze(m)
        # textured meshes should subdivide OK-ish
        s = m.subdivide()
        assert len(s.visual.uv) == len(s.vertices)
        # load without doing the vertex separation
        # will look like garbage but represents original
        # and skips "disconnect vertices with different UV"
        b = g.get_mesh('fuze.ply',
                       process=False,
                       fix_texture=False)
        assert len(b.vertices) == 502
        assert len(b.visual.uv) == 502

    def test_upsize(self):
        """
        Texture images usually want to have sizes that are powers
        of two so resize textures up to the nearest power of two.
        """
        try:
            from PIL import Image
        except BaseException:
            g.log.warning('no PIL, not testing power_resize!')
            return
        # shortcut for the function
        resize = g.trimesh.visual.texture.power_resize
        img = Image.new('RGB', (10, 20))
        assert img.size == (10, 20)
        assert resize(img).size == (16, 32)
        assert resize(img, square=True).size == (32, 32)
        # check with one value on-size
        img = Image.new('RGB', (10, 32))
        assert img.size == (10, 32)
        assert resize(img).size == (16, 32)
        assert resize(img, square=True).size == (32, 32)
        # check early exit path
        img = Image.new('RGB', (32, 32))
        assert img.size == (32, 32)
        assert resize(img).size == (32, 32)
        assert resize(img, square=True).size == (32, 32)

    def test_concatenate(self):
        """Concatenating two textured meshes should deduplicate the texture."""
        # test concatenation with texture
        a = g.get_mesh('fuze.obj')
        b = a.copy()
        b.apply_translation([b.extents[0] * 1.25, 0, 0])
        c = a + b
        assert len(c.vertices) > len(a.vertices)
        assert len(c.visual.uv) == len(c.vertices)
        # should have deduplicated image texture
        assert g.np.allclose(c.visual.material.image.size,
                             a.visual.material.image.size)

    def METHOD_NAME(self):
        """Concatenating many differently-colored textured meshes should
        produce a single TextureVisuals whose colors round-trip."""
        colors = [[255, 0, 0, 255],
                  [0, 255, 0, 255],
                  [0, 0, 255, 255],
                  [100, 100, 100, 255]]
        funcs = [g.trimesh.creation.box,
                 g.trimesh.creation.icosphere,
                 g.trimesh.creation.capsule]
        fuze = g.get_mesh('fuze.obj')
        fuze.apply_scale(1.0 / fuze.extents.max())
        fuze.apply_translation([-2, 0, 0] - fuze.bounds[0])
        meshes = []
        for i, color in enumerate(colors):
            for j, f in enumerate(funcs):
                m = f()
                m.visual.face_colors = color
                # convert color visual to texture
                m.visual = m.visual.to_texture()
                m.apply_translation([i * 2.2, j * 2.2, 0.0])
                meshes.append(m)
        c = g.trimesh.util.concatenate(meshes)
        assert isinstance(c.visual, g.trimesh.visual.TextureVisuals)
        assert len(c.faces) == sum(len(i.faces) for i in meshes)
        assert g.np.prod(c.visual.material.image.size) >= 4
        # convert texture back to color
        roundtrip = c.visual.to_color()
        assert roundtrip.kind == 'vertex'
        vertex_c = roundtrip.vertex_colors
        # get the unique colors
        unique = vertex_c[g.trimesh.grouping.unique_rows(vertex_c)[0]]
        # roundtripped colors should be a superset of original colors
        assert set(tuple(c) for c in unique).issuperset(
            set(tuple(c) for c in colors))

    def test_to_tex(self):
        """Color -> texture -> color should preserve the main color."""
        m = g.trimesh.creation.box()
        color = [255, 0, 0, 255]
        m.visual.face_colors = color
        # convert color visual to texture
        m.visual = m.visual.to_texture()
        # convert back to color
        m.visual = m.visual.to_color()
        assert g.np.allclose(m.visual.main_color, color)

    def test_uv_none(self):
        """Setting UV coordinates to None should work and survive a copy."""
        m = g.get_mesh('fuze.obj')
        m.visual.uv = None
        assert m.visual.uv is None
        # should still be None on a copy
        c = m.copy()
        assert c.visual.uv is None

    def test_pbr_export(self):
        """A PBR-textured GLB should export to OBJ with its material intact."""
        # try loading a textured box
        m = next(iter(g.get_mesh('BoxTextured.glb').geometry.values()))
        # make sure material copy doesn't crash
        m.visual.copy()
        with g.TemporaryDirectory() as d:
            # exports by path allow files to be written
            path = g.os.path.join(d, 'box.obj')
            m.export(path)
            # try reloading
            r = g.trimesh.load(path)
            # make sure material survived
            assert r.visual.material.image.size == (256, 256)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main() |
7,565 | test valid partition | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed.comm as dist
from deepspeed.runtime.utils import partition_uniform
from deepspeed.runtime.utils import partition_balanced
from deepspeed.runtime.utils import prefix_sum_inc
from deepspeed.runtime.utils import PartitionedTensor
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
class TestPartitionedTensor(DistributedTest):
    # run with 4 ranks so the tensor is split four ways
    world_size = 4
    def test(self):
        """Partition a broadcast tensor and verify full() reassembles it."""
        world = dist.get_world_size()
        rank = dist.get_rank()
        group = dist.new_group(ranks=list(range(world)))
        # rows divisible by world size -> equal-sized local shards
        rows = world * 4
        cols = 3
        full = torch.rand(rows, cols).to(get_accelerator().device_name())
        # every rank must hold the same tensor before partitioning
        dist.broadcast(full, src=0, group=group)
        part = PartitionedTensor(full, group=group)
        # each rank keeps a flat 1-D shard covering 1/world of the elements
        assert len(part.local_size()) == 1
        assert part.local_size()[0] * world == full.numel()
        reconstructed = part.full()
        assert torch.equal(full, reconstructed)
class TestPartitionedTensorMeta(DistributedTest):
    world_size = 4
    def test(self):
        """Round-trip a PartitionedTensor through its meta representation."""
        world = dist.get_world_size()
        rank = dist.get_rank()
        group = dist.new_group(ranks=list(range(world)))
        # rows deliberately NOT divisible by world size to exercise padding
        rows = world * 7
        cols = 3
        full = torch.rand(rows, cols).to(get_accelerator().device_name())
        dist.broadcast(full, src=0, group=group)
        part = PartitionedTensor(full, group=group)
        # rebuilding from (meta, local shard, group) must reproduce the tensor
        my_meta = PartitionedTensor.from_meta(part.to_meta(), part.local_data, group)
        assert torch.equal(full, my_meta.full())
def assert_valid_partition(weights, parts, P):
    """Assert that ``parts`` is a well-formed partitioning of ``weights``.

    A valid partitioning of N items into P parts is a list of P + 1
    monotonically non-decreasing boundaries starting at 0 and ending at N.
    """
    assert len(parts) == P + 1
    assert parts[0] == 0
    assert parts[P] == len(weights)
    # boundaries must never move backwards
    assert all(lo <= hi for lo, hi in zip(parts[:-1], parts[1:]))
def get_partition_weights(weights, parts):
    """ Return the amount of weight in each partition. """
    # partition p covers the half-open index range [parts[p], parts[p + 1])
    return [sum(weights[lo:hi]) for lo, hi in zip(parts[:-1], parts[1:])]
def test_prefix_sum():
    """prefix_sum_inc must return the inclusive prefix sums of its input."""
    x = [3, 4, 5]
    psum = prefix_sum_inc(x)
    assert psum == [3, 7, 12]
def METHOD_NAME():
    """A one-part balanced partition of uniform weights must be valid."""
    N = 10
    P = 1
    weights = [1] * N
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
def test_short_partition_uniform():
    """Uniform partitioning with more parts than items must stay valid."""
    N = 2
    P = 4
    weights = [1] * N
    parts = partition_uniform(len(weights), P)
    assert_valid_partition(weights, parts, P)
def test_short_partition():
    """Balanced partitioning with more parts than items must stay valid."""
    N = 2
    P = 4
    weights = [1] * N
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
def test_easy_balance_uniform():
    """Uniform partitioning of 8 equal weights into 4 parts gives 2 each."""
    weights = [1] * 8
    P = 4
    parts = partition_uniform(len(weights), P)
    assert_valid_partition(weights, parts, P)
    costs = get_partition_weights(weights, parts)
    assert all(c == 2 for c in costs)
def test_easy_balance_balanced():
    """Balanced partitioning of 8 equal weights into 4 parts gives 2 each."""
    weights = [1] * 8
    P = 4
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
    costs = get_partition_weights(weights, parts)
    assert all(c == 2 for c in costs), costs
def test_int_balanced():
    """Integer weights that admit a perfect 3-per-part split must get it."""
    weights = [0, 1, 2, 3, 3, 3]
    P = 4
    parts = partition_balanced(weights, P)
    # exact expected boundaries: [0,1,2] | [3] | [3] | [3]
    assert parts == [0, 3, 4, 5, 6]
    assert_valid_partition(weights, parts, P)
    costs = get_partition_weights(weights, parts)
    assert all(c == 3 for c in costs)
def test_float_balanced():
    """Float weights should balance to the same boundaries as the int case."""
    weights = [0., 1.1, 1.9, 3., 3., 3.]
    P = 4
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
    assert parts == [0, 3, 4, 5, 6]
@pytest.mark.skip(reason="Variance-minimizing partitioning returns different result.")
def test_float_lastheavy():
    """A dominant final weight should be isolated in its own partition."""
    weights = [0., 1.1, 1.9, 3., 30.]
    P = 2
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
    assert parts == [0, 4, 5]
def test_float_midheavy():
    """A dominant middle weight should be isolated in its own partition."""
    weights = [0., 1.1, 30, 3.]
    P = 3
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
    assert parts == [0, 2, 3, 4]
def test_balance_bert():
    """Balanced partitioning must stay valid on a realistic BERT layer profile."""
    # Parameters per layer for a transformer model with 24 transformers and hidden dim 1024
    weights = [
        52559872, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224,
        12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224,
        12596224, 12596224, 12596224, 0, 52559872
    ]
    P = 8
    parts = partition_balanced(weights, P)
    assert_valid_partition(weights, parts, P)
7,566 | initialize metric | #!/usr/bin/env python
"""Default implementation for a stats-collector."""
import abc
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.stats import stats_collector
from grr_response_core.stats import stats_utils
def _FieldsToKey(fields):
"""Converts a list of field values to a metric key."""
return tuple(fields) if fields else ()
class _Metric(metaclass=abc.ABCMeta):
  """Base class for all the metric objects used by the DefaultStatsCollector.
  See stats_collector for more info.
  """
  def __init__(self, field_defs):
    """Initializes the metric.
    Args:
      field_defs: A list of (field-name, field-type) tuples describing the
        dimensions for the metric.
    """
    self._field_defs = field_defs
    # Maps field-value tuples (as produced by _FieldsToKey) to metric values.
    self._metric_values = {}
  @abc.abstractmethod
  def _DefaultValue(self):
    """Returns the default value of a metric.
    For counters, the default value is 0, for event metrics, the default
    value is a distribution, and for gauges, the default value is 0, 0.0 or
    the empty string depending on the type of the gauge (int, float or str).
    """
  def Get(self, fields=None):
    """Gets the metric value corresponding to the given field values.
    Raises:
      ValueError: If the provided fields do not match the field definitions
        the metric was registered with (unexpected, missing, or wrong arity).
    """
    # Validate the fields against the registered definitions before lookup.
    if not self._field_defs and fields:
      raise ValueError("Metric was registered without fields, "
                       "but following fields were provided: %s." % (fields,))
    if self._field_defs and not fields:
      raise ValueError("Metric was registered with fields (%s), "
                       "but no fields were provided." % self._field_defs)
    if self._field_defs and fields and len(self._field_defs) != len(fields):
      raise ValueError(
          "Metric was registered with %d fields (%s), but "
          "%d fields were provided (%s)." % (len(
              self._field_defs), self._field_defs, len(fields), fields))
    # Fall back to the subclass-specific default when nothing was recorded.
    metric_value = self._metric_values.get(_FieldsToKey(fields))
    return self._DefaultValue() if metric_value is None else metric_value
  def ListFieldsValues(self):
    """Returns a list of tuples of all field values used with the metric."""
    return list(self._metric_values) if self._field_defs else []
class _CounterMetric(_Metric):
  """Simple counter metric (see stats_collector for more info)."""
  def _DefaultValue(self):
    # Counters start counting from zero.
    return 0
  def Increment(self, delta, fields=None):
    """Increments counter value by a given delta."""
    if delta < 0:
      raise ValueError(
          "Counter increment should not be < 0 (received: %d)" % delta)
    # Get() returns the default (0) when nothing was recorded yet.
    current = self.Get(fields=fields)
    self._metric_values[_FieldsToKey(fields)] = current + delta
class _EventMetric(_Metric):
  """_EventMetric provides detailed stats, like averages, distribution, etc.
  See stats_collector for more info.
  Args:
    bins: A list of numbers defining distribution buckets for the metric. If
      empty/None, a default list of buckets is used.
    fields: A list of (field-name, field-type) tuples describing the dimensions
      for the metric.
  """
  def __init__(self, bins, fields):
    super().__init__(fields)
    # Default bucket boundaries are roughly logarithmic.
    self._bins = bins or [
        0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8, 9,
        10, 15, 20, 50, 100
    ]
  def _DefaultValue(self):
    return rdf_stats.Distribution(bins=self._bins)
  def Record(self, value, fields=None):
    """Records the given observation in a distribution."""
    key = _FieldsToKey(fields)
    metric_value = self._metric_values.get(key)
    if metric_value is None:
      # Lazily create a distribution for this field combination.
      metric_value = self._DefaultValue()
      self._metric_values[key] = metric_value
    metric_value.Record(value)
class _GaugeMetric(_Metric):
  """A metric whose value can increase or decrease.
  See stats_collector for more info.
  Args:
    value_type: Type of the gauge (one of int, str or float).
    fields: A list of (field-name, field-type) tuples describing the dimensions
      for the metric.
  """
  def __init__(self, value_type, fields):
    super().__init__(fields)
    self._value_type = value_type
  def _DefaultValue(self):
    # The type's zero value: 0, 0.0 or "".
    return self._value_type()
  def Set(self, value, fields=None):
    """Sets the metric's current value (coerced to the gauge's type)."""
    self._metric_values[_FieldsToKey(fields)] = self._value_type(value)
  def SetCallback(self, callback, fields=None):
    """Attaches the given callback to the metric."""
    # The callable is stored in place of a value and resolved in Get().
    self._metric_values[_FieldsToKey(fields)] = callback
  def Get(self, fields=None):
    """Returns current metric's value (executing a callback if needed)."""
    result = super().Get(fields=fields)
    if callable(result):
      return result()
    else:
      return result
class DefaultStatsCollector(stats_collector.StatsCollector):
  """Default implementation for a stats-collector."""
  def __init__(self):
    # One registry per metric kind, each keyed by metric name.
    self._counter_metrics = {}
    self._gauge_metrics = {}
    self._event_metrics = {}
    super().__init__()
  def METHOD_NAME(self, metadata):
    """See base class."""
    field_defs = stats_utils.FieldDefinitionTuplesFromProtos(
        metadata.fields_defs)
    # Dispatch on the declared metric type and register the metric in the
    # matching registry.
    if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      self._counter_metrics[metadata.varname] = _CounterMetric(field_defs)
    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      self._event_metrics[metadata.varname] = _EventMetric(
          list(metadata.bins), field_defs)
    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
      value_type = stats_utils.PythonTypeFromMetricValueType(
          metadata.value_type)
      self._gauge_metrics[metadata.varname] = _GaugeMetric(
          value_type, field_defs)
    else:
      raise ValueError("Unknown metric type: %s." % metadata.metric_type)
  @utils.Synchronized
  def IncrementCounter(self, metric_name, delta=1, fields=None):
    """See base class."""
    if delta < 0:
      raise ValueError("Invalid increment for counter: %d." % delta)
    self._counter_metrics[metric_name].Increment(delta, fields)
  @utils.Synchronized
  def RecordEvent(self, metric_name, value, fields=None):
    """See base class."""
    self._event_metrics[metric_name].Record(value, fields)
  @utils.Synchronized
  def SetGaugeValue(self, metric_name, value, fields=None):
    """See base class."""
    self._gauge_metrics[metric_name].Set(value, fields)
  @utils.Synchronized
  def SetGaugeCallback(self, metric_name, callback, fields=None):
    """See base class."""
    self._gauge_metrics[metric_name].SetCallback(callback, fields)
  def GetMetricFields(self, metric_name):
    """See base class."""
    return self._GetMetric(metric_name).ListFieldsValues()
  def GetMetricValue(self, metric_name, fields=None):
    """See base class."""
    return self._GetMetric(metric_name).Get(fields)
  def _GetMetric(self, metric_name):
    """Fetches the metric object corresponding to the given name."""
    # Metric names are assumed unique across the three registries.
    if metric_name in self._counter_metrics:
      return self._counter_metrics[metric_name]
    elif metric_name in self._event_metrics:
      return self._event_metrics[metric_name]
    elif metric_name in self._gauge_metrics:
      return self._gauge_metrics[metric_name]
    else:
      raise ValueError("Metric %s is not registered." % metric_name)
7,567 | stop withgrab | # Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
from .iomediator import IoMediator
from autokey.model.key import Key, MODIFIERS
from . import iomediator
class KeyGrabber:
    """
    Keygrabber used by the hotkey settings dialog to grab the key pressed
    """
    def __init__(self, parent):
        # dialog that receives the result via set_key()/cancel_grab()
        self.target_parent = parent
    def start(self):
        """Register as a listener and grab the keyboard exclusively."""
        # In QT version, sometimes the mouse click event arrives before we finish initialising
        # sleep slightly to prevent this
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        iomediator.CURRENT_INTERFACE.grab_keyboard()
    def handle_keypress(self, raw_key, modifiers, key, *args):
        """Deliver the first non-modifier key (plus modifiers) to the dialog."""
        # ignore bare modifier presses; wait for a complete combination
        if raw_key not in MODIFIERS:
            IoMediator.listeners.remove(self)
            self.target_parent.set_key(raw_key, modifiers)
            iomediator.CURRENT_INTERFACE.ungrab_keyboard()
    def handle_mouseclick(self, root_x, root_y, rel_x, rel_y, button, window_info):
        """A mouse click anywhere cancels the grab."""
        IoMediator.listeners.remove(self)
        iomediator.CURRENT_INTERFACE.ungrab_keyboard()
        self.target_parent.cancel_grab()
class Recorder(KeyGrabber):
    """
    Recorder used by the record macro functionality
    """

    def __init__(self, parent):
        KeyGrabber.__init__(self, parent)
        self.insideKeys = False
        self.start_time = .0
        self.delay = .0
        self.delay_finished = False
        self.record_keyboard = self.record_mouse = False

    def start(self, delay: float):
        """Begin recording, ignoring events for the first `delay` seconds."""
        # In QT version, sometimes the mouse click event arrives before we
        # finish initialising - sleep slightly to prevent this
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        self.target_parent.start_record()
        self.start_time = time.time()
        self.delay = delay
        self.delay_finished = False

    def start_withgrab(self):
        """Begin recording immediately with an exclusive keyboard grab."""
        time.sleep(0.1)
        IoMediator.listeners.append(self)
        self.target_parent.start_record()
        self.start_time = time.time()
        self.delay = 0
        self.delay_finished = True
        iomediator.CURRENT_INTERFACE.grab_keyboard()

    def stop(self):
        """Stop recording and close any open key sequence."""
        if self in IoMediator.listeners:
            IoMediator.listeners.remove(self)
        if self.insideKeys:
            self.target_parent.end_key_sequence()
        self.insideKeys = False

    def METHOD_NAME(self):
        """Stop a recording started via start_withgrab(), releasing the grab."""
        iomediator.CURRENT_INTERFACE.ungrab_keyboard()
        if self in IoMediator.listeners:
            IoMediator.listeners.remove(self)
        if self.insideKeys:
            self.target_parent.end_key_sequence()
        self.insideKeys = False

    def set_record_keyboard(self, record: bool):
        self.record_keyboard = record

    def set_record_mouse(self, record: bool):
        self.record_mouse = record

    def _delay_passed(self) -> bool:
        """Return True once the configured start delay has elapsed."""
        if not self.delay_finished:
            # Fixed: the previous implementation converted the elapsed time
            # into a datetime and compared its `second` component, which
            # wraps back to 0 every 60 seconds — so delays of a minute or
            # more never completed. Compare elapsed seconds directly.
            elapsed = time.time() - self.start_time
            self.delay_finished = elapsed > self.delay
        return self.delay_finished

    def handle_keypress(self, raw_key, modifiers, key, *args):
        """Append the pressed key (or hotkey combination) to the macro."""
        if self.record_keyboard and self._delay_passed():
            if not self.insideKeys:
                self.insideKeys = True
                self.target_parent.start_key_sequence()
            modifier_count = len(modifiers)
            # TODO: This check assumes that Key.SHIFT is the only case shifting modifier. What about ISO_Level3_Shift
            # or ISO_Level5_Lock?
            if modifier_count > 1 or (modifier_count == 1 and Key.SHIFT not in modifiers) or \
                    (Key.SHIFT in modifiers and len(raw_key) > 1):
                self.target_parent.append_hotkey(raw_key, modifiers)
            elif key not in MODIFIERS:
                self.target_parent.append_key(key)

    def handle_mouseclick(self, root_x, root_y, rel_x, rel_y, button, window_info):
        """Append a mouse click to the macro, ending any open key sequence."""
        if self.record_mouse and self._delay_passed():
            if self.insideKeys:
                self.insideKeys = False
                self.target_parent.end_key_sequence()
            self.target_parent.append_mouseclick(rel_x, rel_y, button, window_info[0])
7,568 | get audit spec | # Bob build tool
# Copyright (C) 2016 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from ..errors import BuildError
from ..stringparser import IfExpression
from ..utils import joinLines, check_output
from .scm import Scm, ScmAudit, ScmTaint, ScmStatus
from shlex import quote
from textwrap import indent
import os, os.path
import schema
import subprocess
from xml.etree import ElementTree
class SvnScm(Scm):
    """Subversion SCM handler (``scm: svn`` in a recipe)."""
    # Optional keys with per-recipe overridable defaults.
    DEFAULTS = {
        schema.Optional('dir') : str,
        schema.Optional('sslVerify') : bool,
    };
    __SCHEMA = {
        'scm' : 'svn',
        'url' : str,
        schema.Optional('if') : schema.Or(str, IfExpression),
        schema.Optional('revision') : schema.Or(int, str),
    }
    # Full input schema: mandatory svn keys plus the optional defaults.
    SCHEMA = schema.Schema({**__SCHEMA, **DEFAULTS})
    def __init__(self, spec, overrides=[]):
        super().__init__(spec, overrides)
        self.__url = spec["url"]
        self.__dir = spec.get("dir", ".")
        self.__revision = spec.get("revision")
        self.__sslVerify = spec.get('sslVerify', True)
    def getProperties(self, isJenkins):
        """Return a dict of all properties describing this checkout."""
        ret = super().getProperties(isJenkins)
        ret.update({
            'scm' : 'svn',
            "url" : self.__url,
            "dir" : self.__dir,
            'sslVerify' : self.__sslVerify,
        })
        if self.__revision:
            ret["revision"] = self.__revision
        return ret
    async def invoke(self, invoker):
        """Check out the working copy, or update an existing one.

        Tag checkouts ("/tags/" in the URL) are assumed immutable and are
        never updated once present.
        """
        options = [ "--non-interactive" ]
        if not self.__sslVerify:
            options += [ "--trust-server-cert-failures=unknown-ca,cn-mismatch,expired,not-yet-valid,other" ]
        if self.__revision:
            options += [ "-r", str(self.__revision) ]
        if os.path.isdir(invoker.joinPath(self.__dir, ".svn")):
            if "/tags/" not in self.__url:
                await invoker.checkCommand(["svn", "up"] + options, cwd=self.__dir)
        else:
            await invoker.checkCommand(["svn", "co"] + options + [self.__url, self.__dir])
    def asDigestScript(self):
        """Return forward compatible stable string describing this svn module.
        The module is represented as "url[@rev] > dir".
        """
        return (self.__url + ( ("@"+str(self.__revision)) if self.__revision else "" ) + " > "
                + self.__dir)
    def asJenkins(self, workPath, config):
        """Build the Jenkins SubversionSCM plugin XML for this checkout."""
        scm = ElementTree.Element("scm", attrib={
            "class" : "hudson.scm.SubversionSCM",
            "plugin" : "subversion@2.4.5",
        })
        locations = ElementTree.SubElement(scm, "locations")
        location = ElementTree.SubElement(locations,
            "hudson.scm.SubversionSCM_-ModuleLocation")
        url = self.__url
        if self.__revision:
            url += ( "@" + str(self.__revision) )
        ElementTree.SubElement(location, "remote").text = url
        credentialsId = ElementTree.SubElement(location, "credentialsId")
        if config.credentials: credentialsId.text = config.credentials
        ElementTree.SubElement(location, "local").text = (
            os.path.normpath(os.path.join(workPath, self.__dir)) )
        ElementTree.SubElement(location, "depthOption").text = "infinity"
        ElementTree.SubElement(location, "ignoreExternalsOption").text = "true"
        # remaining elements are required by the plugin but left at defaults
        ElementTree.SubElement(scm, "excludedRegions")
        ElementTree.SubElement(scm, "includedRegions")
        ElementTree.SubElement(scm, "excludedUsers")
        ElementTree.SubElement(scm, "excludedRevprop")
        ElementTree.SubElement(scm, "excludedCommitMessages")
        ElementTree.SubElement(scm, "workspaceUpdater",
            attrib={"class":"hudson.scm.subversion.UpdateUpdater"})
        ElementTree.SubElement(scm, "ignoreDirPropChanges").text = "false"
        ElementTree.SubElement(scm, "filterChangelog").text = "false"
        return scm
    def getDirectory(self):
        return self.__dir
    def isDeterministic(self):
        # only a pinned numeric revision is reproducible; symbolic revisions
        # (e.g. "HEAD") or none at all are not
        return str(self.__revision).isnumeric()
    def hasJenkinsPlugin(self):
        return True
    def callSubversion(self, workspacePath, *args):
        """Run ``svn *args`` in the checkout and return stripped stdout.

        Raises BuildError on a non-zero exit status or OS-level failure.
        """
        cmdLine = ['svn']
        cmdLine.extend(args)
        cwd = os.path.join(workspacePath, self.__dir)
        try:
            output = subprocess.check_output(cmdLine, cwd=cwd,
                universal_newlines=True, errors='replace', stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError as e:
            raise BuildError("svn error:\n Directory: '{}'\n Command: '{}'\n'{}'".format(
                cwd, " ".join(cmdLine), e.output.rstrip()))
        except OSError as e:
            raise BuildError("Error calling svn: " + str(e))
        return output.strip()
    def status(self, workspacePath):
        """Compare the working copy against the recipe and report taints."""
        status = ScmStatus()
        try:
            output = self.callSubversion(workspacePath, 'status')
            if output:
                status.add(ScmTaint.modified, joinLines("> modified:", indent(output, '   ')))
            output = self.callSubversion(workspacePath, 'info', '--xml')
            info = ElementTree.fromstring(output)
            entry = info.find('entry')
            url = entry.find('url').text
            revision = entry.attrib['revision']
            if self.__url != url:
                status.add(ScmTaint.switched,
                    "> URL: configured: '{}', actual: '{}'".format(self.__url, url))
            if self.__revision is not None and int(revision) != int(self.__revision):
                status.add(ScmTaint.switched,
                    "> revision: configured: {}, actual: {}".format(self.__revision, revision))
        except BuildError as e:
            status.add(ScmTaint.error, e.slogan)
        return status
    def METHOD_NAME(self):
        """Return the (type, dir, extra) triple used for auditing."""
        return ("svn", self.__dir, {})
class SvnAudit(ScmAudit):
    """Audit record of an svn checkout (url, revision, repo identity, dirty flag)."""
    SCHEMA = schema.Schema({
        'type' : 'svn',
        'dir' : str,
        'url' : str,
        'revision' : int,
        'dirty' : bool,
        'repository' : {
            'root' : str,
            'uuid' : str
        }
    })
    async def _scanDir(self, workspace, dir, extra):
        """Collect audit data by querying the working copy with svn."""
        self.__dir = dir
        try:
            info = ElementTree.fromstring(await check_output(
                ["svn", "info", "--xml", dir],
                cwd=workspace, universal_newlines=True, errors='replace'))
            self.__url = info.find('entry/url').text
            self.__revision = int(info.find('entry').get('revision'))
            self.__repoRoot = info.find('entry/repository/root').text
            self.__repoUuid = info.find('entry/repository/uuid').text
            # any `svn status` output means local modifications exist
            status = await check_output(["svn", "status", dir],
                cwd=workspace, universal_newlines=True, errors='replace')
            self.__dirty = status != ""
        except subprocess.CalledProcessError as e:
            raise BuildError("Svn audit failed: " + str(e))
        except OSError as e:
            raise BuildError("Error calling svn: " + str(e))
        except ElementTree.ParseError as e:
            raise BuildError("Invalid XML received from svn")
    def _load(self, data):
        """Restore audit state from a dict previously produced by dump()."""
        self.__dir = data["dir"]
        self.__url = data["url"]
        self.__revision = data["revision"]
        self.__dirty = data["dirty"]
        self.__repoRoot = data["repository"]["root"]
        self.__repoUuid = data["repository"]["uuid"]
    def dump(self):
        """Serialize the audit state; the result validates against SCHEMA."""
        return {
            "type" : "svn",
            "dir" : self.__dir,
            "url" : self.__url,
            "revision" : self.__revision,
            "dirty" : self.__dirty,
            "repository" : {
                "root" : self.__repoRoot,
                "uuid" : self.__repoUuid,
            }
        }
    def getStatusLine(self):
        return self.__url + "@" + str(self.__revision) + ("-dirty" if self.__dirty else "")
7,569 | rewire node | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import shutil
import tempfile
from collections import OrderedDict

from llnl.util.symlink import symlink

import spack.binary_distribution as bindist
import spack.error
import spack.hooks
import spack.paths
import spack.platforms
import spack.relocate as relocate
import spack.stage
import spack.store
def _relocate_spliced_links(links, orig_prefix, new_prefix):
    """Re-linking function which differs from `relocate.relocate_links` by
    reading the old link rather than the new link, since the latter wasn't moved
    in our case. This still needs to be called after the copy to destination
    because it expects the new directory structure to be in place."""
    for link in links:
        # read the target as recorded in the *original* install prefix
        link_target = os.readlink(os.path.join(orig_prefix, link))
        # rewrite only a leading occurrence of the old prefix
        link_target = re.sub("^" + orig_prefix, new_prefix, link_target)
        new_link_path = os.path.join(new_prefix, link)
        # replace the copied symlink with one pointing into the new prefix
        os.unlink(new_link_path)
        symlink(link_target, new_link_path)
def rewire(spliced_spec):
    """Given a spliced spec, this function conducts all the rewiring on all
    nodes in the DAG of that spec."""
    assert spliced_spec.spliced
    # post-order traversal: dependencies are rewired before their dependents
    for spec in spliced_spec.traverse(order="post", root=True):
        if not spec.build_spec.installed:
            # TODO: May want to change this at least for the root spec...
            # spec.build_spec.package.do_install(force=True)
            raise PackageNotInstalledError(spliced_spec, spec.build_spec, spec)
        # only nodes actually changed by the splice (and not installed yet)
        # need to be rewired
        if spec.build_spec is not spec and not spec.installed:
            # only the root of the splice counts as an explicit install
            explicit = spec is spliced_spec
            METHOD_NAME(spec, explicit)
def METHOD_NAME(spec, explicit):
    """Rewire a single spliced node in place.

    Stages a copy of the already-installed build spec, relocates text,
    binaries and symlinks in the copy so they reference the spliced
    dependencies' prefixes, installs the result into ``spec.prefix`` and
    registers the spec in the database.

    Args:
        spec: spliced spec to rewire; ``spec.build_spec`` must be installed.
        explicit: whether the database entry is recorded as explicit.
    """
    tempdir = tempfile.mkdtemp()
    # Fixed: previously the staging directory leaked whenever any relocation
    # step raised; clean it up unconditionally.
    try:
        # copy anything installed to a temporary directory
        shutil.copytree(spec.build_spec.prefix, os.path.join(tempdir, spec.dag_hash()))
        spack.hooks.pre_install(spec)
        # compute prefix-to-prefix for every node from the build spec to the
        # spliced spec
        prefix_to_prefix = OrderedDict({spec.build_spec.prefix: spec.prefix})
        for build_dep in spec.build_spec.traverse(root=False):
            prefix_to_prefix[build_dep.prefix] = spec[build_dep.name].prefix
        manifest = bindist.get_buildfile_manifest(spec.build_spec)
        platform = spack.platforms.by_name(spec.platform)
        text_to_relocate = [
            os.path.join(tempdir, spec.dag_hash(), rel_path)
            for rel_path in manifest.get("text_to_relocate", [])
        ]
        if text_to_relocate:
            relocate.relocate_text(files=text_to_relocate, prefixes=prefix_to_prefix)
        bins_to_relocate = [
            os.path.join(tempdir, spec.dag_hash(), rel_path)
            for rel_path in manifest.get("binary_to_relocate", [])
        ]
        if bins_to_relocate:
            if "macho" in platform.binary_formats:
                relocate.relocate_macho_binaries(
                    bins_to_relocate,
                    str(spack.store.STORE.layout.root),
                    str(spack.store.STORE.layout.root),
                    prefix_to_prefix,
                    False,
                    spec.build_spec.prefix,
                    spec.prefix,
                )
            if "elf" in platform.binary_formats:
                relocate.relocate_elf_binaries(
                    bins_to_relocate,
                    str(spack.store.STORE.layout.root),
                    str(spack.store.STORE.layout.root),
                    prefix_to_prefix,
                    False,
                    spec.build_spec.prefix,
                    spec.prefix,
                )
            relocate.relocate_text_bin(binaries=bins_to_relocate, prefixes=prefix_to_prefix)
        # Copy package into place, except for spec.json (because spec.json
        # describes the old spec and not the new spliced spec).
        shutil.copytree(
            os.path.join(tempdir, spec.dag_hash()),
            spec.prefix,
            ignore=shutil.ignore_patterns("spec.json", "install_manifest.json"),
        )
        if manifest.get("link_to_relocate"):
            _relocate_spliced_links(
                manifest.get("link_to_relocate"), spec.build_spec.prefix, spec.prefix
            )
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)
    # Above, we did not copy spec.json: instead, here we write the new
    # (spliced) spec into spec.json, without this, Database.add would fail on
    # the next line (because it checks the spec.json in the prefix against the
    # spec being added to look for mismatches)
    spack.store.STORE.layout.write_spec(spec, spack.store.STORE.layout.spec_file_path(spec))
    # add to database, not sure about explicit
    spack.store.STORE.db.add(spec, spack.store.STORE.layout, explicit=explicit)
    # run post install hooks
    spack.hooks.post_install(spec, explicit)
class RewireError(spack.error.SpackError):
    """Raised when something goes wrong with rewiring."""
    def __init__(self, message, long_msg=None):
        super().__init__(message, long_msg)
class PackageNotInstalledError(RewireError):
    """Raised when the build_spec for a splice was not installed."""
    def __init__(self, spliced_spec, build_spec, dep):
        # NOTE: the embedded indentation is part of the message literal
        super().__init__(
            """Rewire of {0}
            failed due to missing install of build spec {1}
            for spec {2}""".format(
                spliced_spec, build_spec, dep
            )
        )
7,570 | inc error | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python3
"""A client that talks to tensorflow_model_server loaded with mnist model.
The client downloads test images of mnist data set, queries the service with
such test images to get predictions, and calculates the inference error rate.
Typical usage example:
mnist_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import sys
import threading
import subprocess
# This is a placeholder for a Google-internal import.
import grpc
import numpy
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import mnist_input_data
# Command-line flags: client-side concurrency cap, number of test images,
# the PredictionService endpoint, and where MNIST test data is cached.
tf.compat.v1.app.flags.DEFINE_integer(
    "concurrency", 1, "maximum number of concurrent inference requests"
)
tf.compat.v1.app.flags.DEFINE_integer("num_tests", 100, "Number of test images")
tf.compat.v1.app.flags.DEFINE_string("server", "localhost:8500", "PredictionService host:port")
tf.compat.v1.app.flags.DEFINE_string("work_dir", "/tmp", "Working directory. ")
FLAGS = tf.compat.v1.app.flags.FLAGS
class _ResultCounter(object):
"""Counter for the prediction results."""
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
def METHOD_NAME(self):
with self._condition:
self._error += 1
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def get_error_rate(self):
with self._condition:
while self._done != self._num_tests:
self._condition.wait()
return self._error / float(self._num_tests)
def throttle(self):
with self._condition:
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def _create_rpc_callback(label, result_counter):
"""Creates RPC callback function.
Args:
label: The correct label for the predicted example.
result_counter: Counter for the prediction result.
Returns:
The callback function.
"""
def _callback(result_future):
"""Callback function.
Calculates the statistics for the prediction result.
Args:
result_future: Result future of the RPC.
"""
exception = result_future.exception()
if exception:
result_counter.METHOD_NAME()
print(exception)
else:
sys.stdout.write(".")
sys.stdout.flush()
response = numpy.array(result_future.result().outputs["scores"].float_val)
prediction = numpy.argmax(response)
if label != prediction:
result_counter.METHOD_NAME()
result_counter.inc_done()
result_counter.dec_active()
return _callback
def do_inference(hostport, work_dir, concurrency, num_tests):
    """Tests PredictionService with concurrent requests.
    Args:
      hostport: Host:port address of the PredictionService.
      work_dir: The full path of working directory for test data set.
      concurrency: Maximum number of concurrent requests.
      num_tests: Number of test images to use.
    Returns:
      The classification error rate.
    Raises:
      IOError: An error occurred processing test data set.
    """
    test_data_set = mnist_input_data.read_data_sets(work_dir).test
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    result_counter = _ResultCounter(num_tests, concurrency)
    for _ in range(num_tests):
        request = predict_pb2.PredictRequest()
        request.model_spec.name = "mnist"
        # request.model_spec.signature_name = 'predict_images'
        image, label = test_data_set.next_batch(1)
        # Flatten the single image into a 1 x N tensor proto.
        request.inputs["images"].CopyFrom(tf.make_tensor_proto(image[0], shape=[1, image[0].size]))
        # Blocks while `concurrency` requests are already in flight.
        result_counter.throttle()
        result_future = stub.Predict.future(request, 5.0)  # 5 seconds
        # Outcomes are tallied asynchronously by the RPC done-callback.
        result_future.add_done_callback(_create_rpc_callback(label[0], result_counter))
    # get_error_rate() blocks until every callback has reported completion.
    return result_counter.get_error_rate()
def main(_):
    """Validate flags, run the inference benchmark, and print the error rate."""
    # Guard clauses: refuse oversized runs and a missing server address.
    if FLAGS.num_tests > 10000:
        print("num_tests should not be greater than 10k")
        return
    if not FLAGS.server:
        print("please specify server host:port")
        return
    rate = do_inference(
        FLAGS.server, FLAGS.work_dir, FLAGS.concurrency, FLAGS.num_tests
    )
    print("\nInference error rate: %s%%" % (rate * 100))
# Parse flags and dispatch to main() via the TF v1 app runner.
if __name__ == "__main__":
    tf.compat.v1.app.run()
7,571 | cls | import importlib_resources as resources
import torch
import sentencepiece as spm
"""
From the YaLM GitHub repository (https://github.com/yandex/YaLM-100B),
adapted from https://github.com/yandex/YaLM-100B/blob/main/megatron_lm/megatron/tokenizer/sp_tokenization.py.
"""
# Package resource location of the bundled YaLM SentencePiece vocabulary.
YALM_TOKENIZER_PACKAGE: str = "helm.proxy.clients.yalm_tokenizer"
YALM_TOKENIZER_VOCAB_FILENAME: str = "voc_100b.sp"
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8")
    raise TypeError(f"Unexpected type {type(text)}")
class YaLMTokenizer:
    """SentencePiece tokenizer for YaLM-100B with HF-tokenizer-like helpers.

    Adapted from the YaLM repository; exposes encode/decode plus a
    ``__call__`` that pads/truncates to ``model_max_length`` and returns
    PyTorch tensors.
    """

    NEW_LINE = "[NL]"
    # Special token ids of the underlying SentencePiece vocabulary.
    UNK = 0
    BOS = 1
    EOS = 2
    BOS_TOKEN = "<s>"
    PAD_TOKEN = "<s>"
    EOS_TOKEN = "</s>"
    MASK_TOKEN = "[MASK]"
    MAX_SEQUENCE_LENGTH = 2048

    def __init__(self):
        self.name = "sp"
        vocab_file_path = str(resources.files(YALM_TOKENIZER_PACKAGE).joinpath(YALM_TOKENIZER_VOCAB_FILENAME))
        self._tokenizer = spm.SentencePieceProcessor(model_file=vocab_file_path)
        self._vocab_words = self._get_vocab_words()
        self.encoder = {token: idx for idx, token in enumerate(self._vocab_words)}
        self.decoder = {idx: token for idx, token in enumerate(self._vocab_words)}
        self.padding_side = "left"
        self.truncation_side = "left"
        # Bug fix: __call__ truncates/pads against self.model_max_length, but
        # the attribute was never assigned, so every __call__ raised
        # AttributeError. Define it here from the class constant.
        self.model_max_length = self.MAX_SEQUENCE_LENGTH
        mask_tokens = self.convert_tokens_to_ids([self.MASK_TOKEN])
        assert len(mask_tokens) == 1
        self.MASK = mask_tokens[0]

    def _encode(self, line, out_type=str):
        """Run the raw SentencePiece encoder on one line."""
        return self._tokenizer.encode(line, out_type=out_type)

    def tokenize(self, line, out_type=int):
        """Tokenize ``line``, honoring literal <s>/</s> markers and newlines."""
        line = convert_to_unicode(line)
        line = line.replace("\n", self.NEW_LINE)
        has_bos = False
        has_eos = False
        # Handle special tokens: strip a leading "<s>"/"<s> " and a trailing
        # "</s>"/" </s>" before encoding, then re-attach them as ids/tokens.
        if line.startswith(f"{YaLMTokenizer.BOS_TOKEN} "):
            has_bos = True
            line = line[4:]
        elif line.startswith(YaLMTokenizer.BOS_TOKEN):
            has_bos = True
            line = line[3:]
        if line.endswith(f" {YaLMTokenizer.EOS_TOKEN}"):
            has_eos = True
            line = line[:-5]
        elif line.endswith(YaLMTokenizer.EOS_TOKEN):
            has_eos = True
            line = line[:-4]
        token_ids = self._encode(line, out_type=out_type)
        if has_bos:
            if out_type == int:
                token_ids = [1] + token_ids
            else:
                token_ids = [YaLMTokenizer.BOS_TOKEN] + token_ids
        if has_eos:
            if out_type == int:
                token_ids = token_ids + [2]
            else:
                token_ids = token_ids + [YaLMTokenizer.EOS_TOKEN]
        return token_ids

    def convert_tokens_to_ids(self, tokens):
        """Map token strings to vocabulary ids."""
        return self._tokenizer.piece_to_id(tokens)

    def convert_tokens_to_string(self, tokens):
        """Decode token strings to per-token text via their ids."""
        return self.convert_ids_to_string(self.convert_tokens_to_ids(tokens))

    def convert_ids_to_string(self, ids):
        """Decode each id individually; returns a list of text pieces."""
        return [self._tokenizer.decode([i]) for i in ids]

    def convert_ids_to_tokens(self, ids):
        """Map an id (or tensor/list of ids) back to vocabulary token(s)."""
        if isinstance(ids, int):
            return self.decoder[ids]
        if isinstance(ids, torch.Tensor):
            ids = ids.cpu().tolist()
        return [self.decoder[idx] for idx in ids]

    def get_tokens(self):
        """Return the full vocabulary as a list of token strings."""
        return self._vocab_words

    def _get_vocab_words(self):
        """Materialize every SentencePiece piece in id order."""
        indices = list(range(self._tokenizer.GetPieceSize()))
        return self._tokenizer.id_to_piece(indices)

    @property
    def vocab(self):
        """Token-string -> id mapping."""
        return self.encoder

    @property
    def inv_vocab(self):
        """Id -> token-string mapping."""
        return self.decoder

    @property
    def vocab_size(self):
        """Number of entries in the vocabulary."""
        return len(self.encoder)

    def detokenize(self, token_ids):
        """Join token pieces back into text (SentencePiece '\u2581' -> space)."""
        tokens = [self.decoder[idx] for idx in token_ids]
        text = "".join(tokens).replace("\u2581", " ").lstrip()
        return text

    @property
    def METHOD_NAME(self):
        """Id of the beginning-of-sequence token."""
        return self.BOS

    @property
    def eod(self):
        """Id of the end-of-sequence token."""
        return self.EOS

    @property
    def mask(self):
        """Id of the [MASK] token."""
        return self.MASK

    def __call__(self, text, return_tensors="pt", padding="max_length", truncation=True, add_bos=True):
        """Tokenize, truncate and pad ``text`` into model-ready tensors.

        Returns a dict with ``input_ids`` and ``attention_mask`` tensors of
        shape (batch, model_max_length).
        """
        assert return_tensors == "pt"
        assert padding == "max_length"
        if isinstance(text, str):
            text = [text]
        ids = []
        for t in text:
            if t.startswith(f"{YaLMTokenizer.BOS_TOKEN} "):
                t_ids = self.tokenize(t[4:])
                t_ids = [1] + t_ids
            elif t.startswith(YaLMTokenizer.BOS_TOKEN):
                t_ids = self.tokenize(t[3:])
                t_ids = [1] + t_ids
            else:
                t_ids = self.tokenize(t)
                if add_bos:
                    t_ids = [1] + t_ids  # append <s>
            if truncation:
                if self.truncation_side == "left":
                    t_ids = t_ids[-self.model_max_length :]
                else:
                    t_ids = t_ids[: self.model_max_length]
            ids.append(t_ids)
        # NOTE: padding is asserted to be "max_length" above, so this branch
        # is currently unreachable; kept for symmetry with HF tokenizers.
        if padding != "max_length":
            max_len = max([len(t_ids) for t_ids in ids])
        else:
            max_len = self.model_max_length
        attention_mask = torch.ones(len(ids), max_len, dtype=torch.long)
        if self.padding_side == "left":
            new_ids = []
            for i, t_ids in enumerate(ids):
                attention_mask[i, : max_len - len(t_ids)] = 0
                new_ids.append([self.BOS] * (max_len - len(t_ids)) + t_ids)
        else:
            new_ids = []
            for i, t_ids in enumerate(ids):
                attention_mask[i, -(max_len - len(t_ids)) :] = 0
                new_ids.append(t_ids + [self.EOS] * (max_len - len(t_ids)))
        ids = new_ids
        ids = torch.tensor(ids)
        if add_bos:
            # make sure starts with <s>
            ids[:, 0] = 1
        return {"input_ids": ids, "attention_mask": attention_mask}

    def decode(self, token_ids):
        """Detokenize ids (or a tensor of ids) back into plain text."""
        if isinstance(token_ids, torch.Tensor):
            token_ids = token_ids.cpu().tolist()
        return self.detokenize(token_ids).replace(self.NEW_LINE, "\n")
7,572 | pixbuf2 image | #!/usr/bin/python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk, GObject
from PIL import Image
class EyeDropper(Gtk.HBox):
    """Eyedropper widget: grabs the pointer and emits 'color-picked' with the
    color under the cursor anywhere on screen."""

    __gsignals__ = {
        'color-picked': (GObject.SignalFlags.RUN_LAST, None, (GObject.TYPE_STRING,))
    }

    def __init__(self):
        Gtk.HBox.__init__(self)
        self.button = Gtk.Button("")
        # NOTE(review): assumes a gettext "_" is installed in builtins by the
        # application before this module is used -- confirm.
        self.button.set_tooltip_text(_("Click the eyedropper, then click a color anywhere on your screen to select that color"))
        self.button.set_image(Gtk.Image().new_from_stock(Gtk.STOCK_COLOR_PICKER, Gtk.IconSize.BUTTON))
        self.button.get_property('image').show()
        self.button.set_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK)
        self.pack_start(self.button, False, False, 2)
        # Signal-handler ids; None while no grab is active.
        self.bp_handler = None
        self.br_handler = None
        self.kp_handler = None
        self.button.connect("clicked", self.on_button_clicked)

    def on_button_clicked(self, widget):
        """Start a pointer grab through an off-screen 1x1 popup window."""
        screen = widget.get_screen()
        self.time = Gtk.get_current_event_time()
        self.device = Gtk.get_current_event_device()
        self.grab_widget = Gtk.Window(Gtk.WindowType.POPUP)
        self.grab_widget.set_screen(screen)
        self.grab_widget.resize(1, 1)
        self.grab_widget.move(-100, -100)
        self.grab_widget.show()
        self.grab_widget.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.POINTER_MOTION_MASK)
        toplevel = widget.get_toplevel()
        if isinstance(toplevel, Gtk.Window):
            if toplevel.has_group():
                # Bug fix: referenced the undefined name `grab_widget`
                # (NameError at runtime); the popup is self.grab_widget.
                toplevel.add_window(self.grab_widget)
        window = self.grab_widget.get_window()
        picker_cursor = Gdk.Cursor(screen.get_display(), Gdk.CursorType.CROSSHAIR)
        grab_status = self.device.grab(window, Gdk.GrabOwnership.APPLICATION, False,
                                       Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.POINTER_MOTION_MASK,
                                       picker_cursor, self.time)
        if grab_status != Gdk.GrabStatus.SUCCESS:
            return
        Gtk.device_grab_add(self.grab_widget, self.device, True)
        self.bp_handler = self.grab_widget.connect("button-press-event", self.mouse_press)
        self.kp_handler = self.grab_widget.connect("key-press-event", self.key_press)

    def mouse_press(self, widget, event):
        """On left button press, arm the release handler that picks the color."""
        if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 1:
            self.br_handler = widget.connect("button-release-event", self.mouse_release)
            return True
        return False

    def key_press(self, widget, event):
        """Escape cancels the grab; Space/Enter picks at the pointer position."""
        screen, x_root, y_root = self.device.get_position()
        if event.keyval == Gdk.KEY_Escape:
            self.ungrab(self.device)
            return True
        elif event.keyval in (Gdk.KEY_space, Gdk.KEY_Return, Gdk.KEY_ISO_Enter, Gdk.KEY_KP_Enter, Gdk.KEY_KP_Space):
            self.grab_color_at_pointer(event, screen, x_root, y_root)
            return True
        return False

    def mouse_release(self, widget, event):
        """Pick the color at the release position (left button only)."""
        screen, x, y = self.device.get_position()
        if event.button != 1:
            return False
        self.grab_color_at_pointer(event, screen, event.x_root, event.y_root)
        return True

    def grab_color_at_pointer(self, event, screen, x_root, y_root):
        """Read the 1x1 pixel under the pointer and emit 'color-picked'."""
        device = self.device
        window = screen.get_root_window()
        pixbuf = Gdk.pixbuf_get_from_window(window, x_root, y_root, 1, 1)
        image = METHOD_NAME(pixbuf)
        r, g, b = image.getpixel((0, 0))
        color = Gdk.RGBA()
        color.red = r / 255.0
        color.green = g / 255.0
        color.blue = b / 255.0
        self.emit('color-picked', color.to_string())
        self.ungrab(device)

    def ungrab(self, device):
        """Release the pointer grab and disconnect all grab-window handlers."""
        device.ungrab(self.time)
        Gtk.device_grab_remove(self.grab_widget, device)
        # Bug fix: br_handler is still None when the grab ends before any
        # button press (e.g. cancelled with Escape); disconnecting None raised
        # TypeError. Only disconnect handlers that were actually connected.
        for handler in (self.bp_handler, self.br_handler, self.kp_handler):
            if handler is not None:
                self.grab_widget.handler_disconnect(handler)
        self.bp_handler = self.br_handler = self.kp_handler = None
def METHOD_NAME(pb):
    """Convert a GdkPixbuf.Pixbuf to a PIL RGB Image.

    NOTE(review): assumes the pixbuf rowstride equals width*3 with no alpha
    channel -- true for the 1x1 grabs used here; confirm before reusing on
    larger pixbufs.
    """
    width, height = pb.get_width(), pb.get_height()
    # Image.fromstring() was removed from Pillow; frombytes() is the
    # supported replacement with identical semantics.
    return Image.frombytes("RGB", (width, height), pb.get_pixels())
7,573 | test sysconfig compiler vars | """Tests for distutils.sysconfig."""
import os
import test
import unittest
import shutil
from distutils import sysconfig
from distutils.tests import support
from test.test_support import TESTFN
class SysconfigTestCase(support.EnvironGuard,
                        unittest.TestCase):
    """Tests for distutils.sysconfig (Python 2-era test harness)."""

    def setUp(self):
        """Reset per-test state; makefile path is created lazily by tests."""
        super(SysconfigTestCase, self).setUp()
        self.makefile = None

    def tearDown(self):
        """Remove any makefile and temp files created by the test."""
        if self.makefile is not None:
            os.unlink(self.makefile)
        self.cleanup_testfn()
        super(SysconfigTestCase, self).tearDown()

    def cleanup_testfn(self):
        """Delete TESTFN whether it was created as a file or a directory."""
        path = test.test_support.TESTFN
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)

    def test_get_python_lib(self):
        """get_python_lib() should honor `prefix` and agree with stdlib sysconfig."""
        lib_dir = sysconfig.get_python_lib()
        # XXX doesn't work on Linux when Python was never installed before
        #self.assertTrue(os.path.isdir(lib_dir), lib_dir)
        # test for pythonxx.lib?
        self.assertNotEqual(sysconfig.get_python_lib(),
                            sysconfig.get_python_lib(prefix=TESTFN))
        # The stdlib sysconfig module, distinct from distutils.sysconfig
        # imported at the top of this file.
        _sysconfig = __import__('sysconfig')
        res = sysconfig.get_python_lib(True, True)
        self.assertEqual(_sysconfig.get_path('platstdlib'), res)

    def test_get_python_inc(self):
        """get_python_inc() should point at a directory containing Python.h."""
        inc_dir = sysconfig.get_python_inc()
        # This is not much of a test.  We make sure Python.h exists
        # in the directory returned by get_python_inc() but we don't know
        # it is the correct file.
        self.assertTrue(os.path.isdir(inc_dir), inc_dir)
        python_h = os.path.join(inc_dir, "Python.h")
        self.assertTrue(os.path.isfile(python_h), python_h)

    def test_parse_makefile_base(self):
        """parse_makefile() should expand $VAR references and drop used vars."""
        self.makefile = test.test_support.TESTFN
        fd = open(self.makefile, 'w')
        try:
            fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'" '\n')
            fd.write('VAR=$OTHER\nOTHER=foo')
        finally:
            fd.close()
        d = sysconfig.parse_makefile(self.makefile)
        self.assertEqual(d, {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'",
                             'OTHER': 'foo'})

    def test_parse_makefile_literal_dollar(self):
        """parse_makefile() should keep escaped $$ as a literal dollar sign."""
        self.makefile = test.test_support.TESTFN
        fd = open(self.makefile, 'w')
        try:
            fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=\$$LIB'" '\n')
            fd.write('VAR=$OTHER\nOTHER=foo')
        finally:
            fd.close()
        d = sysconfig.parse_makefile(self.makefile)
        self.assertEqual(d, {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'",
                             'OTHER': 'foo'})

    def test_sysconfig_module(self):
        """distutils.sysconfig and stdlib sysconfig should agree on CFLAGS/LDFLAGS."""
        import sysconfig as global_sysconfig
        self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS'))
        self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS'))

    @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized')
    def METHOD_NAME(self):
        """Both sysconfig modules should agree on compiler-related vars."""
        # On OS X, binary installers support extension module building on
        # various levels of the operating system with differing Xcode
        # configurations.  This requires customization of some of the
        # compiler configuration directives to suit the environment on
        # the installed machine.  Some of these customizations may require
        # running external programs and, so, are deferred until needed by
        # the first extension module build.  With Python 3.3, only
        # the Distutils version of sysconfig is used for extension module
        # builds, which happens earlier in the Distutils tests.  This may
        # cause the following tests to fail since no tests have caused
        # the global version of sysconfig to call the customization yet.
        # The solution for now is to simply skip this test in this case.
        # The longer-term solution is to only have one version of sysconfig.
        import sysconfig as global_sysconfig
        # Redundant with the skipIf decorator above, kept as a belt-and-braces
        # guard in case the config var changes between import and run.
        if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'):
            return
        self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED'))
        self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC'))
def test_suite():
    """Collect every SysconfigTestCase test into a single suite."""
    return unittest.TestSuite([unittest.makeSuite(SysconfigTestCase)])
# Allow running this test module directly (Python 2-era test harness).
if __name__ == '__main__':
    test.test_support.run_unittest(test_suite())
7,574 | test fail shape | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.TFRecordWriter`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import writers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class TFRecordWriterTest(test_base.DatasetTestBase):
    """Tests for `tf.data.experimental.TFRecordWriter`."""

    def setUp(self):
        super(TFRecordWriterTest, self).setUp()
        # Number of records written into every test fixture file.
        self._num_records = 8

    def writer_fn(self, filename, compression_type=""):
        """Return an op that copies `filename` to the output file via TFRecordWriter."""
        input_dataset = readers.TFRecordDataset([filename], compression_type)
        return writers.TFRecordWriter(self._outputFilename(),
                                      compression_type).write(input_dataset)

    def _record(self, i):
        """Deterministic payload for record `i`."""
        return compat.as_bytes("Record %d" % (i))

    def _createFile(self, options=None):
        """Write the fixture input file (optionally compressed) and return its path."""
        filename = self._inputFilename()
        writer = python_io.TFRecordWriter(filename, options)
        for i in range(self._num_records):
            writer.write(self._record(i))
        writer.close()
        return filename

    def _inputFilename(self):
        """Path of the fixture input file inside the test temp dir."""
        return os.path.join(self.get_temp_dir(), "tf_record.in.txt")

    def _outputFilename(self):
        """Path of the writer output file inside the test temp dir."""
        return os.path.join(self.get_temp_dir(), "tf_record.out.txt")

    def testWrite(self):
        """Round-trip uncompressed records through the writer."""
        self.evaluate(self.writer_fn(self._createFile()))
        for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
            self.assertAllEqual(self._record(i), r)

    def testWriteZLIB(self):
        """Round-trip ZLIB-compressed records through the writer."""
        options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB)
        self.evaluate(
            self.writer_fn(self._createFile(options), compression_type="ZLIB"))
        for i, r in enumerate(
            tf_record.tf_record_iterator(self._outputFilename(), options=options)):
            self.assertAllEqual(self._record(i), r)

    def testWriteGZIP(self):
        """Round-trip GZIP-compressed records through the writer."""
        options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP)
        self.evaluate(
            self.writer_fn(self._createFile(options), compression_type="GZIP"))
        for i, r in enumerate(
            tf_record.tf_record_iterator(self._outputFilename(), options=options)):
            self.assertAllEqual(self._record(i), r)

    def testFailDataset(self):
        """write() must reject non-dataset arguments."""
        with self.assertRaises(TypeError):
            writers.TFRecordWriter(self._outputFilename(), "").write("whoops")

    def testFailDType(self):
        """write() must reject datasets whose elements are not strings."""
        input_dataset = dataset_ops.Dataset.from_tensors(10)
        with self.assertRaises(TypeError):
            writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)

    def METHOD_NAME(self):
        """write() must reject datasets whose elements are not scalars."""
        input_dataset = dataset_ops.Dataset.from_tensors([["hello"], ["world"]])
        with self.assertRaises(TypeError):
            writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)

    def testSideEffect(self):
        """The write must happen even when invoked only for its side effect in a defun."""
        def writer_fn():
            input_dataset = readers.TFRecordDataset(self._createFile())
            return writers.TFRecordWriter(self._outputFilename()).write(input_dataset)

        @function.defun
        def fn():
            _ = writer_fn()
            return "hello"

        self.assertEqual(self.evaluate(fn()), b"hello")
        for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
            self.assertAllEqual(self._record(i), r)

    def testShard(self):
        """Records should shard round-robin into two files by index parity."""
        filename = self._createFile()
        dataset = readers.TFRecordDataset([filename])

        def reduce_func(key, dataset):
            shard_filename = string_ops.string_join(
                [filename, string_ops.as_string(key)])
            writer = writers.TFRecordWriter(shard_filename)
            writer.write(dataset.map(lambda _, x: x))
            return dataset_ops.Dataset.from_tensors(shard_filename)

        dataset = dataset.enumerate()
        dataset = dataset.apply(
            grouping.group_by_window(lambda i, _: i % 2, reduce_func,
                                     dtypes.int64.max))
        get_next = self.getNext(dataset)
        for i in range(2):
            shard_filename = (filename + str(i)).encode()
            self.assertEqual(self.evaluate(get_next()), shard_filename)
            # Shard i holds records i, i+2, i+4, ... from the input.
            for j, r in enumerate(tf_record.tf_record_iterator(shard_filename)):
                self.assertAllEqual(self._record(i + 2*j), r)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
7,575 | copy | # Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains an DatasetAttribute that allows for heterogeneous lists of dataset
types."""
import typing
from collections.abc import Sequence
from typing import Generic, List, Union, overload
from pennylane.data.base.attribute import DatasetAttribute
from pennylane.data.base.hdf5 import HDF5Any, HDF5Group
from pennylane.data.base.mapper import MapperMixin
from pennylane.data.base.typing_util import T
class DatasetList(  # pylint: disable=too-many-ancestors
    Generic[T],
    DatasetAttribute[HDF5Group, typing.Sequence[T], typing.Iterable[T]],
    typing.MutableSequence[T],
    MapperMixin,
):
    """Provides a list-like collection type for Dataset Attributes."""

    type_id = "list"

    def __post_init__(self, value: typing.Iterable[T]):
        """Populate the backing HDF5 group with the initial elements."""
        super().__post_init__(value)
        self.extend(value)

    @classmethod
    def default_value(cls) -> typing.Iterable[T]:
        """An empty list is the default initial value."""
        return []

    def hdf5_to_value(self, bind: HDF5Group) -> typing.MutableSequence[T]:
        # The list object itself is the in-memory value; elements are
        # materialized lazily through the mapper on access.
        return self

    def value_to_hdf5(
        self, bind_parent: HDF5Group, key: str, value: typing.Iterable[T]
    ) -> HDF5Group:
        # Only the containing group is created here; elements are written by
        # __post_init__ via extend().
        grp = bind_parent.create_group(key)
        return grp

    def copy_value(self) -> List[T]:
        """Deep-copy every element into a builtin list."""
        return [self._mapper[str(i)].copy_value() for i in range(len(self))]

    def METHOD_NAME(self) -> List[T]:
        """Returns a copy of this list as a builtin ``list``, with all
        elements copied.."""
        return self.copy_value()

    def insert(self, index: int, value: Union[T, DatasetAttribute[HDF5Any, T, T]]):
        """Implements the insert() method."""
        # Normalize negative indexes; clamp to 0 like list.insert does.
        if index < 0:
            index = len(self) + index
        if index < 0:
            index = 0
        elif index >= len(self):
            # Appending: no elements need to be shifted.
            self._mapper[str(len(self))] = value
            return
        # Shift elements [index, len) up by one, back to front, to make room.
        for i in reversed(range(index, len(self))):
            self._mapper.move(str(i), str(i + 1))
        self._mapper[str(index)] = value

    def __len__(self) -> int:
        return len(self.bind)

    def __eq__(self, __value: object) -> bool:
        # Element-wise comparison against any sequence of equal length.
        if not isinstance(__value, Sequence):
            return False
        if not len(self) == len(__value):
            return False
        return all(x == y for x, y in zip(self, __value))

    def __str__(self) -> str:
        return str(list(self))

    def __repr__(self) -> str:
        items_repr = ", ".join(repr(elem) for elem in self)
        return f"[{items_repr}]"

    @overload
    def __getitem__(self, index: slice) -> typing.List[T]:
        ...

    @overload
    def __getitem__(self, index: int) -> T:
        ...

    def __getitem__(self, index: Union[int, slice]):
        # Slices return a plain list; integer access goes through the mapper.
        if isinstance(index, slice):
            return [self[i] for i in range(len(self))[index]]
        if index < 0:
            index = len(self) + index
        if not 0 <= index < len(self):
            raise IndexError(index)
        return self._mapper[str(index)].get_value()

    def __setitem__(self, index: int, value: Union[T, DatasetAttribute[HDF5Any, T, T]]):
        if index < 0:
            index = len(self) + index
        if not 0 <= index < len(self):
            raise IndexError("list assignment index out of range")
        key = str(index)
        # Replace in place: the old attribute must be removed first.
        if key in self._mapper:
            del self._mapper[key]
        self._mapper[key] = value

    def __delitem__(self, index: int):
        init_len = len(self)
        if index < 0:
            index = init_len + index
        if not 0 <= index < init_len:
            raise IndexError(index)
        del self._mapper[str(index)]
        # Move all the objects in front of the deleted object back one
        # NOTE(review): after the bounds check above, index < init_len always
        # holds here, so this guard is redundant but harmless.
        if index < init_len:
            for i in range(index, init_len - 1):
                self._mapper.move(str(i + 1), str(i))
7,576 | test set parameters | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test of configurable backend generation."""
from qiskit.test import QiskitTestCase
from qiskit.providers.fake_provider.utils.configurable_backend import ConfigurableFakeBackend
class TestConfigurableFakeBackend(QiskitTestCase):
    """Configurable backend test."""

    def test_default_parameters(self):
        """Test default parameters."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=10)
        properties = fake_backend.properties()
        self.assertEqual(len(properties.qubits), 10)
        self.assertEqual(properties.backend_version, "0.0.0")
        self.assertEqual(properties.backend_name, "Tashkent")
        configuration = fake_backend.configuration()
        self.assertEqual(configuration.backend_version, "0.0.0")
        self.assertEqual(configuration.backend_name, "Tashkent")
        self.assertEqual(configuration.n_qubits, 10)
        self.assertEqual(configuration.basis_gates, ["id", "u1", "u2", "u3", "cx"])
        self.assertTrue(configuration.local)
        self.assertTrue(configuration.open_pulse)

    def METHOD_NAME(self):
        """Test parameters setting."""
        # Exercise several backend sizes; qubit_t1/t2 are given in
        # microseconds and surfaced by properties() in seconds.
        for n_qubits in range(10, 100, 30):
            with self.subTest(n_qubits=n_qubits):
                fake_backend = ConfigurableFakeBackend(
                    "Tashkent",
                    n_qubits=n_qubits,
                    version="0.0.1",
                    basis_gates=["u1"],
                    qubit_t1=99.0,
                    qubit_t2=146.0,
                    qubit_frequency=5.0,
                    qubit_readout_error=0.01,
                    single_qubit_gates=["u1"],
                )
                properties = fake_backend.properties()
                self.assertEqual(properties.backend_version, "0.0.1")
                self.assertEqual(properties.backend_name, "Tashkent")
                self.assertEqual(len(properties.qubits), n_qubits)
                self.assertEqual(len(properties.gates), n_qubits)
                self.assertAlmostEqual(properties.t1(0), 99e-6, places=7)
                self.assertAlmostEqual(properties.t2(0), 146e-6, places=7)
                configuration = fake_backend.configuration()
                self.assertEqual(configuration.backend_version, "0.0.1")
                self.assertEqual(configuration.backend_name, "Tashkent")
                self.assertEqual(configuration.n_qubits, n_qubits)
                self.assertEqual(configuration.basis_gates, ["u1"])

    def test_gates(self):
        """Test generated gates."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=4)
        properties = fake_backend.properties()
        self.assertEqual(len(properties.gates), 22)
        fake_backend = ConfigurableFakeBackend(
            "Tashkent", n_qubits=4, basis_gates=["u1", "u2", "cx"]
        )
        properties = fake_backend.properties()
        self.assertEqual(len(properties.gates), 14)
        self.assertEqual(len([g for g in properties.gates if g.gate == "cx"]), 6)

    def test_coupling_map_generation(self):
        """Test generation of default coupling map."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=10)
        cmap = fake_backend.configuration().coupling_map
        # Expected default couplings for a 10-qubit backend.
        target = [
            [0, 1],
            [0, 4],
            [1, 2],
            [1, 5],
            [2, 3],
            [2, 6],
            [3, 7],
            [4, 5],
            [4, 8],
            [5, 6],
            [5, 9],
            [6, 7],
            [8, 9],
        ]
        # Same-length subset check in both directions == set equality.
        for couple in cmap:
            with self.subTest(coupling=couple):
                self.assertTrue(couple in target)
        self.assertEqual(len(target), len(cmap))

    def test_configuration(self):
        """Test backend configuration."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=10)
        configuration = fake_backend.configuration()
        self.assertEqual(configuration.n_qubits, 10)
        self.assertEqual(configuration.meas_map, [list(range(10))])
        self.assertEqual(len(configuration.hamiltonian["qub"]), 10)
        self.assertEqual(len(configuration.hamiltonian["vars"]), 33)
        self.assertEqual(len(configuration.u_channel_lo), 13)
        self.assertEqual(len(configuration.meas_lo_range), 10)
        self.assertEqual(len(configuration.qubit_lo_range), 10)

    def test_defaults(self):
        """Test backend defaults."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=10)
        defaults = fake_backend.defaults()
        self.assertEqual(len(defaults.cmd_def), 54)
        self.assertEqual(len(defaults.meas_freq_est), 10)
        self.assertEqual(len(defaults.qubit_freq_est), 10)

    def test_with_coupling_map(self):
        """Test backend generation with coupling map."""
        target_coupling_map = [[0, 1], [1, 2], [2, 3]]
        fake_backend = ConfigurableFakeBackend(
            "Tashkent", n_qubits=4, coupling_map=target_coupling_map
        )
        cmd_def = fake_backend.defaults().cmd_def
        configured_cmap = fake_backend.configuration().coupling_map
        # cx pulse definitions must exist exactly for the requested couplings.
        controlled_not_qubits = [cmd.qubits for cmd in cmd_def if cmd.name == "cx"]
        self.assertEqual(controlled_not_qubits, target_coupling_map)
        self.assertEqual(configured_cmap, target_coupling_map)

    def test_get_name_with_method(self):
        """Get backend name."""
        fake_backend = ConfigurableFakeBackend("Tashkent", n_qubits=4)
        self.assertEqual(fake_backend.name(), "Tashkent")
7,577 | check is cached | """Server tests for document caching."""
import re
from unittest.mock import patch, Mock
from timApp.auth.accesstype import AccessType
from timApp.document.caching import clear_doc_cache
from timApp.document.docentry import DocEntry
from timApp.item import routes
from timApp.item.routes import render_doc_view
from timApp.tests.server.timroutetest import TimRouteTest, get_note_id_from_json
from timApp.timdb.sqa import db
from timApp.user.usergroup import UserGroup
from timApp.user.userutils import grant_access
class CachingTest(TimRouteTest):
def test_cache(self):
self.login_test1()
d = self.create_doc(initial_par="#- {plugin=textfield #t}")
self.test_user_2.grant_access(d, AccessType.view)
self.test_user_3.grant_access(d, AccessType.view)
db.session.commit()
clear_doc_cache(d, None)
self.login_test3()
self.check_not_cached(d)
self.check_not_cached(d) # cache is disabled by default
d.document.set_settings({"cache": True})
self.check_not_cached(d) # was not in cache, added to cache
def normalize(s: str) -> str:
# Remove modified date as it can be slightly different depending on CI speed
return re.sub(r'"modified": "[^"]+"', r'"modified": "X"', s)
last = normalize(self.last_get)
self.METHOD_NAME(d) # was in cache, fetched from cache
# Cached result should be the same as not cached.
self.assertEqual(last, normalize(self.last_get))
self.METHOD_NAME(d) # still in cache
self.METHOD_NAME(
d, {"unlock": True}
) # value of unlock shouldn't affect caching
self.check_not_cached(d, {"nocache": True}) # nocache should clear the cache
self.check_not_cached(d, {"nocache": True}) # make sure nocache is not cached
self.METHOD_NAME(d)
self.post_answer("textfield", f"{d.id}.t", user_input={"c": "x"})
self.check_not_cached_and_then_cached(d)
par = d.document.get_paragraphs()[0]
self.mark_as_read(d, par.get_id())
self.check_not_cached_and_then_cached(d)
self.login_test2()
self.check_not_cached_and_then_cached(d)
self.login_test3()
c = self.post_comment(par, public=False, text="test")
nid = get_note_id_from_json(c)
self.check_not_cached_and_then_cached(d)
self.login_test2()
self.METHOD_NAME(d)
self.login_test3()
self.edit_comment(nid, par, True, "test2")
self.check_not_cached_and_then_cached(d)
self.login_test2()
self.check_not_cached_and_then_cached(d)
self.login_test3()
self.edit_comment(nid, par, False, "test2")
self.check_not_cached_and_then_cached(d)
self.login_test2()
self.check_not_cached_and_then_cached(d)
def check_not_cached_and_then_cached(self, d, query=None):
self.check_not_cached(d, query)
self.METHOD_NAME(d, query)
def METHOD_NAME(self, d, query=None):
self.get_with_patch(d, query).assert_not_called()
def check_not_cached(self, d, query=None):
self.get_with_patch(d, query).assert_called_once()
    def get_with_patch(self, d, query=None):
        """Fetch the document URL while spying on the view renderer.

        Wraps (not replaces) render_doc_view so the real response is still
        produced; the returned Mock lets callers assert whether rendering
        happened (cache miss) or was skipped (cache hit). The raw response
        body is stashed in self.last_get for content comparisons.
        """
        with patch.object(
            routes, render_doc_view.__name__, wraps=render_doc_view
        ) as m:  # type: Mock
            self.last_get = self.get(d.url, query_string=query)
        return m
    def test_cache_pregenerate(self):
        """/generateCache pre-renders the document cache for every user with view access."""
        self.login_test1()
        d = self.create_doc(initial_par="test")
        self.test_user_2.grant_access(d, AccessType.view)
        db.session.commit()
        clear_doc_cache(d, None)
        # Pre-generation is refused while the document has caching disabled.
        self.get(
            f"/generateCache/{d.path}",
            expect_status=400,
            expect_content="Document does not have caching enabled.",
        )
        d.document.set_settings({"cache": True})
        # First run renders and caches for both users with view access.
        self.get(
            f"/generateCache/{d.path}",
            expect_content="""
1/2 testuser1: ok
2/2 testuser2: ok
""".strip()
            + "\n",
        )
        # Second run finds everything already cached.
        self.get(
            f"/generateCache/{d.path}",
            expect_content="""
1/2 testuser1: already cached
2/2 testuser2: already cached
""".strip()
            + "\n",
        )
        # A regular cached fetch must not invalidate the pre-generated cache.
        self.METHOD_NAME(d)
        self.get(
            f"/generateCache/{d.path}",
            # TODO enable this test. We need two users with only view right.
            # testuser1 and 2 have different rights so there will be differences in the HTML.
            # query_string={'print_diffs': True},
            expect_content="""
1/2 testuser1: already cached
2/2 testuser2: already cached
""".strip()
            + "\n",
        )
        # Group-based access is rejected: pre-generation only supports direct users.
        ug = UserGroup.create("testgroup1")
        self.test_user_3.add_to_group(ug, added_by=None)
        grant_access(ug, d, AccessType.view)
        db.session.commit()
        d = DocEntry.find_by_id(d.id)
        db.session.refresh(d)
        self.get(
            f"/generateCache/{d.path}",
            expect_status=403,
            expect_content="No access for group testgroup1",
        )
        # When running all server tests in IDE (PyCharm), the tests start failing in test_clipboard with
        # sqlalchemy.orm.exc.FlushError in initialize_database. Refreshing the test client prevents it.
        # The line self.test_user_3.add_to_group seems to trigger the error.
        self.refresh_client()
    def test_cache_generate_exam_mode(self):
        """Cached exam-mode documents must still expose correct per-user rights."""
        self.login_test1()
        d = self.create_doc(settings={"exam_mode": "view", "cache": True})
        # The nocache is just for making sure there's no previous doc cache when rerunning this same test from IDE.
        # The doc id is always the same because the DB starts fresh.
        self.get(d.url, query_string={"nocache": True})
        self.test_user_2.grant_access(d, AccessType.view)
        db.session.commit()
        self.get(
            f"/generateCache/{d.path}",
        )
        # Fetch as the view-only user; the response comes from the pre-generated cache.
        self.login_test2()
        r = self.get(d.url, as_tree=True)
        self.assert_js_variable(r, "exam_mode", True)
        # The cached page must carry testuser2's own (view-only) rights,
        # not the owner's rights from whoever generated the cache.
        rights = self.get_js_variable(r, "curr_item")["rights"]
        self.assertEqual(
            {
                "browse_own_answers": True,
                "can_comment": True,
                "can_mark_as_read": True,
                "copy": False,
                "editable": False,
                "manage": False,
                "owner": False,
                "see_answers": False,
                "teacher": False,
            },
            rights,
        )
        # The same must hold for the rights attached to translations.
        rights = self.get_js_variable(r, "translations")[0]["rights"]
        self.assertEqual(
            {
                "browse_own_answers": True,
                "can_comment": True,
                "can_mark_as_read": True,
                "copy": False,
                "editable": False,
                "manage": False,
                "owner": False,
                "see_answers": False,
                "teacher": False,
            },
            rights,
        )
7,578 | metrics | import tensorflow as tf
from tensorflow import keras
from keras.integration_test.models.input_spec import InputSpec
from keras.saving import serialization_lib
IMG_SIZE = (64, 64)
LATENT_DIM = 128
def get_data_spec(batch_size):
    """Return the input spec for a batch of RGB images of size IMG_SIZE."""
    shape = (batch_size, *IMG_SIZE, 3)
    return InputSpec(shape)
def get_input_preprocessor():
    """This model consumes raw image batches; no preprocessing stage is needed."""
    return None
class GAN(keras.Model):
    """GAN that pairs a generator with a discriminator.

    train_step alternates two updates: first the discriminator is trained to
    separate generated from real images, then the generator is trained to
    fool the (frozen-for-this-substep) discriminator. The get_config /
    from_config and get_compile_config / compile_from_config pairs make the
    model round-trippable through Keras saving.
    """

    def __init__(self, discriminator, generator, latent_dim):
        super(GAN, self).__init__()
        self.discriminator = discriminator
        self.generator = generator
        # Dimensionality of the random latent vectors fed to the generator.
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, loss_fn, jit_compile=False):
        """Configure training with separate optimizers per sub-network.

        Args:
            d_optimizer: optimizer for the discriminator update.
            g_optimizer: optimizer for the generator update.
            loss_fn: binary classification loss shared by both updates.
            jit_compile: forwarded to keras.Model.compile (XLA compilation).
        """
        super(GAN, self).compile(jit_compile=jit_compile)
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn
        # Running means of the two losses, reported via the metrics property.
        self.d_loss_metric = keras.METHOD_NAME.Mean(name="d_loss")
        self.g_loss_metric = keras.METHOD_NAME.Mean(name="g_loss")

    @property
    def METHOD_NAME(self):
        # Listing the metrics here lets Keras reset their state between epochs.
        return [self.d_loss_metric, self.g_loss_metric]

    def train_step(self, real_images):
        """One combined discriminator + generator update on a batch of real images."""
        batch_size = tf.shape(real_images)[0]
        random_latent_vectors = tf.random.normal(
            shape=(batch_size, self.latent_dim)
        )
        generated_images = self.generator(random_latent_vectors)
        # Stack fakes before reals; label fakes 1 and reals 0 accordingly.
        combined_images = tf.concat([generated_images, real_images], axis=0)
        labels = tf.concat(
            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
        )
        # Label smoothing via random noise — a common GAN stabilization trick.
        labels += 0.05 * tf.random.uniform(tf.shape(labels))
        with tf.GradientTape() as tape:
            predictions = self.discriminator(combined_images)
            d_loss = self.loss_fn(labels, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(
            zip(grads, self.discriminator.trainable_weights)
        )
        # Fresh latent batch for the generator step.
        random_latent_vectors = tf.random.normal(
            shape=(batch_size, self.latent_dim)
        )
        # "Misleading" labels: pretend the fakes are real so the generator's
        # gradient pushes the discriminator output toward the real class.
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            predictions = self.discriminator(
                self.generator(random_latent_vectors)
            )
            g_loss = self.loss_fn(misleading_labels, predictions)
        # Only generator weights are updated here; the discriminator is left alone.
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(
            zip(grads, self.generator.trainable_weights)
        )
        self.d_loss_metric.update_state(d_loss)
        self.g_loss_metric.update_state(g_loss)
        return {
            "d_loss": self.d_loss_metric.result(),
            "g_loss": self.g_loss_metric.result(),
        }

    def get_config(self):
        # Sub-models are returned as-is; serialization_lib handles them on reload.
        return {
            "discriminator": self.discriminator,
            "generator": self.generator,
            "latent_dim": self.latent_dim,
        }

    @classmethod
    def from_config(cls, config):
        """Rebuild a GAN from a config produced by get_config."""
        discriminator = serialization_lib.deserialize_keras_object(
            config["discriminator"]
        )
        generator = serialization_lib.deserialize_keras_object(
            config["generator"]
        )
        latent_dim = config["latent_dim"]
        return cls(discriminator, generator, latent_dim)

    def get_compile_config(self):
        # Mirrors compile()'s signature so compile state survives save/load.
        return {
            "loss_fn": self.loss_fn,
            "d_optimizer": self.d_optimizer,
            "g_optimizer": self.g_optimizer,
            "jit_compile": self.jit_compile,
        }

    def compile_from_config(self, config):
        """Re-run compile() from a config produced by get_compile_config."""
        loss_fn = serialization_lib.deserialize_keras_object(config["loss_fn"])
        d_optimizer = serialization_lib.deserialize_keras_object(
            config["d_optimizer"]
        )
        g_optimizer = serialization_lib.deserialize_keras_object(
            config["g_optimizer"]
        )
        jit_compile = config["jit_compile"]
        self.compile(
            loss_fn=loss_fn,
            d_optimizer=d_optimizer,
            g_optimizer=g_optimizer,
            jit_compile=jit_compile,
        )
def get_model(
    build=False, compile=False, jit_compile=False, include_preprocessing=True
):
    """Build the GAN used by the integration tests.

    Args:
        build: unused here; part of the shared model-factory signature.
        compile: if True, compile the GAN with Adam optimizers and BCE loss.
        jit_compile: forwarded to GAN.compile when compiling.
        include_preprocessing: unused; this model needs no preprocessing.

    Returns:
        A (possibly compiled) GAN instance.
    """
    # Discriminator: 64x64x3 image -> single sigmoid real/fake score.
    discriminator = keras.Sequential(
        [
            keras.Input(shape=IMG_SIZE + (3,)),
            keras.layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
            keras.layers.LeakyReLU(alpha=0.2),
            keras.layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
            keras.layers.LeakyReLU(alpha=0.2),
            keras.layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
            keras.layers.LeakyReLU(alpha=0.2),
            keras.layers.Flatten(),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(1, activation="sigmoid"),
        ],
        name="discriminator",
    )
    # Generator: latent vector -> 8x8 feature map upsampled 3x to 64x64x3.
    generator = keras.Sequential(
        [
            keras.Input(shape=(LATENT_DIM,)),
            keras.layers.Dense(8 * 8 * 128),
            keras.layers.Reshape((8, 8, 128)),
            keras.layers.Conv2DTranspose(
                128, kernel_size=4, strides=2, padding="same"
            ),
            keras.layers.LeakyReLU(alpha=0.2),
            keras.layers.Conv2DTranspose(
                256, kernel_size=4, strides=2, padding="same"
            ),
            keras.layers.LeakyReLU(alpha=0.2),
            keras.layers.Conv2DTranspose(
                512, kernel_size=4, strides=2, padding="same"
            ),
            keras.layers.LeakyReLU(alpha=0.2),
            # Final sigmoid keeps pixel values in [0, 1].
            keras.layers.Conv2D(
                3, kernel_size=5, padding="same", activation="sigmoid"
            ),
        ],
        name="generator",
    )
    gan = GAN(
        discriminator=discriminator, generator=generator, latent_dim=LATENT_DIM
    )
    if compile:
        gan.compile(
            d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
            g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
            loss_fn=keras.losses.BinaryCrossentropy(),
            jit_compile=jit_compile,
        )
    return gan
def get_custom_objects():
    """Map custom class names to classes so saved models can be deserialized."""
    return dict(GAN=GAN)
7,579 | matcher | """Index analyzer plugin for matching against data in BigQuery tables."""
import itertools
import logging
from timesketch.lib import emojis
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
has_required_deps = True
try:
from google.cloud import bigquery
from google.auth import exceptions as google_auth_exceptions
except ImportError:
has_required_deps = False
logger = logging.getLogger("timesketch.analyzers.bigquery_matcher")
class BigQueryMatcherPlugin(interface.BaseAnalyzer):
    """Analyzer for matching events to BigQuery data."""

    NAME = "bigquery_matcher"
    DISPLAY_NAME = "BigQuery matcher"
    DESCRIPTION = "Match pre-defined event fields to data in BigQuery tables"

    _BQ_BATCH_SIZE = 10000  # Number of entries per BQ query

    def __init__(self, index_name, sketch_id, timeline_id=None, **kwargs):
        """Initialize the BQ Matcher Analyzer.

        Args:
            index_name: OpenSearch index name
            sketch_id: Sketch ID
            timeline_id: The ID of the timeline
        """
        self.index_name = index_name
        # Each analyzer instance handles exactly one (name, config) pair
        # out of the list produced by get_kwargs().
        self._matcher_name = kwargs.get("matcher_name")
        self._matcher_config = kwargs.get("matcher_config")
        super().__init__(index_name, sketch_id, timeline_id=timeline_id)

    @staticmethod
    def get_kwargs():
        """Get kwargs for the analyzer.

        Returns:
            List of matchers.
        """
        bq_config = interface.get_yaml_config("bigquery_matcher.yaml")
        if not bq_config:
            logger.error("BigQuery Matcher could not load configuration file.")
            return []
        # One analyzer instance will be spawned per configured matcher.
        matcher_kwargs = [
            {"matcher_name": matcher_name, "matcher_config": matcher_config}
            for matcher_name, matcher_config in bq_config.items()
        ]
        return matcher_kwargs

    def run(self):
        """Entry point for the analyzer.

        Returns:
            String with summary of the analyzer result.
        """
        if self._matcher_name is None or self._matcher_config is None:
            return "Configuration file is not valid for this analyzer."
        return self.METHOD_NAME(self._matcher_name, self._matcher_config)

    def bigquery_match(self, bq_client, bq_query, event_field_name, values):
        """Run a BigQuery query for rows with matching event_field_name values.

        Returns:
            BigQuery query job.
        """
        # The values are passed as a named ARRAY<STRING> query parameter, so
        # bq_query is expected to reference @<event_field_name>.
        job_config = bigquery.QueryJobConfig(
            query_parameters=[
                bigquery.ArrayQueryParameter(event_field_name, "STRING", values),
            ]
        )
        return bq_client.query(bq_query, job_config=job_config)

    def METHOD_NAME(self, name, config):
        """Entry point for the analyzer.

        Args:
            name: name of the matcher (used in the summary string).
            config: dict with the matcher's configuration (event_field_name,
                bq_query, bq_project, tags, emojis).

        Returns:
            String with summary of the analyzer result.
        """
        event_field_name = config.get("event_field_name")
        bq_query = config.get("bq_query")
        bq_project = config.get("bq_project")
        tags = config.get("tags")
        emoji_names = config.get("emojis")
        # NOTE(review): if the config omits 'emojis', emoji_names is None and
        # this comprehension raises TypeError — confirm the YAML always sets it.
        emojis_to_add = [emojis.get_emoji(x) for x in emoji_names]
        # Fetch every event that has the configured field at all.
        es_query = (
            '{"query": { "bool": { "should": [ '
            '{ "exists" : { "field" : "' + event_field_name + '" }} ] } } }'
        )
        events_stream = self.event_stream(
            query_dsl=es_query,
            return_fields=[event_field_name],
        )
        # Group events by their field value so one BQ row can tag them all.
        events = {}
        for event in events_stream:
            field = event.source.get(event_field_name)
            events.setdefault(field, []).append(event)
        try:
            bq_client = bigquery.Client(project=bq_project)
        except google_auth_exceptions.DefaultCredentialsError as exception:
            return "Could not authenticate to BigQuery: {0!s}".format(exception)
        num_matches = 0
        # Query BigQuery in batches of distinct field values.
        # NOTE(review): islice re-iterates the dict from the start for every
        # batch, making this loop O(n^2) in the number of distinct values;
        # slicing list(events) once would be linear.
        for i in range(0, len(events), self._BQ_BATCH_SIZE):
            batch = list(itertools.islice(events, i, i + self._BQ_BATCH_SIZE))
            query_job = self.bigquery_match(
                bq_client, bq_query, event_field_name, batch
            )
            for row in query_job:
                # row[0] is assumed to be the matched field value — TODO confirm
                # the configured bq_query selects it as the first column.
                for event in events[row[0]]:
                    event.add_tags(tags)
                    event.add_emojis(emojis_to_add)
                    event.commit()
                    num_matches += 1
        return ("{0:d} events found for matcher [{1:s}]").format(num_matches, name)
# Only register the analyzer when the optional google-cloud deps imported.
if has_required_deps:
    manager.AnalysisManager.register_analyzer(BigQueryMatcherPlugin)
7,580 | init | #! /usr/bin/env python
# encoding: utf-8
#
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
"""
Example of xml config file
<device class="MotorMockup">
<username>Mock motor</username>
<actuator_name>mock_motor</actuator_name>
<!-- for the mockup only -->
<default_value>500</default_value>
<velocity>100</velocity>
<wrap_range>None</wrap_range>
<default_limits>[-360, 360]</default_limits>
</device>
"""
import time
import ast
from mxcubecore.HardwareObjects.abstract.AbstractMotor import AbstractMotor
from mxcubecore.HardwareObjects.mockup.ActuatorMockup import ActuatorMockup
from mxcubecore.HardwareObjects.abstract.AbstractMotor import MotorStates
__copyright__ = """ Copyright © 2010-2020 by the MXCuBE collaboration """
__license__ = "LGPLv3+"
DEFAULT_VELOCITY = 100
DEFAULT_LIMITS = (-10000, 10000)
DEFAULT_VALUE = 10.124
DEFAULT_WRAP_RANGE = None
class MotorMockup(ActuatorMockup, AbstractMotor):
    """Mock Motor implementation"""

    SPECIFIC_STATES = MotorStates

    def __init__(self, name):
        AbstractMotor.__init__(self, name)
        # Optional modulo for circular axes (e.g. 360 for a rotation motor);
        # None means the axis does not wrap.
        self._wrap_range = None

    def METHOD_NAME(self):
        """Initialisation method"""
        # get username, actuator_name and tolerance
        super().METHOD_NAME()
        # local properties
        if not self.get_velocity():
            self.set_velocity(DEFAULT_VELOCITY)
        if None in self.get_limits():
            self.update_limits(DEFAULT_LIMITS)
        try:
            # wrap_range comes from the XML config as a string; parse it safely.
            _wr = self.get_property("wrap_range")
            self._wrap_range = DEFAULT_WRAP_RANGE if not _wr else ast.literal_eval(_wr)
        except (ValueError, SyntaxError):
            self._wrap_range = DEFAULT_WRAP_RANGE
        if self.default_value is None:
            self.default_value = DEFAULT_VALUE
        self.update_value(self.default_value)
        self.update_state(self.STATES.READY)

    def _move(self, value):
        """Simulated motor movement.

        Args:
            value (float): target position
        Returns:
            (float): The reached position.
        """
        self.update_specific_state(self.SPECIFIC_STATES.MOVING)
        start_pos = self.get_value()
        if value is not None and start_pos is not None:
            delta = abs(value - start_pos)
            direction = -1 if value < self.get_value() else 1
            start_time = time.time()
            # Emit intermediate positions at the configured velocity so the
            # movement looks continuous to observers.
            while (time.time() - start_time) < (delta / self.get_velocity()):
                time.sleep(0.02)
                val = start_pos + direction * self.get_velocity() * (
                    time.time() - start_time
                )
                # Apply modulo for circular axes.
                val = val if not self._wrap_range else val % self._wrap_range
                self.update_value(val)
        time.sleep(0.02)
        # Report a special state when parked exactly at home or at a limit.
        _low, _high = self.get_limits()
        if value == self.default_value:
            self.update_specific_state(self.SPECIFIC_STATES.HOME)
        elif value == _low:
            self.update_specific_state(self.SPECIFIC_STATES.LOWLIMIT)
        elif value == _high:
            self.update_specific_state(self.SPECIFIC_STATES.HIGHLIMIT)
        else:
            self.update_specific_state(None)
        return value

    def is_moving(self):
        # NOTE(review): get_state() returns the general state, so the
        # comparison with SPECIFIC_STATES.MOVING looks like it should use
        # get_specific_state() instead — confirm against AbstractActuator.
        return ( (self.get_state() == self.STATES.BUSY ) or (self.get_state() == self.SPECIFIC_STATES.MOVING))
|
7,581 | test get fingerprint for ourselves | import datetime
import unittest
import stem
import stem.response
import stem.version
from unittest.mock import Mock, patch
from stem.interpreter.commands import ControlInterpreter, _get_fingerprint
from stem.response import ControlMessage
from test.unit.interpreter import CONTROLLER
EXPECTED_EVENTS_RESPONSE = """\
\x1b[34mBW 15 25\x1b[0m
\x1b[34mBW 758 570\x1b[0m
\x1b[34mDEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.\x1b[0m
"""
EXPECTED_INFO_RESPONSE = """\
moria1 (9695DFC35FFEB861329B9F1AB04C46397020CE31)
\x1b[34;1maddress: \x1b[0m128.31.0.34:9101 (moria.csail.mit.edu)
\x1b[34;1mtor version: \x1b[0m0.2.5.4-alpha-dev
\x1b[34;1mflags: \x1b[0mAuthority, Fast, Guard, HSDir, Named, Running, Stable, V2Dir, Valid
\x1b[34;1mexit policy: \x1b[0mreject *:*
\x1b[34;1mcontact: \x1b[0m1024D/28988BF5 arma mit edu
"""
EXPECTED_GETCONF_RESPONSE = """\
\x1b[34;1mlog\x1b[0m\x1b[34m => notice stdout\x1b[0m
\x1b[34;1maddress\x1b[0m\x1b[34m => \x1b[0m
"""
FINGERPRINT = '9695DFC35FFEB861329B9F1AB04C46397020CE31'
class TestInterpreterCommands(unittest.TestCase):
    """Unit tests for stem's interpreter commands and _get_fingerprint helper."""

    def METHOD_NAME(self):
        """Empty query resolves to our own fingerprint via GETINFO."""
        controller = Mock()
        controller.get_info.side_effect = lambda arg: {
            'fingerprint': FINGERPRINT,
        }[arg]
        self.assertEqual(FINGERPRINT, _get_fingerprint('', controller))
        # If the controller query fails, the helper raises ValueError.
        controller.get_info.side_effect = stem.ControllerError
        self.assertRaises(ValueError, _get_fingerprint, '', controller)

    def test_get_fingerprint_for_fingerprint(self):
        """A literal fingerprint is returned unchanged."""
        self.assertEqual(FINGERPRINT, _get_fingerprint(FINGERPRINT, Mock()))

    def test_get_fingerprint_for_nickname(self):
        """A relay nickname is resolved through get_network_status."""
        controller, descriptor = Mock(), Mock()
        descriptor.fingerprint = FINGERPRINT
        controller.get_network_status.side_effect = lambda arg: {
            'moria1': descriptor,
        }[arg]
        self.assertEqual(FINGERPRINT, _get_fingerprint('moria1', controller))
        controller.get_network_status.side_effect = stem.ControllerError
        self.assertRaises(ValueError, _get_fingerprint, 'moria1', controller)

    def test_get_fingerprint_for_address(self):
        """An 'address[:port]' query matches against network status entries."""
        controller = Mock()
        # Malformed port / address are rejected before any lookup.
        self.assertRaises(ValueError, _get_fingerprint, '127.0.0.1:-1', controller)
        self.assertRaises(ValueError, _get_fingerprint, '127.0.0.901:80', controller)
        descriptor = Mock()
        descriptor.address = '127.0.0.1'
        descriptor.or_port = 80
        descriptor.fingerprint = FINGERPRINT
        controller.get_network_statuses.return_value = [descriptor]
        self.assertEqual(FINGERPRINT, _get_fingerprint('127.0.0.1', controller))
        self.assertEqual(FINGERPRINT, _get_fingerprint('127.0.0.1:80', controller))
        # Wrong port or wrong address must not match.
        self.assertRaises(ValueError, _get_fingerprint, '127.0.0.1:81', controller)
        self.assertRaises(ValueError, _get_fingerprint, '127.0.0.2', controller)

    def test_get_fingerprint_for_unrecognized_inputs(self):
        self.assertRaises(ValueError, _get_fingerprint, 'blarg!', Mock())

    def test_when_disconnected(self):
        """Interpreter-side commands still work after the control socket closes."""
        controller = Mock()
        controller.msg.side_effect = stem.SocketClosed('kaboom!')
        interpreter = ControlInterpreter(controller)
        # we should be able to run non-tor commands
        self.assertTrue('Interpreter commands include:' in interpreter.run_command('/help'))
        # ... but tor commands should provide exceptions
        self.assertRaises(stem.SocketClosed, interpreter.run_command, 'GETINFO version')

    def test_quit(self):
        """Both '/quit' and the tor QUIT command close the socket."""
        interpreter = ControlInterpreter(CONTROLLER)
        self.assertRaises(stem.SocketClosed, interpreter.run_command, '/quit')
        self.assertRaises(stem.SocketClosed, interpreter.run_command, 'QUIT')

    def test_help(self):
        """'/help' works bare, with a command, and with command arguments."""
        interpreter = ControlInterpreter(CONTROLLER)
        self.assertTrue('Interpreter commands include:' in interpreter.run_command('/help'))
        self.assertTrue('Queries the tor process for information.' in interpreter.run_command('/help GETINFO'))
        self.assertTrue('Queries the tor process for information.' in interpreter.run_command('/help GETINFO version'))

    def test_events(self):
        """'/events' prints queued asynchronous events, colorized."""
        interpreter = ControlInterpreter(CONTROLLER)
        # no received events
        self.assertEqual('', interpreter.run_command('/events'))
        # with enqueued events
        event_contents = (
            '650 BW 15 25',
            '650 BW 758 570',
            '650 DEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.',
        )
        for content in event_contents:
            event = ControlMessage.from_str(content, 'EVENT', normalize = True)
            interpreter._received_events.append(event)
        self.assertEqual(EXPECTED_EVENTS_RESPONSE, interpreter.run_command('/events'))

    @patch('stem.descriptor.remote.DescriptorDownloader')
    @patch('socket.gethostbyaddr', Mock(return_value = ['moria.csail.mit.edu']))
    def test_info(self, downloader_mock):
        """'/info <fingerprint>' renders a relay summary from mocked descriptors."""
        controller, server_desc, ns_desc = Mock(), Mock(), Mock()
        controller.get_microdescriptor.return_value = None
        controller.get_server_descriptor.return_value = server_desc
        controller.get_network_status.return_value = ns_desc
        downloader_mock().get_server_descriptors.return_value = [server_desc]
        controller.get_info.side_effect = lambda arg, _: {
            'ip-to-country/128.31.0.34': 'us',
        }[arg]
        ns_desc.address = '128.31.0.34'
        ns_desc.or_port = 9101
        ns_desc.published = datetime.datetime(2014, 5, 5, 5, 52, 5)
        ns_desc.nickname = 'moria1'
        ns_desc.flags = ['Authority', 'Fast', 'Guard', 'HSDir', 'Named', 'Running', 'Stable', 'V2Dir', 'Valid']
        server_desc.exit_policy = 'reject *:*'
        server_desc.platform = 'Linux'
        server_desc.tor_version = stem.version.Version('0.2.5.4-alpha-dev')
        server_desc.contact = '1024D/28988BF5 arma mit edu'
        interpreter = ControlInterpreter(controller)
        self.assertTrue(interpreter.run_command('/info ' + FINGERPRINT).startswith(EXPECTED_INFO_RESPONSE))

    def test_unrecognized_interpreter_command(self):
        interpreter = ControlInterpreter(CONTROLLER)
        expected = "\x1b[1;31m'/unrecognized' isn't a recognized command\x1b[0m\n"
        self.assertEqual(expected, interpreter.run_command('/unrecognized'))

    def test_getinfo(self):
        """GETINFO responses are forwarded and colorized; errors are rendered in red."""
        controller = Mock()
        controller.msg.return_value = ControlMessage.from_str('250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\n250 OK\r\n')
        interpreter = ControlInterpreter(controller)
        self.assertEqual('\x1b[34m250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\x1b[0m\n\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('GETINFO version'))
        # Repeat to confirm the command is not cached/consumed.
        self.assertEqual('\x1b[34m250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\x1b[0m\n\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('GETINFO version'))
        controller.msg.assert_called_with('GETINFO version')
        controller.msg.side_effect = stem.ControllerError('kaboom!')
        self.assertEqual('\x1b[1;31mkaboom!\x1b[0m\n', interpreter.run_command('getinfo process/user'))

    def test_getconf(self):
        controller = Mock()
        controller.msg.return_value = ControlMessage.from_str('250-Log=notice stdout\r\n250 Address\r\n')
        interpreter = ControlInterpreter(controller)
        self.assertEqual('\x1b[34m250-Log=notice stdout\r\x1b[0m\n\x1b[34m250 Address\x1b[0m\n', interpreter.run_command('GETCONF log address'))
        controller.msg.assert_called_with('GETCONF log address')

    def test_setevents(self):
        controller = Mock()
        controller.msg.return_value = ControlMessage.from_str('250 OK\r\n')
        interpreter = ControlInterpreter(controller)
        self.assertEqual('\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('SETEVENTS BW'))
7,582 | get unauthenticated user path file queryset |
from django.db import models
from feeds.models import Feed
from servicefiles.models import ServiceFile
from pacsfiles.models import PACSFile
from uploadedfiles.models import UploadedFile
from plugininstances.models import PluginInstanceFile
from pipelines.models import PipelineSourceFile
def get_path_file_model_class(path, username):
    """
    Convenience function to get the file model class associated to a path.

    The checks are ordered: the SERVICES/PACS prefix must be tested before the
    generic SERVICES top-level directory.
    """
    top_dir = path.split('/', 1)[0]
    if top_dir == 'PIPELINES':
        return PipelineSourceFile
    if path.startswith('SERVICES/PACS'):
        return PACSFile
    if top_dir == 'SERVICES':
        return ServiceFile
    if path.startswith(f'{username}/uploads'):
        return UploadedFile
    # Anything else lives under a user's feed tree.
    return PluginInstanceFile
def get_path_file_queryset(path, user):
    """
    Convenience function to get the file queryset associated to a path. Raises ValueError
    if the path is not found.
    """
    username = user.username
    model_class = get_path_file_model_class(path, username)
    path_username = path.split('/', 1)[0]
    # Feed files under another user's tree require a shared or public feed.
    if model_class == PluginInstanceFile and not path_username == username:
        if username == 'chris':  # chris special case (can see others' not shared feeds)
            if path == path_username:
                # Append '/' so a username that is a prefix of another doesn't match.
                return model_class.objects.filter(fname__startswith=path + '/')
            else:
                return model_class.objects.filter(fname__startswith=path)
        shared_feed_user = None
        shared_feed_creators = get_shared_feed_creators_set(user)
        for feed_creator in shared_feed_creators:
            if path_username == feed_creator.username:
                shared_feed_user = feed_creator
                break
        if shared_feed_user is None:
            # path doesn't start with a username that explicitly shared a feed with this
            # user or created a public feed
            raise ValueError('Path not found.')
        elif path == shared_feed_user.username:
            # The top-level folder itself contains no files for this user.
            qs = model_class.objects.none()
        else:
            # Restrict to the specific feeds that are shared with this user or public.
            # NOTE(review): owner is filtered twice (owner=shared_feed_user, then
            # Q(owner=user)); this only makes sense if Feed.owner allows multiple
            # owners per feed — confirm against the Feed model.
            shared_feeds_qs = Feed.objects.filter(owner=shared_feed_user).filter(
                models.Q(owner=user) | models.Q(public=True))
            shared_feed = None
            for feed in shared_feeds_qs.all():
                if path.startswith(f'{shared_feed_user.username}/feed_{feed.id}'):
                    shared_feed = feed
                    break
            if shared_feed is None:
                raise ValueError('Path not found.')
            else:
                qs = model_class.objects.filter(fname__startswith=path)
    else:
        if path == username:  # avoid colliding with usernames that are a superset of this
            qs = model_class.objects.filter(fname__startswith=path+'/')
        else:
            qs = model_class.objects.filter(fname__startswith=path)
    try:
        # Probe with a single-row fetch rather than count() to keep it cheap.
        qs[0]
    except IndexError:
        # An empty queryset is only acceptable for the known top-level folders.
        if path not in ('PIPELINES', 'SERVICES', 'SERVICES/PACS', username,
                        f'{username}/uploads'):
            raise ValueError('Path not found.')
    return qs
def METHOD_NAME(path):
    """
    Convenience function to get the file queryset associated to a path for unauthenticated
    users. Raises ValueError if the path is not found.

    Unauthenticated users can only see PIPELINES files and files inside public feeds.
    """
    path_username = path.split('/', 1)[0]
    if path_username == 'PIPELINES':
        return PipelineSourceFile.objects.filter(fname__startswith=path)
    # Otherwise the path must start with the username of a public-feed creator.
    public_feed_user = None
    public_feed_creators = get_shared_feed_creators_set()
    for feed_creator in public_feed_creators:
        if path_username == feed_creator.username:
            public_feed_user = feed_creator
            break
    if public_feed_user is None:
        # path doesn't start with a username that created a public feed
        raise ValueError('Path not found.')
    elif path == public_feed_user.username:
        # The user's top-level folder itself exposes no files.
        qs = PluginInstanceFile.objects.none()
    else:
        # The path must point inside one of that user's public feeds.
        public_feeds_qs = Feed.objects.filter(
            public=True).filter(owner=public_feed_user)
        public_feed = None
        for feed in public_feeds_qs.all():
            if path.startswith(f'{public_feed_user.username}/feed_{feed.id}'):
                public_feed = feed
                break
        if public_feed is None:
            raise ValueError('Path not found.')
        else:
            qs = PluginInstanceFile.objects.filter(fname__startswith=path)
    return qs
def get_path_folders(path, user):
    """
    Convenience function to get the immediate subfolders under a path.

    Returns a sorted list of folder names (not full paths). Raises ValueError
    (via get_path_file_queryset or the existence check below) if the path is
    not found.
    """
    qs = get_path_file_queryset(path, user)
    username = user.username
    model_class = get_path_file_model_class(path, username)
    # For another user's top-level folder the subfolders are their shared feeds,
    # which are not derivable from file names alone.
    if model_class == PluginInstanceFile and path.split('/', 1)[0] == path and path != \
            username and username != 'chris':  # handle chris special case
        shared_feeds_qs = Feed.objects.filter(owner__username=path).filter(
            models.Q(owner=user) | models.Q(public=True))
        subfolders = sorted([f'feed_{feed.id}' for feed in shared_feeds_qs])
    else:
        # Derive immediate child folder names from the file paths in the queryset.
        hash_set = set()
        existing_path = False
        for obj in qs:
            name = obj.fname.name
            if name.startswith(path + '/'):
                existing_path = True
                folder = name.replace(path + '/', '', 1)
                try:
                    first_slash_ix = folder.index('/')
                except ValueError:
                    pass  # no folders under this path (only files)
                else:
                    folder = folder[:first_slash_ix]
                    hash_set.add(folder)
        # Files exist for the prefix but none sit strictly under 'path/': the
        # path itself is a file or a partial name, not a folder.
        if len(qs) and not existing_path:
            raise ValueError('Path not found.')
        # Virtual folders that always exist regardless of file contents.
        if path == 'SERVICES':
            hash_set.add('PACS')
        if path == username:
            hash_set.add('uploads')
        subfolders = sorted(hash_set)
    return subfolders
def get_unauthenticated_user_path_folders(path):
    """
    Convenience function to get the immediate subfolders under a path for unauthenticated
    users.

    Mirrors get_path_folders but only exposes public feeds and PIPELINES.
    """
    qs = METHOD_NAME(path)
    path_username = path.split('/', 1)[0]
    # A user's top-level folder lists their public feeds as subfolders.
    if path_username == path and path_username != 'PIPELINES':
        shared_feeds_qs = Feed.objects.filter(owner__username=path).filter(public=True)
        subfolders = sorted([f'feed_{feed.id}' for feed in shared_feeds_qs])
    else:
        # Derive immediate child folder names from the file paths in the queryset.
        hash_set = set()
        existing_path = False
        for obj in qs:
            name = obj.fname.name
            if name.startswith(path + '/'):
                existing_path = True
                folder = name.replace(path + '/', '', 1)
                try:
                    first_slash_ix = folder.index('/')
                except ValueError:
                    pass  # no folders under this path (only files)
                else:
                    folder = folder[:first_slash_ix]
                    hash_set.add(folder)
        # Files matched the prefix but none lie strictly under 'path/'.
        if len(qs) and not existing_path:
            raise ValueError('Path not found.')
        subfolders = sorted(hash_set)
    return subfolders
def get_shared_feed_creators_set(user=None):
    """
    Convenience function to get the set of creators of the feeds that have been shared
    with the passed user (including public feeds). When user is None only public
    feeds are considered, and no creator is excluded by username.
    """
    if user is None:
        feeds_qs = Feed.objects.filter(public=True)
        username = ''
    else:
        feeds_qs = Feed.objects.filter(models.Q(owner=user) | models.Q(public=True))
        username = user.username
    # The user's own feeds are excluded from the result.
    creators = {feed.get_creator() for feed in feeds_qs.all()}
    return {creator for creator in creators if creator.username != username}
7,583 | flag handler | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Flexi(CMakePackage):
"""Open Source High-Order Unstructured Discontinuous Galerkin Fluid
Dynamics Solver"""
homepage = "https://www.flexi-project.org/"
git = "https://github.com/flexi-framework/flexi.git"
version("master", preferred=True)
version("21.03.0", tag="v21.03.0", commit="d061978e5d96cfc96c06edc1bae9d92cbe540c18")
patch("for_aarch64.patch", when="target=aarch64:")
variant("mpi", default=True, description="Enable MPI")
variant(
"2d", default=False, description="If set to True the code will run in two-dimensional mode"
)
variant(
"eqnsysname",
default="navierstokes",
values=("navierstokes", "linearscalaradvection", "rans_sa"),
multi=False,
description="Defines the equation system",
)
variant(
"fv",
default=False,
description="Enables the usage of the finite volume subcell shock capturing mechanism",
)
variant(
"lifting",
default="br1",
values=("br1", "br2"),
multi=False,
description=(
"Two different lifting methods for the parabolic part of "
"the equation system available"
),
)
variant(
"nodetype",
default="GAUSS",
values=("GAUSS", "GAUSS-LOBATTO"),
multi=False,
description="Space discretization basis function",
)
variant(
"split", default=False, description="Split form of the discontinuous Galerkin operator"
)
variant(
"parabolic",
default=True,
description=(
"Defines whether the parabolic part of the chosen system " "should be included or not"
),
)
variant(
"testcase",
default="default",
values=("default", "taylorgreenvortex", "phill", "channel", "riemann2d"),
multi=False,
description="Defines the used test case",
)
variant(
"viscosity",
default="constant",
values=("constant", "sutherland", "powerlaw"),
multi=False,
description="Defines modeling approach for viscosity",
)
variant("eddy_viscosity", default=False, description="Enable eddy viscosity")
# Available Tools
variant("visu", default=True, description="Enable posti_visu")
variant("swapmesg", default=False, description="Enable posti_swapmesh")
variant("preparerecordpoints", default=False, description="Enable posti_preparerecordpoints")
variant(
"visualizerecordpoints", default=False, description="Enable posti_visualizerecordpoints"
)
variant("evaluaterecordpoints", default=False, description="Enable posti_evaluaterecordpoints")
variant("mergetimeaverages", default=False, description="Enable posti_mergetimeaverages")
variant("channel_fft", default=False, description="Enable posti_channel_fft")
variant("to3d", default=False, description="Enable posti_to3d")
variant("avg2d", default=False, description="Enable posti_avg2d")
conflicts("+to3d", when="@:21.03.0", msg="Only available in newer releases")
conflicts("nodetype=GAUSS", when="+split", msg="Only available for Gauss-Lobatto nodes")
depends_on("mpi", when="+mpi")
depends_on("hdf5+fortran+mpi", when="+mpi")
depends_on("hdf5+fortran~mpi", when="~mpi")
depends_on("lapack")
depends_on("zlib-api")
depends_on("fftw", when="+channel_fft")
def METHOD_NAME(self, name, flags):
if name == "fflags":
if self.spec.satisfies("%gcc@10:"):
if flags is None:
flags = []
flags.append("-fallow-argument-mismatch")
return (flags, None, None)
def cmake_args(self):
    """Assemble the CMake arguments for FLEXI from the package variants.

    Returns a list with the fixed HDF5 switch first, one CMake option per
    variant (in the original order), and the record-points preparation
    option last.
    """
    # (cmake_option, variant_name) pairs, kept in the historical order.
    option_variant_pairs = [
        ("LIBS_USE_MPI", "mpi"),
        ("FLEXI_2D", "2d"),
        ("FLEXI_EQNSYSNAME", "eqnsysname"),
        ("FLEXI_FV", "fv"),
        ("FLEXI_LIFTING", "lifting"),
        ("FLEXI_NODETYPE", "nodetype"),
        ("FLEXI_SPLIT_DG", "split"),
        ("FLEXI_PARABOLIC", "parabolic"),
        ("FLEXI_TESTCASE", "testcase"),
        ("FLEXI_VISCOSITY", "viscosity"),
        ("FLEXI_EDDYVISCOSITY", "eddy_viscosity"),
        ("POSTI_VISU", "visu"),
        ("POSTI_SWAPMESH", "swapmesg"),
        ("POSTI_RP_VISUALIZE", "visualizerecordpoints"),
        ("POSTI_RP_EVALUATE", "evaluaterecordpoints"),
        ("POSTI_MERGETIMEAVERAGES", "mergetimeaverages"),
        ("POSTI_CHANNEL_FFT", "channel_fft"),
        ("POSTI_TO3D", "to3d"),
        ("POSTI_AVG2D", "avg2d"),
    ]
    args = ["-DLIBS_BUILD_HDF5:BOOL=OFF"]
    args.extend(self.define_from_variant(opt, var) for opt, var in option_variant_pairs)
    # The record-points preparation CMake option was renamed after 21.03.0.
    if self.spec.satisfies("@:21.03.0"):
        rp_option = "POSTI_RP_PREPARERE"
    else:
        rp_option = "POSTI_RP_PREPARE"
    args.append(self.define_from_variant(rp_option, "preparerecordpoints"))
    return args
7,584 | create benchmark spec from config dict | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vpn service."""
import pickle
import sys
import unittest
from absl import flags
from absl.testing import flagsaver
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import provider_info
from perfkitbenchmarker.configs import benchmark_config_spec
from tests import pkb_common_test_case
from perfkitbenchmarker.vpn_service import TunnelConfig
FLAGS = flags.FLAGS
PROJECT = 'mock_project'
CLOUD = provider_info.GCP
BENCHMARK_NAME = 'iperf'
URI = 'uri45678'
DEFAULT_CFG = """
# VPN iperf config.
iperf:
description: Run iperf over vpn
flags:
iperf_sending_thread_count: 5
use_vpn: True
vpn_service_gateway_count: 1
vpn_service:
tunnel_count: 1
ike_version: 2
routing_type: static
vm_groups:
vm_1:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 192.168.1.0/24
vm_spec:
GCP:
zone: us-central1-c
machine_type: n1-standard-4
"""
class BaseVPNServiceTest(pkb_common_test_case.PkbCommonTestCase):
    """Shared fixture helpers for the VPN service test cases below."""

    def setUp(self):
        super(BaseVPNServiceTest, self).setUp()
        if not sys.warnoptions:  # https://bugs.python.org/issue33154
            import warnings
            # Silence ResourceWarnings from unclosed test resources unless the
            # user explicitly asked for warnings on the command line.
            warnings.simplefilter('ignore', ResourceWarning)

    def _CreateBenchmarkSpecFromYaml(self, yaml_string,
                                     benchmark_name=BENCHMARK_NAME):
        """Parses a YAML config string and builds a BenchmarkSpec from it."""
        config = configs.LoadConfig(yaml_string, {}, benchmark_name)
        return self.METHOD_NAME(config, benchmark_name)

    def METHOD_NAME(self, config_dict, benchmark_name):
        """Creates a BenchmarkSpec from an already-parsed config dict.

        Raises:
          StopIteration: if benchmark_name is not a known linux benchmark.
        """
        config_spec = benchmark_config_spec.BenchmarkConfigSpec(benchmark_name,
                                                                flag_values=FLAGS,
                                                                **config_dict)
        benchmark_module = next((b for b in linux_benchmarks.BENCHMARKS if
                                 b.BENCHMARK_NAME == benchmark_name))
        return benchmark_spec.BenchmarkSpec(benchmark_module, config_spec, URI)

    def extractDictAFromB(self, A, B):  # assertDictContainsSubset deprecated
        """Returns the subset of B whose keys also appear in A."""
        # Dict comprehension replaces the old dict([(k, B[k]) ...]) construction
        # with redundant .keys() calls; behavior is identical.
        return {k: B[k] for k in A if k in B}
class VpnServiceTestCase(BaseVPNServiceTest):
    """Unit tests for VPN service construction and picklability."""

    @flagsaver.flagsaver(use_vpn=True, vpn_service_gateway_count=1)
    def testVpnServiceConfig(self):
        """VPN config values survive spec construction and a pickle round-trip."""
        spec = self._CreateBenchmarkSpecFromYaml(DEFAULT_CFG)
        spec.ConstructVPNService()
        # test global flags
        self.assertTrue(spec.config.flags['use_vpn'])
        self.assertEqual(spec.config.flags['vpn_service_gateway_count'], 1)
        # test vpn_service flags
        self.assertTrue(hasattr(spec, 'vpn_service'))
        self.assertIsNot(spec.vpn_service, None)
        self.assertEqual(spec.vpn_service.tunnel_count, 1)
        self.assertEqual(spec.vpn_service.ike_version, 2)
        self.assertEqual(spec.vpn_service.routing, 'static')
        # test benchmark_spec attributes
        self.assertTrue(hasattr(spec, 'vpn_gateways'))
        self.assertIsNot(spec.vpn_gateways, None)
        # test unpickled values for above
        pspec = pickle.loads(pickle.dumps(spec))
        self.assertTrue(pspec.config.flags['use_vpn'])
        self.assertEqual(pspec.config.flags['vpn_service_gateway_count'], 1)
        self.assertTrue(hasattr(pspec, 'vpn_service'))
        self.assertIsNot(pspec.vpn_service, None)
        self.assertEqual(pspec.vpn_service.tunnel_count, 1)
        self.assertEqual(pspec.vpn_service.ike_version, 2)
        self.assertEqual(pspec.vpn_service.routing, 'static')
        self.assertTrue(hasattr(pspec, 'vpn_gateways'))
        self.assertIsNot(pspec.vpn_gateways, None)

    @flagsaver.flagsaver(use_vpn=True, vpn_service_gateway_count=1)
    def testGetVPNGatewayPairs(self):
        """GetVpnGatewayPairs returns the expected number of gateway pairs."""
        # Four gateways, two per region (us-west1 / us-central1).
        vpn_gateways = {
            'vpngw-us-west1-0-None': None,
            'vpngw-us-west1-1-None': None,
            'vpngw-us-central1-0-None': None,
            'vpngw-us-central1-1-None': None,
        }
        spec = self._CreateBenchmarkSpecFromYaml(DEFAULT_CFG)
        spec.ConstructVPNService()
        pairs = spec.vpn_service.GetVpnGatewayPairs(vpn_gateways)
        self.assertEqual(len(pairs), 4)
        # test unpickled values
        pspec = pickle.loads(pickle.dumps(spec))
        ppairs = pspec.vpn_service.GetVpnGatewayPairs(vpn_gateways)
        self.assertEqual(len(ppairs), 4)
class TunnelConfigTestCase(BaseVPNServiceTest):
    """Unit tests for TunnelConfig hashing."""

    @flagsaver.flagsaver(run_uri=URI)
    def testTunnelConfigHash(self):
        """Default configs hash identically; applying a config changes the hash."""
        ep1 = {
            'name': 'ep1',
            'ip': '1.2.3.4',
            'cidr': '1.2.3.4/5',
            'require_target_to_init': False,
            'tunnel_id': '12345',
        }
        ep2 = {
            'name': 'ep2',
            'ip': '9.8.7.6',
            'cidr': '9.8.7.6/5',
            'require_target_to_init': False,
            'tunnel_id': '98765',
        }
        endpoints = [ep1, ep2]
        conf = {
            'tunnel_name': 'tun1',
            'ike_version': '3',
            'routing': 'static',
            'psk': 'private',
            'endpoints': endpoints
        }
        tunnel_config = TunnelConfig()
        tunnel_config2 = TunnelConfig()
        hash1 = tunnel_config.hash()
        hash2 = tunnel_config2.hash()
        # Two default-constructed configs must hash the same.
        self.assertEqual(hash1, hash2)
        tunnel_config.setConfig(**conf)
        hash3 = tunnel_config.hash()
        # Applying a concrete config must change the hash.
        self.assertNotEqual(hash1, hash3)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
7,585 | close | import asyncio
import os
import sqlite3
from functools import wraps
from typing import Any, Callable, List, Optional, Sequence, Tuple, TypeVar
import aiosqlite
from pypika import SQLLiteQuery
from tortoise.backends.base.client import (
BaseDBAsyncClient,
BaseTransactionWrapper,
Capabilities,
ConnectionWrapper,
NestedTransactionContext,
TransactionContext,
)
from tortoise.backends.sqlite.executor import SqliteExecutor
from tortoise.backends.sqlite.schema_generator import SqliteSchemaGenerator
from tortoise.exceptions import (
IntegrityError,
OperationalError,
TransactionManagementError,
)
# Type aliases for the decorator below; F preserves the wrapped callable's type.
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
def translate_exceptions(func: F) -> F:
    """Decorator mapping raw ``sqlite3`` errors onto Tortoise ORM exceptions.

    Wraps an async query method so that ``sqlite3.OperationalError`` and
    ``sqlite3.IntegrityError`` surface as ``OperationalError`` and
    ``IntegrityError`` respectively.
    """

    @wraps(func)
    async def wrapper(self, query, *args):
        try:
            return await func(self, query, *args)
        except sqlite3.OperationalError as exc:
            raise OperationalError(exc)
        except sqlite3.IntegrityError as exc:
            raise IntegrityError(exc)

    return wrapper  # type: ignore
class SqliteClient(BaseDBAsyncClient):
    """Tortoise ORM client for SQLite, backed by a single aiosqlite connection."""

    executor_class = SqliteExecutor
    query_class = SQLLiteQuery
    schema_generator = SqliteSchemaGenerator
    # SQLite runs in-process and has no SELECT ... FOR UPDATE support.
    capabilities = Capabilities(
        "sqlite", daemon=False, requires_limit=True, inline_comment=True, support_for_update=False
    )

    def __init__(self, file_path: str, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.filename = file_path
        # All remaining kwargs are treated as SQLite PRAGMA settings.
        self.pragmas = kwargs.copy()
        self.pragmas.pop("connection_name", None)
        self.pragmas.pop("fetch_inserted", None)
        self.pragmas.setdefault("journal_mode", "WAL")
        self.pragmas.setdefault("journal_size_limit", 16384)
        self.pragmas.setdefault("foreign_keys", "ON")
        self._connection: Optional[aiosqlite.Connection] = None
        # Serializes access to the single underlying connection.
        self._lock = asyncio.Lock()

    async def create_connection(self, with_db: bool) -> None:
        """Opens the aiosqlite connection (if needed) and applies the pragmas."""
        if not self._connection:  # pragma: no branch
            # isolation_level=None puts the connection in autocommit mode.
            self._connection = aiosqlite.connect(self.filename, isolation_level=None)
            self._connection.start()
            await self._connection._connect()
            self._connection._conn.row_factory = sqlite3.Row
            for pragma, val in self.pragmas.items():
                cursor = await self._connection.execute(f"PRAGMA {pragma}={val}")
                await cursor.METHOD_NAME()
            self.log.debug(
                "Created connection %s with params: filename=%s %s",
                self._connection,
                self.filename,
                " ".join(f"{k}={v}" for k, v in self.pragmas.items()),
            )

    async def METHOD_NAME(self) -> None:
        """Closes the connection, if open, and clears the cached handle."""
        if self._connection:
            await self._connection.METHOD_NAME()
            self.log.debug(
                "Closed connection %s with params: filename=%s %s",
                self._connection,
                self.filename,
                " ".join(f"{k}={v}" for k, v in self.pragmas.items()),
            )
            self._connection = None

    async def db_create(self) -> None:
        # DB's are automatically created once accessed
        pass

    async def db_delete(self) -> None:
        """Closes the connection and removes the database file from disk."""
        await self.METHOD_NAME()
        try:
            os.remove(self.filename)
        except FileNotFoundError:  # pragma: nocoverage
            pass
        except OSError as e:
            if e.errno != 22:  # fix: "sqlite://:memory:" in Windows
                raise e

    def acquire_connection(self) -> ConnectionWrapper:
        """Returns an async context manager guarding the shared connection."""
        return ConnectionWrapper(self._lock, self)

    def _in_transaction(self) -> "TransactionContext":
        return TransactionContext(TransactionWrapper(self))

    @translate_exceptions
    async def execute_insert(self, query: str, values: list) -> int:
        """Runs an INSERT and returns the last inserted row id."""
        async with self.acquire_connection() as connection:
            self.log.debug("%s: %s", query, values)
            return (await connection.execute_insert(query, values))[0]

    @translate_exceptions
    async def execute_many(self, query: str, values: List[list]) -> None:
        """Runs a statement for each value set, inside one explicit transaction."""
        async with self.acquire_connection() as connection:
            self.log.debug("%s: %s", query, values)
            # This code is only ever called in AUTOCOMMIT mode
            await connection.execute("BEGIN")
            try:
                await connection.executemany(query, values)
            except Exception:
                await connection.rollback()
                raise
            else:
                await connection.commit()

    @translate_exceptions
    async def execute_query(
        self, query: str, values: Optional[list] = None
    ) -> Tuple[int, Sequence[dict]]:
        """Runs a query, returning (affected-or-fetched row count, rows)."""
        # NUL bytes are not accepted inside SQLite text; re-express them in SQL.
        query = query.replace("\x00", "'||CHAR(0)||'")
        async with self.acquire_connection() as connection:
            self.log.debug("%s: %s", query, values)
            start = connection.total_changes
            rows = await connection.execute_fetchall(query, values)
            # Prefer the change count (writes); fall back to fetched-row count.
            return (connection.total_changes - start) or len(rows), rows

    @translate_exceptions
    async def execute_query_dict(self, query: str, values: Optional[list] = None) -> List[dict]:
        """Runs a query and returns each row as a plain dict."""
        query = query.replace("\x00", "'||CHAR(0)||'")
        async with self.acquire_connection() as connection:
            self.log.debug("%s: %s", query, values)
            return list(map(dict, await connection.execute_fetchall(query, values)))

    @translate_exceptions
    async def execute_script(self, query: str) -> None:
        """Runs a multi-statement SQL script."""
        async with self.acquire_connection() as connection:
            self.log.debug(query)
            await connection.executescript(query)
class TransactionWrapper(SqliteClient, BaseTransactionWrapper):
    """Transaction-scoped client that borrows an existing SqliteClient connection."""

    def __init__(self, connection: SqliteClient) -> None:
        # Deliberately does NOT call super().__init__(): the wrapper reuses the
        # parent's live connection instead of opening one of its own.
        self.connection_name = connection.connection_name
        self._connection: aiosqlite.Connection = connection._connection  # type: ignore
        self._lock = asyncio.Lock()
        self._trxlock = connection._lock
        self.log = connection.log
        self._finalized = False
        self.fetch_inserted = connection.fetch_inserted
        self._parent = connection

    def _in_transaction(self) -> "TransactionContext":
        return NestedTransactionContext(self)

    @translate_exceptions
    async def execute_many(self, query: str, values: List[list]) -> None:
        """Runs a statement per value set without opening a new transaction."""
        async with self.acquire_connection() as connection:
            self.log.debug("%s: %s", query, values)
            # Already within transaction, so ideal for performance
            await connection.executemany(query, values)

    async def start(self) -> None:
        """Begins the transaction, flushing any pending autocommit state first."""
        try:
            await self._connection.commit()
            await self._connection.execute("BEGIN")
        except sqlite3.OperationalError as exc:  # pragma: nocoverage
            raise TransactionManagementError(exc)

    async def rollback(self) -> None:
        """Rolls back and finalizes; raises if already finalized."""
        if self._finalized:
            raise TransactionManagementError("Transaction already finalised")
        await self._connection.rollback()
        self._finalized = True

    async def commit(self) -> None:
        """Commits and finalizes; raises if already finalized."""
        if self._finalized:
            raise TransactionManagementError("Transaction already finalised")
        await self._connection.commit()
        self._finalized = True
7,586 | dispatch line | import sys
from _typeshed import ExcInfo, TraceFunction
from collections.abc import Callable, Iterable, Mapping
from types import CodeType, FrameType, TracebackType
from typing import IO, Any, SupportsInt, TypeVar
from typing_extensions import Literal, ParamSpec
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]

# Generic type parameters for Bdb.runcall's pass-through signature.
_T = TypeVar("_T")
_P = ParamSpec("_P")

# Code-object flag mask used to detect generator/coroutine frames.
# NOTE(review): 672 presumably combines CO_GENERATOR|CO_COROUTINE|CO_ASYNC_GENERATOR
# (32|128|512) — confirm against the bdb implementation.
GENERATOR_AND_COROUTINE_FLAGS: Literal[672]

# Raised by the debugger to abort the debugged program (e.g. "quit" in pdb).
class BdbQuit(Exception): ...
class Bdb:
    """Stub for the generic Python debugger base class (standard `bdb`)."""

    skip: set[str] | None
    breaks: dict[str, list[int]]
    fncache: dict[str, str]
    frame_returning: FrameType | None
    botframe: FrameType | None
    quitting: bool
    stopframe: FrameType | None
    returnframe: FrameType | None
    stoplineno: int
    def __init__(self, skip: Iterable[str] | None = None) -> None: ...
    def canonic(self, filename: str) -> str: ...
    def reset(self) -> None: ...
    # Trace-function dispatch machinery (installed via sys.settrace).
    def trace_dispatch(self, frame: FrameType, event: str, arg: Any) -> TraceFunction: ...
    def METHOD_NAME(self, frame: FrameType) -> TraceFunction: ...
    def dispatch_call(self, frame: FrameType, arg: None) -> TraceFunction: ...
    def dispatch_return(self, frame: FrameType, arg: Any) -> TraceFunction: ...
    def dispatch_exception(self, frame: FrameType, arg: ExcInfo) -> TraceFunction: ...
    # Stop/break predicates.
    def is_skipped_module(self, module_name: str) -> bool: ...
    def stop_here(self, frame: FrameType) -> bool: ...
    def break_here(self, frame: FrameType) -> bool: ...
    def do_clear(self, arg: Any) -> bool | None: ...
    def break_anywhere(self, frame: FrameType) -> bool: ...
    # User-overridable callback hooks.
    def user_call(self, frame: FrameType, argument_list: None) -> None: ...
    def user_line(self, frame: FrameType) -> None: ...
    def user_return(self, frame: FrameType, return_value: Any) -> None: ...
    def user_exception(self, frame: FrameType, exc_info: ExcInfo) -> None: ...
    # Stepping / continuation control.
    def set_until(self, frame: FrameType, lineno: int | None = None) -> None: ...
    def set_step(self) -> None: ...
    def set_next(self, frame: FrameType) -> None: ...
    def set_return(self, frame: FrameType) -> None: ...
    def set_trace(self, frame: FrameType | None = None) -> None: ...
    def set_continue(self) -> None: ...
    def set_quit(self) -> None: ...
    # Breakpoint management.
    def set_break(
        self, filename: str, lineno: int, temporary: bool = False, cond: str | None = None, funcname: str | None = None
    ) -> None: ...
    def clear_break(self, filename: str, lineno: int) -> None: ...
    def clear_bpbynumber(self, arg: SupportsInt) -> None: ...
    def clear_all_file_breaks(self, filename: str) -> None: ...
    def clear_all_breaks(self) -> None: ...
    def get_bpbynumber(self, arg: SupportsInt) -> Breakpoint: ...
    def get_break(self, filename: str, lineno: int) -> bool: ...
    def get_breaks(self, filename: str, lineno: int) -> list[Breakpoint]: ...
    def get_file_breaks(self, filename: str) -> list[Breakpoint]: ...
    def get_all_breaks(self) -> list[Breakpoint]: ...
    # Stack inspection and program execution entry points.
    def get_stack(self, f: FrameType | None, t: TracebackType | None) -> tuple[list[tuple[FrameType, int]], int]: ...
    def format_stack_entry(self, frame_lineno: int, lprefix: str = ": ") -> str: ...
    def run(
        self, cmd: str | CodeType, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None
    ) -> None: ...
    def runeval(self, expr: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None: ...
    def runctx(self, cmd: str | CodeType, globals: dict[str, Any] | None, locals: Mapping[str, Any] | None) -> None: ...
    def runcall(self, __func: Callable[_P, _T], *args: _P.args, **kwds: _P.kwargs) -> _T | None: ...
class Breakpoint:
    """Stub for bdb's breakpoint bookkeeping class."""

    # NOTE(review): in bdb, `next`, `bplist` and `bpbynumber` act as
    # class-level registries; the stub lists them with the instance attributes.
    next: int
    bplist: dict[tuple[str, int], list[Breakpoint]]
    bpbynumber: list[Breakpoint | None]
    funcname: str | None
    func_first_executable_line: int | None
    file: str
    line: int
    temporary: bool
    cond: str | None
    enabled: bool
    ignore: int
    hits: int
    number: int
    def __init__(
        self, file: str, line: int, temporary: bool = False, cond: str | None = None, funcname: str | None = None
    ) -> None: ...
    if sys.version_info >= (3, 11):
        @staticmethod
        def clearBreakpoints() -> None: ...

    def deleteMe(self) -> None: ...
    def enable(self) -> None: ...
    def disable(self) -> None: ...
    def bpprint(self, out: IO[str] | None = None) -> None: ...
    def bpformat(self) -> str: ...
# Module-level helpers used by the breakpoint-matching logic.
def checkfuncname(b: Breakpoint, frame: FrameType) -> bool: ...
def effective(file: str, line: int, frame: FrameType) -> tuple[Breakpoint, bool] | tuple[None, None]: ...
def set_trace() -> None: ...
7,587 | f32tou16 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
from tvm.script import tir as T
def get_before():
    """Builds the input module: elementwise bf16 add followed by exp, all in bf16."""

    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(
            Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
        ):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "bfloat16", data=Aptr)
            B = T.decl_buffer((100,), "bfloat16", data=Bptr)
            D = T.decl_buffer((100,), "bfloat16", data=Dptr)
            C = T.decl_buffer((100,), "bfloat16")
            for i in T.grid(100):
                C[i] = A[i] + B[i]
                D[i] = T.exp(C[i])

    return Before
def u16tof32(v):
    """Builds a TIR expression that widens a uint16 to the float32 it encodes.

    The 16-bit payload is shifted into the upper half of a 32-bit word —
    exactly how bfloat16 bits map onto IEEE float32.
    """
    widened = v.astype("uint32") << tvm.tir.const(16, "uint32")
    return T.reinterpret("float32", widened)
def bf16tof32(v):
    """Converts a bfloat16 TIR expression to float32 via bitwise reinterpret."""
    raw_bits = T.reinterpret("uint16", v)
    return u16tof32(raw_bits)
def METHOD_NAME(v):
    """Builds a TIR expression converting float32 bits to a bfloat16 uint16 payload.

    Uses round-to-nearest-even: the bias depends on the lowest retained
    mantissa bit, so exact ties round toward an even result.
    """
    uint32_v = T.reinterpret("uint32", v)
    # Bias is 0x7FFF plus the lowest kept bit (ties-to-even rounding).
    rounding_bias = (uint32_v >> tvm.tir.const(16, "uint32")) & tvm.tir.const(1, "uint32")
    rounding_bias += tvm.tir.const(0x7FFF, "uint32")
    uint32_v = uint32_v + rounding_bias
    # Keep the upper 16 bits: sign, exponent and top mantissa bits.
    return (uint32_v >> tvm.tir.const(16, "uint32")).astype("uint16")
def f32tobf16(v):
    """Converts a float32 TIR expression to bfloat16 (round-to-nearest-even)."""
    rounded_bits = METHOD_NAME(v)
    return T.reinterpret("bfloat16", rounded_bits)
def get_after_compute_legalize():
    """Expected module after BF16ComputeLegalize: math in fp32, storage still bf16."""

    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(
            Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
        ):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "bfloat16", data=Aptr)
            B = T.decl_buffer((100,), "bfloat16", data=Bptr)
            D = T.decl_buffer((100,), "bfloat16", data=Dptr)
            # Intermediate buffer is widened to float32 by the pass.
            C = T.decl_buffer((100,), "float32")
            for i in T.grid(100):
                C[i] = bf16tof32(A[i]) + bf16tof32(B[i])
                D[i] = f32tobf16(T.exp(C[i]))

    return After
def get_after_storage_legalize():
    """Expected module after BF16StorageLegalize: bf16 buffers stored as uint16."""

    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(Aptr: T.handle("uint16"), Bptr: T.handle("uint16"), Dptr: T.handle("uint16")):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "uint16", data=Aptr)
            B = T.decl_buffer((100,), "uint16", data=Bptr)
            D = T.decl_buffer((100,), "uint16", data=Dptr)
            C = T.decl_buffer((100,), "float32")
            for i in T.grid(100):
                C[i] = u16tof32(A[i]) + u16tof32(B[i])
                D[i] = METHOD_NAME(T.exp(C[i]))

    return After
def test_bf16_compute_legalize():
    """BF16ComputeLegalize rewrites bf16 math to fp32 and is idempotent."""
    before = get_before()
    expected = get_after_compute_legalize()
    # run the transform twice to ensure we can afford to deal
    # with this repetitive optimization
    after = tvm.tir.transform.BF16ComputeLegalize()(before)
    after = tvm.tir.transform.BF16ComputeLegalize()(after)
    tvm.ir.assert_structural_equal(after, expected)
def test_bf16_storage_legalize():
    """BF16StorageLegalize lowers bf16 buffers/handles to uint16 storage."""
    before = get_after_compute_legalize()
    after = tvm.tir.transform.BF16StorageLegalize()(before)
    expected = get_after_storage_legalize()
    tvm.ir.assert_structural_equal(after, expected)
def test_bf16_storage_scope():
    """Both legalization passes preserve non-default storage scopes on handles."""

    def get_before():
        @tvm.script.ir_module
        class Before:
            @T.prim_func
            def main(
                Aptr: T.handle("bfloat16", storage_scope="shared"),
                Bptr: T.handle("bfloat16", storage_scope="local"),
                Dptr: T.handle("bfloat16"),
            ):
                T.func_attr({"global_symbol": "main"})
                A = T.decl_buffer((100,), "bfloat16", data=Aptr)
                B = T.decl_buffer((100,), "bfloat16", data=Bptr)
                D = T.decl_buffer((100,), "bfloat16", data=Dptr)
                C = T.decl_buffer((100,), "bfloat16")
                for i in T.grid(100):
                    C[i] = A[i] + B[i]
                    D[i] = T.exp(C[i])

        return Before

    def after_compute_legalize():
        # Same as the module-level expectation, but with storage scopes retained.
        @tvm.script.ir_module
        class After:
            @T.prim_func
            def main(
                Aptr: T.handle("bfloat16", storage_scope="shared"),
                Bptr: T.handle("bfloat16", storage_scope="local"),
                Dptr: T.handle("bfloat16"),
            ):
                T.func_attr({"global_symbol": "main"})
                A = T.decl_buffer((100,), "bfloat16", data=Aptr)
                B = T.decl_buffer((100,), "bfloat16", data=Bptr)
                D = T.decl_buffer((100,), "bfloat16", data=Dptr)
                C = T.decl_buffer((100,), "float32")
                for i in T.grid(100):
                    C[i] = bf16tof32(A[i]) + bf16tof32(B[i])
                    D[i] = f32tobf16(T.exp(C[i]))

        return After

    def after_storage_legalize():
        @tvm.script.ir_module
        class After:
            @T.prim_func
            def main(
                Aptr: T.handle("uint16", storage_scope="shared"),
                Bptr: T.handle("uint16", storage_scope="local"),
                Dptr: T.handle("uint16"),
            ):
                T.func_attr({"global_symbol": "main"})
                A = T.decl_buffer((100,), "uint16", data=Aptr)
                B = T.decl_buffer((100,), "uint16", data=Bptr)
                D = T.decl_buffer((100,), "uint16", data=Dptr)
                C = T.decl_buffer((100,), "float32")
                for i in T.grid(100):
                    C[i] = u16tof32(A[i]) + u16tof32(B[i])
                    D[i] = METHOD_NAME(T.exp(C[i]))

        return After

    before = get_before()
    after_compute = tvm.tir.transform.BF16ComputeLegalize()(before)
    after_storage = tvm.tir.transform.BF16StorageLegalize()(after_compute)
    tvm.ir.assert_structural_equal(after_compute, after_compute_legalize())
    tvm.ir.assert_structural_equal(after_storage, after_storage_legalize())
# NOTE(review): test_bf16_compute_legalize is not invoked here, only via pytest.
if __name__ == "__main__":
    test_bf16_storage_legalize()
    test_bf16_storage_scope()
7,588 | test bool scalar bool | import operator
import unittest
import numpy
import pytest
import cupy
from cupy import testing
class TestArrayBoolOp(unittest.TestCase):
    """Tests truth-value evaluation (``bool()``) of cupy arrays."""

    @testing.for_all_dtypes()
    def test_bool_empty(self, dtype):
        # bool() of an empty array is deprecated and evaluates to False.
        with testing.assert_warns(DeprecationWarning):
            assert not bool(cupy.array((), dtype=dtype))

    def METHOD_NAME(self):
        # 0-d boolean arrays mirror their scalar value.
        assert bool(cupy.array(True, dtype=numpy.bool_))
        assert not bool(cupy.array(False, dtype=numpy.bool_))

    @testing.for_all_dtypes()
    def test_bool_scalar(self, dtype):
        assert bool(cupy.array(1, dtype=dtype))
        assert not bool(cupy.array(0, dtype=dtype))

    def test_bool_one_element_bool(self):
        assert bool(cupy.array([True], dtype=numpy.bool_))
        assert not bool(cupy.array([False], dtype=numpy.bool_))

    @testing.for_all_dtypes()
    def test_bool_one_element(self, dtype):
        assert bool(cupy.array([1], dtype=dtype))
        assert not bool(cupy.array([0], dtype=dtype))

    @testing.for_all_dtypes()
    def test_bool_two_elements(self, dtype):
        # Multi-element arrays have an ambiguous truth value.
        with self.assertRaises(ValueError):
            bool(cupy.array([1, 2], dtype=dtype))
class TestArrayUnaryOp(unittest.TestCase):
    """Tests unary operators (neg, pos, abs) against NumPy behavior."""

    # check_* helpers compare cupy results against numpy via the decorator.
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def check_array_op(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return op(a)

    # Same as check_array_op, but including the bool dtype.
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def check_array_op_full(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return op(a)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_neg_array(self, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return operator.neg(a)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_pos_array(self, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        # Unary plus must return a new array, not the operand itself.
        assert a is not +a
        return +a

    def test_pos_boolarray(self):
        # Unary plus on boolean arrays is deprecated.
        for xp in (numpy, cupy):
            a = xp.array(True, dtype=xp.bool_)
            with pytest.deprecated_call():
                assert a is not +a

    @testing.with_requires('numpy<1.16')
    def test_pos_array_full(self):
        self.check_array_op_full(operator.pos)

    def test_abs_array(self):
        self.check_array_op_full(operator.abs)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def check_zerodim_op(self, op, xp, dtype):
        a = xp.array(-2).astype(dtype)
        return op(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def check_zerodim_op_full(self, op, xp, dtype):
        a = xp.array(-2).astype(dtype)
        return op(a)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_neg_zerodim(self, xp, dtype):
        a = xp.array(-2).astype(dtype)
        return operator.neg(a)

    def test_pos_zerodim(self):
        self.check_zerodim_op(operator.pos)

    def test_abs_zerodim(self):
        self.check_zerodim_op_full(operator.abs)

    def test_abs_zerodim_full(self):
        self.check_zerodim_op_full(operator.abs)
class TestArrayIntUnaryOp(unittest.TestCase):
    """Tests integer-only unary operators (bitwise invert)."""

    @testing.for_int_dtypes()
    @testing.numpy_cupy_allclose()
    def check_array_op(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return op(a)

    def test_invert_array(self):
        self.check_array_op(operator.invert)

    # Invert on float dtypes must raise TypeError, hence accept_error.
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(accept_error=TypeError)
    def check_zerodim_op(self, op, xp, dtype):
        a = xp.array(-2).astype(dtype)
        return op(a)

    def test_invert_zerodim(self):
        self.check_zerodim_op(operator.invert)
# Parameterized over both backends and several shapes (incl. 0-d and empty).
@testing.parameterize(*testing.product({
    'xp': [numpy, cupy],
    'shape': [(3, 2), (), (3, 0, 2)]
}))
class TestBoolNeg(unittest.TestCase):
    """Unary minus on a boolean array must raise TypeError (NumPy >= 1.13)."""

    def test_bool_neg(self):
        xp = self.xp
        if xp is numpy and not testing.numpy_satisfies('>=1.13.0'):
            raise unittest.SkipTest('NumPy<1.13.0')
        shape = self.shape
        x = testing.shaped_random(shape, xp, dtype=numpy.bool_)
        with pytest.raises(TypeError):
            -x
7,589 | test pandas feeding | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
    """Stacks the rows of `array` selected by `row_indices` into one matrix."""
    selected = [array[idx] for idx in row_indices]
    return np.vstack(selected)
class FeedingQueueRunnerTestCase(test.TestCase):
    """Tests for `FeedingQueueRunner`."""

    def testArrayFeeding(self):
        """Sequentially dequeued batches match the source array rows in order."""
        with ops.Graph().as_default():
            array = np.arange(32).reshape([16, 2])
            q = enqueue_data(array, capacity=100)
            batch_size = 3
            dq_op = q.dequeue_many(batch_size)
            with session.Session() as sess:
                coord = coordinator.Coordinator()
                threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
                for i in range(100):
                    # Expected indices wrap around the array cyclically.
                    indices = [
                        j % array.shape[0]
                        for j in range(batch_size * i, batch_size * (i + 1))
                    ]
                    expected_dq = get_rows(array, indices)
                    dq = sess.run(dq_op)
                    np.testing.assert_array_equal(indices, dq[0])
                    np.testing.assert_array_equal(expected_dq, dq[1])
                coord.request_stop()
                coord.join(threads)

    def testArrayFeedingMultiThread(self):
        """With shuffling threads, dequeued indices still map to correct rows."""
        with ops.Graph().as_default():
            array = np.arange(256).reshape([128, 2])
            q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
            batch_size = 3
            dq_op = q.dequeue_many(batch_size)
            with session.Session() as sess:
                coord = coordinator.Coordinator()
                threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
                for _ in range(100):
                    dq = sess.run(dq_op)
                    indices = dq[0]
                    expected_dq = get_rows(array, indices)
                    np.testing.assert_array_equal(expected_dq, dq[1])
                coord.request_stop()
                coord.join(threads)

    def METHOD_NAME(self):
        """DataFrame feeding yields index values followed by per-column data."""
        if not HAS_PANDAS:
            return
        with ops.Graph().as_default():
            array1 = np.arange(32)
            array2 = np.arange(32, 64)
            df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
            q = enqueue_data(df, capacity=100)
            batch_size = 5
            dq_op = q.dequeue_many(5)
            with session.Session() as sess:
                coord = coordinator.Coordinator()
                threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
                for i in range(100):
                    indices = [
                        j % array1.shape[0]
                        for j in range(batch_size * i, batch_size * (i + 1))
                    ]
                    expected_df_indices = df.index[indices]
                    expected_rows = df.iloc[indices]
                    dq = sess.run(dq_op)
                    # dq[0] carries the DataFrame index; data columns follow.
                    np.testing.assert_array_equal(expected_df_indices, dq[0])
                    for col_num, col in enumerate(df.columns):
                        np.testing.assert_array_equal(expected_rows[col].values,
                                                      dq[col_num + 1])
                coord.request_stop()
                coord.join(threads)

    def testPandasFeedingMultiThread(self):
        """Shuffled DataFrame feeding keeps index and data rows consistent."""
        if not HAS_PANDAS:
            return
        with ops.Graph().as_default():
            array1 = np.arange(128, 256)
            array2 = 2 * array1
            df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
            q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
            batch_size = 5
            dq_op = q.dequeue_many(batch_size)
            with session.Session() as sess:
                coord = coordinator.Coordinator()
                threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
                for _ in range(100):
                    dq = sess.run(dq_op)
                    indices = dq[0]
                    expected_rows = df.iloc[indices]
                    for col_num, col in enumerate(df.columns):
                        np.testing.assert_array_equal(expected_rows[col].values,
                                                      dq[col_num + 1])
                coord.request_stop()
                coord.join(threads)
# Allow running this test module directly.
if __name__ == "__main__":
    test.main()
7,590 | cb | import copy
import os
import tempfile
from threading import Thread, current_thread
from unittest import TestCase
import six
from pyff import utils
from pyff.constants import NS, as_list_of_string
from pyff.resource import Resource, ResourceOpts
from pyff.samlmd import entities_list, find_entity
from pyff.utils import (
Lambda,
b2u,
find_matching_files,
img_to_data,
is_past_ttl,
parse_xml,
resource_filename,
resource_string,
root,
schema,
url_get,
)
from pyff.merge_strategies import remove, replace_existing
class TestMetadata(TestCase):
    """Tests for SAML metadata merge strategies and entity listing."""

    def setUp(self):
        # Two fixtures: a single test entity and a large federation dump.
        self.datadir = resource_filename('metadata', 'test/data')
        self.xml_source1 = os.path.join(self.datadir, 'test01.xml')
        self.xml_source2 = os.path.join(self.datadir, 'swamid-2.0-test.xml')
        self.t1 = parse_xml(self.xml_source1)
        self.t2 = parse_xml(self.xml_source2)

    def test_merge_replace_bad(self):
        """replace_existing rejects arguments that are not entity elements."""
        try:
            replace_existing(self.t1, self.t1)
            assert False
        except AttributeError:
            pass

    def test_merge_remove_bad(self):
        """remove rejects arguments that are not entity elements."""
        try:
            remove(self.t1, self.t1)
            assert False
        except AttributeError:
            pass

    def test_replace_ndn(self):
        """Replacing an entity swaps in the modified copy; removal drops it."""
        idp = find_entity(root(self.t2), 'https://idp.nordu.net/idp/shibboleth')
        assert idp is not None
        idp2 = copy.deepcopy(idp)
        assert idp2 is not None
        for o in idp2.findall(".//{%s}OrganizationName" % NS['md']):
            o.text = "FOO"
        idp2.set('ID', 'kaka4711')
        replace_existing(idp, idp2)
        idp3 = find_entity(root(self.t2), 'kaka4711', attr='ID')
        assert idp3 is not None
        for o in idp2.findall(".//{%s}OrganizationName" % NS['md']):
            assert o.text == "FOO"
        remove(idp3, None)
        # NOTE(review): this re-lookup result is never checked — the assert
        # below re-tests the stale idp3 variable, so removal is effectively
        # unverified; it should presumably be `assert idp is None`.
        idp = find_entity(root(self.t2), 'kaka4711', attr='ID')
        assert idp3 is not None

    def test_entities_list(self):
        """entities_list counts the federation's entities; None yields nothing."""
        assert len(list(entities_list(root(self.t2)))) == 1032
        assert len(list(entities_list(None))) == 0
class TestResources(TestCase):
    """Tests for resource_filename/resource_string lookup helpers."""

    @staticmethod
    def _make_temp_file(contents):
        # Create a named temporary file that persists until explicitly
        # unlinked. The previous `tempfile.NamedTemporaryFile('w').name`
        # idiom relied on the object being garbage-collected (deleting the
        # file) before it was re-created with open() — CPython-specific and
        # racy; mkstemp is deterministic and portable.
        (fd, path) = tempfile.mkstemp()
        with os.fdopen(fd, "w") as fileobj:
            fileobj.write(contents)
        return path

    def test_resource_filename(self):
        # Unknown resources resolve to None.
        assert resource_filename("missing") is None
        assert resource_filename("missing", "gone") is None
        # Package-relative lookups, with and without an explicit base dir.
        assert os.path.isdir(resource_filename('test'))
        assert os.path.isfile(resource_filename('test/data/empty.txt'))
        assert os.path.isdir(resource_filename('metadata', 'test/data'))
        assert os.path.isfile(resource_filename('empty.txt', 'test/data'))
        assert resource_filename('empty.txt', 'test/data') == resource_filename('test/data/empty.txt')
        # Absolute filesystem paths pass through unchanged.
        tmp = self._make_temp_file("test")
        try:
            assert resource_filename(tmp) == tmp
            (d, fn) = os.path.split(tmp)
            assert resource_filename(fn, d) == tmp
        finally:
            try:
                os.unlink(tmp)
            except Exception:
                pass

    def test_resource_string(self):
        assert resource_string("missing") is None
        assert resource_string("missing", "gone") is None
        assert resource_string('test/data/empty.txt') == six.b('empty')
        assert resource_string('empty.txt', 'test/data') == six.b('empty')
        tmp = self._make_temp_file("test")
        try:
            print(resource_string(tmp))
            assert resource_string(tmp) == 'test'
            (d, fn) = os.path.split(tmp)
            assert resource_string(fn, d) == 'test'
        finally:
            try:
                os.unlink(tmp)
            except Exception:
                pass
class TestXMLErrors(TestCase):
    def test_strip_warnings(self):
        # xml_error filters out ':WARNING:' entries and keeps only entries
        # matching the optional substring `m`; no match yields ''.
        errors = [':WARNING:', 'other']
        assert utils.xml_error(errors) == 'other'
        assert utils.xml_error(errors, m='other') == 'other'
        assert utils.xml_error(errors, m='kaka') == ''
class TestMisc(TestCase):
    """Assorted tests for pyff.utils helpers, TTL handling and schema loading."""

    def test_b2u(self):
        # b2u decodes bytes to str; str input passes through unchanged.
        assert int(b2u(b'1')) == 1
        assert b2u('kaka') == 'kaka'

    def test_cache_fuzz(self):
        import time
        from pyff.constants import config

        # Deterministic TTL: with ttl=0 only strictly-past timestamps expire.
        config.randomize_cache_ttl = False
        config.cache_ttl = 0
        now = int(time.time())
        assert is_past_ttl(now - 1, ttl=config.cache_ttl)
        assert not is_past_ttl(now, ttl=config.cache_ttl)
        # Randomized TTL: the boundary is fuzzed, but clearly-old timestamps
        # must still expire while "now" must not.
        config.cache_ttl = 3
        config.randomize_cache_ttl = True
        assert is_past_ttl(now - 6, ttl=config.cache_ttl)
        assert not is_past_ttl(now, ttl=config.cache_ttl)
        assert is_past_ttl(now - 100, ttl=config.cache_ttl)

    def test_config_lang(self):
        from pyff.constants import config

        # 'en' must be present in the configured language list.
        assert 'en' in config.langs

    def test_schema(self):
        assert schema()

    def test_schema_100_times(self):
        # Repeated calls must keep returning a truthy schema object.
        for i in range(1, 100):
            assert schema()

    def test_schema_threads(self):
        # schema() must be callable concurrently; collect any exception
        # raised in a worker thread, keyed by thread.
        exceptions = dict()
        threads = list()

        def _s(e):
            try:
                schema()
            except BaseException as ex:
                e[current_thread()] = ex

        for i in range(1, 100):
            t = Thread(target=_s, args=[exceptions])
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for e in exceptions:
            print(e)
        assert len(exceptions) == 0
class TestLambda(TestCase):
    def test_lambda(self):
        """Lambda pre-binds arguments; bound and call-time args must match."""
        def _require_equal(*args, **kwargs):
            assert args[0] == args[1]

        f = Lambda(_require_equal, "kaka")
        f("kaka")
        # BUG FIX: the original used `assert False` inside the try block as
        # the "no exception raised" guard, but that itself raises
        # AssertionError and was swallowed by the except clause below — the
        # test could never fail here. try/except/else expresses the intent
        # correctly.
        try:
            f("foo")
        except AssertionError:
            pass
        else:
            self.fail("expected AssertionError for mismatched arguments")
class TestImage(TestCase):
    # Expected MIME type for each image file extension under test.
    ext_to_mime = dict(jpg='image/jpeg', gif='image/gif', ico='image/x-icon', png='image/png', svg='image/svg+xml')

    def setUp(self):
        self.imagedir = resource_filename('images', 'test/data')
        self.files = [fn for fn in find_matching_files(self.imagedir, ['png', 'gif', 'jpeg', 'jpg', 'ico', 'svg'])]

    def test_match(self):
        # Sanity check that the fixture directory was located and scanned.
        assert any('sunet256.png' in fn for fn in self.files)

    def test_convert(self):
        # Every fixture image must fetch via a file:// URL and convert to a
        # data URI with img_to_data.
        for fn in self.files:
            (basename, _, ext) = fn.rpartition('.')
            mime_type = TestImage.ext_to_mime.get(ext, None)
            assert mime_type is not None
            url = "file://{}".format(fn)
            assert url
            r = url_get(url)
            assert r
            assert r.content
            img = img_to_data(r.content, mime_type)
            assert img
            print(img)
class TestResource(TestCase):
    def test_cmp(self):
        # Two resources with the same URL but different options (one has a
        # `via` pipeline) compare equal. NOTE(review): this implies equality
        # is URL-based only — confirm against Resource.__eq__.
        r1 = Resource("https://mds.edugain.org", ResourceOpts(via=[lambda x: x]))
        r2 = Resource("https://mds.edugain.org", ResourceOpts())
        assert r1 == r2
class TestConfig(TestCase):
    def test_as_list_of_string(self):
        """A comma-separated string splits into its component strings."""
        parsed = as_list_of_string('a,b')
        print(parsed)
        assert len(parsed) == 2
        assert parsed[0] == 'a' and parsed[1] == 'b'
7,591 | reverse pointer | import sys
from collections.abc import Container, Iterable, Iterator
from typing import Any, Generic, SupportsInt, TypeVar, overload
from typing_extensions import Literal, Self, TypeAlias
# Undocumented length constants
IPV4LENGTH: Literal[32]
IPV6LENGTH: Literal[128]
_A = TypeVar("_A", IPv4Address, IPv6Address)
_N = TypeVar("_N", IPv4Network, IPv6Network)
_RawIPAddress: TypeAlias = int | str | bytes | IPv4Address | IPv6Address
_RawNetworkPart: TypeAlias = IPv4Network | IPv6Network | IPv4Interface | IPv6Interface
# Factory functions mirroring the runtime ipaddress module: each accepts a
# raw address (int/str/bytes or an existing address object) and returns the
# appropriate version-specific class.
def ip_address(address: _RawIPAddress) -> IPv4Address | IPv6Address: ...
def ip_network(
    address: _RawIPAddress | _RawNetworkPart | tuple[_RawIPAddress] | tuple[_RawIPAddress, int], strict: bool = True
) -> IPv4Network | IPv6Network: ...
def ip_interface(
    address: _RawIPAddress | _RawNetworkPart | tuple[_RawIPAddress] | tuple[_RawIPAddress, int]
) -> IPv4Interface | IPv6Interface: ...

class _IPAddressBase:
    # String representations shared by addresses, networks and interfaces.
    @property
    def compressed(self) -> str: ...
    @property
    def exploded(self) -> str: ...
    @property
    def METHOD_NAME(self) -> str: ...
    @property
    def version(self) -> int: ...
class _BaseAddress(_IPAddressBase, SupportsInt):
    # Shared base of IPv4Address/IPv6Address: integer-convertible, hashable,
    # supports +/- with ints and ordering within one IP version.
    def __init__(self, address: object) -> None: ...
    def __add__(self, other: int) -> Self: ...
    def __hash__(self) -> int: ...
    def __int__(self) -> int: ...
    def __sub__(self, other: int) -> Self: ...
    if sys.version_info >= (3, 9):
        def __format__(self, fmt: str) -> str: ...

    def __eq__(self, other: object) -> bool: ...
    def __lt__(self, other: Self) -> bool: ...
    if sys.version_info >= (3, 11):
        def __ge__(self, other: Self) -> bool: ...
        def __gt__(self, other: Self) -> bool: ...
        def __le__(self, other: Self) -> bool: ...
    else:
        # Pre-3.11 runtime signatures carried a defaulted second parameter;
        # the stub mirrors that.
        def __ge__(self, other: Self, NotImplemented: Any = ...) -> bool: ...
        def __gt__(self, other: Self, NotImplemented: Any = ...) -> bool: ...
        def __le__(self, other: Self, NotImplemented: Any = ...) -> bool: ...

    @property
    def is_global(self) -> bool: ...
    @property
    def is_link_local(self) -> bool: ...
    @property
    def is_loopback(self) -> bool: ...
    @property
    def is_multicast(self) -> bool: ...
    @property
    def is_private(self) -> bool: ...
    @property
    def is_reserved(self) -> bool: ...
    @property
    def is_unspecified(self) -> bool: ...
    @property
    def max_prefixlen(self) -> int: ...
    @property
    def packed(self) -> bytes: ...
class _BaseNetwork(_IPAddressBase, Container[_A], Iterable[_A], Generic[_A]):
    # Shared base of IPv4Network/IPv6Network: a container/iterable of host
    # addresses of type _A, with subnet/supernet arithmetic.
    network_address: _A
    netmask: _A
    def __init__(self, address: object, strict: bool = ...) -> None: ...
    def __contains__(self, other: Any) -> bool: ...
    def __getitem__(self, n: int) -> _A: ...
    def __iter__(self) -> Iterator[_A]: ...
    def __eq__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
    def __lt__(self, other: Self) -> bool: ...
    if sys.version_info >= (3, 11):
        def __ge__(self, other: Self) -> bool: ...
        def __gt__(self, other: Self) -> bool: ...
        def __le__(self, other: Self) -> bool: ...
    else:
        # Pre-3.11 runtime signatures carried a defaulted second parameter.
        def __ge__(self, other: Self, NotImplemented: Any = ...) -> bool: ...
        def __gt__(self, other: Self, NotImplemented: Any = ...) -> bool: ...
        def __le__(self, other: Self, NotImplemented: Any = ...) -> bool: ...

    def address_exclude(self, other: Self) -> Iterator[Self]: ...
    @property
    def broadcast_address(self) -> _A: ...
    def compare_networks(self, other: Self) -> int: ...
    def hosts(self) -> Iterator[_A]: ...
    @property
    def is_global(self) -> bool: ...
    @property
    def is_link_local(self) -> bool: ...
    @property
    def is_loopback(self) -> bool: ...
    @property
    def is_multicast(self) -> bool: ...
    @property
    def is_private(self) -> bool: ...
    @property
    def is_reserved(self) -> bool: ...
    @property
    def is_unspecified(self) -> bool: ...
    @property
    def max_prefixlen(self) -> int: ...
    @property
    def num_addresses(self) -> int: ...
    def overlaps(self, other: _BaseNetwork[IPv4Address] | _BaseNetwork[IPv6Address]) -> bool: ...
    @property
    def prefixlen(self) -> int: ...
    def subnet_of(self, other: Self) -> bool: ...
    def supernet_of(self, other: Self) -> bool: ...
    def subnets(self, prefixlen_diff: int = 1, new_prefix: int | None = None) -> Iterator[Self]: ...
    def supernet(self, prefixlen_diff: int = 1, new_prefix: int | None = None) -> Self: ...
    @property
    def with_hostmask(self) -> str: ...
    @property
    def with_netmask(self) -> str: ...
    @property
    def with_prefixlen(self) -> str: ...
    @property
    def hostmask(self) -> _A: ...
class _BaseInterface(_BaseAddress, Generic[_A, _N]):
    # An address paired with its enclosing network (e.g. "192.0.2.5/24").
    hostmask: _A
    netmask: _A
    network: _N
    @property
    def ip(self) -> _A: ...
    @property
    def with_hostmask(self) -> str: ...
    @property
    def with_netmask(self) -> str: ...
    @property
    def with_prefixlen(self) -> str: ...

class _BaseV4:
    # IPv4 mixin pinning the version/prefix-length literals.
    @property
    def version(self) -> Literal[4]: ...
    @property
    def max_prefixlen(self) -> Literal[32]: ...

class IPv4Address(_BaseV4, _BaseAddress): ...
class IPv4Network(_BaseV4, _BaseNetwork[IPv4Address]): ...

class IPv4Interface(IPv4Address, _BaseInterface[IPv4Address, IPv4Network]):
    def __eq__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
class _BaseV6:
    # IPv6 mixin pinning the version/prefix-length literals.
    @property
    def version(self) -> Literal[6]: ...
    @property
    def max_prefixlen(self) -> Literal[128]: ...

class IPv6Address(_BaseV6, _BaseAddress):
    # Transition-mechanism views: IPv4-mapped, 6to4, Teredo (None when the
    # address is not of the corresponding form).
    @property
    def ipv4_mapped(self) -> IPv4Address | None: ...
    @property
    def is_site_local(self) -> bool: ...
    @property
    def sixtofour(self) -> IPv4Address | None: ...
    @property
    def teredo(self) -> tuple[IPv4Address, IPv4Address] | None: ...
    if sys.version_info >= (3, 9):
        @property
        def scope_id(self) -> str | None: ...

    def __hash__(self) -> int: ...
    def __eq__(self, other: object) -> bool: ...

class IPv6Network(_BaseV6, _BaseNetwork[IPv6Address]):
    @property
    def is_site_local(self) -> bool: ...

class IPv6Interface(IPv6Address, _BaseInterface[IPv6Address, IPv6Network]):
    def __eq__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...

def v4_int_to_packed(address: int) -> bytes: ...
def v6_int_to_packed(address: int) -> bytes: ...

# Third overload is technically incorrect, but convenient when first and last are return values of ip_address()
@overload
def summarize_address_range(first: IPv4Address, last: IPv4Address) -> Iterator[IPv4Network]: ...
@overload
def summarize_address_range(first: IPv6Address, last: IPv6Address) -> Iterator[IPv6Network]: ...
@overload
def summarize_address_range(
    first: IPv4Address | IPv6Address, last: IPv4Address | IPv6Address
) -> Iterator[IPv4Network] | Iterator[IPv6Network]: ...
def collapse_addresses(addresses: Iterable[_N]) -> Iterator[_N]: ...
@overload
def get_mixed_type_key(obj: _A) -> tuple[int, _A]: ...
@overload
def get_mixed_type_key(obj: IPv4Network) -> tuple[int, IPv4Address, IPv4Address]: ...
@overload
def get_mixed_type_key(obj: IPv6Network) -> tuple[int, IPv6Address, IPv6Address]: ...

class AddressValueError(ValueError): ...
class NetmaskValueError(ValueError): ...
7,592 | fcn | # Copyright 2021 Planet Labs, Inc.
# Copyright 2022 Planet Labs PBC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pytest
from planet import geojson, exceptions
LOGGER = logging.getLogger(__name__)
@pytest.fixture
def assert_geom_equal():
    """Fixture returning a comparator asserting two geojson geometries are
    equal after JSON normalization (so tuples compare equal to lists)."""
    def _normalize(obj):
        # BUG FIX: json.dumps already serializes tuples as JSON arrays, so a
        # plain round-trip is sufficient. The previous string-level
        # replacement of ')'/'(' with ']'/'[' could corrupt parentheses
        # appearing inside string values.
        return json.loads(json.dumps(obj))

    def _assert(geom_1, geom_2):
        assert _normalize(geom_1) == _normalize(geom_2)

    return _assert
def test_geom_from_geojson_success(geom_geojson,
                                   feature_geojson,
                                   featurecollection_geojson,
                                   assert_geom_equal):
    """The same geometry is extracted from a bare geometry, a Feature, and a
    single-feature FeatureCollection."""
    ggeo = geojson.as_geom(geom_geojson)
    assert_geom_equal(ggeo, geom_geojson)
    fgeo = geojson.geom_from_geojson(feature_geojson)
    assert_geom_equal(fgeo, geom_geojson)
    fcgeo = geojson.geom_from_geojson(featurecollection_geojson)
    assert_geom_equal(fcgeo, geom_geojson)
def test_geom_from_geojson_no_geometry(feature_geojson):
    """A Feature without a 'geometry' key is rejected."""
    feature_geojson.pop('geometry')
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.geom_from_geojson(feature_geojson)


def test_geom_from_geojson_missing_coordinates(geom_geojson):
    """A geometry without 'coordinates' is rejected."""
    geom_geojson.pop('coordinates')
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.geom_from_geojson(geom_geojson)


def test_geom_from_geojson_missing_type(geom_geojson):
    """A geometry without 'type' is rejected."""
    geom_geojson.pop('type')
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.geom_from_geojson(geom_geojson)
def test_geom_from_geojson_multiple_features(featurecollection_geojson):
    """A FeatureCollection with more than one feature is rejected."""
    # duplicate the feature
    featurecollection_geojson[
        'features'] = 2 * featurecollection_geojson['features']
    # Consistency fix: every other test in this module catches
    # exceptions.GeoJSONError; geojson.GeoJSONError is presumably the same
    # class re-exported — verify against planet.geojson.
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.geom_from_geojson(featurecollection_geojson)
def test_validate_geom_invalid_type(geom_geojson):
    """An unknown geometry type is rejected."""
    geom_geojson['type'] = 'invalid'
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.validate_geom(geom_geojson)


def test_validate_geom_wrong_type(geom_geojson):
    """Geometry type matching is case-sensitive ('point' is not 'Point')."""
    geom_geojson['type'] = 'point'
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.validate_geom(geom_geojson)


def test_validate_geom_invalid_coordinates(geom_geojson):
    """A bogus coordinates value is rejected."""
    geom_geojson['coordinates'] = 'invalid'
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.validate_geom(geom_geojson)


def test_validate_geom_empty_coordinates(geom_geojson):
    """Empty coordinates are accepted (no exception expected)."""
    geom_geojson['coordinates'] = []
    _ = geojson.validate_geom(geom_geojson)
def test_as_geom(geom_geojson):
    """A geometry dict passes through as_geom unchanged."""
    assert geojson.as_geom(geom_geojson) == geom_geojson


def test_as_polygon(geom_geojson):
    """A polygon geometry passes through as_polygon unchanged.

    NOTE(review): assumes the geom_geojson fixture is a Polygon — confirm in
    conftest.
    """
    assert geojson.as_polygon(geom_geojson) == geom_geojson


def test_as_polygon_wrong_type(point_geom_geojson):
    """as_polygon rejects non-polygon geometries (here a Point)."""
    with pytest.raises(exceptions.GeoJSONError):
        _ = geojson.as_polygon(point_geom_geojson)


def test_as_featurecollection_success(feature_geojson):
    """A list of features is wrapped into a FeatureCollection verbatim."""
    feature2 = feature_geojson.copy()
    feature2['properties'] = {'foo': 'bar'}
    values = [feature_geojson, feature2]

    res = geojson.as_featurecollection(values)
    expected = {'type': 'FeatureCollection', 'features': values}
    assert res == expected


def test__is_instance_of_success(feature_geojson):
    """_is_instance_of recognizes Features, with or without properties."""
    assert geojson._is_instance_of(feature_geojson, 'Feature')
    feature2 = feature_geojson.copy()
    feature2['properties'] = {'foo': 'bar'}
    assert geojson._is_instance_of(feature2, 'Feature')


def test__is_instance_of_does_not_exist(feature_geojson):
    """Asking about an unknown GeoJSON type name raises."""
    with pytest.raises(exceptions.GeoJSONError):
        geojson._is_instance_of(feature_geojson, 'Foobar')
7,593 | ref adadelta |
import functools
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestAdadelta(serial.SerializedTestCase):
    """Reference-checked tests for the Adadelta and SparseAdadelta operators."""

    @staticmethod
    def METHOD_NAME(param_in,
                    mom_in,
                    mom_delta_in,
                    grad, lr,
                    epsilon,
                    decay,
                    using_fp16=False):
        # NumPy reference implementation of one Adadelta step. Math is done
        # in fp32 even for fp16 inputs; results are cast back at the end.
        param_in_f32 = param_in
        mom_in_f32 = mom_in
        mom_delta_in_f32 = mom_delta_in
        if(using_fp16):
            param_in_f32 = param_in.astype(np.float32)
            mom_in_f32 = mom_in.astype(np.float32)
            mom_delta_in_f32 = mom_delta_in.astype(np.float32)

        # Running average of squared gradients.
        mom_out = decay * mom_in_f32 + (1.0 - decay) * grad * grad
        # RMS-scaled gradient using the running average of squared updates.
        new_grad = (np.sqrt(mom_delta_in_f32 + epsilon) /
                    np.sqrt(mom_out + epsilon)) * grad
        # Note the update is applied additively here, matching the operator
        # under test.
        param_out = param_in_f32 + lr * new_grad
        # Running average of squared parameter updates.
        mom_delta_out = decay * mom_delta_in_f32 + (1.0 - decay
                                                    ) * new_grad * new_grad
        if(using_fp16):
            return (param_out.astype(np.float16), mom_out.astype(np.float16),
                    mom_delta_out.astype(np.float16))
        else:
            return (param_out.astype(np.float32), mom_out.astype(np.float32),
                    mom_delta_out.astype(np.float32))

    @given(inputs=hu.tensors(n=4),
           lr=hu.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=hu.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           decay=hu.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           **hu.gcs)
    @settings(deadline=10000)
    def test_adadelta(self, inputs, lr, epsilon, decay, gc, dc):
        # Dense Adadelta checked against the reference above.
        param, moment, moment_delta, grad = inputs
        # Moments are running averages of squares and must be non-negative.
        moment = np.abs(moment)
        moment_delta = np.abs(moment_delta)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator(
            "Adadelta",
            ["param", "moment", "moment_delta", "grad", "lr"],
            ["param", "moment", "moment_delta"],
            epsilon=epsilon,
            decay=decay,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc, op,
            [param, moment, moment_delta, grad, lr],
            functools.partial(self.METHOD_NAME, epsilon=epsilon, decay=decay))

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
    @given(inputs=hu.tensors(n=4),
           lr=hu.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=hu.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           decay=hu.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           **hu.gcs)
    def test_sparse_adadelta(self, inputs, lr, epsilon, decay, gc, dc):
        param, moment, moment_delta, grad = inputs
        moment = np.abs(moment)
        moment_delta = np.abs(moment_delta)
        lr = np.array([lr], dtype=np.float32)

        # Create an indexing array containing values that are lists of indices,
        # which index into grad
        indices = np.random.choice(np.arange(grad.shape[0]),
                                   size=np.random.randint(grad.shape[0]), replace=False)

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseAdadelta",
            ["param", "moment", "moment_delta", "indices", "grad", "lr"],
            ["param", "moment", "moment_delta"],
            epsilon=epsilon,
            decay=decay,
            device_option=gc)

        def ref_sparse(param, moment, moment_delta, indices, grad, lr, decay,
                       ref_using_fp16):
            # Apply the dense reference row-by-row to the selected indices
            # only; untouched rows must pass through unchanged.
            param_out = np.copy(param)
            moment_out = np.copy(moment)
            moment_delta_out = np.copy(moment_delta)
            for i, index in enumerate(indices):
                param_out[index], moment_out[index], moment_delta_out[
                    index] = self.METHOD_NAME(param[index], moment[index],
                                              moment_delta[index], grad[i], lr,
                                              epsilon, decay, ref_using_fp16)
            return (param_out, moment_out, moment_delta_out)

        # On GPU, additionally exercise the fp16 storage path.
        ref_using_fp16_values = [False]
        if gc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            moment_i = None
            moment_delta_i = None
            param_i = None
            if(ref_using_fp16):
                moment_i = moment.astype(np.float16)
                moment_delta_i = moment_delta.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                moment_i = moment.astype(np.float32)
                moment_delta_i = moment_delta.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(gc, op, [
                param_i, moment_i, moment_delta_i, indices, grad, lr, decay,
                ref_using_fp16
            ], ref_sparse)

    @given(inputs=hu.tensors(n=3),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           decay=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           **hu.gcs)
    @settings(deadline=None)
    def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay, gc, dc):
        # Degenerate case: an empty index/gradient set must leave all state
        # unchanged.
        param, moment, moment_delta = inputs
        moment = np.abs(moment)
        lr = np.array([lr], dtype=np.float32)

        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0,), dtype=np.int64)

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        op = core.CreateOperator(
            "SparseAdadelta",
            ["param", "moment", "moment_delta", "indices", "grad", "lr"],
            ["param", "moment", "moment_delta"],
            epsilon=epsilon,
            decay=decay,
            device_option=gc)

        def ref_sparse_empty(param, moment, moment_delta, indices, grad, lr, decay):
            param_out = np.copy(param)
            moment_out = np.copy(moment)
            moment_delta_out = np.copy(moment_delta)
            return (param_out, moment_out, moment_delta_out)

        ref_using_fp16_values = [False]
        if gc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            moment_i = None
            moment_delta_i = None
            param_i = None
            if(ref_using_fp16):
                moment_i = moment.astype(np.float16)
                moment_delta_i = moment_delta.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                moment_i = moment.astype(np.float32)
                moment_delta_i = moment_delta.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(
                gc,
                op,
                [param_i, moment_i, moment_delta_i, indices, grad, lr, decay],
                ref_sparse_empty
            )
7,594 | add common params | # -*- coding: UTF-8 -*-
# Copyright (c) 2020 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common code and constants"""
import argparse
import enum
import logging
import platform
from pathlib import Path
# Constants
ENCODING = 'UTF-8' # For config files and patches
USE_REGISTRY = '_use_registry'
LOGGER_NAME = 'ungoogled'
# Public classes
class PlatformEnum(enum.Enum):
    """Enum for platforms that need distinction for certain functionality"""
    UNIX = 'unix' # Currently covers anything that isn't Windows
    WINDOWS = 'windows'


class ExtractorEnum: #pylint: disable=too-few-public-methods
    """Enum for extraction binaries"""
    # Identifier strings for the supported archive extractors.
    SEVENZIP = '7z'
    TAR = 'tar'
    WINRAR = 'winrar'
class SetLogLevel(argparse.Action): #pylint: disable=too-few-public-methods
    """Sets logging level based on command line arguments it receives"""

    # Map of --log-level choice strings to logging module constants.
    # Hoisted to a class attribute so it is not rebuilt on every call.
    _LEVELS = {
        'FATAL': logging.FATAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # Modernized from the Python-2 style super(SetLogLevel, self) call.
        super().__init__(option_strings, dest, nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, value, option_string=None):
        """Translate the triggering option into a logging level and apply it."""
        if option_string in ('--verbose', '-v'):
            value = logging.DEBUG
        elif option_string in ('--quiet', '-q'):
            value = logging.ERROR
        else:
            # --log-level: `value` is one of the _LEVELS choice strings.
            value = self._LEVELS[value]
        set_logging_level(value)
# Public methods
def get_logger(initial_level=logging.INFO):
    """Return the shared named logger, initializing it on first use."""
    logger = logging.getLogger(LOGGER_NAME)
    if logger.level != logging.NOTSET:
        # Already initialized by a previous call.
        return logger
    logger.setLevel(initial_level)
    if logger.hasHandlers():
        return logger
    # First-time setup: attach a console handler with a simple format.
    handler = logging.StreamHandler()
    handler.setLevel(initial_level)
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logger.addHandler(handler)
    return logger
def set_logging_level(logging_level):
    """Sets logging level of logger and all its handlers"""
    # A falsy level means "use the default".
    effective_level = logging_level or logging.INFO
    logger = get_logger()
    logger.setLevel(effective_level)
    for handler in logger.handlers:
        handler.setLevel(effective_level)
    return logger
def get_running_platform():
    """
    Returns a PlatformEnum value indicating the platform that utils is running on.

    NOTE: Platform detection should only be used when no cross-platform alternative is available.
    """
    uname = platform.uname()
    # Treat both native Windows and WSL ('Microsoft' in the kernel release
    # string) as Windows; everything else is grouped under UNIX.
    running_on_windows = uname.system == 'Windows' or 'Microsoft' in uname.release
    return PlatformEnum.WINDOWS if running_on_windows else PlatformEnum.UNIX
def get_chromium_version():
    """Returns the Chromium version."""
    # The version is read from chromium_version.txt located two levels above
    # this file, with surrounding whitespace stripped.
    return (Path(__file__).parent.parent / 'chromium_version.txt').read_text().strip()
def parse_series(series_path):
    """
    Returns an iterator over the entries (as strings) of the series file.

    series_path is a pathlib.Path to the series file
    """
    with series_path.open(encoding=ENCODING) as series_file:
        raw_lines = series_file.read().splitlines()
    # Skip blank lines and full-line comments, then strip any trailing
    # in-line comment from each remaining entry.
    entries = [
        line.strip().split(' #')[0]
        for line in raw_lines
        if line and not line.startswith('#')
    ]
    return iter(entries)
def METHOD_NAME(parser):
    """
    Adds common command line arguments to a parser.
    """
    # Logging levels
    # All three options funnel through the SetLogLevel action and are
    # mutually exclusive: either an explicit --log-level or a -v/-q shorthand.
    logging_group = parser.add_mutually_exclusive_group()
    logging_group.add_argument(
        '--log-level',
        action=SetLogLevel,
        choices=['FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
        help="Set logging level of current script. Only one of 'log-level', 'verbose',"
        " 'quiet' can be set at a time.")
    logging_group.add_argument(
        '--quiet',
        '-q',
        action=SetLogLevel,
        nargs=0,
        help="Display less outputs to console. Only one of 'log-level', 'verbose',"
        " 'quiet' can be set at a time.")
    logging_group.add_argument(
        '--verbose',
        '-v',
        action=SetLogLevel,
        nargs=0,
        help="Increase logging verbosity to include DEBUG messages. Only one of "
        "'log-level', 'verbose', 'quiet' can be set at a time.")
7,595 | get moments | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.autograd import grad
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.distributions.util import is_identically_one
from pyro.infer.autoguide.initialization import InitMessenger, init_to_uniform
from pyro.infer.reparam import LocScaleReparam
from tests.common import assert_close
from .util import check_init_reparam
# Test helper to extract a few central moments from samples.
def METHOD_NAME(x):
    """Return stacked (mean, variance, skewness, kurtosis) over dim 0 of x."""
    mean = x.mean(0)
    centered = x - mean
    squared = centered * centered
    variance = squared.mean(0)
    # Standardized third and fourth central moments.
    skewness = (centered * squared).mean(0) / variance**1.5
    kurtosis = (squared * squared).mean(0) / variance**2
    return torch.stack([mean, variance, skewness, kurtosis])
@pytest.mark.parametrize("shape", [(), (4,), (3, 2)], ids=str)
@pytest.mark.parametrize("centered", [0.0, 0.6, 1.0, torch.tensor(0.4), None])
@pytest.mark.parametrize("dist_type", ["Normal", "StudentT", "AsymmetricLaplace"])
def test_moments(dist_type, centered, shape):
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
if isinstance(centered, torch.Tensor):
centered = centered.expand(shape)
def model():
with pyro.plate_stack("plates", shape):
with pyro.plate("particles", 200000):
if "dist_type" == "Normal":
pyro.sample("x", dist.Normal(loc, scale))
elif "dist_type" == "StudentT":
pyro.sample("x", dist.StudentT(10.0, loc, scale))
else:
pyro.sample("x", dist.AsymmetricLaplace(loc, scale, 1.5))
value = poutine.trace(model).get_trace().nodes["x"]["value"]
expected_probe = METHOD_NAME(value)
reparam = LocScaleReparam(centered)
reparam_model = poutine.reparam(model, {"x": reparam})
value = poutine.trace(reparam_model).get_trace().nodes["x"]["value"]
actual_probe = METHOD_NAME(value)
if not is_identically_one(centered):
if "dist_type" == "Normal":
assert reparam.shape_params == ()
elif "dist_type" == "StudentT":
assert reparam.shape_params == ("df",)
else:
assert reparam.shape_params == ("asymmetry",)
assert_close(actual_probe, expected_probe, atol=0.1, rtol=0.05)
for actual_m, expected_m in zip(actual_probe, expected_probe):
expected_grads = grad(expected_m.sum(), [loc, scale], retain_graph=True)
actual_grads = grad(actual_m.sum(), [loc, scale], retain_graph=True)
assert_close(actual_grads[0], expected_grads[0], atol=0.1, rtol=0.05)
assert_close(actual_grads[1], expected_grads[1], atol=0.1, rtol=0.05)
@pytest.mark.parametrize("shape", [(), (4,), (3, 2)], ids=str)
@pytest.mark.parametrize("centered", [0.0, 0.6, 1.0, torch.tensor(0.4), None])
@pytest.mark.parametrize("dist_type", ["Normal", "StudentT", "AsymmetricLaplace"])
def test_init(dist_type, centered, shape):
loc = torch.empty(shape).uniform_(-1.0, 1.0)
scale = torch.empty(shape).uniform_(0.5, 1.5)
def model():
with pyro.plate_stack("plates", shape):
if "dist_type" == "Normal":
return pyro.sample("x", dist.Normal(loc, scale))
elif "dist_type" == "StudentT":
return pyro.sample("x", dist.StudentT(10.0, loc, scale))
else:
return pyro.sample("x", dist.AsymmetricLaplace(loc, scale, 1.5))
check_init_reparam(model, LocScaleReparam(centered))
@pytest.mark.xfail(
    reason=(
        "reparam inside plate not compatible with init messenger,"
        " issue https://github.com/pyro-ppl/pyro/issues/2990"
    )
)
def test_init_with_reparam_inside_plate():
    """Known-broken case: applying the reparam inside the plate context."""
    def model():
        with pyro.plate("N", 10):
            with poutine.reparam(config={"x": LocScaleReparam(centered=0.0)}):
                return pyro.sample("x", dist.Normal(0, 1))

    with InitMessenger(init_to_uniform()):
        actual = model()
    assert actual.shape == (10,)
7,596 | make profile image name | """
Helper functions for the accounts API.
"""
import hashlib
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import get_storage_class
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from common.djangoapps.student.models import UserProfile
from ..errors import UserNotFound
PROFILE_IMAGE_FILE_EXTENSION = 'jpg' # All processed profile images are converted to JPEGs
_PROFILE_IMAGE_SIZES = list(settings.PROFILE_IMAGE_SIZES_MAP.values())
def get_profile_image_storage():
    """
    Configures and returns a django Storage instance that can be used
    to physically locate, read and write profile images.
    """
    # PROFILE_IMAGE_BACKEND is a dict with a 'class' dotted path and an
    # 'options' kwargs mapping for the storage constructor.
    config = settings.PROFILE_IMAGE_BACKEND
    storage_class = get_storage_class(config['class'])
    return storage_class(**config['options'])
def METHOD_NAME(username):
    """
    Returns the user-specific part of the image filename, based on a hash of
    the username.
    """
    # Seeding makes the stored filename non-derivable from the bare username.
    seeded_username = settings.PROFILE_IMAGE_HASH_SEED + username
    digest = hashlib.md5(seeded_username.encode('utf-8'))
    return digest.hexdigest()
def _get_profile_image_filename(name, size, file_extension=PROFILE_IMAGE_FILE_EXTENSION):
    """
    Returns the full filename for a profile image, given the name and size.
    """
    # e.g. "<hashed-name>_50.jpg"
    return f'{name}_{size}.{file_extension}'
def _get_profile_image_urls(name, storage, file_extension=PROFILE_IMAGE_FILE_EXTENSION, version=None):
    """
    Returns a dict containing the urls for a complete set of profile images,
    keyed by "friendly" name (e.g. "full", "large", "medium", "small").

    `version`, when given, is appended as a `v=` query parameter for
    cache-busting.
    """
    def _make_url(size):
        url = storage.url(
            _get_profile_image_filename(name, size, file_extension=file_extension)
        )
        # Return the URL, with the "v" parameter added as its query
        # string with "?v=". If the original URL already includes a
        # query string (such as signed S3 URLs), append to the query
        # string with "&v=" instead.
        separator = '&' if '?' in url else '?'
        return f'{url}{separator}v={version}' if version is not None else url

    # One URL per configured pixel size, keyed by its display name.
    return {size_display_name: _make_url(size) for size_display_name, size in settings.PROFILE_IMAGE_SIZES_MAP.items()}
def get_profile_image_names(username):
    """
    Returns a dict containing the filenames for a complete set of profile
    images, keyed by pixel size.
    """
    # All sizes share the same hashed base name; only the size suffix varies.
    name = METHOD_NAME(username)
    return {size: _get_profile_image_filename(name, size) for size in _PROFILE_IMAGE_SIZES}
def get_profile_image_urls_for_user(user, request=None):
    """
    Return a dict {size:url} for each profile image for a given user.

    Notes:
      - this function does not determine whether the set of profile images
        exists, only what the URLs will be if they do exist. It is assumed that
        callers will use `_get_default_profile_image_urls` instead to provide
        a set of urls that point to placeholder images, when there are no user-
        submitted images.

      - based on the value of django.conf.settings.PROFILE_IMAGE_BACKEND,
        the URL may be relative, and in that case the caller is responsible for
        constructing the full URL if needed.

    Arguments:
        user (django.contrib.auth.User): the user for whom we are getting urls.

    Returns:
        dictionary of {size_display_name: url} for each image.
    """
    try:
        if user.profile.has_profile_image:
            # Use the upload timestamp as the cache-busting version.
            urls = _get_profile_image_urls(
                METHOD_NAME(user.username),
                get_profile_image_storage(),
                version=user.profile.profile_image_uploaded_at.strftime("%s"),
            )
        else:
            urls = _get_default_profile_image_urls()
    except UserProfile.DoesNotExist:
        # when user does not have profile it raises exception, when exception
        # occur we can simply get default image.
        urls = _get_default_profile_image_urls()

    if request:
        # With a request available, make every URL absolute.
        for key, value in urls.items():
            urls[key] = request.build_absolute_uri(value)

    return urls
def _get_default_profile_image_urls():
    """
    Build the {size: url} placeholder mapping used when a user has no
    uploaded profile image.

    TODO The result of this function should be memoized, but not in tests.
    """
    default_filename = configuration_helpers.get_value(
        'PROFILE_IMAGE_DEFAULT_FILENAME', settings.PROFILE_IMAGE_DEFAULT_FILENAME
    )
    return _get_profile_image_urls(
        default_filename,
        staticfiles_storage,
        file_extension=settings.PROFILE_IMAGE_DEFAULT_FILE_EXTENSION,
    )
def set_has_profile_image(username, is_uploaded, upload_dt=None):
    """
    System (not user-facing) API call used to store whether the user has
    uploaded a profile image, and if so, when. Used by profile_image API.

    Arguments:
        username (django.contrib.auth.User.username): references the user who
            uploaded an image.
        is_uploaded (bool): whether or not the user has an uploaded profile
            image.
        upload_dt (datetime.datetime): If `is_uploaded` is True, this should
            contain the server-side date+time of the upload. If `is_uploaded`
            is False, the parameter is optional and will be ignored.

    Raises:
        ValueError: is_uploaded was True, but no upload datetime was supplied.
        UserNotFound: no user with username `username` exists.
    """
    if is_uploaded:
        if upload_dt is None:
            raise ValueError("No upload datetime was supplied.")
    else:
        # Ignore any supplied timestamp when the image is being cleared.
        upload_dt = None
    try:
        profile = UserProfile.objects.get(user__username=username)
    except ObjectDoesNotExist:
        raise UserNotFound()  # lint-amnesty, pylint: disable=raise-missing-from
    profile.profile_image_uploaded_at = upload_dt
    profile.save()
7,597 | forward | """
This script provides an example to wrap UER-py for C3 (a multiple choice dataset).
"""
import sys
import os
import argparse
import json
import random
import torch
import torch.nn as nn
uer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(uer_dir)
from uer.embeddings import *
from uer.encoders import *
from uer.utils.constants import *
from uer.utils import *
from uer.utils.optimizers import *
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.utils.logging import init_logger
from uer.model_saver import save_model
from uer.opts import finetune_opts, tokenizer_opts, adv_opts
from finetune.run_classifier import build_optimizer, load_or_initialize_parameters, train_model, batch_loader, evaluate
class MultipleChoice(nn.Module):
    """Multiple-choice classifier: shared encoder plus a one-logit head.

    Every choice is encoded independently; the first-token representation is
    projected to a single score, and scores are reshaped to
    [batch_size, choices_num] so NLLLoss can pick the correct choice.
    """

    def __init__(self, args):
        super(MultipleChoice, self).__init__()
        self.embedding = Embedding(args)
        # Compose the embedding from each configured sub-embedding by name.
        for embedding_name in args.embedding:
            tmp_emb = str2embedding[embedding_name](args, len(args.tokenizer.vocab))
            self.embedding.update(tmp_emb, embedding_name)
        self.encoder = str2encoder[args.encoder](args)
        self.dropout = nn.Dropout(args.dropout)
        # One score per encoded sequence; reshaped per example in METHOD_NAME.
        self.output_layer = nn.Linear(args.hidden_size, 1)

    def METHOD_NAME(self, src, tgt, seg, soft_tgt=None):
        """
        Args:
            src: [batch_size x choices_num x seq_length]
            tgt: [batch_size]
            seg: [batch_size x choices_num x seq_length]
            soft_tgt: accepted for signature compatibility; unused here.

        Returns:
            (loss, logits) when tgt is given, otherwise (None, logits),
            where logits has shape [batch_size x choices_num].
        """
        choices_num = src.shape[1]
        # Flatten (batch, choice) so each choice is encoded as its own sequence.
        src = src.view(-1, src.size(-1))
        seg = seg.view(-1, seg.size(-1))
        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        output = self.dropout(output)
        # Score each sequence by its first (CLS) position only.
        logits = self.output_layer(output[:, 0, :])
        reshaped_logits = logits.view(-1, choices_num)
        if tgt is not None:
            loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(reshaped_logits), tgt.view(-1))
            return loss, reshaped_logits
        else:
            return None, reshaped_logits
def read_dataset(args, path):
    """
    Read a C3-style JSON dataset and convert it into model-ready features.

    Each (document, question) pair becomes one example holding the lower-cased
    document, the question, exactly ``args.max_choices_num`` choices (padded
    with "No Answer"), and the gold answer text.

    Args:
        args: parsed arguments; uses ``tokenizer``, ``max_choices_num`` and
            ``seq_length``.
        path: path to the JSON dataset file.

    Returns:
        list of (src, tgt, seg) tuples, where src/seg are per-choice token-id
        and segment-id lists and tgt is the index of the correct choice.
    """
    with open(path, mode="r", encoding="utf-8") as f:
        data = json.load(f)

    examples = []
    for i in range(len(data)):
        for j in range(len(data[i][1])):
            example = ["\n".join(data[i][0]).lower(), data[i][1][j]["question"].lower()]
            for k in range(len(data[i][1][j]["choice"])):
                example += [data[i][1][j]["choice"][k].lower()]
            # Pad so every example carries exactly max_choices_num choices.
            for k in range(len(data[i][1][j]["choice"]), args.max_choices_num):
                example += ["No Answer"]
            example += [data[i][1][j].get("answer", "").lower()]
            examples += [example]

    # The answer string sits immediately after the padded choice list.
    # BUG FIX: this index was hard-coded as 6, which is only correct when
    # max_choices_num == 4; any other setting silently mislabeled targets.
    answer_idx = 2 + args.max_choices_num
    # The PAD id is loop-invariant; look it up once instead of per choice.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]

    dataset = []
    for i, example in enumerate(examples):
        tgt = 0
        for k in range(args.max_choices_num):
            if example[2 + k] == example[answer_idx]:
                tgt = k
        dataset.append(([], tgt, []))
        for k in range(args.max_choices_num):
            # Segment 1: choice + question; segment 2: document context.
            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(example[k + 2]) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[1]) + [SEP_TOKEN])
            src_c = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[0]) + [SEP_TOKEN])
            src = src_a + src_b + src_c
            seg = [1] * (len(src_a) + len(src_b)) + [2] * len(src_c)
            # Truncate then pad to the fixed sequence length.
            if len(src) > args.seq_length:
                src = src[: args.seq_length]
                seg = seg[: args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            dataset[-1][0].append(src)
            dataset[-1][2].append(seg)
    return dataset
def main():
    """Fine-tune and evaluate a multiple-choice model on a C3-style dataset.

    Parses CLI options, builds tokenizer/model, trains for the configured
    number of epochs (saving the best dev-set checkpoint), and finally
    evaluates the saved checkpoint on the optional test set.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    finetune_opts(parser)
    parser.add_argument("--max_choices_num", default=4, type=int,
                        help="The maximum number of cadicate answer, shorter than this will be padded.")
    tokenizer_opts(parser)
    adv_opts(parser)
    args = parser.parse_args()
    # Each choice is one label position for the classifier head.
    args.labels_num = args.max_choices_num
    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)
    set_seed(args.seed)
    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)
    # Build multiple choice model.
    model = MultipleChoice(args)
    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)
    # Get logger.
    args.logger = init_logger(args)
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)
    # Training phase.
    trainset = read_dataset(args, args.train_path)
    instances_num = len(trainset)
    batch_size = args.batch_size
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
    args.logger.info("Batch size: {}".format(batch_size))
    args.logger.info("The number of training instances: {}".format(instances_num))
    optimizer, scheduler = build_optimizer(args, model)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp
    if torch.cuda.device_count() > 1:
        args.logger.info("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model
    if args.use_adv:
        args.adv_method = str2adv[args.adv_type](model)
    total_loss, result, best_result = 0.0, 0.0, 0.0
    args.logger.info("Start training.")
    for epoch in range(1, args.epochs_num + 1):
        # Reshuffle instances each epoch before tensorizing.
        random.shuffle(trainset)
        src = torch.LongTensor([example[0] for example in trainset])
        tgt = torch.LongTensor([example[1] for example in trainset])
        seg = torch.LongTensor([example[2] for example in trainset])
        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                args.logger.info("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i + 1, total_loss / args.report_steps))
                total_loss = 0.0
        # Checkpoint only when the dev metric improves.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)
    # Evaluation phase.
    if args.test_path is not None:
        args.logger.info("Test set evaluation.")
        # Reload the best checkpoint; DataParallel wraps the real model in .module.
        if torch.cuda.device_count() > 1:
            args.model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            args.model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))
# Allow running this fine-tuning script directly from the command line.
if __name__ == "__main__":
    main()
7,598 | get dir | """
Thread-safe file downloading and caching

Authors
* Leo 2022
* Cheng Liang 2022
"""
import hashlib
import logging
import os
import shutil
import sys
import tempfile
import time
from pathlib import Path
from urllib.request import Request, urlopen
import requests
from filelock import FileLock
from tqdm import tqdm
logger = logging.getLogger(__name__)

# Default on-disk cache for downloaded files; overridable via set_dir().
_download_dir = Path.home() / ".cache" / "s3prl" / "download"

# Public API of this module (aliases are bound at the bottom of the file).
__all__ = [
    "get_dir",
    "set_dir",
    "download",
    "urls_to_filepaths",
]
def METHOD_NAME():
    """Return the download cache directory, creating it (and parents) on demand."""
    cache_dir = _download_dir
    cache_dir.mkdir(parents=True, exist_ok=True)
    return cache_dir
def set_dir(d):
    """Override the module-wide download cache directory.

    Args:
        d: new directory (str or path-like); stored as a ``Path``.
    """
    global _download_dir
    _download_dir = Path(d)
def _download_url_to_file(url, dst, hash_prefix=None, progress=True):
    """
    Download ``url`` to the file path ``dst`` via urllib.

    This function is not thread-safe. Please ensure only a single
    thread or process can enter this block at the same time.

    Args:
        url: source URL.
        dst: destination file path (``~`` is expanded).
        hash_prefix: optional prefix the SHA-256 hex digest of the payload
            must start with; a mismatch raises RuntimeError.
        progress: whether to display a tqdm progress bar.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # getheaders() existed on older http.client messages; get_all() is the
    # modern email.message API — support both.
    if hasattr(meta, "getheaders"):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Stream into a temp file in the destination directory so the final
    # shutil.move is a same-filesystem rename.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if hash_prefix is not None:
            sha256 = hashlib.sha256()
        tqdm.write(f"Downloading: {url}", file=sys.stderr)
        tqdm.write(f"Destination: {dst}", file=sys.stderr)
        with tqdm(
            total=file_size,
            disable=not progress,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
        ) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                if hash_prefix is not None:
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        # Verify the digest before exposing the file at its final path.
        if hash_prefix is not None:
            digest = sha256.hexdigest()
            if digest[: len(hash_prefix)] != hash_prefix:
                raise RuntimeError(
                    'invalid hash value (expected "{}", got "{}")'.format(
                        hash_prefix, digest
                    )
                )
        shutil.move(f.name, dst)
    finally:
        f.close()
        # Remove the temp file if the download or hash check failed
        # before the move; after a successful move f.name no longer exists.
        if os.path.exists(f.name):
            os.remove(f.name)
def _download_url_to_file_requests(url, dst, hash_prefix=None, progress=True):
    """
    Download ``url`` to ``dst`` using ``requests``.

    Fallback path used when the urllib-based download fails.

    Args:
        url: source URL.
        dst: destination file path (``~`` is expanded).
        hash_prefix: optional prefix the SHA-256 hex digest of the payload
            must start with; a mismatch raises RuntimeError.
        progress: whether to display a tqdm progress bar.
    """
    req = requests.get(url, stream=True, headers={"User-Agent": "torch.hub"})
    # BUG FIX: servers may omit Content-Length (e.g. chunked transfer
    # encoding); indexing the header raised KeyError. tqdm accepts
    # total=None and simply shows an unbounded progress bar.
    content_length = req.headers.get("Content-Length")
    file_size = int(content_length) if content_length is not None else None
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Stream into a temp file in the destination directory so the final
    # shutil.move is a same-filesystem rename.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if hash_prefix is not None:
            sha256 = hashlib.sha256()
        # (was an f-string with no placeholders)
        tqdm.write(
            "urllib.Request method failed. Trying using another method...",
            file=sys.stderr,
        )
        tqdm.write(f"Downloading: {url}", file=sys.stderr)
        tqdm.write(f"Destination: {dst}", file=sys.stderr)
        with tqdm(
            total=file_size,
            disable=not progress,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
        ) as pbar:
            for chunk in req.iter_content(chunk_size=1024 * 1024 * 10):
                if chunk:
                    f.write(chunk)
                    f.flush()
                    os.fsync(f.fileno())
                    if hash_prefix is not None:
                        sha256.update(chunk)
                    pbar.update(len(chunk))
        f.close()
        # Verify the digest before exposing the file at its final path.
        if hash_prefix is not None:
            digest = sha256.hexdigest()
            if digest[: len(hash_prefix)] != hash_prefix:
                raise RuntimeError(
                    'invalid hash value (expected "{}", got "{}")'.format(
                        hash_prefix, digest
                    )
                )
        shutil.move(f.name, dst)
    finally:
        f.close()
        # Remove the temp file if the download or hash check failed.
        if os.path.exists(f.name):
            os.remove(f.name)
def _download(filepath: Path, url, refresh: bool, new_enough_secs: float = 2.0):
    """
    Download ``url`` to ``filepath`` under a cross-process file lock.

    If refresh is True, check the latest modified time of the filepath.
    If the file is new enough (no older than `new_enough_secs`), directly
    use it; otherwise re-download. This is useful when multiple processes
    are all downloading the same large file.

    Args:
        filepath: destination path.
        url: source URL.
        refresh: re-download when the cached file is older than
            ``new_enough_secs`` seconds.
        new_enough_secs: freshness window used when ``refresh`` is True.
    """
    Path(filepath).parent.mkdir(exist_ok=True, parents=True)
    lock_file = Path(str(filepath) + ".lock")
    logger.info(f"Requesting URL: {url}")
    with FileLock(str(lock_file)):
        if not filepath.is_file() or (
            refresh and (time.time() - os.path.getmtime(filepath)) > new_enough_secs
        ):
            try:
                _download_url_to_file(url, filepath)
            # BUG FIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; only fall back on ordinary errors.
            except Exception:
                _download_url_to_file_requests(url, filepath)
    logger.info(f"Using URL's local file: {filepath}")
def _urls_to_filepaths(*args, refresh=False, download: bool = True):
    """
    Resolve each URL to a deterministic local cache path, downloading on demand.

    The cache filename is the SHA-256 of the URL plus the URL's basename.

    Args:
        *args: one or more URLs.
        refresh: forwarded to the downloader's freshness check.
        download: when False, only compute paths without downloading.

    Return:
        A single path string when one URL is given, otherwise a list of paths.
    """
    resolved = []
    for url in args:
        assert isinstance(url, str)
        digest = hashlib.sha256(str.encode(url)).hexdigest()
        filepath = METHOD_NAME() / f"{digest}.{Path(url).name}"
        if download:
            _download(filepath, url, refresh=refresh)
        resolved.append(str(filepath.resolve()))
    return resolved if len(resolved) > 1 else resolved[0]
# Public aliases for the names exported in ``__all__``.
download = _download
urls_to_filepaths = _urls_to_filepaths
7,599 | is default | import functools
import re
from typing import Dict
from typing import Optional
from typing import Tuple
from xsdata.models.enums import Namespace
from xsdata.utils import text
# URI path segments dropped when deriving names (see clean_uri / to_package_name).
__uri_ignore__ = ("www", "xsd", "wsdl")

# Loose scheme://path#fragment pattern used by is_uri(); not a strict RFC 3986 check.
URI_REGEX = re.compile(
    r"^(([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?"
    r"/{0,2}[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%]+)?"
    r"(#[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%]+)?$"
)
def load_prefix(uri: str, ns_map: Dict) -> Optional[str]:
    """Get or create a prefix for the given uri in the prefix-URI namespace
    mapping."""
    matches = [prefix for prefix, ns in ns_map.items() if ns == uri]
    if matches:
        # First mapping wins, including a None/empty (default-namespace) prefix.
        return matches[0]
    return generate_prefix(uri, ns_map)
def generate_prefix(uri: str, ns_map: Dict) -> str:
    """Generate and add a prefix for the given uri in the prefix-URI namespace
    mapping."""
    known = Namespace.get_enum(uri)
    # Well-known namespaces keep their canonical prefix; everything else
    # gets a synthetic ns<N> name based on the current mapping size.
    prefix = known.prefix if known else f"ns{len(ns_map)}"
    ns_map[prefix] = uri
    return prefix
def prefix_exists(uri: str, ns_map: Dict) -> bool:
    """Check if the uri exists in the prefix-URI namespace mapping."""
    return any(ns == uri for ns in ns_map.values())
def METHOD_NAME(uri: str, ns_map: Dict) -> bool:
    """Check if the uri exists and it has no prefix (i.e. it is the default
    namespace)."""
    return any(ns == uri and not prefix for prefix, ns in ns_map.items())
def clean_prefixes(ns_map: Dict) -> Dict:
    """Normalize a prefix-URI mapping.

    Drops entries with empty URIs, collapses empty-string prefixes to None,
    keeps the first URI seen per prefix, and removes the default (None)
    namespace when the same URI is also bound to a named prefix.
    """
    result: Dict = {}
    for prefix, ns in ns_map.items():
        if not ns:
            continue
        result.setdefault(prefix or None, ns)

    default_ns = result.get(None)
    if default_ns is not None:
        duplicated = any(prefix and ns == default_ns for prefix, ns in result.items())
        if duplicated:
            result.pop(None)
    return result
def clean_uri(namespace: str) -> str:
    """Remove common prefixes and suffixes from a uri string and join the
    remaining dot-separated parts with underscores."""
    if namespace.startswith("##"):
        namespace = namespace[2:]

    left, right = text.split(namespace)
    if left == "urn":
        namespace = right
    elif left in ("http", "https"):
        # Drop the leading "//" after the scheme separator.
        namespace = right[2:]

    parts = namespace.split(".")
    return "_".join(part for part in parts if part not in __uri_ignore__)
def real_xsi_type(qname: str, target_qname: Optional[str]) -> Optional[str]:
    """Determine if the given target qualified name should be used to define a
    derived type; return None when it matches the element's own qname."""
    if target_qname == qname:
        return None
    return target_qname
@functools.lru_cache(maxsize=50)
def build_qname(tag_or_uri: Optional[str], tag: Optional[str] = None) -> str:
    """Create a namespace-qualified string like ``{uri}tag``.

    With only a tag (no uri) the tag is returned as-is; with only a uri the
    uri is returned as-is. Raises ValueError when both are empty.
    """
    if tag_or_uri:
        return f"{{{tag_or_uri}}}{tag}" if tag else tag_or_uri
    if tag:
        return tag
    raise ValueError("Invalid input both uri and tag are empty.")
@functools.lru_cache(maxsize=50)
def split_qname(tag: str) -> Tuple:
    """Split a namespace-qualified string into a (uri, local_name) tuple;
    uri is None when the tag carries no ``{uri}`` wrapper."""
    if tag[0] != "{":
        return None, tag
    left, right = text.split(tag[1:], "}")
    return (left, right) if left else (None, tag)
def target_uri(tag: str) -> Optional[str]:
    """Return the namespace-URI part of a qualified tag, or None."""
    return split_qname(tag)[0]


def local_name(tag: str) -> str:
    """Return the local-name part of a qualified tag."""
    return split_qname(tag)[1]
# Punctuation characters allowed after the first character of an NCName.
NCNAME_PUNCTUATION = {"\u00B7", "\u0387", ".", "-", "_"}


def is_ncname(name: Optional[str]) -> bool:
    """Verify given string is a valid ncname."""
    if not name:
        return False
    first, rest = name[0], name[1:]
    if not (first.isalpha() or first == "_"):
        return False
    return all(
        ch.isalpha() or ch.isdigit() or ch in NCNAME_PUNCTUATION for ch in rest
    )
def is_uri(uri: Optional[str]) -> bool:
    """Verify given string is a valid uri."""
    if not uri:
        return False
    return URI_REGEX.search(uri) is not None
@functools.lru_cache(maxsize=50)
def to_package_name(uri: Optional[str]) -> str:
    """Util method to convert a namespace to a dot style package name.

    E.g. ``http://www.example.com/api/v1.xsd`` -> ``com.example.api.v1``:
    the scheme is stripped, the domain is reversed Java-style, and ignored
    segments / extensions / ports are removed.
    """
    if not uri:
        return ""
    # Remove scheme
    domain_sep = "."
    if uri.startswith("http://"):
        uri = uri[7:]
    elif uri.startswith("urn:"):
        uri = uri[4:]
        # urn "domains" are dash-separated rather than dot-separated.
        domain_sep = "-"
        if uri.startswith("xmlns:"):
            uri = uri[6:]
        # Treat the remaining urn colons as path separators.
        uri = uri.replace(":", "/")
    # Remote target
    pos = uri.find("#")
    if pos > 0:
        uri = uri[:pos]
    tokens = [token for token in uri.split("/") if token.strip()]
    if not tokens:
        return ""
    # Remove extension
    if len(tokens) > 1:
        last = tokens[-1]
        pos = tokens[-1].rfind(".")
        if pos > 0:
            tokens[-1] = last[:pos]
    # Remove port from domain
    domain = tokens.pop(0)
    pos = domain.find(":")
    if pos > 0:
        domain = domain[:pos]
    # Remove www from domain
    if domain.startswith("www"):
        domain = domain[3:]
    # Prepend domain parts in reverse order (Java-style reversed domain).
    for part in domain.split(domain_sep):
        tokens.insert(0, part)
    return ".".join(token for token in tokens if token)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.