id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
2,000 | find | # xml.etree test for cElementTree
import doctest
import sys
from test import test_support
from xml.etree import cElementTree as ET
SAMPLE_XML = """
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
    # Doctest-only function: verifies the C accelerator module imports cleanly.
    # Executed via test_support.run_doctest() in test_main().
    """
    Import sanity.
    >>> from xml.etree import cElementTree
    """
def check_method(method):
    # Doctest helper: report (rather than raise) when an expected Element
    # method is missing or not callable, so doctests show a diff instead of
    # an exception.  Python 2 print statement — this file is Python 2 code.
    if not hasattr(method, '__call__'):
        print method, "not callable"
def serialize(ET, elem, encoding=None):
    # Render *elem* to a string by writing an ElementTree into an in-memory
    # buffer.  *encoding*, when given, is forwarded to ElementTree.write(),
    # which then emits an XML declaration.  Python 2 StringIO module.
    import StringIO
    file = StringIO.StringIO()
    tree = ET.ElementTree(elem)
    if encoding:
        tree.write(file, encoding)
    else:
        tree.write(file)
    return file.getvalue()
def summarize(elem):
    """Reduce an element to its tag name for compact doctest comparisons."""
    tag_name = elem.tag
    return tag_name
def summarize_list(seq):
    # Python 2: map() returns a list of tag names, compared literally in the
    # doctests above.
    return map(summarize, seq)
def interface():
    # Doctest: exercises the core Element/ElementTree method surface, then
    # checks serialization after append/insert/remove round-trips.
    """
    Test element tree interface.
    >>> element = ET.Element("tag", key="value")
    >>> tree = ET.ElementTree(element)
    Make sure all standard element methods exist.
    >>> check_method(element.append)
    >>> check_method(element.insert)
    >>> check_method(element.remove)
    >>> check_method(element.getchildren)
    >>> check_method(element.find)
    >>> check_method(element.findall)
    >>> check_method(element.findtext)
    >>> check_method(element.clear)
    >>> check_method(element.get)
    >>> check_method(element.set)
    >>> check_method(element.keys)
    >>> check_method(element.items)
    >>> check_method(element.getiterator)
    Basic method sanity checks.
    >>> serialize(ET, element) # 1
    '<tag key="value" />'
    >>> subelement = ET.Element("subtag")
    >>> element.append(subelement)
    >>> serialize(ET, element) # 2
    '<tag key="value"><subtag /></tag>'
    >>> element.insert(0, subelement)
    >>> serialize(ET, element) # 3
    '<tag key="value"><subtag /><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 4
    '<tag key="value"><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 5
    '<tag key="value" />'
    >>> element.remove(subelement)
    Traceback (most recent call last):
    ValueError: list.remove(x): x not in list
    >>> serialize(ET, element) # 6
    '<tag key="value" />'
    """
def METHOD_NAME():
    # Doctest: find/findall/findtext with plain names, XPath-like paths
    # ("section/tag", ".//tag", "*//*"), and namespaced tags.
    """
    Test find methods (including xpath syntax).
    >>> elem = ET.XML(SAMPLE_XML)
    >>> elem.find("tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("tag").tag
    'tag'
    >>> elem.find("section/tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("section/tag").tag
    'tag'
    >>> elem.findtext("tag")
    'text'
    >>> elem.findtext("tog")
    >>> elem.findtext("tog", "default")
    'default'
    >>> ET.ElementTree(elem).findtext("tag")
    'text'
    >>> elem.findtext("section/tag")
    'subtext'
    >>> ET.ElementTree(elem).findtext("section/tag")
    'subtext'
    >>> summarize_list(elem.findall("tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall("*"))
    ['tag', 'tag', 'section']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("section/tag"))
    ['tag']
    >>> summarize_list(elem.findall("section//tag"))
    ['tag']
    >>> summarize_list(elem.findall("section/*"))
    ['tag']
    >>> summarize_list(elem.findall("section//*"))
    ['tag']
    >>> summarize_list(elem.findall("section/.//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/*"))
    ['tag']
    >>> summarize_list(elem.findall("*//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/tag"))
    ['tag']
    >>> summarize_list(elem.findall("*/./tag"))
    ['tag']
    >>> summarize_list(elem.findall("./tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("././tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("/tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("./tag"))
    ['tag', 'tag']
    >>> elem = ET.XML(SAMPLE_XML_NS)
    >>> summarize_list(elem.findall("tag"))
    []
    >>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    """
def parseliteral():
r"""
>>> element = ET.XML("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ET.fromstring("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> print ET.tostring(element)
<html><body>text</body></html>
>>> print ET.tostring(element, "ascii")
<?xml version='1.0' encoding='ascii'?>
<html><body>text</body></html>
>>> _, ids = ET.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def check_encoding(encoding):
    """
    >>> check_encoding("ascii")
    >>> check_encoding("us-ascii")
    >>> check_encoding("iso-8859-1")
    >>> check_encoding("iso-8859-15")
    >>> check_encoding("cp437")
    >>> #check_encoding("mac-roman")
    """
    # Parse a minimal document that declares *encoding* in its XML
    # declaration; an unsupported encoding makes the parser raise.
    ET.XML(
        "<?xml version='1.0' encoding='%s'?><xml />" % encoding
    )
def bug_1534630():
    # Regression doctest: data() called on an empty TreeBuilder (before any
    # start()) must not crash the builder; see Python issue #1534630.
    """
    >>> bob = ET.TreeBuilder()
    >>> e = bob.data("data")
    >>> e = bob.start("tag", {})
    >>> e = bob.end("tag")
    >>> e = bob.close()
    >>> serialize(ET, e)
    '<tag />'
    """
def test_main():
    # regrtest entry point: run every doctest defined in this module.
    from test import test_xml_etree_c
    test_support.run_doctest(test_xml_etree_c, verbosity=True)
if __name__ == '__main__':
test_main() |
2,001 | visualise result | # SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa import Tags
import simpa as sp
import numpy as np
from skimage.data import shepp_logan_phantom
from scipy.ndimage import zoom
from simpa_tests.manual_tests import ManualIntegrationTestClass
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class SegmentationLoaderTest(ManualIntegrationTestClass):
    # Manual integration test: builds a segmentation volume from the
    # Shepp-Logan phantom, runs a segmentation-based SIMPA pipeline with an
    # MCX optical simulation, and visualises the resulting data.

    def setup(self):
        """Build the phantom-based segmentation mask and all pipeline settings."""
        self.path_manager = sp.PathManager()
        target_spacing = 1.0
        # 2-D phantom, discretized into 11 integer labels, then lifted to a
        # thin 3-D slab (x, y=1, z).
        label_mask = shepp_logan_phantom()
        label_mask = np.digitize(label_mask, bins=np.linspace(0.0, 1.0, 11), right=True)
        label_mask = np.reshape(label_mask, (400, 1, 400))
        input_spacing = 0.2
        # Extrude along y, then resample from input_spacing to target_spacing
        # with nearest-neighbour (order=0) so labels stay integral.
        segmentation_volume_tiled = np.tile(label_mask, (1, 128, 1))
        segmentation_volume_mask = np.round(zoom(segmentation_volume_tiled, input_spacing/target_spacing,
                                                 order=0)).astype(int)

        def segmentation_class_mapping():
            # Map each integer label in the mask to a tissue/molecular model.
            ret_dict = dict()
            ret_dict[0] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[1] = sp.TISSUE_LIBRARY.blood()
            ret_dict[2] = sp.TISSUE_LIBRARY.epidermis()
            ret_dict[3] = sp.TISSUE_LIBRARY.muscle()
            ret_dict[4] = sp.TISSUE_LIBRARY.mediprene()
            ret_dict[5] = sp.TISSUE_LIBRARY.ultrasound_gel()
            ret_dict[6] = sp.TISSUE_LIBRARY.heavy_water()
            # Label 7 gets a custom dilute-hemoglobin/water composition.
            ret_dict[7] = (sp.MolecularCompositionGenerator()
                           .append(sp.MOLECULE_LIBRARY.oxyhemoglobin(0.01))
                           .append(sp.MOLECULE_LIBRARY.deoxyhemoglobin(0.01))
                           .append(sp.MOLECULE_LIBRARY.water(0.98))
                           .get_molecular_composition(sp.SegmentationClasses.COUPLING_ARTIFACT))
            ret_dict[8] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[9] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[10] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[11] = sp.TISSUE_LIBRARY.heavy_water()
            return ret_dict

        self.settings = sp.Settings()
        self.settings[Tags.SIMULATION_PATH] = self.path_manager.get_hdf5_file_save_path()
        self.settings[Tags.VOLUME_NAME] = "SegmentationTest"
        self.settings[Tags.RANDOM_SEED] = 1234
        self.settings[Tags.WAVELENGTHS] = [700]
        self.settings[Tags.SPACING_MM] = target_spacing
        # Physical extents follow from voxel counts times the effective spacing.
        self.settings[Tags.DIM_VOLUME_X_MM] = 400 / (target_spacing / input_spacing)
        self.settings[Tags.DIM_VOLUME_Y_MM] = 128 / (target_spacing / input_spacing)
        self.settings[Tags.DIM_VOLUME_Z_MM] = 400 / (target_spacing / input_spacing)
        # self.settings[Tags.IGNORE_QA_ASSERTIONS] = True
        self.settings.set_volume_creation_settings({
            Tags.INPUT_SEGMENTATION_VOLUME: segmentation_volume_mask,
            Tags.SEGMENTATION_CLASS_MAPPING: segmentation_class_mapping(),
        })
        self.settings.set_optical_settings({
            Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,
            Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),
            Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_MSOT_ACUITY_ECHO,
            Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,
        })
        self.pipeline = [
            sp.SegmentationBasedVolumeCreationAdapter(self.settings),
            sp.MCXAdapter(self.settings)
        ]

    def perform_test(self):
        """Run the pipeline with an RSOM Explorer P50 detection device."""
        sp.simulate(self.pipeline, self.settings, sp.RSOMExplorerP50(element_spacing_mm=2.0,
                                                                     number_elements_y=10,
                                                                     number_elements_x=20,
                                                                     device_position_mm=np.asarray([20, 10, 0])))

    def tear_down(self):
        """Delete the simulation output file created by perform_test()."""
        os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])

    def METHOD_NAME(self, show_figure_on_screen=True, save_path=None):
        """Visualise the simulated data, on screen or saved as a PNG."""
        if show_figure_on_screen:
            save_path = None
        else:
            save_path = save_path + "SegmentationLoaderExample.png"
        sp.visualise_data(path_to_hdf5_file=self.path_manager.get_hdf5_file_save_path() + "/" + "SegmentationTest" + ".hdf5",
                          wavelength=700,
                          show_initial_pressure=True,
                          show_segmentation_map=True,
                          show_absorption=True,
                          show_fluence=True,
                          show_tissue_density=True,
                          show_speed_of_sound=True,
                          show_anisotropy=True,
                          show_scattering=True,
                          save_path=save_path,
                          log_scale=False)
if __name__ == "__main__":
test = SegmentationLoaderTest()
test.run_test(show_figure_on_screen=False) |
2,002 | set up | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class ResNetV2BackboneTest(TestCase):
    """Tests for ``ResNetV2Backbone`` and the ``ResNet50V2Backbone`` alias.

    Fix: ``assertEquals`` is a long-deprecated ``unittest`` alias (removed in
    Python 3.12); all call sites now use ``assertEqual``.  A stray trailing
    ``|`` token on the final assertion has also been removed.
    """

    def METHOD_NAME(self):
        # Shared NHWC input batch reused by every test.
        self.input_batch = np.ones(shape=(8, 224, 224, 3))

    def test_valid_call(self):
        # Backbone builds and accepts a forward pass without rescaling.
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=False,
        )
        model(self.input_batch)

    def test_valid_call_applications_model(self):
        # The preconfigured ResNet50V2 alias is callable out of the box.
        model = ResNet50V2Backbone()
        model(self.input_batch)

    def test_valid_call_with_rescaling(self):
        # Same as test_valid_call but with the input rescaling layer enabled.
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=True,
        )
        model(self.input_batch)

    @pytest.mark.large  # Saving is slow, so mark these large.
    def test_saved_model(self):
        # Round-trip through .keras serialization preserves class and outputs.
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=False,
        )
        model_output = model(self.input_batch)
        save_path = os.path.join(
            self.get_temp_dir(), "resnet_v2_backbone.keras"
        )
        model.save(save_path)
        restored_model = keras.models.load_model(save_path)

        # Check we got the real object back.
        self.assertIsInstance(restored_model, ResNetV2Backbone)

        # Check that output matches.
        restored_output = restored_model(self.input_batch)
        self.assertAllClose(
            ops.convert_to_numpy(model_output),
            ops.convert_to_numpy(restored_output),
        )

    @pytest.mark.large  # Saving is slow, so mark these large.
    def test_saved_alias_model(self):
        model = ResNet50V2Backbone()
        model_output = model(self.input_batch)
        save_path = os.path.join(
            self.get_temp_dir(), "resnet_v2_backbone.keras"
        )
        model.save(save_path)
        restored_model = keras.models.load_model(save_path)

        # Check we got the real object back.
        # Note that these aliases serialized as the base class
        self.assertIsInstance(restored_model, ResNetV2Backbone)

        # Check that output matches.
        restored_output = restored_model(self.input_batch)
        self.assertAllClose(
            ops.convert_to_numpy(model_output),
            ops.convert_to_numpy(restored_output),
        )

    def test_feature_pyramid_inputs(self):
        # The P2-P5 pyramid outputs must have the documented spatial
        # strides (2**level) and channel widths.
        model = ResNet50V2Backbone()
        backbone_model = get_feature_extractor(
            model,
            model.pyramid_level_inputs.values(),
            model.pyramid_level_inputs.keys(),
        )
        input_size = 256
        inputs = keras.Input(shape=[input_size, input_size, 3])
        outputs = backbone_model(inputs)
        levels = ["P2", "P3", "P4", "P5"]
        self.assertEqual(list(outputs.keys()), levels)
        self.assertEqual(
            outputs["P2"].shape,
            (None, input_size // 2**2, input_size // 2**2, 256),
        )
        self.assertEqual(
            outputs["P3"].shape,
            (None, input_size // 2**3, input_size // 2**3, 512),
        )
        self.assertEqual(
            outputs["P4"].shape,
            (None, input_size // 2**4, input_size // 2**4, 1024),
        )
        self.assertEqual(
            outputs["P5"].shape,
            (None, input_size // 2**5, input_size // 2**5, 2048),
        )

    @parameterized.named_parameters(
        ("one_channel", 1),
        ("four_channels", 4),
    )
    def test_application_variable_input_channels(self, num_channels):
        # ResNet50 model
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[3, 4, 6, 3],
            stackwise_strides=[1, 2, 2, 2],
            input_shape=(None, None, num_channels),
            include_rescaling=False,
        )
        self.assertEqual(model.output_shape, (None, None, None, 2048))
2,003 | async neo4j driver | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import sys
from functools import wraps
import pytest
import pytest_asyncio
from neo4j import (
AsyncGraphDatabase,
GraphDatabase,
)
from neo4j.debug import watch
from . import env
# from neo4j.debug import watch
#
# watch("neo4j")
@pytest.fixture(scope="session")
def uri():
return env.NEO4J_SERVER_URI
@pytest.fixture(scope="session")
def bolt_uri(uri):
if env.NEO4J_SCHEME != "bolt":
pytest.skip("Test requires bolt scheme")
return uri
@pytest.fixture(scope="session")
def _forced_bolt_uri():
return f"bolt://{env.NEO4J_HOST}:{env.NEO4J_PORT}"
@pytest.fixture(scope="session")
def neo4j_uri(uri):
    """Session-scoped URI restricted to the ``neo4j`` routing scheme.

    Fix: the original body referenced the module-level ``uri`` fixture
    *function* and returned that function object instead of the URI string.
    Declaring ``uri`` as a fixture parameter makes pytest inject the
    resolved value, mirroring the ``bolt_uri`` fixture above.
    """
    if env.NEO4J_SCHEME != "neo4j":
        pytest.skip("Test requires neo4j scheme")
    return uri
@pytest.fixture(scope="session")
def _forced_neo4j_uri():
return f"neo4j://{env.NEO4J_HOST}:{env.NEO4J_PORT}"
@pytest.fixture(scope="session")
def auth():
return env.NEO4J_USER, env.NEO4J_PASS
@pytest.fixture
def driver(uri, auth):
with GraphDatabase.driver(uri, auth=auth) as driver:
yield driver
@pytest.fixture
def bolt_driver(bolt_uri, auth):
with GraphDatabase.driver(bolt_uri, auth=auth) as driver:
yield driver
@pytest.fixture
def neo4j_driver(neo4j_uri, auth):
with GraphDatabase.driver(neo4j_uri, auth=auth) as driver:
yield driver
@wraps(AsyncGraphDatabase.driver)
def get_async_driver(*args, **kwargs):
return AsyncGraphDatabase.driver(*args, **kwargs)
@pytest_asyncio.fixture
async def async_driver(uri, auth):
async with get_async_driver(uri, auth=auth) as driver:
yield driver
@pytest_asyncio.fixture
async def async_bolt_driver(bolt_uri, auth):
async with get_async_driver(bolt_uri, auth=auth) as driver:
yield driver
@pytest_asyncio.fixture
async def METHOD_NAME(neo4j_uri, auth):
async with get_async_driver(neo4j_uri, auth=auth) as driver:
yield driver
@pytest.fixture(scope="session")
def _forced_bolt_driver(_forced_bolt_uri, auth):
    """Driver forced onto a ``bolt://`` URI regardless of configured scheme.

    Fixes: ``auth`` was referenced without being declared as a fixture
    parameter, so the raw fixture *function* object was passed as the
    credentials; it is now injected by pytest.  The fixture is also made
    session-scoped because the session-scoped ``server_info`` fixture below
    requests it, which would otherwise raise a ScopeMismatch error.
    """
    with GraphDatabase.driver(_forced_bolt_uri, auth=auth) as driver:
        yield driver
@pytest.fixture
def _forced_neo4j_driver(_forced_neo4j_uri, auth):
    """Driver forced onto a ``neo4j://`` URI regardless of configured scheme.

    Fix: ``auth`` was referenced without being declared as a fixture
    parameter, so the raw fixture *function* object was passed as the
    credentials; it is now injected by pytest.
    """
    with GraphDatabase.driver(_forced_neo4j_uri, auth=auth) as driver:
        yield driver
@pytest.fixture(scope="session")
def server_info(_forced_bolt_driver):
return _forced_bolt_driver.get_server_info()
@pytest.fixture(scope="session")
def bolt_protocol_version(server_info):
return server_info.protocol_version
def mark_requires_min_bolt_version(version="3.5"):
return pytest.mark.skipif(
env.NEO4J_VERSION < version,
reason=f"requires server version '{version}' or higher, "
f"found '{env.NEO4J_VERSION}'"
)
def mark_requires_edition(edition):
return pytest.mark.skipif(
env.NEO4J_EDITION != edition,
reason=f"requires server edition '{edition}', "
f"found '{env.NEO4J_EDITION}'"
)
@pytest.fixture
def session(driver):
with driver.session() as session:
yield session
@pytest.fixture
def bolt_session(bolt_driver):
with bolt_driver.session() as session:
yield session
@pytest.fixture
def neo4j_session(neo4j_driver):
with neo4j_driver.session() as session:
yield session
# async support for pytest-benchmark
# https://github.com/ionelmc/pytest-benchmark/issues/66
@pytest_asyncio.fixture
async def aio_benchmark(benchmark, event_loop):
def _wrapper(func, *args, **kwargs):
if asyncio.iscoroutinefunction(func):
@benchmark
def _():
return event_loop.run_until_complete(func(*args, **kwargs))
else:
benchmark(func, *args, **kwargs)
return _wrapper
@pytest.fixture
def watcher():
with watch("neo4j", out=sys.stdout, colour=True):
yield |
2,004 | recharge connection config | import uuid
from typing import Any, Dict, Generator
import pydash
import pytest
import requests
from faker import Faker
from requests import Response
from sqlalchemy.orm import Session
from fides.api.db import session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.saas_test_utils import poll_for_existence
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("recharge")
@pytest.fixture(scope="function")
def recharge_secrets(saas_config):
    # Resolve Recharge credentials: prefer values from the local saas_config,
    # fall back to the Vault-provided secrets loaded at module import.
    return {
        "domain": pydash.get(saas_config, "recharge.domain") or secrets["domain"],
        "api_key": pydash.get(saas_config, "recharge.api_key") or secrets["api_key"],
    }
@pytest.fixture(scope="function")
def recharge_identity_email(saas_config):
return (
pydash.get(saas_config, "recharge.identity_email") or secrets["identity_email"]
)
@pytest.fixture(scope="function")
def recharge_erasure_identity_email():
return f"{uuid.uuid4().hex}@email.com"
@pytest.fixture
def recharge_config() -> Dict[str, Any]:
return load_config_with_replacement(
"data/saas/config/recharge_config.yml",
"<instance_fides_key>",
"recharge_instance",
)
@pytest.fixture
def recharge_dataset() -> Dict[str, Any]:
return load_dataset_with_replacement(
"data/saas/dataset/recharge_dataset.yml",
"<instance_fides_key>",
"recharge_instance",
)[0]
@pytest.fixture(scope="function")
def METHOD_NAME(
db: session, recharge_config, recharge_secrets
) -> Generator:
fides_key = recharge_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": recharge_secrets,
"saas_config": recharge_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def recharge_dataset_config(
db: Session,
METHOD_NAME: ConnectionConfig,
recharge_dataset: Dict[str, Any],
) -> Generator:
fides_key = recharge_dataset["fides_key"]
METHOD_NAME.name = fides_key
METHOD_NAME.key = fides_key
METHOD_NAME.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, recharge_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": METHOD_NAME.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)
ctl_dataset.delete(db)
class RechargeTestClient:
"""Helper to call various Recharge data management requests"""
def __init__(self, METHOD_NAME: ConnectionConfig):
self.recharge_secrets = METHOD_NAME.secrets
self.headers = {
"X-Recharge-Access-Token": self.recharge_secrets["api_key"],
"Content-Type": "application/json",
}
self.base_url = f"https://{self.recharge_secrets['domain']}"
self.faker = Faker()
self.first_name = self.faker.first_name()
self.last_name = self.faker.last_name()
self.street_address = self.faker.street_address()
# 1: Creates, checks for existance and deletes customer
def create_customer(self, email) -> Response:
customer_body = {
"first_name": self.first_name,
"last_name": self.last_name,
"email": email,
"billing_address1": self.street_address,
"billing_city": "New York City",
"billing_province": "New York",
"billing_country": "United States",
"billing_first_name": self.first_name,
"billing_last_name": self.last_name,
"billing_zip": "10001",
}
customer_response: Response = requests.post(
url=f"{self.base_url}/customers",
json=customer_body,
headers=self.headers,
)
assert customer_response.ok
return customer_response
def get_customer(self, email):
customer_response: Response = requests.get(
url=f"{self.base_url}/customers",
params={"email": email},
headers=self.headers,
)
assert customer_response.ok
return customer_response.json()
def delete_customer(self, customer_id):
customer_response: Response = requests.delete(
url=f"{self.base_url}/customers/{customer_id}", headers=self.headers
)
assert customer_response.ok
# 2: Creates, checks for existance and deletes address
def create_address(self, customer_id) -> Response:
address_body = {
"customer_id": customer_id,
"address1": self.street_address,
"address2": self.street_address,
"city": "Los Angeles",
"company": "Recharge",
"country_code": "US",
"country": "United States",
"first_name": self.first_name,
"last_name": self.last_name,
"order_attributes": [{"name": "custom name", "value": "custom value"}],
"phone": "5551234567",
"province": "California",
"zip": "90001",
}
address_response = requests.post(
url=f"{self.base_url}/addresses",
headers=self.headers,
json=address_body,
)
assert address_response.ok
return address_response
def get_addresses(self, customer_id):
address_response: Response = requests.get(
url=f"{self.base_url}/addresses",
params={"customer_id": customer_id},
headers=self.headers,
)
assert address_response.ok
return address_response.json()
def delete_address(self, address_id):
address_response: Response = requests.delete(
url=f"{self.base_url}/addresses/{address_id}", headers=self.headers
)
assert address_response.ok
@pytest.fixture(scope="function")
def recharge_test_client(METHOD_NAME: RechargeTestClient) -> Generator:
test_client = RechargeTestClient(
METHOD_NAME=METHOD_NAME
)
yield test_client
@pytest.fixture(scope="function")
def recharge_erasure_data(
recharge_test_client: RechargeTestClient, recharge_erasure_identity_email: str
) -> Generator:
customer_response = recharge_test_client.create_customer(
recharge_erasure_identity_email
)
error_message = f"customer with email {recharge_erasure_identity_email} could not be created in Recharge"
poll_for_existence(
recharge_test_client.get_customer,
(recharge_erasure_identity_email,),
error_message=error_message,
)
customer_id = customer_response.json()["customer"]["id"]
address_response = recharge_test_client.create_address(customer_id)
error_message = f"address for customer '{recharge_erasure_identity_email}' could not be created in Recharge"
poll_for_existence(
recharge_test_client.get_addresses,
args=(customer_id,),
error_message=error_message,
)
address_id = address_response.json()["address"]["id"]
yield customer_response, address_response
recharge_test_client.delete_address(address_id)
recharge_test_client.delete_customer(customer_id) |
2,005 | post load parent | """
Faraday Penetration Test IDE
Copyright (C) 2016 Infobyte LLC (https://faradaysec.com/)
See the file 'doc/LICENSE' for the license information
"""
# Related third party imports
from flask import Blueprint, abort, make_response, jsonify
from filteralchemy import FilterSet, operators # pylint:disable=unused-import
from marshmallow import fields, post_load, ValidationError
from marshmallow.validate import OneOf, Range
from sqlalchemy.orm.exc import NoResultFound
# Local application imports
from faraday.server.models import (
Host,
Service,
Workspace,
db
)
from faraday.server.api.base import (
AutoSchema,
ReadWriteWorkspacedView,
FilterSetMeta,
FilterAlchemyMixin,
BulkDeleteWorkspacedMixin,
BulkUpdateWorkspacedMixin
)
from faraday.server.schemas import (
MetadataSchema,
MutableField,
PrimaryKeyRelatedField,
SelfNestedField,
)
from faraday.server.utils.command import set_command_id
services_api = Blueprint('services_api', __name__)
class ServiceSchema(AutoSchema):
    """Marshmallow schema for Service objects.

    Serializes services for the REST API and validates/normalizes incoming
    payloads — notably the single-element ``ports`` list and the ``parent``
    host reference resolved in the ``post_load`` hook.

    Fixes: the "exactly one element" ValidationError message was missing a
    space between its implicitly-concatenated string fragments (rendered as
    "exactly oneelement"), and ``Meta.fields`` listed ``'_id'`` twice.
    """
    _id = fields.Integer(attribute='id', dump_only=True)
    _rev = fields.String(default='', dump_only=True)
    owned = fields.Boolean(default=False)
    owner = PrimaryKeyRelatedField('username', dump_only=True,
                                   attribute='creator')
    # Port is loaded via ports (read-only scalar vs. writable one-item list).
    port = fields.Integer(dump_only=True, required=True,
                          validate=[Range(min=0, error="The value must be greater than or equal to 0")])
    ports = MutableField(fields.Integer(required=True,
                                        validate=[Range(min=0, error="The value must be greater than or equal to 0")]),
                         fields.Method(deserialize='load_ports'),
                         required=True,
                         attribute='port')
    status = fields.String(missing='open', validate=OneOf(Service.STATUSES),
                           allow_none=False)
    parent = fields.Integer(attribute='host_id')  # parent is not required for updates
    host_id = fields.Integer(attribute='host_id', dump_only=True)
    vulns = fields.Integer(attribute='vulnerability_count', dump_only=True)
    credentials = fields.Integer(attribute='credentials_count', dump_only=True)
    metadata = SelfNestedField(MetadataSchema())
    type = fields.Function(lambda obj: 'Service', dump_only=True)
    summary = fields.String(dump_only=True)
    command_id = fields.Int(required=False, load_only=True)

    @staticmethod
    def load_ports(value):
        """Validate the ``ports`` payload: a one-element list whose item is an
        int (or numeric string) in [1, 65535]; returned as a string.
        """
        if not isinstance(value, list):
            raise ValidationError('ports must be a list')
        if len(value) != 1:
            # Fix: a separating space was missing between the two fragments.
            raise ValidationError('ports must be a list with exactly one '
                                  'element')
        port = value.pop()
        if isinstance(port, str):
            try:
                port = int(port)
            except ValueError as e:
                raise ValidationError('The value must be a number') from e
        if port > 65535 or port < 1:
            raise ValidationError('The value must be in the range [1-65535]')
        return str(port)

    @post_load
    def METHOD_NAME(self, data, **kwargs):
        """Gets the host_id from parent attribute. Pops it and tries to
        get a Host with that id in the corresponding workspace.
        """
        host_id = data.pop('host_id', None)
        if self.context['updating']:
            if host_id is None:
                # Partial update?
                return data
            # A service may never be moved to a different host.
            if 'object' in self.context:
                if host_id != self.context['object'].parent.id:
                    raise ValidationError('Can\'t change service parent.')
            else:
                if any(host_id != obj.parent.id for obj in self.context['objects']):
                    raise ValidationError('Can\'t change service parent.')
        else:
            if not host_id:
                raise ValidationError('Parent id is required when creating a service.')
            try:
                data['host'] = Host.query.join(Workspace).filter(
                    Workspace.name == self.context['workspace_name'],
                    Host.id == host_id
                ).one()
            except NoResultFound as e:
                raise ValidationError(f'Host with id {host_id} not found') from e
        return data

    class Meta:
        model = Service
        # Fix: removed a duplicated '_id' entry from this tuple.
        fields = ('id', '_id', 'status', 'parent', 'type',
                  'protocol', 'description', '_rev',
                  'owned', 'owner', 'credentials', 'vulns',
                  'name', 'version', 'port', 'ports',
                  'metadata', 'summary', 'host_id', 'command_id')
class ServiceFilterSet(FilterSet):
    # FilterAlchemy filter set for the services endpoint: equality-only
    # filtering on id, host_id, protocol, name and port.
    class Meta(FilterSetMeta):
        model = Service
        fields = ('id', 'host_id', 'protocol', 'name', 'port')
        default_operator = operators.Equal
        operators = (operators.Equal,)
class ServiceView(FilterAlchemyMixin, ReadWriteWorkspacedView, BulkDeleteWorkspacedMixin, BulkUpdateWorkspacedMixin):
    # REST view exposing workspaced CRUD plus filtering and bulk
    # update/delete for Service objects.
    route_base = 'services'
    model_class = Service
    schema_class = ServiceSchema
    count_extra_filters = [Service.status == 'open']
    # Eagerly load counters/relations used by the schema to avoid N+1 queries.
    get_undefer = [Service.credentials_count, Service.vulnerability_count]
    get_joinedloads = [Service.credentials, Service.update_user]
    filterset_class = ServiceFilterSet

    def _envelope_list(self, objects, pagination_metadata=None):
        # Wrap serialized services in the legacy
        # {'services': [{'id', 'key', 'value'}]} envelope expected by clients.
        services = []
        for service in objects:
            services.append({
                'id': service['_id'],
                'key': service['_id'],
                'value': service
            })
        return {
            'services': services,
        }

    def _perform_create(self, data, **kwargs):
        # Pop command_id before creation; if present, link the created
        # service to that command afterwards.
        command_id = data.pop('command_id', None)
        # NOTE(review): 'port' here is expected to be the string produced by
        # ServiceSchema.load_ports; the "1" fallback presumably only covers
        # payloads that bypassed the schema — confirm against callers.
        port_number = data.get("port", "1")
        if not port_number.isdigit():
            abort(make_response(jsonify(message="Invalid Port number"), 400))
        obj = super()._perform_create(data, **kwargs)
        if command_id:
            set_command_id(db.session, obj, True, command_id)
        return obj
ServiceView.register(services_api) |
2,006 | create mock svc record | from unittest import TestCase
from tapiriik.services import Service, ServiceRecord, ServiceBase
from tapiriik.services.interchange import Activity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Lap, Location
from datetime import datetime, timedelta
import random
import pytz
from tapiriik.database import db
class MockServiceA(ServiceBase):
    # Minimal stub service supporting a single activity type, used to
    # exercise sync logic without any network I/O.
    ID = "mockA"
    SupportedActivities = [ActivityType.Rowing]
class MockServiceB(ServiceBase):
    # Second stub service with a partially-overlapping activity set, so tests
    # can cover type-compatibility between services.
    ID = "mockB"
    SupportedActivities = [ActivityType.Rowing, ActivityType.Wheelchair]
class TapiriikTestCase(TestCase):
    """TestCase with granular equality assertions for interchange Activity objects.

    Improvements: removed the redundant ``else`` after ``return`` (guard
    clause) and replaced ``for i in range(len(...))`` index loops with
    ``zip`` — safe because the lengths are asserted equal first.
    """

    def assertActivitiesEqual(self, a, b):
        """Compare activity records with more granular asserts, so the first
        differing field is reported instead of an opaque inequality."""
        if a == b:
            return
        self.assertEqual(a.StartTime, b.StartTime)
        self.assertEqual(a.EndTime, b.EndTime)
        self.assertEqual(a.Type, b.Type)
        self.assertEqual(a.Stats.Distance, b.Stats.Distance)
        self.assertEqual(a.Name, b.Name)
        self.assertLapsListsEqual(a.Laps, b.Laps)

    def assertLapsListsEqual(self, lapsa, lapsb):
        """Assert two lap lists are element-wise equal."""
        self.assertEqual(len(lapsa), len(lapsb))
        for la, lb in zip(lapsa, lapsb):
            self.assertLapsEqual(la, lb)

    def assertLapsEqual(self, la, lb):
        """Assert lap boundaries and every waypoint match field-by-field."""
        self.assertEqual(la.StartTime, lb.StartTime)
        self.assertEqual(la.EndTime, lb.EndTime)
        self.assertEqual(len(la.Waypoints), len(lb.Waypoints))
        for wpa, wpb in zip(la.Waypoints, lb.Waypoints):
            # Compare timestamps in UTC so equal instants expressed in
            # different zones compare equal.
            self.assertEqual(wpa.Timestamp.astimezone(pytz.utc), wpb.Timestamp.astimezone(pytz.utc))
            self.assertEqual(wpa.Location.Latitude, wpb.Location.Latitude)
            self.assertEqual(wpa.Location.Longitude, wpb.Location.Longitude)
            self.assertEqual(wpa.Location.Altitude, wpb.Location.Altitude)
            self.assertEqual(wpa.Type, wpb.Type)
            self.assertEqual(wpa.HR, wpb.HR)
            self.assertEqual(wpa.Calories, wpb.Calories)
            self.assertEqual(wpa.Power, wpb.Power)
            self.assertEqual(wpa.Cadence, wpb.Cadence)
            self.assertEqual(wpa.Temp, wpb.Temp)
            self.assertEqual(wpa.Location, wpb.Location)
            self.assertEqual(wpa, wpb)
class TestTools:
    # Factory helpers used by the test-suite to fabricate users, service
    # records and activities. Methods take no self/cls and are always invoked
    # on the class itself (e.g. TestTools.create_blank_activity(...)).

    def create_mock_user():
        # Touches the test collection (so a DB connection exists), then
        # returns a minimal user document with a random id.
        db.test.insert({"asd": "asdd"})
        return {"_id": str(random.randint(1, 1000))}

    def METHOD_NAME(svc):
        # Fabricate a ServiceRecord for the given service with random ids.
        return ServiceRecord({"Service": svc.ID, "_id": str(random.randint(1, 1000)), "ExternalID": str(random.randint(1, 1000))})

    def create_mock_servicedata(svc, record=None):
        # Minimal per-service activity data blob.
        return {"ActivityID": random.randint(1, 1000), "Connection": record}

    def create_mock_servicedatacollection(svc, record=None):
        record = record if record else TestTools.METHOD_NAME(svc)
        return {record._id: TestTools.create_mock_servicedata(svc, record=record)}

    def create_blank_activity(svc=None, actType=ActivityType.Other, record=None):
        # Minimal 42-second activity with no waypoints; service linkage only
        # if svc is provided.
        act = Activity()
        act.Type = actType
        if svc:
            record = record if record else TestTools.METHOD_NAME(svc)
            act.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svc, record=record)
        act.StartTime = datetime.now()
        act.EndTime = act.StartTime + timedelta(seconds=42)
        act.CalculateUID()
        return act

    def create_random_activity(svc=None, actType=ActivityType.Other, tz=False, record=None, withPauses=True, withLaps=True):
        ''' creates completely random activity with valid waypoints and data '''
        # NOTE(review): svc.Supports* flags are read below without a None
        # check, so unlike create_blank_activity this effectively requires a
        # non-None svc — confirm against callers.
        act = TestTools.create_blank_activity(svc, actType, record=record)
        # tz=True selects a fixed test timezone; any other non-False value is
        # used as the timezone itself.
        if tz is True:
            tz = pytz.timezone("America/Atikokan")
            act.TZ = tz
        elif tz is not False:
            act.TZ = tz
        if act.CountTotalWaypoints() > 0:
            raise ValueError("Waypoint list already populated")
        # this is entirely random in case the testing account already has events in it (API doesn't support delete, etc)
        act.StartTime = datetime(2011, 12, 13, 14, 15, 16)
        if tz is not False:
            # pytz zones need localize(); plain tzinfo objects use replace().
            if hasattr(tz, "localize"):
                act.StartTime = tz.localize(act.StartTime)
            else:
                act.StartTime = act.StartTime.replace(tzinfo=tz)
        act.EndTime = act.StartTime + timedelta(0, random.randint(60 * 5, 60 * 60))  # don't really need to upload 1000s of pts to test this...
        act.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=random.random() * 10000)
        act.Name = str(random.random())
        paused = False
        waypointTime = act.StartTime
        backToBackPauses = False
        act.Laps = []
        lap = Lap(startTime=act.StartTime)
        while waypointTime < act.EndTime:
            wp = Waypoint()
            if waypointTime == act.StartTime:
                wp.Type = WaypointType.Start
            wp.Timestamp = waypointTime
            wp.Location = Location(random.random() * 180 - 90, random.random() * 180 - 90, random.random() * 1000)  # this is gonna be one intense activity
            # Waypoint metrics must start unset; only service-supported
            # fields are then populated.
            if not (wp.HR == wp.Cadence == wp.Calories == wp.Power == wp.Temp == None):
                raise ValueError("Waypoint did not initialize cleanly")
            if svc.SupportsHR:
                wp.HR = float(random.randint(90, 180))
            if svc.SupportsPower:
                wp.Power = float(random.randint(0, 1000))
            if svc.SupportsCalories:
                wp.Calories = float(random.randint(0, 500))
            if svc.SupportsCadence:
                wp.Cadence = float(random.randint(0, 100))
            if svc.SupportsTemp:
                wp.Temp = float(random.randint(0, 100))
            if withPauses and (random.randint(40, 50) == 42 or backToBackPauses) and not paused:  # pause quite often
                wp.Type = WaypointType.Pause
                paused = True
            elif paused:
                paused = False
                wp.Type = WaypointType.Resume
                backToBackPauses = not backToBackPauses
            waypointTime += timedelta(0, int(random.random() + 9.5))  # 10ish seconds
            lap.Waypoints.append(wp)
            if waypointTime > act.EndTime:
                # Clamp the final waypoint to the activity end.
                wp.Timestamp = act.EndTime
                wp.Type = WaypointType.End
            elif withLaps and wp.Timestamp < act.EndTime and random.randint(40, 60) == 42:
                # occasionally start new laps
                lap.EndTime = wp.Timestamp
                act.Laps.append(lap)
                lap = Lap(startTime=waypointTime)
        # Final lap
        lap.EndTime = act.EndTime
        act.Laps.append(lap)
        if act.CountTotalWaypoints() == 0:
            raise ValueError("No waypoints populated")
        act.CalculateUID()
        act.EnsureTZ()
        return act

    def create_mock_service(id):
        # Register a single mock service under the given id.
        mock = MockServiceA()
        mock.ID = id
        Service._serviceMappings[id] = mock
        return mock

    def create_mock_services():
        # Register both mock services under their well-known ids.
        mockA = MockServiceA()
        mockB = MockServiceB()
        Service._serviceMappings["mockA"] = mockA
        Service._serviceMappings["mockB"] = mockB
        return (mockA, mockB)
import numpy as np
import common.transformations.orientation as orient
## -- hardcoded hardware params --
# Focal lengths are in pixels; frame sizes are (width, height) in pixels.
# Prefixes: eon/tici = device generation; f/d/e = forward/driver/wide cameras.
eon_f_focal_length = 910.0
eon_d_focal_length = 650.0
tici_f_focal_length = 2648.0
tici_e_focal_length = tici_d_focal_length = 567.0  # probably wrong? magnification is not consistent across frame

eon_f_frame_size = (1164, 874)
eon_d_frame_size = (816, 612)
tici_f_frame_size = tici_e_frame_size = tici_d_frame_size = (1928, 1208)

# aka 'K' aka camera_frame_from_view_frame
# Pinhole intrinsics: focal length on the diagonal, principal point at the
# frame center.
eon_fcam_intrinsics = np.array([
    [eon_f_focal_length, 0.0, float(eon_f_frame_size[0])/2],
    [0.0, eon_f_focal_length, float(eon_f_frame_size[1])/2],
    [0.0, 0.0, 1.0]])
eon_intrinsics = eon_fcam_intrinsics  # xx

eon_dcam_intrinsics = np.array([
    [eon_d_focal_length, 0.0, float(eon_d_frame_size[0])/2],
    [0.0, eon_d_focal_length, float(eon_d_frame_size[1])/2],
    [0.0, 0.0, 1.0]])

tici_fcam_intrinsics = np.array([
    [tici_f_focal_length, 0.0, float(tici_f_frame_size[0])/2],
    [0.0, tici_f_focal_length, float(tici_f_frame_size[1])/2],
    [0.0, 0.0, 1.0]])

tici_dcam_intrinsics = np.array([
    [tici_d_focal_length, 0.0, float(tici_d_frame_size[0])/2],
    [0.0, tici_d_focal_length, float(tici_d_frame_size[1])/2],
    [0.0, 0.0, 1.0]])

tici_ecam_intrinsics = tici_dcam_intrinsics

# aka 'K_inv' aka view_frame_from_camera_frame
eon_fcam_intrinsics_inv = np.linalg.inv(eon_fcam_intrinsics)
eon_intrinsics_inv = eon_fcam_intrinsics_inv  # xx

tici_fcam_intrinsics_inv = np.linalg.inv(tici_fcam_intrinsics)
tici_ecam_intrinsics_inv = np.linalg.inv(tici_ecam_intrinsics)

# Defaults used by normalize/denormalize below: the tici forward camera.
FULL_FRAME_SIZE = tici_f_frame_size
FOCAL = tici_f_focal_length
fcam_intrinsics = tici_fcam_intrinsics

W, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]

# device/mesh : x->forward, y-> right, z->down
# view : x->right, y->down, z->forward
# Axis permutation between the two conventions (pure rotation).
device_frame_from_view_frame = np.array([
    [0., 0., 1.],
    [1., 0., 0.],
    [0., 1., 0.]
])
view_frame_from_device_frame = device_frame_from_view_frame.T
def get_calib_from_vp(vp):
    """Recover (roll, pitch, yaw) calibration angles from a vanishing point.

    Roll is unobservable from a single vanishing point, so 0 is returned.
    """
    vp_x, vp_y = normalize(vp)
    yaw_calib = np.arctan(vp_x)
    pitch_calib = -np.arctan(vp_y * np.cos(yaw_calib))
    return 0, pitch_calib, yaw_calib
# aka 'extrinsic_matrix'
# road : x->forward, y -> left, z->up
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
    """Build the 3x4 extrinsic matrix mapping road-frame points to view frame."""
    # Flip y and z to go from road axes (y left, z up) to device axes.
    rot_device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))
    rot_view_from_road = view_frame_from_device_frame.dot(rot_device_from_road)
    translation = [[0], [height], [0]]
    return np.hstack((rot_view_from_road, translation))
# aka 'extrinsic_matrix'
def METHOD_NAME(roll, pitch, yaw, height):
    """Build the 3x4 extrinsic matrix mapping calib-frame points to view frame."""
    rot_device_from_calib = orient.rot_from_euler([roll, pitch, yaw])
    rot_view_from_calib = view_frame_from_device_frame.dot(rot_device_from_calib)
    return np.hstack((rot_view_from_calib, [[0], [height], [0]]))
def vp_from_ke(m):
    """
    Computes the vanishing point from the product of the intrinsic and extrinsic
    matrices C = KE.

    The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T
    """
    # Project the first column of C and dehomogenize by its third component.
    u = m[0, 0] / m[2, 0]
    v = m[1, 0] / m[2, 0]
    return (u, v)
def roll_from_ke(m):
    # note: different from calibration.h/RollAnglefromKE: i think that one's just wrong
    # Eliminate the shared third-row factor, then take the angle of the
    # resulting direction.
    scale = m[2, 0] / m[2, 1]
    numerator = -(m[1, 0] - m[1, 1] * scale)
    denominator = -(m[0, 0] - m[0, 1] * scale)
    return np.arctan2(numerator, denominator)
def normalize(img_pts, intrinsics=fcam_intrinsics):
    """Convert image (pixel) coordinates to normalized camera coordinates.

    Accepts a single point or an array of points; any point with a negative
    coordinate is mapped to nan.
    """
    inv_k = np.linalg.inv(intrinsics)
    pts = np.array(img_pts)
    original_shape = pts.shape
    pts = np.atleast_2d(pts)
    # Append the homogeneous coordinate, then apply K^-1.
    pts_h = np.hstack((pts, np.ones((pts.shape[0], 1))))
    normalized = pts_h.dot(inv_k.T)
    # Rows containing any negative value are invalid.
    normalized[(pts_h < 0).any(axis=1)] = np.nan
    return normalized[:, :2].reshape(original_shape)
def denormalize(img_pts, intrinsics=fcam_intrinsics, width=np.inf, height=np.inf):
    """Map normalized camera coordinates back to pixel coordinates.

    Accepts a single point or an array of points. When finite bounds are
    supplied, points falling outside [0, width] x [0, height] become nan.
    """
    pts = np.array(img_pts)
    original_shape = pts.shape
    pts = np.atleast_2d(pts)
    pts_h = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=pts.dtype)))
    projected = pts_h.dot(intrinsics.T)
    # Invalidate rows out of bounds on each axis (width first, then height).
    for axis, bound in ((0, width), (1, height)):
        if np.isfinite(bound):
            projected[projected[:, axis] > bound] = np.nan
            projected[projected[:, axis] < 0] = np.nan
    return projected[:, :2].reshape(original_shape)
def device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):
    # device from ecef frame
    # device frame is x -> forward, y-> right, z -> down
    # accepts single pt or array of pts
    input_shape = pt_ecef.shape
    pt_ecef = np.atleast_2d(pt_ecef)
    # Rotation taking device coords to ECEF; transposing inverts it since
    # rotation matrices are orthogonal.
    ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)
    device_from_ecef_rot = ecef_from_device_rot.T
    # Translate to the device origin, then rotate each point into device frame.
    pt_ecef_rel = pt_ecef - pos_ecef
    pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)
    return pt_device.reshape(input_shape)
def img_from_device(pt_device):
    # img coordinates from pts in device frame
    # first transforms to view frame, then to img coords
    # accepts single pt or array of pts
    input_shape = pt_device.shape
    pt_device = np.atleast_2d(pt_device)
    pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)

    # This function should never return negative depths
    pt_view[pt_view[:, 2] < 0] = np.nan

    pt_img = pt_view / pt_view[:, 2:3]
    # Slice to (u, v) *before* restoring the input shape: previously the code
    # reshaped back to input_shape first and then applied [:, :2], which
    # raised IndexError for a single 1-D point despite single points being
    # advertised as supported. A (n, 3) batch yields (n, 2) either way.
    return pt_img[:, :2].reshape(input_shape[:-1] + (2,))
|
from collections import defaultdict
import pytest
from diofant import (Basic, Dict, FiniteSet, Integer, Matrix, Rational, Tuple,
false, sympify, true)
from diofant.abc import p, q, r, s, x, y, z
from diofant.core.compatibility import is_sequence, iterable
from diofant.core.containers import tuple_wrapper
__all__ = ()
def test_Tuple():
    """Basic Tuple behaviour: construction, slicing, substitution, rebuild."""
    t = (1, 2, 3, 4)
    st = Tuple(*t)
    assert set(sympify(t)) == set(st)
    assert len(t) == len(st)
    assert set(sympify(t[:2])) == set(st[:2])
    assert isinstance(st[:], Tuple)
    assert st == Tuple(1, 2, 3, 4)
    # Rebuilding a Tuple from its own args yields an equal Tuple.
    assert st.func(*st.args) == st
    t2 = (p, q, r, s)
    st2 = Tuple(*t2)
    assert st2.atoms() == set(t2)
    assert st == st2.subs({p: 1, q: 2, r: 3, s: 4})
    # issue sympy/sympy#5505
    assert all(isinstance(arg, Basic) for arg in st.args)
    assert Tuple(p, 1).subs({p: 0}) == Tuple(0, 1)
    # Substitution recurses into nested Tuples.
    assert Tuple(p, Tuple(p, 1)).subs({p: 0}) == Tuple(0, Tuple(0, 1))
    assert Tuple(t2) == Tuple(Tuple(*t2))
def METHOD_NAME():
    """Membership: a Tuple is found at the top level but not inside nesting."""
    inner_a = Tuple(1)
    inner_b = Tuple(2)
    container = Tuple(1, 2, 3, inner_a, Tuple(inner_b))
    assert inner_a in container
    assert inner_b not in container
def test_Tuple_concatenation():
    """Tuple + tuple concatenates in either order; scalars raise TypeError."""
    assert Tuple(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
    assert (1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
    assert Tuple(1, 2) + (3, 4) == Tuple(1, 2, 3, 4)
    pytest.raises(TypeError, lambda: Tuple(1, 2) + 3)
    pytest.raises(TypeError, lambda: 1 + Tuple(2, 3))

    # the Tuple case in __radd__ is only reached when a subclass is involved
    class Tuple2(Tuple):
        def __radd__(self, other):
            return Tuple.__radd__(self, other + other)
    # Tuple2.__radd__ doubles the left operand, proving it was dispatched to.
    assert Tuple(1, 2) + Tuple2(3, 4) == Tuple(1, 2, 1, 2, 3, 4)
    assert Tuple2(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
def test_Tuple_equality():
    """Tuple compares element-wise against plain tuples and other Tuples."""
    for equal_other in ((1, 2), Tuple(1, 2)):
        assert (Tuple(1, 2) == equal_other) is True
        assert (Tuple(1, 2) != equal_other) is False
    for unequal_other in ((1, 3), Tuple(1, 3)):
        assert (Tuple(1, 2) == unequal_other) is False
        assert (Tuple(1, 2) != unequal_other) is True
def test_Tuple_comparision():
    """Ordering comparisons on Tuples return diofant booleans (true/false)."""
    cases = [
        (Tuple(1, 3) >= Tuple(-10, 30), true),
        (Tuple(1, 3) <= Tuple(-10, 30), false),
        (Tuple(1, 3) >= Tuple(1, 3), true),
        (Tuple(1, 3) <= Tuple(1, 3), true),
    ]
    for result, expected in cases:
        assert result is expected
def test_Tuple_tuple_count():
    """tuple_count reports how many times a value occurs in the Tuple."""
    tuples = [
        Tuple(0, 1, 2, 3),
        Tuple(0, 4, 1, 2, 3),
        Tuple(0, 4, 1, 4, 2, 3),
        Tuple(0, 4, 1, 4, 2, 4, 3),
    ]
    for expected_count, t in enumerate(tuples):
        assert t.tuple_count(4) == expected_count
def test_Tuple_index():
    """index finds the position of a value; out-of-range lookups raise."""
    # Insert 4 at every possible position and check index() reports it.
    for position in range(5):
        elements = [0, 1, 2, 3]
        elements.insert(position, 4)
        assert Tuple(*elements).index(4) == position
    # Missing value, or value excluded by start/stop bounds.
    pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3).index(4))
    pytest.raises(ValueError, lambda: Tuple(4, 0, 1, 2, 3).index(4, 1))
    pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3, 4).index(4, 1, 4))
def test_Tuple_mul():
    """Repetition works with python ints and Integer; non-integers raise."""
    doubled = Tuple(1, 2, 3, 1, 2, 3)
    for factor in (2, Integer(2)):
        assert Tuple(1, 2, 3) * factor == doubled
        assert factor * Tuple(1, 2, 3) == doubled
    pytest.raises(TypeError, lambda: Tuple(1, 2, 3) * Rational(1, 2))
    pytest.raises(TypeError, lambda: Rational(1, 2) * Tuple(1, 2, 3))
def test_tuple_wrapper():
    """tuple_wrapper converts plain-tuple arguments to Tuple, leaving others."""
    @tuple_wrapper
    def wrap_tuples_and_return(*t):
        return t
    # Non-tuple arguments pass through unchanged.
    assert wrap_tuples_and_return(p, 1) == (p, 1)
    # Tuple arguments are wrapped, wherever they appear.
    assert wrap_tuples_and_return((p, 1)) == (Tuple(p, 1),)
    assert wrap_tuples_and_return(1, (p, 2), 3) == (1, Tuple(p, 2), 3)
def test_iterable_is_sequence():
    """is_sequence accepts only ordered containers; iterable is broader."""
    ordered = [[], (), Tuple(), Matrix([[]])]
    unordered = [set()]
    not_diofant_iterable = [{}, '']
    for obj in ordered:
        assert is_sequence(obj)
        assert iterable(obj)
    for obj in unordered:
        assert not is_sequence(obj)
        assert iterable(obj)
    for obj in not_diofant_iterable:
        # Excluded by default, but exclude=None lifts the exclusion.
        assert not iterable(obj)
        assert iterable(obj, exclude=None)
def test_Dict():
    """Exercise the immutable Dict container: lookup, iteration, construction."""
    d = Dict({x: 1, y: 2, z: 3})
    assert d[x] == 1
    assert d[y] == 2
    pytest.raises(KeyError, lambda: d[2])
    assert len(d) == 3
    assert set(d.keys()) == {x, y, z}
    assert set(d.values()) == {1, 2, 3}
    assert d.get(5, 'default') == 'default'
    assert x in d
    assert z in d
    assert 5 not in d
    assert d.has(x)
    assert d.has(1)  # Diofant Basic .has method

    # Test input types
    # input - a python dict
    # input - items as args - Diofant style
    assert (Dict({x: 1, y: 2, z: 3}) ==
            Dict((x, 1), (y, 2), (z, 3)))

    # A single iterable of pairs (not unpacked) is rejected.
    pytest.raises(TypeError, lambda: Dict(((x, 1), (y, 2), (z, 3))))
    with pytest.raises(NotImplementedError):
        d[5] = 6  # assert immutability
    assert set(d.items()) == {Tuple(x, 1), Tuple(y, 2), Tuple(z, 3)}
    assert set(d) == {x, y, z}
    assert str(d) == '{x: 1, y: 2, z: 3}'
    assert repr(d) == ("Dict(Tuple(Symbol('x'), Integer(1)), "
                       "Tuple(Symbol('y'), Integer(2)), "
                       "Tuple(Symbol('z'), Integer(3)))")

    # Test creating a Dict from a Dict.
    d = Dict({x: 1, y: 2, z: 3})
    assert d == Dict(d)

    # Test for supporting defaultdict
    d = defaultdict(int)
    assert d[x] == 0
    assert d[y] == 0
    assert d[z] == 0
    assert Dict(d)
    d = Dict(d)
    assert len(d) == 3
    assert set(d) == {x, y, z}
    assert set(d.values()) == {0}

    # Dicts are hashable and usable as FiniteSet elements.
    assert list(FiniteSet(*[Dict({x: 1}), Dict({y: 2})]))[0] == Dict({x: 1})
def test_eq_and_args():
    """Containers that normalize argument order still compare and sort consistently."""
    # issue sympy/sympy#5788
    args = [(1, 2), (2, 1)]
    for o in [Dict, Tuple, FiniteSet]:
        # Tuple is the only order-preserving container here, so it is the
        # only one where reversing the args changes the object.
        if o != Tuple:
            assert o(*args) == o(*reversed(args))
        pair = [o(*args), o(*reversed(args))]
        rpair = reversed(pair)
        assert sorted(pair) == sorted(rpair)
        assert set(o(*args))  # doesn't fail
# stdlib
from textwrap import dedent
# third party
from faker import Faker
import pytest
# syft absolute
import syft
from syft.client.client import SyftClient
from syft.node.worker import Worker
from syft.service.action.action_object import ActionObject
from syft.service.action.action_permissions import ActionPermission
from syft.service.code.user_code import UserCodeStatus
from syft.service.context import ChangeContext
from syft.service.request.request import ActionStoreChange
from syft.service.request.request import ObjectMutation
from syft.service.request.request import RequestStatus
from syft.service.request.request import UserCodeStatusChange
from syft.service.request.request_service import RequestService
from syft.service.response import SyftError
from syft.service.response import SyftSuccess
from syft.service.settings.settings_service import SettingsService
from syft.store.document_store import DocumentStore
from syft.store.linked_obj import LinkedObject
@pytest.fixture
def request_service(document_store: DocumentStore):
    """Fixture: a RequestService backed by the shared document store."""
    return RequestService(store=document_store)
def get_ds_client(faker: Faker, root_client: SyftClient, guest_client: SyftClient):
    """Register a fresh data-scientist user and return the logged-in guest client."""
    email = faker.email()
    password = "mysecretpassword"
    registration = root_client.register(
        name=faker.name(),
        email=email,
        password=password,
        password_verify=password,
    )
    assert isinstance(registration, SyftSuccess)
    guest_client.login(email=email, password=password)
    return guest_client
def METHOD_NAME(worker: Worker):
    """ObjectMutation applies an attribute change to a stored object and is undoable."""
    root_client = worker.root_client
    setting = root_client.api.services.settings.get()
    linked_obj = LinkedObject.from_obj(setting, SettingsService, node_uid=worker.id)
    original_name = setting.organization
    new_name = "Test Organization"

    object_mutation = ObjectMutation(
        linked_obj=linked_obj,
        attr_name="organization",
        match_type=True,
        value=new_name,
    )

    change_context = ChangeContext(
        node=worker,
        approving_user_credentials=root_client.credentials.verify_key,
    )

    # Applying the mutation rewrites the attribute on the stored settings.
    result = object_mutation.apply(change_context)
    assert result.is_ok()
    setting = root_client.api.services.settings.get()
    assert setting.organization == new_name

    # Undo restores the original value.
    object_mutation.undo(context=change_context)
    setting = root_client.api.services.settings.get()
    assert setting.organization == original_name
def test_action_store_change(faker: Faker, worker: Worker):
    """ActionStoreChange grants READ to a data scientist; undo revokes it."""
    root_client = worker.root_client

    dummy_data = [1, 2, 3]
    data = ActionObject.from_obj(dummy_data)
    action_obj = root_client.api.services.action.set(data)
    assert action_obj.get() == dummy_data

    ds_client = get_ds_client(faker, root_client, worker.guest_client)

    action_object_link = LinkedObject.from_obj(
        action_obj, node_uid=action_obj.syft_node_uid
    )
    permission_change = ActionStoreChange(
        linked_obj=action_object_link,
        apply_permission_type=ActionPermission.READ,
    )

    change_context = ChangeContext(
        node=worker,
        approving_user_credentials=root_client.credentials.verify_key,
        requesting_user_credentials=ds_client.credentials.verify_key,
    )

    result = permission_change.apply(change_context)
    assert result.is_ok()

    # With READ granted, the data scientist can fetch the object's value.
    action_obj_ptr = ds_client.api.services.action.get_pointer(action_obj.id)
    result = action_obj_ptr.get()
    assert result == dummy_data

    # Undo revokes the permission; the fetch now fails.
    result = permission_change.undo(change_context)
    assert result.is_ok()
    result = action_obj_ptr.get()
    assert isinstance(result, SyftError)
def test_user_code_status_change(faker: Faker, worker: Worker):
    """UserCodeStatusChange approves submitted user code; undo reverts approval."""
    root_client = worker.root_client
    dummy_data = [1, 2, 3]
    data = ActionObject.from_obj(dummy_data)
    action_obj = root_client.api.services.action.set(data)

    ds_client = get_ds_client(faker, root_client, worker.guest_client)

    @syft.syft_function(
        input_policy=syft.ExactMatch(data=action_obj),
        output_policy=syft.SingleExecutionExactOutput(),
    )
    def simple_function(data):
        return sum(data)

    # dedent so the decorated source parses regardless of local indentation.
    simple_function.code = dedent(simple_function.code)
    result = ds_client.code.submit(simple_function)
    assert isinstance(result, SyftSuccess)

    user_code = ds_client.code.get_all()[0]
    linked_obj = LinkedObject.from_obj(user_code, node_uid=worker.id)

    user_code_change = UserCodeStatusChange(
        value=UserCodeStatus.APPROVED, linked_obj=linked_obj
    )

    change_context = ChangeContext(
        node=worker,
        approving_user_credentials=root_client.credentials.verify_key,
        requesting_user_credentials=ds_client.credentials.verify_key,
    )

    result = user_code_change.apply(change_context)
    user_code = ds_client.code.get_all()[0]
    assert user_code.status.approved

    result = user_code_change.undo(change_context)
    assert result.is_ok()
    user_code = ds_client.code.get_all()[0]
    assert not user_code.status.approved
def test_code_accept_deny(faker: Faker, worker: Worker):
    """A code-execution request can be approved (depositing a result) and later denied."""
    root_client = worker.root_client
    dummy_data = [1, 2, 3]
    data = ActionObject.from_obj(dummy_data)
    action_obj = root_client.api.services.action.set(data)

    ds_client = get_ds_client(faker, root_client, worker.guest_client)

    @syft.syft_function(
        input_policy=syft.ExactMatch(data=action_obj),
        output_policy=syft.SingleExecutionExactOutput(),
    )
    def simple_function(data):
        return sum(data)

    simple_function.code = dedent(simple_function.code)
    result = ds_client.code.request_code_execution(simple_function)
    assert not isinstance(result, SyftError)

    # Approving by depositing a result makes that result available to the DS.
    request = root_client.requests.get_all()[0]
    result = request.accept_by_depositing_result(result=10)
    assert isinstance(result, SyftSuccess)
    request = root_client.requests.get_all()[0]
    assert request.status == RequestStatus.APPROVED

    result = ds_client.code.simple_function(data=action_obj)
    assert result.get() == 10

    # Denying afterwards rejects the request and revokes approval.
    result = request.deny(reason="Function output needs differential privacy !!")
    assert isinstance(result, SyftSuccess)
    request = root_client.requests.get_all()[0]
    assert request.status == RequestStatus.REJECTED

    user_code = ds_client.code.get_all()[0]
    assert not user_code.status.approved
    result = ds_client.code.simple_function(data=action_obj)
    assert isinstance(result, SyftError)
from pathlib import PurePosixPath
from typing import Any, Optional
import pytest
from litestar import MediaType, get
from litestar.datastructures import Cookie
from litestar.exceptions import ImproperlyConfiguredException
from litestar.response import Response
from litestar.response.base import ASGIResponse
from litestar.serialization import default_serializer, get_serializer
from litestar.status_codes import (
HTTP_100_CONTINUE,
HTTP_101_SWITCHING_PROTOCOLS,
HTTP_102_PROCESSING,
HTTP_103_EARLY_HINTS,
HTTP_200_OK,
HTTP_204_NO_CONTENT,
HTTP_304_NOT_MODIFIED,
HTTP_500_INTERNAL_SERVER_ERROR,
)
from litestar.testing import create_test_client
from litestar.types import Empty
def test_response_headers() -> None:
    """Custom headers pass through alongside the computed content headers."""
    @get("/")
    def handler() -> Response:
        return Response(content="hello world", media_type=MediaType.TEXT, headers={"first": "123", "second": "456"})

    with create_test_client(handler) as client:
        headers = client.get("/").headers
        expected = {
            "first": "123",
            "second": "456",
            "content-length": "11",
            "content-type": "text/plain; charset=utf-8",
        }
        for name, value in expected.items():
            assert headers[name] == value
def test_response_headers_do_not_lowercase_values() -> None:
    """Header *values* keep their casing; only names are case-insensitive."""
    # reproduces: https://github.com/litestar-org/litestar/issues/693
    @get("/")
    def handler() -> Response:
        return Response(content="hello world", media_type=MediaType.TEXT, headers={"foo": "BaR"})

    with create_test_client(handler) as client:
        value = client.get("/").headers["foo"]
        assert value == "BaR"
@pytest.mark.parametrize("as_instance", [True, False])
def test_set_cookie(as_instance: bool) -> None:
    """set_cookie accepts either a Cookie instance or keyword arguments."""
    @get("/")
    def handler() -> Response:
        response = Response(content=None)
        if as_instance:
            response.set_cookie(Cookie(key="test", value="abc", max_age=60, expires=60, secure=True, httponly=True))
        else:
            response.set_cookie(key="test", value="abc", max_age=60, expires=60, secure=True, httponly=True)
        assert len(response.cookies) == 1
        return response

    with create_test_client(handler) as client:
        response = client.get("/")
        assert response.cookies.get("test") == "abc"
def test_delete_cookie() -> None:
    """delete_cookie produces an expiring Set-Cookie that clears the value."""
    @get("/create")
    def create_cookie_handler() -> Response:
        response = Response(content=None)
        response.set_cookie("test", "abc", max_age=60, expires=60, secure=True, httponly=True)
        assert len(response.cookies) == 1
        return response

    @get("/delete")
    def delete_cookie_handler() -> Response:
        response = Response(content=None)
        # Deletion is implemented as setting an already-expired cookie.
        response.delete_cookie(
            "test",
            "abc",
        )
        assert len(response.cookies) == 1
        return response

    with create_test_client(route_handlers=[create_cookie_handler, delete_cookie_handler]) as client:
        response = client.get("/create")
        assert response.cookies.get("test") == "abc"
        assert client.cookies.get("test") == "abc"
        response = client.get("/delete")
        assert response.cookies.get("test") is None
        # the commented out assert fails, because of the starlette test client's behaviour - which doesn't clear
        # cookies.
@pytest.mark.parametrize(
    "media_type, expected, should_have_content_length",
    ((MediaType.TEXT, b"", False), (MediaType.HTML, b"", False), (MediaType.JSON, b"null", True)),
)
def test_empty_response(media_type: MediaType, expected: bytes, should_have_content_length: bool) -> None:
    """A handler returning None serializes to an empty body (or JSON null)."""
    # NOTE(review): should_have_content_length is never used — the assertion
    # below checks content-length unconditionally; confirm intent.
    @get("/", media_type=media_type)
    def handler() -> None:
        return

    with create_test_client(handler) as client:
        response = client.get("/")
        assert response.content == expected
        assert response.headers["content-length"] == str(len(expected))
@pytest.mark.parametrize("status_code", (HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED))
def test_response_without_payload(status_code: int) -> None:
    """Bodyless status codes omit both content-type and content-length."""
    @get("/")
    def handler() -> Response:
        return Response(b"", status_code=status_code)

    with create_test_client(handler) as client:
        headers = client.get("/").headers
        for forbidden in ("content-type", "content-length"):
            assert forbidden not in headers
@pytest.mark.parametrize(
    "status, body, should_raise",
    (
        (HTTP_100_CONTINUE, None, False),
        (HTTP_101_SWITCHING_PROTOCOLS, None, False),
        (HTTP_102_PROCESSING, None, False),
        (HTTP_103_EARLY_HINTS, None, False),
        (HTTP_204_NO_CONTENT, None, False),
        (HTTP_100_CONTINUE, "1", True),
        (HTTP_101_SWITCHING_PROTOCOLS, "1", True),
        (HTTP_102_PROCESSING, "1", True),
        (HTTP_103_EARLY_HINTS, "1", True),
        (HTTP_204_NO_CONTENT, "1", True),
    ),
)
def test_statuses_without_body(status: int, body: Optional[str], should_raise: bool) -> None:
    """1xx and 204 responses may not carry a body; attaching one yields a 500."""
    @get("/")
    def handler() -> Response:
        return Response(content=body, status_code=status)

    with create_test_client(handler) as client:
        response = client.get("/")
        if should_raise:
            # The server-side configuration error surfaces to the client as 500.
            assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR
        else:
            assert response.status_code == status
            assert "content-length" not in response.headers
@pytest.mark.parametrize(
    "body, media_type, should_raise",
    (
        ("", MediaType.TEXT, False),
        ("abc", MediaType.TEXT, False),
        (b"", MediaType.HTML, False),
        (b"abc", MediaType.HTML, False),
        ({"key": "value"}, MediaType.TEXT, True),
        ([1, 2, 3], MediaType.TEXT, True),
        ({"key": "value"}, MediaType.HTML, True),
        ([1, 2, 3], MediaType.HTML, True),
        ([], MediaType.HTML, False),
        ([], MediaType.TEXT, False),
        ({}, MediaType.HTML, False),
        ({}, MediaType.TEXT, False),
        ({"abc": "def"}, MediaType.JSON, False),
        (Empty, MediaType.JSON, True),
    ),
)
def test_render_method(body: Any, media_type: MediaType, should_raise: bool) -> None:
    """Non-empty containers cannot render as TEXT/HTML; JSON cannot render Empty."""
    @get("/", media_type=media_type)
    def handler() -> Any:
        return body

    with create_test_client(handler) as client:
        response = client.get("/")
        if should_raise:
            # Serialization failure surfaces to the client as a 500.
            assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR
        else:
            assert response.status_code == HTTP_200_OK
def test_get_serializer() -> None:
    """get_serializer layers custom type_encoders over the default serializer."""
    class Foo:
        pass

    foo_encoder = {Foo: lambda f: "it's a foo"}
    path_encoder = {PurePosixPath: lambda p: "it's a path"}

    class FooResponse(Response):
        type_encoders = foo_encoder

    # Without encoders the default serializer instance is returned as-is.
    assert get_serializer() is default_serializer

    assert get_serializer(type_encoders=foo_encoder)(Foo()) == "it's a foo"
    assert get_serializer(type_encoders=path_encoder)(PurePosixPath()) == "it's a path"

    # Class-level encoders on a Response subclass are honoured...
    assert get_serializer(FooResponse(None).type_encoders)(Foo()) == "it's a foo"
    # ...and per-instance encoders take precedence over class-level ones.
    assert (
        get_serializer(FooResponse(None, type_encoders={Foo: lambda f: "foo"}).response_type_encoders)(Foo()) == "foo"
    )
def METHOD_NAME() -> None:
    """A HEAD response must not carry a body; constructing one raises."""
    with pytest.raises(ImproperlyConfiguredException):
        ASGIResponse(body=b"hello world", media_type=MediaType.TEXT, is_head_response=True)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetOutboundEndpointResult',
'AwaitableGetOutboundEndpointResult',
'get_outbound_endpoint',
'get_outbound_endpoint_output',
]
# NOTE: this class is produced by pulumi codegen (see the file header) — only
# comments were added here; regenerate rather than hand-edit the logic.
@pulumi.output_type
class GetOutboundEndpointResult:
    """
    Describes an outbound endpoint for a DNS resolver.
    """
    def __init__(__self__, etag=None, id=None, location=None, name=None, METHOD_NAME=None, resource_guid=None, subnet=None, system_data=None, tags=None, type=None):
        # Each argument is type-checked and stored via pulumi.set so the
        # @pulumi.output_type machinery can expose it through the getters below.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", METHOD_NAME)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if subnet and not isinstance(subnet, dict):
            raise TypeError("Expected argument 'subnet' to be a dict")
        pulumi.set(__self__, "subnet", subnet)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        ETag of the outbound endpoint.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def METHOD_NAME(self) -> str:
        """
        The current provisioning state of the outbound endpoint. This is a read-only property and any attempt to set this value will be ignored.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resourceGuid property of the outbound endpoint resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def subnet(self) -> 'outputs.SubResourceResponse':
        """
        The reference to the subnet used for the outbound endpoint.
        """
        return pulumi.get(self, "subnet")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetOutboundEndpointResult(GetOutboundEndpointResult):
    # Awaitable wrapper: the values are already resolved, so __await__
    # completes immediately (the `if False: yield` makes it a generator).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetOutboundEndpointResult(
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            resource_guid=self.resource_guid,
            subnet=self.subnet,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_outbound_endpoint(dns_resolver_name: Optional[str] = None,
                          outbound_endpoint_name: Optional[str] = None,
                          resource_group_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutboundEndpointResult:
    """
    Gets properties of an outbound endpoint for a DNS resolver.


    :param str dns_resolver_name: The name of the DNS resolver.
    :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['dnsResolverName'] = dns_resolver_name
    __args__['outboundEndpointName'] = outbound_endpoint_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the result is wrapped so it may also be awaited.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20220701:getOutboundEndpoint', __args__, opts=opts, typ=GetOutboundEndpointResult).value
    return AwaitableGetOutboundEndpointResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        METHOD_NAME=pulumi.get(__ret__, 'provisioning_state'),
        resource_guid=pulumi.get(__ret__, 'resource_guid'),
        subnet=pulumi.get(__ret__, 'subnet'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
# The decorator derives the Output-returning implementation from the plain
# get_outbound_endpoint above, which is why the body is just `...`.
@_utilities.lift_output_func(get_outbound_endpoint)
def get_outbound_endpoint_output(dns_resolver_name: Optional[pulumi.Input[str]] = None,
                                 outbound_endpoint_name: Optional[pulumi.Input[str]] = None,
                                 resource_group_name: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOutboundEndpointResult]:
    """
    Gets properties of an outbound endpoint for a DNS resolver.


    :param str dns_resolver_name: The name of the DNS resolver.
    :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ...
from typing import Any, Callable, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
try:
from transformers.generation_logits_process import (
LogitsProcessorList,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
except ImportError:
from transformers.generation import (
LogitsProcessorList,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
def prepare_logits_processor(
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    temperature: Optional[float] = None,
) -> LogitsProcessorList:
    """Build the list of logits warpers for sampling.

    A warper is added only when its parameter is set to a value that would
    actually change the distribution (temperature != 1.0, top_k != 0,
    top_p < 1.0). Order matters: temperature first, then top-k, then top-p.
    """
    specs = (
        (temperature is not None and temperature != 1.0,
         lambda: TemperatureLogitsWarper(temperature)),
        (top_k is not None and top_k != 0,
         lambda: TopKLogitsWarper(top_k)),
        (top_p is not None and top_p < 1.0,
         lambda: TopPLogitsWarper(top_p)),
    )
    processors = LogitsProcessorList()
    for enabled, build in specs:
        if enabled:
            processors.append(build())
    return processors
def METHOD_NAME(unfinished_sequences: torch.Tensor) -> bool:
    """Return True when every sequence in the (possibly distributed) batch is done.

    ``unfinished_sequences`` holds 1 for still-running sequences and 0 for
    finished ones. When torch.distributed is initialized with more than one
    rank, the flags are all-reduced so generation only stops once *all*
    ranks are finished.
    """
    if dist.is_initialized() and dist.get_world_size() > 1:
        # consider DP: clone first so the caller's tensor is not mutated in place
        unfinished_sequences = unfinished_sequences.clone()
        dist.all_reduce(unfinished_sequences)
    # Fix: wrap in bool() so the function honours its "-> bool" annotation
    # instead of returning a 0-dim torch.Tensor (which was only truthy by accident).
    return bool(unfinished_sequences.max() == 0)
def sample(
    model: nn.Module,
    input_ids: torch.Tensor,
    max_length: int,
    early_stopping: bool = False,
    eos_token_id: Optional[int] = None,
    pad_token_id: Optional[int] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    temperature: Optional[float] = None,
    prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
    update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,
    **model_kwargs
) -> torch.Tensor:
    """Autoregressively sample tokens until ``max_length`` (or until all EOS).

    Returns ``input_ids`` extended with the sampled tokens; a prompt that is
    already ``max_length`` long is returned unchanged.
    """
    if input_ids.size(1) >= max_length:
        return input_ids
    logits_processor = prepare_logits_processor(top_k, top_p, temperature)
    # 1 marks a still-running sequence, 0 a finished one.
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    for _ in range(input_ids.size(1), max_length):
        model_inputs = (
            prepare_inputs_fn(input_ids, **model_kwargs)
            if prepare_inputs_fn is not None
            else {"input_ids": input_ids}
        )
        outputs = model(**model_inputs)
        # Only the logits of the last position matter for the next token.
        next_token_logits = outputs["logits"][:, -1, :]
        # pre-process distribution
        next_token_logits = logits_processor(input_ids, next_token_logits)
        # sample
        probs = torch.softmax(next_token_logits, dim=-1, dtype=torch.float)
        next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        # finished sentences should have their next token be a padding token
        if eos_token_id is not None:
            if pad_token_id is None:
                raise ValueError(
                    "If `eos_token_id` is defined, make sure that `pad_token_id` is defined."
                )
            next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
                1 - unfinished_sequences
            )
        # update generated ids, model inputs for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        if update_model_kwargs_fn is not None:
            model_kwargs = update_model_kwargs_fn(outputs, **model_kwargs)
        # if eos_token was found in one sentence, set sentence to finished
        if eos_token_id is not None:
            unfinished_sequences = unfinished_sequences.mul(
                (next_tokens != eos_token_id).long()
            )
        # stop when each sentence is finished if early_stopping=True
        if early_stopping and METHOD_NAME(unfinished_sequences):
            break
    return input_ids
def generate(
    model: nn.Module,
    input_ids: torch.Tensor,
    max_length: int,
    num_beams: int = 1,
    do_sample: bool = True,
    early_stopping: bool = False,
    eos_token_id: Optional[int] = None,
    pad_token_id: Optional[int] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    temperature: Optional[float] = None,
    prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
    update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,
    **model_kwargs
) -> torch.Tensor:
    """Generate token sequence. The returned sequence is input_ids + generated_tokens.

    Args:
        model (nn.Module): model
        input_ids (torch.Tensor): input sequence
        max_length (int): max length of the returned sequence
        num_beams (int, optional): number of beams. Defaults to 1.
        do_sample (bool, optional): whether to do sample. Defaults to True.
        early_stopping (bool, optional): if True, the sequence length may be smaller than max_length due to finding eos. Defaults to False.
        eos_token_id (Optional[int], optional): end of sequence token id. Defaults to None.
        pad_token_id (Optional[int], optional): pad token id. Defaults to None.
        top_k (Optional[int], optional): the number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None.
        top_p (Optional[float], optional): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. Defaults to None.
        temperature (Optional[float], optional): The value used to module the next token probabilities. Defaults to None.
        prepare_inputs_fn (Optional[Callable[[torch.Tensor, Any], dict]], optional): Function to preprocess model inputs. Arguments of this function should be input_ids and model_kwargs. Defaults to None.
        update_model_kwargs_fn (Optional[Callable[[dict, Any], dict]], optional): Function to update model_kwargs based on outputs. Arguments of this function should be outputs and model_kwargs. Defaults to None.

    Raises:
        NotImplementedError: greedy search and beam search are not implemented yet.
        ValueError: the (num_beams, do_sample) combination matches no supported mode.
    """
    # Fix: removed stray " |" artifact after the final raise that broke syntax.
    is_greedy_gen_mode = (num_beams == 1) and do_sample is False
    is_sample_gen_mode = (num_beams == 1) and do_sample is True
    is_beam_gen_mode = (num_beams > 1) and do_sample is False
    if is_greedy_gen_mode:
        # run greedy search
        raise NotImplementedError
    elif is_sample_gen_mode:
        # run sample
        return sample(
            model,
            input_ids,
            max_length,
            early_stopping=early_stopping,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
            prepare_inputs_fn=prepare_inputs_fn,
            update_model_kwargs_fn=update_model_kwargs_fn,
            **model_kwargs
        )
    elif is_beam_gen_mode:
        raise NotImplementedError
    else:
        raise ValueError("Unsupported generation mode")
"""Test Trotter Hamiltonian methods from `qibo/core/hamiltonians.py`."""
import numpy as np
import pytest
from qibo import hamiltonians
from qibo.backends import NumpyBackend
from qibo.quantum_info import random_hermitian, random_statevector
from .utils import random_complex
@pytest.mark.parametrize("nqubits", [3, 4])
@pytest.mark.parametrize("model", ["TFIM", "XXZ", "Y", "MaxCut"])
def test_trotter_hamiltonian_to_dense(backend, nqubits, model):
    """Test that Trotter Hamiltonian dense form agrees with normal Hamiltonian."""
    # dense=False builds the local (Trotter) representation of the same model.
    local_ham = getattr(hamiltonians, model)(nqubits, dense=False, backend=backend)
    target_ham = getattr(hamiltonians, model)(nqubits, backend=backend)
    final_ham = local_ham.dense
    backend.assert_allclose(final_ham.matrix, target_ham.matrix, atol=1e-15)
def METHOD_NAME(backend, nqubits=3):
    """Test multiplication of Trotter Hamiltonian with scalar.

    Checks both left (2 * H) and right (H * 2) multiplication against the
    scaled dense Hamiltonian.
    """
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0, backend=backend)
    local_dense = (2 * local_ham).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
    # Rebuild the Trotter Hamiltonian for the right-multiplication check.
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    local_dense = (local_ham * 2).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_scalar_add(backend, nqubits=4):
    """Test addition of Trotter Hamiltonian with scalar.

    Checks both left (2 + H) and right (H + 2) addition against the dense form.
    """
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0, backend=backend)
    local_dense = (2 + local_ham).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
    # Rebuild the Trotter Hamiltonian for the right-addition check.
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    local_dense = (local_ham + 2).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_scalar_sub(backend, nqubits=3):
    """Test subtraction of Trotter Hamiltonian with scalar.

    Subtraction is not commutative, so 2 - H and H - 2 use distinct targets.
    """
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    target_ham = 2 - hamiltonians.TFIM(nqubits, h=1.0, backend=backend)
    local_dense = (2 - local_ham).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
    target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - 2
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    local_dense = (local_ham - 2).dense
    backend.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_operator_add_and_sub(backend, nqubits=3):
    """Test addition and subtraction between Trotter Hamiltonians."""
    local_ham1 = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    local_ham2 = hamiltonians.TFIM(nqubits, h=0.5, dense=False, backend=backend)
    # H1 + H2 in Trotter form must match the dense sum of the same models.
    local_ham = local_ham1 + local_ham2
    target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) + hamiltonians.TFIM(
        nqubits, h=0.5, backend=backend
    )
    dense = local_ham.dense
    backend.assert_allclose(dense.matrix, target_ham.matrix)
    # Same check for subtraction.
    local_ham = local_ham1 - local_ham2
    target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - hamiltonians.TFIM(
        nqubits, h=0.5, backend=backend
    )
    dense = local_ham.dense
    backend.assert_allclose(dense.matrix, target_ham.matrix)
@pytest.mark.parametrize("nqubits,normalize", [(3, False), (4, False)])
def test_trotter_hamiltonian_matmul(backend, nqubits, normalize):
    """Test Trotter Hamiltonian expectation value.

    NOTE(review): only ``normalize=False`` is parametrized; the ``True`` path
    is currently untested.
    """
    local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)
    dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend)
    # Expectation with a backend-cast state ...
    state = backend.cast(random_complex((2**nqubits,)))
    trotter_ev = local_ham.expectation(state, normalize)
    target_ev = dense_ham.expectation(state, normalize)
    backend.assert_allclose(trotter_ev, target_ev)
    # ... and with a raw numpy state.
    state = random_complex((2**nqubits,))
    trotter_ev = local_ham.expectation(state, normalize)
    target_ev = dense_ham.expectation(state, normalize)
    backend.assert_allclose(trotter_ev, target_ev)
    trotter_matmul = local_ham @ state
    target_matmul = dense_ham @ state
    backend.assert_allclose(trotter_matmul, target_matmul)
def test_trotter_hamiltonian_three_qubit_term(backend):
    """Test creating ``TrotterHamiltonian`` with three qubit term."""
    from scipy.linalg import expm
    from qibo.hamiltonians.terms import HamiltonianTerm
    numpy_backend = NumpyBackend()
    # Random Hermitian terms acting on 3, 2 and 1 qubits respectively.
    m1 = random_hermitian(2**3, backend=numpy_backend)
    m2 = random_hermitian(2**2, backend=numpy_backend)
    m3 = random_hermitian(2**1, backend=numpy_backend)
    terms = [
        HamiltonianTerm(m1, 0, 1, 2),
        HamiltonianTerm(m2, 2, 3),
        HamiltonianTerm(m3, 1),
    ]
    m1 = backend.cast(m1, dtype=m1.dtype)
    m2 = backend.cast(m2, dtype=m2.dtype)
    m3 = backend.cast(m3, dtype=m3.dtype)
    ham = hamiltonians.SymbolicHamiltonian(backend=backend)
    ham.terms = terms
    # Test that the `TrotterHamiltonian` dense matrix is correct
    eye = np.eye(2, dtype=complex)
    eye = backend.cast(eye, dtype=eye.dtype)
    # Embed each term into the full 4-qubit Hilbert space via Kronecker products.
    mm1 = np.kron(m1, eye)
    mm2 = np.kron(np.kron(eye, eye), m2)
    mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))
    target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3, backend=backend)
    backend.assert_allclose(ham.matrix, target_ham.matrix)
    dt = 1e-2
    initial_state = random_statevector(2**4, backend=backend)
    circuit = ham.circuit(dt=dt)
    final_state = backend.execute_circuit(circuit, np.copy(initial_state))
    mm1 = backend.to_numpy(mm1)
    mm2 = backend.to_numpy(mm2)
    mm3 = backend.to_numpy(mm3)
    # NOTE(review): the u[0]-u[1]-u[1]-u[0] ordering below looks like a
    # symmetric (second-order) Trotter step — confirm against ham.circuit().
    u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]
    u = backend.cast(u)
    target_state = np.dot(u[1], np.dot(u[0], initial_state))
    target_state = np.dot(u[0], np.dot(u[1], target_state))
    backend.assert_allclose(final_state, target_state)
def test_old_trotter_hamiltonian_errors():
    """Check errors when creating the deprecated ``TrotterHamiltonian`` object.

    Both the constructor and the ``from_symbolic`` factory of the removed
    class must raise ``NotImplementedError``.
    """
    # Fix: removed stray " |" artifact at the end of the function and the
    # unused `h =` bindings flagged by linters.
    with pytest.raises(NotImplementedError):
        hamiltonians.TrotterHamiltonian()
    with pytest.raises(NotImplementedError):
        hamiltonians.TrotterHamiltonian.from_symbolic(0, 1)
from presidio_anonymizer.entities import InvalidParamException, RecognizerResult
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (0, 10),
        (2, 8),
        (0, 8),
        (0, 10),
    ],
    # fmt: on
)
def test_given_recognizer_results_then_one_contains_another(start, end):
    """A result spanning [0, 10] must contain every parametrized sub-span."""
    first = create_recognizer_result("entity", 0, 0, 10)
    second = create_recognizer_result("entity", 0, start, end)
    assert first.contains(second)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (5, 11),
        (0, 5),
        (0, 6),
    ],
    # fmt: on
)
def test_given_recognizer_result_then_they_do_not_contain_one_another(start, end):
    """Spans that only partially overlap [5, 10] must not be 'contained'."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    assert not first.contains(second)
def test_given_recognizer_results_with_same_indices_then_indices_are_equal():
    """equal_indices is True when start and end both match."""
    first = create_recognizer_result("entity", 0, 0, 10)
    second = create_recognizer_result("entity", 0, 0, 10)
    assert first.equal_indices(second)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (5, 11),
        (0, 5),
        (0, 6),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_different_indices_then_indices_are_not_equal(
    start, end
):
    """equal_indices is False when either start or end differs from [5, 10]."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    assert not first.equal_indices(second)
@pytest.mark.parametrize(
    # fmt: off
    "start, end, err",
    [
        ("0", 10,
         "Invalid parameter value for start. Expecting 'number', but got 'string'."),
        (0, "10",
         "Invalid parameter value for end. Expecting 'number', but got 'string'."),
    ],
    # fmt: on
)
def test_given_invalid_string_start_instead_of_int_then_we_fail(start, end, err):
    """Non-numeric start/end must be rejected with a precise error message."""
    with pytest.raises(InvalidParamException, match=err):
        create_recognizer_result("bla", 0.2, start, end)
def test_given_identical_recognizer_results_then_they_are_equal():
    """Two results built from identical fields compare equal."""
    first = create_recognizer_result("bla", 0.2, 0, 10)
    second = create_recognizer_result("bla", 0.2, 0, 10)
    assert first == second
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 4, 10),
        ("changed", 0.2, 0, 10),
        ("bla", 0.2, 0, 11),
        ("bla", 0.3, 0, 10),
    ],
    # fmt: on
)
def test_given_different_recognizer_result_then_they_are_not_equal(
    entity_type, score, start, end
):
    """Changing any single field (type, score, start, end) breaks equality."""
    first = create_recognizer_result("bla", 0.2, 0, 10)
    second = create_recognizer_result(entity_type, score, start, end)
    assert first != second
def test_given_recognizer_result_then_their_hash_is_equal():
    """Equal results must hash equally (hash/eq contract)."""
    first = create_recognizer_result("entity", 0, 0, 10)
    second = create_recognizer_result("entity", 0, 0, 10)
    assert first.__hash__() == second.__hash__()
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 4, 10),
        ("changed", 0.2, 0, 10),
        ("bla", 0.2, 0, 11),
        ("bla", 0.3, 0, 10),
    ],
    # fmt: on
)
def test_given_different_recognizer_results_then_hash_is_not_equal(
    entity_type, score, start, end
):
    """Results differing in any field are expected to hash differently."""
    first = create_recognizer_result("bla", 0.2, 0, 10)
    second = create_recognizer_result(entity_type, score, start, end)
    assert first.__hash__() != second.__hash__()
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 0, 10),
        ("changed", 0.2, 2, 10),
        ("bla", 0.3, 0, 11),
        ("bla", 0.1, 0, 10),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_conflicting_indices_then_there_is_a_conflict(
    entity_type, score, start, end
):
    """Overlapping spans around [2, 10] must be reported as conflicts."""
    first = create_recognizer_result("bla", 0.2, 2, 10)
    second = create_recognizer_result(entity_type, score, start, end)
    assert first.has_conflict(second)
@pytest.mark.parametrize(
    # fmt: off
    "entity_type, score, start, end",
    [
        ("bla", 0.2, 3, 10),
        ("changed", 0.1, 2, 10),
        ("bla", 0.3, 0, 9),
    ],
    # fmt: on
)
def test_given_recognizer_results_with_no_conflicting_indices_then_there_is_no_conflict(
    entity_type, score, start, end
):
    """Spans that do not conflict with [2, 10] per has_conflict's rules."""
    first = create_recognizer_result("bla", 0.2, 2, 10)
    second = create_recognizer_result(entity_type, score, start, end)
    assert not first.has_conflict(second)
@pytest.mark.parametrize(
    # fmt: off
    "request_json, result_text",
    [
        ({}, "Invalid input, result must contain start",),
        ({
            "end": 32,
            "score": 0.8,
            "entity_type": "NUMBER"
        }, "Invalid input, result must contain start",),
        ({
            "start": 28,
            "score": 0.8,
            "entity_type": "NUMBER"
        }, "Invalid input, result must contain end",),
        ({
            "start": 28,
            "end": 32,
            "entity_type": "NUMBER"
        }, "Invalid input, analyzer result must contain score",),
        ({
            "start": 28,
            "end": 32,
            "score": 0.8,
        }, "Invalid input, result must contain entity_type",),
    ],
    # fmt: on
)
def test_given_json_for_creating_recognizer_result_without_text_then_creation_fails(
    request_json, result_text
):
    """from_json must reject payloads missing any mandatory field, with a precise message."""
    with pytest.raises(InvalidParamException) as e:
        RecognizerResult.from_json(request_json)
    assert result_text == e.value.err_msg
def test_given_valid_json_for_creating_recognizer_result_then_creation_is_successful():
    """A complete payload round-trips into the expected field values."""
    data = create_recognizer_result("NUMBER", 0.8, 0, 32)
    assert data.start == 0
    assert data.end == 32
    assert data.score == 0.8
    assert data.entity_type == "NUMBER"
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (4, 10),
        (4, 9),
        (0, 2),
        (5, 9),
    ],
    # fmt: on
)
def test_given_recognizer_results_then_one_is_greater_then_another(start, end):
    """[5, 10] compares greater than spans that start earlier or end sooner."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    assert first.__gt__(second)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (5, 10),
        (6, 12),
        (6, 7),
    ],
    # fmt: on
)
def METHOD_NAME(start, end):
    """[5, 10] is NOT greater than an identical or later-starting span."""
    first = create_recognizer_result("entity", 0, 5, 10)
    second = create_recognizer_result("entity", 0, start, end)
    assert not first.__gt__(second)
def test_given_endpoint_larger_then_start_point_then_we_fail():
    """start > end must raise with an explanatory message."""
    with pytest.raises(InvalidParamException) as e:
        create_recognizer_result("entity", 0, 10, 0)
    assert (
        e.value.err_msg == "Invalid input, start index '10' "
        "must be smaller than end index '0'"
    )
def test_given_endpoint_equal_to_start_point_then_we_succeed():
    """A zero-length span (start == end) is valid."""
    assert create_recognizer_result("entity", 0, 0, 0)
@pytest.mark.parametrize(
    # fmt: off
    "start, end",
    [
        (-1, 10),
        (6, -12),
        (-2, -2),
    ],
    # fmt: on
)
def test_given_negative_start_or_endpoint_then_we_fail(start, end):
    """Negative indices on either side are rejected."""
    with pytest.raises(
        InvalidParamException,
        match="Invalid input, result start and end must be positive",
    ):
        create_recognizer_result("entity", 0, start, end)
def create_recognizer_result(entity_type: str, score: float, start: int, end: int):
    """Build a RecognizerResult from raw fields via its JSON factory.

    Shared helper for the tests above; validation happens inside from_json.
    """
    # Fix: removed stray " |" artifact after the return that broke syntax.
    data = {"entity_type": entity_type, "score": score, "start": start, "end": end}
    return RecognizerResult.from_json(data)
#
# widgets.py - Mycodo core utils
#
# Copyright (C) 2015-2020 Kyle T. Gabriel <mycodo@kylegabriel.com>
#
# This file is part of Mycodo
#
# Mycodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycodo. If not, see <http://www.gnu.org/licenses/>.
#
# Contact at kylegabriel.com
import logging
import os
from mycodo.config import PATH_WIDGETS
from mycodo.config import PATH_WIDGETS_CUSTOM
from mycodo.utils.modules import load_module_from_file
logger = logging.getLogger("mycodo.utils.widgets")
def parse_widget_information(exclude_custom=False):
    """Parses the variables assigned in each Widget and return a dictionary of IDs and values.

    :param exclude_custom: when True, only the built-in widget directory is
        scanned and PATH_WIDGETS_CUSTOM is skipped.
    :return: dict keyed by each widget's ``widget_name_unique``.
    """
    # Fixes/improvements: removed the stray " |" artifact that followed the
    # final return, and replaced ~30 copy-pasted per-key calls with one
    # data-driven loop (same keys, same order, same coercion).
    def METHOD_NAME(dict_inp, widget_cus, key, force_type=None):
        """Copy WIDGET_INFORMATION[key] into dict_inp when set (0 counts as set).

        With force_type='list' a scalar value is wrapped in a single-item list.
        """
        if (key in widget_cus.WIDGET_INFORMATION and
                (widget_cus.WIDGET_INFORMATION[key] or
                 widget_cus.WIDGET_INFORMATION[key] == 0)):
            value = widget_cus.WIDGET_INFORMATION[key]
            if force_type == 'list' and not isinstance(value, list):
                value = [value]
            dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = value
        return dict_inp

    # (key, force_type) pairs, in the original assignment order.
    info_keys = [
        ('widget_name', None), ('widget_library', None), ('no_class', None),
        ('widget_height', None), ('widget_width', None), ('message', None),
        ('url_datasheet', 'list'), ('url_manufacturer', 'list'),
        ('url_product_purchase', 'list'), ('url_additional', 'list'),
        # Dependencies
        ('dependencies_module', None), ('dependencies_message', None),
        # Which form options to display and whether each option is enabled
        ('options_enabled', None), ('options_disabled', None),
        # Misc
        ('period', None), ('endpoints', None),
        ('execute_at_creation', None), ('execute_at_modification', None),
        ('execute_at_deletion', None), ('generate_page_variables', None),
        ('custom_options_message', None), ('custom_options', None),
        ('custom_commands_message', None), ('custom_commands', None),
        ('widget_dashboard_head', None), ('widget_dashboard_title_bar', None),
        ('widget_dashboard_body', None), ('widget_dashboard_configure_options', None),
        ('widget_dashboard_js', None), ('widget_dashboard_js_ready', None),
        ('widget_dashboard_js_ready_end', None),
    ]

    excluded_files = [
        '__init__.py', '__pycache__', 'base_widget.py', 'custom_widgets',
        'examples', 'tmp_widgets'
    ]
    widget_paths = [PATH_WIDGETS]
    if not exclude_custom:
        widget_paths.append(PATH_WIDGETS_CUSTOM)
    dict_widgets = {}
    for each_path in widget_paths:
        real_path = os.path.realpath(each_path)
        for each_file in os.listdir(real_path):
            if each_file in excluded_files:
                continue
            full_path = f"{real_path}/{each_file}"
            widget_custom, status = load_module_from_file(full_path, 'widgets')
            if not widget_custom or not hasattr(widget_custom, 'WIDGET_INFORMATION'):
                continue
            # Populate dictionary of widget information
            if widget_custom.WIDGET_INFORMATION['widget_name_unique'] in dict_widgets:
                logger.error(f"Error: Cannot add widget modules because it does not have "
                             f"a unique name: {widget_custom.WIDGET_INFORMATION['widget_name_unique']}")
            else:
                dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']] = {}
                dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']]['file_path'] = full_path
                for key, force_type in info_keys:
                    dict_widgets = METHOD_NAME(
                        dict_widgets, widget_custom, key, force_type=force_type)
    return dict_widgets
import docker
import os
from time import sleep
from subprocess import Popen, PIPE
from sinspqa import LOGS_PATH, is_containerized
from sinspqa.sinsp import SinspStreamerBuilder
def pytest_addoption(parser):
    """Register command line flags that skip driver-specific test variants."""
    parser.addoption('--no-kmod', action='store_true',
                     default=False, help='Skip tests with kernel module')
    parser.addoption('--no-ebpf', action='store_true',
                     default=False, help='Skip tests with eBPF')
    parser.addoption('--no-modern', action='store_true',
                     default=False, help='Skip tests with modern eBPF')
def pytest_collection_modifyitems(config, items):
    """Add skip markers to collected tests according to the --no-* flags.

    A test is skipped when any of its keywords contains the driver token
    ('kmod', 'ebpf' or 'modern_bpf') of a disabled driver.
    """
    no_kmod = config.getoption('--no-kmod')
    no_ebpf = config.getoption('--no-ebpf')
    no_modern = config.getoption('--no-modern')

    if not (no_kmod or no_ebpf or no_modern):
        # No driver disabled: leave the collection untouched.
        return

    disabled = []
    if no_kmod:
        disabled.append(
            ('kmod', pytest.mark.skip(reason='Skipping tests with kernel module driver')))
    if no_ebpf:
        disabled.append(
            ('ebpf', pytest.mark.skip(reason='Skipping tests with eBPF driver')))
    if no_modern:
        disabled.append(
            ('modern_bpf', pytest.mark.skip(reason='Skipping tests with modern eBPF driver')))

    for item in items:
        for token, skip_marker in disabled:
            if any(token in kw for kw in item.keywords):
                item.add_marker(skip_marker)
@pytest.fixture(scope="session", autouse=True)
def check_root():
    """Fail the whole session up front when not running as root."""
    assert os.geteuid() == 0, 'e2e tests need to be run as root'
@pytest.fixture(scope="session", autouse=True)
def docker_client():
    """
    Create a docker client to be used by the tests.

    Returns:
        A docker.DockerClient object created from the environment the tests run on.
    """
    # Session scope: a single client instance is shared by every test.
    return docker.from_env()
def wait_container_running(container: docker.models.containers.Container, additional_wait: int = 0, retries: int = 5):
    """Poll ``container`` until its status becomes 'running' or retries run out.

    Polls every 0.5s up to ``retries`` times, then optionally sleeps
    ``additional_wait`` seconds to let the container settle.

    Raises:
        TimeoutError: when the container never reaches the running state.
    """
    success = False
    for _ in range(retries):
        container.reload()  # refresh the cached status from the daemon
        if container.status == 'running':
            success = True
            break
        sleep(0.5)
    if not success:
        raise TimeoutError
    if additional_wait:
        sleep(additional_wait)
def METHOD_NAME(docker_client: docker.client.DockerClient, name: str, container: dict):
    """Start one detached container described by the ``container`` dict.

    Returns a ``(handle, post)`` tuple where ``post`` carries the optional
    teardown-time validation callback and stop signal.
    """
    image = container['image']
    args = container.get('args', '')
    privileged = container.get('privileged', False)
    mounts = container.get('mounts', [])
    environment = container.get('env', {})
    user = container.get('user', '')
    pid_mode = container.get('pid_mode', '')
    network_mode = container.get('network_mode', '')
    additional_wait = container.get('init_wait', 0)
    post_validation = container.get('post_validation', None)
    stop_signal = container.get('signal', None)
    handle = docker_client.containers.run(
        image,
        args,
        name=name,
        detach=True,
        privileged=privileged,
        mounts=mounts,
        environment=environment,
        user=user,
        pid_mode=pid_mode,
        network_mode=network_mode,
    )
    # Validation and stop signal are consumed later by teardown_container.
    post = {
        'validation': post_validation,
        'signal': stop_signal
    }
    try:
        wait_container_running(handle, additional_wait)
    except TimeoutError:
        # Do not abort here: returning the handle lets the test fail with
        # better context (and its logs still get dumped at teardown).
        print(f'{name} failed to start, the test will fail')
    return (handle, post)
def teardown_container(name, container, validation, stop_signal):
    """Stop ``container``, dump its logs under LOGS_PATH and remove it.

    Returns an empty string on success, or '<name>: <error>' when the
    optional ``validation`` callback raises AssertionError.
    """
    if stop_signal:
        container.kill(stop_signal)
    # The stop command is issued regardless of the kill command to ensure
    # the container stops
    container.stop()
    logs = container.logs().decode('utf-8')
    if logs:
        with open(os.path.join(LOGS_PATH, f'{name}.log'), 'w') as f:
            f.write(logs)
    result = ''
    if validation:
        try:
            validation(container)
        except AssertionError as e:
            result = f'{name}: {e}'
    container.remove()
    return result
@pytest.fixture(scope="function")
def run_containers(request, docker_client: docker.client.DockerClient):
    """
    Runs containers, dumps their logs and cleans'em up

    Parametrized with a dict of {name: container-spec}; yields the started
    container handles keyed by name.
    """
    containers = {}
    post = {}
    for name, container in request.param.items():
        handle, post_validation = METHOD_NAME(docker_client, name, container)
        containers[name] = handle
        post[name] = post_validation
    yield containers
    # Teardown: every container is torn down even if one validation fails;
    # failures are accumulated and reported together.
    success = True
    errors = []
    for name, container in containers.items():
        validation = post[name]['validation']
        stop_signal = post[name]['signal']
        result = teardown_container(name, container, validation, stop_signal)
        if result != '':
            errors.append(result)
            success = False
    assert success, '\n'.join(errors)
@pytest.fixture(scope='function')
def sinsp(request, docker_client: docker.client.DockerClient):
    """
    Runs an instance of sinsp-example, either in a container or as a regular
    process

    Yields a SinspStreamer reader over the running instance; the instance is
    stopped and validated on teardown.
    """
    if is_containerized():
        # Containerized mode: request.param is a container spec dict.
        container = request.param
        handle, post = METHOD_NAME(docker_client, 'sinsp', container)
        yield SinspStreamerBuilder() \
            .setContainerized(True) \
            .setSinsp(handle) \
            .setTimeout(10) \
            .build()
        validation = container.get('post_validation', None)
        stop_signal = container.get('signal', None)
        result = teardown_container(
            'sinsp', handle, validation, stop_signal)
        assert result == '', result
    else:
        # Process mode: request.param describes the binary path, args and env.
        process = request.param
        args = process['args']
        args.insert(0, process['path'])
        env = os.environ.copy()
        additional_wait = process.get('init_wait', 0)
        for k, v in process['env'].items():
            env[k] = v
        process = Popen(args, env=env, stdout=PIPE, universal_newlines=True)
        if additional_wait:
            sleep(additional_wait)
        reader = SinspStreamerBuilder() \
            .setContainerized(False) \
            .setSinsp(process) \
            .setTimeout(10) \
            .build()
        yield reader
        # Stop the reader before terminating the process so stdout drains cleanly.
        reader.stop()
        process.terminate()
        process.wait()
        assert process.returncode == 0, f'sinsp-example terminated with code {process.returncode}'
def pytest_html_report_title(report):
    """Set the title of the auto-generated pytest-html report."""
    report.title = "sinsp e2e tests"
def dump_logs(pytest_html, extra):
    """
    Finds all logs dumped to LOGS_PATH and makes them available through the
    auto-generated report
    """
    for file in os.listdir(LOGS_PATH):
        full_path = os.path.join(LOGS_PATH, file)
        if not os.path.isfile(full_path):
            continue
        # errors='replace' so a log with invalid UTF-8 cannot break the report.
        with open(full_path, 'r', errors='replace') as f:
            logs = f.read()
        extra.append(pytest_html.extras.text(logs, name=file))
        # Remove file so it doesn't bleed to following tests
        os.remove(full_path)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach any dumped container/process logs to the HTML report.

    Runs once per test phase; logs are only collected during the teardown
    phase, after all fixtures have written their files to LOGS_PATH.
    """
    # Fix: removed stray " |" artifact after the last statement that broke syntax.
    pytest_html = item.config.pluginmanager.getplugin("html")
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, "extra", [])
    if report.when == "teardown":
        dump_logs(pytest_html, extra)
    report.extra = extra
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Tests for :class:`aiida.orm.nodes.data.list.List` class."""
import pytest
from aiida.common.exceptions import ModificationNotAllowed
from aiida.orm import List, load_node
@pytest.fixture
def listing():
    """A small heterogeneous Python list used to seed ``List`` nodes."""
    return ['a', 2, True]
@pytest.fixture
def int_listing():
    """An unsorted list of ints (useful for sort-related tests)."""
    return [2, 1, 3]
def test_creation():
    """Test the creation of an empty ``List`` node."""
    node = List()
    assert len(node) == 0
    # Indexing an empty node must behave like indexing an empty Python list.
    with pytest.raises(IndexError):
        node[0]  # pylint: disable=pointless-statement
def test_mutability():
    """Test list's mutability before and after storage."""
    node = List()
    node.append(5)  # mutation is allowed while unstored
    node.store()
    # Test all mutable calls are now disallowed
    with pytest.raises(ModificationNotAllowed):
        node.append(5)
    with pytest.raises(ModificationNotAllowed):
        node.extend([5])
    with pytest.raises(ModificationNotAllowed):
        node.insert(0, 2)
    with pytest.raises(ModificationNotAllowed):
        node.remove(5)
    with pytest.raises(ModificationNotAllowed):
        node.pop()
    with pytest.raises(ModificationNotAllowed):
        node.sort()
    with pytest.raises(ModificationNotAllowed):
        node.reverse()
def test_store_load(listing):
    """Test load_node on just stored object."""
    original = List(listing)
    original.store()
    # Reloading by pk must round-trip the content exactly.
    reloaded = load_node(original.pk)
    assert original.get_list() == reloaded.get_list()
def test_special_methods(listing):
    """Test the special methods of the ``List`` class."""
    node = List(listing)
    # __getitem__ mirrors the wrapped Python list.
    for index, expected in enumerate(listing):
        assert node[index] == expected
    # __setitem__ replaces an element in place.
    node[0] = 'b'
    assert node[0] == 'b'
    # __delitem__ drops the element at the given index.
    del node[0]
    assert node.get_list() == listing[1:]
    # __len__ reflects the deletion.
    assert len(node) == 2
def test_equality(listing):
    """Test equality comparison for ``List`` nodes."""
    other_content = ['I', 'am', 'different']
    node = List(listing)
    twin = List(listing)
    other_node = List(other_content)
    # Comparison against plain Python base types.
    assert node == listing
    assert node != other_content
    # Comparison against other `BaseType` nodes.
    assert node == twin
    assert node != other_node
def test_append(listing):
    """Test the ``List.append()`` method."""
    def assert_single_four(node):
        # Helper: the node holds exactly one element equal to 4.
        assert len(node) == 1
        assert node[0] == 4
    node = List()
    node.append(4)
    assert_single_four(node)
    # Storing must not alter the content.
    node.store()
    assert_single_four(node)
    node = List(listing)
    node.append('more')
    assert node[-1] == 'more'
def test_extend(listing):
    """Test extend() member function."""
    def assert_matches(node, expected):
        # Helper: element-wise equality between node and a plain list.
        assert len(node) == len(expected)
        for expected_item, node_item in zip(expected, node):
            assert expected_item == node_item
    node = List()
    node.extend(listing)
    assert_matches(node, listing)
    # Extending again simply concatenates.
    node.extend(listing)
    assert_matches(node, listing * 2)
    # The content must survive storing.
    node.store()
    assert_matches(node, listing * 2)
def test_insert(listing):
    """Test the ``List.insert()`` method."""
    node = List(listing)
    node.insert(1, 'new')
    # The new element lands at index 1 and grows the list by one.
    assert node[1] == 'new'
    assert len(node) == len(listing) + 1
def test_remove(listing):
    """Test the ``List.remove()`` method."""
    node = List(listing)
    # Note: ``remove(1)`` strips ``True`` from the payload, since True == 1.
    node.remove(1)
    listing.remove(1)
    assert node.get_list() == listing
    # Removing a missing value matches list.remove's error message.
    with pytest.raises(ValueError, match=r'list.remove\(x\): x not in list'):
        node.remove('non-existent')
def test_pop(listing):
    """Test the ``List.pop()`` method."""
    node = List(listing)
    node.pop()
    # pop() without arguments removes the last element.
    assert node.get_list() == listing[:-1]
def test_index(listing):
    """Test the ``List.index()`` method."""
    node = List(listing)
    # Must agree with the built-in list semantics.
    assert node.index(True) == listing.index(True)
def test_count(listing):
    """Test the ``List.count()`` method."""
    node = List(listing)
    # Every payload value must be counted exactly like in a plain list.
    for value in listing:
        assert node.count(value) == listing.count(value)
def test_sort(listing, int_listing):
    """Test the ``List.sort()`` method."""
    # Homogeneous content sorts like a plain list.
    node = List(int_listing)
    node.sort()
    int_listing.sort()
    assert node.get_list() == int_listing
    # Heterogeneous content (int vs str) cannot be ordered.
    node = List(listing)
    with pytest.raises(TypeError, match=r"'<' not supported between instances of 'int' and 'str'"):
        node.sort()
def test_reverse(listing):
    """Test the ``List.reverse()`` method."""
    node = List(listing)
    node.reverse()
    listing.reverse()
    # In-place reversal must mirror the built-in behavior.
    assert node.get_list() == listing
def METHOD_NAME(listing):
    """Test that the ``List`` node can be initialized with the ``list`` keyword argument for backwards compatibility."""
    # Pass the payload through the legacy ``list`` keyword, as the docstring
    # advertises; the previous positional call never exercised that path.
    node = List(list=listing)
    assert node.get_list() == listing
from datetime import timedelta
from decimal import Decimal
import pytest
from django.core.exceptions import ValidationError
from django.utils import timezone
from graphql.error import GraphQLError
from ....product.models import Category
from ..validators import (
clean_seo_fields,
validate_end_is_after_start,
validate_one_of_args_is_in_query,
validate_price_precision,
validate_slug_and_generate_if_needed,
)
@pytest.mark.parametrize(
    "value, currency",
    [
        (Decimal("1.1200"), "USD"),
        (Decimal("1.12"), "USD"),
        (Decimal("1"), "USD"),
        (Decimal("1"), "ISK"),
        (Decimal("1.00"), "ISK"),
        (Decimal("5.12"), None),
        (Decimal("1000"), "USD"),
    ],
)
def test_validate_price_precision(value, currency):
    """Amounts within the currency's decimal precision validate silently."""
    # when
    result = validate_price_precision(value, currency)
    # then
    assert result is None
@pytest.mark.parametrize(
    "value, currency",
    [
        (Decimal("1.1212"), "USD"),
        (Decimal("1.128"), "USD"),
        (Decimal("1.1"), "ISK"),
        (Decimal("1.11"), "ISK"),
        (Decimal("5.123"), None),
    ],
)
def test_validate_price_precision_raise_error(value, currency):
    """Amounts with more decimals than the currency allows are rejected."""
    with pytest.raises(ValidationError):
        validate_price_precision(value, currency)
def test_validate_end_is_after_start_raise_error():
    """An end date earlier than the start date raises a ValidationError."""
    start_date = timezone.now() + timedelta(days=365)
    end_date = timezone.now() - timedelta(days=365)
    with pytest.raises(ValidationError) as error:
        validate_end_is_after_start(start_date, end_date)
    assert error.value.message == "End date cannot be before the start date."
def test_validate_one_of_args_is_in_query():
    """Exactly one non-empty argument out of several is valid."""
    assert validate_one_of_args_is_in_query("arg1", "present", "arg2", None) is None
def test_validate_one_of_args_is_in_query_false_args():
    """All-empty arguments raise a GraphQLError naming the required args."""
    with pytest.raises(GraphQLError) as error:
        validate_one_of_args_is_in_query("arg1", None, "arg2", "")
    assert (
        error.value.message == "At least one of arguments is required: 'arg1', 'arg2'."
    )
def test_validate_one_of_args_is_in_query_more_than_one_true():
    """More than one provided argument raises a GraphQLError."""
    with pytest.raises(GraphQLError) as error:
        validate_one_of_args_is_in_query(
            "arg1", "present", "arg2", "present", "arg3", "present"
        )
    assert (
        error.value.message == "Argument 'arg1' cannot be combined with 'arg2', 'arg3'"
    )
def test_validate_one_of_args_is_in_query_single_arg():
    """A single provided argument validates silently."""
    assert validate_one_of_args_is_in_query("arg1", "present") is None
def test_validate_one_of_args_is_in_query_single_arg_absent():
    """A single missing argument raises a GraphQLError naming it."""
    with pytest.raises(GraphQLError) as error:
        # The call itself is expected to raise; the previous ``... is None``
        # comparison on a value that never materializes was a no-op.
        validate_one_of_args_is_in_query("arg1", None)
    assert error.value.message == "At least one of arguments is required: 'arg1'."
def test_clean_seo_fields():
    """The nested ``seo`` dict is flattened into seo_title/seo_description."""
    title = "lady title"
    description = "fantasy description"
    data = {"seo": {"title": title, "description": description}}
    clean_seo_fields(data)
    assert data["seo_title"] == title
    assert data["seo_description"] == description
def test_clean_seo_fields_accepts_null():
    """A ``None`` seo entry is dropped without raising."""
    data = {"seo": None}
    clean_seo_fields(data)
    assert not data
@pytest.mark.parametrize(
    "cleaned_input",
    [
        {"slug": None, "name": "test"},
        {"slug": "", "name": "test"},
        {"slug": ""},
        {"slug": None},
    ],
)
def test_validate_slug_and_generate_if_needed_raises_errors(category, cleaned_input):
    """An empty/missing slug on an existing instance raises ValidationError."""
    with pytest.raises(ValidationError):
        validate_slug_and_generate_if_needed(category, "name", cleaned_input)
@pytest.mark.parametrize(
    "cleaned_input", [{"slug": "test-slug"}, {"slug": "test-slug", "name": "test"}]
)
def test_validate_slug_and_generate_if_needed_not_raises_errors(
    category, cleaned_input
):
    """An explicitly provided slug passes validation untouched."""
    validate_slug_and_generate_if_needed(category, "name", cleaned_input)
@pytest.mark.parametrize(
    "cleaned_input",
    [
        {"slug": None, "name": "test"},
        {"slug": "", "name": "test"},
    ],
)
def METHOD_NAME(cleaned_input):
    """An empty slug on a new instance is generated from the ``name`` field."""
    # given
    category = Category(name="test")
    previous_slug_value = cleaned_input["slug"]
    # when
    validate_slug_and_generate_if_needed(category, "name", cleaned_input)
    # then
    assert previous_slug_value != cleaned_input["slug"]
    assert cleaned_input["slug"] == cleaned_input["name"]
@pytest.mark.parametrize(
    "cleaned_input",
    [
        {"slug": ""},
        {"slug": None},
        {"slug": "test-slug"},
        {"slug": "test-slug", "name": "test"},
    ],
)
def test_validate_slug_and_generate_if_needed_slug_not_changed(cleaned_input):
    """Without a new name (or with an explicit slug) the slug stays as-is."""
    # given
    category = Category(name="test")
    previous_slug_value = cleaned_input["slug"]
    # when
    validate_slug_and_generate_if_needed(category, "name", cleaned_input)
    # then
    assert cleaned_input["slug"] == previous_slug_value
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .request import Broker
from .vocabulary import ThreatExchange as t
from .vocabulary import ThreatExchangeMember as tem
from .errors import pytxAttributeError
class ThreatExchangeMember(object):
    """Represents a member of the ThreatExchange network."""

    _URL = t.URL + t.VERSION + t.THREAT_EXCHANGE_MEMBERS

    # Attributes used internally, never part of the API vocabulary.
    _internal = [
        "_access_token",
    ]
    _fields = [
        tem.ID,
        tem.NAME,
        tem.EMAIL,
    ]
    _default_fields = [
        tem.ID,
        tem.NAME,
        tem.EMAIL,
    ]
    _unique = []

    def __init__(self, **kwargs):
        """
        Initialize the object. Set the _access_token and any attributes that
        were provided.
        """
        for name, value in kwargs.items():
            self.__setattr__(name, value)

    def __getattr__(self, attr):
        """
        Get an attribute. If the attribute does not exist, return None
        """
        if attr not in self._fields and attr not in self._internal:
            raise pytxAttributeError("%s is not a valid attribute" % attr)
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            # Known field that was simply never set: treat it as absent.
            # (A bare ``except`` here used to swallow every exception.)
            return None

    def METHOD_NAME(self, attr):
        """
        Wrapper around __getattr__ making it easier to use the vocabulary to get
        class attributes.

        :param attr: The name of the attribute to get.
        :type attr: str
        """
        return self.__getattr__(attr)

    @classmethod
    def _get_generator(
        cls,
        url,
        to_dict=False,
        params=None,
        retries=None,
        headers=None,
        proxies=None,
        verify=None,
    ):
        """
        Send the GET request and return a generator.

        :param url: The URL to send the GET request to.
        :type url: str
        :param to_dict: Return a dictionary instead of an instantiated class.
        :type to_dict: bool
        :param params: The GET parameters to send in the request.
        :type params: dict
        :param retries: Number of retries to fetch a page before stopping.
        :type retries: int
        :param headers: header info for requests.
        :type headers: dict
        :param proxies: proxy info for requests.
        :type proxies: dict
        :param verify: verify info for requests.
        :type verify: bool, str
        :returns: Generator, dict (using json.loads())
        """
        if not params:
            params = dict()
        members = Broker.METHOD_NAME(
            url,
            params=params,
            retries=retries,
            headers=headers,
            proxies=proxies,
            verify=verify,
        ).METHOD_NAME(t.DATA, [])
        total = len(members)
        if total == t.MIN_TOTAL:
            yield None
        else:
            for member in members:
                if to_dict:
                    yield member
                else:
                    yield Broker.get_new(cls, member)

    @classmethod
    def objects(
        cls,
        full_response=False,
        dict_generator=False,
        retries=None,
        headers=None,
        proxies=None,
        verify=None,
    ):
        """
        Get a list of Threat Exchange Members

        :param full_response: Return the full response instead of the generator.
                              Takes precedence over dict_generator.
        :type full_response: bool
        :param dict_generator: Return a dictionary instead of an instantiated
                               object.
        :type dict_generator: bool
        :param retries: Number of retries to fetch a page before stopping.
        :type retries: int
        :param headers: header info for requests.
        :type headers: dict
        :param proxies: proxy info for requests.
        :type proxies: dict
        :param verify: verify info for requests.
        :type verify: bool, str
        :returns: Generator, dict (using json.loads())
        """
        if full_response:
            return Broker.METHOD_NAME(
                cls._URL,
                retries=retries,
                headers=headers,
                proxies=proxies,
                verify=verify,
            )
        else:
            return cls._get_generator(
                cls._URL,
                to_dict=dict_generator,
                retries=retries,
                headers=headers,
                proxies=proxies,
                verify=verify,
            )

    def to_dict(self):
        """
        Convert this object into a dictionary.

        :returns: dict
        """
        d = dict((n, getattr(self, n, None)) for n in self._fields)
        return d
import itertools
import random
import sys
import time
import unittest
import backend as F
import dgl
import networkx as nx
import numpy as np
import scipy.sparse as sp
from utils import parametrize_idtype
np.random.seed(42)
def toset(x):
    """Normalize a backend tensor into a Python set of its scalar values."""
    # F.zerocopy_to_numpy may return a int
    return set(F.zerocopy_to_numpy(x).tolist())
@parametrize_idtype
def test_bfs(idtype, n=100):
    """Compare DGL's BFS node/edge generators against a NetworkX reference."""
    def _bfs_nx(g_nx, src):
        # Reference BFS: rebuild per-level node sets and edge-id sets from
        # networkx's edge stream. NOTE(review): this closure reads ``g`` (the
        # DGL graph assigned below) to map (u, v) pairs onto DGL edge ids.
        edges = nx.bfs_edges(g_nx, src)
        layers_nx = [set([src])]
        edges_nx = []
        frontier = set()
        edge_frontier = set()
        for u, v in edges:
            if u in layers_nx[-1]:
                frontier.add(v)
                edge_frontier.add(g.edge_ids(int(u), int(v)))
            else:
                layers_nx.append(frontier)
                edges_nx.append(edge_frontier)
                frontier = set([v])
                edge_frontier = set([g.edge_ids(u, v)])
        # avoids empty successors
        if len(frontier) > 0 and len(edge_frontier) > 0:
            layers_nx.append(frontier)
            edges_nx.append(edge_frontier)
        return layers_nx, edges_nx
    # Random sparse graph: node layers must match the reference.
    a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))
    g = dgl.from_scipy(a).astype(idtype)
    g_nx = g.to_networkx()
    src = random.choice(range(n))
    layers_nx, _ = _bfs_nx(g_nx, src)
    layers_dgl = dgl.bfs_nodes_generator(g, src)
    assert len(layers_dgl) == len(layers_nx)
    assert all(toset(x) == y for x, y in zip(layers_dgl, layers_nx))
    # Random tree rooted at 0: edge frontiers must match as well.
    g_nx = nx.random_tree(n, seed=42)
    g = dgl.from_networkx(g_nx).astype(idtype)
    src = 0
    _, edges_nx = _bfs_nx(g_nx, src)
    edges_dgl = dgl.bfs_edges_generator(g, src)
    assert len(edges_dgl) == len(edges_nx)
    assert all(toset(x) == y for x, y in zip(edges_dgl, edges_nx))
@parametrize_idtype
def test_topological_nodes(idtype, n=100):
    """Compare topological layering against an SpMV-based reference traversal."""
    a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))
    # Keep only the strictly lower triangle so the graph is guaranteed acyclic.
    b = sp.tril(a, -1).tocoo()
    g = dgl.from_scipy(b).astype(idtype)
    layers_dgl = dgl.topological_nodes_generator(g)
    adjmat = g.adj_external(transpose=True)
    def tensor_topo_traverse():
        # Reference: repeatedly peel off the zero-in-degree frontier using
        # sparse matrix-vector products over a membership mask.
        n = g.num_nodes()
        mask = F.copy_to(F.ones((n, 1)), F.cpu())
        degree = F.spmm(adjmat, mask)
        while F.reduce_sum(mask) != 0.0:
            v = F.astype((degree == 0.0), F.float32)
            v = v * mask
            mask = mask - v
            frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
            yield frontier
            degree -= F.spmm(adjmat, v)
    layers_spmv = list(tensor_topo_traverse())
    assert len(layers_dgl) == len(layers_spmv)
    assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))
DFS_LABEL_NAMES = ["forward", "reverse", "nontree"]
@parametrize_idtype
def test_dfs_labeled_edges(idtype, example=False):
    """DFS edge/label streams must match one of the valid traversal orders."""
    dgl_g = dgl.graph([]).astype(idtype)
    dgl_g.add_nodes(6)
    dgl_g.add_edges([0, 1, 0, 2, 3, 3], [1, 2, 2, 4, 5]) if False else None  # noqa: E501  # NOTE(review): see original below
    dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5])
    dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator(
        dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True
    )
    dgl_edges = [toset(t) for t in dgl_edges]
    dgl_labels = [toset(t) for t in dgl_labels]
    # Each source's component admits two valid DFS orders; enumerate them.
    g1_solutions = [
        # edges labels
        [[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]],
        [[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]],
    ]
    g2_solutions = [
        # edges labels
        [[3, 3, 4, 4], [0, 1, 0, 1]],
        [[4, 4, 3, 3], [0, 1, 0, 1]],
    ]
    def METHOD_NAME(sol):
        # Zip the per-source solutions into step-aligned frontier sets,
        # padding the shorter traversal with empty slots (None filtered out).
        es, ls = zip(*sol)
        es = [
            set(i for i in t if i is not None)
            for t in itertools.zip_longest(*es)
        ]
        ls = [
            set(i for i in t if i is not None)
            for t in itertools.zip_longest(*ls)
        ]
        return es, ls
    for sol_set in itertools.product(g1_solutions, g2_solutions):
        es, ls = METHOD_NAME(sol_set)
        if es == dgl_edges and ls == dgl_labels:
            break
    else:
        # No candidate order matched the generated traversal.
        assert False
if __name__ == "__main__":
test_bfs(idtype="int32")
test_topological_nodes(idtype="int32")
test_dfs_labeled_edges(idtype="int32") |
from guardian.shortcuts import assign_perm
from kitsune.forums.tests import ForumFactory, ForumTestCase, PostFactory, ThreadFactory
from kitsune.sumo.tests import get, post
from kitsune.users.tests import GroupFactory, UserFactory
class BelongsTestCase(ForumTestCase):
    """
    Mixing and matching thread, forum, and post data in URLs should fail.
    """

    def test_posts_thread_belongs_to_forum(self):
        """Posts view - redirect if thread does not belong to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        r = get(self.client, "forums.posts", args=[f.slug, t.id])
        self.assertEqual(200, r.status_code)
        u = r.redirect_chain[0][0]
        assert u.endswith(t.get_absolute_url())

    def test_reply_thread_belongs_to_forum(self):
        """Reply action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.reply", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_locked_thread_belongs_to_forum(self):
        """Lock action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to lock threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.lock_forum_thread", g, f)
        assign_perm("forums.lock_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.lock_thread", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_sticky_thread_belongs_to_forum(self):
        """Sticky action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to sticky threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.sticky_forum_thread", g, f)
        assign_perm("forums.sticky_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.sticky_thread", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_edit_thread_belongs_to_forum(self):
        """Edit thread action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = t.creator
        self.client.login(username=u.username, password="testpass")
        r = get(self.client, "forums.edit_thread", args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_delete_thread_belongs_to_forum(self):
        """Delete thread action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to delete threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.delete_forum_thread", g, f)
        assign_perm("forums.delete_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = get(self.client, "forums.delete_thread", args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def METHOD_NAME(self):
        """Edit post action - post belongs to thread and thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory(forum=f)
        # Post belongs to a different forum and thread.
        p = PostFactory()
        u = p.author
        self.client.login(username=u.username, password="testpass")
        # Post isn't in the passed forum:
        r = get(self.client, "forums.edit_post", args=[f.slug, p.thread.id, p.id])
        self.assertEqual(404, r.status_code)
        # Post isn't in the passed thread:
        r = get(self.client, "forums.edit_post", args=[p.thread.forum.slug, t.id, p.id])
        self.assertEqual(404, r.status_code)

    def test_delete_post_belongs_to_thread_and_forum(self):
        """Delete post action - post belongs to thread and thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory(forum=f)
        # Post belongs to a different forum and thread.
        p = PostFactory()
        u = p.author
        # Give the user the permission to delete posts.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.delete_forum_thread_post", g, f)
        assign_perm("forums.delete_forum_thread_post", g, p.thread.forum)
        self.client.login(username=u.username, password="testpass")
        # Post isn't in the passed forum:
        r = get(self.client, "forums.delete_post", args=[f.slug, p.thread.id, p.id])
        self.assertEqual(404, r.status_code)
        # Post isn't in the passed thread:
        r = get(self.client, "forums.delete_post", args=[p.thread.forum.slug, t.id, p.id])
        self.assertEqual(404, r.status_code)
# Copyright 2013-2015 Tecnativa - Pedro M. Baeza
# Copyright 2017 Tecnativa - Vicent Cubells
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import fields
from odoo.tests import Form
from odoo.addons.account_payment_purchase.tests.test_account_payment_purchase import (
TestAccountPaymentPurchase,
)
class TestAccountPaymentPurchaseStock(TestAccountPaymentPurchase):
    def test_purchase_stock_order_invoicing(self):
        """Payment mode propagates from the PO to its invoice after receipt."""
        self.purchase.onchange_partner_id()
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        with Form(invoice) as inv:
            inv.purchase_id = self.purchase
        self.assertEqual(
            self.purchase.invoice_ids[0].payment_mode_id, self.payment_mode
        )

    def test_picking_from_purchase_order_invoicing(self):
        """A second PO with a different payment mode warns on the invoice."""
        # Test payment mode
        stockable_product = self.env["product.product"].create(
            {"name": "Test stockable product", "type": "product"}
        )
        self.purchase.order_line[0].product_id = stockable_product
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        invoice.purchase_id = self.purchase
        invoice._onchange_purchase_auto_complete()
        self.assertEqual(invoice.payment_mode_id, self.payment_mode)
        purchase2 = self.purchase.copy()
        payment_mode2 = self.payment_mode.copy()
        purchase2.payment_mode_id = payment_mode2
        purchase2.button_confirm()
        picking = purchase2.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice.purchase_id = purchase2
        result = invoice._onchange_purchase_auto_complete()
        self.assertEqual(
            result and result.get("warning", {}).get("title", False), "Warning"
        )

    def test_picking_from_purchase_order_invoicing_bank(self):
        """A second PO with a different partner bank warns on the invoice."""
        # Test partner_bank
        stockable_product = self.env["product.product"].create(
            {"name": "Test stockable product", "type": "product"}
        )
        self.purchase.order_line[0].product_id = stockable_product
        self.purchase.supplier_partner_bank_id = self.bank
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        invoice.purchase_id = self.purchase
        invoice._onchange_purchase_auto_complete()
        self.assertEqual(invoice.partner_bank_id, self.bank)
        purchase2 = self.purchase.copy()
        purchase2.supplier_partner_bank_id = self.bank2
        purchase2.button_confirm()
        picking = purchase2.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice.purchase_id = purchase2
        result = invoice._onchange_purchase_auto_complete()
        self.assertEqual(
            result and result.get("warning", {}).get("title", False), "Warning"
        )

    def METHOD_NAME(self):
        """A procurement run through the buy rule yields a PO with the payment mode."""
        route = self.env.ref("purchase_stock.route_warehouse0_buy")
        rule = self.env["stock.rule"].search([("route_id", "=", route.id)], limit=1)
        rule._run_buy(
            procurements=[
                (
                    self.env["procurement.group"].Procurement(
                        self.mto_product,
                        1,
                        self.mto_product.uom_id,
                        self.env["stock.location"].search([], limit=1),
                        "Procurement order test",
                        "Test",
                        rule.company_id,
                        {
                            "company_id": rule.company_id,
                            "date_planned": fields.Datetime.now(),
                        },
                    ),
                    rule,
                )
            ]
        )
        purchase = self.env["purchase.order"].search([("origin", "=", "Test")])
        self.assertEqual(purchase.payment_mode_id, self.payment_mode)
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regret Matching Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
class Solver(object):
  """Regret-matching Solver."""

  def __init__(self, optimism=True, discount=False, rnd_init=False, seed=None,
               **kwargs):
    """Ctor."""
    del kwargs
    self.num_players = None
    self.lrs = None
    self.optimism = optimism
    self.discount = discount
    self.rnd_init = rnd_init
    self.has_aux = True
    self.aux_errors = []
    self.seed = seed
    self.random = np.random.RandomState(seed)

  def METHOD_NAME(self, num_strats, num_players):
    """Initialize solver parameters.

    Returns a uniform (or random, if ``rnd_init``) distribution over
    strategies and a zero cumulative-regret vector.
    """
    self.num_players = num_players
    if self.rnd_init:
      init_dist = self.random.rand(num_strats)
    else:
      init_dist = np.ones(num_strats)
    init_dist /= init_dist.sum()
    init_regret = np.zeros(num_strats)
    return (init_dist, init_regret)

  def record_aux_errors(self, grads):
    """Record errors for the auxiliary variables."""
    grad_regret = grads[1]
    self.aux_errors.append([np.linalg.norm(grad_regret)])

  def compute_gradients(self, params, payoff_matrices):
    """Compute and return gradients (and exploitabilities) for all parameters.

    Args:
      params: tuple of params (dist, regret), see regmatch.gradients
      payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
    Returns:
      tuple of gradients (grad_dist, grad_regret), see ate.gradients
      unregularized exploitability (stochastic estimate)
      solver exploitability (stochastic estimate) - NaN
    """
    return gradients(*params, payoff_matrices)

  def exploitability(self, params, payoff_matrices):
    """Regret matching does not minimize any exploitability so return NaN.

    Args:
      params: tuple of params (dist,)
      payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
    Returns:
      np.nan
    """
    del params
    del payoff_matrices
    # np.nan (the np.NaN alias was removed in NumPy 2.0).
    return np.nan

  def update(self, params, grads, t):
    """Update cumulative regret and strategy (dist).

    Args:
      params: tuple of variables to be updated (dist, regret)
      grads: tuple of variable gradients (grad_dist, grad_regret)
      t: int, solver iteration (drives the discount factor when
        ``discount=True``; otherwise unused)
    Returns:
      new_params: tuple of update params (new_dist, new_regret)
    """
    dist, regret = params
    regret_delta = grads[1]
    if self.discount:
      gamma = t / float(t + 1)
    else:
      gamma = 1
    new_regret = gamma * regret + regret_delta
    # Optimistic variant adds the latest delta once more before clipping.
    new_clipped_regrets = np.clip(new_regret + self.optimism * regret_delta,
                                  0.,
                                  np.inf)
    if np.sum(new_clipped_regrets) > 0:
      new_dist = new_clipped_regrets / new_clipped_regrets.sum()
    else:
      # All regrets non-positive: fall back to the uniform distribution.
      new_dist = np.ones_like(dist) / dist.size
    new_params = (new_dist, new_regret)
    return new_params
def gradients(dist, regret, payoff_matrices):
  """Computes regret delta to be added to regret in update.

  Args:
    dist: 1-d np.array, current estimate of nash distribution
    regret: 1-d np.array (same shape as dist), current estimate of regrets
    payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
  Returns:
    deltas w.r.t. (dist, regret) as tuple
    unregularized exploitability (stochastic estimate)
    solver exploitability (stochastic estimate) - NaN
  """
  del regret
  nabla = payoff_matrices[0].dot(dist)
  utility = nabla.dot(dist)
  # The dist gradient is undefined for regret matching; np.nan is used
  # (the np.NaN alias was removed in NumPy 2.0).
  grad_dist = np.nan * np.ones_like(dist)
  grad_regret = nabla - utility
  unreg_exp = np.max(nabla) - nabla.dot(dist)
  return (grad_dist, grad_regret), unreg_exp, np.nan
# Copyright (C) 2015-2022, Wazuh Inc.
# Created by Wazuh, Inc. <info@wazuh.com>.
# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
import functools
import hashlib
import json
import logging
import socket
import sqlite3
import time
from wazuh_testing.tools import GLOBAL_DB_PATH, WAZUH_DB_SOCKET_PATH
from wazuh_testing.tools.monitoring import wazuh_pack, wazuh_unpack
from wazuh_testing.tools.services import control_service
def METHOD_NAME(item):
    """Extract and decode the response half of a wazuh-db callback item.

    Args:
        item: callback payload, expected to be a ``(data, response)`` tuple.

    Returns:
        str: decoded response when ``item`` is a tuple, ``None`` otherwise.
    """
    if isinstance(item, tuple):
        # Only the raw response bytes matter; the first element is unused.
        _, response = item
        return response.decode()
def mock_db(func):
    """Decorator used in any function that needs to mock a wazuh db

    This function will execute `func` after stopping wazuh-modulesd and wazuh-db. After that,
    it will start the daemons again

    Args:
        func (callable): function that will mock the cve.db

    Example:
        @mock_db
        def mock_agent_status(request, agent_id, agent_status):
    """
    @functools.wraps(func)
    def magic(*args, **kwargs):
        control_service('stop', daemon='wazuh-modulesd')
        try:
            # Propagate the wrapped function's return value to the caller
            # (it was previously discarded).
            return func(*args, **kwargs)
        finally:
            # Always restart the daemon, even if the mock function raised.
            control_service('start', daemon='wazuh-modulesd')
    return magic
def mock_agent(
        agent_id, name="centos8-agent", ip="127.0.0.1", register_ip="127.0.0.1", internal_key="",
        os_name="CentOS Linux", os_version="7.1", os_major="7", os_minor="1", os_codename="centos-8",
        os_build="4.18.0-147.8.1.el8_1.x86_64", os_platform="#1 SMP Thu Apr 9 13:49:54 UTC 2020",
        os_uname="x86_64", os_arch="x86_64", version="4.2", config_sum="", merged_sum="",
        manager_host="centos-8", node_name="node01", date_add="1612942494",
        last_keepalive="253402300799", group="", sync_status="synced", connection_status="active",
        client_key_secret=None):
    """Insert or replace a mocked agent row in the global database.

    All columns default to a plausible CentOS agent; override any keyword to
    shape the fixture. Integrity failures are logged instead of raised.
    """
    create_agent_query = f'''global sql INSERT OR REPLACE INTO AGENT
                             (id, name, ip, register_ip, internal_key, os_name, os_version, os_major, os_minor,
                             os_codename, os_build, os_platform, os_uname, os_arch, version, config_sum, merged_sum,
                             manager_host, node_name, date_add, last_keepalive, "group", sync_status, connection_status)
                             VALUES
                             ( {agent_id}, "{name}", "{ip}", "{register_ip}", "{internal_key}", "{os_name}", "{os_version}",
                             "{os_major}", "{os_minor}", "{os_codename}", "{os_build}", "{os_platform}", "{os_uname}",
                             "{os_arch}", "{version}", "{config_sum}", "{merged_sum}", "{manager_host}", "{node_name}",
                             "{date_add}", "{last_keepalive}", "{group}", "{sync_status}", "{connection_status}")
                             '''
    try:
        query_wdb(create_agent_query)
    except sqlite3.IntegrityError:
        logging.error("Failed to mock agent in database!")
def load_db(db_path):
    """Open a SQLite database and return its connection plus a cursor.

    Args:
        db_path (str): path to the database

    Returns:
        tuple: ``(connection, cursor)`` for the opened database.
    """
    connection = sqlite3.connect(db_path)
    return connection, connection.cursor()
@mock_db
def run_query(db_query, db_path=GLOBAL_DB_PATH):
    """Method used to run sqlite queries on wazuh databases

    This function will execute the sqlite3 query `db_query` in `db_path` database.
    The ``mock_db`` decorator stops wazuh-modulesd around the call.

    Args:
        db_query (string): sqlite3 valid query
        db_path (string): path to the database where the query will be run
    """
    conn, _ = load_db(db_path)
    try:
        # ``with conn`` commits the transaction on success, rolls back on error.
        with conn:
            conn.execute(db_query)
    finally:
        conn.close()
def get_query_result(query, db_path=GLOBAL_DB_PATH):
    """Return the result of a query in a specified DB

    Args:
        query (str): SQL query. (SELECT * ..)
        db_path (str): path to the database

    Returns:
        list: one comma-joined string per result row, each column rendered
        with ``str()``.
    """
    # Open the handles locally: the previous implementation leaked them into
    # module globals and its ``finally`` raised NameError when load_db failed.
    db, cursor = load_db(db_path)
    try:
        cursor.execute(query)
        return [', '.join(f'{item}' for item in row) for row in cursor.fetchall()]
    finally:
        cursor.close()
        db.close()
def query_wdb(command):
    """Make queries to wazuh-db using the wdb socket.

    Args:
        command (str): wazuh-db command alias. For example `global get-agent-info 000`.

    Returns:
        list: Query response data
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(WAZUH_DB_SOCKET_PATH)
    data = []
    try:
        # The request is prefixed with its packed length (see wazuh_pack);
        # the reply starts with a 4-byte packed payload-length header.
        sock.send(wazuh_pack(len(command)) + command.encode())
        rcv = sock.recv(4)
        if len(rcv) == 4:
            data_len = wazuh_unpack(rcv)
            data = sock.recv(data_len).decode()
            # Remove response header and cast str to list of dictionaries
            # From --> 'ok [ {data1}, {data2}...]' To--> [ {data1}, data2}...]
            if len(data.split()) > 1 and data.split()[0] == 'ok':
                data = json.loads(' '.join(data.split(' ')[1:]))
    finally:
        # NOTE(review): when the reply is not an 'ok ...' payload, the raw
        # decoded string (or the initial empty list) is returned as-is.
        sock.close()
    return data
def clean_agents_from_db():
    """
    Clean agents from DB
    """
    try:
        # Keep the manager row (id 0); drop everything else.
        query_wdb('global sql DELETE FROM agent WHERE id != 0')
    except Exception:
        raise Exception('Unable to clean agents')
def clean_groups_from_db():
    """
    Clean groups table from global.db
    """
    try:
        query_wdb('global sql DELETE FROM "group"')
    except Exception:
        raise Exception('Unable to clean groups table.')
def clean_belongs():
    """
    Clean belong table from global.db
    """
    try:
        query_wdb('global sql DELETE FROM belongs')
    except Exception:
        raise Exception('Unable to clean belongs table.')
def insert_agent_in_db(id=1, name='TestAgent', ip='any', registration_time=0, connection_status=0,
                       disconnection_time=0):
    """
    Write agent in global.db

    Inserts the agent row, then updates its connection status and
    disconnection time in a second statement.
    """
    insert_command = f'global insert-agent {{"id":{id},"name":"{name}","ip":"{ip}","date_add":{registration_time}}}'
    update_command = f'global sql UPDATE agent SET connection_status = "{connection_status}",\
                       disconnection_time = "{disconnection_time}" WHERE id = {id};'
    try:
        query_wdb(insert_command)
        query_wdb(update_command)
    except Exception:
        raise Exception(f"Unable to add agent {id}")
# Insert agents into DB and assign them into a group
def insert_agent_into_group(total_agents):
    """Insert ``total_agents`` agents, each appended to its own group.

    Agent ``i`` (1-based) is named ``Agent-test<i>`` and assigned to
    ``Test_group<i>`` with sync_status ``syncreq``.
    """
    for i in range(total_agents):
        id = i + 1
        name = 'Agent-test' + str(id)
        date = time.time()
        command = f'global insert-agent {{"id":{id},"name":"{name}","date_add":{date}}}'
        results = query_wdb(command)
        assert results == 'ok'
        command = f'''global set-agent-groups {{"mode":"append","sync_status":"syncreq",
            "source":"remote","data":[{{"id":{id},"groups":["Test_group{id}"]}}]}}'''
        results = query_wdb(command)
        assert results == 'ok'
def remove_agent(agent_id):
    """Function that wraps the needed queries to remove an agent.

    Args:
        agent_id (int): Unique identifier of an agent.
    """
    data = query_wdb(f"global delete-agent {agent_id}").split()
    # The error payload may be empty or span several words; join everything
    # after the status token instead of indexing data[1] (which can IndexError).
    assert data[0] == 'ok', f"Unable to remove agent {agent_id} - {' '.join(data[1:])}"
def calculate_global_hash():
    """Compute the global groups hash from the agents' per-agent group hashes.

    Returns:
        str: SHA-1 hex digest over the concatenated `group_hash` column values,
        ordered by agent id.
    """
    GET_GROUP_HASH = '''global sql SELECT group_hash FROM agent WHERE
                        id > 0 AND group_hash IS NOT NULL ORDER BY id'''

    rows = query_wdb(GET_GROUP_HASH)
    digest = hashlib.sha1()
    # Feeding each hash incrementally is equivalent to hashing the joined string.
    for row in rows:
        digest.update(row['group_hash'].encode())
    return digest.hexdigest()
#!/usr/bin/env python3
"""
manager.py - Script which acts as the user interface for schema management.
"""
import argparse
import json
import os
from schema_parser import LDAPSchemaParser
from generator import SchemaGenerator
localdir = os.path.dirname(os.path.abspath(__file__))
def generate(infile, schema_type=None, out_file=None):
    """Function generates the LDAP schema definitions from the JSON data.

    Args:
        infile (str): Path of the JSON file with the schema definitions.
        schema_type (str): The schema type to be generated (opendj);
            any other value produces the default schema format.
        out_file (str): Optional output path; prints to stdout when omitted.
    """
    # Context managers guarantee the handles are closed on error paths.
    with open(infile, 'r') as fp:
        json_text = fp.read()
    gen = SchemaGenerator(json_text)
    if schema_type == 'opendj':
        schema_str = gen.generate_ldif()
    else:
        schema_str = gen.generate_schema()

    if out_file:
        with open(out_file, 'w') as w:
            w.write(schema_str)
    else:
        print(schema_str)
def METHOD_NAME():
    """Function that generates the LDAP schemas for OpenDJ from the
    gluu_schema.json and custom_schema.json and puts them in their respective
    folders.
    """
    opendj_folder = os.path.join(os.path.dirname(localdir), 'static/opendj/')
    # (source json, output ldif) pairs are processed identically.
    schemas = [
        ('gluu_schema.json', '101-ox.ldif'),
        ('custom_schema.json', '77-customAttributes.ldif'),
    ]
    for src, dest in schemas:
        # `with` closes the handles even if generation fails midway.
        with open(os.path.join(localdir, src), 'r') as fp:
            json_text = fp.read()
        gen = SchemaGenerator(json_text)
        with open(os.path.join(opendj_folder, dest), 'w') as f:
            f.write(gen.generate_ldif())
def run_tests():
    """Function that runs the unit tests of the scripts in this package.
    """
    # TODO: no tests are implemented yet; placeholder for the `test` CLI action.
    pass
def make_json(filename):
    """Function that parses the input schema file and generates JSON.

    Args:
        filename (str): Path of the LDAP schema file to parse.
    """
    parser = LDAPSchemaParser(filename)
    definitions = parser.parse()
    schema_dict = {}
    # Numeric `kind` values reported by the parser, mapped to LDAP names.
    kinds = {0: 'STRUCTURAL', 1: 'ABSTRACT', 2: 'AUXILIARY'}

    objectclasses = []
    for obj in definitions['objectClasses']:
        props = ['oid', 'names', 'desc', 'must', 'may', 'sup', 'x_origin']
        # Keep only properties that are present and truthy; getattr with a
        # default avoids the hasattr + getattr double lookup.
        obcl = {prop: getattr(obj, prop) for prop in props if getattr(obj, prop, None)}
        # obcl['obsolete'] = obj.obsolete
        if obj.kind in kinds:
            obcl['kind'] = kinds[obj.kind]
        objectclasses.append(obcl)

    attributetypes = []
    for att in definitions['attributeTypes']:
        props = ['oid', 'names', 'desc', 'equality', 'substr', 'ordering',
                 'syntax', 'x_origin']
        attype = {prop: getattr(att, prop) for prop in props if getattr(att, prop, None)}
        # attype['no_user_mod'] = att.no_user_mod
        # attype['single_value'] = att.single_value
        # attype['obsolete'] = att.obsolete
        attributetypes.append(attype)

    schema_dict['objectClasses'] = objectclasses
    schema_dict['attributeTypes'] = attributetypes
    schema_dict['oidMacros'] = definitions['oidMacros']
    print(json.dumps(schema_dict, indent=4, sort_keys=True))
def make_schema_docs():
    """Print markdown documentation for the objectclasses and attributes
    defined in gluu_schema.json.
    """
    schema = os.path.join(localdir, 'gluu_schema.json')
    with open(schema) as f:
        data = json.load(f)
    objClasses = data['objectClasses']
    attTypes = data['attributeTypes']
    docs = ''
    for obj_class in objClasses:
        docs += "\n\n## {}".format(" (or) ".join(obj_class['names']))
        if 'desc' in obj_class:
            # Do NOT .encode() here: under Python 3 that would embed a bytes
            # repr (b'...') in the generated markdown.
            docs += "\n_{}_".format(obj_class['desc'])
        # An objectclass may define no optional attributes at all.
        for obj_attr in obj_class.get('may', []):
            attr_docs_added = False
            for attr_type in attTypes:
                if obj_attr in attr_type['names']:
                    docs += "\n* __{}__".format(" (or) ".join(attr_type['names']))
                    if 'desc' in attr_type:
                        docs += ": {}".format(attr_type['desc'])
                    attr_docs_added = True
                    break
            if not attr_docs_added:
                docs += "\n* __{}__".format(obj_attr)
    print(docs)
if __name__ == '__main__':
    # Command line dispatcher: each `action` maps to one function above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "action", help="the action you want to perform.",
        choices=["autogenerate", "generate", "makejson", "makedocs", "test"])
    parser.add_argument(
        "--type", help="the schema type you want to generate",
        choices=["opendj"])
    parser.add_argument(
        "--filename", help="the input file for various actions")
    args = parser.parse_args()

    if args.action == 'generate':
        if args.filename:
            generate(args.filename, args.type)
        else:
            print("No JSON Input. Specify a JSON file with --filename")
    elif args.action == 'test':
        run_tests()
    elif args.action == 'makejson':
        if args.filename:
            make_json(args.filename)
        else:
            print("No Schema Input. Specify schema file with --filename")
    elif args.action == 'autogenerate':
        # Regenerates the bundled OpenDJ schema files in place.
        METHOD_NAME()
    elif args.action == 'makedocs':
        make_schema_docs()
# Copyright (c) 2023 zfit
import numpy as np
import tensorflow as tf
import zfit.z.numpy as znp
SWITCH_ON = True
def is_tensor(x):
    """Return True if *x* is a TensorFlow tensor."""
    return tf.is_tensor(x)
def has_tensor(x):
    """Return True if any leaf of the (possibly nested) structure *x* is a TF tensor."""
    return any(tf.is_tensor(t) for t in tf.nest.flatten(x))
def allclose_anyaware(x, y, rtol=1e-5, atol=1e-8):
    """Tests if x and y are close by first testing equality (with numpy), then within the limits.

    The prepended equality test allow for ANY objects to compare positively if the x and y have the shape (1, n)
    with n arbitrary

    Args:
        x: Left-hand side of the comparison.
        y: Right-hand side of the comparison.
        rtol: Relative tolerance.
        atol: Absolute tolerance.

    Returns:
        bool or array/tensor of truth values.
    """
    if not SWITCH_ON or has_tensor([x, y]):
        # Tensor path: same formula numpy uses, |x - y| <= atol + rtol * |y|.
        return znp.all(znp.less_equal(znp.abs(x - y), znp.abs(y) * rtol + atol))
    else:
        x = np.array(x)
        y = np.array(y)
        if any(ar.dtype == object for ar in (x, y)):
            # Object arrays can hold LimitRangeDefinition instances, which do
            # not support subtraction; compare them pairwise instead.
            from zfit.core.space import LimitRangeDefinition

            equal = []
            for x1, y1 in zip(x[0], y[0]):
                if isinstance(x1, LimitRangeDefinition) or isinstance(
                    y1, LimitRangeDefinition
                ):
                    # NOTE(review): relies on LimitRangeDefinition's overloaded
                    # `<`/`>`; for plain numbers `x1 < y1 or x1 > y1` would mean
                    # *not* equal -- confirm intended semantics against that class.
                    equal.append(x1 < y1 or x1 > y1)
                else:
                    equal.append(np.allclose(x1, y1, rtol=rtol, atol=atol))
            # Restore the (1, n) shape expected by callers.
            allclose = np.array(equal)[None, :]
        else:
            allclose = np.allclose(x, y, rtol=rtol, atol=atol)

        return allclose
def broadcast_to(input, shape):
    """Broadcast *input* to *shape*, via TF for tensors and numpy otherwise."""
    if not SWITCH_ON or is_tensor(input):
        return tf.broadcast_to(input, shape)
    else:
        return np.broadcast_to(input, shape)
def expand_dims(input, axis):
    """Insert a new axis at *axis*, via znp for tensors and numpy otherwise."""
    if not SWITCH_ON or has_tensor(input):
        return znp.expand_dims(input, axis)
    else:
        return np.expand_dims(input, axis)
def reduce_prod(input_tensor, axis=None, keepdims=None):
    """Product over *axis*, dispatching to znp for tensors and numpy otherwise."""
    if not SWITCH_ON or has_tensor(input_tensor):
        return znp.prod(input_tensor, axis, keepdims=keepdims)
    else:
        if keepdims is None:
            # np.prod's keepdims default is a sentinel, not None; only forward
            # the argument when the caller actually supplied one.
            return np.prod(input_tensor, axis)
        else:
            return np.prod(input_tensor, axis, keepdims=keepdims)
def equal(x, y):
    """Elementwise equality, via znp if either argument is a tensor."""
    if not SWITCH_ON or is_tensor(x) or is_tensor(y):
        return znp.equal(x, y)
    else:
        return np.equal(x, y)
def reduce_all(input_tensor, axis=None):
    """All-reduction over *axis*, flattening nested structures when axis is None."""
    if not SWITCH_ON or has_tensor(input_tensor):
        if axis is None:
            # Flatten the (possibly nested) structure so znp.all sees a flat
            # list of 1-D tensors.
            input_tensor = [
                znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor)
            ]
        return znp.all(input_tensor, axis)
    else:
        out = np.all(input_tensor, axis)
        if out.shape == (1,):
            # Unwrap single-element results to a scalar.
            out = out[0]
        return out
def reduce_any(input_tensor, axis=None):
    """Any-reduction over *axis*, flattening nested structures when axis is None."""
    if not SWITCH_ON or has_tensor(input_tensor):
        if axis is None:
            # Flatten the (possibly nested) structure so znp.any sees a flat
            # list of 1-D tensors.
            input_tensor = [
                znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor)
            ]
        return znp.any(input_tensor, axis)
    else:
        out = np.any(input_tensor, axis)
        if out.shape == (1,):
            # Unwrap single-element results to a scalar.
            out = out[0]
        return out
def logical_and(x, y):
    """Elementwise AND, via znp if either argument contains a tensor."""
    if not SWITCH_ON or has_tensor(x) or has_tensor(y):
        return znp.logical_and(x, y)
    else:
        return np.logical_and(x, y)
def logical_or(x, y):
    """Elementwise OR, via znp if either argument contains a tensor."""
    if not SWITCH_ON or has_tensor(x) or has_tensor(y):
        return znp.logical_or(x, y)
    else:
        return np.logical_or(x, y)
def less_equal(x, y):
    """Elementwise x <= y, via znp if either argument contains a tensor."""
    if not SWITCH_ON or has_tensor(x) or has_tensor(y):
        return znp.less_equal(x, y)
    else:
        return np.less_equal(x, y)
def greater_equal(x, y):
    """Elementwise x >= y, via znp if either argument contains a tensor."""
    if not SWITCH_ON or has_tensor(x) or has_tensor(y):
        return znp.greater_equal(x, y)
    else:
        return np.greater_equal(x, y)
def gather(x, indices=None, axis=None):
    """Gather elements along *axis*: tf.gather for tensors, np.take otherwise."""
    if not SWITCH_ON or has_tensor(x):
        return tf.gather(x, indices=indices, axis=axis)
    else:
        return np.take(x, indices=indices, axis=axis)
def METHOD_NAME(values, axis):
    """Concatenate *values* along *axis*, using znp for tensors and numpy otherwise."""
    use_numpy = SWITCH_ON and not has_tensor(values)
    if use_numpy:
        return np.concatenate(values, axis=axis)
    return znp.concatenate(values, axis=axis)
def _try_convert_numpy(tensorlike):
    """Convert *tensorlike* to a numpy array, raising a descriptive error if impossible."""
    if hasattr(tensorlike, "numpy"):
        # Eager tensors expose .numpy(); the result may still not be an ndarray.
        tensorlike = tensorlike.numpy()

    if not isinstance(tensorlike, np.ndarray):
        from zfit.util.exception import CannotConvertToNumpyError

        raise CannotConvertToNumpyError(
            f"Cannot convert {tensorlike} to a Numpy array. This may be because the"
            f" object is a Tensor and the function is called in Graph mode (e.g. in"
            f"a `z.function` decorated function.\n"
            f"If this error appears and is not understandable, it is most likely a bug."
            f" Please open an issue on Github."
        )
    return tensorlike
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from textwrap import dedent
import pytest
import yaml
from mlrun.runtimes import funcdoc
from tests.conftest import tests_root_directory
def load_rst_cases(name):
    """Yield pytest params ``(text, expected)`` from a YAML case file.

    Args:
        name (str): File name under tests/runtimes containing the cases.
    """
    with open(tests_root_directory / "runtimes" / name) as fp:
        # yaml.load without an explicit Loader is deprecated (a TypeError in
        # PyYAML >= 6); the fixture is plain data, so safe_load suffices.
        data = yaml.safe_load(fp)

    for i, case in enumerate(data):
        # Don't shadow the `name` parameter with the per-case name.
        case_name = case.get("name", "")
        tid = f"{i} - {case_name}"
        yield pytest.param(case["text"], case["expected"], id=tid)
@pytest.mark.parametrize("text, expected", load_rst_cases("rst_cases.yml"))
def METHOD_NAME(text, expected):
    """Check funcdoc.parse_rst against the expectations in rst_cases.yml."""
    doc, params, ret = funcdoc.parse_rst(text)
    assert expected["doc"].strip() == doc.strip(), "doc"
    assert expected["params"] == params, "params"
    assert expected["ret"] == ret, "ret"
def is_ast_func(obj):
    """Return True if *obj* is an ``ast.FunctionDef`` node."""
    return isinstance(obj, ast.FunctionDef)
def ast_func(code):
    """Parse *code* and return its single top-level function definition node."""
    matches = [
        node for node in ast.parse(code).body if isinstance(node, ast.FunctionDef)
    ]
    assert len(matches) == 1, f"{len(matches)} functions in:\n{code}"
    return matches[0]
def eval_func(code):
    """Execute *code* and return the single callable it defines."""
    namespace = {}
    exec(code, None, namespace)
    callables = [value for value in namespace.values() if callable(value)]
    assert len(callables) == 1, f"more than one function in:\n{code}"
    return callables[0]
# Pairs of (info extractor, converter): funcdoc.func_info works on live
# function objects, funcdoc.ast_func_info on AST nodes.
info_handlers = [
    (funcdoc.func_info, eval_func),
    (funcdoc.ast_func_info, ast_func),
]
def load_info_cases():
    """Yield pytest params ``(info_fn, obj, expected)`` from info_cases.yml,
    one per (case, extractor) combination.
    """
    with open(tests_root_directory / "runtimes" / "info_cases.yml") as fp:
        # yaml.load without a Loader is deprecated (a TypeError in PyYAML >= 6);
        # the fixture is plain data, so safe_load suffices.
        cases = yaml.safe_load(fp)
    for case in cases:
        for info_fn, conv in info_handlers:
            obj = conv(case["code"])
            tid = f'{case["id"]}-{info_fn.__name__}'
            expected = case["expected"].copy()
            # No line info in evaled functions
            if info_fn is funcdoc.func_info:
                expected["lineno"] = -1
            yield pytest.param(info_fn, obj, expected, id=tid)
@pytest.mark.parametrize("info_fn, obj, expected", load_info_cases())
def test_func_info(info_fn, obj, expected):
    """Both extractors should produce the expected metadata for each case."""
    out = info_fn(obj)
    assert expected == out
# Fixture for test_find_handlers: only the function annotated with the
# "# mlrun:handler" marker must be picked up (inc, defined on line 6).
find_handlers_code = """
def dec(n):
    return n - 1

# mlrun:handler
def inc(n):
    return n + 1
"""

find_handlers_expected = [
    {
        "name": "inc",
        "doc": "",
        "return": funcdoc.param_dict(),
        "params": [funcdoc.param_dict("n")],
        "lineno": 6,
        "has_varargs": False,
        "has_kwargs": False,
    },
]


def test_find_handlers():
    """find_handlers should return only the marked handler with its metadata."""
    funcs = funcdoc.find_handlers(find_handlers_code)
    assert funcs == find_handlers_expected
# Literal expressions that funcdoc.ast_code must round-trip verbatim.
ast_code_cases = [
    "{'x': 1, 'y': 2}",
    "dict(x=1, y=2)",
    "{}",
    "[1, 2]",
    "[]",
    "(1, 2)",
    "()",
    "{1, 2}",
    "set()",
    "Point(1, 2)",
    "3",
    "'hello'",
    "None",
]
@pytest.mark.parametrize("expr", ast_code_cases)
def test_ast_code(expr):
    """ast_code should reproduce the exact source of a literal expression."""
    node = ast.parse(expr).body[0].value
    code = funcdoc.ast_code(node)
    assert expr == code
def test_ast_none():
    """ast_func_info must not crash on a ``-> None`` return annotation."""
    code = """
    def fn() -> None:
        pass
    """
    fn: ast.FunctionDef = ast.parse(dedent(code)).body[0]
    funcdoc.ast_func_info(fn)
@pytest.mark.parametrize(
"func_code,expected_has_varargs,expected_has_kwargs",
[
(
"""
def fn(p1,p2,*args,**kwargs) -> None:
pass
""",
True,
True,
),
(
"""
def fn(p1,p2,*args) -> None:
pass
""",
True,
False,
),
(
"""
def fn(p1,p2,**kwargs) -> None:
pass
""",
False,
True,
),
(
"""
def fn(p1,p2) -> None:
pass
""",
False,
False,
),
(
"""
def fn(p1,p2,**something) -> None:
pass
""",
False,
True,
),
],
)
def test_ast_func_info_with_kwargs_and_args(
func_code, expected_has_varargs, expected_has_kwargs
):
fn: ast.FunctionDef = ast.parse(dedent(func_code)).body[0]
func_info = funcdoc.ast_func_info(fn)
assert func_info["has_varargs"] == expected_has_varargs
assert func_info["has_kwargs"] == expected_has_kwargs
def test_ast_compound():
    """Check parameter types extracted from the large signature in arc.txt."""
    param_types = []
    with open(f"{tests_root_directory}/runtimes/arc.txt") as fp:
        code = fp.read()

    # collect the types of the function parameters
    # assumes each param is in a new line for simplicity
    for line in code.splitlines()[3:15]:
        if ":" not in line:
            param_types.append(None)
            continue
        param_type = line[line.index(":") + 1 :]
        if "=" in param_type:
            # Strip a default value, keeping only the annotation text.
            param_type = param_type[: param_type.index("=")]
        # Drop the trailing comma before stripping whitespace.
        param_type = param_type[:-1].strip()
        param_types.append(param_type)

    fn = ast_func(code)
    info = funcdoc.ast_func_info(fn)
    for i, param in enumerate(info["params"]):
        if i in (4, 8):
            # NOTE(review): indices 4 and 8 are skipped -- presumably their
            # annotations span several lines in arc.txt, which breaks the
            # one-param-per-line assumption above; confirm against the fixture.
            continue
        assert (
            param["type"] == param_types[i]
        ), f"param at index {i} has a bad type value. param: {param}"
# Fixture: handlers whose name starts with an underscore are private.
underscore_code = """
def info(message):
    _log('INFO', message)

def warning(message):
    _log('WARNING', message)

def _log(level, message):
    print(f'{level} - {message}')
"""


def test_ignore_underscore():
    """find_handlers must skip functions whose name starts with '_'."""
    funcs = funcdoc.find_handlers(underscore_code)
    names = {fn["name"] for fn in funcs}
    assert {"info", "warning"} == names, "names"
def test_annotate_mod():
    """Annotations like ``mlrun.DataItem`` should be reported by short name."""
    code = """
    import mlrun

    def handler(data: mlrun.DataItem):
        ...
    """
    handlers = funcdoc.find_handlers(dedent(code))
    param = handlers[0]["params"][0]
    assert param["type"] == "DataItem"
"""Support for audio output
The `audioio` module contains classes to provide access to audio IO.
All classes change hardware state and should be deinitialized when they
are no longer needed if the program continues after use. To do so, either
call :py:meth:`!deinit` or use a context manager. See
:ref:`lifetime-and-contextmanagers` for more info.
For more information on working with this module, refer to the
`CircuitPython Essentials Learn Guide
<https://learn.adafruit.com/circuitpython-essentials/circuitpython-audio-out>`_.
Since CircuitPython 5, `RawSample` and `WaveFile` are moved
to :mod:`audiocore`, and `Mixer` is moved to :mod:`audiomixer`.
For compatibility with CircuitPython 4.x, some builds allow the items in
`audiocore` to be imported from `audioio`. This will be removed for all
boards in a future build of CircuitPython."""
from __future__ import annotations
from typing import Optional
import circuitpython_typing
import microcontroller
class AudioOut:
"""Output an analog audio signal"""
def __init__(
self,
left_channel: microcontroller.Pin,
*,
right_channel: Optional[microcontroller.Pin] = None,
quiescent_value: int = 0x8000,
) -> None:
"""Create a AudioOut object associated with the given pin(s). This allows you to
play audio signals out on the given pin(s).
:param ~microcontroller.Pin left_channel: The pin to output the left channel to
:param ~microcontroller.Pin right_channel: The pin to output the right channel to
:param int quiescent_value: The output value when no signal is present. Samples should start
and end with this value to prevent audible popping.
Simple 8ksps 440 Hz sin wave::
import audiocore
import audioio
import board
import array
import time
import math
# Generate one period of sine wav.
length = 8000 // 440
sine_wave = array.array("H", [0] * length)
for i in range(length):
sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)
dac = audioio.AudioOut(board.SPEAKER)
sine_wave = audiocore.RawSample(sine_wave, sample_rate=8000)
dac.play(sine_wave, loop=True)
time.sleep(1)
dac.stop()
Playing a wave file from flash::
import board
import audioio
import digitalio
# Required for CircuitPlayground Express
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.switch_to_output(value=True)
data = open("cplay-5.1-16bit-16khz.wav", "rb")
wav = audiocore.WaveFile(data)
a = audioio.AudioOut(board.A0)
print("playing")
a.play(wav)
while a.playing:
pass
print("stopped")"""
...
def deinit(self) -> None:
"""Deinitialises the AudioOut and releases any hardware resources for reuse."""
...
def __enter__(self) -> AudioOut:
"""No-op used by Context Managers."""
...
def __exit__(self) -> None:
"""Automatically deinitializes the hardware when exiting a context. See
:ref:`lifetime-and-contextmanagers` for more info."""
...
def play(
self, sample: circuitpython_typing.AudioSample, *, loop: bool = False
) -> None:
"""Plays the sample once when loop=False and continuously when loop=True.
Does not block. Use `playing` to block.
Sample must be an `audiocore.WaveFile`, `audiocore.RawSample`, `audiomixer.Mixer` or `audiomp3.MP3Decoder`.
The sample itself should consist of 16 bit samples. Microcontrollers with a lower output
resolution will use the highest order bits to output. For example, the SAMD21 has a 10 bit
DAC that ignores the lowest 6 bits when playing 16 bit samples."""
...
def stop(self) -> None:
"""Stops playback and resets to the start of the sample."""
...
playing: bool
"""True when an audio sample is being output even if `paused`. (read-only)"""
def METHOD_NAME(self) -> None:
"""Stops playback temporarily while remembering the position. Use `resume` to resume playback."""
...
def resume(self) -> None:
"""Resumes sample playback after :py:func:`pause`."""
...
paused: bool
"""True when playback is paused. (read-only)""" |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
import pytest
from bentoml._internal.configuration.helpers import flatten_dict
from bentoml._internal.configuration.helpers import is_valid_ip_address
from bentoml._internal.configuration.helpers import load_config_file
from bentoml._internal.configuration.helpers import rename_fields
from bentoml.exceptions import BentoMLConfigException
if TYPE_CHECKING:
from pathlib import Path
from _pytest.logging import LogCaptureFixture
def test_flatten_dict():
    """flatten_dict joins nested keys with '.', quoting keys that contain one."""
    assert dict(flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})) == {
        "a": 1,
        "b.c": 2,
        "b.d.e": 3,
    }
    assert dict(
        flatten_dict({"runners": {"iris_clf": {"nvidia.com/gpu": [0, 1]}}})
    ) == {'runners.iris_clf."nvidia.com/gpu"': [0, 1]}
    assert dict(flatten_dict({"a": 1, "b": 2}, sep="_")) == {"a": 1, "b": 2}
def test_rename_fields_field_in_dict():
    """A present field is renamed and keeps its value."""
    # If given field is in the dictionary, it will be renamed
    d = {"a": 1, "b": 2}
    rename_fields(d, "a", "x")
    assert "a" not in d
    assert "x" in d
    assert d["x"] == 1
    assert d["b"] == 2
def test_rename_fields_field_not_in_dict():
    """Renaming a missing field is a no-op."""
    # If given field is not in the dictionary, nothing will happen
    d = {"a": 1, "b": 2}
    rename_fields(d, "c", "d")
    assert "a" in d
    assert "b" in d
    assert d["a"] == 1
    assert d["b"] == 2
def test_rename_fields_remove_only():
    """remove_only=True deletes the field instead of renaming it."""
    # If given field is in the dictionary, and remove_only is True, it will be removed.
    d = {"a": 1, "b": 2}
    rename_fields(d, "a", remove_only=True)
    assert "a" not in d
    rename_fields(d, "b", remove_only=True)
    assert len(d) == 0
def test_rename_fields_check_log(caplog: LogCaptureFixture):
    """A rename should emit a deprecation warning naming both fields."""
    d = {"api_server.port": 5000}
    with caplog.at_level(logging.WARNING):
        rename_fields(d, "api_server.port", "api_server.http.port")
    assert (
        "Field 'api_server.port' is deprecated and has been renamed to 'api_server.http.port'"
        in caplog.text
    )
    assert "api_server.http.port" in d and d["api_server.http.port"] == 5000
def METHOD_NAME(caplog: LogCaptureFixture):
    """remove_only renames should log a removal warning and drop the field."""
    d = {"api_server.port": 5000}
    with caplog.at_level(logging.WARNING):
        rename_fields(d, "api_server.port", remove_only=True)
    assert "Field 'api_server.port' is deprecated and will be removed." in caplog.text
    assert len(d) == 0
def test_rename_fields_exception():
    """rename_fields must reject missing replace_with and unflattened dicts."""
    # If no replace_with field is given, an AssertionError will be raised
    d = {"api_server.port": 5000}
    with pytest.raises(AssertionError, match="'replace_with' must be provided."):
        rename_fields(d, "api_server.port")
    with pytest.raises(AssertionError, match="'replace_with' must be provided."):
        rename_fields(d, "api_server.port", remove_only=False)

    # If the given dictionary is not flattened, a ValueError will be raised
    d = {"a": 1, "b": {"c": 2}}
    with pytest.raises(ValueError, match="Given dictionary is not flattened. *"):
        rename_fields(d, "b.c", "b.d.c")

    # If the given dictionary is not flattened + no replace_with field is given, a ValueError will be raised
    d = {"a": 1, "b": {"c": 2}}
    with pytest.raises(ValueError, match="Given dictionary is not flattened. *"):
        rename_fields(d, "b.c")
def test_valid_load_config_file(tmp_path: Path):
    """load_config_file should parse a well-formed YAML configuration."""
    config = tmp_path / "configuration.yaml"
    config.write_text("api_server:\n port: 5000")
    assert load_config_file(config.__fspath__()) == {"api_server": {"port": 5000}}
def test_invalid_load_config_file():
    """Missing configuration paths (POSIX or Windows style) raise a clear error."""
    with pytest.raises(BentoMLConfigException) as e:
        load_config_file("/tmp/nonexistent.yaml")
    assert "Configuration file /tmp/nonexistent.yaml not found." in str(e.value)

    with pytest.raises(BentoMLConfigException) as e:
        load_config_file("\\tmp\\invalid.yaml")
    assert "Configuration file \\tmp\\invalid.yaml not found." in str(e.value)
def test_valid_ip_address():
    """Well-formed dotted-quad IPv4 addresses are accepted."""
    assert is_valid_ip_address("0.0.0.0")
    assert is_valid_ip_address("192.192.192.192")
    assert is_valid_ip_address("255.255.255.255")
def test_invalid_ip_address():
    """Malformed addresses, out-of-range octets and empty input are rejected."""
    assert not is_valid_ip_address("asdfadsf:143")
    assert not is_valid_ip_address("asdfadsf")
    assert not is_valid_ip_address("0.0.0.0.0")
    assert not is_valid_ip_address("0.0.0.")
    assert not is_valid_ip_address(".0.0.0")
    assert not is_valid_ip_address("x.0.0.0")
    assert not is_valid_ip_address("255.255.255.256")
    assert not is_valid_ip_address("255.255.256.255")
    assert not is_valid_ip_address("255.256.255.255")
    assert not is_valid_ip_address("256.255.255.255")
    assert not is_valid_ip_address("256.256.256.256")
    assert not is_valid_ip_address("")
#!/usr/bin/env python
############################################################################
#
# MODULE: r.out.kde
# AUTHOR(S): Anna Petrasova
#
# PURPOSE:
# COPYRIGHT: (C) 2013 - 2019 by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
# %module
# % description: Exports raster with variable transparency into an image file
# % keyword: raster
# % keyword: kernel density
# % keyword: visualization
# % keyword: transparency
# % keyword: heatmap
# %end
# %option G_OPT_R_INPUT
# % description: Raster map to be rendered with semi-transparency
# %end
# %option G_OPT_R_INPUT
# % key: background
# % description: Background raster map
# %end
# %option G_OPT_F_OUTPUT
# % description: Rendered output file
# %end
# %option
# % key: method
# % type: string
# % options: linear,logistic
# % description: Method to scale transparency
# %end
import os
import tempfile
import atexit
import shutil
from math import exp
import grass.script as gscript
TMPRAST = []
TMPDIR = tempfile.mkdtemp()
def cleanup():
    """Remove the temporary rasters and the temporary directory (atexit hook)."""
    gscript.run_command(
        "g.remove", name=",".join(TMPRAST), flags="f", type="raster", quiet=True
    )
    shutil.rmtree(TMPDIR)
def main(rinput, background, output, method):
    """Render *rinput* over *background* with intensity-based transparency.

    Args:
        rinput (str): Name of the raster rendered semi-transparently.
        background (str): Name of the background raster.
        output (str): Path of the image file to write.
        method (str): Transparency scaling method, "linear" or "logistic".
    """
    try:
        from PIL import Image
    except ImportError:
        gscript.fatal("Cannot import PIL." " Please install the Python pillow package.")

    if "@" in rinput:
        # Strip the mapset part of a fully qualified map name.
        rinput = rinput.split("@")[0]
    suffix = "_" + os.path.basename(gscript.tempfile(False))
    tmpname = rinput + suffix
    gscript.run_command("g.copy", raster=[rinput, tmpname])
    TMPRAST.append(tmpname)
    # Grey color table so the rendered copy encodes intensity as brightness.
    gscript.run_command("r.colors", map=tmpname, color="grey")

    reg = gscript.region()
    width = reg["cols"]
    height = reg["rows"]
    fg_out = os.path.join(TMPDIR, "foreground.png")
    bg_out = os.path.join(TMPDIR, "background.png")
    intensity_tmp = os.path.join(TMPDIR, "intensity.png")

    # Render the foreground raster on a black canvas.
    gscript.run_command(
        "d.mon",
        start="cairo",
        output=fg_out,
        width=width,
        height=height,
        bgcolor="black",
    )
    gscript.run_command("d.rast", map=rinput)
    gscript.run_command("d.mon", stop="cairo")
    # background
    gscript.run_command(
        "d.mon", start="cairo", output=bg_out, width=width, height=height
    )
    gscript.run_command("d.rast", map=background)
    gscript.run_command("d.mon", stop="cairo")
    # greyscale
    gscript.run_command(
        "d.mon", start="cairo", output=intensity_tmp, width=width, height=height
    )
    gscript.run_command("d.rast", map=tmpname)
    gscript.run_command("d.mon", stop="cairo")

    # put together with transparency
    foreground = Image.open(fg_out)
    # NOTE: rebinds the raster-name parameter to the rendered PIL image.
    background = Image.open(bg_out)
    intensity = Image.open(intensity_tmp)

    foreground = foreground.convert("RGBA")
    data_f = foreground.getdata()
    data_i = intensity.getdata()
    newData = []
    for i in range(len(data_f)):
        # First channel of the greyscale rendering is the intensity.
        intens = data_i[i][0]
        if intens == 0:
            # Fully transparent where the input raster rendered as black/NULL.
            newData.append((data_f[i][0], data_f[i][1], data_f[i][2], 0))
        else:
            newData.append(
                (
                    data_f[i][0],
                    data_f[i][1],
                    data_f[i][2],
                    METHOD_NAME(0, 255, intens, method),
                )
            )
    foreground.putdata(newData)
    background.paste(foreground, (0, 0), foreground)
    background.save(output)
def METHOD_NAME(cmin, cmax, intens, method):
    """Map an intensity value to an alpha value in [0, 255].

    Args:
        cmin (float): Minimum of the input intensity range.
        cmax (float): Maximum of the input intensity range.
        intens (float): Intensity value to rescale.
        method (str): "logistic" for a sigmoid mapping; anything else is linear.

    Returns:
        int: Scaled value, truncated to an integer.

    Raises:
        ValueError: If cmin equals cmax (degenerate range).
    """
    if cmax == cmin:
        raise ValueError("cmax and cmin must differ")
    # scale to 0 - 1
    val = (intens - cmin) / float(cmax - cmin)
    if method == "logistic":
        # Sigmoid centred at 0.5 with steepness 10.
        val = 1.0 / (1 + exp(-10 * (val - 0.5)))
    val *= 255
    return int(val)
if __name__ == "__main__":
    # Parse the standard GRASS options, register cleanup, then render.
    options, flags = gscript.parser()
    rinput = options["input"]
    bg = options["background"]
    output = options["output"]
    method = options["method"]
    atexit.register(cleanup)
    main(rinput, bg, output, method)
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for defi utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
# import binascii # TODO: (temp) it's used in bctest
import configparser
# import difflib # TODO: (temp) it's used in bctest
import json
import logging
import os
import pprint
# import subprocess # TODO: (temp) it's used in bctest
import sys
def main():
    """Parse CLI flags, configure logging, and run the util test suite."""
    config = configparser.ConfigParser()
    # Keep option names case-sensitive (paths in config.ini).
    config.optionxform = str
    ini_path = os.path.join(os.path.dirname(__file__), "../config.ini")
    # `with` closes the handle; the original leaked the open file object.
    with open(ini_path, encoding="utf8") as fp:
        config.read_file(fp)
    env_conf = dict(config.items("environment"))

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()
    verbose = args.verbose

    if verbose:
        level = logging.DEBUG
    else:
        level = logging.ERROR
    formatter = "%(asctime)s - %(levelname)s - %(message)s"
    # Add the format/level to the logger
    logging.basicConfig(format=formatter, level=level)

    METHOD_NAME(
        os.path.join(env_conf["SRCDIR"], "test", "util", "data"),
        "defi-util-test.json",
        env_conf,
    )
def METHOD_NAME(testDir, input_basename, buildenv):
    """Loads and parses the input file, runs all tests and reports results.

    Exits the process: status 1 if any test case failed, 0 otherwise.
    """
    input_filename = os.path.join(testDir, input_basename)
    # `with` + json.load closes the handle; the original leaked the file object.
    with open(input_filename, encoding="utf8") as fp:
        input_data = json.load(fp)

    failed_testcases = []
    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        except Exception:
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])

    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.
    """
    # TODO: the functional comparison is temporarily disabled, so every test
    # case currently passes trivially; see the commented-out implementation below.
    return
# # Get the exec names and arguments # TODO: (temp) disable functional tests
# execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
# execargs = testObj['args']
# execrun = [execprog] + execargs
#
# # Read the input data (if there is any)
# stdinCfg = None
# inputData = None
# if "input" in testObj:
# filename = os.path.join(testDir, testObj["input"])
# inputData = open(filename, encoding="utf8").read()
# stdinCfg = subprocess.PIPE
#
# # Read the expected output data (if there is any)
# outputFn = None
# outputData = None
# outputType = None
# if "output_cmp" in testObj:
# outputFn = testObj['output_cmp']
# outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
# try:
# outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
# except:
# logging.error("Output file " + outputFn + " can not be opened")
# raise
# if not outputData:
# logging.error("Output data missing for " + outputFn)
# raise Exception
# if not outputType:
# logging.error("Output file %s does not have a file extension" % outputFn)
# raise Exception
#
# # Run the test
# proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# try:
# outs = proc.communicate(input=inputData)
# except OSError:
# logging.error("OSError, Failed to execute " + execprog)
# raise
#
# if outputData:
# data_mismatch, formatting_mismatch = False, False
# # Parse command output and expected output
# try:
# a_parsed = parse_output(outs[0], outputType)
# except Exception as e:
# logging.error('Error parsing command output as %s: %s' % (outputType, e))
# raise
# try:
# b_parsed = parse_output(outputData, outputType)
# except Exception as e:
# logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
# raise
# # Compare data
# if a_parsed != b_parsed:
# logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
# data_mismatch = True
# # Compare formatting
# if outs[0] != outputData:
# error_message = "Output formatting mismatch for " + outputFn + ":\n"
# error_message += "".join(difflib.context_diff(outputData.splitlines(True),
# outs[0].splitlines(True),
# fromfile=outputFn,
# tofile="returned"))
# logging.error(error_message)
# formatting_mismatch = True
#
# assert not data_mismatch and not formatting_mismatch
#
# # Compare the return code to the expected return code
# wantRC = 0
# if "return_code" in testObj:
# wantRC = testObj['return_code']
# if proc.returncode != wantRC:
# logging.error("Return code mismatch for " + outputFn)
# raise Exception
#
# if "error_txt" in testObj:
# want_error = testObj["error_txt"]
# # Compare error text
# # TODO: ideally, we'd compare the strings exactly and also assert
# # That stderr is empty if no errors are expected. However, defi-tx
# # emits DISPLAY errors when running as a windows application on
# # linux through wine. Just assert that the expected error text appears
# # somewhere in stderr.
# if want_error not in outs[1]:
# logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
# raise Exception
#
# def parse_output(a, fmt):
# """Parse the output according to specified format.
#
# Raise an error if the output can't be parsed."""
# if fmt == 'json': # json: compare parsed data
# return json.loads(a)
# elif fmt == 'hex': # hex: parse and compare binary data
# return binascii.a2b_hex(a.strip())
# else:
# raise NotImplementedError("Don't know how to compare %s" % fmt)
# Script entry point: run the util test suite when executed directly.
if __name__ == "__main__":
    main()
import time
import uuid
import pytest
from sentry.event_manager import _save_aggregate
from sentry.eventstore.models import Event
from sentry.grouping.result import CalculatedHashes
from sentry.models import Group, GroupHash
from sentry.testutils.pytest.fixtures import django_db_all
@pytest.fixture
def fast_save(default_project, task_runner):
    """Return a helper that saves a minimal error event with fixed flat hashes
    and a hierarchical-hash chain whose last entry is caller-controlled."""

    def _tree_label(function_name):
        # One single-frame tree label naming the given function.
        return [
            {
                "function": function_name,
                "package": "",
                "is_sentinel": False,
                "is_prefix": False,
                "datapath": "",
            }
        ]

    def inner(last_frame):
        event = Event(
            default_project.id,
            uuid.uuid4().hex,
            data={"timestamp": time.time(), "type": "error"},
        )
        calculated_hashes = CalculatedHashes(
            hashes=["a" * 32, "b" * 32],
            hierarchical_hashes=["c" * 32, "d" * 32, "e" * 32, last_frame * 32],
            tree_labels=[_tree_label(name) for name in ("foo", "bar", "baz", "bam")],
        )
        with task_runner():
            return _save_aggregate(
                event,
                hashes=calculated_hashes,
                release=None,
                metadata={},
                received_timestamp=0,
                level=10,
                culprit="",
            )

    return inner
def METHOD_NAME(group_id):
    """Return the set of hash strings currently attached to *group_id*."""
    hashes = GroupHash.objects.filter(group_id=group_id).values_list("hash", flat=True)
    return set(hashes)
def _assoc_hash(group, hash):
    """Point a (new or existing) GroupHash row for *hash* at *group*."""
    gh, _created = GroupHash.objects.get_or_create(project=group.project, hash=hash)
    # The hash must not already belong to this group.
    assert gh.group is None or gh.group.id != group.id
    gh.group = group
    gh.save()
@django_db_all
def test_move_all_events(default_project, fast_save):
    """After a split moves ALL events of a group to a deeper hash, new events
    with that hash keep landing in the (relabelled) existing group."""
    group_info = fast_save("f")
    assert group_info.is_new
    assert not group_info.is_regression
    # Same hierarchical hashes again: must resolve to the existing group.
    new_group_info = fast_save("f")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group_info.group.id
    _assoc_hash(group_info.group, "a" * 32)
    _assoc_hash(group_info.group, "b" * 32)
    assert METHOD_NAME(group_info.group.id) == {"a" * 32, "b" * 32, "c" * 32}
    assert Group.objects.get(id=new_group_info.group.id).title == "foo"
    # simulate split operation where all events of group are moved into a more specific hash
    GroupHash.objects.filter(group=group_info.group).delete()
    GroupHash.objects.create(project=default_project, hash="f" * 32, group_id=group_info.group.id)
    new_group_info = fast_save("f")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group_info.group.id
    assert {g.hash for g in GroupHash.objects.filter(group=group_info.group)} == {
        # one hierarchical hash associated
        # no flat hashes associated when sorting into split group!
        "f"
        * 32,
    }
    assert Group.objects.get(id=new_group_info.group.id).title == "bam"
    # A different leaf hash ("g") no longer matches the split group: new group.
    new_group_info = fast_save("g")
    assert new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id != group_info.group.id
    assert METHOD_NAME(new_group_info.group.id) == {"c" * 32}
    assert Group.objects.get(id=new_group_info.group.id).title == "foo"
@django_db_all
def test_partial_move(default_project, fast_save):
    """A split that claims only one leaf hash routes matching events to the new
    group while other events stay with (and fall back to) the original group."""
    group_info = fast_save("f")
    assert group_info.is_new
    assert not group_info.is_regression
    # "g" shares the hierarchical prefix, so it joins the same group.
    new_group_info = fast_save("g")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group_info.group.id
    assert METHOD_NAME(group_info.group.id) == {"c" * 32}
    # simulate split operation where event "f" of group is moved into a more specific hash
    group2 = Group.objects.create(project=default_project)
    f_hash = GroupHash.objects.create(project=default_project, hash="f" * 32, group_id=group2.id)
    new_group_info = fast_save("f")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group2.id
    assert METHOD_NAME(new_group_info.group.id) == {
        # one hierarchical hash associated
        # no flat hashes associated when sorting into split group!
        "f"
        * 32,
    }
    new_group_info = fast_save("g")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group_info.group.id
    assert METHOD_NAME(new_group_info.group.id) == {
        "c" * 32,
    }
    # Removing the split hash makes "f" events fall back to the original group.
    f_hash.delete()
    new_group_info = fast_save("f")
    assert not new_group_info.is_new
    assert not new_group_info.is_regression
    assert new_group_info.group.id == group_info.group.id
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from datetime import datetime, time
from zoneinfo import ZoneInfo
import pytest
from django_scopes import scope
from pretix.base.models import Event, Organizer
from pretix.base.reldate import RelativeDate, RelativeDateWrapper
TOKYO = ZoneInfo('Asia/Tokyo')
BERLIN = ZoneInfo('Europe/Berlin')
@pytest.fixture
def METHOD_NAME():
    """Create a dummy organizer and event pinned to the Asia/Tokyo timezone."""
    organizer = Organizer.objects.create(name='Dummy', slug='dummy')
    ev = Event.objects.create(
        organizer=organizer,
        name='Dummy',
        slug='dummy',
        date_from=datetime(2017, 12, 27, 5, 0, 0, tzinfo=TOKYO),
        presale_start=datetime(2017, 12, 1, 5, 0, 0, tzinfo=TOKYO),
        plugins='pretix.plugins.banktransfer'
    )
    ev.settings.timezone = "Asia/Tokyo"
    return ev
@pytest.mark.django_db
def test_absolute_date(METHOD_NAME):
    """An absolute datetime round-trips through RelativeDateWrapper unchanged."""
    d = datetime(2017, 12, 25, 5, 0, 0, tzinfo=TOKYO)
    rdw = RelativeDateWrapper(d)
    assert rdw.datetime(METHOD_NAME) == d
    assert rdw.to_string() == d.isoformat()
@pytest.mark.django_db
def test_relative_date_without_time(METHOD_NAME):
    """One day before date_from, keeping date_from's own wall-clock time."""
    rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None))
    assert rdw.datetime(METHOD_NAME).astimezone(TOKYO) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO)
    assert rdw.to_string() == 'RELDATE/1/-/date_from/'
@pytest.mark.django_db
def test_relative_date_other_base_point(METHOD_NAME):
    """Relative dates can anchor on other event dates, with fallbacks when unset."""
    with scope(organizer=METHOD_NAME.organizer):
        rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', minutes_before=None))
        assert rdw.datetime(METHOD_NAME) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO)
        assert rdw.to_string() == 'RELDATE/1/-/presale_start/'
        # presale_end is unset, defaults to date_from
        rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None))
        assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO)
        assert rdw.to_string() == 'RELDATE/1/-/presale_end/'
        # subevent base
        se = METHOD_NAME.subevents.create(name="SE1", date_from=datetime(2017, 11, 27, 5, 0, 0, tzinfo=TOKYO))
        rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None))
        assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO)
        # presale_start is unset on subevent, default to event
        rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', minutes_before=None))
        assert rdw.datetime(se) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO)
        # presale_end is unset on all, default to date_from of subevent
        rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None))
        assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO)
@pytest.mark.django_db
def test_relative_date_in_minutes(METHOD_NAME):
    """Minute-based offsets serialize with the 'minutes' marker."""
    rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60))
    assert rdw.to_string() == 'RELDATE/minutes/60/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 27, 4, 0, 0, tzinfo=TOKYO)
@pytest.mark.django_db
def test_relative_date_with_time(METHOD_NAME):
    """A day offset plus an explicit wall-clock time in the event timezone."""
    rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(8, 5, 13), base_date_name='date_from', minutes_before=None))
    assert rdw.to_string() == 'RELDATE/1/08:05:13/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 8, 5, 13, tzinfo=TOKYO)
@pytest.mark.django_db
def test_relative_date_with_time_around_dst(METHOD_NAME):
    """Wall-clock times stay correct across Europe/Berlin DST transitions."""
    METHOD_NAME.settings.timezone = "Europe/Berlin"
    # Spring transition (clocks move forward on 2020-03-29 in Europe/Berlin).
    METHOD_NAME.date_from = datetime(2020, 3, 29, 18, 0, 0, tzinfo=BERLIN)
    rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None))
    assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 28, 18, 0, 0, tzinfo=BERLIN)
    rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None))
    assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 29, 2, 30, 0, tzinfo=BERLIN)
    # Autumn transition (clocks move back on 2020-10-25).
    METHOD_NAME.date_from = datetime(2020, 10, 25, 18, 0, 0, tzinfo=BERLIN)
    rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None))
    assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 24, 18, 0, 0, tzinfo=BERLIN)
    rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None))
    assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/'
    assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 25, 2, 30, 0, tzinfo=BERLIN)
def test_unserialize():
    """from_string parses every serialized form back into its data object."""
    d = datetime(2017, 12, 25, 10, 0, 0, tzinfo=TOKYO)
    rdw = RelativeDateWrapper.from_string(d.isoformat())
    assert rdw.data == d
    rdw = RelativeDateWrapper.from_string('RELDATE/1/-/date_from/')
    assert rdw.data == RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None)
    rdw = RelativeDateWrapper.from_string('RELDATE/1/18:05:13/date_from/')
    assert rdw.data == RelativeDate(days_before=1, time=time(18, 5, 13), base_date_name='date_from', minutes_before=None)
    rdw = RelativeDateWrapper.from_string('RELDATE/minutes/60/date_from/')
    assert rdw.data == RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60)
from django.test import TestCase
from django.utils import timezone
from ...categories.models import Category
from ...users.test import create_test_user
from ..models import Post, Thread, ThreadParticipant
from ..participants import (
add_participants,
has_participants,
make_participants_aware,
set_owner,
set_users_unread_private_threads_sync,
)
class ParticipantsTests(TestCase):
    """Tests for the thread-participant helper functions."""

    def setUp(self):
        """Create a category, a thread and its first post to operate on."""
        datetime = timezone.now()
        self.category = Category.objects.all_categories()[:1][0]
        self.thread = Thread(
            category=self.category,
            started_on=datetime,
            starter_name="Tester",
            starter_slug="tester",
            last_post_on=datetime,
            last_poster_name="Tester",
            last_poster_slug="tester",
        )
        self.thread.set_title("Test thread")
        self.thread.save()
        post = Post.objects.create(
            category=self.category,
            thread=self.thread,
            poster_name="Tester",
            original="Hello! I am test message!",
            parsed="<p>Hello! I am test message!</p>",
            checksum="nope",
            posted_on=datetime,
            updated_on=datetime,
        )
        self.thread.first_post = post
        self.thread.last_post = post
        self.thread.save()
    def test_has_participants(self):
        """has_participants returns true if thread has participants"""
        users = [
            create_test_user("User", "user@example.com"),
            create_test_user("Other_User", "otheruser@example.com"),
        ]
        self.assertFalse(has_participants(self.thread))
        ThreadParticipant.objects.add_participants(self.thread, users)
        self.assertTrue(has_participants(self.thread))
        self.thread.threadparticipant_set.all().delete()
        self.assertFalse(has_participants(self.thread))
    def test_make_threads_participants_aware(self):
        """
        make_participants_aware sets participants_list and participant
        annotations on list of threads
        """
        user = create_test_user("User", "user@example.com")
        other_user = create_test_user("Other_User", "otheruser@example.com")
        self.assertFalse(hasattr(self.thread, "participants_list"))
        self.assertFalse(hasattr(self.thread, "participant"))
        make_participants_aware(user, [self.thread])
        # List form annotates only "participant", not "participants_list".
        self.assertFalse(hasattr(self.thread, "participants_list"))
        self.assertTrue(hasattr(self.thread, "participant"))
        self.assertIsNone(self.thread.participant)
        ThreadParticipant.objects.set_owner(self.thread, user)
        ThreadParticipant.objects.add_participants(self.thread, [other_user])
        make_participants_aware(user, [self.thread])
        self.assertFalse(hasattr(self.thread, "participants_list"))
        self.assertEqual(self.thread.participant.user, user)
    def test_make_thread_participants_aware(self):
        """
        make_participants_aware sets participants_list and participant
        annotations on thread model
        """
        user = create_test_user("User", "user@example.com")
        other_user = create_test_user("Other_User", "otheruser@example.com")
        self.assertFalse(hasattr(self.thread, "participants_list"))
        self.assertFalse(hasattr(self.thread, "participant"))
        make_participants_aware(user, self.thread)
        self.assertTrue(hasattr(self.thread, "participants_list"))
        self.assertTrue(hasattr(self.thread, "participant"))
        self.assertEqual(self.thread.participants_list, [])
        self.assertIsNone(self.thread.participant)
        ThreadParticipant.objects.set_owner(self.thread, user)
        ThreadParticipant.objects.add_participants(self.thread, [other_user])
        make_participants_aware(user, self.thread)
        self.assertEqual(self.thread.participant.user, user)
        for participant in self.thread.participants_list:
            if participant.user == user:
                break
        else:
            self.fail("thread.participants_list didn't contain user")
    def METHOD_NAME(self):
        """set_owner sets user as thread owner"""
        user = create_test_user("User", "user@example.com")
        set_owner(self.thread, user)
        owner = self.thread.threadparticipant_set.get(is_owner=True)
        self.assertEqual(user, owner.user)
    def test_set_users_unread_private_threads_sync(self):
        """
        set_users_unread_private_threads_sync sets sync_unread_private_threads
        flag on users provided to true
        """
        users = [
            create_test_user("User", "user@example.com"),
            create_test_user("Other_User", "otheruser@example.com"),
        ]
        set_users_unread_private_threads_sync(users=users)
        for user in users:
            user.refresh_from_db()
            assert user.sync_unread_private_threads
    def test_set_participants_unread_private_threads_sync(self):
        """
        set_users_unread_private_threads_sync sets sync_unread_private_threads
        flag on participants provided to true
        """
        users = [
            create_test_user("User", "user@example.com"),
            create_test_user("Other_User", "otheruser@example.com"),
        ]
        participants = [ThreadParticipant(user=u) for u in users]
        set_users_unread_private_threads_sync(participants=participants)
        for user in users:
            user.refresh_from_db()
            assert user.sync_unread_private_threads
    def test_set_participants_users_unread_private_threads_sync(self):
        """
        set_users_unread_private_threads_sync sets sync_unread_private_threads
        flag on users and participants provided to true
        """
        users = [create_test_user("User", "user@example.com")]
        participants = [ThreadParticipant(user=u) for u in users]
        users.append(create_test_user("Other_User", "otheruser@example.com"))
        set_users_unread_private_threads_sync(users=users, participants=participants)
        for user in users:
            user.refresh_from_db()
            assert user.sync_unread_private_threads
    def test_set_users_unread_private_threads_sync_exclude_user(self):
        """exclude_user kwarg works"""
        users = [
            create_test_user("User", "user@example.com"),
            create_test_user("Other_User", "otheruser@example.com"),
        ]
        set_users_unread_private_threads_sync(users=users, exclude_user=users[0])
        [i.refresh_from_db() for i in users]
        assert users[0].sync_unread_private_threads is False
        assert users[1].sync_unread_private_threads
    def test_set_users_unread_private_threads_sync_noop(self):
        """excluding only user is noop"""
        user = create_test_user("User", "user@example.com")
        # Excluding the sole user must not touch the database at all.
        with self.assertNumQueries(0):
            set_users_unread_private_threads_sync(users=[user], exclude_user=user)
        user.refresh_from_db()
        assert user.sync_unread_private_threads is False
def test_add_participants_triggers_notify_on_new_private_thread(
    mocker, user, other_user, private_thread
):
    """add_participants queues the new-private-thread notification task,
    excluding the acting user from the notified ids."""
    notify_on_new_private_thread_mock = mocker.patch(
        "misago.threads.participants.notify_on_new_private_thread"
    )
    add_participants(user, private_thread, [user, other_user])
    notify_on_new_private_thread_mock.delay.assert_called_once_with(
        user.id, private_thread.id, [other_user.id]
    )
"""
Support for RFC 2136 dynamic DNS updates.
:depends: - dnspython Python module
:configuration: If you want to use TSIG authentication for the server, there
are a couple of optional configuration parameters made available to
support this (the keyname is only needed if the keyring contains more
than one key)::
keyfile: keyring file (default=None)
keyname: key name in file (default=None)
keyalgorithm: algorithm used to create the key
(default='HMAC-MD5.SIG-ALG.REG.INT').
Other possible values: hmac-sha1, hmac-sha224, hmac-sha256,
hmac-sha384, hmac-sha512
The keyring file needs to be in json format and the key name needs to end
with an extra period in the file, similar to this:
.. code-block:: json
{"keyname.": "keycontent"}
"""
import logging
import salt.utils.files
import salt.utils.json
log = logging.getLogger(__name__)
try:
import dns.query
import dns.tsigkeyring # pylint: disable=no-name-in-module
import dns.update # pylint: disable=no-name-in-module
dns_support = True
except ImportError as e:
dns_support = False
def __virtual__():
    """
    Confirm dnspython is available.
    """
    if not dns_support:
        return (
            False,
            "The ddns execution module cannot be loaded: dnspython not installed.",
        )
    return "ddns"
def _config(name, key=None, **kwargs):
    """
    Return a value for 'name' from command line args then config file options.
    Specify 'key' if the config file option is not the same as 'name'.
    """
    if key is None:
        key = name
    # Command-line kwargs win; otherwise fall back to the minion config.
    if name in kwargs:
        value = kwargs[name]
    else:
        value = __salt__["config.option"]("ddns.{}".format(key))
    # Normalize any falsy result to None.
    return value if value else None
def _get_keyring(keyfile):
    """Load a TSIG keyring from the JSON *keyfile*, or return None if unset."""
    if not keyfile:
        return None
    with salt.utils.files.fopen(keyfile) as _f:
        return dns.tsigkeyring.from_text(salt.utils.json.load(_f))
def add_host(
    zone,
    name,
    ttl,
    ip,
    nameserver="127.0.0.1",
    replace=True,
    timeout=5,
    port=53,
    **kwargs
):
    """
    Add, replace, or update the A and PTR (reverse) records for a host.
    CLI Example:
    .. code-block:: bash
        salt ns1 ddns.add_host example.com host1 60 10.1.1.1
    """
    # Create/update the forward (A) record first; give up on hard failure.
    res = update(zone, name, ttl, "A", ip, nameserver, timeout, replace, port, **kwargs)
    if res is False:
        return False
    fqdn = "{}.{}.".format(name, zone)
    # Reverse the IPv4 octets to build candidate in-addr.arpa zones.
    parts = ip.split(".")[::-1]
    popped = []
    # Iterate over possible reverse zones
    while len(parts) > 1:
        p = parts.pop(0)
        popped.append(p)
        # NOTE: zone/name are rebound here for each candidate reverse zone.
        zone = "{}.{}".format(".".join(parts), "in-addr.arpa.")
        name = ".".join(popped)
        ptr = update(
            zone, name, ttl, "PTR", fqdn, nameserver, timeout, replace, port, **kwargs
        )
        # First reverse zone that accepts the PTR update ends the search.
        if ptr:
            return True
    return res
def METHOD_NAME(zone, name, nameserver="127.0.0.1", timeout=5, port=53, **kwargs):
    """
    Delete the forward and reverse records for a host.
    Returns true if any records are deleted.
    CLI Example:
    .. code-block:: bash
        salt ns1 ddns.delete_host example.com host1
    """
    fqdn = "{}.{}".format(name, zone)
    # Query the A records first so matching PTR records can be removed too.
    # (dns.message is reachable because "import dns.query" pulls it in.)
    request = dns.message.make_query(fqdn, "A")
    answer = dns.query.udp(request, nameserver, timeout, port)
    try:
        ips = [i.address for i in answer.answer[0].items]
    except IndexError:
        # No A records returned; still try to delete the forward entry below.
        ips = []
    res = delete(
        zone, name, nameserver=nameserver, timeout=timeout, port=port, **kwargs
    )
    fqdn = fqdn + "."
    for ip in ips:
        # Reverse the octets to construct candidate in-addr.arpa zones.
        parts = ip.split(".")[::-1]
        popped = []
        # Iterate over possible reverse zones
        while len(parts) > 1:
            p = parts.pop(0)
            popped.append(p)
            zone = "{}.{}".format(".".join(parts), "in-addr.arpa.")
            name = ".".join(popped)
            ptr = delete(
                zone,
                name,
                "PTR",
                fqdn,
                nameserver=nameserver,
                timeout=timeout,
                port=port,
                **kwargs
            )
            # Any successful PTR deletion counts as overall success.
            if ptr:
                res = True
    return res
def update(
    zone,
    name,
    ttl,
    rdtype,
    data,
    nameserver="127.0.0.1",
    timeout=5,
    replace=False,
    port=53,
    **kwargs
):
    """
    Add, replace, or update a DNS record.
    nameserver must be an IP address and the minion running this module
    must have update privileges on that server.
    If replace is true, first deletes all records for this name and type.
    CLI Example:
    .. code-block:: bash
        salt ns1 ddns.update example.com host1 60 A 10.0.0.1
    """
    name = str(name)
    # A trailing dot means the name is already fully qualified.
    if name[-1:] == ".":
        fqdn = name
    else:
        fqdn = "{}.{}".format(name, zone)
    # Query the current records so a no-op update can be skipped.
    request = dns.message.make_query(fqdn, rdtype)
    answer = dns.query.udp(request, nameserver, timeout, port)
    rdtype = dns.rdatatype.from_text(rdtype)
    rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)
    keyring = _get_keyring(_config("keyfile", **kwargs))
    keyname = _config("keyname", **kwargs)
    keyalgorithm = _config("keyalgorithm", **kwargs) or "HMAC-MD5.SIG-ALG.REG.INT"
    is_exist = False
    for rrset in answer.answer:
        if rdata in rrset.items:
            if ttl == rrset.ttl:
                # NOTE(review): this inner check looks always-true once this
                # point is reached (both containers are non-empty here);
                # confirm before simplifying.
                if len(answer.answer) >= 1 or len(rrset.items) >= 1:
                    is_exist = True
                    break
    dns_update = dns.update.Update(
        zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm
    )
    if replace:
        dns_update.replace(name, ttl, rdata)
    elif not is_exist:
        dns_update.add(name, ttl, rdata)
    else:
        # Identical record (same data and TTL) already present: nothing to do.
        return None
    answer = dns.query.udp(dns_update, nameserver, timeout, port)
    if answer.rcode() > 0:
        return False
    return True
def delete(
    zone,
    name,
    rdtype=None,
    data=None,
    nameserver="127.0.0.1",
    timeout=5,
    port=53,
    **kwargs
):
    """
    Delete a DNS record.
    CLI Example:
    .. code-block:: bash
        salt ns1 ddns.delete example.com host1 A
    """
    name = str(name)
    # A trailing dot means the name is already fully qualified.
    if name[-1:] == ".":
        fqdn = name
    else:
        fqdn = "{}.{}".format(name, zone)
    request = dns.message.make_query(fqdn, (rdtype or "ANY"))
    answer = dns.query.udp(request, nameserver, timeout, port)
    if not answer.answer:
        # Nothing matching exists, so there is nothing to delete.
        return None
    keyring = _get_keyring(_config("keyfile", **kwargs))
    keyname = _config("keyname", **kwargs)
    keyalgorithm = _config("keyalgorithm", **kwargs) or "HMAC-MD5.SIG-ALG.REG.INT"
    dns_update = dns.update.Update(
        zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm
    )
    if rdtype:
        rdtype = dns.rdatatype.from_text(rdtype)
        if data:
            # Delete only the exact record matching *data*.
            rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)
            dns_update.delete(name, rdata)
        else:
            # Delete every record of this type under the name.
            dns_update.delete(name, rdtype)
    else:
        # No rdtype given: delete all records under the name.
        dns_update.delete(name)
    answer = dns.query.udp(dns_update, nameserver, timeout, port)
    if answer.rcode() > 0:
        return False
    return True
"""
Comparison utilities for STIX pattern observation expressions.
"""
from stix2.equivalence.pattern.compare import generic_cmp, iter_lex_cmp
from stix2.equivalence.pattern.compare.comparison import (
comparison_expression_cmp, generic_constant_cmp,
)
from stix2.patterns import (
AndObservationExpression, FollowedByObservationExpression,
ObservationExpression, OrObservationExpression,
QualifiedObservationExpression, RepeatQualifier, StartStopQualifier,
WithinQualifier, _CompoundObservationExpression,
)
_OBSERVATION_EXPRESSION_TYPE_ORDER = (
ObservationExpression, AndObservationExpression, OrObservationExpression,
FollowedByObservationExpression, QualifiedObservationExpression,
)
_QUALIFIER_TYPE_ORDER = (
RepeatQualifier, WithinQualifier, StartStopQualifier,
)
def METHOD_NAME(qual1, qual2):
    """
    Compare REPEATS qualifiers. This orders by repeat count.
    """
    times1 = qual1.times_to_repeat
    times2 = qual2.times_to_repeat
    return generic_constant_cmp(times1, times2)
def within_cmp(qual1, qual2):
    """
    Compare WITHIN qualifiers. This orders by number of seconds.
    """
    secs1 = qual1.number_of_seconds
    secs2 = qual2.number_of_seconds
    return generic_constant_cmp(secs1, secs2)
def startstop_cmp(qual1, qual2):
    """
    Compare START/STOP qualifiers. This lexicographically orders by start time,
    then stop time.
    """
    key1 = (qual1.start_time, qual1.stop_time)
    key2 = (qual2.start_time, qual2.stop_time)
    return iter_lex_cmp(key1, key2, generic_constant_cmp)
_QUALIFIER_COMPARATORS = {
RepeatQualifier: METHOD_NAME,
WithinQualifier: within_cmp,
StartStopQualifier: startstop_cmp,
}
def observation_expression_cmp(expr1, expr2):
    """
    Compare two observation expression ASTs. This is sensitive to the order of
    the expressions' sub-components. To achieve an order-insensitive
    comparison, the sub-component ASTs must be ordered first.
    Args:
        expr1: The first observation expression
        expr2: The second observation expression
    Returns:
        <0, 0, or >0 depending on whether the first arg is less, equal or
        greater than the second
    """
    type1 = type(expr1)
    type2 = type(expr2)
    # Cross-type ordering is fixed by _OBSERVATION_EXPRESSION_TYPE_ORDER.
    type1_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type1)
    type2_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type2)
    if type1_idx != type2_idx:
        result = generic_cmp(type1_idx, type2_idx)
    # else, both exprs are of same type.
    # If they're simple, use contained comparison expression order
    elif type1 is ObservationExpression:
        result = comparison_expression_cmp(
            expr1.operand, expr2.operand,
        )
    elif isinstance(expr1, _CompoundObservationExpression):
        # Both compound, and of same type (and/or/followedby): sort according
        # to contents.
        result = iter_lex_cmp(
            expr1.operands, expr2.operands, observation_expression_cmp,
        )
    else:  # QualifiedObservationExpression
        # Both qualified. Check qualifiers first; if they are the same,
        # use order of the qualified expressions.
        qual1_type = type(expr1.qualifier)
        qual2_type = type(expr2.qualifier)
        qual1_type_idx = _QUALIFIER_TYPE_ORDER.index(qual1_type)
        qual2_type_idx = _QUALIFIER_TYPE_ORDER.index(qual2_type)
        result = generic_cmp(qual1_type_idx, qual2_type_idx)
        if result == 0:
            # Same qualifier type; compare qualifier details
            qual_cmp = _QUALIFIER_COMPARATORS.get(qual1_type)
            if qual_cmp:
                result = qual_cmp(expr1.qualifier, expr2.qualifier)
            else:
                raise TypeError(
                    "Can't compare qualifier type: " + qual1_type.__name__,
                )
        if result == 0:
            # Same qualifier type and details; use qualified expression order
            result = observation_expression_cmp(
                expr1.observation_expression, expr2.observation_expression,
            )
    return result
# pylint: disable=too-few-public-methods,import-error, missing-docstring
# pylint: disable=useless-super-delegation,wrong-import-position,invalid-name, wrong-import-order, condition-evals-to-constant
# NOTE(review): pylint functional-test fixture — the "bad" code below is
# intentional and each trailing "# [message-id]" comment pins an expected
# diagnostic. If expected-output files reference line numbers, keep them in
# sync with any comment lines added here.
if len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
if not len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
z = []
if z and len(['T', 'E', 'S', 'T']):  # [use-implicit-booleaness-not-len]
    pass
if True or len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
if len('TEST') == 0:  # Should be fine
    pass
if len('TEST') < 1:  # Should be fine
    pass
if len('TEST') <= 0:  # Should be fine
    pass
if 1 > len('TEST'):  # Should be fine
    pass
if 0 >= len('TEST'):  # Should be fine
    pass
if z and len('TEST') == 0:  # Should be fine
    pass
if 0 == len('TEST') < 10:  # Should be fine
    pass
# Should be fine
if 0 < 1 <= len('TEST') < 10:  # [comparison-of-constants]
    pass
if 10 > len('TEST') != 0:  # Should be fine
    pass
if 10 > len('TEST') > 1 > 0:  # Should be fine
    pass
if 0 <= len('TEST') < 100:  # Should be fine
    pass
if z or 10 > len('TEST') != 0:  # Should be fine
    pass
if z:
    pass
elif len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
if z:
    pass
elif not len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
while len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
while not len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
while z and len('TEST'):  # [use-implicit-booleaness-not-len]
    pass
while not len('TEST') and z:  # [use-implicit-booleaness-not-len]
    pass
assert len('TEST') > 0  # Should be fine
x = 1 if len('TEST') != 0 else 2  # Should be fine
f_o_o = len('TEST') or 42  # Should be fine
a = x and len(x)  # Should be fine
def some_func():
    return len('TEST') > 0  # Should be fine
def github_issue_1325():
    l = [1, 2, 3]
    length = len(l) if l else 0  # Should be fine
    return length
def github_issue_1331(*args):
    assert False, len(args)  # Should be fine
def github_issue_1331_v2(*args):
    assert len(args), args  # [use-implicit-booleaness-not-len]
def github_issue_1331_v3(*args):
    assert len(args) or z, args  # [use-implicit-booleaness-not-len]
def github_issue_1331_v4(*args):
    assert z and len(args), args  # [use-implicit-booleaness-not-len]
b = bool(len(z))  # [use-implicit-booleaness-not-len]
c = bool(len('TEST') or 42)  # [use-implicit-booleaness-not-len]
def github_issue_1879():
    class ClassWithBool(list):
        def __bool__(self):
            return True
    class ClassWithoutBool(list):
        pass
    class ChildClassWithBool(ClassWithBool):
        pass
    class ChildClassWithoutBool(ClassWithoutBool):
        pass
    assert len(ClassWithBool())
    assert len(ChildClassWithBool())
    assert len(ClassWithoutBool())  # [use-implicit-booleaness-not-len]
    assert len(ChildClassWithoutBool())  # [use-implicit-booleaness-not-len]
    assert len(range(0))  # [use-implicit-booleaness-not-len]
    assert len([t + 1 for t in []])  # [use-implicit-booleaness-not-len]
    assert len(u + 1 for u in [])  # [use-implicit-booleaness-not-len]
    assert len({"1":(v + 1) for v in {}})  # [use-implicit-booleaness-not-len]
    assert len(set((w + 1) for w in set()))  # [use-implicit-booleaness-not-len]
    # pylint: disable=import-outside-toplevel
    import numpy
    numpy_array = numpy.array([0])
    if len(numpy_array) > 0:
        print('numpy_array')
    if len(numpy_array):
        print('numpy_array')
    if numpy_array:
        print('b')
    import pandas as pd
    pandas_df = pd.DataFrame()
    if len(pandas_df):
        print("this works, but pylint tells me not to use len() without comparison")
    if len(pandas_df) > 0:
        print("this works and pylint likes it, but it's not the solution intended by PEP-8")
    if pandas_df:
        print("this does not work (truth value of dataframe is ambiguous)")
def function_returning_list(r):
    if r==1:
        return [1]
    return [2]
def function_returning_int(r):
    if r==1:
        return 1
    return 2
def METHOD_NAME(r):
    for i in [r, 1, 2, 3]:
        yield i
def function_returning_comprehension(r):
    return [x+1 for x in [r, 1, 2, 3]]
def function_returning_function(r):
    return METHOD_NAME(r)
assert len(function_returning_list(z))  # [use-implicit-booleaness-not-len]
assert len(function_returning_int(z))
# This should raise a use-implicit-booleaness-not-len once astroid can infer it
# See https://github.com/pylint-dev/pylint/pull/3821#issuecomment-743771514
assert len(METHOD_NAME(z))
assert len(function_returning_comprehension(z))
assert len(function_returning_function(z))
def github_issue_4215():
    # Test undefined variables
    # https://github.com/pylint-dev/pylint/issues/4215
    if len(undefined_var):  # [undefined-variable]
        pass
    if len(undefined_var2[0]):  # [undefined-variable]
        pass
# pylint: disable=len-as-condition
if len('TEST'):
    pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Reader for files produced by ESA's Ocean Color CCI project.
This reader currently supports the lat/lon gridded products and does not yet support the
products on a sinusoidal grid. The products on each of the composite periods (1, 5 and 8 day plus monthly)
are supported and both the merged product files (OC_PRODUCTS) and single product (RRS, CHLOR_A, IOP, K_490) are
supported.
"""
import logging
from datetime import datetime
import dask.array as da
import numpy as np
from pyresample import geometry
from satpy.readers.netcdf_utils import NetCDF4FileHandler
logger = logging.getLogger(__name__)
class OCCCIFileHandler(NetCDF4FileHandler):
    """File handler for Ocean Color CCI netCDF files."""

    # Filename composite-period codes mapped to human-readable names.
    # The monthly product is identified separately in composite_period.
    _PERIOD_NAMES = {'1D': 'daily', '5D': '5-day', '8D': '8-day'}

    @staticmethod
    def METHOD_NAME(datestr):
        """Parse a time-coverage attribute string into a datetime."""
        return datetime.strptime(datestr, "%Y%m%d%H%MZ")

    @property
    def start_time(self):
        """Start of the file's time coverage."""
        return self.METHOD_NAME(self['/attr/time_coverage_start'])

    @property
    def end_time(self):
        """End of the file's time coverage."""
        return self.METHOD_NAME(self['/attr/time_coverage_end'])

    @property
    def composite_period(self):
        """Determine the compositing period from the filename information."""
        comp1 = self.filename_info['composite_period_1']
        comp2 = self.filename_info['composite_period_2']
        if comp1 == "1M" and comp2 == 'MONTHLY':
            return 'monthly'
        period = self._PERIOD_NAMES.get(comp1)
        if period is None:
            raise ValueError(f"Unknown data compositing period: {comp1}_{comp2}")
        return period

    def _update_attrs(self, dataset, dataset_info):
        """Merge file attributes and reader metadata into ``dataset.attrs``."""
        dataset.attrs.update(self[dataset_info['nc_key']].attrs)
        dataset.attrs.update(dataset_info)
        dataset.attrs['sensor'] = 'merged'
        dataset.attrs['composite_period'] = self.composite_period
        # remove attributes from original file which don't apply anymore
        dataset.attrs.pop("nc_key")

    def get_dataset(self, dataset_id, ds_info):
        """Load a variable, mask fill values and normalise dimension names."""
        dataset = da.squeeze(self[ds_info['nc_key']])
        if '_FillValue' in dataset.attrs:
            fill_value = dataset.attrs['_FillValue']
            dataset.data = da.where(dataset.data == fill_value, np.nan, dataset.data)
        self._update_attrs(dataset, ds_info)
        # Rename the geographic dimensions to the y/x names used downstream.
        for geo_dim, xy_dim in (('lat', 'y'), ('lon', 'x')):
            if geo_dim in dataset.dims:
                dataset = dataset.rename({geo_dim: xy_dim})
        return dataset

    def get_area_def(self, dsid):
        """Get the area definition based on information in file.

        There is no area definition in the file itself, so we have to compute it
        from the metadata, which specifies the area extent and pixel resolution.
        """
        lon_res = float(self['/attr/geospatial_lon_resolution'])
        lat_res = float(self['/attr/geospatial_lat_resolution'])
        min_lon = self['/attr/geospatial_lon_min']
        max_lon = self['/attr/geospatial_lon_max']
        min_lat = self['/attr/geospatial_lat_min']
        max_lat = self['/attr/geospatial_lat_max']
        # Pixel counts follow from the extent divided by the resolution.
        width = int(np.round((max_lon - min_lon) / lon_res))
        height = int(np.round((max_lat - min_lat) / lat_res))
        return geometry.AreaDefinition(
            'gridded_occci',
            'Full globe gridded area',
            'longlat',
            'EPSG:4326',
            width,
            height,
            (min_lon, min_lat, max_lon, max_lat),
        )
"""
This command:
* deletes all prescribing data (both original data and extracts created by the matrixstore build) from:
* the filesystem
* BigQuery
* Cloud Storage
* resets the import pipeline so that the import may be re-run with correct data
"""
import json
import os
import networkx as nx
from django.conf import settings
from django.core.management import BaseCommand
from frontend.models import ImportLog
from gcutils.bigquery import Client as BQClient
from gcutils.bigquery import NotFound
from gcutils.storage import Client as StorageClient
from pipeline.models import TaskLog
from pipeline.runner import dump_import_records, load_import_records
class Command(BaseCommand):
    """Undo a prescribing import for one month so it can be re-run."""

    def add_arguments(self, parser):
        parser.add_argument("year")
        parser.add_argument("month")

    def METHOD_NAME(self, year, month, **kwargs):
        # Steps run in a fixed order; the first one verifies that the
        # latest import really is the month we are about to delete.
        steps = (
            verify_year_month,
            delete_import_record,
            mark_task_logs_as_failed,
            delete_fetch_and_import_task_log,
            delete_import_logs,
            delete_prescribing_file_on_filesystem,
            delete_prescribing_file_in_storage,
            delete_temporary_prescribing_bq_table,
            remove_records_from_bq_table,
            delete_backup_from_storage,
            delete_matrixstore_bq_table,
            delete_matrixstore_storage_files,
            delete_matrixstore_download,
        )
        for step in steps:
            step(year, month)
def verify_year_month(year, month):
    """Check the latest prescribing ImportLog matches the month being deleted.

    ``year`` and ``month`` arrive as strings from the command line, so both
    must be cast to int before comparing against date components.
    """
    print("verify_year_month")
    log = ImportLog.objects.latest_in_category("prescribing")
    # Bug fix: ``year`` is an argparse string; comparing it directly with the
    # integer ``log.current_at.year`` could never be True. ``month`` was
    # already cast -- cast ``year`` the same way.
    assert log.current_at.year == int(year)
    assert log.current_at.month == int(month)
def delete_import_record(year, month):
    """Drop the prescribing import-record entry for the given month."""
    print("delete_import_record")
    import_records = load_import_records()
    logs = import_records["prescribing"]
    marker = f"prescribing_v2/{year}_{month}"
    new_logs = [record for record in logs if marker not in record["imported_file"]]
    # Exactly one record must have been removed.
    assert len(logs) == len(new_logs) + 1
    import_records["prescribing"] = new_logs
    dump_import_records(import_records)
def mark_task_logs_as_failed(year, month):
    """Mark the convert task and everything downstream of it as FAILED.

    Rebuilds the task dependency graph from the pipeline metadata, then flips
    the TaskLog status of ``convert_hscic_prescribing`` and all of its
    transitive descendants from SUCCESSFUL to FAILED so they will re-run.
    """
    print("mark_task_logs_as_failed")
    with open(settings.PIPELINE_METADATA_DIR + "/tasks.json") as f:
        tasks = json.load(f)
    # Edges point dependency -> dependent, so nx.descendants gives downstream tasks.
    graph = nx.DiGraph()
    for task_name, task_def in tasks.items():
        for dependency_name in task_def.get("dependencies", []):
            graph.add_edge(dependency_name, task_name)
    convert_task_log = TaskLog.objects.get(
        task_name="convert_hscic_prescribing",
        year=year,
        month=month,
        status=TaskLog.SUCCESSFUL,
    )
    for task_name in nx.descendants(graph, "convert_hscic_prescribing"):
        task_log = TaskLog.objects.get(
            task_name=task_name, year=year, month=month, status=TaskLog.SUCCESSFUL
        )
        # Sanity check: downstream tasks must have started after the convert
        # task for this month's run before we mark them failed.
        assert task_log.started_at > convert_task_log.started_at
        task_log.status = TaskLog.FAILED
        task_log.save()
    convert_task_log.status = TaskLog.FAILED
    convert_task_log.save()
def delete_fetch_and_import_task_log(year, month):
    """Delete the fetch_and_import TaskLog so the month can be re-fetched."""
    print("delete_fetch_and_import_task_log")
    TaskLog.objects.get(task_name="fetch_and_import", year=year, month=month).delete()
def delete_import_logs(year, month):
    """Delete the ImportLog rows recording this month's import."""
    print("delete_import_logs")
    current_at = f"{year}-{month}-01"
    # Same order as before: prescribing first, then dashboard_data.
    for category in ("prescribing", "dashboard_data"):
        ImportLog.objects.get(category=category, current_at=current_at).delete()
def delete_prescribing_file_on_filesystem(year, month):
    """Remove the downloaded EPD CSV for this month from the pipeline data dir."""
    print("delete_prescribing_file_on_filesystem")
    month_dir = os.path.join(
        settings.PIPELINE_DATA_BASEDIR, "prescribing_v2", f"{year}_{month}"
    )
    os.remove(os.path.join(month_dir, f"epd_{year}{month}.csv"))
def delete_prescribing_file_in_storage(year, month):
    """Delete the uploaded prescribing file(s) for this month from Cloud Storage."""
    print("delete_prescribing_file_in_storage")
    # Bug fix: the path was hard-coded to "2021_10", so running this command
    # for any other month deleted nothing (or the wrong month's data).
    _delete_file_from_storage(f"hscic/prescribing_v2/{year}_{month}")
def delete_temporary_prescribing_bq_table(year, month):
    """Delete the temporary raw prescribing table from BigQuery, if present."""
    print("delete_temporary_prescribing_bq_table")
    try:
        _delete_table_from_bq("tmp_eu", f"raw_prescribing_data_{year}_{month}")
    except NotFound:
        # This is ok, as the table might already have been deleted
        pass
def remove_records_from_bq_table(year, month):
    """Delete this month's rows from the hscic.prescribing_v2 BigQuery table."""
    print("remove_records_from_bq_table")
    client = BQClient("hscic")
    # NOTE(review): year/month are interpolated straight into the SQL string.
    # They come from an operator via the command line, but confirm they are
    # validated upstream (verify_year_month runs first) before relying on this.
    sql = (
        f"DELETE FROM ebmdatalab.hscic.prescribing_v2 WHERE month = '{year}-{month}-01'"
    )
    client.query(sql)
def delete_backup_from_storage(year, month):
    """Delete this month's prescribing backup from Cloud Storage."""
    print("delete_backup_from_storage")
    # Bug fix: the backup path was hard-coded to "2021_10" instead of being
    # built from the year/month arguments.
    _delete_file_from_storage(f"backup/prescribing_v2/{year}_{month}")
def delete_matrixstore_bq_table(year, month):
    """Delete the matrixstore export table for this month from BigQuery."""
    print("delete_matrixstore_bq_table")
    _delete_table_from_bq("prescribing_export", f"prescribing_{year}_{month}")
def delete_matrixstore_storage_files(year, month):
    """Delete this month's matrixstore export shards from Cloud Storage."""
    print("delete_matrixstore_storage_files")
    # The trailing "_*" matches all sharded export files via the prefix lookup.
    _delete_file_from_storage(f"prescribing_exports/prescribing_{year}_{month}_*")
def delete_matrixstore_download(year, month):
    """Remove the gzipped prescribing CSV downloaded for the matrixstore build."""
    print("delete_matrixstore_download")
    filename = f"{year}-{month}-01_prescribing.csv.gz"
    os.remove(
        os.path.join(settings.PIPELINE_DATA_BASEDIR, "matrixstore_import", filename)
    )
def _delete_file_from_storage(path):
    """Delete every blob in the default bucket whose name starts with ``path``."""
    bucket = StorageClient().get_bucket()
    for blob in bucket.list_blobs(prefix=path):
        blob.delete()
def _delete_table_from_bq(dataset_name, table_name):
    """Delete ``table_name`` from the given BigQuery dataset."""
    client = BQClient(dataset_name)
    client.delete_table(table_name)
from unittest import mock
import pytest
import string
import dbt.exceptions
import dbt.graph.selector as graph_selector
import dbt.graph.cli as graph_cli
from dbt.node_types import NodeType
import networkx as nx
from dbt import flags
from argparse import Namespace
from dbt.contracts.project import UserConfig
flags.set_from_args(Namespace(), UserConfig())
def _get_graph():
    """Build a small test graph with nodes named ``m.<package>.<letter>``.

    The underlying shape is a balanced binary tree of depth 2; even-numbered
    nodes are placed in package X, odd-numbered ones in package Y.
    """
    tree = nx.balanced_tree(2, 2, nx.DiGraph())
    relabelling = {}
    for index, letter in enumerate(string.ascii_lowercase):
        package = "X" if index % 2 == 0 else "Y"
        relabelling[index] = f"m.{package}.{letter}"
    # Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
    return graph_selector.Graph(nx.relabel_nodes(tree, relabelling))
def _get_manifest(graph):
    """Return a mock manifest whose nodes mirror ``graph`` and carry test tags."""
    tags_by_id = {
        "m.X.a": ["abc"],
        "m.Y.b": ["abc", "bcef"],
        "m.X.c": ["abc", "bcef"],
        "m.Y.d": [],
        "m.X.e": ["efg", "bcef"],
        "m.Y.f": ["efg", "bcef"],
        "m.X.g": ["efg"],
    }
    nodes = {}
    for unique_id in graph:
        fqn = unique_id.split(".")
        nodes[unique_id] = mock.MagicMock(
            unique_id=unique_id,
            fqn=fqn,
            package_name=fqn[0],
            tags=tags_by_id.get(unique_id, []),
            resource_type=NodeType.Model,
            empty=False,
            config=mock.MagicMock(enabled=True),
            is_versioned=False,
        )
    return mock.MagicMock(nodes=nodes)
@pytest.fixture
def graph():
    """Graph fixture for selector tests."""
    # Bug fix: _get_graph() already returns a graph_selector.Graph; the
    # previous code wrapped it in Graph a second time, handing Graph a Graph
    # instead of the networkx digraph it expects.
    return _get_graph()
@pytest.fixture
def manifest(graph):
    """Manifest fixture built from the ``graph`` fixture."""
    return _get_manifest(graph)
def id_macro(arg):
    """Render a pytest parameter id: strings pass through, iterables of
    strings are joined with "_", anything else is returned unchanged."""
    if isinstance(arg, str):
        return arg
    try:
        joined = "_".join(arg)
    except TypeError:
        # Not an iterable of strings (e.g. a run_specs tuple of lists/sets);
        # let pytest derive an id from the raw value instead.
        return arg
    return joined
# Selector test cases: (include specs, exclude specs, expected selected node ids).
run_specs = [
    # include by fqn
    (["X.a"], [], {"m.X.a"}),
    # include by tag
    (["tag:abc"], [], {"m.X.a", "m.Y.b", "m.X.c"}),
    # exclude by tag
    (["*"], ["tag:abc"], {"m.Y.d", "m.X.e", "m.Y.f", "m.X.g"}),
    # tag + fqn
    (["tag:abc", "a"], [], {"m.X.a", "m.Y.b", "m.X.c"}),
    (["tag:abc", "d"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.d"}),
    # multiple node selection across packages
    (["X.a", "b"], [], {"m.X.a", "m.Y.b"}),
    (["X.a+"], ["b"], {"m.X.a", "m.X.c", "m.Y.d", "m.X.e", "m.Y.f", "m.X.g"}),
    # children
    (["X.c+"], [], {"m.X.c", "m.Y.f", "m.X.g"}),
    (["X.a+1"], [], {"m.X.a", "m.Y.b", "m.X.c"}),
    (["X.a+"], ["tag:efg"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.d"}),
    # parents
    (["+Y.f"], [], {"m.X.c", "m.Y.f", "m.X.a"}),
    (["1+Y.f"], [], {"m.X.c", "m.Y.f"}),
    # childrens parents
    (["@X.c"], [], {"m.X.a", "m.X.c", "m.Y.f", "m.X.g"}),
    # multiple selection/exclusion
    (["tag:abc", "tag:bcef"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.X.e", "m.Y.f"}),
    (["tag:abc", "tag:bcef"], ["tag:efg"], {"m.X.a", "m.Y.b", "m.X.c"}),
    (["tag:abc", "tag:bcef"], ["tag:efg", "a"], {"m.Y.b", "m.X.c"}),
    # intersections
    (["a,a"], [], {"m.X.a"}),
    (["+c,c+"], [], {"m.X.c"}),
    (["a,b"], [], set()),
    (["tag:abc,tag:bcef"], [], {"m.Y.b", "m.X.c"}),
    (["*,tag:abc,a"], [], {"m.X.a"}),
    (["a,tag:abc,*"], [], {"m.X.a"}),
    (["tag:abc,tag:bcef"], ["c"], {"m.Y.b"}),
    (["tag:bcef,tag:efg"], ["tag:bcef,@b"], {"m.Y.f"}),
    (["tag:bcef,tag:efg"], ["tag:bcef,@a"], set()),
    (["*,@a,+b"], ["*,tag:abc,tag:bcef"], {"m.X.a"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.X.e", "m.Y.f"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], ["e"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.f"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], ["e"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.f"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], ["e", "f"], {"m.X.a", "m.Y.b", "m.X.c"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], ["tag:abc,tag:bcef"], {"m.X.a", "m.X.e", "m.Y.f"}),
    (["tag:bcef,tag:efg", "*,tag:abc"], ["tag:abc,tag:bcef", "tag:abc,a"], {"m.X.e", "m.Y.f"}),
]
@pytest.mark.parametrize("include,exclude,expected", run_specs, ids=id_macro)
def METHOD_NAME(include, exclude, expected):
    """Each run spec's include/exclude selectors resolve to the expected nodes."""
    graph = _get_graph()
    manifest = _get_manifest(graph)
    selector = graph_selector.NodeSelector(graph, manifest)
    # TODO: The "eager" string below needs to be replaced with programatic access
    #  to the default value for the indirect selection parameter in
    #  dbt.cli.params.indirect_selection
    #
    # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397
    spec = graph_cli.parse_difference(include, exclude, "eager")
    selected, _ = selector.select_nodes(spec)
    assert selected == expected
# Spec-parsing cases. Tuple fields mirror test_parse_specs' signature:
# (spec, parents, parents_depth, children, children_depth,
#  filter_type, filter_value, childrens_parents)
param_specs = [
    ("a", False, None, False, None, "fqn", "a", False),
    ("+a", True, None, False, None, "fqn", "a", False),
    ("256+a", True, 256, False, None, "fqn", "a", False),
    ("a+", False, None, True, None, "fqn", "a", False),
    ("a+256", False, None, True, 256, "fqn", "a", False),
    ("+a+", True, None, True, None, "fqn", "a", False),
    ("16+a+32", True, 16, True, 32, "fqn", "a", False),
    ("@a", False, None, False, None, "fqn", "a", True),
    ("a.b", False, None, False, None, "fqn", "a.b", False),
    ("+a.b", True, None, False, None, "fqn", "a.b", False),
    ("256+a.b", True, 256, False, None, "fqn", "a.b", False),
    ("a.b+", False, None, True, None, "fqn", "a.b", False),
    ("a.b+256", False, None, True, 256, "fqn", "a.b", False),
    ("+a.b+", True, None, True, None, "fqn", "a.b", False),
    ("16+a.b+32", True, 16, True, 32, "fqn", "a.b", False),
    ("@a.b", False, None, False, None, "fqn", "a.b", True),
    ("a.b.*", False, None, False, None, "fqn", "a.b.*", False),
    ("+a.b.*", True, None, False, None, "fqn", "a.b.*", False),
    ("256+a.b.*", True, 256, False, None, "fqn", "a.b.*", False),
    ("a.b.*+", False, None, True, None, "fqn", "a.b.*", False),
    ("a.b.*+256", False, None, True, 256, "fqn", "a.b.*", False),
    ("+a.b.*+", True, None, True, None, "fqn", "a.b.*", False),
    ("16+a.b.*+32", True, 16, True, 32, "fqn", "a.b.*", False),
    ("@a.b.*", False, None, False, None, "fqn", "a.b.*", True),
    ("tag:a", False, None, False, None, "tag", "a", False),
    ("+tag:a", True, None, False, None, "tag", "a", False),
    ("256+tag:a", True, 256, False, None, "tag", "a", False),
    ("tag:a+", False, None, True, None, "tag", "a", False),
    ("tag:a+256", False, None, True, 256, "tag", "a", False),
    ("+tag:a+", True, None, True, None, "tag", "a", False),
    ("16+tag:a+32", True, 16, True, 32, "tag", "a", False),
    ("@tag:a", False, None, False, None, "tag", "a", True),
    ("source:a", False, None, False, None, "source", "a", False),
    ("source:a+", False, None, True, None, "source", "a", False),
    ("source:a+1", False, None, True, 1, "source", "a", False),
    ("source:a+32", False, None, True, 32, "source", "a", False),
    ("@source:a", False, None, False, None, "source", "a", True),
]
@pytest.mark.parametrize(
    "spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents",
    param_specs,
    ids=id_macro,
)
def test_parse_specs(
    spec,
    parents,
    parents_depth,
    children,
    children_depth,
    filter_type,
    filter_value,
    childrens_parents,
):
    """A single selector spec parses into the expected SelectionCriteria fields."""
    parsed = graph_selector.SelectionCriteria.from_single_spec(spec)
    assert parsed.parents == parents
    assert parsed.parents_depth == parents_depth
    assert parsed.children == children
    assert parsed.children_depth == children_depth
    assert parsed.method == filter_type
    assert parsed.value == filter_value
    assert parsed.childrens_parents == childrens_parents
# Specs that combine the childrens-parents operator "@" with a trailing "+"
# (children selector); the spec parser must reject these.
invalid_specs = [
    "@a+",
    "@a.b+",
    "@a.b*+",
    "@tag:a+",
    "@source:a+",
]
@pytest.mark.parametrize("invalid", invalid_specs, ids=lambda k: str(k))
def test_invalid_specs(invalid):
    """Invalid selector specs raise DbtRuntimeError when parsed."""
    with pytest.raises(dbt.exceptions.DbtRuntimeError):
        graph_selector.SelectionCriteria.from_single_spec(invalid)
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from collections import Counter
import string
import numpy
from ..representation import QuestionAnsweringAnnotation, QuestionAnsweringPrediction
from ..representation import QuestionAnsweringEmbeddingAnnotation, QuestionAnsweringEmbeddingPrediction
from ..representation import QuestionAnsweringBiDAFAnnotation
from .metric import PerImageEvaluationMetric, FullDatasetEvaluationMetric
from ..config import NumberField
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    text = s.lower()
    # Strip ASCII punctuation in a single translate pass.
    text = text.translate(str.maketrans("", "", string.punctuation))
    # Drop the English articles a/an/the as whole words.
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    # Collapse runs of whitespace and trim the ends.
    return " ".join(text.split())
def get_tokens(s):
    """Split ``s`` into normalized tokens; falsy input yields no tokens."""
    if not s:
        return []
    tokens = normalize_answer(s).split()
    return tokens
class ScoreF1(PerImageEvaluationMetric):
    """SQuAD-style token-level F1 between predicted and gold answers.

    For repeated question ids only the best score seen so far is kept, so the
    final metric is the mean over questions of the per-question maxima.
    """
    __provider__ = 'f1'

    annotation_types = (QuestionAnsweringAnnotation,)
    prediction_types = (QuestionAnsweringPrediction,)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # question_id -> best F1 achieved by any prediction for that question
        self.per_question_results = {}

    def update(self, annotation, prediction):
        """Score one prediction and fold it into the per-question maxima."""
        # Gold answers that normalize to '' are dropped; if none remain the
        # question is treated as unanswerable (gold answer is '').
        gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])]
        if not gold_answers:
            gold_answers = ['']
        prediction_answer = prediction.tokens[0] if prediction.tokens else ''
        max_f1_score = max(self.compute_f1(a, prediction_answer) for a in gold_answers)
        current_max_f1_score = self.per_question_results.get(annotation.question_id, 0)
        self.per_question_results[annotation.question_id] = max(max_f1_score, current_max_f1_score)
        return max_f1_score

    @staticmethod
    def compute_f1(a_gold, a_pred):
        """Token-overlap F1 between one gold and one predicted answer string."""
        gold_toks = get_tokens(a_gold)
        pred_toks = get_tokens(a_pred)
        common = Counter(gold_toks) & Counter(pred_toks)
        num_same = sum(common.values())
        if len(gold_toks) == 0 or len(pred_toks) == 0:
            # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
            return int(gold_toks == pred_toks)
        if num_same == 0:
            return 0
        precision = 1.0 * num_same / len(pred_toks)
        recall = 1.0 * num_same / len(gold_toks)
        f1 = (2 * precision * recall) / (precision + recall)
        return f1

    def evaluate(self, annotations, predictions):
        """Mean of the per-question best F1 scores accumulated via update()."""
        return sum(self.per_question_results.values()) / len(self.per_question_results)

    def reset(self):
        """Clear accumulated per-question scores."""
        del self.per_question_results
        self.per_question_results = {}
class ExactMatchScore(PerImageEvaluationMetric):
    """SQuAD-style exact-match score after answer normalization.

    Only the best result per question id is kept, so repeated predictions for
    the same question can never lower the score.
    """
    __provider__ = 'exact_match'

    annotation_types = (QuestionAnsweringAnnotation, QuestionAnsweringBiDAFAnnotation, )
    prediction_types = (QuestionAnsweringPrediction, )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # question_id -> best exact-match result (0 or 1) seen so far
        self.per_question_results = {}

    def update(self, annotation, prediction):
        """Score one prediction and keep the per-question maximum."""
        # Gold answers that normalize to '' are dropped; an empty list means
        # the question is unanswerable and the gold answer becomes ''.
        gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])]
        if not gold_answers:
            gold_answers = ['']
        pred_answer = prediction.tokens[0] if prediction.tokens else ''
        max_exact_match = max(self.METHOD_NAME(a_gold, pred_answer) for a_gold in gold_answers)
        self.per_question_results[annotation.question_id] = max(
            max_exact_match, self.per_question_results.get(annotation.question_id, 0)
        )
        return max_exact_match

    @staticmethod
    def METHOD_NAME(a_gold, a_pred):
        """Return 1 if the normalized answers are identical, else 0."""
        return int(normalize_answer(a_gold) == normalize_answer(a_pred))

    def evaluate(self, annotations, predictions):
        """Mean of the per-question best exact-match results."""
        return sum(self.per_question_results.values()) / len(self.per_question_results)

    def reset(self):
        """Clear accumulated per-question results."""
        del self.per_question_results
        self.per_question_results = {}
class QuestionAnsweringEmbeddingAccuracy(FullDatasetEvaluationMetric):
    """Top-k retrieval accuracy for question/context embedding models.

    Questions and contexts arrive interleaved in the same annotation list:
    entries with ``context_pos_indetifier`` set are questions, the rest are
    contexts. For each question all contexts are ranked by L2 distance and a
    hit is counted when the true context is among the ``top_k`` closest.
    """
    __provider__ = 'qa_embedding_accuracy'

    annotation_types = (QuestionAnsweringEmbeddingAnnotation,)
    prediction_types = (QuestionAnsweringEmbeddingPrediction,)

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'top_k': NumberField(
                value_type=int, min_value=1, max_value=1000, default=5, optional=True,
                description='Specifies the number of closest context embeddings to check.'
            ),
        })
        return parameters

    def configure(self):
        # Number of nearest contexts considered a "hit" window.
        self.top_k = self.get_value_from_config('top_k')

    def evaluate(self, annotations, predictions):
        """Return top-k accuracy over all question/context pairs."""
        ap_pairs = list(zip(annotations, predictions))
        # check data alignment
        # NOTE(review): the condition below mixes an identity check (``is``)
        # with ``p.identifier.values`` (an attribute access that is truthy for
        # tuples), so the tuple branch can never fail -- this assert likely
        # does not verify what its message claims. Confirm the intended check.
        assert all(
            a.identifier is p.identifier
            if not isinstance(p.identifier, tuple)
            else p.identifier.values
            for a, p in ap_pairs), "annotations and predictions are not aligned"
        # Questions carry the identifier of their positive context; contexts don't.
        q_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is not None]
        c_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is None]
        c_data_identifiers = [a.identifier for a, p in c_pairs]
        c_vecs = numpy.array([p.embedding for a, p in c_pairs])
        # calc distances from each question to all contexts and check if top_k has true positives
        true_pos = 0
        for q_a, q_p in q_pairs:
            # calc distance between question embedding with all context embeddings
            d = c_vecs - q_p.embedding[None, :]
            dist = numpy.linalg.norm(d, ord=2, axis=1)
            index = dist.argsort()
            # check that right context in the list of top_k
            c_pos_index = c_data_identifiers.index(q_a.context_pos_indetifier)
            if c_pos_index in index[:self.top_k]:
                true_pos += 1
        # NOTE(review): returns a one-element list when questions exist but the
        # scalar 0 otherwise -- confirm callers accept both shapes.
        return [true_pos/len(q_pairs)] if q_pairs else 0
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for the Gradient Descent optimizer."""
from test.python.algorithms import QiskitAlgorithmsTestCase
import numpy as np
from qiskit.algorithms.optimizers import GradientDescent, GradientDescentState
from qiskit.algorithms.optimizers.steppable_optimizer import TellData, AskData
from qiskit.circuit.library import PauliTwoDesign
from qiskit.opflow import I, Z, StateFn
class TestGradientDescent(QiskitAlgorithmsTestCase):
    """Tests for the gradient descent optimizer."""

    def setUp(self):
        super().setUp()
        # Fixed seed keeps any stochastic parts of the tests reproducible.
        np.random.seed(12)
        self.initial_point = np.array([1, 1, 1, 1, 0])

    def objective(self, x):
        """Objective function for the tests: squared distance of ||x|| from 1."""
        return (np.linalg.norm(x) - 1) ** 2

    def METHOD_NAME(self, x):
        """Analytic gradient of the objective function."""
        return 2 * (np.linalg.norm(x) - 1) * x / np.linalg.norm(x)

    def test_pauli_two_design(self):
        """Test standard gradient descent on the Pauli two-design example."""
        circuit = PauliTwoDesign(3, reps=3, seed=2)
        parameters = list(circuit.parameters)
        # The opflow expectation-value construction is deprecated, hence the
        # assertWarns wrappers around its use.
        with self.assertWarns(DeprecationWarning):
            obs = Z ^ Z ^ I
            expr = ~StateFn(obs) @ StateFn(circuit)
        initial_point = np.array(
            [
                0.1822308,
                -0.27254251,
                0.83684425,
                0.86153976,
                -0.7111668,
                0.82766631,
                0.97867993,
                0.46136964,
                2.27079901,
                0.13382699,
                0.29589915,
                0.64883193,
            ]
        )

        def objective_pauli(x):
            return expr.bind_parameters(dict(zip(parameters, x))).eval().real

        optimizer = GradientDescent(maxiter=100, learning_rate=0.1, perturbation=0.1)
        with self.assertWarns(DeprecationWarning):
            result = optimizer.minimize(objective_pauli, x0=initial_point)
        self.assertLess(result.fun, -0.95)  # final loss
        self.assertEqual(result.nfev, 1300)  # function evaluations

    def test_callback(self):
        """Test the callback."""
        history = []

        def callback(*args):
            history.append(args)

        optimizer = GradientDescent(maxiter=1, callback=callback)
        _ = optimizer.minimize(self.objective, np.array([1, -1]))
        # One iteration -> exactly one callback invocation.
        self.assertEqual(len(history), 1)
        self.assertIsInstance(history[0][0], int)  # nfevs
        self.assertIsInstance(history[0][1], np.ndarray)  # parameters
        self.assertIsInstance(history[0][2], float)  # function value
        self.assertIsInstance(history[0][3], float)  # norm of the gradient

    def test_minimize(self):
        """Test setting the learning rate as iterator and minimizing the function."""

        def learning_rate():
            # Decaying power-law schedule supplied as a generator factory.
            power = 0.6
            constant_coeff = 0.1

            def powerlaw():
                n = 0
                while True:
                    yield constant_coeff * (n**power)
                    n += 1

            return powerlaw()

        optimizer = GradientDescent(maxiter=20, learning_rate=learning_rate)
        result = optimizer.minimize(self.objective, self.initial_point, self.METHOD_NAME)
        self.assertLess(result.fun, 1e-5)

    def test_no_start(self):
        """Tests that making a step without having started the optimizer raises an error."""
        optimizer = GradientDescent()
        with self.assertRaises(AttributeError):
            optimizer.step()

    def test_start(self):
        """Tests if the start method initializes the state properly."""
        optimizer = GradientDescent()
        self.assertIsNone(optimizer.state)
        self.assertIsNone(optimizer.perturbation)
        optimizer.start(x0=self.initial_point, fun=self.objective)
        test_state = GradientDescentState(
            x=self.initial_point,
            fun=self.objective,
            jac=None,
            nfev=0,
            njev=0,
            nit=0,
            learning_rate=1,
            stepsize=None,
        )
        self.assertEqual(test_state, optimizer.state)

    def test_ask(self):
        """Test the ask method."""
        optimizer = GradientDescent()
        optimizer.start(fun=self.objective, x0=self.initial_point)
        ask_data = optimizer.ask()
        # Without an explicit jac, ask requests a gradient evaluation only.
        np.testing.assert_equal(ask_data.x_jac, self.initial_point)
        self.assertIsNone(ask_data.x_fun)

    def test_evaluate(self):
        """Test the evaluate method."""
        # Tiny perturbation so the finite-difference gradient is close to
        # the analytic one.
        optimizer = GradientDescent(perturbation=1e-10)
        optimizer.start(fun=self.objective, x0=self.initial_point)
        ask_data = AskData(x_jac=self.initial_point)
        tell_data = optimizer.evaluate(ask_data=ask_data)
        np.testing.assert_almost_equal(tell_data.eval_jac, self.METHOD_NAME(self.initial_point), decimal=2)

    def test_tell(self):
        """Test the tell method."""
        optimizer = GradientDescent(learning_rate=1.0)
        optimizer.start(fun=self.objective, x0=self.initial_point)
        ask_data = AskData(x_jac=self.initial_point)
        tell_data = TellData(eval_jac=self.initial_point)
        optimizer.tell(ask_data=ask_data, tell_data=tell_data)
        # x - 1.0 * x == 0 for every component.
        np.testing.assert_equal(optimizer.state.x, np.zeros(optimizer.state.x.shape))

    def test_continue_condition(self):
        """Test if the continue condition is working properly."""
        optimizer = GradientDescent(tol=1)
        optimizer.start(fun=self.objective, x0=self.initial_point)
        self.assertTrue(optimizer.continue_condition())
        # A stepsize below tol stops the optimization.
        optimizer.state.stepsize = 0.1
        self.assertFalse(optimizer.continue_condition())
        # Exceeding the iteration budget also stops it.
        optimizer.state.stepsize = 10
        optimizer.state.nit = 1000
        self.assertFalse(optimizer.continue_condition())

    def test_step(self):
        """Tests if performing one step yields the desired result."""
        optimizer = GradientDescent(learning_rate=1.0)
        optimizer.start(fun=self.objective, jac=self.METHOD_NAME, x0=self.initial_point)
        optimizer.step()
        np.testing.assert_almost_equal(
            optimizer.state.x, self.initial_point - self.METHOD_NAME(self.initial_point), 6
        )

    def test_wrong_dimension_gradient(self):
        """Tests if an error is raised when a gradient of the wrong dimension is passed."""
        optimizer = GradientDescent(learning_rate=1.0)
        optimizer.start(fun=self.objective, x0=self.initial_point)
        ask_data = AskData(x_jac=self.initial_point)
        tell_data = TellData(eval_jac=np.array([1.0, 5]))
        with self.assertRaises(ValueError):
            optimizer.tell(ask_data=ask_data, tell_data=tell_data)
        tell_data = TellData(eval_jac=np.array(1))
        with self.assertRaises(ValueError):
            optimizer.tell(ask_data=ask_data, tell_data=tell_data)
import json
import re
from html.parser import HTMLParser
from io import StringIO
from hedera.supported_languages import SUPPORTED_LANGUAGES
from lemmatization.lemmatizer import Lemmatizer
class EditedTextHtmlParser(HTMLParser):
    """Parse an edited lemmatized-text HTML fragment back into token data.

    Walks the <span> markup produced by the text editor, keeping tokens whose
    stored lemma is still valid for their text (per ``token_lemma_dict``) and
    re-running the lemmatizer over any text that changed.
    """

    def __init__(self, token_lemma_dict=None, lang=None):
        # Per-span parser state, reset at every closing tag.
        self.current_tag = None
        self.current_attrs = {}
        self.current_data = ""
        # Accumulated output: one dict per token, in document order.
        self.lemmatized_text_data = []
        # Mapping of token text -> lemma ids considered still valid.
        self.token_lemma_dict = token_lemma_dict
        self.lemmatizer = Lemmatizer(lang)
        self.service = SUPPORTED_LANGUAGES[lang].service
        self.initial = ""
        # Set by the language service when special text (e.g. Latin
        # underscore conventions) needs handling across callbacks.
        self.unique_text = False
        return super().__init__()

    def handle_starttag(self, tag, attrs):
        if tag == "span":
            self.current_tag = "span"
        """
        Note: the fed in data could be two different types from a tuple of (key, dict) or (key, bool)
        handle_endtag() will require a key:value pair containing either of the structure below:
        [('data-token', '{"glossed": "glossed-automatic", "initial": "", "lemma_id": 1372, "resolved": "resolved-automatic", "gloss_ids": [84128, 68154], "word_normalized": "Arma"}')]
        [('follower', 'true')]
        """
        key, value = attrs[0]
        # NOTE(review): ``key in "follower"`` is a substring test, so keys like
        # "f" or "follow" would also match -- presumably ``key == "follower"``
        # was intended; confirm before changing.
        if key in "follower":
            self.current_attrs = {key: value}
        else:
            self.current_attrs = json.loads(value)

    def handle_endtag(self, tag):
        if "follower" in self.current_attrs:
            self.METHOD_NAME(self.current_data)
        # Note: sometimes the current_tag/self.current_attrs will be empty/None when there is a newline/break
        # len() checks if empty string so we dont append blank words
        elif self.current_data is not None and self.current_tag is not None and len(self.current_data):
            self.lemmatized_text_data.append(
                {
                    **self.current_attrs,
                    "word": self.current_data,
                    "following": "",
                }
            )
        # Reset per-span state ready for the next element.
        self.current_tag = None
        self.current_attrs = {}
        self.current_data = ""

    def handle_data(self, data):
        # used to modify data by the service (e.g. latin underscores)
        formatted_text_data = self.service.apply_text_rule(self.unique_text, data)
        if type(formatted_text_data) is dict:
            data = formatted_text_data["data"]
            self.unique_text = formatted_text_data["unique_text"]
        if ("follower" in self.current_attrs):
            self.current_data = data
        else:
            try:
                # Re-lemmatize when the span has no token attrs or its stored
                # lemma id is no longer valid for this token text.
                if (
                    (self.current_tag is None) or
                    (self.current_tag == "span" and self.current_attrs == {}) or
                    (self.current_attrs["lemma_id"] not in self.token_lemma_dict[data])
                ):
                    self.lemmatize_chunk(data)
                else:
                    self.current_data = data
            except KeyError:
                # Token text missing from token_lemma_dict (or attrs lack a
                # lemma_id): let the language service decide whether it is
                # special text, otherwise lemmatize it afresh.
                if self.service.check_text(data):
                    self.unique_text = data
                if not self.unique_text:
                    self.lemmatize_chunk(data)

    def METHOD_NAME(self, follower):
        """
        Takes the contents of a span where 'follower' is true.
        Splits any 'follower' characters from alpha numeric characters.
        Sets the 'following' attr on the previous data point with true followers
        and sends new alpha numeric string to be lemmatized.
        Returns None
        """
        followers = []
        text = []
        for idx, ch in enumerate(follower):
            if ch.isalnum():
                # Everything from the first alphanumeric char onward is real
                # token text rather than trailing punctuation.
                text = follower[idx:]
                break
            followers.append(ch)
        if len(self.lemmatized_text_data) > 0:
            self.lemmatized_text_data[-1]["following"] += "".join(followers)
        else:
            # this will only occur if the text begins with a "follower"
            self.lemmatized_text_data.append(
                {
                    "word": "",
                    "lemma_id": None,
                    "resolved": True,
                    "word_normalized": "",
                    "following": "".join(followers)
                }
            )
        if (len(text) > 0):
            self.lemmatize_chunk("".join(text))

    def lemmatize_chunk(self, chunk):
        """
        Takes an unrecognized chunk of text.
        Sends 'chunk' to be lemmatized, then extends the data with the returned content.
        Checks if chunk does not contain return and newline "\r\n" - only add tokens if it the chunk is not a return/newline
        In case there is an newline at the beginning of the text("initial"), the newline char will be added to the previous text "following" key:value pair
        **Fixes problem with empty tokens**
        **Fixes problem with latin underscores**
        Returns None
        """
        self.current_data = None
        new_data = self.lemmatizer.lemmatize(chunk)
        # regex checks if '\r\n' is the only char used in the chunk
        contains_only_newline = bool(re.match(r"^[\r\n]+$", chunk))
        if not contains_only_newline:
            self.process_initial_data(new_data)
            self.lemmatized_text_data.extend(new_data)
        if contains_only_newline and len(self.lemmatized_text_data):
            token_lemma_dict_keys = list(self.token_lemma_dict.keys())
            prev_lemma_id = self.lemmatized_text_data[-1]["lemma_id"]
            following = self.lemmatized_text_data[-1]["following"]
            # Note: Added check if we have reached the end of the data array because theres a bug where new lines are added after each edit
            if len(token_lemma_dict_keys) and prev_lemma_id not in self.token_lemma_dict[token_lemma_dict_keys[-1]]:
                self.lemmatized_text_data[-1]["following"] = f"{following}{chunk}"
            else:
                self.process_initial_data(new_data)
                self.lemmatized_text_data.extend(new_data)

    # TODO EDGE CASE: Newlines/breaks that may happen at the very beginning of the text
    def process_initial_data(self, new_data):
        # if statement will add newlines to "following" to previous text in lemmatized_text_data
        if len(new_data) and new_data[0]["initial"] and len(self.lemmatized_text_data):
            following = self.lemmatized_text_data[-1]["following"]
            self.lemmatized_text_data[-1]["following"] = f"{following}{new_data[0]['initial']}"
class TagStripper(HTMLParser):
    """HTML parser that discards all markup and keeps only the text content."""

    def __init__(self):
        super().__init__()
        self.reset()
        self.convert_charrefs = True
        self.strict = False
        # Accumulates every text fragment seen between tags.
        self.text = StringIO()

    def handle_data(self, chunk):
        """Collect a text fragment encountered outside of tags."""
        self.text.write(chunk)

    def get_data(self):
        """Return all collected text as a single string."""
        return self.text.getvalue()
# Leo colorizer control file for awk mode.
# This file is in the public domain.
# Properties for awk mode (editor behaviour: indentation, comments, word breaks).
properties = {
    "indentCloseBrackets": "}",
    "indentOpenBrackets": "{",
    "lineComment": "#",
    "lineUpClosingBracket": "true",
    "wordBreakChars": ",+-=<>/?^&*",
}

# Attributes dict for awk_main ruleset.
awk_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "\\",
    "highlight_digits": "true",
    "ignore_case": "false",  # awk identifiers are case-sensitive
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for awk mode.
attributesDictDict = {
    "awk_main": awk_main_attributes_dict,
}
# Keywords dict for awk_main ruleset.
# Coloring classes as used below: keyword1 entries are statements/control-flow
# words, keyword2 entries are built-in functions, keyword3 entries are
# predefined variables (BEGIN/END are grouped with keyword3 here).
awk_main_keywords_dict = {
    "$0": "keyword3",
    "ARGC": "keyword3",
    "ARGIND": "keyword3",
    "ARGV": "keyword3",
    "BEGIN": "keyword3",
    "CONVFMT": "keyword3",
    "END": "keyword3",
    "ENVIRON": "keyword3",
    "ERRNO": "keyword3",
    "FIELDSWIDTH": "keyword3",
    "FILENAME": "keyword3",
    "FNR": "keyword3",
    "FS": "keyword3",
    "IGNORECASE": "keyword3",
    "NF": "keyword3",
    "NR": "keyword3",
    "OFMT": "keyword3",
    "OFS": "keyword3",
    "ORS": "keyword3",
    "RLENGTH": "keyword3",
    "RS": "keyword3",
    "RSTART": "keyword3",
    "RT": "keyword3",
    "SUBSEP": "keyword3",
    "atan2": "keyword2",
    "break": "keyword1",
    "close": "keyword1",
    "continue": "keyword1",
    "cos": "keyword2",
    "delete": "keyword1",
    "do": "keyword1",
    "else": "keyword1",
    "exit": "keyword1",
    "exp": "keyword2",
    "fflush": "keyword1",
    "for": "keyword1",
    "function": "keyword1",
    "gensub": "keyword2",
    "getline": "keyword2",
    "gsub": "keyword2",
    "huge": "keyword1",
    "if": "keyword1",
    "in": "keyword1",
    "index": "keyword2",
    "int": "keyword2",
    "length": "keyword2",
    "log": "keyword2",
    "match": "keyword2",
    "next": "keyword1",
    "nextfile": "keyword1",
    "print": "keyword1",
    "printf": "keyword1",
    "rand": "keyword2",
    "return": "keyword1",
    "sin": "keyword2",
    "split": "keyword2",
    "sprintf": "keyword2",
    "sqrt": "keyword2",
    "srand": "keyword2",
    "sub": "keyword2",
    "substr": "keyword2",
    "system": "keyword2",
    "tolower": "keyword2",
    "toupper": "keyword2",
    "while": "keyword1",
}

# Dictionary of keywords dictionaries for awk mode.
keywordsDictDict = {
    "awk_main": awk_main_keywords_dict,
}
# Rules for awk_main ruleset.
# Fixes: the single-quote rule was left with the placeholder name METHOD_NAME
# (restored to awk_rule1, matching the awk_rule0..awk_rule21 series and the
# reference in rulesDict1), and `importDict = {}` ended with a stray '|' token.

def awk_rule0(colorer, s, i):
    """Double-quoted string literal (may not span lines)."""
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        no_line_break=True)

def awk_rule1(colorer, s, i):
    """Single-quoted string literal (may not span lines)."""
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        no_line_break=True)

def awk_rule2(colorer, s, i):
    """Comment from '#' to end of line."""
    return colorer.match_eol_span(s, i, kind="comment1", seq="#")

def awk_rule3(colorer, s, i):
    """Operator '='."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="=")

def awk_rule4(colorer, s, i):
    """Operator '!'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="!")

def awk_rule5(colorer, s, i):
    """Operator '>='."""
    return colorer.match_plain_seq(s, i, kind="operator", seq=">=")

def awk_rule6(colorer, s, i):
    """Operator '<='."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="<=")

def awk_rule7(colorer, s, i):
    """Operator '+'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="+")

def awk_rule8(colorer, s, i):
    """Operator '-'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="-")

def awk_rule9(colorer, s, i):
    """Operator '/'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="/")

def awk_rule10(colorer, s, i):
    """Operator '*'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="*")

def awk_rule11(colorer, s, i):
    """Operator '>'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq=">")

def awk_rule12(colorer, s, i):
    """Operator '<'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="<")

def awk_rule13(colorer, s, i):
    """Operator '%'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="%")

def awk_rule14(colorer, s, i):
    """Operator '&'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="&")

def awk_rule15(colorer, s, i):
    """Operator '|'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="|")

def awk_rule16(colorer, s, i):
    """Operator '^'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="^")

def awk_rule17(colorer, s, i):
    """Operator '~'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="~")

def awk_rule18(colorer, s, i):
    """Operator '}'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="}")

def awk_rule19(colorer, s, i):
    """Operator '{'."""
    return colorer.match_plain_seq(s, i, kind="operator", seq="{")

def awk_rule20(colorer, s, i):
    """Label: mark the text preceding ':' (at whitespace end, match excluded)."""
    return colorer.match_mark_previous(s, i, kind="label", pattern=":",
        at_whitespace_end=True,
        exclude_match=True)

def awk_rule21(colorer, s, i):
    """Keyword lookup against awk_main_keywords_dict."""
    return colorer.match_keywords(s, i)

# Rules dict for awk_main ruleset: maps a start character to the rules that may
# begin there.  All identifier/word characters dispatch to the keyword rule;
# operator characters map to their sequence rules ('<=' is listed before '<',
# and '>=' before '>', as in the original table).
rulesDict1 = {
    ch: [awk_rule21]
    for ch in "$0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
}
rulesDict1.update({
    "!": [awk_rule4],
    "\"": [awk_rule0],
    "#": [awk_rule2],
    "%": [awk_rule13],
    "&": [awk_rule14],
    "'": [awk_rule1],
    "*": [awk_rule10],
    "+": [awk_rule7],
    "-": [awk_rule8],
    "/": [awk_rule9],
    ":": [awk_rule20],
    "<": [awk_rule6, awk_rule12],
    "=": [awk_rule3],
    ">": [awk_rule5, awk_rule11],
    "^": [awk_rule16],
    "{": [awk_rule19],
    "|": [awk_rule15],
    "}": [awk_rule18],
    "~": [awk_rule17],
})

# x.rulesDictDict for awk mode.
rulesDictDict = {
    "awk_main": rulesDict1,
}

# Import dict for awk mode.
importDict = {}
from typing import List, Tuple
from unittest.mock import Mock
import pytest
from control import data
from control.algorithm import common
from control.chargepoint.chargepoint import Chargepoint
from control.ev import Ev
from control.counter import Counter
from control.counter_all import CounterAll
@pytest.fixture(autouse=True)
def cp() -> None:
    # Autouse fixture: before every test in this module, reset the global data
    # singleton and register a single chargepoint "cp0".
    data.data_init(Mock())
    data.data.cp_data = {"cp0": Chargepoint(0, None)}
@pytest.mark.parametrize("set_current, expected_current",
                         [pytest.param(6, 0),
                          pytest.param(0, 0)])
def test_reset_current(set_current: int, expected_current: int):
    """reset_current must zero the set current of every chargepoint,
    regardless of its previous value."""
    # setup
    data.data.cp_data["cp0"].data.set.current = set_current
    # execution
    common.reset_current()
    # evaluation
    assert data.data.cp_data["cp0"].data.set.current == expected_current
@pytest.mark.parametrize(
    "diff, required_currents, expected_set_current, expected_diffs",
    [
        pytest.param(2, [10, 0, 0], 8, [2, 0, 0], id="set diff one phase"),
        pytest.param(2, [12]*3, 8, [2]*3, id="set diff three phases"),
        pytest.param(8, [8]*3, 8, [8]*3, id="set min current three phases"),
        pytest.param(0, [8]*3, 8, [0]*3, id="min current is already set, three phases"),
    ])
def test_set_current_counterdiff(diff: float,
                                 required_currents: List[float],
                                 expected_set_current: float,
                                 expected_diffs: List[float],
                                 monkeypatch):
    """set_current_counterdiff must set the chargepoint current and, when the
    diff is non-zero, propagate the per-phase difference to every counter
    returned by get_counters_to_check."""
    # setup
    cp = Chargepoint(4, None)
    ev = Ev(0)
    ev.data.control_parameter.required_currents = required_currents
    cp.data.set.charging_ev_data = ev
    cp.data.set.current = 6
    get_counters_to_check_mock = Mock(return_value=["cp0", "cp6"])
    monkeypatch.setattr(CounterAll, "get_counters_to_check", get_counters_to_check_mock)
    data.data.counter_data = {"cp0": Mock(spec=Counter), "cp6": Mock(spec=Counter)}
    # evaluation
    common.set_current_counterdiff(diff, 8, cp)
    # assertion
    assert cp.data.set.current == expected_set_current
    if diff != 0:
        # NOTE(review): `_counter_data` appears to be the underscore-prefixed
        # attribute behind the `counter_data` assignment above -- presumably a
        # property backing field; confirm against the Data class.
        assert data.data._counter_data['cp0'].update_values_left.call_args_list[0][0][0] == expected_diffs
        assert data.data._counter_data['cp6'].update_values_left.call_args_list[0][0][0] == expected_diffs
@pytest.mark.parametrize(
    "required_currents, expected_mins_counts",
    [
        ([10, 0, 0], ([6, 0, 0], [1, 0, 0])),
        ([12]*3, ([6]*3, [1]*3))
    ])
def test_get_min_current(required_currents: List[float], expected_mins_counts: Tuple[List[float], List[int]]):
    """get_min_current must return, per phase, the minimum current (6 A for a
    used phase, 0 otherwise) and the count of chargepoints using that phase."""
    # setup
    cp = Chargepoint(4, None)
    ev = Ev(0)
    ev.data.control_parameter.required_currents = required_currents
    cp.data.set.charging_ev_data = ev
    # evaluation
    mins_counts = common.get_min_current(cp)
    # assertion
    assert mins_counts == expected_mins_counts
@pytest.mark.parametrize(
    "set_current, diff, expected_current",
    [
        pytest.param(0, 2, 8, id="min current is set, no current has been set on this iteration"),
        pytest.param(6, 2, 6, id="min current is set, current has been set on this iteration"),
        pytest.param(7, 2, 7, id="new current is higher, current has been set on this iteration"),
        pytest.param(9, 2, 8, id="new current is lower, current has been set on this iteration"),
    ])
def test_get_current_to_set(set_current: float, diff: float, expected_current: float):
    """get_current_to_set(set_current, diff, min_current=6) must keep a lower
    already-set current and otherwise use min_current + diff."""
    # setup & evaluation
    current = common.get_current_to_set(set_current, diff, 6)
    # assertion
    assert current == expected_current
@pytest.mark.parametrize(
    "counts, available_currents, missing_currents, expected_current",
    [
        pytest.param([2]*3, [12, 15, 16], [5]*3, 6),
        pytest.param([2]*3, [1]*3, [2]*3, 0.5),
        pytest.param([2]*3, [0]*3, [2]*3, 0),
    ])
def test_available_currents_for_cp(counts: List[int],
                                   available_currents: List[float],
                                   missing_currents: List[float],
                                   expected_current: float):
    """available_current_for_cp must return this chargepoint's share of the
    available current (limited by what is missing across all phases)."""
    # setup
    cp = Chargepoint(4, None)
    ev = Ev(0)
    ev.data.control_parameter.required_currents = [16]*3
    ev.data.control_parameter.required_current = 16
    cp.data.set.charging_ev_data = ev
    cp.data.set.target_current = 10
    # evaluation
    current = common.available_current_for_cp(cp, counts, available_currents, missing_currents)
    # assertion
    assert current == expected_current
@pytest.mark.parametrize(
    "required_currents_1, required_currents_2, expected_currents",
    [
        pytest.param([6, 10, 15], [20]*3, ([14, 18, 23], [2]*3)),
        pytest.param([6, 10, 15], [6, 0, 0], ([0, 4, 9], [2, 1, 1])),
    ])
def test_get_missing_currents_left(required_currents_1: List[float],
                                   required_currents_2: List[float],
                                   expected_currents: List[float]):
    """get_missing_currents_left must sum, per phase, the currents still
    missing above the minimum, and count the chargepoints per phase."""
    # setup
    def setup_cp(num: int, required_currents) -> Chargepoint:
        # helper: build a chargepoint with an attached EV requiring the
        # given per-phase currents
        ev = Ev(0)
        cp = Chargepoint(num, None)
        ev.data.control_parameter.required_currents = required_currents
        cp.data.set.charging_ev_data = ev
        return cp
    # evaluation
    currents = common.get_missing_currents_left(
        [setup_cp(1, required_currents_1), setup_cp(2, required_currents_2)])
    # assertion
    assert currents == expected_currents
@pytest.mark.parametrize(
    "reserve_for_not_charging, get_currents, expected_considered",
    [
        # Fixed: the first two params shared the same id, which makes the
        # individual cases indistinguishable in pytest output.
        pytest.param(True, [0]*3, False, id="reserve_for_not_charging active, not charging"),
        pytest.param(True, [6]*3, False, id="reserve_for_not_charging active, charging"),
        pytest.param(False, [0]*3, True, id="not charging"),
        pytest.param(False, [6]*3, False, id="charging"),
    ])
def test_consider_not_charging_chargepoint_in_loadmanagement(
        reserve_for_not_charging: bool,
        get_currents: List[float],
        expected_considered: bool):
    """A chargepoint is only considered as 'not charging' in load management
    when reserve_for_not_charging is off and the chargepoint draws no current.

    Fixed: the function was left with the placeholder name METHOD_NAME, so
    pytest never collected it (only ``test_*`` names are collected).
    """
    # setup
    cp = Chargepoint(4, None)
    cp.data.get.currents = get_currents
    data.data.counter_all_data.data.config.reserve_for_not_charging = reserve_for_not_charging
    # evaluation
    considered = common.consider_not_charging_chargepoint_in_loadmanagement(cp)
    # assertion
    assert considered == expected_considered
"""
Classes and methods to interface with files storing rate data.
"""
import os
import re
from scipy.constants import physical_constants
from pynucastro.nucdata.binding_table import BindingTable
from pynucastro.nucdata.elements import PeriodicTable
from pynucastro.nucdata.mass_table import MassTable
from pynucastro.nucdata.partition_function import PartitionFunctionCollection
from pynucastro.nucdata.spin_table import SpinTable
# Locations of the bundled rate-library data files.
_pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
_pynucastro_rates_dir = os.path.join(_pynucastro_dir, 'library')
_pynucastro_tabular_dir = os.path.join(_pynucastro_rates_dir, 'tabular')

# set the atomic mass unit constant in MeV
m_u, _, _ = physical_constants['atomic mass constant energy equivalent in MeV']

# read the mass excess table once and store it at the module-level
_mass_table = MassTable()

# read the spin table once and store it at the module-level
_spin_table = SpinTable(reliable=True)

# read the binding energy table once and store it at the module-level
_binding_table = BindingTable()

# read the partition function table once and store it at the module-level
_pcollection = PartitionFunctionCollection(use_high_temperatures=True, use_set='frdm')
class UnsupportedNucleus(Exception):
    """Raised when a nuclide name cannot be represented here (e.g. the
    isomer-style spellings "al-6" / "al*6" rejected by Nucleus)."""
    pass
class Nucleus:
    """
    a nucleus that participates in a reaction -- we store it in a
    class to hold its properties, define a sorting, and give it a
    pretty printing string.

    :var Z: atomic number
    :var N: neutron number
    :var A: atomic mass
    :var nucbind: nuclear binding energy (MeV / nucleon)
    :var short_spec_name: nucleus abbreviation (e.g. "he4")
    :var caps_name: capitalized short species name (e.g. "He4")
    :var el: element name (e.g. "he")
    :var pretty: LaTeX formatted version of the nucleus name
    :var A_nuc: Nuclear Mass in amu
    """

    # cache of already-constructed nuclei, keyed on (lowercase name, dummy)
    _cache = {}

    def __init__(self, name, dummy=False):
        name = name.lower()
        self.raw = name

        # a dummy nucleus is one that we can use where a nucleus is needed
        # but it is not considered to be part of the network
        self.dummy = dummy

        # element symbol and atomic weight
        if name == "p":
            self.el = "h"
            self.A = 1
            self.short_spec_name = "h1"
            self.caps_name = "p"
        elif name == "d":
            self.el = "h"
            self.A = 2
            self.short_spec_name = "h2"
            self.caps_name = "H2"
        elif name == "t":
            self.el = "h"
            self.A = 3
            self.short_spec_name = "h3"
            self.caps_name = "H3"
        elif name == "a":
            # this is a convenience, enabling the use of a commonly-used alias:
            # He4 --> \alpha --> "a" , e.g. c12(a,g)o16
            self.el = "he"
            self.A = 4
            self.short_spec_name = "he4"
            self.raw = "he4"
            self.caps_name = "He4"
        elif name == "n":
            self.el = "n"
            self.A = 1
            self.Z = 0
            self.N = 1
            self.short_spec_name = "n"
            self.spec_name = "neutron"
            self.pretty = fr"\mathrm{{{self.el}}}"
            self.caps_name = "n"
        elif name.strip() in ("al-6", "al*6"):
            raise UnsupportedNucleus()
        else:
            # general case: an element abbreviation followed by a mass number
            e = re.match(r"([a-zA-Z]*)(\d*)", name)
            self.el = e.group(1).title()  # chemical symbol
            assert self.el
            self.A = int(e.group(2))
            assert self.A >= 0
            self.short_spec_name = name
            self.caps_name = name.capitalize()

        # use lowercase element abbreviation regardless the case of the input
        self.el = self.el.lower()

        # atomic number comes from periodic table
        if name != "n":
            i = PeriodicTable.lookup_abbreviation(self.el)
            self.Z = i.Z
            assert isinstance(self.Z, int)
            assert self.Z >= 0
            self.N = self.A - self.Z
            assert isinstance(self.N, int)
            assert self.N >= 0

            # long name
            self.spec_name = f'{i.name}-{self.A}'

            # latex formatted style
            self.pretty = fr"{{}}^{{{self.A}}}\mathrm{{{self.el.capitalize()}}}"

        # set the number of spin states
        try:
            self.spin_states = _spin_table.get_spin_states(a=self.A, z=self.Z)
        except NotImplementedError:
            self.spin_states = None

        # set a partition function object to every nucleus
        try:
            self.partition_function = _pcollection.get_partition_function(self.short_spec_name)
        except ValueError:
            self.partition_function = None

        try:
            self.nucbind = _binding_table.get_binding_energy(n=self.N, z=self.Z)
        except NotImplementedError:
            # the binding energy table doesn't know about this nucleus
            self.nucbind = None

        # Now we will define the Nuclear Mass (in amu),
        try:
            self.A_nuc = float(self.A) + _mass_table.get_mass_diff(a=self.A, z=self.Z) / m_u
        except NotImplementedError:
            self.A_nuc = None

    @classmethod
    def from_cache(cls, name, dummy=False):
        """Return a cached Nucleus for (name, dummy), constructing it once."""
        key = (name.lower(), dummy)
        if key not in cls._cache:
            cls._cache[key] = Nucleus(name, dummy)
        return cls._cache[key]

    def __repr__(self):
        return self.raw

    def __hash__(self):
        # hash on (Z, A) to match __eq__ below
        return hash((self.Z, self.A))

    def c(self):
        """return the capitalized-style name"""
        return self.caps_name

    def cindex(self):
        """return the name for C++ indexing

        (restored from the METHOD_NAME placeholder)
        """
        return self.short_spec_name.capitalize()

    def __eq__(self, other):
        # a Nucleus compares equal to another Nucleus with the same element
        # and (Z, A), or to a bare (Z, A) tuple
        if isinstance(other, Nucleus):
            return self.el == other.el and \
                self.Z == other.Z and self.A == other.A
        if isinstance(other, tuple):
            return (self.Z, self.A) == other
        return NotImplemented

    def __lt__(self, other):
        # sort by Z first, then by A
        if not self.Z == other.Z:
            return self.Z < other.Z
        return self.A < other.A
def get_nuclei_in_range(zmin, zmax, amin, amax):
    """Return a list of Nucleus objects for every nuclide with
    Z in [zmin, zmax] and A in [amin, amax] (both ranges inclusive)."""
    assert zmax >= zmin, "zmax must be >= zmin"
    assert amax >= amin, "amax must be >= amin"
    return [
        Nucleus(f"{element.abbreviation}{a}")
        for element in (PeriodicTable.lookup_Z(z) for z in range(zmin, zmax + 1))
        for a in range(amin, amax + 1)
    ]
"""Optional passthrough transformer."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["aiwalter", "fkiraly"]
__all__ = ["OptionalPassthrough"]
from sktime.transformations._delegate import _DelegatedTransformer
from sktime.transformations.compose._common import CORE_MTYPES
from sktime.transformations.compose._id import Id
class OptionalPassthrough(_DelegatedTransformer):
    """Wrap an existing transformer to tune whether to include it in a pipeline.

    Allows tuning the implicit hyperparameter whether or not to use a
    particular transformer inside a pipeline (e.g. TransformedTargetForecaster)
    or not. This is achieved by the hyperparameter `passthrough`
    which can be added to a tuning grid then (see example).

    Parameters
    ----------
    transformer : Estimator
        scikit-learn-like or sktime-like transformer to fit and apply to series.
        this is a "blueprint" transformer, state does not change when `fit` is called
    passthrough : bool, default=False
        Whether to apply the given transformer or to just
        passthrough the data (identity transformation). If, True the transformer
        is not applied and the OptionalPassthrough uses the identity
        transformation.

    Attributes
    ----------
    transformer_: transformer,
        this clone is fitted when `fit` is called and provides `transform` and inverse
        if passthrough = False, a clone of `transformer`passed
        if passthrough = True, the identity transformer `Id`

    Examples
    --------
    >>> from sktime.datasets import load_airline
    >>> from sktime.forecasting.naive import NaiveForecaster
    >>> from sktime.transformations.compose import OptionalPassthrough
    >>> from sktime.transformations.series.detrend import Deseasonalizer
    >>> from sktime.transformations.series.adapt import TabularToSeriesAdaptor
    >>> from sktime.forecasting.compose import TransformedTargetForecaster
    >>> from sktime.forecasting.model_selection import (
    ...     ForecastingGridSearchCV,
    ...     SlidingWindowSplitter)
    >>> from sklearn.preprocessing import StandardScaler
    >>> # create pipeline
    >>> pipe = TransformedTargetForecaster(steps=[
    ...     ("deseasonalizer", OptionalPassthrough(Deseasonalizer())),
    ...     ("scaler", OptionalPassthrough(TabularToSeriesAdaptor(StandardScaler()))),
    ...     ("forecaster", NaiveForecaster())])  # doctest: +SKIP
    >>> # putting it all together in a grid search
    >>> cv = SlidingWindowSplitter(
    ...     initial_window=60,
    ...     window_length=24,
    ...     start_with_window=True,
    ...     step_length=48)  # doctest: +SKIP
    >>> param_grid = {
    ...     "deseasonalizer__passthrough" : [True, False],
    ...     "scaler__transformer__transformer__with_mean": [True, False],
    ...     "scaler__passthrough" : [True, False],
    ...     "forecaster__strategy": ["drift", "mean", "last"]}  # doctest: +SKIP
    >>> gscv = ForecastingGridSearchCV(
    ...     forecaster=pipe,
    ...     param_grid=param_grid,
    ...     cv=cv,
    ...     n_jobs=-1)  # doctest: +SKIP
    >>> gscv_fitted = gscv.fit(load_airline())  # doctest: +SKIP
    """

    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True,  # is this an instance-wise transform?
        "X_inner_mtype": CORE_MTYPES,
        # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "None",  # which mtypes do _fit/_predict support for y?
        "univariate-only": False,
        "fit_is_empty": False,
        "capability:inverse_transform": True,
    }

    def __init__(self, transformer, passthrough=False):
        self.transformer = transformer
        self.passthrough = passthrough
        super().__init__()

        # should be all tags, but not fit_is_empty
        #   (_fit should not be skipped)
        tags_to_clone = [
            "scitype:transform-input",
            "scitype:transform-output",
            "scitype:instancewise",
            "y_inner_mtype",
            "capability:inverse_transform",
            "handles-missing-data",
            "X-y-must-have-same-index",
            "transform-returns-same-time-index",
            "skip-inverse-transform",
        ]
        self.clone_tags(transformer, tag_names=tags_to_clone)

        if passthrough:
            self.transformer_ = Id()
        else:
            self.transformer_ = transformer.clone()

    # attribute for _DelegatedTransformer, which then delegates
    #     all non-overridden methods are same as of getattr(self, _delegate_name)
    #     see further details in _DelegatedTransformer docstring
    _delegate_name = "transformer_"

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Restored from the METHOD_NAME placeholder: sktime's test framework
        looks this classmethod up by the name ``get_test_params``.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        from sktime.transformations.series.boxcox import BoxCoxTransformer

        return {"transformer": BoxCoxTransformer(), "passthrough": False}
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSqlPoolVulnerabilityAssessmentRuleBaselineResult',
'AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult',
'get_sql_pool_vulnerability_assessment_rule_baseline',
'get_sql_pool_vulnerability_assessment_rule_baseline_output',
]
@pulumi.output_type
class GetSqlPoolVulnerabilityAssessmentRuleBaselineResult:
    """
    A Sql pool vulnerability assessment rule baseline.
    """
    # Fixed throughout this block: the identifier `name` had been replaced by
    # the placeholder METHOD_NAME, which broke the constructor keyword, the
    # `name` property, and the `@pulumi.getter(name=...)` decorator arguments.
    def __init__(__self__, baseline_results=None, id=None, name=None, type=None):
        if baseline_results and not isinstance(baseline_results, list):
            raise TypeError("Expected argument 'baseline_results' to be a list")
        pulumi.set(__self__, "baseline_results", baseline_results)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="baselineResults")
    def baseline_results(self) -> Sequence['outputs.SqlPoolVulnerabilityAssessmentRuleBaselineItemResponse']:
        """
        The rule baseline result
        """
        return pulumi.get(self, "baseline_results")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")


class AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(GetSqlPoolVulnerabilityAssessmentRuleBaselineResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSqlPoolVulnerabilityAssessmentRuleBaselineResult(
            baseline_results=self.baseline_results,
            id=self.id,
            name=self.name,
            type=self.type)


def get_sql_pool_vulnerability_assessment_rule_baseline(baseline_name: Optional[str] = None,
                                                        resource_group_name: Optional[str] = None,
                                                        rule_id: Optional[str] = None,
                                                        sql_pool_name: Optional[str] = None,
                                                        vulnerability_assessment_name: Optional[str] = None,
                                                        workspace_name: Optional[str] = None,
                                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult:
    """
    Gets a SqlPool's vulnerability assessment rule baseline.
    Azure REST API version: 2021-06-01.


    :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str rule_id: The vulnerability assessment rule ID.
    :param str sql_pool_name: SQL pool name
    :param str vulnerability_assessment_name: The name of the vulnerability assessment.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['baselineName'] = baseline_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['ruleId'] = rule_id
    __args__['sqlPoolName'] = sql_pool_name
    __args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:synapse:getSqlPoolVulnerabilityAssessmentRuleBaseline', __args__, opts=opts, typ=GetSqlPoolVulnerabilityAssessmentRuleBaselineResult).value

    return AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(
        baseline_results=pulumi.get(__ret__, 'baseline_results'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        type=pulumi.get(__ret__, 'type'))


@_utilities.lift_output_func(get_sql_pool_vulnerability_assessment_rule_baseline)
def get_sql_pool_vulnerability_assessment_rule_baseline_output(baseline_name: Optional[pulumi.Input[str]] = None,
                                                               resource_group_name: Optional[pulumi.Input[str]] = None,
                                                               rule_id: Optional[pulumi.Input[str]] = None,
                                                               sql_pool_name: Optional[pulumi.Input[str]] = None,
                                                               vulnerability_assessment_name: Optional[pulumi.Input[str]] = None,
                                                               workspace_name: Optional[pulumi.Input[str]] = None,
                                                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlPoolVulnerabilityAssessmentRuleBaselineResult]:
    """
    Gets a SqlPool's vulnerability assessment rule baseline.
    Azure REST API version: 2021-06-01.


    :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str rule_id: The vulnerability assessment rule ID.
    :param str sql_pool_name: SQL pool name
    :param str vulnerability_assessment_name: The name of the vulnerability assessment.
    :param str workspace_name: The name of the workspace.
    """
    ...
# Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from unicodedata import normalize
from test import test_support
# Test filenames: ASCII, Latin-1, Greek, Cyrillic, Japanese, Hebrew, CJK,
# and mixed-script names, plus code points with interesting normalizations.
filenames = [
    '1_abc',
    u'2_ascii',
    u'3_Gr\xfc\xdf-Gott',
    u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
    u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
    u'6_\u306b\u307d\u3093',
    u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
    u'8_\u66e8\u66e9\u66eb',
    u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',

    # Specific code points: fn, NFC(fn) and NFKC(fn) all differents
    u'10_\u1fee\u1ffd',
    ]

# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
    filenames.extend([
        # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
        u'11_\u0385\u03d3\u03d4',
        u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4')
        u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4')
        u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',

        # Specific code points: fn, NFC(fn) and NFKC(fn) all differents
        u'15_\u1fee\u1ffd\ufad1',
        u'16_\u2000\u2000\u2000A',
        u'17_\u2001\u2001\u2001A',
        u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A')
        u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') ==
                                   #  NFKC(u'\u2001') == NFKC(u'\u2003')
        ])

# Is it Unicode-friendly?  Skip the whole module if the filesystem encoding
# cannot represent the test filenames.
if not os.path.supports_unicode_filenames:
    fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
    try:
        for name in filenames:
            name.encode(fsencoding)
    except UnicodeEncodeError:
        raise unittest.SkipTest("only NT+ and systems with "
                                "Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
    # Don't hide legitimate errors: if one of these suckers exists, it's
    # an error if we can't remove it.
    if os.path.exists(dirname):
        # must pass unicode to os.listdir() so we get back unicode results.
        for fname in os.listdir(unicode(dirname)):
            os.unlink(os.path.join(dirname, fname))
        os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
    """Exercise file functions with Unicode names; subclasses set normal_form
    to re-run every test with names normalized to NFC/NFD/NFKC/NFKD."""
    files = set(filenames)
    normal_form = None

    def setUp(self):
        try:
            os.mkdir(test_support.TESTFN)
        except OSError:
            pass
        files = set()
        for name in self.files:
            # Fixed: `norm` was left as the METHOD_NAME placeholder; it is the
            # normalization helper defined below and called here.
            name = os.path.join(test_support.TESTFN, self.norm(name))
            with open(name, 'w') as f:
                f.write((name+'\n').encode("utf-8"))
            os.stat(name)
            files.add(name)
        self.files = files

    def tearDown(self):
        deltree(test_support.TESTFN)

    def norm(self, s):
        # apply this class's normal form (if any) to unicode names
        if self.normal_form and isinstance(s, unicode):
            return normalize(self.normal_form, s)
        return s

    def _apply_failure(self, fn, filename, expected_exception,
                       check_fn_in_exception = True):
        with self.assertRaises(expected_exception) as c:
            fn(filename)
        exc_filename = c.exception.filename
        # the "filename" exception attribute may be encoded
        if isinstance(exc_filename, str):
            filename = filename.encode(sys.getfilesystemencoding())
        if check_fn_in_exception:
            self.assertEqual(exc_filename, filename, "Function '%s(%r) failed "
                             "with bad filename in the exception: %r" %
                             (fn.__name__, filename, exc_filename))

    def test_failures(self):
        # Pass non-existing Unicode filenames all over the place.
        for name in self.files:
            name = "not_" + name
            self._apply_failure(open, name, IOError)
            self._apply_failure(os.stat, name, OSError)
            self._apply_failure(os.chdir, name, OSError)
            self._apply_failure(os.rmdir, name, OSError)
            self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so dont check
            self._apply_failure(os.listdir, name, OSError, False)

    def test_open(self):
        for name in self.files:
            f = open(name, 'w')
            f.write((name+'\n').encode("utf-8"))
            f.close()
            os.stat(name)

    # Skip the test on darwin, because darwin does normalize the filename to
    # NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
    # NFKD in Python is useless, because darwin will normalize it later and so
    # open(), os.stat(), etc. don't raise any exception.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
    def test_normalize(self):
        files = set(f for f in self.files if isinstance(f, unicode))
        others = set()
        for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
            others |= set(normalize(nf, file) for file in files)
        others -= files
        for name in others:
            self._apply_failure(open, name, IOError)
            self._apply_failure(os.stat, name, OSError)
            self._apply_failure(os.chdir, name, OSError)
            self._apply_failure(os.rmdir, name, OSError)
            self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so dont check
            self._apply_failure(os.listdir, name, OSError, False)

    # Skip the test on darwin, because darwin uses a normalization different
    # than Python NFD normalization: filenames are different even if we use
    # Python NFD normalization.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
    def test_listdir(self):
        sf0 = set(self.files)
        f1 = os.listdir(test_support.TESTFN)
        f2 = os.listdir(unicode(test_support.TESTFN,
                                sys.getfilesystemencoding()))
        sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
        self.assertEqual(sf0, sf2)
        self.assertEqual(len(f1), len(f2))

    def test_rename(self):
        for name in self.files:
            os.rename(name, "tmp")
            os.rename("tmp", name)

    def test_directory(self):
        dirname = os.path.join(test_support.TESTFN,
                               u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
        filename = u'\xdf-\u66e8\u66e9\u66eb'
        oldwd = os.getcwd()
        os.mkdir(dirname)
        os.chdir(dirname)
        try:
            with open(filename, 'w') as f:
                f.write((filename + '\n').encode("utf-8"))
            os.access(filename,os.R_OK)
            os.remove(filename)
        finally:
            os.chdir(oldwd)
            os.rmdir(dirname)
# Concrete variants: each runs the full UnicodeFileTests suite with the
# filenames pre-normalized to one specific Unicode normalization form.
class UnicodeNFCFileTests(UnicodeFileTests):
    normal_form = 'NFC'


class UnicodeNFDFileTests(UnicodeFileTests):
    normal_form = 'NFD'


class UnicodeNFKCFileTests(UnicodeFileTests):
    normal_form = 'NFKC'


class UnicodeNFKDFileTests(UnicodeFileTests):
    normal_form = 'NFKD'
def test_main():
    """Run all test classes, cleaning up TESTFN even when a test fails."""
    try:
        test_support.run_unittest(
            UnicodeFileTests,
            UnicodeNFCFileTests,
            UnicodeNFDFileTests,
            UnicodeNFKCFileTests,
            UnicodeNFKDFileTests,
        )
    finally:
        deltree(test_support.TESTFN)


if __name__ == "__main__":
    test_main()
2,050 | patch internal | # engine params
from typing import Any, Callable, Dict, Optional, Sequence, Union, cast
from .argument_config import get_argument_config_value
from .config_file_config import get_config_dict_from_config_file
from .default_config import get_default_config_value
from .environment_config import get_environment_config_value
from .keys import ALL_KEYS, KEYS, ConfigDict
from .system_config import get_system_config_value
def chain_getters(
    getters: Sequence[Callable[[str], Optional[str]]],
    key: str,
    default_return: Optional[str] = None,
) -> Optional[str]:
    """Try each getter in order with `key` and return the first non-None
    result; fall back to `default_return` when every getter misses."""
    results = (getter(key) for getter in getters)
    return next((value for value in results if value is not None), default_return)
def lazy_get_config_value(
    key: str, default_return: Optional[str] = None
) -> Optional[Union[str, Dict[str, Dict[str, str]]]]:
    """
    Get the config value for a key in the following precedence
    Otherwise return default_return
    """
    if key not in ALL_KEYS:
        # Sections which can't be overridden via envvars/arguments fall
        # back to default values only.
        return chain_getters([get_default_config_value], key, default_return)
    # Arguments beat environment, which beats the system config file,
    # which beats the built-in defaults.
    ordered_getters = [
        get_argument_config_value,
        get_environment_config_value,
        get_system_config_value,
        get_default_config_value,
    ]
    return chain_getters(ordered_getters, key, default_return)
def update_config_dict_from_arguments(config_dict: ConfigDict) -> ConfigDict:
    """
    Given an existing config_dict, update after reading sys.argv
    and overwriting any keys.
    Return updated copy of config_dict.
    """
    # Call the getter exactly once per key: the previous version evaluated
    # get_argument_config_value(k) twice (once in the filter, once for the
    # value), which doubles the work and can disagree if sys.argv changes
    # between the two calls.
    candidates = ((k, get_argument_config_value(k)) for k in KEYS)
    argument_config_dict = {k: v for k, v in candidates if v is not None}
    return patch_config(config_dict, cast(ConfigDict, argument_config_dict))
def update_config_dict_from_env_vars(config_dict: ConfigDict) -> ConfigDict:
    """
    Given an existing config_dict, update after reading os.environ
    and overwriting any keys.
    Return updated copy of config_dict.
    """
    # Call the getter exactly once per key (the previous version evaluated
    # get_environment_config_value(k) twice per key), and name the local
    # for what it holds: environment overrides, not argument overrides.
    candidates = ((k, get_environment_config_value(k)) for k in KEYS)
    environment_config_dict = {k: v for k, v in candidates if v is not None}
    return patch_config(config_dict, cast(ConfigDict, environment_config_dict))
def update_config_dict_from_file(config_dict: ConfigDict, sg_config_file: str) -> ConfigDict:
    """
    Given an existing config_dict, update after reading sg_config_file
    and overwriting any keys according to the rules in config_file_config
    Return updated copy of config_dict.
    """
    file_overrides = get_config_dict_from_config_file(sg_config_file)
    return patch_config(config_dict, file_overrides)
def create_config_dict() -> ConfigDict:
    """
    Create and return a dict of all known config values
    """
    # Seed from the lazy getters, dropping keys with no value anywhere.
    seeded = ((key, lazy_get_config_value(key)) for key in ALL_KEYS)
    config_dict = cast(ConfigDict, {k: v for k, v in seeded if v is not None})
    # Layer on the config file (if one is configured), then environment
    # variables, then command-line arguments — later layers win.
    try:
        sg_config_file = get_singleton(config_dict, "SG_CONFIG_FILE")
    except KeyError:
        pass
    else:
        config_dict = update_config_dict_from_file(config_dict, sg_config_file)
    config_dict = update_config_dict_from_env_vars(config_dict)
    return update_config_dict_from_arguments(config_dict)
def patch_config(config: ConfigDict, patch: ConfigDict) -> ConfigDict:
    """
    Recursively updates a nested configuration dictionary:
    patch_config(
        {"key_1": "value_1",
         "dict_1": {"key_1": "value_1"}},
        {"key_1": "value_2",
         "dict_1": {"key_2": "value_2"}}) == \
        {"key_1": "value_2",
         "dict_1": {"key_1": "value_1", "key_2": "value_2"}}
    :param config: Config dictionary
    :param patch: Dictionary with the path
    :return: New patched dictionary
    """

    def _merge(left: Dict[str, Any], right: Dict[str, Any]) -> Dict[str, Any]:
        # Returns a new dict; neither input is mutated.
        merged = dict(left)
        for key, value in right.items():
            base = left.get(key)
            if isinstance(base, dict) and isinstance(value, dict):
                # Both sides hold a sub-dict: recurse instead of replacing.
                merged[key] = _merge(base, value)
            else:
                merged[key] = value
        return merged

    return _merge(config, patch)
def get_singleton(config: ConfigDict, item: str) -> str:
    """Return a singleton (not a section) variable from the config.

    Raises KeyError if the item is absent."""
    value = config[item]
    return str(value)
def get_all_in_section(config: ConfigDict, section: str) -> Dict[str, Union[str, Dict[str, str]]]:
    """
    Get all subsections from a config (e.g. config["data_sources"]).
    Returns an empty dict when the section is missing.
    """
    section_contents = config.get(section, {})
    assert isinstance(section_contents, dict)
    return cast(Dict[str, Union[str, Dict[str, str]]], section_contents)
def get_all_in_subsection(config: ConfigDict, section: str, subsection: str) -> Dict[str, str]:
    """Return the dict stored at config[section][subsection] ({} if absent)."""
    section_dict = get_all_in_section(config, section)
    subsection_contents = section_dict.get(subsection, {})
    assert isinstance(subsection_contents, dict)
    return cast(Dict[str, str], subsection_contents)
def get_from_subsection(config: ConfigDict, section: str, subsection: str, item: str) -> str:
    """Return a singleton variable from a subsection of the config,
    e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"].

    Raises KeyError if the item is absent."""
    return get_all_in_subsection(config, section, subsection)[item]
def get_from_section(config: ConfigDict, section: str, item: str) -> str:
    """Return a singleton variable from a section of the config,
    e.g. config["defaults"]["SG_ENGINE_PORT"]."""
    section_dict = get_all_in_section(config, section)
    assert isinstance(section_dict, dict)
    value = section_dict[item]
    return cast(str, value)
def set_in_subsection(
    config: ConfigDict, section: str, subsection: str, item: str, value: str
) -> None:
    """Set a singleton variable in a subsection of the config,
    e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"].

    Creates the section/subsection if missing. The previous implementation
    fetched the subsection via get_all_in_subsection, whose `.get(..., {})`
    default returns a fresh dict that is NOT stored in `config` — so the
    write was silently lost whenever the subsection did not exist yet.
    """
    section_dict = cast(Dict[str, Dict[str, str]], config.setdefault(section, {}))
    subsection_dict = section_dict.setdefault(subsection, {})
    subsection_dict[item] = value
2,051 | test resampling to numpy img 1 | import numpy as np
import unittest
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
from grass.pygrass.raster import raster2numpy_img
from grass.pygrass.gis.region import Region
from grass.script.core import tempfile
# PyQt4 is optional: the QImage-based tests below are skipped when it is
# not importable.
has_PyQt4 = False
try:
    from PyQt4.QtCore import *
    from PyQt4.QtGui import *

    has_PyQt4 = True
except ImportError:
    # Only a missing PyQt4 should disable the tests; the previous bare
    # "except:" also swallowed KeyboardInterrupt/SystemExit and any
    # unrelated error raised during the import.
    pass
class RasterRowImgTestCase(TestCase):
    # Name of the temporary raster map shared by all tests in this case.
    name = "RasterRowImgTestCase_map"

    @classmethod
    def setUpClass(cls):
        """Create test raster map and region"""
        cls.use_temp_region()
        cls.runModule("g.region", n=60, s=0, e=40, w=0, res=0.1)
        # Rows 10..60 are null; every other cell gets row + 10*col.
        cls.runModule(
            "r.mapcalc",
            expression="%s = if(row() >= 10 && row() <= 60, null(), row() + (10.0 * col()))"
            % (cls.name),
            overwrite=True,
        )
        cls.runModule("r.colors", map=cls.name, color="elevation")

    @classmethod
    def tearDownClass(cls):
        """Remove the generated vector map, if exist"""
        cls.runModule("g.remove", flags="f", type="raster", name=cls.name)
        cls.del_temp_region()

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_1(self):
        # Render at 320x240 via raster2numpy_img and compare to reference PNG.
        region = Region()
        region.from_rast(self.name)
        region.cols = 320
        region.rows = 240
        region.adjust()
        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"
        a = raster2numpy_img(self.name, region)
        image = QImage(a.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/a.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/a.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_2(self):
        # Same as above at 640x480, but supplying a preallocated array.
        region = Region()
        region.from_rast(self.name)
        region.cols = 640
        region.rows = 480
        region.adjust()
        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"
        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)
        raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array)
        image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/b.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/b.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_large(self):
        # Large output (4000x3000) to exercise resampling at scale.
        region = Region()
        region.from_rast(self.name)
        region.cols = 4000
        region.rows = 3000
        region.adjust()
        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"
        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)
        raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array)
        image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/c.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/c.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_3(self):
        # RGB (not ARGB) color mode with a preallocated array.
        region = Region()
        region.from_rast(self.name)
        region.cols = 400
        region.rows = 300
        region.adjust()
        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"
        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)
        raster2numpy_img(rastname=self.name, region=region, color="RGB", array=array)
        image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32)
        # image.save("data/d.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/d.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_4(self):
        # RGB color mode letting raster2numpy_img allocate the array itself.
        region = Region()
        region.from_rast(self.name)
        region.cols = 400
        region.rows = 300
        region.adjust()
        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"
        array = raster2numpy_img(rastname=self.name, region=region, color="RGB")
        image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32)
        # image.save("data/e.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/e.png")

    def METHOD_NAME(self):
        # Default (ARGB) output: expects 4 bytes per cell.
        region = Region()
        region.ewres = 10
        region.nsres = 10
        region.adjust(rows=True, cols=True)
        a = raster2numpy_img(self.name, region)
        self.assertEqual(len(a), region.rows * region.cols * 4)

    def test_resampling_to_numpy_img_2(self):
        # Finer resolution, same 4-bytes-per-cell expectation.
        region = Region()
        region.ewres = 1
        region.nsres = 1
        region.adjust(rows=True, cols=True)
        a = raster2numpy_img(self.name, region)
        self.assertEqual(len(a), region.rows * region.cols * 4)

    def test_resampling_to_numpy_img_3(self):
        # GRAY1 output: 1 byte per cell.
        region = Region()
        region.ewres = 0.4
        region.nsres = 0.4
        region.adjust(rows=True, cols=True)
        a = raster2numpy_img(self.name, region, color="GRAY1")
        self.assertEqual(len(a), region.rows * region.cols * 1)

    def test_resampling_to_numpy_img_4(self):
        # GRAY2 output: also 1 byte per cell.
        region = Region()
        region.ewres = 0.1
        region.nsres = 0.1
        region.adjust(rows=True, cols=True)
        a = raster2numpy_img(self.name, region, color="GRAY2")
        self.assertEqual(len(a), region.rows * region.cols * 1)
# Run the gunittest suite when executed directly.
if __name__ == "__main__":
    test()
2,052 | on server init complete | # -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Neighborhood02.py
Age: Neighborhood02
Date: January 2004
event manager hooks for Neighborhood02, aka Kirel
"""
from Plasma import *
from PlasmaTypes import *
from PlasmaKITypes import *
import time
class Neighborhood02(ptResponder):
    def __init__(self):
        ptResponder.__init__(self)
        # Unique script id and version for this age responder.
        self.id = 5700
        self.version = 1
    def OnFirstUpdate(self):
        # Nothing to do on the first frame update.
        pass

    def OnNotify(self,state,id,events):
        # This age script does not react to notify messages.
        pass
    def METHOD_NAME(self):
        # Refresh the imager's recent-visitor list (see UpdateRecentVisitors).
        self.UpdateRecentVisitors()
def UpdateRecentVisitors(self):
try:
AmCCR = ptCCRMgr().getLevel()
except:
AmCCR = 0
if not AmCCR:
# add player to recent players list
deviceNode = None
deviceInbox = None
playerlist = None
# find the device
avault = ptAgeVault()
adevicesfolder = avault.getAgeDevicesFolder()
adevices = adevicesfolder.getChildNodeRefList()
for device in adevices:
device = device.getChild()
devicetn = device.upcastToTextNoteNode()
if devicetn and devicetn.getTitle() == "D'ni Imager Right":
deviceNode = devicetn
break
# if we have the device then find the inbox
if deviceNode:
inboxes = deviceNode.getChildNodeRefList()
for inbox in inboxes:
inbox = inbox.getChild()
inboxfolder = inbox.upcastToFolderNode()
if inboxfolder:
deviceInbox = inboxfolder
break
# if we have the inbox then look for the heek score note
if deviceInbox:
items = deviceInbox.getChildNodeRefList()
for item in items:
item = item.getChild()
itemtn = item.upcastToTextNoteNode()
if itemtn:
if itemtn.getTitle() == "Visitors, Visiteurs, Besucher":
playerlist = itemtn
break
elif itemtn.getTitle() == "Most Recent Visitors":
itemtn.setTitle("Visitors, Visiteurs, Besucher")
playerlist = itemtn
break
# if we have the text note then update it, otherwise create it
if playerlist:
currenttime = time.gmtime(PtGetDniTime())
currenttimestr = time.strftime("%m/%d/%Y %I:%M %p", currenttime)
playername = PtGetLocalPlayer().getPlayerName()
thetext = playerlist.getText()
if (thetext.count("\n") + 1) > 15:
thetext = thetext[:thetext.rfind("\n")]
thetext = currenttimestr + (" " * (30 - len(currenttimestr))) + playername + "\n" + thetext
playerlist.setText(thetext)
playerlist.save()
else:
currenttime = time.gmtime(PtGetDniTime())
currenttimestr = time.strftime("%m/%d/%Y %I:%M %p", currenttime)
playername = PtGetLocalPlayer().getPlayerName()
thetext = currenttimestr + (" " * (30 - len(currenttimestr))) + playername
playerlist = ptVaultTextNoteNode(0)
playerlist.setTitle("Visitors, Visiteurs, Besucher")
playerlist.setText(thetext)
deviceInbox.addNode(playerlist |
2,053 | add rule | import re
from collections.abc import Mapping
# Selector patterns understood by HTMLRuleset:
# "a" - element name only
ELEMENT_SELECTOR = re.compile(r"^([\w-]+)$")
# "a[href]" - element with a given attribute present
ELEMENT_WITH_ATTR_SELECTOR = re.compile(r"^([\w-]+)\[([\w-]+)\]$")
# "a[linktype='page']" - attribute equal to a single-quoted value
ELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR = re.compile(
    r"^([\w-]+)\[([\w-]+)='(.*)'\]$"
)
# 'a[linktype="page"]' - double-quoted variant
ELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR = re.compile(
    r'^([\w-]+)\[([\w-]+)="(.*)"\]$'
)
# "a[linktype=page]" - unquoted variant (value limited to [\w-] chars)
ELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR = re.compile(
    r"^([\w-]+)\[([\w-]+)=([\w-]+)\]$"
)
class HTMLRuleset:
    """
    Maintains a set of rules for matching HTML elements.
    Each rule defines a mapping from a CSS-like selector to an arbitrary result object.
    The following forms of rule are currently supported:
    'a' = matches any <a> element
    'a[href]' = matches any <a> element with an 'href' attribute
    'a[linktype="page"]' = matches any <a> element with a 'linktype' attribute equal to 'page'
    """

    def __init__(self, rules=None):
        # mapping of element name to a sorted list of (precedence, attr_check, result) tuples
        # where attr_check is a callable that takes an attr dict and returns True if they match
        self.element_rules = {}
        if rules:
            self.add_rules(rules)

    def add_rules(self, rules):
        # accepts either a dict of {selector: result}, or a list of (selector, result) tuples
        if isinstance(rules, Mapping):
            rules = rules.items()
        for selector, result in rules:
            self.METHOD_NAME(selector, result)

    def _add_element_rule(self, name, result):
        # add a rule that matches on any element with name `name`
        rules = self.element_rules.setdefault(name, [])
        # element-only rules have priority 2 (lower)
        rules.append((2, (lambda attrs: True), result))
        # sort list on priority
        rules.sort(key=lambda t: t[0])

    def _add_element_with_attr_rule(self, name, attr, result):
        # add a rule that matches any element with name `name` which has the attribute `attr`
        rules = self.element_rules.setdefault(name, [])
        # element-and-attr rules have priority 1 (higher)
        rules.append((1, (lambda attrs: attr in attrs), result))
        # sort list on priority
        rules.sort(key=lambda t: t[0])

    def _add_element_with_attr_exact_rule(self, name, attr, value, result):
        # add a rule that matches any element with name `name` which has an
        # attribute `attr` equal to `value`
        rules = self.element_rules.setdefault(name, [])
        # element-and-attr rules have priority 1 (higher)
        rules.append(
            (1, (lambda attrs: attr in attrs and attrs[attr] == value), result)
        )
        # sort list on priority
        rules.sort(key=lambda t: t[0])

    def METHOD_NAME(self, selector, result):
        # Parse `selector` against each supported pattern in turn and
        # register the corresponding rule; unrecognised selectors are
        # silently ignored (no pattern matches, so we fall off the end).
        match = ELEMENT_SELECTOR.match(selector)
        if match:
            name = match.group(1)
            self._add_element_rule(name, result)
            return
        match = ELEMENT_WITH_ATTR_SELECTOR.match(selector)
        if match:
            name, attr = match.groups()
            self._add_element_with_attr_rule(name, attr, result)
            return
        for regex in (
            ELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR,
            ELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR,
            ELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR,
        ):
            match = regex.match(selector)
            if match:
                name, attr, value = match.groups()
                self._add_element_with_attr_exact_rule(name, attr, value, result)
                return

    def match(self, name, attrs):
        """
        Look for a rule matching an HTML element with the given name and attribute dict,
        and return the corresponding result object. If no rule matches, return None.
        Rules with an attribute condition (priority 1) are tried before
        element-only rules (priority 2); within the same priority, rules are
        tried in insertion order (list.sort is stable).
        """
        try:
            rules_to_test = self.element_rules[name]
        except KeyError:
            return None
        for precedence, attr_check, result in rules_to_test:
            if attr_check(attrs):
                return result
2,054 | test no owner | # redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.www.authz import roles
class RolesFromGroups(unittest.TestCase):
    """Tests for RolesFromGroups: as exercised here, group names carrying the
    configured prefix ("buildbot-") map to roles with the prefix stripped."""

    def setUp(self):
        self.roles = roles.RolesFromGroups("buildbot-")

    def test_noGroups(self):
        # A user detail dict without a "groups" key yields no roles.
        ret = self.roles.getRolesFromUser({"username": 'homer'})
        self.assertEqual(ret, [])

    def test_noBuildbotGroups(self):
        # Groups lacking the prefix contribute nothing.
        ret = self.roles.getRolesFromUser({
            "username": "homer",
            "groups": ["employee"]
        })
        self.assertEqual(ret, [])

    def test_someBuildbotGroups(self):
        # Only the prefixed groups survive, with the prefix removed.
        ret = self.roles.getRolesFromUser({
            "username": "homer",
            "groups": ["employee", "buildbot-maintainer", "buildbot-admin"]
        })
        self.assertEqual(ret, ["maintainer", "admin"])
class RolesFromEmails(unittest.TestCase):
    """Tests for RolesFromEmails: each keyword maps a role name to the list
    of email addresses holding that role."""

    def setUp(self):
        self.roles = roles.RolesFromEmails(
            employee=["homer@plant.com", "burns@plant.com"], boss=["burns@plant.com"])

    def test_noUser(self):
        # Unknown email address: no roles.
        ret = self.roles.getRolesFromUser({
            "username": 'lisa',
            "email": 'lisa@school.com'
        })
        self.assertEqual(ret, [])

    def test_User1(self):
        ret = self.roles.getRolesFromUser({
            "username": 'homer',
            "email": 'homer@plant.com'
        })
        self.assertEqual(ret, ["employee"])

    def test_User2(self):
        # An address listed under several roles gets all of them.
        ret = self.roles.getRolesFromUser({
            "username": 'burns',
            "email": 'burns@plant.com'
        })
        self.assertEqual(sorted(ret), ["boss", "employee"])
class RolesFromOwner(unittest.TestCase):
    """Tests for RolesFromOwner: the configured role is granted only when the
    user's email equals the owner passed alongside the user details."""

    def setUp(self):
        self.roles = roles.RolesFromOwner("ownerofbuild")

    def METHOD_NAME(self):
        # No owner at all (None): role is not granted.
        ret = self.roles.getRolesFromUser({
            "username": 'lisa',
            "email": 'lisa@school.com'
        }, None)
        self.assertEqual(ret, [])

    def test_notOwner(self):
        # Owner differs from the user's email: role is not granted.
        ret = self.roles.getRolesFromUser({
            "username": 'lisa',
            "email": 'lisa@school.com'
        }, "homer@plant.com")
        self.assertEqual(ret, [])

    def test_owner(self):
        # Email matches the owner: role is granted.
        ret = self.roles.getRolesFromUser({
            "username": 'homer',
            "email": 'homer@plant.com'
        }, "homer@plant.com")
        self.assertEqual(ret, ["ownerofbuild"])
class RolesFromUsername(unittest.TestCase, ConfigErrorsMixin):
    """Tests for RolesFromUsername: listed usernames receive every role in
    the `roles` list; None usernames are rejected at configuration time."""

    def setUp(self):
        self.roles = roles.RolesFromUsername(roles=["admins"], usernames=["Admin"])
        self.roles2 = roles.RolesFromUsername(
            roles=["developers", "integrators"], usernames=["Alice", "Bob"])

    def test_anonymous(self):
        ret = self.roles.getRolesFromUser({"anonymous": True})
        self.assertEqual(ret, [])

    def test_normalUser(self):
        # Username not in the configured list: no roles.
        ret = self.roles.getRolesFromUser({"username": 'Alice'})
        self.assertEqual(ret, [])

    def test_admin(self):
        ret = self.roles.getRolesFromUser({"username": 'Admin'})
        self.assertEqual(ret, ["admins"])

    def test_multipleGroups(self):
        # A listed username receives every configured role.
        ret = self.roles2.getRolesFromUser({"username": 'Bob'})
        self.assertEqual(ret, ["developers", "integrators"])

    def test_badUsernames(self):
        with self.assertRaisesConfigError('Usernames cannot be None'):
            roles.RolesFromUsername(roles=[], usernames=[None])
2,055 | add datum for arg | """
EndpointsHelper
---------------
This is support for session endpoints, which are a flagged feature for mobile that also form the basis of smart
links in web apps.
Endpoints define specific locations in the application using a stack, so they rely on similar logic to end of form
navigation. The complexity of generating endpoints is all delegated to ``WorkflowHelper``.
"""
from corehq.apps.app_manager.suite_xml.contributors import PostProcessor
from corehq.apps.app_manager.suite_xml.post_process.workflow import (
CommandId,
WorkflowDatumMeta,
WorkflowHelper,
prepend_parent_frame_children,
)
from corehq.apps.app_manager.suite_xml.xml_models import (
Argument,
PushFrame,
SessionEndpoint,
Stack,
StackDatum,
StackInstanceDatum,
)
from corehq.util.timer import time_method
class EndpointsHelper(PostProcessor):
    """
    Generates "Session Endpoints" - user-defined labels for forms or modules.
    They end up as entries in the suite file that declare stack operations
    necessary to navigate to the form or module, as well as what arguments (eg:
    case IDs) must be provided to get there.
    """

    @time_method()
    def update_suite(self):
        # Emit one endpoint per module / case-list / form that declares an id.
        for module in self.modules:
            if module.session_endpoint_id:
                self.suite.endpoints.append(self._make_session_endpoint(module.session_endpoint_id, module))
            if module.case_list_session_endpoint_id:
                # Case-list endpoints drop the final selection datum
                # (should_add_last_selection_datum=False).
                self.suite.endpoints.append(self._make_session_endpoint(
                    module.case_list_session_endpoint_id, module, None, False))
            if module.module_type != "shadow":
                for form in module.get_suite_forms():
                    if form.session_endpoint_id:
                        self.suite.endpoints.append(self._make_session_endpoint(
                            form.session_endpoint_id, module, form))
            elif module.session_endpoint_id:
                # Shadow modules: per-form endpoint ids live on the module's
                # form_session_endpoints list, keyed by form unique_id.
                for form in module.get_suite_forms():
                    endpoint = next(
                        (m for m in module.form_session_endpoints if m.form_id == form.unique_id), None)
                    if endpoint:
                        self.suite.endpoints.append(self._make_session_endpoint(
                            endpoint.session_endpoint_id, module, form))

    def _make_session_endpoint(self, endpoint_id, module, form=None, should_add_last_selection_datum=True):
        """Build the SessionEndpoint element (arguments + navigation stack)
        for one endpoint id."""
        stack = Stack()
        children = self.get_frame_children(module, form)
        argument_ids = self.get_argument_ids(children, form, should_add_last_selection_datum)
        # Add a claim request for each endpoint argument.
        # This assumes that all arguments are case ids.
        non_computed_arguments = [
            child for child in children
            if isinstance(child, WorkflowDatumMeta) and child.requires_selection
            and (should_add_last_selection_datum or child != children[-1])
        ]
        for arg in non_computed_arguments:
            self._add_claim_frame(stack, arg, endpoint_id)
        # Add a frame to navigate to the endpoint
        frame = PushFrame()
        stack.add_frame(frame)
        for child in children:
            if isinstance(child, CommandId):
                frame.add_command(child.to_command())
            elif child.id in argument_ids:
                self.METHOD_NAME(frame, child)

        def get_child(child_id):
            # Look up a frame child by id (None if absent).
            for child in children:
                if child.id == child_id:
                    return child

        arguments = []
        for arg_id in argument_ids:
            child = get_child(arg_id)
            if child.is_instance:
                # Instance-valued arguments reference the selected-entities
                # session instance.
                arguments.append(Argument(
                    id=arg_id,
                    instance_id=arg_id,
                    instance_src="jr://instance/selected-entities",
                ))
            else:
                arguments.append(Argument(id=arg_id))
        return SessionEndpoint(
            id=endpoint_id,
            arguments=arguments,
            stack=stack,
        )

    def get_argument_ids(self, frame_children, form=None, should_add_last_selection_datum=True):
        """Return the ids of frame children that become endpoint arguments."""
        def should_include(child, add_selection_datum):
            if not isinstance(child, WorkflowDatumMeta):
                return False
            if child.requires_selection and add_selection_datum:
                return True
            if form:
                # Non-selection data are included only when the form lists
                # them in function_datum_endpoints.
                return child.id in (form.function_datum_endpoints or [])
            return False

        return [
            child.id for child in frame_children
            if should_include(child, should_add_last_selection_datum or child != frame_children[-1])
        ]

    def _add_claim_frame(self, stack, arg, endpoint_id):
        # One claim frame per case-id argument, pushing the datum then the
        # endpoint-specific claim command.
        frame = PushFrame()
        stack.add_frame(frame)
        self.METHOD_NAME(frame, arg)
        frame.add_command(f"'claim_command.{endpoint_id}.{arg.id}'")

    def METHOD_NAME(self, frame, child):
        # Instance-valued children use StackInstanceDatum; plain ones use
        # StackDatum. Both reference the endpoint argument as "$<id>".
        datum = StackInstanceDatum(id=child.id, value=f"${child.id}") if child.is_instance \
            else StackDatum(id=child.id, value=f"${child.id}")
        frame.add_datum(datum)

    def get_frame_children(self, module, form):
        """Delegate to WorkflowHelper, prepending the parent module's frame
        children when this module has a root module."""
        helper = WorkflowHelper(self.suite, self.app, self.app.get_modules())
        frame_children = helper.get_frame_children(module, form)
        if module.root_module_id:
            frame_children = prepend_parent_frame_children(helper, frame_children, module.root_module)
        return frame_children
2,056 | calc time | # -*- coding: utf-8 -*-
"""
Travel time calculations.
"""
from .helper_classes import TauModelError
from .seismic_phase import SeismicPhase
from .utils import parse_phase_list
from . import _DEFAULT_VALUES
class TauPTime(object):
    """
    Calculate travel times for different branches using linear interpolation
    between known slowness samples.
    """

    def __init__(self, model, phase_list, depth, degrees, receiver_depth=0.0,
                 ray_param_tol=_DEFAULT_VALUES["default_time_ray_param_tol"]
                 ):
        self.source_depth = depth
        self.receiver_depth = receiver_depth
        self.degrees = degrees
        self.arrivals = []
        self.phases = []
        # Names of phases to be used, e.g. PKIKP
        self.phase_names = parse_phase_list(phase_list)
        # A standard and a depth corrected model. Both are needed.
        self.model = model
        self.depth_corrected_model = self.model
        self.ray_param_tol = ray_param_tol

    def run(self):
        """
        Do all the calculations and print the output if told to. The resulting
        arrival times will be in self.arrivals.
        """
        self.depth_correct(self.source_depth, self.receiver_depth)
        self.calculate(self.degrees)

    def depth_correct(self, depth, receiver_depth=None):
        """
        Corrects the TauModel for the given source depth (if not already
        corrected).
        """
        if receiver_depth is None:
            receiver_depth = self.receiver_depth
        # Re-correct only when the cached model was built for another depth;
        # any change invalidates previously computed arrivals.
        if self.depth_corrected_model is None or \
                self.depth_corrected_model.source_depth != depth:
            self.depth_corrected_model = self.model.depth_correct(depth)
            self.arrivals = []
        if receiver_depth != depth:
            # If already split on receiver depth this does nothing.
            self.depth_corrected_model = \
                self.depth_corrected_model.split_branch(receiver_depth)
            self.arrivals = []
        self.source_depth = depth
        self.receiver_depth = receiver_depth

    def recalc_phases(self):
        """
        Recalculates the given phases using a possibly new or changed tau
        model.
        """
        new_phases = []
        for temp_phase_name in self.phase_names:
            for phase_num, seismic_phase in enumerate(self.phases):
                # NOTE(review): phase reuse is disabled - the loop body is
                # `pass` with the caching logic commented out below, so the
                # for/else always falls through to the else branch and every
                # phase is recomputed from scratch.
                pass
                # if seismic_phase.name == temp_phase_name:
                #     self.phases.pop(phase_num)
                #     if (seismic_phase.source_depth == self.source_depth and
                #             seismic_phase.tau_model ==
                #             self.depth_corrected_model):
                #         # OK so copy to new_phases:
                #         new_phases.append(seismic_phase)
                #         break
            # Executed, if break is NOT called.
            else:
                # Didn't find it precomputed, so recalculate:
                try:
                    seismic_phase = SeismicPhase(temp_phase_name,
                                                 self.depth_corrected_model,
                                                 self.receiver_depth)
                    new_phases.append(seismic_phase)
                except TauModelError:
                    print("Error with this phase, skipping it: " +
                          str(temp_phase_name))
        self.phases = new_phases

    def calculate(self, degrees):
        """
        Calculate the arrival times.
        """
        self.depth_correct(self.source_depth, self.receiver_depth)
        # Called before, but depth_correct might have changed the phases.
        self.recalc_phases()
        self.METHOD_NAME(degrees)

    def METHOD_NAME(self, degrees):
        """
        Calls the calc_time method of SeismicPhase to calculate arrival
        times for every phase, each sorted by time.
        """
        self.degrees = degrees
        self.arrivals = []
        for phase in self.phases:
            self.arrivals += phase.METHOD_NAME(degrees, self.ray_param_tol)
        # Sort them.
        self.arrivals = sorted(self.arrivals,
                               key=lambda arrivals: arrivals.time)
2,057 | global var | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Visualize Relay IR in AST text-form."""
from collections import deque
from typing import (
Dict,
Union,
Tuple,
List,
)
import tvm
from tvm import relay
from .interface import (
DefaultVizParser,
Plotter,
VizEdge,
VizGraph,
VizNode,
VizParser,
)
class TermVizParser(VizParser):
    """`TermVizParser` parse nodes and edges for `TermPlotter`."""

    def __init__(self):
        # Fallback for node kinds without a dedicated handler below.
        self._default_parser = DefaultVizParser()

    def get_node_edges(
        self,
        node: relay.Expr,
        relay_param: Dict[str, tvm.runtime.NDArray],
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        """Parse a node and edges from a relay.Expr."""
        # Dispatch on the concrete relay node type.
        if isinstance(node, relay.Call):
            return self._call(node, node_to_id)
        if isinstance(node, relay.Let):
            return self._let(node, node_to_id)
        if isinstance(node, relay.GlobalVar):
            return self.METHOD_NAME(node, node_to_id)
        if isinstance(node, relay.If):
            return self._if(node, node_to_id)
        if isinstance(node, tvm.ir.Op):
            return self._op(node, node_to_id)
        if isinstance(node, relay.Function):
            return self._function(node, node_to_id)
        # Leverage logics from default parser.
        return self._default_parser.get_node_edges(node, relay_param, node_to_id)

    def _call(self, node, node_to_id):
        """Call node: edges point from the callee and each argument to it."""
        node_id = node_to_id[node]
        viz_node = VizNode(node_id, "Call", "")
        viz_edges = [VizEdge(node_to_id[node.op], node_id)]
        for arg in node.args:
            arg_id = node_to_id[arg]
            viz_edges.append(VizEdge(arg_id, node_id))
        return viz_node, viz_edges

    def _let(self, node, node_to_id):
        """Let node: edges point from var, value, and body to it."""
        node_id = node_to_id[node]
        viz_node = VizNode(node_id, "Let", "(var, val, body)")
        viz_edges = [
            VizEdge(node_to_id[node.var], node_id),
            VizEdge(node_to_id[node.value], node_id),
            VizEdge(node_to_id[node.body], node_id),
        ]
        return viz_node, viz_edges

    def METHOD_NAME(self, node, node_to_id):
        """GlobalVar leaf labeled with its name hint; no incoming edges."""
        node_id = node_to_id[node]
        viz_node = VizNode(node_id, "GlobalVar", node.name_hint)
        viz_edges = []
        return viz_node, viz_edges

    def _if(self, node, node_to_id):
        """If node: edges point from cond and both branches to it."""
        node_id = node_to_id[node]
        viz_node = VizNode(node_id, "If", "(cond, true, false)")
        viz_edges = [
            VizEdge(node_to_id[node.cond], node_id),
            VizEdge(node_to_id[node.true_branch], node_id),
            VizEdge(node_to_id[node.false_branch], node_id),
        ]
        return viz_node, viz_edges

    def _op(self, node, node_to_id):
        """Operator leaf labeled with the op name; no edges."""
        node_id = node_to_id[node]
        op_name = node.name
        viz_node = VizNode(node_id, op_name, "")
        viz_edges = []
        return viz_node, viz_edges

    def _function(self, node, node_to_id):
        """Function node: single edge from its body; params shown as detail."""
        node_id = node_to_id[node]
        viz_node = VizNode(node_id, "Func", str(node.params))
        viz_edges = [VizEdge(node_to_id[node.body], node_id)]
        return viz_node, viz_edges
class TermNode:
    """Terminal-friendly view of a ``VizNode``: its kind plus one-line detail."""

    def __init__(self, viz_node: VizNode):
        # Keep only what the terminal renderer needs: the node kind...
        self.type = viz_node.type_name
        # ...and its detail string flattened onto a single line, because a
        # multi-line detail would break the ASCII tree layout.
        flattened_detail = viz_node.detail.replace("\n", ", ")
        self.other_info = flattened_detail
class TermGraph(VizGraph):
    """Terminal graph for a relay IR Module

    Parameters
    ----------
    name: str
        name of this graph.
    """

    def __init__(self, name: str):
        self._name = name
        # A graph in adjacency list form.
        # The key is source node, and the value is a list of destination nodes.
        self._graph = {}
        # a hash table for quick searching.
        self._id_to_term_node = {}
        # node_id in reversed post order
        # That mean, root is the first node.
        self._node_id_rpo = deque()

    def node(self, viz_node: VizNode) -> None:
        """Add a node to the underlying graph.

        Nodes in a Relay IR Module are expected to be added in the post-order.

        Parameters
        ----------
        viz_node : VizNode
            A `VizNode` instance.
        """
        # Prepend so the deque ends up in reversed post-order (root first).
        self._node_id_rpo.appendleft(viz_node.identity)
        if viz_node.identity not in self._graph:
            # Add the node into the graph.
            self._graph[viz_node.identity] = []
        # Create TermNode from VizNode
        node = TermNode(viz_node)
        self._id_to_term_node[viz_node.identity] = node

    def edge(self, viz_edge: VizEdge) -> None:
        """Add an edge to the terminal graph.

        Parameters
        ----------
        viz_edge : VizEdge
            A `VizEdge` instance.
        """
        # Take CallNode as an example, instead of "arguments point to CallNode",
        # we want "CallNode points to arguments" in ast-dump form.
        #
        # The direction of edge is typically controlled by the implemented VizParser.
        # Reverse start/end here simply because we leverage default parser implementation.
        if viz_edge.end in self._graph:
            self._graph[viz_edge.end].append(viz_edge.start)
        else:
            self._graph[viz_edge.end] = [viz_edge.start]

    def render(self) -> str:
        """Draw a terminal graph

        Returns
        -------
        rv1: str
            text representing a graph.
        """
        lines = []
        seen_node = set()

        def gen_line(indent, n_id):
            # Skip a node already emitted at this depth; guards against
            # infinite recursion on shared sub-graphs.
            if (indent, n_id) in seen_node:
                return
            seen_node.add((indent, n_id))

            conn_symbol = ["|--", "`--"]
            last = len(self._graph[n_id]) - 1
            for i, next_n_id in enumerate(self._graph[n_id]):
                node = self._id_to_term_node[next_n_id]
                lines.append(
                    f"{indent}{conn_symbol[1 if i==last else 0]}{node.type} {node.other_info}"
                )
                next_indent = indent
                # increase indent for the next level.
                next_indent += " " if (i == last) else "| "
                gen_line(next_indent, next_n_id)

        # The root of the dump is the first node in reversed post-order.
        first_node_id = self._node_id_rpo[0]
        first_node = self._id_to_term_node[first_node_id]
        lines.append(f"@{self._name}({first_node.other_info})")
        gen_line("", first_node_id)
        return "\n".join(lines)
class TermPlotter(Plotter):
    """Terminal plotter"""

    def __init__(self):
        # Maps a graph name to its TermGraph instance.
        self._name_to_graph = {}

    def create_graph(self, name):
        """Create (and remember) a terminal graph for *name*."""
        graph = TermGraph(name)
        self._name_to_graph[name] = graph
        return graph

    def render(self, filename):
        """If filename is None, print to stdio. Otherwise, write to the filename."""
        rendered = [graph.render() for graph in self._name_to_graph.values()]
        text = "\n".join(rendered)
        if filename is None:
            print(text)
        else:
            with open(filename, "w") as out_file:
                out_file.write(text)
import itertools
from datetime import timedelta
import pytest
from django.utils import timezone
from freezegun import freeze_time
from sentry.models import Group, GroupSnooze
from sentry.testutils.cases import PerformanceIssueTestCase, SnubaTestCase, TestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.performance_issues.store_transaction import PerfIssueTransactionTestMixin
from sentry.testutils.silo import region_silo_test
from sentry.utils.samples import load_data
from tests.sentry.issues.test_utils import SearchIssueTestMixin
@region_silo_test(stable=True)
class GroupSnoozeTest(
    TestCase,
    SnubaTestCase,
    PerfIssueTransactionTestMixin,
    SearchIssueTestMixin,
    PerformanceIssueTestCase,
):
    # Exercises GroupSnooze.is_valid() across error, performance, and
    # generic (search) issues, for until/count/user-count/rate conditions.

    sequence = itertools.count()  # generates unique values, class scope doesn't matter

    def setUp(self):
        super().setUp()
        self.project = self.create_project()
        # Start with no pending (not-yet-persisted) event count.
        self.group.times_seen_pending = 0

    def test_until_not_reached(self):
        # A snooze expiring in the future is still in effect.
        snooze = GroupSnooze.objects.create(
            group=self.group, until=timezone.now() + timedelta(days=1)
        )
        assert snooze.is_valid()

    def test_until_reached(self):
        # A snooze that expired in the past is no longer valid.
        snooze = GroupSnooze.objects.create(
            group=self.group, until=timezone.now() - timedelta(days=1)
        )
        assert not snooze.is_valid()

    def test_mismatched_group(self):
        # Passing a different group than the snooze's own is rejected.
        snooze = GroupSnooze.objects.create(group=self.group)
        with pytest.raises(ValueError):
            snooze.is_valid(self.create_group())

    def test_delta_not_reached(self):
        snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0})
        assert snooze.is_valid()

    def test_delta_reached(self):
        # times_seen hitting the snooze count invalidates the snooze.
        snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0})
        self.group.update(times_seen=100)
        assert not snooze.is_valid()

    def METHOD_NAME(self):
        # Pending (unflushed) events count toward the threshold only when
        # use_pending_data is requested.
        snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0})
        self.group.update(times_seen=90)
        assert snooze.is_valid(use_pending_data=True)
        self.group.times_seen_pending = 10
        assert not snooze.is_valid(use_pending_data=True)

    def test_user_delta_not_reached(self):
        snooze = GroupSnooze.objects.create(
            group=self.group, user_count=100, state={"users_seen": 0}
        )
        assert snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_user_delta_reached(self):
        # 100 distinct users on the group exhausts a user_count=100 snooze.
        for i in range(0, 100):
            self.store_event(
                data={
                    "user": {"id": i},
                    "timestamp": iso_format(before_now(seconds=1)),
                    "fingerprint": ["group1"],
                },
                project_id=self.project.id,
            )
        group = list(Group.objects.all())[-1]
        snooze = GroupSnooze.objects.create(group=group, user_count=100, state={"users_seen": 0})
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_user_rate_reached(self):
        """Test that ignoring an error issue until it's hit by 10 users in an hour works."""
        for i in range(5):
            group = self.store_event(
                data={
                    "fingerprint": ["group1"],
                    "timestamp": iso_format(before_now(minutes=5 + i)),
                    "tags": {"sentry:user": i},
                },
                project_id=self.project.id,
            ).group
        snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60)
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_user_rate_reached_perf_issues(self):
        """Test that ignoring a performance issue until it's hit by 10 users in an hour works."""
        for i in range(0, 10):
            event_data = load_data(
                "transaction-n-plus-one",
                timestamp=before_now(minutes=10),
            )
            event_data["user"]["id"] = str(i)
            event = self.create_performance_issue(event_data=event_data)
        perf_group = event.group
        snooze = GroupSnooze.objects.create(group=perf_group, user_count=10, user_window=60)
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_user_rate_not_reached(self):
        snooze = GroupSnooze.objects.create(group=self.group, user_count=100, user_window=60)
        assert snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_user_rate_without_test(self):
        # With test_rates=False the rate condition is not evaluated.
        snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)
        assert snooze.is_valid(test_rates=False)

    @freeze_time()
    def test_rate_not_reached(self):
        snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)
        assert snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_rate_reached(self):
        """Test when an error issue is ignored until it happens 5 times in a day"""
        for i in range(5):
            group = self.store_event(
                data={
                    "fingerprint": ["group1"],
                    "timestamp": iso_format(before_now(minutes=5 + i)),
                },
                project_id=self.project.id,
            ).group
        snooze = GroupSnooze.objects.create(group=group, count=5, window=24 * 60)
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_rate_reached_perf_issue(self):
        """Test when a performance issue is ignored until it happens 10 times in a day"""
        for i in range(0, 10):
            event = self.create_performance_issue()
        snooze = GroupSnooze.objects.create(group=event.group, count=10, window=24 * 60)
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_rate_without_test(self):
        snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)
        assert snooze.is_valid(test_rates=False)

    @freeze_time()
    def test_user_rate_reached_generic_issues(self):
        """Test that ignoring a generic issue until it's hit by 10 users in an hour works."""
        for i in range(0, 10):
            event, occurrence, group_info = self.store_search_issue(
                project_id=self.project.id,
                user_id=i,
                fingerprints=["test_user_rate_reached_generic_issues-group"],
                environment=None,
            )
        assert group_info is not None
        generic_group = group_info.group
        assert generic_group is not None
        snooze = GroupSnooze.objects.create(group=generic_group, user_count=10, user_window=60)
        assert not snooze.is_valid(test_rates=True)

    @freeze_time()
    def test_rate_reached_generic_issue(self):
        """Test when a generic issue is ignored until it happens 10 times in a day"""
        for i in range(0, 10):
            event, occurrence, group_info = self.store_search_issue(
                project_id=self.project.id,
                user_id=3,  # pin the user_id here to verify the number of events impacts the snooze
                fingerprints=["test_rate_reached_generic_issue-group"],
                environment=None,
            )
        assert group_info is not None
        generic_group = group_info.group
        assert generic_group is not None
        snooze = GroupSnooze.objects.create(group=generic_group, count=10, window=24 * 60)
        assert not snooze.is_valid(test_rates=True)
import errno
import json
import os
import shutil
import subprocess
import requests
DEFAULT_MODEL_PATH = "model_archiver/tests/integ_tests/resources/regular_model"
DEFAULT_HANDLER = "service:handle"
DEFAULT_RUNTIME = "python"
DEFAULT_MODEL_NAME = "model"
DEFAULT_EXPORT_PATH = "/tmp/model"
MANIFEST_FILE = "MAR-INF/MANIFEST.json"


def update_tests(test):
    """Fill in the default value for every test-config key that is absent.

    Mutates *test* in place and returns it, matching the original contract.
    """
    defaults = {
        "modelName": DEFAULT_MODEL_NAME,
        "modelPath": DEFAULT_MODEL_PATH,
        "handler": DEFAULT_HANDLER,
        "runtime": DEFAULT_RUNTIME,
        "exportPath": DEFAULT_EXPORT_PATH,
        "archiveFormat": "default",
    }
    for key, value in defaults.items():
        # setdefault == "keep existing value, fill only missing keys",
        # identical to the old get(key, default) reassignment.
        test.setdefault(key, value)
    return test
def create_file_path(path):
    """Create *path* (including missing parents), tolerating an existing directory.

    Raises OSError if the path exists but is not a directory, or on any other
    failure -- the same behaviour as the old errno.EEXIST dance.
    """
    # makedirs(exist_ok=True) succeeds when the leaf already exists as a
    # directory and still raises when it exists as a file.
    os.makedirs(path, exist_ok=True)
def delete_file_path(path):
    """Best-effort removal of *path*: unlink a file, recursively remove a dir.

    Any OSError is swallowed, so a missing or busy path is not fatal.
    """
    try:
        if os.path.isfile(path):
            os.remove(path)
        # Re-check as a directory: a symlinked dir or race is handled the
        # same way the original did -- by attempting and ignoring failure.
        if os.path.isdir(path):
            shutil.rmtree(path)
    except OSError:
        pass
def run_test(test, cmd):
    """Run *cmd* test["iterations"] times (default 1) through the shell.

    Returns 1 when every invocation succeeds.  On the first failure, returns 0
    if the test expects an error; otherwise fails the surrounding pytest run.
    """
    iterations = test.get("iterations")
    if iterations is None:
        iterations = 1
    for _ in range(iterations):
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as exc:
            if test.get("expectError") is True:
                return 0
            assert 0, "{}".format(exc.output)
    return 1
def validate_archive_exists(test):
    """Assert the archiver produced output of the kind the config requested."""
    export_path = test.get("exportPath")
    model_name = test.get("modelName")
    archive_format = test.get("archiveFormat")
    if archive_format == "tgz":
        assert os.path.isfile(os.path.join(export_path, model_name + ".tar.gz"))
    elif archive_format == "no-archive":
        # "no-archive" leaves an exploded directory rather than a file.
        assert os.path.isdir(os.path.join(export_path, model_name))
    else:
        assert os.path.isfile(os.path.join(export_path, model_name + ".mar"))
def validate_manifest_file(manifest, test):
    """
    Validate the MANIFEST file

    :param manifest: parsed MANIFEST.json content
    :param test: the test configuration the archive was built from
    :return: None; raises AssertionError on any mismatch
    """
    model_section = manifest.get("model")
    assert manifest.get("runtime") == test.get("runtime")
    assert model_section.get("modelName") == test.get("modelName")
    assert model_section.get("handler") == test.get("handler")
def validate_files(file_list, prefix, regular):
    """Assert that the expected artifacts appear in the archive listing."""
    expected = [MANIFEST_FILE, "service.py"]
    if regular:
        # The "regular" fixture model ships extra dummy artifacts.
        expected += ["dummy-artifacts.txt", "dir/1.py"]
    else:
        expected += ["model.onnx"]
    for rel_path in expected:
        assert os.path.join(prefix, rel_path) in file_list
def METHOD_NAME(test_cfg):
    """Validate a .tar.gz archive: check its MANIFEST and its file listing."""
    import tarfile

    file_name = os.path.join(test_cfg.get("exportPath"), test_cfg.get("modelName") + ".tar.gz")
    # Use a context manager so the archive handle is closed deterministically;
    # the previous code leaked it.
    with tarfile.open(file_name, "r:gz") as f:
        manifest = json.loads(f.extractfile(os.path.join(test_cfg.get("modelName"), MANIFEST_FILE)).read())
        validate_manifest_file(manifest, test_cfg)
        validate_files(f.getnames(), test_cfg.get("modelName"), "regular_model" in test_cfg.get("modelPath"))
def validate_noarchive_archive(test):
    """Validate an exploded (no-archive) export by checking its MANIFEST file."""
    file_name = os.path.join(test.get("exportPath"), test.get("modelName"), MANIFEST_FILE)
    # Close the manifest file deterministically; the old open().read() leaked it.
    with open(file_name) as manifest_file:
        manifest = json.loads(manifest_file.read())
    validate_manifest_file(manifest, test)
def validate_mar_archive(test):
    """Validate a .mar (zip) archive by checking its embedded MANIFEST file."""
    import zipfile

    file_name = os.path.join(test.get("exportPath"), test.get("modelName") + ".mar")
    # Close both the zip and the member handle; the previous code leaked them.
    with zipfile.ZipFile(file_name, "r") as zf:
        with zf.open(MANIFEST_FILE) as member:
            manifest = json.loads(member.read())
    validate_manifest_file(manifest, test)
def validate_archive_content(test):
    """Dispatch content validation according to the archive format."""
    validators = {
        "tgz": METHOD_NAME,
        "no-archive": validate_noarchive_archive,
        "default": validate_mar_archive,
    }
    validator = validators.get(test.get("archiveFormat"))
    # Unknown formats were silently skipped before; keep that behaviour.
    if validator is not None:
        validator(test)
def validate(test):
    """Run every archive validation for a single test configuration."""
    for check in (validate_archive_exists, validate_archive_content):
        check(test)
def test_model_archiver():
    """End-to-end check: build each configured archive and validate its output."""
    # Close the configuration file deterministically; it was leaked before.
    with open("model_archiver/tests/integ_tests/configuration.json", "r") as f:
        tests = json.loads(f.read())
    for t in tests:
        try:
            # Start from a clean export directory for every test case.
            delete_file_path(t.get("exportPath"))
            create_file_path(t.get("exportPath"))
            t = update_tests(t)
            cmd = "model-archiver " \
                  "--model-name {} " \
                  "--model-path {} " \
                  "--handler {} " \
                  "--runtime {} " \
                  "--export-path {} " \
                  "--archive-format {}".format(t.get("modelName"),
                                               t.get("modelPath"),
                                               t.get("handler"),
                                               t.get("runtime"),
                                               t.get("exportPath"),
                                               t.get("archiveFormat"))
            if t.get("force"):
                cmd += " -f"
            # TODO: Add tests to check for "convert" functionality
            if run_test(t, cmd):
                validate(t)
        finally:
            # Always clean up, even when the archiver or validation failed.
            delete_file_path(t.get("exportPath"))


if __name__ == "__main__":
    test_model_archiver()
"""
Service support for Debian systems (uses update-rc.d and /sbin/service)
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
"""
import fnmatch
import glob
import logging
import os
import re
import shlex
import salt.utils.systemd
__func_alias__ = {"reload_": "reload"}
# Define the module's virtual name
__virtualname__ = "service"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only work on Debian and when systemd isn't running
    """
    supported_os_list = ("Debian", "Raspbian", "Devuan", "NILinuxRT")
    # Load only on sysvinit-style Debian derivatives; systemd hosts are
    # handled by a different service module.
    if __grains__["os"] in supported_os_list and not salt.utils.systemd.booted(
        __context__
    ):
        return __virtualname__
    return (
        False,
        "The debian_service module could not be loaded: "
        "unsupported OS family and/or systemd running.",
    )
def _service_cmd(*args):
return "service {} {}".format(args[0], " ".join(args[1:]))
def _get_runlevel():
    """
    returns the current runlevel
    """
    out = __salt__["cmd.run"]("runlevel")
    # unknown can be returned while inside a container environment, since
    # this is due to a lack of init, it should be safe to assume runlevel
    # 2, which is Debian's default. If not, all service related states
    # will throw an out of range exception here which will cause
    # other functions to fail.
    if "unknown" not in out:
        return out.split()[1]
    return "2"
def METHOD_NAME():
    """
    Return a list of service that are enabled on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    """
    prefix = "/etc/rc[S{}].d/S".format(_get_runlevel())
    enabled_services = set()
    for path in glob.glob("{}*".format(prefix)):
        basename = path.rsplit(os.sep, 1)[-1]
        # Strip the leading start-order digits, e.g. "S20ssh" -> "ssh".
        enabled_services.add(re.split(r"\d+", basename)[-1])
    return sorted(enabled_services)
def get_disabled():
    """
    Return a set of services that are installed but disabled

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    """
    installed = set(get_all())
    enabled = set(METHOD_NAME())
    return sorted(installed - enabled)
def available(name):
    """
    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available sshd
    """
    all_services = get_all()
    return name in all_services
def missing(name):
    """
    The inverse of service.available.
    Returns ``True`` if the specified service is not available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing sshd
    """
    all_services = get_all()
    return name not in all_services
def get_all():
    """
    Return all available boot services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    """
    init_scripts = set()
    for path in glob.glob("/etc/init.d/*"):
        service = path.split("/etc/init.d/")[1]
        # Remove README. If it's an enabled service, it will be added back in.
        if service != "README":
            init_scripts.add(service)
    return sorted(init_scripts | set(METHOD_NAME()))
def start(name):
    """
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    """
    # Exit code 0 means success; invert the retcode to get a boolean.
    return not __salt__["cmd.retcode"](_service_cmd(name, "start"))
def stop(name):
    """
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    """
    # Exit code 0 means success; invert the retcode to get a boolean.
    return not __salt__["cmd.retcode"](_service_cmd(name, "stop"))
def restart(name):
    """
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    """
    # Exit code 0 means success; invert the retcode to get a boolean.
    return not __salt__["cmd.retcode"](_service_cmd(name, "restart"))
def reload_(name):
    """
    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    """
    # Exit code 0 means success; invert the retcode to get a boolean.
    return not __salt__["cmd.retcode"](_service_cmd(name, "reload"))
def force_reload(name):
    """
    Force-reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.force_reload <service name>
    """
    # Exit code 0 means success; invert the retcode to get a boolean.
    return not __salt__["cmd.retcode"](_service_cmd(name, "force-reload"))
def status(name, sig=None):
    """
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name> [service signature]
    """
    if sig:
        # A process signature was supplied: check via the process table
        # instead of the init script.
        return bool(__salt__["status.pid"](sig))

    contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
    if contains_globbing:
        # Expand the glob against every known service name.
        services = fnmatch.filter(get_all(), name)
    else:
        services = [name]
    results = {}
    for service in services:
        cmd = _service_cmd(service, "status")
        # A non-zero exit simply means "not running", so it is not logged
        # as a command failure (ignore_retcode).
        results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True)
    if contains_globbing:
        return results
    return results[name]
def enable(name, **kwargs):
    """
    Enable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    """
    # Quote the name: this command line runs through a real shell.
    quoted_name = shlex.quote(name)
    cmd = "insserv {0} && update-rc.d {0} enable".format(quoted_name)
    return not __salt__["cmd.retcode"](cmd, python_shell=True)
def disable(name, **kwargs):
    """
    Disable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    """
    retcode = __salt__["cmd.retcode"]("update-rc.d {} disable".format(name))
    return not retcode
def enabled(name, **kwargs):
    """
    Return True if the named service is enabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    """
    enabled_services = METHOD_NAME()
    return name in enabled_services
def disabled(name):
    """
    Return True if the named service is disabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    """
    disabled_services = get_disabled()
    return name in disabled_services
import unittest
from drake.tools.lint.formatter import FormatterBase, IncludeFormatter
class TestFormatterBase(unittest.TestCase):
    # Exercises the generic line-buffer API of FormatterBase.

    def test_essentials(self):
        original_lines = [
            '// Line 1\n',
            '/* Line 2 */\n',
            '\n',
        ]
        dut = FormatterBase('filename.cc', readlines=original_lines)

        # Everything starts out unchanged.
        self.assertTrue(dut.is_same_as_original())
        self.assertTrue(dut.is_permutation_of_original())
        self.assertEqual(dut.get_all_lines(), original_lines)
        self.assertTrue(dut.get_first_differing_original_index() is None)

        # Basic getters.
        self.assertEqual(dut.get_num_lines(), 3)
        self.assertTrue(dut.is_blank_line(2))
        self.assertEqual(dut.get_line(0), '// Line 1\n')

        # Reverse it and end up with a permutation.
        dut.set_all_lines(reversed(dut.get_all_lines()))
        self.assertFalse(dut.is_same_as_original())
        self.assertTrue(dut.is_permutation_of_original())
        self.assertEqual(dut.get_first_differing_original_index(), 0)

        # Rebuild it using insertion and removal.
        dut.set_all_lines(['\n'] * 3)
        dut.set_line(0, '/* Line 2 */\n')
        dut.insert_lines(0, ['AAA\n', '// Line 1\n'])
        dut.remove_all([0, 3])
        self.assertEqual(dut.get_all_lines(), original_lines)

    def test_format_ranges(self):
        # Lines between clang-format off/on markers must be excluded from
        # the formattable ranges (the markers themselves included).
        original_lines = [
            '#include "line0"\n',
            '// clang-format off\n',
            '#include "line2"\n',
            '// clang-format on\n',
            '#include "line4"\n',
            '#include "line5"\n',
            '/* clang-format off */\n',
            '#include "line7"\n',
            '#include "line8"\n',
            '/* clang-format on */\n',
            '#include "line10"\n',
        ]
        dut = FormatterBase("filename.cc", readlines=original_lines)
        self.assertEqual(
            dut.get_format_ranges(), [[0], [4, 5], [10]])
        self.assertEqual(
            dut.get_non_format_ranges(), [[1, 2, 3], [6, 7, 8, 9]])

    def test_dos(self):
        # CRLF line endings are rejected outright.
        original_lines = [
            '#include "line0"\r\n',
        ]
        with self.assertRaisesRegex(Exception, "DOS newline"):
            FormatterBase("filename.cc", readlines=original_lines)

    def test_missing_eof(self):
        # A file whose final line lacks a newline is rejected.
        original_lines = [
            '#include "line0"',
        ]
        with self.assertRaisesRegex(Exception, "newline.*end of file"):
            FormatterBase("filename.cc", readlines=original_lines)
class TestIncludeFormatter(unittest.TestCase):
    # Tests IncludeFormatter's #include sorting/grouping on small fixtures.

    def _split(self, triple_quoted_file_contents):
        # Convert a flush-left triple-quoted fixture into readlines() form.
        lines = triple_quoted_file_contents.split("\n")
        assert len(lines) >= 2
        assert lines[0] == ""  # Detritus from first triple quote.
        assert lines[-1] == ""  # Detritus from last triple quote.
        del lines[0]
        del lines[-1]
        return [line + "\n" for line in lines]

    def _check(self, basename, original, expected, first_differing):
        # Format `original` as drake/dummy/<basename> and compare to
        # `expected`; also verify where the first changed line landed.
        original_lines = self._split(original)
        expected_lines = self._split(expected)
        dut = IncludeFormatter(
            "drake/dummy/" + basename,
            readlines=original_lines)
        dut.format_includes()
        self.assertEqual(dut.get_all_lines(), expected_lines)
        self.assertEqual(dut.get_first_differing_original_index(),
                         first_differing)

    def test_basic(self):
        # A pile of headers gets sorted per cppguide:
        # - The related header
        # - C system files
        # - C++ system files
        # - Other libraries' .h files
        # - Your project's .h files
        original = """
#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
#include "drake/dummy/dut.h"
#include <gtest/gtest.h>
#include <Eigen/Dense>
#include <algorithm>
#include <poll.h>
#include <sys/wait.h>
#include <vector>
"""
        expected = """
#include "drake/dummy/dut.h"

#include <poll.h>
#include <sys/wait.h>

#include <algorithm>
#include <vector>

#include <Eigen/Dense>
#include <gtest/gtest.h>

#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
"""
        self._check("dut.cc", original, expected, 0)

    def test_nothing(self):
        # A file with _no_ include statements.
        original = """
namespace { }
"""
        self._check("dut.cc", original, original, None)

    def test_regroup(self):
        # Wrongly grouped whitespace.
        original = """
#include "drake/dummy/dut.h"

#include <Eigen/Dense>
#include <algorithm>
#include <vector>

#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
#include <gtest/gtest.h>
"""
        expected = """
#include "drake/dummy/dut.h"

#include <algorithm>
#include <vector>

#include <Eigen/Dense>
#include <gtest/gtest.h>

#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
"""
        self._check("dut.cc", original, expected, 2)

    def test_format_off(self):
        # "clang-format off".
        original = """
#include "drake/dummy/dut.h"

// clang-format off
#ifdef FOO
#include <algorithm>
#include <vector>
#else
#include <vector>
#include <algorithm>
#endif
// clang-format on

#include "drake/common/drake_assert.h"
"""
        self._check("dut.cc", original, original, None)

    def test_target_is_header(self):
        # A header file.
        original = """
#include "drake/common/drake_assert.h"
#include <algorithm>

namespace { }
"""
        expected = """
#include <algorithm>

#include "drake/common/drake_assert.h"

namespace { }
"""
        self._check("dut.h", original, expected, 0)

    def test_associated_comment(self):
        # A comment prior to a line.
        original = """
#include "drake/dummy/dut.h"

// Some comment describing the next line.
#include <vector>

namespace { }
"""
        self._check("dut.cc", original, original, None)

    def test_file_opening_comment(self):
        # A comment atop the file with no blank line.
        original = """
/// @file dut.cc
/// Mumble mumble
///
#include <string>
#include <vector>
"""
        self._check("dut.cc", original, original, None)

    def test_internal_related_header(self):
        # Two related headers, guarded by "clang-format off".
        original = """
/* clang-format off (with explanatory comment) */
#include "drake/dummy/dut.h"
#include "drake/dummy/dut_internal.h"
/* clang-format on (with explanatory comment) */

#include <vector>
#include <string>

#include "drake/dummy/drake_assert.h"
#include "drake/dummy/drake_deprecated.h"
"""
        expected = """
/* clang-format off (with explanatory comment) */
#include "drake/dummy/dut.h"
#include "drake/dummy/dut_internal.h"
/* clang-format on (with explanatory comment) */

#include <string>
#include <vector>

#include "drake/dummy/drake_assert.h"
#include "drake/dummy/drake_deprecated.h"
"""
        self._check("dut.cc", original, expected, 5)

    def METHOD_NAME(self):
        # Groups of one, but sorted incorrectly.
        original = """
#include "drake/dummy/dut.h"

#include "drake/common/drake_assert.h"

#include <vector>
"""
        expected = """
#include "drake/dummy/dut.h"

#include <vector>

#include "drake/common/drake_assert.h"
"""
        self._check("dut.cc", original, expected, 2)

    def test_nontrivial_reformatting(self):
        # If clang-format changes any lines, we want to fail-fast.
        # (Note the two spaces between #include and the double quote.)
        original_lines = ['#include  "nontrivial.h"\n']
        dut = IncludeFormatter("nontrivial.cc", readlines=original_lines)
        dut.format_includes()
        with self.assertRaisesRegex(Exception, 'not just a shuffle'):
            dut.rewrite_file()
"""Ray Module."""
import logging
import os
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, List, Optional, TypeVar, Union
from awswrangler._config import apply_configs
from awswrangler._distributed import EngineEnum, engine
if engine.get() == EngineEnum.RAY or TYPE_CHECKING:
import ray
_logger: logging.Logger = logging.getLogger(__name__)
FunctionType = TypeVar("FunctionType", bound=Callable[..., Any])
class RayLogger:
    """Create discrete Logger instance for Ray Tasks."""

    def __init__(
        self,
        logging_level: int = logging.INFO,
        format: str = "%(asctime)s::%(levelname)-2s::%(name)s::%(message)s",  # pylint: disable=redefined-builtin
        datefmt: str = "%Y-%m-%d %H:%M:%S",
    ):
        # Configure the root logger for the current (task) process.
        logging.basicConfig(level=logging_level, format=format, datefmt=datefmt)

    def METHOD_NAME(self, name: Union[str, Any] = None) -> Optional[logging.Logger]:
        """Return logger object."""
        logger = logging.getLogger(name)
        return logger
@apply_configs
def ray_logger(
    function: FunctionType,
    configure_logging: bool = True,
    logging_level: int = logging.INFO,
) -> FunctionType:
    """
    Decorate callable to add RayLogger.

    Parameters
    ----------
    function : Callable[..., Any]
        Callable as input to decorator.

    Returns
    -------
    Callable[..., Any]
    """

    @wraps(function)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Set up per-task logging lazily, immediately before the call runs.
        if configure_logging:
            task_logger = RayLogger(logging_level=logging_level)
            task_logger.METHOD_NAME(name=function.__name__)
        return function(*args, **kwargs)

    return wrapper
def ray_remote(**options: Any) -> Callable[[FunctionType], FunctionType]:
    """
    Decorate with @ray.remote providing .options().

    Parameters
    ----------
    options : Any
        Ray remote options

    Returns
    -------
    Callable[..., Any]
    """

    def remote_decorator(function: FunctionType) -> FunctionType:
        """
        Decorate callable to wrap within ray.remote.

        Parameters
        ----------
        function : Callable[..., Any]
            Callable as input to ray.remote.

        Returns
        -------
        Callable[..., Any]
        """
        # If an earlier decoration stashed the undecorated callable, use it.
        target = getattr(function, "_source_func", function)

        @wraps(target)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            remote_fn = ray.remote(ray_logger(target))
            # Apply .options(...) only when options were actually supplied.
            configured = remote_fn.options(**options) if options else remote_fn
            return configured.remote(*args, **kwargs)

        return wrapper

    return remote_decorator
def ray_get(futures: Union["ray.ObjectRef[Any]", List["ray.ObjectRef[Any]"]]) -> Any:
    """
    Run ray.get on futures if distributed.

    Parameters
    ----------
    futures : List[Any]
        List of Ray futures

    Returns
    -------
    List[Any]
    """
    # Outside the Ray engine there is nothing to resolve; pass through.
    if engine.get() != EngineEnum.RAY:
        return futures
    return ray.get(futures)  # type: ignore[attr-defined]
@apply_configs
def initialize_ray(
    address: Optional[str] = None,
    redis_password: Optional[str] = None,
    ignore_reinit_error: bool = True,
    include_dashboard: Optional[bool] = False,
    configure_logging: bool = True,
    log_to_driver: bool = False,
    logging_level: int = logging.INFO,
    object_store_memory: Optional[int] = None,
    cpu_count: Optional[int] = None,
    gpu_count: Optional[int] = None,
) -> None:
    """
    Connect to an existing Ray cluster or start one and connect to it.

    Parameters
    ----------
    address : Optional[str]
        Address of the Ray cluster to connect to, by default None
    redis_password : Optional[str]
        Password to the Redis cluster, by default None
    ignore_reinit_error : bool
        If true, Ray suppress errors from calling ray.init() twice, by default True
    include_dashboard : Optional[bool]
        Boolean flag indicating whether or not to start the Ray dashboard, by default False
    configure_logging : Optional[bool]
        Boolean flag indicating whether or not to enable logging, by default True
    log_to_driver : bool
        Boolean flag to enable routing of all worker logs to the driver, by default False
    logging_level : int
        Logging level, defaults to logging.INFO. Ignored unless "configure_logging" is True
    object_store_memory : Optional[int]
        The amount of memory (in bytes) to start the object store with, by default None
    cpu_count : Optional[int]
        Number of CPUs to assign to each raylet, by default None
    gpu_count : Optional[int]
        Number of GPUs to assign to each raylet, by default None
    """
    if ray.is_initialized():
        return

    # Detect an existing cluster: an explicit address wins, otherwise fall
    # back to the RAY_ADDRESS environment variable.
    env_address = os.environ.get("RAY_ADDRESS")
    if not address and env_address:
        _logger.info("Using address %s set in the environment variable RAY_ADDRESS", env_address)
        address = env_address

    if address:
        _logger.info("Connecting to a Ray instance at: %s", address)
        ray.init(
            address=address,
            include_dashboard=include_dashboard,
            ignore_reinit_error=ignore_reinit_error,
            configure_logging=configure_logging,
            log_to_driver=log_to_driver,
            logging_level=logging_level,
        )
        return

    # No cluster to join: start a local instance, forwarding selected
    # environment variables into the Ray workers.
    ray_runtime_env_vars = [
        "__MODIN_AUTOIMPORT_PANDAS__",
    ]
    env_vars = {var: os.environ[var] for var in ray_runtime_env_vars if os.environ.get(var)}
    _logger.info("Initializing a Ray instance")
    ray.init(
        num_cpus=cpu_count,
        num_gpus=gpu_count,
        include_dashboard=include_dashboard,
        ignore_reinit_error=ignore_reinit_error,
        configure_logging=configure_logging,
        log_to_driver=log_to_driver,
        logging_level=logging_level,
        object_store_memory=object_store_memory,
        _redis_password=redis_password,
        _memory=object_store_memory,
        runtime_env={"env_vars": env_vars},
    )
"""Test tooltip, coverage 100%.
Coverage is 100% after excluding 6 lines with "# pragma: no cover".
They involve TclErrors that either should or should not happen in a
particular situation, and which are 'pass'ed if they do.
"""
from idlelib.tooltip import TooltipBase, Hovertip
from test.support import requires
requires('gui')
from functools import wraps
import time
from tkinter import Button, Tk, Toplevel
import unittest
def setUpModule():
    """Create the Tk root window shared by every test in this module."""
    global root
    root = Tk()
def tearDownModule():
    """Flush pending Tk events, then destroy and drop the shared root."""
    global root
    # Process queued idle callbacks first so destroy() does not race them.
    root.update_idletasks()
    root.destroy()
    del root
def add_call_counting(func):
    """Wrap *func* so each invocation's (args, kwargs) pair is recorded
    on the wrapper's ``call_args_list`` attribute."""
    @wraps(func)
    def counting_wrapper(*args, **kwargs):
        counting_wrapper.call_args_list.append((args, kwargs))
        return func(*args, **kwargs)

    counting_wrapper.call_args_list = []
    return counting_wrapper
def _make_top_and_button(testobj):
    """Create a Toplevel with one packed button and return (top, button).

    Destruction of both widgets is registered as cleanups on *testobj*.
    """
    global root
    top = Toplevel(root)
    testobj.addCleanup(top.destroy)
    top.title("Test tooltip")
    button = Button(top, text='ToolTip test button')
    button.pack()
    testobj.addCleanup(button.destroy)
    # Raise the window so event_generate() coordinates land on it.
    top.lift()
    return top, button
class ToolTipBaseTest(unittest.TestCase):
    """Tests for the abstract TooltipBase class."""
    def setUp(self):
        self.top, self.button = _make_top_and_button(self)
    def test_base_class_is_unusable(self):
        # Build a fresh widget pair instead of reusing self.top/self.button
        # so a failure here cannot corrupt the shared fixture.
        global root
        top = Toplevel(root)
        self.addCleanup(top.destroy)
        button = Button(top, text='ToolTip test button')
        button.pack()
        self.addCleanup(button.destroy)
        # showtip() depends on hooks only subclasses implement, so the base
        # class must raise NotImplementedError.
        with self.assertRaises(NotImplementedError):
            tooltip = TooltipBase(button)
            tooltip.showtip()
class HovertipTest(unittest.TestCase):
    """Tests for the concrete Hovertip tooltip."""
    def setUp(self):
        self.top, self.button = _make_top_and_button(self)
    def is_tipwindow_shown(self, tooltip):
        # A tip counts as shown only when its window exists and is viewable.
        return tooltip.tipwindow and tooltip.tipwindow.winfo_viewable()
    def test_showtip(self):
        """showtip() makes the tip window visible."""
        tooltip = Hovertip(self.button, 'ToolTip text')
        self.addCleanup(tooltip.hidetip)
        self.assertFalse(self.is_tipwindow_shown(tooltip))
        tooltip.showtip()
        self.assertTrue(self.is_tipwindow_shown(tooltip))
    def test_showtip_twice(self):
        """A second showtip() is a no-op reusing the same tip window."""
        tooltip = Hovertip(self.button, 'ToolTip text')
        self.addCleanup(tooltip.hidetip)
        self.assertFalse(self.is_tipwindow_shown(tooltip))
        tooltip.showtip()
        self.assertTrue(self.is_tipwindow_shown(tooltip))
        orig_tipwindow = tooltip.tipwindow
        tooltip.showtip()
        self.assertTrue(self.is_tipwindow_shown(tooltip))
        self.assertIs(tooltip.tipwindow, orig_tipwindow)
    def METHOD_NAME(self):
        """hidetip() removes a previously shown tip."""
        tooltip = Hovertip(self.button, 'ToolTip text')
        self.addCleanup(tooltip.hidetip)
        tooltip.showtip()
        tooltip.hidetip()
        self.assertFalse(self.is_tipwindow_shown(tooltip))
    def test_showtip_on_mouse_enter_no_delay(self):
        """With hover_delay=None the tip appears as soon as the mouse enters."""
        tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)
        self.addCleanup(tooltip.hidetip)
        tooltip.showtip = add_call_counting(tooltip.showtip)
        root.update()
        self.assertFalse(self.is_tipwindow_shown(tooltip))
        self.button.event_generate('<Enter>', x=0, y=0)
        root.update()
        self.assertTrue(self.is_tipwindow_shown(tooltip))
        self.assertGreater(len(tooltip.showtip.call_args_list), 0)
    def test_hover_with_delay(self):
        """Delayed tips appear after the delay, and not at all when the mouse
        leaves first; both scenarios share one real-time sleep."""
        # Run multiple tests requiring an actual delay simultaneously.
        # Test #1: A hover tip with a non-zero delay appears after the delay.
        tooltip1 = Hovertip(self.button, 'ToolTip text', hover_delay=100)
        self.addCleanup(tooltip1.hidetip)
        tooltip1.showtip = add_call_counting(tooltip1.showtip)
        root.update()
        self.assertFalse(self.is_tipwindow_shown(tooltip1))
        self.button.event_generate('<Enter>', x=0, y=0)
        root.update()
        self.assertFalse(self.is_tipwindow_shown(tooltip1))
        # Test #2: A hover tip with a non-zero delay doesn't appear when
        # the mouse stops hovering over the base widget before the delay
        # expires.
        tooltip2 = Hovertip(self.button, 'ToolTip text', hover_delay=100)
        self.addCleanup(tooltip2.hidetip)
        tooltip2.showtip = add_call_counting(tooltip2.showtip)
        root.update()
        self.button.event_generate('<Enter>', x=0, y=0)
        root.update()
        self.button.event_generate('<Leave>', x=0, y=0)
        root.update()
        # 0.15s > the 100ms hover delay used by both tips above.
        time.sleep(0.15)
        root.update()
        # Test #1 assertions.
        self.assertTrue(self.is_tipwindow_shown(tooltip1))
        self.assertGreater(len(tooltip1.showtip.call_args_list), 0)
        # Test #2 assertions.
        self.assertFalse(self.is_tipwindow_shown(tooltip2))
        self.assertEqual(tooltip2.showtip.call_args_list, [])
    def test_hidetip_on_mouse_leave(self):
        """With hover_delay=None, leaving the widget hides the tip again."""
        tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)
        self.addCleanup(tooltip.hidetip)
        tooltip.showtip = add_call_counting(tooltip.showtip)
        root.update()
        self.button.event_generate('<Enter>', x=0, y=0)
        root.update()
        self.button.event_generate('<Leave>', x=0, y=0)
        root.update()
        self.assertFalse(self.is_tipwindow_shown(tooltip))
        self.assertGreater(len(tooltip.showtip.call_args_list), 0)
# Fixed: removed stray trailing residue that broke the file's syntax.
if __name__ == '__main__':
    unittest.main(verbosity=2)
import pytest
def test_antibody_characterization_upgrade(upgrader, antibody_characterization_1):
    """v3 upgrade normalizes status and the characterization method."""
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_1, target_version='3')
    assert upgraded['schema_version'] == '3'
    assert upgraded['status'] == 'PENDING DCC REVIEW'
    assert upgraded['characterization_method'] == 'immunoprecipitation followed by mass spectrometry'
def test_biosample_characterization_upgrade(upgrader, biosample_characterization_1):
    """v3 upgrade normalizes biosample characterization status and method."""
    upgraded = upgrader.upgrade(
        'biosample_characterization', biosample_characterization_1, target_version='3')
    assert upgraded['schema_version'] == '3'
    assert upgraded['status'] == 'NOT REVIEWED'
    assert upgraded['characterization_method'] == 'FACs analysis'
def test_antibody_characterization_upgrade_status(upgrader, antibody_characterization_2):
    """v4 upgrade maps the legacy status to 'compliant'."""
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_2, target_version='4')
    assert upgraded['schema_version'] == '4'
    assert upgraded['status'] == 'compliant'
def METHOD_NAME(upgrader, biosample_characterization_2):
    """v4 upgrade maps the legacy ENCODE2 biosample status to 'released'."""
    upgraded = upgrader.upgrade(
        'biosample_characterization', biosample_characterization_2, target_version='4')
    assert upgraded['schema_version'] == '4'
    assert upgraded['status'] == 'released'
def test_antibody_characterization_upgrade_primary(upgrader, antibody_characterization_3):
    """v5 upgrade moves immunoblot into primary_characterization_method."""
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_3, target_version='5')
    assert upgraded['schema_version'] == '5'
    assert upgraded['primary_characterization_method'] == 'immunoblot'
    assert 'characterization_method' not in upgraded
def test_antibody_characterization_upgrade_secondary(upgrader, antibody_characterization_3):
    """v5 upgrade moves mass-spec into secondary_characterization_method."""
    antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_3, target_version='5')
    assert upgraded['schema_version'] == '5'
    assert upgraded['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'
    assert 'characterization_method' not in upgraded
def test_antibody_characterization_upgrade_compliant_status(upgrader, antibody_characterization_3):
    """v5 upgrade of a compliant record also fills reviewer and documents."""
    antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'
    antibody_characterization_3['status'] = 'compliant'
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_3, target_version='5')
    assert upgraded['schema_version'] == '5'
    assert upgraded['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'
    assert 'characterization_method' not in upgraded
    assert upgraded['reviewed_by'] == '81a6cc12-2847-4e2e-8f2c-f566699eb29e'
    assert upgraded['documents'] == ['88dc12f7-c72d-4b43-a6cd-c6f3a9d08821']
def test_antibody_characterization_upgrade_not_compliant_status(upgrader, antibody_characterization_3):
    """v5 upgrade of a not-reviewed record assigns the fallback reviewer."""
    antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'
    antibody_characterization_3['status'] = 'not reviewed'
    upgraded = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_3, target_version='5')
    assert upgraded['schema_version'] == '5'
    assert upgraded['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'
    assert 'characterization_method' not in upgraded
    assert upgraded['reviewed_by'] == 'ff7b77e7-bb55-4307-b665-814c9f1e65fb'
def test_biosample_characterization_upgrade_references(root, upgrader, biosample_characterization, biosample_characterization_4, publication, threadlocals, dummy_request):
    """v5 upgrade rewrites references to publication UUIDs."""
    context = root.get_by_uuid(biosample_characterization['uuid'])
    dummy_request.context = context
    upgraded = upgrader.upgrade('biosample_characterization', biosample_characterization_4,
                                target_version='5', context=context)
    assert upgraded['schema_version'] == '5'
    assert upgraded['references'] == [publication['uuid']]
def test_antibody_characterization_upgrade_inline(testapp, registry, antibody_characterization_1):
    """Items are stored un-upgraded, upgraded on read, and PATCH persists it."""
    from snovault import TYPES
    schema = registry[TYPES]['antibody_characterization'].schema
    posted = testapp.post_json('/antibody-characterizations?validate=false&render=uuid', antibody_characterization_1)
    location = posted.location
    # The properties are stored un-upgraded.
    raw = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow()
    assert raw.json['schema_version'] == '1'
    # When the item is fetched, it is upgraded automatically.
    fetched = testapp.get(location).maybe_follow()
    assert fetched.json['schema_version'] == schema['properties']['schema_version']['default']
    testapp.patch_json(location, {})
    # The stored properties are now upgraded.
    raw = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow()
    assert raw.json['schema_version'] == schema['properties']['schema_version']['default']
def test_antibody_characterization_comment_to_submitter_comment_upgrade(upgrader, antibody_characterization_10, antibody_characterization):
    """v11 upgrade renames 'comment' to 'submitter_comment'."""
    upgraded = upgrader.upgrade('antibody_characterization', antibody_characterization_10,
                                current_version='10', target_version='11')
    assert upgraded['schema_version'] == '11'
    assert 'comment' not in upgraded
    assert upgraded['submitter_comment'] == 'We tried really hard to characterize this antibody.'
def test_upgrade_antibody_characterization_11_to_12(upgrader, antibody_characterization_11, biosample):
    """v12 upgrade sets biosample_type to 'cell line' on every review."""
    upgraded = upgrader.upgrade('antibody_characterization', antibody_characterization_11,
                                current_version='11', target_version='12')
    for review in upgraded['characterization_reviews']:
        assert review['biosample_type'] == 'cell line'
def test_upgrade_antibody_characterization_13_to_14(upgrader, antibody_characterization_13, biosample):
    """v14 upgrade also normalizes biosample_type to 'cell line'."""
    upgraded = upgrader.upgrade('antibody_characterization', antibody_characterization_13,
                                current_version='13', target_version='14')
    for review in upgraded['characterization_reviews']:
        assert review['biosample_type'] == 'cell line'
def test_upgrade_antibody_characterization_14_to_15(root, upgrader,
                                                    antibody_characterization_14,
                                                    a549):
    """v15 upgrade links each review to the biosample_ontology object."""
    upgraded = upgrader.upgrade('antibody_characterization',
                                antibody_characterization_14,
                                current_version='14',
                                target_version='15',
                                context=root.get_by_uuid(a549['uuid']))
    for review in upgraded['characterization_reviews']:
        assert review['biosample_ontology'] == a549['uuid']
def test_upgrade_antibody_characterization_15_to_16(upgrader,
                                                    antibody_characterization_14):
    """v16 upgrade drops the denormalized biosample fields from reviews."""
    # Fixed: removed stray trailing residue on the final assert line.
    value = upgrader.upgrade(
        'antibody_characterization', antibody_characterization_14,
        current_version='15', target_version='16'
    )
    for char_review in value['characterization_reviews']:
        assert 'biosample_type' not in char_review
        assert 'biosample_term_id' not in char_review
        assert 'biosample_term_name' not in char_review
#
# SPDX-License-Identifier: GPL-2.0-only
#
import errno
import re
import os
class OEList(list):
    """OpenEmbedded 'list' type

    Behaves like an ordinary list, but is built by splitting a string on a
    separator (whitespace by default) and joins itself back together with
    that separator when converted with str().  Set the variable type flag
    to 'list' to use this type; the 'separator' flag is optional."""

    name = "list"

    def __init__(self, value, separator=None):
        items = [] if value is None else value.split(separator)
        list.__init__(self, items)
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Validates *value* case-insensitively against a space separated list of
    allowed values and returns it lower-cased.  Set the variable type flag
    to 'choice' and the 'choices' flag to the list of valid values."""
    if not isinstance(value, str):
        raise TypeError("choice accepts a string, not '%s'" % type(value))
    normalized = value.lower()
    allowed = choices.lower()
    if normalized not in allowed.split():
        raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                         (normalized, allowed))
    return normalized
class NoMatch(object):
    """Stub python regex pattern object which never matches anything.

    regex() returns the singleton below for empty/undefined values so that
    lookups behave like a compiled pattern whose operations find nothing."""
    def findall(self, string, flags=0):
        return None
    def finditer(self, string, flags=0):
        return None
    def match(self, string=None, flags=0):
        # Fixed: accept the string argument like re.Pattern.match does.
        # Previously the positional string landed in 'flags' (harmless here,
        # but wrong, and keyword calls such as match(string=...) broke).
        return None
    def search(self, string, flags=0):
        return None
    def split(self, string, maxsplit=0):
        return None
    def METHOD_NAME(pattern, repl, string, count=0):
        # NOTE(review): first parameter plays the role of 'self' when called
        # on the singleton; name kept for backward compatibility.
        return None
    def subn(pattern, repl, string, count=0):
        return None
# Replace the class with a shared singleton instance; regex() returns this.
NoMatch = NoMatch()
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Acts as a regular expression, returning the pre-compiled regular
    expression pattern object. To use this type, set the variable type flag
    to 'regex', and optionally, set the 'regexflags' type to a space separated
    list of the flags to control the regular expression matching (e.g.
    FOO[regexflags] += 'ignorecase'). See the python documentation on the
    're' module for a list of valid flags."""
    flagval = 0
    for flag in (regexflags or "").split():
        flag = flag.upper()
        try:
            flagval |= getattr(re, flag)
        except AttributeError:
            raise ValueError("Invalid regex flag '%s'" % flag)
    if not value:
        # Let's ensure that the default behavior for an undefined or empty
        # variable is to match nothing. If the user explicitly wants to match
        # anything, they can match '.*' instead.
        return NoMatch
    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
def boolean(value):
    """OpenEmbedded 'boolean' type

    Truthy strings: 'yes', 'y', 'true', 't', '1'.
    Falsy strings: 'no', 'n', 'false', 'f', '0'.  None maps to False and
    real bools pass straight through."""
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if not isinstance(value, str):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))
    lowered = value.lower()
    if lowered in ('yes', 'y', 'true', 't', '1'):
        return True
    if lowered in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % lowered)
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Parses *value* in the given base; the 'numberbase' flag (itself parsed
    as an int) defaults to base 10."""
    base = int(numberbase)
    return int(value, base)
# Keep a handle on the builtin before the 'float' name is shadowed below.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    Set the type flag to 'float'.  The optional 'fromhex' flag (parsed with
    the same truthiness rules as the 'boolean' type) selects base-16 input."""
    if boolean(fromhex):
        return _float.fromhex(value)
    return _float(value)
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type: join, optionally normalize, and optionally
    require that the resulting file exists (flags obey 'boolean' rules)."""
    result = os.path.join(relativeto, value)
    if boolean(normalize):
        result = os.path.normpath(result)
    if boolean(mustexist):
        # Probe by opening; only a missing file is converted to ValueError.
        try:
            with open(result, 'r'):
                pass
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(result, os.strerror(errno.ENOENT)))
    return result
def is_x86(arch):
    """
    Check whether arch is x86 or x86_64
    """
    return bool(arch.startswith('x86_') or re.match('i.*86', arch))
def qemu_use_kvm(kvm, target_arch):
    """
    Enable kvm if target_arch == build_arch or both of them are x86 archs.
    """
    # Fixed: removed stray trailing residue on the final return line.
    if not (kvm and boolean(kvm)):
        return False
    build_arch = os.uname()[4]
    if is_x86(build_arch) and is_x86(target_arch):
        return True
    return build_arch == target_arch
""" Test PlaceInfo """
import pytest
# Test library containing photos with reverse-geolocation (place) metadata.
PHOTOS_DB = "./tests/Test-Places-Catalina-10_15_1.photoslibrary/database/photos.db"
# Photo UUIDs keyed by scenario: two photos with place data, one without.
UUID_DICT = {
    "place_dc": "128FB4C6-0B16-4E7D-9108-FB2E90DA1546",
    "place_maui": "FF7AFE2C-49B0-4C9B-B0D7-7E1F8B8F2F0C",
    "no_place": "A9B73E13-A6F2-4915-8D67-7213B39BAE9F",
}
# Expected PlaceInfo.asdict() payload for the Maui photo.
MAUI_DICT = {
    "name": "Maui, Wailea, Hawai'i, United States",
    "names": {
        "field0": [],
        "country": ["United States"],
        "state_province": ["Hawai'i"],
        "sub_administrative_area": ["Maui"],
        "city": ["Wailea", "Kihei", "Kihei"],
        "field5": [],
        "additional_city_info": [],
        "ocean": [],
        "area_of_interest": [],
        "inland_water": [],
        "field10": [],
        "region": ["Maui"],
        "sub_throughfare": [],
        "field13": [],
        "postal_code": [],
        "field15": [],
        "field16": [],
        "street_address": ["3700 Wailea Alanui Dr"],
        "body_of_water": [],
    },
    "country_code": "US",
    "ishome": False,
    "address_str": "3700 Wailea Alanui Dr, Kihei, HI 96753, United States",
    "address": {
        "street": "3700 Wailea Alanui Dr",
        "sub_locality": None,
        "city": "Kihei",
        "sub_administrative_area": "Maui",
        "state_province": "HI",
        "postal_code": "96753",
        "country": "United States",
        "iso_country_code": "US",
    },
}
def test_place_place_info_1():
    """PlaceInfo fields are fully populated for the Washington DC photo."""
    # test valid place info
    import osxphotos
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photo = photosdb.photos(uuid=[UUID_DICT["place_dc"]])[0]
    assert photo.place is not None
    assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)
    assert not photo.place.ishome
    assert photo.place.name == "Washington, District of Columbia, United States"
    assert photo.place.names.country[0] == "United States"
    assert photo.place.names.state_province[0] == "District of Columbia"
    assert photo.place.names.city[0] == "Washington"
    assert photo.place.names.additional_city_info[0] == "Adams Morgan"
    assert photo.place.names.street_address[0] == "2038 18th St NW"
    # Fields with no data come back as empty lists, not None.
    assert photo.place.names.ocean == []
    assert photo.place.names.area_of_interest == []
    assert photo.place.names.inland_water == []
    assert photo.place.names.postal_code == []
    assert photo.place.names.sub_throughfare == []
    assert photo.place.names.body_of_water == []
    assert photo.place.country_code == "US"
    assert (
        photo.place.address_str
        == "2038 18th St NW, Washington, DC 20009, United States"
    )
    assert photo.place.address.city == "Washington"
    assert photo.place.address.country == "United States"
    assert photo.place.address.postal_code == "20009"
    assert photo.place.address.state_province == "DC"
    assert photo.place.address.street == "2038 18th St NW"
    assert photo.place.address.sub_administrative_area is None
    assert photo.place.address.sub_locality == "Adams Morgan"
    assert photo.place.address.iso_country_code == "US"
def test_place_place_info_2():
    """PlaceInfo fields are fully populated for the Maui photo."""
    # test valid place info
    import osxphotos
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photo = photosdb.photos(uuid=[UUID_DICT["place_maui"]])[0]
    assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)
    assert photo.place is not None
    assert not photo.place.ishome
    assert photo.place.name == "Maui, Wailea, Hawai'i, United States"
    assert photo.place.names.street_address == ["3700 Wailea Alanui Dr"]
    assert photo.place.names.city == ["Wailea", "Kihei", "Kihei"]
    assert photo.place.names.region == ["Maui"]
    assert photo.place.names.sub_administrative_area == ["Maui"]
    assert photo.place.names.state_province == ["Hawai'i"]
    assert photo.place.names.country == ["United States"]
    assert photo.place.country_code == "US"
    assert (
        photo.place.address_str
        == "3700 Wailea Alanui Dr, Kihei, HI 96753, United States"
    )
    assert type(photo.place.address) == osxphotos.placeinfo.PostalAddress
    assert photo.place.address.city == "Kihei"
    assert photo.place.address.country == "United States"
    assert photo.place.address.postal_code == "96753"
    assert photo.place.address.state_province == "HI"
    assert photo.place.address.street == "3700 Wailea Alanui Dr"
    assert photo.place.address.sub_administrative_area == "Maui"
    assert photo.place.address.sub_locality is None
    assert photo.place.address.iso_country_code == "US"
def METHOD_NAME():
    """PhotoInfo.place is None for a photo without reverse-geolocation data."""
    # test photo that has no place info
    import osxphotos
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photo = photosdb.photos(uuid=[UUID_DICT["no_place"]])[0]
    assert photo.place is None
def test_place_place_info_asdict():
    """PlaceInfo.asdict() returns the expected dictionary payload."""
    # Fixed: removed stray trailing residue on the final assert line.
    import osxphotos
    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photo = photosdb.photos(uuid=[UUID_DICT["place_maui"]])[0]
    assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)
    assert photo.place.asdict() == MAUI_DICT
import os
import pytest
from tempfile import TemporaryDirectory
from mindsdb.api.http.initialize import initialize_app
from mindsdb.migrations import migrate
from mindsdb.interfaces.storage import db
from mindsdb.utilities.config import Config
@pytest.fixture(scope="session", autouse=True)
def app():
    """Session-wide Flask app backed by a throwaway SQLite database.

    MINDSDB_DB_CON is saved and restored because the migrate module calls
    db.init(), which reads that environment variable.
    """
    old_minds_db_con = ''
    if 'MINDSDB_DB_CON' in os.environ:
        old_minds_db_con = os.environ['MINDSDB_DB_CON']
    with TemporaryDirectory(prefix='views_test_') as temp_dir:
        db_path = 'sqlite:///' + os.path.join(temp_dir, 'mindsdb.sqlite3.db')
        # Need to change env variable for migrate module, since it calls db.init().
        os.environ['MINDSDB_DB_CON'] = db_path
        db.init()
        migrate.migrate_to_head()
        app = initialize_app(Config(), True, False)
        yield app
    os.environ['MINDSDB_DB_CON'] = old_minds_db_con
@pytest.fixture()
def client(app):
    """Flask test client bound to the session-scoped app fixture."""
    return app.test_client()
def test_get_view_project_not_found_abort(client):
    """Listing views of an unknown project returns 404."""
    resp = client.get('/api/projects/zoopy/views', follow_redirects=True)
    assert '404' in resp.status
def test_get_view_not_found(client):
    """Fetching a nonexistent view returns 404."""
    resp = client.get('/api/projects/mindsdb/views/vroom', follow_redirects=True)
    assert '404' in resp.status
def test_create_view(client):
    """POSTing a valid view returns 201 CREATED with the stored payload."""
    view_data = {
        'view': {
            'name': 'test_create_view',
            'query': 'SELECT * FROM example_db.house_sales'
        }
    }
    response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)
    # Make sure we use the CREATED HTTP status code.
    assert '201' in response.status
    new_view = response.get_json()
    expected_view = {
        'name': 'test_create_view',
        'query': 'SELECT * FROM example_db.house_sales',
        'id': new_view['id']
    }
    assert new_view == expected_view
def test_create_view_project_not_found_abort(client):
    """Creating a view under an unknown project returns 404."""
    payload = {
        'view': {
            'name': 'test_create_view',
            'query': 'SELECT * FROM example_db.house_sales',
        }
    }
    resp = client.post('/api/projects/muhproject/views', json=payload, follow_redirects=True)
    assert '404' in resp.status
def test_create_view_already_exists_abort(client):
    """Creating the same view twice returns 409 CONFLICT the second time."""
    view_data = {
        'view': {
            'name': 'test_create_view_duplicate',
            'query': 'SELECT * FROM example_db.house_sales'
        }
    }
    response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)
    assert '201' in response.status
    create_duplicate_response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)
    # Make sure we use CONFLICT status code.
    assert '409' in create_duplicate_response.status
def METHOD_NAME(client):
    """POST without the required top-level 'view' wrapper returns 400."""
    payload = {
        'name': 'test_create_view',
        'query': 'SELECT * FROM example_db.house_sales',
    }
    resp = client.post('/api/projects/mindsdb/views', json=payload, follow_redirects=True)
    assert '400' in resp.status
def test_create_view_no_name_aborts(client):
    """POST with a view missing its 'name' field returns 400."""
    payload = {
        'view': {
            'query': 'SELECT * FROM example_db.house_sales',
        }
    }
    resp = client.post('/api/projects/mindsdb/views', json=payload, follow_redirects=True)
    assert '400' in resp.status
def test_create_view_no_query_aborts(client):
    """POST with a view missing its 'query' field returns 400."""
    payload = {
        'view': {
            'name': 'test_create_view',
        }
    }
    resp = client.post('/api/projects/mindsdb/views', json=payload, follow_redirects=True)
    assert '400' in resp.status
def test_update_view(client):
    """PUT on an existing view updates its query and returns 200."""
    view_data = {
        'view': {
            'name': 'test_update_view',
            'query': 'SELECT * FROM example_db.house_sales'
        }
    }
    updated_view = {
        'view': {
            'query': 'SELECT * FROM example_db.updated_house_sales'
        }
    }
    client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)
    response = client.put('/api/projects/mindsdb/views/test_update_view', json=updated_view, follow_redirects=True)
    assert '200' in response.status
    # NOTE: 'updated_view' is rebound here from request payload to response body.
    updated_view = response.get_json()
    expected_view = {
        'name': 'test_update_view',
        'query': 'SELECT * FROM example_db.updated_house_sales',
        'id': updated_view['id']
    }
    assert updated_view == expected_view
def test_update_view_creates(client):
    """PUT on a missing view upserts it and returns 201 CREATED."""
    view_data = {
        'view': {
            'query': 'SELECT * FROM example_db.house_sales'
        }
    }
    response = client.put('/api/projects/mindsdb/views/test_update_view_creates', json=view_data, follow_redirects=True)
    assert '201' in response.status
    created_view = response.get_json()
    expected_view = {
        'name': 'test_update_view_creates',
        'query': 'SELECT * FROM example_db.house_sales',
        'id': created_view['id']
    }
    assert created_view == expected_view
def test_update_view_no_view_aborts(client):
    """PUT without the required top-level 'view' wrapper returns 400."""
    payload = {
        'name': 'test_update_view',
        'query': 'SELECT * FROM example_db.house_sales',
    }
    resp = client.put('/api/projects/mindsdb/views/test_update_view', json=payload, follow_redirects=True)
    assert '400' in resp.status
def test_delete_view(client):
    """DELETE removes an existing view; later GETs return 404."""
    view_data = {
        'view': {
            'name': 'test_delete_view',
            'query': 'SELECT * FROM example_db.house_sales'
        }
    }
    # Delete newly created DB.
    client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)
    response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)
    assert '200' in response.status
    response = client.delete('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)
    # Make sure we return NO_CONTENT status since we don't return the deleted DB.
    assert '204' in response.status
    response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)
    assert '404' in response.status
def test_delete_view_does_not_exist(client):
    """DELETE on a nonexistent view returns 404."""
    resp = client.delete('/api/projects/mindsdb/views/florp', follow_redirects=True)
    assert '404' in resp.status
def test_delete_view_project_not_found(client):
    """DELETE under an unknown project returns 404."""
    # Fixed: removed stray trailing residue on the assert line.
    response = client.delete('/api/projects/dindsmb/views/test_delete_view', follow_redirects=True)
    assert '404' in response.status
from collections import UserList
from collections.abc import Iterable
from datetime import datetime
from functools import reduce
from mage_ai.api.operations.constants import READ
from mage_ai.api.resources.BaseResource import BaseResource
from mage_ai.orchestration.db.models.base import BaseModel
from mage_ai.shared.hash import merge_dict
import importlib
import inspect
class BasePresenter():
all_attributes_attr = {}
all_formats_attr = {}
default_attributes = []
def __init__(self, resource, current_user, **kwargs):
self.current_user = current_user
self.options = kwargs
self.resource = resource
@classmethod
def all_attributes(self):
if not self.all_attributes_attr.get(self.__name__):
self.all_attributes_attr[self.__name__] = {}
return self.all_attributes_attr[self.__name__]
@classmethod
def all_formats(self):
if not self.all_formats_attr.get(self.__name__):
self.all_formats_attr[self.__name__] = {
'default': self.default_attributes,
}
return self.all_formats_attr[self.__name__]
@classmethod
def formats(self, format_arg):
if format_arg and self.all_formats().get(format_arg, None) is not None:
return self.all_formats()[format_arg]
else:
return self.all_formats()['default']
@classmethod
def METHOD_NAME(self, keys, klass_symbol_or_lambda):
for key in keys:
self.all_attributes()[key] = klass_symbol_or_lambda
@classmethod
def register_format(self, format_arg, keys):
self.all_formats()[format_arg] = keys
@classmethod
def register_formats(self, formats, keys):
arr = formats if isinstance(formats, list) else [formats]
for format_arg in arr:
self.register_format(format_arg, keys)
@classmethod
async def present_resource(self, resource, user, **kwargs):
async def present_lambda(r):
if r and inspect.isawaitable(r):
r = await r
results = r.__class__.presenter_class()(
r,
user,
**kwargs,
).present(
**kwargs,
)
if results and inspect.isawaitable(results):
results = await results
return results
if isinstance(resource, Iterable):
return [await present_lambda(r) for r in resource]
else:
return await present_lambda(resource)
@classmethod
def present_model(self, model, resource_class, user, **kwargs):
if model:
return self.present_resource(
resource_class(model, user, **kwargs),
user,
**kwargs,
)
@classmethod
def present_models(self, models, resource_class, user, **kwargs):
return self.present_resource(
resource_class.build_result_set(models, user, **kwargs),
user,
**kwargs,
)
async def present(self, **kwargs):
def _build(obj, key):
value = getattr(self, key)
if callable(value):
value = value(**kwargs)
self.__validate_attribute_type(key, value)
if issubclass(
value.__class__,
list) or issubclass(
value.__class__,
UserList):
obj[key] = [
self.__transform_value(
key, v, **kwargs) for v in value]
else:
obj[key] = self.__transform_value(key, value, **kwargs)
return obj
format_to_present = kwargs.get('format', None)
if format_to_present and self.options.get('from_resource'):
from_resource_name = self.options['from_resource'].resource_name_singular(
)
format_to_present = f'{from_resource_name}/{format_to_present}'
return reduce(_build, self.__class__.formats(format_to_present), {})
    def __transform_value(self, key, value, **kwargs):
        """Convert one attribute value into its serializable form.

        - BaseModel values are wrapped in their matching ``<Model>Resource``
          (resolved by naming convention from mage_ai.api.resources).
        - datetimes are stringified; float/int coercers registered for the key
          are applied.
        - BaseResource values are presented recursively, with attribute-level
          READ authorization unless ``ignore_permissions`` is set.
        - Anything else passes through unchanged.
        """
        klass_symbol_or_lambda = self.__class__.all_attributes().get(key, None)
        if issubclass(value.__class__, BaseModel):
            # Resolve the resource class by naming convention: <Model>Resource.
            resource_class_name = f'{value.__class__.__name__}Resource'
            resource_class = getattr(importlib.import_module(
                f'mage_ai.api.resources.{resource_class_name}'), resource_class_name, )
            value = resource_class(value, self.current_user, **kwargs)
        if isinstance(value, datetime):
            return str(value)
        elif klass_symbol_or_lambda is float:
            return float(value)
        elif klass_symbol_or_lambda is int:
            return int(value)
        elif issubclass(value.__class__, BaseResource):
            opts = self.options.copy()
            # Record the parent so the nested presenter namespaces its format.
            opts['from_resource'] = self.resource
            # NOTE(review): present_resource is an async classmethod, yet `data`
            # is used directly below (data.keys()); presumably awaited somewhere
            # upstream in the call chain — confirm.
            data = value.presenter_class().present_resource(
                value,
                self.current_user,
                **merge_dict(kwargs, opts),
            )
            if not kwargs.get('ignore_permissions'):
                policy = value.policy_class()(value, self.current_user, **opts)
                policy.authorize_attributes(
                    READ,
                    data.keys(),
                    **opts,
                )
            return data
        else:
            return value
    def __validate_attribute_class(self, klass_symbol, value):
        # Placeholder hook: subclasses may enforce that `value` matches the
        # registered class for an attribute. Intentionally a no-op here.
        pass

    def __validate_attribute_type(self, key, value):
        # Placeholder hook: invoked from present() before serializing each
        # attribute. Intentionally a no-op in the base implementation.
        pass
def __getattr__(self, name):
def _missing(*args, **kwargs):
val = getattr(self.resource, name)
if callable(val):
return val(*args, **kwargs)
else:
return val
return _missing() |
2,069 | rebuild cases | from django.core.management.base import BaseCommand
from casexml.apps.case.cleanup import rebuild_case_from_forms
from casexml.apps.case.xform import get_case_updates
from corehq.apps.users.models import CouchUser
from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL
from corehq.form_processor.models import RebuildWithReason, XFormInstance
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions
class Command(BaseCommand):
    """One-off management command: archive all of a user's normal forms on a
    domain, then rebuild every case and ledger those forms touched."""
    help = """
    Bulk archive forms for user on domain.
    First archive all forms and then rebuild corresponding cases
    """

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        self.forms = []                # XFormInstance objects to archive
        self.case_ids_to_rebuild = []  # case ids referenced by those forms
        self.user_id = None
        self.domain = None

    def add_arguments(self, parser):
        parser.add_argument('user_id')
        parser.add_argument('domain')

    def _get_forms_to_archive(self):
        """Return the user's normal (non-error, non-deleted) forms on the domain."""
        # ordered with latest form's id on top
        get_forms = XFormInstance.objects.get_forms
        form_ids = XFormInstance.objects.get_form_ids_for_user(self.domain, self.user_id)
        return [f for f in get_forms(form_ids, self.domain) if f.is_normal]

    def _fetch_case_ids_to_rebuild(self):
        """Collect the distinct case ids referenced by any case update in self.forms."""
        case_ids_to_rebuild = set()
        for form in with_progress_bar(self.forms):
            form_case_ids = set(cu.id for cu in get_case_updates(form))
            if form_case_ids:
                case_ids_to_rebuild.update(form_case_ids)
        return list(case_ids_to_rebuild)

    def _archive_forms(self):
        """Archive each form, logging its id to forms_archived.txt as an audit trail."""
        with open("forms_archived.txt", "w") as forms_log:
            for form in with_progress_bar(self.forms):
                forms_log.write("%s\n" % form.form_id)
                # trigger_signals=False: cases are rebuilt explicitly below
                # instead of via the usual archive signal handlers.
                form.archive(trigger_signals=False)

    def _remove_ledger_transactions(self):
        """Delete ledger transactions created by the archived forms, logging case ids."""
        with open("ledger_transactions_removed_case_ids.txt", "w") as case_ids_log:
            forms_iterated = 0
            for xform in with_progress_bar(self.forms):
                forms_iterated += 1
                if forms_iterated % 100 == 0:
                    print("traversed %s forms" % forms_iterated)
                ledger_case_ids = get_case_ids_from_stock_transactions(xform)
                if ledger_case_ids:
                    ledger_case_ids = list(ledger_case_ids)
                    for ledger_case_id in ledger_case_ids:
                        case_ids_log.write("%s\n" % ledger_case_id)
                    LedgerAccessorSQL.delete_ledger_transactions_for_form(ledger_case_ids, xform.form_id)

    def METHOD_NAME(self):
        """Rebuild each affected case from its remaining forms, then its ledger state."""
        user = CouchUser.get_by_user_id(self.user_id)
        reason = "User %s forms archived for domain %s by system" % (user.raw_username, self.domain)
        form_processor_interface = FormProcessorInterface(self.domain)
        with open("cases_rebuilt.txt", "w") as case_log:
            for case_id in with_progress_bar(self.case_ids_to_rebuild):
                case_log.write("%s\n" % case_id)
                rebuild_case_from_forms(self.domain, case_id, RebuildWithReason(reason=reason))
                ledgers = form_processor_interface.ledger_db.get_ledgers_for_case(case_id)
                for ledger in ledgers:
                    form_processor_interface.ledger_processor.rebuild_ledger_state(
                        case_id, ledger.section_id, ledger.entry_id)

    def handle(self, user_id, domain, **options):
        self.user_id = user_id
        self.domain = domain
        self.forms = self._get_forms_to_archive()
        print("Found %s normal forms for user" % len(self.forms))
        self.case_ids_to_rebuild = self._fetch_case_ids_to_rebuild()
        print("Found %s cases that would need to be rebuilt" % len(self.case_ids_to_rebuild))
        print("Starting with form archival")
        self._archive_forms()
        print("Starting with removing ledger transactions")
        self._remove_ledger_transactions()
        print("Starting with cases rebuild")
        self.METHOD_NAME()
        print("Completed!")
2,070 | short name | # Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for cross-lowering.
We check that we produce the same exact HLO using native lowering and with
cross-lowering. This will save the HLO for all PrimitiveHarnesses as generated
on the current backend (`jax.default_backend()`) for all of `cpu`, `gpu`, and
`tpu`. The file names are <save_directory>/<harness_name>/for_{cpu,tpu}_on_{cpu,tpu}.mlir.
If a saved file already exists, produced on a different backend, then the
currently generated file is compared with the saved one.
"""
from collections.abc import Sequence
import contextlib
import dataclasses
import os
import re
from typing import Callable, Optional
import zlib
from absl import app
from absl import logging
import numpy.random as npr
import jax
from jax import config # Must import before TF
from jax.experimental import jax2tf # Defines needed flags
from jax._src import test_util # Defines needed flags
config.parse_flags_with_absl()
# Import after parsing flags
from jax.experimental.jax2tf.tests import primitive_harness
@dataclasses.dataclass
class Scenario:
  """One (harness, generated-on platform, lowered-for platform) combination."""
  harness: primitive_harness.Harness
  on_platform: str
  for_platform: str

  @property
  def METHOD_NAME(self) -> str:
    """A filesystem-safe basename for the harness, stable across processes."""
    basename = re.sub(r"[^a-zA-Z0-9_\-]", "_", self.harness.fullname)
    if len(basename) >= 128:
      # Use a content checksum for the truncation suffix. The builtin hash()
      # of a str is randomized per process (PYTHONHASHSEED), which made long
      # basenames differ between runs and defeated the cross-run file
      # comparison this module exists to do.
      basename = basename[0:100] + str(zlib.adler32(self.harness.fullname.encode()))
    return basename

  def output_file(self, save_directory: str) -> str:
    """Full path of the .mlir dump for this scenario."""
    basename = self.METHOD_NAME
    return os.path.join(
        save_directory, basename,
        f"for_{self.for_platform}_on_{self.on_platform}.mlir")

  def __str__(self):
    # Closing parenthesis added: the previous format string never closed
    # the opening "Scenario(".
    return f"Scenario(harness={self.harness.fullname}, on={self.on_platform}, for={self.for_platform}, basename={self.METHOD_NAME})"
class Io:
  """Thin indirection over plain filesystem calls vs. tensorflow's gfile."""

  def __init__(self, use_gfile=False):
    self.use_gfile = use_gfile
    self.gfile = None
    if use_gfile:
      from tensorflow.io import gfile
      self.gfile = gfile

  def exists(self, filename: str) -> bool:
    """True if *filename* exists under the selected backend."""
    return self.gfile.exists(filename) if self.use_gfile else os.path.exists(filename)

  def makedirs(self, dirname: str):
    """Create *dirname* (including parents) under the selected backend."""
    return self.gfile.makedirs(dirname) if self.use_gfile else os.makedirs(dirname)

  @contextlib.contextmanager
  def open(self, filename: str, mode: str):
    """Open *filename*, yield the handle, and always close it on exit."""
    handle = self.gfile.GFile(filename, mode=mode) if self.use_gfile else open(filename, mode=mode)
    try:
      yield handle
    finally:
      handle.close()
def write_and_check_harness(harness: primitive_harness.Harness,
                            io: Io,
                            save_directory: str,
                            for_platforms: Sequence[str] = ("cpu", "tpu"),) -> Sequence[str]:
  """Writes and checks HLO for a given harness.

  Writes the HLOs generated in the current platform for all platforms.
  If it finds previously written HLOs generated on other platforms, compares
  them with the ones generated on this platform.

  Returns a list of diff descriptions for files that did not match.
  """
  diffs = []
  func_jax = harness.dyn_fun
  # Seed deterministically from the harness name so args are reproducible.
  rng = npr.RandomState(zlib.adler32(harness.fullname.encode()))
  args = harness.dyn_args_maker(rng)
  # Generate the HLO for all platforms
  for for_platform in for_platforms:
    if not harness.filter(for_platform):
      logging.info("Skip harness %s for %s because it is not implemented in JAX",
                   harness.fullname, for_platform)
      continue
    scenario1 = Scenario(harness, jax.default_backend(), for_platform)
    output_file = scenario1.output_file(save_directory)
    output_dir = os.path.dirname(output_file)
    if not io.exists(output_dir):
      io.makedirs(output_dir)
    if io.exists(output_file):
      # Fix: go through `io` (not the builtin open) so the gfile backend works.
      with io.open(output_file, "r") as f:
        hlo = f.read()
    else:
      # For a tighter check, detect the native platform lowering and do not
      # trigger cross-lowering
      if for_platform == jax.default_backend():
        lowered = jax.jit(func_jax).lower(*args)
      else:
        # TODO: replace this with JAX cross-platform API, without going through
        # jax2tf
        from jax.experimental.jax2tf.jax2tf import cross_platform_lowering
        lowered = cross_platform_lowering(func_jax, args,
                                          platforms=[for_platform])
      # Fix: stringify once. Previously the freshly-lowered `hlo` stayed an IR
      # object while the on-disk side was a str, so the comparison below could
      # never match on the writing run.
      hlo = str(lowered.compiler_ir(dialect="stablehlo"))  # type: ignore
      with io.open(output_file, "w") as f:
        f.write(hlo)
    # Compare with previously written files
    for on_platform in ['cpu', 'tpu']:
      if on_platform == jax.default_backend():
        continue
      scenario2 = Scenario(harness, on_platform, for_platform)
      other_file = scenario2.output_file(save_directory)
      if io.exists(other_file):
        logging.info("Comparing for %s harness %s on %s vs %s",
                     for_platform, harness.fullname, jax.default_backend(), on_platform)
        with io.open(other_file, "r") as f:
          other_hlo = f.read()
        if hlo != other_hlo:
          # Fix: the previous call passed arguments to a format string with no
          # placeholders, which makes the logging module raise an error.
          logging.info("Found diff for %s harness %s: %s vs %s",
                       for_platform, harness.fullname, jax.default_backend(), on_platform)
          diffs.append(f"Found diff between {output_file} and {other_file}")
  return diffs
def write_and_check_harnesses(io: Io,
                              save_directory: str,
                              *,
                              filter_harness: Optional[Callable[[str], bool]] = None,
                              for_platforms: Sequence[str] = ("cpu", "tpu"),
                              verbose = False):
  """Runs write_and_check_harness over all primitive harnesses.

  Args:
    io: IO abstraction (plain filesystem or gfile).
    save_directory: directory where .mlir dumps are written and compared.
    filter_harness: optional predicate on the harness fullname; harnesses for
      which it returns False are skipped.
    for_platforms: platforms to lower for.
    verbose: if True, log each skipped harness.
  """
  logging.info("Writing and checking harnesses at %s", save_directory)
  nr_harnesses = len(primitive_harness.all_harnesses)
  for i, harness in enumerate(primitive_harness.all_harnesses):
    if i % 100 == 0:
      logging.info("Trying cross-lowering for harness #%d/%d",
                   i, nr_harnesses)
    enable_xla = harness.params.get("enable_xla", True)
    if not enable_xla:
      if verbose:
        logging.info("Skip %s due to enable_xla=False", harness.fullname)
      continue
    if filter_harness is not None and not filter_harness(harness.fullname):
      if verbose:
        logging.info("Skip %s due to filter_harness", harness.fullname)
      continue
    write_and_check_harness(harness, io, save_directory,
                            for_platforms=for_platforms)
def main(argv: Sequence[str]) -> None:
  """Entry point: write/check HLO dumps for the harnesses selected below."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  def filter_harness(name: str) -> bool:
    # Restrict the run to a subset of harnesses; edit as needed.
    return "cummax" in name
  for_platforms = ('cpu', 'tpu')
  write_and_check_harnesses(Io(False), "./hlo_dumps",
                            filter_harness=filter_harness,
                            for_platforms=for_platforms)

if __name__ == "__main__":
  app.run(main)
2,071 | test status subscriber error |
import unittest
import multiprocessing
import sys
import time
import ipaddress
import broker
class TestCommunication(unittest.TestCase):
    """End-to-end broker tests: peer two endpoints over localhost and exercise
    publish/subscribe, batching, value immutability, and status reporting.
    The --xxx-start/--xxx-end marker comments delimit snippets that are
    extracted into the documentation; do not remove them."""

    def test_ping(self):
        # --peer-start
        with broker.Endpoint() as ep1, \
             broker.Endpoint() as ep2, \
             ep1.make_subscriber("/test") as s1, \
             ep2.make_subscriber("/test") as s2:
            # Listen on an ephemeral port, peer, and wait for both sides.
            port = ep1.listen("127.0.0.1", 0)
            self.assertTrue(ep2.peer("127.0.0.1", port, 1.0))
            ep1.await_peer(ep2.node_id())
            ep2.await_peer(ep1.node_id())
            # --peer-end
            # --ping-start
            ep2.publish("/test", ["ping"])
            (t, d) = s1.get()
            # t == "/test", d == ["ping"]
            # --ping-end
            self.assertEqual(t, "/test")
            self.assertEqual(d[0], "ping")
            ep1.publish(t, ["pong"])
            while True:
                # This loop exists just for sake of test coverage for "poll()"
                msgs = s2.poll()
                if msgs:
                    self.assertEqual(len(msgs), 1)
                    (t, d) = msgs[0]
                    break;
                time.sleep(0.1)
            self.assertEqual(t, "/test")
            self.assertEqual(d[0], "pong")

    def test_messages(self):
        # Batched publishing preserves per-topic ordering and round-trips
        # complex values (dicts, addresses).
        with broker.Endpoint() as ep1, \
             broker.Endpoint() as ep2, \
             ep1.make_subscriber("/test") as s1:
            port = ep1.listen("127.0.0.1", 0)
            self.assertTrue(ep2.peer("127.0.0.1", port, 1.0))
            ep1.await_peer(ep2.node_id())
            ep2.await_peer(ep1.node_id())
            msg0 = ("/test/1", ())
            ep2.publish(*msg0)
            # --messages-start
            msg1 = ("/test/2", (1, 2, 3))
            msg2 = ("/test/3", (42, "foo", {"a": "A", "b": ipaddress.IPv4Address('1.2.3.4')}))
            ep2.publish_batch(msg1, msg2)
            # --messages-end
            msgs = s1.get(3)
            self.assertFalse(s1.available())
            self.assertEqual(msgs[0], msg0)
            self.assertEqual(msgs[1], msg1)
            self.assertEqual(msgs[2], msg2)
            # These results are not (all) immutable: try modifying the third
            # value (the dict) of the last message above.
            dict_data = msgs[2][1][2]
            self.assertEqual(len(dict_data), 2)
            dict_data["c"] = "not immutable"
            self.assertEqual(len(dict_data), 3)

    def test_immutable_messages(self):
        # A "safe" subscriber delivers deeply immutable values.
        with broker.Endpoint() as ep1, \
             broker.Endpoint() as ep2, \
             ep1.make_safe_subscriber("/test") as s1:
            port = ep1.listen("127.0.0.1", 0)
            ep2.peer("127.0.0.1", port, 1.0)
            msg = ("/test/1", ({"a": "A"}, set([1,2,3]), ('a', 'b', 'c')))
            ep2.publish(*msg)
            topic, (dict_data, set_data, tuple_data) = s1.get()
            # The return values are immutable, so each of the following triggers
            # a type-specific exception.
            with self.assertRaises(TypeError):
                # 'mappingproxy' object does not support item assignment
                dict_data["b"] = "B"
            with self.assertRaises(AttributeError):
                # 'frozenset' object has no attribute 'add'
                set_data.add(4)
            with self.assertRaises(TypeError):
                # 'tuple' object does not support item assignment
                tuple_data[3] = 'd'

    def test_publisher(self):
        # A dedicated publisher handle publishes values on a fixed topic.
        with broker.Endpoint() as ep1, \
             broker.Endpoint() as ep2, \
             ep1.make_subscriber("/test") as s1, \
             ep2.make_publisher("/test") as p2:
            port = ep1.listen("127.0.0.1", 0)
            self.assertTrue(ep2.peer("127.0.0.1", port, 1.0))
            ep1.await_peer(ep2.node_id())
            ep2.await_peer(ep1.node_id())
            p2.publish([1, 2, 3])
            p2.publish_batch(["a", "b", "c"], [True, False])
            msgs = s1.get(3)
            self.assertFalse(s1.available())
            self.assertEqual(msgs[0], ("/test", (1, 2, 3)))
            self.assertEqual(msgs[1], ("/test", ("a", "b", "c")))
            self.assertEqual(msgs[2], ("/test", (True, False)))

    def test_status_subscriber(self):
        # Successful peering produces EndpointDiscovered then PeerAdded on
        # both sides.
        # --status-start
        with broker.Endpoint() as ep1, \
             broker.Endpoint() as ep2, \
             ep1.make_status_subscriber(True) as es1, \
             ep2.make_status_subscriber(True) as es2:
            port = ep1.listen("127.0.0.1", 0)
            self.assertEqual(ep2.peer("127.0.0.1", port, 1.0), True)
            ep1.await_peer(ep2.node_id())
            ep2.await_peer(ep1.node_id())
            st1 = es1.get(2)
            st2 = es2.get(2)
            # st1.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded]
            # st2.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded]
            # --status-end
            self.assertEqual(len(st1), 2)
            self.assertEqual(st1[0].code(), broker.SC.EndpointDiscovered)
            self.assertEqual(st1[1].code(), broker.SC.PeerAdded)
            self.assertEqual(len(st2), 2)
            self.assertEqual(st2[0].code(), broker.SC.EndpointDiscovered)
            self.assertEqual(st2[1].code(), broker.SC.PeerAdded)
            self.assertEqual(st2[1].context().network.get().address, "127.0.0.1")

    def METHOD_NAME(self):
        # Failed peering reports PeerUnavailable, both sync and async.
        # --error-start
        with broker.Endpoint() as ep1, \
             ep1.make_status_subscriber() as es1:
            r = ep1.peer("127.0.0.1", 1947, 0.0) # Try unavailable port, no retry
            self.assertEqual(r, False) # Not shown in docs.
            st1 = es1.get()
            # s1.code() == broker.EC.PeerUnavailable
            # --error-end
            self.assertEqual(st1.code(), broker.EC.PeerUnavailable)
            # Async version.
            ep1.peer_nosync("127.0.0.1", 1947, 1.0)
            st1 = es1.get()
            self.assertEqual(st1.code(), broker.EC.PeerUnavailable)
            st1 = es1.get()
            self.assertEqual(st1.code(), broker.EC.PeerUnavailable)

    def test_idle_endpoint(self):
        # Creating and tearing down subscribers without any traffic must not
        # hang or raise.
        with broker.Endpoint() as ep1, \
             ep1.make_status_subscriber() as es1, \
             ep1.make_subscriber("/test") as s1:
            pass

if __name__ == '__main__':
    unittest.main(verbosity=3)
2,072 | drop index | from redis import Redis, RedisError, ConnectionPool
import datetime
import itertools
import json
import time
class Document(object):
    """A search result document: an id plus arbitrary stored fields exposed as
    attributes."""

    def __init__(self, id, **fields):
        self.id = id
        # Fix: dict.iteritems() is Python 2 only; items() behaves identically
        # on both Python 2 and 3.
        for k, v in fields.items():
            setattr(self, k, v)

    def __repr__(self):
        return 'Document %s' % self.__dict__

    def snippetize(self, field, size=500, boldTokens=()):
        """Trim `field` in place to roughly `size` chars and highlight tokens.

        :param field: name of the attribute to snippetize (modified in place).
        :param size: soft maximum snippet length; the cut is extended to the
            next space so words are not split, and '...' is appended when the
            text was actually trimmed.
        :param boldTokens: tokens to wrap in <b></b> tags. (Default changed
            from a shared mutable list to a tuple; it is only iterated, so
            behavior is unchanged.)
        """
        txt = getattr(self, field, '')
        for tok in boldTokens:
            txt = txt.replace(tok, "<b>%s</b>" % tok)
        # Extend the cut point to the next space so we don't split a word.
        while size < len(txt) and txt[size] != ' ':
            size += 1
        setattr(self, field, (txt[:size] + '...') if len(txt) > size else txt)
class Result(object):
    """Parsed FT.SEARCH reply: total hit count plus Document objects."""

    def __init__(self, res, hascontent, queryText, duration=0):
        """
        :param res: raw redis reply: [total, id1, (fields1,) id2, (fields2,) ...]
        :param hascontent: whether the reply interleaves field/value payloads.
        :param queryText: the query string; its tokens are bolded in snippets.
        :param duration: query duration in milliseconds.
        """
        self.total = res[0]
        self.duration = duration
        self.docs = []
        # Fix: materialize the token list. A lazy filter() object (Python 3)
        # would be exhausted after snippetizing the first document.
        tokens = [t for t in queryText.rstrip("\" ").lstrip(" \"").split(' ') if t]
        # With content, reply entries come in (id, fields) pairs; otherwise
        # the reply is just a flat list of ids. (xrange/izip replaced with
        # their portable equivalents; the redundant dict(dict(...)) and the
        # doubled hascontent test were dropped.)
        for i in range(1, len(res), 2 if hascontent else 1):
            id = res[i]
            fields = {}
            if hascontent:
                # Field payload alternates name, value, name, value, ...
                fields = dict(zip(res[i + 1][::2], res[i + 1][1::2]))
            try:
                del fields['id']
            except KeyError:
                pass
            doc = Document(id, **fields)
            if hascontent:
                try:
                    doc.snippetize('body', size=500, boldTokens=tokens)
                except Exception as e:
                    # Snippetizing is cosmetic; keep the document on failure.
                    # (print e was a Python-2-only syntax error under Python 3.)
                    print(e)
            self.docs.append(doc)

    def __repr__(self):
        return 'Result{%d total, docs: %s}' % (self.total, self.docs)
class Client(object):
    """Thin client for the RediSearch redis module, wrapping the raw FT.*
    commands over a redis-py connection."""

    NUMERIC = 'numeric'

    CREATE_CMD = 'FT.CREATE'
    SEARCH_CMD = 'FT.SEARCH'
    ADD_CMD = 'FT.ADD'
    DROP_CMD = 'FT.DROP'

    class BatchIndexer(object):
        """
        A batch indexer allows you to automatically batch
        document indexing in pipelines, flushing it every N documents.
        """

        def __init__(self, client, chunk_size = 1000):
            self.client = client
            # Non-transactional pipeline: batching is for throughput only.
            self.pipeline = client.redis.pipeline(False)
            self.total = 0
            self.chunk_size = chunk_size
            self.current_chunk = 0

        def __del__(self):
            # Best-effort flush of unsent documents. NOTE: relying on __del__
            # is fragile (interpreter shutdown order); call commit() explicitly.
            if self.current_chunk:
                self.commit()

        def add_document(self, doc_id, nosave = False, score=1.0, **fields):
            """Queue a document for indexing, flushing when the chunk is full."""
            self.client._add_document(doc_id, conn=self.pipeline, nosave = nosave, score = score, **fields)
            self.current_chunk += 1
            self.total += 1
            if self.current_chunk >= self.chunk_size:
                self.commit()

        def commit(self):
            """Flush all queued commands to the server."""
            self.pipeline.execute()
            self.current_chunk = 0

    def __init__(self, index_name, host='localhost', port=6379):
        """Create a client bound to `index_name` on the given redis server."""
        self.host = host
        self.port = port
        self.index_name = index_name
        self.redis = Redis(
            connection_pool = ConnectionPool(host=host, port=port))

    def batch_indexer(self, chunk_size = 100):
        """
        Create a new batch indexer from the client with a given chunk size
        """
        return Client.BatchIndexer(self, chunk_size = chunk_size)

    def create_index(self, **fields):
        """
        Create the search index. Creating an existing index just updates its properties

        :param fields: a kwargs consisting of field=[score|NUMERIC]
        """
        self.redis.execute_command(
            self.CREATE_CMD, self.index_name, *itertools.chain(*fields.items()))

    def METHOD_NAME(self):
        """
        Drop the index if it exists
        """
        self.redis.execute_command(self.DROP_CMD, self.index_name)

    def _add_document(self, doc_id, conn = None, nosave = False, score=1.0, **fields):
        """
        Internal add_document used for both batch and single doc indexing
        """
        if conn is None:
            conn = self.redis
        args = [self.ADD_CMD, self.index_name, doc_id, score]
        if nosave:
            args.append('NOSAVE')
        args.append('FIELDS')
        args += list(itertools.chain(*fields.items()))
        return conn.execute_command(*args)

    def add_document(self, doc_id, nosave = False, score=1.0, **fields):
        """
        Add a single document to the index.

        :param doc_id: the id of the saved document.
        :param nosave: if set to true, we just index the document, and don't save a copy of it.
                       this means that searches will just return ids.
        :param score: the document ranking, between 0.0 and 1.0.
        :param fields: kwargs dictionary of the document fields to be saved and/or indexed
        """
        return self._add_document(doc_id, conn=None, nosave=nosave, score=score, **fields)

    def load_document(self, id):
        """
        Load a single document by id
        """
        fields = self.redis.hgetall(id)
        try:
            del fields['id']
        except KeyError:
            pass
        return Document(id=id, **fields)

    def search(self, query, offset =0, num = 10, verbatim = False, no_content=False, no_stopwords = False, fields=None, **filters):
        """
        Search the index for a given query string.

        :param query: the full-text query string.
        :param offset: pagination offset of the first result.
        :param num: maximum number of results to return.
        :param verbatim: if True, disable query expansion/stemming.
        :param no_content: if True, return only document ids, not stored fields.
        :param no_stopwords: if True, do not filter stopwords from the query.
        :param fields: optional list of fields to restrict the search to.
        :param filters: numeric range filters given as field=(min, max) kwargs.
        :return: a Result with the total count and the matched Documents.
        """
        args = [self.index_name, query]
        if no_content:
            args.append('NOCONTENT')
        if fields:
            args.append('INFIELDS')
            args.append(len(fields))
            args += fields
        if verbatim:
            args.append('VERBATIM')
        if no_stopwords:
            args.append('NOSTOPWORDS')
        if filters:
            # Fix: dict.iteritems() is Python 2 only; items() works on both.
            for k, v in filters.items():
                args += ['FILTER', k] + list(v)
        args += ["LIMIT", offset, num]
        st = time.time()
        res = self.redis.execute_command(self.SEARCH_CMD, *args)
        return Result(res, no_content == False, queryText=query, duration = (time.time()-st)*1000.0)
2,073 | create next relation | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Iterator, Union
from amundsen_common.utils.atlas import AtlasCommonParams, AtlasCommonTypes
from databuilder.models.atlas_entity import AtlasEntity
from databuilder.models.atlas_relationship import AtlasRelationship
from databuilder.models.atlas_serializable import AtlasSerializable
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.serializers.atlas_serializer import get_entity_attrs
from databuilder.utils.atlas import AtlasRelationshipTypes, AtlasSerializedEntityOperation
class ResourceReport(GraphSerializable, AtlasSerializable):
    """
    Resource Report matching model

    Report represents a document that can be linked to any resource (like a table) in Amundsen.

    Example would be Pandas Profiling HTML report containing full advanced profile of a table.
    """
    RESOURCE_REPORT_LABEL = 'Report'

    RESOURCE_REPORT_NAME = 'name'
    RESOURCE_REPORT_URL = 'url'

    REPORT_KEY_FORMAT = '{resource_uri}/_report/{report_name}'

    REPORT_RESOURCE_RELATION_TYPE = 'REFERS_TO'
    RESOURCE_REPORT_RELATION_TYPE = 'HAS_REPORT'

    def __init__(self,
                 name: str,
                 url: str,
                 resource_uri: str,
                 resource_label: str,  # for example 'Table'
                 ) -> None:
        self.report_name = name
        self.report_url = url
        self.resource_uri = resource_uri
        self.resource_label = resource_label
        self.resource_report_key = self.get_resource_model_key()
        # Lazily consumed iterators; the serializers pull one item at a time
        # via the create_next_* methods below.
        self._node_iter = self._create_node_iterator()
        self._relation_iter = self._create_relation_iterator()
        self._atlas_entity_iterator = self._create_next_atlas_entity()
        self._atlas_relation_iterator = self._create_atlas_relation_iterator()

    def get_resource_model_key(self) -> str:
        """Unique report node key: <resource_uri>/_report/<report_name>."""
        return ResourceReport.REPORT_KEY_FORMAT.format(resource_uri=self.resource_uri, report_name=self.report_name)

    def create_next_node(self) -> Union[GraphNode, None]:
        """Return the next graph node to serialize, or None when exhausted."""
        try:
            return next(self._node_iter)
        except StopIteration:
            return None

    def METHOD_NAME(self) -> Union[GraphRelationship, None]:
        """Return the next graph relationship to serialize, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None

    def _create_node_iterator(self) -> Iterator[GraphNode]:
        """
        Create the report node (carrying the report's name and url)
        :return:
        """
        report_node = GraphNode(
            key=self.resource_report_key,
            label=ResourceReport.RESOURCE_REPORT_LABEL,
            attributes={
                ResourceReport.RESOURCE_REPORT_NAME: self.report_name,
                ResourceReport.RESOURCE_REPORT_URL: self.report_url
            }
        )
        yield report_node

    def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
        """
        Create the relation between the resource node and its report node
        :return:
        """
        graph_relationship = GraphRelationship(
            start_key=self.resource_uri,
            start_label=self.resource_label,
            end_key=self.resource_report_key,
            end_label=ResourceReport.RESOURCE_REPORT_LABEL,
            type=ResourceReport.RESOURCE_REPORT_RELATION_TYPE,
            reverse_type=ResourceReport.REPORT_RESOURCE_RELATION_TYPE,
            attributes={}
        )
        yield graph_relationship

    def create_next_atlas_entity(self) -> Union[AtlasEntity, None]:
        """Return the next Atlas entity to serialize, or None when exhausted."""
        try:
            return next(self._atlas_entity_iterator)
        except StopIteration:
            return None

    def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]:
        """Create the Atlas entity mirroring the report node."""
        group_attrs_mapping = [
            (AtlasCommonParams.qualified_name, self.resource_report_key),
            ('name', self.report_name),
            ('url', self.report_url)
        ]
        entity_attrs = get_entity_attrs(group_attrs_mapping)
        entity = AtlasEntity(
            typeName=AtlasCommonTypes.resource_report,
            operation=AtlasSerializedEntityOperation.CREATE,
            relationships=None,
            attributes=entity_attrs,
        )
        yield entity

    def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]:
        """Return the next Atlas relationship to serialize, or None when exhausted."""
        try:
            return next(self._atlas_relation_iterator)
        except StopIteration:
            return None

    def _create_atlas_relation_iterator(self) -> Iterator[AtlasRelationship]:
        """Create the Atlas relationship linking the resource to its report."""
        relationship = AtlasRelationship(
            relationshipType=AtlasRelationshipTypes.referenceable_report,
            entityType1=self.resource_label,
            entityQualifiedName1=self.resource_uri,
            entityType2=AtlasCommonTypes.resource_report,
            entityQualifiedName2=self.resource_report_key,
            attributes={}
        )
        yield relationship
2,074 | get source | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Data Resource Provider implementation.
"""
import os
from pathlib import Path
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyExecutionError, PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.provider import Provider, clouds
from c7n.query import sources
from c7n.registry import PluginRegistry
from c7n.utils import load_file, jmespath_search
@clouds.register("c7n")
class CustodianProvider(Provider):
    """Provider exposing Custodian-internal ("data") resources, which operate
    on local records instead of a cloud API."""
    display_name = "Custodian Core"
    resources = PluginRegistry("policy")
    resource_prefix = "c7n"
    # lazy load chicken sacrifice
    resource_map = {"c7n.data": "c7n.data.Data"}

    def get_session_factory(self, config):
        # Data policies need no cloud credentials.
        return NullSession()

    def initialize(self, options):
        return

    def initialize_policies(self, policy_collection, options):
        return policy_collection
class NullSession:
    """Dummy session: data policies have no cloud session to authenticate with."""
@sources.register('static')
class StaticSource:
    """Data source yielding records embedded directly in the policy's queries."""

    def __init__(self, queries):
        self.queries = queries

    def __iter__(self):
        merged = []
        for query in self.queries:
            merged.extend(query.get("records", ()))
        return iter(merged)

    def validate(self):
        for query in self.queries:
            if not isinstance(query.get("records", None), (list, tuple)):
                raise PolicyValidationError("invalid static data source `records`")
@sources.register('disk')
class DiskSource:
    """Data source loading records from files on disk (a single file, or a
    directory matched by a glob pattern)."""

    def __init__(self, queries):
        self.queries = queries

    def validate(self):
        for q in self.queries:
            if not os.path.exists(q["path"]):
                raise PolicyValidationError("invalid disk path %s" % q)
            if os.path.isdir(q["path"]) and "glob" not in q:
                raise PolicyValidationError("glob pattern required for dir")

    def __iter__(self):
        # Flatten: every record of every matched file of every query.
        for q in self.queries:
            for collection in self.scan_path(
                path=q["path"], resource_key=q.get("key"), glob=q.get("glob")
            ):
                for p in collection:
                    yield p

    def scan_path(self, path, glob, resource_key):
        """Yield a DataFile for `path` itself, or one per glob match under a dir."""
        if os.path.isfile(path):
            yield self.load_file(path, resource_key)
            return
        for path in Path(path).glob(glob):
            yield self.load_file(str(path), resource_key)

    def load_file(self, path, resource_key):
        """Load one file, optionally extracting records via the jmespath
        expression `resource_key`; the result must be a list of records."""
        data = load_file(path)
        if resource_key:
            data = jmespath_search(resource_key, data)
        if not isinstance(data, list):
            raise PolicyExecutionError(
                "found disk records at %s in non list format %s" % (path, type(data))
            )
        return DataFile(path, resource_key, data)
class DataFile:
    """A loaded data file: its path, the jmespath key applied (if any), and
    the list of records it yielded."""

    __slots__ = ("path", "records", "resource_key")

    def __init__(self, path, resource_key, records):
        self.path = path
        self.resource_key = resource_key
        self.records = records

    def __iter__(self):
        yield from self.records
@CustodianProvider.resources.register("data")
class Data(ResourceManager):
    """Resource manager over arbitrary local data: inline ("static") records
    or files on disk."""

    action_registry = ActionRegistry("c7n.data.actions")
    filter_registry = FilterRegistry("c7n.data.filters")

    source_mapping = {"static": StaticSource, "disk": DiskSource}

    def validate(self):
        # "disk" is the default source, mirroring the source getter below.
        if self.data.get("source", "disk") not in self.source_mapping:
            raise PolicyValidationError("invalid source %s" % self.data["source"])
        self.METHOD_NAME().validate()

    def get_resources(self, resource_ids):
        # Local data records have no ids to fetch by.
        return []

    def resources(self):
        with self.ctx.tracer.subsegment("resource-fetch"):
            source = self.METHOD_NAME()
            resources = list(source)
        with self.ctx.tracer.subsegment("filter"):
            resources = self.filter_resources(resources)
        return resources

    def METHOD_NAME(self):
        """Instantiate the configured source ("disk" by default) over the
        policy's query entries."""
        source_type = self.data.get("source", "disk")
        return self.source_mapping[source_type](self.data.get("query", []))
2,075 | test force open completions event | import unittest
from test.test_support import requires
from Tkinter import Tk, Text
import idlelib.AutoComplete as ac
import idlelib.AutoCompleteWindow as acw
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Event
class AutoCompleteWindow:
    """Minimal stub of idlelib's AutoCompleteWindow used by these tests."""

    def complete(self):
        # Fix: the stub method was defined without `self`, so calling it on an
        # instance (as production code would) raised TypeError.
        return
class DummyEditwin:
    """Minimal stand-in for EditorWindow: only the attributes AutoComplete reads."""

    def __init__(self, root, text):
        self.root = root
        self.text = text
        self.indentwidth = self.tabwidth = 8
        self.context_use_ps1 = True
class AutoCompleteTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
cls.text = Text(cls.root)
cls.editor = DummyEditwin(cls.root, cls.text)
@classmethod
def tearDownClass(cls):
del cls.editor, cls.text
cls.root.destroy()
del cls.root
def setUp(self):
self.editor.text.delete('1.0', 'end')
self.autocomplete = ac.AutoComplete(self.editor)
def test_init(self):
self.assertEqual(self.autocomplete.editwin, self.editor)
def test_make_autocomplete_window(self):
testwin = self.autocomplete._make_autocomplete_window()
self.assertIsInstance(testwin, acw.AutoCompleteWindow)
def test_remove_autocomplete_window(self):
self.autocomplete.autocompletewindow = (
self.autocomplete._make_autocomplete_window())
self.autocomplete._remove_autocomplete_window()
self.assertIsNone(self.autocomplete.autocompletewindow)
def METHOD_NAME(self):
# Test that force_open_completions_event calls _open_completions
o_cs = Func()
self.autocomplete.open_completions = o_cs
self.autocomplete.force_open_completions_event('event')
self.assertEqual(o_cs.args, (True, False, True))
def test_try_open_completions_event(self):
Equal = self.assertEqual
autocomplete = self.autocomplete
trycompletions = self.autocomplete.try_open_completions_event
o_c_l = Func()
autocomplete._open_completions_later = o_c_l
# _open_completions_later should not be called with no text in editor
trycompletions('event')
Equal(o_c_l.args, None)
# _open_completions_later should be called with COMPLETE_ATTRIBUTES (1)
self.text.insert('1.0', 're.')
trycompletions('event')
Equal(o_c_l.args, (False, False, False, 1))
# _open_completions_later should be called with COMPLETE_FILES (2)
self.text.delete('1.0', 'end')
self.text.insert('1.0', '"./Lib/')
trycompletions('event')
Equal(o_c_l.args, (False, False, False, 2))
def test_autocomplete_event(self):
Equal = self.assertEqual
autocomplete = self.autocomplete
# Test that the autocomplete event is ignored if user is pressing a
# modifier key in addition to the tab key
ev = Event(mc_state=True)
self.assertIsNone(autocomplete.autocomplete_event(ev))
del ev.mc_state
# If autocomplete window is open, complete() method is called
self.text.insert('1.0', 're.')
# This must call autocomplete._make_autocomplete_window()
Equal(self.autocomplete.autocomplete_event(ev), 'break')
# If autocomplete window is not active or does not exist,
# open_completions is called. Return depends on its return.
autocomplete._remove_autocomplete_window()
o_cs = Func() # .result = None
autocomplete.open_completions = o_cs
Equal(self.autocomplete.autocomplete_event(ev), None)
Equal(o_cs.args, (False, True, True))
o_cs.result = True
Equal(self.autocomplete.autocomplete_event(ev), 'break')
Equal(o_cs.args, (False, True, True))
def test_open_completions_later(self):
    # Test that autocomplete._delayed_completion_id is set
    # TODO(review): placeholder -- assert on the Tk after() id once implemented.
    pass

def test_delayed_open_completions(self):
    # Test that autocomplete._delayed_completion_id set to None and that
    # open_completions only called if insertion index is the same as
    # _delayed_completion_index
    # TODO(review): placeholder test, not yet implemented.
    pass

def test_open_completions(self):
    # Test completions of files and attributes as well as non-completion
    # of errors
    # TODO(review): placeholder test, not yet implemented.
    pass

def test_fetch_completions(self):
    # Test that fetch_completions returns 2 lists:
    # For attribute completion, a large list containing all variables, and
    # a small list containing non-private variables.
    # For file completion, a large list containing all files in the path,
    # and a small list containing files that do not start with '.'
    # TODO(review): placeholder test, not yet implemented.
    pass

def test_get_entity(self):
    # Test that a name is in the namespace of sys.modules and
    # __main__.__dict__
    # TODO(review): placeholder test, not yet implemented.
    pass
if __name__ == '__main__':
    # Run this module's tests directly with verbose per-test output.
    # (Removed a stray trailing "|" artifact that made the line invalid.)
    unittest.main(verbosity=2)
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Module containing command managers (SearchRunner and CommandRunner)."""
import traceback
import re
import contextlib
from typing import TYPE_CHECKING, Callable, Dict, Iterator, Mapping, MutableMapping
from qutebrowser.qt.core import pyqtSlot, QUrl, QObject
from qutebrowser.api import cmdutils
from qutebrowser.commands import cmdexc, parser
from qutebrowser.utils import message, objreg, qtutils, usertypes, utils
from qutebrowser.keyinput import macros, modeman
if TYPE_CHECKING:
from qutebrowser.mainwindow import tabbedbrowser
_ReplacementFunction = Callable[['tabbedbrowser.TabbedBrowser'], str]
last_command = {}
def _url(tabbed_browser):
    """Return the current URL of the given tabbed browser.

    Raises:
        cmdutils.CommandError: If the current URL is invalid; the underlying
            reason from the QtValueError is appended when available.
    """
    try:
        return tabbed_browser.current_url()
    except qtutils.QtValueError as e:
        msg = "Current URL is invalid"
        if e.reason:
            msg += " ({})".format(e.reason)
        msg += "!"
        # Chain the original exception explicitly so the traceback shows the
        # root cause instead of the implicit "during handling ..." context.
        raise cmdutils.CommandError(msg) from e
def _init_variable_replacements() -> Mapping[str, _ReplacementFunction]:
    """Return a dict from variable replacements to fns processing them."""
    replacements: Dict[str, _ReplacementFunction] = {
        'url': lambda tb: _url(tb).toString(
            QUrl.ComponentFormattingOption.FullyEncoded | QUrl.UrlFormattingOption.RemovePassword),
        'url:pretty': lambda tb: _url(tb).toString(
            QUrl.ComponentFormattingOption.DecodeReserved | QUrl.UrlFormattingOption.RemovePassword),
        'url:domain': lambda tb: "{}://{}{}".format(
            _url(tb).scheme(), _url(tb).host(),
            ":" + str(_url(tb).port()) if _url(tb).port() != -1 else ""),
        'url:auth': lambda tb: "{}:{}@".format(
            _url(tb).userName(),
            _url(tb).password()) if _url(tb).userName() else "",
        'url:scheme': lambda tb: _url(tb).scheme(),
        'url:username': lambda tb: _url(tb).userName(),
        'url:password': lambda tb: _url(tb).password(),
        'url:host': lambda tb: _url(tb).host(),
        'url:port': lambda tb: str(
            _url(tb).port()) if _url(tb).port() != -1 else "",
        'url:path': lambda tb: _url(tb).path(),
        'url:query': lambda tb: _url(tb).query(),
        'title': lambda tb: tb.widget.page_title(tb.widget.currentIndex()),
        'clipboard': lambda _: utils.get_clipboard(),
        'primary': lambda _: utils.get_clipboard(selection=True),
    }

    # Also register "{key}" itself as a variable so that "{{url}}" expands
    # to the literal text "{url}" (an escape mechanism).
    for key in list(replacements):
        modified_key = '{' + key + '}'
        # x = modified_key is to avoid binding x as a closure
        replacements[modified_key] = (
            lambda _, x=modified_key: x)  # type: ignore[misc]
    return replacements
# Mapping of variable name -> callable producing its replacement text,
# built once at import time.
VARIABLE_REPLACEMENTS = _init_variable_replacements()
# A regex matching all variable replacements
VARIABLE_REPLACEMENT_PATTERN = re.compile(
    "{(?P<var>" + "|".join(VARIABLE_REPLACEMENTS.keys()) + ")}")
def METHOD_NAME(win_id, arglist):
    """Utility function to replace variables like {url} in a list of args.

    Args:
        win_id: The window id used to look up the tabbed browser.
        arglist: Iterable of argument strings possibly containing variables.

    Returns:
        A new list of arguments with all variables substituted.

    Raises:
        cmdutils.CommandError: If reading the clipboard/primary selection
            fails while expanding a variable.
    """
    tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                window=win_id)
    # Cache of already-computed replacement values so each variable is
    # evaluated at most once per call (e.g. {url} used in several args).
    values: MutableMapping[str, str] = {}
    args = []

    def repl_cb(matchobj):
        """Return replacement for given match."""
        var = matchobj.group("var")
        if var not in values:
            values[var] = VARIABLE_REPLACEMENTS[var](tabbed_browser)
        return values[var]

    try:
        for arg in arglist:
            # using re.sub with callback function replaces all variables in a
            # single pass and avoids expansion of nested variables (e.g.
            # "{url}" from clipboard is not expanded)
            args.append(VARIABLE_REPLACEMENT_PATTERN.sub(repl_cb, arg))
    except utils.ClipboardError as e:
        raise cmdutils.CommandError(e)
    return args
class AbstractCommandRunner(QObject):
    """Abstract base class for CommandRunner."""

    def run(self, text, count=None, *, safely=False):
        """Run a command; must be implemented by subclasses."""
        raise NotImplementedError

    @pyqtSlot(str, int)
    @pyqtSlot(str)
    def run_safely(self, text, count=None):
        """Run a command and display exceptions in the statusbar."""
        self.run(text, count, safely=True)
class CommandRunner(AbstractCommandRunner):
    """Parse and run qutebrowser commandline commands.

    Attributes:
        _win_id: The window this CommandRunner is associated with.
    """

    def __init__(self, win_id, partial_match=False, find_similar=True, parent=None):
        super().__init__(parent)
        self._parser = parser.CommandParser(
            partial_match=partial_match,
            find_similar=find_similar,
        )
        self._win_id = win_id

    @contextlib.contextmanager
    def _handle_error(self, safely: bool) -> Iterator[None]:
        """Show exceptions as errors if safely=True is given."""
        try:
            yield
        except cmdexc.Error as e:
            if safely:
                message.error(str(e), stack=traceback.format_exc())
            else:
                raise

    def run(self, text, count=None, *, safely=False):
        """Parse a command from a line of text and run it.

        Args:
            text: The text to parse.
            count: The count to pass to the command.
            safely: Show CmdError exceptions as messages.
        """
        record_last_command = True
        record_macro = True
        mode_manager = modeman.instance(self._win_id)
        cur_mode = mode_manager.mode
        parsed = None
        with self._handle_error(safely):
            parsed = self._parser.parse_all(text)
        if parsed is None:
            # Parsing failed and was reported via _handle_error(safely=True).
            return  # type: ignore[unreachable]
        for result in parsed:
            with self._handle_error(safely):
                if result.cmd.no_replace_variables:
                    args = result.args
                else:
                    args = METHOD_NAME(self._win_id, result.args)
                result.cmd.run(self._win_id, args, count=count)
            # Repeat commands must not overwrite the "last command" record,
            # otherwise repeating would repeat the repeat itself.
            if result.cmdline[0] in ['repeat-command', 'cmd-repeat-last']:
                record_last_command = False
            # Macro bookkeeping commands must not be recorded into macros.
            if result.cmdline[0] in ['macro-record', 'macro-run', 'set-cmd-text', 'cmd-set-text']:
                record_macro = False
        if record_last_command:
            last_command[cur_mode] = (text, count)
        if record_macro and cur_mode == usertypes.KeyMode.normal:
            macros.macro_recorder.record_command(text, count)
from functools import lru_cache
import logging
import re
from lona import default_settings
ABSTRACT_ROUTE_RE = re.compile(r'<(?P<name>[^:>]+)(:(?P<pattern>[^>]+))?>')
ROUTE_PART_FORMAT_STRING = r'(?P<{}>{})'
DEFAULT_PATTERN = r'[^/]+'
OPTIONAL_TRAILING_SLASH_PATTERN = r'(/)'
MATCH_ALL = 1
logger = logging.getLogger('lona.routing')
class Route:
    """A single URL route: a plain string, an abstract pattern, or MATCH_ALL."""

    def __init__(self, raw_pattern, view, name='', interactive=True,
                 http_pass_through=False, frontend_view=None):
        self.raw_pattern = raw_pattern
        self.view = view
        self.name = name
        self.interactive = interactive
        self.http_pass_through = http_pass_through
        self.frontend_view = frontend_view

        # self.path is set for non-pattern routes (plain strings or
        # MATCH_ALL); pattern routes compile self.pattern instead.
        self.path = None
        self.format_string = ''
        self.optional_trailing_slash = False

        # match all
        if self.raw_pattern == MATCH_ALL:
            self.path = MATCH_ALL

        # string or regex
        else:
            raw_pattern = self.raw_pattern

            # A literal "(/)" suffix marks the trailing slash as optional.
            if raw_pattern.endswith(OPTIONAL_TRAILING_SLASH_PATTERN):
                self.optional_trailing_slash = True

                raw_pattern = \
                    raw_pattern[:-len(OPTIONAL_TRAILING_SLASH_PATTERN)]

            # Find all "<name>" / "<name:pattern>" placeholders.
            groups = ABSTRACT_ROUTE_RE.findall(raw_pattern)

            # path is no pattern but simple string
            if not groups:
                self.path = raw_pattern
                self.format_string = raw_pattern

                return

            # Each group is (name, ':pattern' or '', pattern or '').
            pattern_names = [i[0] for i in groups]
            patterns = [(i[0], i[2] or DEFAULT_PATTERN) for i in groups]
            cleaned_pattern = ABSTRACT_ROUTE_RE.sub('{}', raw_pattern)

            # setup format string (used by Router.reverse to rebuild URLs)
            self.format_string = cleaned_pattern.format(
                *['{' + i + '}' for i in pattern_names])

            # compile pattern: each placeholder becomes a named group
            self.pattern = re.compile(
                r'^{}{}$'.format(  # NOQA: FS002
                    cleaned_pattern.format(
                        *[ROUTE_PART_FORMAT_STRING.format(*i)
                          for i in patterns],
                    ),
                    (r'(/)?'
                     if self.optional_trailing_slash else ''),
                ),
            )

    def match(self, path):
        """Return ``(matched, match_info)`` where match_info maps group names."""
        # match all
        if self.path == MATCH_ALL:
            return True, {}

        # simple string
        if self.path:
            if self.optional_trailing_slash and path.endswith('/'):
                path = path[:-1]

            return path == self.path, {}

        # pattern
        match_object = self.pattern.match(path)

        if not match_object:
            return False, {}

        return True, match_object.groupdict()

    def __repr__(self):
        raw_pattern = self.raw_pattern

        if raw_pattern == MATCH_ALL:
            raw_pattern = 'MATCH_ALL'

        return f'<Route({raw_pattern}, {self.view})>'
class Router:
    """Resolves URL paths to Route objects, with LRU caches for hot lookups."""

    def __init__(self):
        self.routes = []

        # Build the lookup caches with the configured default sizes; they
        # can be resized later via the resize_*_cache() methods.
        self.resize_name_cache(
            default_settings.ROUTING_NAME_CACHE_MAX_SIZE,
        )

        self.resize_resolve_cache(
            default_settings.ROUTING_RESOLVE_CACHE_MAX_SIZE,
        )

        self.resize_reverse_cache(
            default_settings.ROUTING_REVERSE_CACHE_MAX_SIZE,
        )

    # caches ##################################################################
    # name
    def resize_name_cache(self, max_size):
        # Recreating the lru_cache wrapper also discards previous entries.
        self._name_lru_cache = lru_cache(max_size)(self._get_route)

    def METHOD_NAME(self):
        """Return the functools CacheInfo for the name lookup cache."""
        return self._name_lru_cache.cache_info()

    def clear_name_cache_info(self):
        return self._name_lru_cache.cache_clear()

    # resolve
    def resize_resolve_cache(self, max_size):
        self._resolve_lru_cache = lru_cache(max_size)(self._resolve)

    def get_resolve_cache_info(self):
        return self._resolve_lru_cache.cache_info()

    def clear_resolve_cache_info(self):
        return self._resolve_lru_cache.cache_clear()

    # reverse
    def resize_reverse_cache(self, max_size):
        self._reverse_lru_cache = lru_cache(max_size)(self._reverse)

    def get_reverse_cache_info(self):
        return self._reverse_lru_cache.cache_info()

    def clear_reverse_cache_info(self):
        return self._reverse_lru_cache.cache_clear()

    # routes ##################################################################
    def add_route(self, route):
        # check if route name already exists
        if route.name:
            for _route in self.routes:
                if route.name == _route.name:
                    logger.warning(
                        "route name '%s' already exists",
                        route.name,
                    )

        self.routes.append(route)

    def add_routes(self, *routes):
        for route in routes:
            self.add_route(route)

    def _get_route(self, name):
        # Returns None when no route carries the given name.
        for route in self.routes:
            if route.name == name:
                return route

    def get_route(self, *args, **kwargs):
        """Cached lookup of a route by name."""
        return self._name_lru_cache(*args, **kwargs)

    # resolve #################################################################
    def _resolve(self, path):
        """Return ``(matched, route, match_info)`` for the first matching route."""
        logger.debug("resolving '%s'", path)

        for route in self.routes:
            match, match_info = route.match(path)

            if match:
                logger.debug('%s matched', route)

                return True, route, match_info

        logger.debug("no match for '%s'", path)

        return False, None, {}

    def resolve(self, *args, **kwargs):
        """Cached version of _resolve()."""
        return self._resolve_lru_cache(*args, **kwargs)

    # reverse #################################################################
    def _reverse(self, route_name, *args, **kwargs):
        """Build the URL for the named route from the given format args."""
        route = None

        for _route in self.routes:
            if _route.name == route_name:
                route = _route

                break

        if not route:
            raise ValueError(f"no route named '{route_name}' found")

        if route.path:
            return route.path

        try:
            return route.format_string.format(*args, **kwargs)

        except KeyError as e:
            key_error = e

        # raise is outside of except block to avoid stacking tracebacks
        raise ValueError(f'missing URL arg: {key_error.args[0]}')

    def reverse(self, *args, **kwargs):
        """Cached version of _reverse()."""
        return self._reverse_lru_cache(*args, **kwargs)
from math import floor
import numpy as np
from scipy.signal import savgol_coeffs, savgol_filter
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class SavgolFilter(TransformPrimitive):
    """Applies a Savitzky-Golay filter to a list of values.

    Description:
        Given a list of values, return a smoothed list which increases
        the signal to noise ratio without greatly distoring the
        signal. Uses the `Savitzky–Golay filter` method.
        If the input list has less than 20 values, it will be returned
        as is.

        See the following page for more info:
        https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.savgol_filter.html

    Args:
        window_length (int): The length of the filter window (i.e. the number
            of coefficients). `window_length` must be a positive odd integer.
        polyorder (int): The order of the polynomial used to fit the samples.
            `polyorder` must be less than `window_length`.
        deriv (int): Optional. The order of the derivative to compute. This
            must be a nonnegative integer. The default is 0, which means to
            filter the data without differentiating.
        delta (float): Optional. The spacing of the samples to which the filter
            will be applied. This is only used if deriv > 0. Default is 1.0.
        mode (str): Optional. Must be 'mirror', 'constant', 'nearest', 'wrap'
            or 'interp'. This determines the type of extension to use for the
            padded signal to which the filter is applied. When `mode` is
            'constant', the padding value is given by `cval`. See the Notes
            for more details on 'mirror', 'constant', 'wrap', and 'nearest'.
            When the 'interp' mode is selected (the default), no extension
            is used. Instead, a degree `polyorder` polynomial is fit to the
            last `window_length` values of the edges, and this polynomial is
            used to evaluate the last `window_length // 2` output values.
        cval (scalar): Optional. Value to fill past the edges of the input
            if `mode` is 'constant'. Default is 0.0.

    Examples:
        >>> savgol_filter = SavgolFilter()
        >>> data = [0, 1, 1, 2, 3, 4, 5, 7, 8, 7, 9, 9, 12, 11, 12, 14, 15, 17, 17, 17, 20]
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [0.0429, 0.8286, 1.2571]

        We can control `window_length` and `polyorder` of the filter.

        >>> savgol_filter = SavgolFilter(window_length=13, polyorder=3)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [-0.0962, 0.6484, 1.4451]

        We can also control the `deriv` and `delta` parameters.

        >>> savgol_filter = SavgolFilter(deriv=1, delta=1.5)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [0.754, 0.3492, 0.2778]

        Finally, we can use `mode` to control how edge values are handled.

        >>> savgol_filter = SavgolFilter(mode='constant', cval=5)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [1.5429, 0.2286, 1.2571]
    """

    name = "savgol_filter"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def __init__(
        self,
        window_length=None,
        polyorder=None,
        deriv=0,
        delta=1.0,
        mode="interp",
        cval=0.0,
    ):
        if window_length is not None and polyorder is not None:
            if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
                raise ValueError(
                    "mode must be 'mirror', 'constant', "
                    "'nearest', 'wrap' or 'interp'.",
                )
            # Validate the parameter combination eagerly so configuration
            # errors surface at construction time rather than on first use.
            # (The original wrapped this in ``try/except Exception: raise``,
            # a no-op re-raise which has been removed.)
            savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
        elif (window_length is None and polyorder is not None) or (
            window_length is not None and polyorder is None
        ):
            error_text = (
                "Both window_length and polyorder must be defined if you define one."
            )
            raise ValueError(error_text)
        self.window_length = window_length
        self.polyorder = polyorder
        self.deriv = deriv
        self.delta = delta
        self.mode = mode
        self.cval = cval

    def METHOD_NAME(self):
        def smooth(x):
            # Inputs shorter than 20 samples are returned unchanged.
            if x.shape[0] < 20:
                return x
            if np.isnan(np.min(x)):
                # interpolate the nan values, works for edges & middle nans
                mask = np.isnan(x)
                x[mask] = np.interp(
                    np.flatnonzero(mask),
                    np.flatnonzero(~mask),
                    x[~mask],
                )
            window_length = self.window_length
            polyorder = self.polyorder
            if window_length is None and polyorder is None:
                # Default: odd window covering roughly 20% of the input,
                # with a cubic fit.
                window_length = floor(len(x) / 10) * 2 + 1
                polyorder = 3
            return savgol_filter(
                x,
                window_length=window_length,
                polyorder=polyorder,
                deriv=self.deriv,
                delta=self.delta,
                mode=self.mode,
                cval=self.cval,
            )

        return smooth
from time import time
from django.contrib.admin.utils import unquote
from django.core.exceptions import PermissionDenied
from django.http import Http404, JsonResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views.generic import View
from wagtail.admin.panels import get_edit_handler
from wagtail.models import PreviewableMixin, RevisionMixin
from wagtail.utils.decorators import xframe_options_sameorigin_override
class PreviewOnEdit(View):
    """Serve live previews of an existing object while it is being edited.

    POST stores the submitted form data in the session; GET renders the
    preview from that stored data; DELETE discards it.
    """

    model = None
    form_class = None
    http_method_names = ("post", "get", "delete")
    # How long stored preview data stays valid in the session.
    preview_expiration_timeout = 60 * 60 * 24  # seconds
    session_key_prefix = "wagtail-preview-"

    def setup(self, request, *args, **kwargs):
        super().setup(request, *args, **kwargs)
        self.object = self.get_object()

    def dispatch(self, request, *args, **kwargs):
        # Only previewable objects can be served by this view.
        if not isinstance(self.object, PreviewableMixin):
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    def remove_old_preview_data(self):
        """Drop session entries whose stored timestamp has expired."""
        expiration = time() - self.preview_expiration_timeout
        expired_keys = [
            k
            for k, v in self.request.session.items()
            if k.startswith(self.session_key_prefix) and v[1] < expiration
        ]
        # Removes the session key gracefully
        for k in expired_keys:
            self.request.session.pop(k)

    @property
    def session_key(self):
        # Key on app label, model name and pk so previews of different
        # objects do not collide.
        app_label = self.model._meta.app_label
        model_name = self.model._meta.model_name
        unique_key = f"{app_label}-{model_name}-{self.object.pk}"
        return f"{self.session_key_prefix}{unique_key}"

    def get_object(self):
        obj = get_object_or_404(self.model, pk=unquote(self.kwargs["pk"]))
        if isinstance(obj, RevisionMixin):
            # Preview the latest draft rather than the live object.
            obj = obj.get_latest_revision_as_object()
        return obj

    def get_form_class(self):
        if self.form_class:
            return self.form_class
        return get_edit_handler(self.model).get_form_class()

    def get_form(self, query_dict):
        form_class = self.get_form_class()
        if not query_dict:
            # Query dict is empty, return null form
            return form_class(instance=self.object, for_user=self.request.user)
        return form_class(query_dict, instance=self.object, for_user=self.request.user)

    def _get_data_from_session(self):
        # Session stores (urlencoded_post_data, timestamp) tuples.
        post_data, _ = self.request.session.get(self.session_key, (None, None))
        if not isinstance(post_data, str):
            post_data = ""
        return QueryDict(post_data)

    def post(self, request, *args, **kwargs):
        """Store submitted form data and report whether a preview is available."""
        self.remove_old_preview_data()
        form = self.get_form(request.POST)
        is_valid = form.is_valid()
        if is_valid:
            # TODO: Handle request.FILES.
            request.session[self.session_key] = request.POST.urlencode(), time()
            is_available = True
        else:
            # Check previous data in session to determine preview availability
            form = self.get_form(self._get_data_from_session())
            is_available = form.is_valid()
        return JsonResponse({"is_valid": is_valid, "is_available": is_available})

    def error_response(self):
        return TemplateResponse(
            self.request,
            "wagtailadmin/generic/preview_error.html",
            {"object": self.object},
        )

    @method_decorator(xframe_options_sameorigin_override)
    def get(self, request, *args, **kwargs):
        """Render the preview from the form data stored in the session."""
        form = self.get_form(self._get_data_from_session())
        if not form.is_valid():
            return self.error_response()
        # Apply form data to the in-memory object without saving it.
        form.save(commit=False)
        try:
            preview_mode = request.GET.get("mode", self.object.default_preview_mode)
        except IndexError:
            # NOTE(review): default_preview_mode presumably raises IndexError
            # when the object defines no preview modes -- confirm upstream.
            raise PermissionDenied
        extra_attrs = {
            "in_preview_panel": request.GET.get("in_preview_panel") == "true",
            "is_editing": True,
        }
        return self.object.make_preview_request(request, preview_mode, extra_attrs)

    def delete(self, request, *args, **kwargs):
        """Discard any stored preview data for this object."""
        request.session.pop(self.session_key, None)
        return JsonResponse({"success": True})
class PreviewOnCreate(PreviewOnEdit):
    """Preview view for objects that do not exist in the database yet."""

    @property
    def session_key(self):
        # No pk exists before creation, so the key is built from the
        # app label and model name alone.
        meta = self.model._meta
        return "".join(
            [self.session_key_prefix, meta.app_label, "-", meta.model_name]
        )

    def get_object(self):
        # A fresh, unsaved instance serves as the preview subject.
        return self.model()
class PreviewRevision(View):
    """Serve a read-only preview of a specific stored revision of an object."""

    model = None
    http_method_names = ("get",)

    def setup(self, request, pk, revision_id, *args, **kwargs):
        super().setup(request, *args, **kwargs)
        self.pk = pk
        self.revision_id = revision_id
        self.object = self.get_object()
        self.revision_object = self.METHOD_NAME()

    def get_object(self):
        # Only models with revision support can be previewed by revision.
        if not issubclass(self.model, RevisionMixin):
            raise Http404
        return get_object_or_404(self.model, pk=unquote(self.pk))

    def METHOD_NAME(self):
        """Return the requested revision materialised as a model instance."""
        revision = get_object_or_404(self.object.revisions, id=self.revision_id)
        return revision.as_object()

    def get(self, request, *args, **kwargs):
        try:
            preview_mode = request.GET.get(
                "mode", self.revision_object.default_preview_mode
            )
        except IndexError:
            # NOTE(review): default_preview_mode presumably raises IndexError
            # when no preview modes are defined -- confirm upstream.
            raise PermissionDenied
        return self.revision_object.make_preview_request(request, preview_mode)
# Python
import pytest
from unittest import mock
from contextlib import contextmanager
from awx.main.models import Credential, UnifiedJob, Instance
from awx.main.tests.factories import (
create_organization,
create_job_template,
create_instance,
create_instance_group,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
from django.core.cache import cache
from django.conf import settings
def pytest_addoption(parser):
    """Register the --genschema command line flag with pytest."""
    parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator")
def pytest_configure(config):
    """Flag the interpreter as running under the test suite.

    Application code can check ``sys._called_from_test`` to adjust
    behaviour while tests run; set it before any test executes.
    """
    import sys

    setattr(sys, '_called_from_test', True)
def pytest_unconfigure(config):
    """Remove the test-run marker set by pytest_configure()."""
    import sys

    delattr(sys, '_called_from_test')
@pytest.fixture
def mock_access():
    """Return a context-manager factory that patches the access registry.

    ``access_given_class(TowerClass)`` replaces the registry entry for
    *TowerClass* with a MagicMock and yields the mocked per-request access
    instance for assertions.
    """

    @contextmanager
    def access_given_class(TowerClass):
        mock_instance = mock.MagicMock(__name__='foobar')
        MockAccess = mock.MagicMock(return_value=mock_instance)
        # Use the patcher as a context manager instead of calling
        # __enter__()/__exit__() by hand: the original called __exit__()
        # with no arguments from a finally block, discarding any active
        # exception info the patcher should receive.
        with mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False):
            yield mock_instance

    return access_given_class
@pytest.fixture
def job_template_factory():
    """Factory fixture for building job templates (see tests.factories)."""
    return create_job_template


@pytest.fixture
def organization_factory():
    """Factory fixture for building organizations."""
    return create_organization


@pytest.fixture
def notification_template_factory():
    """Factory fixture for building notification templates."""
    return create_notification_template


@pytest.fixture
def survey_spec_factory():
    """Factory fixture for building survey specs."""
    return create_survey_spec


@pytest.fixture
def instance_factory():
    """Factory fixture for building instances."""
    return create_instance


@pytest.fixture
def instance_group_factory():
    """Factory fixture for building instance groups."""
    return create_instance_group
def controlplane_instance_group(instance_factory, instance_group_factory):
"""There always has to be a controlplane instancegroup and at least one instance in it"""
return create_instance_group(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, create_instance('hybrid-1', node_type='hybrid', capacity=500))
@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
return create_instance_group("default", instances=[create_instance("hostA", node_type='execution')])
@pytest.fixture
def control_instance():
'''Control instance in the controlplane automatic IG'''
inst = create_instance('control-1', node_type='control', capacity=500)
return inst
@pytest.fixture
def control_instance_low_capacity():
'''Control instance in the controlplane automatic IG that has low capacity'''
inst = create_instance('control-1', node_type='control', capacity=5)
return inst
@pytest.fixture
def METHOD_NAME():
'''Execution node in the automatic default IG'''
ig = create_instance_group('default')
inst = create_instance('receptor-1', node_type='execution', capacity=500)
ig.instances.add(inst)
return inst
@pytest.fixture
def hybrid_instance():
'''Hybrid node in the default controlplane IG'''
inst = create_instance('hybrid-1', node_type='hybrid', capacity=500)
return inst
@pytest.fixture
def job_template_with_survey_passwords_factory(job_template_factory):
    """Factory fixture for a JT whose survey contains password questions."""

    def rf(persisted):
        "Returns job with linked JT survey with password survey questions"
        objects = job_template_factory(
            'jt',
            organization='org1',
            survey=[
                {'variable': 'submitter_email', 'type': 'text', 'default': 'foobar@redhat.com'},
                {'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'},
                {'variable': 'SSN', 'type': 'password'},
            ],
            persisted=persisted,
        )
        return objects.job_template

    return rf
@pytest.fixture
def job_with_secret_key_unit(job_with_secret_key_factory):
    # NOTE(review): job_with_secret_key_factory is expected to be defined
    # elsewhere (plugin/other conftest) -- not visible in this module.
    return job_with_secret_key_factory(persisted=False)


@pytest.fixture
def workflow_job_template_factory():
    """Factory fixture for building workflow job templates."""
    return create_workflow_job_template


@pytest.fixture
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
    """Non-persisted JT with password survey questions."""
    return job_template_with_survey_passwords_factory(persisted=False)
@pytest.fixture
def mock_cache():
    """In-memory stand-in exposing a minimal cache API (get/set/delete)."""

    class MockCache(object):
        # Class-level dict: shared by all instances made from this class.
        cache = {}

        def get(self, key, default=None):
            return self.cache.get(key, default)

        def set(self, key, value, timeout=60):
            # ``timeout`` accepted for API compatibility but ignored.
            self.cache[key] = value

        def delete(self, key):
            # NOTE(review): raises KeyError for missing keys, unlike
            # Django's cache.delete() -- confirm callers expect this.
            del self.cache[key]

    return MockCache()
def pytest_runtest_teardown(item, nextitem):
    """Clear the Django cache after every test so tests start clean."""
    # clear Django cache at the end of every test ran
    # NOTE: this should not be memcache (as it is deprecated), nor should it be redis.
    # This is a local test cache, so we want every test to start with an empty cache
    cache.clear()
@pytest.fixture(scope='session', autouse=True)
def mock_external_credential_input_sources():
    # Credential objects query their related input sources on initialization.
    # We mock that behavior out of credentials by default unless we need to
    # test it explicitly.
    with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
        yield _fixture


@pytest.fixture(scope='session', autouse=True)
def mock_has_unpartitioned_events():
    # has_unpartitioned_events determines if there are any events still
    # left in the old, unpartitioned job events table. In order to work,
    # this method looks up when the partition migration occurred. When
    # Django's unit tests run, however, there will be no record of the migration.
    # We mock this out to circumvent the migration query.
    with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
        yield _fixture


@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
    """
    SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the
    job_created field. That field does not actually exist in a non-partition scenario.
    """

    def event_qs(self):
        # Filter only by the parent key; no job_created involved.
        kwargs = {self.event_parent_key: self.id}
        return self.event_class.objects.filter(**kwargs)

    with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
        yield _fixture


@pytest.fixture
def mock_me():
    """Patch Instance.objects.me() to return a fixed local instance."""
    me_mock = mock.MagicMock(return_value=Instance(id=1, hostname=settings.CLUSTER_HOST_ID, uuid='00000000-0000-0000-0000-000000000000'))
    with mock.patch.object(Instance.objects, 'me', me_mock):
        yield
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetApiPortalResult',
'AwaitableGetApiPortalResult',
'get_api_portal',
'get_api_portal_output',
]
@pulumi.output_type
class GetApiPortalResult:
"""
API portal resource
"""
def __init__(__self__, id=None, name=None, properties=None, sku=None, system_data=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ApiPortalPropertiesResponse':
"""
API portal properties payload
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Sku of the API portal resource
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetApiPortalResult(GetApiPortalResult):
    """Awaitable wrapper so the result can be used with ``await``."""

    # pylint: disable=using-constant-test
    def __await__(self):
        # The value is already resolved; the dead ``if False: yield`` makes
        # this method a generator as required by the await protocol.
        if False:
            yield self
        return GetApiPortalResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            system_data=self.system_data,
            METHOD_NAME=self.METHOD_NAME)
def get_api_portal(api_portal_name: Optional[str] = None,
                   resource_group_name: Optional[str] = None,
                   service_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiPortalResult:
    """
    Get the API portal and its properties.


    :param str api_portal_name: The name of API portal.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    """
    __args__ = dict()
    __args__['apiPortalName'] = api_portal_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function and map the raw result onto the typed
    # awaitable wrapper.
    __ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230701preview:getApiPortal', __args__, opts=opts, typ=GetApiPortalResult).value
    return AwaitableGetApiPortalResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))


@_utilities.lift_output_func(get_api_portal)
def get_api_portal_output(api_portal_name: Optional[pulumi.Input[str]] = None,
                          resource_group_name: Optional[pulumi.Input[str]] = None,
                          service_name: Optional[pulumi.Input[str]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiPortalResult]:
    """
    Get the API portal and its properties.


    :param str api_portal_name: The name of API portal.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    """
    # Body supplied by the lift_output_func decorator.
    ...
# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
from __future__ import annotations
import zlib
from unicodedata import normalize
import pytest
from parsec._parsec import (
DataError,
DeviceID,
DeviceName,
EntryName,
EntryNameError,
HumanHandle,
OrganizationID,
SecretKey,
UserID,
)
from parsec._parsec import (
FileManifest as RemoteFileManifest,
)
from parsec._parsec import (
FolderManifest as RemoteFolderManifest,
)
from parsec._parsec import (
UserManifest as RemoteUserManifest,
)
from parsec._parsec import (
WorkspaceManifest as RemoteWorkspaceManifest,
)
from parsec.serde import packb
from tests.common import LocalDevice
@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
@pytest.mark.parametrize(
    "data",
    (
        "!x",  # Invalid character
        " x",  # Invalid character
        "x" * 33,  # Too long
        # Sinogram encoded on 3 bytes with utf8, so those 11 characters
        # form a 33 bytes long utf8 string !
        "飞" * 11,
        "😀",  # Not a unicode word
        "",
    ),
)
def test_max_bytes_size(cls, data):
    """Identifier types must reject invalid or oversized (>32 bytes) values."""
    with pytest.raises(ValueError):
        cls(data)


@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
def test_normalization(cls):
    """Identifier types must normalize their input to Unicode NFC form."""
    nfc_str = normalize("NFC", "àæßšūÿź")  # cspell: disable-line
    nfd_str = normalize("NFD", nfc_str)
    assert nfc_str != nfd_str
    assert cls(nfd_str).str == nfc_str
    assert cls(nfc_str).str == nfc_str
    assert cls(nfc_str + nfd_str).str == nfc_str + nfc_str


@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
@pytest.mark.parametrize(
    "data", ("x", "x" * 32, "飞" * 10 + "xx", "X1-_é飞")  # 32 bytes long utf8 string  # Mix-and-match
)
def test_good_pattern(cls, data):
    """Identifier types must accept valid values up to 32 utf8 bytes."""
    cls(data)
@pytest.mark.parametrize(
    "data",
    (
        "!x@x",  # Invalid character
        "x@ ",  # Invalid character
        "x" * 66,  # Too long
        # Sinogram encoded on 3 bytes with utf8, so those 22 characters
        # form a 66 bytes long utf8 string !
        "飞" * 22,
        "😀@x",  # Not a unicode word
        "x",  # Missing @ separator
        "@x",
        "x@",
        "x" * 62 + "@x",  # Respect overall length but not UserID length
        "x@" + "x" * 62,  # Respect overall length but not DeviceName length
        "",
    ),
)
def test_max_bytes_size_device_id(data):
    # DeviceID is "<UserID>@<DeviceName>": both halves and the combined
    # value are size- and pattern-checked.
    with pytest.raises(ValueError):
        DeviceID(data)


@pytest.mark.parametrize(
    "data",
    (
        "x@x",
        "x" * 32 + "@" + "x" * 32,
        "飞" * 10 + "xx@xx" + "飞" * 10,  # 65 bytes long utf8 string
        "X1-_é飞@X1-_é飞",  # Mix-and-match
    ),
)
def test_good_pattern_device_id(data):
    # Well-formed "<user>@<device>" values must construct without raising.
    DeviceID(data)
def test_human_handle_compare():
    # Handles with the same email compare equal even when labels differ,
    # i.e. equality is keyed on the email part.
    a = HumanHandle(email="alice@example.com", label="Alice")
    a2 = HumanHandle(email="alice@example.com", label="Whatever")
    b = HumanHandle(email="bob@example.com", label="Bob")
    assert a == a2
    assert a != b
    assert b == b


@pytest.mark.parametrize(
    "email,label",
    (
        ("alice@example.com", "Alice"),
        ("a@x", "A"),  # Smallest size
        (f"{'a' * 64}@{'x' * 185}.com", "x" * 254),  # Max sizes
        (f"{'飞' * 21}@{'飞' * 62}.com", f"{'飞' * 84}xx"),  # Unicode & max size
        ("john.doe@example.com", "J.D."),
    ),
)
def test_valid_human_handle(email, label):
    # Valid (email, label) pairs must construct without raising.
    HumanHandle(email, label)


@pytest.mark.parametrize(
    "email,label",
    (
        ("alice@example.com", "x" * 255),
        (f"{'@example.com':a>255}", "Alice"),
        ("alice@example.com", "飞" * 85),  # 255 bytes long utf8 label
        (f"{'飞' * 21}@{'飞' * 63}.x", "Alice"),  # 255 bytes long utf8 email
        ("alice@example.com", ""),  # Empty label
        ("", "Alice"),  # Empty email
        ("", "Alice <alice@example.com>"),  # Empty email and misleading label
        ("Alice <alice@example.com>", ""),  # Empty label and misleading label
        ("Alice <@example.com>", "Alice"),  # Missing local part in email
    ),
)
def METHOD_NAME(email, label):
    # Oversized parts, empty parts and malformed emails are all rejected.
    with pytest.raises(ValueError):
        HumanHandle(email, label)


def test_human_handle_normalization():
    # Both the email and the label are NFC-normalized on construction.
    nfc_label = normalize("NFC", "àæßšūÿź")  # cspell: disable-line
    nfd_label = normalize("NFD", nfc_label)
    nfc_email = normalize("NFC", "àæßš@ūÿ.ź")  # cspell: disable-line
    nfd_email = normalize("NFD", nfc_email)
    assert nfc_label != nfd_label
    assert nfc_email != nfd_email

    hh = HumanHandle(nfd_email, nfd_label)
    assert hh.email == nfc_email
    assert hh.label == nfc_label

    hh = HumanHandle(nfc_email, nfc_label)
    assert hh.email == nfc_email
    assert hh.label == nfc_label
@pytest.mark.parametrize(
    "data",
    (
        "foo",
        "foo.txt",
        "x" * 255,  # Max size
        "飞" * 85,  # Unicode & max size
        "X1-_é飞",
        "🌍☄️==🦕🦖💀",  # Probably a bad name for a real folder...
        ".a",  # Dot and dot-dot are allowed if they are not alone
        "..a",
        "a..",
        "a.",
    ),
)
def test_valid_entry_name(data):
    # Valid entry names must construct without raising.
    EntryName(data)


@pytest.mark.parametrize("data", ("x" * 256, "飞" * 85 + "x"))
def test_entry_name_too_long(data):
    # The limit is 255 bytes of UTF-8, not 255 characters.
    with pytest.raises(EntryNameError):
        EntryName(data)


@pytest.mark.parametrize(
    "data",
    (
        ".",  # Not allowed
        "..",  # Not allowed
        "/x",  # Slash not allowed
        "x/x",
        "x/",
        "/",
        "\x00x",  # Null-byte not allowed
        "x\x00x",
        "x\x00",
        "\x00",
    ),
)
def test_invalid_entry_name(data):
    # Lone dots, path separators and NUL bytes are rejected anywhere.
    with pytest.raises(ValueError):
        EntryName(data)


def test_entry_name_normalization():
    # Entry names are NFC-normalized on construction.
    nfc_str = normalize(
        "NFC", "àáâäæãåāçćčèéêëēėęîïíīįìłñńôöòóœøōõßśšûüùúūÿžźż"  # cspell: disable-line
    )
    nfd_str = normalize("NFD", nfc_str)
    assert nfc_str != nfd_str
    assert EntryName(nfd_str).str == nfc_str
    assert EntryName(nfc_str).str == nfc_str
    assert EntryName(nfc_str + nfd_str).str == nfc_str + nfc_str
def test_remote_manifests_load_invalid_data(alice: LocalDevice):
    # Every remote manifest class must raise DataError for each corruption
    # layer: empty payload, non-zlib garbage, valid zlib wrapping invalid
    # msgpack, and valid msgpack carrying unknown fields.
    key = SecretKey.generate()
    valid_zip_msgpack_but_bad_fields = zlib.compress(packb({"foo": 42}))
    valid_zip_bud_bad_msgpack = zlib.compress(b"dummy")
    invalid_zip = b"\x42" * 10

    for cls in (
        RemoteFileManifest,
        RemoteFolderManifest,
        RemoteWorkspaceManifest,
        RemoteUserManifest,
    ):
        print(f"Testing class {cls.__name__}")
        with pytest.raises(DataError):
            cls.decrypt_verify_and_load(
                b"",
                key=key,
                author_verify_key=alice.verify_key,
                expected_author=alice.device_id,
                expected_timestamp=alice.timestamp(),
            )
        with pytest.raises(DataError):
            cls.decrypt_verify_and_load(
                invalid_zip,
                key=key,
                author_verify_key=alice.verify_key,
                expected_author=alice.device_id,
                expected_timestamp=alice.timestamp(),
            )
        with pytest.raises(DataError):
            cls.decrypt_verify_and_load(
                valid_zip_bud_bad_msgpack,
                key=key,
                author_verify_key=alice.verify_key,
                expected_author=alice.device_id,
                expected_timestamp=alice.timestamp(),
            )
        # Valid to deserialize, invalid fields
        with pytest.raises(DataError):
            cls.decrypt_verify_and_load(
                valid_zip_msgpack_but_bad_fields,
                key=key,
                author_verify_key=alice.verify_key,
                expected_author=alice.device_id,
                expected_timestamp=alice.timestamp(),
            )
2,083 | test out file | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
import os
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
from googletest.test import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" file="gtest_xml_outfile1_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
<property name="TearDownProp" value="1"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" file="gtest_xml_outfile2_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
<property name="TearDownProp" value="2"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Removes any leftover report files and the output directory itself."""
    for test_name in (GTEST_OUTPUT_1_TEST, GTEST_OUTPUT_2_TEST):
      try:
        os.remove(os.path.join(self.output_dir_, test_name + ".xml"))
      except OSError:  # os.error is a deprecated alias of OSError
        pass
    try:
      os.rmdir(self.output_dir_)
    except OSError:
      pass

  def testOutfile1(self):
    self.METHOD_NAME(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self.METHOD_NAME(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def METHOD_NAME(self, test_name, expected_xml):
    """Runs *test_name* with XML output directed at output_dir_ and checks
    the produced report against *expected_xml*.

    Accepts either the plain report name or the libtool-prefixed ("lt-")
    variant, since libtool builds rename the executable.
    """
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    # assert_/assertEquals were deprecated aliases, removed in Python 3.12;
    # use the canonical assertTrue/assertEqual instead.
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)

    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assertTrue(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                    output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main() |
2,084 | replace tests | #!/usr/bin/env python3
# NUnit test validator for csplugin tasks
import json
import os
import re
import sys
from subprocess import call, DEVNULL
def replace_all(lines, s1, s2):
    """Replace every regex match of *s1* with *s2* in each line, in place.

    :param lines: list of strings, mutated in place
    :param s1: regular-expression pattern to search for
    :param s2: replacement string (may use backreferences)
    """
    # The original also did `s = lines[i]` without ever using it; the dead
    # assignment is dropped.
    for i, line in enumerate(lines):
        lines[i] = re.sub(s1, s2, line)
def replace_by(lines, instructions):
    """Apply every substitution rule from ``instructions["replace"]`` to
    *lines*, mutating the list in place.

    Each rule is a dict with a ``sub`` regex and a ``by`` replacement;
    rules without a ``sub`` pattern are skipped.
    """
    rules = instructions.get("replace", None)
    if not rules:
        return
    for rule in rules:
        pattern = rule.get("sub", "")
        if not pattern:
            continue
        replacement = rule.get("by", "")
        for idx in range(len(lines)):
            lines[idx] = re.sub(pattern, replacement, lines[idx])
def find_test(lines, testname):
    """Locate a C# test method whose header line matches the *testname* regex.

    Returns ``(start, end)``: *start* is the index of the line just before
    the matching header (the attribute line) and *end* is the index of the
    line holding the balancing closing brace.  ``(-1, -1)`` when *testname*
    is falsy or no match/close is found.
    """
    if not testname:
        return -1, -1
    header = re.compile(testname)
    start = end = -1
    depth = 0
    for idx, line in enumerate(lines):
        if header.match(line):
            # Remember the attribute line preceding the header; brace
            # counting starts on the following lines.
            start = idx - 1
            continue
        if start < 0:
            continue
        if "{" in line:
            depth += 1
        if "}" in line:
            end = idx
            depth -= 1
            if depth <= 0:
                break
    return start, end
def METHOD_NAME(lines, test):
    """Expand each templated test method into one concrete method per call.

    For every descriptor in *test*, the method located via ``replaceline``
    is cut out of *lines* and re-inserted once per ``bycalls`` entry, with
    the ``replacecall`` pattern substituted by the concrete call.  When
    ``byline`` is given, the method header is rewritten to a generated
    unique name that is stored back as ``tst["name"]`` so count_points()
    can later grep the NUnit XML for it.
    """
    n = 1  # suffix counter making generated method names unique
    for t in test:
        i1, i2 = find_test(lines, t.get("replaceline", None))
        if i1 < 0 or i2 < 0:
            continue
        # Cut the template method (attribute line through closing brace).
        tlines = lines[i1 : i2 + 1]
        del lines[i1 : i2 + 1]
        replacecall = t.get("replacecall", "")
        byline = t.get("byline", "")
        for tst in t.get("bycalls", []):
            tmethod = list(tlines)  # fresh copy of the template per call
            tc = tst.get("call", "")
            # tr is only used by the commented-out variant below.
            tr = tst.get("result", "")
            if byline:
                # "xxxx<n>" marks generated methods; count_points() relies
                # on this marker to find the matching result row.
                tst["name"] = tc + "xxxx" + str(n)
                tmethod[1] = byline + tst["name"] + "()\n"
                n += 1
            # replace_all(tmethod, replacecall, tr)
            replace_all(tmethod, replacecall, tc)
            # Re-insert the generated method at the original cut position.
            lines[i1:i1] = tmethod
def count_points(lines, test):
    """Sum up points by matching each expected call result against the
    NUnit XML report lines.

    A full match awards ``pts`` (default 1); a mismatch awards the
    descriptor's ``wrong`` value (default 0) and prints an explanation.
    """
    total = 0
    for group in test:
        for case in group.get("bycalls", []):
            name = case.get("call", "XXXX")
            expected = case.get("result", "XXXX")
            # NUnit 3 uses "Passed" instead of "Success" and "Failed"
            # instead of "Failure".
            expected = {"Success": "Passed", "Failure": "Failed"}.get(expected, expected)
            expl = case.get("expl", "???")
            pts = case.get("pts", 1)
            # The generated method name carries the "xxxx" marker.
            matches = [s for s in lines if (name + "xxxx") in s]
            if not matches:
                continue
            row = matches[0]
            marker = 'result="'
            begin = row.find(marker)
            if begin < 0:
                continue
            rest = row[begin + len(marker):]
            stop = rest.find('"')
            if stop < 0:
                continue
            actual = rest[:stop]
            if actual == expected:
                total += pts
            else:
                pts = case.get("wrong", 0)
                total += pts
                print(
                    expl
                    + ": pitäisi tulla "
                    + expected
                    + ", tuli: "
                    + actual
                    + ". Pisteitä:",
                    pts,
                )
    return total
def scale_points(pts, points):
    """Map a raw point count onto the configured scale.

    *points* is an ordered list of ``{"from": threshold, "p": value}``
    steps; the result is the value of the last step whose threshold is
    <= *pts*.  Without a scale the raw count is returned unchanged.
    """
    if not points:
        return pts
    scaled = 0
    for step in points:
        if pts < step.get("from", 0):
            return scaled
        scaled = step.get("p", pts)
    return scaled
GLOBAL_NUGET_PACKAGES_PATH = "/cs_data/dotnet/nuget_cache"


def get_build_refs(ref_type):
    """Read the dependency list for *ref_type* and return the matching
    csc ``-r:<assembly path>`` compiler arguments."""
    config_path = f"/cs_data/dotnet/configs/{ref_type}.build.deps"
    with open(config_path, encoding="utf-8") as deps_file:
        dep_paths = [
            os.path.join(GLOBAL_NUGET_PACKAGES_PATH, line.strip())
            for line in deps_file
        ]
    return [f"-r:{path}" for path in dep_paths]
def main():
    """Build and run an NUnit test assembly described by a JSON instruction
    file, then print the scaled points.

    argv[1]: C# source file to transform; argv[2]: JSON instruction file.
    """
    filename = sys.argv[1]
    filename2 = sys.argv[2]
    filename3 = "T" + filename
    # Context managers close the handles promptly (the originals leaked
    # every handle they opened).
    with open(filename) as source_file:
        lines = source_file.readlines()
    with open(filename2) as instructions_file:
        instructions = json.load(instructions_file)
    insertfile = instructions.get("insert", None)
    insert = ""
    if insertfile:
        with open(insertfile) as extra:
            insert = extra.read()
    replace_by(lines, instructions)
    METHOD_NAME(lines, instructions.get("test", None))
    with open(filename3, "w") as out:
        out.writelines(lines)
        if insert:
            out.write(insert)
    args1 = [
        "/cs/dotnet/csc",
        "-nologo",
        f"-out:{filename3}.dll",
        "-target:library",
        *get_build_refs("nunit_test"),
        *get_build_refs("jypeli"),
        filename3,
    ]
    for sourceFile in instructions.get("sourceFiles", []):
        args1.append(sourceFile)
    ret = call(args1)
    if ret != 0:
        print("Testikoodi ei käänny")
        return
    args = ["/cs/dotnet/nunit-test-dll", f"{filename3}.dll"]
    ret = call(args, stdout=DEVNULL, stderr=DEVNULL, timeout=20)
    # https://docs.nunit.org/articles/nunit/running-tests/Console-Runner.html
    if ret < 0:
        # Keep going as before: a missing TestResult.xml will then fail loudly.
        print("Testikoodia ei voi ajaa")
    with open("TestResult.xml") as results:
        xml = results.readlines()
    points = count_points(xml, instructions.get("test", None))
    points = scale_points(points, instructions.get("points", None))
    print("Points: " + f"{points:.2f}")
2,085 | locals example defined before | # pylint: disable=missing-docstring, invalid-name, too-few-public-methods, import-outside-toplevel, fixme, line-too-long, broad-exception-raised
# NOTE: this is a pylint functional-test fixture; the "# [message-id]"
# trailing markers are the expected emissions and must not be edited.
def test_regression_737():
    import xml  # [unused-import]


def test_regression_923():
    import unittest.case  # [unused-import]
    import xml as sql  # [unused-import]


def test_unused_with_prepended_underscore():
    # Names matching the dummy-variables convention (leading underscore,
    # "dummy", "_") are exempt; dunder-wrapped names are not.
    _foo = 42
    _ = 24
    __a = 24
    dummy = 24
    _a_ = 42  # [unused-variable]
    __a__ = 24  # [unused-variable]
    __never_used = 42


def test_local_field_prefixed_with_unused_or_ignored():
    # "unused_"/"ignored_" prefixes suppress the message; other names do not.
    flagged_local_field = 42  # [unused-variable]
    unused_local_field = 42
    ignored_local_field = 42


class HasUnusedDunderClass:
    def test(self):
        __class__ = 42  # [unused-variable]

    def best(self):
        self.test()
def METHOD_NAME():
    # locals() after the assignment captures the binding, so this is only
    # *possibly* unused (pylint fixture marker below).
    value = 42  # [possibly-unused-variable]
    return locals()


def locals_example_defined_after():
    # locals() is snapshotted before the assignment, so value is unused.
    local_variables = locals()
    value = 42  # [unused-variable]
    return local_variables


def locals_does_not_account_for_subscopes():
    # A locals() call in a nested scope does not consume outer names.
    value = 42  # [unused-variable]

    def some_other_scope():
        return locals()

    return some_other_scope


def unused_import_from():
    from functools import wraps as abc  # [unused-import]
    from collections import namedtuple  # [unused-import]


def unused_import_in_function(value):
    # digits is used below; hexdigits is not.
    from string import digits, hexdigits  # [unused-import]

    return value if value in digits else "Nope"


def hello(arg):
    my_var = 'something'  # [unused-variable]
    if arg:
        return True
    raise Exception
# pylint: disable=wrong-import-position
# Module-level names rebound via `global` inside test_global() below.
PATH = OS = collections = deque = None


def function(matches):
    """"yo"""
    aaaa = 1  # [unused-variable]
    index = -1
    for match in matches:
        index += 1
        print(match)


from astroid import nodes


def visit_if(self, node: nodes.If) -> None:
    """increments the branches counter"""
    branches = 1
    # don't double count If nodes coming from some 'elif'
    if node.orelse and len(node.orelse) > 1:
        branches += 1
    self.inc_branch(branches)
    self.stmts += branches


def test_global():
    """ Test various assignments of global
    variables through imports.
    """
    # pylint: disable=redefined-outer-name
    global PATH, OS, collections, deque  # [global-statement]
    from os import path as PATH
    import os as OS
    import collections
    from collections import deque

    # make sure that these triggers unused-variable
    from sys import platform  # [unused-import]
    from sys import version as VERSION  # [unused-import]
    import this  # [unused-import]
    import re as RE  # [unused-import]
# test cases that include exceptions
def function2():
    unused = 1  # [unused-variable]
    try:
        1 / 0
    except ZeroDivisionError as error:
        try:
            1 / 0
        except ZeroDivisionError as error:  # [redefined-outer-name]
            raise Exception("") from error


def func():
    # The outer handler's binding is *used* as the inner except clause.
    try:
        1 / 0
    except ZeroDivisionError as error:
        try:
            1 / 0
        except error:
            print("error")


def func2():
    # The outer binding is used in the `raise ... from error` chain.
    try:
        1 / 0
    except ZeroDivisionError as error:
        try:
            1 / 0
        except:
            raise Exception("") from error


def func3():
    try:
        1 / 0
    except ZeroDivisionError as error:
        print(f"{error}")
        try:
            1 / 2
        except TypeError as error:  # [unused-variable, redefined-outer-name]
            print("warning")


def func4():
    try:
        1 / 0
    except ZeroDivisionError as error:  # [unused-variable]
        try:
            1 / 0
        except ZeroDivisionError as error:  # [redefined-outer-name]
            print("error")


def main(lst):
    """https://github.com/pylint-dev/astroid/pull/1111#issuecomment-890367609"""
    try:
        raise ValueError
    except ValueError as e:  # [unused-variable]
        pass

    for e in lst:
        pass

    # e will be undefined if lst is empty
    print(e)  # [undefined-loop-variable]


main([])


def func5():
    """No unused-variable for a container if iterated in comprehension"""
    x = []
    # Test case requires homonym between "for x" and "in x"
    assert [True for x in x]
def sibling_except_handlers():
    # Re-using the same name in *sibling* handlers is fine: each binding
    # is used and implicitly deleted at the end of its handler.
    try:
        pass
    except ValueError as e:
        print(e)
    try:
        pass
    except ValueError as e:
        print(e)


def func6():
    a = 1

    def nonlocal_writer():
        # Rebinding `a` through nonlocal counts as a use of the outer name.
        nonlocal a
        for a in range(10):
            pass

    nonlocal_writer()

    assert a == 9, a


def test_regression_8595():
    # Regression test for pylint issue #8595.
    # pylint: disable=broad-exception-caught
    import logging

    def compute():
        pass

    try:
        compute()
        error = False
    except Exception as e:
        logging.error(e)
        error = True
    if error:
        try:
            compute()
        except Exception as e:  # [unused-variable]
            pass
2,086 | can see ban details | from django.apps import AppConfig
from django.utils.translation import pgettext_lazy
from .pages import user_profile, usercp, users_list
class MisagoUsersConfig(AppConfig):
    """App config for misago.users: wires up signal handlers, admin tasks
    and the default user-facing page sections at startup."""

    name = "misago.users"
    label = "misago_users"
    verbose_name = "Misago Auth"

    def ready(self):
        # Imported for their side effects: registering signal receivers
        # and admin background tasks.
        from . import signals as _
        from .admin import tasks  # pylint: disable=unused-import

        self.register_default_usercp_pages()
        self.register_default_users_list_pages()
        self.register_default_user_profile_pages()

    def register_default_usercp_pages(self):
        """Register the built-in sections of the user control panel."""

        def auth_is_not_delegated(request):
            # Credential-related sections are hidden when authentication
            # is delegated to an external OAuth2 provider.
            return not request.settings.enable_oauth2_client

        usercp.add_section(
            link="misago:usercp-change-forum-options",
            name=pgettext_lazy("user options page", "Forum options"),
            component="forum-options",
            icon="settings",
        )
        usercp.add_section(
            link="misago:usercp-edit-details",
            name=pgettext_lazy("user options page", "Edit details"),
            component="edit-details",
            icon="person_outline",
        )
        usercp.add_section(
            link="misago:usercp-change-username",
            name=pgettext_lazy("user options page", "Change username"),
            component="change-username",
            icon="card_membership",
            visible_if=auth_is_not_delegated,
        )
        usercp.add_section(
            link="misago:usercp-change-email-password",
            name=pgettext_lazy("user options page", "Change email or password"),
            component="sign-in-credentials",
            icon="vpn_key",
            visible_if=auth_is_not_delegated,
        )

        def can_download_own_data(request):
            return request.settings.allow_data_downloads

        usercp.add_section(
            link="misago:usercp-download-data",
            name=pgettext_lazy("user options page", "Download data"),
            component="download-data",
            icon="save_alt",
            visible_if=can_download_own_data,
        )

        def can_delete_own_account(request):
            # Deletion additionally requires auth not to be delegated.
            if not auth_is_not_delegated(request):
                return False
            return request.settings.allow_delete_own_account

        usercp.add_section(
            link="misago:usercp-delete-account",
            name=pgettext_lazy("user options page", "Delete account"),
            component="delete-account",
            icon="cancel",
            visible_if=can_delete_own_account,
        )

    def register_default_users_list_pages(self):
        """Register the built-in users-list sections."""
        users_list.add_section(
            link="misago:users-active-posters",
            component="active-posters",
            name=pgettext_lazy("users lists page", "Top posters"),
        )

    def register_default_user_profile_pages(self):
        """Register the built-in user-profile sections."""

        def can_see_names_history(request, profile):
            # Own history is always visible; otherwise an ACL permission
            # is required.
            if request.user.is_authenticated:
                is_account_owner = profile.pk == request.user.pk
                has_permission = request.user_acl["can_see_users_name_history"]
                return is_account_owner or has_permission
            return False

        def METHOD_NAME(request, profile):
            # Visible only with the ACL permission and only when the
            # profile actually has an active ban.
            if request.user.is_authenticated:
                if request.user_acl["can_see_ban_details"]:
                    from .bans import get_user_ban

                    return bool(get_user_ban(profile, request.cache_versions))
                return False
            return False

        user_profile.add_section(
            link="misago:user-posts",
            name=pgettext_lazy("user profile page", "Posts"),
            icon="message",
            component="posts",
        )
        user_profile.add_section(
            link="misago:user-threads",
            name=pgettext_lazy("user profile page", "Threads"),
            icon="forum",
            component="threads",
        )
        user_profile.add_section(
            link="misago:user-followers",
            name=pgettext_lazy("user profile page", "Followers"),
            icon="favorite",
            component="followers",
        )
        user_profile.add_section(
            link="misago:user-follows",
            name=pgettext_lazy("user profile page", "Follows"),
            icon="favorite_border",
            component="follows",
        )
        user_profile.add_section(
            link="misago:user-details",
            name=pgettext_lazy("user profile page", "Details"),
            icon="person_outline",
            component="details",
        )
        user_profile.add_section(
            link="misago:username-history",
            name=pgettext_lazy("user profile page", "Username history"),
            icon="card_membership",
            component="username-history",
            visible_if=can_see_names_history,
        )
        user_profile.add_section(
            link="misago:user-ban",
            name=pgettext_lazy("user profile page", "Ban details"),
            icon="remove_circle_outline",
            component="ban-details",
            visible_if=METHOD_NAME,
        )
2,087 | add xml attr | ## Original version of code heavily based on recipe written by Wai Yip
## Tung, released under PSF license.
## http://code.activestate.com/recipes/534109/
import re
import os
import xml.sax.handler
class DataNode (object):
    """Dynamic attribute bag representing one XML element.

    XML attributes and child elements are exposed as Python attributes;
    the element's text is available via str().  NOTE(review): the class
    carries Python 2 idioms (__nonzero__, u'' literals) — confirm the
    targeted interpreter before modernizing.
    """

    def __init__ (self, **kwargs):
        self._attrs = {}   # XML attributes and child elements
        self._data = None  # child text data
        self._ncDict = kwargs.get ('nameChangeDict', {})  # tag-name renames to apply

    def __len__ (self):
        # treat single element as a list of 1
        return 1

    def __getitem__ (self, key):
        # String keys index the attribute dict; other (integer) keys treat
        # this single node as a one-element list (pairs with __len__).
        if isinstance (key, str):
            return self._attrs.get(key,None)
        else:
            return [self][key]

    def __contains__ (self, name):
        return name in self._attrs

    def __nonzero__ (self):
        # Python 2 truth hook; Python 3 would need __bool__ for the same effect.
        return bool (self._attrs or self._data)

    def __getattr__ (self, name):
        if name.startswith('__'):
            # need to do this for Python special methods???
            raise AttributeError (name)
        return self._attrs.get (name, None)

    def METHOD_NAME (self, name, value):
        """Attach *value* under *name*; repeated names collect into a list."""
        change = self._ncDict.get (name)
        if change:
            name = change
        if name in self._attrs:
            # multiple attribute of the same name are represented by a list
            children = self._attrs[name]
            if not isinstance(children, list):
                children = [children]
                self._attrs[name] = children
            children.append(value)
        else:
            self._attrs[name] = value

    def __str__ (self):
        return self._data or ''

    def __repr__ (self):
        items = sorted (self._attrs.items())
        if self._data:
            items.append(('data', self._data))
        return u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])

    def attributes (self):
        """Return the underlying attribute/child dict."""
        return self._attrs
class TreeBuilder (xml.sax.handler.ContentHandler):
    """SAX content handler that assembles a DataNode tree from parse events."""

    # Any character not legal in a Python identifier is mangled to '_'.
    non_id_char = re.compile('[^_0-9a-zA-Z]')

    def __init__ (self, **kwargs):
        self._stack = []       # (parent node, pending text parts) per open element
        self._text_parts = []  # text chunks of the element currently being built
        self._ncDict = kwargs.get ('nameChangeDict', {})
        self._root = DataNode (nameChangeDict = self._ncDict)
        self.current = self._root

    def startElement (self, name, attrs):
        self._stack.append( (self.current, self._text_parts))
        self.current = DataNode (nameChangeDict = self._ncDict)
        self._text_parts = []
        # xml attributes --> python attributes
        for k, v in attrs.items():
            self.current.METHOD_NAME (TreeBuilder._name_mangle(k), v)

    def endElement (self, name):
        text = ''.join (self._text_parts).strip()
        if text:
            self.current._data = text
        if self.current.attributes():
            obj = self.current
        else:
            # a text only node is simply represented by the string
            obj = text or ''
        self.current, self._text_parts = self._stack.pop()
        self.current.METHOD_NAME (TreeBuilder._name_mangle(name), obj)

    def characters (self, content):
        self._text_parts.append(content)

    def root (self):
        return self._root

    def topLevel (self):
        '''Returns top level object'''
        # NOTE(review): dict.values() is not subscriptable on Python 3;
        # this line only works on Python 2 — confirm target interpreter.
        return self._root.attributes().values()[0]

    @staticmethod
    def _name_mangle (name):
        return TreeBuilder.non_id_char.sub('_', name)
# Map of XML-special characters to their escaped entity form.  The
# replacement strings had been corrupted into the bare characters
# themselves (making each substitution a no-op) plus the junk '"e;' for
# the double quote; restored to the proper predefined XML entities.
# Order matters: '&' must be escaped first so the '&' introduced by the
# later entities is not re-escaped.
regexList = [ (re.compile (r'&'), '&amp;' ),
              (re.compile (r'<'), '&lt;' ),
              (re.compile (r'>'), '&gt;' ),
              (re.compile (r'"'), '&quot;' ),
              (re.compile (r"'"), '&#39;' )
            ]

# Matches attr="value" pairs; group 2 is the quoted value to sanitize.
quoteRE = re.compile (r'(\w\s*=\s*")([^"]+)"')

def fixQuoteValue (match):
    '''Escape all XML-special characters inside a quoted attribute value.'''
    quote = match.group(2)
    for regexTup in regexList:
        quote = regexTup[0].sub( regexTup[1], quote )
    return match.group(1) + quote + '"'
def xml2obj (**kwargs):
    ''' Converts XML data into native Python object. Takes either
    file handle or string as input. Does NOT fix illegal characters.

    input source: Exactly one of the three following is needed
    filehandle - input from file handle
    contents - input from string
    filename - input from filename

    options:
    filtering - boolean value telling code whether or not to filter
    the input to escape XML-special characters in attribute values
    nameChangeDict - dictionary of names to change in the python object'''
    # make sure we have exactly 1 input source
    filehandle = kwargs.get ('filehandle')
    contents = kwargs.get ('contents')
    filename = kwargs.get ('filename')
    if not filehandle and not contents and not filename:
        raise RuntimeError("You must provide 'filehandle', 'contents', or 'filename'")
    if filehandle and contents or \
       filehandle and filename or \
       contents and filename:
        raise RuntimeError("You must provide only ONE of 'filehandle', 'contents', or 'filename'")
    # are we filtering?
    filtering = kwargs.get ('filtering')
    if filtering:
        # if we are filtering, we need to read in the contents to modify them
        if not contents:
            if not filehandle:
                try:
                    filehandle = open (filename, 'r')
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit — consider narrowing to OSError.
                except:
                    raise RuntimeError("Failed to open '%s'" % filename)
            contents = ''
            for line in filehandle:
                contents += line
            filehandle.close()
            filehandle = filename = ''
        # escape XML-special characters inside attribute values
        contents = quoteRE.sub (fixQuoteValue, contents)
    ncDict = kwargs.get ('nameChangeDict', {})
    builder = TreeBuilder (nameChangeDict = ncDict)
    if contents:
        # NOTE(review): on Python 3, xml.sax.parseString expects bytes —
        # confirm the targeted interpreter.
        xml.sax.parseString(contents, builder)
    else:
        if not filehandle:
            try:
                filehandle = open (filename, 'r')
            except:
                raise RuntimeError("Failed to open '%s'" % filename)
        xml.sax.parse(filehandle, builder)
    return builder.topLevel()
2,088 | retry | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
# A nosec comment is appended to the following line in order to disable the B404 check.
# In this file the input of the module subprocess is trusted.
import subprocess as sub # nosec B404
import time
import webbrowser
from typing import List
from argparse import ArgumentParser, Namespace
from pcluster.cli.commands.common import CliCommand
from pcluster.constants import PCLUSTER_ISSUES_LINK
from pcluster.models.cluster import Cluster
from pcluster.utils import error
DCV_CONNECT_SCRIPT = "/opt/parallelcluster/scripts/pcluster_dcv_connect.sh"
LOGGER = logging.getLogger(__name__)
class DCVConnectionError(Exception):
    """Raised when establishing the DCV connection fails."""
def _check_command_output(cmd):
    """Run *cmd* through the shell and return its stripped combined
    stdout+stderr text.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    # A nosec comment is appended to the following line in order to disable the B602 check.
    # This is done because it's needed to enable the desired functionality. The only caller
    # of this function is _retrieve_dcv_session_url, which passes a command that is safe.
    return sub.check_output(cmd, shell=True, universal_newlines=True, stderr=sub.STDOUT).strip()  # nosec B602 nosemgrep
def _dcv_connect(args):
    """
    Execute pcluster dcv connect command.

    Resolves the cluster's head node, prepares a one-time DCV session URL
    over SSH (with retries), then either prints the URL (--show-url) or
    opens it in the default web browser.

    :param args: pcluster cli arguments.
    """
    try:
        head_node = Cluster(args.cluster_name).head_node_instance
    except Exception as e:
        error(f"Unable to connect to the cluster.\n{e}")
    else:
        # Prefer the public IP; fall back to the private one.
        head_node_ip = head_node.public_ip or head_node.private_ip
        # Prepare ssh command to execute in the head node instance
        cmd = 'ssh {CFN_USER}@{HEAD_NODE_IP} {KEY} "{REMOTE_COMMAND} /home/{CFN_USER}"'.format(
            CFN_USER=head_node.default_user,
            HEAD_NODE_IP=head_node_ip,
            KEY="-i {0}".format(args.key_path) if args.key_path else "",
            REMOTE_COMMAND=DCV_CONNECT_SCRIPT,
        )
        try:
            # The session URL is only valid briefly, hence the retries below.
            url = METHOD_NAME(_retrieve_dcv_session_url, func_args=[cmd, args.cluster_name, head_node_ip], attempts=4)
            url_message = f"Please use the following one-time URL in your browser within 30 seconds:\n{url}"
            if args.show_url:
                print(url_message)
                return
            try:
                if not webbrowser.open_new(url):
                    raise webbrowser.Error("Unable to open the Web browser.")
            except webbrowser.Error as e:
                # Fall back to printing the URL when no browser is available.
                print(f"{e}\n{url_message}")
        except DCVConnectionError as e:
            error(
                "Something went wrong during DCV connection.\n{0}"
                "Please check the logs in the /var/log/parallelcluster/ folder "
                "of the head node and submit an issue {1}\n".format(e, PCLUSTER_ISSUES_LINK)
            )
def _retrieve_dcv_session_url(ssh_cmd, cluster_name, head_node_ip):
    """Connect by ssh to the head node instance, prepare DCV session and return the DCV session URL.

    :raises DCVConnectionError: when the remote script fails for any reason
        other than the script being absent (old cluster version).
    """
    try:
        LOGGER.debug("SSH command: %s", ssh_cmd)
        output = _check_command_output(ssh_cmd)
        # At first ssh connection, the ssh command alerts it is adding the host to the known hosts list
        if re.search("Permanently added .* to the list of known hosts.", output):
            output = _check_command_output(ssh_cmd)
        dcv_parameters = re.search(
            r"PclusterDcvServerPort=([\d]+) PclusterDcvSessionId=([\w]+) PclusterDcvSessionToken=([\w-]+)", output
        )
        if dcv_parameters:
            dcv_server_port = dcv_parameters.group(1)
            dcv_session_id = dcv_parameters.group(2)
            dcv_session_token = dcv_parameters.group(3)
        else:
            # NOTE(review): if error() returns instead of exiting, the final
            # format below would hit unbound locals — confirm error() aborts.
            error(
                "Something went wrong during DCV connection. Please manually execute the command:\n{0}\n"
                "If the problem persists, please check the logs in the /var/log/parallelcluster/ folder "
                "of the head node and submit an issue {1}".format(ssh_cmd, PCLUSTER_ISSUES_LINK)
            )
    except sub.CalledProcessError as e:
        if "{0}: No such file or directory".format(DCV_CONNECT_SCRIPT) in e.output:
            error(
                "The cluster {0} has been created with an old version of ParallelCluster "
                "without the DCV support.".format(cluster_name)
            )
        else:
            raise DCVConnectionError(e.output)
    return "https://{IP}:{PORT}?authToken={TOKEN}#{SESSION_ID}".format(
        IP=head_node_ip, PORT=dcv_server_port, TOKEN=dcv_session_token, SESSION_ID=dcv_session_id
    )
def METHOD_NAME(func, func_args, attempts=1, wait=0):
    """
    Call function and re-execute it if it raises an Exception.

    :param func: the function to execute.
    :param func_args: the positional arguments of the function.
    :param attempts: the maximum number of attempts. Default: 1.
    :param wait: delay in seconds between attempts. Default: 0.
    :returns: the result of the function, or None when attempts < 1.
    :raises: the last exception when every attempt fails.
    """
    # A counting for-loop replaces the original while/decrement dance and
    # its pylint R1710 (inconsistent-return) suppression.
    for attempts_left in range(attempts, 0, -1):
        try:
            return func(*func_args)
        except Exception as e:  # deliberately broad: any failure triggers a retry
            if attempts_left == 1:
                # Out of attempts: re-raise with the original traceback
                # (bare `raise` instead of the original `raise e`).
                raise
            LOGGER.debug("%s, retrying in %s seconds..", e, wait)
            time.sleep(wait)
    # attempts < 1: nothing was ever executed (mirrors the original contract).
    return None
class DcvConnectCommand(CliCommand):
    """Implement pcluster dcv connect command."""

    # CLI
    name = "dcv-connect"
    help = "Permits to connect to the head node through an interactive session by using NICE DCV."
    description = help

    def __init__(self, subparsers):
        super().__init__(subparsers, name=self.name, help=self.help, description=self.description)

    def register_command_args(self, parser: ArgumentParser) -> None:  # noqa: D102
        # -n is required; --key-path and --show-url are optional toggles
        # consumed by _dcv_connect().
        parser.add_argument("-n", "--cluster-name", help="Name of the cluster to connect to", required=True)
        parser.add_argument("--key-path", dest="key_path", help="Key path of the SSH key to use for the connection")
        parser.add_argument("--show-url", action="store_true", default=False, help="Print URL and exit")

    def execute(self, args: Namespace, extra_args: List[str]) -> None:  # noqa: D102 #pylint: disable=unused-argument
        _dcv_connect(args)
2,089 | display participation result | from datetime import timedelta
from django.core.exceptions import ValidationError
from django.db.models import Min, OuterRef, Subquery
from django.template.defaultfilters import floatformat
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _, gettext_lazy
from judge.contest_format.default import DefaultContestFormat
from judge.contest_format.registry import register_contest_format
from judge.utils.timedelta import nice_repr
@register_contest_format('ioi')
class LegacyIOIContestFormat(DefaultContestFormat):
    """Contest format reproducing pre-2016 IOI scoring.

    The highest-scoring submission per problem counts towards the total;
    when ``cumtime`` is enabled, submission times are summed as a tiebreaker.
    """
    name = gettext_lazy('IOI (pre-2016)')
    config_defaults = {'cumtime': False}
    """
    cumtime: Specify True if time penalties are to be computed. Defaults to False.
    """
    @classmethod
    def validate(cls, config):
        """Raise ValidationError unless *config* is None or a dict whose keys
        and value types match ``config_defaults``."""
        if config is None:
            return
        if not isinstance(config, dict):
            raise ValidationError('IOI-styled contest expects no config or dict as config')
        for key, value in config.items():
            if key not in cls.config_defaults:
                raise ValidationError('unknown config key "%s"' % key)
            if not isinstance(value, type(cls.config_defaults[key])):
                raise ValidationError('invalid type for config key "%s"' % key)
    def __init__(self, contest, config):
        # Merge the stored config over the defaults; config may be None.
        self.config = self.config_defaults.copy()
        self.config.update(config or {})
        self.contest = contest
    def update_participation(self, participation):
        """Recompute participation's score, cumtime and format_data from the
        participant's best submission on each problem, then save."""
        cumtime = 0
        score = 0
        format_data = {}
        # For every problem, select rows whose points equal the participant's
        # best points on that problem, keeping the earliest such submission time.
        queryset = (participation.submissions.values('problem_id')
                    .filter(points=Subquery(
                        participation.submissions.filter(problem_id=OuterRef('problem_id'))
                        .order_by('-points').values('points')[:1]))
                    .annotate(time=Min('submission__date'))
                    .values_list('problem_id', 'time', 'points'))
        for problem_id, time, points in queryset:
            if self.config['cumtime']:
                dt = (time - participation.start).total_seconds()
                # Only problems with a non-zero score contribute to cumtime.
                if points:
                    cumtime += dt
            else:
                dt = 0
            format_data[str(problem_id)] = {'points': points, 'time': dt}
            score += points
        participation.cumtime = max(cumtime, 0)
        participation.score = round(score, self.contest.points_precision)
        participation.tiebreaker = 0
        participation.format_data = format_data
        participation.save()
    def display_user_problem(self, participation, contest_problem):
        """Render the per-problem table cell (points + optional solve time)."""
        format_data = (participation.format_data or {}).get(str(contest_problem.id))
        if format_data:
            return format_html(
                '<td class="{state}"><a href="{url}">{points}<div class="solving-time">{time}</div></a></td>',
                state=(('pretest-' if self.contest.run_pretests_only and contest_problem.is_pretested else '') +
                       self.best_solution_state(format_data['points'], contest_problem.points)),
                url=reverse('contest_user_submissions',
                            args=[self.contest.key, participation.user.user.username, contest_problem.problem.code]),
                points=floatformat(format_data['points']),
                time=nice_repr(timedelta(seconds=format_data['time']), 'noday') if self.config['cumtime'] else '',
            )
        else:
            return mark_safe('<td></td>')
    def METHOD_NAME(self, participation):
        """Render the total-score table cell for a participant."""
        return format_html(
            '<td class="user-points"><a href="{url}">{points}<div class="solving-time">{cumtime}</div></a></td>',
            url=reverse('contest_all_user_submissions',
                        args=[self.contest.key, participation.user.user.username]),
            points=floatformat(participation.score, -self.contest.points_precision),
            cumtime=nice_repr(timedelta(seconds=participation.cumtime), 'noday') if self.config['cumtime'] else '',
        )
    def get_short_form_display(self):
        """Yield human-readable sentences summarising the scoring rules."""
        yield _('The maximum score submission for each problem will be used.')
        if self.config['cumtime']:
            yield _('Ties will be broken by the sum of the last score altering submission time on problems with a '
                    'non-zero score.')
        else:
            yield _('Ties by score will **not** be broken.')
2,090 | d set p | """DistributedNode module: contains the DistributedNode class"""
from panda3d.core import NodePath
from . import GridParent
from . import DistributedObject
class DistributedNode(DistributedObject.DistributedObject, NodePath):
    """Distributed Node class:"""
    def __init__(self, cr):
        # Guard against repeated __init__ calls from diamond inheritance.
        if not hasattr(self, 'DistributedNode_initialized'):
            self.DistributedNode_initialized = 1
            self.gotStringParentToken = 0
            DistributedObject.DistributedObject.__init__(self, cr)
            # Only create the NodePath if no underlying C++ node exists yet.
            if not self.this:
                NodePath.__init__(self, "DistributedNode")
            # initialize gridParent
            self.gridParent = None
    def disable(self):
        if self.activeState != DistributedObject.ESDisabled:
            if not self.isEmpty():
                # NOTE(review): `hidden` is presumably the off-stage NodePath
                # installed as a builtin by ShowBase -- confirm.
                self.reparentTo(hidden)
            DistributedObject.DistributedObject.disable(self)
    def delete(self):
        # Guard against repeated delete calls.
        if not hasattr(self, 'DistributedNode_deleted'):
            self.DistributedNode_deleted = 1
            if not self.isEmpty():
                self.removeNode()
            if self.gridParent:
                self.gridParent.delete()
            DistributedObject.DistributedObject.delete(self)
    def generate(self):
        DistributedObject.DistributedObject.generate(self)
        # Reset so stale string parent tokens don't suppress a later setParent.
        self.gotStringParentToken = 0
    def setLocation(self, parentId, zoneId, teleport=0):
        # Redefine DistributedObject setLocation, so that when
        # location is set to the ocean grid, we can update our parenting
        # under gridParent
        DistributedObject.DistributedObject.setLocation(self, parentId, zoneId)
        parentObj = self.cr.doId2do.get(parentId)
        if parentObj:
            # Make sure you in a zone that is in the grid before making a GridParent
            if (parentObj.isGridParent() and (zoneId >= parentObj.startingZone)):
                if not self.gridParent:
                    self.gridParent = GridParent.GridParent(self)
                self.gridParent.setGridParent(parentObj, zoneId, teleport)
            else:
                # Parent exists but is not a grid (or zone outside the grid):
                # drop any previous grid parenting.
                if self.gridParent:
                    self.gridParent.delete()
                    self.gridParent = None
        else:
            if self.gridParent:
                self.gridParent.delete()
                self.gridParent = None
    def __cmp__(self, other):
        # DistributedNode inherits from NodePath, which inherits a
        # definition of __cmp__ from FFIExternalObject that uses the
        # NodePath's compareTo() method to compare different
        # NodePaths. But we don't want this behavior for
        # DistributedNodes; DistributedNodes should only be compared
        # pointerwise.
        if self is other:
            return 0
        else:
            return 1
    ### setParent ###
    def b_setParent(self, parentToken):
        # The token may be either a string name or a numeric id.
        if isinstance(parentToken, str):
            self.setParentStr(parentToken)
        else:
            self.setParent(parentToken)
        # it's important to call the local setParent first.
        self.d_setParent(parentToken)
    def d_setParent(self, parentToken):
        # Send the matching distributed update for the token type.
        if isinstance(parentToken, str):
            self.sendUpdate("setParentStr", [parentToken])
        else:
            self.sendUpdate("setParent", [parentToken])
    def setParentStr(self, parentTokenStr):
        assert self.notify.debug('setParentStr: %s' % parentTokenStr)
        assert self.notify.debug('isGenerated: %s' % self.isGenerated())
        # An empty token string is ignored.
        if len(parentTokenStr) > 0:
            self.do_setParent(parentTokenStr)
            self.gotStringParentToken = 1
    def setParent(self, parentToken):
        assert self.notify.debug('setParent: %s' % parentToken)
        assert self.notify.debug('isGenerated: %s' % self.isGenerated())
        # if we are not yet generated and we just got a parent token
        # as a string, ignore whatever value comes in here
        justGotRequiredParentAsStr = ((not self.isGenerated()) and
                                      self.gotStringParentToken)
        if not justGotRequiredParentAsStr:
            # Token 0 is treated as "no parent"; skip the reparent.
            if parentToken != 0:
                self.do_setParent(parentToken)
        self.gotStringParentToken = 0
    def do_setParent(self, parentToken):
        """do_setParent(self, int parentToken)
        This function is defined simply to allow a derived class (like
        DistributedAvatar) to override the behavior of setParent if
        desired.
        """
        if not self.isDisabled():
            self.cr.parentMgr.requestReparent(self, parentToken)
    ###### set pos and hpr functions #######
    # Each d_* method broadcasts the corresponding transform component;
    # the local setter is inherited from NodePath unless redefined below.
    # setX provided by NodePath
    def d_setX(self, x):
        self.sendUpdate("setX", [x])
    # setY provided by NodePath
    def d_setY(self, y):
        self.sendUpdate("setY", [y])
    # setZ provided by NodePath
    def d_setZ(self, z):
        self.sendUpdate("setZ", [z])
    # setH provided by NodePath
    def d_setH(self, h):
        self.sendUpdate("setH", [h])
    # setP provided by NodePath
    def METHOD_NAME(self, p):
        self.sendUpdate("setP", [p])
    # setR provided by NodePath
    def d_setR(self, r):
        self.sendUpdate("setR", [r])
    def setXY(self, x, y):
        self.setX(x)
        self.setY(y)
    def d_setXY(self, x, y):
        self.sendUpdate("setXY", [x, y])
    def setXZ(self, x, z):
        self.setX(x)
        self.setZ(z)
    def d_setXZ(self, x, z):
        self.sendUpdate("setXZ", [x, z])
    # setPos provided by NodePath
    def d_setPos(self, x, y, z):
        self.sendUpdate("setPos", [x, y, z])
    # setHpr provided by NodePath
    def d_setHpr(self, h, p, r):
        self.sendUpdate("setHpr", [h, p, r])
    def setXYH(self, x, y, h):
        self.setX(x)
        self.setY(y)
        self.setH(h)
    def d_setXYH(self, x, y, h):
        self.sendUpdate("setXYH", [x, y, h])
    def setXYZH(self, x, y, z, h):
        self.setPos(x, y, z)
        self.setH(h)
    def d_setXYZH(self, x, y, z, h):
        self.sendUpdate("setXYZH", [x, y, z, h])
    # setPosHpr provided by NodePath
    def d_setPosHpr(self, x, y, z, h, p, r):
        self.sendUpdate("setPosHpr", [x, y, z, h, p, r])
2,091 | test delete | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
import unittest
from unittest import mock
from azure.monitor.opentelemetry.exporter._storage import (
LocalFileBlob,
LocalFileStorage,
_now,
_seconds,
)
TEST_FOLDER = os.path.abspath(".test.storage")
def throw(exc_type, *args, **kwargs):
    """Build a callable that ignores its own arguments and always raises
    ``exc_type(*args, **kwargs)``. Handy as a mock ``side_effect``."""
    def raiser(*_ignored_args, **_ignored_kwargs):
        raise exc_type(*args, **kwargs)

    return raiser
def clean_folder(folder):
    """Remove every file, symlink and subdirectory inside *folder*.

    The folder itself is kept. Errors on individual entries are printed and
    skipped so one undeletable entry does not abort the cleanup. A missing
    or non-directory path is a no-op.
    """
    # BUG FIX: the original guard was os.path.isfile(folder), which is never
    # true for a directory, so the cleanup silently never ran.
    if os.path.isdir(folder):
        for filename in os.listdir(folder):
            file_path = os.path.join(folder, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (file_path, e))
# pylint: disable=no-self-use
class TestLocalFileBlob(unittest.TestCase):
    """Tests for LocalFileBlob's file-backed persistence primitives."""
    @classmethod
    def setup_class(cls):
        os.makedirs(TEST_FOLDER, exist_ok=True)
    @classmethod
    def tearDownClass(cls):
        # ignore_errors=True: best-effort removal of the scratch folder.
        shutil.rmtree(TEST_FOLDER, True)
    def tearDown(self):
        clean_folder(TEST_FOLDER)
    def METHOD_NAME(self):
        # Deleting a blob whose backing file does not exist must not raise.
        blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar"))
        blob.delete()
    def test_get(self):
        # Reading a missing blob returns None instead of raising.
        blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar"))
        self.assertIsNone(blob.get())
        blob.get()
    def test_put_error(self):
        # A failure while renaming the temp file is swallowed by put().
        blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar"))
        with mock.patch("os.rename", side_effect=throw(Exception)):
            blob.put([1, 2, 3])
    @unittest.skip("transient storage")
    def test_put(self):
        blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar.blob"))
        test_input = (1, 2, 3)
        blob.put(test_input)
        self.assertGreaterEqual(len(os.listdir(TEST_FOLDER)), 1)
    @unittest.skip("transient storage")
    def test_lease_error(self):
        # Leasing a deleted blob yields None.
        blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar.blob"))
        blob.delete()
        self.assertEqual(blob.lease(0.01), None)
# pylint: disable=protected-access
class TestLocalFileStorage(unittest.TestCase):
    """Tests for LocalFileStorage rotation, size limits and maintenance."""
    @classmethod
    def setup_class(cls):
        os.makedirs(TEST_FOLDER, exist_ok=True)
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(TEST_FOLDER, True)
    def test_get_nothing(self):
        # An empty (or freshly created nested) storage yields no blob.
        with LocalFileStorage(os.path.join(TEST_FOLDER, "test", "a")) as stor:
            pass
        with LocalFileStorage(os.path.join(TEST_FOLDER, "test")) as stor:
            self.assertIsNone(stor.get())
    def test_get(self):
        now = _now()
        with LocalFileStorage(os.path.join(TEST_FOLDER, "foo")) as stor:
            stor.put((1, 2, 3), lease_period=10)
            with mock.patch("azure.monitor.opentelemetry.exporter._storage._now") as m:
                # Backdate the clock 30 days so these blobs look expired.
                m.return_value = now - _seconds(30 * 24 * 60 * 60)
                stor.put((1, 2, 3))
                stor.put((1, 2, 3), lease_period=10)
            with mock.patch("os.rename"):
                stor.put((1, 2, 3))
            with mock.patch("os.rename"):
                stor.put((1, 2, 3))
            with mock.patch("os.remove", side_effect=throw(Exception)):
                with mock.patch("os.rename", side_effect=throw(Exception)):
                    # Both cleanup paths failing must surface as "nothing to get".
                    self.assertIsNone(stor.get())
            self.assertIsNone(stor.get())
    def test_put(self):
        # A put blob survives reopening the storage on the same path.
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "bar")) as stor:
            stor.put(test_input, 0)
            self.assertEqual(stor.get().get(), test_input)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "bar")) as stor:
            self.assertEqual(stor.get().get(), test_input)
            with mock.patch("os.rename", side_effect=throw(Exception)):
                self.assertIsNone(stor.put(test_input))
    def test_put_max_size(self):
        # When the size check reports full, put() must drop the payload.
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd")) as stor:
            size_mock = mock.Mock()
            size_mock.return_value = False
            stor._check_storage_size = size_mock
            stor.put(test_input)
            self.assertEqual(stor.get(), None)
    def test_check_storage_size_full(self):
        # 1-byte capacity: any stored blob exceeds it.
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd2"), 1) as stor:
            stor.put(test_input)
            self.assertFalse(stor._check_storage_size())
    def test_check_storage_size_not_full(self):
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd3"), 1000) as stor:
            stor.put(test_input)
            self.assertTrue(stor._check_storage_size())
    def test_check_storage_size_no_files(self):
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd3"), 1000) as stor:
            self.assertTrue(stor._check_storage_size())
    def test_check_storage_size_links(self):
        # Symlinks are skipped when summing storage size.
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd4"), 1000) as stor:
            stor.put(test_input)
            with mock.patch("os.path.islink") as os_mock:
                os_mock.return_value = True
                self.assertTrue(stor._check_storage_size())
    def test_check_storage_size_error(self):
        # getsize failures are tolerated by the size check.
        test_input = (1, 2, 3)
        with LocalFileStorage(os.path.join(TEST_FOLDER, "asd5"), 1) as stor:
            with mock.patch("os.path.getsize", side_effect=throw(OSError)):
                stor.put(test_input)
                with mock.patch("os.path.islink") as os_mock:
                    os_mock.return_value = True
                    self.assertTrue(stor._check_storage_size())
    def test_maintenance_routine(self):
        # The maintenance routine must tolerate makedirs/listdir/isdir failures.
        with mock.patch("os.makedirs") as m:
            with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor:
                m.return_value = None
        with mock.patch("os.makedirs", side_effect=throw(Exception)):
            stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz"))
            stor.close()
        with mock.patch("os.listdir", side_effect=throw(Exception)):
            stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz"))
            stor.close()
        with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor:
            with mock.patch("os.listdir", side_effect=throw(Exception)):
                stor._maintenance_routine()
            with mock.patch("os.path.isdir", side_effect=throw(Exception)):
                stor._maintenance_routine()
2,092 | test create association in new diagram should | import pytest
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import allow, connect, disconnect, get_connected
from gaphor.UML.classes.association import AssociationItem
from gaphor.UML.classes.klass import ClassItem
@pytest.fixture
def connected_association(create):
    """Return an AssociationItem connected between two fresh ClassItems,
    together with both class items."""
    asc = create(AssociationItem)
    c1 = create(ClassItem, UML.Class)
    c2 = create(ClassItem, UML.Class)
    connect(asc, asc.head, c1)
    assert asc.subject is None  # no UML metaclass yet
    connect(asc, asc.tail, c2)
    assert asc.subject is not None
    return asc, c1, c2
@pytest.fixture
def clone(create):
    """Return a factory producing a second presentation item that shares the
    subject and end subjects of an existing association item."""
    def _clone(item):
        new = create(type(item))
        new.subject = item.subject
        new.head_subject = item.head_subject
        new.tail_subject = item.tail_subject
        return new
    return _clone
def test_glue_to_class(connected_association):
    """Both association ends may be glued to class items."""
    asc, c1, c2 = connected_association
    glued = allow(asc, asc.head, c1)
    assert glued
    connect(asc, asc.head, c1)
    glued = allow(asc, asc.tail, c2)
    assert glued
def test_association_item_connect(connected_association, element_factory):
    """Connecting both ends creates the UML model elements and end subjects."""
    asc, c1, c2 = connected_association
    # Diagram, Class *2, Property *2, Association
    assert len(element_factory.lselect()) == 9
    assert asc.head_subject is not None
    assert asc.tail_subject is not None
def test_association_item_reconnect_copies_properties(connected_association, create):
    """Reconnecting the tail creates a new association that keeps the name."""
    asc, c1, c2 = connected_association
    c3 = create(ClassItem, UML.Class)
    asc.subject.name = "Name"
    a = asc.subject
    connect(asc, asc.tail, c3)
    assert a is not asc.subject
    ends = [p.type for p in asc.subject.memberEnd]
    assert c1.subject in ends
    assert c3.subject in ends
    assert c2.subject not in ends
    assert asc.subject.name == "Name"
def test_association_item_reconnect_with_navigability(connected_association, create):
    """End navigability survives a tail reconnect."""
    asc, c1, c2 = connected_association
    c3 = create(ClassItem, UML.Class)
    UML.recipes.set_navigability(asc.subject, asc.tail_subject, True)
    connect(asc, asc.tail, c3)
    assert asc.tail_subject.navigability is True
def test_association_item_reconnect_with_aggregation(connected_association, create):
    """End aggregation kind survives a tail reconnect."""
    asc, c1, c2 = connected_association
    c3 = create(ClassItem, UML.Class)
    asc.tail_subject.aggregation = "composite"
    connect(asc, asc.tail, c3)
    assert asc.tail_subject.aggregation == "composite"
def test_disconnect_should_disconnect_model(connected_association, element_factory):
    """Disconnecting both ends removes the association and its properties."""
    asc, c1, c2 = connected_association
    disconnect(asc, asc.head)
    disconnect(asc, asc.tail)
    assert c1 is not get_connected(asc, asc.head)
    assert c2 is not get_connected(asc, asc.tail)
    assert not asc.subject
    assert not asc.head_subject
    assert not asc.tail_subject
    assert not element_factory.lselect(UML.Property)
def test_disconnect_of_second_association_should_leave_model_in_tact(
    connected_association, clone
):
    """Disconnecting one of two presentations keeps the shared UML model."""
    asc, c1, c2 = connected_association
    new = clone(asc)
    disconnect(new, new.head)
    assert asc.subject.memberEnd[0].type is c1.subject
    assert asc.subject.memberEnd[1].type is c2.subject
    assert new.subject is asc.subject
def test_disconnect_of_navigable_end_should_remove_owner_relationship(
    connected_association, element_factory
):
    """Disconnecting a navigable end also removes the owned attribute."""
    asc, c1, c2 = connected_association
    UML.recipes.set_navigability(asc.subject, asc.head_subject, True)
    assert asc.head_subject in c2.subject.ownedAttribute
    disconnect(asc, asc.head)
    assert not asc.subject
    assert not asc.head_subject
    assert not asc.tail_subject
    assert not element_factory.lselect(UML.Property)
def test_allow_reconnect_for_single_presentation(connected_association, create):
    """A lone presentation may reconnect its head to another class."""
    asc, c1, c2 = connected_association
    c3 = create(ClassItem, UML.Class)
    assert allow(asc, asc.head, c3)
def test_allow_reconnect_on_same_class_for_multiple_presentations(
    connected_association, clone, create
):
    """A cloned presentation may glue to the classes its twin connects."""
    asc, c1, c2 = connected_association
    new = clone(asc)
    assert allow(new, new.head, c1)
    assert allow(new, new.tail, c2)
def test_allow_reconnect_if_only_one_connected_presentations(
    connected_association, clone, create
):
    """Reconnect is allowed while only one presentation is connected."""
    asc, c1, c2 = connected_association
    clone(asc)
    c3 = create(ClassItem, UML.Class)
    assert allow(asc, asc.head, c3)
def METHOD_NAME(
    connected_association, element_factory
):
    """Connecting the same classes in a second diagram reuses the existing
    association and its end subjects (head-to-head orientation)."""
    asc, c1, c2 = connected_association
    diagram2 = element_factory.create(Diagram)
    c3 = diagram2.create(ClassItem, subject=c1.subject)
    c4 = diagram2.create(ClassItem, subject=c2.subject)
    asc2 = diagram2.create(AssociationItem)
    connect(asc2, asc2.head, c3)
    connect(asc2, asc2.tail, c4)
    assert asc.subject is asc2.subject
    assert asc.head_subject is asc2.head_subject
    assert asc.tail_subject is asc2.tail_subject
def test_create_association_in_new_diagram_reversed_should_reuse_existing(
    connected_association, element_factory
):
    """Connecting the same classes reversed still reuses the association,
    with the end subjects swapped."""
    asc, c1, c2 = connected_association
    diagram2 = element_factory.create(Diagram)
    c3 = diagram2.create(ClassItem, subject=c1.subject)
    c4 = diagram2.create(ClassItem, subject=c2.subject)
    asc2 = diagram2.create(AssociationItem)
    connect(asc2, asc2.tail, c3)
    connect(asc2, asc2.head, c4)
    assert asc.subject is asc2.subject
    assert asc.head_subject is asc2.tail_subject
    assert asc.tail_subject is asc2.head_subject
def test_disconnect_association_in_new_diagram_should_clear_ends(
    connected_association, element_factory
):
    """Disconnecting the original presentation clears its subject and ends
    even when a second diagram holds another presentation."""
    asc, c1, c2 = connected_association
    diagram2 = element_factory.create(Diagram)
    c3 = diagram2.create(ClassItem, subject=c1.subject)
    c4 = diagram2.create(ClassItem, subject=c2.subject)
    asc2 = diagram2.create(AssociationItem)
    connect(asc2, asc2.tail, c3)
    connect(asc2, asc2.head, c4)
    disconnect(asc, asc.head)
    assert not asc.subject
    assert not asc.head_subject
    assert not asc.tail_subject
2,093 | test add noise column df | import numpy as np
import pandas as pd
import pytest
from numpy.core.fromnumeric import sort
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
def evaluated_fi_df_template(features, importance=None, n=None):
    """Build a feature-importance frame indexed by feature name (no index name).

    Columns are importance, stddev, p_value and n. When *importance* is omitted
    it is drawn from a seeded standard-normal generator; stddev always takes
    the generator's next draw, and n defaults to 5.
    """
    rng = np.random.default_rng(0)
    count = len(features)
    if importance is None:
        # First draw of the seeded stream supplies the importances; when the
        # caller provides importances, that draw is skipped entirely.
        importance = rng.standard_normal(count)
    frame = pd.DataFrame(index=pd.Index(features))
    frame["importance"] = importance
    frame["stddev"] = rng.standard_normal(count)
    frame["p_value"] = None
    frame["n"] = 5 if n is None else n
    frame.index.name = None
    return frame
@pytest.fixture
def sample_features():
    """Five feature names shared by the importance-df fixtures."""
    return ["a", "b", "c", "d", "e"]
@pytest.fixture
def sample_importance_df_1(sample_features):
    """Importance frame with two unevaluated features (n=0, importance=None)."""
    return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1.0, None], n=[10, 5, 0, 5, 0])
@pytest.fixture
def sample_importance_df_2(sample_features):
    """Second importance frame whose evaluated/unevaluated split differs from df_1."""
    return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])
def METHOD_NAME():
    """Noise columns are appended after the original columns of the frame."""
    # test noise columns are appended to input dataframe and feature_metadata
    X = pd.DataFrame({"a": [1, 2]})
    args = {"rng": np.random.default_rng(0), "count": 2}
    X_noised, noise_columns = add_noise_column(X, **args)
    expected_features = X.columns.tolist() + noise_columns
    assert expected_features == X_noised.columns.tolist()
def test_merge_importance_dfs_base(sample_features):
    """With no previous df, the current df is returned unchanged."""
    # test the scenario when previous feature importance df is none
    prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)
    assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df
def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):
    """Stats from the same fitted model are pooled (weighted scores, summed n)."""
    # test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model
    prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
    result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set())
    # `score == score` filters NaN (NaN != NaN), mapping it to None for comparison.
    assert [score if score == score else None for score in result_df["importance"].tolist()] == [0.0, 0.1, 0.1, 1.0, None]
    assert result_df["n"].tolist() == [15, 15, 10, 5, 0]
def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):
    """Stats from a previous fit are replaced by current-fit values where available."""
    # test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model
    prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
    using_prev_fit_fi = set(sample_features)
    result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
    # Only features still lacking current-fit stats remain marked as prev-fit.
    assert len(using_prev_fit_fi) == 2
    assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, -0.1, 0.1, 1.0, None]
    assert result_df["n"].tolist() == [5, 10, 10, 5, 0]
def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):
    """Mixed case: one prev-fit feature is replaced, same-fit features are pooled."""
    # test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models
    prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
    using_prev_fit_fi = set([sample_features[0]])
    result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
    assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, 0.0, 0.1, 1.0, None]
    assert result_df["n"].tolist() == [5, 15, 10, 5, 0]
    # All prev-fit markers are consumed once current-fit stats exist.
    assert using_prev_fit_fi == set()
def test_sort_features_by_priority_base(sample_features):
    """Without prior importance info, input order is preserved."""
    # test the ordering of feature importance computation when no prior feature importance computation was done
    sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())
    assert sorted_features == sample_features
def test_sort_features_by_priority_same_model(sample_features):
    """With same-model history, features are ordered by ascending importance."""
    # test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done
    prev_importance_df = evaluated_fi_df_template(sample_features)
    sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set())
    assert sorted_features == prev_importance_df.sort_values("importance").index.tolist()
def test_sort_features_by_priority_different_model(sample_features):
    """Prev-fit features come first, then current-fit, each sorted by importance."""
    # test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done
    prev_importance_df = evaluated_fi_df_template(sample_features)
    using_prev_fit_fi = sample_features[-2:]
    sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
    sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist()
    sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist()
    expected_features = sorted_prev_fit_features + sorted_curr_fit_features
    assert sorted_features == expected_features
def test_sort_features_by_priority_all(sample_features):
    """Unevaluated features come first, then prev-fit, then current-fit features."""
    # test the ordering of feature importance computation when feature impotance computation comes from mix of current and previous fit models,
    # and some feature are unevaluated
    length = len(sample_features)
    using_prev_fit_fi = set(sample_features[: length // 3])
    evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[: length // 2]), unevaluated_fi_df_template(sample_features[length // 2 :])
    prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])
    sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
    # Expected: unevaluated first, then evaluated prev-fit ordered by importance,
    # then evaluated current-fit ordered by importance.
    unevaluated_features = unevaluated_rows.index.tolist()
    sorted_prev_fit_features = (
        evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (evaluated_rows.index.isin(using_prev_fit_fi))]
        .sort_values("importance")
        .index.tolist()
    )
    sorted_curr_fit_features = (
        evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (~evaluated_rows.index.isin(using_prev_fit_fi))]
        .sort_values("importance")
        .index.tolist()
    )
    expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features
    assert sorted_features == expected_features
2,094 | test nonhashable | import unittest
import twowaymap
class TestTwoWayMap(unittest.TestCase):
    """Exercise TwoWayMap with each combination of side policies
    (set / list / "strict" / "single")."""
    def assertTwoWayMap(self, twmap, forward, reverse):
        """Assert that the map's forward and reverse views equal the given dicts."""
        map_repr = (
            { k: twmap.lookup_left(k) for k in twmap.left_all() },
            { k: twmap.lookup_right(k) for k in twmap.right_all() }
        )
        self.assertEqual(map_repr, (forward, reverse))
    def test_set_list(self):
        # Left side collects into sets, right side into lists.
        tmap = twowaymap.TwoWayMap(left=set, right=list)
        self.assertFalse(tmap)
        tmap.insert(1, "a")
        self.assertTrue(tmap)
        self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": {1}})
        tmap.insert(1, "a")   # should be a no-op, since this pair already exists
        tmap.insert(1, "b")
        tmap.insert(2, "a")
        self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["a"]}, {"a": {1,2}, "b": {1}})
        tmap.insert(1, "b")
        tmap.insert(2, "b")
        self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["a", "b"]}, {"a": {1,2}, "b": {1,2}})
        tmap.remove(1, "b")
        tmap.remove(2, "b")
        self.assertTwoWayMap(tmap, {1: ["a"], 2: ["a"]}, {"a": {1,2}})
        tmap.insert(1, "b")
        tmap.insert(2, "b")
        tmap.remove_left(1)
        self.assertTwoWayMap(tmap, {2: ["a", "b"]}, {"a": {2}, "b": {2}})
        tmap.insert(1, "a")
        tmap.insert(2, "b")
        tmap.remove_right("b")
        self.assertTwoWayMap(tmap, {1: ["a"], 2: ["a"]}, {"a": {1,2}})
        self.assertTrue(tmap)
        tmap.clear()
        self.assertTwoWayMap(tmap, {}, {})
        self.assertFalse(tmap)
    def test_set_single(self):
        # "single" right side: a new pair displaces the previous right value.
        tmap = twowaymap.TwoWayMap(left=set, right="single")
        self.assertFalse(tmap)
        tmap.insert(1, "a")
        self.assertTrue(tmap)
        self.assertTwoWayMap(tmap, {1: "a"}, {"a": {1}})
        tmap.insert(1, "a")   # should be a no-op, since this pair already exists
        tmap.insert(1, "b")
        tmap.insert(2, "a")
        self.assertTwoWayMap(tmap, {1: "b", 2: "a"}, {"a": {2}, "b": {1}})
        tmap.insert(1, "b")
        tmap.insert(2, "b")
        self.assertTwoWayMap(tmap, {1: "b", 2: "b"}, {"b": {1,2}})
        tmap.remove(1, "b")
        self.assertTwoWayMap(tmap, {2: "b"}, {"b": {2}})
        tmap.remove(2, "b")
        self.assertTwoWayMap(tmap, {}, {})
        tmap.insert(1, "b")
        tmap.insert(2, "b")
        self.assertTwoWayMap(tmap, {1: "b", 2: "b"}, {"b": {1,2}})
        tmap.remove_left(1)
        self.assertTwoWayMap(tmap, {2: "b"}, {"b": {2}})
        tmap.insert(1, "a")
        tmap.insert(2, "b")
        tmap.remove_right("b")
        self.assertTwoWayMap(tmap, {1: "a"}, {"a": {1}})
        self.assertTrue(tmap)
        tmap.clear()
        self.assertTwoWayMap(tmap, {}, {})
        self.assertFalse(tmap)
    def test_strict_list(self):
        # "strict" left side: mapping a right value to a second left key raises.
        tmap = twowaymap.TwoWayMap(left="strict", right=list)
        self.assertFalse(tmap)
        tmap.insert(1, "a")
        self.assertTrue(tmap)
        self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": 1})
        tmap.insert(1, "a")   # should be a no-op, since this pair already exists
        tmap.insert(1, "b")
        with self.assertRaises(ValueError):
            tmap.insert(2, "a")
        self.assertTwoWayMap(tmap, {1: ["a", "b"]}, {"a": 1, "b": 1})
        tmap.insert(1, "b")
        with self.assertRaises(ValueError):
            tmap.insert(2, "b")
        tmap.insert(2, "c")
        self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["c"]}, {"a": 1, "b": 1, "c": 2})
        tmap.remove(1, "b")
        self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2})
        # Removing a non-existent pair is a no-op.
        tmap.remove(2, "b")
        self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2})
        tmap.insert(1, "b")
        with self.assertRaises(ValueError):
            tmap.insert(2, "b")
        self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["c"]}, {"a": 1, "b": 1, "c": 2})
        tmap.remove_left(1)
        self.assertTwoWayMap(tmap, {2: ["c"]}, {"c": 2})
        tmap.insert(1, "a")
        tmap.insert(2, "b")
        tmap.remove_right("b")
        self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2})
        self.assertTrue(tmap)
        tmap.clear()
        self.assertTwoWayMap(tmap, {}, {})
        self.assertFalse(tmap)
    def test_strict_single(self):
        # Strict left with single right: displacement allowed, sharing is not.
        tmap = twowaymap.TwoWayMap(left="strict", right="single")
        tmap.insert(1, "a")
        tmap.insert(2, "b")
        tmap.insert(2, "c")
        self.assertTwoWayMap(tmap, {1: "a", 2: "c"}, {"a": 1, "c": 2})
        with self.assertRaises(ValueError):
            tmap.insert(2, "a")
        tmap.insert(2, "c")   # This pair already exists, so not an error.
        self.assertTwoWayMap(tmap, {1: "a", 2: "c"}, {"a": 1, "c": 2})
    def METHOD_NAME(self):
        # Test that we don't get into an inconsistent state if we attempt to use a non-hashable value.
        tmap = twowaymap.TwoWayMap(left=list, right=list)
        tmap.insert(1, "a")
        self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": [1]})
        with self.assertRaises(TypeError):
            tmap.insert(1, {})
        with self.assertRaises(TypeError):
            tmap.insert({}, "a")
        self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": [1]})
# Allow running this test module directly with `python twowaymap_test.py`.
if __name__ == "__main__":
    unittest.main()
2,095 | get default display mode | # ***************************************************************************
# * Copyright (c) 2017 Markus Hovorka <m.hovorka@live.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver equation base object"
__author__ = "Markus Hovorka"
__url__ = "https://www.freecad.org"

## \addtogroup FEM
# @{

import FreeCAD

# pivy (Coin3D bindings) is only needed, and only available, when the GUI
# is running; headless FreeCAD skips the import.
if FreeCAD.GuiUp:
    from pivy import coin
class BaseProxy(object):
    """Shared document-object proxy for FEM solver equation objects.

    Installs itself on the document object and adds the common
    ``References`` property used by all equations.
    """

    # Document object type this proxy is designed for.
    BaseType = "App::FeaturePython"

    def __init__(self, obj):
        """Attach this proxy to *obj* and create its base properties."""
        obj.Proxy = self
        obj.addProperty(
            "App::PropertyLinkSubList",
            "References",
            "Base",
            "")

    def execute(self, obj):
        """Recompute hook; equation objects have nothing to compute."""
        return True
class BaseViewProxy(object):
    """Shared view provider for FEM solver equation objects.

    Provides a single "Default" display mode with an empty scene graph.
    """

    def __init__(self, vobj):
        """Register this proxy on the view object *vobj*."""
        vobj.Proxy = self

    def attach(self, vobj):
        """Set up the scene graph: one empty group for the only mode."""
        vobj.addDisplayMode(coin.SoGroup(), "Default")

    def getDisplayModes(self, obj):
        "Return a list of display modes."
        return ["Default"]

    def METHOD_NAME(self):
        """Return the display mode used when none has been saved."""
        return "Default"

    def setDisplayMode(self, mode):
        """Map the stored *mode* string to the displayed one (identity)."""
        return mode
class DeformationProxy(BaseProxy):
    """Proxy for the Deformation equation object (no extra behavior)."""
    pass


class DeformationViewProxy(BaseViewProxy):
    """View provider for the Deformation equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationDeformation.svg"


class ElasticityProxy(BaseProxy):
    """Proxy for the Elasticity equation object (no extra behavior)."""
    pass


class ElasticityViewProxy(BaseViewProxy):
    """View provider for the Elasticity equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationElasticity.svg"


class ElectricforceProxy(BaseProxy):
    """Proxy for the Electricforce equation object (no extra behavior)."""
    pass


class ElectricforceViewProxy(BaseViewProxy):
    """View provider for the Electricforce equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationElectricforce.svg"


class ElectrostaticProxy(BaseProxy):
    """Proxy for the Electrostatic equation object (no extra behavior)."""
    pass


class ElectrostaticViewProxy(BaseViewProxy):
    """View provider for the Electrostatic equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationElectrostatic.svg"


class FlowProxy(BaseProxy):
    """Proxy for the Flow equation object (no extra behavior)."""
    pass


class FlowViewProxy(BaseViewProxy):
    """View provider for the Flow equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationFlow.svg"


class FluxProxy(BaseProxy):
    """Proxy for the Flux equation object (no extra behavior)."""
    pass


class FluxViewProxy(BaseViewProxy):
    """View provider for the Flux equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationFlux.svg"


class HeatProxy(BaseProxy):
    """Proxy for the Heat equation object (no extra behavior)."""
    pass


class HeatViewProxy(BaseViewProxy):
    """View provider for the Heat equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationHeat.svg"


class MagnetodynamicProxy(BaseProxy):
    """Proxy for the Magnetodynamic equation object (no extra behavior)."""
    pass


class MagnetodynamicViewProxy(BaseViewProxy):
    """View provider for the Magnetodynamic equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationMagnetodynamic.svg"


class Magnetodynamic2DProxy(BaseProxy):
    """Proxy for the Magnetodynamic2D equation object (no extra behavior)."""
    pass


class Magnetodynamic2DViewProxy(BaseViewProxy):
    """View provider for the Magnetodynamic2D equation object."""
    def getIcon(self):
        # Icon shown for this equation in the model tree.
        return ":/icons/FEM_EquationMagnetodynamic2D.svg"
## @} |
2,096 | write parameters | # Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from spinn_utilities.overrides import overrides
from spinn_front_end_common.interface.ds import DataType
from spinn_front_end_common.utilities.constants import (
BYTES_PER_WORD, BYTES_PER_SHORT)
from spynnaker.pyNN.data import SpynnakerDataView
from .abstract_timing_dependence import AbstractTimingDependence
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import (
SynapseStructureWeightAccumulator)
from spynnaker.pyNN.models.neuron.plasticity.stdp.common import (
STDP_FIXED_POINT_ONE)
class TimingDependenceRecurrent(AbstractTimingDependence):
    """
    A timing dependence STDP rule based on recurrences.
    """
    __slots__ = [
        "__accumulator_depression_plus_one",
        "__accumulator_potentiation_minus_one",
        "__dual_fsm",
        "__mean_post_window",
        "__mean_pre_window",
        "__synapse_structure",
        "__a_plus",
        "__a_minus"]

    __PARAM_NAMES = (
        'accumulator_depression', 'accumulator_potentiation',
        'mean_pre_window', 'mean_post_window', 'dual_fsm')

    default_parameters = {
        'accumulator_depression': -6, 'accumulator_potentiation': 6,
        'mean_pre_window': 35.0, 'mean_post_window': 35.0, 'dual_fsm': True}

    def __init__(
            self, accumulator_depression=default_parameters[
                'accumulator_depression'],
            accumulator_potentiation=default_parameters[
                'accumulator_potentiation'],
            mean_pre_window=default_parameters['mean_pre_window'],
            mean_post_window=default_parameters['mean_post_window'],
            dual_fsm=default_parameters['dual_fsm'],
            A_plus=0.01, A_minus=0.01):
        """
        :param int accumulator_depression:
        :param int accumulator_potentiation:
        :param float mean_pre_window:
        :param float mean_post_window:
        :param bool dual_fsm:
        :param float A_plus: :math:`A^+`
        :param float A_minus: :math:`A^-`
        """
        # pylint: disable=too-many-arguments
        # The +1/-1 offsets are pre-applied here so the C implementation can
        # compare with a simple inequality.
        self.__accumulator_depression_plus_one = accumulator_depression + 1
        self.__accumulator_potentiation_minus_one = \
            accumulator_potentiation - 1
        self.__mean_pre_window = mean_pre_window
        self.__mean_post_window = mean_post_window
        self.__dual_fsm = dual_fsm
        self.__a_plus = A_plus
        self.__a_minus = A_minus
        self.__synapse_structure = SynapseStructureWeightAccumulator()

    @property
    def A_plus(self):
        r"""
        :math:`A^+`

        :rtype: float
        """
        return self.__a_plus

    @A_plus.setter
    def A_plus(self, new_value):
        self.__a_plus = new_value

    @property
    def A_minus(self):
        r"""
        :math:`A^-`

        :rtype: float
        """
        return self.__a_minus

    @A_minus.setter
    def A_minus(self, new_value):
        self.__a_minus = new_value

    @overrides(AbstractTimingDependence.is_same_as)
    def is_same_as(self, timing_dependence):
        if timing_dependence is None or not isinstance(
                timing_dependence, TimingDependenceRecurrent):
            return False
        # Bug fix: the original read public attributes (e.g.
        # ``timing_dependence.accumulator_depression_plus_one``) that this
        # class never defines, so comparing two instances always raised
        # AttributeError.  Compare the private fields instead; the ``__``
        # prefix is name-mangled identically for ``timing_dependence``
        # because it is an instance of this same class (checked above).
        # Note: dual_fsm is intentionally not part of the comparison, as
        # in the original.
        return (
            (self.__accumulator_depression_plus_one ==
             timing_dependence.__accumulator_depression_plus_one) and
            (self.__accumulator_potentiation_minus_one ==
             timing_dependence.__accumulator_potentiation_minus_one) and
            (self.__mean_pre_window ==
             timing_dependence.__mean_pre_window) and
            (self.__mean_post_window ==
             timing_dependence.__mean_post_window))

    @property
    def vertex_executable_suffix(self):
        """
        The suffix to be appended to the vertex executable for this rule.

        :rtype: str
        """
        if self.__dual_fsm:
            return "recurrent_dual_fsm"
        return "recurrent_pre_stochastic"

    @property
    def pre_trace_n_bytes(self):
        """
        The number of bytes used by the pre-trace of the rule per neuron.

        :rtype: int
        """
        # When using the separate FSMs, pre-trace contains window length,
        # otherwise it's in the synapse
        return BYTES_PER_SHORT if self.__dual_fsm else 0

    @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes)
    def get_parameters_sdram_usage_in_bytes(self):
        # 2 * 32-bit parameters
        # 2 * LUTS with STDP_FIXED_POINT_ONE * 16-bit entries
        return (2 * BYTES_PER_WORD) + (
            2 * STDP_FIXED_POINT_ONE * BYTES_PER_SHORT)

    @property
    def n_weight_terms(self):
        """
        The number of weight terms expected by this timing rule.

        :rtype: int
        """
        return 1

    @overrides(AbstractTimingDependence.METHOD_NAME)
    def METHOD_NAME(
            self, spec, global_weight_scale, synapse_weight_scales):
        """Write this rule's parameters and lookup tables to *spec*."""
        # Write parameters
        spec.write_value(data=self.__accumulator_depression_plus_one,
                         data_type=DataType.INT32)
        spec.write_value(data=self.__accumulator_potentiation_minus_one,
                         data_type=DataType.INT32)
        # Convert mean times into machine timesteps
        time_step_per_ms = SpynnakerDataView.get_simulation_time_step_per_ms()
        mean_pre_timesteps = float(self.__mean_pre_window * time_step_per_ms)
        mean_post_timesteps = float(self.__mean_post_window * time_step_per_ms)
        # Write lookup tables
        self._write_exp_dist_lut(spec, mean_pre_timesteps)
        self._write_exp_dist_lut(spec, mean_post_timesteps)

    @staticmethod
    def _write_exp_dist_lut(spec, mean):
        """
        Write an inverse-CDF lookup table for an exponential distribution.

        :param .DataSpecificationGenerator spec:
        :param float mean:
        """
        indices = numpy.arange(STDP_FIXED_POINT_ONE)
        # Inverse CDF of Exp(1/mean): -mean * ln(1 - u), sampled at
        # u = i / STDP_FIXED_POINT_ONE (i = 0 yields 0).
        inv_cdf = numpy.log(1.0 - indices/float(STDP_FIXED_POINT_ONE)) * -mean
        spec.write_array(
            inv_cdf.astype(numpy.uint16), data_type=DataType.UINT16)

    @property
    def synaptic_structure(self):
        """
        The synaptic structure of the plastic part of the rows.

        :rtype: AbstractSynapseStructure
        """
        return self.__synapse_structure

    @overrides(AbstractTimingDependence.get_parameter_names)
    def get_parameter_names(self):
        return self.__PARAM_NAMES
2,097 | build transform | # Copyright 2019 Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import pytest
import rclpy
from tf2_ros.buffer import Buffer
from geometry_msgs.msg import TransformStamped, PointStamped
class TestBuffer:
    """Unit tests for tf2_ros.Buffer: lookup, async waiting and caching."""

    def METHOD_NAME(self, target, source, rclpy_time):
        """Return a TransformStamped from frame *source* to frame *target*.

        The translation uses fixed, recognisable values and the rotation is
        identity, so tests can assert exact round-trip equality.
        """
        transform = TransformStamped()
        transform.header.frame_id = target
        transform.header.stamp = rclpy_time.to_msg()
        transform.child_frame_id = source
        transform.transform.translation.x = 42.0
        transform.transform.translation.y = -3.14
        transform.transform.translation.z = 0.0
        transform.transform.rotation.w = 1.0
        transform.transform.rotation.x = 0.0
        transform.transform.rotation.y = 0.0
        transform.transform.rotation.z = 0.0
        return transform

    def test_can_transform_valid_transform(self):
        """A stored transform can be found and looked up unchanged."""
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        # set_transform returns None on success.
        assert buffer.set_transform(transform, 'unittest') is None
        assert buffer.can_transform('foo', 'bar', rclpy_time)
        output = buffer.lookup_transform('foo', 'bar', rclpy_time)
        assert transform.child_frame_id == output.child_frame_id
        assert transform.transform.translation.x == output.transform.translation.x
        assert transform.transform.translation.y == output.transform.translation.y
        assert transform.transform.translation.z == output.transform.translation.z

    def test_await_transform_immediately_available(self):
        """The async lookup completes on first resume when data is present."""
        # wait for a transform that is already available to test short-cut code
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        buffer.set_transform(transform, 'unittest')
        coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)
        # Driving the coroutine by hand: StopIteration carries the result.
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_full_immediately_available(self):
        """Same as above, via the "full" advanced-API lookup variant."""
        # wait for a transform that is already available to test short-cut code
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        buffer.set_transform(transform, 'unittest')
        coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_delayed(self):
        """The async lookup suspends, then resolves once data arrives."""
        # wait for a transform that is not yet available
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)
        # First resume suspends: no transform exists yet.
        coro.send(None)
        buffer.set_transform(transform, 'unittest')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_full_delayed(self):
        """Same as above, via the "full" advanced-API lookup variant."""
        # wait for a transform that is not yet available
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')
        coro.send(None)
        buffer.set_transform(transform, 'unittest')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_buffer_non_default_cache(self):
        """A buffer built with a custom cache_time still stores and serves."""
        buffer = Buffer(cache_time=rclpy.duration.Duration(seconds=10.0))
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        assert buffer.set_transform(transform, 'unittest') is None
        assert buffer.can_transform('foo', 'bar', rclpy_time)
        output = buffer.lookup_transform('foo', 'bar', rclpy_time)
        assert transform.child_frame_id == output.child_frame_id
        assert transform.transform.translation.x == output.transform.translation.x
        assert transform.transform.translation.y == output.transform.translation.y
        assert transform.transform.translation.z == output.transform.translation.z
2,098 | get vertexai job client | # Copyright 2022 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CRMint's abstract worker dealing with Vertex AI."""
import time
import google.auth
from google.cloud import aiplatform
from google.cloud.aiplatform_v1.types import job_state as js
from google.cloud.aiplatform_v1.types import pipeline_state as ps
from jobs.workers import worker
# Pipeline states in which polling can stop: the pipeline has finished
# (successfully or not), was cancelled, or was paused by an operator.
_PIPELINE_COMPLETE_STATES = frozenset([
    ps.PipelineState.PIPELINE_STATE_SUCCEEDED,
    ps.PipelineState.PIPELINE_STATE_FAILED,
    ps.PipelineState.PIPELINE_STATE_CANCELLED,
    ps.PipelineState.PIPELINE_STATE_PAUSED])

# Batch-prediction job states in which polling can stop, mirroring the
# pipeline states in this same constant block.
_JOB_COMPLETE_STATES = frozenset([
    js.JobState.JOB_STATE_SUCCEEDED,
    js.JobState.JOB_STATE_FAILED,
    js.JobState.JOB_STATE_CANCELLED,
    js.JobState.JOB_STATE_PAUSED])
class VertexAIWorker(worker.Worker):
  """Worker that polls job status and respawns itself if the job is not done."""

  @staticmethod
  def _client_options(location):
    """Returns client options targeting the regional Vertex AI endpoint.

    Factored out of the four client getters below, which previously each
    rebuilt the same dict.
    """
    return {'api_endpoint': f'{location}-aiplatform.googleapis.com'}

  def METHOD_NAME(self, location):
    """Returns a Vertex AI JobServiceClient for the given location."""
    return aiplatform.gapic.JobServiceClient(
        client_options=self._client_options(location))

  def _get_vertexai_pipeline_client(self, location):
    """Returns a Vertex AI PipelineServiceClient for the given location."""
    return aiplatform.gapic.PipelineServiceClient(
        client_options=self._client_options(location))

  def _get_vertexai_dataset_client(self, location):
    """Returns a Vertex AI DatasetServiceClient for the given location."""
    return aiplatform.gapic.DatasetServiceClient(
        client_options=self._client_options(location))

  def _get_vertexai_model_client(self, location):
    """Returns a Vertex AI ModelServiceClient for the given location."""
    return aiplatform.gapic.ModelServiceClient(
        client_options=self._client_options(location))

  def _get_batch_prediction_job(self, job_client, job_name):
    """Fetches a batch prediction job by fully qualified resource name."""
    return job_client.get_batch_prediction_job(name=job_name)

  def _get_training_pipeline(self, pipeline_client, pipeline_name):
    """Fetches a training pipeline by fully qualified resource name."""
    return pipeline_client.get_training_pipeline(name=pipeline_name)

  def _get_location_from_pipeline_name(self, pipeline_name):
    """Extracts the location from a projects/*/locations/*/... name."""
    return pipeline_name.split('/')[3]

  def _get_location_from_job_name(self, job_name):
    """Extracts the location from a projects/*/locations/*/... name."""
    return job_name.split('/')[3]

  def _get_project_id(self):
    """Returns the project id from application default credentials."""
    _, project_id = google.auth.default()
    return project_id

  def _get_parent_resource(self, location):
    """Returns the projects/{id}/locations/{location} parent path."""
    project_id = self._get_project_id()
    return f'projects/{project_id}/locations/{location}'

  @staticmethod
  def _poll_delay(waiting_time):
    """Returns the next polling delay in seconds for the elapsed time.

    Backs off from 5s to 30s as minutes elapse.  The index is clamped so
    a waiting_time at the 300s relay threshold can never index past the
    end of the table (the original expression was fragile there).
    """
    return [5, 10, 15, 20, 30][min(int(waiting_time / 60), 4)]

  def _wait_for_pipeline(self, pipeline):
    """Waits for pipeline completion.

    It will relay to VertexAIWaiter if it takes too long.

    NOTE(review): this loop assumes reading ``pipeline.state`` reflects the
    live server-side state on each iteration — confirm the callers pass a
    refreshing resource object.
    """
    delay = 5
    waiting_time = 5
    time.sleep(delay)
    while pipeline.state not in _PIPELINE_COMPLETE_STATES:
      if waiting_time > 300:  # Once 5 minute has passed, spawn VertexAIWaiter.
        self._enqueue(
            'VertexAIWaiter', {
                'id': pipeline.name,
                'worker_class': 'VertexAITabularTrainer'
            }, 60)
        return None
      if delay < 30:
        delay = self._poll_delay(waiting_time)
      time.sleep(delay)
      waiting_time += delay
    if pipeline.state == ps.PipelineState.PIPELINE_STATE_FAILED:
      raise worker.WorkerException(f'Training pipeline {pipeline.name} failed.')

  def _wait_for_job(self, job):
    """Waits for batch prediction job completion.

    It will relay to VertexAIWaiter if it takes too long.

    NOTE(review): same live-state assumption as ``_wait_for_pipeline``.
    """
    delay = 5
    waiting_time = 5
    time.sleep(delay)
    while job.state not in _JOB_COMPLETE_STATES:
      if waiting_time > 300:  # Once 5 minute has passed, spawn VertexAIWaiter.
        self._enqueue(
            'VertexAIWaiter', {
                'id': job.name,
                'worker_class': 'VertexAIBatchPredictorToBQ'},
            60)
        return None
      if delay < 30:
        delay = self._poll_delay(waiting_time)
      time.sleep(delay)
      waiting_time += delay
    if job.state == js.JobState.JOB_STATE_FAILED:
      raise worker.WorkerException(f'Job {job.name} failed.')

  def _clean_up_datasets(self, dataset_client, project, region, display_name):
    """Deletes all but the newest dataset with the given display name."""
    parent = f'projects/{project}/locations/{region}'
    datasets = list(
        dataset_client.list_datasets({
            'parent': parent,
            'filter': f'display_name="{display_name}"',
            'order_by': 'create_time asc'}))
    configs = map(lambda x: (x.create_time, {'name': x.name}), datasets)
    sorted_configs = sorted(configs)
    # Keep the newest entry (last after sorting by create_time).
    for _, config in sorted_configs[:-1]:
      dataset_name = config['name']
      dataset_client.delete_dataset({'name': dataset_name})
      self.log_info(f'Deleted dataset: {dataset_name}')

  def _clean_up_training_pipelines(self, pipeline_client, project, region,
                                   display_name):
    """Deletes all but the newest pipeline, cancelling running ones first."""
    parent = f'projects/{project}/locations/{region}'
    training_pipelines = list(
        pipeline_client.list_training_pipelines({
            'parent': parent,
            'filter': f'display_name="{display_name}"'}))
    configs = map(
        lambda x: (x.create_time, {'state': x.state, 'name': x.name}),
        training_pipelines)
    sorted_configs = sorted(configs)
    # Keep the newest entry (last after sorting by create_time).
    for _, config in sorted_configs[:-1]:
      training_pipeline_name = config['name']
      if config['state'] in _PIPELINE_COMPLETE_STATES:
        pipeline_client.delete_training_pipeline(name=training_pipeline_name)
      else:
        # Still running: cancel before deleting.
        pipeline_client.cancel_training_pipeline(
            name=training_pipeline_name, timeout=300)
        pipeline_client.delete_training_pipeline(name=training_pipeline_name)
      self.log_info(f'Deleted training pipeline: {training_pipeline_name}')

  def _clean_up_batch_predictions(self, job_client, project, region,
                                  display_name):
    """Deletes all but the newest batch prediction, cancelling live ones."""
    parent = f'projects/{project}/locations/{region}'
    batch_predictions = list(
        job_client.list_batch_prediction_jobs({
            'parent': parent,
            'filter': f'display_name="{display_name}"'}))
    configs = map(
        lambda x: (x.create_time, {'state': x.state, 'name': x.name}),
        batch_predictions)
    sorted_configs = sorted(configs)
    # Keep the newest entry (last after sorting by create_time).
    for _, config in sorted_configs[:-1]:
      batch_prediction_name = config['name']
      if config['state'] in _JOB_COMPLETE_STATES:
        job_client.delete_batch_prediction_job(name=batch_prediction_name)
      else:
        # Still running: cancel before deleting.
        job_client.cancel_batch_prediction_job(
            name=batch_prediction_name, timeout=300)
        job_client.delete_batch_prediction_job(name=batch_prediction_name)
      self.log_info(f'Deleted batch prediction: {batch_prediction_name}')
2,099 | get cluster log groups from boto3 | import json
import logging
import boto3
import utils
from botocore.exceptions import ClientError
LOGGER = logging.getLogger(__name__)
def _dumps_json(obj):
"""Dump obj to a JSON string."""
return json.dumps(obj, indent=2)
def METHOD_NAME(cluster_log_group_prefix):
    """
    Get log groups with cluster log group prefix from boto3.

    Raises ClientError.
    """
    try:
        log_groups = (
            boto3.client("logs").describe_log_groups(logGroupNamePrefix=cluster_log_group_prefix).get("logGroups")
        )
        LOGGER.info("Log groups: {0}\n".format(_dumps_json(log_groups)))
        return log_groups
    except ClientError as e:
        LOGGER.error("Unable to retrieve any log group with prefix {0}\nError: {1}".format(cluster_log_group_prefix, e))
        # Bug fix: the original did ``raise ClientError``, which raises the
        # *class* with no constructor arguments (itself a TypeError, since
        # ClientError requires error_response and operation_name) and would
        # discard the original exception.  A bare ``raise`` re-raises the
        # caught ClientError with its traceback intact.
        raise
def _get_log_stream_pages(log_client, log_group_name):
    """
    Yield successive pages of log streams for the given log group.

    Raises ClientError if the log group doesn't exist.
    """
    token = None
    while True:
        params = {"logGroupName": log_group_name}
        if token:
            params["nextToken"] = token
        response = log_client.describe_log_streams(**params)
        page = response.get("logStreams")
        LOGGER.info("Log streams for {group}:\n{streams}".format(group=log_group_name, streams=_dumps_json(page)))
        yield page
        token = response.get("nextToken")
        if token is None:
            break
def get_log_streams(log_group_name):
    """
    Yield every log stream in the given log group, page by page.

    Raises ClientError if the log group doesn't exist.
    """
    client = boto3.client("logs")
    for page in _get_log_stream_pages(client, log_group_name):
        yield from page
def get_log_events(log_group_name, log_stream_name):
    """
    Get log events for the given log_stream_name.

    Raises ClientError if the given log group or stream doesn't exist.
    """
    logs_client = boto3.client("logs")
    # get_log_events is not page-able using utils.paginate_boto3
    response = logs_client.get_log_events(
        logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True
    )
    prev_token = None
    next_token = response.get("nextForwardToken")
    LOGGER.info(f"Starting pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}")
    # GetLogEvents signals the end of the stream by returning the same
    # forward token that was passed in, so loop until the token repeats.
    while next_token != prev_token:
        for event in response.get("events"):
            LOGGER.info(f"event from stream {log_group_name}/{log_stream_name}:\n{json.dumps(event, indent=2)}")
            yield event
        response = logs_client.get_log_events(
            logGroupName=log_group_name, logStreamName=log_stream_name, nextToken=next_token
        )
        prev_token = next_token
        next_token = response.get("nextForwardToken")
        LOGGER.info(f"Continuing pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}")
def get_ec2_instances():
    """Yield every EC2 instance returned by describe_instances, across pages."""
    pages = utils.paginate_boto3(boto3.client("ec2").describe_instances)
    for page in pages:
        yield from page.get("Instances")
def _get_log_group_for_stack(stack_name):
    """Return the PhysicalResourceIds of log groups owned by the stack."""
    return [
        resource.get("PhysicalResourceId")
        for resource in utils.get_cfn_resources(stack_name)
        if resource.get("ResourceType") == "AWS::Logs::LogGroup"
    ]
def get_cluster_log_groups(stack_name):
    """Return PhysicalResourceIds of log groups created by the cluster's substacks."""
    groups = []
    for substack in utils.get_substacks(stack_name):
        groups += _get_log_group_for_stack(substack)
    return groups
def delete_log_group(log_group):
    """Delete the given log group, silently ignoring a missing group."""
    try:
        boto3.client("logs").delete_log_group(logGroupName=log_group)
    except ClientError as client_err:
        error = client_err.response.get("Error")
        if error.get("Code") == "ResourceNotFoundException":
            # Log group didn't exist: nothing to delete, nothing to report.
            return
        LOGGER.warning(
            "Error when deleting log group {log_group}: {msg}".format(
                log_group=log_group, msg=error.get("Message")
            )
        )
def delete_log_groups(log_groups):
    """Delete each of the given log groups, if they exist."""
    for group in log_groups:
        delete_log_group(group)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.