id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
7,000 | test queryset not deleted | from unittest.mock import MagicMock, patch
import pytest
from django import forms
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from github3.exceptions import NotFoundError
from ..admin import JSONWidget, ProjectForm, SiteAdminForm, SoftDeletedListFilter
from ..models import Epic, GitHubOrganization
@pytest.mark.django_db
class TestSoftDeletedListFilter:
    """Tests for the admin list filter that hides/shows soft-deleted Epics.

    Fix: the placeholder METHOD_NAME is renamed to ``test_queryset__not_deleted``
    so pytest actually collects it, matching the sibling ``test_queryset__deleted``.
    """

    def test_lookups(self):
        """The filter exposes a single "Deleted" lookup option."""
        args = (
            None,
            {},
            None,
            None,
        )
        lookups = SoftDeletedListFilter(*args).lookups(None, None)
        assert lookups == (("true", _("Deleted")),)

    def test_queryset__not_deleted(self, epic_factory):
        """With no filter parameter, soft-deleted rows are excluded."""
        epic = epic_factory()
        epic_factory(deleted_at=now())
        args = (
            None,
            {},
            None,
            None,
        )
        actual = SoftDeletedListFilter(*args).queryset(None, Epic.objects.all())
        assert list(actual) == [epic]

    def test_queryset__deleted(self, epic_factory):
        """With ``deleted_at=true``, only soft-deleted rows are returned."""
        epic_factory()
        epic = epic_factory(deleted_at=now())
        args = (
            None,
            {"deleted_at": "true"},
            None,
            None,
        )
        actual = SoftDeletedListFilter(*args).queryset(None, Epic.objects.all())
        assert list(actual) == [epic]

    def test_choices(self):
        """choices() yields dicts carrying the admin's expected keys."""
        args = (
            None,
            {},
            None,
            None,
        )
        changelist = MagicMock()
        actual = SoftDeletedListFilter(*args).choices(changelist)
        assert set(next(actual).keys()) == {
            "selected",
            "query_string",
            "display",
        }
        assert set(next(actual).keys()) == {
            "selected",
            "query_string",
            "display",
        }
@pytest.mark.django_db
class TestProjectForm:
    """Validation tests for the admin ProjectForm."""

    def test_clean__repo_missing(self, user_factory):
        """A GitHub repo the app cannot access surfaces a form-wide error."""
        form = ProjectForm({"name": "Test", "repo_owner": "test", "repo_name": "test"})
        # This is how the user gets there in real circumstances, just
        # jammed on:
        form.user = user_factory()
        with patch("metecho.api.admin.gh.get_repo_info") as get_repo_info:
            get_repo_info.side_effect = NotFoundError(MagicMock())
            assert not form.is_valid()
            assert form.errors == {
                "__all__": [
                    "Could not access test/test using GitHub app. "
                    "Does the Metecho app need to be installed for this repository?"
                ],
            }
@pytest.mark.django_db
class TestProjectAdmin:
    """Tests for saving a Project through the Django admin."""

    @pytest.mark.parametrize(
        "repo_image_url, should_fetch",
        (
            ("", True),
            ("https://example.com", False),
        ),
    )
    def test_save(self, admin_client, mocker, repo_image_url, should_fetch):
        """The social-image job is enqueued only when no image URL is set yet."""
        mocker.patch("metecho.api.admin.gh")
        get_social_image_job = mocker.patch("metecho.api.jobs.get_social_image_job")
        response = admin_client.post(
            reverse("admin:api_project_add"),
            data={
                "repo_image_url": repo_image_url,
                "repo_owner": "gh-user",
                "repo_name": "gh-repo",
                "name": "Project 1",
                "org_config_names": "[]",
                "branch_name": "main",
                "latest_sha": "abc123",
                "githubcollaboration_set-TOTAL_FORMS": 0,
                "githubcollaboration_set-INITIAL_FORMS": 0,
            },
        )
        # The form errors are attached as the assertion message to ease debugging.
        assert get_social_image_job.delay.called == should_fetch, response.context[
            "form"
        ].errors
@pytest.mark.django_db
class TestGitHubOrganizationAdmin:
    """Tests for the GitHubOrganization admin pages."""

    def test_github_link(self, admin_client, git_hub_organization):
        """The changelist renders a link to the org's GitHub page."""
        href = f'href="https://github.com/{git_hub_organization.login}"'
        response = admin_client.get(reverse("admin:api_githuborganization_changelist"))
        assert href in str(response.content)

    def test_org__bad(self, admin_client, mocker):
        """When the GitHub app is not installed, no org is created and an error shows."""
        gh = mocker.patch("metecho.api.admin.gh", autospec=True)
        gh.gh_as_org.side_effect = Exception
        url = reverse("admin:api_githuborganization_add")
        response = admin_client.post(url, data={"name": "Test", "login": "test"})
        assert not GitHubOrganization.objects.exists()
        assert b"has not been installed" in response.content

    def test_org__good(self, admin_client, mocker):
        """A reachable org is created with the avatar URL returned by GitHub."""
        gh = mocker.patch("metecho.api.admin.gh", autospec=True)
        gh.gh_as_org.return_value.organization.return_value = mocker.MagicMock(
            avatar_url="http://example.com"
        )
        url = reverse("admin:api_githuborganization_add")
        admin_client.post(url, data={"name": "Test", "login": "test"})
        assert GitHubOrganization.objects.filter(
            name="Test", login="test", avatar_url="http://example.com"
        ).exists()
def test_json_widget():
    """An empty datadict value must be normalised to the JSON string "{}"."""
    widget = JSONWidget()
    assert widget.value_from_datadict({"test": ""}, None, "test") == "{}"
class TestSiteAdminForm:
    """Tests for SiteAdminForm.clean_domain validation."""

    def test_error(self):
        """A domain with a trailing slash is rejected."""
        form = SiteAdminForm()
        form.cleaned_data = {"domain": "example.com/"}
        with pytest.raises(forms.ValidationError):
            form.clean_domain()

    def test_good(self):
        """A bare hostname passes through unchanged."""
        form = SiteAdminForm()
        form.cleaned_data = {"domain": "example.com"}
        assert form.clean_domain() == "example.com"
7,001 | get variable from table | import copy
import re
from collections import defaultdict
from pathlib import Path
from typing import List, Optional
import pandas as pd
import structlog
from owid.catalog import Dataset, Table, Variable
from owid.catalog.utils import concat_variables
from etl.paths import DATA_DIR
log = structlog.get_logger(__name__)
SDG_SOURCES_FILE = Path(__file__).parent / "sdg_sources.csv"
def _load_sdg_sources() -> pd.DataFrame:
    """Load the SDG indicator→variable mapping and derive each row's goal."""
    sources = pd.read_csv(SDG_SOURCES_FILE)
    # The goal is the leading component of the dotted indicator code, e.g. "1" in "1.2.1".
    sources["goal"] = sources["indicator"].str.split(".").str[0]
    # drop duplicate goal-variable pairs
    return sources.drop_duplicates(subset=["goal", "variable_id"])
def _get_variable_from_backported_table(table: Table, variable_id: str) -> Optional[Variable]:
    """Return a copy of the column whose grapher variable id matches, else None."""
    for col in table.columns:
        grapher_id = table[col].metadata.additional_info["grapher_meta"]["id"]
        if grapher_id == variable_id:
            # Hand back an independent copy so callers can rename/mutate freely.
            copied = Variable(table[col].dropna())
            copied.metadata = copy.deepcopy(table[col].metadata)
            return copied
    return None
def METHOD_NAME(table: Table, variable_title: str) -> Optional[Variable]:
    """Get variable from table based on variable's title; None when absent."""
    for col in table.columns:
        if table[col].metadata.title == variable_title:
            # Hand back an independent copy so callers can rename/mutate freely.
            copied = Variable(table[col].dropna())
            copied.metadata = copy.deepcopy(table[col].metadata)
            return copied
    return None
def _indicator_prefix(name: str, indicator: str) -> str:
"""Create new variable name by adding indicator prefix (if data comes directly from UN
and prefix already exists then replace it)."""
if name.startswith("_"):
name = name.split("__", 1)[1]
return f"indicator_{indicator.replace('.', '_').lower()}__{name}"
def _align_multiindex(index: pd.Index) -> pd.Index:
"""Align multiindex (year, entity_name, entity_id, entity_code) to (country, year)."""
index = index.rename([n.replace("entity_name", "country") for n in index.names])
return pd.MultiIndex.from_arrays(
[
index.get_level_values("country").astype("category"),
index.get_level_values("year").astype(int),
],
names=("country", "year"),
)
def run(dest_dir: str) -> None:
    """Assemble SDG dataset.

    Walks the indicator→variable mapping, pulls each variable out of its
    source dataset (backported or WDI garden), renames it with an indicator
    prefix, and writes one table per SDG goal into a new dataset at *dest_dir*.
    """
    sdg_sources = _load_sdg_sources()

    # Variables collected per SDG goal.
    vars: dict[str, List[Variable]] = defaultdict(list)

    # group by datasets to make sure we load each one only once
    for dataset_name, sdg_group in sdg_sources.groupby("dataset_name"):
        # kludge: if dataset is World Bank WDI, then grab metadata from the
        # corresponding garden dataset
        regex = re.search(r"world_development_indicators__world_bank__(\d{4}_\d{2}_\d{2})$", dataset_name)
        if regex:
            from_backport = False
            version = regex.groups()[0].replace("_", "-")
            ds = Dataset(DATA_DIR / f"garden/worldbank_wdi/{version}/wdi")
        else:
            from_backport = True
            ds = Dataset(DATA_DIR / "backport/owid/latest" / dataset_name)

        # Since ds[table] reads from a feather file, it becomes the bottleneck in
        # runtime. Caching saves us from repeated reads
        table_cache: dict[str, Table] = {}

        # go over all indicators from that dataset
        for r in sdg_group.itertuples():
            # iterate over all tables in a dataset (backported datasets would
            # usually have only one)
            for table_name in ds.table_names:
                if table_name in table_cache:
                    table = table_cache[table_name]
                else:
                    table = ds[table_name]
                    # Normalise the index to (country, year) before caching.
                    table.index = _align_multiindex(table.index)
                    table_cache[table_name] = table

                log.info("sdg.run", indicator=r.indicator, variable_name=r.variable_name)

                # Backported datasets are matched by grapher variable id,
                # garden (WDI) datasets by variable title.
                if from_backport:
                    v = _get_variable_from_backported_table(table, r.variable_id)
                else:
                    v = METHOD_NAME(table, r.variable_name)

                if v is not None:
                    v.name = _indicator_prefix(v.name, r.indicator)
                    vars[r.goal].append(v)
                    # variable found, continue with another indicator
                    break
            else:
                # for-else: no table contained the variable.
                raise Exception(f"Variable {r.variable_id} not found in tables")

    # create new dataset
    new_ds = Dataset.create_empty(dest_dir)
    new_ds.metadata.namespace = "sdg"
    new_ds.metadata.short_name = "sustainable_development_goals"

    # every goal has its own table with variables
    for goal, variables in vars.items():
        t = concat_variables(variables)
        t.metadata.short_name = f"sustainable_development_goal_{goal}"
        # sort by indicator name
        t = t.sort_index(axis=1)
        new_ds.add(t)

    new_ds.save()
7,002 | load arguments | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azure.cli.core.commands.parameters import get_enum_type
from azure.cli.command_modules.profile._format import transform_account_list
import azure.cli.command_modules.profile._help # pylint: disable=unused-import
from ._validators import validate_tenant
cloud_resource_types = ["oss-rdbms", "arm", "aad-graph", "ms-graph", "batch", "media", "data-lake"]
class ProfileCommandsLoader(AzCommandsLoader):
    """Command loader for the ``az login`` / ``az account`` command family.

    Fix: the argument-loading hook was left as a METHOD_NAME placeholder;
    the AzCommandsLoader framework invokes ``load_arguments``, so with the
    placeholder name none of these arguments would ever be registered.
    """

    def __init__(self, cli_ctx=None):
        super(ProfileCommandsLoader, self).__init__(cli_ctx=cli_ctx)

    def load_command_table(self, args):
        """Register the profile/account commands backed by the custom module."""
        profile_custom = CliCommandType(
            operations_tmpl='azure.cli.command_modules.profile.custom#{}'
        )

        with self.command_group('', profile_custom) as g:
            g.command('login', 'login')
            g.command('logout', 'logout')
            g.command('self-test', 'check_cli', deprecate_info=g.deprecate(hide=True))

        with self.command_group('account', profile_custom) as g:
            g.command('list', 'list_subscriptions', table_transformer=transform_account_list)
            g.command('set', 'set_active_subscription')
            g.show_command('show', 'show_subscription')
            g.command('clear', 'account_clear')
            g.command('list-locations', 'list_locations')
            g.command('get-access-token', 'get_access_token')

        return self.command_table

    # pylint: disable=line-too-long
    def load_arguments(self, command):
        """Declare the CLI arguments for each profile/account command."""
        from azure.cli.core.api import get_subscription_id_list

        with self.argument_context('login') as c:
            c.argument('password', options_list=['--password', '-p'], help="Credentials like user password, or for a service principal, provide client secret or a pem file with key and public certificate. Will prompt if not given.")
            c.argument('service_principal', action='store_true', help='The credential representing a service principal.')
            c.argument('username', options_list=['--username', '-u'], help='user name, service principal, or managed service identity ID')
            c.argument('tenant', options_list=['--tenant', '-t'], help='The AAD tenant, must provide when using service principals.', validator=validate_tenant)
            c.argument('allow_no_subscriptions', action='store_true', help="Support access tenants without subscriptions. It's uncommon but useful to run tenant level commands, such as 'az ad'")
            c.ignore('_subscription')  # hide the global subscription parameter
            c.argument('identity', options_list=('-i', '--identity'), action='store_true', help="Log in using the Virtual Machine's identity", arg_group='Managed Service Identity')
            c.argument('identity_port', type=int, help="the port to retrieve tokens for login. Default: 50342", arg_group='Managed Service Identity')
            c.argument('use_device_code', action='store_true',
                       help="Use CLI's old authentication flow based on device code. CLI will also use this if it can't launch a browser in your behalf, e.g. in remote SSH or Cloud Shell")
            c.argument('use_cert_sn_issuer', action='store_true', help='used with a service principal configured with Subject Name and Issuer Authentication in order to support automatic certificate rolls')
            c.argument('scopes', options_list=['--scope'], nargs='+', help='Used in the /authorize request. It can cover only one static resource.')
            c.argument('client_assertion', options_list=['--federated-token'], help='Federated token that can be used for OIDC token exchange.')

        with self.argument_context('logout') as c:
            c.argument('username', help='account user, if missing, logout the current active account')
            c.ignore('_subscription')  # hide the global subscription parameter

        with self.argument_context('account') as c:
            c.argument('subscription', options_list=['--subscription', '-s', '--name', '-n'], arg_group='', help='Name or ID of subscription.', completer=get_subscription_id_list)
            c.ignore('_subscription')

        with self.argument_context('account list') as c:
            c.argument('all', help="List all subscriptions from all clouds, rather than just 'Enabled' ones", action='store_true')
            c.argument('refresh', help="retrieve up-to-date subscriptions from server", action='store_true')
            c.ignore('_subscription')  # hide the global subscription parameter

        with self.argument_context('account get-access-token') as c:
            c.argument('resource_type', get_enum_type(cloud_resource_types), options_list=['--resource-type'], arg_group='', help='Type of well-known resource.')
            c.argument('resource', options_list=['--resource'], help='Azure resource endpoints in AAD v1.0.')
            c.argument('scopes', options_list=['--scope'], nargs='*', help='Space-separated AAD scopes in AAD v2.0. Default to Azure Resource Manager.')
            c.argument('tenant', options_list=['--tenant', '-t'], help='Tenant ID for which the token is acquired. Only available for user and service principal account, not for MSI or Cloud Shell account')
# Hook used by the Azure CLI framework to discover this module's command loader.
COMMAND_LOADER_CLS = ProfileCommandsLoader
7,003 | test scaled interval score | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
from gluonts.ev.stats import (
absolute_error,
absolute_label,
absolute_percentage_error,
coverage,
error,
quantile_loss,
squared_error,
symmetric_absolute_percentage_error,
scaled_interval_score,
absolute_scaled_error,
scaled_quantile_loss,
)
from gluonts.ev.ts_stats import seasonal_error
# Forecast horizon shared by all fixture series.
PREDICTION_LENGTH = 5

# Fixture series covering degenerate and typical value patterns
# (all-NaN, all-zero, constant, increasing, decaying, random, negative random).
NAN = np.full(PREDICTION_LENGTH, np.nan)
ZEROES = np.zeros(PREDICTION_LENGTH)
CONSTANT = np.full(PREDICTION_LENGTH, 0.4)
LINEAR = np.array([0.0, 0.1, 0.2, 0.3, 0.4])
EXPONENTIAL = np.array([0.1, 0.01, 0.001, 0.0001, 0.00001])
RANDOM = np.random.random(PREDICTION_LENGTH)
NEGATIVE_RANDOM = -1 * np.random.random(PREDICTION_LENGTH)

# Every metric test below iterates over the cross product of these series.
TIME_SERIES = [
    NAN,
    ZEROES,
    CONSTANT,
    LINEAR,
    EXPONENTIAL,
    RANDOM,
    NEGATIVE_RANDOM,
]
def test_absolute_label():
    """absolute_label must return |label| regardless of the forecast."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "mean": forecast}

            actual = absolute_label(data)
            expected = np.abs(label)
            np.testing.assert_almost_equal(actual, expected)
def test_error():
    """error must equal label - forecast for every series pairing."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "0.5": forecast}

            actual = error(data, forecast_type="0.5")
            expected = label - forecast
            np.testing.assert_almost_equal(actual, expected)
def test_abs_error():
    """absolute_error must equal |label - forecast|."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "0.5": forecast}

            actual = absolute_error(data, forecast_type="0.5")
            expected = np.abs(label - forecast)
            np.testing.assert_almost_equal(actual, expected)
def test_squared_error():
    """squared_error must equal (label - forecast)^2."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "mean": forecast}

            actual = squared_error(data, forecast_type="mean")
            expected = np.square(label - forecast)
            np.testing.assert_almost_equal(actual, expected)
def test_quantile_loss():
    """quantile_loss must match 2|{(y - q̂) * (1[q̂ >= y] - q)}| at q = 0.9."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            q = 0.9
            data = {"label": label, str(q): forecast}

            actual = quantile_loss(data, q=q)
            expected = 2 * np.abs(
                (label - forecast) * ((forecast >= label) - q)
            )
            np.testing.assert_almost_equal(actual, expected)
def test_coverage():
    """coverage must flag whether the label falls at or below the quantile forecast."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            q = 0.9
            data = {"label": label, str(q): forecast}

            actual = coverage(data, q=q)
            expected = label <= forecast
            np.testing.assert_almost_equal(actual, expected)
def test_absolute_percentage_error():
    """APE must equal |label - forecast| / |label|."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "0.5": forecast}

            actual = absolute_percentage_error(data, forecast_type="0.5")
            expected = np.abs(label - forecast) / np.abs(label)
            np.testing.assert_almost_equal(actual, expected)
def test_symmetric_absolute_percentage_error():
    """sAPE must equal 2|label - forecast| / (|label| + |forecast|)."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            data = {"label": label, "0.5": forecast}

            actual = symmetric_absolute_percentage_error(
                data, forecast_type="0.5"
            )
            expected = (
                2
                * np.abs(label - forecast)
                / (np.abs(label) + np.abs(forecast))
            )
            np.testing.assert_almost_equal(actual, expected)
def test_scaled_interval_score():
    """scaled_interval_score must match the interval-score formula by hand.

    Fix: renamed from a METHOD_NAME placeholder so pytest collects it,
    consistent with the sibling metric tests.
    """
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            alpha = 0.05
            # to keep things simple, the following values are rather arbitrary
            lower_quantile = forecast * (alpha / 2)
            upper_quantile = forecast * (1 - alpha / 2)

            # applying `seasonal_error` on the label is not realistic but
            # at least, the seasonal error function gets used this way
            seasonal_err = seasonal_error(label, seasonality=2)

            data = {
                "label": label,
                "mean": forecast,
                "seasonal_error": seasonal_err,
                "0.025": lower_quantile,
                "0.975": upper_quantile,
            }

            actual = scaled_interval_score(data, alpha=alpha)
            expected = (
                upper_quantile
                - lower_quantile
                + 2.0
                / alpha
                * (lower_quantile - label)
                * (label < lower_quantile)
                + 2.0
                / alpha
                * (label - upper_quantile)
                * (label > upper_quantile)
            ) / seasonal_err
            np.testing.assert_almost_equal(actual, expected)
def test_absolute_scaled_error():
    """ASE must equal |label - forecast| divided by the seasonal error."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            # applying `seasonal_error` on the label is not realistic but
            # at least, the seasonal error function gets used this way
            seasonal_err = seasonal_error(label, seasonality=2)

            data = {
                "label": label,
                "0.5": forecast,
                "seasonal_error": seasonal_err,
            }

            actual = absolute_scaled_error(data, forecast_type="0.5")
            expected = np.abs(label - forecast) / seasonal_err
            np.testing.assert_almost_equal(actual, expected)
def test_scaled_quantile_loss():
    """scaled_quantile_loss must be the quantile loss divided by the seasonal error."""
    for label in TIME_SERIES:
        for forecast in TIME_SERIES:
            # applying `seasonal_error` on the label is not realistic but
            # at least, the seasonal error function gets used this way
            seasonal_err = seasonal_error(label, seasonality=2)

            data = {
                "label": label,
                "0.9": forecast,
                "seasonal_error": seasonal_err,
            }

            actual = scaled_quantile_loss(data, 0.9)
            expected = (
                2
                * (label - forecast)
                * (0.9 - (label < forecast))
                / seasonal_err
            )
            np.testing.assert_almost_equal(actual, expected)
7,004 | get table | import copy
from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT, DATABASE_INPUT
from .fixtures.schema_registry import (
TEST_REGISTRY_NAME,
TEST_SCHEMA_NAME,
TEST_BACKWARD_COMPATIBILITY,
TEST_AVRO_DATA_FORMAT,
TEST_AVRO_SCHEMA_DEFINITION,
TEST_SCHEMA_ID,
TEST_NEW_AVRO_SCHEMA_DEFINITION,
)
def create_database_input(database_name):
    """Return a fresh DatabaseInput dict for *database_name*.

    The shared fixture is deep-copied so each test gets an independent dict.
    """
    db_input = copy.deepcopy(DATABASE_INPUT)
    db_input["Name"] = database_name
    db_input["LocationUri"] = f"s3://my-bucket/{database_name}"
    return db_input
def create_database(client, database_name, database_input=None, catalog_id=None):
    """Create a Glue database, building a default DatabaseInput when none is given."""
    if database_input is None:
        database_input = create_database_input(database_name)
    call_kwargs = {"DatabaseInput": database_input}
    if catalog_id is not None:
        call_kwargs["CatalogId"] = catalog_id
    return client.create_database(**call_kwargs)
def get_database(client, database_name):
    """Fetch a single Glue database by name."""
    return client.get_database(Name=database_name)
def create_table_input(database_name, table_name, columns=None, partition_keys=None):
    """Return a fresh TableInput dict located under ``s3://my-bucket/<db>/<table>``."""
    table_input = copy.deepcopy(TABLE_INPUT)
    table_input["Name"] = table_name
    table_input["PartitionKeys"] = partition_keys or []
    storage = table_input["StorageDescriptor"]
    storage["Columns"] = columns or []
    storage["Location"] = f"s3://my-bucket/{database_name}/{table_name}"
    return table_input
def create_table(client, database_name, table_name, table_input=None, **kwargs):
    """Create a table; extra kwargs feed create_table_input when no input is given."""
    effective_input = (
        create_table_input(database_name, table_name, **kwargs)
        if table_input is None
        else table_input
    )
    return client.create_table(DatabaseName=database_name, TableInput=effective_input)
def update_table(client, database_name, table_name, table_input=None, **kwargs):
    """Update a table; extra kwargs feed create_table_input when no input is given."""
    effective_input = (
        create_table_input(database_name, table_name, **kwargs)
        if table_input is None
        else table_input
    )
    return client.update_table(DatabaseName=database_name, TableInput=effective_input)
def get_table(client, database_name, table_name):
    """Fetch a single table from a database.

    Fix: the METHOD_NAME placeholder also appeared in the client call
    (``client.METHOD_NAME``), which is not a real Glue client method;
    both are restored to ``get_table``.
    """
    return client.get_table(DatabaseName=database_name, Name=table_name)
def get_tables(client, database_name, expression=None):
    """List tables in *database_name*, optionally filtered by a wildcard *expression*."""
    call_kwargs = {"DatabaseName": database_name}
    if expression:
        call_kwargs["Expression"] = expression
    return client.get_tables(**call_kwargs)
def get_table_versions(client, database_name, table_name):
    """List all stored versions of a table."""
    return client.get_table_versions(DatabaseName=database_name, TableName=table_name)
def get_table_version(client, database_name, table_name, version_id):
    """Fetch one specific stored version of a table."""
    return client.get_table_version(
        DatabaseName=database_name, TableName=table_name, VersionId=version_id
    )
def create_column(name, type_, comment=None, parameters=None):
    """Build a Glue column dict; Comment/Parameters keys appear only when given."""
    column = {"Name": name, "Type": type_}
    optional = {"Comment": comment, "Parameters": parameters}
    column.update({key: value for key, value in optional.items() if value is not None})
    return column
def create_partition_input(database_name, table_name, values=None, columns=None):
    """Return a fresh PartitionInput dict whose SerDe path points at the table prefix."""
    partition = copy.deepcopy(PARTITION_INPUT)
    partition["Values"] = values or []
    storage = partition["StorageDescriptor"]
    storage["Columns"] = columns or []
    storage["SerdeInfo"]["Parameters"]["path"] = f"s3://my-bucket/{database_name}/{table_name}"
    return partition
def create_partition(client, database_name, table_name, partiton_input=None, **kwargs):
    """Create a partition; a default PartitionInput is built when none is supplied.

    NOTE: the ``partiton_input`` spelling is preserved because callers pass it
    by keyword.
    """
    effective_input = (
        create_partition_input(database_name, table_name, **kwargs)
        if partiton_input is None
        else partiton_input
    )
    return client.create_partition(
        DatabaseName=database_name,
        TableName=table_name,
        PartitionInput=effective_input,
    )
def update_partition(
    client, database_name, table_name, old_values=None, partiton_input=None, **kwargs
):
    """Update the partition currently identified by *old_values*.

    A default PartitionInput is built from **kwargs when none is supplied.
    """
    effective_input = (
        create_partition_input(database_name, table_name, **kwargs)
        if partiton_input is None
        else partiton_input
    )
    return client.update_partition(
        DatabaseName=database_name,
        TableName=table_name,
        PartitionInput=effective_input,
        PartitionValueList=old_values or [],
    )
def get_partition(client, database_name, table_name, values):
    """Fetch the partition identified by its list of partition *values*."""
    return client.get_partition(
        DatabaseName=database_name, TableName=table_name, PartitionValues=values
    )
def create_crawler(
    client, crawler_name, crawler_role=None, crawler_targets=None, **kwargs
):
    """Create a Glue crawler, translating snake_case kwargs to boto3 CamelCase params.

    Role and Targets default to a dummy IAM role and an empty target set when
    not provided; only kwargs with non-None values are forwarded.
    """
    optional_param_map = {
        "database_name": "DatabaseName",
        "description": "Description",
        "schedule": "Schedule",
        "classifiers": "Classifiers",
        "table_prefix": "TablePrefix",
        "schema_change_policy": "SchemaChangePolicy",
        "recrawl_policy": "RecrawlPolicy",
        "lineage_configuration": "LineageConfiguration",
        "configuration": "Configuration",
        "crawler_security_configuration": "CrawlerSecurityConfiguration",
        "tags": "Tags",
    }
    params = {}
    for snake_name, boto3_name in optional_param_map.items():
        value = kwargs.get(snake_name)
        if value is not None:
            params[boto3_name] = value

    if crawler_role is None:
        crawler_role = "arn:aws:iam::123456789012:role/Glue/Role"
    if crawler_targets is None:
        crawler_targets = {
            "S3Targets": [],
            "JdbcTargets": [],
            "MongoDBTargets": [],
            "DynamoDBTargets": [],
            "CatalogTargets": [],
        }
    return client.create_crawler(
        Name=crawler_name, Role=crawler_role, Targets=crawler_targets, **params
    )
def create_registry(client, registry_name=TEST_REGISTRY_NAME):
    """Create a schema registry (defaults to the shared test registry name)."""
    return client.create_registry(RegistryName=registry_name)
def create_schema(
    client,
    registry_id,
    data_format=TEST_AVRO_DATA_FORMAT,
    compatibility=TEST_BACKWARD_COMPATIBILITY,
    schema_definition=TEST_AVRO_SCHEMA_DEFINITION,
):
    """Create a schema in *registry_id* using the shared Avro test fixtures."""
    return client.create_schema(
        RegistryId=registry_id,
        SchemaName=TEST_SCHEMA_NAME,
        DataFormat=data_format,
        Compatibility=compatibility,
        SchemaDefinition=schema_definition,
    )
def register_schema_version(client):
    """Register the updated Avro schema definition as a new schema version."""
    return client.register_schema_version(
        SchemaId=TEST_SCHEMA_ID, SchemaDefinition=TEST_NEW_AVRO_SCHEMA_DEFINITION
    )
7,005 | construct model | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Fixed seed keeps the synthetic dataset identical across processes and runs.
np.random.seed(20210301)
_num_samples = 11
_sample_size = 3
_samples = np.random.normal(size=(_num_samples,_sample_size))
_samples = _samples.astype(np.float32)
# Sample access functions
def get_sample(index):
    """Return sample *index* from the in-memory dataset (called by the data reader)."""
    return _samples[index]
def num_samples():
    """Return the number of samples in the in-memory dataset."""
    return _samples.shape[0]
def sample_dims():
    """Return the shape of one sample as a tuple."""
    return (_samples.shape[1],)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend
        weekly: unused by this experiment (part of the common test signature).

    """
    mini_batch_size = num_samples()
    trainer = lbann.Trainer(mini_batch_size)
    model = METHOD_NAME(lbann)
    data_reader = construct_data_reader(lbann)
    optimizer = lbann.NoOptimizer()
    return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def METHOD_NAME(lbann):
    """Construct LBANN model.

    Builds a graph computing the per-feature variance across the mini-batch
    (var = E[x^2] - E[x]^2), uses its squared L2 norm as both metric and
    objective, and attaches callbacks that check the metric value against a
    NumPy reference and run gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Multiply with a weights layer so that gradient checking
    # will verify that error signals are correct. We multiply instead
    # of adding so that each batch sample contributes a different
    # gradient.
    x_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=1.0),
        name='input_weights'
    )
    x = lbann.Multiply(
        lbann.Input(data_field='samples'),
        lbann.WeightsLayer(weights=x_weights, dims=_sample_size),
    )

    # Compute variance along batch dimension: var = E[x^2] - E[x]^2
    sum_x = lbann.BatchwiseReduceSum(x)
    sum_x2 = lbann.BatchwiseReduceSum(lbann.Square(x))
    mini_batch_size = lbann.Tessellate(lbann.MiniBatchSize(), hint_layer=x)
    mean_x = lbann.Divide(sum_x, mini_batch_size)
    mean_x2 = lbann.Divide(sum_x2, mini_batch_size)
    var = lbann.Subtract(mean_x2, lbann.Square(mean_x))
    obj = lbann.L2Norm2(var)

    # Objects for LBANN model
    layers = list(lbann.traverse_layer_graph(x))
    metric = lbann.Metric(obj, name='obj')
    obj = lbann.ObjectiveFunction(obj)
    callbacks = []

    # Compute expected metric value with NumPy and allow a small fp32 tolerance.
    var = np.var(_samples, axis=0)
    val = tools.numpy_l2norm2(var)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metric.name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # Gradient checking
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # Construct model; zero epochs because only the test phase is exercised.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=layers,
                       objective_function=obj,
                       metrics=[metric],
                       callbacks=callbacks)
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_sample',
            'num_samples',
            'sample_dims',
            'train'
        )
    ])
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_sample',
            'num_samples',
            'sample_dims',
            'test'
        )
    ])
    return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
# Injecting the generated functions into globals() lets pytest discover them
# as top-level tests of this module.
for _test_func in tools.create_tests(setup_experiment, __file__):
    globals()[_test_func.__name__] = _test_func
7,006 | get md links | #!/usr/bin/env python
#
# Checks that all links in the readme markdown files are valid
#
# SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import concurrent.futures
import os
import os.path
import re
import sys
import urllib.error
import urllib.request
from collections import defaultdict, namedtuple
from pathlib import Path
from typing import List
# The apple apps links are not accessible from the company network for some reason
EXCLUDE_URL_LIST = ['https://apps.apple.com/in/app/esp-ble-provisioning/id1473590141', 'https://apps.apple.com/in/app/esp-softap-provisioning/id1474040630']

# One markdown link: *file* is the Path of the README, *url* its target.
Link = namedtuple('Link', ['file', 'url'])
class ReadmeLinkError(Exception):
    """Base class for broken-link errors; records the source file and the bad URL."""

    def __init__(self, file: str, url: str) -> None:
        self.file = file
        self.url = url
class RelativeLinkError(ReadmeLinkError):
    """Raised when a relative link points at a file that does not exist."""

    def __str__(self) -> str:
        return f'Relative link error, file - {self.url} not found, linked from {self.file}'
class UrlLinkError(ReadmeLinkError):
    """Raised when a web URL is inaccessible (HTTP 404); *file* holds the list of files referencing it."""

    def __init__(self, file: str, url: str, error_code: str):
        self.error_code = error_code
        super().__init__(file, url)

    def __str__(self) -> str:
        file_names = ', '.join(str(f) for f in self.file)
        return f'URL error, url - {self.url} in files - {file_names} is not accessible, request returned {self.error_code}'
# we do not want a failed test just due to bad network conditions, for non 404 errors we simply print a warning
def check_url(url: str, files: str, timeout: float) -> None:
    """Open *url*; raise UrlLinkError only on HTTP 404, print a warning otherwise."""
    try:
        with urllib.request.urlopen(url, timeout=timeout):
            return
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise UrlLinkError(files, url, str(e))
        else:
            print('Unable to access {}, err = {}'.format(url, str(e)))
    except Exception as e:
        print('Unable to access {}, err = {}'.format(url, str(e)))
def check_web_links(web_links: defaultdict) -> List:
    """Check all web URLs concurrently; return the UrlLinkErrors (404s) collected."""
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        errors = []
        future_to_url = {executor.submit(check_url, url, files, timeout=30): (url, files) for url, files in web_links.items()}
        for future in concurrent.futures.as_completed(future_to_url):
            try:
                future.result()
            except UrlLinkError as e:
                errors.append(e)
        return errors
def check_file_links(file_links: List) -> List:
    """Verify each relative link resolves to an existing file; return the failures."""
    errors = [
        RelativeLinkError(link.file, link.url)
        for link in file_links
        if not (link.file.parent / link.url).exists()
    ]
    print(f'Found {len(errors)} errors with relative links')
    return errors
def METHOD_NAME(folder: str) -> List:
    """Collect all markdown links found in ``*.md`` files under ``$IDF_PATH/folder``.

    Same-page anchor links (targets starting with '#') are skipped.

    Raises:
        RuntimeError: if the IDF_PATH environment variable is not set.
    """
    MD_LINK_RE = r'\[.+?\]\((.+?)(#.+)?\)'

    idf_path_str = os.getenv('IDF_PATH')
    if idf_path_str is None:
        raise RuntimeError("Environment variable 'IDF_PATH' wasn't set.")
    idf_path = Path(idf_path_str)
    links = []

    for path in (idf_path / folder).rglob('*.md'):
        with path.open(encoding='utf8') as f:
            content = f.read()
        for url in re.findall(MD_LINK_RE, content):
            link = Link(path, url[0].lstrip())
            # Ignore "local" links
            if not link.url.startswith('#'):
                links.append(link)

    return links
def check_readme_links(args: argparse.Namespace) -> int:
    """Check all links found in example READMEs.

    Args:
        args: parsed CLI namespace; ``skip_weburl`` disables web checks.

    Returns:
        1 if any broken link was found, 0 otherwise.
    """
    links = METHOD_NAME('examples')
    print('Found {} links'.format(len(links)))
    errors = []
    web_links = defaultdict(list)
    file_links = []
    # Sort links into file and web links
    for link in links:
        if link.url.startswith('http'):
            web_links[link.url].append(link.file)
        else:
            file_links.append(link)
    # BUG FIX: an excluded URL may legitimately be absent from the READMEs
    # scanned; `del web_links[url]` would raise KeyError in that case.
    for url in EXCLUDE_URL_LIST:
        web_links.pop(url, None)
    errors.extend(check_file_links(file_links))
    if not args.skip_weburl:
        errors.extend(check_web_links(web_links))
    print('Found {} errors:'.format(len(errors)))
    for e in errors:
        print(e)
    return 1 if len(errors) > 0 else 0
if __name__ == '__main__':
    # CLI entry point: parse flags and propagate the error count as exit status.
    # (Removed stray '|' residue that trailed the final line.)
    parser = argparse.ArgumentParser(description='check_readme_links.py: Checks for dead links in example READMEs', prog='check_readme_links.py')
    parser.add_argument('--skip-weburl', '-w', action='store_true', help='Skip checking of web URLs, only check links to local files')
    args = parser.parse_args()
    sys.exit(check_readme_links(args))
# pylint: disable=inconsistent-return-statements
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
"""
as_torch: a decorator, which is used to wrap the TVMScript code to `torch.nn.module`.
"""
import tempfile
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
import torch
import torch.utils.dlpack
import tvm
from tvm import meta_schedule as ms
from tvm.target.target import Target
from tvm.tir import PrimFunc
# python wrapper for OperatorModule
class OperatorModuleWrapper(torch.nn.Module):
    """Wrap a TVMScript IRModule/PrimFunc as a ``torch.nn.Module``.

    The TVM runtime module is built lazily: either explicitly via
    ``METHOD_NAME()`` / ``tune()``, or automatically on the first ``forward()``
    call, using the device of the first input tensor to choose the target.
    """

    def __init__(
        self,
        module: Union[
            tvm.ir.module.IRModule,
            tvm.tir.function.PrimFunc,
        ],
    ):
        super().__init__()
        self.rt_module = None  # runtime module, built lazily
        self.ir_module = module  # IR modules

    def tune(
        self,
        target: Union[str, Target] = "cpu",
        max_trials_global: int = 32,
        *,
        num_trials_per_iter: int = 32,
        builder: ms.Builder.BuilderType = "local",
        runner: ms.Runner.RunnerType = "local",
        database: ms.Database.DatabaseType = "json",
        cost_model: ms.CostModel.CostModelType = "xgb",
        measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
        task_scheduler: ms.TaskScheduler.TaskSchedulerType = "round-robin",
        space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
        strategy: ms.SearchStrategy.SearchStrategyType = "replay-trace",
        num_tuning_cores: Union[Literal["physical", "logical"], int] = "physical",
        seed: Optional[int] = None,
    ) -> None:
        """
        Tune the TVMScript code.

        Parameters
        ----------
        target : Optional[str, Target]
            The target to tune for.
        max_trials_global : int
            Upper bound on the total number of tuning trials.
        """
        if target == "cpu":
            target = f"llvm --num-cores {ms.utils.cpu_count(logical=False)}"
        with tempfile.TemporaryDirectory() as work_dir:
            # Use a distinct local name so the `database` argument (the
            # database *type* requested by the caller) is not shadowed by the
            # tuned database object returned from tune_tir.
            tuned_db = ms.tir_integration.tune_tir(
                mod=self.ir_module,
                target=target,
                work_dir=work_dir,
                max_trials_global=max_trials_global,
                num_trials_per_iter=num_trials_per_iter,
                builder=builder,
                runner=runner,
                database=database,
                cost_model=cost_model,
                measure_callbacks=measure_callbacks,
                task_scheduler=task_scheduler,
                space=space,
                strategy=strategy,
                num_tuning_cores=num_tuning_cores,
                seed=seed,
            )
            sch = ms.tir_integration.compile_tir(tuned_db, self.ir_module, target)
            self.ir_module = sch.mod
            self.METHOD_NAME(target)

    def script(self):
        """Return the TVMScript source of the (possibly tuned) IRModule."""
        return self.ir_module.script()

    def METHOD_NAME(self, target=None):
        """Build the IRModule into a runtime module and hand it to the torch plugin."""
        runtime_module = tvm.METHOD_NAME(self.ir_module, target=target)
        func = tvm.get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
        if func is None:
            # BUG FIX: the original message used '/"' where '"' was intended.
            raise ValueError('as_torch requires the flag "USE_PT_TVMDSOOP" set in config.cmake')
        func(runtime_module)
        self.rt_module = torch.classes.tvm_torch.OperatorModuleWrapper()

    def forward(self, *torch_inputs: List[torch.Tensor]) -> List[torch.Tensor]:
        """Run the operator; builds the runtime module on first use."""
        if self.rt_module is None:
            if torch_inputs[0].is_cuda:
                self.METHOD_NAME(target="cuda")
            elif torch_inputs[0].device.type == "cpu":
                self.METHOD_NAME()
            else:
                raise Exception(f"the target {torch_inputs[0].device.type} is not supported yet")
        return self.rt_module.forward(torch_inputs)
def as_torch(func: Union[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]):
    """A decorator of converting TensorIR to PyTorch nn.Module.

    Parameters
    ----------
    func: Optional[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]
        The function written by TVMScript.

    Returns
    -------
    mod : Union[OperatorModuleWrapper, Callable]
        It will return an object, or a templated function of OperatorModuleWrapper,
        which is the subclass of the original nn.Module.
    """
    # (Removed stray '|' residue that trailed the final line.)
    if isinstance(func, (tvm.ir.module.IRModule, PrimFunc)):
        return OperatorModuleWrapper(func)
    if callable(func):

        def func_get_param(*args, **kwargs):
            # Defer wrapping until the template is instantiated with its params.
            return OperatorModuleWrapper(func(*args, **kwargs))

        return func_get_param
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links"""
    try:
        os.stat(path)
    except (OSError, ValueError):
        return False
    else:
        return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        mode = os.stat(path).st_mode
    except (OSError, ValueError):
        return False
    return stat.S_ISREG(mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except (OSError, ValueError):
        return False
    return stat.S_ISDIR(mode)
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size
def METHOD_NAME(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_mtime
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_atime
def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # Callers sometimes pass lists of path *parts* to stay OS-agnostic;
    # those are left untouched (no fspath conversion applied).
    if not isinstance(m[0], (list, tuple)):
        m = tuple(map(os.fspath, m))
    lo, hi = min(m), max(m)
    # The common prefix of the whole set equals that of its min/max elements.
    for idx, ch in enumerate(lo):
        if ch != hi[idx]:
            return lo[:idx]
    return lo
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return (s1.st_ino == s2.st_ino and
s1.st_dev == s2.st_dev)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file or directory

    This is determined by the device number and i-node number and
    raises an exception if an os.stat() call on either pathname fails.
    """
    return samestat(os.stat(f1), os.stat(f2))
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    return samestat(os.fstat(fp1), os.fstat(fp2))
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError(f'{funcname}() argument must be str, bytes, or '
f'os.PathLike object, not {s.__class__.__name__!r}') from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None |
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
from conan.tools.files import get, rmdir, apply_conandata_patches, export_conandata_patches, copy
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class DateConan(ConanFile):
    """Conan recipe for HowardHinnant/date: header-only headers, or a compiled
    ``date-tz`` timezone library (optionally using the system tz database)."""

    name = "date"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/HowardHinnant/date"
    description = "A date and time library based on the C++11/14/17 <chrono> header"
    topics = ("datetime", "timezone", "calendar", "time", "iana-database")
    license = "MIT"

    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "header_only": [True, False],
        "use_system_tz_db": [True, False],
        "use_tz_db_in_dot": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "header_only": False,
        "use_system_tz_db": False,
        "use_tz_db_in_dot": False,
    }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC
        if self.settings.os in ["iOS", "tvOS", "watchOS", "Android"]:
            # Mobile platforms ship their own tz database; always use it.
            self.options.use_system_tz_db = True

    def configure(self):
        if self.options.shared or self.options.header_only:
            self.options.rm_safe("fPIC")
        if self.options.header_only:
            del self.options.shared

    def METHOD_NAME(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        # libcurl is only needed when the tz library downloads the IANA db itself.
        if not self.options.header_only and not self.options.use_system_tz_db:
            self.requires("libcurl/8.2.1")

    def package_id(self):
        if self.info.options.header_only:
            self.info.clear()

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, 11)

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables["ENABLE_DATE_TESTING"] = False
        tc.variables["USE_SYSTEM_TZ_DB"] = self.options.use_system_tz_db
        tc.variables["USE_TZ_DB_IN_DOT"] = self.options.use_tz_db_in_dot
        tc.variables["BUILD_TZ_LIB"] = not self.options.header_only
        # workaround for clang 5 not having string_view
        if Version(self.version) >= "3.0.0" and self.settings.compiler == "clang" \
                and Version(self.settings.compiler.version) <= "5.0":
            tc.cache_variables["DISABLE_STRING_VIEW"] = True
        tc.generate()
        deps = CMakeDeps(self)
        deps.generate()

    def build(self):
        apply_conandata_patches(self)
        if not self.options.header_only:
            cmake = CMake(self)
            cmake.configure()
            cmake.build()

    def package(self):
        copy(self, "LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        if self.options.header_only:
            src = os.path.join(self.source_folder, "include", "date")
            dst = os.path.join(self.package_folder, "include", "date")
            copy(self, "date.h", dst=dst, src=src)
            copy(self, "tz.h", dst=dst, src=src)
            copy(self, "ptz.h", dst=dst, src=src)
            copy(self, "iso_week.h", dst=dst, src=src)
            copy(self, "julian.h", dst=dst, src=src)
            copy(self, "islamic.h", dst=dst, src=src)
        else:
            cmake = CMake(self)
            cmake.install()
            rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
            rmdir(self, os.path.join(self.package_folder, "CMake"))

    def package_info(self):
        self.cpp_info.set_property("cmake_target_name", "date::date")
        # TODO: Remove legacy .names attribute when conan 2.0 is released
        self.cpp_info.names["cmake_find_package"] = "date"
        self.cpp_info.names["cmake_find_package_multi"] = "date"
        # date-tz
        if not self.options.header_only:
            self.cpp_info.components["date-tz"].set_property("cmake_target_name", "date::date-tz")
            # TODO: Remove legacy .names attribute when conan 2.0 is released
            self.cpp_info.components["date-tz"].names["cmake_find_package"] = "date-tz"
            self.cpp_info.components["date-tz"].names["cmake_find_package_multi"] = "date-tz"
            lib_name = "{}tz".format("date-" if Version(self.version) >= "3.0.0" else "")
            self.cpp_info.components["date-tz"].libs = [lib_name]
            if self.settings.os == "Linux":
                self.cpp_info.components["date-tz"].system_libs.append("pthread")
                self.cpp_info.components["date-tz"].system_libs.append("m")
            if not self.options.use_system_tz_db:
                self.cpp_info.components["date-tz"].requires.append("libcurl::libcurl")
            if self.options.use_system_tz_db and not self.settings.os == "Windows":
                use_os_tzdb = 1
            else:
                use_os_tzdb = 0
            defines = ["USE_OS_TZDB={}".format(use_os_tzdb)]
            if self.settings.os == "Windows" and self.options.shared:
                defines.append("DATE_USE_DLL=1")
            self.cpp_info.components["date-tz"].defines.extend(defines)
        else:
            self.cpp_info.defines.append("DATE_HEADER_ONLY")
            # Header-only packages ship no libraries.
            # (Removed stray '|' residue that trailed this line.)
            self.cpp_info.libdirs = []
"""
JBoss version
=============
Provide information about the versions of all running Jboss on a system.
"""
import json
from collections import namedtuple
from insights import Parser, parser
from insights.specs import Specs
# define namedtuple to store the property of version
# Fields: source file path, product name, dotted version string, code name,
# and the numeric major/minor/release components.
_VersionNameTuple = namedtuple("_VersionNameTuple", ["file_path", "product", "version", "code_name", "major", "minor", "release"])
def _get_version_tuple(version_line, i_file_path):
    """
    Perform the version line parsing, returning a nametuple of the values of one jboss version
    """
    product, _, version_name = [part.strip() for part in version_line.partition("- Version")]
    if " GA" in version_name:
        # e.g. "Red Hat JBoss Web Server - Version 5.6 GA"
        version = version_name.split(' GA')[0]
        code_name = "GA"
        major, minor, rel = (version + ".0").split('.')[0:3]
    else:
        # e.g. "Red Hat Data Grid - Version 7.3.0" (no code name -> empty string)
        major, minor, rel, code_name = (version_name.strip() + ".").split(".")[0:4]
        version = '.'.join([major, minor, rel])
    return _VersionNameTuple(i_file_path, product, version, code_name, int(major), int(minor), int(rel))
@parser(Specs.jboss_version)
class JbossVersion(Parser):
    """
    This class is to access to file ``$JBOSS_HOME/version.txt``

    Typical content of file ``$JBOSS_HOME/version.txt`` is::

        Red Hat JBoss Enterprise Application Platform - Version 6.4.3.GA

    This class parses the file content and stores data in the dict ``self.parsed``.

    The version info can also be got via ``obj.major`` and ``obj.minor``, etc.

    Examples:
        >>> jboss_version.file_path
        '/home/test/jboss/jboss-eap-6.4/version.txt'
        >>> jboss_version.raw
        'Red Hat JBoss Enterprise Application Platform - Version 6.4.3.GA'
        >>> jboss_version.major
        6
        >>> jboss_version.minor
        4
        >>> jboss_version.release
        3
        >>> jboss_version.version
        '6.4.3'
        >>> jboss_version.code_name
        'GA'
    """
    def parse_content(self, content):
        # The file holds a single significant line; keep the raw text and the
        # parsed namedtuple side by side.
        self.raw = content[0]
        self._parsed = _get_version_tuple(content[0], self.file_path)

    @property
    def product(self):
        """str: the product name of this running JBoss process."""
        return self._parsed.product

    @property
    def version(self):
        """str: the dotted version (major.minor.release) of this running JBoss process."""
        return self._parsed.version

    @property
    def major(self):
        """int: the major version of this running JBoss process."""
        return self._parsed.major

    @property
    def minor(self):
        """int: the minor version of this running JBoss process."""
        return self._parsed.minor

    @property
    def METHOD_NAME(self):
        """int: release of this running JBoss process."""
        return self._parsed.METHOD_NAME

    @property
    def code_name(self):
        """str: code name (e.g. ``GA``) of this running JBoss process."""
        return self._parsed.code_name
@parser(Specs.jboss_runtime_versions)
class JbossRuntimeVersions(Parser, list):
    """
    This class is to access to file ``data/insights_commands/jboss_versions``

    Typical content of file ``data/insights_commands/jboss_versions`` is::

        {"/opt/jboss-datagrid-7.3.0-server": "Red Hat Data Grid - Version 7.3.0"}

    This class parses the file content and stores data in the list.

    Examples:
        >>> len(all_jboss_versions)
        1
        >>> all_jboss_versions[0].major
        7
        >>> all_jboss_versions[0].minor
        3
        >>> all_jboss_versions[0].release
        0
    """
    def parse_content(self, content):
        # Content is a JSON object mapping install path -> version banner text;
        # re-join lines first in case the JSON was wrapped by the collector.
        jboss_version_dict = json.loads(' '.join(content))
        for j_path, version_content in jboss_version_dict.items():
            # Only the first line of the banner carries the version string.
            # (Removed stray '|' residue that trailed this line.)
            lines = version_content.strip().splitlines()
            self.append(_get_version_tuple(lines[0], j_path))
import re
import pyblish.api
class CollectClipEffects(pyblish.api.InstancePlugin):
    """Collect soft effects instances."""

    order = pyblish.api.CollectorOrder - 0.078
    label = "Collect Clip Effects Instances"
    families = ["clip"]

    def METHOD_NAME(self, instance):
        """Create one "effect" family instance holding every effect overlapping the clip."""
        family = "effect"
        effects = {}
        review = instance.data.get("review")
        review_track_index = instance.context.data.get("reviewTrackIndex")
        item = instance.data["item"]

        if "audio" in instance.data["family"]:
            return

        # frame range
        self.handle_start = instance.data["handleStart"]
        self.handle_end = instance.data["handleEnd"]
        self.clip_in = int(item.timelineIn())
        self.clip_out = int(item.timelineOut())
        self.clip_in_h = self.clip_in - self.handle_start
        self.clip_out_h = self.clip_out + self.handle_end

        track_item = instance.data["item"]
        track = track_item.parent()
        track_index = track.trackIndex()
        tracks_effect_items = instance.context.data.get("tracksEffectItems")
        clip_effect_items = instance.data.get("clipEffectItems")

        # add clips effects to track's:
        if clip_effect_items:
            tracks_effect_items[track_index] = clip_effect_items

        # process all effects and divide them to instance
        for _track_index, sub_track_items in tracks_effect_items.items():
            # skip if track index is the same as review track index
            if review and review_track_index == _track_index:
                continue
            for sitem in sub_track_items:
                # make sure this subtrack item is relative of track item
                if ((track_item not in sitem.linkedItems())
                        and (len(sitem.linkedItems()) > 0)):
                    continue
                if not (track_index <= _track_index):
                    continue
                effect = self.add_effect(_track_index, sitem)
                if effect:
                    effects.update(effect)

        # skip any without effects
        if not effects:
            return

        subset = instance.data.get("subset")
        effects.update({"assignTo": subset})

        subset_split = re.findall(r'[A-Z][^A-Z]*', subset)

        if len(subset_split) > 0:
            root_name = subset.replace(subset_split[0], "")
            subset_split.insert(0, root_name.capitalize())

        subset_split.insert(0, "effect")

        name = "".join(subset_split)

        # create new instance and inherit data
        data = {}
        for key, value in instance.data.items():
            if "clipEffectItems" in key:
                continue
            data[key] = value

        # change names
        data["subset"] = name
        data["family"] = family
        data["families"] = [family]
        data["name"] = data["subset"] + "_" + data["asset"]
        data["label"] = "{} - {}".format(
            data['asset'], data["subset"]
        )
        data["effects"] = effects

        # create new instance
        _instance = instance.context.create_instance(**data)
        self.log.info("Created instance `{}`".format(_instance))
        self.log.debug("instance.data `{}`".format(_instance.data))

    def test_overlap(self, effect_t_in, effect_t_out):
        """Return True if [effect_t_in, effect_t_out] overlaps the clip range."""
        covering_exp = bool(
            (effect_t_in <= self.clip_in)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_right_exp = bool(
            (effect_t_in < self.clip_out)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_left_exp = bool(
            (effect_t_out > self.clip_in)
            and (effect_t_in <= self.clip_in)
        )

        return any((
            covering_exp,
            overlaying_right_exp,
            overlaying_left_exp
        ))

    def add_effect(self, track_index, sitem):
        """Serialize one sub-track effect item, or return None if it misses the clip."""
        track = sitem.parentTrack().name()
        # node serialization
        node = sitem.node()
        node_serialized = self.node_serialization(node)
        node_name = sitem.name()
        node_class = node.Class()

        # collect timelineIn/Out
        effect_t_in = int(sitem.timelineIn())
        effect_t_out = int(sitem.timelineOut())

        if not self.test_overlap(effect_t_in, effect_t_out):
            return

        self.log.debug("node_name: `{}`".format(node_name))
        self.log.debug("node_class: `{}`".format(node_class))

        return {node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": sitem.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }}

    def node_serialization(self, node):
        """Capture a node's knob values (animated knobs as per-frame value lists)."""
        node_serialized = {}

        # adding ignoring knob keys
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version', 'matrix']

        # loop through all knobs and collect not ignored
        # and any with any value
        for knob in node.knobs().keys():
            # skip nodes in ignore keys
            if knob in _ignoring_keys:
                continue

            # get animation if node is animated
            if node[knob].isAnimated():
                # grab animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(
                                 self.clip_in_h, self.clip_out_h + 1)]
                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        # (Removed stray '|' residue that trailed the final line.)
        return node_serialized
# Owner(s): ["oncall: distributed"]
import itertools
import torch
from torch.distributed._tensor import distribute_tensor
from torch.distributed._tensor._utils import (
compute_local_shape,
compute_local_shape_and_global_offset,
)
from torch.distributed._tensor.device_mesh import DeviceMesh
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class UtilTest(DTensorTestBase):
    """Tests for compute_local_shape and compute_local_shape_and_global_offset."""

    @property
    def METHOD_NAME(self):
        # Number of ranks the DTensor test harness spawns for these tests.
        return 8

    @with_comms
    def test_compute_local_shape_2d_uneven(self):
        # mesh: 4 * 2
        mesh_tensor = torch.arange(self.METHOD_NAME).reshape(4, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        size = torch.Size([7, 7])
        rank_coordinates = mesh.get_coordinate()

        # replicate, shard
        placements2 = [Replicate(), Shard(0)]
        local_size2 = compute_local_shape(size, mesh, placements2)
        # 7 rows sharded over 2 mesh columns -> 4 on the first, 3 on the second.
        if rank_coordinates[1] < 1:
            self.assertEqual(local_size2, torch.Size([4, 7]))
        else:
            self.assertEqual(local_size2, torch.Size([3, 7]))

        # shard, shard
        placements3 = [Shard(0), Shard(1)]
        local_size3 = compute_local_shape(size, mesh, placements3)
        # first dim
        if rank_coordinates[0] < 3:
            self.assertEqual(local_size3[0], 2)
        else:
            self.assertEqual(local_size3[0], 1)
        # second dim
        if rank_coordinates[1] < 1:
            self.assertEqual(local_size3[1], 4)
        else:
            self.assertEqual(local_size3[1], 3)

    @with_comms
    def test_compute_local_shape_and_global_offset_1D(self):
        one_d_placements = [[Shard(0)], [Replicate()]]

        for placements in one_d_placements:
            mesh_tensor = torch.arange(self.METHOD_NAME)
            device_mesh = DeviceMesh(self.device_type, mesh_tensor)
            global_tensor = torch.arange(64).view(8, 8)
            global_shape = global_tensor.size()

            dtensor = distribute_tensor(global_tensor, device_mesh, placements)
            local_size, global_offset = compute_local_shape_and_global_offset(
                global_shape, device_mesh, placements
            )

            # TODO: make this test cleaner and work for nD
            dim0_start = global_offset[0]
            dim0_end = global_offset[0] + local_size[0]

            # Check the local tensor of dtensor is exactly the same
            # if we slice the global_tensor with local_size and global_offset
            self.assertEqual(
                dtensor.to_local(),
                global_tensor[dim0_start:dim0_end],
            )

    @with_comms
    def test_compute_local_shape_and_global_offset_2D(self):
        two_d_placements_options = [Shard(0), Shard(1), Replicate()]
        # Generating 6 two-d placements combinations
        two_d_placements = list(
            itertools.combinations_with_replacement(two_d_placements_options, 2)
        )

        for placements in two_d_placements:
            # mesh: 2 * 4
            mesh_tensor = torch.arange(self.METHOD_NAME).reshape(2, 4)
            device_mesh = DeviceMesh(self.device_type, mesh_tensor)
            global_tensor = torch.arange(64).view(8, 8)
            global_shape = global_tensor.size()

            dtensor = distribute_tensor(global_tensor, device_mesh, placements)
            local_size, global_offset = compute_local_shape_and_global_offset(
                global_shape, device_mesh, placements
            )

            # TODO: make this test cleaner and work for nD
            dim0_start = global_offset[0]
            dim0_end = global_offset[0] + local_size[0]
            dim1_start = global_offset[1]
            dim1_end = global_offset[1] + local_size[1]

            # Check the local tensor of dtensor is exactly the same
            # if we slice the global_tensor with local_size and global_offset
            self.assertEqual(
                dtensor.to_local(),
                global_tensor[dim0_start:dim0_end, dim1_start:dim1_end],
            )
if __name__ == "__main__":
    # Run via torch's common test runner.
    # (Removed stray '|' residue that trailed this line.)
    run_tests()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import partial
import numpy as np
import paddle
from data import convert_example, create_dataloader, read_text_pair
from model import QuestionMatching
from paddlenlp.data import Pad, Tuple
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import AutoModel, AutoTokenizer
# fmt: off
# CLI: input/output file paths, model checkpoint location, and inference knobs.
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", type=str, required=True, help="The full path of input file")
parser.add_argument("--result_file", type=str, required=True, help="The result file name")
parser.add_argument("--params_path", type=str, required=True, help="The path to model parameters to be loaded.")
parser.add_argument("--max_seq_length", default=256, type=int, help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
args = parser.parse_args()
# fmt: on
def METHOD_NAME(model, data_loader):
    """
    Predicts the data labels.

    Args:
        model (obj:`QuestionMatching`): A model to calculate whether the question pair is semantic similar or not.
        data_loader (obj:`List(Example)`): The processed data ids of text pair: [query_input_ids, query_token_type_ids, title_input_ids, title_token_type_ids]

    Returns:
        results(obj:`List`): cosine similarity of text pairs.
    """
    collected = []
    model.eval()
    with paddle.no_grad():
        for input_ids, token_type_ids in data_loader:
            logits, _ = model(
                input_ids=paddle.to_tensor(input_ids),
                token_type_ids=paddle.to_tensor(token_type_ids),
            )
            collected.append(logits.numpy())
    return np.concatenate(collected, axis=0)
if __name__ == "__main__":
    paddle.set_device(args.device)

    # Backbone + tokenizer: ERNIE 3.0 medium (Chinese).
    pretrained_model = AutoModel.from_pretrained("ernie-3.0-medium-zh")
    tokenizer = AutoTokenizer.from_pretrained("ernie-3.0-medium-zh")

    trans_func = partial(convert_example, tokenizer=tokenizer, max_seq_length=args.max_seq_length, is_test=True)

    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # segment_ids
    ): [data for data in fn(samples)]

    test_ds = load_dataset(read_text_pair, data_path=args.input_file, is_test=True, lazy=False)

    test_data_loader = create_dataloader(
        test_ds, mode="predict", batch_size=args.batch_size, batchify_fn=batchify_fn, trans_fn=trans_func
    )

    model = QuestionMatching(pretrained_model)

    if args.params_path and os.path.isfile(args.params_path):
        state_dict = paddle.load(args.params_path)
        model.set_dict(state_dict)
        print("Loaded parameters from %s" % args.params_path)
    else:
        raise ValueError("Please set --params_path with correct pretrained model file")

    y_probs = METHOD_NAME(model, test_data_loader)
    y_preds = np.argmax(y_probs, axis=1)

    # One predicted label per input line.
    # (Removed stray '|' residue that trailed the final line.)
    with open(args.result_file, "w", encoding="utf-8") as f:
        for y_pred in y_preds:
            f.write(str(y_pred) + "\n")
# Generated by Selenium IDE
# pylint: skip-file
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestEnablealldags():
def setup_method(self, method): # pylint: disable=unused-argument
"""setup method."""
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("disable-infobars")
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(
options=chrome_options) # pylint: disable=attribute-defined-outside-init
self.driver.implicitly_wait(10)
    def METHOD_NAME(self, method):  # pylint: disable=unused-argument
        """teardown method: quit the Chrome driver to free the browser process."""
        self.driver.quit()
def test_enablealldags(self, params):
self.driver.get(
"http://{}:{}/".format(
params["server"],
params["port"])) # pylint: disable=consider-using-f-string
self.driver.set_window_size(1440, 790)
self.driver.find_element(By.ID, "username").send_keys("rf-test")
self.driver.find_element(
By.ID, "password").send_keys("rf_password123!")
self.driver.find_element(By.CSS_SELECTOR, ".btn-primary").click()
element = self.driver.find_element(
By.CSS_SELECTOR, ".active:nth-child(2) .material-icons")
actions = ActionChains(self.driver)
actions.move_to_element(element).perform()
element = self.driver.find_element(By.CSS_SELECTOR, "body")
actions = ActionChains(self.driver)
actions.move_to_element(element).perform()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(1) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(2) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(3) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(4) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(5) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(6) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(7) > td:nth-child(1)").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(7) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(8) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(9) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(10) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(11) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(12) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(13) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(14) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(15) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(16) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(17) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(18) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(19) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(20) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(21) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(22) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(23) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(24) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(25) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(26) .switch").click()
self.driver.find_element(
By.CSS_SELECTOR, "tr:nth-child(27) .switch").click()
self.driver.close() |
7,015 | dump state | import os
import sys
from PyQt5.QtCore import Qt, QDir
from PyQt5.QtGui import QIcon, QPixmap, QGuiApplication
from PyQt5.QtWidgets import QApplication, QWidget
from feeluown.gui.browser import Browser
from feeluown.gui.hotkey import HotkeyManager
from feeluown.gui.image import ImgManager
from feeluown.gui.theme import ThemeManager
from feeluown.gui.tips import TipsManager
from feeluown.gui.watch import WatchManager
from feeluown.gui.ui import Ui
from feeluown.gui.tray import Tray
from feeluown.gui.uimodels.provider import ProviderUiManager
from feeluown.gui.uimodels.playlist import PlaylistUiManager
from feeluown.gui.uimodels.my_music import MyMusicUiManager
from feeluown.collection import CollectionManager
from .app import App
class GuiApp(App, QWidget):
    """GUI application: combines the core App with a Qt top-level widget.

    Sets up Qt application-wide resources (icons, fonts, tray behaviour),
    instantiates the GUI helper managers, and persists/restores GUI state
    alongside the base application state.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): assumes the second positional arg is the config
        # object (matches the ``config.ENABLE_TRAY`` usage below) — confirm
        # against App.__init__'s signature.
        config = args[1]
        pkg_root_dir = os.path.join(os.path.dirname(__file__), '..')
        icons_dir = os.path.join(pkg_root_dir, 'gui/assets/icons')
        QDir.addSearchPath('icons', icons_dir)
        QGuiApplication.setWindowIcon(QIcon(QPixmap('icons:feeluown.png')))
        # Set desktopFileName so that the window icon is properly shown under wayland.
        # I don't know if this setting brings other benefits or not.
        # https://github.com/pyfa-org/Pyfa/issues/1607#issuecomment-392099878
        QApplication.setDesktopFileName('FeelUOwn')
        QApplication.instance().setQuitOnLastWindowClosed(not config.ENABLE_TRAY)
        QApplication.instance().setApplicationName('FeelUOwn')
        if sys.platform == 'win32':
            font = QApplication.font()
            # By default, it uses SimSun(宋体) on windows, which is a little ugly.
            # "Segoe UI Symbol" is used to render charactor symbols.
            # "Microsoft Yahei" is used to render chinese (and english).
            # Choose a default sans-serif font when the first two fonts do not work,
            font.setFamilies(['Segoe UI Symbol', 'Microsoft YaHei', 'sans-serif'])
            # When a HiDPI screen is used, users need to set both font DPI and
            # screen scale factor to make it working properly when pointSize is used.
            # It's hard for most users to set them right.
            # When using pixelSize, users only need to set screen scale factor.
            # In other words, only QT_AUTO_SCREEN_SCALE_FACTOR=1 is needed to set
            # and feeluown can works properly in HiDPI environment.
            #
            # Based on past experience, 13px is the default font size for all platform,
            # including windows, linux and macOS.
            font.setPixelSize(13)
            QApplication.setFont(font)
        QWidget.__init__(self)
        App.__init__(self, *args, **kwargs)
        # Keep a reference to the QApplication instance on the class.
        GuiApp.__q_app = QApplication.instance()
        self.setObjectName('app')
        # Helper/management modules for the GUI.
        self.coll_mgr = CollectionManager(self)
        self.theme_mgr = ThemeManager(self, parent=self)
        self.tips_mgr = TipsManager(self)
        self.hotkey_mgr = HotkeyManager(self)
        self.img_mgr = ImgManager(self)
        self.watch_mgr = WatchManager(self)
        # Data-management modules for GUI components.
        self.pvd_uimgr = ProviderUiManager(self)
        self.pl_uimgr = PlaylistUiManager(self)
        self.mymusic_uimgr = MyMusicUiManager(self)
        self.browser = Browser(self)
        self.ui = Ui(self)
        if self.config.ENABLE_TRAY:
            self.tray = Tray(self)
        # Shortcut to the message line's show_msg for convenience.
        self.show_msg = self.ui._message_line.show_msg
    def initialize(self):
        """Initialize managers; order matters (tray/theme before scanning)."""
        super().initialize()
        self.hotkey_mgr.initialize()
        self.theme_mgr.initialize()
        if self.config.ENABLE_TRAY:
            self.tray.initialize()
            self.tray.show()
        self.coll_mgr.scan()
        self.watch_mgr.initialize()
        self.browser.initialize()
        QApplication.instance().aboutToQuit.connect(self.about_to_exit)
    def run(self):
        """Show the main window and run the base application loop."""
        self.show()
        super().run()
    def apply_state(self, state):
        """Restore application state, then the GUI-specific (lyric) state."""
        super().apply_state(state)
        coll_library = self.coll_mgr.get_coll_library()
        self.browser.goto(page=f'/colls/{coll_library.identifier}')
        gui = state.get('gui', {})
        lyric = gui.get('lyric', {})
        self.ui.lyric_window.apply_state(lyric)
    def METHOD_NAME(self):
        """Return persistable app state, extended with GUI (lyric) state."""
        state = super().METHOD_NAME()
        state['gui'] = {'lyric': self.ui.lyric_window.METHOD_NAME()}
        return state
    def closeEvent(self, _):
        # Without a tray, closing the window exits the whole application.
        if not self.config.ENABLE_TRAY:
            self.exit()
    def mouseReleaseEvent(self, e):
        # Map mouse back/forward buttons to in-app browser navigation;
        # ignore releases that happen outside the widget rect.
        if not self.rect().contains(e.pos()):
            return
        if e.button() == Qt.BackButton:
            self.browser.back()
        elif e.button() == Qt.ForwardButton:
            self.browser.forward()
    def exit_player(self):
        # Destroy GL context or mpv renderer
        self.ui.mpv_widget.shutdown()
        super().exit_player()
    def about_to_exit(self):
        super().about_to_exit()
        # Disconnect so the handler cannot fire twice during teardown.
        QApplication.instance().aboutToQuit.disconnect(self.about_to_exit)
    def exit(self):
        """Ask Qt to quit the event loop (triggers aboutToQuit)."""
        QApplication.exit()
7,016 | load index | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Defines the dashboard class."""
import json
import os
import uuid
from html.parser import HTMLParser
from rai_core_flask import FlaskHelper # , environment_detector
from raiutils.data_processing import serialize_json_safe
from raiwidgets.interfaces import WidgetRequestResponseConstants
invalid_feature_flights_error = \
"feature_flights should be of type string. Separate multiple flights " \
"using ampersand (&)."
class InLineScript(HTMLParser):
    """HTML parser that rebuilds a document while inlining external scripts.

    Every ``<script src=...>`` tag is rewritten so that the referenced file's
    content (fetched via *load_widget_file*) is embedded inline; all other
    markup is passed through unchanged into ``self.content``.
    """

    def __init__(self, load_widget_file):
        HTMLParser.__init__(self)
        self.content = ""
        self.load_widget_file = load_widget_file

    def handle_starttag(self, tag, attrs):
        # Non-script tags (and scripts without a src) pass through verbatim.
        if tag != "script":
            self.content += self.get_starttag_text()
            return
        src = None
        rendered = "<script "
        for name, value in attrs:
            if name == "src":
                # Remember the source; it is dropped from the rendered tag.
                src = value
            elif name == "type":
                # skip module type as it causes ipython to render widget
                # with 8px height
                pass
            else:
                rendered += f' {name}={value}'
        if src is None:
            self.content += self.get_starttag_text()
        else:
            inlined = self.load_widget_file(src)
            self.content += f'{rendered}>\r\n{inlined}\r\n'

    def handle_endtag(self, tag):
        self.content += "</" + tag + ">"

    def handle_data(self, data):
        self.content += data
class Dashboard(object):
    """The dashboard class, wraps the dashboard component.

    Starts a local Flask service that serves the widget's HTML/JS and model
    data, and (unless suppressed) renders the dashboard inline.
    """

    def __init__(self, *,
                 dashboard_type,
                 model_data,
                 public_ip,
                 port,
                 locale,
                 no_inline_dashboard=False,
                 **kwargs):
        """Initialize the dashboard.

        :param dashboard_type: The type of dashboard widget to render.
        :param model_data: Serializable payload served to the widget.
        :param public_ip: IP address for the local Flask service.
        :param port: Port for the local Flask service.
        :param locale: Locale forwarded to the widget.
        :param no_inline_dashboard: If True, start the service but do not
            display the dashboard inline.
        :param kwargs: Optional settings; ``feature_flights`` must be a
            string (multiple flights separated by ampersand).
        :raises ValueError: If ``dashboard_type``/``model_data`` is missing
            or ``feature_flights`` is not a string.
        """
        # Bug fix: the original condition read ``type is None``, which tests
        # the ``type`` builtin (never None) instead of the dashboard_type
        # argument, so a missing dashboard type was silently accepted.
        if model_data is None or dashboard_type is None:
            raise ValueError("Required parameters not provided")
        try:
            self._service = FlaskHelper(ip=public_ip, port=port)
        except Exception:
            # Record the failure and re-raise with the original traceback.
            self._service = None
            raise
        self.id = uuid.uuid4().hex
        feature_flights = kwargs.get('feature_flights')
        if feature_flights and not isinstance(feature_flights, str):
            raise ValueError(invalid_feature_flights_error)
        self.config = {
            'dashboardType': dashboard_type,
            'id': self.id,
            'baseUrl': self._service.env.base_url,
            'withCredentials': self._service.with_credentials,
            'locale': locale,
            'featureFlights': feature_flights
        }
        self.model_data = model_data
        self.add_route()
        html = self.METHOD_NAME()
        print(f'{dashboard_type} started at {self._service.env.base_url}')
        if no_inline_dashboard:
            return
        self._service.env.display(html)

    def add_route(self):
        """Register the widget's HTTP endpoints on the Flask service."""
        # To enable multiple dashboards to run in the same notebook we need to
        # prevent them from using the same method names (in addition to using
        # dedicated ports). Below we rename the function for that purpose and
        # manually add the URL rule instead of using the route decorator.
        def visual():
            return self.METHOD_NAME()
        self.add_url_rule(visual, '/', methods=["GET"])

        def get_config():
            return json.dumps({
                WidgetRequestResponseConstants.data: self.config
            })
        self.add_url_rule(get_config, '/config', methods=["POST"])

        def get_model_data():
            return json.dumps({
                WidgetRequestResponseConstants.data: self.model_data},
                default=serialize_json_safe)
        self.add_url_rule(get_model_data, '/model_data', methods=["POST"])
        return

    @staticmethod
    def get_widget_path(path):
        """Return the absolute path of *path* inside the bundled widget dir."""
        script_path = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(script_path, "widget", path)

    def METHOD_NAME(self):
        """Load index.html and inline all external <script src=...> files."""
        index = self.load_widget_file("index.html")
        parser = InLineScript(self.load_widget_file)
        parser.feed(index)
        return parser.content

    def load_widget_file(self, path):
        """Read a widget asset and substitute the dashboard placeholders.

        Replaces the ``__rai_app_id__`` / ``__rai_config__`` /
        ``__rai_model_data__`` markers with this instance's id, config and
        serialized model data.
        """
        js_path = Dashboard.get_widget_path(path)
        with open(js_path, "r", encoding="utf-8") as f:
            content = f.read()
            content = content.replace(
                "__rai_app_id__", f'rai_widget_{self.id}')
            content = content.replace(
                '"__rai_config__"', f'`{json.dumps(self.config)}`')
            model_data = json.dumps(self.model_data,
                                    default=serialize_json_safe)
            content = content.replace(
                '"__rai_model_data__"',
                f'`{model_data}`')
            return content

    def add_url_rule(self, func, route, methods):
        """To enable multiple dashboards to run in the same notebook we need to
        prevent them from using the same method names (in addition to using
        dedicated ports). We rename the function for that purpose and
        manually add the URL rule instead of using the route decorator.
        """
        func.__name__ = func.__name__ + str(id(self))
        self._service.app.add_url_rule(
            route,
            endpoint=func.__name__,
            view_func=func,
            methods=methods)
7,017 | update render storage | # Copyright (c) 2022 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from typing import Optional, Tuple
from PyQt6.QtGui import QImage #For typing.
from UM.Logger import Logger
from UM.View.GL.OpenGL import OpenGL
from UM.View.GL.FrameBufferObject import FrameBufferObject
class RenderPass:
    """Base class for a rendering pass.
    The RenderPass class encapsulates a render pass, that is a single
    step in the rendering process.
    :note While the render pass could technically support render to
    texture without using Framebuffer Objects, the PyQt bindings object
    lacks support for any function like glReadPixels. Therefore, the
    Qt OpenGL initialization code checks for FBO support and aborts the
    program if no support is found.
    """
    MaximumPriority = 999
    """The maximum priority of a render pass. Priority should always be
    less than this.
    """
    def __init__(self, name: str, width: int, height: int, priority: int = 0) -> None:
        self._name = name #type: str
        self._width = width #type: int
        self._height = height #type: int
        self._priority = priority #type: int
        self._gl = OpenGL.getInstance().getBindingsObject()
        # FBO is created lazily on first bind() (must happen on main thread).
        self._fbo = None #type: Optional[FrameBufferObject]
    def getName(self) -> str:
        """Get the name of this RenderPass.
        :return: The name of the render pass.
        """
        return self._name
    def getSize(self) -> Tuple[int, int]:
        """Return the (width, height) of this render pass in pixels."""
        return self._width, self._height
    def getPriority(self) -> int:
        """Get the priority of this RenderPass.
        The priority is used for ordering the render passes. Lower priority render passes
        are rendered earlier and are available for later render passes to use as texture
        sources.
        :return: The priority of this render pass.
        """
        return self._priority
    def setSize(self, width: int, height: int) -> None:
        """Set the size of this render pass.
        :param width: The new width of the render pass.
        :param height: The new height of the render pass.
        :note This will recreate the storage object used by the render
        pass. Due to that, the contents will be invalid after resizing
        until the render pass is rendered again.
        """
        if self._width != width or self._height != height:
            self._width = width
            self._height = height
            self._fbo = None # Ensure the fbo is re-created next render pass.
    def bind(self) -> None:
        """Bind the render pass so it can be rendered to.
        This will make sure everything is set up so the contents of
        this render pass will be updated correctly. It should be called
        as part of your render() implementation.
        :note It is very important to call release() after a call to
        bind(), once done with rendering.
        """
        if self._fbo is None:
            # Ensure that the fbo is created. This is done on (first) bind, as this needs to be done on the main thread.
            self.METHOD_NAME()
        if self._fbo:
            self._fbo.bind()
            # Ensure we can actually write to the relevant FBO components.
            self._gl.glColorMask(self._gl.GL_TRUE, self._gl.GL_TRUE,self._gl.GL_TRUE, self._gl.GL_TRUE)
            self._gl.glDepthMask(self._gl.GL_TRUE)
            self._gl.glClear(self._gl.GL_COLOR_BUFFER_BIT | self._gl.GL_DEPTH_BUFFER_BIT)
    def release(self) -> None:
        """Release the render pass.
        This makes sure the contents of this render pass are properly
        updated at the end of rendering.
        """
        if self._fbo is None:
            return #Already released. Nothing more to do.
        self._fbo.release()
        # Workaround for a driver bug with recent Intel chips on OSX.
        # Releasing the current FBO does not properly clear the depth buffer, so we have to do that manually.
        #if Platform.isOSX() and OpenGL.getInstance().getGPUVendor() == OpenGL.Vendor.Intel:
        #self._gl.glClear(self._gl.GL_COLOR_BUFFER_BIT | self._gl.GL_DEPTH_BUFFER_BIT)
    def render(self) -> None:
        """Render the contents of this render pass.
        This method should be reimplemented by subclasses to perform the
        actual rendering of the render pass.
        """
        raise NotImplementedError("Should be implemented by subclasses")
    def getTextureId(self) -> int:
        """Get the texture ID of this render pass so it can be reused by other passes.
        :return: The OpenGL texture ID used by this pass.
        """
        if self._fbo is None:
            Logger.log("w", "FrameBufferObject has been released. Can't get any frame buffer texture ID.")
            return -1
        return self._fbo.getTextureId()
    def getOutput(self) -> QImage:
        """Get the pixel data produced by this render pass.
        This returns an object that contains the pixel data for this render pass.
        :note The current object type returned is currently dependant on the specific
        implementation of the UM.View.GL.FrameBufferObject class.
        """
        if self._fbo is None:
            Logger.log("w", "FrameBufferObject has been released. Can't get frame output.")
            return QImage()
        return self._fbo.getContents()
    def METHOD_NAME(self) -> None:
        """(Re)create the FrameBufferObject that backs this render pass.

        Called lazily from bind(); does nothing while the application is
        shutting down or when the pass has a degenerate size.
        """
        # On Mac OS X, this function may get called by a main window resize signal during closing.
        # This will cause a crash, so don't do anything when it is shutting down.
        import UM.Qt.QtApplication
        if UM.Qt.QtApplication.QtApplication.getInstance().isShuttingDown():
            return
        if self._width <= 0 or self._height <= 0:
            Logger.log("w", "Tried to create render pass with size <= 0")
            return
        self._fbo = OpenGL.getInstance().createFrameBufferObject(self._width, self._height)
7,018 | test negative values | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the calculate_sleet_probability plugin."""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.precipitation_type.calculate_sleet_prob import calculate_sleet_probability
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
class Test_calculate_sleet_probability(IrisTest):
    """Tests the calculate sleet probability function."""
    def setUp(self):
        """Create cubes to input into the function."""
        # The expected results below follow the relation visible in the
        # data: P(sleet) = 1 - P(rain above surface) - P(snow below surface).
        self.thresholds = np.array([276, 277], dtype=np.float32)
        self.rain_name = "probability_of_falling_rain_level_above_surface"
        self.snow_name = "probability_of_falling_snow_level_below_surface"
        rain_prob = np.array(
            [
                [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],
                [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],
            ],
            dtype=np.float32,
        )
        self.rain_prob_cube = set_up_probability_cube(
            rain_prob, self.thresholds, variable_name=self.rain_name
        )
        snow_prob = np.array(
            [
                [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],
                [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],
            ],
            dtype=np.float32,
        )
        self.snow_prob_cube = set_up_probability_cube(
            snow_prob, self.thresholds, variable_name=self.snow_name
        )
        # Probabilities high enough that rain + high would exceed 1,
        # forcing a negative sleet probability (used by the error test).
        high_prob = np.array(
            [
                [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],
                [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],
            ],
            dtype=np.float32,
        )
        self.high_prob_cube = set_up_probability_cube(
            high_prob, self.thresholds, variable_name=self.snow_name
        )
    def test_basic_calculation(self):
        """Test the basic sleet calculation works."""
        expected_result = np.array(
            [
                [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
                [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],
            ],
            dtype=np.float32,
        )
        result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertTrue(result.dtype == np.float32)
    def test_with_ints(self):
        """Test the basic sleet calculation works with int8 data."""
        rain_prob_cube = self.rain_prob_cube.copy(
            np.array(
                [[[1, 0, 0], [0, 1, 1], [0, 0, 1]], [[1, 0, 0], [0, 1, 1], [0, 0, 1]]],
                dtype=np.int8,
            )
        )
        snow_prob_cube = self.snow_prob_cube.copy(
            np.array(
                [[[0, 1, 0], [1, 0, 0], [0, 1, 0]], [[0, 1, 0], [1, 0, 0], [0, 1, 0]]],
                dtype=np.int8,
            )
        )
        expected_result = np.array(
            [[[0, 0, 1], [0, 0, 0], [1, 0, 0]], [[0, 0, 1], [0, 0, 0], [1, 0, 0]]],
            dtype=np.int8,
        )
        result = calculate_sleet_probability(rain_prob_cube, snow_prob_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertTrue(result.dtype == np.int8)
    def METHOD_NAME(self):
        """Test that an exception is raised for negative values of
        probability_of_sleet in the cube."""
        rain = self.rain_prob_cube
        high_prob = self.high_prob_cube
        msg = "Negative values of sleet probability have been calculated."
        with self.assertRaisesRegex(ValueError, msg):
            calculate_sleet_probability(rain, high_prob)
    def test_name_of_cube(self):
        """Test that the name has been changed to sleet_probability"""
        result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)
        name = "probability_of_sleet"
        self.assertEqual(result.long_name, name)
if __name__ == "__main__":
    unittest.main()
7,019 | test create folder err | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import pytest
from tcr_misc import random_string
from lib.cuckoo.common import utils
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.path_utils import path_mkdir
def test_free_space_monitor(mocker):
    """Smoke test: monitor returns immediately with return_value=True."""
    # Will not enter main loop
    utils.free_space_monitor(return_value=True)
def test_get_memdump_path(mocker):
    """Memdump path ends with storage/analyses/<id>/memory.dmp."""
    ret_path = utils.get_memdump_path(memdump_id=123)
    assert ret_path.rsplit("/", 4)[-4:] == "storage/analyses/123/memory.dmp".split("/")
class TestValidateReferrer:
    """utils.validate_referrer: passes http URLs, rejects others/None."""
    def test_validate_referrer(self):
        assert utils.validate_referrer(url="http://foo.example.com:1337/bar") == "http://foo.example.com:1337/bar"
    def test_validate_referrer_bad_url(self):
        assert utils.validate_referrer(url="irc://foo.example.com:1337") is None
    def test_validate_referrer_no_url(self):
        assert utils.validate_referrer(url=None) is None
@pytest.fixture
def rnd_tmp_folder():
    """Yield a random folder name; best-effort removal of /tmp/<name> after."""
    random_file_name = random_string()
    yield random_file_name
    try:
        os.rmdir("/tmp/" + random_file_name)
    except Exception as e:
        # Cleanup is best-effort: the test may never have created the folder.
        print(("Error cleaning up, probably fine:" + str(e)))
class TestFileOps:
    """Tests for utils.create_folder(s)/delete_folder error handling."""
    def test_create_folders_no_folders(self):
        # No folder argument: should be a no-op rather than an error.
        utils.create_folders(root="foo")
    def test_create_folder_default(self):
        with pytest.raises(CuckooOperationalError):
            utils.create_folder()
    """
    def test_create_folder(self, rnd_tmp_folder):
        utils.create_folder(root="/tmp", folder=rnd_tmp_folder)
        assert test_create_folder_err("/tmp/" + rnd_tmp_folder) is True
    """
    # NOTE(review): the string literal above is a disabled test kept as a
    # class-level docstring-like blob; it references test_create_folder_err,
    # which is not defined anywhere in this file.
    def METHOD_NAME(self, rnd_tmp_folder, mocker):
        # mkdir failure is wrapped in CuckooOperationalError.
        mocker.patch("pathlib.Path.mkdir", side_effect=OSError)
        with pytest.raises(CuckooOperationalError):
            utils.create_folder(root="/tmp", folder=rnd_tmp_folder)
    def test_delete_folder(self, rnd_tmp_folder):
        folder = "/tmp/" + rnd_tmp_folder
        path_mkdir(folder)
        utils.delete_folder(folder)
    def test_delete_folder_err(self, rnd_tmp_folder, mocker):
        # rmtree failure is wrapped in CuckooOperationalError.
        folder = "/tmp/" + rnd_tmp_folder
        path_mkdir(folder)
        mocker.patch("shutil.rmtree", side_effect=OSError)
        with pytest.raises(CuckooOperationalError):
            utils.delete_folder(folder)
class TestConvertChar:
    """Tests for utils.convert_char (escape non-printable characters)."""

    def test_utf(self):
        # Bug fix: the original line was ``assert "\\xe9", utils.convert_char(...)``,
        # i.e. the ``assert expr, msg`` form, which always passes. Compare the
        # converted value instead (matching TestConvertToPrintable below).
        assert "\\xe9" == utils.convert_char("\xe9")

    def test_digit(self):
        assert "9" == utils.convert_char("9")

    def test_literal(self):
        assert "e" == utils.convert_char("e")

    def test_punctation(self):
        assert "." == utils.convert_char(".")

    def test_whitespace(self):
        assert " " == utils.convert_char(" ")
class TestConvertToPrintable:
    """utils.convert_to_printable: printable chars pass, others escaped."""
    def test_utf(self):
        assert "\\xe9" == utils.convert_to_printable("\xe9")
    def test_digit(self):
        assert "9" == utils.convert_to_printable("9")
    def test_literal(self):
        assert "e" == utils.convert_to_printable("e")
    def test_punctation(self):
        assert "." == utils.convert_to_printable(".")
    def test_whitespace(self):
        assert " " == utils.convert_to_printable(" ")
    def test_non_printable(self):
        # chr(11) is vertical tab; escaped to its hex form.
        assert r"\x0b" == utils.convert_to_printable(chr(11))
class TestIsPrintable:
    """utils.is_printable: True only for printable ASCII-ish characters."""
    def test_utf(self):
        assert not utils.is_printable("\xe9")
    def test_digit(self):
        assert utils.is_printable("9")
    def test_literal(self):
        assert utils.is_printable("e")
    def test_punctation(self):
        assert utils.is_printable(".")
    def test_whitespace(self):
        assert utils.is_printable(" ")
    def test_non_printable(self):
        assert not utils.is_printable(chr(11))
class TestConvertFilenameChar:
    """utils.convert_filename_char: escape chars unsafe in filenames."""
    def test_convert_filename_char(self):
        assert utils.convert_filename_char("\u00A3") == "\\xa3"
    def test_convert_filename_char_allowed(self):
        assert utils.convert_filename_char("!") == "!"
class TestIsSaneFilename:
    """utils.is_sane_filename: reject filenames with control characters."""
    def test_is_sane_filename(self):
        assert utils.is_sane_filename("abc") is True
    def test_is_sane_filename_not(self):
        assert utils.is_sane_filename("\n") is False
class TestSanitizePathname:
    """utils.sanitize_pathname: escape unsafe characters in paths."""
    def test_sanitize_pathname(self):
        assert utils.sanitize_pathname("abc") == "abc"
    def test_sanitize_pathname_not(self):
        assert utils.sanitize_pathname("\nabc") == "\\x0aabc"
class TestPrettyPrintRetval:
    """utils.pretty_print_retval: map NT status codes to symbolic names."""
    def test_pretty_print_retval_no_lookup(self):
        assert utils.pretty_print_retval(status=False, retval="0") is None
    def test_pretty_print_retval(self):
        assert utils.pretty_print_retval(status=False, retval="0xc0000139") == "ENTRYPOINT_NOT_FOUND"
    def test_pretty_print_retval_err(self):
        # Non-numeric retval must not raise; it simply yields None.
        assert utils.pretty_print_retval(status=False, retval="-") is None
    def test_pretty_print_retval_true_status(self):
        assert utils.pretty_print_retval(status=True, retval="0") is None
@pytest.mark.skip
def test_is_safelisted_domain():
    """Skipped: exercises the domain safelist lookup."""
    from lib.cuckoo.common.safelist import is_safelisted_domain
    assert is_safelisted_domain("java.com") is True
    assert is_safelisted_domain("java2.com") is False
    assert is_safelisted_domain("crl.microsoft.com") is True
7,020 | test mean | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
from copy import deepcopy as dc
from common import compare_numpy_output
# Elementwise reduction tests over a fixed (10, 5, 3) array.
# NOTE(review): compare_numpy_output (from common) presumably runs each
# function through DaCe and compares against plain NumPy — confirm there.
@compare_numpy_output()
def test_sum(A: dace.float64[10, 5, 3]):
    return np.sum(A)
@compare_numpy_output()
def test_sum_1(A: dace.float64[10, 5, 3]):
    return np.sum(A, axis=1)
@compare_numpy_output()
def test_min(A: dace.float64[10, 5, 3]):
    return np.min(A)
@compare_numpy_output()
def test_max(A: dace.float64[10, 5, 3]):
    return np.max(A)
@compare_numpy_output()
def test_min_1(A: dace.float64[10, 5, 3]):
    return np.min(A, axis=1)
# Integer dtype variants of the axis reductions.
@compare_numpy_output()
def test_min_int32(A: dace.int32[10, 5, 3]):
    return np.min(A, axis=1)
@compare_numpy_output()
def test_min_int64(A: dace.int64[10, 5, 3]):
    return np.min(A, axis=1)
@compare_numpy_output()
def test_max_int32(A: dace.int32[10, 5, 3]):
    return np.max(A, axis=1)
@compare_numpy_output()
def test_max_int64(A: dace.int64[10, 5, 3]):
    return np.max(A, axis=1)
@compare_numpy_output()
def test_max_1(A: dace.float64[10, 5, 3]):
    return np.max(A, axis=1)
# argmin/argmax variants in float and integer dtypes.
@compare_numpy_output()
def test_argmax_1(A: dace.float64[10, 5, 3]):
    return np.argmax(A, axis=1)
@compare_numpy_output()
def test_argmin_1(A: dace.float64[10, 5, 3]):
    return np.argmin(A, axis=1)
@compare_numpy_output()
def test_argmin_1_int32(A: dace.int32[10, 5, 3]):
    return np.argmin(A, axis=1)
@compare_numpy_output()
def test_argmin_1_int64(A: dace.int64[10, 5, 3]):
    return np.argmin(A, axis=1)
@compare_numpy_output()
def test_argmax_1_int32(A: dace.int32[10, 5, 3]):
    return np.argmax(A, axis=1)
@compare_numpy_output()
def test_argmax_1_int64(A: dace.int64[10, 5, 3]):
    return np.argmax(A, axis=1)
def test_return_both():
    """Check _argminmax with return_both=True emits both the reduced values
    and the indices of the minimum along the given axis."""
    from dace.frontend.python.replacements import _argminmax
    sdfg = dace.SDFG("test_return_both")
    state = sdfg.add_state()
    sdfg.add_array("IN", [10, 5, 3], dace.float64)
    _, (outval, outidx) = _argminmax(None, sdfg, state, "IN", 1, "min", return_both=True)
    IN = np.random.rand(10, 5, 3)
    OUT_IDX = np.zeros((10, 3), dtype=np.int32)
    OUT_VAL = np.zeros((10, 3), dtype=np.float64)
    # Mark the outputs non-transient so the SDFG call can fill them in-place.
    sdfg.arrays[outval].transient = False
    sdfg.arrays[outidx].transient = False
    sdfg(**{"IN": IN.copy(), outval: OUT_VAL, outidx: OUT_IDX})
    # Bug fix: the original called np.allclose without asserting, so the
    # test could never fail regardless of the result.
    assert np.allclose(OUT_IDX, np.argmin(IN, axis=1))
    assert np.allclose(OUT_VAL, np.min(IN, axis=1))
def test_argmin_result_type():
    """argmin honors result_type when given, defaulting to int32."""
    @dace.program
    def test_argmin_result(A: dace.float64[10, 5, 3]):
        return np.argmin(A, axis=1, result_type=dace.int64)
    res = test_argmin_result(np.random.rand(10, 5, 3))
    assert res.dtype == np.int64
    # Redefine the program without result_type to check the default dtype.
    @dace.program
    def test_argmin_result(A: dace.float64[10, 5, 3]):
        return np.argmin(A, axis=1)
    res = test_argmin_result(np.random.rand(10, 5, 3))
    assert res.dtype == np.int32
@compare_numpy_output()
def test_sum_negative_axis(A: dace.float64[10, 5, 3]):
    return np.sum(A, axis=-1)
# NOTE(review): despite the "sum" name, this exercises mean over two axes.
@compare_numpy_output()
def test_sum_multiple_axes(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=(-1, 0))
@compare_numpy_output()
def METHOD_NAME(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=2)
@compare_numpy_output()
def test_mean_negative(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=-2)
@compare_numpy_output()
def test_mean_multiple_axes(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=(-2, 0))
def test_mean_reduce_symbolic_shape():
    """Mean reduction over a dimension with a symbolic size."""
    N = dace.symbol('N')
    @dace.program
    def mean_reduce_symbolic_shape(A: dace.float64[10, N, 3]):
        return np.mean(A, axis=(-2, 0))
    X = np.random.normal(scale=10, size=(10, 12, 3)).astype(np.float64)
    dace_result = mean_reduce_symbolic_shape(A=X)
    numpy_result = np.mean(X, axis=(-2, 0))
    assert np.allclose(dace_result, numpy_result)
# axis=(0, -2, 2) covers every axis of the 3-D input (note -2 aliases 1).
@compare_numpy_output()
def test_reduce_all_axes(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=(0, -2, 2))
# test accessing a global variable
my_none = None
@compare_numpy_output()
def test_reduce_global_None(A: dace.float64[10, 5, 3]):
    return np.mean(A, axis=my_none)
def test_scalar_reduction():
    """min/max reductions of a small vector used inside scalar control flow."""
    gamma = 1.4
    # Eigenvalues (v - c, v, v + c) computed from a 3-component state
    # u = [rho, rho*v, E] with sound speed c.
    @dace.program
    def eigenvalues(u: dace.float64[3]):
        rho = u[0]
        rhov = u[1]
        E = u[2]
        v = rhov / rho
        p = (E - 0.5 * rhov * v) * (gamma - 1)
        c = np.sqrt(gamma * p / rho)
        ret = np.empty_like(u)
        ret[0] = v - c
        ret[1] = v
        ret[2] = v + c
        return ret
    # Flux selection based on the scalar min/max of the eigenvalue vectors.
    # NOTE(review): resembles an HLL-type Riemann flux — not verified here.
    @dace.program
    def flux_min1(ul: dace.float64[3], ur: dace.float64[3]):
        fl = np.array([0.0442802, 0.13597403, 0.12488015])
        fr = np.array([0., 0.1, 0.])
        eigvalsl = eigenvalues(ul)
        eigvalsr = eigenvalues(ur)
        sl = np.min(eigvalsl)
        sr = np.max(eigvalsr)
        if sl >= 0:
            return fl
        elif sr <= 0:
            return fr
        else:
            return (sl * sr * (ur - ul) + fl * sr - fr * sl) / (sr - sl)
    ul = np.array([0.15532005, 0.0442802, 0.31468739])
    ur = np.array([0.125, 0., 0.25])
    # Compare the DaCe-compiled program against its pure-Python body (.f).
    assert (np.allclose(flux_min1(ul, ur), flux_min1.f(ul, ur)))
# axis=() reduces nothing; axis=0 on a length-1 axis only drops it.
@compare_numpy_output()
def test_degenerate_reduction_explicit(A: dace.float64[20]):
    return np.sum(A, axis=())
@compare_numpy_output()
def test_degenerate_reduction_implicit(A: dace.float64[1, 20]):
    return np.sum(A, axis=0)
if __name__ == '__main__':
    # generated with cat tests/numpy/reductions_test.py | grep -oP '(?<=^def ).*(?=\()' | awk '{print $0 "()"}'
    test_sum()
    test_sum_1()
    test_min()
    test_max()
    test_min_1()
    test_min_int32()
    test_min_int64()
    test_max_int32()
    test_max_int64()
    test_max_1()
    test_argmax_1()
    test_argmin_1()
    test_argmin_1_int32()
    test_argmin_1_int64()
    test_argmax_1_int32()
    test_argmax_1_int64()
    test_return_both()
    test_argmin_result_type()
    # Test supported reduction with OpenMP library node implementation
    from dace.libraries.standard import Reduce
    Reduce.default_implementation = 'OpenMP'
    # Re-run the subset of reductions supported by the OpenMP implementation.
    test_sum()
    test_sum_1()
    test_max()
    test_max_1()
    test_min()
    test_min_1()
    test_sum_negative_axis()
    test_sum_multiple_axes()
    METHOD_NAME()
    test_mean_negative()
    test_mean_multiple_axes()
    test_mean_reduce_symbolic_shape()
    test_scalar_reduction()
    test_degenerate_reduction_explicit()
    test_degenerate_reduction_implicit()
7,021 | two step1 | import attr
import pytest
from ... import _abc, _core
from .tutil import check_sequence_matches
@attr.s(eq=False, hash=False)
class TaskRecorder:
    # Trio Instrument that appends one tuple per hook invocation, so tests can
    # assert on the exact sequence of scheduler events.
    record = attr.ib(factory=list)

    def before_run(self):
        self.record.append(("before_run",))

    def task_scheduled(self, task):
        self.record.append(("schedule", task))

    def before_task_step(self, task):
        # These hooks must fire in the context of the task being stepped.
        assert task is _core.current_task()
        self.record.append(("before", task))

    def after_task_step(self, task):
        assert task is _core.current_task()
        self.record.append(("after", task))

    def after_run(self):
        self.record.append(("after_run",))

    def filter_tasks(self, tasks):
        # Yield only events concerning `tasks` (plus run start/stop markers),
        # hiding events from unrelated internal tasks.
        for item in self.record:
            if item[0] in ("schedule", "before", "after") and item[1] in tasks:
                yield item
            if item[0] in ("before_run", "after_run"):
                yield item
def test_instruments(recwarn):
    # r1 and r2 are installed for the whole run; r2 is swapped for r3 mid-run
    # to verify that instruments can be added/removed while running.
    r1 = TaskRecorder()
    r2 = TaskRecorder()
    r3 = TaskRecorder()

    task = None

    # We use a child task for this, because the main task does some extra
    # bookkeeping stuff that can leak into the instrument results, and we
    # don't want to deal with it.
    async def task_fn():
        nonlocal task
        task = _core.current_task()

        for _ in range(4):
            await _core.checkpoint()
        # replace r2 with r3, to test that we can manipulate them as we go
        _core.remove_instrument(r2)
        with pytest.raises(KeyError):
            # Removing an instrument that is not installed raises KeyError.
            _core.remove_instrument(r2)
        # add is idempotent
        _core.add_instrument(r3)
        _core.add_instrument(r3)
        for _ in range(1):
            await _core.checkpoint()

    async def main():
        async with _core.open_nursery() as nursery:
            nursery.start_soon(task_fn)

    _core.run(main, instruments=[r1, r2])

    # It sleeps 5 times, so it runs 6 times.  Note that checkpoint()
    # reschedules the task immediately upon yielding, before the
    # after_task_step event fires.
    expected = (
        [("before_run",), ("schedule", task)]
        + [("before", task), ("schedule", task), ("after", task)] * 5
        + [("before", task), ("after", task), ("after_run",)]
    )
    # r1 saw the whole run; r2 saw the first part and r3 the remainder,
    # so their concatenation must equal r1's full record.
    assert r1.record == r2.record + r3.record
    assert list(r1.filter_tasks([task])) == expected
def test_instruments_interleave():
    # Two tasks run concurrently; the exact interleaving of their step events
    # is unspecified, so `expected` uses sets for the unordered segments.
    tasks = {}

    async def METHOD_NAME():
        tasks["t1"] = _core.current_task()
        await _core.checkpoint()

    async def two_step2():
        tasks["t2"] = _core.current_task()
        await _core.checkpoint()

    async def main():
        async with _core.open_nursery() as nursery:
            nursery.start_soon(METHOD_NAME)
            nursery.start_soon(two_step2)

    r = TaskRecorder()
    _core.run(main, instruments=[r])

    expected = [
        ("before_run",),
        ("schedule", tasks["t1"]),
        ("schedule", tasks["t2"]),
        {
            ("before", tasks["t1"]),
            ("schedule", tasks["t1"]),
            ("after", tasks["t1"]),
            ("before", tasks["t2"]),
            ("schedule", tasks["t2"]),
            ("after", tasks["t2"]),
        },
        {
            ("before", tasks["t1"]),
            ("after", tasks["t1"]),
            ("before", tasks["t2"]),
            ("after", tasks["t2"]),
        },
        ("after_run",),
    ]
    print(list(r.filter_tasks(tasks.values())))
    check_sequence_matches(list(r.filter_tasks(tasks.values())), expected)
def test_null_instrument():
    """An instrument defining none of the known hooks is accepted and never called."""

    class Inert:
        def something_unrelated(self):
            pass  # pragma: no cover

    async def body():
        await _core.checkpoint()

    _core.run(body, instruments=[Inert()])
def test_instrument_before_after_run():
    # before_run/after_run must fire exactly once each, even for a trivial run.
    record = []

    class BeforeAfterRun:
        def before_run(self):
            record.append("before_run")

        def after_run(self):
            record.append("after_run")

    async def main():
        pass

    _core.run(main, instruments=[BeforeAfterRun()])
    assert record == ["before_run", "after_run"]
def test_instrument_task_spawn_exit():
    # task_spawned/task_exited must be called for the main task. Other internal
    # tasks may also appear in `record`, hence the membership (not equality) checks.
    record = []

    class SpawnExitRecorder:
        def task_spawned(self, task):
            record.append(("spawned", task))

        def task_exited(self, task):
            record.append(("exited", task))

    async def main():
        return _core.current_task()

    main_task = _core.run(main, instruments=[SpawnExitRecorder()])
    assert ("spawned", main_task) in record
    assert ("exited", main_task) in record
# This test also tests having a crash before the initial task is even spawned,
# which is very difficult to handle.
def test_instruments_crash(caplog):
    record = []

    class BrokenInstrument:
        def task_scheduled(self, task):
            record.append("scheduled")
            # Raising from a hook must disable this instrument, not crash the run.
            raise ValueError("oops")

        def close(self):
            # Shouldn't be called -- tests that the instrument disabling logic
            # works right.
            record.append("closed")  # pragma: no cover

    async def main():
        record.append("main ran")
        return _core.current_task()

    r = TaskRecorder()
    main_task = _core.run(main, instruments=[r, BrokenInstrument()])
    # BrokenInstrument fired once, got disabled, and the run still completed.
    assert record == ["scheduled", "main ran"]
    # the TaskRecorder kept going throughout, even though the BrokenInstrument
    # was disabled
    assert ("after", main_task) in r.record
    assert ("after_run",) in r.record
    # And we got a log message
    exc_type, exc_value, exc_traceback = caplog.records[0].exc_info
    assert exc_type is ValueError
    assert str(exc_value) == "oops"
    assert "Instrument has been disabled" in caplog.records[0].message
def test_instruments_monkeypatch():
    # The set of hooks an instrument provides is snapshotted at add time;
    # adding methods later has no effect until the instrument is re-added.
    class NullInstrument(_abc.Instrument):
        pass

    instrument = NullInstrument()

    async def main():
        record = []

        # Changing the set of hooks implemented by an instrument after
        # it's installed doesn't make them start being called right away
        instrument.before_task_step = record.append
        await _core.checkpoint()
        await _core.checkpoint()
        assert len(record) == 0

        # But if we remove and re-add the instrument, the new hooks are
        # picked up
        _core.remove_instrument(instrument)
        _core.add_instrument(instrument)
        await _core.checkpoint()
        await _core.checkpoint()
        assert record.count(_core.current_task()) == 2

        # After removal the hook stops firing; the count stays at 2.
        _core.remove_instrument(instrument)
        await _core.checkpoint()
        await _core.checkpoint()
        assert record.count(_core.current_task()) == 2

    _core.run(main, instruments=[instrument])
def test_instrument_that_raises_on_getattr():
    # If reading a hook attribute raises during add_instrument, the exception
    # must propagate and the instrument must not be left half-installed.
    class EvilInstrument:
        def task_exited(self, task):
            assert False  # pragma: no cover

        @property
        def after_run(self):
            raise ValueError("oops")

    async def main():
        with pytest.raises(ValueError):
            _core.add_instrument(EvilInstrument())

        # Make sure the instrument is fully removed from the per-method lists
        runner = _core.current_task()._runner
        assert "after_run" not in runner.instruments
        assert "task_exited" not in runner.instruments

    _core.run(main)
7,022 | test async future | from browser import aio, timer
from tester import assert_raises
results = []
def report(*args):
    """Print one status line per entry in the global ``results`` list.

    Positional arguments are accepted and ignored — presumably so this can
    double as a callback; verify against callers.
    """
    for url, size in results:
        if size is None:
            print(f"file at {url}: not found")
        else:
            print(f"file at {url}: {size} bytes")
class Done(Exception):
    """Sentinel exception raised at the end of the suite to exercise error handling."""
class AIter:
    """Async iterator that yields (0, 1), (0, 2), (0, 3) and then stops."""

    def __init__(self):
        self.count = 0

    def __aiter__(self):
        # The instance is its own iterator.
        return self

    async def __anext__(self):
        self.count += 1
        if self.count > 3:
            raise StopAsyncIteration
        return (0, self.count)
async def test_async_for():
    # Tuple unpacking in an 'async for' target, driven by the AIter protocol.
    data = []
    async for i, j in AIter():
        data.append([i, j])
    assert data == [[0, 1], [0, 2], [0, 3]]
    print("'async for' test ok")
class manager:
    """Minimal async context manager whose __aenter__ yields the pair (1, 2)."""

    async def __aenter__(self):
        return (1, 2)

    async def __aexit__(self, *exc):
        # Never suppress exceptions.
        return False
async def test_async_with():
    # Exercises every 'async with' target form: none, alias, unpacked alias,
    # and multiple managers with and without aliases.
    async with manager():
        pass

    r1 = []
    async with manager() as xwq:
        r1.append(xwq)
    assert r1 == [(1, 2)]

    r2 = []
    # Tuple-unpacking target: (x, y) receives (1, 2).
    async with manager() as (x, y):
        r2.append(x)
    assert r2 == [1]

    async with manager(), manager():
        pass

    r3 = []
    async with manager() as x, manager() as y:
        r3.append((x, y))
    assert r3 == [((1, 2), (1, 2))]

    r4 = []
    async with manager() as x, manager():
        r4.append(x)
    assert r4 == [(1, 2)]

    print("'async with' test ok")
def handle(err):
    """Error callback for aio.run: the suite must end with a Done exception."""
    assert isinstance(err, Done)
    print("handle error ok")
async def raise_error():
    # Last coroutine awaited by main(); routes Done into the onerror handler.
    raise Done
async def ajax_call(url):
    """Fetch ``url``; return (url, payload_length) on HTTP 200, else (url, None)."""
    response = await aio.get(url)
    size = len(response.data) if response.status == 200 else None
    return (url, size)
class Lock:
    """Cooperative, non-reentrant lock: acquire() polls, yielding via aio.sleep(0)."""

    def __init__(self):
        self._locked = False

    async def acquire(self):
        # Busy-wait, handing control back to the event loop each round,
        # until the lock is free; then take it.
        while self._locked:
            await aio.sleep(0)
        self._locked = True

    def release(self):
        if not self._locked:
            raise RuntimeError('Lock is already released')
        self._locked = False

    def locked(self):
        return self._locked

    async def __aenter__(self):
        await self.acquire()
        return self

    async def __aexit__(self, *exc_info):
        self.release()
# Expose the local Lock on the aio module so test_lock() can use aio.Lock.
aio.Lock = Lock
async def test_lock():  # issue 1205
    # "async with" with alias
    async with aio.Lock() as l:
        pass
    # "async with" without alias
    l = aio.Lock()
    async with l:
        pass
# async comprehensions
async def test_async_comp():
    # Consumes the list built by an 'await' inside a comprehension.
    for url, req in await async_comp():
        print(url, len(req.data))
    print("test async comp ok")
async def async_comp():
    # 'await' inside a list comprehension: each request completes in turn.
    return [(url, await aio.get(url)) for url in ["console.html", "index.html"]]
async def async_gen():
    """Asynchronously yield 1..5, sleeping 0.1 s after each value."""
    for value in range(1, 6):
        yield value
        await aio.sleep(0.1)
async def test_async_gen(throw, close, expected):
    # Drives async_gen through __anext__/asend, optionally athrow and aclose,
    # then drains the remainder with 'async for' and compares to `expected`.
    result = []
    a = async_gen()
    result.append(await a.__anext__())
    a.__aiter__()
    # asend(None) is equivalent to __anext__ for the next value.
    result.append(await a.asend(None))
    if throw:
        try:
            await a.athrow(ZeroDivisionError)
        except Exception as e:
            result.append(type(e))
    if close:
        await a.aclose()
    # After athrow/aclose the generator is exhausted, so this loop adds
    # the remaining values only in the plain case.
    async for i in a:
        result.append(i)
    assert result == expected, (close, result, expected)
    print(throw, close, "async generator ok")
async def METHOD_NAME():
    """Awaiting an aio.Future returns the value passed to set_result."""
    fut = aio.Future()
    # Resolve the future from a timer callback 10 ms later.
    timer.set_timeout(lambda: fut.set_result("OK"), 10)
    result = await fut
    assert result == "OK", "Result has not the expected value"
async def test_async_future_exc():
    """Awaiting an aio.Future raises the exception passed to set_exception."""
    fut = aio.Future()
    timer.set_timeout(lambda: fut.set_exception(ValueError("EXPECTED_ERROR")), 10)
    try:
        await fut
    except ValueError as e:
        assert str(e) == "EXPECTED_ERROR"
        return
    # Reached only if awaiting the future did not raise.
    assert False, "Error has not been raised"
# Globals mutated by test_fstring_with_global() below.
n1571 = 0
t1571 = []
async def test_fstring_with_global():
    # f-string referencing a 'global' name inside a nested async function
    # (regression test; see the n1571/t1571 globals above).
    global n1571

    async def g():
        global n1571
        n1571 += 1
        t1571.append(f'{n1571}')

    for p in range(3):
        await g()
    assert t1571 == ['1', '2', '3']
    print('fstring with global ok')
# Lookup table used by test_issue_1906: (c1, c2, label) -> comparison result.
answers = {
    ((1,), (1,), "a1"): -1,
    ((2,), (2,), "a1"): 1,
    ((1,), (2,), "a1"): -1,
    ((2,), (1,), "a1"): 0,
    ((0,), (0,), "a2"): -1,
    ((2,), (2,), "a2"): 1,
    ((0,), (2,), "a2"): -1,
    ((2,), (0,), "a2"): 0}


# Control-flow exception used to bail out of the recursion in test_issue_1906.
class Jump(Exception): pass
async def test_issue_1906():
    # Regression test: a loop variable (`cs`) captured by a nested recursive
    # async function must not be corrupted across iterations / recursion.
    t = []
    for a, cs in [("a1", {1, 2}), ("a2", {0, 2})]:
        t.append(f'Iteration {a}')
        try:
            t.append(f'cs in the try is {cs}')

            async def f(rel, cs1, cs2):
                if not cs1:
                    raise Jump(rel)
                for c1 in [(c,) for c in sorted(cs1)]:
                    for c2 in [(c,) for c in sorted(cs2)]:
                        p = answers[(c1, c2, a)]
                        if rel == 0 or p in (0, rel):
                            await f(rel or p, cs1.difference(c1), cs2.difference(c2))
                #cs
            t.append(f'cs before calling f is {cs}')
            await f(0, cs, cs)
        except Jump:
            # Expected: f raises Jump once cs1 is exhausted.
            pass
        t.append(f'cs after try is {cs}')
    assert t == ['Iteration a1',
                 'cs in the try is {1, 2}',
                 'cs before calling f is {1, 2}',
                 'cs after try is {1, 2}',
                 'Iteration a2',
                 'cs in the try is {0, 2}',
                 'cs before calling f is {0, 2}',
                 'cs after try is {0, 2}']
    print('issue 1906 ok')
# issue 2158
class T:

    @classmethod
    async def test(cls):
        # cls.does_not_exist is undefined, so awaiting this raises AttributeError.
        a, b = await cls.does_not_exist()
async def test_issue_2158():
    # aio.run must reject non-coroutine arguments with a helpful message.
    def not_async_function():
        pass

    assert_raises(TypeError, aio.run, not_async_function,
                  msg="object is not a coroutine")

    assert_raises(TypeError, aio.run, not_async_function(),
                  msg="object is not a coroutine")

    async def async_func1():
        return 'af ok'

    async def async_func2():
        assert (await async_func1()) == 'af ok'

    # Passing the async function itself (not a coroutine object) must fail
    # with the hint about forgetting to call it.
    assert_raises(
        TypeError, aio.run, async_func2,
        msg="object is not a coroutine. Maybe you forgot to call the async function ?")

    aio.run(async_func2())

    assert_raises(TypeError, aio.run, T.test,
                  msg="object is not a coroutine. Maybe you forgot to call the async function ?")

    async def test():
        try:
            await T.test()
            raise AssertionError("should have raised AttributeError")
        except AttributeError:
            pass

    aio.run(test())
    print('test 2158 ok')
async def main(secs, urls):
    # Driver coroutine: waits, fetches the sample URLs, then runs every test,
    # finishing with raise_error() so the onerror handler gets exercised.
    print(f"wait {secs} seconds...")
    await aio.sleep(secs)
    for url in urls:
        r = await ajax_call(url)
        results.append(r)
    report()
    await test_async_for()
    await test_async_with()
    await test_lock()
    await test_async_comp()
    for throw, close, expected in [
            [False, False, [1, 2, 3, 4, 5]],
            [True, False, [1, 2, ZeroDivisionError]],
            [False, True, [1, 2]],
            [True, True, [1, 2, ZeroDivisionError]]]:
        await test_async_gen(throw, close, expected)
    await METHOD_NAME()
    await test_async_future_exc()
    await test_fstring_with_global()
    await test_issue_1906()
    await test_issue_2158()
    await raise_error()
print("Start...")
# Run the full suite; the trailing raise_error() routes Done into handle().
aio.run(main(1, ["test_suite.py", "index.html", "unknown.txt"]),
        onerror=handle)

# Re-define main() from exec'd source and run it: verifies coroutines created
# via exec work with aio.run.
src = """
from browser import aio
async def main():
    print("before sleep")
    await aio.sleep(1)
    await aio.sleep(1)
    print("after sleep")"""

exec(src, globals())
aio.run(main())
7,023 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    # Build the GET request for the provider-level operations listing.
    # Auto-generated; keep the structure in sync with the code generator.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01"))  # type: Literal["2022-11-01"]
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.KubernetesConfiguration/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.kubernetesconfiguration.v2022_11_01.SourceControlConfigurationClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Supports both positional and keyword wiring from the generated client.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.ResourceProviderOperation"]:
        """List all the available operations the KubernetesConfiguration resource provider supports.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceProviderOperation or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2022_11_01.models.ResourceProviderOperation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01"))  # type: Literal["2022-11-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ResourceProviderOperationList]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages replay next_link
            # with the client's api-version re-applied.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page: returns (next_link, iterator of items).
            deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page and map non-200 responses to typed errors.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(METHOD_NAME, extract_data)

    list.metadata = {"url": "/providers/Microsoft.KubernetesConfiguration/operations"}  # type: ignore
7,024 | functional impl | import torch
from torch.library import Library
from torch._ops import OpOverload
from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseTy, BaseType
from torch._C import _ExcludeDispatchKeyGuard, DispatchKeySet, DispatchKey
from .autograd import autograd_not_implemented
import torch.utils._pytree as pytree
import weakref
def register_functional_op(
    lib: Library,
    new_op_name: str,
    mutable_op: OpOverload,
) -> None:
    """Given a mutable operator, registers the functional variant.

    This API also correctly links the functional variant with the mutable
    operator for the purposes of functionalization.

    All of the new registrations are performed on the ``lib`` passed in.

    Arguments:
        lib (Library): Should be a torch.library.Library object that has
            the same namespace as ``mutable_op``'s namespace.
            lib will be used to register the new functional op as well
            as a functionalization kernel for the ``mutable_op``
            If you don't have a library handy, use
            ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
        new_op_name (str): The name of the functional operator (without the
            namespace). If no namespace, the new functional variant will be
            accessible under ``torch.ops.{lib.ns}.new_op_name``.
        mutable_op (OpOverload): The mutable custom operator. Note
            that you may need to add a `.default` to it, like
            `torch.ops.aten.abs_.default`.

    """
    validate(mutable_op)
    schema = functional_schema(new_op_name, mutable_op)
    lib.define(schema)

    METHOD_NAME = construct_functional_impl(mutable_op)
    lib.impl(new_op_name, METHOD_NAME, 'CompositeExplicitAutograd')

    functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default

    # There's no easy way for us to generate the autograd kernel, so we
    # use autograd_not_implemented. Also, this makes it so that the user
    # is unable to register an autograd formula themselves. This shouldn't
    # be a problem if the user doesn't use the functional op directly
    # in their program, but we may need to revisit this in the future.
    lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd')

    # weakref.proxy avoids the kernel closure keeping the op alive forever.
    f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op)

    lib.impl(mutable_op, f_kernel, 'Functionalize')
def construct_functional_impl(mutable_op):
    """Return a functional (non-mutating) implementation of ``mutable_op``.

    The returned callable clones each argument the op would mutate, runs the
    mutable op on the clones, and appends the clones to the outputs.
    """
    def impl(*args):
        call_args = []
        cloned_outputs = []
        for is_write, arg in zip(mutable_args(mutable_op), args):
            if not is_write:
                call_args.append(arg)
                continue
            copy = arg.clone()
            call_args.append(copy)
            cloned_outputs.append(copy)
        result = mutable_op(*call_args)
        # The op's own outputs (if any) come first, then the cloned args.
        if result is None:
            return tuple(cloned_outputs)
        if isinstance(result, tuple):
            return (*result, *cloned_outputs)
        return (result, *cloned_outputs)
    return impl
def construct_functionalization_kernel(mutable_op, functional_op):
def kernel(*args):
# There's nothing to be functionalized!
# We can still end up here because DispatchKey::Functionalize is a mode key
if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args):
with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
return mutable_op(*args)
# NB: This differs from the codegen -- codegen handles cases where there
# are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper.
# This only really matters for XLA (mixed CPU-XLA tensors) and
# running functionalization without the PT2 stack (which guarantees to us that
# all tensors are FunctionalTensorWrapper).
if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args):
raise RuntimeError("{mutable_op}: expected all args to be FunctionalTensorWrapper")
unwrapped_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg):
torch._sync(arg)
unwrapped = torch._from_functional_tensor(arg)
unwrapped_args.append(unwrapped)
else:
unwrapped_args.append(arg)
with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
output = functional_op(*unwrapped_args)
num_actual_output = len(mutable_op._schema.returns)
actual_output = pytree.tree_map(
torch._to_functional_tensor, output[:num_actual_output])
new_values_to_propagate = output[num_actual_output:]
inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args)
if is_write]
assert len(new_values_to_propagate) == len(inputs_to_replace)
for new_value, arg in zip(new_values_to_propagate, inputs_to_replace):
torch._C._propagate_xla_data(arg, new_value)
torch._C._replace_(arg, new_value)
torch._C._commit_update(arg)
torch._sync(arg)
if len(actual_output) == 1:
return actual_output[0]
elif len(actual_output) == 0:
return None
return actual_output
return kernel
def validate(mutable_op: OpOverload):
    """Reject operators this API cannot handle.

    Raises:
        TypeError: if ``mutable_op`` is not an OpOverload.
        RuntimeError: if the schema is not SchemaKind.mutable.
        NotImplementedError: for aliased/mutated returns or non-plain-Tensor args.
    """
    if not isinstance(mutable_op, OpOverload):
        raise TypeError(
            f"register_functional_op(mutable_op): expected mutable_op to be instance of "
            f"OpOverload but got {type(mutable_op)}")

    # There are generally three types of "in-place" or "mutable" ops.
    # Each of them have their own conventions:
    # - inplace (first input modified in-place and returned as only output)
    # - out= (some args modified in-place and returned as outputs)
    # - mutable (some args modified in-place but none of those returned as outputs)
    # In theory we can support all three, but we'll just support the last
    # option right now for simplicity.
    schema = FunctionSchema.parse(str(mutable_op._schema))
    if not schema.kind() == SchemaKind.mutable:
        raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)")
    for ret in schema.returns:
        # construct_functionalization_kernel assumes this for simplicity
        if ret.annotation is not None:
            raise NotImplementedError(
                "NYI: register_functional_op(op) where op returns a mutated or aliased value. "
                "Please file an issue (and as a workaround, modify your operator to "
                "not return the mutated value or aliases)")
    for arg in schema.arguments.flat_all:
        # construct_functionalization_kernel assumes this for simplicity
        if arg.type.is_tensor_like() and arg.type != BaseType(BaseTy.Tensor):
            raise NotImplementedError(
                "NYI: register_functional_op(op) where op accepts Optional or List of tensors. "
                "Please file an issue.")
def functional_schema(new_op_name, op: OpOverload):
    """Render ``op``'s schema as a functional signature named ``new_op_name``."""
    parsed = FunctionSchema.parse(str(op._schema))
    renamed = parsed.signature().with_name(OperatorName.parse(new_op_name))
    return str(renamed)
def mutable_args(op: OpOverload):
    """Tuple of booleans, one per schema argument: True iff the op writes to it."""
    return tuple(
        arg.alias_info is not None and arg.alias_info.is_write
        for arg in op._schema.arguments)
"""
Runs a ROSE tool. Impersonates CXX. For use by /kull/systemconf/compilers/
Linux_run_rose_gnu_4_9_3_mvapich2_2_2_compiler.py and similar.
If the tool returns status 0, logs "PASSED" to stdout and copies the command
and its arguments to kull_testing/passed.txt. Otherwise, logs "FAILED" to
stdout, copies the command and its arguments to kull_testing/failed.txt, and
runs the native compiler.
Returns status 0 whether the tool succeeds or fails.
"""
import argparse
import inspect
import os
from support.local_logging import Logger
from support.runner import Runner
class ROSECXXRunner (object):
""" Just exports run()
"""
_SEPARATOR = "================================================================================"
def __init__(self):
# Will be a Namespace (e.g. can refer to self._args_defined.command_args):
self._args_defined = None
self._args_remaining = []
self._current_dir = ""
self._failed_file = None
self._failed_file_path = ""
self._logger = Logger( os.path.basename(__file__) + '.' + type(self).__name__ )
self._parser = None
self._passed_file = None
self._passed_file_path = ""
self._primary_args = []
self._primary_command = ""
self._runner = Runner()
self._script_dir = ""
self._secondary_args = []
self._secondary_command = ""
self._define_args()
def _define_args(self):
""" This script passes all its arguments on to the called
programs, so there are no args defined.
"""
self._parser = argparse.ArgumentParser(
prog='run_rose_cxx.py',
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# This matches the first positional and all remaining/following args.
# We want ALL the arguments, so, we are using parse_known_arguments
# below instead and commenting this out for now:
#self._parser.add_argument('command_args', nargs=argparse.REMAINDER)
def _process_args(self):
self._args_defined, self._args_remaining = self._parser.parse_known_args()
self._logger.debug("defined args\n" + str(self._args_defined))
self._logger.debug("remaining args\n" + str(self._args_remaining))
self._current_dir = os.getcwd()
#self._script_dir = os.path.dirname(os.path.abspath(__file__))
# Robustly get this script's directory, even when started by exec or execfiles:
script_rel_path = inspect.getframeinfo(inspect.currentframe()).filename
self._script_dir = os.path.dirname(os.path.abspath(script_rel_path))
# Could execute /bin/bash -c parms, but only if parms are one array element!
# self._primary_command = '/g/g17/charles/code/ROSE/rose-vanderbrugge1-rose-1398-test-2018-10-30-gcc-4.9.3/bin/identityTranslator'
self._primary_command = '/g/g17/charles/code/ROSE/rose-0.9.10.91-gcc-4.9.3/bin/identityTranslator'
self._secondary_command = '/usr/tce/packages/gcc/gcc-4.9.3/bin/g++'
# self._primary_command = self._secondary_command
self._primary_args.append(self._primary_command)
# Helpful rose parameter added by ROSE-1424:
self._primary_args.append('-rose:no_optimize_flag_for_frontend')
# # Helpful rose parameter added by ROSE-1392:
self._primary_args.append('-rose:unparse_edg_normalized_method_ROSE_1392')
self._primary_args.extend(self._args_remaining)
self._secondary_args.append(self._secondary_command)
self._secondary_args.extend(self._args_remaining)
self._passed_file_path = os.path.join (self._current_dir, "passed.txt")
self._failed_file_path = os.path.join (self._current_dir, "failed.txt")
def _log_success(self, args):
self._logger.success("\n" + self._SEPARATOR + "\nPASSED")
self._logger.debug("Will log to " + self._passed_file_path + ":")
self._logger.debug(args)
self._passed_file.write(str(args) + '\n')
def _log_failure(self, args):
self._logger.problem("\n" + self._SEPARATOR + "\nFAILED")
self._logger.debug("Will log to " + self._failed_file_path + ":")
self._logger.debug(args)
self._failed_file.write(str(args) + '\n')
def METHOD_NAME (self, args, dir):
self._logger.info("\n" + self._SEPARATOR)
self._runner.callOrLog(args, dir)
def run(self):
""" Run the primary command. If it fails, run the secondary command. If
that fails, let the exception (Runner.Failed) propagate.
"""
self._logger.set_debug_off()
self._runner.setEffortOnly(False)
self._process_args()
self._passed_file = open(self._passed_file_path, 'a')
self._failed_file = open(self._failed_file_path, 'a')
try:
self.METHOD_NAME(self._primary_args, self._current_dir)
self._log_success(self._primary_args)
except Runner.Failed, e:
self._log_failure(self._primary_args)
# 2018-10-05: Only run the tool (doing do_sbatch_in_subdir.sh):
self.METHOD_NAME(self._secondary_args, self._current_dir)
def main():
ROSECXXRunner().run()
if __name__ == '__main__':
main()
|
7,026 | tableqa tracking and print results with tableid | # Copyright (c) Alibaba, Inc. and its affiliates.
import os
import unittest
from threading import Thread
from typing import List
import json
from transformers import BertTokenizer
from modelscope.hub.snapshot_download import snapshot_download
from modelscope.models import Model
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.pipelines.nlp import TableQuestionAnsweringPipeline
from modelscope.preprocessors import TableQuestionAnsweringPreprocessor
from modelscope.preprocessors.nlp.space_T_cn.fields.database import Database
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.test_utils import test_level
def tableqa_tracking_and_print_results_with_history(
pipelines: List[TableQuestionAnsweringPipeline]):
test_case = {
'utterance': [
'有哪些风险类型?',
'风险类型有多少种?',
'珠江流域的小型水库的库容总量是多少?',
'那平均值是多少?',
'那水库的名称呢?',
'换成中型的呢?',
]
}
for p in pipelines:
historical_queries = None
for question in test_case['utterance']:
output_dict = p({
'question': question,
'history_sql': historical_queries
})[OutputKeys.OUTPUT]
print('question', question)
print('sql text:', output_dict[OutputKeys.SQL_STRING])
print('sql query:', output_dict[OutputKeys.SQL_QUERY])
print('query result:', output_dict[OutputKeys.QUERY_RESULT])
print('json dumps', json.dumps(output_dict, ensure_ascii=False))
print()
historical_queries = output_dict[OutputKeys.HISTORY]
def tableqa_tracking_and_print_results_without_history(
pipelines: List[TableQuestionAnsweringPipeline]):
test_case = {
'utterance': [['列出油耗大于8但是功率低于200的名称和价格', 'car'],
['油耗低于5的suv有哪些?', 'car'], ['上个月收益超过3的有几个基金?', 'fund'],
['净值不等于1的基金平均月收益率和年收益率是多少?', 'fund'],
['计算机或者成绩优秀的同学有哪些?学号是多少?', 'student'],
['本部博士生中平均身高是多少?', 'student'],
['长江流域和珠江流域的水库库容总量是多少?', 'reservoir'],
['今天星期几?', 'reservoir']]
}
for p in pipelines:
for question, table_id in test_case['utterance']:
output_dict = p({
'question': question,
'table_id': table_id
})[OutputKeys.OUTPUT]
print('question', question)
print('sql text:', output_dict[OutputKeys.SQL_STRING])
print('sql query:', output_dict[OutputKeys.SQL_QUERY])
print('query result:', output_dict[OutputKeys.QUERY_RESULT])
print('json dumps', json.dumps(output_dict, ensure_ascii=False))
print()
def METHOD_NAME(
pipelines: List[TableQuestionAnsweringPipeline]):
test_case = {
'utterance': [
['有哪些风险类型?', 'fund', False],
['风险类型有多少种?', 'fund', True],
['珠江流域的小型水库的库容总量是多少?', 'reservoir', False],
['那平均值是多少?', 'reservoir', True],
['那水库的名称呢?', 'reservoir', True],
['换成中型的呢?', 'reservoir', True],
['近7年来车辆的销量趋势?', 'car_sales', False],
['近7年来车辆的销量月环比是多少呢?', 'car_sales', True],
],
}
for p in pipelines:
historical_queries = None
for question, table_id, use_history in test_case['utterance']:
output_dict = p({
'question':
question,
'table_id':
table_id,
'history_sql':
historical_queries if use_history else None
})[OutputKeys.OUTPUT]
print('question', question)
print('sql text:', output_dict[OutputKeys.SQL_STRING])
print('sql query:', output_dict[OutputKeys.SQL_QUERY])
print('query result:', output_dict[OutputKeys.QUERY_RESULT])
print('json dumps', json.dumps(output_dict, ensure_ascii=False))
print()
historical_queries = output_dict[OutputKeys.HISTORY]
class TableQuestionAnswering(unittest.TestCase):
    """Integration tests for the Chinese table question answering pipeline."""

    def setUp(self) -> None:
        # Shared task/model identifiers used by every test below.
        self.task = Tasks.table_question_answering
        self.model_id = 'damo/nlp_convai_text2sql_pretrain_cn'

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_by_direct_model_download(self):
        # Build the pipeline from a locally downloaded model snapshot.
        cache_path = snapshot_download(self.model_id)
        preprocessor = TableQuestionAnsweringPreprocessor(model_dir=cache_path)
        pipelines = [
            pipeline(
                Tasks.table_question_answering,
                model=cache_path,
                preprocessor=preprocessor)
        ]
        tableqa_tracking_and_print_results_with_history(pipelines)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_by_direct_model_download_with_multithreads(self):
        # One shared pipeline instance queried from 5 threads at once:
        # exercises thread-safety of the pipeline call path.
        cache_path = snapshot_download(self.model_id)
        pl = pipeline(Tasks.table_question_answering, model=cache_path)

        def print_func(pl, i):
            result = pl({
                'question': '上个月收益从低到高排前七的基金的名称和风险等级是什么',
                'table_id': 'fund',
                'history_sql': None
            })
            print(i, result[OutputKeys.OUTPUT][OutputKeys.SQL_QUERY],
                  result[OutputKeys.OUTPUT][OutputKeys.QUERY_RESULT],
                  json.dumps(result))

        procs = []
        for i in range(5):
            proc = Thread(target=print_func, args=(pl, i))
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        # Build the pipeline from the model hub with an explicitly constructed
        # tokenizer and Database (sqlite-backed), then run the history driver.
        model = Model.from_pretrained(self.model_id)
        self.tokenizer = BertTokenizer(
            os.path.join(model.model_dir, ModelFile.VOCAB_FILE))
        db = Database(
            tokenizer=self.tokenizer,
            table_file_path=[
                os.path.join(model.model_dir, 'databases', fname)
                for fname in os.listdir(
                    os.path.join(model.model_dir, 'databases'))
            ],
            syn_dict_file_path=os.path.join(model.model_dir, 'synonym.txt'),
            is_use_sqlite=True)
        preprocessor = TableQuestionAnsweringPreprocessor(
            model_dir=model.model_dir, db=db)
        pipelines = [
            pipeline(
                Tasks.table_question_answering,
                model=model,
                preprocessor=preprocessor,
                db=db)
        ]
        METHOD_NAME(pipelines)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_model_from_modelhub_with_other_classes(self):
        # Same construction as above but driving the stateless question set.
        model = Model.from_pretrained(self.model_id)
        self.tokenizer = BertTokenizer(
            os.path.join(model.model_dir, ModelFile.VOCAB_FILE))
        db = Database(
            tokenizer=self.tokenizer,
            table_file_path=[
                os.path.join(model.model_dir, 'databases', fname)
                for fname in os.listdir(
                    os.path.join(model.model_dir, 'databases'))
            ],
            syn_dict_file_path=os.path.join(model.model_dir, 'synonym.txt'),
            is_use_sqlite=True)
        preprocessor = TableQuestionAnsweringPreprocessor(
            model_dir=model.model_dir, db=db)
        pipelines = [
            pipeline(
                Tasks.table_question_answering,
                model=model,
                preprocessor=preprocessor,
                db=db)
        ]
        tableqa_tracking_and_print_results_without_history(pipelines)
if __name__ == '__main__':
unittest.main() |
7,027 | on notify | # -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: clftYeeshaPage08
Age: Cleft
Date: May 2003
Author: Adam Van Ornum
Manages and records the finding of Yeesha Pages
!!! NOTE: This file only applies to the cleft but is only used in the global xYeeshaPagesGUI.max file. !!!
"""
from Plasma import *
from PlasmaTypes import *
from PlasmaKITypes import *
from PlasmaVaultConstants import *
from PlasmaNetConstants import *
# define the attributes that will be entered in max
actClickableBook = ptAttribNamedActivator(1, "Act: Clickable Yeesha Page")  # click detector on the page
GUIDialogObject = ptAttribSceneobject(2, "GUIDialog scene object")  # in-world stand-in for the GUI
RespOpen = ptAttribResponder(3, "Open Responder")   # page-open animation
RespLoop = ptAttribResponder(4, "Loop Responder")   # idle loop after opening
RespClose = ptAttribResponder(5, "Close Responder")  # page-close animation
RespGlow = ptAttribResponder(6, "Glow Responder", ['GlowSound', 'PageTurn'])

# Linking Books GUI tags
DialogName = "YeeshaPageGUI"
kPageButton = 100
# Tag IDs of the individual Yeesha page controls in the shared GUI (200..297).
kYeeshaPage = list(range(200, 298, 1))
kYeeshaPageCancel = 299

# Module-level flag: non-zero while the page dialog is currently shown.
isOpen = 0
class clftYeeshaPage08(ptModifier):
    "The Yeesha Page 08 cleft imager python code"

    def __init__(self):
        ptModifier.__init__(self)
        # id/version identify this script instance to the Plasma engine.
        self.id = 5312
        self.version = 1
        PtDebugPrint("__init__clftYeeshaPage08 v.", self.version)

    def OnFirstUpdate(self):
        # Preload the shared Yeesha page GUI so it is ready on first click.
        PtLoadDialog(DialogName, self.key)
        pass

    def __del__(self):
        "destructor - get rid of any dialogs that we might have loaded"
        #~ PtUnloadDialog(DialogName)

    def METHOD_NAME(self, state, id, events):
        # Activator/responder callback: shows the page GUI on a local click
        # and chains the open animation into the idle loop.
        global LocalAvatar
        global isOpen
        if id == actClickableBook.id and state and PtWasLocallyNotified(self.key):
            PtLoadDialog(DialogName, self.key)
            self.SetStdGUIVisibility(0)
            PtShowDialog(DialogName)
            RespOpen.run(self.key)
            isOpen = 1
        elif id == actClickableBook.id and not state and PtWasLocallyNotified(self.key):
            # Button released with the dialog still closed: open it anyway.
            if not isOpen:
                self.SetStdGUIVisibility(0)
                PtShowDialog(DialogName)
                RespOpen.run(self.key)
                isOpen = 1
        elif id == RespOpen.id:
            # Open animation finished; start the looping idle animation.
            RespLoop.run(self.key)

    def OnGUINotify(self, id, control, event):
        # GUI callback for the page dialog: either the page was picked up
        # (kYeeshaPage[8]) or the dialog was cancelled.
        global isOpen
        btnID = 0
        if isinstance(control, ptGUIControlButton):
            btnID = control.getTagID()
        if event == kAction and btnID == kYeeshaPage[8]:
            PtDebugPrint("DEBUG: clftYeeshaPage08.OnGUINotify():\tPicked up page")
            RespClose.run(self.key)
            isOpen = 0
            PtHideDialog(DialogName)
            self.SetStdGUIVisibility(1)
            RespGlow.run(self.key, state='GlowSound')
            if self.GotPage():
                PtDebugPrint ("DEBUG: clftYeeshaPage08.py: You've already found Yeesha Page #8. Move along. Move along.")
                return
            else:
                PtDebugPrint ("DEBUG: clftYeeshaPage08.py: Yeesha Page #8 is new to you.")
                PtDebugPrint ("DEBUG: clftYeeshaPage08.py: Trying to update the value of the SDL variable %s to 4" % ("YeeshaPage8"))
                # Record the page as found (state 4) in the player's
                # personal-age SDL so it persists across sessions.
                vault = ptVault()
                psnlSDL = vault.getPsnlAgeSDL()
                if psnlSDL:
                    YeeshaPageVar = psnlSDL.findVar("YeeshaPage8")
                    YeeshaPageVar.setInt(4)
                    vault.updatePsnlAgeSDL(psnlSDL)
                mydialog = PtGetDialogFromString(DialogName)  # NOTE(review): result unused
                PtSendKIMessageInt(kStartBookAlert, 0)
        elif event == kAction and btnID == kYeeshaPageCancel:
            RespClose.run(self.key)
            isOpen = 0
            PtHideDialog(DialogName)
            self.SetStdGUIVisibility(1)

    def GotPage(self):
        # Return 1 when the YeeshaPage8 SDL variable is already non-zero
        # (page previously found), 0 otherwise or when the SDL is missing.
        vault = ptVault()
        psnlSDL = vault.getPsnlAgeSDL()
        if psnlSDL:
            YeeshaPageVar = psnlSDL.findVar("YeeshaPage8")
            PtDebugPrint ("DEBUG: clftYeeshaPage08.py: The previous value of the SDL variable %s is %s" % ("YeeshaPage8", YeeshaPageVar.getInt()))
            if YeeshaPageVar.getInt() != 0:
                PtDebugPrint ("DEBUG: clftYeeshaPage08.py: You've already found Yeesha Page #8. Move along. Move along.")
                return 1
            else:
                return 0
        else:
            PtDebugPrint("ERROR: clftYeeshaPage08: Error trying to access the Chronicle psnlSDL. psnlSDL = %s" % ( psnlSDL))
            return 0

    def SetStdGUIVisibility(self, visible):
        # Toggle between the in-world page object (visible=1) and the 2D GUI
        # dialog showing only this page's control (visible=0).
        global DialogName
        if visible:
            GUIDialogObject.value.draw.enable()
        else:
            # Hide every page control, then show only page #8's control.
            mydialog = PtGetDialogFromString(DialogName)
            for x in kYeeshaPage:
                try:
                    ctrl = mydialog.getControlModFromTag(x)
                except KeyError:
                    continue
                else:
                    ctrl.hide()
            mydialog.getControlModFromTag(kYeeshaPage[8]).show()
            GUIDialogObject.value.draw.disable()
|
7,028 | compute metrics | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
import statistics
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Load reference relevant passages from an open stream.

    Each line is tab-separated with the query id in column 0 and a relevant
    passage id in column 2; a query id may repeat across lines.

    Args:
        f (stream): stream to load.
    Returns:
        qids_to_relevant_passageids (dict): mapping from query_id (int) to
            relevant passage ids (list of ints).
    Raises:
        IOError: if a line is not in the expected format.
    """
    qids_to_relevant_passageids = {}
    for line in f:
        try:
            parts = line.strip().split('\t')
            qid = int(parts[0])
            pid = int(parts[2])
        except (IndexError, ValueError):
            # Only malformed lines are converted; unrelated errors propagate.
            raise IOError('"%s" is not valid format' % line)
        # setdefault replaces the if/pass/else idiom of the original.
        qids_to_relevant_passageids.setdefault(qid, []).append(pid)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load reference relevant passages from a file path.

    Args:
        path_to_reference (str): path of the reference file to load.
    Returns:
        dict: mapping from query_id (int) to relevant passage ids (list of ints).
    """
    with open(path_to_reference, 'r') as reference_file:
        return load_reference_from_stream(reference_file)
def load_candidate_from_stream(f):
    """Load candidate rankings from an open stream.

    Each line is tab-separated: query id, passage id, 1-based rank.

    Args:
        f (stream): stream to load.
    Returns:
        qid_to_ranked_candidate_passages (dict): mapping from query_id (int)
            to a list of 1000 passage ids (int) ranked by relevance; slots
            with no given rank stay 0.
    Raises:
        IOError: if a line is not in the expected format.
    """
    qid_to_ranked_candidate_passages = {}
    for line in f:
        try:
            parts = line.strip().split('\t')
            qid = int(parts[0])
            pid = int(parts[1])
            rank = int(parts[2])
        except (IndexError, ValueError):
            # Only malformed lines are converted; unrelated errors propagate.
            raise IOError('"%s" is not valid format' % line)
        # By default, all 1000 slots are 0. Only ranks actually given are set.
        ranked = qid_to_ranked_candidate_passages.setdefault(qid, [0] * 1000)
        ranked[rank - 1] = pid
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load candidate rankings from a file path.

    Args:
        path_to_candidate (str): path of the candidate file to load.
    Returns:
        dict: mapping from query_id (int) to a list of 1000 ranked passage ids.
    """
    with open(path_to_candidate, 'r') as candidate_file:
        return load_candidate_from_stream(candidate_file)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the reference/candidate dictionaries.

    A candidate ranking may not contain the same passage id more than once
    for a single query (the 0 padding entries are exempt).

    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read by load_reference / load_reference_from_stream.
            (Kept for interface compatibility; not inspected here.)
        qids_to_ranked_candidate_passages (dict): query -> ranked candidate
            passage ids, as read by load_candidate / load_candidate_from_stream.
    Returns:
        bool, str: whether the submission is allowed, and a message describing
            the first problem found ('' when everything is fine).
    """
    for qid, candidates in qids_to_ranked_candidate_passages.items():
        # Any pid other than the 0 filler may appear at most once.
        duplicate_pids = {
            pid for pid, count in Counter(candidates).items()
            if count > 1 and pid != 0
        }
        if duplicate_pids:
            # Return on the first violation (the original silently kept only
            # the last one); sorted() makes the reported pid deterministic.
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=sorted(duplicate_pids)[0])
            return False, message
    return True, ''
def METHOD_NAME(qids_to_relevant_passageids, qids_to_ranked_candidate_passages, per_query_score=False):
    """Compute MRR metric
    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read in with load_reference or load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query -> ranked candidate
            passage ids.
        per_query_score (bool): when True, also return the per-query
            reciprocal ranks as {"MRR@10": {qid: score}}.
    Returns:
        dict: dictionary of metrics {'MRR @10': <score>, 'QueriesRanked': <n>}
            (plus the per-query dict as a second value when requested).
    Raises:
        IOError: when no candidate qid matches any reference qid.
    """
    all_scores = {}
    MRR = 0
    qids_with_relevant_passages = 0  # NOTE(review): never updated or read below
    ranking = []  # 1-based rank of the first hit per judged query; 0 = miss
    qid2score = {}  # per-query reciprocal rank within the top MaxMRRRank
    for qid in qids_to_ranked_candidate_passages:
        MRR_i = 0.0
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    # Only the first relevant hit counts; stop scanning.
                    MRR += 1/(i + 1)
                    MRR_i += 1/(i + 1)
                    ranking.pop()
                    ranking.append(i + 1)
                    break
        # NOTE(review): indentation reconstructed at loop level (MRR_i is
        # reset per qid above the membership test) — unjudged queries score
        # 0.0. Confirm against the upstream msmarco_eval script.
        qid2score[qid] = MRR_i
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    # MRR is averaged over all *reference* queries, not only the ranked ones.
    MRR = MRR/len(qids_to_relevant_passageids)
    all_scores['MRR @10'] = MRR
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    if per_query_score:
        per_query_scores = {"MRR@10": qid2score}
        return all_scores, per_query_scores
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True, per_query_score=False):
    """Compute the MRR metric from a reference file and a candidate file.

    The reference file contains ``QUERYID\\tPASSAGEID`` lines (QUERYID may
    repeat); the candidate file contains ``QUERYID\\tPASSAGEID\\tRANK`` lines.

    Args:
        path_to_reference (str): path to the reference ranking file.
        path_to_candidate (str): path to the candidate ranking file.
        perform_checks (bool): run duplicate-passage quality checks first;
            a failed check is reported on stdout but does not stop scoring.
        per_query_score (bool): forwarded to the metric computation.
    Returns:
        dict: dictionary of metrics {'MRR @10': <score>, ...}
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        _, message = quality_checks_qids(reference, candidates)
        if message != '':
            print(message)
    return METHOD_NAME(reference, candidates, per_query_score=per_query_score)
def main():
    """Command line:
    python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
    """
    # Guard clause: wrong arity prints usage and exits.
    if len(sys.argv) != 3:
        print('Usage: msmarco_eval_ranking.py <reference ranking> <candidate ranking>')
        exit()
    path_to_reference, path_to_candidate = sys.argv[1], sys.argv[2]
    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    print('#####################')
    for metric in sorted(metrics):
        print('{}: {}'.format(metric, metrics[metric]))
    print('#####################')
if __name__ == '__main__':
main() |
7,029 | get view output | #!/usr/bin/env python
import os
import sys
import argcomplete
from tron.commands import cmd_utils
from tron.commands import display
from tron.commands.client import Client
from tron.commands.client import get_object_type_from_identifier
from tron.commands.client import RequestError
from tron.commands.client import TronObjectType
from tron.commands.cmd_utils import ExitCode
from tron.commands.cmd_utils import suggest_possibilities
from tron.commands.cmd_utils import tron_jobs_completer
def parse_cli():
    """Build the tronview argument parser, enable tab completion, parse argv."""
    parser = cmd_utils.build_option_parser()
    parser.add_argument(
        "--numshown", "-n", type=int, dest="num_displays", help="Max number of jobs/job-runs shown", default=10,
    )
    # --color/--nocolor share dest="display_color"; default None means "auto".
    parser.add_argument(
        "--color", "-c", action="store_true", dest="display_color", help="Display in color", default=None,
    )
    parser.add_argument(
        "--nocolor", action="store_false", dest="display_color", help="Display without color", default=None,
    )
    parser.add_argument(
        "--stdout", "-o", action="count", dest="stdout", help="Solely displays stdout", default=0,
    )
    parser.add_argument(
        "--stderr", "-e", action="count", dest="stderr", help="Solely displays stderr", default=0,
    )
    parser.add_argument(
        "--events", "-E", action="store_true", dest="events", help="Display stored events", default=0,
    )
    # Optional positional identifier; completion suggests known job names.
    parser.add_argument(
        "name", nargs="?", help="job name | job run id | action id",
    ).completer = cmd_utils.tron_jobs_completer
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    return args
def console_height():
    """Return the terminal height in rows, or 40 when stdout is not a TTY."""
    if not sys.stdout.isatty():
        return 40
    # shutil.get_terminal_size avoids spawning an `stty` subprocess and
    # leaking the unclosed os.popen pipe of the previous implementation.
    import shutil
    return shutil.get_terminal_size().lines
def view_all(args, client):
    """Retrieve all jobs from the client and return their formatted display."""
    jobs = client.jobs(
        include_job_runs=False,
        include_action_runs=False,
        include_action_graph=False,
        include_node_pool=False,
    )
    return display.DisplayJobs().format(jobs)
def view_job(args, job_id, client):
    """Retrieve details of the specified job and return the formatted text."""
    job_content = client.job(job_id.url, count=args.num_displays)
    return display.format_job_details(job_content)
def view_job_run(args, job_run_id, client):
    """Fetch the action runs of a job run and return their formatted display."""
    action_run_data = client.job_runs(job_run_id.url)
    return display.DisplayActionRuns().format(action_run_data)
def view_action_run(args, act_run_id, client):
    """Fetch a single action run and return its formatted details."""
    run_content = client.action_runs(act_run_id.url, num_lines=args.num_displays)
    return display.format_action_run_details(run_content)
# Dispatch table: Tron object type -> view function for that identifier kind.
obj_type_to_view_map = {
    TronObjectType.job: view_job,
    TronObjectType.job_run: view_job_run,
    TronObjectType.action_run: view_action_run,
}
def METHOD_NAME(name, args, client):
    """Resolve ``name`` to a Tron object and return its formatted view.

    Returns None for object types with no registered view. Exits via
    SystemExit with a suggestion list when the identifier is unknown, or
    with the server error when the request fails.
    """
    url_index = client.index()
    try:
        tron_id = get_object_type_from_identifier(url_index, name)
    except ValueError as exc:
        known_jobs = list(tron_jobs_completer(prefix="", client=client))
        hint = suggest_possibilities(word=name, possibilities=known_jobs)
        raise SystemExit(f"Error: {exc}{hint}")
    view_func = obj_type_to_view_map.get(tron_id.type)
    if view_func is None:
        return None
    try:
        return view_func(args, tron_id, client)
    except RequestError as exc:
        raise SystemExit(f"Error: {exc}")
def main():
    """run tronview"""
    args = parse_cli()
    cmd_utils.setup_logging(args)
    cmd_utils.load_config(args)
    display.Color.toggle(args.display_color)
    client = Client(args.server)
    try:
        if args.events:
            # --events short-circuits: dump stored events and exit success.
            response = client.request("/api/events")
            error = response.get("error")
            if not error:
                for evt in response.get("response", ["* no recorded events *"]):
                    print(evt)
            sys.exit(ExitCode.success)
        if not args.name:
            output = view_all(args, client)
        else:
            output = METHOD_NAME(args.name, args, client)
        if not output:
            print("Unrecognized identifier: %s" % args.name, file=sys.stderr)
            sys.exit(ExitCode.fail)
        # Page through `less` only when interactive and the output overflows.
        if sys.stdout.isatty() and len(output.split("\n")) > console_height():
            display.view_with_less(output, args.display_color)
        else:
            print(output)
    except RequestError as err:
        print(
            f"Error connecting to the tron server ({args.server}): {err}", file=sys.stderr,
        )
        sys.exit(ExitCode.fail)
if __name__ == "__main__":
main() |
7,030 | test semi circle | # bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021-2023 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh,
# J. Morris, D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
import numpy as np
import pytest
from bluemira.codes.error import FreeCADError
from bluemira.equilibria.shapes import JohnerLCFS
from bluemira.geometry.face import BluemiraFace
from bluemira.geometry.parameterisations import PrincetonD, TripleArc
from bluemira.geometry.tools import (
make_circle,
make_polygon,
offset_wire,
revolve_shape,
sweep_shape,
)
class TestSweep:
    """Tests for ``sweep_shape`` along straight, circular and parameterised paths."""

    def test_straight(self):
        """A 2x2 square profile swept along a unit straight path gives volume 4."""
        path = make_polygon([[0, 0, 0], [0, 0, 1]])
        profile = make_polygon(
            [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]], closed=True
        )
        sweep = sweep_shape(profile, path, solid=True)
        assert np.isclose(sweep.volume, 4.0)

    def test_semicircle(self):
        """A unit square at radius 1 swept along a semicircle gives volume pi."""
        path = make_circle(start_angle=90, end_angle=-90)
        profile = make_polygon(
            [[0.5, 0, -0.5], [1.5, 0, -0.5], [1.5, 0, 0.5], [0.5, 0, 0.5]], closed=True
        )
        sweep = sweep_shape(profile, path, solid=True)
        assert sweep.is_valid()
        assert np.isclose(sweep.volume, np.pi)

    def test_circle(self):
        """A unit square at radius 1 swept along a full circle gives volume 2*pi."""
        path = make_circle(start_angle=0, end_angle=360)
        profile = make_polygon(
            [[0.5, 0, -0.5], [1.5, 0, -0.5], [1.5, 0, 0.5], [0.5, 0, 0.5]], closed=True
        )
        sweep = sweep_shape(profile, path, solid=True)
        assert sweep.is_valid()
        assert np.isclose(sweep.volume, 2 * np.pi)

    def test_multiple_profiles(self):
        """Sweeping through several differently shaped closed profiles is valid."""
        path = make_polygon([[0, 0, 0], [0, 0, 10]])
        profile_1 = make_polygon(
            [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]], closed=True
        )
        profile_2 = make_circle(
            axis=[0, 0, 1], center=[0, 0, 5], radius=1, start_angle=0, end_angle=360
        )
        profile_3 = make_circle(
            axis=[0, 0, 1], center=[0, 0, 10], radius=2, start_angle=0, end_angle=360
        )
        sweep = sweep_shape([profile_1, profile_2, profile_3], path)
        assert sweep.is_valid()

    def test_princeton_d(self):
        """Sweep along a PrincetonD path; volume pinned by regression value."""
        x2 = 14
        dx = 0.5
        dy = 1
        pd = PrincetonD({"x2": {"value": x2}})
        path = pd.create_shape()
        profile = make_polygon(
            [[x2 - dx, -dy, 0], [x2 + dx, -dy, 0], [x2 + dx, dy, 0], [x2 - dx, dy, 0]],
            closed=True,
        )
        sweep = sweep_shape(profile, path)
        assert sweep.is_valid()
        assert np.isclose(sweep.volume, 84.1923, rtol=1e-4)

    def test_triple_arc(self):
        """Sweep along a TripleArc path; volume pinned by regression value."""
        x1 = 4
        dx = 0.5
        dy = 1
        path = TripleArc().create_shape({"x1": {"value": x1}})
        profile = make_polygon(
            [[x1 - dx, -dy, 0], [x1 + dx, -dy, 0], [x1 + dx, dy, 0], [x1 - dx, dy, 0]],
            closed=True,
        )
        sweep = sweep_shape(profile, path)
        assert sweep.is_valid()
        assert np.isclose(sweep.volume, 95.61485, rtol=1e-4)

    def test_bad_profiles(self):
        """Mixing closed and open profiles must raise a FreeCADError."""
        path = make_polygon([[0, 0, 0], [0, 0, 10]])
        profile_1 = make_polygon(
            [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]], closed=True
        )
        profile_2 = make_polygon(
            [[-1, -1, 10], [1, -1, 10], [1, 1, 10], [-1, 1, 10]], closed=False
        )
        with pytest.raises(FreeCADError):
            # Result intentionally discarded (was an unused `sweep =` binding).
            sweep_shape([profile_1, profile_2], path)

    def test_bad_path(self):
        """An unsuitable path (kinked polygon, circle profile) must raise."""
        path = make_polygon([[0, 0, 0], [0, 0, 10], [10, 0, 10]])
        profile = make_circle(
            axis=[0, 0, 1], center=[0, 0, 0], start_angle=0, end_angle=360
        )
        with pytest.raises(FreeCADError):
            # Result intentionally discarded (was an unused `sweep =` binding).
            sweep_shape(profile, path)

    def test_open_shell(self):
        """An open profile swept with solid=False produces a valid shell."""
        path = make_polygon([[0, 0, 0], [0, 0, 10]])
        profile = make_polygon([[1, 0, 0], [1, 1, 0], [2, 1, 0]])
        sweep = sweep_shape(profile, path, solid=False)
        assert sweep.is_valid()
class TestRevolve:
    """Tests for ``revolve_shape`` on wires and faces."""

    def METHOD_NAME(self):
        # Unit square at mean radius 1, revolved through 180 degrees.
        wire = make_polygon(
            [[0.5, 0, -0.5], [1.5, 0, -0.5], [1.5, 0, 0.5], [0.5, 0, 0.5]], closed=True
        )
        shape = revolve_shape(wire, degree=180)
        assert np.isclose(shape.area, 4 * np.pi)
        assert shape.is_valid()
        face = BluemiraFace(wire)
        shape = revolve_shape(face, degree=180)
        # Half revolution: pi * R * A = pi * 1 * 1.
        assert np.isclose(shape.volume, np.pi)
        assert shape.is_valid()

    def test_circle(self):
        # Same square revolved through a full 360 degrees.
        wire = make_polygon(
            [[0.5, 0, -0.5], [1.5, 0, -0.5], [1.5, 0, 0.5], [0.5, 0, 0.5]], closed=True
        )
        shape = revolve_shape(wire, degree=360)
        assert np.isclose(shape.area, 8 * np.pi)
        assert shape.is_valid()
        face = BluemiraFace(wire)
        shape = revolve_shape(face, degree=360)
        assert np.isclose(shape.volume, 2 * np.pi)
        assert shape.is_valid()

    def test_johner_semi(self):
        # Half revolution of a Johner LCFS face; expected volume from the
        # theorem of Pappus: pi * centroid_x * area.
        wire = JohnerLCFS().create_shape()
        face = BluemiraFace(wire)
        shape = revolve_shape(wire, degree=180)
        assert shape.is_valid()
        true_volume = np.pi * face.center_of_mass[0] * face.area
        shape = revolve_shape(face, degree=180)
        assert shape.is_valid()
        assert np.isclose(shape.volume, true_volume)

    def test_johner_full(self):
        # Full revolution: 2 * pi * centroid_x * area.
        wire = JohnerLCFS().create_shape()
        face = BluemiraFace(wire)
        shape = revolve_shape(wire, degree=360)
        assert shape.is_valid()
        true_volume = 2 * np.pi * face.center_of_mass[0] * face.area
        shape = revolve_shape(face, degree=360)
        assert shape.is_valid()
        assert np.isclose(shape.volume, true_volume)

    def test_revolve_hollow(self):
        # Annular cross-section: 2x2 inner square offset outward by 1 gives a
        # 4x4 outer square; revolved volume = 2*pi*x_c*(4^2 - 2^2).
        x_c = 10
        d_xc = 1.0
        d_zc = 1.0
        inner = make_polygon(
            [
                [x_c - d_xc, 0, -d_zc],
                [x_c + d_xc, 0, -d_zc],
                [x_c + d_xc, 0, d_zc],
                [x_c - d_xc, 0, d_zc],
            ],
            closed=True,
        )
        outer = offset_wire(inner, 1.0, join="intersect")
        face = BluemiraFace([outer, inner])
        solid = revolve_shape(face, degree=360)
        true_volume = 2 * np.pi * x_c * (4**2 - 2**2)
        assert solid.is_valid()
        assert np.isclose(solid.volume, true_volume)
7,031 | requested attribute type from string | #!/usr/bin/env python
#
# Generated Tue Jul 18 14:58:29 2017 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import SamlBase
from saml2 import saml
NAMESPACE = "http://eidas.europa.eu/saml-extensions"
class RequestedAttributeType_(SamlBase):
    """The http://eidas.europa.eu/saml-extensions:RequestedAttributeType element"""

    # Generated schema metadata: copies of the base maps are extended with
    # this type's children, attributes and cardinality constraints.
    c_tag = "RequestedAttributeType"
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children["{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue"] = ("attribute_value", [saml.AttributeValue])
    c_cardinality["attribute_value"] = {"min": 0}
    # (attribute name, type, required) triples per the eIDAS schema.
    c_attributes["Name"] = ("name", "None", True)
    c_attributes["NameFormat"] = ("name_format", "None", True)
    c_attributes["FriendlyName"] = ("friendly_name", "None", False)
    c_attributes["isRequired"] = ("is_required", "None", False)
    c_child_order.extend(["attribute_value"])

    def __init__(
        self,
        attribute_value=None,
        name=None,
        name_format=None,
        friendly_name=None,
        is_required=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        SamlBase.__init__(
            self,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        # `or []` avoids sharing a mutable default across instances.
        self.attribute_value = attribute_value or []
        self.name = name
        self.name_format = name_format
        self.friendly_name = friendly_name
        self.is_required = is_required
def METHOD_NAME(xml_string):
    """Deserialize a RequestedAttributeType_ instance from an XML string."""
    return saml2.create_class_from_xml_string(RequestedAttributeType_, xml_string)
class RequestedAttribute(RequestedAttributeType_):
    """The http://eidas.europa.eu/saml-extensions:RequestedAttribute element"""

    # Concrete element: inherits all schema metadata from the *Type_ class,
    # overriding only the tag name.
    c_tag = "RequestedAttribute"
    c_namespace = NAMESPACE
    c_children = RequestedAttributeType_.c_children.copy()
    c_attributes = RequestedAttributeType_.c_attributes.copy()
    c_child_order = RequestedAttributeType_.c_child_order[:]
    c_cardinality = RequestedAttributeType_.c_cardinality.copy()
def requested_attribute_from_string(xml_string):
    """Deserialize a RequestedAttribute instance from an XML string."""
    return saml2.create_class_from_xml_string(RequestedAttribute, xml_string)
class RequestedAttributesType_(SamlBase):
    """The http://eidas.europa.eu/saml-extensions:RequestedAttributesType element"""

    # Generated schema metadata: a sequence of zero or more
    # RequestedAttribute children.
    c_tag = "RequestedAttributesType"
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children["{http://eidas.europa.eu/saml-extensions}RequestedAttribute"] = (
        "requested_attribute",
        [RequestedAttribute],
    )
    c_cardinality["requested_attribute"] = {"min": 0}
    c_child_order.extend(["requested_attribute"])

    def __init__(
        self,
        requested_attribute=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        SamlBase.__init__(
            self,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        # `or []` avoids sharing a mutable default across instances.
        self.requested_attribute = requested_attribute or []
def requested_attributes_type__from_string(xml_string):
    """Deserialize a RequestedAttributesType_ instance from an XML string."""
    return saml2.create_class_from_xml_string(RequestedAttributesType_, xml_string)
class RequestedAttributes(RequestedAttributesType_):
    """The http://eidas.europa.eu/saml-extensions:RequestedAttributes element"""

    # Concrete element: inherits all schema metadata from the *Type_ class,
    # overriding only the tag name.
    c_tag = "RequestedAttributes"
    c_namespace = NAMESPACE
    c_children = RequestedAttributesType_.c_children.copy()
    c_attributes = RequestedAttributesType_.c_attributes.copy()
    c_child_order = RequestedAttributesType_.c_child_order[:]
    c_cardinality = RequestedAttributesType_.c_cardinality.copy()
def requested_attributes_from_string(xml_string):
    """Deserialize a RequestedAttributes instance from an XML string."""
    return saml2.create_class_from_xml_string(RequestedAttributes, xml_string)
# Lookup tables used by pysaml2's generic (de)serialization machinery:
# tag name -> parser function, and tag name -> element class.
ELEMENT_FROM_STRING = {
    RequestedAttributes.c_tag: requested_attributes_from_string,
    RequestedAttributesType_.c_tag: requested_attributes_type__from_string,
    RequestedAttribute.c_tag: requested_attribute_from_string,
    RequestedAttributeType_.c_tag: METHOD_NAME,
}
ELEMENT_BY_TAG = {
    "RequestedAttributes": RequestedAttributes,
    "RequestedAttributesType": RequestedAttributesType_,
    "RequestedAttribute": RequestedAttribute,
    "RequestedAttributeType": RequestedAttributeType_,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under ``tag``.

    Raises KeyError for an unknown tag.
    """
    return ELEMENT_BY_TAG[tag](**kwargs)
7,032 | default output file | """Class for output file configuration"""
import logging
from DDSim.Helper.ConfigHelper import ConfigHelper
logger = logging.getLogger(__name__)
#: True if DD4hep was built with LCIO
DD4HEP_USE_LCIO = "@DD4HEP_USE_LCIO@" != "OFF"
#: True if DD4hep was built with EDM4hep
DD4HEP_USE_EDM4HEP = "@DD4HEP_USE_EDM4HEP@" != "OFF"
def METHOD_NAME():
    """Return the default dummy output file name for the available backend."""
    return "dummyOutput.slcio" if DD4HEP_USE_LCIO else "dummyOutput.root"
class OutputConfig(ConfigHelper):
"""Configuration for Output Files."""
    def __init__(self):
        super(OutputConfig, self).__init__()
        # Optional user callable that fully takes over output configuration.
        self._userPlugin = None
        # Backend force flags; at most one may be enabled at a time.
        self._forceLCIO = False
        self._forceEDM4HEP = False
        self._forceDD4HEP = False
        # no closeProperties, allow custom ones for userPlugin configuration
    def _checkConsistency(self):
        """Raise error if more than one force flag is true."""
        # Booleans sum as ints, so a sum above 1 means two or more are set.
        if self._forceLCIO + self._forceEDM4HEP + self._forceDD4HEP > 1:
            raise RuntimeError(f"OutputConfig error: More than one force flag enabled: LCIO({self._forceLCIO}),"
                               f" EDM4HEP({self._forceEDM4HEP}), DD4HEP({self._forceDD4HEP})")
    @property
    def forceLCIO(self):
        """Use the LCIO output plugin regardless of outputfilename."""
        return self._forceLCIO

    @forceLCIO.setter
    def forceLCIO(self, val):
        self._forceLCIO = self.makeBool(val)
        if self._forceLCIO:
            # Fail fast: forcing LCIO is meaningless without LCIO support.
            if not DD4HEP_USE_LCIO:
                raise RuntimeError("OutputConfig error: forceLCIO requested, but LCIO not available!")
            self._checkConsistency()
    @property
    def forceEDM4HEP(self):
        """Use the EDM4HEP output plugin regardless of outputfilename."""
        return self._forceEDM4HEP

    @forceEDM4HEP.setter
    def forceEDM4HEP(self, val):
        self._forceEDM4HEP = self.makeBool(val)
        if self._forceEDM4HEP:
            # Fail fast: forcing EDM4hep requires EDM4hep support in the build.
            if not DD4HEP_USE_EDM4HEP:
                raise RuntimeError("OutputConfig error: forceEDM4HEP requested, but EDM4HEP not available!")
            self._checkConsistency()
    @property
    def forceDD4HEP(self):
        """Use the DD4HEP output plugin regardless of outputfilename."""
        return self._forceDD4HEP

    @forceDD4HEP.setter
    def forceDD4HEP(self, val):
        self._forceDD4HEP = self.makeBool(val)
        if self._forceDD4HEP:
            # Plain DD4hep ROOT output is always available; only the
            # mutual-exclusion check is needed here.
            self._checkConsistency()
    @property
    def userOutputPlugin(self):
        """Set a function to configure the outputFile.

        The function must take a ``DD4hepSimulation`` object as its only argument and return ``None``.

        For example one can add this to the ddsim steering file:

          def exampleUserPlugin(dd4hepSimulation):
            '''Example code for user created plugin.

            :param DD4hepSimulation dd4hepSimulation: The DD4hepSimulation instance, so all parameters can be accessed
            :return: None
            '''
            from DDG4 import EventAction, Kernel
            dd = dd4hepSimulation  # just shorter variable name
            evt_root = EventAction(Kernel(), 'Geant4Output2ROOT/' + dd.outputFile, True)
            evt_root.HandleMCTruth = True or False
            evt_root.Control = True
            output = dd.outputFile
            if not dd.outputFile.endswith(dd.outputConfig.myExtension):
              output = dd.outputFile + dd.outputConfig.myExtension
            evt_root.Output = output
            evt_root.enableUI()
            Kernel().eventAction().add(evt_root)
            return None

          SIM.outputConfig.userOutputPlugin = exampleUserPlugin
          # arbitrary options can be created and set via the steering file or command line
          SIM.outputConfig.myExtension = '.csv'
        """
        return self._userPlugin

    @userOutputPlugin.setter
    def userOutputPlugin(self, userOutputPluginConfig):
        # ``None`` keeps the default behaviour; anything else must be callable.
        if userOutputPluginConfig is None:
            return
        if not callable(userOutputPluginConfig):
            raise RuntimeError("The provided userPlugin is not a callable function.")
        self._userPlugin = userOutputPluginConfig
def initialize(self, dd4hepsimulation, geant4):
    """Configure the output file and plugin.

    Dispatch priority: a user plugin wins over any force flag; force flags
    win over the filename-extension heuristics (.slcio -> LCIO,
    .root -> EDM4HEP when available, otherwise plain DD4hep ROOT).
    """
    if callable(self._userPlugin):
        logger.info("++++ Setting up UserPlugin for Output ++++")
        return self._userPlugin(dd4hepsimulation)

    if self.forceLCIO:
        return self._configureLCIO(dd4hepsimulation, geant4)

    if self.forceEDM4HEP:
        return self._configureEDM4HEP(dd4hepsimulation, geant4)

    if self.forceDD4HEP:
        return self._configureDD4HEP(dd4hepsimulation, geant4)

    # No force flag set: pick the backend from the output file extension.
    if dd4hepsimulation.outputFile.endswith(".slcio"):
        return self._configureLCIO(dd4hepsimulation, geant4)

    if dd4hepsimulation.outputFile.endswith(".root") and DD4HEP_USE_EDM4HEP:
        return self._configureEDM4HEP(dd4hepsimulation, geant4)

    if dd4hepsimulation.outputFile.endswith(".root"):
        return self._configureDD4HEP(dd4hepsimulation, geant4)
def _configureLCIO(self, dds, geant4):
    """Set up the LCIO output plugin for *dds* on the given Geant4 instance.

    :raises RuntimeError: if DD4hep was compiled without LCIO support.
    """
    if not DD4HEP_USE_LCIO:
        # Fixed typo in the error message: "build wiht" -> "built with".
        raise RuntimeError("DD4HEP was not built with LCIO support: please change output format %s" % dds.outputFile)
    logger.info("++++ Setting up LCIO Output ++++")
    lcOut = geant4.setupLCIOOutput('LcioOutput', dds.outputFile)
    lcOut.RunHeader = dds.meta.addParametersToRunHeader(dds)
    eventPars = dds.meta.parseEventParameters()
    lcOut.EventParametersString, lcOut.EventParametersInt, lcOut.EventParametersFloat = eventPars
    # Negative offsets are clamped to zero.
    lcOut.RunNumberOffset = max(0, dds.meta.runNumberOffset)
    lcOut.EventNumberOffset = max(0, dds.meta.eventNumberOffset)
    return
def _configureEDM4HEP(self, dds, geant4):
    """Set up the EDM4hep ROOT output plugin for *dds*."""
    logger.info("++++ Setting up EDM4hep ROOT Output ++++")
    output = geant4.setupEDM4hepOutput('EDM4hepOutput', dds.outputFile)
    params = dds.meta.parseEventParameters()
    output.RunHeader = dds.meta.addParametersToRunHeader(dds)
    output.EventParametersString, output.EventParametersInt, output.EventParametersFloat = params
    # Negative offsets collapse to zero.
    output.RunNumberOffset = dds.meta.runNumberOffset if dds.meta.runNumberOffset > 0 else 0
    output.EventNumberOffset = dds.meta.eventNumberOffset if dds.meta.eventNumberOffset > 0 else 0
def _configureDD4HEP(self, dds, geant4):
    """Set up DD4hep's plain ROOT output plugin for *dds*."""
    logger.info("++++ Setting up DD4hep's ROOT Output ++++")
    geant4.setupROOTOutput('RootOutput', dds.outputFile)
7,033 | test morphsnakes simple shape chan vese | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from skimage.segmentation import (disk_level_set,
inverse_gaussian_gradient,
morphological_chan_vese,
morphological_geodesic_active_contour)
def gaussian_blob():
    """Return an 11x11 Gaussian bump centered on the grid (peak value 1.0)."""
    rows, cols = np.mgrid[-5:6, -5:6]
    return np.exp(-(rows ** 2 + cols ** 2) / 10)
def test_morphsnakes_incorrect_image_shape():
    """Image/level-set shape mismatches must be rejected with ValueError."""
    img = np.zeros((10, 10, 3))
    ls = np.zeros((10, 9))

    with pytest.raises(ValueError):
        morphological_chan_vese(img, num_iter=1, init_level_set=ls)
    with pytest.raises(ValueError):
        morphological_geodesic_active_contour(img, num_iter=1,
                                              init_level_set=ls)


def test_morphsnakes_incorrect_ndim():
    """4-D inputs are unsupported and must raise ValueError."""
    img = np.zeros((4, 4, 4, 4))
    ls = np.zeros((4, 4, 4, 4))

    with pytest.raises(ValueError):
        morphological_chan_vese(img, num_iter=1, init_level_set=ls)
    with pytest.raises(ValueError):
        morphological_geodesic_active_contour(img, num_iter=1,
                                              init_level_set=ls)
def test_morphsnakes_black():
    """On an all-black image the contours collapse to nothing — unless a
    positive balloon force with no smoothing inflates them to fill the image."""
    img = np.zeros((11, 11))
    ls = disk_level_set(img.shape, center=(5, 5), radius=3)

    ref_zeros = np.zeros(img.shape, dtype=np.int8)
    ref_ones = np.ones(img.shape, dtype=np.int8)

    acwe_ls = morphological_chan_vese(img, num_iter=6, init_level_set=ls)
    assert_array_equal(acwe_ls, ref_zeros)

    gac_ls = morphological_geodesic_active_contour(img, num_iter=6,
                                                   init_level_set=ls)
    assert_array_equal(gac_ls, ref_zeros)

    # balloon=1 with threshold=-1 and no smoothing should expand everywhere.
    gac_ls2 = morphological_geodesic_active_contour(img, num_iter=6,
                                                    init_level_set=ls,
                                                    balloon=1, threshold=-1,
                                                    smoothing=0)
    assert_array_equal(gac_ls2, ref_ones)

    assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8


def METHOD_NAME():
    """Morphological Chan-Vese converges to the same segmentation from
    different disk initializations on a simple Gaussian blob."""
    img = gaussian_blob()
    ls1 = disk_level_set(img.shape, center=(5, 5), radius=3)
    ls2 = disk_level_set(img.shape, center=(5, 5), radius=6)

    acwe_ls1 = morphological_chan_vese(img, num_iter=10, init_level_set=ls1)
    acwe_ls2 = morphological_chan_vese(img, num_iter=10, init_level_set=ls2)

    assert_array_equal(acwe_ls1, acwe_ls2)
    assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8
def test_morphsnakes_simple_shape_geodesic_active_contour():
    """GAC with a negative balloon force shrinks a large disk onto the
    small-disk edge map."""
    img = (disk_level_set((11, 11), center=(5, 5), radius=3.5)).astype(float)
    gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
    ls = disk_level_set(img.shape, center=(5, 5), radius=6)

    # Expected segmentation after 10 iterations.
    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                   dtype=np.int8)

    gac_ls = morphological_geodesic_active_contour(gimg, num_iter=10,
                                                   init_level_set=ls,
                                                   balloon=-1)

    assert_array_equal(gac_ls, ref)
    assert gac_ls.dtype == np.int8
def test_init_level_sets():
    """The string shortcuts 'checkerboard' and 'disk' produce the documented
    initial level sets (num_iter=0 returns the initialization unchanged)."""
    image = np.zeros((6, 6))

    checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard')
    checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [1, 1, 1, 1, 1, 0]], dtype=np.int8)

    disk_ls = morphological_geodesic_active_contour(image, 0, 'disk')
    disk_ref = np.array([[0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1],
                         [0, 1, 1, 1, 1, 1],
                         [0, 1, 1, 1, 1, 1],
                         [0, 0, 1, 1, 1, 0]], dtype=np.int8)

    assert_array_equal(checkerboard_ls, checkerboard_ref)
    assert_array_equal(disk_ls, disk_ref)


def test_morphsnakes_3d():
    """ACWE works on 3-D volumes: the disk level set shrinks monotonically
    to nothing on an all-zero image."""
    image = np.zeros((7, 7, 7))

    evolution = []

    def callback(x):
        evolution.append(x.sum())

    ls = morphological_chan_vese(image, 5, 'disk',
                                 iter_callback=callback)

    # Check that the initial disk level set is correct
    assert evolution[0] == 81

    # Check that the final level set is correct
    assert ls.sum() == 0

    # Check that the contour is shrinking at every iteration
    for v1, v2 in zip(evolution[:-1], evolution[1:]):
        assert v1 >= v2
7,034 | main | #!/usr/bin/env python3
"""Tool to take files from a font family project upstream git repository
to the google/fonts GitHub repository structure, taking care of all the details.
Documentation at gftools/docs/gftools-packager/README.md
"""
import sys
from gftools import packager
from gftools.packager import UserAbortError, ProgramAbortError
import argparse
def _ansi_bold(text:str) ->str:
return f'\033[1m{text}\033[0m'
# Command-line interface definition for `gftools packager`.
parser = argparse.ArgumentParser(
    prog='gftools packager',
    description='Package upstream font families for Google Fonts.',
    epilog=f'{_ansi_bold("Documentation:")} '
           'https://github.com/googlefonts/gftools/tree/main/docs/gftools-packager'
           '\n'
           f'{_ansi_bold("Issues:")} '
           'https://github.com/googlefonts/gftools/issues'
)

# --- positional arguments ---
parser.add_argument(
    'file_or_families',
    metavar='name',
    type=str,
    nargs='*',
    help='The family name(s) or file name(s) of upstream conf yaml '
         'files to be packaged. If a name ends with the ".yaml" suffix, '
         'it\'s treated as a file otherwise it\'s used as family name '
         'and packager tries to gather upstream configuration from '
         'the google/fonts GitHub repository. If no name is specified, '
         'no package will be created. This is useful to only make a '
         'PR from an already created branch, not adding a commit, '
         'use -b/--branch and see see -p/--pr.')

# --- general behavior flags ---
parser.add_argument(
    '-f', '--force',
    action='store_true',
    help='This allows the program to manipulate/change/delete data '
         'in [target]. Without this flag only adding new items '
         '(depends: files, directories or branches, trees, blobs) '
         'is allowed.')
parser.add_argument(
    '-y',
    '--no-confirm',
    dest='yes',
    action='store_true',
    help='Don\'t require user interaction, by answering with the '
         'default always. Removes all interactivity.')
parser.add_argument(
    '-q',
    '--quiet',
    action='store_true',
    help='Don\'t print user interaction dialogues when -y/--no-confirm is used.')
parser.add_argument(
    'target',
    type=str,
    help='The target of the package. By default a path to a directory. '
         'See -f/--force to allow changing none-empty directories. '
         'See -g/--gf-git to use it as a git repository. '
         'A notable exception is -u/--upstream-yaml where the upstream.yaml '
         'template will be saved to target file name.')

# --- git / pull-request workflow options ---
parser.add_argument(
    '-g', '--gf-git',
    dest='is_gf_git',
    action='store_true',
    help='Try to use target as a git repository clone of GitHub google/fonts and '
         'create or override a branch from upstream main using a generated '
         'default branch name or a branch name specified with -b/--branch')
parser.add_argument(
    '-b', '--branch',
    type=str,
    default=None,
    help='Set the local target branch name instead '
         'of using the generated branch name, like: "gftools_packager_{familyname}". '
         'This implies -g/--gf-git, i.e. target will be treated as if -g/--gf-git is set.')
parser.add_argument(
    '-a', '--add-commit',
    action='store_true',
    help='Don\'t override existing branch and instead add a new '
         'commit to the branch. Use this to create a PR for multiple '
         'familes e.g. a super family or a bunch update. '
         'It\'s likely that you want to combine this with -b/--branch. '
         'This implies -g/--gf-git, i.e. target will be treated as if -g/--gf-git is set.')
parser.add_argument(
    '-p', '--pr',
    action='store_true',
    help='Make a pull request. '
         'This implies -g/--gf-git, i.e. target will be treated as if -g/--gf-git is set. '
         'See --pr-upstream and --push-upstream.')
parser.add_argument(
    '--pr-upstream',
    type=str,
    default='',
    help='The upstream where the pull request goes, as a GitHub '
         '"owner/repoName" pair (default: google/fonts). '
         'This implies -p/--pr, i.e. target will be treated as if -p/--pr is set.'
)
parser.add_argument(
    '--push-upstream',
    type=str,
    default='',
    # we can push to a clone of google/fonts and then pr from
    # that clone to --pr-upstream, however, our ghactions QA can't
    # run on a different repo, that's why this is mostly for testing.
    help='The upstream where the push goes, as a GitHub "owner/repoName" '
         'pair (default: the value of --pr-upstream). '
         'This implies -p/--pr, i.e. target will be treated as if -p/--pr is set.')

# --- upstream.yaml bootstrapping and build options ---
parser.add_argument(
    '-u', '--upstream-yaml',
    action='store_true',
    help='Create and output the upstream.yaml to the file name given by target. '
         'This is intended to help bootstrapping new upstream configurations. '
         'In it\'s simplest form, if no name argument is given, it will output the '
         'yaml template. '
         'However, if name is given, this will also try to include all available '
         'information and interact with the user. This can only handle one name, '
         'because there can only be one target. '
         'Use -y/--no-confirm to skip interactive mode.'
         'Use -f/--force to override existing target.')
parser.add_argument(
    '--no-allowlist',
    action='store_true',
    help='Don\'t use the allowlist of allowed files to copy from '
         'TARGET in upstream-conf "files". This is meant to enable '
         'forward compatibility with new files and should not '
         'be used regularly. Instead file an issue to add new '
         'files to the allowlist.')
parser.add_argument(
    '-B', '--allow-build',
    action='store_true',
    help='Allow executing the bash command stored in the "build" '
         'key of upstream-conf, if present. Don\'t allow this lightly '
         'and review build command, build process and its dependencies prior. '
         'This support for building from sources is provisional, a '
         'discussion can be found at https://github.com/googlefonts/gftools/issues/231'
)
def METHOD_NAME(args=None):
    """CLI entry point: parse the arguments and run the packager.

    Exits with status 1 on a user or program abort.
    """
    parsed = parser.parse_args(args)
    try:
        packager.make_package(**vars(parsed))
    except UserAbortError as e:
        if parsed.yes:
            reason = 'by program! User interaction required (don\'t use the -y/--no-confirm flag).'
        else:
            reason = 'by user!'
        print('Aborted', reason, f'{e}')
        sys.exit(1)
    except ProgramAbortError as e:
        print(f'Aborted by program: {e}')
        sys.exit(1)
    print('Done!')


if __name__ == '__main__':
    METHOD_NAME()
7,035 | get folder group share list | import os
import configparser
from django.db import connection
class SeafileDB:
    """Read-only helper that queries share records directly from the seafile
    database.

    All public methods return a list of dicts with the keys ``share_type``,
    ``repo_id``, ``path``, ``share_from``, ``share_to`` and ``permission``.
    """

    def __init__(self):
        # '' means "no usable MySQL seafile database" (sqlite or unconfigured).
        self.db_name = self._get_seafile_db_name()

    def _get_seafile_db_name(self):
        """Return the seafile database name from seafile.conf.

        Returns '' when the conf dir env vars are unset, the [database]
        section is missing, or the backend is sqlite.

        Raises:
            Exception: if a database section exists but db_name is empty.
        """
        conf_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR') or \
            os.environ.get('SEAFILE_CONF_DIR')
        if not conf_dir:
            return ""

        config = configparser.ConfigParser()
        seafile_conf_path = os.path.join(conf_dir, 'seafile.conf')
        config.read(seafile_conf_path)

        if not config.has_section('database'):
            return ''

        if 'sqlite' in config.get('database', 'type'):
            return ''

        db_name = config.get('database', 'db_name')
        if not db_name:
            raise Exception("Database name not configured.")

        return db_name

    def _fetch_share_info(self, sql, params, share_type, row_has_path):
        """Execute *sql* with bound *params* and normalize rows into dicts.

        Rows are ``(repo_id, share_from, share_to, permission)`` or, when
        *row_has_path* is true, ``(repo_id, path, share_from, share_to,
        permission)``; whole-library shares always get path '/'.
        """
        share_info_list = []
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            for item in cursor.fetchall():
                if row_has_path:
                    repo_id, path, share_from, share_to, permission = item
                else:
                    repo_id, share_from, share_to, permission = item
                    path = '/'
                share_info_list.append({
                    'share_type': share_type,
                    'repo_id': repo_id,
                    'path': path,
                    'share_from': share_from,
                    'share_to': share_to,
                    'permission': permission,
                })
        return share_info_list

    # NOTE: in all queries below the repo id is passed as a bound query
    # parameter (%s) instead of being interpolated into the SQL string,
    # which prevents SQL injection. The database name is an identifier and
    # cannot be bound, but it comes from seafile.conf, not user input.

    def get_repo_user_share_list(self, repo_id, org_id=''):
        """Return whole-library shares to individual users for *repo_id*."""
        table = 'SharedRepo' if not org_id else 'OrgSharedRepo'
        sql = f"""
        SELECT
            s.repo_id, s.from_email, s.to_email, s.permission
        FROM
            {self.db_name}.{table} s
        WHERE
            repo_id = %s
        """
        return self._fetch_share_info(sql, [repo_id], 'user', row_has_path=False)

    def get_repo_group_share_list(self, repo_id, org_id=''):
        """Return whole-library shares to groups for *repo_id*."""
        if not org_id:
            sql = f"""
            SELECT
                s.repo_id, s.user_name, s.group_id, s.permission
            FROM
                {self.db_name}.RepoGroup s
            WHERE
                repo_id = %s
            """
        else:
            sql = f"""
            SELECT
                s.repo_id, s.owner, s.group_id, s.permission
            FROM
                {self.db_name}.OrgGroupRepo s
            WHERE
                repo_id = %s
            """
        return self._fetch_share_info(sql, [repo_id], 'group', row_has_path=False)

    def get_folder_user_share_list(self, repo_id, org_id=''):
        """Return folder (virtual-repo) shares to individual users for *repo_id*."""
        table = 'SharedRepo' if not org_id else 'OrgSharedRepo'
        sql = f"""
        SELECT
            v.origin_repo, v.path, s.from_email, s.to_email, s.permission
        FROM
            {self.db_name}.{table} s join {self.db_name}.VirtualRepo v
        ON
            s.repo_id=v.repo_id
        WHERE
            v.origin_repo = %s
        """
        return self._fetch_share_info(sql, [repo_id], 'user', row_has_path=True)

    def METHOD_NAME(self, repo_id, org_id=''):
        """Return folder (virtual-repo) shares to groups for *repo_id*."""
        if not org_id:
            sql = f"""
            SELECT
                v.origin_repo, v.path, r.user_name, r.group_id, r.permission
            FROM
                {self.db_name}.RepoGroup r join {self.db_name}.VirtualRepo v
            ON
                r.repo_id=v.repo_id
            WHERE
                v.origin_repo = %s
            """
        else:
            sql = f"""
            SELECT
                v.origin_repo, v.path, r.owner, r.group_id, r.permission
            FROM
                {self.db_name}.OrgGroupRepo r join {self.db_name}.VirtualRepo v
            ON
                r.repo_id=v.repo_id
            WHERE
                v.origin_repo = %s
            """
        return self._fetch_share_info(sql, [repo_id], 'group', row_has_path=True)
7,036 | check p2p message | #!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
""" Tests the net:* tracepoint API interface.
See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#context-net
"""
import ctypes
from io import BytesIO
# Test will be skipped if we don't have bcc installed
try:
from bcc import BPF, USDT # type: ignore[import]
except ImportError:
pass
from test_framework.messages import msg_version
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
# Tor v3 addresses are 62 chars + 6 chars for the port (':12345').
MAX_PEER_ADDR_LENGTH = 68
MAX_PEER_CONN_TYPE_LENGTH = 20
MAX_MSG_TYPE_LENGTH = 20
# We won't process messages larger than 150 byte in this test. For reading
# larger messanges see contrib/tracing/log_raw_p2p_msgs.py
MAX_MSG_DATA_LENGTH = 150
net_tracepoints_program = """
#include <uapi/linux/ptrace.h>
#define MAX_PEER_ADDR_LENGTH {}
#define MAX_PEER_CONN_TYPE_LENGTH {}
#define MAX_MSG_TYPE_LENGTH {}
#define MAX_MSG_DATA_LENGTH {}
""".format(
MAX_PEER_ADDR_LENGTH,
MAX_PEER_CONN_TYPE_LENGTH,
MAX_MSG_TYPE_LENGTH,
MAX_MSG_DATA_LENGTH
) + """
#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; })
struct p2p_message
{
u64 peer_id;
char peer_addr[MAX_PEER_ADDR_LENGTH];
char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH];
char msg_type[MAX_MSG_TYPE_LENGTH];
u64 msg_size;
u8 msg[MAX_MSG_DATA_LENGTH];
};
BPF_PERF_OUTPUT(inbound_messages);
int trace_inbound_message(struct pt_regs *ctx) {
struct p2p_message msg = {};
bpf_usdt_readarg(1, ctx, &msg.peer_id);
bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH);
bpf_usdt_readarg(5, ctx, &msg.msg_size);
bpf_usdt_readarg_p(6, ctx, &msg.msg, MIN(msg.msg_size, MAX_MSG_DATA_LENGTH));
inbound_messages.perf_submit(ctx, &msg, sizeof(msg));
return 0;
}
BPF_PERF_OUTPUT(outbound_messages);
int trace_outbound_message(struct pt_regs *ctx) {
struct p2p_message msg = {};
bpf_usdt_readarg(1, ctx, &msg.peer_id);
bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH);
bpf_usdt_readarg(5, ctx, &msg.msg_size);
bpf_usdt_readarg_p(6, ctx, &msg.msg, MIN(msg.msg_size, MAX_MSG_DATA_LENGTH));
outbound_messages.perf_submit(ctx, &msg, sizeof(msg));
return 0;
};
"""
class NetTracepointTest(BitcoinTestFramework):
    """Exercises the net:inbound_message / net:outbound_message USDT
    tracepoints by connecting a test peer and checking the traced version
    handshake against getpeerinfo."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        # USDT/BPF tracing requires Linux, a tracepoint-enabled bitcoind
        # build, python-bcc, and sufficient BPF permissions.
        self.skip_if_platform_not_linux()
        self.skip_if_no_bitcoind_tracepoints()
        self.skip_if_no_python_bcc()
        self.skip_if_no_bpf_permissions()

    def run_test(self):
        # Tests the net:inbound_message and net:outbound_message tracepoints
        # See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#context-net

        # Python mirror of the C `struct p2p_message` filled by the BPF program.
        class P2PMessage(ctypes.Structure):
            _fields_ = [
                ("peer_id", ctypes.c_uint64),
                ("peer_addr", ctypes.c_char * MAX_PEER_ADDR_LENGTH),
                ("peer_conn_type", ctypes.c_char * MAX_PEER_CONN_TYPE_LENGTH),
                ("msg_type", ctypes.c_char * MAX_MSG_TYPE_LENGTH),
                ("msg_size", ctypes.c_uint64),
                ("msg", ctypes.c_ubyte * MAX_MSG_DATA_LENGTH),
            ]

            def __repr__(self):
                return f"P2PMessage(peer={self.peer_id}, addr={self.peer_addr.decode('utf-8')}, conn_type={self.peer_conn_type.decode('utf-8')}, msg_type={self.msg_type.decode('utf-8')}, msg_size={self.msg_size})"

        self.log.info(
            "hook into the net:inbound_message and net:outbound_message tracepoints")
        ctx = USDT(pid=self.nodes[0].process.pid)
        ctx.enable_probe(probe="net:inbound_message",
                         fn_name="trace_inbound_message")
        ctx.enable_probe(probe="net:outbound_message",
                         fn_name="trace_outbound_message")
        bpf = BPF(text=net_tracepoints_program, usdt_contexts=[ctx], debug=0)

        EXPECTED_INOUTBOUND_VERSION_MSG = 1
        checked_inbound_version_msg = 0
        checked_outbound_version_msg = 0
        events = []

        def METHOD_NAME(event, inbound):
            """Compare a traced 'version' message against getpeerinfo."""
            nonlocal checked_inbound_version_msg, checked_outbound_version_msg
            if event.msg_type.decode("utf-8") == "version":
                self.log.info(
                    f"check_p2p_message(): {'inbound' if inbound else 'outbound'} {event}")
                peer = self.nodes[0].getpeerinfo()[0]
                msg = msg_version()
                msg.deserialize(BytesIO(bytes(event.msg[:event.msg_size])))
                assert_equal(peer["id"], event.peer_id, peer["id"])
                assert_equal(peer["addr"], event.peer_addr.decode("utf-8"))
                assert_equal(peer["connection_type"],
                             event.peer_conn_type.decode("utf-8"))
                if inbound:
                    checked_inbound_version_msg += 1
                else:
                    checked_outbound_version_msg += 1

        # Perf-buffer callbacks only record events; they are validated after
        # polling so assertion failures surface in the test, not in bcc.
        def handle_inbound(_, data, __):
            nonlocal events
            event = ctypes.cast(data, ctypes.POINTER(P2PMessage)).contents
            events.append((event, True))

        def handle_outbound(_, data, __):
            event = ctypes.cast(data, ctypes.POINTER(P2PMessage)).contents
            events.append((event, False))

        bpf["inbound_messages"].open_perf_buffer(handle_inbound)
        bpf["outbound_messages"].open_perf_buffer(handle_outbound)

        self.log.info("connect a P2P test node to our bitcoind node")
        test_node = P2PInterface()
        self.nodes[0].add_p2p_connection(test_node)
        bpf.perf_buffer_poll(timeout=200)

        self.log.info(
            "check receipt and content of in- and outbound version messages")
        for event, inbound in events:
            METHOD_NAME(event, inbound)
        assert_equal(EXPECTED_INOUTBOUND_VERSION_MSG,
                     checked_inbound_version_msg)
        assert_equal(EXPECTED_INOUTBOUND_VERSION_MSG,
                     checked_outbound_version_msg)

        bpf.cleanup()


if __name__ == '__main__':
    NetTracepointTest().main()
7,037 | healthz | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
from argparse import ArgumentParser
from typing import Optional
from urllib.parse import urljoin
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
from lightning.app.frontend.frontend import Frontend
from lightning.app.utilities.log import get_logfile
from lightning.app.utilities.network import find_free_network_port
class StaticWebFrontend(Frontend):
    """A frontend that serves static files from a directory using FastAPI.

    Return this in your `LightningFlow.configure_layout()` method if you wish to serve a HTML page.

    Arguments:
        serve_dir: A local directory to serve files from. This directory should at least contain a file `index.html`.
        root_path: A path prefix when routing traffic from behind a proxy at `/<root_path>`

    Example:

        In your LightningFlow, override the method `configure_layout`:

        .. code-block:: python

            def configure_layout(self):
                return StaticWebFrontend("path/to/folder/to/serve")
    """

    def __init__(self, serve_dir: str) -> None:
        super().__init__()
        self.serve_dir = serve_dir
        # Server subprocess; created lazily in start_server().
        self._process: Optional[mp.Process] = None

    def start_server(self, host: str, port: int, root_path: str = "") -> None:
        """Launch the uvicorn static-file server in a separate process."""
        log_file = str(get_logfile())
        self._process = mp.Process(
            target=_start_server,
            kwargs={
                "host": host,
                "port": port,
                "serve_dir": self.serve_dir,
                # the flow name scopes the route so multiple flows can coexist
                "path": f"/{self.flow.name}",
                "log_file": log_file,
                "root_path": root_path,
            },
        )
        self._process.start()

    def stop_server(self) -> None:
        """Forcefully stop the server process started by ``start_server``."""
        if self._process is None:
            raise RuntimeError("Server is not running. Call `StaticWebFrontend.start_server()` first.")
        self._process.kill()
def METHOD_NAME():
    """Health check endpoint used in the cloud FastAPI servers to check the status periodically."""
    return dict(status="ok")
def _start_server(
    serve_dir: str, host: str = "localhost", port: int = -1, path: str = "/", log_file: str = "", root_path: str = ""
) -> None:
    """Run a uvicorn server that serves *serve_dir* as static files under *path*.

    A ``healthz`` route is registered under *path* for cloud health checks.
    A *port* of -1 selects a random free port; when *log_file* is given, all
    uvicorn log output is redirected to it.
    """
    if port == -1:
        port = find_free_network_port()
    fastapi_service = FastAPI()
    fastapi_service.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    # trailing / is required for urljoin to properly join the path. In case of
    # multiple trailing /, urljoin removes them
    fastapi_service.get(urljoin(f"{path}/", "healthz"), status_code=200)(METHOD_NAME)
    fastapi_service.mount(urljoin(path, root_path), StaticFiles(directory=serve_dir, html=True), name="static")
    log_config = _get_log_config(log_file) if log_file else uvicorn.config.LOGGING_CONFIG
    uvicorn.run(app=fastapi_service, host=host, port=port, log_config=log_config, root_path=root_path)
def _get_log_config(log_file: str) -> dict:
"""Returns a logger configuration in the format expected by uvicorn that sends all logs to the given logfile."""
# Modified from the default config found in uvicorn.config.LOGGING_CONFIG
return {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"()": "uvicorn.logging.DefaultFormatter",
"fmt": "%(levelprefix)s %(message)s",
"use_colors": False,
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.FileHandler",
"filename": log_file,
},
},
"loggers": {
"uvicorn": {"handlers": ["default"], "level": "INFO", "propagate": False},
"uvicorn.error": {"handlers": ["default"], "level": "INFO", "propagate": False},
"uvicorn.access": {"handlers": ["default"], "level": "INFO", "propagate": False},
},
}
if __name__ == "__main__":  # pragma: no-cover
    parser = ArgumentParser()
    parser.add_argument("serve_dir", type=str)
    # nargs="?" makes root_path genuinely optional; without it argparse
    # ignores default="" and requires the positional argument.
    parser.add_argument("root_path", type=str, nargs="?", default="")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=-1)
    args = parser.parse_args()
    _start_server(serve_dir=args.serve_dir, host=args.host, port=args.port, root_path=args.root_path)
7,038 | on click | import pytest
import reactpy
from reactpy.core.events import (
EventHandler,
merge_event_handler_funcs,
merge_event_handlers,
to_event_handler_function,
)
from reactpy.testing import DisplayFixture, poll
from tests.tooling.common import DEFAULT_TYPE_DELAY
def test_event_handler_repr():
    """repr() of an EventHandler exposes its function, flags and target."""
    handler = EventHandler(lambda: None)
    assert repr(handler) == (
        f"EventHandler(function={handler.function}, prevent_default=False, "
        f"stop_propagation=False, target={handler.target!r})"
    )


def test_event_handler_props():
    """Constructor keyword flags map one-to-one onto handler attributes."""
    handler_0 = EventHandler(lambda data: None)
    assert handler_0.stop_propagation is False
    assert handler_0.prevent_default is False
    assert handler_0.target is None

    handler_1 = EventHandler(lambda data: None, prevent_default=True)
    assert handler_1.stop_propagation is False
    assert handler_1.prevent_default is True
    assert handler_1.target is None

    handler_2 = EventHandler(lambda data: None, stop_propagation=True)
    assert handler_2.stop_propagation is True
    assert handler_2.prevent_default is False
    assert handler_2.target is None

    handler_3 = EventHandler(lambda data: None, target="123")
    assert handler_3.stop_propagation is False
    assert handler_3.prevent_default is False
    assert handler_3.target == "123"


def test_event_handler_equivalence():
    """Handlers compare equal only when function identity and all flags match."""
    async def func(data):
        return None

    assert EventHandler(func) == EventHandler(func)
    # distinct lambda objects are never equal
    assert EventHandler(lambda data: None) != EventHandler(lambda data: None)
    assert EventHandler(func, stop_propagation=True) != EventHandler(
        func, stop_propagation=False
    )
    assert EventHandler(func, prevent_default=True) != EventHandler(
        func, prevent_default=False
    )
    assert EventHandler(func, target="123") != EventHandler(func, target="456")
async def test_to_event_handler_function():
    """to_event_handler_function adapts sync and async callables, spreading
    event data as positional args or passing it whole."""
    call_args = reactpy.Ref(None)

    async def coro(*args):
        call_args.current = args

    def func(*args):
        call_args.current = args

    # positional_args=True spreads the event data into positional arguments
    await to_event_handler_function(coro, positional_args=True)([1, 2, 3])
    assert call_args.current == (1, 2, 3)
    await to_event_handler_function(func, positional_args=True)([1, 2, 3])
    assert call_args.current == (1, 2, 3)

    # positional_args=False passes the event data as a single argument
    await to_event_handler_function(coro, positional_args=False)([1, 2, 3])
    assert call_args.current == ([1, 2, 3],)
    await to_event_handler_function(func, positional_args=False)([1, 2, 3])
    assert call_args.current == ([1, 2, 3],)


async def test_merge_event_handler_empty_list():
    """Merging zero handlers is an error."""
    with pytest.raises(ValueError, match="No event handlers to merge"):
        merge_event_handlers([])


@pytest.mark.parametrize(
    "kwargs_1, kwargs_2",
    [
        ({"stop_propagation": True}, {"stop_propagation": False}),
        ({"prevent_default": True}, {"prevent_default": False}),
        ({"target": "this"}, {"target": "that"}),
    ],
)
async def test_merge_event_handlers_raises_on_mismatch(kwargs_1, kwargs_2):
    """Handlers with conflicting flags or targets cannot be merged."""
    def func(data):
        return None

    with pytest.raises(ValueError, match="Cannot merge handlers"):
        merge_event_handlers(
            [
                EventHandler(func, **kwargs_1),
                EventHandler(func, **kwargs_2),
            ]
        )


async def test_merge_event_handlers():
    """A single handler is returned unchanged; merged handlers run in order."""
    handler = EventHandler(lambda data: None)
    assert merge_event_handlers([handler]) is handler

    calls = []
    merged_handler = merge_event_handlers(
        [
            EventHandler(lambda data: calls.append("first")),
            EventHandler(lambda data: calls.append("second")),
        ]
    )
    await merged_handler.function({})
    assert calls == ["first", "second"]


def test_merge_event_handler_funcs_empty_list():
    """Merging zero handler functions is an error."""
    with pytest.raises(ValueError, match="No event handler functions to merge"):
        merge_event_handler_funcs([])


async def test_merge_event_handler_funcs():
    """A single function is returned unchanged; merged functions run in order."""
    calls = []

    async def some_func(data):
        calls.append("some_func")

    async def some_other_func(data):
        calls.append("some_other_func")

    assert merge_event_handler_funcs([some_func]) is some_func

    merged_handler = merge_event_handler_funcs([some_func, some_other_func])
    await merged_handler([])
    assert calls == ["some_func", "some_other_func"]
async def test_can_prevent_event_default_operation(display: DisplayFixture):
    """An @event(prevent_default=True) handler suppresses the browser's
    default action (here: the input never receives the typed text)."""
    @reactpy.component
    def Input():
        @reactpy.event(prevent_default=True)
        async def on_key_down(value):
            pass

        return reactpy.html.input({"on_key_down": on_key_down, "id": "input"})

    await display.show(Input)

    inp = await display.page.wait_for_selector("#input")
    await inp.type("hello", delay=DEFAULT_TYPE_DELAY)

    # the default action of updating the element's value did not take place
    assert (await inp.evaluate("node => node.value")) == ""


async def test_simple_click_event(display: DisplayFixture):
    """A click handler fires and the triggered state update re-renders."""
    @reactpy.component
    def Button():
        clicked, set_clicked = reactpy.hooks.use_state(False)

        async def METHOD_NAME(event):
            set_clicked(True)

        if not clicked:
            return reactpy.html.button(
                {"on_click": METHOD_NAME, "id": "click"}, ["Click Me!"]
            )
        else:
            return reactpy.html.p({"id": "complete"}, ["Complete"])

    await display.show(Button)

    button = await display.page.wait_for_selector("#click")
    await button.click()
    await display.page.wait_for_selector("#complete")


async def test_can_stop_event_propagation(display: DisplayFixture):
    """stop_propagation=True keeps a click on the inner div from reaching
    the outer div's handler (which would raise if triggered)."""
    clicked = reactpy.Ref(False)

    @reactpy.component
    def DivInDiv():
        @reactpy.event(stop_propagation=True)
        def inner_click_no_op(event):
            clicked.current = True

        def outer_click_is_not_triggered(event):
            raise AssertionError()

        outer = reactpy.html.div(
            {
                "style": {"height": "35px", "width": "35px", "background_color": "red"},
                "on_click": outer_click_is_not_triggered,
                "id": "outer",
            },
            reactpy.html.div(
                {
                    "style": {
                        "height": "30px",
                        "width": "30px",
                        "background_color": "blue",
                    },
                    "on_click": inner_click_no_op,
                    "id": "inner",
                }
            ),
        )
        return outer

    await display.show(DivInDiv)

    inner = await display.page.wait_for_selector("#inner")
    await inner.click()

    await poll(lambda: clicked.current).until_is(True)
7,039 | validate | #!./.mnist-pytorch/bin/python
import collections
import json
import math
import os
import docker
import fire
import torch
from fedn.utils.pytorchhelper import PytorchHelper
NUM_CLASSES = 10
def _get_data_path():
    """ For test automation using docker-compose. """
    # Figure out FEDn client number from container name
    # (assumes container names end with a single client digit — TODO confirm)
    client = docker.from_env()
    container = client.containers.get(os.environ['HOSTNAME'])
    number = container.name[-1]

    # Return data path
    return f"/var/data/clients/{number}/mnist.pt"
def _compile_model():
# Define model
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(784, 64)
self.fc2 = torch.nn.Linear(64, 32)
self.fc3 = torch.nn.Linear(32, 10)
def forward(self, x):
x = torch.nn.functional.relu(self.fc1(x.reshape(x.size(0), 784)))
x = torch.nn.functional.dropout(x, p=0.5, training=self.training)
x = torch.nn.functional.relu(self.fc2(x))
x = torch.nn.functional.log_softmax(self.fc3(x), dim=1)
return x
# Return model
return Net()
def _load_data(data_path, is_train=True):
    """Load the MNIST tensors from *data_path* (or the docker-derived default).

    Returns:
        (X, y): image tensor normalized to [0, 1] and the label tensor.
    """
    # Load data
    if data_path is None:
        data = torch.load(_get_data_path())
    else:
        data = torch.load(data_path)
    if is_train:
        X = data['x_train']
        y = data['y_train']
    else:
        X = data['x_test']
        y = data['y_test']

    # Normalize pixel values from [0, 255] to [0, 1]
    X = X / 255

    return X, y
def _save_model(model, out_path):
    """Serialize *model*'s weights as numpy arrays and write them via PytorchHelper.

    Args:
        model: Trained torch.nn.Module.
        out_path: Destination file path.
    """
    weights = model.state_dict()
    weights_np = collections.OrderedDict()
    for w in weights:
        # Detach to plain numpy so the stored format is torch-free.
        weights_np[w] = weights[w].cpu().detach().numpy()
    helper = PytorchHelper()
    # Bug fix: save the converted numpy dict, not the raw torch tensors.
    # The original passed `weights`, leaving `weights_np` unused, while
    # _load_model expects numpy arrays from helper.load_model and re-wraps
    # them with torch.tensor().
    helper.save_model(weights_np, out_path)
def _load_model(model_path):
    """Read numpy weights from *model_path* and return an eval-mode model."""
    helper = PytorchHelper()
    weights_np = helper.load_model(model_path)
    # Re-wrap each stored numpy array as a torch tensor, preserving key order.
    state = collections.OrderedDict(
        (key, torch.tensor(weights_np[key])) for key in weights_np
    )
    model = _compile_model()
    model.load_state_dict(state)
    model.eval()
    return model
def init_seed(out_path='seed.npz'):
    """Create a freshly initialized model and write it as the federation seed."""
    # Init and save
    model = _compile_model()
    _save_model(model, out_path)
def train(in_model_path, out_model_path, data_path=None, batch_size=32, epochs=1, lr=0.01):
    """Train the model on local MNIST data and write the updated weights.

    Args:
        in_model_path: Path to the incoming (global) model weights.
        out_model_path: Where to write the locally updated weights.
        data_path: Optional explicit data file; defaults to the docker-derived path.
        batch_size, epochs, lr: Plain SGD hyperparameters.
    """
    # Load data
    x_train, y_train = _load_data(data_path)

    # Load model
    # NOTE(review): _load_model() returns the model in eval mode, so dropout is
    # inactive during this training loop — confirm whether model.train() should
    # be called here.
    model = _load_model(in_model_path)

    # Train
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    n_batches = int(math.ceil(len(x_train) / batch_size))
    criterion = torch.nn.NLLLoss()
    for e in range(epochs):  # epoch loop
        for b in range(n_batches):  # batch loop
            # Retrieve current batch
            batch_x = x_train[b * batch_size:(b + 1) * batch_size]
            batch_y = y_train[b * batch_size:(b + 1) * batch_size]
            # Train on batch
            optimizer.zero_grad()
            outputs = model(batch_x)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()
            # Log
            if b % 100 == 0:
                print(
                    f"Epoch {e}/{epochs-1} | Batch: {b}/{n_batches-1} | Loss: {loss.item()}")

    # Save
    _save_model(model, out_model_path)
def METHOD_NAME(in_model_path, out_json_path, data_path=None):
    """Evaluate the model on both splits and write loss/accuracy metrics as JSON.

    Args:
        in_model_path: Path to the model weights to evaluate.
        out_json_path: Destination for the JSON metrics report.
        data_path: Optional explicit data file; defaults to the docker-derived path.
    """
    # Load data
    x_train, y_train = _load_data(data_path)
    x_test, y_test = _load_data(data_path, is_train=False)

    # Load model
    model = _load_model(in_model_path)

    # Evaluate
    criterion = torch.nn.NLLLoss()
    with torch.no_grad():
        train_out = model(x_train)
        training_loss = criterion(train_out, y_train)
        training_accuracy = torch.sum(torch.argmax(
            train_out, dim=1) == y_train) / len(train_out)
        test_out = model(x_test)
        test_loss = criterion(test_out, y_test)
        test_accuracy = torch.sum(torch.argmax(
            test_out, dim=1) == y_test) / len(test_out)

    # JSON schema expected by the FEDn aggregator
    report = {
        "training_loss": training_loss.item(),
        "training_accuracy": training_accuracy.item(),
        "test_loss": test_loss.item(),
        "test_accuracy": test_accuracy.item(),
    }

    # Save JSON
    with open(out_json_path, "w") as fh:
        fh.write(json.dumps(report))
if __name__ == '__main__':
    # Expose the entry points as a CLI, e.g. `<script> train in.npz out.npz`.
    fire.Fire({
        'init_seed': init_seed,
        'train': train,
        'validate': METHOD_NAME,
        '_get_data_path': _get_data_path,  # for testing
    })
7,040 | serialize instances | from typing import Dict, Any, Union
from pathlib import Path
import os
from labelbox.data.annotation_types.collection import LabelCollection, LabelGenerator
from labelbox.data.serialization.coco.instance_dataset import CocoInstanceDataset
from labelbox.data.serialization.coco.panoptic_dataset import CocoPanopticDataset
def create_path_if_not_exists(path: Union[Path, str],
                              ignore_existing_data=False):
    """Ensure *path* exists as a directory that is safe to write exports into.

    Args:
        path: Directory to create (missing parents are created too).
        ignore_existing_data: When False, an existing non-empty directory is
            rejected so previous exports are not clobbered.
    Returns:
        The directory as a Path.
    Raises:
        ValueError: If the directory already holds files and
            ignore_existing_data is False.
    """
    path = Path(path)
    if not path.exists():
        path.mkdir(parents=True, exist_ok=True)
    elif not ignore_existing_data and os.listdir(path):
        # Fixed a stray double backtick in the original error message.
        raise ValueError(
            f"Directory `{path}` must be empty. Or set `ignore_existing_data=True`"
        )
    return path
def validate_path(path: Union[Path, str], name: str):
    """Return *path* as a Path, raising ValueError when it does not exist."""
    resolved = Path(path)
    if resolved.exists():
        return resolved
    raise ValueError(f"{name} `{resolved}` must exist")
class COCOConverter:
    """
    Class for converting between coco and labelbox formats
    Note that this class is only compatible with image data.

    Subclasses are currently ignored.
    To use subclasses, manually flatten them before using the converter.
    """

    @staticmethod
    def METHOD_NAME(labels: LabelCollection,
                    image_root: Union[Path, str],
                    ignore_existing_data=False,
                    max_workers=8) -> Dict[str, Any]:
        """
        Convert a Labelbox LabelCollection into an mscoco dataset.
        This function will only convert masks, polygons, and rectangles.
        Masks will be converted into individual instances.
        Use deserialize_panoptic to prevent masks from being split apart.

        Args:
            labels: A collection of labels to convert
            image_root: Where to save images to
            ignore_existing_data: Whether or not to raise an exception if images already exist.
                This exists only to support detectons panoptic fpn model which requires two mscoco payloads for the same images.
            max_workers : Number of workers to process dataset with. A value of 0 will process all data in the main process
        Returns:
            A dictionary containing labels in the coco object format.
        """
        image_root = create_path_if_not_exists(image_root, ignore_existing_data)
        return CocoInstanceDataset.from_common(labels=labels,
                                               image_root=image_root,
                                               max_workers=max_workers).dict()

    @staticmethod
    def serialize_panoptic(labels: LabelCollection,
                           image_root: Union[Path, str],
                           mask_root: Union[Path, str],
                           all_stuff: bool = False,
                           ignore_existing_data=False,
                           max_workers: int = 8) -> Dict[str, Any]:
        """
        Convert a Labelbox LabelCollection into an mscoco panoptic dataset.
        This function will only convert masks, polygons, and rectangles.
        Use deserialize_panoptic to prevent masks from being split apart.

        Args:
            labels: A collection of labels to convert
            image_root: Where to save images to
            mask_root: Where to save segmentation masks to
            all_stuff: If rectangle or polygon annotations are encountered, they will be treated as instances.
                To convert them to stuff class set `all_stuff=True`.
            ignore_existing_data: Whether or not to raise an exception if images already exist.
                This exists only to support detectons panoptic fpn model which requires two mscoco payloads for the same images.
            max_workers : Number of workers to process dataset with. A value of 0 will process all data in the main process.
        Returns:
            A dictionary containing labels in the coco panoptic format.
        """
        image_root = create_path_if_not_exists(image_root, ignore_existing_data)
        mask_root = create_path_if_not_exists(mask_root, ignore_existing_data)
        return CocoPanopticDataset.from_common(labels=labels,
                                               image_root=image_root,
                                               mask_root=mask_root,
                                               all_stuff=all_stuff,
                                               max_workers=max_workers).dict()

    @staticmethod
    def deserialize_panoptic(json_data: Dict[str, Any], image_root: Union[Path,
                                                                          str],
                             mask_root: Union[Path, str]) -> LabelGenerator:
        """
        Convert coco panoptic data into the labelbox format (as a LabelGenerator).

        Args:
            json_data: panoptic data as a dict
            image_root: Path to local images that are referenced by the panoptic json
            mask_root: Path to local segmentation masks that are referenced by the panoptic json
        Returns:
            LabelGenerator
        """
        image_root = validate_path(image_root, 'image_root')
        mask_root = validate_path(mask_root, 'mask_root')
        objs = CocoPanopticDataset(**json_data)
        gen = objs.to_common(image_root, mask_root)
        return LabelGenerator(data=gen)

    @staticmethod
    def deserialize_instances(json_data: Dict[str, Any],
                              image_root: Path) -> LabelGenerator:
        """
        Convert coco object data into the labelbox format (as a LabelGenerator).

        Args:
            json_data: coco object data as a dict
            image_root: Path to local images that are referenced by the coco object json
        Returns:
            LabelGenerator
        """
        image_root = validate_path(image_root, 'image_root')
        objs = CocoInstanceDataset(**json_data)
        gen = objs.to_common(image_root)
        return LabelGenerator(data=gen)
7,041 | add script | from lost.db import model
# from celery.utils.log import get_task_logger
# from celery import task
from lostconfig import LOSTConfig
from lost.db.access import DBMan
from datetime import datetime, timedelta
import json
def register_worker(dbm, lostconfig):
    """Insert a fresh Worker row for this environment and commit it."""
    new_worker = model.Worker(
        env_name=lostconfig.env_name,
        worker_name=lostconfig.worker_name,
        timestamp=datetime.utcnow(),
        register_timestamp=datetime.utcnow(),
        resources='[]',
    )
    dbm.add(new_worker)
    dbm.commit()
def init_worker_on_startup():
    """Register this container's worker row, or reset its state if it already exists."""
    lostconfig = LOSTConfig()
    dbm = DBMan(lostconfig)
    worker = dbm.get_worker(lostconfig.worker_name)
    if worker is None:
        register_worker(dbm, lostconfig)
        print('Registered worker: {}'.format(lostconfig.worker_name))
    else:
        # Known worker: refresh the heartbeat and clear any stale lock /
        # in-progress bookkeeping left over from a previous run.
        worker.timestamp = datetime.utcnow()
        worker.resources = '[]'
        worker.in_progress = '{}'
        dbm.add(worker)
        dbm.commit()
        print('Reset worker on startup: {}'.format(worker.worker_name))
    dbm.close_session()
def send_life_sign():
    """Heartbeat: refresh this worker's timestamp (registering it if missing)."""
    # logger = get_task_logger(__name__)
    lostconfig = LOSTConfig()
    dbm = DBMan(lostconfig)
    worker = dbm.get_worker(lostconfig.worker_name)
    if worker is None:
        register_worker(dbm, lostconfig)
        # logger.info('Registered worker: {}'.format(lostconfig.worker_name))
    else:
        worker.timestamp = datetime.utcnow()
        dbm.add(worker)
        dbm.commit()
        #logger.info('Sent lifesign: {}'.format(worker.worker_name))
    dbm.close_session()
class WorkerMan(object):
    '''Helpers for querying the pool of LOST workers'''

    def __init__(self, dbm, lostconfig):
        self.dbm = dbm
        self.lostconfig = lostconfig

    def get_living_worker(self):
        '''Get list of workers that are alive

        Returns:
            list of :class:`model.Worker`
        '''
        cutoff = datetime.utcnow() - timedelta(
            seconds=int(self.lostconfig.worker_timeout))
        # A worker is alive if its last life sign is newer than the cutoff.
        return [w for w in self.dbm.get_worker() if w.timestamp > cutoff]

    def get_worker_envs(self):
        '''Get the set of envs from all living workers

        Returns:
            set of str
        '''
        return {w.env_name for w in self.get_living_worker()}
class CurrentWorker(object):
    '''Class that represents the current worker in which this code is executed
    '''
    def __init__(self, dbm, lostconfig):
        self.dbm = dbm
        self.lostconfig = lostconfig
        # Cache this container's Worker row; refreshed under a DB lock by the
        # add/remove bookkeeping methods.
        self.worker = self.get_worker()
    def get_worker(self):
        '''Get the worker row of this container

        Returns:
            :class:`model.Worker`
        '''
        return self.dbm.get_worker(self.lostconfig.worker_name)
    def _lock_worker(self):
        '''Lock this worker so that only the current script is executed and no others'''
        self.worker.resources = json.dumps(['lock_all'])
    def _unlock_worker(self):
        # Clear the resource list, releasing a 'lock_all' reservation.
        self.worker.resources = json.dumps([])
    def METHOD_NAME(self, pipe_e, script):
        '''Add a script that is currently executed by this worker

        Args:
            pipe_e (:class:`model.PipeElement`): Pipeline element that is related to script.
            script (:class:`model.Script`): Script that is executed.
        '''
        # Re-read the row under a DB lock so concurrent bookkeeping is safe.
        self.worker = self.dbm.get_worker_and_lock(self.lostconfig.worker_name)
        if self.worker.in_progress is not None:
            scripts = json.loads(self.worker.in_progress)
        else:
            scripts = {}
        # Note: json.dumps converts the int key to a string, so readers must
        # look it up as str(pipe_e.idx).
        scripts[pipe_e.idx] = script.path
        self.worker.in_progress = json.dumps(scripts)
        # A script may reserve the whole worker via a 'lock_all' resource entry.
        if script.resources:
            if 'lock_all' in json.loads(script.resources):
                self._lock_worker()
        self.dbm.add(self.worker)
        self.dbm.commit()
def remove_script(self, pipe_e, script):
'''Remove a script that has finished
Args:
script (:class:`model.PipeElement`): Pipeline element that is related to script.
script (:class:`model.Script`): Script that is executed.
'''
self.worker = self.dbm.get_worker_and_lock(self.lostconfig.worker_name)
if self.worker.in_progress is not None:
scripts = json.loads(self.worker.in_progress)
else:
return
try:
scripts.pop(str(pipe_e.idx))
except:
print('Could not find pipe_element id to remove script from worker!')
self.worker.in_progress = json.dumps(scripts)
if self.worker.resources:
if 'lock_all' in json.loads(script.resources):
self._unlock_worker()
self.dbm.add(self.worker)
self.dbm.commit()
def enough_resources(self, script):
'''Check if this worker has enough resources to execute a script.
Args:
script (:class:`model.Script`): Script that is executed.
Returns:
bool: True if worker has enough resources to execute script.
'''
worker = self.worker
if worker.resources:
res = json.loads(worker.resources)
if 'lock_all' in res:
return False
else:
return True
else:
return Tru |
7,042 | set active axis | # Copyright (c) 2019 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from typing import Optional
from enum import IntEnum
import random
from UM.Logger import Logger
from UM.Mesh.MeshData import MeshData
from . import SceneNode
from UM.Resources import Resources
from UM.Application import Application
from UM.Math.Color import Color
from UM.Math.Vector import Vector
from UM.Scene.Selection import Selection
from UM.View.GL.OpenGL import OpenGL
from UM.View.RenderBatch import RenderBatch
class ToolHandle(SceneNode.SceneNode):
    """A tool handle is an object in the scene that gives visual cues for what the tool it is
    'paired' with can do. ToolHandles are, for example, used for translation, rotation & scale handles.
    They can also be used as actual objects to interact with (in the case of translation,
    pressing one arrow of the toolhandle locks the translation in that direction)
    """
NoAxis = 1
XAxis = 2
YAxis = 3
ZAxis = 4
AllAxis = 5
# These colors are used to draw the selection pass only. They must be unique, which is
# why we cannot rely on themed colors
DisabledSelectionColor = Color(0.5, 0.5, 0.5, 1.0)
XAxisSelectionColor = Color(1.0, 0.0, 0.0, 1.0)
YAxisSelectionColor = Color(0.0, 0.0, 1.0, 1.0)
ZAxisSelectionColor = Color(0.0, 1.0, 0.0, 1.0)
AllAxisSelectionColor = Color(1.0, 1.0, 1.0, 1.0)
    class ExtraWidgets(IntEnum):
        """ToolHandle subclasses can optionally register additional widgets by overriding this enum.

        The values should start at ToolHandle.AllAxis + 1 in order not to overlap with the native axes.
        """
        pass
    def __init__(self, parent = None):
        super().__init__(parent)

        # Themed axis colors; filled in once the engine exists (_onEngineCreated).
        self._disabled_axis_color = None
        self._x_axis_color = None
        self._y_axis_color = None
        self._z_axis_color = None
        self._all_axis_color = None

        self._axis_color_map = {}
        self._extra_widgets_color_map = {}

        self._scene = Application.getInstance().getController().getScene()

        # Meshes are built lazily by buildMesh() in subclasses.
        self._solid_mesh = None  # type: Optional[MeshData]
        self._line_mesh = None  # type: Optional[MeshData]
        self._selection_mesh = None  # type: Optional[MeshData]
        # Shader is compiled lazily on first render().
        self._shader = None

        self._active_axis = None  # type: Optional[int]

        # Auto scale is used to ensure that the tool handle will end up the same size on the camera no matter the zoom
        # This should be used to ensure that the tool handles are still usable even if the camera is zoomed in all the way.
        self._auto_scale = True

        self._enabled = False

        self.setCalculateBoundingBox(False)
        Selection.selectionCenterChanged.connect(self._onSelectionCenterChanged)
        Application.getInstance().engineCreatedSignal.connect(self._onEngineCreated)
    def getLineMesh(self) -> Optional[MeshData]:
        """Return the mesh rendered as lines, or None if not built yet."""
        return self._line_mesh
    def setLineMesh(self, mesh: MeshData) -> None:
        """Set the line mesh and notify listeners that mesh data changed."""
        self._line_mesh = mesh
        self.meshDataChanged.emit(self)
    def getSolidMesh(self) -> Optional[MeshData]:
        """Return the solid (filled) mesh, or None if not built yet."""
        return self._solid_mesh
    def setSolidMesh(self, mesh: MeshData) -> None:
        """Set the solid mesh and notify listeners that mesh data changed."""
        self._solid_mesh = mesh
        self.meshDataChanged.emit(self)
    def getSelectionMesh(self) -> Optional[MeshData]:
        """Return the mesh used for the selection pass, or None if not built yet."""
        return self._selection_mesh
    def setSelectionMesh(self, mesh: MeshData) -> None:
        """Set the selection-pass mesh and notify listeners that mesh data changed."""
        self._selection_mesh = mesh
        self.meshDataChanged.emit(self)
    def render(self, renderer) -> bool:
        """Queue this handle's meshes as overlays; returns True to mark rendering handled."""
        if not self._enabled:
            return True

        if not self._shader:
            # Compile the handle shader lazily on first render.
            self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "toolhandle.shader"))

        if self._auto_scale:
            # Keep an approximately constant on-screen size regardless of zoom.
            # NOTE(review): getActiveCamera() could presumably return None here
            # (e.g. during teardown) — confirm callers guarantee a camera.
            active_camera = self._scene.getActiveCamera()
            if active_camera.isPerspective():
                camera_position = active_camera.getWorldPosition()
                dist = (camera_position - self.getWorldPosition()).length()
                scale = dist / 400
            else:
                view_width = active_camera.getViewportWidth()
                current_size = view_width + (2 * active_camera.getZoomFactor() * view_width)
                scale = current_size / view_width * 5
            self.setScale(Vector(scale, scale, scale))

        if self._line_mesh:
            renderer.queueNode(self, mesh = self._line_mesh, mode = RenderBatch.RenderMode.Lines, overlay = True, shader = self._shader)
        if self._solid_mesh:
            renderer.queueNode(self, mesh = self._solid_mesh, overlay = True, shader = self._shader)

        return True
    def METHOD_NAME(self, axis: Optional[int]) -> None:
        """Highlight *axis* as the active handle part by updating the shader color.

        No-op when the axis is unchanged or the shader has not been compiled yet
        (the shader is created lazily in render()); in the latter case the axis
        change is dropped, not deferred — NOTE(review): confirm that is intended.
        """
        if axis == self._active_axis or not self._shader:
            return

        if axis:
            self._shader.setUniformValue("u_activeColor", self._axis_color_map.get(axis, Color()))
        else:
            # axis is None: fall back to the disabled color.
            self._shader.setUniformValue("u_activeColor", self._disabled_axis_color)
        self._active_axis = axis
        self._scene.sceneChanged.emit(self)
    def getActiveAxis(self) -> Optional[int]:
        """Return the currently highlighted axis identifier, or None."""
        return self._active_axis
    def isAxis(self, value) -> bool:
        """Return whether *value* is one of the registered axis identifiers."""
        return value in self._axis_color_map
    def getExtraWidgetsColorMap(self):
        """Return the mapping of ExtraWidgets values to their selection-pass colors."""
        return self._extra_widgets_color_map
    def buildMesh(self) -> None:
        # This method should be overridden by toolhandle implementations to
        # construct the line/solid/selection meshes.
        pass
    def _onSelectionCenterChanged(self) -> None:
        """Keep the handle positioned at the center of the current selection."""
        if self._enabled:
            self.setPosition(Selection.getSelectionCenter())
    def setEnabled(self, enable: bool):
        super().setEnabled(enable)
        # Force an update so the handle snaps to the selection immediately.
        self._onSelectionCenterChanged()
    def _onEngineCreated(self) -> None:
        """Resolve themed axis colors and build the handle meshes once the Qt engine exists."""
        # Imported here to avoid a hard dependency on Qt at module import time.
        from UM.Qt.QtApplication import QtApplication
        theme = QtApplication.getInstance().getTheme()
        if theme is None:
            Logger.log("w", "Could not get theme, so unable to create tool handle meshes.")
            return
        self._disabled_axis_color = Color(*theme.getColor("disabled_axis").getRgb())
        self._x_axis_color = Color(*theme.getColor("x_axis").getRgb())
        self._y_axis_color = Color(*theme.getColor("y_axis").getRgb())
        self._z_axis_color = Color(*theme.getColor("z_axis").getRgb())
        self._all_axis_color = Color(*theme.getColor("all_axis").getRgb())

        self._axis_color_map = {
            self.NoAxis: self._disabled_axis_color,
            self.XAxis: self._x_axis_color,
            self.YAxis: self._y_axis_color,
            self.ZAxis: self._z_axis_color,
            self.AllAxis: self._all_axis_color
        }

        # Give every extra widget a unique random color for the selection pass.
        for value in self.ExtraWidgets:
            self._extra_widgets_color_map[value] = self._getUnusedColor()

        self.buildMesh()
def _getUnusedColor(self):
while True:
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
a = 255
color = Color(r, g, b, a)
if color not in self._axis_color_map.values() and color not in self._extra_widgets_color_map.values():
break
return colo |
7,043 | overfeat arg scope | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# Shorthand for a zero-mean truncated-normal weight initializer.
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def METHOD_NAME(weight_decay=0.0005):
  """Return the default arg scope for OverFeat.

  Applies ReLU activations, L2 weight decay and zero bias initialization to
  conv/fc layers, with SAME-padded convolutions and VALID-padded max-pools.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME'):
      # The innermost scope aggregates all three layers' defaults.
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      # Feature extractor: five conv blocks with interleaved max-pools.
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers.conv2d(net, 512, [3, 3], scope='conv3')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        # Final classifier layer: linear (no activation/normalization).
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Collapse the 1x1 spatial dims so classification outputs are [batch, classes].
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
7,044 | filter code | from typing import List
from uuid import UUID
import django_filters
import graphene
from django.db.models import Exists, OuterRef, Q
from graphql.error import GraphQLError
from ...account import models as account_models
from ...giftcard import models
from ...order import models as order_models
from ...product import models as product_models
from ..core.doc_category import DOC_CATEGORY_GIFT_CARDS
from ..core.filters import (
GlobalIDMultipleChoiceFilter,
ListObjectTypeFilter,
MetadataFilterBase,
ObjectTypeFilter,
)
from ..core.types import (
BaseInputObjectType,
FilterInputObjectType,
NonNullList,
PriceRangeInput,
)
from ..utils import resolve_global_ids_to_primary_keys
from .enums import GiftCardEventsEnum
def filter_products(qs, _, value):
    """Filter gift cards to those tied to any of the given product global IDs."""
    if value:
        _, product_pks = resolve_global_ids_to_primary_keys(value, "Product")
        qs = filter_gift_cards_by_products(qs, product_pks)
    return qs
def filter_gift_cards_by_products(qs, product_ids):
    """Keep gift cards whose product is in *product_ids* (EXISTS subquery)."""
    products = product_models.Product.objects.filter(pk__in=product_ids)
    return qs.filter(Exists(products.filter(pk=OuterRef("product_id"))))
def filter_used_by(qs, _, value):
    """Filter gift cards to those used by any of the given user global IDs."""
    if value:
        _, user_pks = resolve_global_ids_to_primary_keys(value, "User")
        qs = filter_gift_cards_by_used_by_user(qs, user_pks)
    return qs
def filter_gift_cards_by_used_by_user(qs, user_pks):
    """Keep gift cards whose used_by user is in *user_pks* (EXISTS subquery)."""
    users = account_models.User.objects.filter(pk__in=user_pks)
    return qs.filter(Exists(users.filter(pk=OuterRef("used_by_id"))))
def filter_tags_list(qs, _, value):
    """Keep gift cards tagged with any of the given tag names."""
    if not value:
        return qs
    tags = models.GiftCardTag.objects.filter(name__in=value)
    return qs.filter(Exists(tags.filter(pk=OuterRef("tags__id"))))
def filter_gift_card_used(qs, _, value):
    """Filter by whether a card has been used; None means no filtering.

    "Used" is derived from used_by_email being non-null.
    """
    if value is None:
        return qs
    return qs.filter(used_by_email__isnull=not value)
def filter_currency(qs, _, value):
    """Restrict the queryset to gift cards in the given currency; empty value is a no-op."""
    return qs.filter(currency=value) if value else qs
def _filter_by_price(qs, field, value):
    """Apply lte/gte bounds from *value* to the ``<field>_amount`` column.

    Uses explicit ``is not None`` checks so a legitimate bound of 0 is applied
    (the previous truthiness test via the walrus operator silently dropped it).
    """
    lookup = {}
    lte = value.get("lte")
    if lte is not None:
        lookup[f"{field}_amount__lte"] = lte
    gte = value.get("gte")
    if gte is not None:
        lookup[f"{field}_amount__gte"] = gte
    return qs.filter(**lookup)
def METHOD_NAME(qs, _, value):
    """Match gift cards by their exact code; empty value is a no-op."""
    return qs.filter(code=value) if value else qs
def filter_created_by_email(qs, _, value):
    """Match gift cards by the exact creator email; empty value is a no-op."""
    return qs.filter(created_by_email=value) if value else qs
class GiftCardFilter(MetadataFilterBase):
    """django-filter filterset backing the GiftCardFilterInput GraphQL type."""

    tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)
    products = GlobalIDMultipleChoiceFilter(method=filter_products)
    used_by = GlobalIDMultipleChoiceFilter(method=filter_used_by)
    used = django_filters.BooleanFilter(method=filter_gift_card_used)
    currency = django_filters.CharFilter(method=filter_currency)
    current_balance = ObjectTypeFilter(
        input_class=PriceRangeInput, method="filter_current_balance"
    )
    initial_balance = ObjectTypeFilter(
        input_class=PriceRangeInput, method="filter_initial_balance"
    )
    is_active = django_filters.BooleanFilter()
    code = django_filters.CharFilter(method=METHOD_NAME)
    created_by_email = django_filters.CharFilter(method=filter_created_by_email)

    class Meta:
        model = models.GiftCard
        fields = ["is_active"]

    def filter_current_balance(self, queryset, name, value):
        # Price ranges are meaningless across currencies, so `currency` is required.
        check_currency_in_filter_data(self.data)
        return _filter_by_price(queryset, "current_balance", value)

    def filter_initial_balance(self, queryset, name, value):
        check_currency_in_filter_data(self.data)
        return _filter_by_price(queryset, "initial_balance", value)
def check_currency_in_filter_data(filter_data: dict):
    """Raise when a price filter is used without the required `currency` key."""
    if not filter_data.get("currency"):
        raise GraphQLError(
            "You must provide a `currency` filter parameter for filtering by price."
        )
class GiftCardFilterInput(FilterInputObjectType):
    # GraphQL input type generated from GiftCardFilter.
    class Meta:
        doc_category = DOC_CATEGORY_GIFT_CARDS
        filterset_class = GiftCardFilter
def filter_events_by_type(
    events: "List[models.GiftCardEvent]", type_value: str
) -> "List[models.GiftCardEvent]":
    """Return the events whose ``type`` equals *type_value*, preserving order.

    Replaces the manual append loop with an equivalent list comprehension.
    """
    return [event for event in events if event.type == type_value]
def filter_events_by_orders(
    events: "List[models.GiftCardEvent]", order_ids: "List[str]"
) -> "List[models.GiftCardEvent]":
    """Return events whose ``order_id`` belongs to one of *order_ids*.

    The GraphQL global IDs are resolved to primary keys once; membership is
    then tested with a list comprehension (replacing the manual append loop).
    """
    order_pks = _get_order_pks(order_ids)
    return [event for event in events if event.order_id in order_pks]
def _get_order_pks(order_ids: List[str]):
    """Resolve order global IDs to database primary keys.

    New-style order PKs are UUIDs; any id that does not parse as a UUID is
    treated as a legacy numeric order `number` (matched only for orders
    flagged with use_old_id=True).
    """
    _, order_pks = resolve_global_ids_to_primary_keys(order_ids, "Order")

    pks = []
    old_pks = []
    for pk in order_pks:
        try:
            pks.append(UUID(pk))
        except ValueError:
            old_pks.append(pk)

    return order_models.Order.objects.filter(
        Q(id__in=pks) | (Q(use_old_id=True) & Q(number__in=old_pks))
    ).values_list("id", flat=True)
class GiftCardEventFilterInput(BaseInputObjectType):
    # Filtering options for a gift card's event log.
    type = graphene.Argument(GiftCardEventsEnum)
    orders = NonNullList(graphene.ID)

    class Meta:
        doc_category = DOC_CATEGORY_GIFT_CARDS
def filter_gift_card_tag_search(qs, _, value):
    """Search gift card tags by name; empty value is a no-op.

    NOTE(review): `ilike` is not a built-in Django lookup — presumably a
    project-registered Postgres lookup; confirm it is registered.
    """
    if not value:
        return qs
    return qs.filter(name__ilike=value)
class GiftCardTagFilter(django_filters.FilterSet):
    # Free-text search over tag names.
    search = django_filters.CharFilter(method=filter_gift_card_tag_search)
class GiftCardTagFilterInput(FilterInputObjectType):
    # GraphQL input type generated from GiftCardTagFilter.
    class Meta:
        doc_category = DOC_CATEGORY_GIFT_CARDS
        filterset_class = GiftCardTagFilter
7,045 | test final envvars | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from unittest import TestCase
from mock import patch, Mock
from ebcli.containers.envvarcollector import EnvvarCollector
from ebcli.containers.preconfigured_container import PreconfiguredContainer
from tests.unit.containers import dummy
# Shared fixtures: OPT_ENV is what the container is constructed with,
# SETENV_ENV mimics per-test environment overrides, and EXPECTED_ENVVARS_MAP
# is the merged result passed to `docker run`.
OPT_ENV = EnvvarCollector({'a': '1', 'b': '555'})
SETENV_ENV = EnvvarCollector({'a': '350'})
EXPECTED_ENVVARS_MAP = {'a': '1', 'b': '555'}
IMG_ID = '12345'
class TestAbstractContainer(TestCase):
    """Unit tests for container start/identity logic with mocked docker commands."""

    def setUp(self):
        self.fs_handler = dummy.get_container_fs_handler()
        self.pathconfig = self.fs_handler.pathconfig
        self.cnt = PreconfiguredContainer(fs_handler=self.fs_handler,
                                          soln_stk=Mock(),
                                          container_cfg={},
                                          opt_env=OPT_ENV)
        # Stub out the expensive/IO-bound internals; individual tests flip
        # these return values to exercise specific start() branches.
        self.cnt._get_log_volume_map = Mock(return_value=None)
        self.cnt._containerize = Mock(return_value=False)
        self.cnt._require_pull = Mock(return_value=False)

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_check_new_dockerfie_creation_when_required(self, commands):
        self.fs_handler.require_new_dockerfile.return_value = True
        self.cnt.start()
        self.cnt._containerize.assert_called_once_with()

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_check_new_dockerfie_no_creation(self, commands):
        self.fs_handler.require_new_dockerfile.return_value = False
        self.cnt.start()
        self.assertFalse(self.cnt._containerize.called)

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_pull_required(self, commands):
        self.cnt._require_pull.return_value = True
        self.pathconfig.dockerfile_exists = lambda: False
        self.cnt.start()

        # We expect image pulled from Dockerfile user provided
        # since we bypassed Dockerfile creation.
        commands.pull_img.assert_called_once_with(dummy.NEW_DOCKERFILE_PATH)

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_pull_not_required(self, commands):
        self.cnt.start()
        self.assertFalse(commands.pull_img.called)

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_container_rm(self, commands):
        self.cnt.start()
        commands.rm_container.assert_called_once_with(self.cnt.get_name(),
                                                      force=True)

    @patch('ebcli.containers.abstractcontainer.commands')
    def test_start_check_all(self, commands):
        self.pathconfig.dockerfile_exists = lambda: False
        self.fs_handler.require_append_dockerignore.return_value = True
        self.fs_handler.require_new_dockerfile.return_value = True
        self.cnt._require_pull.return_value = True
        commands.build_img.return_value = IMG_ID

        self.cnt.start()

        # Should've appended dockerignore, containerize, pull
        # remove existing container, build, and run
        self.fs_handler.append_dockerignore.assert_called_once_with()
        self.cnt._containerize.assert_called_once_with()
        commands.pull_img.assert_called_once_with(dummy.NEW_DOCKERFILE_PATH)
        # NOTE(review): Mock has no `expect_called_once_with`; the two calls
        # below just create child mocks and assert nothing. They were probably
        # meant to be `assert_called_once_with` (and the tests may fail once
        # corrected) — verify the intended call arguments.
        commands.build_img.expect_called_once_with(dummy.DOCKER_PROJ_PATH,
                                                   dummy.NEW_DOCKERFILE_PATH)
        commands.rm_container.assert_called_once_with(self.cnt.get_name(),
                                                      force=True)
        commands.run_container.expect_called_once_with(dummy.NEW_DOCKERFILE_PATH,
                                                       IMG_ID,
                                                       envvars_map=EXPECTED_ENVVARS_MAP)

    def test_get_name(self):
        self.pathconfig.docker_proj_path = lambda: ''
        # This is the result of sha1('')
        expected_hash = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
        self.assertEqual(expected_hash, self.cnt.get_name())

    @patch('ebcli.containers.abstractcontainer.commands.is_running')
    def test_is_running_true(self, is_running):
        is_running.return_value = True
        self.assertTrue(self.cnt.is_running())

    @patch('ebcli.containers.abstractcontainer.commands.is_running')
    def test_is_running_false(self, is_running):
        is_running.return_value = False
        self.assertFalse(self.cnt.is_running())

    def METHOD_NAME(self):
        self.fs_handler.get_setenv_env.return_value = SETENV_ENV
        self.assertDictEqual(EXPECTED_ENVVARS_MAP,
                             self.cnt.final_envvars())
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for MLMD artifact in KFP SDK."""
from typing import Any, Optional
from absl import logging
import importlib
import yaml
from google.protobuf import json_format
from google.protobuf import struct_pb2
from kfp.pipeline_spec import pipeline_spec_pb2
from kfp.deprecated.dsl import serialization_utils
from kfp.deprecated.dsl import artifact_utils
KFP_ARTIFACT_ONTOLOGY_MODULE = 'kfp.dsl.ontology_artifacts'
DEFAULT_ARTIFACT_SCHEMA = 'title: kfp.Artifact\ntype: object\nproperties:\n'
class Artifact(object):
    """KFP Artifact Python class.

    Artifact Python class/object mainly serves following purposes in different
    period of its lifecycle.
    1. During compile time, users can use Artifact class to annotate I/O types
       of their components.
    2. At runtime, Artifact objects provide helper function/utilities to access
       the underlying RuntimeArtifact pb message, and provide additional layers
       of validation to ensure type compatibility for fields specified in the
       instance schema.
    """
    TYPE_NAME = "kfp.Artifact"
    # Initialization flag to support setattr / getattr behavior: while False,
    # attribute writes bypass the metadata store and go straight to the object.
    _initialized = False

    def __init__(self, instance_schema: Optional[str] = None):
        """Constructs an instance of Artifact.

        Sets up self._metadata_fields to perform type checking and
        initializes the underlying RuntimeArtifact proto.

        Args:
            instance_schema: YAML schema for this artifact. Required when
                instantiating the base Artifact class directly; must be
                omitted for subclasses (which carry their own schema).

        Raises:
            ValueError: if instance_schema is missing for the base class, or
                provided for a subclass.
        """
        if self.__class__ == Artifact:
            if not instance_schema:
                raise ValueError(
                    'The "instance_schema" argument must be set for Artifact.')
            self._instance_schema = instance_schema
        else:
            if instance_schema:
                raise ValueError(
                    'The "instance_schema" argument must not be passed for Artifact \
          subclass: {}'.format(self.__class__))
        # setup self._metadata_fields
        self.TYPE_NAME, self._metadata_fields = artifact_utils.parse_schema(
            self._instance_schema)
        # Instantiate a RuntimeArtifact pb message as the POD data structure.
        self._artifact = pipeline_spec_pb2.RuntimeArtifact()
        # Stores the metadata for the Artifact.
        self.metadata = {}
        self._artifact.type.CopyFrom(
            pipeline_spec_pb2.ArtifactTypeSchema(
                instance_schema=self._instance_schema))
        self._initialized = True

    @property
    def type_schema(self) -> str:
        """Gets the instance_schema for this Artifact object."""
        return self._instance_schema

    def __getattr__(self, name: str) -> Any:
        """Custom __getattr__ to allow access to artifact metadata."""
        # NOTE(review): assumes self._metadata_fields is always set by the
        # time attribute lookup falls through to __getattr__ — confirm for
        # partially-initialized instances.
        if name not in self._metadata_fields:
            raise AttributeError(
                'No metadata field: {} in artifact.'.format(name))
        return self.metadata[name]

    def __setattr__(self, name: str, value: Any):
        """Custom __setattr__ to allow access to artifact metadata."""
        if not self._initialized:
            object.__setattr__(self, name, value)
            return
        metadata_fields = {}
        if self._metadata_fields:
            metadata_fields = self._metadata_fields
        # BUG FIX: previously this tested "name not in self._metadata_fields",
        # bypassing the None-guard computed just above.
        if name not in metadata_fields:
            if (name in self.__dict__ or
                    any(name in c.__dict__ for c in self.__class__.mro())):
                # Use any provided getter / setter if available.
                object.__setattr__(self, name, value)
                return
            # In the case where we do not handle this via an explicit getter /
            # setter, we assume that the user implied an artifact attribute store,
            # and we raise an exception since such an attribute was not explicitly
            # defined in the Artifact PROPERTIES dictionary.
            raise AttributeError('Cannot set an unspecified metadata field:{} \
            on artifact. Only fields specified in instance schema can be \
            set.'.format(name))
        # Type checking to be performed during serialization.
        self.metadata[name] = value

    def _update_runtime_artifact(self):
        """Verifies metadata is well-formed and updates artifact instance."""
        artifact_utils.verify_schema_instance(self._instance_schema,
                                              self.metadata)
        if len(self.metadata) != 0:
            metadata_protobuf_struct = struct_pb2.Struct()
            metadata_protobuf_struct.update(self.metadata)
            self._artifact.metadata.CopyFrom(metadata_protobuf_struct)

    @property
    def type(self):
        """Returns the Python class of this artifact."""
        return self.__class__

    @property
    def type_name(self):
        """Returns the artifact type name, e.g. "kfp.Artifact"."""
        return self.TYPE_NAME

    @property
    def uri(self) -> str:
        return self._artifact.uri

    @uri.setter
    def uri(self, uri: str) -> None:
        self._artifact.uri = uri

    @property
    def name(self) -> str:
        return self._artifact.name

    @name.setter
    def name(self, name: str) -> None:
        self._artifact.name = name

    @property
    def runtime_artifact(self) -> pipeline_spec_pb2.RuntimeArtifact:
        # Sync metadata into the proto before handing it out.
        self._update_runtime_artifact()
        return self._artifact

    @runtime_artifact.setter
    def runtime_artifact(self, artifact: pipeline_spec_pb2.RuntimeArtifact):
        self._artifact = artifact

    def serialize(self) -> str:
        """Serializes an Artifact to JSON dict format."""
        self._update_runtime_artifact()
        return json_format.MessageToJson(self._artifact, sort_keys=True)

    @classmethod
    def get_artifact_type(cls) -> str:
        """Gets the instance_schema according to the Python schema spec."""
        result_map = {'title': cls.TYPE_NAME, 'type': 'object'}
        return serialization_utils.yaml_dump(result_map)

    @classmethod
    def get_ir_type(cls) -> pipeline_spec_pb2.ArtifactTypeSchema:
        """Returns the IR ArtifactTypeSchema for this artifact class."""
        return pipeline_spec_pb2.ArtifactTypeSchema(
            instance_schema=cls.get_artifact_type())

    @classmethod
    def get_from_runtime_artifact(
            cls, artifact: pipeline_spec_pb2.RuntimeArtifact) -> Any:
        """Deserializes an Artifact object from RuntimeArtifact message."""
        instance_schema = yaml.safe_load(artifact.type.instance_schema)
        type_name = instance_schema['title'][len('kfp.'):]
        result = None
        try:
            # Prefer the concrete ontology artifact class when available.
            artifact_cls = getattr(
                importlib.import_module(KFP_ARTIFACT_ONTOLOGY_MODULE),
                type_name)
            result = artifact_cls()
        except (AttributeError, ImportError, ValueError):
            logging.warning('Failed to instantiate Ontology Artifact:{} \
            instance'.format(type_name))
        if not result:
            # Otherwise generate a generic Artifact object.
            result = Artifact(instance_schema=artifact.type.instance_schema)
        result.runtime_artifact = artifact
        result.metadata = json_format.MessageToDict(artifact.metadata)
        return result

    @classmethod
    def deserialize(cls, data: str) -> Any:
        """Deserializes an Artifact object from JSON dict."""
        artifact = pipeline_spec_pb2.RuntimeArtifact()
        json_format.Parse(data, artifact, ignore_unknown_fields=True)
        return cls.get_from_runtime_artifact(artifact)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGD optimizer implementation."""
import tensorflow.compat.v2 as tf
from keras.optimizers.legacy import optimizer_v2
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
    "keras.optimizers.legacy.SGD",
    v1=["keras.optimizers.SGD", "keras.optimizers.legacy.SGD"],
)
class SGD(optimizer_v2.OptimizerV2):
    r"""Gradient descent (with momentum) optimizer.

    Update rule for parameter `w` with gradient `g` when `momentum=0`:
    ```python
    w = w - learning_rate * g
    ```
    Update rule when `momentum` is larger than 0:
    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + velocity
    ```
    When `nesterov=True`, this rule becomes:
    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + momentum * velocity - learning_rate * g
    ```

    Args:
      learning_rate: A `Tensor`, floating point value, or a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
        that takes no arguments and returns the actual value to use. The
        learning rate. Defaults to `0.01`.
      momentum: float hyperparameter >= 0 that accelerates gradient descent in
        the relevant direction and dampens oscillations. Vanilla gradient
        descent means no momentum. Defaults to `0.`.
      nesterov: boolean. Whether to apply Nesterov momentum.
        Defaults to `False`.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to `"SGD"`.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set the gradient of all weights is
        clipped so that their global norm is no higher than this value.

    Usage:

    >>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)
    >>> var = tf.Variable(1.0)
    >>> loss = lambda: (var ** 2)/2.0  # d(loss)/d(var1) = var1
    >>> step_count = opt.minimize(loss, [var]).numpy()
    >>> # Step is `- learning_rate * grad`
    >>> var.numpy()
    0.9

    >>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1, momentum=0.9)
    >>> var = tf.Variable(1.0)
    >>> val0 = var.value()
    >>> loss = lambda: (var ** 2)/2.0  # d(loss)/d(var1) = var1
    >>> # First step is `- learning_rate * grad`
    >>> step_count = opt.minimize(loss, [var]).numpy()
    >>> val1 = var.value()
    >>> (val0 - val1).numpy()
    0.1
    >>> # On later steps, step-size increases because of momentum
    >>> step_count = opt.minimize(loss, [var]).numpy()
    >>> val2 = var.value()
    >>> (val1 - val2).numpy()
    0.18

    Reference:
        - For `nesterov=True`, See [Sutskever et al., 2013](
          https://github.com/mlresearch/v28/blob/gh-pages/sutskever13.pdf).
    """

    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self,
        learning_rate=0.01,
        momentum=0.0,
        nesterov=False,
        name="SGD",
        **kwargs,
    ):
        super().__init__(name, **kwargs)
        # "lr" is the legacy alias for learning_rate.
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        # Momentum handling is only enabled when momentum is a tensor,
        # callable, or a positive constant.
        self._momentum = False
        if (
            isinstance(momentum, tf.Tensor)
            or callable(momentum)
            or momentum > 0
        ):
            self._momentum = True
        if isinstance(momentum, (int, float)) and (
            momentum < 0 or momentum > 1
        ):
            raise ValueError(
                "`momentum` must be between [0, 1]. Received: "
                f"momentum={momentum} (of type {type(momentum)})."
            )
        self._set_hyper("momentum", momentum)
        self.nesterov = nesterov

    def _create_slots(self, var_list):
        # One accumulator slot per variable, only when momentum is in use.
        if self._momentum:
            for var in var_list:
                self.add_slot(var, "momentum")

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["momentum"] = tf.identity(
            self._get_hyper("momentum", var_dtype)
        )

    def _resource_apply_dense(self, grad, var, apply_state=None):
        """Applies a dense gradient to `var` via the fused raw ops."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        if self._momentum:
            momentum_var = self.get_slot(var, "momentum")
            return tf.raw_ops.ResourceApplyKerasMomentum(
                var=var.handle,
                accum=momentum_var.handle,
                lr=coefficients["lr_t"],
                grad=grad,
                momentum=coefficients["momentum"],
                use_locking=self._use_locking,
                use_nesterov=self.nesterov,
            )
        else:
            return tf.raw_ops.ResourceApplyGradientDescent(
                var=var.handle,
                alpha=coefficients["lr_t"],
                delta=grad,
                use_locking=self._use_locking,
            )

    def _resource_apply_sparse_duplicate_indices(
        self, grad, var, indices, **kwargs
    ):
        if self._momentum:
            return super()._resource_apply_sparse_duplicate_indices(
                grad, var, indices, **kwargs
            )
        else:
            # Without momentum a scatter-add of -lr*grad is sufficient and
            # handles duplicate indices correctly.
            var_device, var_dtype = var.device, var.dtype.base_dtype
            coefficients = kwargs.get("apply_state", {}).get(
                (var_device, var_dtype)
            ) or self._fallback_apply_state(var_device, var_dtype)

            return tf.raw_ops.ResourceScatterAdd(
                resource=var.handle,
                indices=indices,
                updates=-grad * coefficients["lr_t"],
            )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # This method is only needed for momentum optimization.
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        momentum_var = self.get_slot(var, "momentum")
        return tf.raw_ops.ResourceSparseApplyKerasMomentum(
            var=var.handle,
            accum=momentum_var.handle,
            lr=coefficients["lr_t"],
            grad=grad,
            indices=indices,
            momentum=coefficients["momentum"],
            use_locking=self._use_locking,
            use_nesterov=self.nesterov,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    "learning_rate"
                ),
                "decay": self._initial_decay,
                "momentum": self._serialize_hyperparameter("momentum"),
                "nesterov": self.nesterov,
            }
        )
        return config
# Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
__version__ = "0.9"
import itertools
import tkinter
# weight/slant
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
    """Return a Font object wrapping the existing Tk named font *name*.

    Raises tkinter.TclError if no named font with that name exists.
    """
    return Font(exists=True, name=name)
class Font:
    """Represents a named font.

    Constructor options are:

    font -- font specifier (name, system font, or (family, size, style)-tuple)
    name -- name to use for this font configuration (defaults to a unique name)
    exists -- does a named font by this name already exist?
       Creates a new named font if False, points to the existing font if True.
       Raises _tkinter.TclError if the assertion is false.

       the following are ignored if font is specified:

    family -- font 'family', e.g. Courier, Times, Helvetica
    size -- font size in points
    weight -- font thickness: NORMAL, BOLD
    slant -- font slant: ROMAN, ITALIC
    underline -- font underlining: false (0), true (1)
    overstrike -- font strikeout: false (0), true (1)
    """

    # Monotonic counter used to generate unique default font names.
    counter = itertools.count(1)

    def _set(self, kw):
        """Flatten a keyword dict into Tk option/value pairs,
        e.g. {'size': 12} -> ('-size', '12')."""
        options = []
        for k, v in kw.items():
            options.append("-"+k)
            options.append(str(v))
        return tuple(options)

    def _get(self, args):
        """Turn a sequence of option names into Tk flags, e.g. '-family'."""
        options = []
        for k in args:
            options.append("-"+k)
        return tuple(options)

    def _mkdict(self, args):
        """Convert a flat ('-opt', value, ...) sequence back into a dict."""
        options = {}
        for i in range(0, len(args), 2):
            options[args[i][1:]] = args[i+1]
        return options

    def __init__(self, root=None, font=None, name=None, exists=False,
                 **options):
        if not root:
            root = tkinter._default_root
        tk = getattr(root, 'tk', root)
        if font:
            # get actual settings corresponding to the given font
            font = tk.splitlist(tk.call("font", "actual", font))
        else:
            font = self._set(options)
        if not name:
            name = "font" + str(next(self.counter))
        self.name = name

        if exists:
            self.delete_font = False
            # confirm font exists
            if self.name not in tk.splitlist(tk.call("font", "names")):
                raise tkinter._tkinter.TclError(
                    "named font %s does not already exist" % (self.name,))
            # if font config info supplied, apply it
            if font:
                tk.call("font", "configure", self.name, *font)
        else:
            # create new font (raises TclError if the font exists)
            tk.call("font", "create", self.name, *font)
            self.delete_font = True
        self._tk = tk
        self._split = tk.splitlist
        self._call = tk.call

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return isinstance(other, Font) and self.name == other.name

    def __getitem__(self, key):
        return self.cget(key)

    def __setitem__(self, key, value):
        self.configure(**{key: value})

    def __del__(self):
        try:
            if self.delete_font:
                self._call("font", "delete", self.name)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Interpreter may already be torn down; nothing sensible to do.
            pass

    def copy(self):
        "Return a distinct copy of the current font"
        return Font(self._tk, **self.actual())

    def actual(self, option=None, displayof=None):
        "Return actual font attributes"
        args = ()
        if displayof:
            args = ('-displayof', displayof)
        if option:
            args = args + ('-' + option, )
            return self._call("font", "actual", self.name, *args)
        else:
            return self._mkdict(
                self._split(self._call("font", "actual", self.name, *args)))

    def cget(self, option):
        "Get font attribute"
        return self._call("font", "config", self.name, "-"+option)

    def config(self, **options):
        "Modify font attributes"
        if options:
            self._call("font", "config", self.name,
                       *self._set(options))
        else:
            return self._mkdict(
                self._split(self._call("font", "config", self.name)))

    configure = config

    def measure(self, text, displayof=None):
        "Return text width"
        args = (text,)
        if displayof:
            args = ('-displayof', displayof, text)
        return int(self._call("font", "measure", self.name, *args))

    def metrics(self, *options, **kw):
        """Return font metrics.

        For best performance, create a dummy widget
        using this font before calling this method."""
        args = ()
        displayof = kw.pop('displayof', None)
        if displayof:
            args = ('-displayof', displayof)
        if options:
            args = args + self._get(options)
            return int(
                self._call("font", "metrics", self.name, *args))
        else:
            res = self._split(self._call("font", "metrics", self.name, *args))
            options = {}
            for i in range(0, len(res), 2):
                options[res[i][1:]] = int(res[i+1])
            return options
def families(root=None, displayof=None):
    "Get font families (as a tuple)"
    if not root:
        root = tkinter._default_root
    args = ('-displayof', displayof) if displayof else ()
    return root.tk.splitlist(root.tk.call("font", "families", *args))
def names(root=None):
    "Get names of defined fonts (as a tuple)"
    target = root or tkinter._default_root
    return target.tk.splitlist(target.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff

if __name__ == "__main__":
    # Manual smoke test: requires a usable display. Exercises font creation,
    # attribute queries, measurement/metrics, and copy()/config().
    # (Also removes a stray trailing "|" artifact that broke the syntax.)
    root = tkinter.Tk()

    # create a font
    f = Font(family="times", size=30, weight=NORMAL)

    print(f.actual())
    print(f.actual("family"))
    print(f.actual("weight"))

    print(f.config())
    print(f.cget("family"))
    print(f.cget("weight"))

    print(names())

    print(f.measure("hello"), f.metrics("linespace"))
    print(f.metrics(displayof=root))

    f = Font(font=("Courier", 20, "bold"))
    print(f.measure("hello"), f.metrics("linespace", displayof=root))

    w = tkinter.Label(root, text="Hello, world", font=f)
    w.pack()

    w = tkinter.Button(root, text="Quit!", command=root.destroy)
    w.pack()

    fb = Font(font=w["font"]).copy()
    fb.config(weight=BOLD)

    w.config(font=fb)

    tkinter.mainloop()
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.util.environment import is_system_path
class Tcl(AutotoolsPackage, SourceforgePackage):
    """Tcl (Tool Command Language) is a very powerful but easy to learn dynamic
    programming language, suitable for a very wide range of uses, including web and
    desktop applications, networking, administration, testing and many more. Open source
    and business-friendly, Tcl is a mature yet evolving language that is truly cross
    platform, easily deployed and highly extensible."""

    homepage = "https://www.tcl.tk/"
    sourceforge_mirror_path = "tcl/tcl8.6.11-src.tar.gz"

    version('8.6.11', sha256='8c0486668586672c5693d7d95817cb05a18c5ecca2f40e2836b9578064088258')
    version('8.6.10', sha256='5196dbf6638e3df8d5c87b5815c8c2b758496eb6f0e41446596c9a4e638d87ed')
    version('8.6.8', sha256='c43cb0c1518ce42b00e7c8f6eaddd5195c53a98f94adc717234a65cbcfd3f96a')
    version('8.6.6', sha256='a265409781e4b3edcc4ef822533071b34c3dc6790b893963809b9fe221befe07')
    version('8.6.5', sha256='ce26d5b9c7504fc25d2f10ef0b82b14cf117315445b5afa9e673ed331830fb53')
    version('8.6.4', sha256='9e6ed94c981c1d0c5f5fefb8112d06c6bf4d050a7327e95e71d417c416519c8d')
    version('8.6.3', sha256='6ce0778de0d50daaa9c345d7c1fd1288fb658f674028812e7eeee992e3051005')
    version('8.5.19', sha256='d3f04456da873d17f02efc30734b0300fb6c3b85028d445fe284b83253a6db18')

    extendable = True

    depends_on('zlib')

    configure_directory = 'unix'

    def install(self, spec, prefix):
        """Build/install Tcl, including private headers and a copy of the
        source tree (some extensions, e.g. Expect and kitgen, need them)."""
        with working_dir(self.build_directory):
            make('install')

            # https://wiki.tcl-lang.org/page/kitgen
            if self.spec.satisfies('@8.6:'):
                make('install-headers')

            # Some applications like Expect require private Tcl headers.
            make('install-private-headers')

            # Copy source to install tree
            # A user-provided install option might re-do this
            # https://github.com/spack/spack/pull/4102/files
            installed_src = join_path(
                self.spec.prefix, 'share', self.name, 'src')
            stage_src = os.path.realpath(self.stage.source_path)
            install_tree(stage_src, installed_src)

            # Replace stage dir -> installed src dir in tclConfig
            filter_file(
                stage_src, installed_src,
                join_path(self.spec['tcl'].libs.directories[0],
                          'tclConfig.sh'))

        # Don't install binaries in src/ tree
        with working_dir(join_path(installed_src, self.configure_directory)):
            make('clean')

    @run_after('install')
    def symlink_tclsh(self):
        # Provide a versionless "tclsh" alongside "tclshX.Y".
        with working_dir(self.prefix.bin):
            symlink('tclsh{0}'.format(self.version.up_to(2)), 'tclsh')

    # ========================================================================
    # Set up environment to make install easy for tcl extensions.
    # ========================================================================

    @property
    def libs(self):
        return find_libraries(['libtcl{0}'.format(self.version.up_to(2))],
                              root=self.prefix, recursive=True)

    @property
    def command(self):
        """Returns the tclsh command.

        Returns:
            Executable: the tclsh command
        """
        # Although we symlink tclshX.Y to tclsh, we also need to support external
        # installations that may not have this symlink, or may have multiple versions
        # of Tcl installed in the same directory.
        return Executable(os.path.realpath(self.prefix.bin.join(
            'tclsh{0}'.format(self.version.up_to(2)))))

    def _find_script_dir(self):
        """Locate the directory containing init.tcl under this prefix."""
        # Put more-specific prefixes first
        check_prefixes = [
            join_path(self.prefix, "share", "tcl{0}".format(self.version.up_to(2))),
            self.prefix,
        ]
        for prefix in check_prefixes:
            result = find(prefix, "init.tcl")
            if result:
                return os.path.dirname(sorted(result)[0])

    def setup_run_environment(self, env):
        """Set TCL_LIBRARY to the directory containing init.tcl.

        For further info see:

        * https://wiki.tcl-lang.org/page/TCL_LIBRARY
        """
        # When using tkinter from within spack provided python+tkinter,
        # python will not be able to find Tcl unless TCL_LIBRARY is set.
        env.set('TCL_LIBRARY', self._find_script_dir())

    def setup_dependent_build_environment(self, env, dependent_spec):
        """Set TCL_LIBRARY to the directory containing init.tcl.
        Set TCLLIBPATH to include the tcl-shipped directory for
        extensions and any other tcl extension it depends on.

        For further info see:

        * https://wiki.tcl-lang.org/page/TCL_LIBRARY
        * https://wiki.tcl-lang.org/page/TCLLIBPATH
        """
        env.set('TCL_LIBRARY', self._find_script_dir())

        # If we set TCLLIBPATH, we must also ensure that the corresponding
        # tcl is found in the build environment. This to prevent cases
        # where a system provided tcl is run against the standard libraries
        # of a Spack built tcl. See issue #7128 that relates to python but
        # it boils down to the same situation we have here.
        if not is_system_path(self.prefix.bin):
            env.prepend_path('PATH', self.prefix.bin)

        # WARNING: paths in $TCLLIBPATH must be *space* separated,
        # its value is meant to be a Tcl list, *not* an env list
        # as explained here: https://wiki.tcl-lang.org/page/TCLLIBPATH:
        # "TCLLIBPATH is a Tcl list, not some platform-specific
        # colon-separated or semi-colon separated format"

        # WARNING: Tcl and Tcl extensions like Tk install their configuration files
        # in subdirectories like `<prefix>/lib/tcl8.6`. However, Tcl is aware of this,
        # and $TCLLIBPATH should only contain `<prefix>/lib`. $TCLLIBPATH is only needed
        # because we install Tcl extensions to different directories than Tcl. See:
        # https://core.tcl-lang.org/tk/tktview/447bd3e4abe17452d19a80e6840dcc8a2603fcbc
        env.prepend_path(
            'TCLLIBPATH', self.spec['tcl'].libs.directories[0], separator=' ')

        for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):
            if d.package.extends(self.spec):
                # Tcl libraries may be installed in lib or lib64, see #19546
                for lib in ['lib', 'lib64']:
                    tcllibpath = join_path(d.prefix, lib)
                    if os.path.exists(tcllibpath):
                        env.prepend_path('TCLLIBPATH', tcllibpath, separator=' ')

    def setup_dependent_run_environment(self, env, dependent_spec):
        """Set TCLLIBPATH to include the tcl-shipped directory for
        extensions and any other tcl extension it depends on.

        For further info see:

        * https://wiki.tcl-lang.org/page/TCLLIBPATH
        """
        for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):
            if d.package.extends(self.spec):
                # Tcl libraries may be installed in lib or lib64, see #19546
                for lib in ['lib', 'lib64']:
                    tcllibpath = join_path(d.prefix, lib)
                    if os.path.exists(tcllibpath):
                        env.prepend_path('TCLLIBPATH', tcllibpath, separator=' ')
import json
from unittest.mock import Mock
import pytest
import requests
import requests_mock
from modules.devices.tesla import bat
from modules.devices.tesla.device import Device, Tesla
from modules.common.component_state import BatState
from modules.devices.tesla.config import TeslaConfiguration
from test_utils.mock_ramdisk import MockRamdisk
sample_soe_json = """{"percentage":69.16}"""
sample_aggregates_json = """
{
"site":{
"last_communication_time":"2018-04-02T16:11:41.885377469-07:00",
"instant_power":-21.449996948242188,
"instant_reactive_power":-138.8300018310547,
"instant_apparent_power":140.47729986545957,
"frequency":60.060001373291016,
"energy_exported":1136916.6875,
"energy_imported":3276432.6625,
"instant_average_voltage":239.81999969482422,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"battery":{
"last_communication_time":"2018-04-02T16:11:41.89022247-07:00",
"instant_power":-2350,
"instant_reactive_power":0,
"instant_apparent_power":2350,
"frequency":60.033,
"energy_exported":1169030,
"energy_imported":1638140,
"instant_average_voltage":239.10000000000002,
"instant_total_current":45.8,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"load":{
"last_communication_time":"2018-04-02T16:11:41.885377469-07:00",
"instant_power":1546.2712597712405,
"instant_reactive_power":-71.43153973801415,
"instant_apparent_power":1547.920305979569,
"frequency":60.060001373291016,
"energy_exported":0,
"energy_imported":7191016.994444443,
"instant_average_voltage":239.81999969482422,
"instant_total_current":6.44763264839839,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"solar":{
"last_communication_time":"2018-04-02T16:11:41.885541803-07:00",
"instant_power":3906.1700439453125,
"instant_reactive_power":53.26999855041504,
"instant_apparent_power":3906.533259164868,
"frequency":60.060001373291016,
"energy_exported":5534272.949724403,
"energy_imported":13661.930279959455,
"instant_average_voltage":239.8699951171875,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"busway":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"frequency":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"generator":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
}
}"""
def setup_battery_component() -> Device:
    """Build a Tesla Device with a single battery component for testing."""
    configuration = TeslaConfiguration(
        ip_address="sample-address",
        email="sample@mail.com",
        password="some password")
    device = Device(Tesla(configuration=configuration))
    device.add_component(bat.component_descriptor.configuration_factory())
    return device
def match_cookie_ok(request: requests.PreparedRequest):
    """Matcher: request carries the expected, valid auth cookie."""
    cookie_header = request.headers['Cookie']
    return "AuthCookie=auth-cookie" in cookie_header
def match_cookie_reject(request: requests.PreparedRequest):
    """Matcher: request does NOT carry the expected auth cookie."""
    has_valid_cookie = match_cookie_ok(request)
    return not has_valid_cookie
@pytest.fixture
def mock_ramdisk(monkeypatch):
    # Replace the real ramdisk with an in-memory fake so tests can seed and
    # inspect the persisted powerwall cookie file.
    return MockRamdisk(monkeypatch)
API_URL = "https://sample-address/api"
COOKIE_FILE_NAME = "powerwall_cookie.txt"
def assert_battery_state_correct(state: BatState):
    """Assert that *state* matches the values in the sample JSON fixtures."""
    expected = (("soc", 69.16),
                ("power", 2350),
                ("imported", 1638140),
                ("exported", 1169030))
    for attribute, value in expected:
        assert getattr(state, attribute) == value
def test_powerwall_update_if_cookie_cached(monkeypatch, requests_mock: requests_mock.Mocker, mock_ramdisk: MockRamdisk):
    """With a valid cached cookie on the ramdisk, update() fetches data
    directly — no login request is registered, so any re-auth would fail."""
    # setup
    mock_bat_value_store = Mock()
    monkeypatch.setattr(bat, "get_bat_value_store", Mock(return_value=mock_bat_value_store))
    # Only cookie-authenticated requests are answered.
    requests_mock.get("https://sample-address/api/meters/aggregates", text=sample_aggregates_json,
                      additional_matcher=match_cookie_ok)
    requests_mock.get("https://sample-address/api/system_status/soe", text=sample_soe_json,
                      additional_matcher=match_cookie_ok)
    mock_ramdisk[COOKIE_FILE_NAME] = """{"AuthCookie": "auth-cookie", "UserRecord": "user-record"}"""
    # execution
    setup_battery_component().update()
    # evaluation
    assert_battery_state_correct(mock_bat_value_store.set.call_args[0][0])
@pytest.mark.parametrize(
    "cookie_file", [
        pytest.param("""{"AuthCookie": "reject-me", "UserRecord": "user-record"}""", id="expired cookie"),
        pytest.param("""{this is not valid json}""", id="garbage file"),
        pytest.param(None, id="no cookie file")
    ]
)
def test_powerwall_update_retrieves_new_cookie_if_cookie_rejected(monkeypatch,
                                                                  requests_mock: requests_mock.Mocker,
                                                                  mock_ramdisk: MockRamdisk,
                                                                  cookie_file: str):
    """A rejected, corrupt, or missing cookie must trigger a fresh login,
    persist the new cookie to the ramdisk, and retry the data requests."""
    # setup
    mock_bat_value_store = Mock()
    monkeypatch.setattr(bat, "get_bat_value_store", Mock(return_value=mock_bat_value_store))
    # Login endpoint hands out the valid cookie pair.
    requests_mock.post(API_URL + "/login/Basic", cookies={"AuthCookie": "auth-cookie", "UserRecord": "user-record"})
    # Requests without the valid cookie are rejected with 401 ...
    requests_mock.get(API_URL + "/meters/aggregates", status_code=401, additional_matcher=match_cookie_reject)
    requests_mock.get(API_URL + "/system_status/soe", status_code=401, additional_matcher=match_cookie_reject)
    # ... while properly authenticated ones succeed.
    requests_mock.get(API_URL + "/meters/aggregates", text=sample_aggregates_json, additional_matcher=match_cookie_ok)
    requests_mock.get(API_URL + "/system_status/soe", text=sample_soe_json, additional_matcher=match_cookie_ok)
    if cookie_file is not None:
        mock_ramdisk[COOKIE_FILE_NAME] = cookie_file
    # execution
    setup_battery_component().update()
    # evaluation
    assert json.loads(mock_ramdisk[COOKIE_FILE_NAME]) == {"AuthCookie": "auth-cookie", "UserRecord": "user-record"}
    assert_battery_state_correct(mock_bat_value_store.set.call_args[0][0])
7,051 | test bug id from url | # pylint: disable=attribute-defined-outside-init
import os
import time
import unittest
from django.utils import timezone
from tcms.core.contrib.linkreference.models import LinkReference
from tcms.issuetracker.types import GitHub
from tcms.rpc.tests.utils import APITestCase
from tcms.testcases.models import BugSystem
from tcms.tests.factories import ComponentFactory, TestExecutionFactory
@unittest.skipUnless(
    os.getenv("TEST_BUGTRACKER_INTEGRATION"),
    "Bug tracker integration testing not enabled",
)
class TestGitHubIntegration(APITestCase):
    """Live integration tests against the kiwitcms/test-github-integration
    repository on GitHub. Requires TEST_BUGTRACKER_INTEGRATION to be set and
    a valid GH_BUGTRACKER_INTEGRATION_TEST_API_TOKEN."""

    # Issue #1 of the public test repository is a known, stable fixture.
    existing_bug_id = 1
    existing_bug_url = "https://github.com/kiwitcms/test-github-integration/issues/1"

    def _fixture_setup(self):
        """Create a TestExecution, a component, and a GitHub tracker config."""
        super()._fixture_setup()
        self.execution_1 = TestExecutionFactory()
        # Timestamps make each run's artifacts uniquely identifiable on GitHub.
        self.execution_1.case.summary = "Tested at " + timezone.now().isoformat()
        self.execution_1.case.text = "Given-When-Then"
        self.execution_1.case.save()  # will generate history object
        self.execution_1.run.summary = (
            "Automated TR for GitHub integration on " + timezone.now().isoformat()
        )
        self.execution_1.run.save()
        self.component = ComponentFactory(
            name="GitHub integration", product=self.execution_1.run.plan.product
        )
        self.execution_1.case.add_component(self.component)
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="GitHub for kiwitcms/test-github-integration",
            tracker_type="tcms.issuetracker.types.GitHub",
            base_url="https://github.com/kiwitcms/test-github-integration",
            api_password=os.getenv("GH_BUGTRACKER_INTEGRATION_TEST_API_TOKEN"),
        )
        self.integration = GitHub(bug_system, None)

    def METHOD_NAME(self):
        # bug_id_from_url() must extract the numeric issue id from the URL.
        result = self.integration.bug_id_from_url(self.existing_bug_url)
        self.assertEqual(self.existing_bug_id, result)

    def test_details_for_public_url(self):
        # details() fetches title/description of a public issue.
        result = self.integration.details(self.existing_bug_url)
        self.assertEqual("Hello GitHub", result["title"])
        self.assertEqual(
            "This issue is used in automated tests that verify Kiwi TCMS - GitHub "
            "bug tracking integration!",
            result["description"],
        )

    def test_details_for_private_url(self):
        # Same as above but against a private repository, so the API token
        # must actually be used for authentication.
        # NOTE(review): tracker_type here is the short form "GitHub" while
        # _fixture_setup uses the dotted path — confirm both are accepted.
        bug_system = BugSystem.objects.create(  # nosec:B106:hardcoded_password_funcarg
            name="Private GitHub for kiwitcms/private-test-github-integration",
            tracker_type="GitHub",
            base_url="https://github.com/kiwitcms/private-test-github-integration",
            api_password=os.getenv("GH_BUGTRACKER_INTEGRATION_TEST_API_TOKEN"),
        )
        integration = GitHub(bug_system, None)
        result = integration.details(
            "https://github.com/kiwitcms/private-test-github-integration/issues/1"
        )
        self.assertEqual("Hello Private GitHub", result["title"])
        self.assertEqual(
            "This issue is used in automated tests that verify "
            "Kiwi TCMS - GitHub bug tracking integration!",
            result["description"],
        )

    def test_auto_update_bugtracker(self):
        """Linking a bug URL with auto-update must post a comment on GitHub."""
        repo_id = self.integration.repo_id
        repo = self.integration.rpc.get_repo(repo_id)
        issue = repo.get_issue(self.existing_bug_id)
        # make sure there are no comments to confuse the test
        initial_comments_count = 0
        for comment in issue.get_comments():
            initial_comments_count += 1
            self.assertNotIn(self.execution_1.run.summary, comment.body)
        # simulate user adding a new bug URL to a TE and clicking
        # 'Automatically update bug tracker'
        result = self.rpc_client.TestExecution.add_link(
            {
                "execution_id": self.execution_1.pk,
                "is_defect": True,
                "url": self.existing_bug_url,
            },
            True,
        )
        # making sure RPC above returned the same URL
        self.assertEqual(self.existing_bug_url, result["url"])
        # wait until comments have been refreshed b/c this seem to happen async
        retries = 0
        last_comment = None
        current_comment_count = 0
        while current_comment_count <= initial_comments_count:
            current_comment_count = 0
            # .get_comments() returns an iterator
            for comment in issue.get_comments():
                current_comment_count += 1
                last_comment = comment
            time.sleep(1)
            retries += 1
            # bail out instead of looping forever if the comment never shows up
            self.assertLess(retries, 20)
        # assert that a comment has been added as the last one
        # and also verify its text
        for expected_string in [
            "Confirmed via test execution",
            f"TR-{self.execution_1.run_id}: {self.execution_1.run.summary}",
            self.execution_1.run.get_full_url(),
            f"TE-{self.execution_1.pk}: {self.execution_1.case.summary}",
        ]:
            self.assertIn(expected_string, last_comment.body)
        # clean up after ourselves in case everything above looks good
        last_comment.delete()

    def test_report_issue_from_test_execution_1click_works(self):
        """The 1-click 'Report bug' flow must open a pre-filled GitHub issue
        and record a LinkReference on the test execution."""
        # simulate user clicking the 'Report bug' button in TE widget, TR page
        result = self.rpc_client.Bug.report(
            self.execution_1.pk, self.integration.bug_system.pk
        )
        self.assertEqual(result["rc"], 0)
        self.assertIn(self.integration.bug_system.base_url, result["response"])
        self.assertIn("/issues/", result["response"])
        new_issue_id = self.integration.bug_id_from_url(result["response"])
        repo_id = self.integration.repo_id
        repo = self.integration.rpc.get_repo(repo_id)
        issue = repo.get_issue(new_issue_id)
        self.assertEqual(f"Failed test: {self.execution_1.case.summary}", issue.title)
        for expected_string in [
            f"Filed from execution {self.execution_1.get_full_url()}",
            "Reporter",
            self.execution_1.run.plan.product.name,
            self.component.name,
            "Steps to reproduce",
            self.execution_1.case.text,
        ]:
            self.assertIn(expected_string, issue.body)
        # verify that LR has been added to TE
        self.assertTrue(
            LinkReference.objects.filter(
                execution=self.execution_1,
                url=result["response"],
                is_defect=True,
            ).exists()
        )
        # close issue after we're done
        issue.edit(state="closed")
7,052 | visit call | """ Inlining inline functions body. """
from pythran.analyses import Inlinable, Aliases
from pythran.passmanager import Transformation
import gast as ast
import copy
class Inlining(Transformation):
    """
    Inline one line functions.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> pm = passmanager.PassManager("test")
    >>> node = ast.parse('''
    ... def foo(a, b):
    ...     return b + b * a
    ... def bar(b):
    ...     return foo(2 * b, b) * foo(b, b)''')
    >>> _, node = pm.apply(Inlining, node)
    >>> print(pm.dump(backend.Python, node))
    def foo(a, b):
        return (b + (b * a))
    def bar(b):
        __pythran_inlinefooa0 = (2 * b)
        __pythran_inlinefoob0 = b
        __pythran_inlinefooa1 = b
        __pythran_inlinefoob1 = b
        return ((__pythran_inlinefoob0 + (__pythran_inlinefoob0 * \
__pythran_inlinefooa0)) * (__pythran_inlinefoob1 + \
(__pythran_inlinefoob1 * __pythran_inlinefooa1)))
    """

    def __init__(self):
        """ fun : Function {name :body} for inlinable functions. """
        self.update = False
        # Pending assignments to emit before the statement being visited.
        self.defs = list()
        # Counter that keeps generated temporary names unique per call site.
        self.call_count = 0
        super(Inlining, self).__init__(Inlinable, Aliases)

    def visit_Stmt(self, node):
        """ Add new variable definition before the Statement. """
        # Save any outer pending defs, collect the ones produced while
        # visiting this statement, then restore the outer list.
        save_defs, self.defs = self.defs or list(), list()
        self.generic_visit(node)
        new_defs, self.defs = self.defs, save_defs
        return new_defs + [node]

    # All statement kinds that may contain an inlinable call get the same
    # "emit argument bindings first" treatment.
    visit_Return = visit_Stmt
    visit_Assign = visit_Stmt
    visit_AugAssign = visit_Stmt
    visit_Print = visit_Stmt
    visit_For = visit_Stmt
    visit_While = visit_Stmt
    visit_If = visit_Stmt
    visit_With = visit_Stmt
    visit_Assert = visit_Stmt
    visit_Expr = visit_Stmt

    def METHOD_NAME(self, node):
        """
        Replace function call by inlined function's body.

        We can inline if it aliases on only one function.
        """
        func_aliases = self.aliases[node.func]
        if len(func_aliases) == 1:
            function_def = next(iter(func_aliases))
            if (isinstance(function_def, ast.FunctionDef) and
                    function_def.name in self.inlinable):
                self.update = True
                # Work on a copy so the original definition stays intact.
                to_inline = copy.deepcopy(self.inlinable[function_def.name])
                arg_to_value = dict()
                values = node.args
                # Complete missing positional args with the callee's defaults.
                values += to_inline.args.defaults[len(node.args) -
                                                 len(to_inline.args.args):]
                for arg_fun, arg_call in zip(to_inline.args.args, values):
                    # Bind each argument to a fresh temporary assigned before
                    # the statement (see visit_Stmt) to preserve evaluation
                    # order and avoid re-evaluating call arguments.
                    v_name = "__pythran_inline{}{}{}".format(function_def.name,
                                                             arg_fun.id,
                                                             self.call_count)
                    new_var = ast.Name(id=v_name,
                                       ctx=ast.Store(),
                                       annotation=None, type_comment=None)
                    self.defs.append(ast.Assign(targets=[new_var],
                                                value=arg_call,
                                                type_comment=None))
                    arg_to_value[arg_fun.id] = ast.Name(id=v_name,
                                                        ctx=ast.Load(),
                                                        annotation=None,
                                                        type_comment=None)
                self.call_count += 1
                return Inliner(arg_to_value).visit(to_inline.body[0])
        return node
class Inliner(ast.NodeTransformer):
    """Rewrite an inlined one-statement body in terms of call-site bindings."""

    def __init__(self, match):
        """match maps original parameter names to their call-site AST nodes."""
        super(Inliner, self).__init__()
        self.match = match

    def visit_Name(self, node):
        """Substitute the bound call-site value for this name, if any."""
        if node.id in self.match:
            return self.match[node.id]
        return node

    def visit_Return(self, node):
        """Drop the ``return`` wrapper, keeping (and visiting) its value."""
        return self.visit(node.value)
7,053 | wrapped md5 function | import os
import sys
from typing import TYPE_CHECKING
from ddtrace.appsec.iast import oce
from ddtrace.appsec.iast._metrics import _set_metric_iast_instrumented_sink
from ddtrace.appsec.iast._patch import set_and_check_module_is_patched
from ddtrace.appsec.iast._patch import set_module_unpatched
from ddtrace.appsec.iast._patch import try_unwrap
from ddtrace.appsec.iast._patch import try_wrap_function_wrapper
from ddtrace.appsec.iast.constants import DEFAULT_WEAK_HASH_ALGORITHMS
from ddtrace.appsec.iast.constants import EVIDENCE_ALGORITHM_TYPE
from ddtrace.appsec.iast.constants import MD5_DEF
from ddtrace.appsec.iast.constants import SHA1_DEF
from ddtrace.appsec.iast.constants import VULN_INSECURE_HASHING_TYPE
from ddtrace.appsec.iast.taint_sinks._base import VulnerabilityBase
from ddtrace.internal.logger import get_logger
if TYPE_CHECKING: # pragma: no cover
from typing import Any
from typing import Callable
from typing import Set
log = get_logger(__name__)
def get_weak_hash_algorithms():
    # type: () -> Set
    """Return the set of weak-hash algorithm names to report (lower-cased).

    Reads the comma-separated DD_IAST_WEAK_HASH_ALGORITHMS environment
    variable; falls back to DEFAULT_WEAK_HASH_ALGORITHMS when it is unset
    or empty.
    """
    raw = os.getenv("DD_IAST_WEAK_HASH_ALGORITHMS")
    if not raw:
        return DEFAULT_WEAK_HASH_ALGORITHMS
    configured = {algo.strip() for algo in raw.lower().split(",")}
    return configured or DEFAULT_WEAK_HASH_ALGORITHMS
@oce.register
class WeakHash(VulnerabilityBase):
    # IAST sink describing use of a weak hashing algorithm (e.g. MD5/SHA1).
    vulnerability_type = VULN_INSECURE_HASHING_TYPE
    evidence_type = EVIDENCE_ALGORITHM_TYPE
    # Algorithm names are not sensitive data, so evidence is not scrubbed.
    scrub_evidence = False
def unpatch_iast():
    # type: () -> None
    # Undo patch(): mark hashlib/Crypto as unpatched and unwrap every
    # digest/hexdigest wrapper that patch() may have installed. try_unwrap
    # is used so that wrappers never actually applied are skipped quietly.
    set_module_unpatched("hashlib", default_attr="_datadog_weak_hash_patch")
    set_module_unpatched("Crypto", default_attr="_datadog_weak_hash_patch")
    if sys.version_info >= (3, 0, 0):
        # Python 3: the C-accelerated _hashlib/_md5/_sha1 types were wrapped.
        try_unwrap("_hashlib", "HASH.digest")
        try_unwrap("_hashlib", "HASH.hexdigest")
        try_unwrap(("_%s" % MD5_DEF), "MD5Type.digest")
        try_unwrap(("_%s" % MD5_DEF), "MD5Type.hexdigest")
        try_unwrap(("_%s" % SHA1_DEF), "SHA1Type.digest")
        try_unwrap(("_%s" % SHA1_DEF), "SHA1Type.hexdigest")
    else:
        # Python 2: the hashlib constructors themselves were wrapped.
        try_unwrap("hashlib", MD5_DEF)
        try_unwrap("hashlib", SHA1_DEF)
        try_unwrap("hashlib", "new")
    # pycryptodome methods
    try_unwrap("Crypto.Hash.MD5", "MD5Hash.digest")
    try_unwrap("Crypto.Hash.MD5", "MD5Hash.hexdigest")
    try_unwrap("Crypto.Hash.SHA1", "SHA1Hash.digest")
    try_unwrap("Crypto.Hash.SHA1", "SHA1Hash.hexdigest")
def get_version():
    # type: () -> str
    """Return the patched-module version; unknown here, hence empty."""
    return ""
def patch():
    # type: () -> None
    """Wrap hashing functions.

    Weak hashing algorithms are those that have been proven to be of high risk, or even completely broken,
    and thus are not fit for use.
    """
    # Idempotence guard: bail out if either module is already patched.
    if not set_and_check_module_is_patched("hashlib", default_attr="_datadog_weak_hash_patch"):
        return
    if not set_and_check_module_is_patched("Crypto", default_attr="_datadog_weak_hash_patch"):
        return
    weak_hash_algorithms = get_weak_hash_algorithms()
    # Count how many sinks get instrumented, for telemetry reporting below.
    num_instrumented_sinks = 0
    if sys.version_info >= (3, 0, 0):
        # Generic _hashlib.HASH wrapper reports based on the instance's name;
        # the _md5/_sha1 types get algorithm-specific wrappers.
        try_wrap_function_wrapper("_hashlib", "HASH.digest", wrapped_digest_function)
        try_wrap_function_wrapper("_hashlib", "HASH.hexdigest", wrapped_digest_function)
        num_instrumented_sinks += 2
        if MD5_DEF in weak_hash_algorithms:
            try_wrap_function_wrapper(("_%s" % MD5_DEF), "MD5Type.digest", METHOD_NAME)
            try_wrap_function_wrapper(("_%s" % MD5_DEF), "MD5Type.hexdigest", METHOD_NAME)
            num_instrumented_sinks += 2
        if SHA1_DEF in weak_hash_algorithms:
            try_wrap_function_wrapper(("_%s" % SHA1_DEF), "SHA1Type.digest", wrapped_sha1_function)
            try_wrap_function_wrapper(("_%s" % SHA1_DEF), "SHA1Type.hexdigest", wrapped_sha1_function)
            num_instrumented_sinks += 2
    else:
        # Python 2: wrap the hashlib constructors instead of the hash types.
        if MD5_DEF in weak_hash_algorithms:
            try_wrap_function_wrapper("hashlib", MD5_DEF, METHOD_NAME)
            num_instrumented_sinks += 1
        if SHA1_DEF in weak_hash_algorithms:
            try_wrap_function_wrapper("hashlib", SHA1_DEF, wrapped_sha1_function)
            num_instrumented_sinks += 1
        try_wrap_function_wrapper("hashlib", "new", wrapped_new_function)
        num_instrumented_sinks += 1
    # pycryptodome methods
    if MD5_DEF in weak_hash_algorithms:
        try_wrap_function_wrapper("Crypto.Hash.MD5", "MD5Hash.digest", METHOD_NAME)
        try_wrap_function_wrapper("Crypto.Hash.MD5", "MD5Hash.hexdigest", METHOD_NAME)
        num_instrumented_sinks += 2
    if SHA1_DEF in weak_hash_algorithms:
        try_wrap_function_wrapper("Crypto.Hash.SHA1", "SHA1Hash.digest", wrapped_sha1_function)
        try_wrap_function_wrapper("Crypto.Hash.SHA1", "SHA1Hash.hexdigest", wrapped_sha1_function)
        num_instrumented_sinks += 2
    if num_instrumented_sinks > 0:
        _set_metric_iast_instrumented_sink(VULN_INSECURE_HASHING_TYPE, num_instrumented_sinks)
@WeakHash.wrap
def wrapped_digest_function(wrapped, instance, args, kwargs):
    # type: (Callable, Any, Any, Any) -> Any
    # Generic _hashlib.HASH digest/hexdigest wrapper: reports only when the
    # instance's algorithm name is configured as weak, then delegates.
    if instance.name.lower() in get_weak_hash_algorithms():
        WeakHash.report(
            evidence_value=instance.name,
        )
    return wrapped(*args, **kwargs)
@WeakHash.wrap
def METHOD_NAME(wrapped, instance, args, kwargs):
    # type: (Callable, Any, Any, Any) -> Any
    # MD5-specific wrapper: always reports MD5 as the weak-hash evidence.
    return wrapped_function(wrapped, MD5_DEF, instance, args, kwargs)
@WeakHash.wrap
def wrapped_sha1_function(wrapped, instance, args, kwargs):
    # type: (Callable, Any, Any, Any) -> Any
    # SHA1-specific wrapper: always reports SHA1 as the weak-hash evidence.
    return wrapped_function(wrapped, SHA1_DEF, instance, args, kwargs)
@WeakHash.wrap
def wrapped_new_function(wrapped, instance, args, kwargs):
    # type: (Callable, Any, Any, Any) -> Any
    # hashlib.new(name, ...) wrapper: the algorithm name is the first
    # positional argument; report when it is configured as weak.
    if args[0].lower() in get_weak_hash_algorithms():
        WeakHash.report(
            evidence_value=args[0].lower(),
        )
    return wrapped(*args, **kwargs)
def wrapped_function(wrapped, evidence, instance, args, kwargs):
    # type: (Callable, str, Any, Any, Any) -> Any
    # Shared tail for the algorithm-specific wrappers above: report the
    # given algorithm name as evidence, then call the wrapped function.
    WeakHash.report(
        evidence_value=evidence,
    )
    return wrapped(*args, **kwargs)
7,054 | gen output file name | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: imagick.py
author: Cyrus Harrison <cyrush@llnl.gov>
created: 10/14/2010
description:
Provides flow filters that wrap image magick functionaliy.
Requires an install of the imagemagick command line tools.
"""
import os
from os.path import join as pjoin
from ..core import Filter, Context, PropertyTree, sexe
class ImagickExecuteError(Exception):
    """Raised when an ImageMagick command exits with a non-zero status."""

    def __init__(self, output):
        # Keep the full command output so str(exc) shows what went wrong.
        self.output = output

    def __str__(self):
        return self.output
class ImagickContext(Context):
    """Flow context tracking the working directory for ImageMagick filters."""

    context_type = "imagick"

    def set_working_dir(self, dname, create=True):
        """Set (and, when *create* is True, create) the absolute working dir.

        Bug fix: the original called ``os.mkdir(wdir)`` on an undefined local
        name, raising NameError whenever the directory had to be created;
        it must operate on ``self.wdir``.
        """
        self.wdir = os.path.abspath(dname)
        if create and not os.path.isdir(self.wdir):
            os.mkdir(self.wdir)

    def working_dir(self):
        """Return the absolute working directory set by set_working_dir()."""
        return self.wdir
def METHOD_NAME(node):
    """Build the output file name for *node* inside its context working dir.

    The node's ``obase`` parameter overrides the node name when present.
    """
    # NOTE(review): an absolute 'obase' path could be supported as an
    # alternative to the context working dir.
    base = node.params.obase if node.params.has_property("obase") else node.name
    prefix = pjoin(node.context.working_dir(), base)
    suffix = "%s_%s_output.png" % (base, node.state_vector.index())
    return prefix + suffix
def imagick_exe(cmd, ofname):
    # Run an ImageMagick command line with *ofname* appended as the output
    # file; returns ofname on success, raises ImagickExecuteError (carrying
    # the captured output) when the command exits non-zero.
    ret, out = sexe(cmd + " " + ofname, ret_output=True)
    if ret != 0:
        raise ImagickExecuteError(out)
    return ofname
class ImageFill(Filter):
    """Produce a width x height image filled with a solid color
    (ImageMagick: ``convert -size WxH xc:COLOR <out>``)."""
    filter_type = "fill"
    input_ports = []
    default_params = {"width": 0,
                      "height": 0,
                      "color": "black"}
    output_port = True

    def execute(self):
        p = self.params
        cmd = "convert -size %dx%d xc:%s " % (p.width, p.height, p.color)
        return imagick_exe(cmd, METHOD_NAME(self))
class ImageResize(Filter):
    """Resize the input image to width x height
    (ImageMagick: ``convert -resize WxH <in> <out>``)."""
    filter_type = "resize"
    input_ports = ["in"]
    default_params = {"width": 0,
                      "height": 0}
    output_port = True

    def execute(self):
        p = self.params
        cmd = "convert -resize %dx%d %s " % (p.width, p.height, self.input("in"))
        return imagick_exe(cmd, METHOD_NAME(self))
class ImageOver(Filter):
    """Composite the 'over' image onto 'under' at offset (x, y)
    (ImageMagick: ``composite -geometry +x+y <over> <under> <out>``)."""
    filter_type = "over"
    input_ports = ["over", "under"]
    default_params = {"x": 0,
                      "y": 0}
    output_port = True

    def execute(self):
        p = self.params
        cmd = "composite -geometry +%d+%d %s %s " % (p.x, p.y,
                                                     self.input("over"),
                                                     self.input("under"))
        return imagick_exe(cmd, METHOD_NAME(self))
class ImageBlend(Filter):
    """Blend the 'over' image onto 'under' by *percent* at offset (x, y)
    (ImageMagick: ``composite -blend PCT -geometry +x+y <over> <under> <out>``).

    Fix: the port list attribute was misspelled ``input_port``; every sibling
    filter in this module declares ``input_ports`` (presumably the name the
    Filter machinery reads — confirm against flow.core.Filter).
    """
    filter_type = "blend"
    input_ports = ["over", "under"]
    default_params = {"x": 0,
                      "y": 0,
                      "percent": 0}
    output_port = True

    def execute(self):
        p = self.params
        cmd = "composite -blend %f -geometry +%d+%d %s %s " % (p.percent,
                                                               p.x,
                                                               p.y,
                                                               self.input("over"),
                                                               self.input("under"))
        return imagick_exe(cmd, METHOD_NAME(self))
class ImageCrop(Filter):
    """Crop a width x height region at offset (x, y) from the input image
    (ImageMagick: ``convert -crop WxH+x+y <in> <out>``)."""
    filter_type = "crop"
    input_ports = ["in"]
    default_params = {"x": 0,
                      "y": 0,
                      "width": 0,
                      "height": 0}
    output_port = True

    def execute(self):
        p = self.params
        cmd = "convert -crop %dx%d+%d+%d %s " % (p.width,
                                                 p.height,
                                                 p.x,
                                                 p.y,
                                                 self.input("in"))
        return imagick_exe(cmd, METHOD_NAME(self))
class ImageAppend(Filter):
    """Join two images side by side ('horz', ``+append``) or stacked
    ('vert', ``-append``)."""
    filter_type = "append"
    input_ports = ["in_a", "in_b"]
    default_params = {"direction": "horz"}
    output_port = True

    def execute(self):
        p = self.params
        d = p.direction
        # '+append' joins horizontally; '-append' joins vertically.
        op = "+"
        if d == "vert":
            op = "-"
        cmd = "convert %s %s %sappend " % (self.input("in_a"), self.input("in_b"), op)
        return imagick_exe(cmd, METHOD_NAME(self))
# Registry of the filter classes exported by this module.
filters = [ImageFill, ImageResize, ImageOver, ImageBlend, ImageCrop, ImageAppend]
contexts = [ImagickContext |
7,055 | ensure structure calc is set | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
from pyiron_atomistics.atomistics.job.interactive import GenericInteractive
from pyiron_atomistics.atomistics.structure.atoms import Atoms
try:
from ase.cell import Cell
except ImportError:
Cell = None
__author__ = "Jan Janssen"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "development"
__date__ = "Sep 1, 2018"
class AseJob(GenericInteractive):
    """pyiron job driven by an ASE calculator attached to ``self.structure``.

    Subclasses (or users) must provide the calculator, either by setting
    ``structure.calc`` directly or by overriding ``set_calculator()``.
    """

    def __init__(self, project, job_name):
        super(AseJob, self).__init__(project, job_name)
        self.__version__ = (
            None  # Reset the version number to the executable is set automatically
        )

    def to_hdf(self, hdf=None, group_name=None):
        # Persist generic job data plus the atomic structure.
        super(AseJob, self).to_hdf(hdf=hdf, group_name=group_name)
        self._structure_to_hdf()

    def from_hdf(self, hdf=None, group_name=None):
        # Restore generic job data plus the atomic structure.
        super(AseJob, self).from_hdf(hdf=hdf, group_name=group_name)
        self._structure_from_hdf()

    def run_static(self):
        # A static run is implemented as a one-shot interactive session;
        # the original run mode is restored afterwards.
        pre_run_mode = self.server.run_mode
        self.server.run_mode.interactive = True
        self.run_if_interactive()
        self.interactive_close()
        self.server.run_mode = pre_run_mode

    def run_if_interactive(self):
        self.METHOD_NAME()
        super(AseJob, self).run_if_interactive()
        self.interactive_collect()

    def METHOD_NAME(self):
        # Lazily attach a calculator the first time it is needed.
        if self.structure.calc is None:
            self.set_calculator()

    def set_calculator(self):
        raise NotImplementedError(
            "The set_calculator function is not implemented for this code. Either set "
            "an ase calculator to the structure attribute, or subclass this job and "
            "define set_calculator."
        )

    def interactive_structure_setter(self, structure):
        self.structure.calc.calculate(structure)

    def interactive_positions_setter(self, positions):
        self.structure.positions = positions

    def interactive_initialize_interface(self):
        self.status.running = True
        self.METHOD_NAME()
        # Point the calculator's label at this job's working directory.
        self.structure.calc.set_label(self.working_directory + "/")
        self._interactive_library = True

    def interactive_close(self):
        if self.interactive_is_activated():
            super(AseJob, self).interactive_close()
            # Promote collected interactive output to the generic output group.
            with self.project_hdf5.open("output") as h5:
                if "interactive" in h5.list_groups():
                    for key in h5["interactive"].list_nodes():
                        h5["generic/" + key] = h5["interactive/" + key]

    def interactive_forces_getter(self):
        return self.structure.get_forces()

    def interactive_pressures_getter(self):
        # ASE stress sign convention is the negative of pyiron's pressure.
        return -self.structure.get_stress(voigt=False)

    def interactive_energy_pot_getter(self):
        return self.structure.get_potential_energy()

    def interactive_energy_tot_getter(self):
        return self.structure.get_potential_energy()

    def interactive_indices_getter(self):
        # Map each atom's chemical symbol to an index in the sorted species list.
        element_lst = sorted(list(set(self.structure.get_chemical_symbols())))
        return np.array(
            [element_lst.index(el) for el in self.structure.get_chemical_symbols()]
        )

    def interactive_positions_getter(self):
        return self.structure.positions.copy()

    def interactive_steps_getter(self):
        return len(self.interactive_cache[list(self.interactive_cache.keys())[0]])

    def interactive_time_getter(self):
        return self.interactive_steps_getter()

    def interactive_volume_getter(self):
        return self.structure.get_volume()

    def interactive_cells_getter(self):
        return self.structure.cell.copy()

    def write_input(self):
        # No input files: everything is driven through the ASE calculator.
        pass

    def collect_output(self):
        # Output is collected interactively; nothing to parse from disk.
        pass

    def run_if_scheduler(self):
        self._create_working_directory()
        super(AseJob, self).run_if_scheduler()

    def interactive_index_organizer(self):
        # Merge previously seen species with the current structure's species
        # (preserving first-seen order) and push new indices if they changed.
        index_merge_lst = self._interactive_species_lst.tolist() + list(
            np.unique(self._structure_current.get_chemical_symbols())
        )
        el_lst = sorted(set(index_merge_lst), key=index_merge_lst.index)
        current_structure_index = [
            el_lst.index(el) for el in self._structure_current.get_chemical_symbols()
        ]
        previous_structure_index = [
            el_lst.index(el) for el in self._structure_previous.get_chemical_symbols()
        ]
        if not np.array_equal(
            np.array(current_structure_index),
            np.array(previous_structure_index),
        ):
            self._logger.debug("Generic library: indices changed!")
            self.interactive_indices_setter(self._structure_current.indices)

    def _get_structure(self, frame=-1, wrap_atoms=True):
        if (
            self.server.run_mode.interactive
            or self.server.run_mode.interactive_non_modal
        ):
            # Warning: We only copy symbols, positions and cell information - no tags.
            if self.output.indices is not None and len(self.output.indices) != 0:
                indices = self.output.indices[frame]
            else:
                return None
            if len(self._interactive_species_lst) == 0:
                el_lst = list(np.unique(self._structure_current.get_chemical_symbols()))
            else:
                el_lst = self._interactive_species_lst.tolist()
            if indices is not None:
                if wrap_atoms:
                    positions = self.output.positions[frame]
                else:
                    # Prefer stored unwrapped positions; otherwise reconstruct
                    # them from wrapped positions plus total displacements.
                    if len(self.output.unwrapped_positions) > max([frame, 0]):
                        positions = self.output.unwrapped_positions[frame]
                    else:
                        positions = (
                            self.output.positions[frame]
                            + self.output.total_displacements[frame]
                        )
                atoms = Atoms(
                    symbols=np.array([el_lst[el] for el in indices]),
                    positions=positions,
                    cell=self.output.cells[frame],
                    pbc=self.structure.pbc,
                )
                # Update indicies to match the indicies in the cache.
                atoms.set_array("indices", indices)
                return atoms
            else:
                return None
        else:
            if (
                self.get("output/generic/cells") is not None
                and len(self.get("output/generic/cells")) != 0
            ):
                return super()._get_structure(frame=frame, wrap_atoms=wrap_atoms)
            else:
                return None
7,056 | compute rnd reward | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks definitions for the BC agent."""
import dataclasses
import functools
from typing import Callable, Generic, Tuple, TypeVar
from acme import specs
from acme import types
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax.numpy as jnp
DirectRLNetworks = TypeVar('DirectRLNetworks')
@dataclasses.dataclass
class RNDNetworks(Generic[DirectRLNetworks]):
    """Container of RND networks factories."""
    # Fixed, randomly initialized target network (never trained).
    target: networks_lib.FeedForwardNetwork
    # Predictor network trained to match the target's outputs.
    predictor: networks_lib.FeedForwardNetwork
    # Function from predictor output, target output, and original reward to reward
    get_reward: Callable[
        [networks_lib.NetworkOutput, networks_lib.NetworkOutput, jnp.ndarray],
        jnp.ndarray]
    # Networks of the wrapped direct RL algorithm, if any.
    direct_rl_networks: DirectRLNetworks = None
# See Appendix A.2 of https://arxiv.org/pdf/1810.12894.pdf
def rnd_reward_fn(
    predictor_output: networks_lib.NetworkOutput,
    target_output: networks_lib.NetworkOutput,
    original_reward: jnp.ndarray,
    intrinsic_reward_coefficient: float = 1.0,
    extrinsic_reward_coefficient: float = 0.0,
) -> jnp.ndarray:
    """Combine the RND prediction-error bonus with the environment reward.

    The intrinsic term is the mean squared error between the predictor and
    target network outputs along the last axis.
    """
    prediction_error = jnp.square(predictor_output - target_output)
    intrinsic = jnp.mean(prediction_error, axis=-1)
    extrinsic = extrinsic_reward_coefficient * original_reward
    return intrinsic_reward_coefficient * intrinsic + extrinsic
def make_networks(
    spec: specs.EnvironmentSpec,
    direct_rl_networks: DirectRLNetworks,
    layer_sizes: Tuple[int, ...] = (256, 256),
    intrinsic_reward_coefficient: float = 1.0,
    extrinsic_reward_coefficient: float = 0.0,
) -> RNDNetworks[DirectRLNetworks]:
    """Creates networks used by the agent and returns RNDNetworks.

    Args:
      spec: Environment spec.
      direct_rl_networks: Networks used by a direct rl algorithm.
      layer_sizes: Layer sizes.
      intrinsic_reward_coefficient: Multiplier on intrinsic reward.
      extrinsic_reward_coefficient: Multiplier on extrinsic reward.

    Returns:
      The RND networks.
    """
    def _rnd_fn(obs, act):
        # RND does not use the action but other variants like RED do.
        del act
        network = networks_lib.LayerNormMLP(list(layer_sizes))
        return network(obs)

    # Target and predictor share the architecture but are initialized
    # (and transformed) independently.
    target = hk.without_apply_rng(hk.transform(_rnd_fn))
    predictor = hk.without_apply_rng(hk.transform(_rnd_fn))

    # Create dummy observations and actions to create network parameters.
    dummy_obs = utils.zeros_like(spec.observations)
    dummy_obs = utils.add_batch_dim(dummy_obs)

    return RNDNetworks(
        target=networks_lib.FeedForwardNetwork(
            lambda key: target.init(key, dummy_obs, ()), target.apply),
        predictor=networks_lib.FeedForwardNetwork(
            lambda key: predictor.init(key, dummy_obs, ()), predictor.apply),
        direct_rl_networks=direct_rl_networks,
        get_reward=functools.partial(
            rnd_reward_fn,
            intrinsic_reward_coefficient=intrinsic_reward_coefficient,
            extrinsic_reward_coefficient=extrinsic_reward_coefficient))
def METHOD_NAME(predictor_params: networks_lib.Params,
                target_params: networks_lib.Params,
                transitions: types.Transition,
                networks: RNDNetworks) -> jnp.ndarray:
    """Computes the intrinsic RND reward for a given transition.

    Args:
      predictor_params: Parameters of the predictor network.
      target_params: Parameters of the target network.
      transitions: The sample to compute rewards for.
      networks: RND networks

    Returns:
      The rewards as an ndarray.
    """
    target_output = networks.target.apply(target_params, transitions.observation,
                                          transitions.action)
    predictor_output = networks.predictor.apply(predictor_params,
                                                transitions.observation,
                                                transitions.action)
    # get_reward combines the prediction error with the original reward
    # (see rnd_reward_fn / RNDNetworks.get_reward).
    return networks.get_reward(predictor_output, target_output,
                               transitions.reward)
7,057 | get resample time | # -*- coding: utf-8 -*-
# Copyright 2016-2023 The pyXem developers
#
# This file is part of pyXem.
#
# pyXem is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyXem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyXem. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.ndimage as ndi
import scipy.signal as ss
def _register_drift_5d(data, shifts1, shifts2, order=1):
"""
Register 5D data set using affine transformation
Parameters
----------
data: np.array or dask.array
Input image in 5D array (time * rx * ry * kx * ky)
shifts1: np.array
1D array for shifts in 1st real space direction or x in hyperspy indexing.
shifts2: np.array
1D array for shifts in 2nd real space direction or y in hyperspy indexing.
order: int
The order of the spline interpolation for affine transformation. Default
is 1, has to be in the range 0-5
Returns
-------
data_t: np.array
5D array after translation according to shift vectors
"""
data_t = np.zeros_like(data)
time_size = len(shifts1)
for i in range(time_size):
data_t[i, :, :, :, :] = ndi.affine_transform(
data[i, :, :, :, :],
np.identity(4),
offset=(shifts1[i][0, 0, 0, 0], shifts2[i][0, 0, 0, 0], 0, 0),
order=order,
)
return data_t
def _register_drift_2d(data, shift1, shift2, order=1):
"""
Register 2D data set using affine transformation
Parameters
----------
data: np.array or dask.array
Input image in 2D array (ry * rx)
shift1: float
shifts in 1st real space direction or x in hyperspy indexing.
shift2: float
shifts in 2nd real space direction or y in hyperspy indexing.
order: int
The order of the spline interpolation for affine transformation. Default
is 1, has to be in the range 0-5
Returns
-------
data_t: np.array
2D array after translation according to shift vectors
"""
data_t = ndi.affine_transform(
data, np.identity(2), offset=(shift1, shift2), order=order
)
return data_t
def _g2_2d(data, normalization="split", k1bin=1, k2bin=1, tbin=1):
    """
    Calculate k resolved g2(k,t) from I(t,k_r,k_phi)

    Parameters
    ----------
    data: 3D np.array
        Time series for I(t,k_r,k_phi)
    normalization: string
        Normalization format for time autocorrelation, 'split' or 'self'
    k1bin: int
        Binning factor for k1 axis
    k2bin: int
        Binning factor for k2 axis
    tbin: int
        Binning factor for t axis

    Returns
    -------
    g2: 3D np.array
        Time correlation function g2(t,k_r,k_phi)
    """
    # Work with time as the last axis so the FFT correlation runs along it.
    data = data.T
    # Block-average (sum) by the requested binning factors; sizes must be
    # divisible by the bin factors or trailing samples are implicitly dropped
    # by the integer division.
    data = (
        data.reshape(
            (data.shape[0] // k1bin),
            k1bin,
            (data.shape[1] // k2bin),
            k2bin,
            (data.shape[2] // tbin),
            tbin,
        )
        .sum(5)
        .sum(3)
        .sum(1)
    )
    # Calculate autocorrelation along time axis
    autocorr = ss.fftconvolve(data, data[:, :, ::-1], mode="full", axes=[-1])
    # Running sums of the data used to normalize the correlation.
    norm = ss.fftconvolve(np.ones(data.shape), data[:, :, ::-1], mode="full", axes=[-1])
    if normalization == "self":
        # Compensate for the shrinking overlap window at larger lags.
        overlap_factor = np.expand_dims(
            np.linspace(data.shape[-1], 1, data.shape[-1]), axis=(0, 1)
        )
        norm_factor = norm[:, :, data.shape[-1] : 0 : -1] ** 2
        g2 = autocorr[:, :, data.shape[-1] - 1 :] / norm_factor * overlap_factor
    elif normalization == "split":
        overlap_factor = np.expand_dims(
            np.linspace(data.shape[-1], 1, data.shape[-1]), axis=(0, 1)
        )
        # Split normalization: product of the forward and reversed partial sums.
        norm_factor = (
            norm[:, :, data.shape[-1] - 1 :]
            * norm[:, :, ::-1][:, :, data.shape[-1] - 1 :]
        )
        g2 = autocorr[:, :, data.shape[-1] - 1 :] / norm_factor * overlap_factor
    else:
        raise ValueError(
            normalization
            + " not recognize, normalization must be chosen 'split' or 'self'"
        )
    # Transpose back to (t, k_r, k_phi) ordering.
    return g2.T
def METHOD_NAME(t_size, dt, t_rs_size):
    """
    Return log linear resampled time array based on time step and sampling points

    Parameters
    ----------
    t_size: int
        Size of original time array
    dt: float
        Time interval for original time array
    t_rs_size: int
        Size of resampled time array

    Returns
    -------
    t_rs: 1D np.array
        Resampled time array: ``t_rs_size`` logarithmically spaced points
        running from ``dt`` up to ``dt * (t_size - 1)``.
    """
    # Vectorized form of the original element-wise loop
    #   t_rs[i] = 10**(log10(dt) + log10(t_size - 1) * i / (t_rs_size - 1)).
    # np.logspace also handles t_rs_size == 1 gracefully (the explicit loop
    # divided by zero in that case).
    return np.logspace(np.log10(dt), np.log10(dt) + np.log10(t_size - 1), t_rs_size)
def _interpolate_g2_2d(g2, t_rs, dt):
"""
Interpolate k resolved g2(k,t) based on resampled time
Parameters
----------
g2: 3D np.array
Time correlation function g2(t,k_r,k_phi)
t_rs: 1D np.array
Resampled time axis array
dt: float
Time interval for original g2 function
Returns
-------
g2rs: 3D np.array
Resampled time correlation function g2(t,k_r,k_phi)
"""
t = np.round(t_rs / dt, 8)
g2_l = g2[np.floor(t).astype(int)]
g2_h = g2[np.ceil(t).astype(int)]
g2_rs = (
g2_l + (g2_h - g2_l) * (t - np.floor(t).astype(int))[:, np.newaxis, np.newaxis]
)
return g2_rs |
import math
import re
import textwrap
import operator
import numpy as np
import unittest
from numba.core.compiler import compile_isolated
from numba import jit
from numba.core import types
from numba.core.errors import TypingError
from numba.core.types.functions import _header_lead
from numba.tests.support import TestCase
def what():
    # Deliberately untyped placeholder; ``foo`` calls it so the tests can
    # assert numba's "Untyped global name 'what'" error.
    pass
def foo():
    # Compiling this triggers a TypingError because ``what`` is untyped.
    return what()
def bar(x):
    # Accesses a nonexistent attribute; compiled with an int32 argument to
    # trigger "Unknown attribute 'a' of type int32".
    return x.a
def issue_868(a):
    # Multiplies an array's shape tuple by a scalar; regression fixture for
    # numba issue #868 (crash in TimeDeltaMixOp type inference).
    return a.shape * 2
def METHOD_NAME(x):
    # Returns incompatible types (empty tuple vs. complex) on the two
    # branches, so numba cannot unify the return type.
    if x > 0:
        return ()
    else:
        return 1j
def bad_hypot_usage():
    # Single-argument call; numba's math.hypot only types (float64, float64),
    # so the error lists the known signatures (see test_bad_hypot_usage).
    return math.hypot(1)
def imprecise_list():
    # ``l`` stays list(undefined): nothing is ever appended, so numba
    # cannot infer the element type.
    l = []
    return len(l)
def using_imprecise_list():
    # Array built from an empty list literal; numba reports an
    # "Undecided type" error (see test_using_imprecise_list).
    a = np.array([])
    return a.astype(np.int32)
def unknown_module():
    # ``numpyz`` is deliberately undefined to exercise the
    # "name 'numpyz' is not defined" error.
    return numpyz.int32(0)
def nop(x, y, z):
    # Three-argument no-op target for the argument-typing error tests.
    pass
def array_setitem_invalid_cast():
    # setitem typing fails on the complex-to-float assignment below.
    arr = np.empty(1, dtype=np.float64)
    arr[0] = 1j  # invalid cast from complex to float
    return arr
class Foo(object):
    # Plain Python object with no numba type mapping; used to trigger
    # "Cannot determine Numba type" argument errors.
    def __repr__(self):
        return "<Foo instance>"
class TestTypingError(unittest.TestCase):
    """Checks that typing failures produce informative TypingError messages.

    Each test compiles one of the deliberately-broken fixture functions
    above and asserts on substrings of the resulting error text, so the
    assertions are sensitive to numba's exact message formatting.
    """
    def test_unknown_function(self):
        try:
            compile_isolated(foo, ())
        except TypingError as e:
            self.assertIn("Untyped global name 'what'", str(e))
        else:
            self.fail("Should raise error")
    def test_unknown_attrs(self):
        try:
            compile_isolated(bar, (types.int32,))
        except TypingError as e:
            self.assertIn("Unknown attribute 'a' of type int32", str(e))
        else:
            self.fail("Should raise error")
    def test_unknown_module(self):
        # This used to print "'object' object has no attribute 'int32'"
        with self.assertRaises(TypingError) as raises:
            compile_isolated(unknown_module, ())
        self.assertIn("name 'numpyz' is not defined", str(raises.exception))
    def test_issue_868(self):
        '''
        Summary: multiplying a scalar by a non-scalar would cause a crash in
        type inference because TimeDeltaMixOp always assumed at least one of
        its operands was an NPTimeDelta in its generic() method.
        '''
        with self.assertRaises(TypingError) as raises:
            compile_isolated(issue_868, (types.Array(types.int32, 1, 'C'),))
        expected = ((_header_lead + " Function(<built-in function mul>) found "
                     "for signature:\n \n >>> mul(UniTuple({} x 1), {})")
                    .format(str(types.intp), types.IntegerLiteral(2)))
        self.assertIn(expected, str(raises.exception))
        self.assertIn("During: typing of", str(raises.exception))
    def test_return_type_unification(self):
        with self.assertRaises(TypingError) as raises:
            compile_isolated(METHOD_NAME, (types.int32,))
        msg = ("Can't unify return type from the following types: Tuple(), "
               "complex128")
        self.assertIn(msg, str(raises.exception))
    def test_bad_hypot_usage(self):
        with self.assertRaises(TypingError) as raises:
            compile_isolated(bad_hypot_usage, ())
        errmsg = str(raises.exception)
        # Make sure it listed the known signatures.
        # This is sensitive to the formatting of the error message.
        self.assertIn(" * (float64, float64) -> float64", errmsg)
        # find the context lines
        ctx_lines = [x for x in errmsg.splitlines() if "During:" in x]
        # Check contextual msg
        self.assertTrue(re.search(r'.*During: resolving callee type: Function.*hypot', ctx_lines[0]))
        self.assertTrue(re.search(r'.*During: typing of call .*test_typingerror.py', ctx_lines[1]))
    def test_imprecise_list(self):
        """
        Type inference should catch that a list type's remain imprecise,
        instead of letting lowering fail.
        """
        with self.assertRaises(TypingError) as raises:
            compile_isolated(imprecise_list, ())
        errmsg = str(raises.exception)
        msg = ("Cannot infer the type of variable 'l', have imprecise type: "
               "list(undefined)")
        self.assertIn(msg, errmsg)
        # check help message has gone in
        self.assertIn("For Numba to be able to compile a list", errmsg)
    def test_using_imprecise_list(self):
        """
        Type inference should report informative error about untyped list.
        TODO: #2931
        """
        with self.assertRaises(TypingError) as raises:
            compile_isolated(using_imprecise_list, ())
        errmsg = str(raises.exception)
        self.assertIn("Undecided type", errmsg)
    def test_array_setitem_invalid_cast(self):
        with self.assertRaises(TypingError) as raises:
            compile_isolated(array_setitem_invalid_cast, ())
        errmsg = str(raises.exception)
        self.assertIn(
            _header_lead + " Function({})".format(operator.setitem),
            errmsg,
        )
        self.assertIn(
            "(array(float64, 1d, C), Literal[int](0), complex128)",
            errmsg,
        )
    def test_template_rejection_error_message_cascade(self):
        from numba import njit
        # enumerate() over a scalar is untypable; the rejection message and
        # its "During:" context lines are checked below.
        @njit
        def foo():
            z = 1
            for a, b in enumerate(z):
                pass
            return z
        with self.assertRaises(TypingError) as raises:
            foo()
        errmsg = str(raises.exception)
        expected = "No match."
        self.assertIn(expected, errmsg)
        ctx_lines = [x for x in errmsg.splitlines() if "During:" in x]
        search = [r'.*During: resolving callee type: Function.*enumerate',
                  r'.*During: typing of call .*test_typingerror.py']
        for i, x in enumerate(search):
            self.assertTrue(re.search(x, ctx_lines[i]))
class TestArgumentTypingError(unittest.TestCase):
    """
    Test diagnostics of typing errors caused by argument inference failure.
    """
    def test_unsupported_array_dtype(self):
        # See issue #1943
        cfunc = jit(nopython=True)(nop)
        a = np.ones(3)
        # Byte-swapped dtypes are not supported by numba.
        a = a.astype(a.dtype.newbyteorder())
        with self.assertRaises(TypingError) as raises:
            cfunc(1, a, a)
        expected = f"Unsupported array dtype: {a.dtype}"
        self.assertIn(expected, str(raises.exception))
    def test_unsupported_type(self):
        cfunc = jit(nopython=True)(nop)
        # Foo (defined above) has no numba type mapping.
        foo = Foo()
        with self.assertRaises(TypingError) as raises:
            cfunc(1, foo, 1)
        expected=re.compile(("This error may have been caused by the following "
                             "argument\(s\):\\n- argument 1:.*Cannot determine "
                             "Numba type of "
                             "<class \'numba.tests.test_typingerror.Foo\'>"))
        self.assertTrue(expected.search(str(raises.exception)) is not None)
class TestCallError(unittest.TestCase):
    def test_readonly_array(self):
        @jit("(f8[:],)", nopython=True)
        def inner(x):
            return x
        @jit(nopython=True)
        def outer():
            return inner(gvalues)
        # ``gvalues`` is captured as a closure constant, which numba freezes
        # read-only; passing it where a writable f8[:] is expected fails.
        gvalues = np.ones(10, dtype=np.float64)
        with self.assertRaises(TypingError) as raises:
            outer()
        got = str(raises.exception)
        pat = r"Invalid use of.*readonly array\(float64, 1d, C\)"
        self.assertIsNotNone(re.search(pat, got))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
"""
The serialization API supports the following datatypes: dict, list, str, bytes, int, float, and whatever is supported by group.serialize and group.deserialize
"""
from __future__ import print_function
import io, pickle
import json, zlib
from base64 import *
from charm.toolbox.bitstring import *
def serializeDict(Object, group):
    # Recursively serialize every value of a dict; keys are kept as-is.
    return {
        k: serializeObject(o, group)
        for k, o in Object.items()
    }
def serializeList(Object, group):
    # Recursively serialize every element of a list (or tuple — tuples are
    # mapped here by the serializers table, so they round-trip as lists).
    return [
        serializeObject(o, group)
        for o in Object
    ]
# Dispatch table mapping a Python type to its serializer.  Containers
# recurse; str/bytes are tagged with a "str:"/"bytes:" prefix so the
# original type can be recovered on deserialization; numbers pass through.
serializers = {
    dict: serializeDict,
    list: serializeList,
    tuple: serializeList,
    str: lambda obj, g: 'str:' + obj,
    bytes: lambda obj, g: 'bytes:' + obj.decode('UTF-8'),
    int: lambda obj, g: obj,
    float: lambda obj, g: obj,
}
def serializeObject(Objects, group):
    # Dispatch on the Python type; anything not in the serializers table is
    # assumed to be a group element and handed to group.serialize.
    assert hasattr(group, 'serialize'), "group does not have serialize method"
    try:
        serializer = serializers[type(Objects)]
    except KeyError:
        return group.serialize(Objects)
    return serializer(Objects, group)
def deserializeDict(Object, group):
    # Recursively deserialize every value of a dict; keys are kept as-is.
    return {
        k: deserializeObject(o, group)
        for k, o in Object.items()
    }
def deserializeList(Object, group):
    # Recursively deserialize every element of a list.
    return [
        deserializeObject(o, group)
        for o in Object
    ]
def METHOD_NAME(Object, group):
    # Tuples are serialized as lists; rebuild the tuple after deserializing.
    return tuple(deserializeList(Object, group))
def deserializeStr(object, group):
    # Strip the "str:"/"bytes:" tag added by the serializers table.
    # NOTE(review): falls through and returns None for an unrecognized tag
    # — confirm callers never hit that path.
    typ, obj = object.split(':', 1)
    if typ == 'str':
        return str(obj)
    elif typ == 'bytes':
        return getBytes(obj)
# Dispatch table for deserializeObject.  bytes payloads are assumed to be
# serialized group elements and go through group.deserialize; tagged strings
# are unwrapped by deserializeStr.
deserializers = {
    dict: deserializeDict,
    list: deserializeList,
    tuple: METHOD_NAME,
    str: deserializeStr,
    bytes: lambda obj, group: group.deserialize(obj)
}
def deserializeObject(Objects, group):
    # Inverse of serializeObject.  Unlike the serialize path, an unknown
    # type is returned unchanged (numbers arrive here as plain int/float).
    assert hasattr(group, 'deserialize'), "group does not have deserialize method"
    try:
        deserializer = deserializers[type(Objects)]
    except KeyError:
        return Objects
    return deserializer(Objects, group)
def pickleObject(Object):
    """Pickle ``Object`` and return the pickle base64-encoded.

    A dict argument is only accepted when every value is one of bytes,
    dict, list, str or int; any other value type aborts with ``None``
    after printing a debug message.
    """
    valid_types = [bytes, dict, list, str, int]
    # check that dictionary is all bytes (if not, return None)
    if isinstance(Object, dict):
        for value in Object.values():
            if type(value) not in valid_types:
                print("DEBUG: pickleObject Error!!! only bytes or dictionaries of bytes accepted.")
                print("invalid type =>", type(value))
                return None
    buffer = io.BytesIO()
    pickle.dump(Object, buffer, pickle.HIGHEST_PROTOCOL)
    encoded = b64encode(buffer.getvalue())
    buffer.close()
    return encoded
def unpickleObject(Object):
    """Base64-decode a blob produced by ``pickleObject`` and unpickle it.

    Returns ``None`` for non-str/bytes input or an empty payload.
    WARNING: unpickling can execute arbitrary code; only feed this
    trusted data.
    """
    if type(Object) not in (str, bytes):
        return None
    decoded = b64decode(Object)
    if type(decoded) == bytes and len(decoded) > 0:
        return pickle.loads(decoded)
    return None
# JSON does not support 'bytes' objects, so these from/to_json
# functions handle protecting the
def to_json(object):
    """``json.dumps`` ``default`` hook encoding bytes/tuple as tagged dicts.

    JSON has no bytes or tuple type, so both are wrapped in a dict with a
    ``__class__`` tag from which the original object can be rebuilt.

    Raises:
        TypeError: for any other unserializable type, as the ``default``
            protocol requires.  The original *returned* the TypeError
            instance, which json.dumps then tried to serialize, recursing
            until RecursionError.
    """
    if isinstance(object, bytes):
        return {'__class__': 'bytes', '__value__': list(object)}
    elif isinstance(object, tuple):
        return {'__class__': 'tuple', '__value__': list(object)}
    raise TypeError(repr(object) + " is not JSON serializable")
def from_json(json_object):
    """``json.loads`` object_hook: rebuild bytes/tuple from tagged dicts.

    Any dict without a recognized ``__class__`` tag is returned unchanged.
    """
    tag = json_object.get('__class__')
    if tag == 'bytes':
        return bytes(json_object['__value__'])
    if tag == 'tuple':
        return tuple(json_object['__value__'])
    return json_object
# Two new API calls to simplify serializing to a blob of bytes
# objectToBytes() and bytesToObject()
def objectToBytes(object, group):
    # Pipeline: group-aware serialize -> JSON (bytes/tuples via to_json)
    # -> zlib compress -> base64.  Inverse of bytesToObject.
    object_ser = serializeObject(object, group)
    #result = pickleObject(object_ser)
    result = getBytes(json.dumps(object_ser, default=to_json))
    return b64encode(zlib.compress(result))
def bytesToObject(byteobject, group):
    # Inverse of objectToBytes: base64 decode -> zlib decompress -> JSON
    # (rebuilding bytes/tuples via from_json) -> group-aware deserialize.
    #unwrap_object = unpickleObject(byteobject)
    decoded = bytes.decode(zlib.decompress(b64decode(byteobject)))
    unwrap_object = json.loads(decoded, object_hook=from_json)
    return deserializeObject(unwrap_object, group)
# Note: included for backwards compatibility with older versions.
# Will be removed completely in future versions.
def objectToBytesWithPickle(Object, group):
    # Legacy pickle-based path, kept for backwards compatibility only.
    object_ser = serializeObject(Object, group)
    return pickleObject(object_ser)
def bytesToObjectWithPickle(byteobject, group):
    # Legacy pickle-based path; unpickling untrusted data is unsafe, hence
    # the runtime warning below.
    print("SecurityWarning: do not unpickle data received from an untrusted source. Bad things WILL happen!")
    unwrap_object = unpickleObject(byteobject)
    return deserializeObject(unwrap_object, group)
"""
Using serialization tools with our cryptographic schemes
requires that the group object is initialized
data = { 'test1':b"hello", 'test2':b"world", }
dataBytes = objectToBytes(data, group)
dataRec = bytesToObject(dataBytes, group)
assert data == dataRec, 'Error during deserialization.'
""" |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetVirtualNetworkRuleResult',
'AwaitableGetVirtualNetworkRuleResult',
'get_virtual_network_rule',
'get_virtual_network_rule_output',
]
@pulumi.output_type
class GetVirtualNetworkRuleResult:
    """
    Data Lake Store virtual network rule information.
    """
    # Generated output type: fields are validated as strings and stored via
    # pulumi.set so the @pulumi.output_type machinery can retrieve them.
    def __init__(__self__, METHOD_NAME=None, name=None, subnet_id=None, type=None):
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if subnet_id and not isinstance(subnet_id, str):
            raise TypeError("Expected argument 'subnet_id' to be a str")
        pulumi.set(__self__, "subnet_id", subnet_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The resource identifier.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> str:
        """
        The resource identifier for the subnet.
        """
        return pulumi.get(self, "subnet_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetVirtualNetworkRuleResult(GetVirtualNetworkRuleResult):
    """Awaitable wrapper: ``await``-ing it yields the plain result object."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes this a generator, as __await__ requires.
        if False:
            yield self
        return GetVirtualNetworkRuleResult(
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            subnet_id=self.subnet_id,
            type=self.type)
def get_virtual_network_rule(account_name: Optional[str] = None,
                             resource_group_name: Optional[str] = None,
                             virtual_network_rule_name: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkRuleResult:
    """
    Gets the specified Data Lake Store virtual network rule.
    Azure REST API version: 2016-11-01.

    :param str account_name: The name of the Data Lake Store account.
    :param str resource_group_name: The name of the Azure resource group.
    :param str virtual_network_rule_name: The name of the virtual network rule to retrieve.
    :return: An awaitable result describing the virtual network rule.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkRuleName'] = virtual_network_rule_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:datalakestore:getVirtualNetworkRule', __args__, opts=opts, typ=GetVirtualNetworkRuleResult).value
    return AwaitableGetVirtualNetworkRuleResult(
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        subnet_id=pulumi.get(__ret__, 'subnet_id'),
        type=pulumi.get(__ret__, 'type'))
# Output-based variant: the decorator lifts get_virtual_network_rule so the
# arguments may be pulumi Outputs; the body is intentionally `...`.
@_utilities.lift_output_func(get_virtual_network_rule)
def get_virtual_network_rule_output(account_name: Optional[pulumi.Input[str]] = None,
                                    resource_group_name: Optional[pulumi.Input[str]] = None,
                                    virtual_network_rule_name: Optional[pulumi.Input[str]] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualNetworkRuleResult]:
    """
    Gets the specified Data Lake Store virtual network rule.
    Azure REST API version: 2016-11-01.

    :param str account_name: The name of the Data Lake Store account.
    :param str resource_group_name: The name of the Azure resource group.
    :param str virtual_network_rule_name: The name of the virtual network rule to retrieve.
    """
    ...
"""
This type stub file was generated by pyright.
"""
from supervisor.medusa import http_server
from supervisor.medusa.auth_handler import auth_handler
class NOT_DONE_YET:
    # Sentinel type returned by deferring producers when output is not ready.
    ...
class deferring_chunked_producer:
    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
    Here is a sample usage:
            request['Transfer-Encoding'] = 'chunked'
            request.push (
                    producers.chunked_producer (your_producer)
                    )
            request.done()
    """
    def __init__(self, producer, footers=...) -> None:
        ...
    def more(self): # -> Type[NOT_DONE_YET] | bytes:
        ...
class deferring_composite_producer:
    """combine a fifo of producers into one"""
    def __init__(self, producers) -> None:
        ...
    def more(self): # -> Type[NOT_DONE_YET] | Literal[b'']:
        ...
class deferring_globbing_producer:
    """
    'glob' the output from a producer into a particular buffer size.
    helps reduce the number of calls to send().  [this appears to
    gain about 30% performance on requests to a single channel]
    """
    def __init__(self, producer, buffer_size=...) -> None:
        ...
    def more(self): # -> Type[NOT_DONE_YET] | bytes:
        ...
class deferring_hooked_producer:
    """
    A producer that will call <function> when it empties,.
    with an argument of the number of bytes produced.  Useful
    for logging/instrumentation purposes.
    """
    def __init__(self, producer, function) -> None:
        ...
    def more(self): # -> Type[NOT_DONE_YET] | Literal[b'']:
        ...
class deferring_http_request(http_server.http_request):
    """ The medusa http_request class uses the default set of producers in
    medusa.producers.  We can't use these because they don't know anything
    about deferred responses, so we override various methods here.  This was
    added to support tail -f like behavior on the logtail handler """
    def done(self, *arg, **kw): # -> None:
        """ I didn't want to override this, but there's no way around
        it in order to support deferreds - CM
        finalize this transaction - send output to the http channel"""
        ...
    # NOTE(review): 'METHOD_NAME' looks like a templating artifact for the
    # request-logging override — confirm against supervisor.http.
    def METHOD_NAME(self, bytes): # -> None:
        """ We need to override this because UNIX domain sockets return
        an empty string for the addr rather than a (host, port) combination """
        ...
    def cgi_environment(self): # -> dict[Unknown, Unknown]:
        ...
    def get_server_url(self): # -> str:
        """ Functionality that medusa's http request doesn't have; set an
        attribute named 'server_url' on the request based on the Host: header
        """
        ...
class deferring_http_channel(http_server.http_channel):
    # Channel variant that understands deferred (NOT_DONE_YET) responses.
    ac_out_buffer_size = ...
    delay = ...
    last_writable_check = ...
    def writable(self, now=...): # -> bool:
        ...
    def refill_buffer(self): # -> None:
        """ Implement deferreds """
        ...
    def found_terminator(self): # -> None:
        """ We only override this to use 'deferring_http_request' class
        instead of the normal http_request class; it sucks to need to override
        this """
        ...
class supervisor_http_server(http_server.http_server):
    # Base HTTP server for supervisor; concrete AF_INET/AF_UNIX variants below.
    channel_class = deferring_http_channel
    ip = ...
    def prebind(self, sock, logger_object): # -> None:
        """ Override __init__ to do logger setup earlier so it can
        go to our logger object instead of stdout """
        ...
    def postbind(self): # -> None:
        ...
    def log_info(self, message, type=...): # -> None:
        ...
class supervisor_af_inet_http_server(supervisor_http_server):
    """ AF_INET version of supervisor HTTP server """
    def __init__(self, ip, port, logger_object) -> None:
        ...
class supervisor_af_unix_http_server(supervisor_http_server):
    """ AF_UNIX version of supervisor HTTP server """
    def __init__(self, socketname, sockchmod, sockchown, logger_object) -> None:
        ...
    def checkused(self, socketname): # -> bool:
        ...
class tail_f_producer:
    # Producer that follows a growing log file, 'tail -f' style.
    def __init__(self, request, filename, head) -> None:
        ...
    def __del__(self):  # -> None:
        ...
    # NOTE: pyright's generated return-type comment embedded a multi-line
    # Literal[...] string here, which split the comment across two physical
    # lines and left a stray `''']:` line that is not valid Python.
    # Collapsed onto one line; the literal is the "==> File truncated <=="
    # marker (with a trailing newline) emitted when the file shrinks.
    def more(self):  # -> bytes | Type[NOT_DONE_YET] | Literal['==> File truncated <==\n']
        ...
class logtail_handler:
    # Serves a tail -f view of an individual process log.
    IDENT = ...
    path = ...
    def __init__(self, supervisord) -> None:
        ...
    def match(self, request):
        ...
    def handle_request(self, request): # -> None:
        ...
class mainlogtail_handler:
    # Serves a tail -f view of the main supervisord activity log.
    IDENT = ...
    path = ...
    def __init__(self, supervisord) -> None:
        ...
    def match(self, request):
        ...
    def handle_request(self, request): # -> None:
        ...
# Factory building the configured HTTP server instances for supervisord.
def make_http_servers(options, supervisord): # -> list[Unknown]:
    ...
class LogWrapper:
    '''Receives log messages from the Medusa servers and forwards
    them to the Supervisor logger'''
    def __init__(self, logger) -> None:
        ...
    def log(self, msg): # -> None:
        '''Medusa servers call this method.  There is no log level so
        we have to sniff the message.  We want "Server Error" messages
        from medusa.http_server logged as errors at least.'''
        ...
class encrypted_dictionary_authorizer:
    # Authorizer backed by a dict of (hashed) credentials.
    def __init__(self, dict) -> None:
        ...
    def authorize(self, auth_info): # -> Literal[False]:
        ...
class supervisor_auth_handler(auth_handler):
    # Medusa auth handler wired to the encrypted dictionary authorizer.
    def __init__(self, dict, handler, realm=...) -> None:
        ...
"""
Bookmarks service.
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from edx_django_utils.cache import DEFAULT_REQUEST_CACHE
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from . import DEFAULT_FIELDS, api_impl as api
log = logging.getLogger(__name__)
# Per-request cache key: formatted with (user id, course key) below.
CACHE_KEY_TEMPLATE = "bookmarks.list.{}.{}"
class BookmarksService:
    """
    A service that provides access to the bookmarks API.

    When bookmarks() or is_bookmarked() is called for the
    first time, the service fetches and caches all the bookmarks
    of the user for the relevant course. So multiple calls to
    get bookmark status during a request (for, example when
    rendering courseware and getting bookmarks status for search
    results) will not cause repeated queries to the database.
    """
    def __init__(self, user, **kwargs):
        super().__init__(**kwargs)
        self._user = user
    def _bookmarks_cache(self, course_key, fetch=False):
        """
        Return the user's bookmarks cache for a particular course.

        Arguments:
            course_key (CourseKey): course_key of the course whose bookmarks cache should be returned.
            fetch (Bool): if the bookmarks should be fetched and cached if they already aren't.

        Returns a list of bookmark dicts, or None on a cache miss when
        fetch is False (callers below check ``is not None``).
        """
        store = modulestore()
        course_key = store.fill_in_run(course_key)
        # A course key that cannot be resolved to a concrete run has no
        # bookmarks to look up.
        if course_key.run is None:
            return []
        cache_key = CACHE_KEY_TEMPLATE.format(self._user.id, course_key)
        bookmarks_cache = DEFAULT_REQUEST_CACHE.data.get(cache_key, None)
        if bookmarks_cache is None and fetch is True:
            bookmarks_cache = api.get_bookmarks(
                self._user, course_key=course_key, fields=DEFAULT_FIELDS
            )
            DEFAULT_REQUEST_CACHE.data[cache_key] = bookmarks_cache
        return bookmarks_cache
    def bookmarks(self, course_key):
        """
        Return a list of bookmarks for the course for the current user.

        Arguments:
            course_key: CourseKey of the course for which to retrieve the user's bookmarks for.

        Returns:
            list of dict:
        """
        return self._bookmarks_cache(course_key, fetch=True)
    def is_bookmarked(self, usage_key):
        """
        Return whether the block has been bookmarked by the user.

        Arguments:
            usage_key: UsageKey of the block.

        Returns:
            Bool
        """
        usage_id = str(usage_key)
        bookmarks_cache = self._bookmarks_cache(usage_key.course_key, fetch=True)
        # Linear scan of the (per-request cached) bookmark list.
        for bookmark in bookmarks_cache:
            if bookmark['usage_id'] == usage_id:
                return True
        return False
    def METHOD_NAME(self, usage_key):
        """
        Adds a bookmark for the block.

        Arguments:
            usage_key: UsageKey of the block.

        Returns:
            Bool indicating whether the bookmark was added.
        """
        try:
            bookmark = api.create_bookmark(user=self._user, usage_key=usage_key)
        except ItemNotFoundError:
            log.error('Block with usage_id: %s not found.', usage_key)
            return False
        # Keep the per-request cache in sync so later reads see the new
        # bookmark without a refetch (cache may be None if never fetched).
        bookmarks_cache = self._bookmarks_cache(usage_key.course_key)
        if bookmarks_cache is not None:
            bookmarks_cache.append(bookmark)
        return True
    def unset_bookmarked(self, usage_key):
        """
        Removes the bookmark for the block.

        Arguments:
            usage_key: UsageKey of the block.

        Returns:
            Bool indicating whether the bookmark was removed.
        """
        try:
            api.delete_bookmark(self._user, usage_key=usage_key)
        except ObjectDoesNotExist:
            log.error('Bookmark with usage_id: %s does not exist.', usage_key)
            return False
        # Mirror the deletion in the per-request cache, if one exists.
        bookmarks_cache = self._bookmarks_cache(usage_key.course_key)
        if bookmarks_cache is not None:
            deleted_bookmark_index = None
            usage_id = str(usage_key)
            for index, bookmark in enumerate(bookmarks_cache):
                if bookmark['usage_id'] == usage_id:
                    deleted_bookmark_index = index
                    break
            if deleted_bookmark_index is not None:
                bookmarks_cache.pop(deleted_bookmark_index)
        return True
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from flash.core.data.io.input import DataKeys, Input
from flash.core.data.utilities.loading import IMG_EXTENSIONS, NP_EXTENSIONS, load_image
from flash.core.data.utilities.paths import PATH_TYPE, filter_valid_files
from flash.core.data.utilities.samples import to_samples
from flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities
from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, lazy_import
from flash.image.data import ImageFilesInput, ImageNumpyInput, ImageTensorInput
from flash.image.segmentation.output import SegmentationLabelsOutput
# Import fiftyone lazily when available; otherwise leave placeholders so the
# FiftyOne-specific input class below can still be defined (it is only
# exercised when fiftyone is installed).
if _FIFTYONE_AVAILABLE:
    fo = lazy_import("fiftyone")
    SampleCollection = "fiftyone.core.collections.SampleCollection"
else:
    fo = None
    SampleCollection = None
class SemanticSegmentationInput(Input):
    # Set by load_labels_map below.
    num_classes: int
    labels_map: Dict[int, Tuple[int, int, int]]
    def load_labels_map(
        self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None
    ) -> None:
        # Record the class count and class->RGB colour map.  When only
        # num_classes is given, a random colour map is generated; when
        # neither argument is given, nothing is stored.
        if num_classes is not None:
            self.num_classes = num_classes
            labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes)
        if labels_map is not None:
            self.labels_map = labels_map
class SemanticSegmentationTensorInput(SemanticSegmentationInput, ImageTensorInput):
    def load_data(
        self,
        tensor: Any,
        masks: Any = None,
        num_classes: Optional[int] = None,
        labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
    ) -> List[Dict[str, Any]]:
        # Pair each tensor with its mask (if any) into per-sample dicts.
        self.load_labels_map(num_classes, labels_map)
        return to_samples(tensor, masks)
    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        # Mask targets arrive as tensors; downstream expects numpy arrays.
        if DataKeys.TARGET in sample:
            sample[DataKeys.TARGET] = sample[DataKeys.TARGET].numpy()
        return super().load_sample(sample)
class SemanticSegmentationNumpyInput(SemanticSegmentationInput, ImageNumpyInput):
    def load_data(
        self,
        array: Any,
        masks: Any = None,
        num_classes: Optional[int] = None,
        labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
    ) -> List[Dict[str, Any]]:
        # Pair each array with its mask (if any) into per-sample dicts.
        self.load_labels_map(num_classes, labels_map)
        return to_samples(array, masks)
class SemanticSegmentationFilesInput(SemanticSegmentationInput, ImageFilesInput):
    def load_data(
        self,
        files: Union[PATH_TYPE, List[PATH_TYPE]],
        mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,
        num_classes: Optional[int] = None,
        labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
    ) -> List[Dict[str, Any]]:
        self.load_labels_map(num_classes, labels_map)
        # Drop files with unsupported extensions; when masks are given the
        # two lists are filtered in lockstep so pairs stay aligned.
        if mask_files is None:
            files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
        else:
            files, mask_files = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
        return to_samples(files, mask_files)
    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        # Load the mask image and keep only its first channel (HxW class-index
        # map) after moving channels first.
        if DataKeys.TARGET in sample:
            sample[DataKeys.TARGET] = np.array(load_image(sample[DataKeys.TARGET])).transpose((2, 0, 1))[0, :, :]
        return super().load_sample(sample)
class SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):
    def load_data(
        self,
        folder: PATH_TYPE,
        mask_folder: Optional[PATH_TYPE] = None,
        num_classes: Optional[int] = None,
        labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
    ) -> List[Dict[str, Any]]:
        self.load_labels_map(num_classes, labels_map)
        files = os.listdir(folder)
        files.sort()
        if mask_folder is not None:
            # Masks are paired with images by basename (extension ignored);
            # every image must have a matching mask.
            mask_files = {os.path.splitext(file)[0]: file for file in os.listdir(mask_folder)}
            file_names = [os.path.splitext(file)[0] for file in files]
            if len(set(file_names) - mask_files.keys()) != 0:
                raise ValueError(
                    f"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. All input "
                    f"files must have a corresponding mask file with the same name."
                )
            files = [os.path.join(folder, file) for file in files]
            mask_files = [os.path.join(mask_folder, mask_files[file_name]) for file_name in file_names]
            return super().load_data(files, mask_files)
        return super().load_data([os.path.join(folder, file) for file in files])
class SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):
    # Name of the FiftyOne field holding the segmentation labels.
    label_field: str
    def load_data(
        self,
        sample_collection: SampleCollection,
        label_field: str = "ground_truth",
        num_classes: Optional[int] = None,
        labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,
    ) -> List[Dict[str, Any]]:
        self.load_labels_map(num_classes, labels_map)
        self.label_field = label_field
        label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)
        label_utilities.validate(sample_collection)
        # Only the dataset name is stored; the dataset itself is re-loaded
        # lazily in load_sample.
        self._fo_dataset_name = sample_collection.name
        return to_samples(sample_collection.values("filepath"))
    def METHOD_NAME(
        self,
        sample_collection: SampleCollection,
    ) -> List[Dict[str, Any]]:
        # Prediction path: filepaths only, no labels needed.
        return to_samples(sample_collection.values("filepath"))
    def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        filepath = sample[DataKeys.INPUT]
        sample = super().load_sample(sample)
        # Outside prediction, attach the mask from the FiftyOne dataset.
        if not self.predicting:
            fo_dataset = fo.load_dataset(self._fo_dataset_name)
            fo_sample = fo_dataset[filepath]
            sample[DataKeys.TARGET] = fo_sample[self.label_field].mask
        return sample
from ansys.dpf.gate.generated import operator_config_abstract_api
from ansys.dpf.gate import errors
# -------------------------------------------------------------------------------
# OperatorConfig
# -------------------------------------------------------------------------------
def _get_stub(server):
    # Resolve the cached gRPC stub for the operator-config service.
    return server.get_stub(OperatorConfigGRPCAPI.STUBNAME)
@errors.protect_grpc_class
class OperatorConfigGRPCAPI(operator_config_abstract_api.OperatorConfigAbstractAPI):
STUBNAME = "operator_config_stub"
@staticmethod
def init_operator_config_environment(obj):
from ansys.grpc.dpf import operator_config_pb2_grpc
obj._server.create_stub_if_necessary(OperatorConfigGRPCAPI.STUBNAME,
operator_config_pb2_grpc.OperatorConfigServiceStub)
obj._deleter_func = (_get_stub(obj._server).Delete, lambda obj: obj._internal_obj)
@staticmethod
def operator_config_default_new_on_client(client, operator_name):
from ansys.grpc.dpf import operator_config_pb2
request = operator_config_pb2.CreateRequest()
request.operator_name.operator_name = operator_name
return _get_stub(client).Create(request)
@staticmethod
def operator_config_empty_new_on_client(client):
from ansys.grpc.dpf import operator_config_pb2
request = operator_config_pb2.CreateRequest()
return _get_stub(client).Create(request)
@staticmethod
def operator_config_get_int(config, option):
tmp = OperatorConfigGRPCAPI.get_list(config)
return int(tmp.options[option].value_str)
@staticmethod
def operator_config_get_double(config, option):
tmp = OperatorConfigGRPCAPI.get_list(config)
return float(tmp.options[option].value_str)
@staticmethod
def operator_config_get_bool(config, option):
tmp = OperatorConfigGRPCAPI.get_list(config)
return bool(tmp.options[option].value_str)
@staticmethod
def update_init(config, option_name):
from ansys.grpc.dpf import operator_config_pb2
request = operator_config_pb2.UpdateRequest()
request.config.CopyFrom(config._internal_obj)
option_request = operator_config_pb2.ConfigOption()
option_request.option_name = option_name
return request, option_request
@staticmethod
def update(config, request, option_request):
request.options.extend([option_request])
_get_stub(config._server).Update(request)
@staticmethod
def operator_config_set_int(config, option_name, value):
request, option_request = OperatorConfigGRPCAPI.update_init(config, option_name)
option_request.int = value
OperatorConfigGRPCAPI.update(config, request, option_request)
@staticmethod
def operator_config_set_double(config, option_name, value):
request, option_request = OperatorConfigGRPCAPI.update_init(config, option_name)
option_request.double = value
OperatorConfigGRPCAPI.update(config, request, option_request)
@staticmethod
def METHOD_NAME(config, option_name, value):
    """Set *option_name* to the boolean *value* on the server."""
    req, opt = OperatorConfigGRPCAPI.update_init(config, option_name)
    opt.bool = value
    OperatorConfigGRPCAPI.update(config, req, opt)
@staticmethod
def get_list(config):
    """Fetch the full option listing for *config* from its server."""
    stub = _get_stub(config._server)
    return stub.List(config._internal_obj)
@staticmethod
def operator_config_get_num_config(config):
    """Return how many options the config currently defines."""
    return len(OperatorConfigGRPCAPI.get_list(config).options)
@staticmethod
def operator_config_get_config_option_name(config, index):
    """Return the name of the option at position *index*."""
    listing = OperatorConfigGRPCAPI.get_list(config)
    return listing.options[index].option_name
@staticmethod
def operator_config_get_config_option_printable_value(config, index):
    """Return the raw string value of the option at position *index*."""
    listing = OperatorConfigGRPCAPI.get_list(config)
    return listing.options[index].value_str
@staticmethod
def operator_config_has_option(config, option):
    """Return True if *option* is present in the config's option list."""
    listing = OperatorConfigGRPCAPI.get_list(config)
    return option in listing.options
7,065 | test register random within nested function scope | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import gc
import random
import pytest
from hypothesis import (
Phase,
core,
find,
given,
register_random,
settings,
strategies as st,
)
from hypothesis.errors import HypothesisWarning, InvalidArgument
from hypothesis.internal import entropy
from hypothesis.internal.compat import GRAALPY, PYPY
from hypothesis.internal.entropy import deterministic_PRNG
def gc_collect():
    """Force a GC pass on implementations without immediate refcounting.

    CPython collects acyclic garbage as soon as the last reference dies,
    which breaks weak references deterministically.  PyPy and GraalPy may
    defer collection, so tests that count surviving weak registrations
    call this first to make the counts deterministic.
    """
    if not (PYPY or GRAALPY):
        return
    gc.collect()
def test_can_seed_random():
    """The seed drawn from st.random_module() appears in failure notes."""
    @settings(phases=(Phase.generate, Phase.shrink))
    @given(st.random_module())
    def test(r):
        raise AssertionError
    with pytest.raises(AssertionError) as err:
        test()
    # Shrinking drives the drawn seed down to 0, which the notes must report.
    assert "RandomSeeder(0)" in "\n".join(err.value.__notes__)
@given(st.random_module(), st.random_module())
def test_seed_random_twice(r, r2):
    """Two random_module() draws in one example produce equal reprs."""
    assert repr(r) == repr(r2)
@given(st.random_module())
def test_does_not_fail_health_check_if_randomness_is_used(r):
    """Consuming global randomness under random_module() must not trip health checks."""
    random.getrandbits(128)
def test_cannot_register_non_Random():
    """register_random rejects objects that are not random.Random instances."""
    with pytest.raises(InvalidArgument):
        register_random("not a Random instance")
@pytest.mark.filterwarnings(
    "ignore:It looks like `register_random` was passed an object that could be garbage collected"
)
def test_registering_a_Random_is_idempotent():
    """Registering the same Random twice adds exactly one managed entry."""
    gc_collect()
    n_registered = len(entropy.RANDOMS_TO_MANAGE)
    r = random.Random()
    register_random(r)
    register_random(r)
    assert len(entropy.RANDOMS_TO_MANAGE) == n_registered + 1
    # Entries are held weakly: dropping the last strong reference removes one.
    del r
    gc_collect()
    assert len(entropy.RANDOMS_TO_MANAGE) == n_registered
def test_manages_registered_Random_instance():
    """A registered Random is reset per example and restored after the run."""
    r = random.Random()
    register_random(r)
    state = r.getstate()
    result = []
    @given(st.integers())
    def inner(x):
        # Every example must observe the same first draw from the managed PRNG.
        v = r.random()
        if result:
            assert v == result[0]
        else:
            result.append(v)
    inner()
    # After the run the registered instance is back in its original state.
    assert state == r.getstate()
def test_registered_Random_is_seeded_by_random_module_strategy():
    """random_module() reseeds registered instances, yielding varied draws, then restores state."""
    r = random.Random()
    register_random(r)
    state = r.getstate()
    results = set()
    count = [0]
    @given(st.integers())
    def inner(x):
        results.add(r.random())
        count[0] += 1
    inner()
    # Nearly every example should produce a distinct draw.
    assert count[0] > len(results) * 0.9, "too few unique random numbers"
    assert state == r.getstate()
@given(st.random_module())
def test_will_actually_use_the_random_seed(rnd):
    """Re-seeding with the drawn seed reproduces the global module's outputs."""
    a = random.randint(0, 100)
    b = random.randint(0, 100)
    random.seed(rnd.seed)
    assert a == random.randint(0, 100)
    assert b == random.randint(0, 100)
def test_given_does_not_pollute_state():
    """Running a @given test restores the global random state but consumes Hypothesis' own PRNG."""
    with deterministic_PRNG():
        @given(st.random_module())
        def test(r):
            pass
        test()
        state_a = random.getstate()
        state_a2 = core._hypothesis_global_random.getstate()
        test()
        state_b = random.getstate()
        state_b2 = core._hypothesis_global_random.getstate()
        # Global module state is restored; the internal PRNG advances.
        assert state_a == state_b
        assert state_a2 != state_b2
def test_find_does_not_pollute_state():
    """find() restores the global random state but consumes Hypothesis' own PRNG."""
    with deterministic_PRNG():
        find(st.random_module(), lambda r: True)
        state_a = random.getstate()
        state_a2 = core._hypothesis_global_random.getstate()
        find(st.random_module(), lambda r: True)
        state_b = random.getstate()
        state_b2 = core._hypothesis_global_random.getstate()
        # Global module state is restored; the internal PRNG advances.
        assert state_a == state_b
        assert state_a2 != state_b2
@pytest.mark.filterwarnings(
    "ignore:It looks like `register_random` was passed an object that could be garbage collected"
)
def test_evil_prng_registration_nonsense():
    """Registering, collecting, and re-registering PRNGs mid-context only resets snapshotted state."""
    gc_collect()
    n_registered = len(entropy.RANDOMS_TO_MANAGE)
    r1, r2, r3 = random.Random(1), random.Random(2), random.Random(3)
    s2 = r2.getstate()
    # We're going to be totally evil here: register two randoms, then
    # drop one and add another, and finally check that we reset only
    # the states that we collected before we started
    register_random(r1)
    k = max(entropy.RANDOMS_TO_MANAGE)  # get a handle to check if r1 still exists
    register_random(r2)
    assert len(entropy.RANDOMS_TO_MANAGE) == n_registered + 2
    with deterministic_PRNG(0):
        del r1
        gc_collect()
        assert k not in entropy.RANDOMS_TO_MANAGE, "r1 has been garbage-collected"
        assert len(entropy.RANDOMS_TO_MANAGE) == n_registered + 1
        r2.seed(4)
        register_random(r3)
        r3.seed(4)
        s4 = r3.getstate()
    # Implicit check, no exception was raised in __exit__
    assert r2.getstate() == s2, "reset previously registered random state"
    assert r3.getstate() == s4, "retained state when registered within the context"
@pytest.mark.skipif(
    PYPY, reason="We can't guard against bad no-reference patterns in pypy."
)
def test_passing_unreferenced_instance_raises():
    """A Random with no other strong reference cannot be registered."""
    with pytest.raises(ReferenceError):
        register_random(random.Random(0))
@pytest.mark.skipif(
    PYPY, reason="We can't guard against bad no-reference patterns in pypy."
)
def test_passing_unreferenced_instance_within_function_scope_raises():
    """The no-strong-reference check also fires inside a nested function frame."""
    def f():
        register_random(random.Random(0))
    with pytest.raises(ReferenceError):
        f()
@pytest.mark.skipif(
    PYPY, reason="We can't guard against bad no-reference patterns in pypy."
)
def test_passing_referenced_instance_within_function_scope_warns():
    """Registering a Random held only by a local variable warns about collectability."""
    def f():
        r = random.Random(0)
        register_random(r)
    with pytest.warns(
        HypothesisWarning,
        match="It looks like `register_random` was passed an object that could be"
        " garbage collected",
    ):
        f()
@pytest.mark.filterwarnings(
    "ignore:It looks like `register_random` was passed an object that could be garbage collected"
)
@pytest.mark.skipif(
    PYPY, reason="We can't guard against bad no-reference patterns in pypy."
)
def METHOD_NAME():
    """A Random registered inside a function is dropped once its frame dies."""
    n_registered = len(entropy.RANDOMS_TO_MANAGE)
    def f():
        r = random.Random()
        register_random(r)
        assert len(entropy.RANDOMS_TO_MANAGE) == n_registered + 1
    f()
    # The weak entry disappears after the only strong reference (local `r`) dies.
    gc_collect()
    assert len(entropy.RANDOMS_TO_MANAGE) == n_registered
7,066 | check errors | from __future__ import print_function
from six.moves import queue as Queue
from six.moves import range
from functools import partial
import threading
import time
from tqdm import tqdm
# Default worker-thread count for callers that do not specify one.
DEFAULT_THREADS = 20
class ThreadedQueue(object):
    """Grant threaded task processing to any derived class."""
    def __init__(self, n_threads, queue_size=0, progress=None):
        self._n_threads = n_threads
        self._queue = Queue.Queue(maxsize=queue_size) # 0 = infinite size
        self._error_queue = Queue.Queue(maxsize=queue_size)
        self._threads = ()
        self._terminate = threading.Event()
        # Protects `processed`, which is incremented from worker threads.
        self._processed_lock = threading.Lock()
        self.processed = 0
        self._inserted = 0
        self.with_progress = progress
        self.start_threads(n_threads)
    @property
    def pending(self):
        """Number of tasks still waiting in the queue."""
        return self._queue.qsize()
    def put(self, fn):
        """
        Enqueue a task function for processing.
        Requires:
        fn: a function object that takes one argument
        that is the interface associated with each
        thread.
        e.g. def download(api):
        results.append(api.download())
        self.put(download)
        Returns: self
        """
        self._inserted += 1
        self._queue.put(fn, block=True)
        return self
    def start_threads(self, n_threads):
        """
        Terminate existing threads and create a
        new set if the thread number doesn't match
        the desired number.
        Required:
        n_threads: (int) number of threads to spawn
        Returns: self
        """
        if n_threads == len(self._threads):
            return self
        # Terminate all previous tasks with the existing
        # event object, then create a new one for the next
        # generation of threads. The old object will hang
        # around in memory until the threads actually terminate
        # after another iteration.
        self._terminate.set()
        self._terminate = threading.Event()
        threads = []
        for _ in range(n_threads):
            worker = threading.Thread(
                target=self._consume_queue,
                args=(self._terminate,)
            )
            # Daemon threads do not block interpreter shutdown.
            worker.daemon = True
            worker.start()
            threads.append(worker)
        self._threads = tuple(threads)
        return self
    def are_threads_alive(self):
        """Returns: boolean indicating if any threads are alive"""
        return any(map(lambda t: t.is_alive(), self._threads))
    def kill_threads(self):
        """Kill all threads."""
        self._terminate.set()
        while self.are_threads_alive():
            time.sleep(0.001)
        self._threads = ()
        return self
    def _initialize_interface(self):
        """
        This is used to initialize the interfaces used in each thread.
        You should reimplement it in subclasses. For example, return
        an API object, file handle, or network connection. The functions
        you pass into the self._queue will get it as the first parameter.
        e.g. an implementation in a subclass.
        def _initialize_interface(self):
        return HTTPConnection()
        def other_function(self):
        def threaded_file_read(connection):
        # do stuff
        self._queue.put(threaded_file_handle)
        Returns: Interface object used in threads
        """
        return None
    def _close_interface(self, interface):
        """Allows derived classes to clean up after a thread finishes."""
        pass
    def _consume_queue(self, terminate_evt):
        """
        This is the main thread function that consumes functions that are
        inside the _queue object. To use, execute self._queue(fn), where fn
        is a function that performs some kind of network IO or otherwise
        benefits from threading and is independent.
        terminate_evt is automatically passed in on thread creation and
        is a common event for this generation of threads. The threads
        will terminate when the event is set and the queue burns down.
        Returns: void
        """
        interface = self._initialize_interface()
        while not terminate_evt.is_set():
            try:
                fn = self._queue.get(block=True, timeout=0.01)
            except Queue.Empty:
                continue # periodically check if the thread is supposed to die
            fn = partial(fn, interface)
            try:
                self._consume_queue_execution(fn)
            except Exception as err:
                # Errors are stashed for the main thread to re-raise in wait().
                self._error_queue.put(err)
        self._close_interface(interface)
    def _consume_queue_execution(self, fn):
        """
        The actual task execution in each thread. This
        is broken out so that exceptions can be caught
        in derived classes and allow them to manipulate
        the errant task, e.g. putting it back in the queue
        for a retry.
        Every task processed will automatically be marked complete.
        Required:
        [0] fn: A curried function that includes the interface
        as its first argument.
        Returns: void
        """
        # `finally` fires after all success or exceptions
        # exceptions are handled in derived classes
        # and uncaught ones are caught as a last resort
        # in _consume_queue to be raised on the main thread.
        try:
            fn()
        finally:
            with self._processed_lock:
                self.processed += 1
            self._queue.task_done()
    def METHOD_NAME(self):
        """Re-raise the first worker-thread error, if any, after killing all threads."""
        try:
            err = self._error_queue.get(block=False)
            self._error_queue.task_done()
            self.kill_threads()
            raise err
        except Queue.Empty:
            pass
    def wait(self, progress=None):
        """
        Allow background threads to process until the
        task queue is empty. If there are no threads,
        in theory the queue should always be empty
        as processing happens immediately on the main thread.
        Optional:
        progress: (bool or str) show a tqdm progress bar optionally
        with a description if a string is provided
        Returns: self (for chaining)
        Raises: The first exception received from threads
        """
        if not len(self._threads):
            return self
        desc = None
        if type(progress) is str:
            desc = progress
        last = self._inserted
        with tqdm(total=self._inserted, disable=(not progress), desc=desc) as pbar:
            # Allow queue to consume, but check up on
            # progress and errors every tenth of a second
            while not self._queue.empty():
                size = self._queue.qsize()
                delta = last - size
                if delta != 0: # We should crash on negative numbers
                    pbar.update(delta)
                    last = size
                self.METHOD_NAME()
                time.sleep(0.015)
            # Wait until all tasks in the queue are
            # fully processed. queue.task_done must be
            # called for each task.
            self._queue.join()
            self.METHOD_NAME()
            final = self._inserted - last
            if final:
                pbar.update(final)
        if self._queue.empty():
            self._inserted = 0
        return self
    def __del__(self):
        self.wait() # if no threads were set the queue is always empty
        self.kill_threads()
    def __enter__(self):
        """Start the worker pool; refuse a zero-thread base-class pool that could never drain."""
        if self.__class__ is ThreadedQueue and self._n_threads == 0:
            raise ValueError("Using 0 threads in base class ThreadedQueue with statement will never exit.")
        self.start_threads(self._n_threads)
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        """Drain the queue (re-raising any worker error), then stop all threads."""
        self.wait(progress=self.with_progress)
        self.kill_threads()
7,067 | test trans rdm1 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
from pyscf import fci
from pyscf.fci import fci_slow
def setUpModule():
    """Build the shared H6/6-31g RHF reference plus two random symmetric CI vectors."""
    global mol, m, h1e, g2e, ci0, ci1
    global norb, nelec
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 0.,-0.5 ,-0. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
    ]
    mol.basis = {'H': '6-31g'}
    mol.build()
    m = scf.RHF(mol)
    m.conv_tol = 1e-15
    m.conv_tol_grad = 1e-7
    ehf = m.scf()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # MO-basis one- and two-electron integrals, rounded for reproducibility.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff)).round(9)
    g2e = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False).round(9)
    na = fci.cistring.num_strings(norb, nelec//2)
    # Fixed seed so the numeric reference values below stay stable.
    numpy.random.seed(15)
    ci0 = numpy.random.random((na,na))
    ci0 = ci0 + ci0.T
    ci0 /= numpy.linalg.norm(ci0)
    ci1 = numpy.random.random((na,na))
    ci1 = ci1 + ci1.T
    ci1 /= numpy.linalg.norm(ci1)
def tearDownModule():
    """Release the module-level fixtures built in setUpModule."""
    global mol, m, h1e, g2e, ci0, ci1
    del mol, m, h1e, g2e, ci0, ci1
class KnownValues(unittest.TestCase):
    """Regression tests for fci.direct_spin0 against direct_spin1 and stored norms."""
    def test_contract(self):
        """contract_1e/contract_2e agree with direct_spin1 and reference norms."""
        ci1 = fci.direct_spin0.contract_1e(h1e, ci0, norb, nelec)
        ci1ref = fci.direct_spin1.contract_1e(h1e, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(ci1ref, ci1))
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 9.1191973750140729, 7)
        ci1 = fci.direct_spin0.contract_2e(g2e, ci0, norb, nelec)
        ci1ref = fci.direct_spin1.contract_2e(g2e, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(ci1ref, ci1))
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 15.076640155228787, 7)
    def test_kernel(self):
        """kernel() reproduces the reference ground-state energy, as does energy()."""
        e, c = fci.direct_spin0.kernel(h1e, g2e, norb, nelec)
        self.assertAlmostEqual(e, -9.1491239851241737, 7)
        e = fci.direct_spin0.energy(h1e, g2e, c, norb, nelec)
        self.assertAlmostEqual(e, -9.1491239851241737, 7)
    def test_rdm1(self):
        """make_rdm1 matches direct_spin1 and fci_slow."""
        dm1ref = fci.direct_spin1.make_rdm1(ci0, norb, nelec)
        dm1 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286722, 10)
        norb1 = nelec
        na = fci.cistring.num_strings(norb1, nelec//2)
        ci1 = numpy.random.random((na,na))
        ci1 = ci1 + ci1.T
        dm1 = fci.direct_spin0.make_rdm1(ci1, norb1, nelec)
        ref1 = fci_slow.make_rdm1(ci1, norb1, nelec)
        self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
    def test_rdm12(self):
        """make_rdm12 matches direct_spin1 and fci_slow."""
        dm1ref, dm2ref = fci.direct_spin1.make_rdm12(ci0, norb, nelec)
        dm1, dm2 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertTrue(numpy.allclose(dm2ref, dm2))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286731, 10)
        self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.8811473403497736, 10)
        norb1 = nelec
        na = fci.cistring.num_strings(norb1, nelec//2)
        ci1 = numpy.random.random((na,na))
        ci1 = ci1 + ci1.T
        dm1, dm2 = fci.direct_spin0.make_rdm12(ci1, norb1, nelec)
        ref1, ref2 = fci_slow.make_rdm12(ci1, norb1, nelec)
        self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
        self.assertAlmostEqual(abs(ref2-dm2).max(), 0, 10)
    def METHOD_NAME(self):
        """trans_rdm1 matches direct_spin1 and reduces to make_rdm1 for identical bras/kets."""
        dm1ref = fci.direct_spin1.trans_rdm1(ci0, ci1, norb, nelec)
        dm1 = fci.direct_spin0.trans_rdm1(ci0, ci1, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
        dm0 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
        dm1 = fci.direct_spin0.trans_rdm1(ci0, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm1, dm0))
    def test_trans_rdm12(self):
        """trans_rdm12 matches direct_spin1 and reduces to make_rdm12 for identical bras/kets."""
        dm1ref, dm2ref = fci.direct_spin1.trans_rdm12(ci0, ci1, norb, nelec)
        dm1, dm2 = fci.direct_spin0.trans_rdm12(ci0, ci1, norb, nelec)
        self.assertTrue(numpy.allclose(dm1ref, dm1))
        self.assertTrue(numpy.allclose(dm2ref, dm2))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
        self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.7327573770739235, 10)
        _,dm0 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
        _,dm2 = fci.direct_spin0.trans_rdm12(ci0, ci0, norb, nelec)
        self.assertTrue(numpy.allclose(dm2,dm0))
    def test_davidson_only(self):
        """Davidson-only solver on BeH2; symmetry-mismatched wfnsym must raise."""
        x = 3.0 * 0.529177249
        y = (2.54 - 0.46 * 3.0) * 0.529177249
        mol = gto.M(
            verbose = 0,
            atom = [
                ['Be',( 0., 0. , 0. )],
                ['H', ( x, -y , 0. )],
                ['H', ( x, y , 0. )],],
            symmetry = True,
            basis = '6-311g')
        mf = scf.RHF(mol)
        mf.run(conv_tol=1e-10)
        mf._scf = mf
        h1e = mcscf.casci.h1e_for_cas(mf, mf.mo_coeff, ncas=2, ncore=2)[0]
        eri = ao2mo.incore.full(mf._eri, mf.mo_coeff[:,2:4])
        cis = fci.direct_spin0.FCISolver(mol)
        cis.davidson_only = True
        ci0 = numpy.zeros((2,2))
        ci0[0,0] = 1
        e, c = cis.kernel(h1e, eri, 2, 2, ci0)
        self.assertAlmostEqual(e, -0.80755526695538049, 7)
        cis = fci.direct_spin0_symm.FCISolver(mol)
        cis.wfnsym = 5
        self.assertRaises(RuntimeError,
                          cis.kernel, h1e, eri, 2, 2, orbsym=mf.mo_coeff.orbsym[2:4])
    def test_gen_linkstr(self):
        """gen_linkstr tril/full forms are consistent under reform_linkstr_index."""
        sol = fci.direct_spin0.FCI(mol)
        link1 = sol.gen_linkstr(7, 6, tril=True)
        link1[:,:,1] = 0
        link2 = sol.gen_linkstr(7, (3,3), tril=False)
        self.assertAlmostEqual(abs(link1 - fci.cistring.reform_linkstr_index(link2)).max(), 0, 12)
    def test_small_system(self):
        """Small random Hamiltonian: kernel energies agree with dense diagonalization."""
        sol = fci.direct_spin0.FCI()
        norb = 6
        nelec = (3,3)
        numpy.random.seed(9)
        h1e = numpy.random.random((norb,norb))
        h1e = h1e + h1e.T
        g2e = numpy.random.random((norb,norb,norb,norb))
        eri = .5* ao2mo.restore(1, ao2mo.restore(8, g2e, norb), norb)
        h = fci.direct_spin1.pspace(h1e, eri, norb, nelec, np=5000)[1]
        eref, c0 = numpy.linalg.eigh(h)
        e, c1 = sol.kernel(h1e, eri, norb, (norb,norb))
        self.assertAlmostEqual(e, 20.52279077686709, 12)
        e, c1 = sol.kernel(h1e, eri, norb, nelec, nroots=4)
        self.assertAlmostEqual(abs(eref[[0,1,3,5]] - e).max(), 0, 8)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    print("Full Tests for spin0")
    unittest.main()
7,068 | process source | import csv
import datetime
import logging
import os
import sys
from io import StringIO
from dateutil import parser
from data_research.models import (
County,
CountyMortgageData,
MortgageDataConstant,
)
from data_research.mortgage_utilities.fips_meta import validate_fips
from data_research.mortgage_utilities.s3_utils import (
S3_SOURCE_BUCKET,
S3_SOURCE_FILE,
read_in_s3_csv,
)
from data_research.scripts import (
export_public_csvs,
load_mortgage_aggregates,
update_county_msa_meta,
)
# Scratch buffer; not referenced in this module's visible code -- TODO confirm before removing.
DATAFILE = StringIO()
# This script's file name without extension, used in log messages.
SCRIPT_NAME = os.path.basename(__file__).split(".")[0]
logger = logging.getLogger(__name__)
def update_through_date_constant(date):
    """Persist *date* as the 'through_date' MortgageDataConstant.

    Creates the constant row if it does not exist, then overwrites its
    date_value. The `created` flag from get_or_create is irrelevant here,
    so it is discarded explicitly.
    """
    constant, _created = MortgageDataConstant.objects.get_or_create(
        name="through_date"
    )
    constant.date_value = date
    constant.save()
def dump_as_csv(rows_out, dump_slug):
    """Write *rows_out* as a headerless CSV to ``<dump_slug>.csv``.

    Sample output row:
        1,01001,2008-01-01,268,260,4,1,0,3,2891

    ``newline=""`` is required when handing a file to csv.writer so the
    writer controls line endings itself; without it Windows output gets
    doubled newlines (see the csv module docs).
    """
    with open("{}.csv".format(dump_slug), "w", newline="") as f:
        writer = csv.writer(f)
        for row in rows_out:
            writer.writerow(row)
def METHOD_NAME(starting_date, through_date, dump_slug=None):
    """
    Re-generate aggregated data from the latest source CSV posted to S3.
    This operation has three steps
    - Wipe and regenerate the base county_mortgage_data table.
    - Regenerate aggregated data for MSAs, non-MSAs, states and national.
    - Update metadata values and files.
    - Export new downloadable public CSV files.
    If dump_slug is provided, a CSV the base county tables will be dumped.
    The input CSV has the following field_names and row form:
    date,fips,open,current,thirty,sixty,ninety,other
    01/01/08,1001,268,260,4,1,0,3
    """
    starter = datetime.datetime.now()
    counter = 0
    # Primary keys are assigned manually since the table was just truncated.
    pk = 1
    new_objects = []
    # truncate table
    CountyMortgageData.objects.all().delete()
    source_url = "{}/{}".format(S3_SOURCE_BUCKET, S3_SOURCE_FILE)
    raw_data = read_in_s3_csv(source_url)
    for row in raw_data:
        sampling_date = parser.parse(row.get("date")).date()
        # Only keep rows inside the [starting_date, through_date] window
        # with a FIPS code that validates against known counties.
        if sampling_date >= starting_date and sampling_date <= through_date:
            valid_fips = validate_fips(row.get("fips"))
            if valid_fips:
                county = County.objects.get(fips=valid_fips)
                new_objects.append(
                    CountyMortgageData(
                        pk=pk,
                        fips=valid_fips,
                        date=sampling_date,
                        total=row.get("open"),
                        current=row.get("current"),
                        thirty=row.get("thirty"),
                        sixty=row.get("sixty"),
                        ninety=row.get("ninety"),
                        other=row.get("other"),
                        county=county,
                    )
                )
                pk += 1
                counter += 1
                # Lightweight console progress indicators for long runs.
                if counter % 10000 == 0:  # pragma: no cover
                    sys.stdout.write(".")
                    sys.stdout.flush()
                if counter % 100000 == 0:  # pragma: no cover
                    logger.info("\n{}".format(counter))
    CountyMortgageData.objects.bulk_create(new_objects)
    logger.info(
        "\n{} took {} "
        "to create {} countymortgage records".format(
            SCRIPT_NAME, (datetime.datetime.now() - starter), len(new_objects)
        )
    )
    if dump_slug:
        dump_as_csv(
            (
                (
                    obj.pk,
                    obj.fips,
                    "{}".format(obj.date),
                    obj.total,
                    obj.current,
                    obj.thirty,
                    obj.sixty,
                    obj.ninety,
                    obj.other,
                    obj.county.pk,
                )
                for obj in new_objects
            ),
            dump_slug,
        )
def run(*args):
    """
    Process latest data source and optionally drop a CSV of result.
    The script ingests a through-date (YYYY-MM-DD) and dump location/slug.
    Sample command:
    manage.py runscript process_mortgage_data \
    --script-args 2017-03-01 /tmp/mp_countydata`
    """
    dump_slug = None
    starting_date = MortgageDataConstant.objects.get(
        name="starting_date"
    ).date_value
    if args:
        through_date = parser.parse(args[0]).date()
        update_through_date_constant(through_date)
        if len(args) > 1:
            dump_slug = args[1]
        METHOD_NAME(starting_date, through_date, dump_slug=dump_slug)
        # Downstream aggregation/metadata/export steps run only after a
        # successful base-table rebuild.
        load_mortgage_aggregates.run()
        update_county_msa_meta.run()
        export_public_csvs.run()
    else:
        logger.info(
            "Please provide a through-date (YYYY-MM-DD).\n"
            "Optionally, you may also provide a dump location/slug, "
            "such as '/tmp/mp_countydata.csv', if you want a CSV dumped.\n"
        )
7,069 | test inflow double | #
# Copyright (C) 2022-2023 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.shapes
import espressomd.electrokinetics
@utx.skipIfMissingFeatures(["WALBERLA"])
class EKFixedFlux(ut.TestCase):
    """Check EK fixed-flux boundaries inject the expected density into a closed box."""
    # Simulation parameters (lattice units).
    BOX_L = 2.5
    AGRID = 0.5
    DENSITY = 1
    DIFFUSION_COEFFICIENT = 0.1
    TIMESTEPS = 40
    TAU = 0.25
    INFLOW_FLUX = 0.1
    system = espressomd.System(box_l=[BOX_L, BOX_L, BOX_L])
    system.time_step = TAU
    system.cell_system.skin = 0.4
    def tearDown(self):
        # Detach the EK container so each test starts from a clean system.
        self.system.ekcontainer = None
    def test_inflow_single(self):
        self.detail_test_inflow(single_precision=True)
    def METHOD_NAME(self):
        self.detail_test_inflow(single_precision=False)
    def detail_test_inflow(self, single_precision: bool):
        """
        Testing the EK fixed flux boundaries to test the fixed inflow into a non-periodic box.
        """
        # Single precision accumulates more error, so compare fewer decimals.
        decimal_precision: int = 5 if single_precision else 7
        lattice = espressomd.electrokinetics.LatticeWalberla(
            n_ghost_layers=1, agrid=self.AGRID)
        ekspecies = espressomd.electrokinetics.EKSpecies(
            lattice=lattice, density=0.0, diffusion=self.DIFFUSION_COEFFICIENT,
            valency=0.0, advection=False, friction_coupling=False,
            single_precision=single_precision, tau=self.TAU)
        eksolver = espressomd.electrokinetics.EKNone(lattice=lattice)
        self.system.ekcontainer = espressomd.electrokinetics.EKContainer(
            tau=self.TAU, solver=eksolver)
        self.system.ekcontainer.add(ekspecies)
        ekspecies[1:-1, 1:-1, 1:-1].density = self.DENSITY
        # Seal all six box faces with zero-flux boundaries first.
        ekspecies[:, :, 0].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        ekspecies[:, :, -1].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        ekspecies[:, 0, :].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        ekspecies[:, -1, :].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        ekspecies[0, :, :].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        ekspecies[-1, :, :].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary([0, 0, 0])
        # set fixed flux in +z-direction
        ekspecies[:, :, -1].flux_boundary = espressomd.electrokinetics.FluxBoundary(
            [0, 0, -self.INFLOW_FLUX])
        # One central boundary node gets an extra inflow contribution.
        additional_center_flux = 3 * self.INFLOW_FLUX
        midpoint = int(lattice.shape[0] // 2)
        ekspecies[midpoint, midpoint, -1].flux_boundary = \
            espressomd.electrokinetics.FluxBoundary(
                [0, 0, -self.INFLOW_FLUX - additional_center_flux])
        # check density before integration
        expected_initial_density = self.DENSITY * np.prod(lattice.shape - 2)
        np.testing.assert_almost_equal(
            actual=np.sum(ekspecies[1:-1, 1:-1, 1:-1].density),
            desired=expected_initial_density, decimal=decimal_precision)
        self.system.integrator.run(self.TIMESTEPS)
        # check that density has pushed into domain
        inflow_area = np.prod(lattice.shape[:2] - 2)
        expected_end_density = expected_initial_density + \
            (self.INFLOW_FLUX * inflow_area + additional_center_flux) * \
            self.TIMESTEPS * self.TAU / self.AGRID
        np.testing.assert_almost_equal(
            actual=np.sum(ekspecies[1:-1, 1:-1, 1:-1].density),
            desired=expected_end_density, decimal=decimal_precision)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    ut.main()
7,070 | logout | import json
from typing import Any
from starwhale.core.instance.view import InstanceTermView
from . import CLI
from .base.invoke import invoke_output, invoke_with_react
class Instance:
    """Wrapper around the `swcli instance` sub-commands and InstanceTermView.

    Methods shell out to the CLI (`invoke_output` / `invoke_with_react`) or
    call InstanceTermView directly, returning parsed JSON or success booleans.
    """
    instance_cmd = "instance"

    def login(
        self,
        user: str = "starwhale",
        password: str = "abcd1234",
        url: str = "http://console.pre.intra.starwhale.ai",
        alias: str = "server",
    ) -> bool:
        """
        :param alias:
        :param user:
        :param password:
        :param url: Instance URI, if ignore it, swcli will login current selected instance
        :return:
        res is:login http://console.pre.intra.starwhale.ai successfully
        or: anything else
        """
        kw = {"username": user, "password": password}
        InstanceTermView().login(url, alias, **kw)
        # InstanceTermView.login raises on failure, so reaching here means success.
        return True

    def info(self, instance: str = "") -> Any:
        """
        :param instance: instance alias name or uri, if ignore it, swcli will use current selected instance.
        :return:
        local:
        {
        "instance": "local",
        "root_dir": "/home/star_g/.starwhale"
        }
        server:
        {
        "agents": [
        {
        "ip": "10.0.127.1",
        "status": "OFFLINE",
        "version": "KUBELET:v1.21.11"
        },
        ...
        ],
        "instance": "server...",
        "version": "0.1.0:8c82767b60686f3e2bfea9dafe8c8cce5dd34f52"
        }
        """
        _ret_code, _res = invoke_output(
            [CLI, self.instance_cmd, "-o", "json", "info", instance]
        )
        # Empty dict on CLI failure so callers can treat the result uniformly.
        return json.loads(_res) if _ret_code == 0 else {}

    def list(self) -> Any:
        """
        :return:
        [
        {
        "current_project": "",
        "in_use": false,
        "name": "server",
        "updated_at": "2022-09-09 10:45:30 CST",
        "uri": "http://server.pre.intra.starwhale.ai",
        "user_name": "lijing_test",
        "user_role": "normal"
        },
        ...
        ]
        """
        # NOTE(review): flag order differs from info() ("-o json" before the
        # sub-command here) -- presumably both are accepted by the CLI; confirm.
        _ret_code, _res = invoke_output([CLI, "-o", "json", self.instance_cmd, "list"])
        return json.loads(_res) if _ret_code == 0 else []

    def METHOD_NAME(self, instance: str = "") -> bool:
        """
        :param instance: instance alias name or uri, if ignore it, swcli will logout current selected instance.
        then, the instance will remove from list
        :return:
        res is:bye
        or:skip local instance logout
        or:
        """
        _ret_code, _res = invoke_with_react(
            [CLI, self.instance_cmd, "logout", instance]
        )
        # The comparison already yields a bool; no bool() wrapper needed.
        return _ret_code == 0

    def select(self, instance: str) -> bool:
        """
        :param instance: instance alias name or uri
        :return:
        res is:select local instance
        or:failed to select local2, reason: need to login instance local2
        """
        InstanceTermView().select(instance)
        return True
7,071 | test venv and pths | import os
from glob import glob
import sys
import shutil
from pathlib import Path
import pytest
from ..helpers import skip_if_windows, skip_if_not_windows, get_example_dir
from jedi.inference import sys_path
from jedi.api.environment import create_environment
def test_paths_from_assignment(Script):
    """_paths_from_assignment extracts literal string paths from sys.path assignments."""
    def paths(src):
        # Parse the snippet and hand its first statement to the helper.
        script = Script(src, path='/foo/bar.py')
        expr_stmt = script._module_node.children[0]
        return set(sys_path._paths_from_assignment(script._get_module_context(), expr_stmt))
    # Normalize paths for Windows.
    path_a = Path('/foo/a').absolute()
    path_b = Path('/foo/b').absolute()
    path_c = Path('/foo/c').absolute()
    assert paths('sys.path[0:0] = ["a"]') == {path_a}
    assert paths('sys.path = ["b", 1, x + 3, y, "c"]') == {path_b, path_c}
    assert paths('sys.path = a = ["a"]') == {path_a}
    # Fail for complicated examples.
    assert paths('sys.path, other = ["a"], 2') == set()
def METHOD_NAME(venv_path, environment):
    """A venv's sys.path picks up .pth entries without leaking them into this interpreter."""
    pjoin = os.path.join
    # Locate site-packages; its layout differs by platform and Python version.
    if os.name == 'nt':
        if environment.version_info < (3, 11):
            site_pkg_path = pjoin(venv_path, 'lib', 'site-packages')
        else:
            site_pkg_path = pjoin(venv_path, 'Lib', 'site-packages')
    else:
        site_pkg_path = glob(pjoin(venv_path, 'lib', 'python*', 'site-packages'))[0]
    # Replace site-packages with a fixture containing .pth/egg-link files.
    shutil.rmtree(site_pkg_path)
    shutil.copytree(get_example_dir('sample_venvs', 'pth_directory'), site_pkg_path)
    virtualenv = create_environment(venv_path)
    venv_paths = virtualenv.get_sys_path()
    ETALON = [
        # For now disable egg-links. I have no idea how they work... ~ dave
        #pjoin('/path', 'from', 'egg-link'),
        #pjoin(site_pkg_path, '.', 'relative', 'egg-link', 'path'),
        site_pkg_path,
        pjoin(site_pkg_path, 'dir-from-foo-pth'),
        '/foo/smth.py:module',
        # Not sure why it's added twice. It has to do with site.py which is not
        # something we can change. However this obviously also doesn't matter.
        '/foo/smth.py:from_func',
        '/foo/smth.py:from_func',
    ]
    # Ensure that pth and egg-link paths were added.
    assert venv_paths[-len(ETALON):] == ETALON
    # Ensure that none of venv dirs leaked to the interpreter.
    assert not set(sys.path).intersection(ETALON)
# Shared sys.path fixture for the parametrized cases below.
_s = ['/a', '/b', '/c/d/']
@pytest.mark.parametrize(
'sys_path_, module_path, expected, is_package', [
(_s, '/a/b', ('b',), False),
(_s, '/a/b/c', ('b', 'c'), False),
(_s, '/a/b.py', ('b',), False),
(_s, '/a/b/c.py', ('b', 'c'), False),
(_s, '/x/b.py', None, False),
(_s, '/c/d/x.py', ('x',), False),
(_s, '/c/d/x.py', ('x',), False),
(_s, '/c/d/x/y.py', ('x', 'y'), False),
# If dots are in there they also resolve. These are obviously illegal
# in Python, but Jedi can handle them. Give the user a bit more freedom
# that he will have to correct eventually.
(_s, '/a/b.c.py', ('b.c',), False),
(_s, '/a/b.d/foo.bar.py', ('b.d', 'foo.bar'), False),
(_s, '/a/.py', None, False),
(_s, '/a/c/.py', None, False),
(['/foo'], '/foo/bar/__init__.py', ('bar',), True),
(['/foo'], '/foo/bar/baz/__init__.py', ('bar', 'baz'), True),
skip_if_windows(['/foo'], '/foo/bar.so', ('bar',), False),
skip_if_windows(['/foo'], '/foo/bar/__init__.so', ('bar',), True),
skip_if_not_windows(['/foo'], '/foo/bar.pyd', ('bar',), False),
skip_if_not_windows(['/foo'], '/foo/bar/__init__.pyd', ('bar',), True),
(['/foo'], '/x/bar.py', None, False),
(['/foo'], '/foo/bar.xyz', ('bar.xyz',), False),
(['/foo', '/foo/bar'], '/foo/bar/baz', ('baz',), False),
(['/foo/bar', '/foo'], '/foo/bar/baz', ('baz',), False),
(['/'], '/bar/baz.py', ('bar', 'baz',), False),
])
def test_transform_path_to_dotted(sys_path_, module_path, expected, is_package):
# transform_path_to_dotted expects normalized absolute paths.
sys_path_ = [os.path.abspath(path) for path in sys_path_]
module_path = os.path.abspath(module_path)
assert sys_path.transform_path_to_dotted(sys_path_, Path(module_path)) \
== (expected, is_package) |
7,072 | get info | # This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.add_dll_directory(extra_dll_dir)
blas_armpl_info={}
blas_mkl_info={}
blis_info={}
openblas_info={}
accelerate_info={}
atlas_3_10_blas_threads_info={}
atlas_3_10_blas_info={}
atlas_blas_threads_info={}
atlas_blas_info={}
blas_info={'libraries': ['blas', 'blas'], 'library_dirs': ['/usr/lib'], 'include_dirs': ['/usr/local/include'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'define_macros': [('NO_ATLAS_INFO', 1), ('HAVE_CBLAS', None)], 'libraries': ['blas', 'blas'], 'library_dirs': ['/usr/lib'], 'include_dirs': ['/usr/local/include'], 'language': 'c'}
lapack_armpl_info={}
lapack_mkl_info={}
openblas_lapack_info={}
openblas_clapack_info={}
flame_info={}
atlas_3_10_threads_info={}
atlas_3_10_info={}
atlas_threads_info={}
atlas_info={}
lapack_info={'libraries': ['lapack', 'lapack'], 'library_dirs': ['/usr/lib'], 'language': 'f77'}
lapack_opt_info={'libraries': ['lapack', 'lapack', 'blas', 'blas'], 'library_dirs': ['/usr/lib'], 'language': 'c', 'define_macros': [('NO_ATLAS_INFO', 1), ('HAVE_CBLAS', None)], 'include_dirs': ['/usr/local/include']}
def METHOD_NAME(name):
    """Return the build-time system_info dict recorded under *name*.

    Looks *name* up directly in this module's globals first, then falls
    back to ``<name>_info``; returns an empty dict if neither exists.
    """
    namespace = globals()
    if name in namespace:
        return namespace[name]
    return namespace.get(name + "_info", {})
def show():
    """
    Show libraries in the system on which NumPy was built.
    Print information about various resources (libraries, library
    directories, include directories, etc.) in the system on which
    NumPy was built.
    See Also
    --------
    get_include : Returns the directory containing NumPy C
    header files.
    Notes
    -----
    1. Classes specifying the information to be printed are defined
    in the `numpy.distutils.system_info` module.
    Information may include:
    * ``language``: language used to write the libraries (mostly
    C or f77)
    * ``libraries``: names of libraries found in the system
    * ``library_dirs``: directories containing the libraries
    * ``include_dirs``: directories containing library header files
    * ``src_dirs``: directories containing library source files
    * ``define_macros``: preprocessor macros used by
    ``distutils.setup``
    * ``baseline``: minimum CPU features required
    * ``found``: dispatched features supported in the system
    * ``not found``: dispatched features that are not supported
    in the system
    2. NumPy BLAS/LAPACK Installation Notes
    Installing a numpy wheel (``pip install numpy`` or force it
    via ``pip install numpy --only-binary :numpy: numpy``) includes
    an OpenBLAS implementation of the BLAS and LAPACK linear algebra
    APIs. In this case, ``library_dirs`` reports the original build
    time configuration as compiled with gcc/gfortran; at run time
    the OpenBLAS library is in
    ``site-packages/numpy.libs/`` (linux), or
    ``site-packages/numpy/.dylibs/`` (macOS), or
    ``site-packages/numpy/.libs/`` (windows).
    Installing numpy from source
    (``pip install numpy --no-binary numpy``) searches for BLAS and
    LAPACK dynamic link libraries at build time as influenced by
    environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
    NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
    or the optional file ``~/.numpy-site.cfg``.
    NumPy remembers those locations and expects to load the same
    libraries at run-time.
    In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
    library) is in the default build-time search order after
    'openblas'.
    Examples
    --------
    >>> import numpy as np
    >>> np.show_config()
    blas_opt_info:
    language = c
    define_macros = [('HAVE_CBLAS', None)]
    libraries = ['openblas', 'openblas']
    library_dirs = ['/usr/local/lib']
    """
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    # Dump every module-level *_info dict defined above; private names and
    # non-dict globals (functions, modules) are skipped.
    for name,info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}): continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for k,v in info_dict.items():
            v = str(v)
            # Truncate very long source lists to keep output readable.
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print("    %s = %s" % (k,v))
    # Partition dispatched CPU features by runtime availability.
    features_found, features_not_found = [], []
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            features_found.append(feature)
        else:
            features_not_found.append(feature)
    print("Supported SIMD extensions in this NumPy install:")
    print("    baseline = %s" % (','.join(__cpu_baseline__)))
    print("    found = %s" % (','.join(features_found)))
    print("    not found = %s" % (','.join(features_not_found)))
|
7,073 | memoize | """
Return a list of recent PyTorch wheels published on download.pytorch.org.
Users can specify package name, python version, platform, and the number of days to return.
If one of the packages specified is missing on one day, the script will skip outputing the results on that day.
"""
import os
import re
import requests
import argparse
import urllib.parse
from datetime import date, timedelta
from bs4 import BeautifulSoup
from collections import defaultdict
import sys
from pathlib import Path
import subprocess
from typing import List
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
    """Context manager that temporarily prepends *path* to ``sys.path``."""

    def __init__(self, path):
        self.path = path

    def __enter__(self):
        sys.path.insert(0, self.path)

    def __exit__(self, exc_type, exc_value, traceback):
        # The entry may have been removed by code running inside the block,
        # so only remove it if it is still present.
        if self.path in sys.path:
            sys.path.remove(self.path)
with add_path(str(REPO_ROOT)):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP
from utils.python_utils import DEFAULT_PYTHON_VERSION, PYTHON_VERSION_MAP
PYTORCH_CUDA_VERISON = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"]
PYTORCH_PYTHON_VERSION = PYTHON_VERSION_MAP[DEFAULT_PYTHON_VERSION]["pytorch_url"]
torch_wheel_nightly_base = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERISON}/"
torch_nightly_wheel_index = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERISON}/torch_nightly.html"
torch_nightly_wheel_index_override = "torch_nightly.html"
def METHOD_NAME(function):
    """Decorator that caches *function* results keyed by its arguments.

    The cache lives as long as the decorated function and is never
    evicted, which is fine here because the wheel index is small.
    Generalized to also support keyword arguments (normalized into a
    sorted tuple so call-order differences hit the same entry); plain
    positional calls behave exactly as before.
    """
    from functools import wraps

    call_cache = {}

    @wraps(function)
    def memoized_function(*f_args, **f_kwargs):
        key = (f_args, tuple(sorted(f_kwargs.items())))
        if key in call_cache:
            return call_cache[key]
        call_cache[key] = result = function(*f_args, **f_kwargs)
        return result
    return memoized_function
@METHOD_NAME
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override):
    """Fetch and parse the nightly wheel index into {pkg: {version: url}}.

    Reads ``override_file`` instead of the network when it exists and is
    non-empty (useful for offline runs/tests). Only wheels whose python
    tag and platform tag match the requested ones are kept.
    """
    if os.path.isfile(override_file) and os.stat(override_file).st_size:
        with open(override_file) as f:
            data = f.read()
    else:
        r = requests.get(url)
        r.raise_for_status()
        data = r.text
    soup = BeautifulSoup(data, 'html.parser')
    data = defaultdict(dict)
    for link in soup.find_all('a'):
        # BUG FIX: the pattern must be a raw string -- "\." in a normal
        # string literal is an invalid escape sequence (SyntaxWarning on
        # modern Python).
        group_match = re.search(r"([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text)
        # some packages (e.g., torch-rec) doesn't follow this naming convention
        if not group_match:
            continue
        pkg, version, py, py_m, platform = group_match.groups()
        version = urllib.parse.unquote(version)
        if py == py_version and platform == platform_version:
            full_url = os.path.join(torch_wheel_nightly_base, link.text)
            data[pkg][version] = full_url
    return data
def get_nightly_wheel_urls(packages:list, date:date,
                           py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64'):
    """Look up the wheel for every requested package on the given date.

    Returns {package: {"version": str, "wheel": url}}, or None when any
    requested package has no wheel published for that date.
    """
    date_str = "%d%02d%02d" % (date.year, date.month, date.day)
    index = get_wheel_index_data(py_version, platform_version)
    result = {}
    for pkg in packages:
        available = index[pkg]
        # Multiple matches can occur while the version number is being
        # bumped: e.g. both 1.11.0.dev<date> and 1.12.0.dev<date> exist.
        matches = sorted((v for v in available if date_str in v), reverse=True)
        if not matches:
            return None
        if len(matches) > 1:
            print(f"Warning: multiple versions matching a single date: {matches}, using {matches[0]}")
        result[pkg] = {
            "version": matches[0],
            "wheel": available[matches[0]],
        }
    return result
def get_nightly_wheels_in_range(packages:list, start_date:date, end_date:date,
                                py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
    """Collect wheel sets for every day in [start_date, end_date].

    Days on which any requested package is missing are skipped entirely.
    """
    collected = []
    one_day = timedelta(days=1)
    day = start_date
    while day <= end_date:
        wheel_set = get_nightly_wheel_urls(packages, day,
                                           py_version=py_version,
                                           platform_version=platform_version)
        if wheel_set is not None:
            collected.append(wheel_set)
        day += one_day
    return list(reversed(collected)) if reverse else collected
def get_n_prior_nightly_wheels(packages:list, n:int,
                               py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
    """Return wheel sets for the *n* days up to and including today."""
    today = date.today()
    window_start = today - timedelta(days=n)
    return get_nightly_wheels_in_range(packages, window_start, today,
                                       py_version=py_version, platform_version=platform_version, reverse=reverse)
def get_most_recent_successful_wheels(packages: list, pyver: str, platform: str) -> "Optional[Dict[str, Any]]":
    """Get the most recent successful nightly wheel set.

    Walks backwards day by day from today (up to one year) and returns the
    first complete mapping {package: {"version": ..., "wheel": ...}} found,
    or None if no day in the window has wheels for every requested package.
    (The previous ``List[str]`` annotation was wrong: this returns the dict
    produced by get_nightly_wheel_urls, or None.)
    """
    curr_date = date.today()
    date_limit = curr_date - timedelta(days=365)
    while curr_date >= date_limit:
        wheels = get_nightly_wheel_urls(packages, curr_date, py_version=pyver, platform_version=platform)
        if wheels:
            return wheels
        curr_date = curr_date - timedelta(days=1)
    # Can't find any valid pytorch package
    return None
def install_wheels(wheels):
    """Install the given wheel set with pip via a generated requirements file."""
    wheel_urls = [entry["wheel"] for entry in wheels.values()]
    work_dir = Path(__file__).parent.joinpath(".data")
    work_dir.mkdir(parents=True, exist_ok=True)
    requirements_file = work_dir.joinpath("requirements.txt").resolve()
    requirements_file.write_text("\n".join(wheel_urls))
    command = ["pip", "install", "-r", str(requirements_file)]
    print(f"Installing pytorch nightly packages command: {command}")
    subprocess.check_call(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pyver", type=str, default=PYTORCH_PYTHON_VERSION, help="PyTorch Python version")
parser.add_argument("--platform", type=str, default="linux_x86_64", help="PyTorch platform")
parser.add_argument("--priordays", type=int, default=1, help="Number of days")
parser.add_argument("--reverse", action="store_true", help="Return reversed result")
parser.add_argument("--packages", required=True, type=str, nargs="+", help="List of package names")
parser.add_argument("--install-nightlies", action="store_true",
help="Install the most recent successfully built nightly packages")
args = parser.parse_args()
if args.install_nightlies:
wheels = get_most_recent_successful_wheels(args.packages, args.pyver, args.platform)
assert wheels, f"We do not find any successful pytorch nightly build of packages: {args.packages}."
print(f"Found pytorch nightly wheels: {wheels} ")
install_wheels(wheels)
exit(0)
wheels = get_n_prior_nightly_wheels(packages=args.packages,
n=args.priordays,
py_version=args.pyver,
platform_version=args.platform,
reverse=args.reverse)
for wheelset in wheels:
for pkg in wheelset:
print(f"{pkg}-{wheelset[pkg]['version']}: {wheelset[pkg]['wheel']}") |
7,074 | run | # -*- encoding: utf8 -*-
"""Tests for distutils.command.check."""
import textwrap
import unittest
from test.test_support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Tests for the distutils ``check`` command.
    NOTE(review): Python 2 era code (``test.test_support``, ``u''``
    literals); keep it compatible with that runtime.
    """
    def METHOD_NAME(self, metadata=None, **options):
        """Create a dist with *metadata*, run ``check`` on it with
        *options* set, and return the finished command object."""
        if metadata is None:
            metadata = {}
        pkg_info, dist = self.create_dist(**metadata)
        cmd = check(dist)
        cmd.initialize_options()
        for name, value in options.items():
            setattr(cmd, name, value)
        cmd.ensure_finalized()
        cmd.run()
        return cmd
    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self.METHOD_NAME()
        self.assertEqual(cmd._warnings, 2)
        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self.METHOD_NAME(metadata)
        self.assertEqual(cmd._warnings, 0)
        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self.METHOD_NAME, {}, **{'strict': 1})
        # and of course, no error when all metadata are present
        cmd = self.METHOD_NAME(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)
        # now a test with Unicode entries
        metadata = {'url': u'xxx', 'author': u'\u00c9ric',
                    'author_email': u'xxx', u'name': 'xxx',
                    'version': u'xxx',
                    'description': u'Something about esszet \u00df',
                    'long_description': u'More things about esszet \u00df'}
        cmd = self.METHOD_NAME(metadata)
        self.assertEqual(cmd._warnings, 0)
    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_document(self):
        pkg_info, dist = self.create_dist()
        cmd = check(dist)
        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)
        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)
    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext(self):
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)
        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self.METHOD_NAME, metadata,
                          **{'strict': 1, 'restructuredtext': 1})
        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = u'title\n=====\n\ntest \u00df'
        cmd = self.METHOD_NAME(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)
    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext_with_syntax_highlight(self):
        # Don't fail if there is a `code` or `code-block` directive
        example_rst_docs = []
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:
            .. code:: python
                def foo():
                    pass
            """))
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:
            .. code-block:: python
                def foo():
                    pass
            """))
        for rest_with_code in example_rst_docs:
            pkg_info, dist = self.create_dist(long_description=rest_with_code)
            cmd = check(dist)
            cmd.check_restructuredtext()
            msgs = cmd._check_rst_data(rest_with_code)
            # Without pygments, docutils reports the code it cannot analyze.
            if pygments is not None:
                self.assertEqual(len(msgs), 0)
            else:
                self.assertEqual(len(msgs), 1)
                self.assertEqual(
                    str(msgs[0][1]),
                    'Cannot analyze code. Pygments package not found.'
                )
    def test_check_all(self):
        metadata = {'url': 'xxx', 'author': 'xxx'}
        self.assertRaises(DistutilsSetupError, self.METHOD_NAME,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
def test_suite():
    """Aggregate this module's tests (used by distutils' own test runner)."""
    return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
    run_unittest(test_suite())
7,075 | set thirdparty installed dir | # Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from yugabyte.common_util import get_home_dir_aliases, YB_SRC_ROOT
from typing import Dict, List, Any, Optional, cast
def replace_home_dir_with_tilde(p: str) -> str:
    """
    Transforms a path before logging by replacing the home directory path with ~.
    >>> replace_home_dir_with_tilde(os.path.expanduser('~/foo'))
    '~/foo'
    >>> replace_home_dir_with_tilde(os.path.expanduser('~'))
    '~'
    >>> replace_home_dir_with_tilde(os.path.realpath(os.path.expanduser('~')))
    '~'
    >>> replace_home_dir_with_tilde('/usr/bin')
    '/usr/bin'
    """
    for home_alias in get_home_dir_aliases():
        if p == home_alias:
            return '~'
        prefix = home_alias + '/'
        if p.startswith(prefix):
            return '~/' + p[len(prefix):]
    return p
class LogArgRewriter:
    """Shortens registered directory paths to ${VAR_NAME} in log arguments."""

    # Bidirectional maps plus a cached longest-first ordering so the most
    # specific registered path wins when paths nest inside each other.
    path_to_var_name: Dict[str, str]
    var_name_to_path: Dict[str, str]
    paths_by_decreasing_length: List[str]

    def __init__(self) -> None:
        self.path_to_var_name = {}
        self.var_name_to_path = {}
        self.paths_by_decreasing_length = []

    def add_rewrite(self, var_name: str, path: str) -> None:
        """Register *path* to be displayed as ${var_name} from now on."""
        existing_path = self.var_name_to_path.get(var_name)
        if existing_path is not None:
            if existing_path == path:
                # Ignore duplicate.
                return
            raise ValueError(
                f"Variable {var_name} is already mapped to path {self.var_name_to_path[var_name]} "
                f"for the purposes of rewriting log messages, cannot map it to {path}.")
        logging.info("Will shorten the path %s as ${%s} going forward", path, var_name)
        self.path_to_var_name[path] = var_name
        self.var_name_to_path[var_name] = path
        self.paths_by_decreasing_length = sorted(
            self.path_to_var_name, key=len, reverse=True)

    def rewrite_arg(self, arg: str) -> str:
        """Replace a registered path (exact or as a prefix) in *arg*."""
        for known_path in self.paths_by_decreasing_length:
            if arg == known_path:
                return "${%s}" % self.path_to_var_name[known_path]
            if arg.startswith(known_path + '/'):
                suffix = arg[len(known_path) + 1:]
                return "${%s}/%s" % (self.path_to_var_name[known_path], suffix)
        return arg
# Process-wide singleton; created lazily by get_log_arg_rewriter().
g_log_arg_rewriter: Optional[LogArgRewriter] = None
def get_log_arg_rewriter() -> LogArgRewriter:
    """Return the process-wide LogArgRewriter, creating it on first use.

    The singleton is pre-seeded with the YB_SRC_ROOT rewrite; further
    rewrites are registered later via the set_* helper functions.
    """
    global g_log_arg_rewriter
    if g_log_arg_rewriter is not None:
        return g_log_arg_rewriter
    g_log_arg_rewriter = LogArgRewriter()
    g_log_arg_rewriter.add_rewrite('YB_SRC_ROOT', YB_SRC_ROOT)
    return g_log_arg_rewriter
def rewrite_args(args: List[Any]) -> List[Any]:
    """Apply path shortening to every string in *args*; pass others through."""
    rewriter = get_log_arg_rewriter()
    rewritten: List[Any] = []
    for arg in args:
        rewritten.append(rewriter.rewrite_arg(arg) if isinstance(arg, str) else arg)
    return rewritten
def log_info(format_str: str, *args: Any) -> None:
    """logging.info with registered directory paths shortened to ${VAR} form."""
    logging.info(format_str, *rewrite_args(cast(List[Any], args)))
def log_warning(format_str: str, *args: Any) -> None:
    """logging.warning with registered directory paths shortened to ${VAR} form."""
    logging.warning(format_str, *rewrite_args(cast(List[Any], args)))
def log_error(format_str: str, *args: Any) -> None:
    """logging.error with registered directory paths shortened to ${VAR} form."""
    logging.error(format_str, *rewrite_args(cast(List[Any], args)))
def set_thirdparty_dir(thirdparty_dir: str) -> None:
    """Register the thirdparty dir so logs show it as ${YB_THIRDPARTY_DIR}."""
    get_log_arg_rewriter().add_rewrite('YB_THIRDPARTY_DIR', thirdparty_dir)
def METHOD_NAME(thirdparty_installed_dir: str) -> None:
    """Register the installed dir so logs show it as ${YB_THIRDPARTY_INSTALLED_DIR}."""
    get_log_arg_rewriter().add_rewrite('YB_THIRDPARTY_INSTALLED_DIR', thirdparty_installed_dir)
def set_build_root(build_root: str) -> None:
    """Register the build root so logs show it as ${YB_BUILD_ROOT}."""
    get_log_arg_rewriter().add_rewrite('YB_BUILD_ROOT', build_root)
def set_linuxbrew_dir(linuxbrew_dir: str) -> None:
    """Register the Linuxbrew dir so logs show it as ${YB_LINUXBREW_DIR}."""
    get_log_arg_rewriter().add_rewrite('YB_LINUXBREW_DIR', linuxbrew_dir)
def rewrite_logging_arg(arg: str) -> str:
    """Shorten registered paths in a single string argument."""
    return get_log_arg_rewriter().rewrite_arg(arg)
7,076 | configure gcs | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GCS file system configuration for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.contrib.cloud.python.ops import gen_gcs_config_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training
# @tf_export('contrib.cloud.BlockCacheParams')
class BlockCacheParams(object):
    """BlockCacheParams is a struct used for configuring the GCS Block Cache."""

    # Default: 128 MiB blocks; cache holds two blocks; zero staleness.
    _DEFAULT_BLOCK_SIZE = 128 * 1024 * 1024

    def __init__(self, block_size=None, max_bytes=None, max_staleness=None):
        self._block_size = block_size if block_size else self._DEFAULT_BLOCK_SIZE
        self._max_bytes = max_bytes if max_bytes else 2 * self._block_size
        self._max_staleness = max_staleness if max_staleness else 0

    @property
    def block_size(self):
        """Size in bytes of each cached block."""
        return self._block_size

    @property
    def max_bytes(self):
        """Maximum total bytes held by the block cache."""
        return self._max_bytes

    @property
    def max_staleness(self):
        """Maximum allowed staleness of cached data."""
        return self._max_staleness
# @tf_export('contrib.cloud.ConfigureGcsHook')
class ConfigureGcsHook(training.SessionRunHook):
    """ConfigureGcsHook configures GCS when used with Estimator/TPUEstimator.
    Warning: GCS `credentials` may be transmitted over the network unencrypted.
    Please ensure that the network is trusted before using this function. For
    users running code entirely within Google Cloud, your data is protected by
    encryption in between data centers. For more information, please take a look
    at https://cloud.google.com/security/encryption-in-transit/.
    Example:
    ```
    sess = tf.compat.v1.Session()
    refresh_token = raw_input("Refresh token: ")
    client_secret = raw_input("Client secret: ")
    client_id = "<REDACTED>"
    creds = {
        "client_id": client_id,
        "refresh_token": refresh_token,
        "client_secret": client_secret,
        "type": "authorized_user",
    }
    tf.contrib.cloud.configure_gcs(sess, credentials=creds)
    ```
    """
    def _verify_dictionary(self, creds_dict):
        """Return True iff *creds_dict* looks like a credential: it must have
        a 'refresh_token' (user) or 'private_key' (service account) field."""
        if 'refresh_token' in creds_dict or 'private_key' in creds_dict:
            return True
        return False
    def __init__(self, credentials=None, block_cache=None):
        """Constructs a ConfigureGcsHook.
        Args:
          credentials: A json-formatted string or a dict with the same content.
          block_cache: A `BlockCacheParams`
        Raises:
          ValueError: If credentials is improperly formatted or block_cache is not a
            BlockCacheParams.
        """
        if credentials is not None:
            if isinstance(credentials, str):
                # Parse only to validate; the original string is what is stored.
                try:
                    data = json.loads(credentials)
                except ValueError as e:
                    raise ValueError('credentials was not a well formed JSON string.', e)
                if not self._verify_dictionary(data):
                    raise ValueError(
                        'credentials has neither a "refresh_token" nor a "private_key" '
                        'field.')
            elif isinstance(credentials, dict):
                if not self._verify_dictionary(credentials):
                    raise ValueError('credentials has neither a "refresh_token" nor a '
                                     '"private_key" field.')
                # Normalize dict input to the JSON string the op expects.
                credentials = json.dumps(credentials)
            else:
                raise ValueError('credentials is of an unknown type')
        self._credentials = credentials
        if block_cache and not isinstance(block_cache, BlockCacheParams):
            raise ValueError('block_cache must be an instance of BlockCacheParams.')
        self._block_cache = block_cache
    def begin(self):
        # Build the configure ops ahead of session creation; they are run in
        # after_create_session below. Credentials are fed via a placeholder so
        # the secret never becomes a constant in the graph.
        if self._credentials:
            self._credentials_placeholder = array_ops.placeholder(dtypes.string)
            self._credentials_op = gen_gcs_config_ops.gcs_configure_credentials(
                self._credentials_placeholder)
        else:
            self._credentials_op = None
        if self._block_cache:
            self._block_cache_op = gen_gcs_config_ops.gcs_configure_block_cache(
                max_cache_size=self._block_cache.max_bytes,
                block_size=self._block_cache.block_size,
                max_staleness=self._block_cache.max_staleness)
        else:
            self._block_cache_op = None
    def after_create_session(self, session, coord):
        """Run the configure ops built in begin() against the new session."""
        del coord
        if self._credentials_op:
            session.run(
                self._credentials_op,
                feed_dict={self._credentials_placeholder: self._credentials})
        if self._block_cache_op:
            session.run(self._block_cache_op)
def METHOD_NAME(session, credentials=None, block_cache=None, device=None):
    """Configures the GCS file system for a given a session.
    Warning: GCS `credentials` may be transmitted over the network unencrypted.
    Please ensure that the network is trusted before using this function. For
    users running code entirely within Google Cloud, your data is protected by
    encryption in between data centers. For more information, please take a look
    at https://cloud.google.com/security/encryption-in-transit/.
    Args:
      session: A `tf.compat.v1.Session` session that should be used to configure
        the GCS file system.
      credentials: [Optional.] A JSON string
      block_cache: [Optional.] A BlockCacheParams to configure the block cache .
      device: [Optional.] The device to place the configure ops.
    """
    def configure(credentials, block_cache):
        """Helper function to actually configure GCS."""
        if credentials:
            # Accept a dict as well; the op requires a JSON string, fed via a
            # placeholder so the secret is not embedded in the graph.
            if isinstance(credentials, dict):
                credentials = json.dumps(credentials)
            placeholder = array_ops.placeholder(dtypes.string)
            op = gen_gcs_config_ops.gcs_configure_credentials(placeholder)
            session.run(op, feed_dict={placeholder: credentials})
        if block_cache:
            op = gen_gcs_config_ops.gcs_configure_block_cache(
                max_cache_size=block_cache.max_bytes,
                block_size=block_cache.block_size,
                max_staleness=block_cache.max_staleness)
            session.run(op)
    # Optionally pin the configure ops to a specific device.
    if device:
        with ops.device(device):
            return configure(credentials, block_cache)
    return configure(credentials, block_cache)
def configure_colab_session(session):
    """ConfigureColabSession configures the GCS file system in Colab.
    Args:
      session: A `tf.compat.v1.Session` session.
    """
    # Load the application default credentials (adc) from disk.
    adc_filename = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', '/content/adc.json')
    with open(adc_filename) as adc_file:
        creds = json.load(adc_file)
    METHOD_NAME(session, credentials=creds)
7,077 | check list of dict table | # -*- coding: utf-8 -*-
"""Machine type checkers for Table scitype.
Exports checkers for Table scitype:
check_dict: dict indexed by pairs of str
1st element = mtype - str
2nd element = scitype - str
elements are checker/validation functions for mtype
Function signature of all elements
check_dict[(mtype, scitype)]
Parameters
----------
obj - object to check
return_metadata - bool, optional, default=False
if False, returns only "valid" return
if True, returns all three return objects
var_name: str, optional, default="obj" - name of input in error messages
Returns
-------
valid: bool - whether obj is a valid object of mtype/scitype
msg: str - error message if object is not valid, otherwise None
returned only if return_metadata is True
metadata: dict - metadata about obj if valid, otherwise None
returned only if return_metadata is True
fields:
"is_univariate": bool, True iff table has one variable
"is_empty": bool, True iff table has no variables or no instances
"has_nans": bool, True iff the panel contains NaN values
"n_instances": int, number of instances/rows in the table
"""
__author__ = ["fkiraly"]
__all__ = ["check_dict"]
import numpy as np
import pandas as pd
check_dict = dict()
PRIMITIVE_TYPES = (float, int, str)
def _ret(valid, msg, metadata, return_metadata):
if return_metadata:
return valid, msg, metadata
else:
return valid
def check_pddataframe_table(obj, return_metadata=False, var_name="obj"):
metadata = dict()
if not isinstance(obj, pd.DataFrame):
msg = f"{var_name} must be a pandas.DataFrame, found {type(obj)}"
return _ret(False, msg, None, return_metadata)
# we now know obj is a pd.DataFrame
index = obj.index
metadata["is_empty"] = len(index) < 1 or len(obj.columns) < 1
metadata["is_univariate"] = len(obj.columns) < 2
metadata["n_instances"] = len(index)
# check whether there are any nans
# compute only if needed
if return_metadata:
metadata["has_nans"] = obj.isna().values.any()
# check that no dtype is object
if "object" in obj.dtypes.values:
msg = f"{var_name} should not have column of 'object' dtype"
return _ret(False, msg, None, return_metadata)
return _ret(True, None, metadata, return_metadata)
check_dict[("pd_DataFrame_Table", "Table")] = check_pddataframe_table
def check_pdseries_table(obj, return_metadata=False, var_name="obj"):
metadata = dict()
if not isinstance(obj, pd.Series):
msg = f"{var_name} must be a pandas.Series, found {type(obj)}"
return _ret(False, msg, None, return_metadata)
# we now know obj is a pd.Series
index = obj.index
metadata["is_empty"] = len(index) < 1
metadata["is_univariate"] = True
metadata["n_instances"] = len(index)
# check that dtype is not object
if "object" == obj.dtypes:
msg = f"{var_name} should not be of 'object' dtype"
return _ret(False, msg, None, return_metadata)
# check whether index is equally spaced or if there are any nans
# compute only if needed
if return_metadata:
metadata["has_nans"] = obj.isna().values.any()
return _ret(True, None, metadata, return_metadata)
check_dict[("pd_Series_Table", "Table")] = check_pdseries_table
def check_numpy1d_table(obj, return_metadata=False, var_name="obj"):
metadata = dict()
if not isinstance(obj, np.ndarray):
msg = f"{var_name} must be a numpy.ndarray, found {type(obj)}"
return _ret(False, msg, None, return_metadata)
if len(obj.shape) != 1:
msg = f"{var_name} must be 1D numpy.ndarray, but found {len(obj.shape)}D"
return _ret(False, msg, None, return_metadata)
# we now know obj is a 1D np.ndarray
metadata["is_empty"] = len(obj) < 1
metadata["n_instances"] = len(obj)
# 1D numpy arrays are considered univariate
metadata["is_univariate"] = True
# check whether there any nans; compute only if requested
if return_metadata:
metadata["has_nans"] = pd.isnull(obj).any()
return _ret(True, None, metadata, return_metadata)
check_dict[("numpy1D", "Table")] = check_numpy1d_table
def check_numpy2d_table(obj, return_metadata=False, var_name="obj"):
metadata = dict()
if not isinstance(obj, np.ndarray):
msg = f"{var_name} must be a numpy.ndarray, found {type(obj)}"
return _ret(False, msg, None, return_metadata)
if len(obj.shape) != 2:
msg = f"{var_name} must be 1D or 2D numpy.ndarray, but found {len(obj.shape)}D"
return _ret(False, msg, None, return_metadata)
# we now know obj is a 2D np.ndarray
metadata["is_empty"] = len(obj) < 1 or obj.shape[1] < 1
metadata["is_univariate"] = obj.shape[1] < 2
metadata["n_instances"] = obj.shape[0]
# check whether there any nans; compute only if requested
if return_metadata:
metadata["has_nans"] = pd.isnull(obj).any()
return _ret(True, None, metadata, return_metadata)
check_dict[("numpy2D", "Table")] = check_numpy2d_table
def METHOD_NAME(obj, return_metadata=False, var_name="obj"):
    """Check whether obj is a valid list-of-dict table ("Table" scitype).

    Each dict is one row; keys are column names and values must be of
    primitive type (see PRIMITIVE_TYPES). Returns via _ret: validity flag,
    error message (or None), and a metadata dict.
    """
    metadata = dict()
    if not isinstance(obj, list):
        msg = f"{var_name} must be a list of dict, found {type(obj)}"
        return _ret(False, msg, None, return_metadata)
    if not np.all([isinstance(x, dict) for x in obj]):
        # bug fix: np.where was handed a generator, which yields a meaningless
        # result; report the offending indices explicitly instead
        bad_indices = [i for i, x in enumerate(obj) if not isinstance(x, dict)]
        msg = (
            f"{var_name} must be a list of dict, but elements at following "
            f"indices are not dict: {bad_indices}"
        )
        return _ret(False, msg, None, return_metadata)
    for i, d in enumerate(obj):
        for key in d.keys():
            if not isinstance(d[key], PRIMITIVE_TYPES):
                msg = (
                    "all entries must be of primitive type (str, int, float), but "
                    f"found {type(d[key])} at index {i}, key {key}"
                )
                # bug fix: the failure message was previously built but never
                # returned, so tables with non-primitive entries passed the check
                return _ret(False, msg, None, return_metadata)
    # we now know obj is a list of dict with primitive values
    # metadata below is potentially costly, so compute only if requested
    if return_metadata:
        multivariate_because_one_row = np.any([len(x) > 1 for x in obj])
        if not multivariate_because_one_row:
            # even single-entry rows are multivariate if their keys differ
            all_keys = np.unique([key for d in obj for key in d.keys()])
            multivariate_because_keys_different = len(all_keys) > 1
            multivariate = multivariate_because_keys_different
        else:
            multivariate = multivariate_because_one_row
        metadata["is_univariate"] = not multivariate
        metadata["has_nans"] = np.any(
            [pd.isnull(d[key]) for d in obj for key in d.keys()]
        )
    metadata["is_empty"] = len(obj) < 1 or np.all([len(x) < 1 for x in obj])
    metadata["n_instances"] = len(obj)
    return _ret(True, None, metadata, return_metadata)
check_dict[("list_of_dict", "Table")] = METHOD_NAME
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from logging import Logger, getLogger
from typing import Any, Iterable, Mapping
from airbyte_cdk.destinations import Destination
from airbyte_cdk.models import AirbyteConnectionStatus, AirbyteMessage, ConfiguredAirbyteCatalog, DestinationSyncMode, Status, Type
from destination_cumulio.client import CumulioClient
from destination_cumulio.writer import CumulioWriter
logger = getLogger("airbyte")
class DestinationCumulio(Destination):
    """Airbyte destination connector that pushes records into Cumul.io datasets."""

    def METHOD_NAME(
        self,
        config: Mapping[str, Any],
        configured_catalog: ConfiguredAirbyteCatalog,
        input_messages: Iterable[AirbyteMessage],
    ) -> Iterable[AirbyteMessage]:
        """Reads the input stream of messages, config, and catalog to write data to the destination.

        This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received in the
        input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been successfully
        persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
        then the source is given the last state message output from this method as the starting point of the next sync.

        :param config: dict of JSON configuration matching the configuration declared in spec.json. Current format:
            {
                'api_host': '<api_host_url, e.g. https://api.cumul.io>',
                'api_key': '<api_key>',
                'api_token': '<api_token>'
            }
        :param configured_catalog: schema of the data being received and how it should be persisted in the destination.
        :param input_messages: stream of input messages received from the source.
        :return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs.
        """
        writer = CumulioWriter(config, configured_catalog, logger)

        for configured_stream in configured_catalog.streams:
            # Cumul.io does not support removing all data from an existing dataset, and removing the dataset itself will break existing
            # dashboards built on top of it.
            # Instead, the connector will make sure to push the first batch of data as a "replace" action: this will cause all existing data
            # to be replaced with the first batch of data. All next batches will be pushed as an "append" action.
            if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
                writer.delete_stream_entries(configured_stream.stream.name)

        for message in input_messages:
            if message.type == Type.STATE:
                # Yielding a state message indicates that all records which came before it have been written to the destination.
                # We flush all write buffers in the writer, and then output the state message itself.
                writer.flush_all()
                yield message
            elif message.type == Type.RECORD:
                record = message.record
                assert record is not None
                assert record.stream is not None
                assert record.data is not None
                writer.queue_write_operation(record.stream, record.data)
            else:
                # ignore other message types for now
                continue

        # Make sure to flush any records still in the queue
        writer.flush_all()

    def check(self, logger: Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
        """Tests if the input configuration can be used to successfully connect to the destination with the needed permissions.

        This will test whether the combination of the Cumul.io API host, API key and API token is valid.

        :param logger: Logging object to display debug/info/error to the logs
            (logs will not be accessible via airbyte UI if they are not passed to this logger)
        :param config: Json object containing the configuration of this destination, content of this json is as specified in
            the properties of the spec.json file
        :return: AirbyteConnectionStatus indicating a Success or Failure
        """
        try:
            client = CumulioClient(config, logger)
            # Verify access by hitting Cumul.io authentication endpoint
            client.test_api_token()
            # We're no longer using testing a data push as this might take some time.
            # If the API host, key, and token are valid, we can assume Data can be pushed using it.
            return AirbyteConnectionStatus(status=Status.SUCCEEDED)
        except Exception as e:
            # The Cumul.io Python SDK currently returns a generic error message when an issue occurs during the request,
            # or when the request return e.g. a 401 Unauthorized HTTP response code.
            # We'll assume that either the API host is incorrect, or the API key and token are no longer valid.
            # Bug fix: compare the exception *message* to the SDK's generic text. The previous
            # `not e == "Something went wrong"` compared an Exception object to a str, which is
            # always True, so the friendly hint below was unreachable.
            if str(e) != "Something went wrong":
                return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
            return AirbyteConnectionStatus(
                status=Status.FAILED,
                message="An exception occurred: could it be that the API host is incorrect, or the API key and token are no longer valid?",
            )
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Helper functions for downloading and accessing sample data.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
# NOTE: skip logging imports so that this module may be run as a script
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import hashlib
import json
from os.path import splitext
from pathlib import Path
from sys import stdout
from typing import TYPE_CHECKING, Any, TextIO
from urllib.parse import urljoin
from urllib.request import urlopen
# NOTE: since downloading sampledata is not a common occurrence, non-stdlib
# imports are generally deferrered in this module
if TYPE_CHECKING:
import pandas as pd
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'download',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def download(progress: bool = True) -> None:
    ''' Download larger data sets for various Bokeh examples.

    '''
    data_dir = external_data_dir(create=True)
    print(f"Using data directory: {data_dir}")

    # HTTP requests are cheaper for us, and there is nothing private to protect
    base_url = 'http://sampledata.bokeh.org'

    for file_name, known_md5 in METHOD_NAME().items():
        local_path = data_dir / real_name(file_name)

        # skip files already on disk whose checksum matches the manifest entry
        if local_path.exists():
            digest = hashlib.md5(local_path.read_bytes()).hexdigest()
            if digest == known_md5:
                print(f"Skipping {file_name!r} (checksum match)")
                continue

        print(f"Fetching {file_name!r}")
        _download_file(base_url, file_name, data_dir, progress=progress)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def real_name(name: str) -> str:
    """Map a download artifact name to its on-disk file name.

    A ``.zip`` wrapper is stripped; archives with no inner extension unpack
    to a ``.csv`` of the same stem. Any other name is returned unchanged.
    """
    stem, ext = splitext(name)
    if ext != ".zip":
        return name
    # zipped artifacts without an inner extension contain a CSV of the same stem
    return stem if splitext(stem)[1] else f"{stem}.csv"
def METHOD_NAME() -> dict[str, str]:
    """Load the sample-data manifest mapping artifact names to MD5 checksums."""
    manifest = Path(__file__).parent / "sampledata.json"
    with manifest.open("rb") as f:
        return dict(json.load(f))
def external_csv(module: str, name: str, **kw: Any) -> pd.DataFrame:
    """Read a downloaded (external) sample-data CSV file into a DataFrame."""
    import pandas as pd

    csv_path = external_path(name)
    return pd.read_csv(csv_path, **kw)
def external_data_dir(*, create: bool = False) -> Path:
    """Return the on-disk directory holding downloaded sample data.

    The default location is ``~/.bokeh/data``; a ``sampledata_dir`` entry in
    the ``~/.bokeh/config`` YAML file overrides it.

    :param create: when True, create the directory (and ``~/.bokeh``) if it
        does not exist, instead of raising.
    :raises RuntimeError: if pyyaml is unavailable, the directory is missing
        and ``create`` is False, the directory cannot be created, or the path
        exists but is not a directory.
    """
    try:
        import yaml
    except ImportError:
        raise RuntimeError("'yaml' and 'pyyaml' are required to use bokeh.sampledata functions")

    bokeh_dir = _bokeh_dir(create=create)
    data_dir = bokeh_dir / "data"

    # An optional config file may redirect the data dir; fall back silently on
    # any problem reading it. Bug fix: also tolerate a config file that exists
    # but lacks the 'sampledata_dir' key (previously an uncaught KeyError).
    try:
        config = yaml.safe_load(open(bokeh_dir / 'config'))
        data_dir = Path.expanduser(config['sampledata_dir'])
    except (KeyError, OSError, TypeError):
        pass

    if not data_dir.exists():
        if not create:
            raise RuntimeError('bokeh sample data directory does not exist, please execute bokeh.sampledata.download()')
        print(f"Creating {data_dir} directory")
        try:
            data_dir.mkdir()
        except OSError:
            raise RuntimeError(f"could not create bokeh data directory at {data_dir}")
    else:
        if not data_dir.is_dir():
            raise RuntimeError(f"{data_dir} exists but is not a directory")

    return data_dir
def external_path(file_name: str) -> Path:
    """Return the verified on-disk path of a downloaded sample-data file.

    Raises RuntimeError if the file is missing, is not listed in the
    checksum manifest, or its MD5 no longer matches the manifest entry.
    """
    data_dir = external_data_dir()
    file_path = data_dir / file_name
    if not file_path.exists() or not file_path.is_file():
        raise RuntimeError(f"Could not locate external data file {file_path}. Please execute bokeh.sampledata.download()")
    with open(file_path, "rb") as file:
        # the manifest may list the file directly or as a .zip download artifact
        meta = METHOD_NAME()
        known_md5 = meta.get(file_name) or \
            meta.get(f"{file_name}.zip") or \
            meta.get(f"{splitext(file_name)[0]}.zip")
        if known_md5 is None:
            raise RuntimeError(f"Unknown external data file {file_name}")
        # compare the on-disk checksum with the manifest to detect stale files
        local_md5 = hashlib.md5(file.read()).hexdigest()
        if known_md5 != local_md5:
            raise RuntimeError(f"External data file {file_path} is outdated. Please execute bokeh.sampledata.download()")
    return file_path
def package_csv(module: str, name: str, **kw: Any) -> pd.DataFrame:
    """Read a CSV file bundled inside the package into a DataFrame."""
    import pandas as pd

    csv_path = package_path(name)
    return pd.read_csv(csv_path, **kw)
def package_dir() -> Path:
    """Return the absolute path of the bundled ``sampledata/_data`` directory."""
    data_dir = Path(__file__).parents[1] / "sampledata" / "_data"
    return data_dir.resolve()
def package_path(filename: str | Path) -> Path:
    """Return the path of *filename* inside the bundled sample-data directory."""
    return package_dir() / filename
def load_json(filename: str | Path) -> Any:
    """Parse the JSON document stored at *filename* and return the result."""
    return json.loads(Path(filename).read_bytes())
def open_csv(filename: str | Path) -> TextIO:
    """Open a CSV file for reading with csv-module-friendly settings.

    ``newline=''`` lets the csv module handle line endings itself.
    """
    return open(filename, mode="r", newline="", encoding="utf8")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _bokeh_dir(create: bool = False) -> Path:
    """Return ``~/.bokeh``, optionally creating it.

    When *create* is False a missing directory is returned as-is (callers
    check existence themselves); when True it is created, raising
    RuntimeError on failure or if the path exists but is not a directory.
    """
    bokeh_dir = Path("~").expanduser() / ".bokeh"
    if not bokeh_dir.exists():
        if not create: return bokeh_dir
        print(f"Creating {bokeh_dir} directory")
        try:
            bokeh_dir.mkdir()
        except OSError:
            raise RuntimeError(f"could not create bokeh config directory at {bokeh_dir}")
    else:
        if not bokeh_dir.is_dir():
            raise RuntimeError(f"{bokeh_dir} exists but is not a directory")
    return bokeh_dir
def _download_file(base_url: str, filename: str, data_dir: Path, progress: bool = True) -> None:
    """Fetch *filename* from *base_url* into *data_dir*, unpacking .zip archives.

    :param progress: when True, write a simple byte-count progress line to stdout.
    """
    # These are actually somewhat expensive imports that added ~5% to overall
    # typical bokeh import times. Since downloading sampledata is not a common
    # action, we defer them to inside this function.
    from zipfile import ZipFile

    file_url = urljoin(base_url, filename)
    file_path = data_dir / filename

    url = urlopen(file_url)

    with open(file_path, 'wb') as file:
        file_size = int(url.headers["Content-Length"])
        # bug fix: report the actual file name instead of the literal "(unknown)"
        print(f"Downloading: {filename} ({file_size} bytes)")

        fetch_size = 0
        block_size = 16384

        while True:
            data = url.read(block_size)
            if not data:
                break

            fetch_size += len(data)
            file.write(data)

            if progress:
                # bug fix: inside an f-string "%" needs no doubling -- "%%"
                # printed a literal double percent sign after the percentage
                status = f"\r{fetch_size:< 10d} [{fetch_size*100.0/file_size:6.2f}%]"
                stdout.write(status)
                stdout.flush()

    if progress:
        print()

    real_name, ext = splitext(filename)

    if ext == '.zip':
        # bare archives (no inner extension) contain a CSV of the same stem
        if not splitext(real_name)[1]:
            real_name += ".csv"

        print(f"Unpacking: {real_name}")

        with ZipFile(file_path, 'r') as zip_file:
            zip_file.extract(real_name, data_dir)

        file_path.unlink()
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# This is necessary so that we can run the sampledata download code in the
# release build, before an actual package exists.
if __name__ == "__main__":
    # bug fix: removed a stray " |" extraction artifact that broke the syntax
    download(progress=False)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebPubSubHubResult',
'AwaitableGetWebPubSubHubResult',
'get_web_pub_sub_hub',
'get_web_pub_sub_hub_output',
]
@pulumi.output_type
class GetWebPubSubHubResult:
    """
    A hub setting
    """
    # Auto-generated output type (see file header): holds the values returned by
    # the getWebPubSubHub invoke. The guards below only validate the raw types
    # handed back by the pulumi engine; they do not coerce anything.
    def __init__(__self__, id=None, name=None, properties=None, system_data=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource Id for the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.WebPubSubHubPropertiesResponse':
        """
        Properties of a hub.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource - e.g. "Microsoft.SignalRService/SignalR"
        """
        return pulumi.get(self, "type")
class AwaitableGetWebPubSubHubResult(GetWebPubSubHubResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result usable with `await` in pulumi's async machinery; the
        # dead `if False: yield` marks this as a generator without yielding.
        if False:
            yield self
        return GetWebPubSubHubResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            type=self.type)
def METHOD_NAME(hub_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        resource_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebPubSubHubResult:
    """
    Get a hub setting.
    :param str hub_name: The hub name.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str resource_name: The name of the resource.
    :return: the hub setting, wrapped in an awaitable result object.
    """
    __args__ = dict()
    __args__['hubName'] = hub_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # dispatch the typed invoke to the azure-native provider
    __ret__ = pulumi.runtime.invoke('azure-native:webpubsub/v20230201:getWebPubSubHub', __args__, opts=opts, typ=GetWebPubSubHubResult).value
    return AwaitableGetWebPubSubHubResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_web_pub_sub_hub_output(hub_name: Optional[pulumi.Input[str]] = None,
                               resource_group_name: Optional[pulumi.Input[str]] = None,
                               resource_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebPubSubHubResult]:
    """
    Get a hub setting.
    :param str hub_name: The hub name.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str resource_name: The name of the resource.
    :return: a pulumi Output wrapping `GetWebPubSubHubResult`.
    """
    # The lift_output_func decorator supplies the implementation by lifting the
    # plain invoke above into pulumi's Output machinery; the body is a stub.
    # (bug fix: removed a stray " |" extraction artifact after the ellipsis)
    ...
# Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from __future__ import annotations
import abc
from typing import Any, Callable, List, Optional, Set, Union
import networkx as nx
from ..common._listattributes import _ListAttributes
from ..common._utils import _Subscriber
from ..data.data_node import DataNode
from ..job.job import Job
from ..task.task import Task
from ._dag import _DAG
class Submittable:
    """Instance of an entity that can be submitted for execution.

    A submittable holds functions that can be used to build the execution directed acyclic graph.

    Attributes:
        subscribers (List[Callable]): The list of callbacks to be called on `Job^`'s status change.
    """

    def __init__(self, subscribers: Optional[List[_Subscriber]] = None):
        self._subscribers = _ListAttributes(self, subscribers or list())

    @abc.abstractmethod
    def submit(
        self,
        callbacks: Optional[List[Callable]] = None,
        force: bool = False,
        wait: bool = False,
        timeout: Optional[Union[float, int]] = None,
    ):
        raise NotImplementedError

    def get_inputs(self) -> Set[DataNode]:
        """Return the set of input data nodes of the submittable entity.

        Returns:
            The set of input data nodes.
        """
        dag = self._build_dag()
        return self.__get_inputs(dag)

    def __get_inputs(self, dag: nx.DiGraph) -> Set[DataNode]:
        # Inputs are data nodes with no incoming edge in the execution DAG.
        return {node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)}

    def get_outputs(self) -> Set[DataNode]:
        """Return the set of output data nodes of the submittable entity.

        Returns:
            The set of output data nodes.
        """
        dag = self._build_dag()
        return self.__get_outputs(dag)

    def __get_outputs(self, dag: nx.DiGraph) -> Set[DataNode]:
        # Consistency fix: annotate with typing.Set like every other method in
        # this class (was the lone builtin-generic `set[DataNode]` spelling).
        # Outputs are data nodes with no outgoing edge in the execution DAG.
        return {node for node, degree in dict(dag.out_degree).items() if degree == 0 and isinstance(node, DataNode)}

    def get_intermediate(self) -> Set[DataNode]:
        """Return the set of intermediate data nodes of the submittable entity.

        Returns:
            The set of intermediate data nodes.
        """
        dag = self._build_dag()
        all_data_nodes_in_dag = {node for node in dag.nodes if isinstance(node, DataNode)}
        return all_data_nodes_in_dag - self.__get_inputs(dag) - self.__get_outputs(dag)

    def is_ready_to_run(self) -> bool:
        """Indicate if the entity is ready to be run.

        Returns:
            True if the given entity is ready to be run. False otherwise.
        """
        return all(dn.is_ready_for_reading for dn in self.get_inputs())

    def data_nodes_being_edited(self) -> Set[DataNode]:
        """Return the set of data nodes of the submittable entity that are being edited.

        Returns:
            The set of data nodes that are being edited.
        """
        dag = self._build_dag()
        return {node for node in dag.nodes if isinstance(node, DataNode) and node.edit_in_progress}

    @abc.abstractmethod
    def subscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None):
        raise NotImplementedError

    @abc.abstractmethod
    def unsubscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None):
        raise NotImplementedError

    @abc.abstractmethod
    def _get_set_of_tasks(self) -> Set[Task]:
        raise NotImplementedError

    def _get_dag(self) -> _DAG:
        return _DAG(self._build_dag())

    def _build_dag(self) -> nx.DiGraph:
        # Data nodes feed the tasks that read them and tasks feed the data nodes
        # they write; tasks with neither inputs nor outputs become lone vertices.
        graph = nx.DiGraph()
        tasks = self._get_set_of_tasks()
        for task in tasks:
            if has_input := task.input:
                for predecessor in task.input.values():
                    graph.add_edges_from([(predecessor, task)])
            if has_output := task.output:
                for successor in task.output.values():
                    graph.add_edges_from([(task, successor)])
            if not has_input and not has_output:
                graph.add_node(task)
        return graph

    def METHOD_NAME(self) -> List[List[Task]]:
        # Topologically sorted generations of tasks: source data nodes are
        # stripped first, then generations containing no Task are filtered out.
        dag = self._build_dag()
        remove = [node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)]
        dag.remove_nodes_from(remove)
        return list(nodes for nodes in nx.topological_generations(dag) if (Task in (type(node) for node in nodes)))

    def _add_subscriber(self, callback: Callable, params: Optional[List[Any]] = None):
        params = [] if params is None else params
        self._subscribers.append(_Subscriber(callback=callback, params=params))

    def _remove_subscriber(self, callback: Callable, params: Optional[List[Any]] = None):
        if params is not None:
            self._subscribers.remove(_Subscriber(callback, params))
        else:
            elem = [x for x in self._subscribers if x.callback == callback]
            if not elem:
                # Bug fix: raise with a message instead of a bare ValueError so
                # the failure is diagnosable by callers.
                raise ValueError("The callback to remove is not in the subscriber list.")
            self._subscribers.remove(elem[0])
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetHybridIdentityMetadataResult',
'AwaitableGetHybridIdentityMetadataResult',
'get_hybrid_identity_metadata',
'get_hybrid_identity_metadata_output',
]
@pulumi.output_type
class GetHybridIdentityMetadataResult:
    """
    Defines the HybridIdentityMetadata.
    """
    # Auto-generated output type (see file header): validates and stores the raw
    # values returned by the getHybridIdentityMetadata invoke.
    def __init__(__self__, id=None, identity=None, name=None, provisioning_state=None, public_key=None, resource_uid=None, METHOD_NAME=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_key and not isinstance(public_key, str):
            raise TypeError("Expected argument 'public_key' to be a str")
        pulumi.set(__self__, "public_key", public_key)
        if resource_uid and not isinstance(resource_uid, str):
            raise TypeError("Expected argument 'resource_uid' to be a str")
        pulumi.set(__self__, "resource_uid", resource_uid)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identity(self) -> 'outputs.IdentityResponse':
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Gets or sets the provisioning state.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicKey")
    def public_key(self) -> Optional[str]:
        """
        Gets or sets the Public Key.
        """
        return pulumi.get(self, "public_key")
    @property
    @pulumi.getter(name="resourceUid")
    def resource_uid(self) -> Optional[str]:
        """
        Gets or sets the Vm Id.
        """
        return pulumi.get(self, "resource_uid")
    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetHybridIdentityMetadataResult(GetHybridIdentityMetadataResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result usable with `await` in pulumi's async machinery; the
        # dead `if False: yield` marks this as a generator without yielding.
        if False:
            yield self
        return GetHybridIdentityMetadataResult(
            id=self.id,
            identity=self.identity,
            name=self.name,
            provisioning_state=self.provisioning_state,
            public_key=self.public_key,
            resource_uid=self.resource_uid,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_hybrid_identity_metadata(metadata_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 virtual_machine_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHybridIdentityMetadataResult:
    """
    Implements HybridIdentityMetadata GET method.
    :param str metadata_name: Name of the HybridIdentityMetadata.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_machine_name: Name of the vm.
    :return: the HybridIdentityMetadata resource, wrapped in an awaitable result object.
    """
    __args__ = dict()
    __args__['metadataName'] = metadata_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualMachineName'] = virtual_machine_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # dispatch the typed invoke to the azure-native provider
    __ret__ = pulumi.runtime.invoke('azure-native:scvmm/v20230401preview:getHybridIdentityMetadata', __args__, opts=opts, typ=GetHybridIdentityMetadataResult).value
    return AwaitableGetHybridIdentityMetadataResult(
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        public_key=pulumi.get(__ret__, 'public_key'),
        resource_uid=pulumi.get(__ret__, 'resource_uid'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_hybrid_identity_metadata)
def get_hybrid_identity_metadata_output(metadata_name: Optional[pulumi.Input[str]] = None,
                                        resource_group_name: Optional[pulumi.Input[str]] = None,
                                        virtual_machine_name: Optional[pulumi.Input[str]] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHybridIdentityMetadataResult]:
    """
    Implements HybridIdentityMetadata GET method.
    :param str metadata_name: Name of the HybridIdentityMetadata.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_machine_name: Name of the vm.
    :return: a pulumi Output wrapping `GetHybridIdentityMetadataResult`.
    """
    # The lift_output_func decorator supplies the implementation by lifting the
    # plain invoke above into pulumi's Output machinery; the body is a stub.
    # (bug fix: removed a stray " |" extraction artifact after the ellipsis)
    ...
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime, timezone
from . import Framework
class Hook(Framework.TestCase):
    # Tests run against recorded HTTP fixtures (via Framework.TestCase), so the
    # attribute values asserted below reflect the recorded repository state.
    def METHOD_NAME(self):
        super().METHOD_NAME()
        self.hook = self.g.get_user().get_repo("PyGithub").get_hook(257993)
    def testAttributes(self):
        self.assertTrue(self.hook.active)  # the recorded hook fixture is active
        self.assertEqual(self.hook.config, {"url": "http://foobar.com"})
        self.assertEqual(
            self.hook.created_at,
            datetime(2012, 5, 19, 6, 1, 45, tzinfo=timezone.utc),
        )
        self.assertEqual(self.hook.events, ["push"])
        self.assertEqual(self.hook.id, 257993)
        self.assertEqual(self.hook.last_response.status, "ok")
        self.assertEqual(self.hook.last_response.message, "OK")
        self.assertEqual(self.hook.last_response.code, 200)
        self.assertEqual(self.hook.name, "web")
        self.assertEqual(
            self.hook.updated_at,
            datetime(2012, 5, 29, 18, 49, 47, tzinfo=timezone.utc),
        )
        self.assertEqual(self.hook.url, "https://api.github.com/repos/jacquev6/PyGithub/hooks/257993")
        self.assertEqual(
            self.hook.test_url,
            "https://api.github.com/repos/jacquev6/PyGithub/hooks/257993/tests",
        )
        self.assertEqual(
            self.hook.ping_url,
            "https://api.github.com/repos/jacquev6/PyGithub/hooks/257993/pings",
        )
        self.assertEqual(
            repr(self.hook),
            'Hook(url="https://api.github.com/repos/jacquev6/PyGithub/hooks/257993", id=257993)',
        )
        self.assertEqual(repr(self.hook.last_response), 'HookResponse(status="ok")')
    def testEditWithMinimalParameters(self):
        self.hook.edit("web", {"url": "http://foobar.com/hook"})
        self.assertEqual(self.hook.config, {"url": "http://foobar.com/hook"})
        self.assertEqual(
            self.hook.updated_at,
            datetime(2012, 5, 19, 5, 8, 16, tzinfo=timezone.utc),
        )
    def testDelete(self):
        self.hook.delete()
    def testTest(self):
        self.hook.test()  # This does not update attributes of hook
    def testPing(self):
        self.hook.ping()  # This does not update attributes of hook
    def testEditWithAllParameters(self):
        # exercise events, add_events and remove_events edits in sequence
        self.hook.edit("web", {"url": "http://foobar.com"}, events=["fork", "push"])
        self.assertEqual(self.hook.events, ["fork", "push"])
        self.hook.edit("web", {"url": "http://foobar.com"}, add_events=["push"])
        self.assertEqual(self.hook.events, ["fork", "push"])
        self.hook.edit("web", {"url": "http://foobar.com"}, remove_events=["fork"])
        self.assertEqual(self.hook.events, ["push"])
        self.hook.edit("web", {"url": "http://foobar.com"}, active=True)
        self.assertTrue(self.hook.active)
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import os
required_conan_version = ">=1.53.0"
class NngConan(ConanFile):
    """Conan recipe for nng (nanomsg-next-generation)."""

    name = "nng"
    description = "nanomsg-next-generation: light-weight brokerless messaging"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/nanomsg/nng"
    license = "MIT"
    topics = ("nanomsg", "communication", "messaging", "protocols")

    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "nngcat": [True, False],
        "http": [True, False],
        "tls": [True, False],
        "max_taskq_threads": ["ANY"],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "nngcat": False,
        "http": True,
        "tls": False,
        "max_taskq_threads": "16",
    }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        # fPIC has no meaning on Windows
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # NOTE: name restored from masked METHOD_NAME placeholder; "configure"
        # is the standard Conan hook and matches cmake.configure() in build().
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # nng is a pure C library: C++ settings do not affect the package
        self.settings.rm_safe("compiler.libcxx")
        self.settings.rm_safe("compiler.cppstd")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        if self.options.tls:
            if Version(self.version) < "1.5.2":
                self.requires("mbedtls/2.25.0")
            else:
                self.requires("mbedtls/3.0.0")

    def validate(self):
        compiler_minimum_version = {
            "Visual Studio": "14",
            "msvc": "190",
        }
        minimum_version = compiler_minimum_version.get(str(self.settings.compiler), False)
        if minimum_version and Version(self.settings.compiler.version) < minimum_version:
            raise ConanInvalidConfiguration(
                f"{self.settings.compiler} < {minimum_version} is not supported",
            )
        if not self.options.max_taskq_threads.value.isdigit():
            raise ConanInvalidConfiguration("max_taskq_threads must be an integral number")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables["NNG_TESTS"] = False
        tc.variables["NNG_ENABLE_TLS"] = self.options.tls
        tc.variables["NNG_ENABLE_NNGCAT"] = self.options.nngcat
        tc.variables["NNG_ENABLE_HTTP"] = self.options.http
        tc.variables["NNG_MAX_TASKQ_THREADS"] = self.options.max_taskq_threads
        tc.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, pattern="LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        cmake = CMake(self)
        cmake.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "nng")
        self.cpp_info.set_property("cmake_target_name", "nng::nng")
        self.cpp_info.libs = ["nng"]
        if self.settings.os == "Windows" and not self.options.shared:
            self.cpp_info.system_libs.extend(["mswsock", "ws2_32"])
        elif self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs.extend(["pthread"])
        if self.options.shared:
            self.cpp_info.defines.append("NNG_SHARED_LIB")
        else:
            self.cpp_info.defines.append("NNG_STATIC_LIB")

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.names["cmake_find_package"] = "nng"
        self.cpp_info.names["cmake_find_package_multi"] = "nng"
import torch
from ..lowering import register_lowering
from ..select_algorithm import (
autotune_select_algorithm,
ExternKernelChoice,
TritonTemplate,
)
from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
aten = torch.ops.aten
def bmm_grid(b, m, n, meta):
    """Launch grid for the batched-matmul template: (M-tiles * N-tiles, batch, 1)."""
    tiles_m = cdiv(m, meta["BLOCK_M"])
    tiles_n = cdiv(n, meta["BLOCK_N"])
    return (tiles_m * tiles_n, b, 1)
# Triton template for batched matmul: (B, M, K) @ (B, K, N) -> (B, M, N).
# Based on triton.ops.matmul, with tl.program_id(1) indexing the batch.
# NOTE(review): the source body is a runtime string; its original
# indentation appears stripped by extraction and is preserved unchanged.
bmm_template = TritonTemplate(
    name="bmm",
    grid=bmm_grid,
    source=r"""
{{def_kernel("A", "B")}}
M = {{size("A", -2)}}
N = {{size("B", -1)}}
K = {{size("A", -1)}}
stride_aq = {{stride("A", 0)}}
stride_am = {{stride("A", 1)}}
stride_ak = {{stride("A", 2)}}
stride_bq = {{stride("B", 0)}}
stride_bk = {{stride("B", 1)}}
stride_bn = {{stride("B", 2)}}
# based on triton.ops.matmul
pid = tl.program_id(0)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = tl.arange(0, BLOCK_K)
idx_q = tl.program_id(1) # batch dimension for BMM
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.)
b = tl.load(B, mask=rk[:, None] < k, other=0.)
acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
A += BLOCK_K * stride_ak
B += BLOCK_K * stride_bk
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
idx_q = tl.program_id(1) # batch dimension for BMM
idx_m = rm[:, None]
idx_n = rn[None, :]
mask = (idx_m < M) & (idx_n < N)
# inductor generates a suffix
{{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
""",
)
# ATen extern kernels autotuned against the Triton template choices below.
aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
@register_lowering(aten.bmm)
def tuned_bmm(mat1, mat2, *, layout=None):
    """Lower ``aten.bmm`` by autotuning between the ATen extern kernel and
    Triton template instantiations.

    NOTE: name restored from masked METHOD_NAME placeholder ("tuned bmm");
    the function is only referenced via the lowering registry.
    """
    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)

    # options to tune from
    choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
    if use_triton_template(layout):
        for config in mm_configs(m, n, k):
            bmm_template.maybe_append_choice(
                choices,
                (mat1, mat2),
                layout,
                **mm_options(config, k, layout),
            )
    return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)
# Don't register this since it is slower than decomposing it
# @register_lowering(aten.baddbmm)
def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
    """Autotuned baddbmm lowering (intentionally unregistered -- see note above)."""
    m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)

    # options to tune from
    choices = (
        [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
        if use_aten_gemm_kernels()
        else []
    )
    if use_triton_template(layout):
        for config in mm_configs(m, n, k):
            bmm_template.maybe_append_choice(
                choices,
                (inp, mat1, mat2),
                layout,
                **mm_options(config, k, layout),
                # `inp` is consumed by the epilogue, not the matmul itself
                prefix_args=1,
                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
            )
    return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)
""" Wrapper around a Redis-backed registry for storing resources in a hash (https://redis.io/topics/data-types).
Redis stores key/values.
key hashes are generated from a dictionary (e.g. {"user_id":"a_user_id, "some_other_id":123} will
create a hash named "user_id=a_user_id:some_other_id=123:resources")
resources are tuples (resource_name, resource_value) that are stored with a key as Redis fields.
A same key can have a lot of fields provided they have a different name.
A key can be set as "alive". This creates a secondary key (e.g. "user_id=a_user_id:some_other_id=123:alive").
This key can have a timeout value. When the key times out then the key disappears from Redis automatically.
"""
import logging
from typing import TypedDict
import redis.asyncio as aioredis
from aiohttp import web
from models_library.basic_types import UUIDStr
from ..redis import get_redis_resources_client
from ._constants import APP_CLIENT_SOCKET_REGISTRY_KEY
_logger = logging.getLogger(__name__)
# redis `resources` db has composed-keys formatted as '${user_id=}:${client_session_id=}:{suffix}'
# Example:
# Key: user_id=1:client_session_id=7f40353b-db02-4474-a44d-23ce6a6e428c:alive = 1
# Key: user_id=1:client_session_id=7f40353b-db02-4474-a44d-23ce6a6e428c:resources = {project_id: ... , socket_id: ...}
#
_ALIVE_SUFFIX = "alive" # points to a string type
_RESOURCE_SUFFIX = "resources" # points to a hash (like a dict) type
class _UserRequired(TypedDict, total=True):
    """Required part of every user-session key."""

    user_id: str | int
class UserSessionDict(_UserRequired):
    """Parts of the key used in redis for a user-session"""

    # presumably one per client connection/tab -- verify at call sites
    client_session_id: str
class ResourcesDict(TypedDict, total=False):
    """Field-value pairs of {user_id}:{client_session_id}:resources key"""

    project_id: UUIDStr
    socket_id: str
class RedisResourceRegistry:
    """Keeps a record of connected sockets per user

    redis structure is following
    Redis Hash: key=user_id:client_session_id values={server_id socket_id project_id}

    Example:
        Key: user_id=1:client_session_id=7f40353b-db02-4474-a44d-23ce6a6e428c:alive = 1
        Key: user_id=1:client_session_id=7f40353b-db02-4474-a44d-23ce6a6e428c:resources = {project_id: ... , socket_id: ...}
    """

    def __init__(self, app: web.Application):
        self._app = app

    @property
    def app(self) -> web.Application:
        return self._app

    @classmethod
    def _hash_key(cls, key: UserSessionDict) -> str:
        # e.g. {"user_id": 1, "client_session_id": "abc"} -> "user_id=1:client_session_id=abc"
        hash_key: str = ":".join(f"{k}={v}" for k, v in key.items())
        return hash_key

    @classmethod
    def _decode_hash_key(cls, hash_key: str) -> UserSessionDict:
        """Inverse of _hash_key: strip the :resources/:alive suffix and
        split the remaining "k=v:k=v" pairs back into a dict."""
        tmp_key = (
            hash_key[: -len(f":{_RESOURCE_SUFFIX}")]
            if hash_key.endswith(f":{_RESOURCE_SUFFIX}")
            else hash_key[: -len(f":{_ALIVE_SUFFIX}")]
        )
        key = dict(x.split("=") for x in tmp_key.split(":"))
        return UserSessionDict(**key)  # type: ignore

    @property
    def client(self) -> aioredis.Redis:
        client: aioredis.Redis = get_redis_resources_client(self.app)
        return client

    async def set_resource(
        self, key: UserSessionDict, resource: tuple[str, str]
    ) -> None:
        """Set one (field, value) pair on the session's resources hash."""
        hash_key = f"{self._hash_key(key)}:{_RESOURCE_SUFFIX}"
        field, value = resource
        await self.client.hset(hash_key, mapping={field: value})

    async def get_resources(self, key: UserSessionDict) -> ResourcesDict:
        """Return all resource fields stored for this session."""
        hash_key = f"{self._hash_key(key)}:{_RESOURCE_SUFFIX}"
        fields = await self.client.hgetall(hash_key)
        return ResourcesDict(**fields)  # type: ignore

    async def remove_resource(self, key: UserSessionDict, resource_name: str) -> None:
        # NOTE: name restored from masked METHOD_NAME placeholder
        # ("remove resource"); mirrors set_resource/get_resources above.
        hash_key = f"{self._hash_key(key)}:{_RESOURCE_SUFFIX}"
        await self.client.hdel(hash_key, resource_name)

    async def find_resources(
        self, key: UserSessionDict, resource_name: str
    ) -> list[str]:
        """Return the values of ``resource_name`` across keys matching ``key``."""
        resources: list[str] = []
        # the key might only be partially complete -- presumably its values may
        # contain SCAN MATCH wildcards; verify against callers
        partial_hash_key = f"{self._hash_key(key)}:{_RESOURCE_SUFFIX}"
        async for scanned_key in self.client.scan_iter(match=partial_hash_key):
            if await self.client.hexists(scanned_key, resource_name):
                resources.append(await self.client.hget(scanned_key, resource_name))
        return resources

    async def find_keys(self, resource: tuple[str, str]) -> list[UserSessionDict]:
        """Return every session key whose resources hash holds ``resource``."""
        keys: list[UserSessionDict] = []
        if not resource:
            return keys

        field, value = resource
        async for hash_key in self.client.scan_iter(match=f"*:{_RESOURCE_SUFFIX}"):
            if value == await self.client.hget(hash_key, field):
                keys.append(self._decode_hash_key(hash_key))
        return keys

    async def set_key_alive(self, key: UserSessionDict, timeout: int) -> None:
        # setting the timeout to always expire, timeout > 0
        timeout = int(max(1, timeout))
        hash_key = f"{self._hash_key(key)}:{_ALIVE_SUFFIX}"
        await self.client.set(hash_key, 1, ex=timeout)

    async def is_key_alive(self, key: UserSessionDict) -> bool:
        """True while the session's :alive key has not expired."""
        hash_key = f"{self._hash_key(key)}:{_ALIVE_SUFFIX}"
        return await self.client.exists(hash_key) > 0

    async def remove_key(self, key: UserSessionDict) -> None:
        """Drop both the :resources and :alive entries for this session."""
        await self.client.delete(
            f"{self._hash_key(key)}:{_RESOURCE_SUFFIX}",
            f"{self._hash_key(key)}:{_ALIVE_SUFFIX}",
        )

    async def get_all_resource_keys(
        self,
    ) -> tuple[list[UserSessionDict], list[UserSessionDict]]:
        """Return (alive, dead) session keys; dead = has resources but no :alive key."""
        alive_keys = [
            self._decode_hash_key(hash_key)
            async for hash_key in self.client.scan_iter(match=f"*:{_ALIVE_SUFFIX}")
        ]
        dead_keys = [
            self._decode_hash_key(hash_key)
            async for hash_key in self.client.scan_iter(match=f"*:{_RESOURCE_SUFFIX}")
            if self._decode_hash_key(hash_key) not in alive_keys
        ]
        return (alive_keys, dead_keys)
def get_registry(app: web.Application) -> RedisResourceRegistry:
    """Return the RedisResourceRegistry stored on the aiohttp application."""
    registry = app[APP_CLIENT_SOCKET_REGISTRY_KEY]
    assert isinstance(registry, RedisResourceRegistry)  # nosec
    return registry
import os
import sys

import pytest
import asyncio
import tempfile
import shutil
import weakref
from aiohttp import web
from unittest.mock import MagicMock, patch
from pathlib import Path
from gns3server.web.route import Route
from gns3server.controller import Controller
from gns3server.config import Config
from gns3server.compute import MODULES
from gns3server.compute.port_manager import PortManager
from gns3server.compute.project_manager import ProjectManager
# this import will register all handlers
from gns3server.handlers import *
from .handlers.api.base import Query
sys._called_from_test = True
sys.original_platform = sys.platform
if sys.platform.startswith("win"):

    # NOTE(review): Windows-only session loop; presumably the Proactor loop is
    # required for subprocess support in these tests -- confirm.
    @pytest.yield_fixture(scope="session")
    def loop(request):
        """Return an event loop and destroy it at the end of test"""
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)  # Replace main loop to avoid conflict between tests
        yield loop
        asyncio.set_event_loop(None)
@pytest.fixture(scope='function')
async def http_client(aiohttp_client):
    """aiohttp test client with all registered gns3server routes mounted."""
    app = web.Application()
    app['websockets'] = weakref.WeakSet()
    for method, route, handler in Route.get_routes():
        app.router.add_route(method, route, handler)
    return await aiohttp_client(app)
@pytest.fixture
def controller_config_path(tmpdir):
    """Path (str) where the test controller stores its config file."""
    return str(tmpdir / "config" / "gns3_controller.conf")
@pytest.fixture
def controller(tmpdir, controller_config_path):
    """Fresh Controller singleton backed by an empty, pre-touched config file."""
    Controller._instance = None  # force a brand-new singleton per test
    controller = Controller.instance()
    os.makedirs(os.path.dirname(controller_config_path), exist_ok=True)
    Path(controller_config_path).touch()
    controller._config_file = controller_config_path
    controller._config_loaded = True  # skip loading from disk
    return controller
@pytest.fixture
def compute(controller):
    """MagicMock compute node registered on the controller as 'example.com'."""
    compute = MagicMock()
    compute.id = "example.com"
    controller._computes = {"example.com": compute}
    return compute
@pytest.fixture
async def project(loop, tmpdir, controller):
    """Controller project named 'Test'."""
    return await controller.add_project(name="Test")
@pytest.fixture
def compute_project(tmpdir):
    """Compute-side project with a fixed, well-known UUID."""
    return ProjectManager.instance().create_project(project_id="a1e920ca-338a-4e9f-b363-aa607b09dd80")
@pytest.fixture
def compute_api(http_client):
    """
    Return a helper allowing you to call the hypervisor API via HTTP
    """
    return Query(http_client, prefix="/compute", api_version=2)
@pytest.fixture
def controller_api(http_client, controller):
    """
    Return a helper allowing you to call the server API without any prefix
    """
    return Query(http_client, api_version=2)
@pytest.fixture
def config():
    """Global Config singleton, cleared for this test."""
    config = Config.instance()
    config.clear()
    return config
@pytest.fixture
def images_dir(config):
    """
    Get the location of images
    """
    path = config.get_section_config("Server").get("images_path")
    os.makedirs(path, exist_ok=True)
    # pre-create the per-emulator subdirectories the tests rely on
    os.makedirs(os.path.join(path, "QEMU"), exist_ok=True)
    os.makedirs(os.path.join(path, "IOU"), exist_ok=True)
    return path
@pytest.fixture
def symbols_dir(config):
    """
    Get the location of symbols
    """
    path = config.get_section_config("Server").get("symbols_path")
    os.makedirs(path, exist_ok=True)
    # fix: removed stray debug print(path) left over from development
    return path
@pytest.fixture
def projects_dir(config):
    """
    Get the location of projects
    """
    path = config.get_section_config("Server").get("projects_path")
    os.makedirs(path, exist_ok=True)
    return path
@pytest.fixture(scope="function")
def port_manager():
    """An instance of port manager"""
    PortManager._instance = None  # fresh singleton per test
    p = PortManager.instance()
    p.console_host = "127.0.0.1"
    return p
@pytest.fixture(scope="function")
def free_console_port(port_manager, compute_project):
    """Get a free TCP port"""
    # In case of already use ports we will raise an exception
    port = port_manager.get_free_tcp_port(compute_project)
    # We release the port immediately in order to allow
    # the test do whatever the test want
    port_manager.release_tcp_port(port, compute_project)
    return port
@pytest.fixture
def darwin_platform():
    """
    Change sys.platform to Darwin for the duration of the test.
    """
    old_platform = sys.platform
    sys.platform = "darwin10.10"
    yield
    # fix: was "sys.plaform" (typo), which left the fake platform in place
    sys.platform = old_platform
@pytest.fixture
def windows_platform():
    """
    Change sys.platform to Windows for the duration of the test.
    """
    old_platform = sys.platform
    sys.platform = "win10"
    yield
    # fix: was "sys.plaform" (typo), which left the fake platform in place
    sys.platform = old_platform
@pytest.fixture
def linux_platform():
    """
    Change sys.platform to Linux for the duration of the test.
    """
    old_platform = sys.platform
    sys.platform = "linuxdebian"
    yield
    # fix: was "sys.plaform" (typo), which left the fake platform in place
    sys.platform = old_platform
@pytest.fixture
def on_gns3vm(linux_platform):
    """
    Mock the hostname to emulate the GNS3 VM
    """
    with patch("gns3server.utils.interfaces.interfaces", return_value=[
            {"name": "eth0", "special": False, "type": "ethernet"},
            {"name": "eth1", "special": False, "type": "ethernet"},
            {"name": "virbr0", "special": True, "type": "ethernet"}]):
        with patch("socket.gethostname", return_value="gns3vm"):
            yield
@pytest.fixture
def ethernet_device():
    """First real network interface name on the host (alphabetical order)."""
    import psutil
    return sorted(psutil.net_if_addrs().keys())[0]
@pytest.fixture
def ubridge_path(config):
    """
    Get the location of a fake ubridge
    """
    path = config.get_section_config("Server").get("ubridge_path")
    os.makedirs(os.path.dirname(path), exist_ok=True)
    open(path, 'w+').close()  # touch an empty executable placeholder
    return path
@pytest.fixture(autouse=True)
def run_around_tests(monkeypatch, config, port_manager):
    """
    This sets up a temporary project file environment around tests.

    NOTE: name restored from masked METHOD_NAME placeholder
    ("run around tests"); stray leftover argument-list comment removed.
    """
    tmppath = tempfile.mkdtemp()

    # reset every compute module singleton between tests
    for module in MODULES:
        module._instance = None

    os.makedirs(os.path.join(tmppath, 'projects'))
    config.set("Server", "projects_path", os.path.join(tmppath, 'projects'))
    config.set("Server", "symbols_path", os.path.join(tmppath, 'symbols'))
    config.set("Server", "images_path", os.path.join(tmppath, 'images'))
    config.set("Server", "appliances_path", os.path.join(tmppath, 'appliances'))
    config.set("Server", "ubridge_path", os.path.join(tmppath, 'bin', 'ubridge'))
    config.set("Server", "auth", False)

    # Prevent executions of the VM if we forgot to mock something
    config.set("VirtualBox", "vboxmanage_path", tmppath)
    config.set("VPCS", "vpcs_path", tmppath)
    config.set("VMware", "vmrun_path", tmppath)
    config.set("Dynamips", "dynamips_path", tmppath)

    # Force turn off KVM because it's not available on CI
    config.set("Qemu", "enable_kvm", False)

    monkeypatch.setattr("gns3server.utils.path.get_default_project_directory", lambda *args: os.path.join(tmppath, 'projects'))

    # Force sys.platform to the original value. Because it seem not be restore correctly at each tests
    sys.platform = sys.original_platform

    yield

    # An helper should not raise Exception
    try:
        shutil.rmtree(tmppath)
    except BaseException:
        pass
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Marker attributes applied to *unselected* points of a splom trace."""

    # color
    # -----
    @property
    def color(self):
        """
        Sets the marker color of unselected points, applied only when a
        selection exists.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # opacity
    # -------
    @property
    def opacity(self):
        """
        Sets the marker opacity of unselected points, applied only when
        a selection exists.

        The 'opacity' property is a number in the interval [0, 1].

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    # size
    # ----
    @property
    def size(self):
        """
        Sets the marker size of unselected points, applied only when a
        selection exists.

        The 'size' property is a number in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "splom.unselected"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: name restored from masked METHOD_NAME placeholder
        # ("prop descriptions"); this is the standard plotly.py property
        # used to build constructor docstrings.
        return """\
        color
            Sets the marker color of unselected points, applied
            only when a selection exists.
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        size
            Sets the marker size of unselected points, applied only
            when a selection exists.
        """

    def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.splom.unselected.Marker`
        color
            Sets the marker color of unselected points, applied
            only when a selection exists.
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        size
            Sets the marker size of unselected points, applied only
            when a selection exists.

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")

        # Validate arg
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.splom.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.unselected.Marker`"""
            )

        # Handle skip_invalid
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import and initialize validators
        from plotly.validators.splom.unselected import marker as v_marker

        self._validators["color"] = v_marker.ColorValidator()
        self._validators["opacity"] = v_marker.OpacityValidator()
        self._validators["size"] = v_marker.SizeValidator()

        # Populate data dict with properties (explicit kwargs win over arg)
        for prop, override in (("color", color), ("opacity", opacity), ("size", size)):
            from_arg = arg.pop(prop, None)
            self[prop] = from_arg if override is None else override

        # Process unknown kwargs
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
__all__ = ["Marker"]
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import contextlib
import copy
import json
import logging
import os
import socket
import threading
import pytest
import botocore.config
import botocore.exceptions
import botocore.session
from botocore import xform_name
from tests import ClientHTTPStubber, mock, temporary_file
logger = logging.getLogger(__name__)
CASES_FILE = os.path.join(os.path.dirname(__file__), 'cases.json')
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data/')
class RetryableException(botocore.exceptions.EndpointConnectionError):
    """Connection-style error that botocore's retry handler will retry."""

    # pass the stubbed message through unchanged
    fmt = '{message}'
class NonRetryableException(Exception):
    """Error that must not trigger botocore's retry logic."""

    pass
# Exceptions a case may legitimately raise; _make_api_call swallows these.
EXPECTED_EXCEPTIONS_THROWN = (
    botocore.exceptions.ClientError,
    NonRetryableException,
    RetryableException,
)
def _load_test_cases():
    """Load cases.json, merge per-case defaults, and substitute ANY_* markers."""
    with open(CASES_FILE) as f:
        raw = json.load(f)
    cases = _get_cases_with_defaults(raw)
    _replace_expected_anys(cases)
    return cases
def _get_cases_with_defaults(loaded_tests):
cases = []
defaults = loaded_tests['defaults']
for case in loaded_tests['cases']:
base = copy.deepcopy(defaults)
base.update(case)
cases.append(base)
return cases
def _replace_expected_anys(test_cases):
for case in test_cases:
for expected_event in case['expectedMonitoringEvents']:
for entry, value in expected_event.items():
if value in ['ANY_STR', 'ANY_INT']:
expected_event[entry] = mock.ANY
@pytest.mark.parametrize("test_case", _load_test_cases())
def test_client_monitoring(test_case):
    """One pytest case per entry in cases.json."""
    _run_test_case(test_case)
@contextlib.contextmanager
def METHOD_NAME(case_configuration, listener_port):
    """Yield a botocore Session configured for one test case.

    Builds the process environment (credentials, region, CSM port, any
    case-specific variables), writes a temporary shared-config file, and
    applies maxRetries when the case specifies it.

    NOTE(review): METHOD_NAME is a masked placeholder; the call site in
    _run_test_case suggests the original name was ``configured_session``.
    Not renamed here to keep this edit self-contained.
    """
    environ = {
        'AWS_ACCESS_KEY_ID': case_configuration['accessKey'],
        'AWS_SECRET_ACCESS_KEY': 'secret-key',
        'AWS_DEFAULT_REGION': case_configuration['region'],
        'AWS_DATA_PATH': DATA_DIR,
        'AWS_CSM_PORT': listener_port,
    }
    if 'sessionToken' in case_configuration:
        environ['AWS_SESSION_TOKEN'] = case_configuration['sessionToken']
    environ.update(case_configuration['environmentVariables'])
    with temporary_file('w') as f:
        _setup_shared_config(
            f, case_configuration['sharedConfigFile'], environ
        )
        with mock.patch('os.environ', environ):
            session = botocore.session.Session()
            if 'maxRetries' in case_configuration:
                _setup_max_retry_attempts(session, case_configuration)
            yield session
def _setup_shared_config(fileobj, shared_config_options, environ):
fileobj.write('[default]\n')
for key, value in shared_config_options.items():
fileobj.write(f'{key} = {value}\n')
fileobj.flush()
environ['AWS_CONFIG_FILE'] = fileobj.name
def _setup_max_retry_attempts(session, case_configuration):
    """Install a default client Config carrying the case's max_attempts."""
    config = botocore.config.Config(
        retries={'max_attempts': case_configuration['maxRetries']}
    )
    session.set_default_client_config(config)
def _run_test_case(case):
    """Execute one monitoring case end-to-end and compare captured CSM events.

    NOTE(review): indentation reconstructed -- the final assert is placed
    after the listener context exits so the listener thread has been joined
    and received_events is complete.
    """
    with MonitoringListener() as listener:
        with METHOD_NAME(
            case['configuration'], listener.port
        ) as session:
            for api_call in case['apiCalls']:
                _make_api_call(session, api_call)
    assert listener.received_events == case['expectedMonitoringEvents']
def _make_api_call(session, api_call):
    """Invoke one operation over the stubbed transport; expected errors are
    swallowed because the case only checks the emitted monitoring events."""
    client = session.create_client(
        api_call['serviceId'].lower().replace(' ', '')
    )
    operation_name = api_call['operationName']
    client_method = getattr(client, xform_name(operation_name))
    with _stubbed_http_layer(client, api_call['attemptResponses']):
        try:
            client_method(**api_call['params'])
        except EXPECTED_EXCEPTIONS_THROWN:
            pass
@contextlib.contextmanager
def _stubbed_http_layer(client, attempt_responses):
    """Stub the client's HTTP layer with one canned response per attempt."""
    with ClientHTTPStubber(client) as stubber:
        _add_stubbed_responses(stubber, attempt_responses)
        yield
def _add_stubbed_responses(stubber, attempt_responses):
    """Queue one stubbed HTTP response or SDK exception per attempt."""
    for response in attempt_responses:
        sdk_exception = response.get('sdkException')
        if sdk_exception is not None:
            _add_sdk_exception(
                stubber, sdk_exception['message'], sdk_exception['isRetryable']
            )
        else:
            _add_stubbed_response(stubber, response)
def _add_sdk_exception(stubber, message, is_retryable):
    """Queue a transport-level exception; retryable ones use the
    EndpointConnectionError subclass so botocore's retry handler fires."""
    exc = (
        RetryableException(message=message)
        if is_retryable
        else NonRetryableException(message)
    )
    stubber.responses.append(exc)
def _add_stubbed_response(stubber, attempt_response):
headers = attempt_response['responseHeaders']
status_code = attempt_response['httpStatus']
if 'errorCode' in attempt_response:
error = {
'__type': attempt_response['errorCode'],
'message': attempt_response['errorMessage'],
}
content = json.dumps(error).encode('utf-8')
else:
content = b'{}'
stubber.add_response(status=status_code, headers=headers, body=content)
class MonitoringListener(threading.Thread):
    """Background UDP listener collecting CSM monitoring events as dicts.

    Used as a context manager: binds and starts on __enter__; on __exit__
    sends itself an empty datagram so run() unblocks, then joins the thread.
    """

    _PACKET_SIZE = 1024 * 8

    def __init__(self, port=0):
        threading.Thread.__init__(self)
        self._socket = None
        self.port = port  # 0 lets the OS pick a free port
        self.received_events = []

    def __enter__(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind(('127.0.0.1', self.port))
        # The socket may have been assigned to an unused port so we
        # reset the port member after binding.
        self.port = self._socket.getsockname()[1]
        self.start()
        return self

    def __exit__(self, *args):
        # empty datagram acts as the shutdown sentinel for run()
        self._socket.sendto(b'', ('127.0.0.1', self.port))
        self.join()
        self._socket.close()

    def run(self):
        logger.debug('Started listener')
        while True:
            data = self._socket.recv(self._PACKET_SIZE)
            logger.debug('Received: %s', data.decode('utf-8'))
            if not data:
                return
            self.received_events.append(json.loads(data.decode('utf-8')))
import os
import shutil
import sys
import unittest.mock
from pathlib import Path
from queue import Queue
from typing import Any, Callable, Generator, Optional, Union
os.environ["WANDB_ERROR_REPORTING"] = "false"
import git # noqa: E402
import pytest # noqa: E402
import wandb # noqa: E402
import wandb.old.settings # noqa: E402
import wandb.sdk.lib.apikey # noqa: E402
import wandb.util # noqa: E402
from click.testing import CliRunner # noqa: E402
from wandb import Api # noqa: E402
from wandb.sdk.interface.interface_queue import InterfaceQueue # noqa: E402
from wandb.sdk.lib import filesystem, runid # noqa: E402
from wandb.sdk.lib.gitlib import GitRepo # noqa: E402
from wandb.sdk.lib.paths import StrPath # noqa: E402
# --------------------------------
# Misc Fixtures utilities
# --------------------------------
@pytest.fixture(scope="session")
def assets_path() -> Generator[Callable, None, None]:
    """Factory resolving a relative path inside the tests' assets/ directory."""
    def assets_path_fn(path: Path) -> Path:
        return Path(__file__).resolve().parent / "assets" / path

    yield assets_path_fn
@pytest.fixture
def copy_asset(assets_path) -> Generator[Callable, None, None]:
    """Factory copying an asset file or directory tree into the cwd (or dst)."""
    def copy_asset_fn(path: StrPath, dst: Optional[StrPath] = None) -> Path:
        src = assets_path(path)
        if src.is_file():
            return shutil.copy(src, dst or path)
        return shutil.copytree(src, dst or path)

    yield copy_asset_fn
# --------------------------------
# Misc Fixtures
# --------------------------------
@pytest.fixture(scope="function", autouse=True)
def filesystem_isolate(tmp_path):
    """Run every test inside an isolated temporary working directory."""
    # Click>=8 implements temp_dir argument which depends on python>=3.7
    kwargs = dict(temp_dir=tmp_path) if sys.version_info >= (3, 7) else {}
    with CliRunner().isolated_filesystem(**kwargs):
        yield
# todo: this fixture should probably be autouse=True
@pytest.fixture(scope="function", autouse=False)
def local_settings(filesystem_isolate):
    """Place global settings in an isolated dir."""
    config_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
    filesystem.mkdir_exists_ok(os.path.join(".config", "wandb"))

    # redirect the legacy global settings path into the isolated cwd
    # todo: this breaks things in unexpected places
    # todo: get rid of wandb.old
    with unittest.mock.patch.object(
        wandb.old.settings.Settings,
        "_global_path",
        return_value=config_path,
    ):
        yield
@pytest.fixture(scope="function", autouse=True)
def local_netrc(filesystem_isolate):
    """Never use our real credentials, put them in their own isolated dir."""
    original_expanduser = os.path.expanduser  # TODO: this seems overkill...

    open(".netrc", "wb").close()  # Touch that netrc file

    def expand(path):
        # redirect any netrc lookup to the isolated cwd copy
        if "netrc" in path:
            try:
                full_path = os.path.realpath("netrc")
            except OSError:
                full_path = original_expanduser(path)
        else:
            full_path = original_expanduser(path)
        return full_path

    # monkeypatch.setattr(os.path, "expanduser", expand)
    with unittest.mock.patch.object(os.path, "expanduser", expand):
        yield
@pytest.fixture
def dummy_api_key():
    """A syntactically valid 40-char hex API key for tests (not a real key)."""
    return "1824812581259009ca9981580f8f8a9012409eee"
@pytest.fixture
def patch_apikey(dummy_api_key):
    """Stub the interactive API-key flow: tty on, input() -> 1, getpass -> dummy key."""
    with unittest.mock.patch.object(
        wandb.sdk.lib.apikey, "isatty", return_value=True
    ), unittest.mock.patch.object(
        wandb.sdk.lib.apikey, "input", return_value=1
    ), unittest.mock.patch.object(
        wandb.sdk.lib.apikey, "getpass", return_value=dummy_api_key
    ):
        yield
@pytest.fixture
def patch_prompt(monkeypatch):
    """Make every choice prompt auto-select the first option."""
    monkeypatch.setattr(
        wandb.util, "prompt_choices", lambda x, input_timeout=None, jupyter=False: x[0]
    )
    # NOTE(review): also patches the helper under the legacy wandb.wandb_lib
    # alias — confirm both import paths are still exercised by the code under test.
    monkeypatch.setattr(
        wandb.wandb_lib.apikey,
        "prompt_choices",
        lambda x, input_timeout=None, jupyter=False: x[0],
    )
@pytest.fixture
def runner(patch_apikey, patch_prompt):
    """Click CLI runner with API-key and prompt interactions stubbed out."""
    return CliRunner()
@pytest.fixture
def git_repo(runner):
    """Create a throwaway git repo with one commit and yield it wrapped in GitRepo."""
    with runner.isolated_filesystem(), git.Repo.init(".") as repo:
        filesystem.mkdir_exists_ok("wandb")
        # Because the forked process doesn't use my monkey patch above
        with open(os.path.join("wandb", "settings"), "w") as f:
            f.write("[default]\nproject: test")
        open("README", "wb").close()
        repo.index.add(["README"])
        repo.index.commit("Initial commit")
        yield GitRepo(lazy=False)
@pytest.fixture(scope="function", autouse=True)
def unset_global_objects():
    """After each test, clear wandb's module-level globals (e.g. the active run)."""
    from wandb.sdk.lib.module import unset_globals
    yield
    unset_globals()
@pytest.fixture(scope="session", autouse=True)
def env_teardown():
    """Tear down any lingering wandb state before and after the test session."""
    wandb.teardown()
    yield
    wandb.teardown()
    if os.environ.get("CI") != "true":
        # TODO: uncomment this for prod? better make controllable with an env var
        # subprocess.run(["wandb", "server", "stop"])
        pass
@pytest.fixture(scope="function", autouse=True)
def clean_up():
    """Tear down wandb state after every single test."""
    yield
    wandb.teardown()
@pytest.fixture
def api():
    """A fresh public-API client instance."""
    return Api()
# --------------------------------
# Fixtures for user test point
# --------------------------------
@pytest.fixture()
def record_q() -> "Queue":
    """Plain queue collecting records emitted through the mocked interface."""
    return Queue()
@pytest.fixture()
def mocked_interface(record_q: "Queue") -> InterfaceQueue:
    """InterfaceQueue wired to the in-memory record queue (no real backend)."""
    return InterfaceQueue(record_q=record_q)
@pytest.fixture
def mocked_backend(mocked_interface: InterfaceQueue) -> Generator[object, None, None]:
    """Minimal stand-in for a run backend: exposes only ``.interface``."""
    class MockedBackend:
        def __init__(self) -> None:
            self.interface = mocked_interface
    yield MockedBackend()
def dict_factory():
    """Return a zero-argument callable producing a fresh empty dict per call.

    Used as a mutable-default-safe factory for settings overrides.
    """

    def make_empty_dict():
        return {}

    return make_empty_dict
@pytest.fixture(scope="function")
def test_settings():
    """Factory fixture: build a wandb Settings object with test defaults.

    The returned callable layers caller-supplied settings (dict or Settings)
    onto console="off" / save_code=False defaults.
    """

    def update_test_settings(
        extra_settings: Union[
            dict, wandb.sdk.wandb_settings.Settings
        ] = dict_factory()  # noqa: B008
    ):
        # NOTE(review): the default is dict_factory() — i.e. the inner helper
        # *function*, not a dict — so neither isinstance branch runs for the
        # default case. Net effect matches an empty dict; confirm intent.
        settings = wandb.Settings(
            console="off",
            save_code=False,
        )
        if isinstance(extra_settings, dict):
            settings.update(extra_settings, source=wandb.sdk.wandb_settings.Source.BASE)
        elif isinstance(extra_settings, wandb.sdk.wandb_settings.Settings):
            settings.update(extra_settings)
        settings._set_run_start_time()
        return settings

    yield update_test_settings
@pytest.fixture(scope="function")
def mock_run(test_settings, mocked_backend) -> Generator[Callable, None, None]:
    """Factory fixture producing a wandb Run wired to a mocked backend.

    Each run gets a generated run_id (overridable via a ``settings`` kwarg)
    and is installed as the global run; globals are cleared on teardown.
    """
    from wandb.sdk.lib.module import unset_globals

    def mock_run_fn(use_magic_mock=False, **kwargs: Any) -> "wandb.sdk.wandb_run.Run":
        kwargs_settings = kwargs.pop("settings", dict())
        # Caller-provided settings win over the generated run_id.
        kwargs_settings = {
            **{
                "run_id": runid.generate_id(),
            },
            **kwargs_settings,
        }
        run = wandb.wandb_sdk.wandb_run.Run(
            settings=test_settings(kwargs_settings), **kwargs
        )
        run._set_backend(
            unittest.mock.MagicMock() if use_magic_mock else mocked_backend
        )
        run._set_globals()
        return run

    yield mock_run_fn
    unset_globals()
@pytest.fixture
def example_file(tmp_path: Path) -> Path:
    """A small text file containing "hello" inside the per-test tmp dir."""
    path = tmp_path / "test.txt"
    path.write_text("hello")
    return path
@pytest.fixture
def METHOD_NAME(tmp_path: Path) -> Path:
artifact_dir = tmp_path / "artifacts"
artifact_dir.mkdir(parents=True, exist_ok=True)
for i in range(3):
(artifact_dir / f"artifact_{i}.txt").write_text(f"file-{i}")
return artifact_dir |
7,091 | list available models | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List
import librosa
import torch
from hydra.utils import instantiate
from omegaconf import MISSING, DictConfig, OmegaConf
from nemo.collections.tts.models.base import MelToSpec, Vocoder
from nemo.collections.tts.parts.utils.helpers import OperationMode, griffin_lim
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
class MelPsuedoInverseModel(MelToSpec):
    """Mel-to-linear spectrogram conversion using a mel filterbank projection.

    NOTE(review): the class name spells "Psuedo"; kept as-is (public API).
    """

    def __init__(self, cfg: DictConfig):
        # Accept a plain dict for convenience; normalize to DictConfig.
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        super().__init__(cfg=cfg)
        sampling_rate = self._cfg['sampling_rate']
        n_fft = self._cfg['n_fft']
        mel_fmin = self._cfg['mel_fmin']
        mel_fmax = self._cfg['mel_fmax']
        mel_freq = self._cfg['mel_freq']
        # librosa returns the *forward* mel filterbank; the tensordot below
        # contracts against it — presumably as an approximate (pseudo)
        # inverse projection. TODO confirm this approximation is intended.
        melinv = librosa.filters.mel(sr=sampling_rate, n_fft=n_fft, fmin=mel_fmin, fmax=mel_fmax, n_mels=mel_freq)
        self.mel_pseudo_inverse = torch.tensor(melinv, dtype=torch.float)

    def convert_mel_spectrogram_to_linear(self, mel):
        """Project a batched mel spectrogram onto linear frequency bins.

        Contracts the mel axis (dim 1) with the filterbank, then permutes
        back to (batch, freq, time) layout.
        """
        lin_spec = torch.tensordot(mel, self.mel_pseudo_inverse, dims=[[1], [0]])
        lin_spec = lin_spec.permute(0, 2, 1)
        return lin_spec

    def setup_training_data(self, cfg):
        # Inference-only model: no training data to set up.
        pass

    def setup_validation_data(self, cfg):
        pass

    def cuda(self, *args, **kwargs):
        # The filterbank is a plain tensor (not a Parameter), so move it manually.
        self.mel_pseudo_inverse = self.mel_pseudo_inverse.cuda(*args, **kwargs)
        return self
class GriffinLimModel(Vocoder):
    """Griffin-Lim phase-reconstruction vocoder: linear spectrogram -> audio."""

    def __init__(self, cfg: DictConfig):
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        super().__init__(cfg=cfg)
        self.n_iters = self._cfg['n_iters']
        self.n_fft = self._cfg['n_fft']
        self.l_hop = self._cfg['l_hop']

    def convert_spectrogram_to_audio(self, spec, Ts=None):
        """Run Griffin-Lim per batch item.

        Ts optionally gives per-item valid frame counts; defaults to the
        full time dimension for every item.
        """
        batch_size = spec.shape[0]
        T_max = spec.shape[2]
        if Ts is None:
            Ts = [T_max] * batch_size
        # Output length per item is (frames - 1) * hop; pad batch to the max.
        max_size = (max(Ts) - 1) * self.l_hop
        audios = torch.zeros(batch_size, max_size)
        # Lazy GL implementation. Could be improved by moving to pytorch.
        for i in range(batch_size):
            audio = griffin_lim(spec[i, :, 0 : Ts[i]].cpu().numpy(), n_iters=self.n_iters, n_fft=self.n_fft)
            my_len = audio.shape[0]
            audios[i, 0:my_len] = torch.from_numpy(audio)
        return audios

    def setup_training_data(self, cfg):
        # No trainable parameters; nothing to set up.
        pass

    def setup_validation_data(self, cfg):
        pass

    def cuda(self, *args, **kwargs):
        # Griffin-Lim here runs on CPU via numpy; moving to GPU is a no-op.
        return self
@dataclass
class TwoStagesConfig:
    """Config schema: sub-configs for the mel->linear model and the vocoder."""

    mel2spec: Dict[Any, Any] = MISSING
    linvocoder: Dict[Any, Any] = MISSING
class TwoStagesModel(Vocoder):
    """Two Stages model used to convert mel spectrograms, to linear spectrograms, and then to audio"""

    def __init__(self, cfg: DictConfig):
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        super().__init__(cfg=cfg)
        schema = OmegaConf.structured(TwoStagesConfig)
        # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        elif not isinstance(cfg, DictConfig):
            raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
        # Ensure passed cfg is compliant with schema
        OmegaConf.merge(cfg, schema)
        # Instantiate each stage only if a concrete _target_ is configured;
        # otherwise leave it None to be injected via the setters below.
        if '_target_' in self._cfg.mel2spec:
            self.mel2spec = instantiate(self._cfg.mel2spec)
        else:
            self.mel2spec = None
        if '_target_' in self._cfg.linvocoder:
            self.linvocoder = instantiate(self._cfg.linvocoder)
        else:
            self.linvocoder = None

    def set_mel_to_spec_model(self, mel2spec: MelToSpec):
        """Inject the mel->linear stage."""
        self.mel2spec = mel2spec

    def set_linear_vocoder(self, linvocoder: Vocoder):
        """Inject the linear-spectrogram->audio stage."""
        self.linvocoder = linvocoder

    def cuda(self, *args, **kwargs):
        # Both stages manage their own device moves (GriffinLim is a no-op).
        self.mel2spec.cuda(*args, **kwargs)
        self.linvocoder.cuda(*args, **kwargs)
        return super().cuda(*args, **kwargs)

    @property
    def input_types(self):
        return {
            "mel": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
        }

    @property
    def output_types(self):
        return {
            "wave": NeuralType(('B', 'T'), AudioSignal()),
        }

    def forward(self, *, mel):
        # Inference happens through convert_spectrogram_to_audio instead.
        pass

    def convert_spectrogram_to_audio(self, spec: torch.Tensor, **kwargs) -> torch.Tensor:
        """Convert a (log-)mel spectrogram to audio via both stages, no grads."""
        self.eval()
        # Best-effort: not every stage implementation exposes a .mode attr.
        try:
            self.mel2spec.mode = OperationMode.infer
        except AttributeError:
            pass
        try:
            self.linvocoder.mode = OperationMode.infer
        except AttributeError:
            pass
        with torch.no_grad():
            # Input is assumed to be log-scale; undo the log first.
            exp_spec = torch.exp(spec)
            linear_spec = self.mel2spec.convert_mel_spectrogram_to_linear(exp_spec)
            audio = self.linvocoder.convert_spectrogram_to_audio(linear_spec, **kwargs)
        return audio

    def training_step(self, batch, batch_idx):
        # Inference-only composite model: no training loop.
        pass

    def validation_step(self, batch, batch_idx):
        pass

    def on_validation_epoch_end(self, outputs):
        pass

    def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
        pass

    def setup_training_data(self, cfg):
        pass

    def setup_validation_data(self, cfg):
        pass
@classmethod
def METHOD_NAME(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
return list_of_models |
7,092 | show | """ Plots points on an all-sky sinusoidal projection plot. """
import numpy as np
import matplotlib.pyplot as plt
class AllSkyPlot(object):
    """All-sky sinusoidal-projection plot on a black background.

    RA increases to the left with RA=0 in the middle (see raDec2XY).
    """

    def __init__(self, ax_handle=None):
        # Central RA of the projection (degrees).
        self.ra0 = 180.0
        if ax_handle is None:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1, facecolor='black')
        else:
            self.ax = ax_handle
            self.fig = plt.gcf()
        # Set background color
        self.fig.patch.set_facecolor('black')
        # Set equal aspect ratio
        self.ax.set_aspect('equal')
        # # Set tick color
        # self.ax.tick_params(axis='x', colors='0.5')
        # self.ax.tick_params(axis='y', colors='0.5')
        # Turn off ticks
        self.ax.tick_params(labeltop=False, labelright=False, labelbottom=False, labelleft=False)
        self.plotGrid()

    def raDec2XY(self, ra, dec):
        """Project RA/Dec (degrees) to plot coordinates (sinusoidal projection)."""
        # Compute projected coordinates
        x = ((180 - ra)%360 - self.ra0)*np.cos(np.radians(dec))
        y = dec
        return x, y

    def plot(self, ra_array, dec_array, max_break_deg=30, **kwargs):
        """Plot a polyline, splitting it at large RA jumps (0/360 wraparound)."""
        # If there are more than one point, check for 0/360 wraparounds in RA
        if isinstance(ra_array, list) or isinstance(ra_array, np.ndarray):
            ra_array = np.array(ra_array)
            ra_array = (180 - ra_array)%360
            dec_array = np.array(dec_array)
            coord_list = []
            # Find large breaks in RA and plot them separately
            ra_diff = np.abs(ra_array[:-1] - ra_array[1:])
            break_indices = np.where(ra_diff > max_break_deg)[0]
            if not len(break_indices):
                coord_list = [[ra_array, dec_array]]
            else:
                prev_break_idx = 0
                for break_idx in break_indices:
                    ra_temp = ra_array[prev_break_idx:break_idx + 1]
                    dec_temp = dec_array[prev_break_idx:break_idx + 1]
                    prev_break_idx = break_idx + 1
                    coord_list.append([ra_temp, dec_temp])
                # Remainder after the last break.
                coord_list.append([ra_array[break_idx + 1:], dec_array[break_idx + 1:]])
        else:
            coord_list = [[180 - ra_array, dec_array]]
        # Plot all segments
        for i, (ra_temp, dec_temp) in enumerate(coord_list):
            x, y = self.raDec2XY(180 - ra_temp, dec_temp)
            # Make sure that all plotted lines have the same color
            if i > 0:
                color = plt_handle[0].get_color()
                # Add color to kwargs
                if 'color' not in kwargs:
                    kwargs['color'] = color
            plt_handle = self.ax.plot(x, y, **kwargs)

    def scatter(self, ra_array, dec_array, **kwargs):
        """Scatter-plot points given in RA/Dec degrees."""
        x, y = self.raDec2XY(ra_array, dec_array)
        self.ax.scatter(x, y, **kwargs)

    def plotGrid(self, step=15):
        """Draw the meridian/parallel grid plus RA/Dec tick labels."""
        # Plot a meridian and parallel grid
        # 180.0001 avoids a degenerate point exactly at the projection center.
        ra_grid = np.sort(np.append(np.arange(0, 360 + step, step), [180.0001]))
        dec_grid = np.arange(-90, 90 + step, step)
        # Plot meridians
        for ra in ra_grid[:-1]:
            # Increase number of points for meridian plot so they are smoother
            step_finer = step/5
            dec_arr = np.arange(-90, 90 + step_finer, step_finer)
            ra_temp = np.zeros_like(dec_arr) + ra
            x_grid, y_grid = self.raDec2XY(ra_temp, dec_arr)
            self.ax.plot(x_grid, y_grid, linestyle='dotted', alpha=0.5, color='silver')
        # Plot parallels
        for dec in dec_grid:
            dec_temp = np.zeros_like(ra_grid) + dec
            self.plot(ra_grid, dec_temp, linestyle='dotted', alpha=0.5, color='silver')
        # Plot dec ticks
        for dec in dec_grid[::2]:
            x, y = self.raDec2XY(0, dec)
            if dec > 0:
                va = 'bottom'
            else:
                va = 'top'
            self.ax.text(x, y, "{:+d}$^\circ$".format(dec), color='0.5', ha='center', va=va, size=7)
        # Plot every other RA tick and skip 0 and 360
        ra_ticks = np.sort(np.append(np.arange(0, 360, 2*step), [180.0001]))
        for ra in ra_ticks:
            # Offset RA so 0 starts in the middle and increases to the left
            #ra_text = (180 - ra)%360
            x, y = self.raDec2XY(ra, 0)
            self.ax.text(x, y, "{:+d}$^\circ$".format(int(ra)), color='0.5', ha='center', va='top', size=7)

    def beautify(self):
        """Fix axis limits to the full sky and tighten the layout."""
        self.ax.set_xlim([-180, 180])
        self.ax.set_ylim([-90, 90])
        self.fig.tight_layout()

    def METHOD_NAME(self):
        """Beautify and display the figure."""
        self.beautify()
        plt.METHOD_NAME()
if __name__ == "__main__":
allsky_plot = AllSkyPlot()
ra_array = np.arange(0, 2000, 1)
dec_array = np.linspace(-90, 90, len(ra_array))
allsky_plot.plot(ra_array, dec_array, color='green', linestyle='dashed')
allsky_plot.METHOD_NAME( |
7,093 | g | import sys
import unittest
import io
import atexit
from test import support
### helpers
# These print fixed markers so the tests below can assert execution order
# by inspecting captured stdout. Do not change the printed strings.
def h1():
    print("h1")
def h2():
    print("h2")
def h3():
    print("h3")
def h4(*args, **kwargs):
    # Echoes its arguments so registrations with different args are distinguishable.
    print("h4", args, kwargs)
def raise1():
    raise TypeError
def raise2():
    raise SystemError
class GeneralTest(unittest.TestCase):
    """atexit registration, ordering and error handling.

    setUp redirects stdout/stderr into one StringIO so both the helper
    prints and atexit's traceback output can be asserted.
    """

    def setUp(self):
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.stream = io.StringIO()
        sys.stdout = sys.stderr = self.stream
        atexit._clear()

    def tearDown(self):
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        atexit._clear()

    def test_args(self):
        # be sure args are handled properly
        atexit.register(h1)
        atexit.register(h4)
        atexit.register(h4, 4, kw="abc")
        atexit._run_exitfuncs()
        self.assertEqual(self.stream.getvalue(),
                         "h4 (4,) {'kw': 'abc'}\nh4 () {}\nh1\n")

    def test_badargs(self):
        atexit.register(lambda: 1, 0, 0, (x for x in (1,2)), 0, 0)
        self.assertRaises(TypeError, atexit._run_exitfuncs)

    def test_order(self):
        # be sure handlers are executed in reverse order
        atexit.register(h1)
        atexit.register(h2)
        atexit.register(h3)
        atexit._run_exitfuncs()
        self.assertEqual(self.stream.getvalue(), "h3\nh2\nh1\n")

    def test_raise(self):
        # be sure raises are handled properly
        atexit.register(raise1)
        atexit.register(raise2)
        self.assertRaises(TypeError, atexit._run_exitfuncs)

    def test_raise_unnormalized(self):
        # Issue #10756: Make sure that an unnormalized exception is
        # handled properly
        atexit.register(lambda: 1 / 0)
        self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
        self.assertIn("ZeroDivisionError", self.stream.getvalue())

    def test_print_tracebacks(self):
        # Issue #18776: the tracebacks should be printed when errors occur.
        # The "# one/two/three" comments are asserted below via the printed
        # traceback source lines — do not remove them.
        def f():
            1/0  # one
        def METHOD_NAME():
            1/0  # two
        def h():
            1/0  # three
        atexit.register(f)
        atexit.register(METHOD_NAME)
        atexit.register(h)
        self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
        stderr = self.stream.getvalue()
        self.assertEqual(stderr.count("ZeroDivisionError"), 3)
        self.assertIn("# one", stderr)
        self.assertIn("# two", stderr)
        self.assertIn("# three", stderr)

    def test_stress(self):
        a = [0]
        def inc():
            a[0] += 1
        for i in range(128):
            atexit.register(inc)
        atexit._run_exitfuncs()
        self.assertEqual(a[0], 128)

    def test_clear(self):
        a = [0]
        def inc():
            a[0] += 1
        atexit.register(inc)
        atexit._clear()
        atexit._run_exitfuncs()
        self.assertEqual(a[0], 0)

    def test_unregister(self):
        a = [0]
        def inc():
            a[0] += 1
        def dec():
            a[0] -= 1
        # Register inc four times, dec once; unregister removes *all* inc
        # entries, so only dec runs and the result is -1.
        for i in range(4):
            atexit.register(inc)
        atexit.register(dec)
        atexit.unregister(inc)
        atexit._run_exitfuncs()
        self.assertEqual(a[0], -1)

    def test_bound_methods(self):
        l = []
        atexit.register(l.append, 5)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])
        # Unregistering an equal (not identical) bound method must work too.
        atexit.unregister(l.append)
        atexit._run_exitfuncs()
        self.assertEqual(l, [5])
class SubinterpreterTest(unittest.TestCase):
    """Callbacks registered in a subinterpreter must not leak into ours."""

    def test_callbacks_leak(self):
        # This test shows a leak in refleak mode if atexit doesn't
        # take care to free callbacks in its per-subinterpreter module
        # state.
        n = atexit._ncallbacks()
        code = r"""if 1:
            import atexit
            def f():
                pass
            atexit.register(f)
            del atexit
            """
        ret = support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # Our interpreter's callback count must be unchanged.
        self.assertEqual(atexit._ncallbacks(), n)

    def test_callbacks_leak_refcycle(self):
        # Similar to the above, but with a refcycle through the atexit
        # module.
        n = atexit._ncallbacks()
        code = r"""if 1:
            import atexit
            def f():
                pass
            atexit.register(f)
            atexit.__atexit = atexit
            """
        ret = support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        self.assertEqual(atexit._ncallbacks(), n)
def test_main():
    """Entry point for regrtest-style invocation of this module's tests."""
    support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
7,094 | test report slave id request | """Test other messages."""
from unittest import mock
import pymodbus.other_message as pymodbus_message
class TestOtherMessage:
    """Unittest for the pymodbus.other_message module."""

    # Request classes are zero-arg constructible; some responses need a
    # status byte, hence the lambdas.
    requests = [
        pymodbus_message.ReadExceptionStatusRequest,
        pymodbus_message.GetCommEventCounterRequest,
        pymodbus_message.GetCommEventLogRequest,
        pymodbus_message.ReportSlaveIdRequest,
    ]
    responses = [
        lambda: pymodbus_message.ReadExceptionStatusResponse(0x12),
        lambda: pymodbus_message.GetCommEventCounterResponse(0x12),
        pymodbus_message.GetCommEventLogResponse,
        lambda: pymodbus_message.ReportSlaveIdResponse(0x12),
    ]

    def test_other_messages_to_string(self):
        """Test other messages to string."""
        for message in self.requests:
            assert str(message())
        for message in self.responses:
            assert str(message())

    def test_read_exception_status(self):
        """Test read exception status."""
        request = pymodbus_message.ReadExceptionStatusRequest()
        request.decode(b"\x12")
        assert not request.encode()
        assert request.execute().function_code == 0x07
        response = pymodbus_message.ReadExceptionStatusResponse(0x12)
        assert response.encode() == b"\x12"
        response.decode(b"\x12")
        assert response.status == 0x12

    def test_get_comm_event_counter(self):
        """Test get comm event counter."""
        request = pymodbus_message.GetCommEventCounterRequest()
        request.decode(b"\x12")
        assert not request.encode()
        assert request.execute().function_code == 0x0B
        response = pymodbus_message.GetCommEventCounterResponse(0x12)
        assert response.encode() == b"\x00\x00\x00\x12"
        response.decode(b"\x00\x00\x00\x12")
        assert response.status
        assert response.count == 0x12
        # Status False encodes as the 0xFFFF status word.
        response.status = False
        assert response.encode() == b"\xFF\xFF\x00\x12"

    def test_get_comm_event_log(self):
        """Test get comm event log."""
        request = pymodbus_message.GetCommEventLogRequest()
        request.decode(b"\x12")
        assert not request.encode()
        assert request.execute().function_code == 0x0C
        response = pymodbus_message.GetCommEventLogResponse()
        assert response.encode() == b"\x06\x00\x00\x00\x00\x00\x00"
        response.decode(b"\x06\x00\x00\x00\x12\x00\x12")
        assert response.status
        assert response.message_count == 0x12
        assert response.event_count == 0x12
        assert not response.events
        response.status = False
        assert response.encode() == b"\x06\xff\xff\x00\x12\x00\x12"

    def test_get_comm_event_log_with_events(self):
        """Test get comm event log with events."""
        response = pymodbus_message.GetCommEventLogResponse(events=[0x12, 0x34, 0x56])
        assert response.encode() == b"\x09\x00\x00\x00\x00\x00\x00\x12\x34\x56"
        response.decode(b"\x09\x00\x00\x00\x12\x00\x12\x12\x34\x56")
        assert response.status
        assert response.message_count == 0x12
        assert response.event_count == 0x12
        assert response.events == [0x12, 0x34, 0x56]

    def METHOD_NAME(self):
        """Test report slave id request."""
        with mock.patch("pymodbus.other_message.DeviceInformationFactory") as dif:
            # First test regular identity strings
            identity = {
                0x00: "VN",  # VendorName
                0x01: "PC",  # ProductCode
                0x02: "REV",  # MajorMinorRevision
                0x03: "VU",  # VendorUrl
                0x04: "PN",  # ProductName
                0x05: "MN",  # ModelName
                0x06: "UAN",  # UserApplicationName
                0x07: "RA",  # reserved
                0x08: "RB",  # reserved
            }
            dif.get.return_value = identity
            expected_identity = "-".join(identity.values()).encode()
            request = pymodbus_message.ReportSlaveIdRequest()
            response = request.execute()
            assert response.identifier == expected_identity
            # Change to byte strings and test again (final result should be the same)
            identity = {
                0x00: b"VN",  # VendorName
                0x01: b"PC",  # ProductCode
                0x02: b"REV",  # MajorMinorRevision
                0x03: b"VU",  # VendorUrl
                0x04: b"PN",  # ProductName
                0x05: b"MN",  # ModelName
                0x06: b"UAN",  # UserApplicationName
                0x07: b"RA",  # reserved
                0x08: b"RB",  # reserved
            }
            dif.get.return_value = identity
            request = pymodbus_message.ReportSlaveIdRequest()
            response = request.execute()
            assert response.identifier == expected_identity

    def test_report_slave_id(self):
        """Test report slave id."""
        with mock.patch("pymodbus.other_message.DeviceInformationFactory") as dif:
            # Empty device info: identifier falls back to the default.
            dif.get.return_value = {}
            request = pymodbus_message.ReportSlaveIdRequest()
            request.decode(b"\x12")
            assert not request.encode()
            assert request.execute().function_code == 0x11
            response = pymodbus_message.ReportSlaveIdResponse(
                request.execute().identifier, True
            )
            assert response.encode() == b"\tPymodbus\xff"
            response.decode(b"\x03\x12\x00")
            assert not response.status
            assert response.identifier == b"\x12\x00"
            response.status = False
            assert response.encode() == b"\x03\x12\x00\x00"
7,095 | test get dynamic linker undefined | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2016-2023 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from pathlib import Path
import pytest
from snapcraft.elf import elf_utils
from snapcraft.errors import SnapcraftError
@pytest.fixture(autouse=True, scope="function")
def _setup_method():
    """Reset the get_elf_files LRU cache before every test."""
    elf_utils.get_elf_files.cache_clear()
class TestGetElfFiles:
    """get_elf_files functionality."""

    def test_get_elf_files(self, new_dir, fake_elf):
        # Object files and statically linked binaries must be filtered out.
        fake_elf("fake_elf-2.23")
        fake_elf("object_file.o")
        fake_elf("fake_elf-static")
        elf_files = elf_utils.get_elf_files(new_dir)
        assert len(elf_files) == 1
        elf_file = elf_files.pop()
        assert elf_file.interp == "/lib64/ld-linux-x86-64.so.2"

    def test_get_elf_files_from_list_from_list(self, new_dir, fake_elf):
        fake_elf("fake_elf-2.23")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, ["fake_elf-2.23"])
        assert len(elf_files) == 1
        elf_file = elf_files.pop()
        assert elf_file.interp == "/lib64/ld-linux-x86-64.so.2"

    def test_skip_object_files(self, new_dir, fake_elf):
        fake_elf("object_file.o")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, ["object_file.o"])
        assert elf_files == []

    def test_no_find_dependencies_statically_linked(self, new_dir, fake_elf):
        fake_elf("fake_elf-static")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, ["fake_elf-static"])
        assert elf_files == []

    def test_elf_with_execstack(self, new_dir, fake_elf):
        fake_elf("fake_elf-with-execstack")
        elf_files = elf_utils.get_elf_files_from_list(
            new_dir, {"fake_elf-with-execstack"}
        )
        elf_file = elf_files.pop()
        assert elf_file.execstack_set is True

    def test_elf_without_execstack(self, new_dir, fake_elf):
        fake_elf("fake_elf-2.23")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, {"fake_elf-2.23"})
        elf_file = elf_files.pop()
        assert elf_file.execstack_set is False

    def test_non_elf_files(self, new_dir):
        # A bz2 header
        Path("non-elf").write_bytes(b"\x42\x5a\x68")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, {"non-elf"})
        assert elf_files == []

    def test_symlinks(self, new_dir):
        # Symlinks pointing outside the root must be ignored.
        symlinked_path = Path(new_dir, "symlinked")
        symlinked_path.symlink_to("/bin/dash")
        elf_files = elf_utils.get_elf_files_from_list(new_dir, {"symlinked"})
        assert elf_files == []

    def test_device_files(self):
        elf_files = elf_utils.get_elf_files_from_list(Path("/dev"), {"null"})
        assert elf_files == []

    def test_fifo(self, new_dir):
        fifo_path = os.path.join(new_dir, "fifo")
        os.mkfifo(fifo_path)
        elf_files = elf_utils.get_elf_files_from_list(new_dir, {"fifo"})
        assert elf_files == []
class TestGetDynamicLinker:
    """find_linker functionality."""

    @pytest.mark.parametrize(
        "arch,linker",
        [
            ("x86_64", "lib64/ld-linux-x86-64.so.2"),
            ("aarch64", "lib/ld-linux-aarch64.so.1"),
            ("armv7l", "lib/ld-linux-armhf.so.3"),
            ("riscv64", "lib/ld-linux-riscv64-lp64d.so.1"),
            ("ppc64le", "lib64/ld64.so.2"),
            ("s390x", "lib/ld64.so.1"),
        ],
    )
    def test_get_dynamic_linker(self, mocker, new_dir, arch, linker):
        mocker.patch("platform.machine", return_value=arch)
        # The linker file must exist under the root for lookup to succeed.
        lpath = Path(linker)
        lpath.parent.mkdir(parents=True)
        lpath.touch()
        dynamic_linker = elf_utils.get_dynamic_linker(
            root_path=new_dir, snap_path=Path("/snap/foo/current")
        )
        assert dynamic_linker == f"/snap/foo/current/{linker}"

    def METHOD_NAME(self, mocker):
        # Unknown architecture: no linker mapping defined.
        mocker.patch("platform.machine", return_value="z80")
        with pytest.raises(RuntimeError) as err:
            elf_utils.get_dynamic_linker(
                root_path=Path("prime"), snap_path=Path("/snap/foo/current")
            )
        assert str(err.value) == "Dynamic linker not defined for arch 'z80'"

    def test_get_dynamic_linker_not_found(self, mocker):
        # Known arch but the linker file does not exist under the root.
        mocker.patch("platform.machine", return_value="x86_64")
        with pytest.raises(SnapcraftError) as err:
            elf_utils.get_dynamic_linker(
                root_path=Path("prime"), snap_path=Path("/snap/foo/current")
            )
        assert str(err.value) == (
            "Dynamic linker 'prime/lib64/ld-linux-x86-64.so.2' not found."
        )
class TestArchConfig:
    """Test architecture config functionality."""

    @pytest.mark.parametrize(
        "machine, expected_arch_triplet",
        [
            ("aarch64", "aarch64-linux-gnu"),
            ("armv7l", "arm-linux-gnueabihf"),
            ("ppc64le", "powerpc64le-linux-gnu"),
            ("riscv64", "riscv64-linux-gnu"),
            ("s390x", "s390x-linux-gnu"),
            ("x86_64", "x86_64-linux-gnu"),
            ("i686", "i386-linux-gnu"),
        ],
    )
    def test_get_arch_triplet_host(self, mocker, machine, expected_arch_triplet):
        """Verify `get_arch_triplet()` gets the host's architecture triplet."""
        mocker.patch("snapcraft.elf.elf_utils.platform.machine", return_value=machine)
        arch_triplet = elf_utils.get_arch_triplet()
        assert arch_triplet == expected_arch_triplet

    @pytest.mark.parametrize(
        "machine, expected_arch_triplet",
        [
            ("aarch64", "aarch64-linux-gnu"),
            ("armv7l", "arm-linux-gnueabihf"),
            ("ppc64le", "powerpc64le-linux-gnu"),
            ("riscv64", "riscv64-linux-gnu"),
            ("s390x", "s390x-linux-gnu"),
            ("x86_64", "x86_64-linux-gnu"),
            ("i686", "i386-linux-gnu"),
        ],
    )
    def test_get_arch_triplet(self, mocker, machine, expected_arch_triplet):
        """Get the architecture triplet from the architecture passed as a parameter."""
        # NOTE(review): `mocker` is unused here (no patching needed when the
        # arch is passed explicitly) — could be dropped from the signature.
        arch_triplet = elf_utils.get_arch_triplet(machine)
        assert arch_triplet == expected_arch_triplet

    def test_get_arch_triplet_error(self, mocker):
        """Verify `get_arch_triplet()` raises an error for invalid machines."""
        mocker.patch("snapcraft.elf.elf_utils.platform.machine", return_value="4004")
        with pytest.raises(RuntimeError) as raised:
            elf_utils.get_arch_triplet()
        assert str(raised.value) == "Arch triplet not defined for arch '4004'"

    def test_get_all_arch_triplets(self):
        """Verify `get_all_arch_triplets()` gets a list of all architecture triplets."""
        arch_triplets = elf_utils.get_all_arch_triplets()
        assert arch_triplets == [
            "aarch64-linux-gnu",
            "arm-linux-gnueabihf",
            "powerpc64le-linux-gnu",
            "riscv64-linux-gnu",
            "s390x-linux-gnu",
            "x86_64-linux-gnu",
            "i386-linux-gnu",
        ]
7,096 | config dash ipv4 | import click
def get_attr_full_name(ctx, threshold):
    """Build the CRM config attribute name for the current DASH resource.

    Joins "dash", the optional address family and direction stored on the
    CRM context object, the resource type, and the threshold suffix with
    underscores (e.g. "dash_ipv4_inbound_routing_low_threshold").
    """
    crm = ctx.obj["crm"]
    parts = ["dash"]
    if crm.addr_family:
        parts.append(crm.addr_family)
    if crm.direction:
        parts.append(crm.direction)
    parts.append(crm.res_type)
    parts.append(threshold)
    return "_".join(parts)
@click.command('type')
@click.argument('value', type=click.Choice(['percentage', 'used', 'free']))
@click.pass_context
def config_dash_type(ctx, value):
"""CRM threshold type configuration"""
ctx.obj["crm"].config(get_attr_full_name(ctx, 'threshold_type'), value)
@click.command('low')
@click.argument('value', type=click.INT)
@click.pass_context
def config_dash_low(ctx, value):
"""CRM low threshold configuration"""
ctx.obj["crm"].config(get_attr_full_name(ctx, 'low_threshold'), value)
@click.command('high')
@click.argument('value', type=click.INT)
@click.pass_context
def config_dash_high(ctx, value):
"""CRM high threshold configuration"""
ctx.obj["crm"].config(get_attr_full_name(ctx, 'high_threshold'), value)
def group_add_thresholds(group):
group.add_command(config_dash_type)
group.add_command(config_dash_low)
group.add_command(config_dash_high)
@click.group('dash')
@click.pass_context
def config_dash(ctx):
"""CRM configuration for DASH resource"""
pass
@config_dash.group('ipv4')
@click.pass_context
def METHOD_NAME(ctx):
"""DASH CRM resource IPv4 address family"""
ctx.obj["crm"].addr_family = 'ipv4'
@config_dash.group('ipv6')
@click.pass_context
def config_dash_ipv6(ctx):
"""DASH CRM resource IPv6 address family"""
ctx.obj["crm"].addr_family = 'ipv6'
@click.group('inbound')
@click.pass_context
def config_dash_inbound(ctx):
"""DASH CRM inbound direction resource"""
ctx.obj["crm"].direction = 'inbound'
METHOD_NAME.add_command(config_dash_inbound)
config_dash_ipv6.add_command(config_dash_inbound)
@click.group('outbound')
@click.pass_context
def config_dash_outbound(ctx):
"""DASH CRM outbound direction resource"""
ctx.obj["crm"].direction = 'outbound'
METHOD_NAME.add_command(config_dash_outbound)
config_dash_ipv6.add_command(config_dash_outbound)
@config_dash.group('eni')
@click.pass_context
def config_dash_eni(ctx):
"""CRM configuration for DASH ENI resource"""
ctx.obj["crm"].res_type = 'eni'
group_add_thresholds(config_dash_eni)
@config_dash.group('eni-ether-address')
@click.pass_context
def config_dash_eni_ether_address_map(ctx):
"""CRM configuration for DASH ENI ETHER address map entry"""
ctx.obj["crm"].res_type = 'eni_ether_address_map'
group_add_thresholds(config_dash_eni_ether_address_map)
@config_dash.group('vnet')
@click.pass_context
def config_dash_vnet(ctx):
"""CRM configuration for DASH VNET resource"""
ctx.obj["crm"].res_type = 'vnet'
group_add_thresholds(config_dash_vnet)
@click.group('routing')
@click.pass_context
def config_dash_routing(ctx):
"""CRM configuration for DASH inbound routes"""
ctx.obj["crm"].res_type = 'routing'
group_add_thresholds(config_dash_routing)
config_dash_inbound.add_command(config_dash_routing)
config_dash_outbound.add_command(config_dash_routing)
@click.group('pa-validation')
@click.pass_context
def config_dash_pa_validation(ctx):
"""CRM configuration for DASH PA validation entries"""
ctx.obj["crm"].res_type = 'pa_validation'
group_add_thresholds(config_dash_pa_validation)
METHOD_NAME.add_command(config_dash_pa_validation)
config_dash_ipv6.add_command(config_dash_pa_validation)
@click.group('ca-to-pa')
@click.pass_context
def config_dash_ca_to_pa(ctx):
"""CRM configuration for DASH CA to PA entries"""
ctx.obj["crm"].res_type = 'ca_to_pa'
group_add_thresholds(config_dash_ca_to_pa)
config_dash_outbound.add_command(config_dash_ca_to_pa)
@click.group('acl')
@click.pass_context
def config_dash_acl(ctx):
"""DASH CRM ACL resource"""
METHOD_NAME.add_command(config_dash_acl)
config_dash_ipv6.add_command(config_dash_acl)
@click.group('group')
@click.pass_context
def config_dash_acl_group(ctx):
"""CRM configuration for DASH ACL group entries"""
ctx.obj["crm"].res_type = 'acl_group'
group_add_thresholds(config_dash_acl_group)
config_dash_acl.add_command(config_dash_acl_group)
@click.group('rule')
@click.pass_context
def config_dash_acl_rule(ctx):
    """CRM configuration for DASH ACL rule entries"""
    ctx.obj["crm"].res_type = 'acl_rule'


group_add_thresholds(config_dash_acl_rule)
# Nested under the shared 'acl' group rather than a top-level group.
config_dash_acl.add_command(config_dash_acl_rule)
|
7,097 | test validation date1 | #
# @file TestValidation.py
# @brief Validation of Date ModelCreator and ModelHistory unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestValidation.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestValidation(unittest.TestCase):
    """Unit tests for libSBML validation helpers: CVTerm/ModelCreator/
    ModelHistory.hasRequiredAttributes() and Date.representsValidDate().

    Auto-generated from src/annotation/test/TestValidation.cpp — do not
    hand-edit the logic (see the file header warning).
    """

    def test_Validation_CVTerm1(self):
        # A MODEL_QUALIFIER CVTerm is complete only once qualifier type,
        # model qualifier and at least one resource are all set.
        cv = libsbml.CVTerm()
        self.assertTrue( cv != None )
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.setQualifierType(libsbml.MODEL_QUALIFIER)
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.setModelQualifierType(libsbml.BQM_IS)
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.addResource("ggg")
        self.assertEqual( True, (cv.hasRequiredAttributes()) )
        cv = None
        pass

    def test_Validation_CVTerm2(self):
        # Same progression for a BIOLOGICAL_QUALIFIER CVTerm.
        cv = libsbml.CVTerm()
        self.assertTrue( cv != None )
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.setBiologicalQualifierType(libsbml.BQB_IS)
        self.assertEqual( False, (cv.hasRequiredAttributes()) )
        cv.addResource("ggg")
        self.assertEqual( True, (cv.hasRequiredAttributes()) )
        cv = None
        pass

    def METHOD_NAME(self):
        # Year 200 is out of range, so the date is reported invalid.
        date = libsbml.Date(200,12,30,12,15,45,1,2,0)
        self.assertTrue( date != None )
        self.assertEqual( False, (date.representsValidDate()) )
        date = None
        pass

    def test_Validation_Date2(self):
        # Month 14 is out of range, so the date is reported invalid.
        date = libsbml.Date(2007,14,30,12,15,45,1,2,0)
        self.assertTrue( date != None )
        self.assertEqual( False, (date.representsValidDate()) )
        date = None
        pass

    def test_Validation_Date3(self):
        # A non-ISO date string does not parse into a valid date.
        date = libsbml.Date("Jan 12")
        self.assertTrue( date != None )
        self.assertEqual( False, (date.representsValidDate()) )
        date = None
        pass

    def test_Validation_Date4(self):
        # All fields in range: the date is valid.
        date = libsbml.Date(2007,12,30,12,15,45,1,2,0)
        self.assertTrue( date != None )
        self.assertEqual( True, date.representsValidDate() )
        date = None
        pass

    def test_Validation_ModelCreator(self):
        # A creator needs family name AND given name; email alone is not
        # sufficient.
        mc = libsbml.ModelCreator()
        self.assertTrue( mc != None )
        self.assertEqual( False, (mc.hasRequiredAttributes()) )
        mc.setEmail("k123")
        self.assertEqual( False, (mc.hasRequiredAttributes()) )
        mc.setFamilyName("Keating")
        self.assertEqual( False, (mc.hasRequiredAttributes()) )
        mc.setGivenName("Sarah")
        self.assertEqual( True, mc.hasRequiredAttributes() )
        mc = None
        pass

    def test_Validation_ModelHistory1(self):
        # History is complete once it has created/modified dates and a
        # creator that itself has the required attributes.
        mh = libsbml.ModelHistory()
        self.assertTrue( mh != None )
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        date = libsbml.Date(2007,12,30,12,15,45,1,2,0)
        mh.setCreatedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mh.setModifiedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mc = libsbml.ModelCreator()
        mc.setFamilyName("Keating")
        mc.setGivenName("Sarah")
        mh.addCreator(mc)
        self.assertEqual( True, mh.hasRequiredAttributes() )
        mh = None
        pass

    def test_Validation_ModelHistory2(self):
        # An invalid date (year 200) keeps the whole history invalid even
        # with a complete creator attached.
        mh = libsbml.ModelHistory()
        self.assertTrue( mh != None )
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        date = libsbml.Date(200,12,30,12,15,45,1,2,0)
        mh.setCreatedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mh.setModifiedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mc = libsbml.ModelCreator()
        mc.setFamilyName("Keating")
        mc.setGivenName("Sarah")
        mh.addCreator(mc)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mh = None
        pass

    def test_Validation_ModelHistory3(self):
        # A creator missing its given name keeps the history invalid even
        # with valid dates.
        mh = libsbml.ModelHistory()
        self.assertTrue( mh != None )
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        date = libsbml.Date(2007,12,30,12,15,45,1,2,0)
        mh.setCreatedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mh.setModifiedDate(date)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mc = libsbml.ModelCreator()
        mc.setFamilyName("Keating")
        mh.addCreator(mc)
        self.assertEqual( False, (mh.hasRequiredAttributes()) )
        mh = None
        pass
def suite():
    """Build and return the TestSuite containing all TestValidation cases."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(TestValidation))
    return tests
if __name__ == "__main__":
    # Exit status mirrors the test outcome: 0 on success, 1 on failure.
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
7,098 | test bad type cache | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
import os
import shutil
import string
import time
import pytest
from fsspec.implementations.local import make_path_posix
import intake
from intake.config import conf
@pytest.fixture
def catalog_cache():
    """Open the caching test catalog that lives next to this test module."""
    catalog_path = os.path.join(os.path.dirname(__file__), "catalog_caching.yml")
    return intake.open_catalog(catalog_path)
def test_load_csv(catalog_cache, tempdir):
    # Point the cache at a fresh directory via the env var referenced in
    # the catalog YAML, then force a read to populate it.
    os.environ["TEST_CACHE_DIR"] = str(tempdir)
    catalog_cache["test_cache_new"].read()
    files = os.listdir(tempdir)
    # Expect the metadata store ("cache") plus exactly one hashed subdir.
    assert "cache" in files
    assert len(files) == 2
    cache_id = [f for f in files if f != "cache"][0]
    # Cache ids are hex digests.
    assert all(c in string.hexdigits for c in cache_id)
def test_list_of_files(catalog_cache):
    """A list-of-urls source reads as the concatenation of its parts."""
    pd = pytest.importorskip("pandas")
    single = catalog_cache["test_cache"]
    listed = catalog_cache["test_list_cache"]
    expected = pd.concat([single.read(), single.read()])
    assert listed.read().equals(expected)
def METHOD_NAME(catalog_cache):
    # An unknown cache "type" in the spec surfaces as IndexError when the
    # cache list is accessed.
    with pytest.raises(IndexError):
        catalog_cache["test_bad_type_cache_spec"].cache
def test_load_textfile(catalog_cache):
    # load(output=False) returns the cached file paths without reading.
    cat = catalog_cache["text_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    # The cached copy lands inside the cache directory and exists on disk.
    assert cache._cache_dir in cache_path
    assert os.path.isfile(cache_path)
    cache_id = os.path.basename(os.path.dirname(cache_path))
    # Checking for md5 hash
    assert all(c in string.hexdigits for c in cache_id)
    cache.clear_all()
def test_load_arr(catalog_cache):
    # Same as test_load_textfile but for an array source (note: uses
    # cat.path rather than cat._urlpath).
    cat = catalog_cache["arr_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat.path, output=False)
    cache_path = cache_paths[-1]
    assert cache._cache_dir in cache_path
    assert os.path.isfile(cache_path)
    cache_id = os.path.basename(os.path.dirname(cache_path))
    # Checking for md5 hash
    assert all(c in string.hexdigits for c in cache_id)
    cache.clear_all()
@pytest.mark.parametrize("section", ["test_no_regex", "test_regex_no_match", "test_regex_partial_match"])
def test_regex(catalog_cache, section):
    # Missing / non-matching / partially-matching cache regexes must all
    # still place the cached file under the cache directory.
    cat = catalog_cache[section]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    assert cache_path.startswith(cache._cache_dir)
    assert os.path.isfile(cache_path)
    cache.clear_all()
def test_get_metadata(catalog_cache):
    # get_metadata returns one record per cached file, each carrying the
    # cache path, creation time and the original url.
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    metadata = cache.get_metadata(cat._urlpath)
    assert isinstance(metadata, list)
    for d in metadata:
        assert d["cache_path"] in cache_paths
        assert "created" in d.keys()
        assert "original_path" in d.keys()
    cache.clear_all()
def test_clear_cache(catalog_cache):
    # clear_cache(url) drops both the metadata entry and the files on disk.
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache.clear_cache(cat._urlpath)
    assert cat._urlpath not in cache._metadata.keys()
    for cache_path in cache_paths:
        assert os.path.basename(cache_path) not in os.listdir(cache._cache_dir)
def test_clear_cache_bad_metadata(catalog_cache):
    # Even if the cached files were removed behind the cache's back
    # (rmtree below), clear_cache must still clean up without raising.
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    subdir = os.path.dirname(cache_paths[0])
    shutil.rmtree(subdir)
    cache.clear_cache(cat._urlpath)
    assert cat._urlpath not in cache._metadata.keys()
    for cache_path in cache_paths:
        assert os.path.basename(cache_path) not in os.listdir(cache._cache_dir)
def test_clear_all(catalog_cache):
    """clear_all removes every cached file and is safe to call twice."""
    source = catalog_cache["test_cache"]
    cache = source.cache[0]
    paths = cache.load(source._urlpath, output=False)
    cache.clear_all()
    assert not any(os.path.exists(p) for p in paths)
    # Idempotent: clearing an already-empty cache must not raise.
    cache.clear_all()
def test_second_load(catalog_cache):
    # A second load of the same url must reuse the cached file rather
    # than re-downloading it (mtime unchanged).
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    assert os.path.isfile(cache_path)
    t1 = os.path.getmtime(cache_path)
    cache.load(cat._urlpath, output=False)
    assert os.path.isfile(cache_path)
    t2 = os.path.getmtime(cache_path)
    assert t1 == t2
    cache.clear_all()
def test_second_load_timestamp(catalog_cache):
    # After clearing, a re-load must fetch a fresh copy: the new file's
    # mtime is strictly later (the sleep guards mtime resolution).
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    time1 = os.path.getmtime(cache_path)
    cache.clear_cache(cat._urlpath)
    assert not os.path.isfile(cache_path)
    time.sleep(0.5)
    cache.load(cat._urlpath, output=False)
    assert os.path.isfile(cache_path)
    time2 = os.path.getmtime(cache_path)
    assert time1 < time2
    cache.clear_all()
def test_second_load_refresh(catalog_cache):
    # Re-loading without clearing keeps the original cached file intact
    # (mtime unchanged).
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    time1 = os.path.getmtime(cache_path)
    assert os.path.isfile(cache_path)
    cache.load(cat._urlpath, output=False)
    assert os.path.isfile(cache_path)
    time2 = os.path.getmtime(cache_path)
    assert time1 == time2
    cache.clear_all()
def test_multiple_cache(catalog_cache):
    """A source may declare several caches; each must materialise a file."""
    source = catalog_cache["test_multiple_cache"]
    assert len(source.cache) == 2
    for cache in source.cache:
        newest = cache.load(source._urlpath, output=False)[-1]
        assert cache._cache_dir in newest
        assert os.path.isfile(newest)
        cache.clear_all()
def test_disable_caching(catalog_cache):
    # With caching disabled, load() passes the original url straight
    # through; re-enabling restores normal cached behaviour.
    conf["cache_disabled"] = True
    cat = catalog_cache["test_cache"]
    cache = cat.cache[0]
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    assert cache_path == cat._urlpath
    conf["cache_disabled"] = False
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    assert cache._cache_dir in cache_path
    assert os.path.isfile(cache_path)
    cache_id = os.path.basename(os.path.dirname(cache_path))
    # Checking for md5 hash
    assert all(c in string.hexdigits for c in cache_id)
    cache.clear_all()
def test_ds_set_cache_dir(catalog_cache):
    # set_cache_dir must redirect cached files away from the default
    # directory into the user-supplied one.
    cat = catalog_cache["test_cache"]()
    defaults = cat.cache_dirs
    new_cache_dir = os.path.join(os.getcwd(), "test_cache_dir")
    cat.set_cache_dir(new_cache_dir)
    cache = cat.cache[0]
    # make_path_posix normalises separators for a cross-platform compare.
    assert make_path_posix(cache._cache_dir) == make_path_posix(new_cache_dir)
    cache_paths = cache.load(cat._urlpath, output=False)
    cache_path = cache_paths[-1]
    expected_cache_dir = make_path_posix(new_cache_dir)
    assert expected_cache_dir in cache_path
    assert defaults[0] not in cache_path
    assert os.path.isfile(cache_path)
    cache_id = os.path.basename(os.path.dirname(cache_path))
    # Checking for md5 hash
    assert all(c in string.hexdigits for c in cache_id)
    cache.clear_all()
    shutil.rmtree(expected_cache_dir)
7,099 | sympy euler | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for Euler angles"""
import math
import numpy as np
import pytest
from numpy import pi
from numpy.testing import assert_array_almost_equal, assert_array_equal
from .. import eulerangles as nea
from .. import quaternions as nq
FLOAT_EPS = np.finfo(np.float64).eps
# Example rotations: every (x, y, z) combination of angles drawn from a
# half-pi grid spanning [-2*pi, 2*pi) up to 2*pi (9 values per axis,
# 729 triples). Comprehension replaces the original triple-nested
# append loop; element order is identical (x outermost, z innermost).
params = np.arange(-pi * 2, pi * 2.5, pi / 2)
eg_rots = [(x, y, z) for x in params for y in params for z in params]
def x_only(x):
    """Rotation matrix for a rotation of *x* radians about the x axis."""
    c, s = np.cos(x), np.sin(x)
    return np.array(
        [
            [1, 0, 0],
            [0, c, -s],
            [0, s, c],
        ]
    )
def y_only(y):
    """Rotation matrix for a rotation of *y* radians about the y axis."""
    c, s = np.cos(y), np.sin(y)
    return np.array(
        [
            [c, 0, s],
            [0, 1, 0],
            [-s, 0, c],
        ]
    )
def z_only(z):
    """Rotation matrix for a rotation of *z* radians about the z axis."""
    c, s = np.cos(z), np.sin(z)
    return np.array(
        [
            [c, -s, 0],
            [s, c, 0],
            [0, 0, 1],
        ]
    )
def METHOD_NAME(z, y, x):
    """Closed-form z,y,x rotation matrix as derived with Sympy.

    Returns a 3x3 nested list (see the derivations subdirectory for the
    symbolic derivation this was copied from).
    """
    cz, sz = math.cos(z), math.sin(z)
    cy, sy = math.cos(y), math.sin(y)
    cx, sx = math.cos(x), math.sin(x)
    return [
        [cy * cz, -cy * sz, sy],
        [cx * sz + cz * sx * sy, cx * cz - sx * sy * sz, -cy * sx],
        [sx * sz - cx * cz * sy, cz * sx + cx * sy * sz, cx * cy],
    ]
def is_valid_rotation(M):
    """True iff M is a proper rotation: det(M) == 1 and M @ M.T == I."""
    has_unit_det = np.allclose(np.linalg.det(M), 1)
    return has_unit_det and np.allclose(np.eye(3), np.dot(M, M.T))
def test_basic_euler():
    """euler2mat composes correctly and honours named/default arguments."""
    # some example rotations, in radians
    zr = 0.05
    yr = -0.4
    xr = 0.2
    # Rotation matrix composing the three rotations
    M = nea.euler2mat(zr, yr, xr)
    # Corresponding individual rotation matrices
    M1 = nea.euler2mat(zr)
    M2 = nea.euler2mat(0, yr)
    M3 = nea.euler2mat(0, 0, xr)
    # which are all valid rotation matrices
    assert is_valid_rotation(M)
    assert is_valid_rotation(M1)
    assert is_valid_rotation(M2)
    assert is_valid_rotation(M3)
    # Full matrix is composition of three individual matrices
    assert np.allclose(M, np.dot(M3, np.dot(M2, M1)))
    # Rotations can be specified with named args, default 0
    assert np.all(nea.euler2mat(zr) == nea.euler2mat(z=zr))
    assert np.all(nea.euler2mat(0, yr) == nea.euler2mat(y=yr))
    assert np.all(nea.euler2mat(0, 0, xr) == nea.euler2mat(x=xr))
    # Applying an opposite rotation same as inverse (the inverse is
    # the same as the transpose, but just for clarity)
    assert np.allclose(nea.euler2mat(x=-xr), np.linalg.inv(nea.euler2mat(x=xr)))
def test_euler_mat_1():
    # No angles -> identity rotation.
    M = nea.euler2mat()
    assert_array_equal(M, np.eye(3))
@pytest.mark.parametrize('x, y, z', eg_rots)
def test_euler_mat_2(x, y, z):
    # euler2mat must agree with the closed-form Sympy matrix and with the
    # composition of the three single-axis rotations, for every grid angle.
    M1 = nea.euler2mat(z, y, x)
    M2 = METHOD_NAME(z, y, x)
    assert_array_almost_equal(M1, M2)
    M3 = np.dot(x_only(x), np.dot(y_only(y), z_only(z)))
    assert_array_almost_equal(M1, M3)
    zp, yp, xp = nea.mat2euler(M1)
    # The parameters may not be the same as input, but they give the
    # same rotation matrix
    M4 = nea.euler2mat(zp, yp, xp)
    assert_array_almost_equal(M1, M4)
def sympy_euler2quat(z=0, y=0, x=0):
    """Quaternion (w, i, j, k) for z, y, x rotations.

    Direct formula generated with sympy — see the derivations subfolder.
    """
    hz, hy, hx = 0.5 * z, 0.5 * y, 0.5 * x
    cz, sz = math.cos(hz), math.sin(hz)
    cy, sy = math.cos(hy), math.sin(hy)
    cx, sx = math.cos(hx), math.sin(hx)
    return (
        cx * cy * cz - sx * sy * sz,
        cx * sy * sz + cy * cz * sx,
        cx * cz * sy - cy * sx * sz,
        cx * cy * sz + cz * sx * sy,
    )
def crude_mat2euler(M):
    """The simplest possible - ignoring atan2 instability"""
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    z = math.atan2(-r12, r11)
    y = math.asin(r13)
    x = math.atan2(-r23, r33)
    return z, y, x
def test_euler_instability():
    # Test for numerical errors in mat2euler
    # problems arise for cos(y) near 0
    po2 = pi / 2
    zyx = po2, po2, po2
    M = nea.euler2mat(*zyx)
    # Round trip
    M_back = nea.euler2mat(*nea.mat2euler(M))
    assert np.allclose(M, M_back)
    # disturb matrix slightly
    M_e = M - FLOAT_EPS
    # round trip to test - OK
    M_e_back = nea.euler2mat(*nea.mat2euler(M_e))
    assert np.allclose(M_e, M_e_back)
    # not so with crude routine
    M_e_back = nea.euler2mat(*crude_mat2euler(M_e))
    assert not np.allclose(M_e, M_e_back)
@pytest.mark.parametrize('x, y, z', eg_rots)
def test_quats(x, y, z):
    # euler2quat must agree with mat2quat(euler2mat(...)) and with the
    # closed-form sympy quaternion; quat2euler must round-trip the matrix.
    M1 = nea.euler2mat(z, y, x)
    quatM = nq.mat2quat(M1)
    quat = nea.euler2quat(z, y, x)
    assert nq.nearly_equivalent(quatM, quat)
    quatS = sympy_euler2quat(z, y, x)
    assert nq.nearly_equivalent(quat, quatS)
    zp, yp, xp = nea.quat2euler(quat)
    # The parameters may not be the same as input, but they give the
    # same rotation matrix
    M2 = nea.euler2mat(zp, yp, xp)
    assert_array_almost_equal(M1, M2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.