Dataset schema (one record per source file; per-column type and observed range):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16; nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58; nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
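A minimal sketch of a consumer for records shaped like this schema; the `rows` argument is assumed to be an iterable of dicts keyed by the column names above, and nothing here is tied to a specific dataset loader:

def permissive_python_files(rows):
    # Yield (path, content) for hand-written, permissively licensed Python files.
    for row in rows:
        if row["language"] != "Python" or row["license_type"] != "permissive":
            continue
        if row["is_vendor"] or row["is_generated"]:
            continue
        yield row["path"], row["content"]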
---
path: /server/workers/tests/test_clustering.py | filename: test_clustering.py | extension: py | length_bytes: 2,193
repo_name: OpenKnowledgeMaps/Headstart | branch_name: refs/heads/master | github_id: 15,936,466
star_events_count: 132 | fork_events_count: 36 | language: Python | gha_language: JavaScript | src_encoding: UTF-8
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
is_vendor: false | is_generated: false
blob_id: e220e2ab2ce4ac5dd9e85403208620fa41df089a | content_id: 358af7f0c99ee52bf7fafdb2bb4a494dbdde0647
directory_id: bc6e87f8e9a3f6c35f8080718ac409801dab3b24 | snapshot_id: b7f56d8562d044e8d96a08f9f7ae0bc6de1076cd | revision_id: 94dcc248e1892de7b603d5a4dad175f5d8a128db
visit_date: 2023-08-31T20:06:34.485558 | revision_date: 2023-08-25T17:34:03 | committer_date: 2023-08-25T17:34:03
gha_created_at: 2014-01-15T13:52:50 | gha_event_created_at: 2023-08-25T17:34:05
content:
import json

import pytest
import pandas as pd
import numpy as np

from .test_helpers import CASENAMES, CASEDATA, RESULTS, get_dataprocessing_result


@pytest.mark.parametrize("testcase", CASENAMES)
def test_clustering_2_items(testcase):
    testcase = CASEDATA[testcase]
    metadata = pd.DataFrame.from_records(json.loads(testcase["input_data"]["metadata"]))
    text = pd.DataFrame.from_records(json.loads(testcase["input_data"]["text"]))
    metadata_sample = metadata.sample(n=2, random_state=42)
    text_sample = text.sample(n=2, random_state=42)
    testcase["input_data"]["metadata"] = metadata_sample.to_json(orient='records')
    testcase["input_data"]["text"] = text_sample.to_json(orient='records')
    test_result = get_dataprocessing_result(testcase)
    assert len(test_result) == 2


@pytest.mark.parametrize("testcase", CASENAMES)
def test_max_n_cluster(testcase):
    testcase = RESULTS[testcase]
    n_items = len(testcase)
    if n_items <= 100:
        assert testcase.area.nunique() <= 15
    if 150 <= n_items < 200:
        assert testcase.area.nunique() == 16
    if 200 <= n_items < 300:
        assert testcase.area.nunique() == 17
    if 300 <= n_items < 400:
        assert testcase.area.nunique() == 18
    if 400 <= n_items < 500:
        assert testcase.area.nunique() == 19
    if n_items >= 500:
        assert testcase.area.nunique() == 20


@pytest.mark.parametrize("testcase", CASENAMES)
def test_n_cluster_lower_bound(testcase):
    testcase = CASEDATA[testcase]
    metadata = pd.DataFrame.from_records(json.loads(testcase["input_data"]["metadata"]))
    text = pd.DataFrame.from_records(json.loads(testcase["input_data"]["text"]))
    rand_n = np.random.randint(2, 30)
    n = min(len(metadata), rand_n)
    metadata_sample = metadata.sample(n=n, random_state=42)
    text_sample = text.sample(n=n, random_state=42)
    testcase["input_data"]["metadata"] = metadata_sample.to_json(orient='records')
    testcase["input_data"]["text"] = text_sample.to_json(orient='records')
    test_result = get_dataprocessing_result(testcase)
    n_items = len(test_result)
    if n_items <= 30:
        assert test_result.area.nunique() <= round(np.sqrt(n_items)) + 1
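The bounds in test_max_n_cluster encode a cap that is 15 areas for up to 100 items and grows by one per additional 100 items, saturating at 20. A hypothetical helper (not part of the repository) expressing the same rule:

def expected_max_clusters(n_items: int) -> int:
    # Cap implied by the assertions above: <= 15 areas for up to 100 items,
    # one more per additional 100 items, capped at 20 from 500 items up.
    if n_items <= 100:
        return 15
    return min(20, 15 + n_items // 100)

(The tests leave the 100-150 item range unasserted; the helper interpolates it.)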
---
path: /test/comp/comp_impute.py | filename: comp_impute.py | extension: py | length_bytes: 2,128
repo_name: morganjwilliams/pyrolite | branch_name: refs/heads/main | github_id: 137,172,322
star_events_count: 113 | fork_events_count: 37 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["BSD-3-Clause", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
is_vendor: false | is_generated: false
blob_id: 5db23cb7cb8b4ee35952fcb12cf89f802122517a | content_id: 899cf5365bf7c36a4f2175234e287179bed6782a
directory_id: ec8d9e1595ccc252a57d1769382bb98d604e40a9 | snapshot_id: 2a8e738e14099f72282a80acb7139d71eec71efc | revision_id: ac7cd52029909738a41143b89be880e0da419266
visit_date: 2023-08-31T09:20:33.282184 | revision_date: 2023-07-21T06:45:24 | committer_date: 2023-07-21T06:45:24
gha_created_at: 2018-06-13T06:31:12 | gha_event_created_at: 2023-08-29T10:49:17
content:
import unittest

import numpy as np
import pandas as pd

from pyrolite.comp.aggregate import np_cross_ratios
from pyrolite.comp.impute import EMCOMP, _little_sweep, _multisweep, _reg_sweep
from pyrolite.util.math import augmented_covariance_matrix
from pyrolite.util.synthetic import (
    normal_frame,
    normal_series,
    random_composition,
    random_cov_matrix,
)


class TestRegSweep(unittest.TestCase):
    def setUp(self):
        self.G = augmented_covariance_matrix(np.array([1.1, 0.9]), random_cov_matrix(2))
        self.G3 = augmented_covariance_matrix(
            np.array([1.1, 0.9, 1.05]), random_cov_matrix(3)
        )

    def test_default(self):
        pass


class TestLittleSweep(unittest.TestCase):
    def setUp(self):
        self.G = augmented_covariance_matrix(np.array([1.1, 0.9]), random_cov_matrix(2))
        self.G3 = augmented_covariance_matrix(
            np.array([1.1, 0.9, 1.05]), random_cov_matrix(3)
        )

    def test_multisweep_commutative(self):
        G = self.G3
        assert np.allclose(_multisweep(G, [0, 1]), _multisweep(G, [1, 0]))

    def test_default(self):
        G = self.G
        H = _little_sweep(G, k=0)
        assert np.allclose(
            H,
            np.array(
                [
                    [-1 / G[0, 0], G[0, 1] / G[0, 0], G[0, 2] / G[0, 0]],
                    [
                        G[0, 1] / G[0, 0],
                        G[1, 1] - G[0, 1] * G[0, 1] / G[0, 0],
                        G[1, 2] - G[0, 2] * G[0, 1] / G[0, 0],
                    ],
                    [
                        G[0, 2] / G[0, 0],
                        G[1, 2] - G[0, 2] * G[0, 1] / G[0, 0],
                        G[2, 2] - G[0, 2] * G[0, 2] / G[0, 0],
                    ],
                ]
            ),
        )


class TestEMCOMP(unittest.TestCase):
    def setUp(self):
        self.data = random_composition(size=200, missing="MNAR")

    def test_encomp(self):
        impute, p0, ni = EMCOMP(
            self.data, threshold=0.5 * np.nanmin(self.data, axis=0), tol=0.01
        )


if __name__ == "__main__":
    unittest.main()
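The matrix asserted in TestLittleSweep.test_default is the standard sweep operator applied at index 0. A small stand-alone restatement of that rule (a sketch, independent of pyrolite's _little_sweep):

import numpy as np

def sweep(G: np.ndarray, k: int) -> np.ndarray:
    # Sweep G on index k: H[k][k] = -1/G[k][k]; row and column k are scaled
    # by 1/G[k][k]; every other entry becomes G[i][j] - G[i][k]*G[k][j]/G[k][k].
    H = G - np.outer(G[:, k], G[k, :]) / G[k, k]
    H[k, :] = G[k, :] / G[k, k]
    H[:, k] = G[:, k] / G[k, k]
    H[k, k] = -1.0 / G[k, k]
    return H

For a symmetric 3x3 G this reproduces exactly the expected array in test_default above.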
---
path: /DecryptLogin/modules/utils/__init__.py | filename: __init__.py | extension: py | length_bytes: 247
repo_name: CharlesPikachu/DecryptLogin | branch_name: refs/heads/master | github_id: 172,416,496
star_events_count: 2,871 | fork_events_count: 809 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
is_vendor: false | is_generated: false
blob_id: 3c99f1eba7f9a6d727dff12bfe819b9f3a2d2077 | content_id: 58aacbc99bde08a10d1b8173c062b16e1f47c398
directory_id: 6a7005ca7e418a18cbfeec296129873aef6446a4 | snapshot_id: f0646d37e8604fb9c41dc74c17c0ea48cb5066ec | revision_id: bb4228c0535ffd7060b7816cbd1da51ba8d95ab8
visit_date: 2023-05-22T15:21:59.038844 | revision_date: 2022-08-29T08:59:05 | committer_date: 2022-08-29T08:59:05
gha_created_at: 2019-02-25T01:57:20 | gha_event_created_at: 2022-10-06T14:58:49
content:
'''initialize'''
from .misc import showImage, removeImage, saveImage
from .cookies import saveSessionCookies, loadSessionCookies

'''all'''
__all__ = [
    'showImage', 'removeImage', 'saveImage',
    'saveSessionCookies', 'loadSessionCookies',
]
---
path: /packages/google-cloud-dataplex/google/cloud/dataplex_v1/types/datascans.py | filename: datascans.py | extension: py | length_bytes: 25,542
repo_name: googleapis/google-cloud-python | branch_name: refs/heads/main | github_id: 16,316,451
star_events_count: 2,792 | fork_events_count: 917 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
is_vendor: false | is_generated: false
blob_id: 0855035d3fe4b403f0101822b93a66b92d28367b | content_id: 792b14e7cd49e363b535293f45ef34855220221e
directory_id: 5da5473ff3026165a47f98744bac82903cf008e0 | snapshot_id: ed61a5f03a476ab6053870f4da7bc5534e25558b | revision_id: 93c4e63408c65129422f65217325f4e7d41f7edf
visit_date: 2023-09-04T09:09:07.852632 | revision_date: 2023-08-31T22:49:26 | committer_date: 2023-08-31T22:49:26
gha_created_at: 2014-01-28T15:51:47 | gha_event_created_at: 2023-09-14T21:45:18
content:
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import field_mask_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore
import proto  # type: ignore

from google.cloud.dataplex_v1.types import (
    data_profile,
    data_quality,
    processing,
    resources,
)

__protobuf__ = proto.module(
    package="google.cloud.dataplex.v1",
    manifest={
        "DataScanType",
        "CreateDataScanRequest",
        "UpdateDataScanRequest",
        "DeleteDataScanRequest",
        "GetDataScanRequest",
        "ListDataScansRequest",
        "ListDataScansResponse",
        "RunDataScanRequest",
        "RunDataScanResponse",
        "GetDataScanJobRequest",
        "ListDataScanJobsRequest",
        "ListDataScanJobsResponse",
        "DataScan",
        "DataScanJob",
    },
)


class DataScanType(proto.Enum):
    r"""The type of DataScan.

    Values:
        DATA_SCAN_TYPE_UNSPECIFIED (0):
            The DataScan type is unspecified.
        DATA_QUALITY (1):
            Data Quality scan.
        DATA_PROFILE (2):
            Data Profile scan.
    """
    DATA_SCAN_TYPE_UNSPECIFIED = 0
    DATA_QUALITY = 1
    DATA_PROFILE = 2


class CreateDataScanRequest(proto.Message):
    r"""Create dataScan request.

    Attributes:
        parent (str):
            Required. The resource name of the parent location:
            ``projects/{project}/locations/{location_id}`` where
            ``project`` refers to a *project_id* or *project_number* and
            ``location_id`` refers to a GCP region.
        data_scan (google.cloud.dataplex_v1.types.DataScan):
            Required. DataScan resource.
        data_scan_id (str):
            Required. DataScan identifier.

            - Must contain only lowercase letters, numbers and hyphens.
            - Must start with a letter.
            - Must end with a number or a letter.
            - Must be between 1-63 characters.
            - Must be unique within the customer project / location.
        validate_only (bool):
            Optional. Only validate the request, but do not perform
            mutations. The default is ``false``.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    data_scan: "DataScan" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="DataScan",
    )
    data_scan_id: str = proto.Field(
        proto.STRING,
        number=3,
    )
    validate_only: bool = proto.Field(
        proto.BOOL,
        number=4,
    )


class UpdateDataScanRequest(proto.Message):
    r"""Update dataScan request.

    Attributes:
        data_scan (google.cloud.dataplex_v1.types.DataScan):
            Required. DataScan resource to be updated.

            Only fields specified in ``update_mask`` are updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Mask of fields to update.
        validate_only (bool):
            Optional. Only validate the request, but do not perform
            mutations. The default is ``false``.
    """

    data_scan: "DataScan" = proto.Field(
        proto.MESSAGE,
        number=1,
        message="DataScan",
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
    validate_only: bool = proto.Field(
        proto.BOOL,
        number=3,
    )


class DeleteDataScanRequest(proto.Message):
    r"""Delete dataScan request.

    Attributes:
        name (str):
            Required. The resource name of the dataScan:
            ``projects/{project}/locations/{location_id}/dataScans/{data_scan_id}``
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class GetDataScanRequest(proto.Message):
    r"""Get dataScan request.

    Attributes:
        name (str):
            Required. The resource name of the dataScan:
            ``projects/{project}/locations/{location_id}/dataScans/{data_scan_id}``
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
        view (google.cloud.dataplex_v1.types.GetDataScanRequest.DataScanView):
            Optional. Select the DataScan view to return. Defaults to
            ``BASIC``.
    """

    class DataScanView(proto.Enum):
        r"""DataScan view options.

        Values:
            DATA_SCAN_VIEW_UNSPECIFIED (0):
                The API will default to the ``BASIC`` view.
            BASIC (1):
                Basic view that does not include *spec* and *result*.
            FULL (10):
                Include everything.
        """
        DATA_SCAN_VIEW_UNSPECIFIED = 0
        BASIC = 1
        FULL = 10

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    view: DataScanView = proto.Field(
        proto.ENUM,
        number=2,
        enum=DataScanView,
    )


class ListDataScansRequest(proto.Message):
    r"""List dataScans request.

    Attributes:
        parent (str):
            Required. The resource name of the parent location:
            ``projects/{project}/locations/{location_id}`` where
            ``project`` refers to a *project_id* or *project_number* and
            ``location_id`` refers to a GCP region.
        page_size (int):
            Optional. Maximum number of dataScans to
            return. The service may return fewer than this
            value. If unspecified, at most 500 scans will be
            returned. The maximum value is 1000; values
            above 1000 will be coerced to 1000.
        page_token (str):
            Optional. Page token received from a previous
            ``ListDataScans`` call. Provide this to retrieve the
            subsequent page. When paginating, all other parameters
            provided to ``ListDataScans`` must match the call that
            provided the page token.
        filter (str):
            Optional. Filter request.
        order_by (str):
            Optional. Order by fields (``name`` or ``create_time``) for
            the result. If not specified, the ordering is undefined.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=3,
    )
    filter: str = proto.Field(
        proto.STRING,
        number=4,
    )
    order_by: str = proto.Field(
        proto.STRING,
        number=5,
    )


class ListDataScansResponse(proto.Message):
    r"""List dataScans response.

    Attributes:
        data_scans (MutableSequence[google.cloud.dataplex_v1.types.DataScan]):
            DataScans (``BASIC`` view only) under the given parent
            location.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
        unreachable (MutableSequence[str]):
            Locations that could not be reached.
    """

    @property
    def raw_page(self):
        return self

    data_scans: MutableSequence["DataScan"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="DataScan",
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    unreachable: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=3,
    )


class RunDataScanRequest(proto.Message):
    r"""Run DataScan Request

    Attributes:
        name (str):
            Required. The resource name of the DataScan:
            ``projects/{project}/locations/{location_id}/dataScans/{data_scan_id}``.
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.

            Only **OnDemand** data scans are allowed.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class RunDataScanResponse(proto.Message):
    r"""Run DataScan Response.

    Attributes:
        job (google.cloud.dataplex_v1.types.DataScanJob):
            DataScanJob created by RunDataScan request.
    """

    job: "DataScanJob" = proto.Field(
        proto.MESSAGE,
        number=1,
        message="DataScanJob",
    )


class GetDataScanJobRequest(proto.Message):
    r"""Get DataScanJob request.

    Attributes:
        name (str):
            Required. The resource name of the DataScanJob:
            ``projects/{project}/locations/{location_id}/dataScans/{data_scan_id}/jobs/{data_scan_job_id}``
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
        view (google.cloud.dataplex_v1.types.GetDataScanJobRequest.DataScanJobView):
            Optional. Select the DataScanJob view to return. Defaults to
            ``BASIC``.
    """

    class DataScanJobView(proto.Enum):
        r"""DataScanJob view options.

        Values:
            DATA_SCAN_JOB_VIEW_UNSPECIFIED (0):
                The API will default to the ``BASIC`` view.
            BASIC (1):
                Basic view that does not include *spec* and *result*.
            FULL (10):
                Include everything.
        """
        DATA_SCAN_JOB_VIEW_UNSPECIFIED = 0
        BASIC = 1
        FULL = 10

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    view: DataScanJobView = proto.Field(
        proto.ENUM,
        number=2,
        enum=DataScanJobView,
    )


class ListDataScanJobsRequest(proto.Message):
    r"""List DataScanJobs request.

    Attributes:
        parent (str):
            Required. The resource name of the parent environment:
            ``projects/{project}/locations/{location_id}/dataScans/{data_scan_id}``
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
        page_size (int):
            Optional. Maximum number of DataScanJobs to
            return. The service may return fewer than this
            value. If unspecified, at most 10 DataScanJobs
            will be returned. The maximum value is 1000;
            values above 1000 will be coerced to 1000.
        page_token (str):
            Optional. Page token received from a previous
            ``ListDataScanJobs`` call. Provide this to retrieve the
            subsequent page. When paginating, all other parameters
            provided to ``ListDataScanJobs`` must match the call that
            provided the page token.
        filter (str):
            Optional. An expression for filtering the results of the
            ListDataScanJobs request.

            If unspecified, all datascan jobs will be returned. Multiple
            filters can be applied (with ``AND``, ``OR`` logical
            operators). Filters are case-sensitive.

            Allowed fields are:

            - ``start_time``
            - ``end_time``

            ``start_time`` and ``end_time`` expect RFC-3339 formatted
            strings (e.g. 2018-10-08T18:30:00-07:00).

            For instance, 'start_time > 2018-10-08T00:00:00.123456789Z
            AND end_time < 2018-10-09T00:00:00.123456789Z' limits
            results to DataScanJobs between specified start and end
            times.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=3,
    )
    filter: str = proto.Field(
        proto.STRING,
        number=4,
    )


class ListDataScanJobsResponse(proto.Message):
    r"""List DataScanJobs response.

    Attributes:
        data_scan_jobs (MutableSequence[google.cloud.dataplex_v1.types.DataScanJob]):
            DataScanJobs (``BASIC`` view only) under a given dataScan.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
    """

    @property
    def raw_page(self):
        return self

    data_scan_jobs: MutableSequence["DataScanJob"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="DataScanJob",
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )


class DataScan(proto.Message):
    r"""Represents a user-visible job which provides the insights for the
    related data source.

    For example:

    - Data Quality: generates queries based on the rules and runs
      against the data to get data quality check results.
    - Data Profile: analyzes the data in table(s) and generates
      insights about the structure, content and relationships (such as
      null percent, cardinality, min/max/mean, etc).

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        name (str):
            Output only. The relative resource name of the scan, of the
            form:
            ``projects/{project}/locations/{location_id}/dataScans/{datascan_id}``,
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
        uid (str):
            Output only. System generated globally unique
            ID for the scan. This ID will be different if
            the scan is deleted and re-created with the same
            name.
        description (str):
            Optional. Description of the scan.

            - Must be between 1-1024 characters.
        display_name (str):
            Optional. User friendly display name.

            - Must be between 1-256 characters.
        labels (MutableMapping[str, str]):
            Optional. User-defined labels for the scan.
        state (google.cloud.dataplex_v1.types.State):
            Output only. Current state of the DataScan.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the scan was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the scan was last
            updated.
        data (google.cloud.dataplex_v1.types.DataSource):
            Required. The data source for DataScan.
        execution_spec (google.cloud.dataplex_v1.types.DataScan.ExecutionSpec):
            Optional. DataScan execution settings.

            If not specified, the fields in it will use
            their default values.
        execution_status (google.cloud.dataplex_v1.types.DataScan.ExecutionStatus):
            Output only. Status of the data scan
            execution.
        type_ (google.cloud.dataplex_v1.types.DataScanType):
            Output only. The type of DataScan.
        data_quality_spec (google.cloud.dataplex_v1.types.DataQualitySpec):
            DataQualityScan related setting.

            This field is a member of `oneof`_ ``spec``.
        data_profile_spec (google.cloud.dataplex_v1.types.DataProfileSpec):
            DataProfileScan related setting.

            This field is a member of `oneof`_ ``spec``.
        data_quality_result (google.cloud.dataplex_v1.types.DataQualityResult):
            Output only. The result of the data quality
            scan.

            This field is a member of `oneof`_ ``result``.
        data_profile_result (google.cloud.dataplex_v1.types.DataProfileResult):
            Output only. The result of the data profile
            scan.

            This field is a member of `oneof`_ ``result``.
    """

    class ExecutionSpec(proto.Message):
        r"""DataScan execution settings.

        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

        Attributes:
            trigger (google.cloud.dataplex_v1.types.Trigger):
                Optional. Spec related to how often and when a scan should
                be triggered.

                If not specified, the default is ``OnDemand``, which means
                the scan will not run until the user calls ``RunDataScan``
                API.
            field (str):
                Immutable. The unnested field (of type *Date* or
                *Timestamp*) that contains values which monotonically
                increase over time.

                If not specified, a data scan will run for all data in the
                table.

                This field is a member of `oneof`_ ``incremental``.
        """

        trigger: processing.Trigger = proto.Field(
            proto.MESSAGE,
            number=1,
            message=processing.Trigger,
        )
        field: str = proto.Field(
            proto.STRING,
            number=100,
            oneof="incremental",
        )

    class ExecutionStatus(proto.Message):
        r"""Status of the data scan execution.

        Attributes:
            latest_job_start_time (google.protobuf.timestamp_pb2.Timestamp):
                The time when the latest DataScanJob started.
            latest_job_end_time (google.protobuf.timestamp_pb2.Timestamp):
                The time when the latest DataScanJob ended.
        """

        latest_job_start_time: timestamp_pb2.Timestamp = proto.Field(
            proto.MESSAGE,
            number=4,
            message=timestamp_pb2.Timestamp,
        )
        latest_job_end_time: timestamp_pb2.Timestamp = proto.Field(
            proto.MESSAGE,
            number=5,
            message=timestamp_pb2.Timestamp,
        )

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    uid: str = proto.Field(
        proto.STRING,
        number=2,
    )
    description: str = proto.Field(
        proto.STRING,
        number=3,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=4,
    )
    labels: MutableMapping[str, str] = proto.MapField(
        proto.STRING,
        proto.STRING,
        number=5,
    )
    state: resources.State = proto.Field(
        proto.ENUM,
        number=6,
        enum=resources.State,
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=7,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=8,
        message=timestamp_pb2.Timestamp,
    )
    data: processing.DataSource = proto.Field(
        proto.MESSAGE,
        number=9,
        message=processing.DataSource,
    )
    execution_spec: ExecutionSpec = proto.Field(
        proto.MESSAGE,
        number=10,
        message=ExecutionSpec,
    )
    execution_status: ExecutionStatus = proto.Field(
        proto.MESSAGE,
        number=11,
        message=ExecutionStatus,
    )
    type_: "DataScanType" = proto.Field(
        proto.ENUM,
        number=12,
        enum="DataScanType",
    )
    data_quality_spec: data_quality.DataQualitySpec = proto.Field(
        proto.MESSAGE,
        number=100,
        oneof="spec",
        message=data_quality.DataQualitySpec,
    )
    data_profile_spec: data_profile.DataProfileSpec = proto.Field(
        proto.MESSAGE,
        number=101,
        oneof="spec",
        message=data_profile.DataProfileSpec,
    )
    data_quality_result: data_quality.DataQualityResult = proto.Field(
        proto.MESSAGE,
        number=200,
        oneof="result",
        message=data_quality.DataQualityResult,
    )
    data_profile_result: data_profile.DataProfileResult = proto.Field(
        proto.MESSAGE,
        number=201,
        oneof="result",
        message=data_profile.DataProfileResult,
    )


class DataScanJob(proto.Message):
    r"""A DataScanJob represents an instance of DataScan execution.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        name (str):
            Output only. The relative resource name of the DataScanJob,
            of the form:
            ``projects/{project}/locations/{location_id}/dataScans/{datascan_id}/jobs/{job_id}``,
            where ``project`` refers to a *project_id* or
            *project_number* and ``location_id`` refers to a GCP region.
        uid (str):
            Output only. System generated globally unique
            ID for the DataScanJob.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the DataScanJob
            was started.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the DataScanJob
            ended.
        state (google.cloud.dataplex_v1.types.DataScanJob.State):
            Output only. Execution state for the
            DataScanJob.
        message (str):
            Output only. Additional information about the
            current state.
        type_ (google.cloud.dataplex_v1.types.DataScanType):
            Output only. The type of the parent DataScan.
        data_quality_spec (google.cloud.dataplex_v1.types.DataQualitySpec):
            Output only. DataQualityScan related setting.

            This field is a member of `oneof`_ ``spec``.
        data_profile_spec (google.cloud.dataplex_v1.types.DataProfileSpec):
            Output only. DataProfileScan related setting.

            This field is a member of `oneof`_ ``spec``.
        data_quality_result (google.cloud.dataplex_v1.types.DataQualityResult):
            Output only. The result of the data quality
            scan.

            This field is a member of `oneof`_ ``result``.
        data_profile_result (google.cloud.dataplex_v1.types.DataProfileResult):
            Output only. The result of the data profile
            scan.

            This field is a member of `oneof`_ ``result``.
    """

    class State(proto.Enum):
        r"""Execution state for the DataScanJob.

        Values:
            STATE_UNSPECIFIED (0):
                The DataScanJob state is unspecified.
            RUNNING (1):
                The DataScanJob is running.
            CANCELING (2):
                The DataScanJob is canceling.
            CANCELLED (3):
                The DataScanJob cancellation was successful.
            SUCCEEDED (4):
                The DataScanJob completed successfully.
            FAILED (5):
                The DataScanJob is no longer running due to
                an error.
            PENDING (7):
                The DataScanJob has been created but not
                started to run yet.
        """
        STATE_UNSPECIFIED = 0
        RUNNING = 1
        CANCELING = 2
        CANCELLED = 3
        SUCCEEDED = 4
        FAILED = 5
        PENDING = 7

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    uid: str = proto.Field(
        proto.STRING,
        number=2,
    )
    start_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    end_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    state: State = proto.Field(
        proto.ENUM,
        number=5,
        enum=State,
    )
    message: str = proto.Field(
        proto.STRING,
        number=6,
    )
    type_: "DataScanType" = proto.Field(
        proto.ENUM,
        number=7,
        enum="DataScanType",
    )
    data_quality_spec: data_quality.DataQualitySpec = proto.Field(
        proto.MESSAGE,
        number=100,
        oneof="spec",
        message=data_quality.DataQualitySpec,
    )
    data_profile_spec: data_profile.DataProfileSpec = proto.Field(
        proto.MESSAGE,
        number=101,
        oneof="spec",
        message=data_profile.DataProfileSpec,
    )
    data_quality_result: data_quality.DataQualityResult = proto.Field(
        proto.MESSAGE,
        number=200,
        oneof="result",
        message=data_quality.DataQualityResult,
    )
    data_profile_result: data_profile.DataProfileResult = proto.Field(
        proto.MESSAGE,
        number=201,
        oneof="result",
        message=data_profile.DataProfileResult,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
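The classes above are proto-plus messages, so they accept keyword arguments for their fields. A minimal, hypothetical construction (the project, location, and scan values are placeholders, not from this file):

from google.cloud.dataplex_v1.types import datascans

request = datascans.CreateDataScanRequest(
    parent="projects/example-project/locations/us-central1",  # placeholder resource name
    data_scan_id="example-scan",  # must satisfy the identifier rules documented above
    data_scan=datascans.DataScan(description="example"),
    validate_only=True,  # validate without mutating, per the field docs above
)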
---
path: /locations/pipelines/check_item_properties.py | filename: check_item_properties.py | extension: py | length_bytes: 4,678
repo_name: alltheplaces/alltheplaces | branch_name: refs/heads/master | github_id: 61,166,935
star_events_count: 453 | fork_events_count: 176 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["CC0-1.0", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
is_vendor: false | is_generated: false
blob_id: 7517dd7fce1d6aa7d4a3ee7078eee961e099a642 | content_id: 1e4dc4457a2127caf6882169cbc21ae9188368a1
directory_id: 71acb7214efd91c0d327f6d8958e1798eadb4401 | snapshot_id: 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e | revision_id: 1bcbb55cfcf06f2c714465570711f6e83f205c22
visit_date: 2023-08-30T19:45:35.098658 | revision_date: 2023-08-30T17:51:54 | committer_date: 2023-08-30T17:51:54
gha_created_at: 2016-06-15T01:09:18 | gha_event_created_at: 2023-09-14T17:16:40
content:
import math
import re

from scrapy import Spider

from locations.hours import OpeningHours
from locations.items import get_lat_lon, set_lat_lon


def check_field(item, spider: Spider, param, allowed_types, match_regex=None):
    if val := item.get(param):
        if not isinstance(val, allowed_types):
            spider.crawler.stats.inc_value(f"atp/field/{param}/wrong_type")
            spider.logger.error(f"Invalid type {type(val).__name__} on {param}, expected {allowed_types}")
        elif match_regex and not match_regex.match(val):
            spider.crawler.stats.inc_value(f"atp/field/{param}/invalid")
    else:
        spider.crawler.stats.inc_value(f"atp/field/{param}/missing")


class CheckItemPropertiesPipeline:
    # From https://github.com/django/django/blob/master/django/core/validators.py
    url_regex = re.compile(
        r"^(?:http)s?://"  # http:// or https://
        r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"  # domain...
        r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|"  # ...or ipv4
        r"\[?[A-F0-9]*:[A-F0-9:]+\]?)"  # ...or ipv6
        r"(?::\d+)?"  # optional port
        r"(?:/?|[/?]\S+)$",
        re.IGNORECASE,
    )
    country_regex = re.compile(r"(^[A-Z]{2}$)")
    email_regex = re.compile(r"(^[-\w_.+]+@[-\w]+\.[-\w.]+$)")
    twitter_regex = re.compile(r"^@?([-\w_]+)$")
    wikidata_regex = re.compile(
        r"^Q[0-9]+$",
    )
    opening_hours_regex = re.compile(
        r"^(?:(?:Mo|Tu|We|Th|Fr|Sa|Su)(?:-(?:Mo|Tu|We|Th|Fr|Sa|Su))? (?:,?[0-9]{2}:[0-9]{2}-[0-9]{2}:[0-9]{2})+(?:; )?)+$"
    )
    min_lat = -90.0
    max_lat = 90.0
    min_lon = -180.0
    max_lon = 180.0

    def process_item(self, item, spider):  # noqa: C901
        check_field(item, spider, "brand_wikidata", allowed_types=(str,), match_regex=self.wikidata_regex)
        check_field(item, spider, "website", (str,), self.url_regex)
        check_field(item, spider, "image", (str,), self.url_regex)
        check_field(item, spider, "email", (str,), self.email_regex)
        check_field(item, spider, "phone", (str,))
        check_field(item, spider, "street_address", (str,))
        check_field(item, spider, "city", (str,))
        check_field(item, spider, "state", (str,))
        check_field(item, spider, "postcode", (str,))
        check_field(item, spider, "country", (str,), self.country_regex)
        check_field(item, spider, "name", (str,))
        check_field(item, spider, "brand", (str,))

        if coords := get_lat_lon(item):
            lat, lon = coords
            if not (self.min_lat < lat < self.max_lat):
                spider.crawler.stats.inc_value("atp/field/lat/invalid")
                lat = None
            if not (self.min_lon < lon < self.max_lon):
                spider.crawler.stats.inc_value("atp/field/lon/invalid")
                lon = None
            if isinstance(lat, float) and isinstance(lon, float):
                if math.fabs(lat) < 3 and math.fabs(lon) < 3:
                    spider.crawler.stats.inc_value("atp/geometry/null_island")
                    lat = None
                    lon = None
            set_lat_lon(item, lat, lon)
        if not (item.get("geometry") or get_lat_lon(item)):
            spider.crawler.stats.inc_value("atp/field/lat/missing")
            spider.crawler.stats.inc_value("atp/field/lon/missing")

        if twitter := item.get("twitter"):
            if not isinstance(twitter, str):
                spider.crawler.stats.inc_value("atp/field/twitter/wrong_type")
            elif not (self.url_regex.match(twitter) and "twitter.com" in twitter) and not self.twitter_regex.match(
                twitter
            ):
                spider.crawler.stats.inc_value("atp/field/twitter/invalid")
        else:
            spider.crawler.stats.inc_value("atp/field/twitter/missing")

        if opening_hours := item.get("opening_hours"):
            if isinstance(opening_hours, OpeningHours):
                if opening_hours.day_hours:
                    item["opening_hours"] = opening_hours.as_opening_hours()
                else:
                    item["opening_hours"] = None
                    spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
            elif not isinstance(opening_hours, str):
                spider.crawler.stats.inc_value("atp/field/opening_hours/wrong_type")
            elif not self.opening_hours_regex.match(opening_hours) and opening_hours != "24/7":
                spider.crawler.stats.inc_value("atp/field/opening_hours/invalid")
        else:
            spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
        return item
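A quick, self-contained illustration of two of the validation regexes defined above (the sample values are invented):

import re

country_regex = re.compile(r"(^[A-Z]{2}$)")
wikidata_regex = re.compile(r"^Q[0-9]+$")

assert country_regex.match("DE")            # two-letter ISO code passes
assert not country_regex.match("Germany")   # full country names are rejected
assert wikidata_regex.match("Q42")          # Wikidata item IDs are Q plus digits
assert not wikidata_regex.match("42")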
---
path: /lib/coloraide/css/__init__.py | filename: __init__.py | extension: py | length_bytes: 37
repo_name: facelessuser/ColorHelper | branch_name: refs/heads/master | github_id: 31,641,842
star_events_count: 279 | fork_events_count: 41 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
is_vendor: false | is_generated: false
blob_id: 388b773a695ba7d66b72d07c28bbec030257946d | content_id: 441e5b8a7283eea45e2e07cd8430b50d2db77f9f
directory_id: 44ddd25c6aa008cc0a814f9f49b2344c6a59aedb | snapshot_id: eb757896fa6e4a9029090188fad789587dc2ed06 | revision_id: ad4d779bff57a65b7c77cda0b79c10cf904eb817
visit_date: 2023-08-31T20:51:30.390633 | revision_date: 2023-08-28T15:53:39 | committer_date: 2023-08-28T15:53:39
gha_created_at: 2015-03-04T06:27:11 | gha_event_created_at: 2023-09-06T23:37:41
content:
"""CSS parsing and serialization."""
---
path: /mmflow/core/hooks/__init__.py | filename: __init__.py | extension: py | length_bytes: 237
repo_name: open-mmlab/mmflow | branch_name: refs/heads/master | github_id: 428,493,460
star_events_count: 808 | fork_events_count: 110 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
is_vendor: false | is_generated: false
blob_id: ff3c7fa0a3d3cca6fc1d57d26f82a93799674343 | content_id: b39b35ae43b9bc10b5ab8f6fdf47ad2286dab76e
directory_id: c6b9b9f2fbc6c62e7a86b02718954661af3c564f | snapshot_id: a90ff072805ac79cbc0b277baded1e74d25cccf0 | revision_id: 9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4
visit_date: 2023-05-22T05:19:48.986601 | revision_date: 2023-01-10T16:05:18 | committer_date: 2023-01-10T16:05:18
gha_created_at: 2021-11-16T02:42:41 | gha_event_created_at: 2023-09-05T13:19:38
content:
# Copyright (c) OpenMMLab. All rights reserved.
from .liteflownet_stage_loading import LiteFlowNetStageLoadHook
from .multistagelr_updater import MultiStageLrUpdaterHook
__all__ = ['MultiStageLrUpdaterHook', 'LiteFlowNetStageLoadHook']
---
path: /mods/GuessTheBomb.py | filename: GuessTheBomb.py | extension: py | length_bytes: 5,362
repo_name: Mrmaxmeier/BombSquad-Community-Mod-Manager | branch_name: refs/heads/master | github_id: 28,673,893
star_events_count: 143 | fork_events_count: 155 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Unlicense"] | license_type: permissive | gha_license_id: Unlicense
is_vendor: false | is_generated: false
blob_id: 32b04290d4912d4a92d644d6c37f24738333c27e | content_id: c88b9adbb9d8622fd9885acd29537c4603a5b616
directory_id: cd8a9ac708be8f559da1e9cadc1d5ad457226364 | snapshot_id: 078d0c95f326979989b8a9338c89724a08bcf077 | revision_id: aeba3f6858335aa08243c02ea29d4d4c807d43cc
visit_date: 2023-01-25T05:38:15.945963 | revision_date: 2023-01-22T11:20:19 | committer_date: 2023-01-22T11:20:19
gha_created_at: 2014-12-31T19:54:47 | gha_event_created_at: 2020-04-10T10:43:37
content:
# Made by Paolo Valerdi
import bs
import random


def bsGetAPIVersion():
    return 4


def bsGetGames():
    return [GuessTheBombGame]


def bsGetLevels():
    return [bs.Level('Guess The Bomb', displayName='${GAME}', gameType=GuessTheBombGame, settings={}, previewTexName='rampagePreview'),
            bs.Level('Epic Guess The Bomb', displayName='${GAME}', gameType=GuessTheBombGame, settings={'Epic Mode': True}, previewTexName='rampagePreview')]


class GuessTheBombGame(bs.TeamGameActivity):

    @classmethod
    def getName(cls):
        return 'Guess The Bomb'

    @classmethod
    def getScoreInfo(cls):
        return {'scoreName': 'Survived',
                'scoreType': 'milliseconds',
                'scoreVersion': 'B'}

    @classmethod
    def getDescription(cls, sessionType):
        return 'Dodge the falling bombs.'

    @classmethod
    def getSupportedMaps(cls, sessionType):
        return ['Rampage']

    @classmethod
    def getSettings(cls, sessionType):
        return [("Epic Mode", {'default': False})]

    @classmethod
    def supportsSessionType(cls, sessionType):
        return True if (issubclass(sessionType, bs.TeamsSession)
                        or issubclass(sessionType, bs.FreeForAllSession)
                        or issubclass(sessionType, bs.CoopSession)) else False

    def __init__(self, settings):
        bs.TeamGameActivity.__init__(self, settings)
        if self.settings['Epic Mode']: self._isSlowMotion = True
        self.announcePlayerDeaths = True
        self._lastPlayerDeathTime = None

    def onTransitionIn(self):
        bs.TeamGameActivity.onTransitionIn(self, music='Epic' if self.settings['Epic Mode'] else 'Survival')

    def onBegin(self):
        bs.TeamGameActivity.onBegin(self)
        self._meteorTime = 3000
        # Start shrinking the bomb-drop interval on a repeating timer.
        t = 7500 if len(self.players) > 2 else 4000
        if self.settings['Epic Mode']: t /= 4
        bs.gameTimer(t, self._decrementMeteorTime, repeat=True)
        t = 3000
        if self.settings['Epic Mode']: t /= 4
        bs.gameTimer(t, self._setMeteorTimer)
        self._timer = bs.OnScreenTimer()
        self._timer.start()

    def spawnPlayer(self, player):
        spaz = self.spawnPlayerSpaz(player)
        spaz.connectControlsToPlayer(enablePunch=False,
                                     enableBomb=False,
                                     enablePickUp=False)
        spaz.playBigDeathSound = True

    def handleMessage(self, m):
        if isinstance(m, bs.PlayerSpazDeathMessage):
            bs.TeamGameActivity.handleMessage(self, m)
            deathTime = bs.getGameTime()
            m.spaz.getPlayer().gameData['deathTime'] = deathTime
            if isinstance(self.getSession(), bs.CoopSession):
                bs.pushCall(self._checkEndGame)
                self._lastPlayerDeathTime = deathTime
            else:
                bs.gameTimer(1000, self._checkEndGame)
        else:
            bs.TeamGameActivity.handleMessage(self, m)

    def _checkEndGame(self):
        livingTeamCount = 0
        for team in self.teams:
            for player in team.players:
                if player.isAlive():
                    livingTeamCount += 1
                    break
        if isinstance(self.getSession(), bs.CoopSession):
            if livingTeamCount <= 0: self.endGame()
        else:
            if livingTeamCount <= 1: self.endGame()

    def _setMeteorTimer(self):
        bs.gameTimer(int((1.0 + 0.2 * random.random()) * self._meteorTime), self._dropBombCluster)

    def _dropBombCluster(self):
        if False:
            bs.newNode('locator', attrs={'position': (8, 6, -5.5)})
            bs.newNode('locator', attrs={'position': (8, 6, -2.3)})
            bs.newNode('locator', attrs={'position': (-7.3, 6, -5.5)})
            bs.newNode('locator', attrs={'position': (-7.3, 6, -2.3)})
        # Drop one or two bombs of a random type at random positions.
        delay = 0
        for i in range(random.randrange(1, 3)):
            types = ["normal", "ice", "sticky", "impact"]
            magic = random.choice(types)
            bt = magic
            pos = (-7.3 + 15.3 * random.random(), 11, -5.5 + 2.1 * random.random())
            vel = ((-5.0 + random.random() * 30.0) * (-1.0 if pos[0] > 0 else 1.0), -4.0, 0)
            bs.gameTimer(delay, bs.Call(self._dropBomb, pos, vel, bt))
            delay += 100
        self._setMeteorTimer()

    def _dropBomb(self, position, velocity, bombType):
        b = bs.Bomb(position=position, velocity=velocity, bombType=bombType).autoRetain()

    def _decrementMeteorTime(self):
        self._meteorTime = max(10, int(self._meteorTime * 0.9))

    def endGame(self):
        curTime = bs.getGameTime()
        for team in self.teams:
            for player in team.players:
                # Survivors get a death time just past the end plus a score bonus.
                survived = 'deathTime' not in player.gameData
                if survived: player.gameData['deathTime'] = curTime + 1
                score = (player.gameData['deathTime'] - self._timer.getStartTime()) / 1000
                if survived: score += 50
                self.scoreSet.playerScored(player, score, screenMessage=False)
        self._timer.stop(endTime=self._lastPlayerDeathTime)
        results = bs.TeamGameResults()
        for team in self.teams:
            longestLife = 0
            for player in team.players:
                longestLife = max(longestLife, (player.gameData['deathTime'] - self._timer.getStartTime()))
            results.setTeamScore(team, longestLife)
        self.end(results=results)
---
path: /flatdata-generator/tests/generators/py_expectations/structs/integers.py | filename: integers.py | extension: py | length_bytes: 3,207
repo_name: heremaps/flatdata | branch_name: refs/heads/master | github_id: 118,035,076
star_events_count: 170 | fork_events_count: 28 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
is_vendor: false | is_generated: false
blob_id: d96be33b7c3a9b75537f1b33c5e7b609696ec889 | content_id: 0fbf8f38d16e0aec68125362b8468284390688fd
directory_id: e124b63c8542da816e03ef5177112fc7af4d91a8 | snapshot_id: c5395977ac16af5adfe4824ed99511cae5f691ef | revision_id: 494f7731fa3cd6780d767fc5d218c783714acdee
visit_date: 2023-09-02T21:11:53.973570 | revision_date: 2023-08-30T09:34:02 | committer_date: 2023-08-30T09:34:02
gha_created_at: 2018-01-18T20:34:58 | gha_event_created_at: 2023-08-30T09:34:03
content:
#############################################################################
#                                ATTENTION!                                 #
#      This code is automatically generated by flatdata generator.          #
#      Any modifications to this file will be lost upon next regeneration.  #
#############################################################################
import flatdata.lib as flatdata


class n_U8(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct U8
{
    f : u8 : 8;
}
}
"""
    _NAME = "n_U8"
    _SIZE_IN_BITS = 8
    _SIZE_IN_BYTES = 1
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=8, is_signed=False, dtype="B"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_I8(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct I8
{
    f : i8 : 8;
}
}
"""
    _NAME = "n_I8"
    _SIZE_IN_BITS = 8
    _SIZE_IN_BYTES = 1
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=8, is_signed=True, dtype="b"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_U16(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct U16
{
    f : u16 : 16;
}
}
"""
    _NAME = "n_U16"
    _SIZE_IN_BITS = 16
    _SIZE_IN_BYTES = 2
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=16, is_signed=False, dtype="u2"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_I16(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct I16
{
    f : i16 : 16;
}
}
"""
    _NAME = "n_I16"
    _SIZE_IN_BITS = 16
    _SIZE_IN_BYTES = 2
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=16, is_signed=True, dtype="i2"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_U32(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct U32
{
    f : u32 : 32;
}
}
"""
    _NAME = "n_U32"
    _SIZE_IN_BITS = 32
    _SIZE_IN_BYTES = 4
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=32, is_signed=False, dtype="u4"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_I32(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct I32
{
    f : i32 : 32;
}
}
"""
    _NAME = "n_I32"
    _SIZE_IN_BITS = 32
    _SIZE_IN_BYTES = 4
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=32, is_signed=True, dtype="i4"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_U64(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct U64
{
    f : u64 : 64;
}
}
"""
    _NAME = "n_U64"
    _SIZE_IN_BITS = 64
    _SIZE_IN_BYTES = 8
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=64, is_signed=False, dtype="u8"),
    }
    _FIELD_KEYS = {
        "f",
    }


class n_I64(flatdata.structure.Structure):
    """"""
    _SCHEMA = """namespace n {
struct I64
{
    f : i64 : 64;
}
}
"""
    _NAME = "n_I64"
    _SIZE_IN_BITS = 64
    _SIZE_IN_BYTES = 8
    _FIELDS = {
        "f": flatdata.structure.FieldSignature(offset=0, width=64, is_signed=True, dtype="i8"),
    }
    _FIELD_KEYS = {
        "f",
    }
---
path: /src/genie/libs/parser/iosxe/tests/ShowVersion/cli/equal/golden_output_c4507_expected.py | filename: golden_output_c4507_expected.py | extension: py | length_bytes: 1,538
repo_name: CiscoTestAutomation/genieparser | branch_name: refs/heads/master | github_id: 131,621,824
star_events_count: 247 | fork_events_count: 409 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
is_vendor: false | is_generated: false
blob_id: 3a04d94198483ae30bfbf9cb4b0e05cfd7ad446f | content_id: 5da400037fa3f207555569df93190b0f5d333266
directory_id: 5ef6c8d47864f471e26b9902d61f8c687e941f05 | snapshot_id: 169c196558f1c1a0f0d10650876096f993224917 | revision_id: b531eff760b2e44cd69d7a2716db6f866907c239
visit_date: 2023-09-03T08:56:18.831340 | revision_date: 2023-08-29T22:32:02 | committer_date: 2023-08-29T22:32:02
gha_created_at: 2018-04-30T16:51:50 | gha_event_created_at: 2023-08-29T22:32:04
content:
expected_output = {
    "version": {
        "chassis": "WS-C4507R+E",
        "chassis_sn": "FXS1729E2TD",
        "compiled_by": "prod_rel_team",
        "compiled_date": "Tue 23-Oct-12 23:51",
        "copyright_years": "1986-2012",
        "curr_config_register": "0x2101",
        "hostname": "switchname",
        "image_id": "cat4500e-UNIVERSALK9-M",
        "image_type": "production image",
        "jawa_revision": "7",
        "last_reload_reason": "Reload",
        "license_level": "entservices",
        "license_type": "Permanent",
        "main_mem": "2097152",
        "mem_size": {"non-volatile configuration": "511"},
        "next_config_register": "0x2102",
        "next_reload_license_level": "entservices",
        "number_of_intfs": {
            "Gigabit Ethernet": "240",
            "Ten Gigabit Ethernet": "4",
            "Virtual Ethernet": "9",
        },
        "os": "IOS-XE",
        "platform": "Catalyst 4500 L3 Switch",
        "processor": {"cpu_type": "MPC8572", "speed": "1.5GHz", "supervisor": "7"},
        "processor_type": "MPC8572",
        "returned_to_rom_by": "reload",
        "rom": "15.0(1r)SG5",
        "rtr_type": "WS-C4507R+E",
        "running_default_software": True,
        "snowtrooper_revision": "0x0.0x1C",
        "system_restarted_at": "09:57:20 GMT Tue Oct 15 2013",
        "uptime": "6 years, 2 weeks, 13 hours, 31 minutes",
        "uptime_this_cp": "6 years, 2 weeks, 13 hours, 33 minutes",
        "version": "03.03.02.SG",
        "version_short": "03.03",
    }
}
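Golden fixtures like this one are compared against parsed CLI output elsewhere in the test suite; a schematic check, where parsed_output is assumed to come from running the ShowVersion parser on the matching captured "show version" output:

def check(parsed_output: dict) -> None:
    # Deep equality against the golden dict above.
    assert parsed_output == expected_output
    assert parsed_output["version"]["chassis"] == "WS-C4507R+E"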
---
path: /sdk/batchai/azure-mgmt-batchai/azure/mgmt/batchai/operations/_jobs_operations.py | filename: _jobs_operations.py | extension: py | length_bytes: 44,822
repo_name: Azure/azure-sdk-for-python | branch_name: refs/heads/main | github_id: 4,127,088
star_events_count: 4,046 | fork_events_count: 2,755 | language: Python | gha_language: Python | src_encoding: UTF-8
detected_licenses: ["MIT", "LicenseRef-scancode-generic-cla", "LGPL-2.1-or-later"] | license_type: permissive | gha_license_id: MIT
is_vendor: false | is_generated: false
blob_id: 1b9f5ce4c00ca79a69c0a00d0774e0b849fd808e | content_id: 118e1bc26dc4b65ad75544f70915a8da898c74fa
directory_id: f576f0ea3725d54bd2551883901b25b863fe6688 | snapshot_id: 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | revision_id: c2ca191e736bb06bfbbbc9493e8325763ba990bb
visit_date: 2023-09-06T09:30:13.135012 | revision_date: 2023-09-06T01:08:06 | committer_date: 2023-09-06T01:08:06
gha_created_at: 2012-04-24T16:46:12 | gha_event_created_at: 2023-09-14T21:48:49
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class JobsOperations(object):
    """JobsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~batch_ai.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_experiment(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_name,  # type: str
        jobs_list_by_experiment_options=None,  # type: Optional["_models.JobsListByExperimentOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.JobListResult"]
        """Gets a list of Jobs within the specified Experiment.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :param experiment_name: The name of the experiment. Experiment names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type experiment_name: str
        :param jobs_list_by_experiment_options: Parameter group.
        :type jobs_list_by_experiment_options: ~batch_ai.models.JobsListByExperimentOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either JobListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.JobListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        _max_results = None
        if jobs_list_by_experiment_options is not None:
            _max_results = jobs_list_by_experiment_options.max_results
        api_version = "2018-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_experiment.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
                    'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
                    'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if _max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('JobListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_experiment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs'}  # type: ignore

    def _create_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_name,  # type: str
        job_name,  # type: str
        parameters,  # type: "_models.JobCreateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.Job"]
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Job"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'JobCreateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Job', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}'}  # type: ignore

    def begin_create(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_name,  # type: str
        job_name,  # type: str
        parameters,  # type: "_models.JobCreateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Job"]
        """Creates a Job in the given Experiment.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :param experiment_name: The name of the experiment. Experiment names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type experiment_name: str
        :param job_name: The name of the job within the specified resource group. Job names can only
         contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
         name must be from 1 through 64 characters long.
        :type job_name: str
        :param parameters: The parameters to provide for job creation.
        :type parameters: ~batch_ai.models.JobCreateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Job or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~batch_ai.models.Job]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Job"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                experiment_name=experiment_name,
                job_name=job_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Job', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_name,  # type: str
        job_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_name,  # type: str
        job_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a Job.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :param experiment_name: The name of the experiment. Experiment names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type experiment_name: str
        :param job_name: The name of the job within the specified resource group. Job names can only
         contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
         name must be from 1 through 64 characters long.
        :type job_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
job_name=job_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}'} # type: ignore
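# Usage sketch (illustrative): suspending and resuming the delete poller via
# its continuation token. The `client` name and how the token is persisted
# are assumptions.
#
#   poller = client.jobs.begin_delete("my-rg", "my-ws", "my-exp", "my-job")
#   token = poller.continuation_token()  # persist this string somewhere
#   resumed = client.jobs.begin_delete(
#       "my-rg", "my-ws", "my-exp", "my-job",
#       continuation_token=token,        # restart polling from saved state
#   )
#   resumed.wait()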
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Job"
"""Gets information about a Job.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param experiment_name: The name of the experiment. Experiment names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type experiment_name: str
:param job_name: The name of the job within the specified resource group. Job names can only
contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
name must be from 1 through 64 characters long.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~batch_ai.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}'} # type: ignore
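# Usage sketch (illustrative; names are assumptions and the printed
# attributes depend on the Job model):
#
#   job = client.jobs.get("my-rg", "my-workspace", "my-exp", "my-job")
#   print(job.name, job.execution_state)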
def list_output_files(
self,
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
job_name, # type: str
jobs_list_output_files_options, # type: "_models.JobsListOutputFilesOptions"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FileListResult"]
"""List all directories and files inside the given directory of the Job's output directory (if the
output directory is on Azure File Share or Azure Storage Container).
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param experiment_name: The name of the experiment. Experiment names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type experiment_name: str
:param job_name: The name of the job within the specified resource group. Job names can only
contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
name must be from 1 through 64 characters long.
:type job_name: str
:param jobs_list_output_files_options: Parameter group.
:type jobs_list_output_files_options: ~batch_ai.models.JobsListOutputFilesOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either FileListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.FileListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_outputdirectoryid = None
_directory = None
_linkexpiryinminutes = None
_max_results = None
if jobs_list_output_files_options is not None:
_outputdirectoryid = jobs_list_output_files_options.outputdirectoryid
_directory = jobs_list_output_files_options.directory
_linkexpiryinminutes = jobs_list_output_files_options.linkexpiryinminutes
_max_results = jobs_list_output_files_options.max_results
api_version = "2018-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_output_files.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['outputdirectoryid'] = self._serialize.query("outputdirectoryid", _outputdirectoryid, 'str')
if _directory is not None:
query_parameters['directory'] = self._serialize.query("directory", _directory, 'str')
if _linkexpiryinminutes is not None:
query_parameters['linkexpiryinminutes'] = self._serialize.query("linkexpiryinminutes", _linkexpiryinminutes, 'int', maximum=600, minimum=5)
if _max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FileListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_output_files.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}/listOutputFiles'} # type: ignore
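# Usage sketch (illustrative): the method returns an ItemPaged iterable, so
# following next_link across pages happens transparently while iterating.
# The options constructor kwargs, the directory id value, and the item
# attribute names below are assumptions.
#
#   options = JobsListOutputFilesOptions(outputdirectoryid="stdouterr")
#   for f in client.jobs.list_output_files(
#           "my-rg", "my-workspace", "my-exp", "my-job", options):
#       print(f.name, f.download_url)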
def list_remote_login_information(
self,
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RemoteLoginInformationListResult"]
"""Gets a list of currently existing nodes which were used for the Job execution. The returned
information contains the node ID, its public IP and SSH port.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param experiment_name: The name of the experiment. Experiment names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type experiment_name: str
:param job_name: The name of the job within the specified resource group. Job names can only
contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
name must be from 1 through 64 characters long.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either RemoteLoginInformationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.RemoteLoginInformationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RemoteLoginInformationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_remote_login_information.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RemoteLoginInformationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_remote_login_information.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}/listRemoteLoginInformation'} # type: ignore
def _terminate_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
accept = "application/json"
# Construct URL
url = self._terminate_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_terminate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}/terminate'} # type: ignore
def begin_terminate(
self,
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Terminates a job.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param experiment_name: The name of the experiment. Experiment names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type experiment_name: str
:param job_name: The name of the job within the specified resource group. Job names can only
contain a combination of alphanumeric characters along with dash (-) and underscore (_). The
name must be from 1 through 64 characters long.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._terminate_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
job_name=job_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_terminate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs/{jobName}/terminate'} # type: ignore
|
ef82fad8e2aa3e9ec4e617658128287333e3bc6e
|
e145f05e919cad040c23587eadd11bed1e6bd5e8
|
/test/functional/feature_on_chain_government_voting_scenarios.py
|
0266ffbcc013390e30f9189a698277617b23986f
|
[
"MIT"
] |
permissive
|
DeFiCh/ain
|
f819ea586a9c17f99f8f8ea32982554b7c4fb5e2
|
6908107c4a7ee0e30dabc4ea773820637b42bbfe
|
refs/heads/master
| 2023-08-16T18:51:18.460486
| 2023-08-16T14:11:56
| 2023-08-16T14:11:56
| 228,198,960
| 435
| 146
|
MIT
| 2023-09-14T16:23:37
| 2019-12-15T14:37:57
|
C++
|
UTF-8
|
Python
| false
| false
| 15,484
|
py
|
feature_on_chain_government_voting_scenarios.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test OCG voting scenarios"""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
APPROVAL_THRESHOLD = 50
QUORUM = 50
VOTING_PERIOD = 10
NEXT_NETWORK_UPGRADE_HEIGHT = 200
class OCGVotingScenarioTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [
[
"-jellyfish_regtest=1",
"-dummypos=0",
"-txnotokens=0",
"-amkheight=50",
"-bayfrontheight=51",
"-eunosheight=80",
"-fortcanningheight=82",
"-fortcanninghillheight=84",
"-fortcanningroadheight=86",
"-fortcanningcrunchheight=88",
"-fortcanningspringheight=90",
"-fortcanninggreatworldheight=94",
"-grandcentralheight=101",
f"-nextnetworkupgradeheight={NEXT_NETWORK_UPGRADE_HEIGHT}",
"-rpc-governance-accept-neutral=1",
"-simulatemainnet=1",
],
]
def setup_masternodes(self, nMasternodes=19):
self.nodes[0].mns = []
self.operatorAddresses = []
for _ in range(nMasternodes):
address = self.nodes[0].getnewaddress("", "legacy")
self.nodes[0].mns.append(self.nodes[0].createmasternode(address))
self.operatorAddresses.append(address)
self.nodes[0].generate(1)
self.nodes[0].generate(20) # Enables all MNs
self.sync_blocks(timeout=120)
# Restart the node with the masternode_operator addresses so we can mint with every MN
self.restart_node(
0,
self.nodes[0].extra_args
+ [
"-masternode_operator={}".format(address)
for address in self.operatorAddresses
],
)
# Mint with every MN to meet the voting eligibility criteria
for address in self.operatorAddresses:
self.nodes[0].generatetoaddress(1, address)
def setup(self):
# Generate chain
self.nodes[0].generate(100)
self.sync_blocks(timeout=120)
self.setup_masternodes()
# activate on-chain governance
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/params/feature/gov": "true",
"v0/gov/proposals/voting_period": "{}".format(VOTING_PERIOD),
}
}
)
self.nodes[0].generate(1)
def test_vote_on_cfp(self, yesVote, noVote, neutralVote, expectedStatus):
height = self.nodes[0].getblockcount()
# Create address for CFP
address = self.nodes[0].getnewaddress()
context = "<Git issue url>"
title = "Create test community fund proposal"
amount = 100
# Create CFP
propId = self.nodes[0].creategovcfp(
{
"title": title,
"context": context,
"amount": amount,
"cycles": 1,
"payoutAddress": address,
}
)
self.nodes[0].generate(1)
mnIterator = iter(self.nodes[0].mns)
for _ in range(yesVote):
mnId = next(mnIterator)
self.nodes[0].votegov(propId, mnId, "yes")
for _ in range(noVote):
mnId = next(mnIterator)
self.nodes[0].votegov(propId, mnId, "no")
for _ in range(neutralVote):
mnId = next(mnIterator)
self.nodes[0].votegov(propId, mnId, "neutral")
self.nodes[0].generate(1)
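# Mine two full voting periods so the proposal cycle ends and its final
# status is recorded (the two-period margin is our reading of this test).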
self.nodes[0].generate(VOTING_PERIOD * 2)
proposal = self.nodes[0].getgovproposal(propId)
assert_equal(proposal["status"], expectedStatus)
self.rollback_to(height)
def test_vote_on_cfp_with_address(
self, yesVote, noVote, neutralVote, expectedStatus
):
height = self.nodes[0].getblockcount()
# Create address for CFP
address = self.nodes[0].getnewaddress()
context = "<Git issue url>"
title = "Create test community fund proposal"
amount = 100
# Create CFP
propId = self.nodes[0].creategovcfp(
{
"title": title,
"context": context,
"amount": amount,
"cycles": 1,
"payoutAddress": address,
}
)
self.nodes[0].generate(1)
addressIterator = iter(self.operatorAddresses)
for _ in range(yesVote):
mnId = next(addressIterator)
self.nodes[0].votegov(propId, mnId, "yes")
for _ in range(noVote):
mnId = next(addressIterator)
self.nodes[0].votegov(propId, mnId, "no")
for _ in range(neutralVote):
mnId = next(addressIterator)
self.nodes[0].votegov(propId, mnId, "neutral")
self.nodes[0].generate(1)
self.nodes[0].generate(VOTING_PERIOD * 2)
proposal = self.nodes[0].getgovproposal(propId)
assert_equal(proposal["status"], expectedStatus)
self.rollback_to(height)
def test_vote_with_address_without_masternode(self):
# Create address for CFP
address = self.nodes[0].getnewaddress()
context = "<Git issue url>"
title = "Create test community fund proposal"
amount = 100
# Create CFP
propId = self.nodes[0].creategovcfp(
{
"title": title,
"context": context,
"amount": amount,
"cycles": 1,
"payoutAddress": address,
}
)
self.nodes[0].generate(1)
address = self.nodes[0].getnewaddress("", "legacy")
assert_raises_rpc_error(
-8,
"The masternode does not exist or the address doesn't own a masternode: {}".format(
address
),
self.nodes[0].votegov,
propId,
address,
"yes",
)
def test_vote_with_invalid_address(self):
# Create address for CFP
address = self.nodes[0].getnewaddress()
context = "<Git issue url>"
title = "Create test community fund proposal"
amount = 100
# Create CFP
propId = self.nodes[0].creategovcfp(
{
"title": title,
"context": context,
"amount": amount,
"cycles": 1,
"payoutAddress": address,
}
)
self.nodes[0].generate(1)
address = "fake_address"
assert_raises_rpc_error(
-8,
"The masternode id or address is not valid: {}".format(address),
self.nodes[0].votegov,
propId,
address,
"yes",
)
def test_scenario_below_approval_threshold(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=4, noVote=6, neutralVote=2, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=4, noVote=6, neutralVote=2, expectedStatus=expectedStatus
)
def test_scenario_at_approval_threshold(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=8, noVote=8, neutralVote=0, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=8, noVote=8, neutralVote=0, expectedStatus=expectedStatus
)
def test_scenario_above_approval_threshold(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=10, noVote=6, neutralVote=2, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=10, noVote=6, neutralVote=2, expectedStatus=expectedStatus
)
def test_scenario_below_quorum(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=6, noVote=2, neutralVote=1, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=6, noVote=2, neutralVote=1, expectedStatus=expectedStatus
)
def test_scenario_at_quorum(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=6, noVote=2, neutralVote=2, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=6, noVote=2, neutralVote=2, expectedStatus=expectedStatus
)
def test_scenario_above_quorum(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=6, noVote=3, neutralVote=2, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=6, noVote=3, neutralVote=2, expectedStatus=expectedStatus
)
def test_scenario_high_neutral_vote(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=8, noVote=3, neutralVote=5, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=8, noVote=3, neutralVote=5, expectedStatus=expectedStatus
)
def test_scenario_only_yes_and_neutral(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=8, noVote=0, neutralVote=8, expectedStatus=expectedStatus
)
self.test_vote_on_cfp_with_address(
yesVote=8, noVote=0, neutralVote=8, expectedStatus=expectedStatus
)
def test_scenario_66_6_percent_approval_full_yes_votes(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=len(self.nodes[0].mns),
noVote=0,
neutralVote=0,
expectedStatus=expectedStatus,
)
self.test_vote_on_cfp_with_address(
yesVote=len(self.nodes[0].mns),
noVote=0,
neutralVote=0,
expectedStatus=expectedStatus,
)
def test_scenario_66_6_percent_approval_full_no_votes(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=0,
noVote=len(self.nodes[0].mns),
neutralVote=0,
expectedStatus=expectedStatus,
)
self.test_vote_on_cfp_with_address(
yesVote=0,
noVote=len(self.nodes[0].mns),
neutralVote=0,
expectedStatus=expectedStatus,
)
def test_scenario_66_6_percent_approval_full_neutral_votes(self, expectedStatus):
self.test_vote_on_cfp(
yesVote=0,
noVote=0,
neutralVote=len(self.nodes[0].mns),
expectedStatus=expectedStatus,
)
self.test_vote_on_cfp_with_address(
yesVote=0,
noVote=0,
neutralVote=len(self.nodes[0].mns),
expectedStatus=expectedStatus,
)
def scenarios_test(self):
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/cfp_approval_threshold": "{}%".format(
APPROVAL_THRESHOLD
),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_below_approval_threshold(expectedStatus="Rejected")
self.test_scenario_at_approval_threshold(expectedStatus="Rejected")
self.test_scenario_above_approval_threshold(expectedStatus="Completed")
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/quorum": "{}%".format(QUORUM),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_below_quorum(expectedStatus="Rejected")
self.test_scenario_at_quorum(expectedStatus="Rejected")
self.test_scenario_above_quorum(expectedStatus="Completed")
# Currently marked as Rejected as neutral votes are incorrectly counted as no
# Should assert that it's Completed once https://github.com/DeFiCh/ain/issues/1704 is fixed
self.test_scenario_high_neutral_vote(expectedStatus="Rejected")
# Currently marked as Rejected as neutral votes are incorrectly counted as no
# Should assert that it's Completed once https://github.com/DeFiCh/ain/issues/1704 is fixed
self.test_scenario_only_yes_and_neutral(expectedStatus="Rejected")
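# Worked arithmetic for the two scenarios above (our reading of the pre-fix
# behaviour): with neutral counted as no, high_neutral_vote gives
# 8 yes / (8 + 3 + 5) = 50% and only_yes_and_neutral gives
# 8 yes / (8 + 0 + 8) = 50%; neither strictly exceeds the 50% approval
# threshold, hence "Rejected". Once neutral votes are excluded, the ratios
# become 8/11 ~ 72.7% and 8/8 = 100%, which is why
# scenarios_neutral_votes_not_counted_test below expects "Completed".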
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/cfp_approval_threshold": "{}%".format(66.6),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_66_6_percent_approval_full_yes_votes(
expectedStatus="Completed"
)
self.test_scenario_66_6_percent_approval_full_no_votes(
expectedStatus="Rejected"
)
self.test_scenario_66_6_percent_approval_full_neutral_votes(
expectedStatus="Rejected"
)
def scenarios_neutral_votes_not_counted_test(self):
self.nodes[0].generate(
NEXT_NETWORK_UPGRADE_HEIGHT - self.nodes[0].getblockcount()
)
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/cfp_approval_threshold": "{}%".format(
APPROVAL_THRESHOLD
),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_below_approval_threshold(expectedStatus="Rejected")
self.test_scenario_at_approval_threshold(expectedStatus="Rejected")
self.test_scenario_above_approval_threshold(expectedStatus="Completed")
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/quorum": "{}%".format(QUORUM),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_below_quorum(expectedStatus="Rejected")
self.test_scenario_at_quorum(expectedStatus="Rejected")
self.test_scenario_above_quorum(expectedStatus="Completed")
# Now it should be Completed after neutral votes fix
self.test_scenario_high_neutral_vote(expectedStatus="Completed")
# Now it should be Completed after neutral votes fix
self.test_scenario_only_yes_and_neutral(expectedStatus="Completed")
self.nodes[0].setgov(
{
"ATTRIBUTES": {
"v0/gov/proposals/cfp_approval_threshold": "{}%".format(66.6),
}
}
)
self.nodes[0].generate(1)
self.test_scenario_66_6_percent_approval_full_yes_votes(
expectedStatus="Completed"
)
self.test_scenario_66_6_percent_approval_full_no_votes(
expectedStatus="Rejected"
)
self.test_scenario_66_6_percent_approval_full_neutral_votes(
expectedStatus="Rejected"
)
def run_test(self):
self.setup()
self.scenarios_test()
self.test_vote_with_address_without_masternode()
self.test_vote_with_invalid_address()
self.scenarios_neutral_votes_not_counted_test()
if __name__ == "__main__":
OCGVotingScenarioTest().main()
|
645c41b6ceb696495e07eb1bde25a2c123a87335
|
3225f903ab65e9e242f250a02b3b74fc7e34b098
|
/targets/pipistrello/crg.py
|
daffd3eb9d17b75d978e2f1bee32591c19a1b0b6
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
timvideos/litex-buildenv
|
6f7657a0d2836f66bbc8ad29f89f1f975eda7832
|
fafb0f1c645a0d36ff4947686b4d2d8e3433f686
|
refs/heads/master
| 2022-06-04T18:02:22.567089
| 2022-05-21T15:21:25
| 2022-05-21T15:21:25
| 115,785,208
| 217
| 88
|
BSD-2-Clause
| 2022-05-21T15:21:26
| 2017-12-30T08:50:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,880
|
py
|
crg.py
|
# Support for the Pipistrello - http://pipistrello.saanlima.com/
from fractions import Fraction
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
class _CRG(Module):
def __init__(self, platform, clk_freq):
# Clock domains for the system (soft CPU and related components run at).
self.clock_domains.cd_sys = ClockDomain()
# Clock domains for the DDR interface.
self.clock_domains.cd_sdram_half = ClockDomain()
self.clock_domains.cd_sdram_full_wr = ClockDomain()
self.clock_domains.cd_sdram_full_rd = ClockDomain()
# Clock domain for peripherals (such as HDMI output).
self.clock_domains.cd_base50 = ClockDomain()
self.reset = Signal()
# Input 50MHz clock
f0 = 50*1000000
clk50 = platform.request("clk50")
clk50a = Signal()
# Input 50MHz clock (buffered)
self.specials += Instance("IBUFG", i_I=clk50, o_O=clk50a)
clk50b = Signal()
self.specials += Instance(
"BUFIO2", p_DIVIDE=1,
p_DIVIDE_BYPASS="TRUE", p_I_INVERT="FALSE",
i_I=clk50a, o_DIVCLK=clk50b)
p = 12
f = Fraction(clk_freq*p, f0)
n, d = f.numerator, f.denominator
assert 19e6 <= f0/d <= 500e6 # pfd
assert 400e6 <= f0*n/d <= 1080e6 # vco
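# Worked example (illustrative; the actual clk_freq is a constructor
# argument): with clk_freq = 75_000_000, f = Fraction(75_000_000 * 12,
# 50_000_000) = 18, so n = 18 and d = 1. The PFD then runs at f0/d = 50 MHz
# and the VCO at f0*n/d = 900 MHz, both inside the asserted ranges. The
# dividers below yield sysclk = vco/p = 75 MHz, sdram_full = vco/(p//4) =
# 300 MHz and sdram_half = vco/(p//2) = 150 MHz; the parenthesised MHz
# figures in the PLL instance comments assume a different clk_freq.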
# Unbuffered output signals from the PLL. They need to be buffered
# before feeding into the fabric.
unbuf_sdram_full = Signal()
unbuf_sdram_half_a = Signal()
unbuf_sdram_half_b = Signal()
unbuf_unused = Signal()
unbuf_sys = Signal()
unbuf_periph = Signal()
# PLL signals
pll_lckd = Signal()
pll_fb = Signal()
self.specials.pll = Instance(
"PLL_ADV",
name="crg_pll_adv",
p_SIM_DEVICE="SPARTAN6", p_BANDWIDTH="OPTIMIZED", p_COMPENSATION="INTERNAL",
p_REF_JITTER=.01,
i_DADDR=0, i_DCLK=0, i_DEN=0, i_DI=0, i_DWE=0, i_RST=0, i_REL=0,
p_DIVCLK_DIVIDE=d,
# Input Clocks (50MHz)
i_CLKIN1=clk50b,
p_CLKIN1_PERIOD=1e9/f0,
i_CLKIN2=0,
p_CLKIN2_PERIOD=0.,
i_CLKINSEL=1,
# Feedback
i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb, o_LOCKED=pll_lckd,
p_CLK_FEEDBACK="CLKFBOUT",
p_CLKFBOUT_MULT=n, p_CLKFBOUT_PHASE=0.,
# (333MHz) sdram wr rd
o_CLKOUT0=unbuf_sdram_full, p_CLKOUT0_DUTY_CYCLE=.5,
p_CLKOUT0_PHASE=0., p_CLKOUT0_DIVIDE=p//4,
# unused?
o_CLKOUT1=unbuf_unused, p_CLKOUT1_DUTY_CYCLE=.5,
p_CLKOUT1_PHASE=0., p_CLKOUT1_DIVIDE=15,
# (166MHz) sdram_half - sdram dqs adr ctrl
o_CLKOUT2=unbuf_sdram_half_a, p_CLKOUT2_DUTY_CYCLE=.5,
p_CLKOUT2_PHASE=270., p_CLKOUT2_DIVIDE=p//2,
# (166MHz) off-chip ddr
o_CLKOUT3=unbuf_sdram_half_b, p_CLKOUT3_DUTY_CYCLE=.5,
p_CLKOUT3_PHASE=250., p_CLKOUT3_DIVIDE=p//2,
# ( 50MHz) periph
o_CLKOUT4=unbuf_periph, p_CLKOUT4_DUTY_CYCLE=.5,
p_CLKOUT4_PHASE=0., p_CLKOUT4_DIVIDE=20,
# ( 83MHz) sysclk
o_CLKOUT5=unbuf_sys, p_CLKOUT5_DUTY_CYCLE=.5,
p_CLKOUT5_PHASE=0., p_CLKOUT5_DIVIDE=p//1,
)
# Power-on reset: hold reset for 2**11 clock cycles after startup; also
# asserted by the user button or self.reset.
reset = platform.request("user_btn") | self.reset
self.clock_domains.cd_por = ClockDomain()
por = Signal(max=1 << 11, reset=(1 << 11) - 1)
self.sync.por += If(por != 0, por.eq(por - 1))
self.specials += AsyncResetSynchronizer(self.cd_por, reset)
# System clock (runs at the requested clk_freq)
self.specials += Instance("BUFG", i_I=unbuf_sys, o_O=self.cd_sys.clk)
self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~pll_lckd | (por > 0))
# SDRAM clocks
# ------------------------------------------------------------------------------
self.clk4x_wr_strb = Signal()
self.clk4x_rd_strb = Signal()
# sdram_full
self.specials += Instance("BUFPLL", name="sdram_full_bufpll",
p_DIVIDE=4,
i_PLLIN=unbuf_sdram_full, i_GCLK=self.cd_sys.clk,
i_LOCKED=pll_lckd,
o_IOCLK=self.cd_sdram_full_wr.clk,
o_SERDESSTROBE=self.clk4x_wr_strb)
self.comb += [
self.cd_sdram_full_rd.clk.eq(self.cd_sdram_full_wr.clk),
self.clk4x_rd_strb.eq(self.clk4x_wr_strb),
]
# sdram_half
self.specials += Instance("BUFG", name="sdram_half_a_bufpll", i_I=unbuf_sdram_half_a, o_O=self.cd_sdram_half.clk)
clk_sdram_half_shifted = Signal()
self.specials += Instance("BUFG", name="sdram_half_b_bufpll", i_I=unbuf_sdram_half_b, o_O=clk_sdram_half_shifted)
clk = platform.request("ddram_clock")
self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
p_INIT=0, p_SRTYPE="SYNC",
i_D0=1, i_D1=0, i_S=0, i_R=0, i_CE=1,
i_C0=clk_sdram_half_shifted,
i_C1=~clk_sdram_half_shifted,
o_Q=clk.p)
self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
p_INIT=0, p_SRTYPE="SYNC",
i_D0=0, i_D1=1, i_S=0, i_R=0, i_CE=1,
i_C0=clk_sdram_half_shifted, i_C1=~clk_sdram_half_shifted,
o_Q=clk.n)
# Peripheral clock - 50MHz
# ------------------------------------------------------------------------------
# The peripheral clock is kept separate from the system clock to allow
# the system clock to be increased in the future.
dcm_base50_locked = Signal()
self.specials += [
Instance("DCM_CLKGEN", name="crg_periph_dcm_clkgen",
p_CLKIN_PERIOD=20.0,
p_CLKFX_MULTIPLY=2,
p_CLKFX_DIVIDE=2,
p_CLKFX_MD_MAX=1.0, # CLKFX_MULTIPLY/CLKFX_DIVIDE
p_CLKFXDV_DIVIDE=2,
p_SPREAD_SPECTRUM="NONE",
p_STARTUP_WAIT="FALSE",
i_CLKIN=clk50a,
o_CLKFX=self.cd_base50.clk,
o_LOCKED=dcm_base50_locked,
i_FREEZEDCM=0,
i_RST=ResetSignal(),
),
AsyncResetSynchronizer(self.cd_base50,
self.cd_sys.rst | ~dcm_base50_locked)
]
platform.add_period_constraint(self.cd_base50.clk, 20)
|
6e3324b89dd389b7929a0011aca8bd925f0cc770
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py
|
1c2f5c6a7204ca7918540349bdc9cb4d1d0fa24b
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
d42c175b439a_backfill_state_id_and_make_it_unique.py
|
"""
Backfill state_id and make it unique.
Revision ID: d42c175b439a
Revises: 3e8cc74a1e7b
Create Date: 2017-01-18 15:11:01.635632
"""
# revision identifiers, used by Alembic.
revision = "d42c175b439a"
down_revision = "3e8cc74a1e7b"
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(op, tables, tester):
# Backfill the queueitem table's state_id field with unique values for all entries which are
# empty.
conn = op.get_bind()
conn.execute("update queueitem set state_id = id where state_id = ''")
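# The primary key is unique by construction, so copying `id` into the empty
# `state_id` fields guarantees no duplicates exist before the unique index
# below is created.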
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("queueitem_state_id", table_name="queueitem")
op.create_index("queueitem_state_id", "queueitem", ["state_id"], unique=True)
# ### end Alembic commands ###
def downgrade(op, tables, tester):
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("queueitem_state_id", table_name="queueitem")
op.create_index("queueitem_state_id", "queueitem", ["state_id"], unique=False)
# ### end Alembic commands ###
|
498ab35ba593fbc10c92e11427c3cea407fa16c0
|
8a8d68aed7901ef5c400d54c9ff289423780c009
|
/pyppeteer/tracing.py
|
3d9ff22f1f1c182441054aae59fa322cb90f6db7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
pyppeteer/pyppeteer
|
fef4207aa83b7754d07232897b7d88b2d40bd8da
|
4e88a900bff77b63e62b88bed5afd5f78902db27
|
refs/heads/dev
| 2023-08-06T19:41:43.889422
| 2023-02-16T00:17:06
| 2023-02-16T00:17:06
| 237,223,760
| 3,112
| 332
|
NOASSERTION
| 2023-02-16T00:17:08
| 2020-01-30T13:54:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
tracing.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tracing module."""
from pathlib import Path
from typing import Any
from pyppeteer.connection import CDPSession
from pyppeteer.util import merge_dict
class Tracing(object):
"""Tracing class.
You can use :meth:`start` and :meth:`stop` to create a trace file which can
be opened in Chrome DevTools or
`timeline viewer <https://chromedevtools.github.io/timeline-viewer/>`_.
.. code::
await page.tracing.start({'path': 'trace.json'})
await page.goto('https://www.google.com')
await page.tracing.stop()
"""
def __init__(self, client: CDPSession) -> None:
self._client = client
self._recording = False
self._path = ''
async def start(self, options: dict = None, **kwargs: Any) -> None:
"""Start tracing.
Only one trace can be active at a time per browser.
This method accepts the following options:
* ``path`` (str): A path to write the trace file to.
* ``screenshots`` (bool): Capture screenshots in the trace.
* ``categories`` (List[str]): Specify custom categories to use instead
of default.
"""
options = merge_dict(options, kwargs)
defaultCategories = [
'-*', 'devtools.timeline', 'v8.execute',
'disabled-by-default-devtools.timeline',
'disabled-by-default-devtools.timeline.frame', 'toplevel',
'blink.console', 'blink.user_timing', 'latencyInfo',
'disabled-by-default-devtools.timeline.stack',
'disabled-by-default-v8.cpu_profiler',
'disabled-by-default-v8.cpu_profiler.hires',
]
categoriesArray = options.get('categories', defaultCategories)
if options.get('screenshots'):  # only when explicitly truthy, per the docstring
categoriesArray.append('disabled-by-default-devtools.screenshot')
self._path = options.get('path', '')
self._recording = True
await self._client.send('Tracing.start', {
'transferMode': 'ReturnAsStream',
'categories': ','.join(categoriesArray),
})
async def stop(self) -> str:
"""Stop tracing.
:return: trace data as string.
"""
contentPromise = self._client._loop.create_future()
self._client.once(
'Tracing.tracingComplete',
lambda event: self._client._loop.create_task(
self._readStream(event.get('stream'), self._path)
).add_done_callback(
lambda fut: contentPromise.set_result(fut.result())
)
)
await self._client.send('Tracing.end')
self._recording = False
return await contentPromise
async def _readStream(self, handle: str, path: str) -> str:
# might be better to return as bytes
eof = False
bufs = []
while not eof:
response = await self._client.send('IO.read', {'handle': handle})
eof = response.get('eof', False)
bufs.append(response.get('data', ''))
await self._client.send('IO.close', {'handle': handle})
result = ''.join(bufs)
if path:
file = Path(path)
with file.open('w', encoding='utf-8') as f:
f.write(result)
return result
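# Usage sketch (illustrative; assumes a pyppeteer browser can be launched in
# this environment):
#
#   import asyncio
#   from pyppeteer import launch
#
#   async def main():
#       browser = await launch()
#       page = await browser.newPage()
#       await page.tracing.start({'path': 'trace.json', 'screenshots': True})
#       await page.goto('https://example.com')
#       data = await page.tracing.stop()  # trace data; also written to trace.json
#       await browser.close()
#
#   asyncio.get_event_loop().run_until_complete(main())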
|
e7dff3e7dbc52c57c57e41f1a1d531e9c2f600fd
|
511144d69b15df3f4bad6dc2fbee2ab6adc457d4
|
/action/main.py
|
c0af3930dbbe6f31745e941712e81df3ddb2cae3
|
[
"MIT"
] |
permissive
|
psf/black
|
7345769d7932d474cb05ca62a73a940608728794
|
47676bf5939ae5c8e670d947917bc8af4732eab6
|
refs/heads/main
| 2023-09-02T12:44:37.458104
| 2023-08-26T13:44:17
| 2023-08-26T13:44:17
| 125,266,328
| 23,453
| 2,531
|
MIT
| 2023-09-13T07:40:43
| 2018-03-14T19:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,521
|
py
|
main.py
|
import os
import shlex
import shutil
import sys
from pathlib import Path
from subprocess import PIPE, STDOUT, run
ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"])
ENV_PATH = ACTION_PATH / ".black-env"
ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
OPTIONS = os.getenv("INPUT_OPTIONS", default="")
SRC = os.getenv("INPUT_SRC", default="")
JUPYTER = os.getenv("INPUT_JUPYTER") == "true"
BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
VERSION = os.getenv("INPUT_VERSION", default="")
run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True)
version_specifier = VERSION
if VERSION and VERSION[0] in "0123456789":
version_specifier = f"=={VERSION}"
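# For example (our reading of the rule above): VERSION="23.1.0" becomes the
# pip requirement "black[...]==23.1.0", while a specifier such as
# VERSION="~=23.1" already starts with an operator and is passed through
# unchanged.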
if JUPYTER:
extra_deps = "[colorama,jupyter]"
else:
extra_deps = "[colorama]"
if version_specifier:
req = f"black{extra_deps}{version_specifier}"
else:
describe_name = ""
with open(ACTION_PATH / ".git_archival.txt", encoding="utf-8") as fp:
for line in fp:
if line.startswith("describe-name: "):
describe_name = line[len("describe-name: ") :].rstrip()
break
if not describe_name:
print("::error::Failed to detect action version.", file=sys.stderr, flush=True)
sys.exit(1)
# expected format is one of:
# - 23.1.0
# - 23.1.0-51-g448bba7
if describe_name.count("-") < 2:
# the action's commit matches a tag exactly, install exact version from PyPI
req = f"black{extra_deps}=={describe_name}"
else:
# the action's commit does not match any tag, install from the local git repo
req = f".{extra_deps}"
print(f"Installing {req}...", flush=True)
pip_proc = run(
[str(ENV_BIN / "python"), "-m", "pip", "install", req],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
cwd=ACTION_PATH,
)
if pip_proc.returncode:
print(pip_proc.stdout)
print("::error::Failed to install Black.", file=sys.stderr, flush=True)
sys.exit(pip_proc.returncode)
base_cmd = [str(ENV_BIN / "black")]
if BLACK_ARGS:
# TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS.
proc = run(
[*base_cmd, *shlex.split(BLACK_ARGS)],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
)
else:
proc = run(
[*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
)
shutil.rmtree(ENV_PATH, ignore_errors=True)
print(proc.stdout)
sys.exit(proc.returncode)
|
1dc74d4fc82ee464b2ae7eaaad9929ed0cc73c1f
|
9ca55981d3245d87d45debce8e9825b60db43046
|
/chemicals/reaction.py
|
87cafe7a6a8525cb05e306a40775c16dd6d7e2d3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CalebBell/chemicals
|
c6b1ebd409c32e0e1053c4f97668a8ebcc92b969
|
37e32a7c7f819e0cb8e2a8784f8448f68b9a4215
|
refs/heads/master
| 2023-07-25T23:34:17.754310
| 2023-07-25T02:00:14
| 2023-07-25T02:00:14
| 264,697,738
| 137
| 33
|
MIT
| 2022-06-05T18:21:02
| 2020-05-17T15:27:11
|
Python
|
UTF-8
|
Python
| false
| false
| 41,468
|
py
|
reaction.py
|
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Copyright (C) 2020 Yoel Rene Cortes-Pena <yoelcortes@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains lookup functions for enthalpies and standard entropies of
formation. Lookup functions are available for the liquid, solid, and gas states.
A compound may appear in more than one lookup function.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
Solid Heat of Formation
-----------------------
.. autofunction:: chemicals.reaction.Hfs
.. autofunction:: chemicals.reaction.Hfs_methods
.. autodata:: chemicals.reaction.Hfs_all_methods
Liquid Heat of Formation
------------------------
.. autofunction:: chemicals.reaction.Hfl
.. autofunction:: chemicals.reaction.Hfl_methods
.. autodata:: chemicals.reaction.Hfl_all_methods
Gas Heat of Formation
---------------------
.. autofunction:: chemicals.reaction.Hfg
.. autofunction:: chemicals.reaction.Hfg_methods
.. autodata:: chemicals.reaction.Hfg_all_methods
Solid Absolute Entropy
----------------------
.. autofunction:: chemicals.reaction.S0s
.. autofunction:: chemicals.reaction.S0s_methods
.. autodata:: chemicals.reaction.S0s_all_methods
Liquid Absolute Entropy
-----------------------
.. autofunction:: chemicals.reaction.S0l
.. autofunction:: chemicals.reaction.S0l_methods
.. autodata:: chemicals.reaction.S0l_all_methods
Gas Absolute Entropy
--------------------
.. autofunction:: chemicals.reaction.S0g
.. autofunction:: chemicals.reaction.S0g_methods
.. autodata:: chemicals.reaction.S0g_all_methods
Utility Functions
-----------------
.. autofunction:: chemicals.reaction.Gibbs_formation
.. autofunction:: chemicals.reaction.entropy_formation
.. autofunction:: chemicals.reaction.Hf_basis_converter
Chemical Reactions
------------------
.. autofunction:: chemicals.reaction.balance_stoichiometry
.. autofunction:: chemicals.reaction.stoichiometric_matrix
.. autofunction:: chemicals.reaction.stoichiometry_mass_to_molar
.. autofunction:: chemicals.reaction.stoichiometry_molar_to_mass
.. autofunction:: chemicals.reaction.stoichiometry_MW_error
.. autofunction:: chemicals.reaction.standard_formation_reaction
"""
__all__ = ['Hfg', 'Hfl', 'Hfs', 'S0g', 'S0l', 'S0s',
'Hfl_methods', 'Hfg_methods', 'Hfs_methods',
'S0l_methods', 'S0g_methods', 'S0s_methods',
'Hfl_all_methods', 'Hfg_all_methods', 'Hfs_all_methods',
'S0l_all_methods', 'S0g_all_methods', 'S0s_all_methods',
'Gibbs_formation', 'entropy_formation', 'Hf_basis_converter',
'balance_stoichiometry', 'stoichiometric_matrix',
'stoichiometry_molar_to_mass', 'stoichiometry_mass_to_molar',
'standard_formation_reaction', 'stoichiometry_MW_error']
from math import ceil, log10
from chemicals import data_reader as dr
from chemicals import heat_capacity, miscdata
from chemicals.data_reader import (
data_source,
database_constant_lookup,
list_available_methods_from_df_dict,
register_df_source,
retrieve_any_from_df_dict,
retrieve_from_df_dict,
)
from chemicals.elements import periodic_table, simple_formula_parser
from chemicals.utils import PY37, can_load_data, mark_numba_incompatible, os_path_join, source_path
# %% Register data sources and lazy load them
CRC = 'CRC'
YAWS = 'YAWS'
API_TDB_G = 'API_TDB_G'
ATCT_L = 'ATCT_L'
ATCT_G = 'ATCT_G'
TRC = 'TRC'
folder = os_path_join(source_path, 'Reactions')
register_df_source(folder, 'API TDB Albahri Hf (g).tsv')
register_df_source(folder, 'ATcT 1.112 (g).tsv')
register_df_source(folder, 'ATcT 1.112 (l).tsv')
register_df_source(folder, 'Yaws Hf S0 (g).tsv')
register_df_source(folder, 'JANAF_1998.tsv')
_reaction_data_loaded = False
def _load_reaction_data():
global Hfg_API_TDB_data, Hfg_ATcT_data, Hfl_ATcT_data, Hfg_S0g_YAWS_data
global Hfg_sources, Hfl_sources, Hfs_sources
global S0g_sources, S0l_sources, S0s_sources
global _reaction_data_loaded
Hfg_API_TDB_data = data_source('API TDB Albahri Hf (g).tsv')
Hfg_ATcT_data = data_source('ATcT 1.112 (g).tsv')
Hfl_ATcT_data = data_source('ATcT 1.112 (l).tsv')
Hfg_S0g_YAWS_data = data_source('Yaws Hf S0 (g).tsv')
JANAF_1998_data = data_source('JANAF_1998.tsv')
_reaction_data_loaded = True
S0g_sources = {
CRC: heat_capacity.CRC_standard_data,
miscdata.WEBBOOK: miscdata.webbook_data,
miscdata.JANAF: JANAF_1998_data,
YAWS: Hfg_S0g_YAWS_data,
}
S0l_sources = {
CRC: heat_capacity.CRC_standard_data,
miscdata.WEBBOOK: miscdata.webbook_data,
miscdata.JANAF: JANAF_1998_data,
}
S0s_sources = {
CRC: heat_capacity.CRC_standard_data,
miscdata.WEBBOOK: miscdata.webbook_data,
}
Hfg_sources = {
ATCT_G: Hfg_ATcT_data,
CRC: heat_capacity.CRC_standard_data,
API_TDB_G: Hfg_API_TDB_data,
miscdata.WEBBOOK: miscdata.webbook_data,
TRC: heat_capacity.TRC_gas_data,
miscdata.JANAF: JANAF_1998_data,
YAWS: Hfg_S0g_YAWS_data,
miscdata.JOBACK: miscdata.joback_predictions,
}
Hfl_sources = {
ATCT_L: Hfl_ATcT_data,
CRC: heat_capacity.CRC_standard_data,
miscdata.WEBBOOK: miscdata.webbook_data,
miscdata.JANAF: JANAF_1998_data,
}
Hfs_sources = {
CRC: heat_capacity.CRC_standard_data,
miscdata.WEBBOOK: miscdata.webbook_data,
}
if PY37:
def __getattr__(name):
if name in ('Hfg_API_TDB_data', 'Hfg_ATcT_data',
'Hfl_ATcT_data', 'Hfg_S0g_YAWS_data', 'JANAF_1998_data',
'Hfg_sources', 'Hfl_sources', 'Hfs_sources',
'S0g_sources', 'S0l_sources', 'S0s_sources'):
_load_reaction_data()
return globals()[name]
raise AttributeError(f"module {__name__} has no attribute {name}")
else:
if can_load_data:
_load_reaction_data()
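# Note: the PY37 branch above relies on module-level __getattr__ (PEP 562),
# so the data frames are only read from disk on first attribute access.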
# %% Lookup functions
# TODO: more data from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3692305/
# has dippr standard heats of formation, about 55% of the database
Hfs_all_methods = (CRC, miscdata.WEBBOOK)
"""Tuple of method name keys. See the `Hfs` for the actual references"""
@mark_numba_incompatible
def Hfs_methods(CASRN):
"""Return all methods available to obtain the solid-phase heat of
formation for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfs with the given
inputs.
See Also
--------
Hfs
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfs_sources, CASRN, 'Hfs')
@mark_numba_incompatible
def Hfs(CASRN, method=None):
    r'''This function handles the retrieval of a chemical's solid/crystalline
standard phase heat of formation. The lookup is based on CASRNs. Will
automatically select a data source to use if no method is provided; returns
None if the data is not available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfs : float
Solid standard-state heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
Hfs_methods
Notes
-----
Sources are:
* 'CRC', from the CRC handbook (1360 values) [1]_
* 'WEBBOOK' (2000 values) [2]_
Examples
--------
>>> Hfs('101-81-5') # Diphenylmethane
71500.0
See Also
--------
Hfs_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [2] Shen, V.K., Siderius, D.W., Krekelberg, W.P., and Hatch, H.W., Eds.,
NIST WebBook, NIST, http://doi.org/10.18434/T4M88Q
'''
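    # Lookup order: the in-memory constants database first (when enabled and
    # no specific method is forced), then the lazily-loaded dataframe sources.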
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'Hfs')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfs_sources, CASRN, 'Hfs', method)
else:
return retrieve_any_from_df_dict(Hfs_sources, CASRN, 'Hfs')
Hfl_all_methods = (ATCT_L, CRC, miscdata.WEBBOOK, miscdata.JANAF)
"""Tuple of method name keys. See the `Hfl` for the actual references"""
@mark_numba_incompatible
def Hfl_methods(CASRN):
"""Return all methods available to obtain the standard liquid-state heat
of formation for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfl with the given
inputs.
See Also
--------
Hfl
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfl_sources, CASRN, 'Hfl')
@mark_numba_incompatible
def Hfl(CASRN, method=None):
r'''This function handles the retrieval of a chemical's liquid standard
phase heat of formation. The lookup is based on CASRNs. Will automatically
select a data source to use if no method is provided; returns None if
the data is not available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfl : float
Liquid standard-state heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`Hfl_all_methods`.
Notes
-----
Sources are:
* 'ATCT_L', the Active Thermochemical Tables version 1.112. [1]_
* 'CRC', from the CRC handbook (1360 values) [2]_
    * 'WEBBOOK' (2000 values) [3]_
    * 'JANAF', the 1998 JANAF values online
Examples
--------
>>> Hfl('67-56-1')
-238400.0
See Also
--------
Hfl_methods
References
----------
.. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
Century." Journal of Physics: Conference Series 16, no. 1
(January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
.. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [3] Shen, V.K., Siderius, D.W., Krekelberg, W.P., and Hatch, H.W., Eds.,
NIST WebBook, NIST, http://doi.org/10.18434/T4M88Q
'''
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'Hfl')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfl_sources, CASRN, 'Hfl', method)
else:
return retrieve_any_from_df_dict(Hfl_sources, CASRN, 'Hfl')
Hfg_all_methods = (ATCT_G, TRC, CRC, miscdata.WEBBOOK, miscdata.JANAF, YAWS, miscdata.JOBACK)
"""Tuple of method name keys. See the `Hfg` for the actual references"""
@mark_numba_incompatible
def Hfg_methods(CASRN):
"""Return all methods available to obtain the gas phase heat of formation
for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the Hfg with the given
inputs.
See Also
--------
Hfg
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(Hfg_sources, CASRN, 'Hfg')
@mark_numba_incompatible
def Hfg(CASRN, method=None):
r'''This function handles the retrieval of a chemical's gas heat of
formation. Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
Hfg : float
Ideal gas phase heat of formation, [J/mol]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
Hfg_methods
Notes
-----
Function has data for approximately 8700 chemicals. Sources are:
* 'ATCT_G', the Active Thermochemical Tables version 1.112 (600 values) [1]_
* 'TRC', from a 1994 compilation (1750 values) [2]_
* 'CRC', from the CRC handbook (1360 values) [3]_
* 'WEBBOOK', a NIST resource [6]_ containing mostly experimental
and averaged values
* 'JANAF', the 1998 JANAF values online
* 'JOBACK', an estimation method for organic substances in [5]_
    * 'YAWS', a large compilation of values, mostly estimated (5000 values) [4]_
    'TRC' data may have come from computational procedures, for example pentane
    is off by 30%.
Examples
--------
>>> Hfg('67-56-1')
-200700.0
>>> Hfg('67-56-1', method='YAWS')
-200900.0
>>> Hfg('67-56-1', method='CRC')
-201000.0
>>> Hfg('67-56-1', method='TRC')
-190100.0
See Also
--------
Hfg_methods
References
----------
.. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti
Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.
Wagner. "Active Thermochemical Tables: Thermochemistry for the 21st
Century." Journal of Physics: Conference Series 16, no. 1
(January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.
.. [2] Frenkel`, M. L, Texas Engineering Experiment Station, and
Thermodynamics Research Center. Thermodynamics of Organic Compounds in
the Gas State. College Station, Tex.: Thermodynamics Research Center,
1994.
.. [3] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [4] Yaws, Carl L. Thermophysical Properties of Chemicals and
Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
Publishing, 2014.
.. [5] Joback, K.G., and R.C. Reid. "Estimation of Pure-Component
Properties from Group-Contributions." Chemical Engineering
Communications 57, no. 1-6 (July 1, 1987): 233-43.
doi:10.1080/00986448708960487.
.. [6] Shen, V.K., Siderius, D.W., Krekelberg, W.P., and Hatch, H.W., Eds.,
NIST WebBook, NIST, http://doi.org/10.18434/T4M88Q
'''
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'Hfg')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(Hfg_sources, CASRN, 'Hfg', method)
else:
return retrieve_any_from_df_dict(Hfg_sources, CASRN, 'Hfg')
S0s_all_methods = (CRC, miscdata.WEBBOOK)
"""Tuple of method name keys. See the `S0s` for the actual references"""
@mark_numba_incompatible
def S0s_methods(CASRN):
"""Return all methods available to obtain the absolute entropy of the
compound in the solid phase for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0s with the given
inputs.
See Also
--------
S0s
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0s_sources, CASRN, 'S0s')
@mark_numba_incompatible
def S0s(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the solid state. Lookup is based on CASRNs. Will automatically select a
data source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0s : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined by constants in
`S0s_all_methods`.
Notes
-----
Sources are:
* 'CRC' [1]_ from the CRC handbook (1360 values)
* 'WEBBOOK', a NIST resource [2]_ containing mostly experimental
and averaged values
Examples
--------
>>> S0s('7439-93-2') # Lithium
29.1
See Also
--------
S0s_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [2] Shen, V.K., Siderius, D.W., Krekelberg, W.P., and Hatch, H.W., Eds.,
NIST WebBook, NIST, http://doi.org/10.18434/T4M88Q
'''
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'S0s')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0s_sources, CASRN, 'S0s', method)
else:
return retrieve_any_from_df_dict(S0s_sources, CASRN, 'S0s')
S0l_all_methods = (CRC, miscdata.WEBBOOK, miscdata.JANAF)
"""Tuple of method name keys. See the `S0l` for the actual references"""
@mark_numba_incompatible
def S0l_methods(CASRN):
"""Return all methods available to obtain the absolute entropy for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0l with the given
inputs.
See Also
--------
S0l
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0l_sources, CASRN, 'S0l')
@mark_numba_incompatible
def S0l(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the liquid state.
Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0l : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`S0l_all_methods`.
Notes
-----
    Sources are:
    * 'CRC', from the CRC handbook [1]_
    * 'WEBBOOK', a NIST resource containing mostly experimental and
      averaged values
    * 'JANAF', the 1998 JANAF values online
Examples
--------
>>> S0l('7439-97-6') # Mercury
75.9
See Also
--------
S0l_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'S0l')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0l_sources, CASRN, 'S0l', method)
else:
return retrieve_any_from_df_dict(S0l_sources, CASRN, 'S0l')
S0g_all_methods = (CRC, miscdata.WEBBOOK, miscdata.JANAF, YAWS)
"""Tuple of method name keys. See the `S0g` for the actual references"""
@mark_numba_incompatible
def S0g_methods(CASRN):
"""Return all methods available to obtain the S0g for the desired chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain the S0g with the given
inputs.
See Also
--------
S0g
"""
if not _reaction_data_loaded: _load_reaction_data()
return list_available_methods_from_df_dict(S0g_sources, CASRN, 'S0g')
@mark_numba_incompatible
def S0g(CASRN, method=None):
r'''This function handles the retrieval of a chemical's absolute
entropy at a reference temperature of 298.15 K and pressure of 1 bar,
in the ideal gas state.
Lookup is based on CASRNs. Will automatically select a data
source to use if no method is provided; returns None if the data is not
available.
Parameters
----------
CASRN : str
CASRN [-]
Returns
-------
S0g : float
Ideal gas standard absolute entropy of compound, [J/mol/K]
Other Parameters
----------------
method : string, optional
A string for the method name to use, as defined in the variable,
`S0g_all_methods`
Notes
-----
Function has data for approximately 5400 chemicals. Sources are:
* 'CRC', from the CRC handbook (520 values)
    * 'YAWS', a large compilation of values, mostly estimated (4890 values)
    * 'WEBBOOK', a NIST resource [3]_ containing mostly experimental
      and averaged values
    * 'JANAF', the 1998 JANAF values online
Examples
--------
>>> S0g('67-56-1')
239.9
>>> S0g('67-56-1', method='YAWS')
239.88
See Also
--------
S0g_methods
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [2] Yaws, Carl L. Thermophysical Properties of Chemicals and
Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
Publishing, 2014.
.. [3] Shen, V.K., Siderius, D.W., Krekelberg, W.P., and Hatch, H.W., Eds.,
NIST WebBook, NIST, http://doi.org/10.18434/T4M88Q
'''
if dr.USE_CONSTANTS_DATABASE and method is None:
val, found = database_constant_lookup(CASRN, 'S0g')
if found: return val
if not _reaction_data_loaded: _load_reaction_data()
if method:
return retrieve_from_df_dict(S0g_sources, CASRN, 'S0g', method)
else:
return retrieve_any_from_df_dict(S0g_sources, CASRN, 'S0g')
# %% Converter functions
def Hf_basis_converter(Hvapm, Hf_liq=None, Hf_gas=None):
r'''This function converts a liquid or gas enthalpy of formation to the
other. This is useful, as thermodynamic packages often work with ideal-
gas as the reference state and require ideal-gas enthalpies of formation.
Parameters
----------
Hvapm : float
Molar enthalpy of vaporization of compound at 298.15 K or (unlikely)
the reference temperature, [J/mol]
Hf_liq : float, optional
Enthalpy of formation of the compound in its liquid state, [J/mol]
Hf_gas : float, optional
Enthalpy of formation of the compound in its ideal-gas state, [J/mol]
Returns
-------
Hf_calc : float, optional
Enthalpy of formation of the compound in the other state to the one
provided, [J/mol]
Examples
--------
Calculate the ideal-gas enthalpy of formation for water, from its standard-
state (liquid) value:
>>> Hf_basis_converter(44018, Hf_liq=-285830)
-241812
Calculate the standard-state (liquid) enthalpy of formation for water, from
its ideal-gas value:
>>> Hf_basis_converter(44018, Hf_gas=-241812)
-285830
'''
if Hf_liq is None and Hf_gas is None:
raise ValueError("Provide either a liquid or a gas enthalpy of formation")
if Hvapm is None or Hvapm < 0.0:
raise ValueError("Enthalpy of formation unknown or zero")
if Hf_liq is None:
return Hf_gas - Hvapm
else:
return Hf_liq + Hvapm
def Gibbs_formation(dHf, S0_abs, dHfs_std, S0_abs_elements, coeffs_elements,
T_ref=298.15):
r'''This function calculates the Gibbs free energy of formation of a
compound, from its constituent elements.
The calculated value will be for a "standard-state" value if `dHf` and
`S0_abs` are provided in the standard state; or it will be in an
"ideal gas" basis if they are both for an ideal gas. For compounds which
are gases at STP, the two values are the same.
Parameters
----------
dHf : float
Molar enthalpy of formation of the created compound, [J/mol]
S0_abs : float
Absolute molar entropy of the created compound at the reference
temperature, [J/mol/K]
dHfs_std : list[float]
List of standard molar enthalpies of formation of all elements used in
the formation of the created compound, [J/mol]
S0_abs_elements : list[float]
List of standard absolute molar entropies at the reference temperature
of all elements used in the formation of the created compound,
[J/mol/K]
coeffs_elements : list[float]
List of coefficients for each compound (i.e. 1 for C, 2 for H2 if the
target is methane), in the same order as `dHfs_std` and
`S0_abs_elements`, [-]
T_ref : float, optional
The standard state temperature, default 298.15 K; few values are
tabulated at other temperatures, [-]
Returns
-------
dGf : float
Gibbs free energy of formation for the created compound, [J/mol]
Notes
-----
    Be careful with elements like bromine - is the tabulated value for Br2 or
    Br?
Examples
--------
Calculate the standard-state Gibbs free energy of formation for water,
using water's standard state heat of formation and absolute entropy
at 298.15 K:
>>> Gibbs_formation(-285830, 69.91, [0, 0], [130.571, 205.147], [1, .5])
-237161.633825
Calculate the ideal-gas state Gibbs free energy of formation for water,
using water's ideal-gas state heat of formation and absolute entropy
at 298.15 K as a gas:
>>> Gibbs_formation(-241818, 188.825, [0, 0], [130.571, 205.147], [1, .5])
-228604.141075
Calculate the Gibbs free energy of formation for CBrF3 (it is a gas at STP,
so its standard-state and ideal-gas state values are the same) at 298.15 K:
>>> Gibbs_formation(-648980, 297.713, [0, 0, 0], [5.74, 152.206, 202.789], [1, .5, 1.5])
-622649.329975
Note in the above calculation that the Bromine's `S0` and `Hf` are for Br2;
and that the value for Bromine as a liquid, which is its standard state,
is used.
References
----------
.. [1] "Standard Gibbs Free Energy of Formation Calculations Chemistry
Tutorial." Accessed March, 2019. https://www.ausetute.com.au/gibbsform.html.
'''
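    # dGf = dH_rxn - T_ref*dS_rxn, where the "reaction" is formation of the
    # compound from its standard-state elements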
N = len(coeffs_elements)
dH = dHf
dS = S0_abs
for i in range(N):
dH -= dHfs_std[i]*coeffs_elements[i]
dS -= S0_abs_elements[i]*coeffs_elements[i]
return dH - T_ref*dS
def entropy_formation(Hf, Gf, T_ref=298.15):
r'''This function calculates the entropy of formation of a
compound, from its constituent elements.
The calculated value will be for a "standard-state" value if `Hf` and
`Gf` are provided in the standard state; or it will be in an
"ideal gas" basis if they are both for an ideal gas. For compounds which
are gases at STP, the two values are the same.
Parameters
----------
Hf : float
Molar enthalpy of formation of the compound, [J/mol]
Gf : float
Molar Gibbs free energy of formation of the compound, [J/mol]
T_ref : float, optional
The standard state temperature, default 298.15 K; few values are
tabulated at other temperatures, [-]
Returns
-------
S0 : float
Entropy of formation of the compound, [J/mol/K]
Examples
--------
Entropy of formation of methane:
>>> entropy_formation(Hf=-74520, Gf=-50490)
-80.59701492537314
Entropy of formation of water in ideal gas state:
>>> entropy_formation(Hf=-241818, Gf=-228572)
-44.427301693778304
'''
return (Hf - Gf)/T_ref
# %% Stoichiometry functions
@mark_numba_incompatible
def stoichiometric_matrix(atomss, reactants):
    r'''This function calculates a stoichiometric matrix of reactants and
    products, as required by a solver to compute the reaction
    coefficients.
Parameters
----------
atomss : list[dict[(str, float)]]
        A list of dictionaries of (element, element_count) pairs for each
chemical, [-]
reactants : list[bool]
List of booleans indicating whether each chemical is a reactant (True)
or a product (False), [-]
Returns
-------
matrix : list[list[float]]
Chemical reaction matrix for further processing; rows contain element
counts of each compound, and the columns represent each chemical, [-]
Notes
-----
The rows of the matrix contain the element counts of each compound,
and the columns represent each chemical.
Examples
--------
    MgO -> Mg + 1/2 O2
(k=1)
>>> stoichiometric_matrix([{'Mg': 1, 'O': 1}, {'Mg': 1}, {'O': 2}], [True, False, False])
[[1, -1, 0], [1, 0, -2]]
Cl2 + propylene -> allyl chloride + HCl
    >>> stoichiometric_matrix([{'Cl': 2}, {'C': 3, 'H': 6}, {'C': 3, 'Cl': 1, 'H': 5}, {'Cl': 1, 'H': 1}], [True, True, False, False])
[[0, 3, -3, 0], [2, 0, -1, -1], [0, 6, -5, -1]]
Al + 4HNO3 -> Al(NO3)3 + NO + 2H2O
(k=1)
>>> stoichiometric_matrix([{'Al': 1}, {'H': 1, 'N': 1, 'O': 3}, {'Al': 1, 'N': 3, 'O': 9}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False, False])
[[1, 0, -1, 0, 0], [0, 1, 0, 0, -2], [0, 1, -3, -1, 0], [0, 3, -9, -1, -1]]
4Fe + 3O2 -> 2(Fe2O3)
(k=2)
>>> stoichiometric_matrix([{'Fe': 1}, {'O': 2}, {'Fe':2, 'O': 3}], [True, True, False])
[[1, 0, -2], [0, 2, -3]]
4NH3 + 5O2 -> 4NO + 6(H2O)
(k=4)
>>> stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False])
[[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]]
No unique solution:
C2H5NO2 + C3H7NO3 + 2C6H14N4O2 + 3C5H9NO2 + 2C9H11NO2 -> 8H2O + C50H73N15O11
>>> stoichiometric_matrix([{'C': 2, 'H': 5, 'N': 1, 'O': 2}, {'C': 3, 'H': 7, 'N': 1, 'O': 3}, {'C': 6, 'H': 14, 'N': 4, 'O': 2}, {'C': 5, 'H': 9, 'N': 1, 'O': 2}, {'C': 9, 'H': 11, 'N': 1, 'O': 2}, {'H': 2, 'O': 1}, {'C': 50, 'H': 73, 'N': 15, 'O': 11}], [True, True, True, True, True, False, False])
[[2, 3, 6, 5, 9, 0, -50], [5, 7, 14, 9, 11, -2, -73], [1, 1, 4, 1, 1, 0, -15], [2, 3, 2, 2, 2, -1, -11]]
References
----------
.. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. "Chemical Equation
Balancing: An Integer Programming Approach." Mathematical and Computer
Modelling 44, no. 7 (October 1, 2006): 678-91.
https://doi.org/10.1016/j.mcm.2006.02.004.
.. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH
REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical
Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22.
'''
n_compounds = len(atomss)
elements = set()
for atoms in atomss:
elements.update(atoms.keys())
elements = list(elements)
elements.sort() # Ensure reproducibility
n_elements = len(elements)
matrix = [[0]*n_compounds for _ in range(n_elements)]
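    # Map each element to its matrix row so atom counts can be written
    # directly; reactants get positive counts and products negative ones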
element_to_row = {ele: matrix[idx] for idx, ele in enumerate(elements)}
for i, atoms in enumerate(atomss):
if reactants[i]:
for k, v in atoms.items():
element_to_row[k][i] = v
else:
for k, v in atoms.items():
element_to_row[k][i] = -v
return matrix
def balance_stoichiometry(matrix, rounding=9, allow_fractional=False):
r'''This function balances a chemical reaction.
Parameters
----------
matrix : list[list[float]]
Chemical reaction matrix for further processing; rows contain element
        counts of each compound, and the columns represent each chemical, [-]
    rounding : int, optional
        Number of digits of precision to use; fractions are limited to
        denominators of 10**rounding when scaling coefficients to
        integers, [-]
    allow_fractional : bool, optional
        Whether to return the coefficients normalized so the smallest is 1
        (possibly fractional) instead of scaling them up to integers, [-]
Returns
-------
coefficients : list[float]
Balanced coefficients; all numbers are positive, [-]
    Notes
    -----
    This algorithm relies on `scipy`.
    The behavior of this function for inputs which do not have a unique
    solution is undefined.
    This algorithm may suffer from floating point issues. If you believe there
    is an error in the result, please report your reaction to the developers.
    Examples
    --------
    Balance the reaction 4 NH3 + 5 O2 = 4 NO + 6 H2O, without knowing the
    coefficients:
    >>> matrix = stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False])
    >>> matrix
    [[3, 0, 0, -2], [1, 0, -1, 0], [0, 2, -1, -1]]
    >>> balance_stoichiometry(matrix)
    [4.0, 5.0, 4.0, 6.0]
    >>> balance_stoichiometry(matrix, allow_fractional=True)
    [1.0, 1.25, 1.0, 1.5]
References
----------
.. [1] Sen, S. K., Hans Agarwal, and Sagar Sen. "Chemical Equation
Balancing: An Integer Programming Approach." Mathematical and Computer
Modelling 44, no. 7 (October 1, 2006): 678-91.
https://doi.org/10.1016/j.mcm.2006.02.004.
.. [2] URAVNOTE, NOVOODKRITI PARADOKSI V. TEORIJI, and ENJA KEMIJSKIH
REAKCIJ. "New Discovered Paradoxes in Theory of Balancing Chemical
Reactions." Materiali in Tehnologije 45, no. 6 (2011): 503-22.
'''
import scipy.linalg
done = scipy.linalg.null_space(matrix)
if len(done[0]) > 1:
raise ValueError("No solution")
d = done[:, 0].tolist()
min_value_inv = 1.0/min(d)
d = [i*min_value_inv for i in d]
if not allow_fractional:
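        # Represent each coefficient as a Fraction and multiply through by
        # the remaining denominators until every coefficient is an integer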
from fractions import Fraction
max_denominator = 10**rounding
fs = [Fraction(x).limit_denominator(max_denominator=max_denominator) for x in d]
all_denominators = {i.denominator for i in fs}
if 1 in all_denominators:
all_denominators.remove(1)
for den in sorted(list(all_denominators), reverse=True):
fs = [num*den for num in fs]
if all(i.denominator == 1 for i in fs):
break
# May have gone too far
return [float(i) for i in fs]
# done = False
# for i in range(100):
# for c in d:
# ratio = c.as_integer_ratio()[1]
# if ratio != 1:
# d = [di*ratio for di in d]
# break
# done = True
# if done:
# break
#
# d_as_int = [int(i) for i in d]
# for i, j in zip(d, d_as_int):
# if i != j:
# raise ValueError("Could not find integer coefficients (%s, %s)" %(i, j))
# return d_as_int
else:
d = [round(i, rounding + int(ceil(log10(abs(i))))) for i in d]
return d
def stoichiometry_molar_to_mass(coefficients, MWs):
r'''This function translates molar stoichiometric
coefficients (most commonly used) into less commonly
used mass-based stoichiometric coefficients.
Parameters
----------
coefficients : list[float]
Molar balanced stoichiometric coefficients; all numbers are positive, [-]
MWs : list[float]
Molecular weights of all species in reaction ordered in
the same way as the coefficients, [g/mol]
Returns
-------
mass_coefficients : list[float]
Mass-based balanced coefficients; all numbers are positive, [-]
Notes
-----
    Note that mass-based reactions are usually not normalized to integers.
    Mass-based coefficients are useful for components that don't have
    well-defined formulas.
    Examples
    --------
    Calculate the mass-based coefficients for the reaction
    4 NH3 + 5 O2 = 4 NO + 6 H2O:
>>> matrix = stoichiometric_matrix([{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False])
>>> coeffs = balance_stoichiometry(matrix)
>>> stoichiometry_molar_to_mass(coeffs, [17.03052, 31.9988, 30.0061, 18.01528])
[68.12208, 159.994, 120.0244, 108.09168]
'''
return [c*MW for c, MW in zip(coefficients, MWs)]
def stoichiometry_mass_to_molar(mass_coefficients, MWs):
r'''This function translates mass stoichiometric coefficients into the
more commonly used mole-based stoichiometric coefficients.
Parameters
----------
mass_coefficients : list[float]
Mass-based balanced coefficients; all numbers are positive, [-]
MWs : list[float]
Molecular weights of all species in reaction ordered in
the same way as the coefficients, [g/mol]
Returns
-------
coefficients : list[float]
Molar balanced stoichiometric coefficients; all numbers are positive, [-]
    Examples
    --------
>>> stoichiometry_mass_to_molar([68.12208, 159.994, 120.0244, 108.09168], [17.03052, 31.9988, 30.0061, 18.01528])
[4.0, 5.0, 4.0, 6.0]
'''
return [c/MW for c, MW in zip(mass_coefficients, MWs)]
def stoichiometry_MW_error(coefficients, MWs, reactants):
r'''This function calculates the molecular weight imbalance
of a reaction given the coefficients and molecular weights of
    the involved components, and their statuses as reactants or products.
Parameters
----------
coefficients : list[float]
Molar balanced stoichiometric coefficients; all numbers are positive, [-]
MWs : list[float]
Molecular weights of all species in reaction ordered in
the same way as the coefficients, [g/mol]
reactants : list[bool]
List of booleans indicating whether each chemical is a reactant (True)
or a product (False), [-]
Returns
-------
MW_error : float
The molecular weight error, [g/mol]
Notes
-----
A very small value may be returned for a properly balanced
equation because of floating-point error.
    Examples
    --------
    >>> stoichiometry_MW_error([4.0, 5.0, 4.0, 6.0], [17.03052, 31.9988, 30.0061, 18.01528], [True, True, False, False])
0.0
'''
reactant_MW = 0.0
product_MW = 0.0
for coeff, MW, stat in zip(coefficients, MWs, reactants):
if stat:
reactant_MW += coeff*MW
else:
product_MW += coeff*MW
return reactant_MW - product_MW
def standard_formation_reaction(atoms):
r'''This function calculates the standard reaction to reduce a chemical
compound to its standard state elements. Any hydrogen in the compound
is transformed to H2; oxygen to O2; carbon to graphite (single C), calcium
to Ca, etc.
Parameters
----------
atoms : dict[(str, float)]
A dictionary of (element, element_count) pairs for the reacting
compound, [-]
Returns
-------
reactant_coeff : float
The coefficient of the reactant; for compounds like CO that do not
divide evenly, this will be something other than 1 [-]
elemental_counts : list[float]
Balanced coefficients of each of the products, [-]
product_atomss : list[dict[(str, float)]]
A list of dictionaries of the elements produced, and how many atoms
of each element are in one unit of the element in its
standard form. Each dictionary contains a single key:value, with the key
being the element and the value being either 1 or 2 depending on the
standard state [-]
Examples
--------
Methane
>>> standard_formation_reaction({'C': 1, 'H': 4})
(1.0, [1.0, 2.0], [{'C': 1}, {'H': 2}])
Carbon monoxide
>>> standard_formation_reaction({'C': 1, 'O': 1})
(2.0, [2.0, 1.0], [{'C': 1}, {'O': 2}])
Methylamine
>>> standard_formation_reaction({'C': 1, 'H': 5, 'N': 1})
(2.0, [2.0, 5.0, 1.0], [{'C': 1}, {'H': 2}, {'N': 2}])
'''
product_atomss = []
reactants = []
for atom in atoms:
ele = periodic_table[atom]
ele_atoms = simple_formula_parser(ele.formula_standard)
product_atomss.append(ele_atoms)
reactants.append(True)
atoms_to_process = product_atomss + [atoms]
reactants.append(False)
matrix = stoichiometric_matrix(atoms_to_process, reactants)
coeffs = balance_stoichiometry(matrix)
reactant_coeff = coeffs[-1]
elemental_counts = coeffs[:-1]
return reactant_coeff, elemental_counts, product_atomss
----------------------------------------------------------------------
repo: simpeg/simpeg | path: /tutorials/_temporary/plot_inv_1_em1dtm_stitched_skytem.py
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 12,947 bytes
blob_id: 27889b512397efdffa9c012788340cb49035f665 | branch: refs/heads/main | stars: 437 | forks: 268
----------------------------------------------------------------------
"""
1D Stitched Inversion of SkyTEM Data
====================================
"""
#####################################################
# Import Modules
# --------------
#
import numpy as np
import tarfile
import matplotlib as mpl
from matplotlib import pyplot as plt
from discretize import TensorMesh
from pymatsolver import PardisoSolver
from SimPEG.utils import mkvc
from SimPEG import (
maps,
data,
data_misfit,
inverse_problem,
regularization,
optimization,
directives,
inversion,
utils,
)
import SimPEG.electromagnetics.time_domain_1d as em1d
from SimPEG.electromagnetics.utils.em1d_utils import (
get_2d_mesh,
get_vertical_discretization_time,
)
from SimPEG.electromagnetics.time_domain_1d.known_waveforms import (
skytem_HM_2015,
skytem_LM_2015,
)
save_file = True
plt.rcParams.update({"font.size": 16, "lines.linewidth": 2, "lines.markersize": 8})
#############################################
# Define File Names
# -----------------
#
# File paths for assets we are loading. To set up the inversion, we require
# topography and field observations. The true model defined on the whole mesh
# is loaded to compare with the inversion result.
#
# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/em1dtm_stitched_skytem_data.tar.gz"
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# filepath to data file
data_filename = downloaded_data.split(".")[0] + ".obs"
#####################################################################
# Topography
# ----------
#
#
x = np.linspace(50, 4950, 50)
y = np.zeros_like(x)
z = np.zeros_like(x)
topo = np.c_[x, y, z].astype(float)
n_sounding = len(x)
source_locations = np.c_[x, np.zeros(n_sounding), 30.0 * np.ones(n_sounding)]
source_current = 1.0
source_orientation = "z"
receiver_offset_r = 13.25
receiver_offset_z = 2.0
receiver_locations = np.c_[
x + receiver_offset_r,
np.zeros(n_sounding),
30.0 * np.ones(n_sounding) + receiver_offset_z,
]
receiver_orientation = "z" # "x", "y" or "z"
#############################################
# Load Data and Plot
# ------------------
#
# Load field data
dobs = np.loadtxt(str(data_filename))
dobs = mkvc(dobs[:, -1])
######################################################
# Create Survey
# -------------
#
wave_HM = skytem_HM_2015()
wave_LM = skytem_LM_2015()
time_HM = wave_HM.time_gate_center[0::2]
time_LM = wave_LM.time_gate_center[0::2]
time_input_currents_HM = wave_HM.current_times[-7:]
input_currents_HM = wave_HM.currents[-7:]
time_input_currents_LM = wave_LM.current_times[-13:]
input_currents_LM = wave_LM.currents[-13:]
source_list = []
for ii in range(0, n_sounding):
source_location = mkvc(source_locations[ii, :])
receiver_location = mkvc(receiver_locations[ii, :])
receiver_list = [
em1d.receivers.PointReceiver(
receiver_location,
times=time_HM,
times_dual_moment=time_LM,
orientation=receiver_orientation,
component="dbdt",
)
]
# Sources
source_list.append(
em1d.sources.MagneticDipoleSource(
receiver_list=receiver_list,
location=source_location,
moment_amplitude=source_current,
orientation=source_orientation,
wave_type="general",
moment_type="dual",
time_input_currents=time_input_currents_HM,
input_currents=input_currents_HM,
n_pulse=1,
base_frequency=25.0,
time_input_currents_dual_moment=time_input_currents_LM,
input_currents_dual_moment=input_currents_LM,
base_frequency_dual_moment=210,
)
)
# Survey
survey = em1d.survey.EM1DSurveyTD(source_list)
#############################################
# Assign Uncertainties
# --------------------
#
#
uncertainties = 0.1 * np.abs(dobs) * np.ones(np.shape(dobs))
###############################################
# Define Data
# --------------------
#
# Here is where we define the data that are inverted. The data are defined by
# the survey, the observation values and the uncertainties.
#
data_object = data.Data(survey, dobs=dobs, noise_floor=uncertainties)
###############################################
# Defining a Global Mesh
# ----------------------
#
n_layer = 25
thicknesses = get_vertical_discretization_time(
time_HM, sigma_background=0.1, n_layer=n_layer - 1
)
dx = 100.0
hx = np.ones(n_sounding) * dx
hz = np.r_[thicknesses, thicknesses[-1]]
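# The deepest layer thickness is repeated once so the 2D mesh includes a
# cell for the underlying half-space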
mesh2D = TensorMesh([hx, np.flipud(hz)], x0="0N")
mesh_soundings = TensorMesh([hz, hx], x0="00")
n_param = n_layer * n_sounding
###############################################
# Defining a Model
# ----------------------
#
conductivity = np.ones(n_param) * 0.1
mapping = maps.ExpMap(nP=n_param)
starting_model = np.log(conductivity)
#######################################################################
# Define the Forward Simulation and Predict Data
# ----------------------------------------------
#
# Simulate response for static conductivity
simulation = em1d.simulation.StitchedEM1DTMSimulation(
survey=survey,
thicknesses=thicknesses,
sigmaMap=mapping,
topo=topo,
Solver=PardisoSolver,
)
# simulation = em1d.simulation.StitchedEM1DTMSimulation(
# survey=survey, thicknesses=thicknesses, sigmaMap=mapping,
# topo=topo, parallel=True, n_cpu=4, verbose=True, Solver=PardisoSolver
# )
pred = simulation.dpred(starting_model)
fig = plt.figure()
plt.semilogy(-pred, ".", ms=1)
plt.semilogy(-dobs, "x")
plt.show()
########################################################################
# Define Inverse Problem
# ----------------------
#
# The inverse problem is defined by 3 things:
#
# 1) Data Misfit: a measure of how well our recovered model explains the field data
# 2) Regularization: constraints placed on the recovered model and a priori information
# 3) Optimization: the numerical approach used to solve the inverse problem
#
#
# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# The weighting is defined by the reciprocal of the uncertainties.
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
dmis.W = 1.0 / uncertainties
# Define the regularization (model objective function)
mesh_reg = get_2d_mesh(n_sounding, hz)
reg_map = maps.IdentityMap(mesh_reg)
reg = regularization.LaterallyConstrained(
mesh_reg,
mapping=reg_map,
alpha_s=0.1,
alpha_x=1.0,
alpha_y=1.0,
)
xy = utils.ndgrid(x, np.r_[0.0])
reg.get_grad_horizontal(xy, hz, dim=2, use_cell_weights=True)
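# get_grad_horizontal builds the lateral (sounding-to-sounding) gradient
# operators used by the laterally constrained regularization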
# reg_map = maps.IdentityMap(nP=mesh_soundings.nC)
# reg = regularization.Sparse(
# mesh_reg, mapping=reg_map,
# )
ps, px, py = 1, 1, 1
reg.norms = np.c_[ps, px, py, 0]
reg.mref = starting_model
reg.mrefInSmooth = False
# Define how the optimization problem is solved. Here we will use an inexact
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.InexactGaussNewton(maxIter=40, maxIterCG=20)
# Define the inverse problem
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
#######################################################################
# Define Inversion Directives
# ---------------------------
#
# Here we define any directives that are carried out during the inversion. This
# includes the cooling schedule for the trade-off parameter (beta), stopping
# criteria for the inversion and saving inversion results at each iteration.
#
# Apply and update sensitivity weighting as the model updates
# sensitivity_weights = directives.UpdateSensitivityWeights()
# Reach target misfit for L2 solution, then use IRLS until model stops changing.
# IRLS = directives.Update_IRLS(max_irls_iterations=40, minGNiter=1, f_min_change=1e-5, chifact_start=2)
# IRLS = directives.Update_IRLS(
# max_irls_iterations=20, minGNiter=1, fix_Jmatrix=True, coolingRate=2,
# beta_tol=1e-2, f_min_change=1e-5,
# chifact_start = 1.
# )
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=10)
beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=2)
# Update the preconditioner
update_Jacobi = directives.UpdatePreconditioner()
# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
update_IRLS = directives.Update_IRLS(
max_irls_iterations=20,
minGNiter=1,
fix_Jmatrix=True,
f_min_change=1e-3,
coolingRate=3,
)
# Updating the preconditioner if it is model dependent.
update_jacobi = directives.UpdatePreconditioner()
# Setting a stopping criteria for the inversion.
target_misfit = directives.TargetMisfit(chifact=1)
# Add sensitivity weights
sensitivity_weights = directives.UpdateSensitivityWeights()
target = directives.TargetMisfit()
# The directives are defined as a list.
directives_list = [
# sensitivity_weights,
starting_beta,
beta_schedule,
save_iteration,
# target_misfit,
update_IRLS,
# update_jacobi,
]
#####################################################################
# Running the Inversion
# ---------------------
#
# To define the inversion object, we need to define the inversion problem and
# the set of directives. We can then run the inversion.
#
# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)
# Run the inversion
recovered_model = inv.run(starting_model)
#######################################################################
# Plotting Results
# -------------------------------------------------
#
#
# True model
from scipy.spatial import Delaunay
def PolygonInd(mesh, pts):
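    # flag the mesh cell centers that fall inside the polygon defined by pts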
hull = Delaunay(pts)
inds = hull.find_simplex(mesh.gridCC) >= 0
return inds
background_conductivity = 0.1
overburden_conductivity = 0.025
slope_conductivity = 0.4
true_model = np.ones(mesh2D.nC) * background_conductivity
layer_ind = mesh2D.gridCC[:, -1] > -30.0
true_model[layer_ind] = overburden_conductivity
x0 = np.r_[0.0, -30.0]
x1 = np.r_[dx * n_sounding, -30.0]
x2 = np.r_[dx * n_sounding, -130.0]
x3 = np.r_[0.0, -50.0]
pts = np.vstack((x0, x1, x2, x3, x0))
poly_inds = PolygonInd(mesh2D, pts)
true_model[poly_inds] = slope_conductivity
# true_model = true_model.reshape(mesh_soundings.vnC, order='C')
# true_model = np.flipud(true_model)
# true_model = mkvc(true_model)
l2_model = inv_prob.l2model
dpred_l2 = simulation.dpred(l2_model)
l2_model = np.exp(l2_model)
# l2_model = l2_model.reshape((simulation.n_sounding, simulation.n_layer),)
# l2_model = mkvc(l2_model)
dpred = simulation.dpred(recovered_model)
recovered_model = np.exp(recovered_model)
# recovered_model = recovered_model.reshape((simulation.n_sounding, simulation.n_layer))
# recovered_model = mkvc(recovered_model)
mesh_plotting = TensorMesh([hx, np.flipud(hz)], x0="0N")
l2_model = l2_model.reshape(mesh_plotting.vnC, order="C")
l2_model = mkvc(np.fliplr(l2_model))
recovered_model = recovered_model.reshape(mesh_plotting.vnC, order="C")
recovered_model = mkvc(np.fliplr(recovered_model))
models_list = [true_model, l2_model, recovered_model]
for mod in models_list:
fig = plt.figure(figsize=(9, 3))
ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
log_mod = np.log10(mod)
mesh_plotting.plot_image(
log_mod,
ax=ax1,
grid=False,
clim=(np.log10(true_model.min()), np.log10(true_model.max())),
# clim=(np.log10(0.1), np.log10(1)),
pcolor_opts={"cmap": "viridis"},
)
ax1.set_ylim(mesh_plotting.nodes_y.min(), mesh_plotting.nodes_y.max())
ax1.set_title("Conductivity Model")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("depth (m)")
ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
norm = mpl.colors.Normalize(
vmin=np.log10(true_model.min()),
vmax=np.log10(true_model.max())
# vmin=np.log10(0.1), vmax=np.log10(1)
)
cbar = mpl.colorbar.ColorbarBase(
ax2,
norm=norm,
cmap=mpl.cm.viridis,
orientation="vertical",
format="$10^{%.1f}$",
)
cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
data_list = [dobs, dpred_l2, dpred]
color_list = ["k", "b", "r"]
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
n_time = time_HM.size + time_LM.size
for ii in range(0, len(data_list)):
d = np.reshape(data_list[ii], (n_sounding, n_time))
ax.semilogy(x, np.abs(d), color_list[ii], lw=1)
ax.set_xlabel("Times (s)")
ax.set_ylabel("|dBdt| (T/s)")
plt.show()
----------------------------------------------------------------------
repo: abhinavsingh/proxy.py | path: /tests/common/test_flags.py
license: BSD-3-Clause (permissive) | language: Python | encoding: UTF-8 | size: 7,264 bytes
blob_id: 5cdb18b7404b6ccfc8c6c5f9a28d646476b3182e | branch: refs/heads/develop | stars: 2,691 | forks: 657
----------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import Any, Dict, List
import unittest
from unittest import mock
from proxy.plugin import CacheResponsesPlugin, FilterByUpstreamHostPlugin
from proxy.http.proxy import HttpProxyPlugin
from proxy.common.flag import FlagParser
from proxy.common.utils import bytes_
from proxy.common.version import __version__
from proxy.common.constants import PLUGIN_HTTP_PROXY, PY2_DEPRECATION_MESSAGE
from . import my_plugins
class TestFlags(unittest.TestCase):
def assert_plugins(self, expected: Dict[str, List[type]]) -> None:
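        # flags.plugins maps each plugin base-class name (as bytes) to the
        # list of loaded plugin classes; every expected class must appear
        # exactly once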
for k in expected:
self.assertIn(k.encode(), self.flags.plugins)
for p in expected[k]:
self.assertIn(p, self.flags.plugins[k.encode()])
self.assertEqual(
len([o for o in self.flags.plugins[k.encode()] if o == p]), 1,
)
def test_load_plugin_from_bytes(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
b'proxy.plugin.CacheResponsesPlugin',
],
)
self.assert_plugins({'HttpProxyBasePlugin': [CacheResponsesPlugin]})
def test_load_plugins_from_bytes(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
b'proxy.plugin.CacheResponsesPlugin',
b'proxy.plugin.FilterByUpstreamHostPlugin',
],
)
self.assert_plugins({
'HttpProxyBasePlugin': [
CacheResponsesPlugin,
FilterByUpstreamHostPlugin,
],
})
def test_load_plugin_from_args(self) -> None:
self.flags = FlagParser.initialize([
'--plugins', 'proxy.plugin.CacheResponsesPlugin',
])
self.assert_plugins({'HttpProxyBasePlugin': [CacheResponsesPlugin]})
def test_load_plugins_from_args(self) -> None:
self.flags = FlagParser.initialize([
'--plugins', 'proxy.plugin.CacheResponsesPlugin,proxy.plugin.FilterByUpstreamHostPlugin',
])
self.assert_plugins({
'HttpProxyBasePlugin': [
CacheResponsesPlugin,
FilterByUpstreamHostPlugin,
],
})
def test_load_plugin_from_class(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
CacheResponsesPlugin,
],
)
self.assert_plugins({'HttpProxyBasePlugin': [CacheResponsesPlugin]})
def test_load_plugins_from_class(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
CacheResponsesPlugin,
FilterByUpstreamHostPlugin,
],
)
self.assert_plugins({
'HttpProxyBasePlugin': [
CacheResponsesPlugin,
FilterByUpstreamHostPlugin,
],
})
def test_load_plugins_from_bytes_and_class(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
CacheResponsesPlugin,
b'proxy.plugin.FilterByUpstreamHostPlugin',
],
)
self.assert_plugins({
'HttpProxyBasePlugin': [
CacheResponsesPlugin,
FilterByUpstreamHostPlugin,
],
})
def test_unique_plugin_from_bytes(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
bytes_(PLUGIN_HTTP_PROXY),
],
)
self.assert_plugins({
'HttpProtocolHandlerPlugin': [
HttpProxyPlugin,
],
})
def test_unique_plugin_from_args(self) -> None:
self.flags = FlagParser.initialize([
'--plugins', PLUGIN_HTTP_PROXY,
])
self.assert_plugins({
'HttpProtocolHandlerPlugin': [
HttpProxyPlugin,
],
})
def test_unique_plugin_from_class(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
HttpProxyPlugin,
],
)
self.assert_plugins({
'HttpProtocolHandlerPlugin': [
HttpProxyPlugin,
],
})
def test_plugin_from_inner_class_by_type(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
TestFlags.MyHttpProxyPlugin,
my_plugins.MyHttpProxyPlugin,
my_plugins.OuterClass.MyHttpProxyPlugin,
],
)
self.assert_plugins({
'HttpProtocolHandlerPlugin': [
TestFlags.MyHttpProxyPlugin,
my_plugins.MyHttpProxyPlugin,
my_plugins.OuterClass.MyHttpProxyPlugin,
],
})
def test_plugin_from_inner_class_by_name(self) -> None:
self.flags = FlagParser.initialize(
[], plugins=[
b'tests.common.test_flags.TestFlags.MyHttpProxyPlugin',
b'tests.common.my_plugins.MyHttpProxyPlugin',
b'tests.common.my_plugins.OuterClass.MyHttpProxyPlugin',
],
)
self.assert_plugins({
'HttpProtocolHandlerPlugin': [
TestFlags.MyHttpProxyPlugin,
my_plugins.MyHttpProxyPlugin,
my_plugins.OuterClass.MyHttpProxyPlugin,
],
})
class MyHttpProxyPlugin(HttpProxyPlugin):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def test_basic_auth_flag_is_base64_encoded(self) -> None:
flags = FlagParser.initialize(['--basic-auth', 'user:pass'])
self.assertEqual(flags.auth_code, b'dXNlcjpwYXNz')
@mock.patch('builtins.print')
def test_main_version(self, mock_print: mock.Mock) -> None:
with self.assertRaises(SystemExit) as e:
FlagParser.initialize(['--version'])
mock_print.assert_called_with(__version__)
self.assertEqual(e.exception.code, 0)
@mock.patch('builtins.print')
@mock.patch('proxy.common.flag.is_py2')
def test_main_py2_exit(
self,
mock_is_py2: mock.Mock,
mock_print: mock.Mock,
) -> None:
mock_is_py2.return_value = True
with self.assertRaises(SystemExit) as e:
FlagParser.initialize()
mock_print.assert_called_with(PY2_DEPRECATION_MESSAGE)
self.assertEqual(e.exception.code, 1)
mock_is_py2.assert_called()
@mock.patch('builtins.print')
@mock.patch('proxy.common.flag.is_py2')
def test_main_py3_runs(
self,
mock_is_py2: mock.Mock,
mock_print: mock.Mock,
) -> None:
mock_is_py2.return_value = False
FlagParser.initialize()
mock_is_py2.assert_called()
mock_print.assert_not_called()
if __name__ == '__main__':
unittest.main()
----------------------------------------------------------------------
repo: mindspore-ai/models | path: /research/cv/SPC-Net/eval.py
licenses: Apache-2.0, LicenseRef-scancode-unknown-license-reference, LicenseRef-scancode-proprietary-license (permissive)
language: Python | encoding: UTF-8 | size: 3,568 bytes
blob_id: 0f68f35a836bd4df1e59c8edd903c990b1df17c9 | branch: refs/heads/master | stars: 301 | forks: 92
----------------------------------------------------------------------
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import warnings
import time
import argparse
from network.network import deep_r50v3plusd
from src.dataset import create_dataset
from src.utils import save_imgs, fast_hist, evaluate_eval_for_inference
import mindspore as ms
from mindspore import context
import mindspore.ops as ops
warnings.filterwarnings("ignore")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Semantic Segmentation')
parser.add_argument('--root', type=str, default='/path/to/Datasets')
parser.add_argument('--dataset', type=str, default='cityscapes',
help='[cityscapes, bdd, mapillary, synthia, gtav]')
parser.add_argument('--num', type=int, default=2,
help='the number of sources. 1, 2 or 3')
parser.add_argument('--save_path', type=str, default=None)
args = parser.parse_args()
# define device
ms.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
# init network
net = deep_r50v3plusd(args=args, num_classes=19, criterion=None, criterion_aux=None)
    # load parameters
if args.num == 1:
MODELNAME = './models/single_source_model.ckpt'
elif args.num == 2:
MODELNAME = './models/double_source_model.ckpt'
elif args.num == 3:
MODELNAME = './models/triple_source_model.ckpt'
    else:
        raise ValueError('Unsupported value for --num: expected 1, 2 or 3')
param_dict = ms.load_checkpoint(MODELNAME)
param_not_load = ms.load_param_into_net(net, param_dict)
assert not param_not_load
# init dataset
dataset = create_dataset(root=args.root, data=args.dataset)
# inference
net.set_train(False)
dataset_size = dataset.get_dataset_size()
print('Start inference...')
print('eval dataset size: {}'.format(dataset_size))
IOUACC = 0
start = time.time()
for idx, data in enumerate(dataset.create_dict_iterator(num_epochs=1), start=0):
image = data['image']
label = data['label']
img_name = data['img_name']
prediction = net(image)
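        # take the per-pixel class with the highest score along the channel axis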
prediction, _ = ops.ArgMaxWithValue(axis=1)(prediction)
prediction = ops.Squeeze(0)(prediction)
label = ops.Squeeze(0)(label)
prediction = prediction.asnumpy()
label = label.asnumpy()
if args.save_path is not None:
save_imgs(prediction=prediction, img_name=img_name, save_path=args.save_path)
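        # accumulate the per-class confusion counts for the mIoU computation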
temp_iou = fast_hist(prediction.flatten(), label.flatten(), 19)
IOUACC += temp_iou
print('COST TIME:', (time.time()-start))
res = evaluate_eval_for_inference(IOUACC, dataset_name=args.dataset)
try:
acc, acc_cls, mean_iu, fwavacc = res["acc"], res["acc_cls"], res["mean_iu"], res["fwavacc"]
print('acc=%.6f, acc_cls=%.6f, mean_iu=%.6f, fwavacc=%.6f' % (acc, acc_cls, mean_iu, fwavacc))
except KeyError:
print("res format error, some key not found in res dict.")
----------------------------------------------------------------------
repo: pulumi/pulumi-gcp | path: /sdk/python/pulumi_gcp/compute/region_per_instance_config.py
licenses: BSD-3-Clause, MPL-2.0, Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 35,431 bytes
blob_id: ac097faa2273f15e7caeb8a8870a4645444bd03f | branch: refs/heads/master | stars: 160 | forks: 63
----------------------------------------------------------------------
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RegionPerInstanceConfigArgs', 'RegionPerInstanceConfig']
@pulumi.input_type
class RegionPerInstanceConfigArgs:
def __init__(__self__, *,
region_instance_group_manager: pulumi.Input[str],
minimal_action: Optional[pulumi.Input[str]] = None,
most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preserved_state: Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a RegionPerInstanceConfig resource.
:param pulumi.Input[str] region_instance_group_manager: The region instance group manager this instance config is part of.
- - -
:param pulumi.Input[str] minimal_action: The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] most_disruptive_allowed_action: The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] name: The name for this per-instance config and its corresponding instance.
:param pulumi.Input['RegionPerInstanceConfigPreservedStateArgs'] preserved_state: The preserved state for this instance.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the containing instance group manager is located
:param pulumi.Input[bool] remove_instance_state_on_destroy: When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
pulumi.set(__self__, "region_instance_group_manager", region_instance_group_manager)
if minimal_action is not None:
pulumi.set(__self__, "minimal_action", minimal_action)
if most_disruptive_allowed_action is not None:
pulumi.set(__self__, "most_disruptive_allowed_action", most_disruptive_allowed_action)
if name is not None:
pulumi.set(__self__, "name", name)
if preserved_state is not None:
pulumi.set(__self__, "preserved_state", preserved_state)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if remove_instance_state_on_destroy is not None:
pulumi.set(__self__, "remove_instance_state_on_destroy", remove_instance_state_on_destroy)
@property
@pulumi.getter(name="regionInstanceGroupManager")
def region_instance_group_manager(self) -> pulumi.Input[str]:
"""
The region instance group manager this instance config is part of.
- - -
"""
return pulumi.get(self, "region_instance_group_manager")
@region_instance_group_manager.setter
def region_instance_group_manager(self, value: pulumi.Input[str]):
pulumi.set(self, "region_instance_group_manager", value)
@property
@pulumi.getter(name="minimalAction")
def minimal_action(self) -> Optional[pulumi.Input[str]]:
"""
The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "minimal_action")
@minimal_action.setter
def minimal_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "minimal_action", value)
@property
@pulumi.getter(name="mostDisruptiveAllowedAction")
def most_disruptive_allowed_action(self) -> Optional[pulumi.Input[str]]:
"""
The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "most_disruptive_allowed_action")
@most_disruptive_allowed_action.setter
def most_disruptive_allowed_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "most_disruptive_allowed_action", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for this per-instance config and its corresponding instance.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="preservedState")
def preserved_state(self) -> Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']]:
"""
The preserved state for this instance.
Structure is documented below.
"""
return pulumi.get(self, "preserved_state")
@preserved_state.setter
def preserved_state(self, value: Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']]):
pulumi.set(self, "preserved_state", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
Region where the containing instance group manager is located
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="removeInstanceStateOnDestroy")
def remove_instance_state_on_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
return pulumi.get(self, "remove_instance_state_on_destroy")
@remove_instance_state_on_destroy.setter
def remove_instance_state_on_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "remove_instance_state_on_destroy", value)
@pulumi.input_type
class _RegionPerInstanceConfigState:
def __init__(__self__, *,
minimal_action: Optional[pulumi.Input[str]] = None,
most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preserved_state: Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
region_instance_group_manager: Optional[pulumi.Input[str]] = None,
remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering RegionPerInstanceConfig resources.
:param pulumi.Input[str] minimal_action: The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] most_disruptive_allowed_action: The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] name: The name for this per-instance config and its corresponding instance.
:param pulumi.Input['RegionPerInstanceConfigPreservedStateArgs'] preserved_state: The preserved state for this instance.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the containing instance group manager is located
:param pulumi.Input[str] region_instance_group_manager: The region instance group manager this instance config is part of.
:param pulumi.Input[bool] remove_instance_state_on_destroy: When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
if minimal_action is not None:
pulumi.set(__self__, "minimal_action", minimal_action)
if most_disruptive_allowed_action is not None:
pulumi.set(__self__, "most_disruptive_allowed_action", most_disruptive_allowed_action)
if name is not None:
pulumi.set(__self__, "name", name)
if preserved_state is not None:
pulumi.set(__self__, "preserved_state", preserved_state)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if region_instance_group_manager is not None:
pulumi.set(__self__, "region_instance_group_manager", region_instance_group_manager)
if remove_instance_state_on_destroy is not None:
pulumi.set(__self__, "remove_instance_state_on_destroy", remove_instance_state_on_destroy)
@property
@pulumi.getter(name="minimalAction")
def minimal_action(self) -> Optional[pulumi.Input[str]]:
"""
The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "minimal_action")
@minimal_action.setter
def minimal_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "minimal_action", value)
@property
@pulumi.getter(name="mostDisruptiveAllowedAction")
def most_disruptive_allowed_action(self) -> Optional[pulumi.Input[str]]:
"""
The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "most_disruptive_allowed_action")
@most_disruptive_allowed_action.setter
def most_disruptive_allowed_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "most_disruptive_allowed_action", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for this per-instance config and its corresponding instance.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="preservedState")
def preserved_state(self) -> Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']]:
"""
The preserved state for this instance.
Structure is documented below.
"""
return pulumi.get(self, "preserved_state")
@preserved_state.setter
def preserved_state(self, value: Optional[pulumi.Input['RegionPerInstanceConfigPreservedStateArgs']]):
pulumi.set(self, "preserved_state", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
Region where the containing instance group manager is located
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="regionInstanceGroupManager")
def region_instance_group_manager(self) -> Optional[pulumi.Input[str]]:
"""
The region instance group manager this instance config is part of.
"""
return pulumi.get(self, "region_instance_group_manager")
@region_instance_group_manager.setter
def region_instance_group_manager(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_instance_group_manager", value)
@property
@pulumi.getter(name="removeInstanceStateOnDestroy")
def remove_instance_state_on_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
return pulumi.get(self, "remove_instance_state_on_destroy")
@remove_instance_state_on_destroy.setter
def remove_instance_state_on_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "remove_instance_state_on_destroy", value)
class RegionPerInstanceConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
minimal_action: Optional[pulumi.Input[str]] = None,
most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
region_instance_group_manager: Optional[pulumi.Input[str]] = None,
remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
A config defined for a single managed instance that belongs to an instance group manager. It preserves the instance name
across instance group manager operations and can define stateful disks or metadata that are unique to the instance.
This resource works with regional instance group managers.
To get more information about RegionPerInstanceConfig, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers)
* How-to Guides
* [Official Documentation](https://cloud.google.com/compute/docs/instance-groups/stateful-migs#per-instance_configs)
## Example Usage
### Stateful Rigm
```python
import pulumi
import pulumi_gcp as gcp
my_image = gcp.compute.get_image(family="debian-11",
project="debian-cloud")
igm_basic = gcp.compute.InstanceTemplate("igm-basic",
machine_type="e2-medium",
can_ip_forward=False,
tags=[
"foo",
"bar",
],
disks=[gcp.compute.InstanceTemplateDiskArgs(
source_image=my_image.self_link,
auto_delete=True,
boot=True,
)],
network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
network="default",
)],
service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
scopes=[
"userinfo-email",
"compute-ro",
"storage-ro",
],
))
rigm = gcp.compute.RegionInstanceGroupManager("rigm",
description="Demo test instance group manager",
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
name="prod",
instance_template=igm_basic.self_link,
)],
update_policy=gcp.compute.RegionInstanceGroupManagerUpdatePolicyArgs(
type="OPPORTUNISTIC",
instance_redistribution_type="NONE",
minimal_action="RESTART",
),
base_instance_name="rigm",
region="us-central1",
target_size=2)
default = gcp.compute.Disk("default",
type="pd-ssd",
zone="us-central1-a",
image="debian-11-bullseye-v20220719",
physical_block_size_bytes=4096)
with_disk = gcp.compute.RegionPerInstanceConfig("withDisk",
    region=rigm.region,
region_instance_group_manager=rigm.name,
preserved_state=gcp.compute.RegionPerInstanceConfigPreservedStateArgs(
metadata={
"foo": "bar",
"instance_template": igm_basic.self_link,
},
disks=[gcp.compute.RegionPerInstanceConfigPreservedStateDiskArgs(
device_name="my-stateful-disk",
source=default.id,
mode="READ_ONLY",
)],
))
```
## Import
RegionPerInstanceConfig can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{region}}/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{region_instance_group_manager}}/{{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] minimal_action: The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] most_disruptive_allowed_action: The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] name: The name for this per-instance config and its corresponding instance.
:param pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']] preserved_state: The preserved state for this instance.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the containing instance group manager is located
:param pulumi.Input[str] region_instance_group_manager: The region instance group manager this instance config is part of.
:param pulumi.Input[bool] remove_instance_state_on_destroy: When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegionPerInstanceConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A config defined for a single managed instance that belongs to an instance group manager. It preserves the instance name
across instance group manager operations and can define stateful disks or metadata that are unique to the instance.
This resource works with regional instance group managers.
To get more information about RegionPerInstanceConfig, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers)
* How-to Guides
* [Official Documentation](https://cloud.google.com/compute/docs/instance-groups/stateful-migs#per-instance_configs)
## Example Usage
### Stateful Rigm
```python
import pulumi
import pulumi_gcp as gcp
my_image = gcp.compute.get_image(family="debian-11",
project="debian-cloud")
igm_basic = gcp.compute.InstanceTemplate("igm-basic",
machine_type="e2-medium",
can_ip_forward=False,
tags=[
"foo",
"bar",
],
disks=[gcp.compute.InstanceTemplateDiskArgs(
source_image=my_image.self_link,
auto_delete=True,
boot=True,
)],
network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
network="default",
)],
service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
scopes=[
"userinfo-email",
"compute-ro",
"storage-ro",
],
))
rigm = gcp.compute.RegionInstanceGroupManager("rigm",
description="Demo test instance group manager",
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
name="prod",
instance_template=igm_basic.self_link,
)],
update_policy=gcp.compute.RegionInstanceGroupManagerUpdatePolicyArgs(
type="OPPORTUNISTIC",
instance_redistribution_type="NONE",
minimal_action="RESTART",
),
base_instance_name="rigm",
region="us-central1",
target_size=2)
default = gcp.compute.Disk("default",
type="pd-ssd",
zone="us-central1-a",
image="debian-11-bullseye-v20220719",
physical_block_size_bytes=4096)
with_disk = gcp.compute.RegionPerInstanceConfig("withDisk",
    region=rigm.region,
region_instance_group_manager=rigm.name,
preserved_state=gcp.compute.RegionPerInstanceConfigPreservedStateArgs(
metadata={
"foo": "bar",
"instance_template": igm_basic.self_link,
},
disks=[gcp.compute.RegionPerInstanceConfigPreservedStateDiskArgs(
device_name="my-stateful-disk",
source=default.id,
mode="READ_ONLY",
)],
))
```
## Import
RegionPerInstanceConfig can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{region}}/{{region_instance_group_manager}}/{{name}}
```
```sh
$ pulumi import gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig default {{region_instance_group_manager}}/{{name}}
```
:param str resource_name: The name of the resource.
:param RegionPerInstanceConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegionPerInstanceConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
minimal_action: Optional[pulumi.Input[str]] = None,
most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
region_instance_group_manager: Optional[pulumi.Input[str]] = None,
remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegionPerInstanceConfigArgs.__new__(RegionPerInstanceConfigArgs)
__props__.__dict__["minimal_action"] = minimal_action
__props__.__dict__["most_disruptive_allowed_action"] = most_disruptive_allowed_action
__props__.__dict__["name"] = name
__props__.__dict__["preserved_state"] = preserved_state
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
if region_instance_group_manager is None and not opts.urn:
raise TypeError("Missing required property 'region_instance_group_manager'")
__props__.__dict__["region_instance_group_manager"] = region_instance_group_manager
__props__.__dict__["remove_instance_state_on_destroy"] = remove_instance_state_on_destroy
super(RegionPerInstanceConfig, __self__).__init__(
'gcp:compute/regionPerInstanceConfig:RegionPerInstanceConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
minimal_action: Optional[pulumi.Input[str]] = None,
most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
region_instance_group_manager: Optional[pulumi.Input[str]] = None,
remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':
"""
Get an existing RegionPerInstanceConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] minimal_action: The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] most_disruptive_allowed_action: The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
:param pulumi.Input[str] name: The name for this per-instance config and its corresponding instance.
:param pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']] preserved_state: The preserved state for this instance.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the containing instance group manager is located
:param pulumi.Input[str] region_instance_group_manager: The region instance group manager this instance config is part of.
:param pulumi.Input[bool] remove_instance_state_on_destroy: When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)
__props__.__dict__["minimal_action"] = minimal_action
__props__.__dict__["most_disruptive_allowed_action"] = most_disruptive_allowed_action
__props__.__dict__["name"] = name
__props__.__dict__["preserved_state"] = preserved_state
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["region_instance_group_manager"] = region_instance_group_manager
__props__.__dict__["remove_instance_state_on_destroy"] = remove_instance_state_on_destroy
return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="minimalAction")
def minimal_action(self) -> pulumi.Output[Optional[str]]:
"""
The minimal action to perform on the instance during an update.
Default is `NONE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "minimal_action")
@property
@pulumi.getter(name="mostDisruptiveAllowedAction")
def most_disruptive_allowed_action(self) -> pulumi.Output[Optional[str]]:
"""
The most disruptive action to perform on the instance during an update.
Default is `REPLACE`. Possible values are:
* REPLACE
* RESTART
* REFRESH
* NONE
"""
return pulumi.get(self, "most_disruptive_allowed_action")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name for this per-instance config and its corresponding instance.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="preservedState")
def preserved_state(self) -> pulumi.Output[Optional['outputs.RegionPerInstanceConfigPreservedState']]:
"""
The preserved state for this instance.
Structure is documented below.
"""
return pulumi.get(self, "preserved_state")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
Region where the containing instance group manager is located
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="regionInstanceGroupManager")
def region_instance_group_manager(self) -> pulumi.Output[str]:
"""
The region instance group manager this instance config is part of.
"""
return pulumi.get(self, "region_instance_group_manager")
@property
@pulumi.getter(name="removeInstanceStateOnDestroy")
def remove_instance_state_on_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
When true, deleting this config will immediately remove any specified state from the underlying instance.
When false, deleting this config will *not* immediately remove any state from the underlying instance.
State will be removed on the next instance recreation or update.
"""
return pulumi.get(self, "remove_instance_state_on_destroy")
|
blob_id: 28a252aef84561a1c2779b82a575ea6de3d1f466
directory_id: 952dc66c61966f099756cdb6c2d13b40352f63cc
path: /zerver/webhooks/slack/tests.py
content_id: 4948d477dbed1b233e28d4dd965c25de88f2296b
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-free-unknown"]
license_type: permissive
repo_name: zulip/zulip
snapshot_id: 5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
revision_id: 965a25d91b6ee2db54038f5df855215fa25146b0
branch_name: refs/heads/main
visit_date: 2023-08-28T23:43:00.971110
revision_date: 2023-08-28T16:47:09
committer_date: 2023-08-28T19:33:02
github_id: 43,160,685
star_events_count: 20,239
fork_events_count: 8,996
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:57:47
gha_created_at: 2015-09-25T16:37:25
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,486
extension: py
filename: tests.py
content:
from zerver.lib.test_classes import WebhookTestCase
class SlackWebhookTests(WebhookTestCase):
STREAM_NAME = "slack"
URL_TEMPLATE = "/api/v1/external/slack?stream={stream}&api_key={api_key}"
WEBHOOK_DIR_NAME = "slack"
def test_slack_channel_to_topic(self) -> None:
expected_topic = "channel: general"
expected_message = "**slack_user**: test"
self.check_webhook(
"message_info",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_slack_channel_to_stream(self) -> None:
self.STREAM_NAME = "general"
self.url = "{}{}".format(self.url, "&channels_map_to_topics=0")
expected_topic = "Message from Slack"
expected_message = "**slack_user**: test"
self.check_webhook(
"message_info",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_missing_data_user_name(self) -> None:
payload = self.get_body("message_info_missing_user_name")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'user_name' argument")
def test_missing_data_channel_name(self) -> None:
payload = self.get_body("message_info_missing_channel_name")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'channel_name' argument")
def test_missing_data_text(self) -> None:
payload = self.get_body("message_info_missing_text")
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Missing 'text' argument")
def test_invalid_channels_map_to_topics(self) -> None:
payload = self.get_body("message_info")
url = "{}{}".format(self.url, "&channels_map_to_topics=abc")
result = self.client_post(url, payload, content_type="application/x-www-form-urlencoded")
self.assert_json_error(result, "Error: channels_map_to_topics parameter other than 0 or 1")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("slack", fixture_name, file_type="txt")
|
blob_id: 72cb9f3fc1b271685538040563523f1ebbeee207
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/helpers/typeshed/stubs/tree-sitter-languages/tree_sitter_languages/__init__.pyi
content_id: 59a083877290da5114682749af40ae2b16a666ab
detected_licenses: ["Apache-2.0", "MIT"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 158
extension: pyi
filename: __init__.pyi
content:
from .core import get_language as get_language, get_parser as get_parser
__version__: str
__title__: str
__author__: str
__license__: str
__copyright__: str
|
blob_id: 66a5a50f0324e2c073f449d526c84988057a6ab4
directory_id: 7cc85a6fdc7eb4e096b67628d003942b59266d78
path: /ptsemseg/loader/pascal_voc_loader.py
content_id: ae7736854a06530e79d9186959d46f38729d5f1b
detected_licenses: ["MIT"]
license_type: permissive
repo_name: zhechen/PLARD
snapshot_id: c2275c756eebf30b37526714f537118f0399d857
revision_id: 44485803092e729661c696ab6c03f6f2fabc8701
branch_name: refs/heads/master
visit_date: 2023-07-22T04:18:36.580898
revision_date: 2022-06-18T04:32:52
committer_date: 2022-06-18T04:32:52
github_id: 230,184,349
star_events_count: 151
fork_events_count: 49
gha_license_id: MIT
gha_event_created_at: 2023-07-06T21:14:28
gha_created_at: 2019-12-26T02:56:10
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,954
extension: py
filename: pascal_voc_loader.py
content:
import os
from os.path import join as pjoin
import collections
import json
import torch
import numpy as np
import scipy.misc as m
import scipy.io as io
import matplotlib.pyplot as plt
import glob
from tqdm import tqdm
from torch.utils import data
def get_data_path(name):
"""Extract path to data from config file.
Args:
name (str): The name of the dataset.
Returns:
(str): The path to the root directory containing the dataset.
"""
js = open('config.json').read()
data = json.loads(js)
return os.path.expanduser(data[name]['data_path'])
class pascalVOCLoader(data.Dataset):
"""Data loader for the Pascal VOC semantic segmentation dataset.
Annotations from both the original VOC data (which consist of RGB images
in which colours map to specific classes) and the SBD (Berkely) dataset
(where annotations are stored as .mat files) are converted into a common
`label_mask` format. Under this format, each mask is an (M,N) array of
integer values from 0 to 21, where 0 represents the background class.
The label masks are stored in a new folder, called `pre_encoded`, which
is added as a subdirectory of the `SegmentationClass` folder in the
original Pascal VOC data layout.
A total of five data splits are provided for working with the VOC data:
train: The original VOC 2012 training data - 1464 images
val: The original VOC 2012 validation data - 1449 images
trainval: The combination of `train` and `val` - 2913 images
train_aug: The unique images present in both the train split and
training images from SBD: - 8829 images (the unique members
of the result of combining lists of length 1464 and 8498)
train_aug_val: The original VOC 2012 validation data minus the images
present in `train_aug` (This is done with the same logic as
the validation set used in FCN PAMI paper, but with VOC 2012
rather than VOC 2011) - 904 images
"""
def __init__(self, root, split='train_aug', is_transform=False,
img_size=512, augmentations=None, img_norm=True):
self.root = os.path.expanduser(root)
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.n_classes = 21
self.mean = np.array([104.00699, 116.66877, 122.67892])
self.files = collections.defaultdict(list)
self.img_size = img_size if isinstance(img_size, tuple) \
else (img_size, img_size)
for split in ['train', 'val', 'trainval']:
path = pjoin(self.root, 'ImageSets/Segmentation', split + '.txt')
file_list = tuple(open(path, 'r'))
file_list = [id_.rstrip() for id_ in file_list]
self.files[split] = file_list
self.setup_annotations()
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
im_name = self.files[self.split][index]
im_path = pjoin(self.root, 'JPEGImages', im_name + '.jpg')
lbl_path = pjoin(self.root, 'SegmentationClass/pre_encoded',
im_name + '.png')
im = m.imread(im_path)
im = np.array(im, dtype=np.uint8)
lbl = m.imread(lbl_path)
        lbl = np.array(lbl, dtype=np.int32)  # int32, not int8: a void label of 255 would wrap to -1 in int8 and the lbl == 255 check below would never match
if self.augmentations is not None:
im, lbl = self.augmentations(im, lbl)
if self.is_transform:
im, lbl = self.transform(im, lbl)
return im, lbl
def transform(self, img, lbl):
img = m.imresize(img, (self.img_size[0], self.img_size[1])) # uint8 with RGB mode
img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
if self.img_norm:
# Resize scales images from 0 to 255, thus we need
# to divide by 255.0
img = img.astype(float) / 255.0
        # HWC -> CHW (single image, no batch dimension)
img = img.transpose(2, 0, 1)
lbl[lbl==255] = 0
lbl = lbl.astype(float)
lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest',
mode='F')
lbl = lbl.astype(int)
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def get_pascal_labels(self):
"""Load the mapping that associates pascal classes with label colors
Returns:
np.ndarray with dimensions (21, 3)
"""
return np.asarray([[0,0,0], [128,0,0], [0,128,0], [128,128,0],
[0,0,128], [128,0,128], [0,128,128], [128,128,128],
[64,0,0], [192,0,0], [64,128,0], [192,128,0],
[64,0,128], [192,0,128], [64,128,128], [192,128,128],
[0, 64,0], [128, 64, 0], [0,192,0], [128,192,0],
[0,64,128]])
def encode_segmap(self, mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(self.get_pascal_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def decode_segmap(self, label_mask, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
"""
label_colours = self.get_pascal_labels()
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, self.n_classes):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def setup_annotations(self):
"""Sets up Berkley annotations by adding image indices to the
`train_aug` split and pre-encode all segmentation labels into the
common label_mask format (if this has not already been done). This
function also defines the `train_aug` and `train_aug_val` data splits
according to the description in the class docstring
"""
sbd_path = get_data_path('sbd')
target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')
if not os.path.exists(target_path): os.makedirs(target_path)
path = pjoin(sbd_path, 'dataset/train.txt')
sbd_train_list = tuple(open(path, 'r'))
sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]
train_aug = self.files['train'] + sbd_train_list
# keep unique elements (stable)
train_aug = [train_aug[i] for i in \
sorted(np.unique(train_aug, return_index=True)[1])]
self.files['train_aug'] = train_aug
set_diff = set(self.files['val']) - set(train_aug) # remove overlap
self.files['train_aug_val'] = list(set_diff)
pre_encoded = glob.glob(pjoin(target_path, '*.png'))
expected = np.unique(self.files['train_aug'] + self.files['val']).size
if len(pre_encoded) != expected:
print("Pre-encoding segmentation masks...")
for ii in tqdm(sbd_train_list):
lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')
data = io.loadmat(lbl_path)
lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(pjoin(target_path, ii + '.png'), lbl)
for ii in tqdm(self.files['trainval']):
fname = ii + '.png'
lbl_path = pjoin(self.root, 'SegmentationClass', fname)
lbl = self.encode_segmap(m.imread(lbl_path))
lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
m.imsave(pjoin(target_path, fname), lbl)
assert expected == 9733, 'unexpected dataset sizes'
# Leave code for debugging purposes
# import ptsemseg.augmentations as aug
# if __name__ == '__main__':
# # local_path = '/home/meetshah1995/datasets/VOCdevkit/VOC2012/'
# bs = 4
# augs = aug.Compose([aug.RandomRotate(10), aug.RandomHorizontallyFlip()])
# dst = pascalVOCLoader(root=local_path, is_transform=True, augmentations=augs)
# trainloader = data.DataLoader(dst, batch_size=bs)
# for i, data in enumerate(trainloader):
# imgs, labels = data
# imgs = imgs.numpy()[:, ::-1, :, :]
# imgs = np.transpose(imgs, [0,2,3,1])
# f, axarr = plt.subplots(bs, 2)
# for j in range(bs):
# axarr[j][0].imshow(imgs[j])
# axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
# plt.show()
# a = raw_input()
# if a == 'ex':
# break
# else:
# plt.close()
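# --- Illustrative encode/decode round trip (editor's sketch; the config key
# 'pascal' and the label file name are hypothetical) ---
#   dst = pascalVOCLoader(root=get_data_path('pascal'))
#   mask = dst.encode_segmap(m.imread('some_label.png'))  # RGB colours -> class indices
#   rgb = dst.decode_segmap(mask)                         # class indices -> RGB colours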
|
blob_id: 828e75e2b48ef1e7ee5110816ef14a93fd20a90c
directory_id: 71acb7214efd91c0d327f6d8958e1798eadb4401
path: /locations/spiders/mr_liquor_au.py
content_id: 20550b67139fa2ba6174830ab3f5166e67d13a6a
detected_licenses: ["CC0-1.0", "MIT"]
license_type: permissive
repo_name: alltheplaces/alltheplaces
snapshot_id: 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
revision_id: 1bcbb55cfcf06f2c714465570711f6e83f205c22
branch_name: refs/heads/master
visit_date: 2023-08-30T19:45:35.098658
revision_date: 2023-08-30T17:51:54
committer_date: 2023-08-30T17:51:54
github_id: 61,166,935
star_events_count: 453
fork_events_count: 176
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T17:16:40
gha_created_at: 2016-06-15T01:09:18
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 251
extension: py
filename: mr_liquor_au.py
content:
from locations.storefinders.metizsoft import MetizsoftSpider
class MrLiquorAUSpider(MetizsoftSpider):
name = "mr_liquor_au"
item_attributes = {"brand": "Mr Liquor", "brand_wikidata": "Q117822077"}
shopify_url = "mr-liquor.myshopify.com"
|
blob_id: 98febf9036f202848755e90b7a8b8552fd01869e
directory_id: 67e7c0f06e8aef9579bf3761ff6af76e5eafb590
path: /pipeline/models/header_model.py
content_id: c456b3782aca23da41b9336687380efe345835eb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: epi052/recon-pipeline
snapshot_id: d1c711f5fd7ceccc95eda13004287d030452fe90
revision_id: 4930f4064ca42c4b3669444b92dee355dd68b81e
branch_name: refs/heads/main
visit_date: 2023-02-23T06:02:26.055102
revision_date: 2023-01-27T00:20:30
committer_date: 2023-01-27T00:20:30
github_id: 205,856,988
star_events_count: 413
fork_events_count: 102
gha_license_id: MIT
gha_event_created_at: 2023-02-13T16:35:28
gha_created_at: 2019-09-02T12:54:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 969
extension: py
filename: header_model.py
content:
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, ForeignKey, String, UniqueConstraint, Table
from .base_model import Base
header_association_table = Table(
"header_association",
Base.metadata,
Column("header_id", Integer, ForeignKey("header.id")),
Column("endpoint_id", Integer, ForeignKey("endpoint.id")),
)
class Header(Base):
""" Database model that describes an http header (i.e. Server=cloudflare).
Relationships:
``endpoints``: many to many -> :class:`pipeline.models.target_model.Endpoint`
"""
__tablename__ = "header"
__table_args__ = (UniqueConstraint("name", "value"),) # combination of name/value == unique
id = Column(Integer, primary_key=True)
name = Column(String)
value = Column(String)
endpoint_id = Column(Integer, ForeignKey("endpoint.id"))
endpoints = relationship("Endpoint", secondary=header_association_table, back_populates="headers")
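# --- Illustrative usage (editor's sketch) ---
# Assumes an Endpoint instance from pipeline.models.target_model and an open
# SQLAlchemy session; both names below are placeholders.
#
#   header = Header(name="Server", value="cloudflare")
#   header.endpoints.append(endpoint)  # linked through header_association_table
#   session.add(header)
#   session.commit()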
|
blob_id: b878f6c037b323244aa2a28389deabd0868910a6
directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
path: /SimCalorimetry/HcalZeroSuppressionProducers/python/hcalNZS.py
content_id: 8c01c767bccc8555781ee77cdb37c017f211b6f2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: cms-sw/cmssw
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58
revision_id: 19c178740257eb48367778593da55dcad08b7a4f
branch_name: refs/heads/master
visit_date: 2023-08-23T21:57:42.491143
revision_date: 2023-08-22T20:22:40
committer_date: 2023-08-22T20:22:40
github_id: 10,969,551
star_events_count: 1,006
fork_events_count: 3,696
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:14:28
gha_created_at: 2013-06-26T14:09:07
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 192
extension: py
filename: hcalNZS.py
content:
import FWCore.ParameterSet.Config as cms
def customise_hcalNZS(process):
if hasattr(process,'simHcalDigis'):
process.simHcalDigis.markAndPass = cms.bool(True)
return process
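# Illustrative hook-up (editor's sketch): a customisation function like this is
# typically applied to a cmsRun process, e.g. via cmsDriver's --customise option
# in the usual Package/SubPackage/module.function form:
#
#   cmsDriver.py ... --customise SimCalorimetry/HcalZeroSuppressionProducers/hcalNZS.customise_hcalNZS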
|
blob_id: 21791182ed376e6f31dc62a54cc39fa070f38940
directory_id: 2d0bada349646b801a69c542407279cc7bc25013
path: /src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/examples/quantization/keras/vitis/mnist_cnn_ptq_mix_precision.py
content_id: 5151fa8159a1d4edd070958ea33fe6070a2eb93c
detected_licenses: ["Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "BSD-3-Clause-Open-MPI", "LicenseRef-scancode-free-unknown", "Libtool-exception", "GCC-exception-3.1", "LicenseRef-scancode-mit-old-style", "OFL-1.1", "JSON", "LGPL-2.1-only", "LGPL-2.0-or-later", "ICU", "LicenseRef-scancode-other-permissive", "GPL-2.0-or-later", "GPL-3.0-only", "LicenseRef-scancode-issl-2018", "MIT", "LGPL-2.1-or-later", "LicenseRef-scancode-unicode", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "GPL-3.0-or-later", "Zlib", "BSD-Source-Code", "ClArtistic", "LicenseRef-scancode-unknown-license-reference", "ISC", "NCSA", "LicenseRef-scancode-proprietary-license", "GPL-2.0-only", "CC-BY-4.0", "FSFULLR", "Minpack", "Unlicense", "BSL-1.0", "NAIST-2003", "LicenseRef-scancode-protobuf", "LicenseRef-scancode-public-domain", "Libpng", "Spencer-94", "BSD-2-Clause", "Intel", "GPL-1.0-or-later", "MPL-2.0"]
license_type: permissive
repo_name: Xilinx/Vitis-AI
snapshot_id: 31e664f7adff0958bb7d149883ab9c231efb3541
revision_id: f74ddc6ed086ba949b791626638717e21505dba2
branch_name: refs/heads/master
visit_date: 2023-08-31T02:44:51.029166
revision_date: 2023-07-27T06:50:28
committer_date: 2023-07-27T06:50:28
github_id: 215,649,623
star_events_count: 1,283
fork_events_count: 683
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-17T09:24:55
gha_created_at: 2019-10-16T21:41:54
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,767
extension: py
filename: mnist_cnn_ptq_mix_precision.py
content:
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import onnx
import onnxruntime
import tensorflow as tf
import numpy as np
import tempfile
import datetime
from tensorflow import keras
from tensorflow.keras import mixed_precision
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
from tensorflow_model_optimization.quantization.keras.vitis.layers import vitis_activation
from tensorflow.keras import layers, models
# Define the model architecture.
inputs = keras.layers.Input(shape=(28, 28), dtype=tf.float32)
x = keras.layers.Reshape(target_shape=(28, 28, 1))(inputs)
x = keras.layers.Conv2D(
filters=32, kernel_size=(3, 3), use_bias=True, activation='linear')(
x)
x = keras.layers.BatchNormalization(axis=[-1])(x)
y = x * 2
x = keras.layers.Concatenate()([x, y])
x = keras.layers.Activation('relu')(x)
x = keras.layers.DepthwiseConv2D(
kernel_size=(3, 3), use_bias=True, activation='linear')(
x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dropout(rate=0.1)(x)
x = keras.layers.Dense(10, dtype=tf.float32)(x)
predictions = x
model = keras.Model(inputs=inputs, outputs=predictions, name="mnist_model")
# Train the float model
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['sparse_categorical_accuracy'])
log_dir = "logs/float_fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir, histogram_freq=1)
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels))
model.save('float.h5')
# Post-Training Quantize
from tensorflow_model_optimization.quantization.keras import vitis_quantize
quantizer = vitis_quantize.VitisQuantizer(model)
# configure the 'concatenate' layer to use the bfloat16 datatype
quantized_model = quantizer.quantize_model(
layer_config={"concatenate": "bfloat16"},
calib_dataset=train_images[0:10],
include_cle=False,
cle_steps=10,
convert_to_fs_quantize_strategy=True,
output_format='onnx',
onnx_opset_version=13,
include_fast_ft=False)
onnx_model = onnx.load("quantize_results/quantized_model.onnx")
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession(
"quantize_results/quantized_model.onnx")
num_correct_top1 = 0
total_num_test = len(test_images)
for i in range(total_num_test):
test_data_x, test_data_y = test_images[i:i + 1], test_labels[i]
ort_inputs = {
ort_session.get_inputs()[0].name: test_data_x.astype(np.float32)
}
ort_outs = ort_session.run(None, ort_inputs)
ort_outs_top1 = np.argmax(ort_outs[0])
if test_data_y == ort_outs_top1:
num_correct_top1 += 1
acc_top1 = round(num_correct_top1 / total_num_test, 4)
print("onnx val_sparse_categorical_accuracy: {} ".format(acc_top1))
|
blob_id: cb2cbbb99e440aae58d433228a5c061e11d1dc64
directory_id: 6d54a7b26d0eb82152a549a6a9dfde656687752c
path: /config/common/cmake/make_gn_args.py
content_id: c6282bc0f8b45195e172d991e27816975e282cda
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"]
license_type: permissive
repo_name: project-chip/connectedhomeip
snapshot_id: 81a123d675cf527773f70047d1ed1c43be5ffe6d
revision_id: ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
branch_name: refs/heads/master
visit_date: 2023-09-01T11:43:37.546040
revision_date: 2023-09-01T08:01:32
committer_date: 2023-09-01T08:01:32
github_id: 244,694,174
star_events_count: 6,409
fork_events_count: 1,789
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:56:31
gha_created_at: 2020-03-03T17:05:10
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,084
extension: py
filename: make_gn_args.py
content:
#!/usr/bin/env python3
#
# Copyright (c) 2023 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
GN_SPECIAL_SEPARATOR = "+|+"
GN_CFLAG_EXCLUDES = [
'-fno-asynchronous-unwind-tables',
'-fno-common',
'-fno-defer-pop',
'-fno-reorder-functions',
'-ffunction-sections',
'-fdata-sections',
'-g*',
'-O*',
'-W*',
]
def write_gn_args(args):
if args.module:
sys.stdout.write('import("{}")\n'.format(args.module))
for key, value in args.arg:
sys.stdout.write('{} = {}\n'.format(key, value))
for key, value in args.arg_string:
        # Escape quote and dollar sign characters
filtered_value = value.replace('"', '\\"')
filtered_value = filtered_value.replace('$', '\\$')
sys.stdout.write('{} = "{}"\n'.format(key, filtered_value))
cflag_excludes = ', '.join(['"{}"'.format(exclude)
for exclude in GN_CFLAG_EXCLUDES])
for key, value in args.arg_cflags:
filtered_value = value.split(" -")
# Remove empty include paths and defines caused by Cmake generator expressions
filtered_value = filter(lambda v: v != "D", filtered_value)
filtered_value = filter(lambda v: v != "isystem", filtered_value)
        # Escape quote and dollar sign characters
filtered_value = map(lambda v: v.replace('"', '\\"'), filtered_value)
filtered_value = map(lambda v: v.replace('$', '\\$'), filtered_value)
# Remove white spaces around the argument and remove internal whitespace
# for correct splitting in string_split() function
filtered_value = map(lambda v: v.strip(), filtered_value)
filtered_value = map(lambda v: v.replace(' ', ''), filtered_value)
# Remove duplicates
filtered_value = list(dict.fromkeys(filtered_value))
sys.stdout.write('{} = filter_exclude(string_split("{}", "{}"), [{}])\n'.format(
key, "{}-".format(GN_SPECIAL_SEPARATOR).join(filtered_value), GN_SPECIAL_SEPARATOR, cflag_excludes))
def main():
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--module', action='store')
parser.add_argument('--arg', action='append', nargs=2, default=[])
parser.add_argument('--arg-string', action='append', nargs=2, default=[])
parser.add_argument('--arg-cflags', action='append', nargs=2, default=[])
args = parser.parse_args()
write_gn_args(args)
if __name__ == "__main__":
main()
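# Illustrative invocation (editor's sketch; the flags are the ones defined
# above, the values are hypothetical). Because of fromfile_prefix_chars='@',
# the same arguments may also be read from a response file:
#
#   python3 make_gn_args.py \
#       --module "//build/chip/chip_args.gni" \
#       --arg chip_enable_wifi true \
#       --arg-string chip_mdns "platform" \
#       --arg-cflags target_cflags "-DNDEBUG -Iinclude -O2"
#
# The GN args are written to stdout; when GN evaluates the generated
# filter_exclude() call, -O2 is dropped by the "-O*" pattern in
# GN_CFLAG_EXCLUDES.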
|
blob_id: 37397dfbaf9c9142e2a6114c3026a603ad3ac53e
directory_id: 933b0eef6909e52fb086015e1f73e9507aad9c3f
path: /Graph_Algorithms/src/Kosaraju/StronglyConnected.py
content_id: 0fa29b4d3a34f84eb3d868990819f5ed98e86484
detected_licenses: []
license_type: no_license
repo_name: codezoned/ScriptsDump
snapshot_id: c105641ee06b8bf148b9c2779b04eb1d0369a85f
revision_id: df6fcc47f5a73c7a5c3522f985e23b89fe56117e
branch_name: refs/heads/master
visit_date: 2023-04-29T03:12:40.610817
revision_date: 2022-11-26T12:22:33
committer_date: 2022-11-26T12:22:33
github_id: 141,881,901
star_events_count: 157
fork_events_count: 221
gha_license_id: null
gha_event_created_at: 2023-08-10T07:00:05
gha_created_at: 2018-07-22T09:17:56
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,693
extension: py
filename: StronglyConnected.py
content:
'''
Code to find the strongly connected components of a graph using Kosaraju's Algorithm
Time complexity: O(V + E)
'''
from collections import defaultdict
class Graph:
#Constructor that basically initiallises every new vertex in dict as an
#empty list
def __init__(self):
self.graph = defaultdict(list)
self.transpose = defaultdict(list)
self.vertexList = []
def addEdgeDirected(self, u, v, w = 1):
self.graph[u].append([v, w])
self.transpose[v].append([u, w])
if u not in self.vertexList:
self.vertexList.append(u)
if v not in self.vertexList:
self.vertexList.append(v)
def addEdgeUndirected(self, u, v, w = 1):
self.graph[u].append([v, w])
self.graph[v].append([u, w])
if u not in self.vertexList:
self.vertexList.append(u)
if v not in self.vertexList:
self.vertexList.append(v)
    # s here is the source node
    def BFS(self, s):
        color = ['w'] * len(self.graph)
        # Queue that will store the nodes in BFS
        queue = []
        queue.append(s)
        color[s] = 'g'
        while queue:
            s = queue.pop(0)  # dequeue operation
            color[s] = 'b'
            print(s, end=' ')
            for i in self.graph[s]:  # i is an [adjacent_vertex, weight] pair
                if color[i[0]] == 'w':
                    queue.append(i[0])
                    color[i[0]] = 'g'
    def DFS(self, s, time=0, startTime=None, endTime=None, visited=None, DFSList=None):
        # None defaults avoid state leaking between calls via mutable default arguments
        startTime = {} if startTime is None else startTime
        endTime = {} if endTime is None else endTime
        visited = defaultdict(bool) if visited is None else visited
        DFSList = [] if DFSList is None else DFSList
        startTime[s] = time
        visited[s] = True
        DFSList.append(s)
        for i in self.graph[s]:
            if not visited[i[0]]:
                time += 1
                self.DFS(i[0], time, startTime, endTime, visited, DFSList)
                endTime[i[0]] = time
        return DFSList
    def TopologicalSortUtil(self, v, visited, stack):
        visited[v] = True
        print(visited)
        for i in self.graph[v]:
            if not visited[i[0]]:
                self.TopologicalSortUtil(i[0], visited, stack)
        stack.insert(0, v)  # adding to bottom of stack same as adding to top then printing in reverse
    def TopologicalSort(self):
        # self.vertexList.sort()
        visited = defaultdict(bool)
        stack = []
        for i in self.vertexList:
            if not visited[i]:
                self.TopologicalSortUtil(i, visited, stack)
        print("The Graph vertices after topological sort are:")
        print(stack)
    def DFSUtil(self, s, visited):
        visited[s] = True
        print(s, end=' ')
        for i in self.transpose[s]:
            if not visited[i[0]]:
                self.DFSUtil(i[0], visited)
def FillOrder(self, s, visited, stack):
visited[s] = True
for i in self.graph[s]:
if (visited[i[0]] == False):
self.FillOrder(i[0], visited, stack)
stack.append(s)
def Kosarajus(self):
#Step1: Create the stack
stack = []
visited = defaultdict(bool)
for i in self.vertexList:
if (visited[i] == False):
self.FillOrder(i, visited, stack)
#Step2: Empty the stack, and print the SCC's
visited = defaultdict(bool)
        while stack:
            i = stack.pop()
            if not visited[i]:
                self.DFSUtil(i, visited)
                print("")
#Testing on graph
g = Graph()
g.addEdgeDirected(0, 3)
g.addEdgeDirected(3, 2)
g.addEdgeDirected(2, 1)
g.addEdgeDirected(1, 0)
g.addEdgeDirected(4, 2)
g.addEdgeDirected(5, 4)
print "Following are strongly connected components in given graph"
g.Kosarajus()
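# Expected output (worked out by hand for the sample graph above; the SCCs are
# {0, 1, 2, 3}, {4} and {5}, emitted in reverse order of finishing times):
#   Following are strongly connected components in given graph
#   5
#   4
#   0 1 2 3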
|
blob_id: 8c52d3bb8ff4e1abafee98f19384e462e1c0939e
directory_id: 27b86f422246a78704e0e84983b2630533a47db6
path: /docs/source/tutorials/src/ocs/insert.py
content_id: 309088f88a8bd22589b03bd7007bc173c8b233f7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: mozman/ezdxf
snapshot_id: 7512decd600896960660f0f580cab815bf0d7a51
revision_id: ba6ab0264dcb6833173042a37b1b5ae878d75113
branch_name: refs/heads/master
visit_date: 2023-09-01T11:55:13.462105
revision_date: 2023-08-15T11:50:05
committer_date: 2023-08-15T12:00:04
github_id: 79,697,117
star_events_count: 750
fork_events_count: 194
gha_license_id: MIT
gha_event_created_at: 2023-09-14T09:40:41
gha_created_at: 2017-01-22T05:55:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,364
extension: py
filename: insert.py
content:
# Copyright (c) 2019-2020 Manfred Moitzi
# License: MIT License
import math
import ezdxf
from pathlib import Path
OUT_DIR = Path('~/Desktop/Outbox').expanduser()
from ezdxf.lldxf.const import MIRROR_X
from ezdxf.math import UCS, Vec3, Matrix44, Y_AXIS, X_AXIS, Z_AXIS
RED = 1
GREEN = 3
BLUE = 5
def setup_csys(blk, size=3):
# draw axis
blk.add_line((0, 0), (size, 0), dxfattribs={'color': RED}) # x-axis
blk.add_line((0, 0), (0, size), dxfattribs={'color': GREEN}) # y-axis
blk.add_line((0, 0), (0, 0, size), dxfattribs={'color': BLUE}) # z-axis
# place text
size2 = size / 2
txt_props = {
'style': 'OpenSans',
'height': size / 2,
'color': RED,
}
# XY-plane text
blk.add_text('XY', dxfattribs=txt_props).set_pos((size2, size2), align='MIDDLE_CENTER')
# YZ-plane text
ucs = UCS(ux=(0, 1, 0), uy=(0, 0, 1))
txt_props['extrusion'] = ucs.uz
txt_props['color'] = GREEN
blk.add_text('YZ', dxfattribs=txt_props).set_pos(ucs.to_ocs((size2, size2)), align='MIDDLE_CENTER')
# XZ-plane text
ucs = UCS(ux=(1, 0, 0), uy=(0, 0, 1))
txt_props['extrusion'] = ucs.uz
txt_props['color'] = BLUE
txt_props['text_generation_flag'] = MIRROR_X
blk.add_text('XZ', dxfattribs=txt_props).set_pos(ucs.to_ocs((size2, size2)), align='MIDDLE_CENTER')
# rotate UCS around an arbitrary axis:
def ucs_rotation(ucs: UCS, axis: Vec3, angle: float):
# new in ezdxf v0.11: UCS.rotate(axis, angle)
t = Matrix44.axis_rotate(axis, math.radians(angle))
ux, uy, uz = t.transform_vertices([ucs.ux, ucs.uy, ucs.uz])
return UCS(origin=ucs.origin, ux=ux, uy=uy, uz=uz)
doc = ezdxf.new('R2010', setup=True)
blk = doc.blocks.new('CSYS')
setup_csys(blk)
msp = doc.modelspace()
ucs = ucs_rotation(UCS(), axis=Y_AXIS, angle=90)
# transform insert location to OCS
insert = ucs.to_ocs((0, 0, 0))
# rotation angle about the z-axis (= WCS x-axis)
rotation = ucs.to_ocs_angle_deg(15)
msp.add_blockref('CSYS', insert, dxfattribs={
'extrusion': ucs.uz,
'rotation': rotation,
})
# To rotate a block reference around the block x-axis,
# you have to find the rotated z-axis (= extrusion vector)
# of the rotated block reference:
# t is a transformation matrix to rotate 15 degree around the x-axis
t = Matrix44.axis_rotate(axis=X_AXIS, angle=math.radians(15))
# transform block z-axis into new UCS z-axis (= extrusion vector)
uz = Vec3(t.transform(Z_AXIS))
# create new UCS at the insertion point, because we are rotating around the x-axis,
# ux is the same as the WCS x-axis and uz is the rotated z-axis.
ucs = UCS(origin=(1, 2, 0), ux=X_AXIS, uz=uz)
# transform insert location to OCS, block base_point=(0, 0, 0)
insert = ucs.to_ocs((0, 0, 0))
# for this case a rotation around the z-axis is not required
rotation = 0
blockref = msp.add_blockref('CSYS', insert, dxfattribs={
'extrusion': ucs.uz,
'rotation': rotation,
})
# translate a block reference with an established OCS
translation = Vec3(-3, -1, 1)
# get established OCS
ocs = blockref.ocs()
# get insert location in WCS
actual_wcs_location = ocs.to_wcs(blockref.dxf.insert)
# translate location
new_wcs_location = actual_wcs_location + translation
# convert WCS location to OCS location
blockref.dxf.insert = ocs.from_wcs(new_wcs_location)
# rotate a block reference with an established OCS around the block y-axis by 90 degrees
ocs = blockref.ocs()
# convert block y-axis (= rotation axis) into WCS vector
rotation_axis = ocs.to_wcs((0, 1, 0))
# convert local z-axis (=extrusion vector) into WCS vector
local_z_axis = ocs.to_wcs((0, 0, 1))
# build transformation matrix
t = Matrix44.axis_rotate(axis=rotation_axis, angle=math.radians(-90))
uz = t.transform(local_z_axis)
uy = rotation_axis
# the block reference origin stays at the same location, no rotation needed
wcs_insert = ocs.to_wcs(blockref.dxf.insert)
# build new UCS to convert WCS locations and angles into OCS
ucs = UCS(origin=wcs_insert, uy=uy, uz=uz)
# set new OCS
blockref.dxf.extrusion = ucs.uz
# set new insert
blockref.dxf.insert = ucs.to_ocs((0, 0, 0))
# set new rotation: we do not rotate the block reference around the local z-axis,
# but the new block x-axis (0 deg) differs from OCS x-axis and has to be adjusted
blockref.dxf.rotation = ucs.to_ocs_angle_deg(0)
doc.set_modelspace_vport(5)
doc.saveas(OUT_DIR / 'ocs_insert.dxf')
|
blob_id: 86d08d25e933d1c34edda0bb3432b99f4f522237
directory_id: 06afc066ffb460ada63dfa9e0e1201cd890c9058
path: /test/python/test_so2.py
content_id: aeeac28c849a1d6c25a783f34060e8a506042133
detected_licenses: ["MIT"]
license_type: permissive
repo_name: artivis/manif
snapshot_id: 9be04fd896924eab4812aa33719e93ee480ff8ba
revision_id: bb3f6758ae467b7f24def71861798d131f157032
branch_name: refs/heads/devel
visit_date: 2023-08-20T03:18:29.379652
revision_date: 2023-07-17T21:00:39
committer_date: 2023-07-17T21:00:39
github_id: 129,808,016
star_events_count: 1,277
fork_events_count: 215
gha_license_id: MIT
gha_event_created_at: 2023-07-24T15:41:58
gha_created_at: 2018-04-16T21:28:32
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 801
extension: py
filename: test_so2.py
content:
import numpy as np
import pytest
from manifpy import SO2, SO2Tangent
def test_constructor():
state = SO2(0.17)
assert 0.17 == state.angle()
state = SO2(1, 0)
assert 0 == state.angle()
delta = SO2Tangent(0.17)
assert 0.17 == delta.angle()
def test_accessors():
state = SO2.Identity()
assert 1 == state.real()
assert 0 == state.imag()
assert 0 == state.angle()
delta = SO2Tangent.Zero()
assert 0 == delta.angle()
def test_transform():
state = SO2.Identity()
transform = state.transform()
assert (3, 3) == transform.shape
assert (np.identity(3) == transform).all()
def test_rotation():
state = SO2.Identity()
rotation = state.rotation()
assert (2, 2) == rotation.shape
assert (np.identity(2) == rotation).all()
|
9577676d20c0b4f17333655d2b2e3875b333890c
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster/_grpc/__init__.py
|
5aedcd53852c1e854e449af53323d60bb6ba6043
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
__init__.py
|
"""Tools for accessing core Dagster APIs over a GRPC mechanism.
GRPC is intended to be used in all cases where host processes communicate with user processes, both
locally (over UDS on MacOS and Unix, and over a local port on Windows) and when communicating with
remote Dagster user processes (e.g., containers).
The GRPC layer is not intended to supplant the dagster-graphql layer, which should still be used to
drive web frontends like the Dagster UI.
"""
from .client import (
DagsterGrpcClient as DagsterGrpcClient,
client_heartbeat_thread as client_heartbeat_thread,
ephemeral_grpc_api_client as ephemeral_grpc_api_client,
)
from .impl import core_execute_run as core_execute_run
from .server import (
DagsterGrpcServer as DagsterGrpcServer,
GrpcServerProcess as GrpcServerProcess,
)
from .types import (
CanCancelExecutionRequest as CanCancelExecutionRequest,
CanCancelExecutionResult as CanCancelExecutionResult,
CancelExecutionRequest as CancelExecutionRequest,
CancelExecutionResult as CancelExecutionResult,
ExecuteExternalJobArgs as ExecuteExternalJobArgs,
ExecuteRunArgs as ExecuteRunArgs,
ExecuteStepArgs as ExecuteStepArgs,
ExecutionPlanSnapshotArgs as ExecutionPlanSnapshotArgs,
ExternalJobArgs as ExternalJobArgs,
ExternalScheduleExecutionArgs as ExternalScheduleExecutionArgs,
GetCurrentImageResult as GetCurrentImageResult,
JobSubsetSnapshotArgs as JobSubsetSnapshotArgs,
ListRepositoriesInput as ListRepositoriesInput,
ListRepositoriesResponse as ListRepositoriesResponse,
LoadableRepositorySymbol as LoadableRepositorySymbol,
NotebookPathArgs as NotebookPathArgs,
PartitionArgs as PartitionArgs,
PartitionNamesArgs as PartitionNamesArgs,
PartitionSetExecutionParamArgs as PartitionSetExecutionParamArgs,
ResumeRunArgs as ResumeRunArgs,
SensorExecutionArgs as SensorExecutionArgs,
ShutdownServerResult as ShutdownServerResult,
StartRunResult as StartRunResult,
)
from .utils import get_loadable_targets as get_loadable_targets
|
638a1b7e03248dafab785b93c55659eb7b23b804
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_run/abs_run.py
|
5110714166806b7f0118aca03303a03198579118
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
abs_run.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.ops.math.abs import abs
from tests.common.base import get_rtol_atol
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array
def abs_run(shape, dtype, attrs={}):
# Result_Numpy
input_shape = [shape]
input_dtype = [dtype]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(abs, input_shape, input_dtype, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
exp_output, inputs, output = gen_date(dtype, shape)
return mod, exp_output, (inputs, output)
else:
return mod
else:
mod = utils.op_build_test(abs, input_shape, input_dtype, kernel_name='abs', attrs=attrs)
exp_output, inputs, output = gen_date(dtype, shape)
acu_output = utils.mod_launch(mod, (inputs, output), expect=exp_output)
# compare result
rtol, atol = get_rtol_atol("abs", dtype)
TestCase_Result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
target_name = attrs["target"].split()[0]
if attrs.get("profiling", False):
target_name = attrs["target"].split()[0]
data, output = to_tvm_nd_array([inputs, output], akg.tvm.context(target_name, 0))
target_profiling(mod, data, output, target=target_name, repeat_time=attrs["repeat_times"])
return inputs, acu_output, exp_output, TestCase_Result
def gen_date(dtype, shape):
inputs = np.random.uniform(-1, 0, size=shape).astype(dtype)
exp_output = np.abs(inputs)
# inputs and output to hold the data
output = np.full(shape, np.nan, dtype)
return exp_output, inputs, output
|
9d3fb1fac7c369d1566933da2205959470282c7b
|
4d829ecf98a554e52daf946db065bab3ec15677d
|
/explorations/kafka_router.py
|
0cd2ad207d439b32e096e0126b5624952cdf3d2a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
SuperCowPowers/zat
|
5c6179542359b673c4f30f1705dca62c3bd2be4b
|
1d83a44a4d171b1f99a48b6706da4f495c9f63f9
|
refs/heads/main
| 2023-08-31T17:17:19.706642
| 2023-04-03T00:20:42
| 2023-04-03T00:20:42
| 85,858,065
| 191
| 46
|
MIT
| 2023-04-03T00:20:43
| 2017-03-22T17:40:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,372
|
py
|
kafka_router.py
|
"""KafkaRouter: The class takes in N Kafka input topics and produces M Kafka output topics"""
import sys
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import NoBrokersAvailable
from collections import defaultdict
import json
from pprint import pprint
from datetime import datetime
# Local imports
from zat.utils import signal_utils
class KafkaRouter(object):
"""KafkaRouter: The class takes in N Kafka input topics and produces M Kafka output topics
Args:
        offset (str): initial consumer offset, either 'latest' or 'earliest' (default='latest')
input_servers (list of strings): Kafka Bootstrap servers (default=['localhost:9092'])
output_servers (list of strings): Kafka Bootstrap servers (default=['localhost:9092'])
"""
def __init__(self, offset='latest', input_servers=['localhost:9092'], output_servers=['localhost:9092']):
"""Initialize the KafkaRouter class"""
# Setup the input and output
print('Initializing KafkaRouter: {!r}'.format(input_servers))
try:
self.input_pipe = KafkaConsumer(bootstrap_servers=input_servers, auto_offset_reset=offset,
value_deserializer=lambda x: json.loads(x.decode('utf8')))
except NoBrokersAvailable:
            print('Could not connect to Kafka bootstrap servers: {!r}'.format(input_servers))
sys.exit(-1)
try:
self.output_pipe = KafkaProducer(bootstrap_servers=output_servers,
value_serializer=lambda x: json.dumps(x).encode('utf8'))
except NoBrokersAvailable:
            print('Could not connect to Kafka bootstrap servers: {!r}'.format(output_servers))
sys.exit(-1)
# Gates
self.route_info = {}
self.routes = defaultdict(list)
# Topics we're listening to
self.topics = set()
def add_route(self, topic, callback):
"""Add a logic route that pulls in a message from the input_pipe and sends messages
to the output_pipe with the specified topic"""
self.route_info[callback.__name__] = callback
self.routes[topic].append(callback)
# Add this topic to our input pipe
if topic not in self.topics:
self.topics.add(topic)
self.input_pipe.subscribe(list(self.topics))
print('Adding Topic {:s}'.format(topic))
print('Topics: {!r}'.format(self.input_pipe.subscription()))
def run(self):
"""Run the KafkaRouter with all of the registered logic routes"""
with signal_utils.signal_catcher(self.exit_program):
# Now lets process our Kafka Messages
for message in self.input_pipe:
topic = message.topic
message = message.value
for route in self.routes[topic]:
topic = route(message)
if topic:
self.output_pipe.send(topic, message)
# self.output_pipe.poll() # What do we want to do here?
def list_routes(self):
print('{!r}'.format(self.route_info))
def exit_program(self):
"""Exit on Signal"""
print('Exiting Program...')
sys.exit()
# Simple test of the functionality
def disabled_test():
"""Test for KafkaRouter Class"""
from zat.utils import geo_lookup
my_geo = geo_lookup.GeoLookup()
# Make some simple logic routes
def north_south(message):
if (not message['local_orig']) and message['local_resp']:
return 'incoming'
elif message['local_orig'] and not message['local_resp']:
return 'outgoing'
return None
def incoming_info(message):
geo_info = my_geo.query_ip(message['id.orig_h'])
timestamp = datetime.fromtimestamp(message['ts'])
print('\nINCOMING')
print(timestamp, geo_info['country_code'], geo_info['region_name'], message['id.orig_h'], '-->', message['id.resp_h'],
message['proto'], message.get('service', 'unknown'))
return None
def outgoing_info(message):
ip = message['id.resp_h']
# Skip broad-cast, multi-cast
if ip[:3] in ['255', '239', '224']:
return None
# Get Geographical Information and Check for outside US traffic
geo_info = my_geo.query_ip(message['id.resp_h'])
if not geo_info or geo_info['country_code'] == 'US':
return None
# Add geo info and route to 'non_us'
message['country_code'] = geo_info['country_code']
message['region_name'] = geo_info['region_name']
return 'non_us'
def print_info(message):
timestamp = datetime.fromtimestamp(message['ts'])
print(timestamp, message['country_code'], message['region_name'], message['id.orig_h'], '-->', message['id.resp_h'],
message['proto'], message.get('service', 'unknown'))
return None
# Create the class and test it
router = KafkaRouter(offset='earliest')
router.add_route('conn', north_south)
router.add_route('outgoing', outgoing_info)
router.add_route('incoming', incoming_info)
router.add_route('non_us', print_info)
router.run()
if __name__ == "__main__":
disabled_test()
|
e2292c3e9eb54f7a099276c4c8acf4314f02e4c8
|
e9869359c839c8c175ae7877bc35dcfdfe4058f8
|
/kornia/augmentation/random_generator/_2d/probability.py
|
e342049e667fb208a045c29f7aadbe2713a157bb
|
[
"Apache-2.0"
] |
permissive
|
kornia/kornia
|
80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae
|
1e0f8baa7318c05b17ea6dbb48605691bca8972f
|
refs/heads/master
| 2023-08-31T06:32:45.960859
| 2023-08-30T21:59:41
| 2023-08-30T21:59:41
| 145,693,916
| 7,351
| 833
|
Apache-2.0
| 2023-09-12T21:59:29
| 2018-08-22T10:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,964
|
py
|
probability.py
|
from typing import Dict, Tuple
import torch
from torch.distributions import Bernoulli
from kornia.augmentation.random_generator.base import RandomGeneratorBase
from kornia.augmentation.utils import _adapted_sampling, _common_param_check
from kornia.core import Tensor, tensor
__all__ = ["ProbabilityGenerator"]
class ProbabilityGenerator(RandomGeneratorBase):
r"""Generate random probabilities for a batch of inputs.
Args:
p: probability to generate an 1-d binary mask. Default value is 0.5.
Returns:
A dict of parameters to be passed for transformation.
- probs (Tensor): element-wise probabilities with a shape of (B,).
Note:
The generated random numbers are not reproducible across different devices and dtypes. By default,
the parameters will be generated on CPU in float32. This can be changed by calling
``self.set_rng_device_and_dtype(device="cuda", dtype=torch.float64)``.
"""
def __init__(self, p: float = 0.5) -> None:
super().__init__()
self.p = p
def __repr__(self) -> str:
repr = f"p={self.p}"
return repr
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
p = torch.tensor(float(self.p), device=device, dtype=dtype)
self.sampler = Bernoulli(p)
def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool = False) -> Dict[str, Tensor]:
batch_size = batch_shape[0]
probs_mask: Tensor = _adapted_sampling((batch_size,), self.sampler, same_on_batch).bool()
return {"probs": probs_mask}
def random_prob_generator(
batch_size: int,
p: float = 0.5,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> Tensor:
r"""Generate random probabilities for a batch of inputs.
Args:
batch_size (int): the number of images.
p (float): probability to generate an 1-d binary mask. Default value is 0.5.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
Tensor: parameters to be passed for transformation.
- probs (Tensor): element-wise probabilities with a shape of (B,).
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
_common_param_check(batch_size, same_on_batch)
if not isinstance(p, (int, float)) or p > 1 or p < 0:
raise TypeError(f"The probability should be a float number within [0, 1]. Got {type(p)}.")
_bernoulli = Bernoulli(tensor(float(p), device=device, dtype=dtype))
probs_mask: Tensor = _adapted_sampling((batch_size,), _bernoulli, same_on_batch).bool()
return probs_mask
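if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the kornia sources): draw a
    # boolean mask that decides which of 4 batch elements get augmented.
    mask = random_prob_generator(batch_size=4, p=0.5)
    print(mask.shape, mask.dtype)  # torch.Size([4]) torch.bool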
|
2225617440b775847fbbb158db02a504603f09dc
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/win/winrt-target-platform-version/winrt-target-platform-version.gyp
|
dbcfac696288f94117baac976f3ce07cd6948d5f
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 1,367
|
gyp
|
winrt-target-platform-version.gyp
|
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'enable_winrt_10_platversion_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '10.0',
'msvs_target_platform_version':'10.0.10240.0',
      'msvs_target_platform_minversion':'10.0.10240.0',
'sources': [
'dllmain.cc',
],
},
{
'target_name': 'enable_winrt_10_platversion_nominver_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '10.0',
'msvs_target_platform_version':'10.0.10240.0',
'sources': [
'dllmain.cc',
],
},
{
'target_name': 'enable_winrt_9_platversion_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '10.0',
'msvs_target_platform_version':'9.0.0.0',
      'msvs_target_platform_minversion':'9.0.0.0',
'sources': [
'dllmain.cc',
],
},
{
'target_name': 'enable_winrt_missing_platversion_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '10.0',
'sources': [
'dllmain.cc',
],
},
]
}
|
a527d45dfbcccd982494626e4238dd2e2d19490c
|
38bd31e53bdc4d8ed8a06544931ad9c3db5b6f5b
|
/ava_evaluation/np_box_ops.py
|
7cadc80f77097e5f73d987ac4b12a626ab4cf0f9
|
[
"Apache-2.0"
] |
permissive
|
facebookresearch/SlowFast
|
1ee6f666d20e5adab8e86949649bcaf4bf6cd820
|
6092dad7be32bb1d6b71fe1bade258dc8b492fe3
|
refs/heads/main
| 2023-09-03T12:15:35.478138
| 2023-08-26T20:55:56
| 2023-08-26T20:55:56
| 203,465,734
| 6,221
| 1,325
|
Apache-2.0
| 2023-09-12T23:51:28
| 2019-08-20T22:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,565
|
py
|
np_box_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
        a numpy array with shape [N] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
        a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin,
)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin,
)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = (
np.expand_dims(area1, axis=1)
+ np.expand_dims(area2, axis=0)
- intersect
)
return intersect / union
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas
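if __name__ == "__main__":
    # Illustrative sanity check (not part of the original module): two unit
    # boxes sharing half their area give iou = 0.5 / (1 + 1 - 0.5) = 1/3 and
    # ioa = 0.5 / area(boxes2) = 0.5.
    b1 = np.array([[0.0, 0.0, 1.0, 1.0]])
    b2 = np.array([[0.0, 0.5, 1.0, 1.5]])
    assert np.isclose(iou(b1, b2)[0, 0], 1.0 / 3.0)
    assert np.isclose(ioa(b1, b2)[0, 0], 0.5)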
|
12dcf09d5aa625e997365129f9e0951726dd5454
|
e04a5b20f946c5033f24d4dd8acda395a98747c5
|
/deeplearning-image-cpu/python-runnables/download-models/runnable.py
|
98b20c2af17ae767f6e04cd04a87e81b9bf8e278
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
dataiku/dataiku-contrib
|
2a2f2fb420d7f2ab49b82d80659cc6f6ec1d8f61
|
9a9f189e8a544a81c205d8a8b3779d4517b88653
|
refs/heads/master
| 2023-09-04T03:33:58.625093
| 2023-04-26T08:17:34
| 2023-04-26T08:17:34
| 45,074,604
| 103
| 94
|
Apache-2.0
| 2023-06-08T21:29:07
| 2015-10-27T22:41:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,418
|
py
|
runnable.py
|
from dataiku.runnables import Runnable
import dataiku
import urllib2, sys
import requests
import json
import os
import dl_image_toolbox_utils as utils
import pandas as pd
import constants
import time
# We deactivate the GPU for this script because all the methods only need to
# fetch information about the model and do not perform any computation
utils.deactivate_gpu()
class MyRunnable(Runnable):
"""The base interface for a Python runnable"""
def __init__(self, project_key, config, plugin_config):
"""
:param project_key: the project in which the runnable executes
:param config: the dict of the configuration of the object
:param plugin_config: contains the plugin settings
"""
self.project_key = project_key
self.config = config
self.plugin_config = plugin_config
self.client = dataiku.api_client()
def get_progress_target(self):
"""
If the runnable will return some progress info, have this function return a tuple of
(target, unit) where unit is one of: SIZE, FILES, RECORDS, NONE
"""
return (100, 'NONE')
def run(self, progress_callback):
# Retrieving parameters
output_folder_name = self.config.get('outputName', '')
model = self.config.get('model', '')
architecture, trained_on = model.split('_')
# Creating new Managed Folder if needed
project = self.client.get_project(self.project_key)
output_folder_found = False
for folder in project.list_managed_folders():
if output_folder_name == folder['name']:
output_folder = project.get_managed_folder(folder['id'])
output_folder_found = True
break
if not output_folder_found:
output_folder = project.create_managed_folder(output_folder_name)
output_folder_path = dataiku.Folder(output_folder.get_definition()["id"], project_key=self.project_key).get_path()
# Building config file
config = {
"architecture": architecture,
"trained_on": trained_on,
"extract_layer_default_index": utils.get_extract_layer_index(architecture, trained_on)
}
# Downloading weights
url_to_weights = utils.get_weights_urls(architecture, trained_on)
def update_percent(percent, last_update_time):
new_time = time.time()
if (new_time - last_update_time) > 3:
progress_callback(percent)
return new_time
else:
return last_update_time
def download_files_to_managed_folder(folder_path, files_info, chunk_size=8192):
total_size = 0
bytes_so_far = 0
for file_info in files_info:
response = requests.get(file_info["url"], stream=True)
total_size += int(response.headers.get('content-length'))
file_info["response"] = response
update_time = time.time()
for file_info in files_info:
with open(utils.get_file_path(folder_path, file_info["filename"]), "wb") as f:
for content in file_info["response"].iter_content(chunk_size=chunk_size):
bytes_so_far += len(content)
                        # Only scale to 80% because the model summary still needs to be computed after the download
percent = int(float(bytes_so_far) / total_size * 80)
update_time = update_percent(percent, update_time)
f.write(content)
files_to_dl = [
{"url": url_to_weights["top"], "filename": utils.get_weights_filename(output_folder_path, config)},
{"url": url_to_weights["no_top"], "filename": utils.get_weights_filename(output_folder_path, config, "_notop")}
]
if trained_on == constants.IMAGENET:
# Downloading mapping id <-> name for imagenet classes
# File used by Keras in all its 'decode_predictions' methods
# Found here : https://github.com/keras-team/keras/blob/2.1.1/keras/applications/imagenet_utils.py
imagenet_id_class_mapping_url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
imagenet_class_mapping_temp_file = "imagenet_classes_mapping.json"
files_to_dl.append({"url": imagenet_id_class_mapping_url, "filename": imagenet_class_mapping_temp_file})
output_folder.put_file(constants.CONFIG_FILE, json.dumps(config))
download_files_to_managed_folder(output_folder_path, files_to_dl)
if trained_on == constants.IMAGENET:
# Convert class mapping from json to csv
mapping_df = pd.read_json(utils.get_file_path(output_folder_path, imagenet_class_mapping_temp_file), orient="index")
mapping_df = mapping_df.reset_index()
mapping_df = mapping_df.rename(columns={"index": "id", 1: "className"})[["id", "className"]]
mapping_df.to_csv(utils.get_file_path(output_folder_path, constants.MODEL_LABELS_FILE), index=False, sep=",")
os.remove(utils.get_file_path(output_folder_path, imagenet_class_mapping_temp_file))
# Computing model info
utils.save_model_info(output_folder_path)
return "<span>DONE</span>"
|
638a708816a7c8c5adc0b87aab60b15fd7bcac8c
|
e62c8ee151671b999c6720ab8c2aa2f96c0d7f55
|
/tests/unit/providers/singleton/test_delegated_singleton_py2_py3.py
|
8a76ec0be0d71f1216bb3a87bee5678ab33b69f6
|
[] |
permissive
|
ets-labs/python-dependency-injector
|
45645973456bb6494386ad12103d06e1f1be2cd8
|
cc2304e46e054ae08dc12995428759fbfb51af10
|
refs/heads/master
| 2023-08-23T03:59:53.509743
| 2022-12-19T03:14:24
| 2022-12-19T03:14:24
| 28,774,758
| 3,217
| 273
|
BSD-3-Clause
| 2023-09-08T21:46:18
| 2015-01-04T13:23:05
|
Python
|
UTF-8
|
Python
| false
| false
| 548
|
py
|
test_delegated_singleton_py2_py3.py
|
"""Delegated singleton provider tests."""
from dependency_injector import providers
from pytest import fixture
from .common import Example
PROVIDER_CLASSES = [
providers.DelegatedSingleton,
providers.DelegatedThreadLocalSingleton,
providers.DelegatedThreadSafeSingleton,
]
@fixture(params=PROVIDER_CLASSES)
def singleton_cls(request):
return request.param
@fixture
def provider(singleton_cls):
return singleton_cls(Example)
def test_is_delegated_provider(provider):
assert providers.is_delegated(provider) is True
|
c018be47ee42065c2291abbaecae2fac0f72a750
|
241a75b26be206c85d5321ba371b7221a7f9caea
|
/scripts/test/test_derp_cli.py
|
b4d6233a767e8b12f5ace4b678e09ccbc182afc6
|
[
"BSD-3-Clause"
] |
permissive
|
facebook/facebook360_dep
|
e9d44c50420e8402da6f16fb2387b6b35f927dfa
|
5826244049ed45e5e38d27bbc0af826c2caee286
|
refs/heads/main
| 2023-09-01T21:03:08.734504
| 2023-04-07T20:30:07
| 2023-04-07T20:30:07
| 204,788,647
| 248
| 65
|
NOASSERTION
| 2023-04-27T10:31:15
| 2019-08-27T20:53:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
test_derp_cli.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Unit test class for DerpCLI.
This class subclasses the DepTest class, which provides some utility functions around
the base unittest.TestCase class. This script can be run as part of the overall test suite
via run_tests.py or standalone.
Example:
To run the test independently (which will produce the standard Python unittest success
output), simply run:
$ python test_derp_cli.py \
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
"""
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
from . import test_config as config
from .test_master_class import DepTest, generic_main
class DerpCLITest(DepTest):
"""Unit test class for DerpCLI.
Attributes:
name (str): String representation of the class name.
"""
def parse_rephoto_errors(self, log_dir):
"""Parses the relevant rephotography errors for red, green, and blue from logs.
Args:
log_dir (str): Path to the directory where glog files are saved.
Returns:
dict[str, float]: Map from relevant metric to its value. The map contains
the keys "error_r", "error_g", and "error_b."
"""
info_path = os.path.join(log_dir, "ComputeRephotographyErrors.INFO")
with open(info_path, "r") as f:
lines = f.readlines()
        # rephoto_error_line refers to the last log line, which has the format:
# <timestamp> ComputeRephotographyErrors.cpp:<line_number> TOTAL average MSSIM: R <error_r>%, G <error_g>%, B <error_b>%
rephoto_error_line = lines[-1]
parts = rephoto_error_line.split("%")
parts = [float(parts[i].split(" ")[-1]) for i in range(3)] # returns R, G, B
part_labels = ["error_r", "error_g", "error_b"]
errors = dict(zip(part_labels, parts))
return errors
def test_run(self):
"""Run test for DerpCLI.
Raises:
AssertionError: If incorrect results are produced.
"""
derp_command = self.run_app("DerpCLI")
# Rephotography error is computed on a particular level (e.g. finest level)
self.io_args.disparity = os.path.join(
self.io_args.disparity_levels, config.TEST_LEVEL
)
self.io_args.color = os.path.join(self.io_args.color, config.TEST_LEVEL)
rephoto_command = self.run_app("ComputeRephotographyErrors")
derp_records = self.parse_rephoto_errors(self.io_args.log_dir)
total_rephoto_error = (
derp_records["error_r"] + derp_records["error_g"] + derp_records["error_b"]
) / 3
record = {
"test_name": self.__class__.__name__,
"total_rephoto_error": total_rephoto_error,
"r_rephoto_error": derp_records["error_r"],
"g_rephoto_error": derp_records["error_g"],
"b_rephoto_error": derp_records["error_b"],
"derpcli_invocation": derp_command,
"computerephotography_invocation": rephoto_command,
}
self.check_metrics(record)
if __name__ == "__main__":
generic_main([DerpCLITest])
|
2fcebdde0b7970fac995f55f6e0fff0936d370e6
|
6b1677e3d641ed89b834de5ff1cf070997b6cdba
|
/memcnn/models/tests/test_models.py
|
b6393806ea433644fb943fc3c7d9713763980311
|
[
"MIT"
] |
permissive
|
silvandeleemput/memcnn
|
e3c7639261eead441d84626c78c3d8b7fb2a9627
|
a71169b32d4f8568b333d95a4303d8e05d1e1af5
|
refs/heads/master
| 2023-05-10T17:01:39.711636
| 2023-05-10T08:04:48
| 2023-05-10T08:08:13
| 122,979,177
| 251
| 29
|
MIT
| 2023-05-10T07:50:35
| 2018-02-26T14:11:31
|
Python
|
UTF-8
|
Python
| false
| false
| 3,273
|
py
|
test_models.py
|
import torch
import torch.nn
from memcnn import create_coupling, InvertibleModuleWrapper
class MultiplicationInverse(torch.nn.Module):
def __init__(self, factor=2):
super(MultiplicationInverse, self).__init__()
self.factor = torch.nn.Parameter(torch.ones(1) * factor)
def forward(self, x):
return x * self.factor
def inverse(self, y):
return y / self.factor
class IdentityInverse(torch.nn.Module):
def __init__(self, multiply_forward=False, multiply_inverse=False):
super(IdentityInverse, self).__init__()
self.factor = torch.nn.Parameter(torch.ones(1))
self.multiply_forward = multiply_forward
self.multiply_inverse = multiply_inverse
def forward(self, x):
if self.multiply_forward:
return x * self.factor
else:
return x
def inverse(self, y):
if self.multiply_inverse:
return y * self.factor
else:
return y
class MultiSharedOutputs(torch.nn.Module):
# pylint: disable=R0201
def forward(self, x):
y = x * x
return y, y
# pylint: disable=R0201
def inverse(self, y, y2):
x = torch.max(torch.sqrt(y), torch.sqrt(y2))
return x
class SubModule(torch.nn.Module):
def __init__(self, in_filters=5, out_filters=5):
super(SubModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(out_filters)
self.conv = torch.nn.Conv2d(in_filters, out_filters, (3, 3), padding=1)
def forward(self, x):
return self.bn(self.conv(x))
class SubModuleStack(torch.nn.Module):
def __init__(self, Gm, coupling='additive', depth=10, implementation_fwd=-1, implementation_bwd=-1,
keep_input=False, adapter=None, num_bwd_passes=1):
super(SubModuleStack, self).__init__()
fn = create_coupling(Fm=Gm, Gm=Gm, coupling=coupling, implementation_fwd=implementation_fwd, implementation_bwd=implementation_bwd, adapter=adapter)
self.stack = torch.nn.ModuleList(
[InvertibleModuleWrapper(fn=fn, keep_input=keep_input, keep_input_inverse=keep_input, num_bwd_passes=num_bwd_passes) for _ in range(depth)]
)
def forward(self, x):
for rev_module in self.stack:
x = rev_module.forward(x)
return x
def inverse(self, y):
for rev_module in reversed(self.stack):
y = rev_module.inverse(y)
return y
class SplitChannels(torch.nn.Module):
def __init__(self, split_location):
self.split_location = split_location
super(SplitChannels, self).__init__()
def forward(self, x):
return (x[:, :self.split_location, :].clone(),
x[:, self.split_location:, :].clone())
# pylint: disable=R0201
def inverse(self, x, y):
return torch.cat([x, y], dim=1)
class ConcatenateChannels(torch.nn.Module):
def __init__(self, split_location):
self.split_location = split_location
super(ConcatenateChannels, self).__init__()
# pylint: disable=R0201
def forward(self, x, y):
return torch.cat([x, y], dim=1)
def inverse(self, x):
return (x[:, :self.split_location, :].clone(),
x[:, self.split_location:, :].clone())
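if __name__ == "__main__":
    # Illustrative round-trip check (a sketch, not part of the original test
    # module): SplitChannels and ConcatenateChannels defined above are exact
    # inverses of each other.
    split = SplitChannels(2)
    concat = ConcatenateChannels(2)
    x = torch.rand(1, 5, 4, 4)
    a, b = split(x)
    assert torch.equal(concat(a, b), x)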
|
07effbe0629f3cbeb50004800b89bfb3ff39e057
|
3d62372eb5e17bf135616de4f196c14a384adf36
|
/Tests/test_Phylo_networkx.py
|
829fb3f2daadd5ac1f45f8d318b19f88cd3d4ad6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-biopython"
] |
permissive
|
biopython/biopython
|
817c9a995a49528937bebefe99f3f5b9054f8947
|
d416809344f1e345fbabbdaca4dd6dcf441e53bd
|
refs/heads/master
| 2023-08-28T05:26:46.916988
| 2023-08-23T14:11:24
| 2023-08-23T14:11:24
| 151,541
| 3,669
| 1,939
|
NOASSERTION
| 2023-09-12T10:19:46
| 2009-03-15T21:09:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
test_Phylo_networkx.py
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for Bio.Phylo functions with external dependencies."""
import unittest
from io import StringIO
from Bio import Phylo
# Check for any missing dependencies at the top level so we can skip
from Bio import MissingExternalDependencyError
try:
import networkx
except ImportError:
raise MissingExternalDependencyError(
"Install networkx if you wish to use it with Bio.Phylo"
) from None
# Example PhyloXML file
EX_DOLLO = "PhyloXML/o_tol_332_d_dollo.xml"
EX_APAF = "PhyloXML/apaf.xml"
class UtilTests(unittest.TestCase):
"""Tests for various utility functions."""
def test_to_networkx(self):
"""Tree to Graph conversion, if networkx is available."""
tree = Phylo.read(EX_DOLLO, "phyloxml")
G = Phylo.to_networkx(tree)
self.assertEqual(len(G.nodes()), 659)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
7b5dcffb018e4659c81d6d8a603659b732266731
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/tests/unit/env_mgr/test_env_mgr.py
|
4871a7a47e909cedf41b5939197d9c6741625a41
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,635
|
py
|
test_env_mgr.py
|
"""Test runway.env_mgr."""
# pylint: disable=unused-argument
# pyright: basic
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Optional
import pytest
from runway.env_mgr import EnvManager
if TYPE_CHECKING:
from pathlib import Path
from pytest import LogCaptureFixture, MonkeyPatch
from pytest_mock import MockerFixture
class TestEnvManager:
"""Test runway.env_mgr.EnvManager."""
def test___init___darwin(
self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture
) -> None:
"""Test __init__ on Darwin platform."""
home = cd_tmp_path / "home"
mocker.patch("runway.env_mgr.Path.home", return_value=home)
obj = EnvManager("test-bin", "test-dir")
assert not obj.current_version
assert obj.command_suffix == ""
assert obj.env_dir_name == ".test-dir"
assert obj.env_dir == home / ".test-dir"
assert obj.versions_dir == home / ".test-dir" / "versions"
def test___init___windows(
self,
platform_windows: None,
cd_tmp_path: Path,
mocker: MockerFixture,
monkeypatch: MonkeyPatch,
) -> None:
"""Test __init__ on Windows platform."""
home = cd_tmp_path / "home"
mocker.patch("runway.env_mgr.Path.home", return_value=home)
monkeypatch.delenv("APPDATA", raising=False)
obj = EnvManager("test-bin", "test-dir")
expected_env_dir = home / "AppData" / "Roaming" / "test-dir"
assert not obj.current_version
assert obj.command_suffix == ".exe"
assert obj.env_dir_name == "test-dir"
assert obj.env_dir == expected_env_dir
assert obj.versions_dir == expected_env_dir / "versions"
def test___init___windows_appdata(
self, platform_windows: None, cd_tmp_path: Path, monkeypatch: MonkeyPatch
) -> None:
"""Test __init__ on Windows platform."""
monkeypatch.setenv("APPDATA", str(cd_tmp_path / "custom_path"))
obj = EnvManager("test-bin", "test-dir")
expected_env_dir = cd_tmp_path / "custom_path" / "test-dir"
assert not obj.current_version
assert obj.command_suffix == ".exe"
assert obj.env_dir_name == "test-dir"
assert obj.env_dir == expected_env_dir
assert obj.versions_dir == expected_env_dir / "versions"
def test_bin(
self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture
) -> None:
"""Test bin."""
home = cd_tmp_path / "home"
mocker.patch("runway.env_mgr.Path.home", return_value=home)
obj = EnvManager("test-bin", "test-dir")
obj.current_version = "1.0.0"
assert obj.bin == home / ".test-dir" / "versions" / "1.0.0" / "test-bin"
@pytest.mark.parametrize("version", ["1.0.0", None])
def test_install(self, version: Optional[str]) -> None:
"""Test install."""
with pytest.raises(NotImplementedError):
assert EnvManager("", "").install(version)
def test_list_installed(self) -> None:
"""Test list_installed."""
with pytest.raises(NotImplementedError):
assert EnvManager("", "").list_installed()
def test_path(self, cd_tmp_path: Path) -> None:
"""Test how path attribute is set."""
assert EnvManager("", "", path=cd_tmp_path).path == cd_tmp_path
assert EnvManager("", "").path == cd_tmp_path
@pytest.mark.parametrize("exists", [False, True])
def test_uninstall(
self,
caplog: LogCaptureFixture,
exists: bool,
mocker: MockerFixture,
tmp_path: Path,
) -> None:
"""Test uninstall."""
caplog.set_level(logging.INFO, logger="runway.env_mgr")
mocker.patch.object(EnvManager, "versions_dir", tmp_path)
obj = EnvManager("foo", "")
version = "1.0.0"
version_dir = tmp_path / version
bin_name = "foo" + obj.command_suffix
if exists:
version_dir.mkdir()
(version_dir / "foo").touch()
assert obj.uninstall(version)
assert (
f"uninstalling {bin_name} {version} from {tmp_path}..."
in caplog.messages
)
assert f"uninstalled {bin_name} {version}" in caplog.messages
else:
assert not obj.uninstall(version)
assert f"{bin_name} {version} not installed" in caplog.messages
def test_version_file(self) -> None:
"""Test version_file."""
with pytest.raises(NotImplementedError):
assert EnvManager("", "").version_file
|
ea5f4b6351350c638b544d5bcdad8872be96ab1a
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/about__user_agent/get_random_win_UserAgent.py
|
7126c8da8bd0d53188a3226b6fa0eaae59c91032
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 814
|
py
|
get_random_win_UserAgent.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import random
class UserAgent:
@classmethod
def get_win_version(cls) -> float:
return 10.0
@classmethod
def get_chrome_version(cls) -> str:
a = random.randint(40, 69)
b = random.randint(2987, 3497)
c = random.randint(80, 140)
return f"{a}.0.{b}.{c}"
@classmethod
def get(cls) -> str:
a = f"Mozilla/5.0 (Windows NT {cls.get_win_version()}; Win64; x64)"
b = f"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{cls.get_chrome_version()} Safari/537.36"
return f"{a} {b}"
if __name__ == "__main__":
print(UserAgent.get())
# Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36
# (KHTML, like Gecko) Chrome/41.0.2993.140 Safari/537.36
|
5b49dc06fde81274b892bce6897ab56a0776f385
|
cb8f7d14f42dfae3be7ce3c84bcd11ac15ff1dab
|
/asciimol/app/config.py
|
9296f1477c1e65a29e681b5609a3d72d7917ad37
|
[
"BSD-2-Clause"
] |
permissive
|
dewberryants/asciiMol
|
9b435a363fd2af3a0c5b1446e43cf236c206e886
|
53e30aa813ed0b8999a334277af9663bb16bd6eb
|
refs/heads/master
| 2023-09-05T00:06:46.868062
| 2023-05-24T12:15:33
| 2023-05-24T12:15:33
| 292,568,583
| 329
| 15
|
BSD-2-Clause
| 2023-03-03T08:51:04
| 2020-09-03T12:48:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,196
|
py
|
config.py
|
import argparse
import numpy as np
from asciimol.app import map_colors, map_radii
from asciimol.app.colors import init_curses_color_pairs
from asciimol.app.io import handle_io
class Config:
"""
    The runtime configuration object. Contains the currently opened file, as well as any other configurables.
"""
def __init__(self):
self.coordinates = None
self.symbols = None
self.colors = None
self.bonds = None
self.atm_counts = None
def parse(self):
parser = argparse.ArgumentParser()
parser.add_argument('XYZFILE', metavar='XYZFILE or SMILES', type=str,
help='Specify an .xyz file or a SMILES string (e.g., CC) to open and display.')
opts = parser.parse_args()
self.atm_counts, self.coordinates, self.symbols = handle_io(opts.XYZFILE)
if self.atm_counts:
self._setup_bonds()
return self.atm_counts
def post_setup(self):
self._setup_colors()
def _setup_bonds(self):
if self.bonds:
return
self.bonds = list()
offset = 0
for n, counts in enumerate(self.atm_counts):
radii = np.array(list(map_radii(self.symbols[offset:offset + counts])), dtype='float32')
xyz = np.array(self.coordinates[offset:offset + counts], dtype='float32')
rsq = (radii[..., np.newaxis] + radii + 0.41) ** 2
dx = xyz[:, 0, np.newaxis] - xyz[:, 0]
dy = xyz[:, 1, np.newaxis] - xyz[:, 1]
dz = xyz[:, 2, np.newaxis] - xyz[:, 2]
dsq = dx ** 2 + dy ** 2 + dz ** 2
np.fill_diagonal(dsq, np.inf)
bonds = np.argwhere(np.triu(dsq) < np.triu(rsq))
bound = np.isin(np.arange(counts), bonds[:, 0]) + \
np.isin(np.arange(counts), bonds[:, 1])
unbound = np.hstack((np.argwhere(bound == 0), np.argwhere(bound == 0)))
self.bonds.append(np.vstack((bonds, unbound)))
offset += counts
def _setup_colors(self):
if self.colors:
return
colors = list(map_colors(self.symbols))
self.colors = list(init_curses_color_pairs(colors))
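if __name__ == "__main__":
    # Illustrative check of the bond criterion used in _setup_bonds above
    # (sample numbers, not part of asciiMol): two atoms with covalent radii
    # of 0.76 A placed 1.5 A apart bond, since 1.5**2 < (0.76 + 0.76 + 0.41)**2.
    radii = np.array([0.76, 0.76], dtype="float32")
    xyz = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]], dtype="float32")
    rsq = (radii[..., np.newaxis] + radii + 0.41) ** 2
    diff = xyz[:, np.newaxis, :] - xyz[np.newaxis, :, :]
    dsq = (diff ** 2).sum(axis=-1)
    np.fill_diagonal(dsq, np.inf)
    print(np.argwhere(np.triu(dsq) < np.triu(rsq)))  # -> [[0 1]]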
|
59550de9e2f823296c38b0dfe32a5bcc2a0fd59c
|
35b45b5225f911072287b7f0888f4ef4cc70f3d9
|
/tests/test_writers.py
|
adfb8baae04543ff393ec67a8d012b86ee650e6c
|
[
"BSD-3-Clause"
] |
permissive
|
heuer/segno
|
54e9b583dbc33b016715bb13f97a9013a37cc9d4
|
11556378fa8949fa5ad6dddbf8cc5f4a667038af
|
refs/heads/master
| 2023-06-21T02:01:45.620206
| 2023-05-02T22:09:31
| 2023-05-02T22:09:31
| 64,920,252
| 441
| 59
|
BSD-3-Clause
| 2023-06-15T05:00:05
| 2016-08-04T09:08:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
test_writers.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2022 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Tests against the ``writers`` module.
"""
from __future__ import absolute_import, unicode_literals
import os
import io
import tempfile
import pytest
import segno
from segno import consts, writers
def test_writable_stream():
buff = io.BytesIO()
with writers.writable(buff, 'wb') as f:
f.write(b'x')
assert not buff.closed
def test_writable_stream2():
buff = io.StringIO()
with writers.writable(buff, 'wt') as f:
f.write('x')
assert not buff.closed
def test_writable_stream3():
buff = io.StringIO()
with pytest.raises(Exception):
with writers.writable(buff, 'wt') as f:
f.write('x')
raise Exception()
assert not f.closed
def test_writable_not_stream():
fn = tempfile.NamedTemporaryFile()
name = fn.name
fn.close()
try:
with writers.writable(name, 'wb') as f:
assert name == f.name
f.write(b'Segno')
finally:
os.remove(name)
def test_writable_not_stream2():
fn = tempfile.NamedTemporaryFile()
name = fn.name
fn.close()
try:
with writers.writable(name, 'wt') as f:
assert name == f.name
f.write('Segno')
finally:
os.remove(name)
def test_writable_not_stream3():
fn = tempfile.NamedTemporaryFile()
name = fn.name
fn.close()
with pytest.raises(Exception):
with writers.writable(name, 'wb') as f:
assert name == f.name
f.write(b'Segno')
raise Exception()
assert f.closed
def test_colormap_dark_light():
qr = segno.make('123', version=7)
width, height = len(qr.matrix[0]), len(qr.matrix)
cm = writers._make_colormap(width, height, dark='blue', light='white')
assert 15 == len(cm)
def test_colormap_lesser_version_7():
qr = segno.make('123', version=6)
width, height = len(qr.matrix[0]), len(qr.matrix)
cm = writers._make_colormap(width, height, dark='blue', light='white')
assert 13 == len(cm)
assert consts.TYPE_VERSION_DARK not in cm
assert consts.TYPE_VERSION_LIGHT not in cm
def test_colormap_micro():
qr = segno.make_micro('123')
width, height = len(qr.matrix[0]), len(qr.matrix)
cm = writers._make_colormap(width, height, dark='blue', light='white')
assert 10 == len(cm)
assert consts.TYPE_VERSION_DARK not in cm
assert consts.TYPE_VERSION_LIGHT not in cm
assert consts.TYPE_ALIGNMENT_PATTERN_DARK not in cm
assert consts.TYPE_ALIGNMENT_PATTERN_LIGHT not in cm
assert consts.TYPE_DARKMODULE not in cm
if __name__ == '__main__':
pytest.main([__file__])
|
de531fb46630237ec229fc2c697c25f29258b1fc
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoLocalMuon/DTSegment/python/dt4DExtendedSegments_CombPatternReco4D_ParamDrift_cfi.py
|
a439ffc7455504ce3d3a13cbaa0b02736432f9f1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
dt4DExtendedSegments_CombPatternReco4D_ParamDrift_cfi.py
|
import FWCore.ParameterSet.Config as cms
from RecoLocalMuon.DTSegment.dt4DSegments_CombPatternReco4D_ParamDrift_cfi import *
dt4DSegments.recHits2DLabel= "dt2DExtendedSegments"
|
6ba9df01069dd56ac80e285dbb891f84a1d7c77e
|
a165b266e96c44bb27c6886ce88d88ba381aa20e
|
/agents/swarm/swarm_agent.py
|
3eeb2253ef20cfcbd175c2194cbb5012f9ef5513
|
[
"MIT"
] |
permissive
|
SaltieRL/Saltie
|
ab616f261f480ebb8d10c32c0a37e046e8188d6f
|
a491ecfa5c77583ec370a0a378d27865dbd8da63
|
refs/heads/master
| 2021-06-25T06:35:02.232354
| 2019-01-26T16:50:43
| 2019-01-26T16:50:43
| 111,743,860
| 148
| 32
|
MIT
| 2019-01-26T16:50:44
| 2017-11-22T23:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
swarm_agent.py
|
import os
from rlbot.botmanager.helper_process_request import HelperProcessRequest
from rlbot.agents.base_agent import BaseAgent, BOT_CONFIG_AGENT_HEADER
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
from rlbot.utils.logging_utils import get_logger
from framework.utils import get_repo_directory
import sys
class SwarmAgent(BaseAgent):
pipe = None
model = None
input_formatter = None
output_formatter = None
game_memory = None
optimizer = None
loss_function = None
def __init__(self, name, team, index):
super().__init__(name, team, index)
sys.path.insert(0, get_repo_directory()) # this is for separate process imports
self.logger = get_logger(name)
self.manager_path = None
self.model_path = None
self.load_model = None
def get_helper_process_request(self) -> HelperProcessRequest:
from multiprocessing import Pipe
file = self.get_manager_path()
key = 'swarm_manager'
request = HelperProcessRequest(file, key)
self.pipe, request.pipe = Pipe(False)
request.model_path = self.model_path
request.load_model = self.load_model
return request
def load_config(self, config_object_header: ConfigHeader):
self.model_path = config_object_header.get('model_path')
self.load_model = config_object_header.getboolean('load_model')
def get_manager_path(self):
raise NotImplementedError
def create_input_formatter(self):
raise NotImplementedError
def create_output_formatter(self):
raise NotImplementedError
def initialize_agent(self):
self.model = self.pipe.recv()
self.input_formatter = self.create_input_formatter()
self.output_formatter = self.create_output_formatter()
self.game_memory = self.pipe.recv()
@staticmethod
def create_agent_configurations(config: ConfigObject):
super(SwarmAgent, SwarmAgent).create_agent_configurations(config)
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('model_path', str, default=os.path.join('models', 'cool_atba.mdl'),
description='Path to the model file')
        params.add_value('load_model', bool, default=False, description='Whether the model should be loaded')
|
003ed9435c6e6e8cc4c59e56c51114c50322d88e
|
14a42aa9e707f70312647fbf86adb96fce7a2f97
|
/src/_pytest/pastebin.py
|
22c7a622373f5561273b424d180f5195d808fab3
|
[
"MIT"
] |
permissive
|
pytest-dev/pytest
|
a0374d435f2b46e8a475b4b26085ab4f3d04aa67
|
afb8d66e42a3449476cf9bf4526705b1e36ff5a5
|
refs/heads/main
| 2023-09-01T14:12:11.863580
| 2023-08-30T11:52:14
| 2023-08-30T11:52:14
| 37,489,525
| 11,423
| 3,125
|
MIT
| 2023-09-12T22:17:22
| 2015-06-15T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,949
|
py
|
pastebin.py
|
"""Submit failure or test session information to a pastebin service."""
import tempfile
from io import StringIO
from typing import IO
from typing import Union
import pytest
from _pytest.config import Config
from _pytest.config import create_terminal_writer
from _pytest.config.argparsing import Parser
from _pytest.stash import StashKey
from _pytest.terminal import TerminalReporter
pastebinfile_key = StashKey[IO[bytes]]()
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting")
group._addoption(
"--pastebin",
metavar="mode",
action="store",
dest="pastebin",
default=None,
choices=["failed", "all"],
help="Send failed|all info to bpaste.net pastebin service",
)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin("terminalreporter")
# If no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a worker node
# when using pytest-xdist, for example.
if tr is not None:
# pastebin file will be UTF-8 encoded binary file.
config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b")
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if isinstance(s, str):
s = s.encode("utf-8")
config.stash[pastebinfile_key].write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config: Config) -> None:
if pastebinfile_key in config.stash:
pastebinfile = config.stash[pastebinfile_key]
# Get terminal contents and delete file.
pastebinfile.seek(0)
sessionlog = pastebinfile.read()
pastebinfile.close()
del config.stash[pastebinfile_key]
# Undo our patching in the terminal reporter.
tr = config.pluginmanager.getplugin("terminalreporter")
del tr._tw.__dict__["write"]
# Write summary.
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents: Union[str, bytes]) -> str:
"""Create a new paste using the bpaste.net service.
:contents: Paste contents string.
:returns: URL to the pasted contents, or an error message.
"""
import re
from urllib.request import urlopen
from urllib.parse import urlencode
params = {"code": contents, "lexer": "text", "expiry": "1week"}
url = "https://bpa.st"
try:
response: str = (
urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8")
)
except OSError as exc_info: # urllib errors
return "bad response: %s" % exc_info
m = re.search(r'href="/raw/(\w+)"', response)
if m:
return f"{url}/show/{m.group(1)}"
else:
return "bad response: invalid format ('" + response + "')"
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
if terminalreporter.config.option.pastebin != "failed":
return
if "failed" in terminalreporter.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats["failed"]:
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = terminalreporter._getfailureheadline(rep)
file = StringIO()
tw = create_terminal_writer(terminalreporter.config, file)
rep.toterminal(tw)
s = file.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
terminalreporter.write_line(f"{msg} --> {pastebinurl}")
|
c25eb02d3bfe9a75d5a2af5198c05fcf3a037e52
|
f1c480ca385f60aa325a23b8c90cfe6bb98e34ec
|
/heat/nn/tests/test_data_parallel.py
|
979c0dad3e6665e9ee7988bedcedbb47d2ff24ff
|
[
"MIT"
] |
permissive
|
helmholtz-analytics/heat
|
32d56f59a97b089741838df5aea9e4f2e5d90cc6
|
c43a2401786a8aa93d10617aafecb8a28114d2e9
|
refs/heads/main
| 2023-09-03T12:11:38.716928
| 2023-08-29T11:32:46
| 2023-08-29T11:32:46
| 133,808,899
| 181
| 59
|
MIT
| 2023-09-13T11:36:12
| 2018-05-17T12:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 7,170
|
py
|
test_data_parallel.py
|
import heat as ht
import torch
import unittest
class TestDataParallel(unittest.TestCase):
def test_data_parallel(self):
import heat.nn.functional as F
with self.assertRaises(TypeError):
ht.utils.data.datatools.DataLoader("asdf")
class Model(ht.nn.Module):
def __init__(self):
super(Model, self).__init__()
# 1 input image channel, 6 output channels, 3x3 square convolution
# kernel
self.conv1 = ht.nn.Conv2d(1, 6, 3)
self.conv2 = ht.nn.Conv2d(6, 16, 3)
# an affine operation: y = Wx + b
self.fc1 = ht.nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension
self.fc2 = ht.nn.Linear(120, 84)
self.fc3 = ht.nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = self.conv1(x)
x = F.max_pool2d(F.relu(x), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class TestDataset(ht.utils.data.Dataset):
def __init__(self, array, ishuffle):
super(TestDataset, self).__init__(array, ishuffle=ishuffle)
def __getitem__(self, item):
return self.data[item]
def Ishuffle(self):
if not self.test_set:
ht.utils.data.dataset_ishuffle(self, attrs=[["data", None]])
def Shuffle(self):
if not self.test_set:
ht.utils.data.dataset_shuffle(self, attrs=[["data", None]])
# create model and move it to GPU with id rank
model = Model()
optimizer = ht.optim.SGD(model.parameters(), lr=0.001)
with self.assertRaises(TypeError):
ht.optim.DataParallelOptimizer(optimizer, "asdf")
dp_optimizer = ht.optim.DataParallelOptimizer(optimizer, True)
ht.random.seed(1)
torch.random.manual_seed(1)
labels = torch.randn((2, 10), device=ht.get_device().torch_device)
data = ht.random.rand(2 * ht.MPI_WORLD.size, 1, 32, 32, split=0)
dataset = TestDataset(data, ishuffle=True)
dataloader = ht.utils.data.datatools.DataLoader(dataset=dataset, batch_size=2)
# there is only 1 batch on each process (data size[0] is 2 * number of processes, and the batch size is 2)
self.assertTrue(len(dataloader) == 1)
ht_model = ht.nn.DataParallel(
model, data.comm, dp_optimizer, blocking_parameter_updates=True
)
if str(ht.get_device()).startswith("gpu"):
ht_model.to(ht.get_device().torch_device)
lim = 1e-4
loss_fn = torch.nn.MSELoss()
for _ in range(2):
for data in dataloader:
self.assertEqual(data.shape[0], 2)
dp_optimizer.zero_grad()
ht_outputs = ht_model(data)
loss_fn(ht_outputs, labels).backward()
dp_optimizer.step()
for p in ht_model.parameters():
p0dim = p.shape[0]
hld = ht.resplit(ht.array(p, is_split=0)).larray.clone()
hld_list = [hld[i * p0dim : (i + 1) * p0dim] for i in range(ht.MPI_WORLD.size - 1)]
for i in range(1, len(hld_list)):
self.assertTrue(torch.allclose(hld_list[0], hld_list[i], rtol=lim, atol=lim))
model = Model()
optimizer = ht.optim.SGD(model.parameters(), lr=0.001)
dp_optimizer = ht.optim.DataParallelOptimizer(optimizer, False)
labels = torch.randn((2, 10), device=ht.get_device().torch_device)
data = ht.random.rand(2 * ht.MPI_WORLD.size, 1, 32, 32, split=0)
dataset = ht.utils.data.Dataset(data, ishuffle=False)
dataloader = ht.utils.data.datatools.DataLoader(dataset=dataset, batch_size=2)
ht_model = ht.nn.DataParallel(
model, data.comm, dp_optimizer, blocking_parameter_updates=False
)
if str(ht.get_device()).startswith("gpu"):
ht_model.to(ht.get_device().torch_device)
with self.assertRaises(TypeError):
ht.nn.DataParallel(model, data.comm, "asdf")
loss_fn = torch.nn.MSELoss()
for _ in range(2):
for data in dataloader:
self.assertEqual(data.shape[0], 2)
dp_optimizer.zero_grad()
ht_outputs = ht_model(data)
loss_fn(ht_outputs, labels).backward()
dp_optimizer.step()
for p in ht_model.parameters():
p0dim = p.shape[0]
hld = ht.resplit(ht.array(p, is_split=0)).larray.clone()
hld_list = [hld[i * p0dim : (i + 1) * p0dim] for i in range(ht.MPI_WORLD.size - 1)]
for i in range(1, len(hld_list)):
self.assertTrue(torch.allclose(hld_list[0], hld_list[i], rtol=lim, atol=lim))
model = Model()
optimizer = ht.optim.SGD(model.parameters(), lr=0.001)
dp_optimizer = ht.optim.DataParallelOptimizer(optimizer, False)
labels = torch.randn((2, 10), device=ht.get_device().torch_device)
data = ht.random.rand(2 * ht.MPI_WORLD.size, 1, 32, 32, split=0)
dataset = ht.utils.data.Dataset(data, ishuffle=True)
dataloader = ht.utils.data.datatools.DataLoader(dataset=dataset, batch_size=2)
ht_model = ht.nn.DataParallel(
model, data.comm, dp_optimizer, blocking_parameter_updates=False
)
if str(ht.get_device()).startswith("gpu"):
ht_model.to(ht.get_device().torch_device)
for _ in range(2):
for data in dataloader:
self.assertEqual(data.shape[0], 2)
dp_optimizer.zero_grad()
ht_outputs = ht_model(data)
loss_fn(ht_outputs, labels).backward()
dp_optimizer.step()
for p in ht_model.parameters():
p0dim = p.shape[0]
hld = ht.resplit(ht.array(p, is_split=0)).larray.clone()
hld_list = [hld[i * p0dim : (i + 1) * p0dim] for i in range(ht.MPI_WORLD.size - 1)]
for i in range(1, len(hld_list)):
self.assertTrue(torch.allclose(hld_list[0], hld_list[i], rtol=lim, atol=lim))
with self.assertWarns(Warning):
ht_model = ht.nn.DataParallel(
model, ht.MPI_WORLD, [dp_optimizer, dp_optimizer], blocking_parameter_updates=False
)
# NOTE: this will throw a warning: this is expected
self.assertTrue(ht_model.blocking_parameter_updates)
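# --- Minimal sketch (illustrative, not part of the original file) ---
# The setup pattern exercised above, condensed; assumes heat (ht) is
# installed and the script runs under MPI, and reuses the Model class
# defined in the test:
#     model = Model()
#     optimizer = ht.optim.SGD(model.parameters(), lr=0.001)
#     dp_optimizer = ht.optim.DataParallelOptimizer(optimizer, True)  # blocking
#     ht_model = ht.nn.DataParallel(model, ht.MPI_WORLD, dp_optimizer,
#                                   blocking_parameter_updates=True)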
|
0f70fcbcf53f855886a4aa185cbad49597b2e390
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/demos/gesture_recognition_demo/python/gesture_recognition_demo/visualizer.py
|
bd03f7231715199f2cb6950c0670816b0a159f78
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
visualizer.py
|
"""
Copyright (c) 2019-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from multiprocessing import Process, Queue, Value
import cv2
import numpy as np
class Visualizer:
"""Class that allows to play video sources with different speed"""
def __init__(self, trg_fps=60):
"""Constructor"""
self._trg_time_step = 1. / float(trg_fps)
self._last_key = Value('i', -1, lock=True)
self._need_stop = Value('i', False, lock=False)
self._worker_process = None
self._tasks = {}
def register_window(self, name):
"""Allocates resources for the new window"""
if self._worker_process is not None and self._worker_process.is_alive():
            raise RuntimeError('Cannot add a window while the visualizer is running')
self._tasks[name] = Queue(1)
def get_key(self):
"""Returns the value of pressed key"""
with self._last_key.get_lock():
out_key = self._last_key.value
self._last_key.value = -1
return out_key
def get_queue(self, name):
if name not in self._tasks:
raise ValueError('Unknown name of queue: {}'.format(name))
return self._tasks[name]
def put_queue(self, frame, name):
"""Adds frame in the queue of the specified window"""
        if name not in self._tasks:
raise ValueError('Cannot show unregistered window: {}'.format(name))
self._tasks[name].put(np.copy(frame), True)
def start(self):
"""Starts internal threads"""
if self._worker_process is not None and self._worker_process.is_alive():
return
if len(self._tasks) == 0:
raise RuntimeError('Cannot start without registered windows')
self._need_stop.value = False
self._worker_process = Process(target=self._worker,
args=(self._tasks, self._last_key,
self._trg_time_step, self._need_stop))
self._worker_process.daemon = True
self._worker_process.start()
def release(self):
"""Stops playing and releases internal storages"""
if self._worker_process is not None:
self._need_stop.value = True
self._worker_process.join()
@staticmethod
def _worker(tasks, last_key, trg_time_step, need_stop):
"""Shows new frames in appropriate screens"""
while not need_stop.value:
start_time = time.perf_counter()
for name, frame_queue in tasks.items():
if not frame_queue.empty():
frame = frame_queue.get(True)
cv2.imshow(name, frame)
key = cv2.waitKey(1)
if key != -1:
last_key.value = key
end_time = time.perf_counter()
elapsed_time = end_time - start_time
rest_time = trg_time_step - elapsed_time
if rest_time > 0.0:
time.sleep(rest_time)
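# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal driver for the Visualizer above; assumes a display is available
# for cv2.imshow. The all-black frames are placeholders for a real video
# source.
if __name__ == '__main__':
    visualizer = Visualizer(trg_fps=30)
    visualizer.register_window('demo')  # must be registered before start()
    visualizer.start()
    try:
        for _ in range(90):  # roughly three seconds at 30 FPS
            visualizer.put_queue(np.zeros((240, 320, 3), dtype=np.uint8), 'demo')
            if visualizer.get_key() == 27:  # stop early on ESC
                break
    finally:
        visualizer.release()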
|
7061d707f3ad76ff68718e27b44aa7d03294519d
|
04e02c56f90d7ce4ada1fd36017facc90f5345a8
|
/all/policies/soft_deterministic.py
|
11f7ca2547355c1da83cb9004a33704e84958053
|
[
"MIT"
] |
permissive
|
cpnota/autonomous-learning-library
|
d34a317cb68b6360c450786add7053d02f69f19b
|
8e68f0b21cd74bb5efc6d68fde45c26a0e26d6c2
|
refs/heads/master
| 2023-07-19T06:20:12.485433
| 2022-08-31T19:43:36
| 2022-08-31T19:43:36
| 156,611,625
| 647
| 74
|
MIT
| 2022-10-03T14:03:14
| 2018-11-07T21:39:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,405
|
py
|
soft_deterministic.py
|
import torch
from all.approximation import Approximation
from all.nn import RLNetwork
class SoftDeterministicPolicy(Approximation):
'''
A "soft" deterministic policy compatible with soft actor-critic (SAC).
Args:
model (torch.nn.Module): A Pytorch module representing the policy network.
The input shape should be the same as the shape of the state (or feature) space,
            and the output shape should be double the size of the action space
The first n outputs will be the unscaled mean of the action for each dimension,
and the second n outputs will be the logarithm of the variance.
        optimizer (torch.optim.Optimizer): An optimizer initialized with the
model parameters, e.g. SGD, Adam, RMSprop, etc.
action_space (gym.spaces.Box): The Box representing the action space.
kwargs (optional): Any other arguments accepted by all.approximation.Approximation
'''
def __init__(
self,
model,
optimizer=None,
space=None,
name="policy",
**kwargs
):
model = SoftDeterministicPolicyNetwork(model, space)
self._inner_model = model
super().__init__(model, optimizer, name=name, **kwargs)
class SoftDeterministicPolicyNetwork(RLNetwork):
def __init__(self, model, space):
super().__init__(model)
self._action_dim = space.shape[0]
self._tanh_scale = torch.tensor((space.high - space.low) / 2).to(self.device)
self._tanh_mean = torch.tensor((space.high + space.low) / 2).to(self.device)
def forward(self, state):
outputs = super().forward(state)
normal = self._normal(outputs)
if self.training:
action, log_prob = self._sample(normal)
return action, log_prob
return self._squash(normal.loc)
def _normal(self, outputs):
means = outputs[..., 0:self._action_dim]
logvars = outputs[..., self._action_dim:]
std = logvars.mul(0.5).exp_()
return torch.distributions.normal.Normal(means, std)
def _sample(self, normal):
raw = normal.rsample()
log_prob = self._log_prob(normal, raw)
return self._squash(raw), log_prob
def _log_prob(self, normal, raw):
'''
Compute the log probability of a raw action after the action is squashed.
Both inputs act on the raw underlying distribution.
Because tanh_mean does not affect the density, we can ignore it.
        However, tanh_scale will affect the relative contribution of each component.
        See Appendix C in the Soft Actor-Critic paper.
Args:
normal (torch.distributions.normal.Normal): The "raw" normal distribution.
raw (torch.Tensor): The "raw" action.
Returns:
            torch.Tensor: The log probability of the raw action, accounting for the effects of tanh.
'''
log_prob = normal.log_prob(raw)
log_prob -= torch.log(1 - torch.tanh(raw).pow(2) + 1e-6)
log_prob -= torch.log(self._tanh_scale)
return log_prob.sum(-1)
def _squash(self, x):
return torch.tanh(x) * self._tanh_scale + self._tanh_mean
def to(self, device):
self._tanh_mean = self._tanh_mean.to(device)
self._tanh_scale = self._tanh_scale.to(device)
return super().to(device)
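# --- Illustrative check (not part of the original file) ---
# Numerically reproduces the tanh change-of-variables used in _log_prob
# above for a 1-D action with unit tanh_scale: for a = tanh(x) with
# x ~ Normal, log p(a) = log p(x) - log(1 - tanh(x)^2).
if __name__ == '__main__':
    normal = torch.distributions.normal.Normal(torch.zeros(1), torch.ones(1))
    raw = normal.sample()
    squashed_log_prob = normal.log_prob(raw) - torch.log(1 - torch.tanh(raw).pow(2) + 1e-6)
    print(float(torch.tanh(raw)), float(squashed_log_prob))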
|
5555254c2e03ca5adc9ebd754bab9cb0de71bb5b
|
9cc6f9d9eed9aceb5efa56e3b2f364900df11051
|
/improver_tests/ensemble_copula_coupling/test_RebadgePercentilesAsRealizations.py
|
79c00aa7745e28c46c2ba0385281b40fdad1e80b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
metoppv/improver
|
8553a4f8b93c88291bde0db8f5dfd7b577c04b92
|
cd2c9019944345df1e703bf8f625db537ad9f559
|
refs/heads/master
| 2023-08-30T19:01:04.946698
| 2023-08-25T13:57:20
| 2023-08-25T13:57:20
| 85,334,761
| 101
| 88
|
BSD-3-Clause
| 2023-09-14T19:07:45
| 2017-03-17T16:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,738
|
py
|
test_RebadgePercentilesAsRealizations.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_copula_coupling.RebadgePercentilesAsRealizations` class.
"""
import unittest
import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from iris.exceptions import InvalidCubeError
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
RebadgePercentilesAsRealizations as Plugin,
)
from improver.synthetic_data.set_up_test_cubes import set_up_percentile_cube
from .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS
class Test_process(IrisTest):
"""Test the process method of the
RebadgePercentilesAsRealizations plugin."""
def setUp(self):
"""Set up temperature percentile cube for testing"""
self.cube = set_up_percentile_cube(
np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
np.array([25, 50, 75], dtype=np.float32),
)
def test_basic(self):
"""Test that a cube is produced with a realization dimension"""
result = Plugin().process(self.cube)
self.assertIsInstance(result, Cube)
self.assertIsInstance(result.coord("realization"), DimCoord)
self.assertEqual(result.coord("realization").units, "1")
def test_specify_realization_numbers(self):
"""Use the ensemble_realization_numbers optional argument to specify
particular values for the ensemble realization numbers."""
ensemble_realization_numbers = [12, 13, 14]
result = Plugin().process(self.cube, ensemble_realization_numbers)
self.assertArrayEqual(
result.coord("realization").points, ensemble_realization_numbers
)
def test_number_of_realizations(self):
"""Check the values for the realization coordinate generated without
specifying the ensemble_realization_numbers argument."""
result = Plugin().process(self.cube)
self.assertArrayAlmostEqual(
result.coord("realization").points, np.array([0, 1, 2])
)
def test_raises_exception_if_realization_already_exists(self):
"""Check that an exception is raised if a realization
coordinate already exists."""
self.cube.add_aux_coord(AuxCoord(0, "realization"))
msg = r"Cannot rebadge percentile coordinate to realization.*"
with self.assertRaisesRegex(InvalidCubeError, msg):
Plugin().process(self.cube)
def test_raises_exception_if_percentiles_unevenly_spaced(self):
"""Check that an exception is raised if the input percentiles
are not evenly spaced."""
cube = set_up_percentile_cube(
np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
np.array([25, 50, 90], dtype=np.float32),
)
msg = r"The percentile cube provided cannot be rebadged as ensemble realizations.*"
with self.assertRaisesRegex(ValueError, msg):
Plugin().process(cube)
def test_raises_exception_if_percentiles_not_centred(self):
"""Check that an exception is raised if the input percentiles
are not centred on 50th percentile."""
cube = set_up_percentile_cube(
np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
np.array([30, 60, 90], dtype=np.float32),
)
msg = r"The percentile cube provided cannot be rebadged as ensemble realizations.*"
with self.assertRaisesRegex(ValueError, msg):
Plugin().process(cube)
def test_raises_exception_if_percentiles_unequal_partition_percentile_space(self):
"""Check that an exception is raised if the input percentiles
don't evenly partition percentile space."""
cube = set_up_percentile_cube(
np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
np.array([10, 50, 90], dtype=np.float32),
)
msg = r"The percentile cube provided cannot be rebadged as ensemble realizations.*"
with self.assertRaisesRegex(ValueError, msg):
Plugin().process(cube)
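# Worked example (illustrative): [25, 50, 75] is rebadgeable because the
# percentiles are evenly spaced (step 25), centred on the 50th percentile,
# and split percentile space [0, 100] into four equal 25-wide intervals.
# [10, 50, 90] is evenly spaced (step 40) and centred, but splits [0, 100]
# into widths 10, 40, 40 and 10, so the plugin rejects it (see the tests
# above).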
if __name__ == "__main__":
unittest.main()
|
23d96d571bf1d9a8e37c04ab2e6011eb641579ef
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/export/exceptions.py
|
9a5511d430c87b4d578ebe8f38548033e0b3b307
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
exceptions.py
|
from django.utils.translation import gettext_lazy
from corehq.apps.export.const import MAX_CASE_TYPE_COUNT, MAX_APP_COUNT
class ExportAppException(Exception):
pass
class BadExportConfiguration(ExportAppException):
pass
class ExportFormValidationException(Exception):
pass
class ExportAsyncException(Exception):
pass
class ExportODataDuplicateLabelException(Exception):
pass
class RejectedStaleExport(Exception):
pass
class InvalidLoginException(Exception):
pass
class ExportTooLargeException(Exception):
"""Export exceeds size limit"""
class CaseTypeOrAppLimitExceeded(Exception):
"""Project exceeds max allowed case types or applications for a bulk export"""
message = gettext_lazy(
"Cannot do a bulk case export as the project has more than %(max_case_types)s "
"case types or %(max_apps)s applications."
) % {
'max_case_types': MAX_CASE_TYPE_COUNT,
'max_apps': MAX_APP_COUNT
}
def __init__(self, msg=None, *args, **kwargs):
if msg:
self.message = msg
super().__init__(self.message, *args, **kwargs)
class NoTablesException(Exception):
"""ExportInstance does not have any tables to export"""
|
4cfe6dc4af80d6065efe6175cb67d87c12e9c752
|
a8ca3225e24c8b093056ce6baa1db6ba3aea8f97
|
/tests/meta/test_meta_sim.py
|
c0a0ccf6601d1bc41636fd111ec8979ee722e76a
|
[
"MIT"
] |
permissive
|
simpeg/simpeg
|
3e8779392d7b26fe576a7a665205068989d8f4d8
|
ebde5856c318f7b4deb92d755b4fefe19012c48e
|
refs/heads/main
| 2023-09-03T18:49:03.545965
| 2023-08-27T15:45:50
| 2023-08-27T15:45:50
| 14,727,320
| 437
| 268
|
MIT
| 2023-09-10T18:16:22
| 2013-11-26T19:46:36
|
Python
|
UTF-8
|
Python
| false
| false
| 13,077
|
py
|
test_meta_sim.py
|
import numpy as np
from SimPEG.potential_fields import gravity
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG import maps
from discretize import TensorMesh
import scipy.sparse as sp
import pytest
from SimPEG.meta import MetaSimulation, SumMetaSimulation, RepeatedSimulation
def test_multi_sim_correctness():
mesh = TensorMesh([16, 16, 16], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j]
rx_locs = rx_locs.reshape(3, -1).T
rxs = dc.receivers.Pole(rx_locs)
source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T
src_list = [
dc.sources.Pole(
[
rxs,
],
location=loc,
)
for loc in source_locs
]
survey_full = dc.Survey(src_list)
full_sim = dc.Simulation3DNodal(
mesh, survey=survey_full, sigmaMap=maps.IdentityMap()
)
m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1
# split by chunks of sources
chunk_size = 3
sims = []
mappings = []
for i in range(0, len(src_list) + 1, chunk_size):
end = min(i + chunk_size, len(src_list))
if i == end:
break
survey_chunk = dc.Survey(src_list[i:end])
sims.append(
dc.Simulation3DNodal(mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap())
)
mappings.append(maps.IdentityMap())
multi_sim = MetaSimulation(sims, mappings)
# test fields objects
f_full = full_sim.fields(m_test)
f_mult = multi_sim.fields(m_test)
sol_full = f_full[:, "phiSolution"]
sol_mult = np.concatenate([f[:, "phiSolution"] for f in f_mult], axis=1)
np.testing.assert_allclose(sol_full, sol_mult)
# test data output
d_full = full_sim.dpred(m_test, f=f_full)
d_mult = multi_sim.dpred(m_test, f=f_mult)
np.testing.assert_allclose(d_full, d_mult)
# test Jvec
u = np.random.rand(mesh.n_cells)
jvec_full = full_sim.Jvec(m_test, u, f=f_full)
jvec_mult = multi_sim.Jvec(m_test, u, f=f_mult)
np.testing.assert_allclose(jvec_full, jvec_mult)
# test Jtvec
v = np.random.rand(survey_full.nD)
jtvec_full = full_sim.Jtvec(m_test, v, f=f_full)
jtvec_mult = multi_sim.Jtvec(m_test, v, f=f_mult)
np.testing.assert_allclose(jtvec_full, jtvec_mult)
# test get diag
diag_full = full_sim.getJtJdiag(m_test, f=f_full)
diag_mult = multi_sim.getJtJdiag(m_test, f=f_mult)
np.testing.assert_allclose(diag_full, diag_mult)
    # test that things also work without passing optional fields
multi_sim.model = m_test
d_mult2 = multi_sim.dpred()
np.testing.assert_allclose(d_mult, d_mult2)
jvec_mult2 = multi_sim.Jvec(m_test, u)
np.testing.assert_allclose(jvec_mult, jvec_mult2)
jtvec_mult2 = multi_sim.Jtvec(m_test, v)
np.testing.assert_allclose(jtvec_mult, jtvec_mult2)
# also pass a diagonal matrix here for testing.
multi_sim._jtjdiag = None
W = sp.eye(multi_sim.survey.nD)
diag_mult2 = multi_sim.getJtJdiag(m_test, W=W)
np.testing.assert_allclose(diag_mult, diag_mult2)
def test_sum_sim_correctness():
mesh = TensorMesh([16, 16, 16], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T
rx = gravity.Point(rx_locs, components=["gz"])
survey = gravity.Survey(gravity.SourceField(rx))
full_sim = gravity.Simulation3DIntegral(
mesh, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1
)
mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin)
mesh_top = TensorMesh(
[mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]]
)
mappings = [
maps.Mesh2Mesh((mesh_bot, mesh)),
maps.Mesh2Mesh((mesh_top, mesh)),
]
sims = [
gravity.Simulation3DIntegral(
mesh_bot, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1
),
gravity.Simulation3DIntegral(
mesh_top, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1
),
]
sum_sim = SumMetaSimulation(sims, mappings)
m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1
# test fields objects
f_full = full_sim.fields(m_test)
f_mult = sum_sim.fields(m_test)
np.testing.assert_allclose(f_full, sum(f_mult), rtol=1e-6)
# test data output
d_full = full_sim.dpred(m_test, f=f_full)
d_mult = sum_sim.dpred(m_test, f=f_mult)
np.testing.assert_allclose(d_full, d_mult, rtol=1e-6)
# test Jvec
u = np.random.rand(mesh.n_cells)
jvec_full = full_sim.Jvec(m_test, u, f=f_full)
jvec_mult = sum_sim.Jvec(m_test, u, f=f_mult)
np.testing.assert_allclose(jvec_full, jvec_mult, rtol=1e-6)
# test Jtvec
v = np.random.rand(survey.nD)
jtvec_full = full_sim.Jtvec(m_test, v, f=f_full)
jtvec_mult = sum_sim.Jtvec(m_test, v, f=f_mult)
np.testing.assert_allclose(jtvec_full, jtvec_mult, rtol=1e-6)
# test get diag
diag_full = full_sim.getJtJdiag(m_test, f=f_full)
diag_mult = sum_sim.getJtJdiag(m_test, f=f_mult)
np.testing.assert_allclose(diag_full, diag_mult)
    # test that things also work without passing optional kwargs
sum_sim.model = m_test
d_mult2 = sum_sim.dpred()
np.testing.assert_allclose(d_mult, d_mult2)
jvec_mult2 = sum_sim.Jvec(m_test, u)
np.testing.assert_allclose(jvec_mult, jvec_mult2)
jtvec_mult2 = sum_sim.Jtvec(m_test, v)
np.testing.assert_allclose(jtvec_mult, jtvec_mult2)
sum_sim._jtjdiag = None
diag_mult2 = sum_sim.getJtJdiag(m_test)
np.testing.assert_allclose(diag_mult, diag_mult2)
def test_repeat_sim_correctness():
    # the meta simulation is tested for correctness above,
    # so we can test the repeated simulation against it
mesh = TensorMesh([8, 8, 8], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T
rx = gravity.Point(rx_locs, components=["gz"])
survey = gravity.Survey(gravity.SourceField(rx))
sim = gravity.Simulation3DIntegral(
mesh, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1
)
time_mesh = TensorMesh(
[
8,
],
origin=[
0,
],
)
sim_ts = np.linspace(0, 1, 6)
mappings = []
simulations = []
eye = sp.eye(mesh.n_cells, mesh.n_cells)
for t in sim_ts:
ave_time = time_mesh.get_interpolation_matrix(
[
t,
]
)
ave_full = sp.kron(ave_time, eye, format="csr")
mappings.append(maps.LinearMap(ave_full))
simulations.append(
gravity.Simulation3DIntegral(
mesh, survey=survey, rhoMap=maps.IdentityMap(), n_processes=1
)
)
multi_sim = MetaSimulation(simulations, mappings)
repeat_sim = RepeatedSimulation(sim, mappings)
model = np.random.rand(time_mesh.n_cells, mesh.n_cells).reshape(-1)
# test field things
f_full = multi_sim.fields(model)
f_mult = repeat_sim.fields(model)
np.testing.assert_equal(np.c_[f_full], np.c_[f_mult])
d_full = multi_sim.dpred(model, f_full)
d_repeat = repeat_sim.dpred(model, f_mult)
np.testing.assert_equal(d_full, d_repeat)
# test Jvec
u = np.random.rand(len(model))
jvec_full = multi_sim.Jvec(model, u, f=f_full)
jvec_mult = repeat_sim.Jvec(model, u, f=f_mult)
np.testing.assert_allclose(jvec_full, jvec_mult)
# test Jtvec
v = np.random.rand(len(sim_ts) * survey.nD)
jtvec_full = multi_sim.Jtvec(model, v, f=f_full)
jtvec_mult = repeat_sim.Jtvec(model, v, f=f_mult)
np.testing.assert_allclose(jtvec_full, jtvec_mult)
# test get diag
diag_full = multi_sim.getJtJdiag(model, f=f_full)
diag_mult = repeat_sim.getJtJdiag(model, f=f_mult)
np.testing.assert_allclose(diag_full, diag_mult)
def test_multi_errors():
mesh = TensorMesh([16, 16, 16], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j]
rx_locs = rx_locs.reshape(3, -1).T
rxs = dc.receivers.Pole(rx_locs)
source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T
src_list = [
dc.sources.Pole(
[
rxs,
],
location=loc,
)
for loc in source_locs
]
# split by chunks of sources
chunk_size = 3
sims = []
mappings = []
for i in range(0, len(src_list) + 1, chunk_size):
end = min(i + chunk_size, len(src_list))
if i == end:
break
survey_chunk = dc.Survey(src_list[i:end])
sims.append(
dc.Simulation3DNodal(
mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap(mesh)
)
)
mappings.append(maps.IdentityMap(mesh))
# incompatible length of mappings and simulations lists
with pytest.raises(ValueError):
MetaSimulation(sims[:-1], mappings)
# mappings have incompatible input lengths:
mappings[0] = maps.Projection(mesh.n_cells + 1, np.arange(mesh.n_cells) + 1)
with pytest.raises(ValueError):
MetaSimulation(sims, mappings)
# incompatible mapping and simulation
mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10])
with pytest.raises(ValueError):
MetaSimulation(sims, mappings)
def test_sum_errors():
mesh = TensorMesh([16, 16, 16], origin="CCN")
mesh_bot = TensorMesh([mesh.h[0], mesh.h[1], mesh.h[2][:8]], origin=mesh.origin)
mesh_top = TensorMesh(
[mesh.h[0], mesh.h[1], mesh.h[2][8:]], origin=["C", "C", mesh.nodes_z[8]]
)
mappings = [
maps.Mesh2Mesh((mesh_bot, mesh)),
maps.Mesh2Mesh((mesh_top, mesh)),
]
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j].reshape(3, -1).T
rx1 = gravity.Point(rx_locs, components=["gz"])
survey1 = gravity.Survey(gravity.SourceField(rx1))
rx2 = gravity.Point(rx_locs[1:], components=["gz"])
survey2 = gravity.Survey(gravity.SourceField(rx2))
sims = [
gravity.Simulation3DIntegral(
mesh_bot, survey=survey1, rhoMap=maps.IdentityMap(mesh_bot), n_processes=1
),
gravity.Simulation3DIntegral(
mesh_top, survey=survey2, rhoMap=maps.IdentityMap(mesh_top), n_processes=1
),
]
# Test simulations with different numbers of data.
with pytest.raises(ValueError):
SumMetaSimulation(sims, mappings)
def test_repeat_errors():
mesh = TensorMesh([16, 16, 16], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j]
rx_locs = rx_locs.reshape(3, -1).T
rxs = dc.receivers.Pole(rx_locs)
source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T
src_list = [
dc.sources.Pole(
[
rxs,
],
location=loc,
)
for loc in source_locs
]
survey = dc.Survey(src_list)
sim = dc.Simulation3DNodal(mesh, survey=survey, sigmaMap=maps.IdentityMap(mesh))
# split by chunks of sources
mappings = []
for _i in range(10):
mappings.append(maps.IdentityMap(mesh))
# mappings have incompatible input lengths:
mappings[0] = maps.Projection(mesh.n_cells + 1, np.arange(mesh.n_cells) + 1)
with pytest.raises(ValueError):
RepeatedSimulation(sim, mappings)
# incompatible mappings and simulations
mappings[0] = maps.Projection(mesh.n_cells, [0, 1, 3, 5, 10])
with pytest.raises(ValueError):
RepeatedSimulation(sim, mappings)
def test_cache_clear_on_model_clear():
mesh = TensorMesh([16, 16, 16], origin="CCN")
rx_locs = np.mgrid[-0.25:0.25:5j, -0.25:0.25:5j, 0:1:1j]
rx_locs = rx_locs.reshape(3, -1).T
rxs = dc.receivers.Pole(rx_locs)
source_locs = np.mgrid[-0.5:0.5:10j, 0:1:1j, 0:1:1j].reshape(3, -1).T
src_list = [
dc.sources.Pole(
[
rxs,
],
location=loc,
)
for loc in source_locs
]
m_test = np.arange(mesh.n_cells) / mesh.n_cells + 0.1
# split by chunks of sources
chunk_size = 3
sims = []
mappings = []
for i in range(0, len(src_list) + 1, chunk_size):
end = min(i + chunk_size, len(src_list))
if i == end:
break
survey_chunk = dc.Survey(src_list[i:end])
sims.append(
dc.Simulation3DNodal(mesh, survey=survey_chunk, sigmaMap=maps.IdentityMap())
)
mappings.append(maps.IdentityMap())
multi_sim = MetaSimulation(sims, mappings)
assert multi_sim.model is None
for sim in multi_sim.simulations:
assert sim.model is None
# create fields to do some caching operations
multi_sim.fields(m_test)
assert multi_sim.model is not None
for sim in multi_sim.simulations:
assert sim._Me_Sigma is not None
# then set to None to make sure that works (and it clears things)
multi_sim.model = None
assert multi_sim.model is None
for sim in multi_sim.simulations:
assert sim.model is None
assert not hasattr(sim, "_Me_Sigma")
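# --- Illustrative helper (not part of the original file) ---
# The source-chunking pattern repeated in the tests above, factored out:
# split a list of sources into consecutive chunks of at most chunk_size.
def _chunk_sources(src_list, chunk_size):
    return [src_list[i:i + chunk_size] for i in range(0, len(src_list), chunk_size)]
# e.g. _chunk_sources(list(range(10)), 3) -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]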
|
df1bf9e1f2960148feac0cae25dbbdfb646d1887
|
3982e6daf88e453c726f6b39a081fc37ce15a08a
|
/discovery-provider/src/queries/get_followees_for_user.py
|
e8746706423f0ee81d7c387d150f41dae9a2271b
|
[
"Apache-2.0"
] |
permissive
|
AudiusProject/audius-protocol
|
45808e11082608ad5b76a425d287cb6d94a6dab0
|
7cf1d8e378520460d24a7cc8c29e9927c0944cb3
|
refs/heads/main
| 2023-08-09T10:34:28.850436
| 2023-08-09T04:28:17
| 2023-08-09T04:28:17
| 201,821,771
| 531
| 108
|
NOASSERTION
| 2023-09-14T21:27:52
| 2019-08-11T22:31:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
get_followees_for_user.py
|
from sqlalchemy.sql import text
from src.queries.get_unpopulated_users import get_unpopulated_users
from src.queries.query_helpers import populate_user_metadata
from src.utils.db_session import get_db_read_replica
sql = text(
"""
SELECT
followee_user_id
from
follows
left outer join aggregate_user on followee_user_id = user_id
where
is_current = true
and is_delete = false
and follower_user_id = :follower_user_id
order by
follower_count desc,
user_id asc
offset :offset
limit :limit;
"""
)
def get_followees_for_user(args):
users = []
follower_user_id = args.get("follower_user_id")
current_user_id = args.get("current_user_id")
limit = args.get("limit")
offset = args.get("offset")
db = get_db_read_replica()
with db.scoped_session() as session:
rows = session.execute(
sql,
{"follower_user_id": follower_user_id, "limit": limit, "offset": offset},
)
user_ids = [r[0] for r in rows]
# get all users for above user_ids
users = get_unpopulated_users(session, user_ids)
# bundle peripheral info into user results
users = populate_user_metadata(session, user_ids, users, current_user_id)
return users
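# --- Usage sketch (illustrative, not part of the original module) ---
# Requires a configured read replica; the ids and paging values below are
# placeholders:
#     followees = get_followees_for_user(
#         {"follower_user_id": 1, "current_user_id": 2, "limit": 10, "offset": 0}
#     )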
|
2c0680aafea96a74134d4002bdeeebfbad64783d
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/transformations/trivial_loop_elimination_test.py
|
6f2769f9212438360e24e996a1aea17c05f60e8f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
trivial_loop_elimination_test.py
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.transformation.interstate import TrivialLoopElimination
import unittest
import numpy as np
I = dace.symbol("I")
J = dace.symbol("J")
@dace.program
def trivial_loop(data: dace.float64[I, J]):
for i in range(1, 2):
for j in dace.map[0:J]:
data[i, j] = data[i, j] + data[i - 1, j]
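# The loop above runs over range(1, 2), i.e. exactly once with i = 1, which
# is what makes it "trivial": TrivialLoopElimination can replace the loop
# with its body evaluated at that single index.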
class TrivialLoopEliminationTest(unittest.TestCase):
def test_semantic_eq(self):
A1 = np.random.rand(16, 16)
A2 = np.copy(A1)
sdfg = trivial_loop.to_sdfg(simplify=False)
sdfg(A1, I=A1.shape[0], J=A1.shape[1])
count = sdfg.apply_transformations(TrivialLoopElimination)
self.assertGreater(count, 0)
sdfg(A2, I=A1.shape[0], J=A1.shape[1])
self.assertTrue(np.allclose(A1, A2))
if __name__ == '__main__':
unittest.main()
|
d62e125ff498d45bc8141b48d20a8fb476eb54cc
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/api/v2/permissions.py
|
8bc30ecd4e20a10e530d8d13e9111dd39be3f3ed
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
permissions.py
|
"""Defines access permissions for the API."""
from rest_framework import permissions
from rest_framework_api_key.permissions import BaseHasAPIKey, KeyParser
from readthedocs.api.v2.models import BuildAPIKey
from readthedocs.builds.models import Version
class IsOwner(permissions.BasePermission):
"""Custom permission to only allow owners of an object to edit it."""
def has_object_permission(self, request, view, obj):
        # Write permissions are only allowed to the owner of the object
return request.user in obj.users.all()
class ReadOnlyPermission(permissions.BasePermission):
"""Allow read-only access to authenticated and anonymous users."""
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class IsAuthorizedToViewVersion(permissions.BasePermission):
"""
Checks if the user from the request has permissions to see the version.
    This permission class is used in the FooterHTML and PageSearchAPIView views.
    .. note::
        Views using this permission should implement the
        `_get_version` and `_get_project` methods.
"""
def has_permission(self, request, view):
project = view._get_project()
version = view._get_version()
has_access = (
Version.objects.public(
user=request.user,
project=project,
only_active=False,
)
.filter(pk=version.pk)
.exists()
)
return has_access
class TokenKeyParser(KeyParser):
"""
Custom key parser to use ``Token {TOKEN}`` as format.
This is the same format we use in API V3 for auth/authz.
"""
keyword = "Token"
class HasBuildAPIKey(BaseHasAPIKey):
"""
Custom permission to inject the build API key into the request.
We completely override the ``has_permission`` method
to avoid having to parse and validate the key again on each view.
The key is injected in the ``request.build_api_key`` attribute
only if it's valid, otherwise it's set to ``None``.
This grants read and write access to the API.
"""
model = BuildAPIKey
key_parser = TokenKeyParser()
def has_permission(self, request, view):
request.build_api_key = None
key = self.get_key(request)
if not key:
return False
try:
build_api_key = self.model.objects.get_from_key(key)
except self.model.DoesNotExist:
return False
if build_api_key.has_expired:
return False
request.build_api_key = build_api_key
return True
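# --- Usage sketch (illustrative, not part of the original module) ---
# Build clients authenticate with the header format parsed by
# TokenKeyParser above; the key value is a placeholder:
#     Authorization: Token <build-api-key>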
|
6105f4b5da835e21b56c13f1cc743a24283dbf26
|
52c0949315583ba8898694cceacfaafaeab6f902
|
/script/testing/oltpbench/__main__.py
|
ad951b263f1df27dffc0dbb90bf951ed8cfe6c8d
|
[
"MIT"
] |
permissive
|
cmu-db/noisepage
|
97093adcc9474419e063fdd97a5aa7a7ea6f3150
|
79276e68fe83322f1249e8a8be96bd63c583ae56
|
refs/heads/master
| 2023-08-29T05:51:04.628704
| 2021-11-05T14:12:08
| 2021-11-05T14:12:08
| 140,325,970
| 1,245
| 287
|
MIT
| 2022-11-08T02:06:48
| 2018-07-09T18:22:34
|
C++
|
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
__main__.py
|
#!/usr/bin/python3
import json
import logging
import os
import sys
import traceback
from ..util.constants import LOG, ErrorCode
from . import constants
from .test_case_oltp import TestCaseOLTPBench
from .test_oltpbench import TestOLTPBench
from .utils import parse_command_line_args
def generate_tests(args):
"""
Generate tests for TestOLTPBench.run().
Parameters
----------
args : dict
The result of parse_command_line_args().
WARNING: NOTE THAT THIS IS MUTATED WITH THE FOLLOWING KEYS:
collect_mem_info
server_args
continue_on_error
TODO(WAN): Don't do this...
Returns
-------
Tests that can be executed with TestOLTPBench.run().
"""
def get_test_json_config():
config_file = os.path.join(os.getcwd(), args.get("config_file"))
if not os.path.exists(config_file):
raise FileNotFoundError(f"Config file doesn't exist: {config_file}")
with open(config_file) as test_suite_config:
json_config = json.load(test_suite_config)
if not json_config:
raise RuntimeError(f"Bad JSON: {config_file}")
return json_config
def build_server_metadata():
server_metadata = test_json.get('env', {})
max_connection_threads = int(test_json.get('server_args', {}).get(
'connection_thread_count',
str(constants.OLTPBENCH_DEFAULT_CONNECTION_THREAD_COUNT)))
server_metadata['max_connection_threads'] = max_connection_threads
wal_enable = test_json.get('server_args', {}).get(
'wal_enable', constants.OLTPBENCH_DEFAULT_WAL_ENABLE)
if not wal_enable:
server_metadata['wal_device'] = 'None'
return server_metadata
test_json = get_test_json_config()
# MUTATE args["collect_mem_info"]. All tests collect memory info by default.
disable_mem_info = args.get("disable_mem_info")
args["collect_mem_info"] = not disable_mem_info
# MUTATE args["server_args"].
server_args = test_json.get("server_args", {})
if server_args:
args["server_args"] = server_args
# MUTATE args["continue_on_error"].
args["continue_on_error"] = test_json.get(
"continue_on_error", constants.OLTPBENCH_DEFAULT_CONTINUE_ON_ERROR)
# Build the test suite.
test_suite = []
for testcase in test_json.get("testcases", []):
base_test = testcase.get("base")
base_test["server_data"] = build_server_metadata()
base_test["publish_results"] = args.get("publish_results")
base_test["publish_username"] = args.get("publish_username")
base_test["publish_password"] = args.get("publish_password")
# The config files support looping over parameters,
# see nightly/nightly.json for an example.
loop_tests = testcase.get("loop")
if not loop_tests:
test_suite.append(TestCaseOLTPBench(base_test))
else:
for loop_item in loop_tests:
combined_config = {**base_test, **loop_item}
test_suite.append(TestCaseOLTPBench(combined_config))
return test_suite
if __name__ == "__main__":
args = parse_command_line_args()
exit_code = ErrorCode.ERROR
try:
tests = generate_tests(args)
        # Because generate_tests MUTATES args, it has to come first.
oltpbench = TestOLTPBench(args)
exit_code = oltpbench.run(tests)
    except Exception:
LOG.error("Exception trying to run OLTPBench tests.")
traceback.print_exc(file=sys.stdout)
logging.shutdown()
sys.exit(exit_code)
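# --- Illustrative config sketch (not part of the original module) ---
# A minimal shape for the JSON file read by generate_tests() above; the
# benchmark name and loop values are placeholders:
#     {
#         "env": {},
#         "server_args": {"connection_thread_count": "32"},
#         "continue_on_error": false,
#         "testcases": [
#             {
#                 "base": {"benchmark": "tpcc"},
#                 "loop": [{"terminals": 1}, {"terminals": 4}]
#             }
#         ]
#     }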
|
2d90c975e0a648a7fc3f13d65b18c12156af617a
|
b51bd302a32f0cf0ce46ca10f4ccd4961d87868c
|
/timeseries-streaming/timeseries-python-applications/ml_pipeline_examples/sin_wave_example/inference/batch_inference.py
|
c5b917629d6c1401cfaa4c6372046efc16d1b416
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/dataflow-sample-applications
|
8087e0658ab634538aa33cc025c61ae7fcd8b85b
|
beb6e20133939d7f7024c338ec2dc02e84e17475
|
refs/heads/master
| 2023-08-11T19:32:51.181127
| 2023-05-18T20:49:21
| 2023-05-18T20:49:21
| 278,225,471
| 118
| 62
|
Apache-2.0
| 2023-05-16T23:57:13
| 2020-07-09T00:45:55
|
Java
|
UTF-8
|
Python
| false
| false
| 3,530
|
py
|
batch_inference.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import sys
import tensorflow as tf
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from tfx_bsl.public.beam import RunInference
from tfx_bsl.public.proto import model_spec_pb2
from ml_pipeline.timeseries.encoder_decoder.transforms import process_encdec_inf_rtn
import ml_pipeline_examples.sin_wave_example.config as config
def run(args, pipeline_args):
"""
Run inference pipeline using data generated from streaming pipeline.
"""
pipeline_options = PipelineOptions(
pipeline_args, save_main_session=True, streaming=False)
with beam.Pipeline(options=pipeline_options) as pipeline:
_ = (
pipeline
| 'ReadTFExample' >> beam.io.tfrecordio.ReadFromTFRecord(
file_pattern=args.tfrecord_folder)
| 'ParseExamples' >> beam.Map(tf.train.Example.FromString)
| RunInference(
model_spec_pb2.InferenceSpecType(
saved_model_spec=model_spec_pb2.SavedModelSpec(
signature_name=['serving_default'],
model_path=args.saved_model_location)))
| beam.ParDo(
process_encdec_inf_rtn.ProcessReturn(
config={
                        'tf_transform_graph_dir': args.tf_transform_graph_dir,
'model_config': config.MODEL_CONFIG
}))
| beam.ParDo(
process_encdec_inf_rtn.CheckAnomalous(threshold=0.15))
| beam.ParDo(print))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--tfrecord_folder',
dest='tfrecord_folder',
required=True,
help=
'Location of the TFRecord files produced by the streaming pipeline')
parser.add_argument(
'--saved_model_location',
dest='saved_model_location',
required=True,
        help='Location of the saved model to be used with this inference pipeline'
)
parser.add_argument(
'--tf_transform_graph_dir',
dest='tf_transform_graph_dir',
required=True,
        help=
        'Location of the tf transform graph dir used in post-processing to rescale the values'
)
known_args, pipeline_args = parser.parse_known_args()
run(known_args, pipeline_args)
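# --- Usage sketch (illustrative, not part of the original module) ---
# Example invocation; all paths are placeholders:
#     python batch_inference.py \
#         --tfrecord_folder=gs://my-bucket/tfrecords/* \
#         --saved_model_location=gs://my-bucket/model \
#         --tf_transform_graph_dir=gs://my-bucket/tft_graph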
|
971370f6837ac510d91e867b8a0f2bf91dab8728
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/conf-pkg/src/genie/libs/conf/interface/iosxe/tests/test_interface.py
|
90970cd6a5f6867a6c5e8898c466fe82c743e5c2
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 25,326
|
py
|
test_interface.py
|
#!/usr/bin/env python
# python
import unittest
from unittest.mock import Mock
# Genie package
from genie.tests.conf import TestCase
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Link, Interface
# xBU-shared genie package
from genie.libs.conf.interface import TunnelTeInterface
from genie.libs.conf.base import MAC, IPv4Interface, IPv6Interface, IPv4Address, IPv6Address
from genie.libs.conf.interface import Layer, L2_type, IPv4Addr, IPv6Addr
from genie.libs.conf.vrf import Vrf
class test_interface(TestCase):
maxDiff = None
def test_TunnelTeInterface(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
lo0 = Interface(device=dev1, name='Loopback0')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
intf2 = Interface(device=dev1, name='GigabitEthernet0/0/2')
intf1.ipv4 = '1.2.3.4/32'
tun3 = Interface(device=dev1, name='Tunnel101', tunnel_mode='mpls traffic-eng')
# tun3 = TunnelTeInterface(device=dev1, name='Tunnel101')
self.assertTrue(isinstance(tun3, TunnelTeInterface))
self.assertEqual(tun3.interface_number, 101)
tun3.destination = intf1.ipv4.ip
tun3.autoroute_announce = True
tun3.ipv4_unnumbered_interface = dev1.interfaces['Loopback0']
tun3.add_path_option(1)
tun3.path_option_attr[1].dynamic = True
tun3.add_path_option(2)
tun3.path_option_attr[2].explicit_name = "exp_PE2_P1_PE1"
cfg = tun3.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface Tunnel101',
' tunnel mode mpls traffic-eng',
' ip unnumbered Loopback0',
' tunnel destination 1.2.3.4',
' tunnel mpls traffic-eng autoroute announce',
' tunnel mpls traffic-eng path-option 1 dynamic',
' tunnel mpls traffic-eng path-option 2 explicit name exp_PE2_P1_PE1',
' exit',
]))
uncfg = tun3.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'no interface Tunnel101',
]))
partial_uncfg = tun3.build_unconfig(apply=False,attributes='autoroute_announce')
self.assertMultiLineEqual(str(partial_uncfg), '\n'.join([
'interface Tunnel101',
' no tunnel mpls traffic-eng autoroute announce',
' exit',
]))
partial_cfg1 = tun3.build_config(apply=False,attributes='path_option_attr__2')
self.assertMultiLineEqual(str(partial_cfg1), '\n'.join([
'interface Tunnel101',
' tunnel mpls traffic-eng path-option 2 explicit name exp_PE2_P1_PE1',
' exit',
]))
# def test_LoopbackInterface(self):
# Genie.testbed = Testbed()
# dev1 = Device(name='PE1', os='iosxe')
# intf1 = Interface(device=dev1, name='Loopback0')
# intf1.ipv4 = '1.2.3.4/32'
# intf1.ipv6 = '2001:0:0::1/128'
# cfg = intf1.build_config(apply=False)
# self.assertMultiLineEqual(str(cfg), '\n'.join([
# 'interface Loopback0',
# ' ip address 1.2.3.4 255.255.255.255',
# ' ipv6 address 2001::1/128',
# ' exit',
# ]))
# uncfg = intf1.build_unconfig(apply=False)
# self.assertMultiLineEqual(str(uncfg), '\n'.join([
# 'no interface Loopback0',
# ]))
def test_EthernetInterface(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
intf1.ipv4 = '1.2.3.4/32'
intf1.auto_negotiation = True
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' ip address 1.2.3.4 255.255.255.255',
' negotiation auto',
' exit',
]))
uncfg = intf1.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'default interface GigabitEthernet0/0/1',
'interface GigabitEthernet0/0/1',
'shutdown',
]))
partial_uncfg1 = intf1.build_unconfig(apply=False,attributes="auto_negotiation")
self.assertMultiLineEqual(str(partial_uncfg1), '\n'.join([
'interface GigabitEthernet0/0/1',
' default negotiation auto',
' exit',
]))
partial_uncfg2 = intf1.build_unconfig(apply=False, attributes="ipv4")
self.assertMultiLineEqual(str(partial_uncfg2), '\n'.join([
'interface GigabitEthernet0/0/1',
' no ip address 1.2.3.4 255.255.255.255',
' exit',
]))
# FiftyGig interface
intf2 = Interface(device=dev1, name='FiftyGigE6/0/1')
intf2.ipv4 = '10.20.30.40/24'
intf2.shutdown = False
cfg = intf2.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface FiftyGigE6/0/1',
' ip address 10.20.30.40 255.255.255.0',
' no shutdown',
' exit'
]))
uncfg = intf2.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'default interface FiftyGigE6/0/1',
'interface FiftyGigE6/0/1',
'shutdown'
]))
def test_EthernetSubInterface(self):
"""Test subinterface support without usage of service_instance"""
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
sub_intf = Interface(device=dev1, name='GigabitEthernet0/0/1.20')
sub_intf.ipv4 = '10.10.0.1/24'
sub_intf.eth_encap_type1 = 'dot1q'
sub_intf.eth_encap_val1 = 20
cfg = sub_intf.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1.20',
' encapsulation dot1q 20',
' ip address 10.10.0.1 255.255.255.0',
' exit',
]))
uncfg = sub_intf.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'no interface GigabitEthernet0/0/1.20',
]))
def test_EFPInterface(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
efp = Interface(device=dev1, name='GigabitEthernet0/0/1.20',service_instance=20)
efp.eth_encap_val1 = 20
efp.rewrite_ingress = 'pop 1 symmetric'
cfg = efp.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' service instance 20 ethernet',
' encapsulation dot1q 20',
' rewrite ingress tag pop 1 symmetric',
' exit',
' exit',
]))
uncfg = efp.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' no service instance 20 ethernet',
' exit',
]))
def test_VlanInterface(self):
testbed = Genie.testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxe')
intf1 = Interface(name='Vlan100',device=dev1)
# Defining attributes section
intf1.mtu = 500
intf1.ipv4 = '201.0.12.1'
intf1.ipv4.netmask = '255.255.255.0'
intf1.ipv6 = '2001::12:1'
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(
str(cfg),
'\n'.join([
'interface Vlan100',
' ip address 201.0.12.1 255.255.255.0',
' ipv6 address 2001::12:1/128',
' mtu 500',
' exit',
]))
uncfg = intf1.build_unconfig(apply=False)
self.assertMultiLineEqual(
str(uncfg),
'\n'.join([
'no interface Vlan100',
]))
partial_uncfg = intf1.build_unconfig(apply=False,attributes="mtu")
self.assertMultiLineEqual(str(partial_uncfg), '\n'.join([
'interface Vlan100',
' no mtu 500',
' exit',
]))
def test_InterfaceSwitchport(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
# Do not set switchport - default
# Check config
cfg1 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg1), '\n'.join([
'interface GigabitEthernet0/0/1',
' exit',
]))
# Set switchport to True
intf1.switchport = True
# Check config
cfg2 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg2), '\n'.join([
'interface GigabitEthernet0/0/1',
' switchport',
' exit',
]))
# Check unconfig
uncfg2 = intf1.build_unconfig(apply=False, attributes="switchport")
self.assertMultiLineEqual(str(uncfg2), '\n'.join([
'interface GigabitEthernet0/0/1',
' no switchport',
' exit',
]))
# Set switchport to False
intf1.switchport = False
# Check config
cfg3 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg3), '\n'.join([
'interface GigabitEthernet0/0/1',
' no switchport',
' exit',
]))
# Check unconfig
uncfg3 = intf1.build_unconfig(apply=False, attributes="switchport")
self.assertMultiLineEqual(str(uncfg3), '\n'.join([
'interface GigabitEthernet0/0/1',
' switchport',
' exit',
]))
# test full switchport related configuration
intf1.switchport_enable = True
intf1.switchport_mode = "access"
intf1.access_vlan = "100"
intf1.trunk_vlans = "all"
intf1.trunk_add_vlans = "100"
intf1.trunk_remove_vlans = "100-200"
intf1.native_vlan = "1"
cfg1 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg1), '\n'.join([
'interface GigabitEthernet0/0/1',
' switchport',
' switchport mode access',
' switchport trunk allowed vlan all',
' switchport trunk native vlan 1',
' switchport access vlan 100',
' switchport trunk allowed vlan add 100',
' switchport trunk allowed vlan remove 100-200',
' exit',
]))
def test_all_ethernet(self):
testbed = Testbed()
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
vrf = Vrf(name='test', testbed=testbed)
dev1.add_feature(vrf)
# Defining attributes section
intf1.description = 'test desc'
intf1.enabled = True
intf1.link_up_down_trap_enable = True
intf1.mtu = 500
intf1.vrf = vrf
intf1.vrf_downstream = 'vrf_downstream_test'
intf1.mac_address = 'aaaa.bbbb.cccc'
intf1.bandwidth = 768
intf1.link_status = True
intf1.load_interval = 30
intf1.encapsulation = 'dot1q'
intf1.first_dot1q = '20'
intf1.second_dot1q = '30'
ipv4a = IPv4Addr(device=dev1)
ipv4a.ipv4 = IPv4Address('192.168.1.1')
ipv4a.prefix_length = '24'
intf1.add_ipv4addr(ipv4a)
ipv4b = IPv4Addr(device=dev1)
ipv4b.ipv4 = IPv4Address('192.168.1.2')
ipv4b.prefix_length = '24'
ipv4b.ipv4_secondary = True
intf1.add_ipv4addr(ipv4b)
ipv6a = IPv6Addr(device=dev1)
ipv6a.ipv6 = IPv6Address('2001:db1:1::1')
ipv6a.ipv6_prefix_length = '64'
intf1.add_ipv6addr(ipv6a)
ipv6b = IPv6Addr(device=dev1)
ipv6b.ipv6 = IPv6Address('2001:db1:2::2')
ipv6b.ipv6_prefix_length = '64'
intf1.add_ipv6addr(ipv6b)
ipv6b.ipv6_anycast = True
intf1.dhcp = True
intf1.dhcp_client_id = '10'
intf1.dhcp_hostname = 'dhcp-host'
intf1.unnumbered_intf_ref = 'GigabitEthernet0/0/2.20'
intf1.ipv6_unnumbered_intf_ref = 'GigabitEthernet0/0/3.100'
intf1.ipv6_enabled = True
intf1.ipv6_autoconf = True
intf1.ipv6_autoconf_default = True
intf1.medium = "broadcast"
intf1.delay = 100
intf1.port_speed = '1000'
intf1.auto_negotiate = True
intf1.duplex_mode = "full"
intf1.flow_control_receive = True
intf1.flow_control_send = False
# Check config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' encapsulation dot1q 20 second-dot1q 30',
' vrf forwarding test downstream vrf_downstream_test',
' description test desc',
' bandwidth 768',
' mtu 500',
' no shutdown',
' snmp trap link-status',
' logging event link-status',
' load-interval 30',
' ipv6 enable',
' ipv6 address autoconfig default',
' ip unnumbered GigabitEthernet0/0/2.20',
' ipv6 unnumbered GigabitEthernet0/0/3.100',
' speed 1000',
' negotiation auto',
' duplex full',
' flowcontrol receive on',
' flowcontrol send off',
' ip address dhcp client-id 10 hostname dhcp-host',
' medium broadcast',
' delay 100',
' ip address 192.168.1.1 255.255.255.0',
' ip address 192.168.1.2 255.255.255.0 secondary',
' ipv6 address 2001:db1:1::1/64',
' ipv6 address 2001:db1:2::2/64 anycast',
' mac-address aaaa.bbbb.cccc',
' exit',
]))
# Check unconfig without attribtues
uncfg = intf1.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'default interface GigabitEthernet0/0/1',
'interface GigabitEthernet0/0/1',
'shutdown',
]))
# Check ipv4 unconfig
uncfg = intf1.build_unconfig(apply=False, attributes="ipv4addr")
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' no ip address 192.168.1.1 255.255.255.0',
' no ip address 192.168.1.2 255.255.255.0 secondary',
' exit',
]))
# Check encapsulation unconfig
uncfg = intf1.build_unconfig(apply=False, attributes={"encapsulation": None,
"first_dot1q": None})
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' no encapsulation dot1q',
' exit',
]))
def test_enabled_switchport_enabled(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet0/0/1')
# Defining attributes section
intf1.switchport_enable = True
intf1.enabled = True
# Check config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' no shutdown',
' switchport',
' exit',
]))
# Check unconfig
uncfg = intf1.build_unconfig(apply=False, attributes={"switchport_enable": True,
"enabled": True})
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' shutdown',
' no switchport',
' exit',
]))
# Defining attributes section
intf1.switchport_enable = False
intf1.enabled = False
# Check config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' shutdown',
' no switchport',
' exit',
]))
# Check unconfig
uncfg = intf1.build_unconfig(apply=False, attributes={"switchport_enable": True,
"enabled": True})
self.assertMultiLineEqual(str(uncfg), '\n'.join([
'interface GigabitEthernet0/0/1',
' no shutdown',
' switchport',
' exit',
]))
def test_virtual(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='Vlan100')
intf2 = Interface(device=dev1, name='Loopback10')
# Defining attributes section
intf1.enabled = True
intf2.enabled = False
# Check config
cfg1 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg1), '\n'.join([
'interface Vlan100',
' no shutdown',
' exit',
]))
# Check unconfig
uncfg1 = intf1.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg1), '\n'.join([
'no interface Vlan100',
]))
# Check config
cfg2 = intf2.build_config(apply=False)
self.assertMultiLineEqual(str(cfg2), '\n'.join([
'interface Loopback10',
' shutdown',
' exit',
]))
# Check unconfig
uncfg2 = intf2.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg2), '\n'.join([
'no interface Loopback10',
]))
# Check unconfig with attributes
uncfg2 = intf2.build_unconfig(apply=False, attributes="enabled")
self.assertMultiLineEqual(str(uncfg2), '\n'.join([
'interface Loopback10',
' no shutdown',
' exit',
]))
    def test_lag_interface(self):
Genie.testbed = Testbed()
dev1 = Device(name='PE1', os='iosxe')
intf1 = Interface(device=dev1, name='GigabitEthernet1/0/1')
intf2 = Interface(device=dev1, name='GigabitEthernet1/0/2')
intf3 = Interface(device=dev1, name='Port-channel10')
# lacp
intf1.lag_bundle_id = 10
intf1.lag_activity = 'active'
intf1.lag_lacp_port_priority = 30
# pagp
intf2.lag_bundle_id = 20
intf2.lag_activity = 'auto'
intf2.lag_non_silent = True
intf2.lag_pagp_port_priority = 50
# virtual lagInterface
intf3.lag_lacp_system_priority = 100
intf3.lag_lacp_max_bundle = 20
intf3.lag_lacp_min_bundle = 15
        # erroneously assigned attribute; it should not appear in the configuration
intf2.lag_lacp_max_bundle = 123
# Check config
cfg1 = intf1.build_config(apply=False)
self.assertMultiLineEqual(str(cfg1), '\n'.join([
'interface GigabitEthernet1/0/1',
' channel-group 10 mode active',
' lacp port-priority 30',
' exit',
]))
cfg2 = intf2.build_config(apply=False)
self.assertMultiLineEqual(str(cfg2), '\n'.join([
'interface GigabitEthernet1/0/2',
' channel-group 20 mode auto non-silent',
' pagp port-priority 50',
' exit',
]))
cfg3 = intf3.build_config(apply=False)
self.assertMultiLineEqual(str(cfg3), '\n'.join([
'lacp system-priority 100',
'interface Port-channel10',
' lacp max-bundle 20',
' lacp min-bundle 15',
' exit',
]))
# Check unconfig
uncfg1 = intf1.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg1), '\n'.join([
'default interface GigabitEthernet1/0/1',
'interface GigabitEthernet1/0/1',
'shutdown',
]))
uncfg2 = intf2.build_unconfig(apply=False)
self.assertMultiLineEqual(str(uncfg2), '\n'.join([
'default interface GigabitEthernet1/0/2',
'interface GigabitEthernet1/0/2',
'shutdown',
]))
# Check unconfig with attributes
uncfg1 = intf1.build_unconfig(apply=False, attributes={'lag_activity': None,
'lag_bundle_id': None})
self.assertMultiLineEqual(str(uncfg1), '\n'.join([
'interface GigabitEthernet1/0/1',
' no channel-group 10 mode active',
' exit',
]))
uncfg2 = intf2.build_unconfig(apply=False, attributes="lag_pagp_port_priority")
self.assertMultiLineEqual(str(uncfg2), '\n'.join([
'interface GigabitEthernet1/0/2',
' no pagp port-priority 50',
' exit',
]))
def test_NveInterface_L2vni_mcast(self):
testbed = Genie.testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxe')
        intf1 = Interface(name='nve1', device=dev1)
# Defining attributes section
intf1.nve_bgp_host_reachability = True
intf1.nve_src_intf_loopback = 'Loopback0'
        intf1.nve_vni = '20000'
intf1.nve_vni_mcast_group = '239.1.1.2'
intf1.nve_vni_local_routing = True
# Build config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(
str(cfg),
'\n'.join([
'interface nve1',
' host-reachability protocol bgp',
' source-interface Loopback0',
' member vni 20000',
' mcast-group 239.1.1.2 local-routing',
' exit',
' exit'
]))
# Build unconfig
partial_uncfg_1 = intf1.build_unconfig(apply=False, attributes={
'nve_bgp_host_reachability': True,
'nve_vni': '20000'})
        # Check unconfig built correctly
self.assertMultiLineEqual(
str(partial_uncfg_1),
'\n'.join([
'interface nve1',
' no host-reachability protocol bgp',
' no member vni 20000',
' exit'
]))
# Build unconfig
partial_uncfg_2 = intf1.build_unconfig(apply=False, attributes={
'nve_src_intf_loopback': 'Loopback0'})
        # Check unconfig built correctly
self.assertMultiLineEqual(
str(partial_uncfg_2),
'\n'.join([
'interface nve1',
' no source-interface Loopback0',
' exit'
]))
def test_NveInterface_L2vni_ir(self):
testbed = Genie.testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxe')
        intf1 = Interface(name='nve1', device=dev1)
# Defining attributes section
intf1.nve_bgp_host_reachability = True
intf1.nve_src_intf_loopback = 'Loopback0'
        intf1.nve_vni = '20000'
intf1.nve_vni_ingress_replication = True
# Build config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(
str(cfg),
'\n'.join([
'interface nve1',
' host-reachability protocol bgp',
' source-interface Loopback0',
' member vni 20000',
' ingress-replication',
' exit',
' exit'
]))
# Build unconfig
        uncfg = intf1.build_unconfig(apply=False, attributes={'nve_vni': '20000'})
        # Check unconfig built correctly
self.assertMultiLineEqual(
str(uncfg),
'\n'.join([
'interface nve1',
' no member vni 20000',
' exit'
]))
def test_NveInterface_L3vni(self):
testbed = Genie.testbed = Testbed()
dev1 = Device(testbed=testbed, name='PE1', os='iosxe')
        intf1 = Interface(name='nve1', device=dev1)
# Defining attributes section
intf1.nve_bgp_host_reachability = True
intf1.nve_src_intf_loopback = 'Loopback0'
        intf1.nve_vni = '30000'
intf1.nve_vni_vrf = 'red'
# Build config
cfg = intf1.build_config(apply=False)
self.assertMultiLineEqual(
str(cfg),
'\n'.join([
'interface nve1',
' host-reachability protocol bgp',
' source-interface Loopback0',
' member vni 30000 vrf red',
' exit'
]))
# Build unconfig
        uncfg = intf1.build_unconfig(apply=False, attributes={'nve_vni': '30000'})
        # Check unconfig built correctly
self.assertMultiLineEqual(
str(uncfg),
'\n'.join([
'interface nve1',
' no member vni 30000',
' exit'
]))
if __name__ == '__main__':
unittest.main()
|
539cfbe2478f36ca08b59022d6b6bba6e88d4484
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/clone-n-ary-tree.py
|
2978a5c10e44f664c31d3b46a657f9fb258424e9
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
clone-n-ary-tree.py
|
# Time: O(n)
# Space: O(h), where h is the height of the tree
# Definition for a Node.
class Node(object):
def __init__(self, val=None, children=None):
self.val = val
self.children = children if children is not None else []
class Solution(object):
def cloneTree(self, root):
"""
:type root: Node
:rtype: Node
"""
result = [None]
stk = [(1, (root, result))]
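        # Iterative DFS that simulates recursion with an explicit stack:
        # step 1 clones a node and schedules its children, step 2 attaches
        # a completed child clone to its parent's children list.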
while stk:
step, params = stk.pop()
if step == 1:
node, ret = params
if not node:
continue
ret[0] = Node(node.val)
for child in reversed(node.children):
ret1 = [None]
stk.append((2, (ret1, ret)))
stk.append((1, (child, ret1)))
else:
ret1, ret = params
ret[0].children.append(ret1[0])
return result[0]
# Time: O(n)
# Space: O(h), where h is the height of the tree
class Solution2(object):
def cloneTree(self, root):
"""
:type root: Node
:rtype: Node
"""
def dfs(node):
if not node:
return None
copy = Node(node.val)
for child in node.children:
copy.children.append(dfs(child))
return copy
return dfs(root)
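# Illustrative usage sketch (not part of the upstream solution; the tree
# shape here is chosen arbitrarily) verifying cloneTree returns a deep copy.
if __name__ == "__main__":
    root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    copy = Solution().cloneTree(root)
    assert copy is not root
    assert copy.val == root.val and len(copy.children) == len(root.children)
    assert copy.children[0] is not root.children[0]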
|
da178f04d05cf9d581c919ce73b77a4d4555330a
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/hammer_task_list.py
|
9a3c11bbde64a8cbc2759f6f391d27a06e7ac797
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,471
|
py
|
hammer_task_list.py
|
"""
HammerTaskList - command ``hammer --csv task list``
===================================================
This parser reads the task list of a Satellite server using hammer, in CSV
format. It relies on the root user who runs the command being able to issue
authenticated commands, which currently requires the Satellite administrator
to set up an authentication file. This is often done as a convenience; if
the command is unable to authenticate then no tasks will be shown and an
error flag will be recorded in the parser.
Sample output from the ``hammer --csv task list`` command::
ID,Name,Owner,Started at,Ended at,State,Result,Task action,Task errors
92b732ea-7423-4644-8890-80e054f1799a,,foreman_api_admin,2016/11/11 07:18:32,2016/11/11 07:18:34,stopped,success,Refresh repository,""
e9cb6455-a433-467e-8404-7d01bd726689,,foreman_api_admin,2016/11/11 07:18:28,2016/11/11 07:18:31,stopped,success,Refresh repository,""
e30f3e7e-c023-4380-9594-337fdc4967e4,,foreman_api_admin,2016/11/11 07:18:24,2016/11/11 07:18:28,stopped,success,Refresh repository,""
3197f6a1-891f-4f42-9e4d-92c83c3ed035,,foreman_api_admin,2016/11/11 07:18:20,2016/11/11 07:18:24,stopped,success,Refresh repository,""
22169621-7175-411c-86be-46b4254a4e77,,foreman_api_admin,2016/11/11 07:18:16,2016/11/11 07:18:19,stopped,success,Refresh repository,""
f111e8f7-c956-470b-abb6-2e436ecd5866,,foreman_api_admin,2016/11/11 07:18:14,2016/11/11 07:18:16,stopped,success,Refresh repository,""
dfc702ea-ce46-427c-8a07-43e2a68e1320,,foreman_api_admin,2016/11/11 07:18:12,2016/11/11 07:18:14,stopped,success,Refresh repository,""
e8cac892-e666-4f2c-ab97-2be298da337e,,foreman_api_admin,2016/11/11 07:18:09,2016/11/11 07:18:12,stopped,success,Refresh repository,""
e6c1e1b2-a29d-4fd0-891e-e736dc9b7150,,,2016/11/11 07:14:06,2016/11/12 05:10:17,stopped,success,Listen on candlepin events,""
44a42c49-3038-4cae-8067-4d1cc305db05,,,2016/11/11 07:11:44,2016/11/11 07:12:47,stopped,success,Listen on candlepin events,""
72669288-54ac-41ba-a3b2-314a2c81f438,,,2016/11/11 06:57:15,2016/11/11 07:07:03,stopped,success,Listen on candlepin events,""
1314c91e-19d6-4d71-9bca-31db0df0aad2,,foreman_admin,2016/11/11 06:55:59,2016/11/11 06:55:59,stopped,error,Update for host sat62disc.example.org,"There was an issue with the backend service candlepin: 404 Resource Not Found, There was an issue with the backend service candlepin: 404 Resource Not Found"
303ef924-9845-4267-a705-194a4ebfbcfb,,foreman_admin,2016/11/11 06:55:58,2016/11/11 06:55:58,stopped,error,Package Profile Update,500 Internal Server Error
cffa5990-23ba-49f5-828b-ae0c77e8257a,,foreman_admin,2016/11/11 06:55:53,2016/11/11 06:55:56,stopped,error,Update for host sat62disc.example.org,"There was an issue with the backend service candlepin: 404 Resource Not Found, There was an issue with the backend service candlepin: 404 Resource Not Found"
07780e8f-dd81-49c4-a792-c4d4d162eb10,,foreman_admin,2016/11/11 06:55:50,2016/11/11 06:55:51,stopped,error,Update for host sat62disc.example.org,"There was an issue with the backend service candlepin: 404 Resource Not Found, There was an issue with the backend service candlepin: 404 Resource Not Found"
749a17a1-a8cb-46f0-98f6-017576481df8,,foreman_admin,2016/11/11 06:51:28,2016/11/11 06:51:29,stopped,error,Update for host sat62disc.example.org,"There was an issue with the backend service candlepin: 404 Resource Not Found, There was an issue with the backend service candlepin: 404 Resource Not Found"
d8f41819-b492-46e5-b0e3-ead3b4b6810c,,foreman_admin,2016/11/11 06:51:22,2016/11/11 06:51:28,stopped,error,Package Profile Update,500 Internal Server Error
Examples:
>>> type(tasks)
<class 'insights.parsers.hammer_task_list.HammerTaskList'>
>>> tasks.can_authenticate
True
>>> len(tasks) # Can act as a list
17
>>> tasks[0]['ID'] # Fetch rows directly
'92b732ea-7423-4644-8890-80e054f1799a'
>>> tasks[0]['Task errors'] # Literal contents of field - quotes not stripped
''
>>> error_tasks = tasks.search(Result='error') # List of dictionaries
>>> len(error_tasks)
6
>>> error_tasks[0]['ID']
'1314c91e-19d6-4d71-9bca-31db0df0aad2'
>>> error_tasks[-1]['Task errors']
'500 Internal Server Error'
"""
import csv
from insights.core import CommandParser
from insights.core.exceptions import SkipComponent
from insights.core.plugins import parser
from insights.parsers import keyword_search
from insights.specs import Specs
@parser(Specs.hammer_task_list)
class HammerTaskList(CommandParser, list):
"""
Parse the CSV output from the ``hammer --output csv task list`` command.
Raises:
SkipComponent: When nothing is parsed.
Attributes:
can_authenticate (bool): Whether we have valid data; if False it's
probably due to not being able to authenticate.
"""
def parse_content(self, content):
self.can_authenticate = content[0].startswith('ID')
if self.can_authenticate:
headings = [c.strip() for c in content[0].split(',')]
creader = csv.reader(content[1:], skipinitialspace=True)
for line in creader:
strip_line = [item.strip() for item in line]
self.append(dict(zip(headings, strip_line)))
if len(self) <= 0:
raise SkipComponent()
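        # The column key may appear as 'State' or 'state' in the parsed rows,
        # so both spellings are checked when collecting running tasks.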
self._running_tasks = [t for t in self if t.get('State', t.get('state')) == 'running']
@property
def tasks(self):
"""Return a list of tasks, in the order they appear in the file, as dictionaries of fields and values."""
return self
@property
def running_tasks(self):
"""Return a list of running tasks"""
return self._running_tasks
def search(self, **kwargs):
"""
Search the process list for matching rows based on key-value pairs.
This uses the py:func:`insights.parsers.keyword_search` function for
searching; see its documentation for usage details. If no search
parameters are given, no rows are returned.
Examples:
>>> no_owner_tasks = tasks.search(Owner='')
>>> len(no_owner_tasks)
3
>>> no_owner_tasks[0]['Task action']
'Listen on candlepin events'
>>> len(tasks.search(State='stopped', Result='error'))
6
"""
return keyword_search(self, **kwargs)
|
27c4476067fec3ce6b21ef10ec0209f10e347902
|
2d4cb3bca0f41054d91b551d8690473540bfc71a
|
/roboticstoolbox/examples/test.py
|
1ef320ee8cb38425de31a25a335c2da1d0c2ea53
|
[
"MIT"
] |
permissive
|
petercorke/robotics-toolbox-python
|
a7175e4a9587b1aa530fed1750223a638f70b72a
|
687d345d9994718d972a4b071dc061a3920d1733
|
refs/heads/master
| 2023-09-01T13:23:57.434048
| 2023-05-15T21:52:49
| 2023-05-15T21:52:49
| 33,467,565
| 1,535
| 407
|
MIT
| 2023-08-18T04:18:23
| 2015-04-06T05:05:25
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
test.py
|
import numpy as np
import roboticstoolbox as rtb
import spatialmath as sm
q0 = np.array(
[
-1.66441371,
-1.20998727,
1.04248366,
-2.10222463,
1.05097407,
1.41173279,
0.0053529,
]
)
tol = 1e-6
panda = rtb.models.Panda().ets()
Tep = panda.eval([0, -0.3, 0, -2.2, 0, 2.0, np.pi / 4])
solver = rtb.IK_QP()
sol = panda.ik_LM(Tep, tol=tol, q0=q0, method="chan")
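# Note: `solver = rtb.IK_QP()` above is instantiated but never used in this
# snippet. `sol` holds the Levenberg-Marquardt IK result; its exact shape
# (joint vector, success flag, residual, ...) depends on the installed
# roboticstoolbox version, so inspect it before unpacking.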
|
2d33e0a75c78a24bd13b9360490aad556849c83d
|
501b0b4b6df1c9384661fbc5d8c73f0200e75308
|
/test/test_parsing.py
|
cf33b1f71f0f4ba50b63b62d32a1e518713b9c95
|
[
"MIT"
] |
permissive
|
mathandy/svgpathtools
|
80f4ef84ad82f2217573bc17c94c86ee41c4b909
|
fcb648b9bb9591d925876d3b51649fa175b40524
|
refs/heads/master
| 2023-08-27T21:54:39.971142
| 2023-05-20T18:38:22
| 2023-05-20T18:38:22
| 62,691,990
| 475
| 144
|
MIT
| 2023-08-30T16:21:51
| 2016-07-06T04:51:28
|
Python
|
UTF-8
|
Python
| false
| false
| 13,221
|
py
|
test_parsing.py
|
# Note: This file was taken mostly as is from the svg.path module (v 2.0)
from __future__ import division, absolute_import, print_function
import unittest
from svgpathtools import Path, Line, QuadraticBezier, CubicBezier, Arc, parse_path
import svgpathtools
import numpy as np
def construct_rotation_tf(a, x, y):
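    # Build the homogeneous 3x3 matrix for SVG's rotate(a, x, y): translate
    # to the pivot, rotate by `a` degrees, translate back, i.e.
    # T(x, y) @ R(a) @ T(-x, -y).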
a = a * np.pi / 180.0
tf_offset = np.identity(3)
tf_offset[0:2, 2:3] = np.array([[x], [y]])
tf_rotate = np.identity(3)
tf_rotate[0:2, 0:2] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
tf_offset_neg = np.identity(3)
tf_offset_neg[0:2, 2:3] = np.array([[-x], [-y]])
return tf_offset.dot(tf_rotate).dot(tf_offset_neg)
class TestParser(unittest.TestCase):
def test_svg_examples(self):
"""Examples from the SVG spec"""
path1 = parse_path('M 100 100 L 300 100 L 200 300 z')
self.assertEqual(path1, Path(Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Line(200 + 300j, 100 + 100j)))
self.assertTrue(path1.isclosed())
# for Z command behavior when there is multiple subpaths
path1 = parse_path('M 0 0 L 50 20 M 100 100 L 300 100 L 200 300 z')
self.assertEqual(path1, Path(Line(0 + 0j, 50 + 20j),
Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Line(200 + 300j, 100 + 100j)))
path1 = parse_path('M 100 100 L 200 200')
path2 = parse_path('M100 100L200 200')
self.assertEqual(path1, path2)
path1 = parse_path('M 100 200 L 200 100 L -100 -200')
path2 = parse_path('M 100 200 L 200 100 -100 -200')
self.assertEqual(path1, path2)
path1 = parse_path("""M100,200 C100,100 250,100 250,200
S400,300 400,200""")
self.assertEqual(path1, Path(CubicBezier(100 + 200j,
100 + 100j,
250 + 100j,
250 + 200j),
CubicBezier(250 + 200j,
250 + 300j,
400 + 300j,
400 + 200j)))
path1 = parse_path('M100,200 C100,100 400,100 400,200')
self.assertEqual(path1, Path(CubicBezier(100 + 200j,
100 + 100j,
400 + 100j,
400 + 200j)))
path1 = parse_path('M100,500 C25,400 475,400 400,500')
self.assertEqual(path1, Path(CubicBezier(100 + 500j,
25 + 400j,
475 + 400j,
400 + 500j)))
path1 = parse_path('M100,800 C175,700 325,700 400,800')
self.assertEqual(path1, Path(CubicBezier(100 + 800j,
175 + 700j,
325 + 700j,
400 + 800j)))
path1 = parse_path('M600,200 C675,100 975,100 900,200')
self.assertEqual(path1, Path(CubicBezier(600 + 200j,
675 + 100j,
975 + 100j,
900 + 200j)))
path1 = parse_path('M600,500 C600,350 900,650 900,500')
self.assertEqual(path1, Path(CubicBezier(600 + 500j,
600 + 350j,
900 + 650j,
900 + 500j)))
path1 = parse_path("""M600,800 C625,700 725,700 750,800
S875,900 900,800""")
self.assertEqual(path1, Path(CubicBezier(600 + 800j,
625 + 700j,
725 + 700j,
750 + 800j),
CubicBezier(750 + 800j,
775 + 900j,
875 + 900j,
900 + 800j)))
path1 = parse_path('M200,300 Q400,50 600,300 T1000,300')
self.assertEqual(path1, Path(QuadraticBezier(200 + 300j,
400 + 50j,
600 + 300j),
QuadraticBezier(600 + 300j,
800 + 550j,
1000 + 300j)))
path1 = parse_path('M300,200 h-150 a150,150 0 1,0 150,-150 z')
self.assertEqual(path1, Path(Line(300 + 200j, 150 + 200j),
Arc(150 + 200j, 150 + 150j, 0, 1, 0, 300 + 50j),
Line(300 + 50j, 300 + 200j)))
path1 = parse_path('M275,175 v-150 a150,150 0 0,0 -150,150 z')
self.assertEqual(path1,
Path(Line(275 + 175j, 275 + 25j),
Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),
Line(125 + 175j, 275 + 175j)))
path1 = parse_path("""M600,350 l 50,-25
a25,25 -30 0,1 50,-25 l 50,-25
a25,50 -30 0,1 50,-25 l 50,-25
a25,75 -30 0,1 50,-25 l 50,-25
a25,100 -30 0,1 50,-25 l 50,-25""")
self.assertEqual(path1,
Path(Line(600 + 350j, 650 + 325j),
Arc(650 + 325j, 25 + 25j, -30, 0, 1, 700 + 300j),
Line(700 + 300j, 750 + 275j),
Arc(750 + 275j, 25 + 50j, -30, 0, 1, 800 + 250j),
Line(800 + 250j, 850 + 225j),
Arc(850 + 225j, 25 + 75j, -30, 0, 1, 900 + 200j),
Line(900 + 200j, 950 + 175j),
Arc(950 + 175j, 25 + 100j, -30, 0, 1, 1000 + 150j),
Line(1000 + 150j, 1050 + 125j)))
def test_others(self):
# Other paths that need testing:
# Relative moveto:
path1 = parse_path('M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z')
self.assertEqual(path1, Path(Line(0 + 0j, 50 + 20j),
Line(100 + 100j, 300 + 100j),
Line(300 + 100j, 200 + 300j),
Line(200 + 300j, 100 + 100j)))
# Initial smooth and relative CubicBezier
path1 = parse_path("""M100,200 s 150,-100 150,0""")
self.assertEqual(path1,
Path(CubicBezier(100 + 200j,
100 + 200j,
250 + 100j,
250 + 200j)))
# Initial smooth and relative QuadraticBezier
path1 = parse_path("""M100,200 t 150,0""")
self.assertEqual(path1,
Path(QuadraticBezier(100 + 200j,
100 + 200j,
250 + 200j)))
# Relative QuadraticBezier
path1 = parse_path("""M100,200 q 0,0 150,0""")
self.assertEqual(path1,
Path(QuadraticBezier(100 + 200j,
100 + 200j,
250 + 200j)))
def test_negative(self):
"""You don't need spaces before a minus-sign"""
path1 = parse_path('M100,200c10-5,20-10,30-20')
path2 = parse_path('M 100 200 c 10 -5 20 -10 30 -20')
self.assertEqual(path1, path2)
def test_numbers(self):
"""Exponents and other number format cases"""
# It can be e or E, the plus is optional, and a minimum of
# +/-3.4e38 must be supported.
path1 = parse_path('M-3.4e38 3.4E+38L-3.4E-38,3.4e-38')
path2 = Path(Line(-3.4e+38 + 3.4e+38j, -3.4e-38 + 3.4e-38j))
self.assertEqual(path1, path2)
def test_errors(self):
self.assertRaises(ValueError, parse_path,
'M 100 100 L 200 200 Z 100 200')
def test_transform(self):
tf_matrix = svgpathtools.parser.parse_transform(
'matrix(1.0 2.0 3.0 4.0 5.0 6.0)')
expected_tf_matrix = np.identity(3)
expected_tf_matrix[0:2, 0:3] = np.array([[1.0, 3.0, 5.0],
[2.0, 4.0, 6.0]])
self.assertTrue(np.array_equal(expected_tf_matrix, tf_matrix))
# Try a test with no y specified
expected_tf_translate = np.identity(3)
expected_tf_translate[0, 2] = -36
self.assertTrue(np.array_equal(
expected_tf_translate,
svgpathtools.parser.parse_transform('translate(-36)')
))
# Now specify y
expected_tf_translate[1, 2] = 45.5
tf_translate = svgpathtools.parser.parse_transform(
'translate(-36 45.5)')
self.assertTrue(np.array_equal(expected_tf_translate, tf_translate))
# Try a test with no y specified
expected_tf_scale = np.identity(3)
expected_tf_scale[0, 0] = 10
expected_tf_scale[1, 1] = 10
self.assertTrue(np.array_equal(
expected_tf_scale,
svgpathtools.parser.parse_transform('scale(10)')
))
# Now specify y
expected_tf_scale[1, 1] = 0.5
tf_scale = svgpathtools.parser.parse_transform('scale(10 0.5)')
self.assertTrue(np.array_equal(expected_tf_scale, tf_scale))
tf_rotation = svgpathtools.parser.parse_transform('rotate(-10 50 100)')
expected_tf_rotation = construct_rotation_tf(-10, 50, 100)
self.assertTrue(np.array_equal(expected_tf_rotation, tf_rotation))
# Try a test with no offset specified
self.assertTrue(np.array_equal(
construct_rotation_tf(50, 0, 0),
svgpathtools.parser.parse_transform('rotate(50)')
))
expected_tf_skewx = np.identity(3)
expected_tf_skewx[0, 1] = np.tan(40.0 * np.pi/180.0)
tf_skewx = svgpathtools.parser.parse_transform('skewX(40)')
self.assertTrue(np.array_equal(expected_tf_skewx, tf_skewx))
expected_tf_skewy = np.identity(3)
expected_tf_skewy[1, 0] = np.tan(30.0 * np.pi / 180.0)
tf_skewy = svgpathtools.parser.parse_transform('skewY(30)')
self.assertTrue(np.array_equal(expected_tf_skewy, tf_skewy))
self.assertTrue(np.array_equal(
tf_rotation.dot(tf_translate).dot(tf_skewx).dot(tf_scale),
svgpathtools.parser.parse_transform(
"""rotate(-10 50 100)
translate(-36 45.5)
skewX(40)
scale(10 0.5)""")
))
def test_pathd_init(self):
path0 = Path('')
path1 = parse_path("M 100 100 L 300 100 L 200 300 z")
path2 = Path("M 100 100 L 300 100 L 200 300 z")
self.assertEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z", current_pos=50+50j)
path2 = Path("m 100 100 L 300 100 L 200 300 z")
self.assertNotEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z")
path2 = Path("m 100 100 L 300 100 L 200 300 z", current_pos=50 + 50j)
self.assertNotEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z", current_pos=50 + 50j)
path2 = Path("m 100 100 L 300 100 L 200 300 z", current_pos=50 + 50j)
self.assertEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z", 50+50j)
path2 = Path("m 100 100 L 300 100 L 200 300 z")
self.assertNotEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z")
path2 = Path("m 100 100 L 300 100 L 200 300 z", 50 + 50j)
self.assertNotEqual(path1, path2)
path1 = parse_path("m 100 100 L 300 100 L 200 300 z", 50 + 50j)
path2 = Path("m 100 100 L 300 100 L 200 300 z", 50 + 50j)
self.assertEqual(path1, path2)
def test_issue_99(self):
p = Path("M 100 250 S 200 200 200 250 300 300 300 250")
self.assertEqual(p.d(useSandT=True), 'M 100.0,250.0 S 200.0,200.0 200.0,250.0 S 300.0,300.0 300.0,250.0')
self.assertEqual(p.d(),
'M 100.0,250.0 C 100.0,250.0 200.0,200.0 200.0,250.0 C 200.0,300.0 300.0,300.0 300.0,250.0')
self.assertNotEqual(p.d(),
'M 100.0,250.0 C 100.0,250.0 200.0,200.0 200.0,250.0 C 200.0,250.0 300.0,300.0 300.0,250.0')
|
61415815f82b4155ae603e7c0ef3f240e3bbb98c
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_from_import.py
|
f48a98b78445f2486e5385f197b8de17860a28ef
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
script_from_import.py
|
from __future__ import absolute_import
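# This module is a modulegraph test fixture: the unresolved names (e.g. the
# bare `a == b` comparison) and the nonexistent `pkg` members are intentional
# edge cases for the import scanner, not bugs to be fixed.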
from pkg import toplevel_existing
from pkg import toplevel_nonexisting
class MyClass:
from pkg import toplevel_class_existing
from pkg import toplevel_class_nonexisting
if a == b:
from pkg import toplevel_conditional_existing
from pkg import toplevel_conditional_nonexisting
try:
from pkg import toplevel_conditional_import_existing, toplevel_conditional_import_nonexisting
except:
from pkg import toplevel_conditional_import2_existing
from pkg import toplevel_conditional_import2_nonexisting
try:
from pkg import toplevel_import_existing, toplevel_import_nonexisting
except:
from pkg import toplevel_import2_existing
from pkg import toplevel_import2_nonexisting
def function():
from pkg import function_existing, function_nonexisting
class MyClass:
from pkg import function_class_existing, function_class_nonexisting
if a == b:
from pkg import function_conditional_existing
from pkg import function_conditional_nonexisting
try:
from pkg import function_conditional_import_existing
from pkg import function_conditional_import_nonexisting
except:
from pkg import function_conditional_import2_existing
from pkg import function_conditional_import2_nonexisting
try:
from pkg import function_import_existing
from pkg import function_import_nonexisting
except:
from pkg import function_import2_existing
from pkg import function_import2_nonexisting
|
ac9d2353ff4aaadc2bae236bab5291512886f4e9
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/procedures/definitions/win32_credui.py
|
96b7be46f4d957969a2ea6bc5e0d6a6806452c3b
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 14,146
|
py
|
win32_credui.py
|
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("credui.dll")
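# Each entry below binds a credui.dll export name to a SimTypeFunction
# prototype (argument and return SimTypes) that angr uses when hooking
# calls into this library.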
prototypes = \
{
#
'SspiPromptForCredentialsW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "pUiInfo", "dwAuthError", "pszPackage", "pInputAuthIdentity", "ppAuthIdentity", "pfSave", "dwFlags"]),
#
'SspiPromptForCredentialsA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "pUiInfo", "dwAuthError", "pszPackage", "pInputAuthIdentity", "ppAuthIdentity", "pfSave", "dwFlags"]),
#
'SspiIsPromptingNeeded': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeChar(label="Byte"), arg_names=["ErrorOrNtStatus"]),
#
'CredUnPackAuthenticationBufferW': SimTypeFunction([SimTypeInt(signed=False, label="CRED_PACK_FLAGS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["dwFlags", "pAuthBuffer", "cbAuthBuffer", "pszUserName", "pcchMaxUserName", "pszDomainName", "pcchMaxDomainName", "pszPassword", "pcchMaxPassword"]),
#
'CredUnPackAuthenticationBufferA': SimTypeFunction([SimTypeInt(signed=False, label="CRED_PACK_FLAGS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["dwFlags", "pAuthBuffer", "cbAuthBuffer", "pszUserName", "pcchlMaxUserName", "pszDomainName", "pcchMaxDomainName", "pszPassword", "pcchMaxPassword"]),
#
'CredPackAuthenticationBufferW': SimTypeFunction([SimTypeInt(signed=False, label="CRED_PACK_FLAGS"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["dwFlags", "pszUserName", "pszPassword", "pPackedCredentials", "pcbPackedCredentials"]),
#
'CredPackAuthenticationBufferA': SimTypeFunction([SimTypeInt(signed=False, label="CRED_PACK_FLAGS"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["dwFlags", "pszUserName", "pszPassword", "pPackedCredentials", "pcbPackedCredentials"]),
#
'CredUIPromptForCredentialsW': SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "hwndParent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pszMessageText": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pszCaptionText": SimTypePointer(SimTypeChar(label="Char"), offset=0), "hbmBanner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="CREDUI_INFOW", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"dwLower": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "dwUpper": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="SecHandle", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUI_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUiInfo", "pszTargetName", "pContext", "dwAuthError", "pszUserName", "ulUserNameBufferSize", "pszPassword", "ulPasswordBufferSize", "save", "dwFlags"]),
#
'CredUIPromptForCredentialsA': SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "hwndParent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pszMessageText": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pszCaptionText": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "hbmBanner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="CREDUI_INFOA", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"dwLower": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "dwUpper": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="SecHandle", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUI_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUiInfo", "pszTargetName", "pContext", "dwAuthError", "pszUserName", "ulUserNameBufferSize", "pszPassword", "ulPasswordBufferSize", "save", "dwFlags"]),
#
'CredUIPromptForWindowsCredentialsW': SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "hwndParent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pszMessageText": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pszCaptionText": SimTypePointer(SimTypeChar(label="Char"), offset=0), "hbmBanner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="CREDUI_INFOW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUIWIN_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUiInfo", "dwAuthError", "pulAuthPackage", "pvInAuthBuffer", "ulInAuthBufferSize", "ppvOutAuthBuffer", "pulOutAuthBufferSize", "pfSave", "dwFlags"]),
#
'CredUIPromptForWindowsCredentialsA': SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "hwndParent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pszMessageText": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pszCaptionText": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "hbmBanner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="CREDUI_INFOA", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUIWIN_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUiInfo", "dwAuthError", "pulAuthPackage", "pvInAuthBuffer", "ulInAuthBufferSize", "ppvOutAuthBuffer", "pulOutAuthBufferSize", "pfSave", "dwFlags"]),
#
'CredUIParseUserNameW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["UserName", "user", "userBufferSize", "domain", "domainBufferSize"]),
#
'CredUIParseUserNameA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["userName", "user", "userBufferSize", "domain", "domainBufferSize"]),
#
'CredUICmdLinePromptForCredentialsW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"dwLower": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "dwUpper": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="SecHandle", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUI_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "pContext", "dwAuthError", "UserName", "ulUserBufferSize", "pszPassword", "ulPasswordBufferSize", "pfSave", "dwFlags"]),
#
'CredUICmdLinePromptForCredentialsA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"dwLower": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "dwUpper": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="SecHandle", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypeInt(signed=False, label="CREDUI_FLAGS")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "pContext", "dwAuthError", "UserName", "ulUserBufferSize", "pszPassword", "ulPasswordBufferSize", "pfSave", "dwFlags"]),
#
'CredUIConfirmCredentialsW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "bConfirm"]),
#
'CredUIConfirmCredentialsA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszTargetName", "bConfirm"]),
#
'CredUIStoreSSOCredW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszRealm", "pszUsername", "pszPassword", "bPersist"]),
#
'CredUIReadSSOCredW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pszRealm", "ppszUsername"]),
}
lib.set_prototypes(prototypes)
|
4b098d7faaf595e6ee48e46ecb080da11eb1b083
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/StageSceneryVO.py
|
c31391694a82609446d1a0d8d17b9c34430a97cf
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
StageSceneryVO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class StageSceneryVO(object):
def __init__(self):
self._icon = None
self._id = None
self._scenery_gltf_url = None
self._scenery_name = None
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, value):
self._icon = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def scenery_gltf_url(self):
return self._scenery_gltf_url
@scenery_gltf_url.setter
def scenery_gltf_url(self, value):
self._scenery_gltf_url = value
@property
def scenery_name(self):
return self._scenery_name
@scenery_name.setter
def scenery_name(self, value):
self._scenery_name = value
def to_alipay_dict(self):
params = dict()
if self.icon:
if hasattr(self.icon, 'to_alipay_dict'):
params['icon'] = self.icon.to_alipay_dict()
else:
params['icon'] = self.icon
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.scenery_gltf_url:
if hasattr(self.scenery_gltf_url, 'to_alipay_dict'):
params['scenery_gltf_url'] = self.scenery_gltf_url.to_alipay_dict()
else:
params['scenery_gltf_url'] = self.scenery_gltf_url
if self.scenery_name:
if hasattr(self.scenery_name, 'to_alipay_dict'):
params['scenery_name'] = self.scenery_name.to_alipay_dict()
else:
params['scenery_name'] = self.scenery_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = StageSceneryVO()
if 'icon' in d:
o.icon = d['icon']
if 'id' in d:
o.id = d['id']
if 'scenery_gltf_url' in d:
o.scenery_gltf_url = d['scenery_gltf_url']
if 'scenery_name' in d:
o.scenery_name = d['scenery_name']
return o
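# Illustrative round-trip sketch (not part of the upstream SDK file): fields
# absent from the input dict stay None and are skipped on serialization.
if __name__ == '__main__':
    vo = StageSceneryVO.from_alipay_dict({'id': '1', 'scenery_name': 'lake'})
    print(vo.to_alipay_dict())  # {'id': '1', 'scenery_name': 'lake'}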
|
b0c7ff231129d572b4418e11382d7b7b6600ca1d
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractSurrealskytranslationsWordpressCom.py
|
6641f46181f9cd2d14abd0f8acbbc5ec245b429f
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
feed_parse_extractSurrealskytranslationsWordpressCom.py
|
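# extractVolChapterFragmentPostfix and buildReleaseMessageWithType are not
# imported in this module; they are presumably supplied by the surrounding
# WebMirror parser framework when the function is invoked.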
def extractSurrealskytranslationsWordpressCom(item):
'''
Parser for 'surrealskytranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('what to do when i become a koi and fall into the male god’s bathtub', 'what to do when i become a koi and fall into the male god’s bathtub', 'translated'),
('wdbkfmgb', 'what to do when i become a koi and fall into the male god’s bathtub', 'translated'),
('superstar aspiration', 'superstar aspirations', 'translated'),
('superstar aspirations', 'superstar aspirations', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
af0b72500ccecc49ab8c41d98c4ea6113f5dfda1
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/dataframe/convert_test.py
|
b00ce0e51fa8ba7c3fd2f9f0dfa078a433110e01
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 7,488
|
py
|
convert_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe import convert
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
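# The deferred-DataFrame conversion may yield each Series in arbitrary
# partition-sized chunks and order, so the helper below concatenates the
# chunks and compares sorted values instead of relying on order.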
def equal_to_unordered_series(expected):
def check(actual):
actual = pd.concat(actual)
if sorted(expected) != sorted(actual):
raise AssertionError('Series not equal: \n%s\n%s\n' % (expected, actual))
return check
class ConvertTest(unittest.TestCase):
def test_convert_yield_pandas(self):
with beam.Pipeline() as p:
a = pd.Series([1, 2, 3])
b = pd.Series([100, 200, 300])
pc_a = p | 'A' >> beam.Create([a])
pc_b = p | 'B' >> beam.Create([b])
df_a = convert.to_dataframe(pc_a, proxy=a[:0])
df_b = convert.to_dataframe(pc_b, proxy=b[:0])
df_2a = 2 * df_a
df_3a = 3 * df_a
df_ab = df_a * df_b
# Converting multiple results at a time can be more efficient.
pc_2a, pc_ab = convert.to_pcollection(df_2a, df_ab,
yield_elements='pandas')
# But separate conversions can be done as well.
pc_3a = convert.to_pcollection(df_3a, yield_elements='pandas')
assert_that(pc_2a, equal_to_unordered_series(2 * a), label='Check2a')
assert_that(pc_3a, equal_to_unordered_series(3 * a), label='Check3a')
assert_that(pc_ab, equal_to_unordered_series(a * b), label='Checkab')
def test_convert(self):
with beam.Pipeline() as p:
a = pd.Series([1, 2, 3])
b = pd.Series([100, 200, 300])
pc_a = p | 'A' >> beam.Create(a)
pc_b = p | 'B' >> beam.Create(b)
df_a = convert.to_dataframe(pc_a)
df_b = convert.to_dataframe(pc_b)
df_2a = 2 * df_a
df_3a = 3 * df_a
df_ab = df_a * df_b
# Converting multiple results at a time can be more efficient.
pc_2a, pc_ab = convert.to_pcollection(df_2a, df_ab)
# But separate conversions can be done as well.
pc_3a = convert.to_pcollection(df_3a)
assert_that(pc_2a, equal_to(list(2 * a)), label='Check2a')
assert_that(pc_3a, equal_to(list(3 * a)), label='Check3a')
assert_that(pc_ab, equal_to(list(a * b)), label='Checkab')
def test_convert_with_none(self):
# Ensure the logical Any type allows (nullable) None, see BEAM-12587.
df = pd.DataFrame({'A': ['str', 10, None], 'B': [None, 'str', 20]})
with beam.Pipeline() as p:
res = convert.to_pcollection(df, pipeline=p) | beam.Map(tuple)
assert_that(res, equal_to([(row.A, row.B) for _, row in df.iterrows()]))
def test_convert_scalar(self):
with beam.Pipeline() as p:
pc = p | 'A' >> beam.Create([1, 2, 3])
s = convert.to_dataframe(pc)
pc_sum = convert.to_pcollection(s.sum())
assert_that(pc_sum, equal_to([6]))
def test_convert_non_deferred(self):
with beam.Pipeline() as p:
s1 = pd.Series([1, 2, 3])
s2 = convert.to_dataframe(p | beam.Create([100, 200, 300]))
pc1, pc2 = convert.to_pcollection(s1, s2, pipeline=p)
assert_that(pc1, equal_to([1, 2, 3]), label='CheckNonDeferred')
assert_that(pc2, equal_to([100, 200, 300]), label='CheckDeferred')
def test_convert_memoization(self):
with beam.Pipeline() as p:
a = pd.Series([1, 2, 3])
b = pd.Series([100, 200, 300])
pc_a = p | 'A' >> beam.Create([a])
pc_b = p | 'B' >> beam.Create([b])
df_a = convert.to_dataframe(pc_a, proxy=a[:0])
df_b = convert.to_dataframe(pc_b, proxy=b[:0])
df_2a = 2 * df_a
df_3a = 3 * df_a
df_ab = df_a * df_b
# Two calls to to_pcollection with the same Dataframe should produce the
# same PCollection(s)
pc_2a_, pc_ab_ = convert.to_pcollection(df_2a, df_ab)
pc_3a, pc_2a, pc_ab = convert.to_pcollection(df_3a, df_2a, df_ab)
self.assertIs(pc_2a, pc_2a_)
self.assertIs(pc_ab, pc_ab_)
self.assertIsNot(pc_3a, pc_2a)
self.assertIsNot(pc_3a, pc_ab)
# The same conversions without the unbatching transform should also cache
# PCollections
pc_2a_pandas_, pc_ab_pandas_ = convert.to_pcollection(df_2a, df_ab,
yield_elements='pandas')
pc_3a_pandas, pc_2a_pandas, pc_ab_pandas = convert.to_pcollection(df_3a,
df_2a,
df_ab,
yield_elements='pandas')
self.assertIs(pc_2a_pandas, pc_2a_pandas_)
self.assertIs(pc_ab_pandas, pc_ab_pandas_)
self.assertIsNot(pc_3a_pandas, pc_2a_pandas)
self.assertIsNot(pc_3a_pandas, pc_ab_pandas)
# .. but the cached PCollections should be different
self.assertIsNot(pc_2a_pandas, pc_2a)
self.assertIsNot(pc_ab_pandas, pc_ab)
self.assertIsNot(pc_3a_pandas, pc_3a)
def test_convert_memoization_clears_cache(self):
# This test re-runs the other memoization test, and makes sure that the
# cache is cleaned up with the pipeline. Otherwise there would be concerns
# of it growing without bound.
import gc
# Make sure cache is clear
gc.collect()
self.assertEqual(len(convert.TO_PCOLLECTION_CACHE), 0)
# Disable GC so it doesn't run pre-emptively, confounding assertions about
# cache size
gc.disable()
# Also disable logging, as some implementations may artificially extend
# the life of objects.
import logging
logging.disable(logging.INFO)
try:
self.test_convert_memoization()
self.assertEqual(len(convert.TO_PCOLLECTION_CACHE), 3)
gc.collect()
# PCollections should be removed from cache after pipelines go out of
# scope and are GC'd
self.assertEqual(len(convert.TO_PCOLLECTION_CACHE), 0)
finally:
# Always re-enable GC and logging
gc.enable()
logging.disable(logging.NOTSET)
def test_auto_convert(self):
class MySchemaTransform(beam.PTransform):
def expand(self, pcoll):
return pcoll | beam.Map(
lambda x: beam.Row(
a=x.n**2 - x.m**2, b=2 * x.m * x.n, c=x.n**2 + x.m**2))
with beam.Pipeline() as p:
pc_mn = p | beam.Create([
(1, 2), (2, 3), (3, 10)
]) | beam.MapTuple(lambda m, n: beam.Row(m=m, n=n))
df_mn = convert.to_dataframe(pc_mn)
# Apply a transform directly to a dataframe to get another dataframe.
df_abc = df_mn | MySchemaTransform()
pc_abc = convert.to_pcollection(df_abc) | beam.Map(tuple)
assert_that(pc_abc, equal_to([(3, 4, 5), (5, 12, 13), (91, 60, 109)]))
if __name__ == '__main__':
unittest.main()
|
df2527de3354b986aab15802798c080aae3f1f3c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/BattleStrongholdsQueueMeta.py
|
6f7a02cf5c600db1ab56ceaec79fee48bf41ad79
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
BattleStrongholdsQueueMeta.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/BattleStrongholdsQueueMeta.py
from gui.Scaleform.framework.entities.View import View
class BattleStrongholdsQueueMeta(View):
def exitClick(self):
self._printOverrideError('exitClick')
def onEscape(self):
self._printOverrideError('onEscape')
def as_setTimerS(self, textLabel, timeLabel):
return self.flashObject.as_setTimer(textLabel, timeLabel) if self._isDAAPIInited() else None
def as_setTypeInfoS(self, data):
return self.flashObject.as_setTypeInfo(data) if self._isDAAPIInited() else None
def as_setLeaguesS(self, data):
return self.flashObject.as_setLeagues(data) if self._isDAAPIInited() else None
def as_showExitS(self, vis):
return self.flashObject.as_showExit(vis) if self._isDAAPIInited() else None
def as_showWaitingS(self, description):
return self.flashObject.as_showWaiting(description) if self._isDAAPIInited() else None
def as_hideWaitingS(self):
return self.flashObject.as_hideWaiting() if self._isDAAPIInited() else None
|
241dfc8e9d9ef2f8e3579a473f6b700baad6723a
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/gui/api_plugins/stats_test.py
|
af9eebbc3b58fbf86fc46db776a59096407d507a
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,619
|
py
|
stats_test.py
|
#!/usr/bin/env python
from unittest import mock
from absl.testing import absltest
from grr_response_core.stats import default_stats_collector
from grr_response_core.stats import metrics
from grr_response_proto.api import stats_pb2
from grr_response_server.gui import admin_ui_metrics
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import stats
from grr.test_lib import stats_test_lib
from grr.test_lib import testing_startup
class StatsTest(
stats_test_lib.StatsCollectorTestMixin,
api_test_lib.ApiCallHandlerTest,
):
@classmethod
def setUpClass(cls):
super().setUpClass()
testing_startup.TestInit()
def setUp(self):
super().setUp()
self.handler = stats.ApiIncrementCounterMetricHandler()
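  # Each test patches admin_ui_metrics.API_INCREASE_ALLOWLIST so that only
  # explicitly allowlisted counter names can be incremented via the handler.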
@mock.patch.object(
admin_ui_metrics,
"API_INCREASE_ALLOWLIST",
frozenset(["bananas_de_pijamas_counter"]),
)
def testIncreasesExistingMetric(self):
with self.SetUpStatsCollector(
default_stats_collector.DefaultStatsCollector()
):
counter = metrics.Counter(
"bananas_de_pijamas_counter", fields=[("name", str), ("number", int)]
)
args = stats.ApiIncrementCounterMetricArgs(
metric_name="bananas_de_pijamas_counter",
field_values=[
stats.FieldValue(
field_type=stats_pb2.FieldValue.STRING, string_value="b"
),
stats.FieldValue(
field_type=stats_pb2.FieldValue.NUMBER, number_value=2
),
],
)
self.assertEqual(0, counter.GetValue(fields=["b", 1]))
self.assertEqual(0, counter.GetValue(fields=["b", 2]))
self.handler.Handle(args, context=self.context)
self.assertEqual(0, counter.GetValue(fields=["b", 1]))
self.assertEqual(1, counter.GetValue(fields=["b", 2]))
@mock.patch.object(
admin_ui_metrics,
"API_INCREASE_ALLOWLIST",
frozenset(["nothing_allowlisted"]),
)
def testRaisesNotAllowlisted(self):
with self.SetUpStatsCollector(
default_stats_collector.DefaultStatsCollector()
):
metrics.Counter(
"bananas_de_pijamas_counter", fields=[("name", str), ("number", int)]
)
args = stats.ApiIncrementCounterMetricArgs(
metric_name="invalid_counter_does_not_exist",
field_values=[
stats.FieldValue(
field_type=stats_pb2.FieldValue.STRING, string_value="b"
),
stats.FieldValue(
field_type=stats_pb2.FieldValue.NUMBER, number_value=2
),
],
)
with self.assertRaises(ValueError):
self.handler.Handle(args, context=self.context)
@mock.patch.object(
admin_ui_metrics,
"API_INCREASE_ALLOWLIST",
frozenset(
["bananas_de_pijamas_counter", "invalid_counter_does_not_exist"]
),
)
def testRaisesWithInvalidMetric(self):
with self.SetUpStatsCollector(
default_stats_collector.DefaultStatsCollector()
):
metrics.Counter(
"bananas_de_pijamas_counter", fields=[("name", str), ("number", int)]
)
args = stats.ApiIncrementCounterMetricArgs(
metric_name="invalid_counter_does_not_exist",
field_values=[
stats.FieldValue(
field_type=stats_pb2.FieldValue.STRING, string_value="b"
),
stats.FieldValue(
field_type=stats_pb2.FieldValue.NUMBER, number_value=2
),
],
)
with self.assertRaises(KeyError):
self.handler.Handle(args, context=self.context)
if __name__ == "__main__":
absltest.main()
|
a7557eb138ae4e6cbbde02995f6ee2bbc6942067
|
610244a938791d3d05c749804725f4a9b3831a96
|
/diagrams/onprem/ci.py
|
cdb253367d2166017af62e12fec556e1cd391201
|
[
"MIT"
] |
permissive
|
mingrammer/diagrams
|
66b62ab484eca9cc439aee9b1cffedb1fcb9dba6
|
b19b09761db6f0037fd76e527b9ce6918fbdfcfc
|
refs/heads/master
| 2023-09-04T04:57:36.727192
| 2023-05-22T23:51:10
| 2023-05-22T23:51:10
| 237,791,077
| 31,257
| 2,119
|
MIT
| 2023-09-05T15:45:52
| 2020-02-02T15:23:24
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
ci.py
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OnPrem
class _Ci(_OnPrem):
_type = "ci"
_icon_dir = "resources/onprem/ci"
class Circleci(_Ci):
_icon = "circleci.png"
class Concourseci(_Ci):
_icon = "concourseci.png"
class Droneci(_Ci):
_icon = "droneci.png"
class GithubActions(_Ci):
_icon = "github-actions.png"
class Gitlabci(_Ci):
_icon = "gitlabci.png"
class Jenkins(_Ci):
_icon = "jenkins.png"
class Teamcity(_Ci):
_icon = "teamcity.png"
class Travisci(_Ci):
_icon = "travisci.png"
class Zuulci(_Ci):
_icon = "zuulci.png"
# Aliases
CircleCI = Circleci
ConcourseCI = Concourseci
DroneCI = Droneci
GitlabCI = Gitlabci
TravisCI = Travisci
TC = Teamcity
ZuulCI = Zuulci
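# Illustrative usage sketch (commented out; assumes the `diagrams` package's
# graphviz dependency is installed for rendering):
# from diagrams import Diagram
# from diagrams.onprem.ci import Jenkins, GitlabCI
# with Diagram("ci_example", show=False):
#     Jenkins("build") >> GitlabCI("deploy")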
|
1450d8f702a9794dc71942c750cbc0da9f503ea3
|
ad4425b6ecd8fe75d521be0c01f8ff8cba6e8ed0
|
/absl/tests/app_test_helper.py
|
92f7be3ce06581c319b85ab1b665b7f1c48c16cf
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abseil/abseil-py
|
99430a2f48314c145e01be12f68581c9de94f5cb
|
e5f96d9056efc6d4169e51b379bc557e3462cdf7
|
refs/heads/main
| 2023-09-02T15:59:38.982733
| 2023-09-01T00:09:16
| 2023-09-01T00:09:46
| 104,132,246
| 2,280
| 277
|
Apache-2.0
| 2023-09-14T20:05:59
| 2017-09-19T21:45:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,660
|
py
|
app_test_helper.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper script used by app_test.py."""
import os
import sys
try:
import faulthandler # pylint: disable=g-import-not-at-top
except ImportError:
faulthandler = None
from absl import app # pylint: disable=g-import-not-at-top
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('faulthandler_sigsegv', False, 'raise SIGSEGV')
flags.DEFINE_boolean('raise_exception', False, 'Raise MyException from main.')
flags.DEFINE_boolean(
'raise_usage_error', False, 'Raise app.UsageError from main.')
flags.DEFINE_integer(
    'usage_error_exitcode', None, 'The exitcode if app.UsageError is raised.')
flags.DEFINE_string(
'str_flag_with_unicode_args', u'thumb:\U0001F44D', u'smile:\U0001F604')
flags.DEFINE_boolean('print_init_callbacks', False,
'print init callbacks and exit')
class MyException(Exception):
pass
class MyExceptionHandler(app.ExceptionHandler):
def __init__(self, message):
self.message = message
def handle(self, exc):
sys.stdout.write('MyExceptionHandler: {}\n'.format(self.message))
def real_main(argv):
"""The main function."""
if os.environ.get('APP_TEST_PRINT_ARGV', False):
sys.stdout.write('argv: {}\n'.format(' '.join(argv)))
if FLAGS.raise_exception:
raise MyException
if FLAGS.raise_usage_error:
if FLAGS.usage_error_exitcode is not None:
raise app.UsageError('Error!', FLAGS.usage_error_exitcode)
else:
raise app.UsageError('Error!')
if FLAGS.faulthandler_sigsegv:
faulthandler._sigsegv() # pylint: disable=protected-access
sys.exit(1) # Should not reach here.
if FLAGS.print_init_callbacks:
app.call_after_init(lambda: _callback_results.append('during real_main'))
for value in _callback_results:
print('callback: {}'.format(value))
sys.exit(0)
# Ensure that we have a random C++ flag in flags.FLAGS; this shows
# us that app.run() did the right thing in conjunction with C++ flags.
helper_type = os.environ['APP_TEST_HELPER_TYPE']
if helper_type == 'clif':
if 'heap_check_before_constructors' in flags.FLAGS:
print('PASS: C++ flag present and helper_type is {}'.format(helper_type))
sys.exit(0)
else:
print('FAILED: C++ flag absent but helper_type is {}'.format(helper_type))
sys.exit(1)
elif helper_type == 'pure_python':
if 'heap_check_before_constructors' in flags.FLAGS:
print('FAILED: C++ flag present but helper_type is pure_python')
sys.exit(1)
else:
print('PASS: C++ flag absent and helper_type is pure_python')
sys.exit(0)
else:
print('Unexpected helper_type "{}"'.format(helper_type))
sys.exit(1)
def custom_main(argv):
print('Function called: custom_main.')
real_main(argv)
def main(argv):
print('Function called: main.')
real_main(argv)
flags_parser_argv_sentinel = object()
def flags_parser_main(argv):
print('Function called: main_with_flags_parser.')
if argv is not flags_parser_argv_sentinel:
sys.exit(
'FAILED: main function should be called with the return value of '
'flags_parser, but found: {}'.format(argv))
def flags_parser(argv):
print('Function called: flags_parser.')
if os.environ.get('APP_TEST_FLAGS_PARSER_PARSE_FLAGS', None):
FLAGS(argv)
return flags_parser_argv_sentinel
# Holds results from callbacks triggered by `app.run_after_init`.
_callback_results = []
if __name__ == '__main__':
kwargs = {'main': main}
main_function_name = os.environ.get('APP_TEST_CUSTOM_MAIN_FUNC', None)
if main_function_name:
kwargs['main'] = globals()[main_function_name]
custom_argv = os.environ.get('APP_TEST_CUSTOM_ARGV', None)
if custom_argv:
kwargs['argv'] = custom_argv.split(' ')
if os.environ.get('APP_TEST_USE_CUSTOM_PARSER', None):
kwargs['flags_parser'] = flags_parser
app.call_after_init(lambda: _callback_results.append('before app.run'))
app.install_exception_handler(MyExceptionHandler('first'))
app.install_exception_handler(MyExceptionHandler('second'))
app.run(**kwargs)
sys.exit('This is not reachable.')
|
466e893423681387c697ec7f0b333f13614770f0
|
1b046f94e73603ca807225c3f32633f8712f93a6
|
/tests/test_rii.py
|
c6a28bcbd10d9b5c7466bf80a40305e5efffff5d
|
[
"MIT"
] |
permissive
|
matsui528/rii
|
308e0695c9ba83354ffc14b725dd0d2b2f7b009c
|
99910515d3a562d3050d9428c27e19476a47d8ba
|
refs/heads/main
| 2022-09-04T09:31:57.304762
| 2022-08-06T03:33:45
| 2022-08-06T03:33:45
| 142,134,229
| 146
| 19
|
MIT
| 2022-08-06T03:33:46
| 2018-07-24T09:12:54
|
C++
|
UTF-8
|
Python
| false
| false
| 13,108
|
py
|
test_rii.py
|
from .context import rii
import unittest
import numpy as np
import nanopq
class TestSuite(unittest.TestCase):
def setUp(self):
np.random.seed(123)
def test_construct(self):
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
self.assertEqual(e.fine_quantizer.codewords.shape, (M, Ks, D/M))
self.assertEqual((e.M, e.Ks), (M, Ks))
self.assertEqual(e.verbose, True)
e.verbose = False
self.assertEqual(e.verbose, False)
def test_add(self):
for codec in [nanopq.PQ, nanopq.OPQ]:
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=codec(M=M, Ks=Ks, verbose=True).fit(vecs=X))
self.assertEqual(e.N, 0)
e.add(vecs=X, update_posting_lists=False)
self.assertEqual(e.N, N)
# The encoded vectors should be equal to the ones manually PQ-encoded
pq = e.fine_quantizer
codes = pq.encode(X)
self.assertTrue(np.allclose(codes, e.codes))
# Add again
e.add(vecs=X, update_posting_lists=False)
self.assertEqual(e.N, 2 * N)
def test_reconfigure(self):
for codec in [nanopq.PQ, nanopq.OPQ]:
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=codec(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e.add(vecs=X, update_posting_lists=False)
for nlist in [5, 100]:
e.reconfigure(nlist=nlist)
self.assertEqual(e.nlist, nlist)
self.assertEqual(e.coarse_centers.shape, (nlist, M))
self.assertEqual(len(e.posting_lists), nlist)
self.assertEqual(sum([len(plist) for plist in e.posting_lists]), N)
def test_simple_add_configure(self):
M, Ks = 4, 20
N1, N2, D = 300, 700, 40
X1 = np.random.random((N1, D)).astype(np.float32)
X2 = np.random.random((N2, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X1))
e.add(vecs=X1)
self.assertEqual(e.N, N1)
e.add(vecs=X2)
self.assertEqual(e.N, N1 + N2)
for nlist in [5, 100]:
e.reconfigure(nlist=nlist)
self.assertEqual(e.nlist, nlist)
self.assertEqual(e.coarse_centers.shape, (nlist, M))
self.assertEqual(len(e.posting_lists), nlist)
self.assertEqual(sum([len(plist) for plist in e.posting_lists]), N1 + N2)
def test_add_configure(self):
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e1 = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e1.add_configure(vecs=X, nlist=20)
e2 = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e2.add(vecs=X, update_posting_lists=False)
e2.reconfigure(nlist=20)
# The result of add_configure() should be the same as that of
        # (1) add(update_posting_lists=False) and (2) reconfigure()
self.assertTrue(np.allclose(e1.codes, e2.codes))
self.assertListEqual(e1.posting_lists, e2.posting_lists)
e3 = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X)).add_configure(vecs=X, nlist=20)
# Can be called as a chain
self.assertTrue(np.allclose(e1.codes, e3.codes))
self.assertListEqual(e1.posting_lists, e3.posting_lists)
def test_add_configure_small_number_of_vectors(self):
import copy
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e1 = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e2 = copy.deepcopy(e1)
e3 = copy.deepcopy(e1)
for x in X[:10]:
e1.add_configure(vecs=x.reshape(1, -1)) # Can be added one by one
self.assertEqual(e1.N, 10)
e2.add_configure(vecs=X[:10])
        # Should be the same as calling add_configure once on all ten vectors
self.assertTrue(np.allclose(e1.codes, e2.codes))
self.assertEqual(e1.posting_lists, e2.posting_lists)
for x in X[:10]:
e3.add(x.reshape(1, -1))
e3.reconfigure()
        # Should be the same as adding one by one and then reconfiguring
self.assertTrue(np.allclose(e1.codes, e3.codes))
self.assertEqual(e1.posting_lists, e3.posting_lists)
def test_query_linear(self):
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e.add_configure(vecs=X, nlist=20)
for n, q in enumerate(X[:10]):
topk = 10
ids1, dists1 = e.impl_cpp.query_linear(q, topk, np.array([], dtype=np.int64))
self.assertTrue(isinstance(ids1, list))
self.assertTrue(isinstance(ids1[0], int))
self.assertTrue(isinstance(dists1, list))
self.assertTrue(isinstance(dists1[0], float))
self.assertEqual(len(ids1), topk)
self.assertEqual(len(ids1), len(dists1))
self.assertTrue(np.all(0 <= np.diff(dists1))) # Make sure dists1 is sorted
# The true NN is included in top 10 with high prob
self.assertTrue(n in ids1)
            # Subset search w/ the full set of indices should match search w/o target_ids
ids2, dists2 = e.impl_cpp.query_linear(q, topk, np.arange(N, dtype=np.int64))
self.assertListEqual(ids1, ids2)
self.assertListEqual(dists1, dists2)
S = np.array([2, 24, 43, 55, 102, 139, 221, 542, 667, 873, 874, 899], dtype=np.int64)
ids3, dists3 = e.impl_cpp.query_linear(q, topk, S)
self.assertTrue(np.all([id in S for id in ids3]))
def test_query_ivf(self):
M, Ks = 20, 256
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e.add_configure(vecs=X, nlist=20)
for n, q in enumerate(X[:10]):
L = 200
topk = 10
ids1, dists1 = e.impl_cpp.query_ivf(q, topk, np.array([], dtype=np.int64), L)
self.assertTrue(isinstance(ids1, list))
self.assertTrue(isinstance(ids1[0], int))
self.assertTrue(isinstance(dists1, list))
self.assertTrue(isinstance(dists1[0], float))
self.assertEqual(len(ids1), topk)
self.assertEqual(len(ids1), len(dists1))
self.assertTrue(np.all(0 <= np.diff(dists1))) # Make sure dists1 is sorted
# The true NN is included in top 10 with high prob
# This might fail if the parameters are severe
self.assertTrue(n in ids1)
            # Subset search w/ the full set of indices should match search w/o target_ids
ids2, dists2 = e.impl_cpp.query_ivf(q, topk, np.arange(N, dtype=np.int64), L)
self.assertListEqual(ids1, ids2)
self.assertListEqual(dists1, dists2)
S = np.array([2, 24, 43, 55, 102, 139, 221, 542, 667, 873, 874, 899], dtype=np.int64)
ids3, dists3 = e.impl_cpp.query_ivf(q, topk, S, L)
self.assertTrue(np.all([id in S for id in ids3]))
            # When target_ids is all vectors and L=all, the result is the same as a linear PQ scan
ids4, dists4 = e.impl_cpp.query_ivf(q, topk, np.arange(N, dtype=np.int64), N)
ids5, dists5 = e.impl_cpp.query_linear(q, topk, np.array([], dtype=np.int64))
self.assertListEqual(ids4, ids5)
self.assertListEqual(dists4, dists5)
# When target_ids is specified and L is large, linear and ivf should produce the same result
ids6, dists6 = e.impl_cpp.query_ivf(q, topk, S, L)
ids7, dists7 = e.impl_cpp.query_linear(q, topk, S)
self.assertListEqual(ids6, ids7)
self.assertListEqual(dists6, dists7)
def test_query(self):
for codec in [nanopq.PQ, nanopq.OPQ]:
M, Ks = 20, 256
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=codec(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e.add_configure(vecs=X, nlist=20)
for n, q in enumerate(X[:10]):
topk=50
ids1, dists1 = e.query(q=q, topk=topk)
self.assertTrue(isinstance(ids1, np.ndarray))
self.assertEqual(ids1.dtype, np.int64)
self.assertTrue(isinstance(dists1, np.ndarray))
self.assertEqual(dists1.dtype, np.float64)
self.assertEqual(len(ids1), topk)
self.assertEqual(len(ids1), len(dists1))
self.assertTrue(np.all(0 <= np.diff(dists1))) # Make sure dists1 is sorted
# The true NN is included in top 10 with high prob
# This might fail if the parameters are severe
self.assertTrue(n in ids1)
                # Subset search w/ the full set of indices should match search w/o target_ids
ids2, dists2 = e.query(q=q, topk=topk, target_ids=np.arange(N, dtype=np.int64))
self.assertTrue(np.allclose(ids1, ids2))
self.assertTrue(np.allclose(dists1, dists2))
S = np.array([2, 24, 43, 55, 102, 139, 221, 542, 667, 873, 874, 899], dtype=np.int64)
ids3, dists3 = e.query(q=q, topk=5, target_ids=S)
self.assertTrue(np.all([id in S for id in ids3]))
def test_pickle(self):
M, Ks = 10, 256
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e1 = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e1.add_configure(vecs=X, nlist=20)
import pickle
dumped = pickle.dumps(e1)
e2 = pickle.loads(dumped)
self.assertEqual((e1.M, e1.Ks, e1.threshold),
(e2.M, e2.Ks, e2.threshold))
self.assertTrue(np.allclose(e1.coarse_centers, e2.coarse_centers))
self.assertTrue(np.allclose(e1.codes, e2.codes))
for pl1, pl2 in zip(e1.posting_lists, e2.posting_lists):
self.assertListEqual(pl1, pl2)
def test_clear(self):
M, Ks = 4, 20
N, D = 1000, 40
X = np.random.random((N, D)).astype(np.float32)
e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X))
e.add_configure(vecs=X, nlist=20)
e.clear()
self.assertTrue(e.threshold is None)
self.assertEqual(e.N, 0)
self.assertEqual(e.nlist, 0)
self.assertEqual(e.coarse_centers, None)
self.assertEqual(e.codes, None)
self.assertEqual(len(e.posting_lists), 0)
def test_merge(self):
from itertools import chain
M, Ks, N1, N2, D = 4, 20, 1000, 500, 40
X1 = np.random.random((N1, D)).astype(np.float32)
X2 = np.random.random((N2, D)).astype(np.float32)
codec = nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X1)
e1 = rii.Rii(fine_quantizer=codec)
e2 = rii.Rii(fine_quantizer=codec)
# e1: empty e2: empty
e1.merge(e2)
self.assertEqual((e1.N, e2.N), (0, 0))
# e1: vecs e2: empty
e1.add_configure(vecs=X1)
e1.merge(e2) # posting lists are created in the above line
self.assertEqual(e1.N, N1)
self.assertEqual(e1.nlist, int(np.sqrt(N1))) # Have posting lists
e1.clear()
# e1: empty e2: vecs
e2.add_configure(vecs=X2)
e1.merge(e2) # e1 didn't have posting lists
self.assertEqual(e1.N, N2)
self.assertEqual(e1.nlist, 0) # No posting lists
e1.clear()
e2.clear()
# e1: vecs e2: vecs
e1.add_configure(vecs=X1)
e2.add_configure(vecs=X2)
e1.merge(e2)
self.assertEqual(e1.N, N1 + N2)
self.assertEqual(e1.nlist, int(np.sqrt(N1))) # posting lists are same as the original e1
# Make sure everything is fine
self.assertTrue(np.array_equal(e1.codes, codec.encode(np.vstack((X1, X2)))))
self.assertEqual(sorted(chain(*e1.posting_lists)), list(range(N1 + N2)))
### For debugging ###
# def test_runtime(self):
# import time
# M, Ks, N, D = 8, 256, 100000, 128
# X = np.random.random((N, D)).astype(np.float32)
# e = rii.Rii(fine_quantizer=nanopq.PQ(M=M, Ks=Ks, verbose=True).fit(vecs=X[:1000])).add_configure(vecs=X)
# Q = np.random.random((10000, D)).astype(np.float32)
# t0 = time.time()
# for q in Q:
# e.query(q=q, topk=3, method='ivf')
# print(time.time() - t0, "sec")
if __name__ == '__main__':
print("Starting Rii Test")
unittest.main()
|
bcefae5dfa0fa4f496234778683ca63c4c1618ff
|
f00e34fb264447186ebdb8929ab5fdcd23619980
|
/test/unit/console_tool/__init__.py
|
677ed3dfd9661f76caaf78df739bc328991478ab
|
[
"MIT"
] |
permissive
|
Backblaze/B2_Command_Line_Tool
|
f0afd86705248cfe03bded612450083d7c18f417
|
99b600719c7c83e4c476b2cc13ba08bd974091cb
|
refs/heads/master
| 2023-08-29T01:54:52.367969
| 2023-08-15T20:19:44
| 2023-08-17T10:14:04
| 46,293,472
| 542
| 143
|
NOASSERTION
| 2023-09-11T21:22:10
| 2015-11-16T18:22:50
|
Python
|
UTF-8
|
Python
| false
| false
| 344
|
py
|
__init__.py
|
######################################################################
#
# File: test/unit/console_tool/__init__.py
#
# Copyright 2023 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
"""Tests for the console_tool commands."""
|
aa832524561235c298c73908c5f0cba645cf6af0
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/feathr/feathr_project/feathr/registry/registry_utils.py
|
027b0305be6af2a35b8f6fb10e4128788cc1244a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 7,305
|
py
|
registry_utils.py
|
import inspect
from re import sub
from typing import List
from urllib.parse import urlparse
from feathr.constants import INPUT_CONTEXT
from feathr.definition.anchor import FeatureAnchor
from feathr.definition.dtype import FeatureType, str_to_value_type, value_type_to_str
from feathr.definition.feature import Feature
from feathr.definition.feature_derivations import DerivedFeature
from feathr.definition.source import HdfsSource, JdbcSource, Source, SnowflakeSource
from pyapacheatlas.core import AtlasProcess,AtlasEntity
from feathr.definition.transformation import ExpressionTransformation, Transformation, WindowAggTransformation
from feathr.definition.typed_key import TypedKey
def to_camel(s):
if not s:
return s
if isinstance(s, str):
if "_" in s:
s = sub(r"(_)+", " ", s).title().replace(" ", "")
return ''.join([s[0].lower(), s[1:]])
return s
elif isinstance(s, list):
return [to_camel(i) for i in s]
elif isinstance(s, dict):
return dict([(to_camel(k), s[k]) for k in s])
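# A minimal sketch of the conversions above (illustrative values, not from
# the original source): snake_case strings become camelCase; for lists and
# dicts the conversion recurses over elements / keys.
#
#   to_camel("event_timestamp_column")    # -> "eventTimestampColumn"
#   to_camel(["key_column", "val_type"])  # -> ["keyColumn", "valType"]
#   to_camel({"timestamp_format": "x"})   # -> {"timestampFormat": "x"}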
def source_to_def(v: Source) -> dict:
# Note that after this method, attributes are Camel cased (eventTimestampColumn).
# If the old logic works with snake case (event_timestamp_column), make sure you handle them manually.
ret = {}
if v.name == INPUT_CONTEXT:
return {
"name": INPUT_CONTEXT,
"type": INPUT_CONTEXT,
"path": INPUT_CONTEXT,
}
elif isinstance(v, HdfsSource):
ret = {
"name": v.name,
"type": "hdfs",
"path": v.path,
}
elif isinstance(v, SnowflakeSource):
ret = {
"name": v.name,
"type": "SNOWFLAKE",
"path": v.path,
}
elif isinstance(v, JdbcSource):
ret = {
"name": v.name,
"type": "jdbc",
"url": v.url,
}
if hasattr(v, "dbtable") and v.dbtable:
ret["dbtable"] = v.dbtable
if hasattr(v, "query") and v.query:
ret["query"] = v.query
if hasattr(v, "auth") and v.auth:
ret["auth"] = v.auth
else:
raise ValueError(f"Unsupported source type {v.__class__}")
if hasattr(v, "preprocessing") and v.preprocessing:
ret["preprocessing"] = inspect.getsource(v.preprocessing)
if v.event_timestamp_column:
ret["eventTimestampColumn"] = v.event_timestamp_column
ret["event_timestamp_column"] = v.event_timestamp_column
if v.timestamp_format:
ret["timestampFormat"] = v.timestamp_format
ret["timestamp_format"] = v.timestamp_format
if v.registry_tags:
ret["tags"] = v.registry_tags
return ret
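# Example of the resulting shape for an HdfsSource (illustrative values):
# note that both camelCase and snake_case timestamp keys are emitted, as
# handled above.
#
#   {
#       "name": "nyc_taxi_source",
#       "type": "hdfs",
#       "path": "wasbs://container@account/green_tripdata.csv",
#       "eventTimestampColumn": "lpep_dropoff_datetime",
#       "event_timestamp_column": "lpep_dropoff_datetime",
#       "timestampFormat": "yyyy-MM-dd HH:mm:ss",
#       "timestamp_format": "yyyy-MM-dd HH:mm:ss",
#   }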
def anchor_to_def(v: FeatureAnchor) -> dict:
# Note that after this method, attributes are Camel cased (eventTimestampColumn).
# If the old logic works with snake case (event_timestamp_column), make sure you handle them manually.
source_id = v.source._registry_id
ret = {
"name": v.name,
"sourceId": str(source_id),
}
if v.registry_tags:
ret["tags"] = v.registry_tags
return ret
def transformation_to_def(v: Transformation) -> dict:
if isinstance(v, ExpressionTransformation):
return {
"transformExpr": v.expr
}
elif isinstance(v, WindowAggTransformation):
ret = {
"defExpr": v.def_expr,
}
if v.agg_func:
ret["aggFunc"] = v.agg_func
if v.window:
ret["window"] = v.window
if v.group_by:
ret["groupBy"] = v.group_by
if v.filter:
ret["filter"] = v.filter
if v.limit:
ret["limit"] = v.limit
return ret
raise ValueError("Unsupported Transformation type")
def feature_type_to_def(v: FeatureType) -> dict:
# Note that after this method, attributes are Camel cased (eventTimestampColumn).
# If the old logic works with snake case (event_timestamp_column), make sure you handle them manually.
return {
"type": v.type,
"tensorCategory": v.tensor_category,
"dimensionType": [value_type_to_str(t) for t in v.dimension_type],
"valType": value_type_to_str(v.val_type),
}
def typed_key_to_def(v: TypedKey) -> dict:
ret = {
"keyColumn": v.key_column,
"keyColumnType": value_type_to_str(v.key_column_type)
}
if v.full_name:
ret["fullName"] = v.full_name
if v.description:
ret["description"] = v.description
if v.key_column_alias:
ret["keyColumnAlias"] = v.key_column_alias
return ret
def feature_to_def(v: Feature) -> dict:
ret = {
"name": v.name,
"featureType": feature_type_to_def(v.feature_type),
"key": [typed_key_to_def(k) for k in v.key],
}
if v.transform:
ret["transformation"] = transformation_to_def(
v.transform)
if v.registry_tags:
ret["tags"] = v.registry_tags
return ret
def derived_feature_to_def(v: DerivedFeature) -> dict:
# Note that after this method, attributes are Camel cased (eventTimestampColumn).
# If the old logic works with snake case (event_timestamp_column), make sure you handle them manually.
ret = {
"name": v.name,
"featureType": feature_type_to_def(v.feature_type),
"key": [typed_key_to_def(k) for k in v.key],
"inputAnchorFeatures": [str(f._registry_id) for f in v.input_features if not isinstance(f, DerivedFeature)],
"inputDerivedFeatures": [str(f._registry_id) for f in v.input_features if isinstance(f, DerivedFeature)],
}
if v.transform:
ret["transformation"] = transformation_to_def(v.transform)
return ret
def topological_sort(derived_feature_list: List[DerivedFeature]) -> List[DerivedFeature]:
"""
    In the current registry implementation, all upstream features must be registered before a derived feature that depends on them.
    Thus we sort derived features by the partial order of their dependencies, upstream to downstream.
"""
ret = []
# We don't want to destroy the input list
input = derived_feature_list.copy()
    # Each round adds the most downstream features to `ret`, so `ret` is built in reversed order
while input:
# Process all remaining features
current = input.copy()
# In Python you should not alter content while iterating
current_copy = current.copy()
# Go over all remaining features to see if some feature depends on others
for f in current_copy:
for i in f.input_features:
if i in current:
# Someone depends on feature `i`, so `i` is **not** the most downstream
current.remove(i)
# Now `current` contains only the most downstream features in this round
ret.extend(current)
# Remove one level of dependency from input
for f in current:
input.remove(f)
    # `ret` was built most-downstream-first, so reverse it to upstream-first order
    ret.reverse()
    if len(set(ret)) != len(set(derived_feature_list)):
raise ValueError("Cyclic dependency detected")
return ret
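# A worked example of the ordering (hypothetical features, for illustration):
# if c depends on b and b depends on a, then for input [c, a, b] each round
# peels off the remaining most-downstream feature (c, then b, then a), and
# the final reversed result is [a, b, c] -- every feature after its upstreams.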
|
c414fed6bb3510d9b7ffa3d86355d7d369f4f8fe
|
450201e3dac529d165a0bf96c0cd31b644d53323
|
/mistral/services/action_manager.py
|
3f8ecf24794d48a56d7c2ea6b2f4b67ad7e53cbb
|
[
"Apache-2.0"
] |
permissive
|
openstack/mistral
|
c840b971c89a054f4953831480abc4d266df307e
|
7baff017d0cf01d19c44055ad201ca59131b9f94
|
refs/heads/master
| 2023-08-20T18:42:20.116390
| 2023-07-05T15:39:49
| 2023-07-05T15:39:49
| 13,968,255
| 214
| 117
|
Apache-2.0
| 2021-01-28T06:06:45
| 2013-10-29T20:46:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
action_manager.py
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2014 - StackStorm, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from mistral.db.v2 import api as db_api
from mistral.services import adhoc_actions
from mistral_lib import utils
# TODO(rakhmerov): This module won't be needed after we add action providers
LOG = logging.getLogger(__name__)
ACTIONS_PATH = 'resources/actions'
def _register_preinstalled_adhoc_actions():
action_paths = utils.get_file_list(ACTIONS_PATH)
for action_path in action_paths:
        with open(action_path) as f:
            action_definition = f.read()
adhoc_actions.create_or_update_actions(
action_definition,
scope='public'
)
def sync_db():
with db_api.transaction():
_register_preinstalled_adhoc_actions()
|
e32266f806bb9cc324c2b973554c330cf7fd55bd
|
7162c7fa1433f8bacc666e611241b32232ef3792
|
/tests/functional/__init__.py
|
42627c2b683bdfac15c5f44281d6095451a29410
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
econchick/interrogate
|
ff4e04e9a4a677a8dd694599e39705f80a5c3ad9
|
1e74611fc5296b0572b6bb11b480d43242c4ec49
|
refs/heads/master
| 2023-04-11T11:54:42.297583
| 2022-07-29T16:12:10
| 2022-07-29T16:12:10
| 258,385,030
| 497
| 49
|
MIT
| 2023-09-10T13:43:00
| 2020-04-24T02:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 87
|
py
|
__init__.py
|
# Copyright 2020 Lynn Root
"""Yay functional tests for the ``interrogate`` package!"""
|
ae6ecff06fb52e431c255da80fc907d47d21e46c
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/compute/client_library/snippets/tests/test_templates.py
|
8a4321e77733f9cfe3feb43a7edeb7a2c07d6a0e
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,594
|
py
|
test_templates.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import google.auth
import pytest
# Turning off F401 check because flake8 doesn't recognize using
# PyTest fixture as parameter as usage.
from .test_instance_start_stop import compute_instance # noqa: F401
from ..instance_templates.create import create_template
from ..instance_templates.create_from_instance import create_template_from_instance
from ..instance_templates.create_with_subnet import create_template_with_subnet
from ..instance_templates.delete import delete_instance_template
from ..instance_templates.list import list_instance_templates
PROJECT = google.auth.default()[1]
INSTANCE_ZONE = "europe-west2-b"
@pytest.fixture
def deletable_template_name():
template_name = "i" + uuid.uuid4().hex[:10]
yield template_name
delete_instance_template(PROJECT, template_name)
@pytest.fixture
def template_to_be_deleted():
template_name = "i" + uuid.uuid4().hex[:10]
template = create_template(PROJECT, template_name)
yield template
def test_create_template_and_list(deletable_template_name):
template = create_template(PROJECT, deletable_template_name)
assert template.name == deletable_template_name
assert any(
template.name == deletable_template_name
for template in list_instance_templates(PROJECT)
)
assert template.properties.disks[0].initialize_params.disk_size_gb == 250
assert "debian-11" in template.properties.disks[0].initialize_params.source_image
assert template.properties.network_interfaces[0].name == "global/networks/default"
assert template.properties.machine_type == "e2-standard-4"
def test_create_from_instance(compute_instance, deletable_template_name): # noqa: F811
template = create_template_from_instance(
PROJECT, compute_instance.self_link, deletable_template_name
)
assert template.name == deletable_template_name
assert template.properties.machine_type in compute_instance.machine_type
assert (
template.properties.disks[0].disk_size_gb
== compute_instance.disks[0].disk_size_gb
)
assert (
template.properties.disks[0].initialize_params.source_image
== "projects/rocky-linux-cloud/global/images/family/rocky-linux-8"
)
def test_create_template_with_subnet(deletable_template_name):
template = create_template_with_subnet(
PROJECT,
"global/networks/default",
"regions/asia-east1/subnetworks/default",
deletable_template_name,
)
assert template.name == deletable_template_name
assert (
"global/networks/default" in template.properties.network_interfaces[0].network
)
assert (
"regions/asia-east1/subnetworks/default"
in template.properties.network_interfaces[0].subnetwork
)
def test_delete_template(template_to_be_deleted):
delete_instance_template(PROJECT, template_to_be_deleted.name)
assert all(
template.name != template_to_be_deleted.name
for template in list_instance_templates(PROJECT)
)
|
8e6570d6980e926295090b75f4456f07f19c3b53
|
50927fa2c786a18436526345e4aca1490aa031dc
|
/core/src/main/python/wlsdeploy/util/variables.py
|
0ef651a36218ec46a2bbd87976eaa338b376ac0a
|
[
"UPL-1.0",
"LicenseRef-scancode-other-copyleft",
"MIT",
"GPL-2.0-only",
"Classpath-exception-2.0",
"Apache-2.0",
"CDDL-1.1"
] |
permissive
|
oracle/weblogic-deploy-tooling
|
c3646c297ac482fed921fb599182d557cf77d532
|
9fd74ae578a5b1353662facb0405e5672ecc5191
|
refs/heads/main
| 2023-09-01T08:40:12.305524
| 2023-08-26T13:26:37
| 2023-08-26T13:26:37
| 120,652,037
| 148
| 108
|
UPL-1.0
| 2023-09-14T21:03:06
| 2018-02-07T18:08:30
|
Python
|
UTF-8
|
Python
| false
| false
| 23,766
|
py
|
variables.py
|
"""
Copyright (c) 2017, 2023, Oracle Corporation and/or its affiliates.
Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import os
import re
from java.lang import Boolean
from java.lang import System
from java.io import BufferedReader
from java.io import File
from java.io import FileOutputStream
from java.io import FileReader
from java.io import PrintWriter
from java.io import IOException
from oracle.weblogic.deploy.util import PyOrderedDict as OrderedDict
from wlsdeploy.util import path_utils
from wlsdeploy.util import string_utils
import wlsdeploy.util.unicode_helper as str_helper
from wlsdeploy.exception import exception_helper
from wlsdeploy.logging import platform_logger
from wlsdeploy.util import dictionary_utils
from wlsdeploy.util import env_helper
from wlsdeploy.util.cla_utils import CommandLineArgUtil
_class_name = "variables"
_logger = platform_logger.PlatformLogger('wlsdeploy.variables')
_file_variable_pattern = re.compile("(@@FILE:([\w.\\\/:-]+)@@)")
_property_pattern = re.compile("(@@PROP:([\\w.-]+)@@)")
_environment_pattern = re.compile("(@@ENV:([\\w.-]+)@@)")
_secret_pattern = re.compile("(@@SECRET:([\\w.-]+):([\\w.-]+)@@)")
_file_nested_variable_pattern = re.compile("(@@FILE:(@@[\w]+@@[\w.\\\/:-]+)@@)")
# these match a string containing ONLY a token
_property_string_pattern = re.compile("^(@@PROP:([\\w.-]+)@@)$")
_secret_string_pattern = re.compile("^(@@SECRET:([\\w.-]+):([\\w.-]+)@@)$")
# if this pattern is found, token substitution was incomplete
_unresolved_token_pattern = re.compile("(@@(PROP|FILE|ENV|SECRET):)")
_secret_dirs_variable = "WDT_MODEL_SECRETS_DIRS"
_secret_dir_pairs_variable = "WDT_MODEL_SECRETS_NAME_DIR_PAIRS"
_secret_token_map = None
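# For reference, the token forms matched by the patterns above
# (names and paths are illustrative):
#
#   @@PROP:db.user@@                          looked up in the variable file
#   @@ENV:JAVA_HOME@@                         looked up in the environment
#   @@SECRET:dbsecret:password@@              resolved via the secret token map
#   @@FILE:/u01/secrets/password.txt@@        first line of the named file
#   @@FILE:@@ORACLE_HOME@@/secrets/pwd.txt@@  nested form; the inner token is
#                                             resolved via the model context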
def load_variables(file_path, allow_multiple_files=False):
"""
Load a dictionary of variables from the specified file(s).
:param file_path: the file from which to load properties
:param allow_multiple_files: if True, allow a comma-separated list of variable files
:return the dictionary of variables
:raises VariableException if an I/O error occurs while loading the variables from the file
"""
method_name = "load_variables"
if allow_multiple_files:
paths = file_path.split(CommandLineArgUtil.MODEL_FILES_SEPARATOR)
else:
paths = [file_path]
variable_map = {}
for path in paths:
try:
variable_map.update(string_utils.load_properties(path))
except IOException, ioe:
ex = exception_helper.create_variable_exception('WLSDPLY-01730', path, ioe.getLocalizedMessage(),
error=ioe)
_logger.throwing(ex, class_name=_class_name, method_name=method_name)
raise ex
return variable_map
def write_variables(program_name, variable_map, file_path, append=False):
"""
Write variables to file while preserving order of the variables.
:param program_name: name of the calling program
    :param variable_map: map of variable properties to write to file
:param file_path: the file to which to write the properties
:param append: defaults to False. Append properties to the end of file
:raises VariableException if an error occurs while storing the variables in the file
"""
_method_name = 'write_variables'
_logger.entering(program_name, file_path, append, class_name=_class_name, method_name=_method_name)
pw = None
try:
pw = PrintWriter(FileOutputStream(File(file_path), Boolean(append)), Boolean('true'))
for key, value in variable_map.iteritems():
formatted = '%s=%s' % (key, value)
pw.println(formatted)
pw.close()
except IOException, ioe:
_logger.fine('WLSDPLY-20007', program_name, file_path, ioe.getLocalizedMessage())
ex = exception_helper.create_variable_exception('WLSDPLY-20007', program_name, file_path,
ioe.getLocalizedMessage(), error=ioe)
_logger.throwing(ex, class_name=_class_name, method_name=_method_name)
if pw is not None:
pw.close()
raise ex
_logger.exiting(class_name=_class_name, method_name=_method_name)
def write_sorted_variables(program_name, variable_map, file_path, append=False):
"""
Write the dictionary of variables to the specified file, in alphabetical order by key.
:param program_name: name of tool that invoked the method which will be written to the variable properties file
:param variable_map: the dictionary of variables
:param file_path: the file to which to write the properties
:param append: defaults to False. Append properties to the end of file
:raises VariableException if an error occurs while storing the variables in the file
"""
_method_name = 'write_sorted_variables'
_logger.entering(program_name, file_path, append, class_name=_class_name, method_name=_method_name)
sorted_keys = variable_map.keys()
sorted_keys.sort()
sorted_map = OrderedDict()
for key in sorted_keys:
sorted_map[key] = variable_map[key]
write_variables(program_name, sorted_map, file_path, append)
_logger.exiting(class_name=_class_name, method_name=_method_name)
def get_default_variable_file_name(model_context):
"""
Generate location and file name for the variable file.
If model file is present, use the model file name and location;
else, use the archive file name and location.
:param model_context: contains the model and archive file arguments
:return: location and file name of variable properties file.
"""
_method_name = 'get_default_variable_file_name'
if model_context.get_target() is not None:
default_variable_file = os.path.join(model_context.get_output_dir(), model_context.get_target() +
"_variable.properties")
else:
extract_file_name = model_context.get_model_file()
if not extract_file_name:
extract_file_name = model_context.get_archive_file_name()
default_variable_file = path_utils.get_filename_no_ext_from_path(extract_file_name)
if default_variable_file:
default_variable_file = os.path.join(path_utils.get_pathname_from_path(extract_file_name),
default_variable_file + '.properties')
if default_variable_file:
_logger.finer('WLSDPLY-01736', default_variable_file, class_name=_class_name, method_name=_method_name)
return default_variable_file
def get_variable_names(text):
"""
Get the list of variable names in the supplied text.
:param text: the text to search for variables
:return: a list of variable names
"""
names = []
if '@@' in text:
matches = _property_pattern.findall(text)
for token, key in matches:
names.append(key)
return names
def substitute_value(text, variables, model_context):
"""
Perform token substitutions on a single text value.
If errors occur during substitution, throw a single VariableException.
:param text: the original text
:param variables: a dictionary of variables for substitution
:param model_context: used to resolve variables in file paths
"""
method_name = 'substitute_value'
error_info = {'errorCount': 0}
result = _substitute(text, variables, model_context, error_info)
error_count = error_info['errorCount']
if error_count:
ex = exception_helper.create_variable_exception("WLSDPLY-01740", error_count)
_logger.throwing(ex, class_name=_class_name, method_name=method_name)
raise ex
return result
def substitute(dictionary, variables, model_context):
"""
Substitute fields in the specified dictionary with variable values.
If errors occur during substitution, throw a single VariableException.
:param dictionary: the dictionary in which to substitute variables
:param variables: a dictionary of variables for substitution
:param model_context: used to resolve variables in file paths
"""
method_name = '_substitute'
error_info = {'errorCount': 0}
_process_node(dictionary, variables, model_context, error_info)
error_count = error_info['errorCount']
if error_count:
ex = exception_helper.create_variable_exception("WLSDPLY-01740", error_count)
_logger.throwing(ex, class_name=_class_name, method_name=method_name)
raise ex
def _process_node(nodes, variables, model_context, error_info):
"""
Process variables in the node.
:param nodes: the dictionary to process
:param variables: the variables to use
:param model_context: used to resolve variables in file paths
:param error_info: collects information about errors encountered
"""
# iterate over copy to avoid concurrent change for add/delete
if isinstance(nodes, OrderedDict):
nodes_iterator = OrderedDict(nodes)
else:
nodes_iterator = dict(nodes)
for key in nodes_iterator:
value = nodes[key]
# if the key changes with substitution, remove old key and map value to new key
new_key = _substitute(key, variables, model_context, error_info)
if new_key is not key:
del nodes[key]
nodes[new_key] = value
if isinstance(value, dict):
_process_node(value, variables, model_context, error_info)
elif isinstance(value, list):
for member in value:
if type(member) in [str, unicode]:
index = value.index(member)
value[index] = _substitute(member, variables, model_context, error_info, key)
elif type(value) in [str, unicode]:
nodes[key] = _substitute(value, variables, model_context, error_info, key)
def _substitute(text, variables, model_context, error_info, attribute_name=None):
"""
Substitute token placeholders with their derived values.
:param text: the text to process for token placeholders
:param variables: the variables to use
:param model_context: used to determine the validation method (strict, lax, etc.)
:param error_info: collects information about errors encountered
:return: the replaced text
"""
method_name = '_substitute'
validation_config = model_context.get_validate_configuration()
problem_found = False
# skip lookups for text with no @@
if '@@' in text:
# do properties first, to cover the case @@FILE:/dir/@@PROP:name@@.txt@@
matches = _property_pattern.findall(text)
for token, key in matches:
# log, or throw an exception if key is not found.
if key not in variables:
allow_unresolved = validation_config.allow_unresolved_variable_tokens()
if model_context.get_variable_file() is not None:
_report_token_issue('WLSDPLY-01732', method_name, allow_unresolved, key)
else:
_report_token_issue('WLSDPLY-01734', method_name, allow_unresolved, key)
_increment_error_count(error_info, allow_unresolved)
problem_found = True
continue
value = variables[key]
text = text.replace(token, value)
# check environment variables before @@FILE:/dir/@@ENV:name@@.txt@@
matches = _environment_pattern.findall(text)
for token, key in matches:
#
# On Windows, environment variables are not case sensitive. On Windows 11 anyway,
# setting an environment variable using a name with lower-case letters will always
# result in an environment variable name in all upper-case.
#
env_var_name = str_helper.to_string(key)
is_windows = System.getProperty('os.name').startswith('Windows')
            if is_windows and not env_helper.has_env(env_var_name) and env_helper.has_env(env_var_name.upper()):
env_var_name = env_var_name.upper()
if not env_helper.has_env(env_var_name):
allow_unresolved = validation_config.allow_unresolved_environment_tokens()
_report_token_issue('WLSDPLY-01737', method_name, allow_unresolved, key)
_increment_error_count(error_info, allow_unresolved)
problem_found = True
continue
value = env_helper.getenv(env_var_name)
text = text.replace(token, value)
# check secret variables before @@FILE:/dir/@@SECRET:name:key@@.txt@@
matches = _secret_pattern.findall(text)
for token, name, key in matches:
value = _resolve_secret_token(name, key, model_context)
if value is None:
                # no matching secret; severity of the report depends on the validation configuration
allow_unresolved = validation_config.allow_unresolved_environment_tokens()
secret_token = name + ':' + key
known_tokens = _list_known_secret_tokens()
_report_token_issue('WLSDPLY-01739', method_name, allow_unresolved, secret_token, known_tokens)
_increment_error_count(error_info, allow_unresolved)
problem_found = True
continue
text = text.replace(token, value)
matches = _file_variable_pattern.findall(text)
for token, path in matches:
allow_unresolved = validation_config.allow_unresolved_file_tokens()
value = _read_value_from_file(path, allow_unresolved)
if value is None:
_increment_error_count(error_info, allow_unresolved)
problem_found = True
continue
text = text.replace(token, value)
# special case for @@FILE:@@ORACLE_HOME@@/dir/name.txt@@
matches = _file_nested_variable_pattern.findall(text)
for token, path in matches:
path = model_context.replace_token_string(path)
allow_unresolved = validation_config.allow_unresolved_file_tokens()
value = _read_value_from_file(path, allow_unresolved)
if value is None:
_increment_error_count(error_info, allow_unresolved)
problem_found = True
continue
text = text.replace(token, value)
# if any @@TOKEN: remains in the value, log an error.
# if previous problems were found, don't perform this check.
matches = _unresolved_token_pattern.findall(text)
if matches and not problem_found:
match = matches[0]
token = match[1]
sample = "@@" + token + ":<name>"
if token == "SECRET":
sample += ":<key>"
sample += "@@"
# always log SEVERE, these are syntax errors in the value
allow_unresolved = False
if attribute_name is None:
_report_token_issue("WLSDPLY-01745", method_name, allow_unresolved, text, sample)
else:
_report_token_issue("WLSDPLY-01746", method_name, allow_unresolved, attribute_name, text, sample)
_increment_error_count(error_info, allow_unresolved)
return text
def _increment_error_count(error_info, allow_unresolved):
if not allow_unresolved:
error_info['errorCount'] = error_info['errorCount'] + 1
def _read_value_from_file(file_path, allow_unresolved):
"""
Read a single text value from the first line in the specified file.
:param file_path: the file from which to read the value
:param allow_unresolved: if True, log INFO instead of SEVERE for lookup failures
:return: the text value
:raises BundleAwareException if an error occurs while reading the value
"""
method_name = '_read_value_from_file'
try:
file_reader = BufferedReader(FileReader(file_path))
line = file_reader.readLine()
file_reader.close()
except IOException, e:
_report_token_issue('WLSDPLY-01733', method_name, allow_unresolved, file_path, e.getLocalizedMessage())
return None
if line is None:
line = ''
return str_helper.to_string(line).strip()
def _resolve_secret_token(name, key, model_context):
"""
Return the value associated with the specified secret name and key.
If the name and key are found in the directory map, return the associated value.
:param name: the name of the secret (a directory name or mapped name)
:param key: the name of the file containing the secret
:param model_context: used to determine the validation method (strict, lax, etc.)
:return: the secret value, or None if it is not found
"""
global _secret_token_map
if _secret_token_map is None:
_init_secret_token_map(model_context)
secret_token = name + ':' + key
return dictionary_utils.get_element(_secret_token_map, secret_token)
def _init_secret_token_map(model_context):
"""
Initialize a global map of name/value tokens to secret values.
The map includes secrets found below the directories specified in WDT_MODEL_SECRETS_DIRS,
and in WDT_MODEL_SECRETS_NAME_DIR_PAIRS assignments.
:param model_context: used to determine the validation method (strict, lax, etc.)
"""
method_name = '_init_secret_token_map'
global _secret_token_map
log_method = _logger.warning
if model_context.get_validate_configuration().allow_unresolved_secret_tokens():
log_method = _logger.info
_secret_token_map = dict()
# add name/key pairs for files in sub-directories of directories in WDT_MODEL_SECRETS_DIRS.
locations = env_helper.getenv(str_helper.to_string(_secret_dirs_variable))
if locations is not None:
for secret_dir in locations.split(","):
if not os.path.isdir(secret_dir):
# log at WARN or INFO, but no exception is thrown
log_method('WLSDPLY-01738', _secret_dirs_variable, secret_dir, class_name=_class_name,
method_name=method_name)
continue
for subdir_name in os.listdir(secret_dir):
subdir_path = os.path.join(secret_dir, subdir_name)
if os.path.isdir(subdir_path):
_add_file_secrets_to_map(subdir_path, subdir_name, model_context)
# add name/key pairs for files in directories assigned in WDT_MODEL_SECRETS_NAME_DIR_PAIRS.
# these pairs will override if they were previously added as sub-directory pairs.
dir_pairs_text = env_helper.getenv(str_helper.to_string(_secret_dir_pairs_variable))
if dir_pairs_text is not None:
dir_pairs = dir_pairs_text.split(',')
for dir_pair in dir_pairs:
result = dir_pair.split('=')
if len(result) != 2:
log_method('WLSDPLY-01735', _secret_dir_pairs_variable, dir_pair, class_name=_class_name,
method_name=method_name)
continue
secret_dir = result[1]
if not os.path.isdir(secret_dir):
log_method('WLSDPLY-01738', _secret_dir_pairs_variable, secret_dir, class_name=_class_name,
method_name=method_name)
continue
name = result[0]
_add_file_secrets_to_map(secret_dir, name, model_context)
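# Layout sketch (illustrative paths): with WDT_MODEL_SECRETS_DIRS=/etc/secrets
# and a file /etc/secrets/dbsecret/password, the map gains the token
# 'dbsecret:password'. With WDT_MODEL_SECRETS_NAME_DIR_PAIRS=mysql=/u01/creds
# and a file /u01/creds/username, it gains 'mysql:username', overriding any
# earlier entry with the same token.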
def _clear_secret_token_map():
"""
Used by unit tests to force reload of map.
"""
global _secret_token_map
_secret_token_map = None
def _add_file_secrets_to_map(dir, name, model_context):
"""
Add the secret from each file in the specified directory to the map.
:param dir: the directory to be examined
:param name: the name to be used in the map token
:param model_context: used to determine the validation method (strict, lax, etc.)
"""
global _secret_token_map
for file_name in os.listdir(dir):
file_path = os.path.join(dir, file_name)
if os.path.isfile(file_path):
token = name + ":" + file_name
allow_unresolved = model_context.get_validate_configuration().allow_unresolved_secret_tokens()
_secret_token_map[token] = _read_value_from_file(file_path, allow_unresolved)
def _list_known_secret_tokens():
"""
Returns a string representation of the available secret name/path tokens.
"""
global _secret_token_map
keys = list(_secret_token_map.keys())
keys.sort()
ret = ''
for key in keys:
if ret != '':
ret += ', '
ret += "'" + key + "'"
return ret
def _report_token_issue(message_key, method_name, allow_unresolved, *args):
"""
Log a message at the level corresponding to the validation method (SEVERE for strict, INFO otherwise).
The lax validation method can be used to verify the model without resolving tokens.
:param message_key: the message key to be logged and used for exceptions
:param method_name: the name of the calling method for logging
:param allow_unresolved: if True, log INFO instead of SEVERE for lookup failures
:param args: arguments for use in the message
"""
log_method = _logger.severe
if allow_unresolved:
log_method = _logger.info
log_method(message_key, class_name=_class_name, method_name=method_name, *args)
def substitute_key(text, variables):
"""
Substitute any @@PROP values in the text and return.
If the corresponding variable is not found, leave the @@PROP value in place.
:param text: the text to be evaluated
:param variables: the variable map
:return: the substituted text value
"""
if variables is not None:
matches = _property_pattern.findall(text)
for token, key in matches:
if key in variables:
value = variables[key]
text = text.replace(token, value)
matches = _environment_pattern.findall(text)
for token, key in matches:
# log, or throw an exception if key is not found.
if not env_helper.has_env(str_helper.to_string(key)):
continue
value = env_helper.getenv(str_helper.to_string(key))
text = text.replace(token, value)
return text
def has_variables(text):
"""
Determine if the specified text contains any variable references.
:param text: the text to be evaluated
:return: True if the text contains variable references, False otherwise
"""
matches = _property_pattern.findall(text)
return len(matches) > 0
def get_variable_matches(text):
"""
Return a list containing a tuple for each property key in the specified text.
Each tuple contains the full expression (@@PROP:<key>@@) and just the key (<key>).
:param text: the text to be evaluated
:return: a list of tuples
"""
return _property_pattern.findall(text)
def is_variable_string(value):
"""
Return True if the value contains ONLY a variable token.
"""
if not isinstance(value, basestring):
return False
return bool(_property_string_pattern.match(value))
def get_variable_string_key(value):
"""
Return the variable key if the value contains ONLY a variable token.
"""
if not isinstance(value, basestring):
return None
matches = _property_string_pattern.findall(value)
if len(matches) > 0:
return matches[0][1]
return None
def is_secret_string(value):
"""
Return True if the value contains ONLY a secret token.
"""
if not isinstance(value, basestring):
return False
return bool(_secret_string_pattern.match(value))
|
8b3f9876da1d6e5653402ea9199b11e29134b1af
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/python/GafferImageTest/FormatQueryTest.py
|
074ea7710c6546fd58fdc5728d959765dcc532c4
|
[
"BSD-3-Clause"
] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 7,865
|
py
|
FormatQueryTest.py
|
##########################################################################
#
# Copyright (c) 2021, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import os
import random
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class FormatQueryTest( GafferImageTest.ImageTestCase ) :
def test( self ) :
constantSource = GafferImage.Constant()
constantSource["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
formatQuery = GafferImage.FormatQuery()
formatQuery["image"].setInput( constantSource["out"] )
constantDest = GafferImage.Constant()
constantDest["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
constantDest["format"].setInput( formatQuery["format"] )
random.seed( 42 )
for f in [
GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 511 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 217, 716) ), 2 ),
GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 3840, 2160 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 32544, 73427 ) ), 0.5 ),
GafferImage.Format( imath.Box2i( imath.V2i( 10, 20 ), imath.V2i( 80, 90 ) ), 1.5 ),
GafferImage.Format( imath.Box2i( imath.V2i( -10, -20 ), imath.V2i( 80, 90 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( -1275, -1534 ), imath.V2i( 2422, 5475 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( -12075, -10534 ), imath.V2i( 24202, 50475 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( -120075, -100534 ), imath.V2i( 242002, 500475 ) ), 1 ),
GafferImage.Format( imath.Box2i( imath.V2i( -1200075, -1000534 ), imath.V2i( 2420002, 5000475 ) ), 1 ),
] + [
GafferImage.Format( imath.Box2i( imath.V2i( random.randrange( -500000, 0 ), random.randrange( -500000, 0 ) ), imath.V2i( random.randrange( 0, 500000 ), random.randrange( 0, 500000 ) ) ), 1 ) for i in range( 100 )
]:
constantSource["format"].setValue( f )
self.assertEqual( formatQuery["format"]["displayWindow"].getValue(), f.getDisplayWindow() )
self.assertEqual( formatQuery["format"]["pixelAspect"].getValue(), f.getPixelAspect() )
self.assertEqual( formatQuery["format"].getValue(), f )
self.assertEqual( formatQuery["size"].getValue(), f.getDisplayWindow().size() )
self.assertEqual( formatQuery["center"].getValue(), imath.V2f( ( imath.V2d( f.getDisplayWindow().min() ) + imath.V2d( f.getDisplayWindow().max() ) ) * 0.5 ) )
# Driving a Constant using FormatQuery should produce the same image
# ( but only check if it's not going to be too expensive )
if f.getDisplayWindow().size()[0] * f.getDisplayWindow().size()[1] < 2000 * 2000:
self.assertImagesEqual( constantSource["out"], constantDest["out"] )
def testView( self ) :
constantSource = GafferImage.Constant()
constantSource["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
constantSource["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 512 ) ), 1 ) )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( self.imagesPath() / "checkerboard.100x100.exr" )
views = GafferImage.CreateViews()
views["views"].addChild( Gaffer.NameValuePlug( "left", GafferImage.ImagePlug(), True ) )
views["views"].addChild( Gaffer.NameValuePlug( "right", GafferImage.ImagePlug(), True ) )
views["views"][0]["value"].setInput( constantSource["out"] )
views["views"][1]["value"].setInput( reader["out"] )
formatQuery = GafferImage.FormatQuery()
formatQuery["image"].setInput( views["out"] )
for contextView, queryAndResults in [
( None, [ ( "", None ), ( "left", imath.V2i( 512 ) ), ( "right", imath.V2i( 100 ) ) ] ),
( "left", [ ( "", imath.V2i( 512 ) ), ( "left", imath.V2i( 512 ) ), ( "right", imath.V2i( 100 ) ) ] ),
("right", [ ( "", imath.V2i( 100 ) ), ( "left", imath.V2i( 512 ) ), ( "right", imath.V2i( 100 ) ) ] )
]:
for queryView, result in queryAndResults:
with Gaffer.Context( Gaffer.Context.current() ) as c:
formatQuery["view"].setValue( queryView )
if contextView:
c["image:viewName"] = contextView
if result is None:
# The result when contextView and queryView are both not overridden is an error,
# views has just left and right views, so it's illegal to ask for "default".
with self.assertRaisesRegex( Gaffer.ProcessException, 'View does not exist "default"' ):
formatQuery["format"]["displayWindow"]["max"].getValue()
else:
self.assertEqual( formatQuery["format"]["displayWindow"]["max"].getValue(), result )
# When reading from an image with a default view, we get the default when requesting a view that
# doesn't exist
formatQuery["image"].setInput( reader["out"] )
formatQuery["view"].setValue( "left" )
self.assertEqual( formatQuery["format"]["displayWindow"]["max"].getValue(), imath.V2i( 100 ) )
formatQuery["view"].setValue( "right" )
self.assertEqual( formatQuery["format"]["displayWindow"]["max"].getValue(), imath.V2i( 100 ) )
def testCleanContext( self ) :
# This test checks that formatQuery removes tile origin and channel name from the context before
# pulling on the input image format.
# It does this by connecting to a Checkerboard, and computing the tiles, knowing that ContextSanitiser
# will throw if there is bad variable in the context when pulling on constantSource["format"].
# Checkerboard should actually be using a global scope for its size parameter anyway, if this is fixed,
# this test will no longer do anything. ( I think it's vaguely plausible that in the long run we might
# end up in a situation where everything that could connect to QueryFormat handles the context pruning
# itself, and it becomes unnecessary for QueryFormat to prune, but John disagrees )
constantSource = GafferImage.Constant()
constantSource["color"].setValue( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ) )
formatQuery = GafferImage.FormatQuery()
formatQuery["image"].setInput( constantSource["out"] )
checkerBoard = GafferImage.Checkerboard()
checkerBoard["size"].setInput( formatQuery["size"] )
GafferImage.ImageAlgo.tiles( checkerBoard["out"] )
if __name__ == "__main__":
unittest.main()
|
caa1172ca02ae0e41d9bc1be89485bd1f68ce3e6
|
94f23b6ee6a0cb96eb3a3e2d5a0900ab31d3e807
|
/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/commands.py
|
0eb970e8c6661dd87873b896262785fbe57650fc
|
[
"MIT"
] |
permissive
|
cookiecutter-flask/cookiecutter-flask
|
e08b1cde934db58b270dc0b6ff504996e2aa0f66
|
6be0173177fbf9210c463d9eaffbf4fdaed192d6
|
refs/heads/master
| 2023-08-30T17:35:13.734425
| 2023-08-29T03:54:22
| 2023-08-29T12:13:59
| 12,153,248
| 2,072
| 412
|
MIT
| 2023-09-13T20:52:59
| 2013-08-16T07:27:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
commands.py
|
# -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, "tests")
@click.command()
@click.option(
"-c/-C",
"--coverage/--no-coverage",
default=True,
is_flag=True,
help="Show coverage report",
)
def test(coverage):
"""Run the tests."""
import pytest
args = [TEST_PATH, "--verbose"]
if coverage:
args.append("--cov={{cookiecutter.app_name}}")
rv = pytest.main(args)
exit(rv)
@click.command()
@click.option(
"-f",
"--fix-imports",
default=True,
is_flag=True,
help="Fix imports using isort, before linting",
)
@click.option(
"-c",
"--check",
default=False,
is_flag=True,
help="Don't make any changes to files, just confirm they are formatted correctly",
)
def lint(fix_imports, check):
"""Lint and check code style with black, flake8 and isort."""
skip = ["node_modules", "requirements", "migrations"]
root_files = glob("*.py")
root_directories = [
name for name in next(os.walk("."))[1] if not name.startswith(".")
]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {' '.join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
isort_args = []
black_args = []
if check:
isort_args.append("--check")
black_args.append("--check")
if fix_imports:
execute_tool("Fixing import order", "isort", *isort_args)
execute_tool("Formatting style", "black", *black_args)
execute_tool("Checking code style", "flake8")
|
443ab2c32ab0f9b9f6166ae709f4652c4c8fea45
|
775e5461c46b26caa24393c0d9d6c09ed5ceab85
|
/2LayerLSTM/lstmClassOld.py
|
2473dded69b675738fc712875ee294c9b243764f
|
[
"Apache-2.0"
] |
permissive
|
mike-bowles/hdDeepLearningStudy
|
d2154328b389671bdbbd8454a12e531c71248da0
|
1d4a9b2af5015b455c70952bc30a4cc3eeff963e
|
refs/heads/master
| 2023-09-01T07:22:21.259949
| 2023-08-30T03:57:28
| 2023-08-30T03:57:28
| 53,914,628
| 306
| 104
|
NOASSERTION
| 2020-10-01T12:50:43
| 2016-03-15T04:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,366
|
py
|
lstmClassOld.py
|
import theano
import theano.tensor as T
import numpy as np
import random
import matplotlib.pyplot as plt
import cPickle as pickle
from math import sqrt
#from lstmClass import LstmLayer, recurrent_fn
'''Define lstm class for single layer lstm
The objective is to define it in such a way as to facilitate construction of a multi-layer
lstm. The questions are:
1. Cost function can't go inside the class because it may only be associated with the last
layer in the stack.
2. Should the scan function be inside or outside of the class?
3. How about the gradient calculations? It seems like those need to be outside the class;
does that mean that the gradient calculations have to be outside the class statement?
4. Perhaps the scan function and the single-layer recurrence function need to be inside
the class statement, but the cost function goes outside.
5. Then the gradient calculation might only need to have a list of the parameters for which
the cost needs to be diff'd. That would just be the list of lstm-layer objects dotted with
the parameter list for each one.
6. Not clear how gradient of scan function may interact with python oop. Not sure if scan
output includes enough for gradient calc. Perhaps scan should be external to class structure
Plan A.
class RNN
'''
class LstmLayer(object):
def __init__(self, n_in, n_hidden, n_out, name):
self.name = name
rng = np.random.RandomState(1234)
#cell input
self.W_ug = np.asarray(rng.normal(size=(n_in, n_hidden), scale= .01, loc = 0.0), dtype = theano.config.floatX)
self.W_hg = np.asarray(rng.normal(size=(n_hidden, n_hidden), scale=.01, loc = 0.0), dtype = theano.config.floatX)
self.b_g = np.zeros((n_hidden,), dtype=theano.config.floatX)
#input gate equation
self.W_ui = np.asarray(rng.normal(size=(n_in, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.W_hi = np.asarray(rng.normal(size=(n_hidden, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.b_i = np.zeros((n_hidden,), dtype=theano.config.floatX)
#forget gate equations
self.W_uf = np.asarray(rng.normal(size=(n_in, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.W_hf = np.asarray(rng.normal(size=(n_hidden, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.b_f = np.zeros((n_hidden,), dtype=theano.config.floatX)
#cell output gate equations
self.W_uo = np.asarray(rng.normal(size=(n_in, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.W_ho = np.asarray(rng.normal(size=(n_hidden, n_hidden), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.b_o = np.zeros((n_hidden,), dtype=theano.config.floatX)
#output layer
self.W_hy = np.asarray(rng.normal(size=(n_hidden, n_out), scale =.01, loc=0.0), dtype = theano.config.floatX)
self.b_hy = np.zeros((n_out,), dtype=theano.config.floatX)
#cell input
self.W_ug = theano.shared(self.W_ug, 'W_ug' + self.name)
self.W_hg = theano.shared(self.W_hg, 'W_hg' + self.name)
self.b_g = theano.shared(self.b_g, 'b_g' + self.name)
#input gate equation
self.W_ui = theano.shared(self.W_ui, 'W_ui' + self.name)
self.W_hi = theano.shared(self.W_hi, 'W_hi' + self.name)
self.b_i = theano.shared(self.b_i, 'b_i' + self.name)
#forget gate equations
self.W_uf = theano.shared(self.W_uf, 'W_uf' + self.name)
self.W_hf = theano.shared(self.W_hf, 'W_hf' + self.name)
self.b_f = theano.shared(self.b_f, 'b_f' + self.name)
#cell output gate equations
self.W_uo = theano.shared(self.W_uo, 'W_uo' + self.name)
self.W_ho = theano.shared(self.W_ho, 'W_ho' + self.name)
self.b_o = theano.shared(self.b_o, 'b_o' + self.name)
#output layer
self.W_hy = theano.shared(self.W_hy, 'W_hy' + self.name)
self.b_hy = theano.shared(self.b_hy, 'b_hy' + self.name)
self.h0_tm1 = theano.shared(np.zeros(n_hidden, dtype=theano.config.floatX))
self.s0_tm1 = theano.shared(np.zeros(n_hidden, dtype=theano.config.floatX))
self.argList = [self.W_ug, self.W_hg, self.b_g, self.W_ui, self.W_hi,
self.b_i, self.W_uf, self.W_hf, self.b_f, self.W_uo, self.W_ho, self.b_o, self.W_hy, self.b_hy]
def recurrent_fn(u_t, h_tm1, s_tm1, W_ug, W_hg, b_g, W_ui, W_hi, b_i, W_uf, W_hf, b_f,
W_uo, W_ho, b_o, W_hy, b_hy):
g_t = T.tanh(T.dot(u_t, W_ug) + T.dot(h_tm1, W_hg) + b_g)
i_t = T.nnet.sigmoid(T.dot(u_t, W_ui) + T.dot(h_tm1, W_hi) + b_i)
f_t = T.nnet.sigmoid(T.dot(u_t, W_uf) + T.dot(h_tm1, W_hf) + b_f)
o_t = T.nnet.sigmoid(T.dot(u_t, W_uo) + T.dot(h_tm1, W_ho) + b_o)
s_t = g_t * i_t + s_tm1*f_t
h_t = T.tanh(s_t)*o_t
#h_t = self.activ(T.dot(h_tm1, W_hh) + T.dot(u_t, W_uh) + b_hh)
return [h_t, s_t]
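# For reference, recurrent_fn implements the standard LSTM update; this
# summary is read off the code above rather than any external source:
#   g_t = tanh(u_t W_ug + h_{t-1} W_hg + b_g)       cell input
#   i_t = sigmoid(u_t W_ui + h_{t-1} W_hi + b_i)    input gate
#   f_t = sigmoid(u_t W_uf + h_{t-1} W_hf + b_f)    forget gate
#   o_t = sigmoid(u_t W_uo + h_{t-1} W_ho + b_o)    output gate
#   s_t = g_t * i_t + s_{t-1} * f_t                 cell state
#   h_t = tanh(s_t) * o_t                           hidden output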
def fcn2(u_t, h_tm1, s_tm1,h_tm12, s_tm12, W_ug, W_hg, b_g, W_ui, W_hi, b_i, W_uf, W_hf, b_f,
W_uo, W_ho, b_o, W_hy, b_hy, W_ug2, W_hg2, b_g2, W_ui2, W_hi2, b_i2, W_uf2, W_hf2, b_f2,
W_uo2, W_ho2, b_o2, W_hy2, b_hy2):
[h_t, s_t] = recurrent_fn(u_t, h_tm1, s_tm1, W_ug, W_hg, b_g, W_ui, W_hi, b_i, W_uf, W_hf, b_f,
W_uo, W_ho, b_o, W_hy, b_hy)
o1 = T.dot(h_tm1, W_hy) + b_hy
[h_t2, s_t2] = recurrent_fn(o1, h_tm12, s_tm12, W_ug2, W_hg2, b_g2, W_ui2, W_hi2, b_i2, W_uf2, W_hf2, b_f2,
W_uo2, W_ho2, b_o2, W_hy2, b_hy2)
return [h_t, s_t, h_t2, s_t2]
#use lstmLayer class to define algebra of lstm and build stack and gradient calculation
#one layer lstm stack for stock price prediction
# u = T.matrix()
# t = T.scalar()
# l1 = LstmLayer(n_in=5, n_hidden=10, n_out=1, name='l1')
#theano.printing.debugprint([h0_tm1, u, W_hh, W_uh, W_hy, b_hh, b_hy], print_type=True)
#define
# [l1.h, l1.s], _ = theano.scan(recurrent_fn, sequences = u,
# outputs_info = [l1.h0_tm1, l1.s0_tm1],
# non_sequences = l1.argList)
# y = T.dot(l1.h[-1], l1.W_hy) + l1.b_hy
# cost = ((t - y)**2).mean(axis=0).sum()
# grad = T.grad(cost, l1.argList)
# lr = T.scalar()
# update = [(a, a-lr*b) for (a,b) in zip(l1.argList, grad)]
#
# train_step = theano.function([u, t, lr], cost,
# on_unused_input='warn',
# updates=update,
# allow_input_downcast=True)
#two layer lstm stack for stock price prediction
u = T.matrix()
t = T.scalar()
o1 = T.matrix()
l1 = LstmLayer(n_in=5, n_hidden=10, n_out=10, name='l1')
l2 = LstmLayer(n_in=10, n_hidden=10, n_out=1, name='l2')
#theano.printing.debugprint([h0_tm1, u, W_hh, W_uh, W_hy, b_hh, b_hy], print_type=True)
#define
[l1.h, l1.s, l2.h, l2.s], _ = theano.scan(fcn2, sequences = u,
outputs_info = [l1.h0_tm1, l1.s0_tm1, l2.h0_tm1, l2.s0_tm1],
non_sequences = l1.argList + l2.argList)
# non_sequences = l1.argList + l2.argList, mode='DebugMode')
y = T.dot(l2.h[-1], l2.W_hy) + l2.b_hy
cost = ((t - y)**2).mean(axis=0).sum()
grad = T.grad(cost, l1.argList + l2.argList)
lr = T.scalar()
update = [(a, a-lr*b) for (a,b) in zip(l1.argList + l2.argList, grad)]
train_step = theano.function([u, t, lr], cost,
on_unused_input='warn',
updates=update,
allow_input_downcast=True)
# allow_input_downcast=True, mode='DebugMode')
if __name__ == '__main__':
(xlist, ylist) = pickle.load(open('stockTT.bin', 'rb'))
nInputs = len(xlist[0])
x = np.array(xlist, dtype = theano.config.floatX)
y = np.array(ylist, dtype = theano.config.floatX)
print "Std Dev of Price Change", np.std(y)
nHidden = 20
nOutputs = 1
lr = 0.01
eSmooth = 1.0
nPasses = 1
vals = []
errSq = []
for i in range(nPasses):
for j in range(len(x)):
u = np.asarray(xlist[j], dtype = theano.config.floatX).reshape((1,nInputs))
t = y[j]
c = train_step(u, t, lr)
if j%10==0: print "iteration {0}: {1}".format(j, np.sqrt(c))
eSmooth = 0.1*np.sqrt(c) + 0.9*eSmooth
vals.append(eSmooth)
errSq.append(c)
print 'RMS Pred Error', sqrt(np.average(errSq[500:]))
plt.plot(vals)
plt.show()
|
2cb3fee5307d16adcbb03f6767aeaea0402b506d
|
68073b5bbec051890bce2cdb0abbf1c7652002ed
|
/src/robotide/lib/robot/parsing/model.py
|
0e8f22bfbe4da23c3502e6c3212ddadd0b1ae022
|
[
"Apache-2.0"
] |
permissive
|
robotframework/RIDE
|
3b6dc9629e34b6f350e154e5f76d106fa48eaaa8
|
ed4d650dbd806672401d4341fecc30274c4972c7
|
refs/heads/master
| 2023-09-05T15:59:01.151700
| 2023-09-02T22:39:16
| 2023-09-02T22:39:16
| 2,467,257
| 897
| 419
|
Apache-2.0
| 2023-09-10T03:43:39
| 2011-09-27T11:53:40
|
Python
|
UTF-8
|
Python
| false
| false
| 39,240
|
py
|
model.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import re
import warnings
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.variables import is_var
from robotide.lib.robot.output import LOGGER
from robotide.lib.robot.writer import DataFileWriter
from robotide.lib.robot.utils import abspath, is_string, normalize, py2to3, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator, NoTestsFound
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
re_set_var = re.compile(r"(?i)^set[ ](\S.*)+variable$")
def TestData(parent=None, source=None, include_suites=None,
warn_on_skipped='DEPRECATED', extensions=None, settings=None):
"""Parses a file or directory to a corresponding model object.
:param parent: Optional parent to be used in creation of the model object.
:param source: Path where test data is read from.
:param warn_on_skipped: Deprecated.
:param extensions: List/set of extensions to parse. If None, all files
supported by Robot Framework are parsed when searching test cases.
:returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
:class:`~.model.TestCaseFile` otherwise.
"""
# TODO: Remove in RF 3.2.
if warn_on_skipped != 'DEPRECATED':
warnings.warn("Option 'warn_on_skipped' is deprecated and has no "
"effect.", DeprecationWarning)
if os.path.isdir(source):
return TestDataDirectory(parent, source, settings).populate(include_suites,
extensions)
return TestCaseFile(parent, source, settings).populate()
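# Minimal usage sketch (the path below is hypothetical):
#   suite = TestData(source='path/to/suite.robot')
# Returns a populated TestCaseFile for a file source, or a populated
# TestDataDirectory when the source is a directory.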
class _TestData(object):
_setting_table_names = 'Setting', 'Settings'
_variable_table_names = 'Variable', 'Variables'
_testcase_table_names = 'Test Case', 'Test Cases', 'Task', 'Tasks'
_keyword_table_names = 'Keyword', 'Keywords'
# remove Comments section, because we want to keep them as they are in files
_comment_table_names = 'Comment', 'Comments'
def __init__(self, parent=None, source=None):
self.parent = parent
self.source = abspath(source) if source else None
self.children = []
self._preamble = []
self._tables = dict(self._get_tables())
def _get_tables(self):
for names, table in [(self._setting_table_names, self.setting_table),
(self._variable_table_names, self.variable_table),
(self._testcase_table_names, self.testcase_table),
(self._keyword_table_names, self.keyword_table),
(self._comment_table_names, None)]:
# remove Comments section, because we want to keep them as they are in files
# , (self._comment_table_names, None)]:
for name in names:
yield name, table
def start_table(self, header_row):
table = self._find_table(header_row)
if table is None or not self._table_is_allowed(table):
return None
table.set_header(header_row)
return table
def has_preamble(self):
return len(self.preamble) > 0
def add_preamble(self, row):
self._preamble.append(row)
@property
def preamble(self):
return self._preamble
@preamble.setter
def preamble(self, row):
self.add_preamble(row)
def _find_table(self, header_row):
name = header_row[0] if header_row else ''
title = name.title()
if title not in self._tables:
title = self._resolve_deprecated_table(name)
if title is None:
self._report_unrecognized_table(name)
return None
return self._tables[title]
def _resolve_deprecated_table(self, used_name):
normalized = normalize(used_name)
for name in (self._setting_table_names + self._variable_table_names +
self._testcase_table_names + self._keyword_table_names):
# remove Comments section, because we want to keep them as they are in files
# + self._comment_table_names):
if normalize(name) == normalized:
self._report_deprecated_table(used_name, name)
return name
return None
def _report_deprecated_table(self, deprecated, name):
self.report_invalid_syntax(
"Section name '%s' is deprecated. Use '%s' instead."
% (deprecated, name), level='WARN'
)
def _report_unrecognized_table(self, name):
self.report_invalid_syntax(
"Unrecognized table header '%s'. Available headers for data: "
"'Setting(s)', 'Variable(s)', 'Test Case(s)', 'Task(s)' and "
"'Keyword(s)'. Use 'Comment(s)' to embedded additional data."
% name
)
def _table_is_allowed(self, table):
return True
@property
def name(self):
return self._format_name(self._get_basename()) if self.source else None
@property
def rawname(self):
return self._get_basename() if self.source else None
# To be used on resource prefixed suggestions
def _get_basename(self):
return os.path.splitext(os.path.basename(self.source))[0]
def _format_name(self, name):
name = self._strip_possible_prefix_from_name(name)
name = name.replace('_', ' ').strip()
return name.title() if name.islower() else name
def _strip_possible_prefix_from_name(self, name):
return name.split('__', 1)[-1]
@property
def keywords(self):
return self.keyword_table.keywords
@property
def imports(self):
return self.setting_table.imports
def report_invalid_syntax(self, message, level='ERROR'):
initfile = getattr(self, 'initfile', None)
path = os.path.join(self.source, initfile) if initfile else self.source
LOGGER.write("Error in file '%s': %s" % (path, message), level)
def save(self, **options):
"""Writes this datafile to disk.
:param options: Configuration for writing. These are passed to
:py:class:`~robot.writer.datafilewriter.WritingContext` as
keyword arguments.
See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
"""
return DataFileWriter(**options).write(self)
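# Usage sketch: save() with no arguments writes the data back to its source
# file; any keyword arguments are forwarded to WritingContext (hedged: the
# accepted option names live there and are not enumerated in this file):
#   datafile.save()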
@py2to3
class TestCaseFile(_TestData):
"""The parsed test case file object.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
__test__ = False
def __init__(self, parent=None, source=None, settings=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = TestCaseFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
self._settings = settings
self._tab_size = self._settings.get('txt number of spaces', 2) if self._settings else 2
_TestData.__init__(self, parent, source)
def populate(self):
FromFilePopulator(self, self._tab_size).populate(self.source)
self._validate()
return self
def _validate(self):
if not self.testcase_table.is_started():
# print(f"DEBUG: Model TestCaseFile _validate this is where there are no tests")
raise NoTestsFound('File has no tests or tasks.')
def has_tests(self):
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.testcase_table, self.keyword_table]:
yield table
def __nonzero__(self):
return any(table for table in self)
class ResourceFile(_TestData):
"""The parsed resource file object.
:param source: path where resource file is read from.
"""
def __init__(self, source=None, settings=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = ResourceFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
self.settings = settings
self._tab_size = self.settings.get('txt number of spaces', 2) if self.settings else 2
_TestData.__init__(self, source=source)
def populate(self):
FromFilePopulator(self, self._tab_size).populate(self.source, resource=True)
self._report_status()
return self
def _report_status(self):
if self.setting_table or self.variable_table or self.keyword_table:
LOGGER.info("Imported resource file '%s' (%d keywords)."
% (self.source, len(self.keyword_table.keywords)))
else:
LOGGER.warn("Imported resource file '%s' is empty." % self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
raise DataError("Resource file '%s' cannot contain tests or "
"tasks." % self.source)
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class TestDataDirectory(_TestData):
"""The parsed test data directory object. Contains hiearchical structure
of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
objects.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
__test__ = False
def __init__(self, parent=None, source=None, settings=None):
self.directory = source
self.initfile = None
self.setting_table = InitFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
self._settings = settings
self._tab_size = self._settings.get('txt number of spaces', 2) if self._settings else 2
_TestData.__init__(self, parent, source)
def populate(self, include_suites=None, extensions=None, recurse=True):
FromDirectoryPopulator().populate(self.source, self, include_suites,
extensions, recurse, self._tab_size)
self.children = [ch for ch in self.children if ch.has_tests()]
return self
def _get_basename(self):
return os.path.basename(self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
LOGGER.error("Test suite initialization file in '%s' cannot "
"contain tests or tasks." % self.source)
return False
return True
def add_child(self, path, include_suites, extensions=None):
self.children.append(TestData(parent=self,
source=path,
include_suites=include_suites,
extensions=extensions, settings=self._settings))
def has_tests(self):
return any(ch.has_tests() for ch in self.children)
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
@py2to3
class _Table(object):
def __init__(self, parent):
self.parent = parent
self._header = None
def set_header(self, header):
self._header = self._prune_old_style_headers(header)
def _prune_old_style_headers(self, header):
if len(header) < 3:
return header
if self._old_header_matcher.match(header):
return [header[0]]
return header
@property
def header(self):
return self._header or [self.type.title() + 's']
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def __nonzero__(self):
return bool(self._header or len(self))
def __len__(self):
return sum(1 for item in self)
class _WithSettings(object):
_setters = {}
_aliases = {}
def get_setter(self, name):
if name[-1:] == ':':
name = name[:-1]
# Patch the '...' (continuation) setting so we don't get a Parser Error in the log
if name == '...':
name = 'Documentation'
setter = self._get_setter(name)
if setter is not None:
return setter
setter = self._get_deprecated_setter(name)
if setter is not None:
return setter
self.report_invalid_syntax("Non-existing setting '%s'." % name)
return None
def _get_setter(self, name):
title = name.title()
if title in self._aliases:
title = self._aliases[name]
if title in self._setters:
return self._setters[title](self)
return None
def _get_deprecated_setter(self, name):
normalized = normalize(name)
for setting in list(self._setters) + list(self._aliases):
if normalize(setting) == normalized:
self._report_deprecated_setting(name, setting)
return self._get_setter(setting)
return None
def _report_deprecated_setting(self, deprecated, correct):
self.report_invalid_syntax(
"Setting '%s' is deprecated. Use '%s' instead."
% (deprecated, correct), level='WARN'
)
def report_invalid_syntax(self, message, level='ERROR'):
raise NotImplementedError
class _SettingTable(_Table, _WithSettings):
type = 'setting'
def __init__(self, parent):
_Table.__init__(self, parent)
self.doc = Documentation('Documentation', self)
self.suite_setup = Fixture('Suite Setup', self)
self.suite_teardown = Fixture('Suite Teardown', self)
self.test_setup = Fixture('Test Setup', self)
self.test_teardown = Fixture('Test Teardown', self)
self.force_tags = Tags('Force Tags', self)
self.default_tags = Tags('Default Tags', self)
self.test_template = Template('Test Template', self)
self.test_timeout = Timeout('Test Timeout', self)
self.metadata = MetadataList(self)
self.imports = ImportList(self)
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add_metadata(self, name, value='', comment=None):
self.metadata.add(Metadata(self, name, value, comment))
return self.metadata[-1]
def add_library(self, name, args=None, comment=None):
self.imports.add(Library(self, name, args, comment=comment))
return self.imports[-1]
def add_resource(self, name, invalid_args=None, comment=None):
self.imports.add(Resource(self, name, invalid_args, comment=comment))
return self.imports[-1]
def add_variables(self, name, args=None, comment=None):
self.imports.add(Variables(self, name, args, comment=comment))
return self.imports[-1]
def __len__(self):
return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
__test__ = False
_setters = {'Documentation': lambda s: s.doc.populate,
'Suite Setup': lambda s: s.suite_setup.populate,
'Suite Teardown': lambda s: s.suite_teardown.populate,
'Test Setup': lambda s: s.test_setup.populate,
'Test Teardown': lambda s: s.test_teardown.populate,
'Force Tags': lambda s: s.force_tags.populate,
'Default Tags': lambda s: s.default_tags.populate,
'Test Template': lambda s: s.test_template.populate,
'Test Timeout': lambda s: s.test_timeout.populate,
'Library': lambda s: s.imports.populate_library,
'Resource': lambda s: s.imports.populate_resource,
'Variables': lambda s: s.imports.populate_variables,
'Metadata': lambda s: s.metadata.populate}
_aliases = {'Task Setup': 'Test Setup',
'Task Teardown': 'Test Teardown',
'Task Template': 'Test Template',
'Task Timeout': 'Test Timeout'}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.default_tags, self.test_template, self.test_timeout] \
+ self.metadata.data + self.imports.data:
yield setting
class ResourceFileSettingTable(_SettingTable):
_setters = {'Documentation': lambda s: s.doc.populate,
'Library': lambda s: s.imports.populate_library,
'Resource': lambda s: s.imports.populate_resource,
'Variables': lambda s: s.imports.populate_variables}
def __iter__(self):
for setting in [self.doc] + self.imports.data:
yield setting
class InitFileSettingTable(_SettingTable):
_setters = {'Documentation': lambda s: s.doc.populate,
'Suite Setup': lambda s: s.suite_setup.populate,
'Suite Teardown': lambda s: s.suite_teardown.populate,
'Test Setup': lambda s: s.test_setup.populate,
'Test Teardown': lambda s: s.test_teardown.populate,
'Test Timeout': lambda s: s.test_timeout.populate,
'Force Tags': lambda s: s.force_tags.populate,
'Library': lambda s: s.imports.populate_library,
'Resource': lambda s: s.imports.populate_resource,
'Variables': lambda s: s.imports.populate_variables,
'Metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.test_timeout] + self.metadata.data + self.imports.data:
yield setting
class VariableTable(_Table):
type = 'variable'
def __init__(self, parent):
_Table.__init__(self, parent)
self.variables = []
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add(self, name, value, comment=None):
self.variables.append(Variable(self, name, value, comment))
def __iter__(self):
return iter(self.variables)
class TestCaseTable(_Table):
__test__ = False
type = 'test case'
def __init__(self, parent):
_Table.__init__(self, parent)
self.tests = []
def set_header(self, header):
if self._header and header:
self._validate_mode(self._header[0], header[0])
_Table.set_header(self, header)
def _validate_mode(self, name1, name2):
tasks1 = normalize(name1) in ('task', 'tasks')
tasks2 = normalize(name2) in ('task', 'tasks')
if tasks1 is not tasks2:
raise DataError('One file cannot have both tests and tasks.')
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.tests.append(TestCase(self, name))
return self.tests[-1]
def __iter__(self):
return iter(self.tests)
def is_started(self):
return bool(self._header)
class KeywordTable(_Table):
type = 'keyword'
def __init__(self, parent):
_Table.__init__(self, parent)
self.keywords = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.keywords.append(UserKeyword(self, name))
return self.keywords[-1]
def __iter__(self):
return iter(self.keywords)
@py2to3
class Variable(object):
def __init__(self, parent, name, value, comment=None):
self.parent = parent
self.name = name.rstrip('= ')
if name.startswith('$') and value == []:
value = ''
if is_string(value):
value = [value]
self.value = value
self.comment = Comment(comment)
def as_list(self):
if self.has_data():
return [self.name] + self.value + self.comment.as_list()
return self.comment.as_list()
def is_set(self):
return True
def is_for_loop(self):
return False
def has_data(self):
return bool(self.name or ''.join(self.value))
def __nonzero__(self):
return self.has_data()
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax("Setting variable '%s' failed: %s"
% (self.name, message), level)
class _WithSteps(object):
def add_step(self, content, comment=None):
# print(f"DEBUG: model.py Enter _WithSteps content={content[:]} comment={comment}")
self.steps.append(Step(content, comment))
return self.steps[-1]
def copy(self, name):
new = copy.deepcopy(self)
new.name = name
self._add_to_parent(new)
return new
class TestCase(_WithSteps, _WithSettings):
__test__ = False
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.template = Template('[Template]', self)
self.tags = Tags('[Tags]', self)
self.setup = Fixture('[Setup]', self)
self.teardown = Fixture('[Teardown]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
if name == '...':
self.report_invalid_syntax(
"Using '...' as test case name is deprecated. It will be "
"considered line continuation in Robot Framework 3.2.",
level='WARN'
)
_setters = {'Documentation': lambda s: s.doc.populate,
'Template': lambda s: s.template.populate,
'Setup': lambda s: s.setup.populate,
'Teardown': lambda s: s.teardown.populate,
'Tags': lambda s: s.tags.populate,
'Timeout': lambda s: s.timeout.populate}
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def add_for_loop(self, declaration, comment=None):
self.steps.append(Step(['FOR'] + declaration, comment))
# print(f"DEBUG: Model add_for_loop return steps:{self.steps[-1].as_list()} comment:{comment}")
return self.steps[-1]
def end_for_loop(self):
loop, steps = self._find_last_empty_for_and_steps_after()
if not loop:
return False
loop.steps.extend(steps)
self.steps[-len(steps):] = []
return True
def _find_last_empty_for_and_steps_after(self):
steps = []
for step in reversed(self.steps):
if isinstance(step, ForLoop):
if not step.steps:
steps.reverse()
return step, steps
break
steps.append(step)
return None, []
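# Note (descriptive summary of the two methods above): end_for_loop walks the
# step list backwards, collects the trailing steps up to the most recent FOR
# loop that still has an empty body, and moves them into that loop; it returns
# False, leaving the steps on the test case itself, when no such loop exists.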
def report_invalid_syntax(self, message, level='ERROR'):
type_ = 'test case' if type(self) is TestCase else 'keyword'
message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
self.parent.report_invalid_syntax(message, level)
def _add_to_parent(self, test):
self.parent.tests.append(test)
@property
def settings(self):
return [self.doc, self.tags, self.setup, self.template, self.timeout,
self.teardown]
def __iter__(self):
for element in [self.doc, self.tags, self.setup,
self.template, self.timeout] \
+ self.steps + [self.teardown]:
yield element
class UserKeyword(TestCase):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.args = Arguments('[Arguments]', self)
self.return_ = Return('[Return]', self)
self.timeout = Timeout('[Timeout]', self)
self.teardown = Fixture('[Teardown]', self)
self.tags = Tags('[Tags]', self)
self.steps = []
if name == '...':
self.report_invalid_syntax(
"Using '...' as keyword name is deprecated. It will be "
"considered line continuation in Robot Framework 3.2.",
level='WARN'
)
_setters = {'Documentation': lambda s: s.doc.populate,
'Arguments': lambda s: s.args.populate,
'Return': lambda s: s.return_.populate,
'Timeout': lambda s: s.timeout.populate,
'Teardown': lambda s: s.teardown.populate,
'Tags': lambda s: s.tags.populate}
def _add_to_parent(self, test):
self.parent.keywords.append(test)
@property
def settings(self):
return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]
def __iter__(self):
for element in [self.args, self.doc, self.tags, self.timeout] \
+ self.steps + [self.teardown, self.return_]:
yield element
class ForLoop(_WithSteps):
"""The parsed representation of a for-loop.
:param list declaration: The literal cell values that declare the loop
(excluding ":FOR").
:param str comment: A comment, default None.
:ivar str flavor: The value of the 'IN' item, uppercased, typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
:ivar list vars: Variables set per-iteration by this loop.
:ivar list items: Loop values that come after the 'IN' item.
:ivar str comment: A comment, or None.
:ivar list steps: A list of steps in the loop.
"""
flavors = {'IN', 'IN RANGE', 'IN ZIP', 'IN ENUMERATE'}
normalized_flavors = NormalizedDict((f, f) for f in flavors)
inner_kw_pos = None
def __init__(self, parent, declaration, indentation=None, comment=None):
self.parent = parent
if indentation is None:
indentation = []
self.indent = indentation if isinstance(indentation, list) else [indentation]
isize = idx = 0
print(f"\nDEBUG: ForLoop init ENTER declaration={declaration[:]}")
if declaration[0] == '':
declaration.pop(0)
for idx in range(0, len(declaration)):
if declaration[idx] == '':
if idx >= 0:
isize = self.increase_indent()
else:
self.first_kw = declaration[idx]
break
self.inner_kw_pos = idx
print(f"\nDEBUG: ForLoop init indent {isize} self.inner_kw_pos={self.inner_kw_pos}\ndeclaration={declaration[:]}")
# compensation for double FOR
if declaration[self.inner_kw_pos+1] == declaration[self.inner_kw_pos] == 'FOR':
declaration.pop(self.inner_kw_pos+1)
self.flavor, index = self._get_flavor_and_index(declaration)
self.vars = declaration[self.inner_kw_pos+1:index]
self.items = declaration[index+1:]
self.comment = Comment(comment)
self.steps = []
self.args = []
def _get_flavor_and_index(self, declaration):
for index, item in enumerate(declaration):
if item in self.flavors:
return item, index
if item in self.normalized_flavors:
correct = self.normalized_flavors[item]
self._report_deprecated_flavor_syntax(item, correct)
return correct, index
if normalize(item).startswith('in'):
return item.upper(), index
return 'IN', len(declaration)
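# Illustrative parse (derived from the logic above): for the declaration
# ['FOR', '${i}', 'IN RANGE', '10'], _get_flavor_and_index returns
# ('IN RANGE', 2), so __init__ sets vars to ['${i}'] and items to ['10'].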
def _report_deprecated_flavor_syntax(self, deprecated, correct):
self.parent.report_invalid_syntax(
"Using '%s' as a FOR loop separator is deprecated. "
"Use '%s' instead." % (deprecated, correct), level='WARN'
)
def is_comment(self):
return False
def is_for_loop(self):
return True
def as_list(self, indent=True, include_comment=True):
_ = indent
comments = self.comment.as_list() if include_comment else []
# print(f"DEBUG: Model ForLoop as_list: indent={self.indent[:]} self.first_kw={self.first_kw}\n"
# f"{self.vars} + {self.flavor} + {self.items} + {comments}")
return self.indent + [self.first_kw] + self.vars + [self.flavor] + self.items + comments
def __iter__(self):
return iter(self.steps)
def is_set(self):
return True
def increase_indent(self):
self.indent.append('')
return len(self.indent)
def decrease_indent(self):
self.indent = self.indent[:-1] if len(self.indent) > 0 else []
return len(self.indent)
class Step(object):
inner_kw_pos = None
name = None
indent = []
assign = []
args = []
comment = []
normal_assign = None
cells = []
def __init__(self, content, comment=None):
if isinstance(content, Step):
size = len(content)
self.cells = content.cells # .as_list()
elif isinstance(content, list):
size = len(content)
self.cells = content
else:
size = len(self.cells) # size = len(self)
# cells = self.as_list()
if comment:
if isinstance(comment, list):
self.cells.extend(comment)
elif isinstance(comment, str):
self.cells.append(comment)
index = self.first_non_empty_cell(content) # Called first to set self.inner_kw_pos
self.inner_kw_pos = index
self.normal_assign = None
self.assign = self._get_assign() # self._get_assign(content)
# print(f"DEBUG: RFLib Model enter init Step: 1st cell content={content} comment={comment} index={index}"
# f" assign={self.assign} self.normal_assign={self.normal_assign}")
self.indent = []
self.args = []
self.name = None
self.comment = Comment(comment)
if index < 0: # This is redundant because index is >= 0
return
for _ in range(0, index):
self.indent.append('')
# print(f"DEBUG: RFLib Model init Step: index={index} inner_kw_pos = {self.inner_kw_pos} indent={self.indent[:]} \ncontent {content}")
self.args = content[index + 1:] if content and index <= len(content) - 1 else []
# print(f"DEBUG: RFLib Model init Step: 1st cell len(content)={len(content)} index {index} indent={self.indent[:]}") # 1st cell: {content[index]}")
# TODO: Create setters for Step.name and Step.args, see stepcontrollers.py replace_keyword
if index < len(content):
self.name = content[index] if content else None
else:
self.name = None
# if self.assign:
# print(f"DEBUG RFLib init Step: self.assign {self.assign}")
@staticmethod
def is_kind_of_comment(content):
return content.lower() in ['comment', 'builtin.comment'] or content.startswith('#')
def _get_assign(self):
assign = []
idx = 0
positional = True
cells = self.cells.copy()
if cells and cells != ['']:
index = self.inner_kw_pos # DEBUG avoiding calling self.first_non_empty_cell(content)
if index < len(cells) and is_var(cells[index].rstrip('=')):
self.normal_assign = True
if 0 <= index < len(cells) and self.is_kind_of_comment(cells[index]): # Special case for commented content
return []
# print(f"DEBUG: RFLib Model _get_assign VAR NORMAL (index={index}) inner_kw_pos={self.inner_kw_pos} content={content[:]}")
# first handle non FOR cases
idx = 0
try:
if cells[self.inner_kw_pos] != 'FOR':
while idx < len(cells):
if is_var(cells[idx].rstrip('=')):
assign.append(cells.pop(idx))
# if idx < self.inner_kw_pos:
idx -= 1
else:
break
idx += 1
# print(f"DEBUG: RFLib Model _get_assign RETURN assign={assign} size of content={len(content)}")
return assign
except IndexError:
pass
idx = index
while idx < len(cells) and positional:
if idx <= self.inner_kw_pos:
positional = True
else:
positional = False
if not positional and self.inner_kw_pos < idx <= self.inner_kw_pos + 3 < len(cells) and cells[self.inner_kw_pos] == 'FOR':
# print(f"DEBUG: RFLib Model _get_assign idx={idx} +1{self.inner_kw_pos + 1}:{idx+1} +2{self.inner_kw_pos + 2}:{idx+2}"
# f"FOR content1={content[self.inner_kw_pos + 1]}"
# f" content2={content[self.inner_kw_pos + 2]} size of content={len(content)}")
if idx + 2 < len(cells): # idx < self.inner_kw_pos + 3 and
# print(f"DEBUG: RFLib Model _get_assign FOR idx={idx} second IN ENUMERATE"
# f" content[idx + 1]={content[idx + 1]} content[idx + 2]={content[idx + 2]}")
if cells[idx + 1] == 'IN ENUMERATE' or cells[idx + 2] == 'IN ENUMERATE':
positional = True
self.normal_assign = False
# print(f"DEBUG: RFLib Model _get_assign FOR idx={idx} second IN ENUMERATE"
# f" size of content={len(content)} VALUE={content[idx]}")
if idx == self.inner_kw_pos + 1:
positional = True
self.normal_assign = False
# print(f"DEBUG: RFLib Model _get_assign FOR idx={idx} first loop var")
# else:
# positional = False
if not positional and self.inner_kw_pos < idx <= self.inner_kw_pos + 1 < len(cells) and re_set_var.match(cells[self.inner_kw_pos]):
positional = True
self.normal_assign = False
if is_var(cells[idx].rstrip('=')) and positional: # and self.normal_assign:
assign.append(cells.pop(idx))
idx -= 1 # We need to recheck var in case of IN ENUMERATE
idx += 1
# print(f"DEBUG: RFLib Model _get_assign idx={idx} size of content={len(content)}")
return assign
def is_comment(self):
return (self.name is not None and self.name.lower() == 'comment') or not (self.assign or self.name or self.args)
def is_for_loop(self):
# TODO: remove steps ForLoop: return self.name == 'FOR'
return False
def is_set(self):
return True
def as_list(self, indent=False, include_comment=True):
"""
import inspect
stack = inspect.stack()
the_class = stack[1][0].f_locals["self"].__class__.__name__
the_method = stack[1][0].f_code.co_name
print("DEBUG: RFLib Model Step called by {}.{}()".format(the_class, the_method))
"""
_ = include_comment
if indent:
return [''] + self.cells[:]
return self.cells[:]
def first_non_empty_cell(self, content=None):
_ = content
size = len(self.cells)
index = 0
while index < size and self.cells[index] == '':
index += 1
if 0 <= index < size:
return index
elif index - 1 > 0:
return index - 1
else:
return 0
def first_empty_cell(self):
index = self.inner_kw_pos
if index > 0:
return index - 1
return None
def increase_indent(self):
self.indent.append('')
self.cells.insert(0, '')
return len(self.indent)
def decrease_indent(self):
self.indent = self.indent[:-1] if len(self.indent) > 0 else []
self.cells = self.cells[1:] if len(self.cells) >= 1 and self.cells[0] == '' else self.cells
return len(self.indent)
def add_step(self, content, comment=None):
self.__init__(content, comment)
return self
def __len__(self):
kw = [self.name] if self.name is not None and self.name[0] != '#' else []
cells_len = len(self.cells)
if self.name == 'FOR':
seglen = len(self.indent) + len(kw) + len(self.args) + len(self.comment)
else:
seglen = len(self.indent) + len(self.assign) + len(kw) + len(self.args) + len(self.comment)
# Compensation for args==comment
if self.args and self.comment and self.args[-1] == self.comment.as_list()[-1]:
seglen -= 1 # len(self.args[:-1])
elif len(self.comment) > 1 and self.args == self.comment.as_list()[1:]:
seglen -= len(self.comment)
# Compensation for assign==kw
if self.assign and kw and self.assign[0] == kw[0]:
seglen -= len(self.assign) # len assign because assign may also be in args
# Compensation for kw==comment
if kw and self.comment and kw == self.comment.as_list():
seglen -= 1
return cells_len
class OldStyleSettingAndVariableTableHeaderMatcher(object):
def match(self, header):
return all(value.lower() == 'value' for value in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
def match(self, header):
if header[1].lower() != 'action':
return False
for arg in header[2:]:
if not arg.lower().startswith('arg'):
return False
return True
|
ea1113e9456cc97c6cd077d43b80452f3491bdd7
|
4e4b752c4dbecf0b0d9f7cb86f9f76bb0ffa5d32
|
/opencensus/trace/span_data.py
|
612b39f89d99c3b479c5e48af721e3624b63ffef
|
[
"Apache-2.0"
] |
permissive
|
census-instrumentation/opencensus-python
|
ab6bcf12b16677d9ca7fc93a5f96c2946d138a0c
|
3a2d8dfe1db4e0129dc691c35901a0d12127afc1
|
refs/heads/master
| 2023-09-02T13:53:19.757971
| 2023-03-16T22:10:07
| 2023-03-16T22:10:07
| 96,581,030
| 701
| 289
|
Apache-2.0
| 2023-09-14T21:14:09
| 2017-07-07T22:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,330
|
py
|
span_data.py
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from opencensus.common import utils
from opencensus.trace import attributes
_SpanData = collections.namedtuple(
'_SpanData',
(
'name',
'context',
'span_id',
'parent_span_id',
'attributes',
'start_time',
'end_time',
'child_span_count',
'stack_trace',
'annotations',
'message_events',
'links',
'status',
'same_process_as_parent_span',
'span_kind',
),
)
class SpanData(_SpanData):
"""Immutable representation of all data collected by a
:class: `~opencensus.trace.span.Span`.
:type name: str
:param name: The name of the span.
:type context: :class: `~opencensus.trace.span_context.SpanContext`
:param context: The SpanContext of the Span
:type span_id: int
:param span_id: Identifier for the span, unique within a trace.
:type parent_span_id: int
:param parent_span_id: (Optional) Parent span id.
:type attributes: dict
:param attributes: Collection of attributes associated with the span.
:type start_time: str
:param start_time: (Optional) Start of the time interval (inclusive)
during which the trace data was collected from the
application.
:type end_time: str
:param end_time: (Optional) End of the time interval (inclusive) during
which the trace data was collected from the application.
:type child_span_count: int
:param child_span_count: the number of child spans that were
generated while the span was active.
:type stack_trace: :class: `~opencensus.trace.stack_trace.StackTrace`
:param stack_trace: (Optional) A call stack appearing in a trace
:type annotations: list(:class:`opencensus.trace.time_event.Annotation`)
:param annotations: (Optional) The list of span annotations.
:type message_events:
list(:class:`opencensus.trace.time_event.MessageEvent`)
:param message_events: (Optional) The list of span message events.
:type links: list
:param links: (Optional) Links associated with the span. You can have up
to 128 links per Span.
:type status: :class: `~opencensus.trace.status.Status`
:param status: (Optional) An optional final status for this span.
:type same_process_as_parent_span: bool
:param same_process_as_parent_span: (Optional) A highly recommended but not
required flag that identifies when a
trace crosses a process boundary.
True when the parent_span belongs to
the same process as the current span.
:type span_kind: int
:param span_kind: (Optional) Highly recommended flag that denotes the type
of span (valid values defined by :class:
`opencensus.trace.span.SpanKind`)
"""
__slots__ = ()
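# Construction sketch (field values below are hypothetical): SpanData is a
# plain namedtuple, so all 15 fields must be supplied positionally or by name:
#   sd = SpanData(name='GET /users', context=ctx, span_id='0b1a...',
#                 parent_span_id=None, attributes={}, start_time=ts0,
#                 end_time=ts1, child_span_count=0, stack_trace=None,
#                 annotations=[], message_events=[], links=[], status=None,
#                 same_process_as_parent_span=True, span_kind=0)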
def _format_legacy_span_json(span_data):
"""
:param SpanData span_data: SpanData object to convert
:rtype: dict
:return: Dictionary representing the Span
"""
span_json = {
'displayName': utils.get_truncatable_str(span_data.name),
'spanId': span_data.span_id,
'startTime': span_data.start_time,
'endTime': span_data.end_time,
'childSpanCount': span_data.child_span_count,
'kind': span_data.span_kind
}
if span_data.parent_span_id is not None:
span_json['parentSpanId'] = span_data.parent_span_id
if span_data.attributes:
span_json['attributes'] = attributes.Attributes(
span_data.attributes).format_attributes_json()
if span_data.stack_trace is not None:
span_json['stackTrace'] = \
span_data.stack_trace.format_stack_trace_json()
formatted_time_events = []
if span_data.annotations:
formatted_time_events.extend(
{'time': aa.timestamp,
'annotation': aa.format_annotation_json()}
for aa in span_data.annotations)
if span_data.message_events:
formatted_time_events.extend(
{'time': aa.timestamp,
'message_event': aa.format_message_event_json()}
for aa in span_data.message_events)
if formatted_time_events:
span_json['timeEvents'] = {
'timeEvent': formatted_time_events
}
if span_data.links:
span_json['links'] = {
'link': [
link.format_link_json() for link in span_data.links]
}
if span_data.status is not None:
span_json['status'] = span_data.status.format_status_json()
if span_data.same_process_as_parent_span is not None:
span_json['sameProcessAsParentSpan'] = \
span_data.same_process_as_parent_span
return span_json
def format_legacy_trace_json(span_datas):
"""Formats a list of SpanData tuples into the legacy 'trace' dictionary
format for backwards compatibility
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
:rtype: dict
:return: Legacy 'trace' dictionary representing given SpanData tuples
"""
if not span_datas:
return {}
top_span = span_datas[0]
assert isinstance(top_span, SpanData)
trace_id = top_span.context.trace_id if top_span.context is not None \
else None
assert trace_id is not None
return {
'traceId': trace_id,
'spans': [_format_legacy_span_json(sd) for sd in span_datas],
}
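# Usage sketch (span_datas is a hypothetical list of SpanData tuples
# collected by a tracer):
#   trace_json = format_legacy_trace_json(span_datas)
#   # -> {'traceId': '<hex trace id>', 'spans': [<one dict per span>]}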
|
02d64f204f16bb2432cdf41cf394e90325046df4
|
9882a8d98429fe0f227b062b0e89da9b881e902c
|
/datasets/casia.py
|
beca7883ab334a55188d7d6efdd93d4c7c0a38d3
|
[
"Apache-2.0"
] |
permissive
|
grib0ed0v/face_recognition.pytorch
|
87306a5b8c7ded2bf61ddaf2166bb868be8e72cc
|
05cb9b30e8220445fcb27988926d88f330091c12
|
refs/heads/develop
| 2020-04-26T04:11:51.213924
| 2019-04-10T11:04:21
| 2019-04-10T11:04:21
| 173,293,569
| 170
| 23
|
Apache-2.0
| 2019-04-10T11:04:22
| 2019-03-01T11:49:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
casia.py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
from tqdm import tqdm
from torch.utils.data import Dataset
import cv2 as cv
from utils.face_align import FivePointsAligner
class CASIA(Dataset):
"""CASIA Dataset compatible with PyTorch DataLoader."""
def __init__(self, images_root_path, image_list_path, transform, use_landmarks=True):
self.image_list_path = image_list_path
self.images_root_path = images_root_path
self.identities = {}
self.use_landmarks = use_landmarks
self.samples_info = self._read_samples_info()
self.transform = transform
def _read_samples_info(self):
"""Reads annotation of the dataset"""
samples = []
with open(self.image_list_path, 'r') as f:
for line in tqdm(f.readlines(), 'Preparing CASIA dataset'):
sample = line.split()
sample_id = sample[1]
landmarks = [[sample[i], sample[i+1]] for i in range(2, 12, 2)]
self.identities[sample_id] = [1]
samples.append((osp.join(self.images_root_path, sample[0]), sample_id, landmarks))
return samples
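# Expected annotation line layout (inferred from the parsing above):
#   <relative_image_path> <identity_id> x1 y1 x2 y2 x3 y3 x4 y4 x5 y5
# i.e. five facial landmark points as flat pixel coordinates per sample.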
def get_num_classes(self):
"""Returns total number of identities"""
return len(self.identities)
def __len__(self):
"""Returns total number of samples"""
return len(self.samples_info)
def __getitem__(self, idx):
img = cv.imread(self.samples_info[idx][0])
if self.use_landmarks:
img = FivePointsAligner.align(img, self.samples_info[idx][2],
d_size=(200, 200), normalized=True, show=False)
if self.transform:
img = self.transform(img)
return {'img': img, 'label': int(self.samples_info[idx][1])}
|
c4f5632f46d8dc3898c8c8cdbb93b5f628f9214c
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/buildman/test_orchestrator.py
|
39ac9bdcd3c6db49996f56716eae57bc9d314172
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,558
|
py
|
test_orchestrator.py
|
import time
from random import randrange
from test.fixtures import *
from unittest.mock import Mock, patch
import fakeredis
import pytest
from freezegun import freeze_time
from buildman.orchestrator import (
REDIS_EXPIRED_SUFFIX,
REDIS_EXPIRING_SUFFIX,
KeyChange,
KeyEvent,
MemoryOrchestrator,
RedisOrchestrator,
)
from util import slash_join
@pytest.fixture()
def fake_redis():
def init_fake_strict_redis(
host="127.0.0.1",
port=6379,
password=None,
db=0,
ssl_certfile=None,
ssl_keyfile=None,
ssl_ca_certs=None,
ssl=False,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
):
fake_client = fakeredis.FakeStrictRedis(
host=host,
port=port,
password=password,
db=db,
ssl_certfile=ssl_certfile,
ssl_keyfile=ssl_keyfile,
ssl_ca_certs=ssl_ca_certs,
ssl=ssl,
socket_connect_timeout=socket_connect_timeout,
socket_timeout=socket_timeout,
# health_check_interval is not supported by fakeredis on its StrictRedis interface
)
fake_client.config_set = Mock()
fake_client.flushall()
return fake_client
with patch("redis.StrictRedis", init_fake_strict_redis):
yield
@pytest.fixture(params=[MemoryOrchestrator, RedisOrchestrator])
def orchestrator(request, fake_redis):
return request.param()
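# Because the fixture above is parametrized, every test below runs twice:
# once against MemoryOrchestrator and once against the fakeredis-backed
# RedisOrchestrator, so both backends face the same assertions.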
def test_acquire_lock(orchestrator):
lock_key = "lock/somekey"
acquired = orchestrator.lock(lock_key)
assert acquired
assert orchestrator.get_key(lock_key) is not None
acquired_again = orchestrator.lock(lock_key)
assert not acquired_again
orchestrator.delete_key(lock_key)
assert orchestrator.lock(lock_key)
def test_get_prefixed_keys(orchestrator):
keys_to_generate = 10
key_prefix = "building/"
generated_keys = set()
for x in range(keys_to_generate):
orchestrator.set_key(slash_join(key_prefix, str(x)), "test_val")
generated_keys.add(slash_join(key_prefix, str(x)))
assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate
keys_to_remove = randrange(1, keys_to_generate)
for x in range(keys_to_remove):
orchestrator.delete_key(slash_join(key_prefix, str(x)))
generated_keys.remove(slash_join(key_prefix, str(x)))
assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate - keys_to_remove
for k in generated_keys:
orchestrator.delete_key(k)
assert len(orchestrator.get_prefixed_keys(key_prefix)) == 0
def test_set_key(orchestrator):
some_key = "someprefix/somekey"
# Setting overwrite when the key doesn't exist prevents it from being written
orchestrator.set_key(some_key, "test_val", overwrite=True)
with pytest.raises(KeyError):
orchestrator.get_key(some_key)
# Set some key/value
orchestrator.set_key(some_key, "test_val_2")
assert orchestrator.get_key(some_key) == "test_val_2"
# Try overwriting some existing key without setting overwrite
with pytest.raises(KeyError):
orchestrator.set_key(some_key, "test_val_3")
# Try overwriting some existing key with overwrite set.
# Also expects a new expiration key to be created.
orchestrator.set_key(some_key, "test_val_4", overwrite=True, expiration=360)
assert orchestrator.get_key(some_key) == "test_val_4"
assert orchestrator.get_key(slash_join(some_key, REDIS_EXPIRING_SUFFIX)) is not None
def test_on_key_change(orchestrator):
key_prefix = "building/"
mock_callback = Mock()
orchestrator.on_key_change(key_prefix, lambda x: mock_callback.meth(x))
# CREATE
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val")
time.sleep(0.1)
mock_callback.meth.assert_called_with(
KeyChange(
KeyEvent.CREATE,
slash_join(key_prefix, "key1"),
"test_val",
)
)
# SET
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val", overwrite=True)
time.sleep(0.1)
mock_callback.meth.assert_called_with(
KeyChange(
KeyEvent.SET,
slash_join(key_prefix, "key1"),
"test_val",
)
)
# DELETE
orchestrator.delete_key(slash_join(key_prefix, "key1"))
time.sleep(0.1)
mock_callback.meth.assert_called_with(
KeyChange(
KeyEvent.DELETE,
slash_join(key_prefix, "key1"),
"test_val",
)
)
def test_get_key(orchestrator):
key_prefix = "building/"
with pytest.raises(KeyError):
orchestrator.get_key(slash_join(key_prefix, "key1"))
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val", overwrite=True)
with pytest.raises(KeyError):
orchestrator.get_key(slash_join(key_prefix, "key1"))
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val")
assert orchestrator.get_key(slash_join(key_prefix, "key1")) == "test_val"
def test_delete_key(orchestrator):
key_prefix = "building/"
with pytest.raises(KeyError):
orchestrator.delete_key(slash_join(key_prefix, "key1"))
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val")
assert orchestrator.get_key(slash_join(key_prefix, "key1")) is not None
orchestrator.delete_key(slash_join(key_prefix, "key1"))
with pytest.raises(KeyError):
orchestrator.get_key(slash_join(key_prefix, "key1"))
|
2faa6e6cec141eaaa98720aec04ca888c1e0592d
|
1311696a180047135c825ffa283f9ac9750d4236
|
/tests/publish/test_merge.py
|
35cad25cfdbfac5edc7e0a3230e7b8723f4d2246
|
[
"MIT"
] |
permissive
|
Josverl/micropython-stubber
|
71103afa842da02d5ad074b541d9bff7243ce23f
|
68fe9113f4b4e611bb4c3d19f79c8ba0e7111f5e
|
refs/heads/main
| 2023-08-31T00:51:22.200348
| 2023-05-31T07:48:54
| 2023-05-31T07:48:54
| 177,823,007
| 135
| 8
|
NOASSERTION
| 2023-09-11T21:25:19
| 2019-03-26T16:00:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,358
|
py
|
test_merge.py
|
from pathlib import Path
import pytest
from mock import MagicMock
from stubber.publish.merge_docstubs import merge_all_docstubs, copy_and_merge_docstubs
from .fakeconfig import FakeConfig
@pytest.mark.mocked
@pytest.mark.integration
def test_merge_all_docstubs_mocked(mocker, tmp_path, pytestconfig):
"""Test publish_multiple"""
if not (pytestconfig.rootpath / "repos/micropython-stubs").exists():
pytest.skip("Integration test: micropython-stubs repo not found")
# use the test config
config = FakeConfig(tmp_path=tmp_path, rootpath=pytestconfig.rootpath)
mocker.patch("stubber.publish.merge_docstubs.CONFIG", config)
m_board_candidates: MagicMock = mocker.patch(
"stubber.publish.merge_docstubs.board_candidates",
autospec=True,
return_value=[
{"family": "micropython", "version": "1.19.1", "port": "stm32", "board": "generic"},
{"family": "micropython", "version": "1.19.1", "port": "esp32", "board": "generic"},
],
)
m_copy_and_merge_docstubs: MagicMock = mocker.patch("stubber.publish.merge_docstubs.copy_and_merge_docstubs", autospec=True)
result = merge_all_docstubs(["v1.18", "v1.19"])
assert m_board_candidates.call_count == 1
assert m_copy_and_merge_docstubs.call_count == 2
@pytest.mark.mocked
def test_copydocstubs_mocked(mocker, tmp_path, pytestconfig):
"""Test publish_multiple"""
# use the test config
config = FakeConfig(tmp_path=tmp_path, rootpath=pytestconfig.rootpath)
mocker.patch("stubber.publish.merge_docstubs.CONFIG", config)
m_enrich_folder: MagicMock = mocker.patch("stubber.publish.merge_docstubs.enrich_folder", autospec=True, return_value=42)
m_copytree: MagicMock = mocker.patch("stubber.publish.merge_docstubs.shutil.copytree", autospec=True)
m_copy: MagicMock = mocker.patch("stubber.publish.merge_docstubs.shutil.copy", autospec=True)
# use files already in test set
fw_path = Path(".") / "tests" / "data" / "micropython-1.18-esp32"
docstub_path = Path(".") / "tests" / "data" / "micropython-1.18-docstubs"
dest_path = tmp_path / "micropython-merged"
result = copy_and_merge_docstubs(fw_path, dest_path, docstub_path)
assert result == 42
assert m_enrich_folder.call_count == 1
assert m_copytree.call_count == 1
assert m_copy.call_count == 1
|
e68c80fcc03bd3d3e261398f0dcacde0ea580704
|
a6c84bfd01cf40a2ca32b538c02aa971a2abec88
|
/src/triage/component/catwalk/feature_importances.py
|
436afa8904fb2dbccb5951ba60fc8e0848ffac09
|
[
"MIT"
] |
permissive
|
dssg/triage
|
01b480d103f1eba7e00822410a8ba462378c1f12
|
1b2049a9d10d8c6b70586e6fbc945ac4fa32fe68
|
refs/heads/master
| 2023-08-25T12:53:10.463078
| 2023-08-03T22:54:49
| 2023-08-03T22:54:49
| 71,394,134
| 177
| 64
|
NOASSERTION
| 2023-09-13T17:41:43
| 2016-10-19T19:55:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
feature_importances.py
|
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
import numpy as np
import sklearn.linear_model
from sklearn.svm import SVC
from triage.component.catwalk.estimators.classifiers import ScaledLogisticRegression
def _ad_hoc_feature_importances(model):
"""
Get the "ad-hoc feature importances" for scikit-learn's models
lacking the `feature_importances_` attribute
Args:
        model: A trained model that does not have a `feature_importances_` attribute
Returns:
At this moment, this method only returns the odds ratio of both the
intercept and the coefficients given by sklearn's implementation of
the LogisticRegression.
The order of the odds ratio list is the standard
of the statistical packages (like R, SAS, etc) i.e. (intercept, coefficients)
"""
feature_importances = None
if (isinstance(model, (sklearn.linear_model.LogisticRegression)) or
isinstance(model, (ScaledLogisticRegression))):
coef_odds_ratio = np.exp(model.coef_)
# intercept_odds_ratio = np.exp(model.intercept_[:,np.newaxis])
# We are ignoring the intercept
# NOTE: We need to squeeze this array so it has the correct dimensions
feature_importances = coef_odds_ratio.squeeze()
elif isinstance(model, (SVC)) and (model.get_params()["kernel"] == "linear"):
feature_importances = model.coef_.squeeze()
return feature_importances
def get_feature_importances(model):
"""
Get feature importances (from scikit-learn) of a trained model.
Args:
model: Trained model
Returns:
Feature importances, or failing that, None
"""
feature_importances = None
if hasattr(model, "feature_importances_"):
feature_importances = model.feature_importances_
else:
        logger.warning(
            "The selected algorithm doesn't support a standard way "
            "of calculating the importance of each feature. "
            "Falling back to ad-hoc methods "
            "(e.g. for LogisticRegression we will return odds ratios instead of coefficients)"
        )
feature_importances = _ad_hoc_feature_importances(model)
# if we just ended up with a scalar (e.g., single feature logit), ensure we return an array
if isinstance(feature_importances, np.ndarray) and feature_importances.shape == ():
feature_importances = feature_importances.reshape((1,))
return feature_importances
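

# --- Illustrative usage sketch (added for exposition; not part of the original
# module). Exercises the LogisticRegression fallback path; the data below is
# invented purely for demonstration.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = rng.random((100, 3))
    y = (X[:, 0] > 0.5).astype(int)
    model = LogisticRegression().fit(X, y)
    # LogisticRegression has no feature_importances_ attribute, so this prints
    # the odds ratios of the fitted coefficients, an array of shape (3,).
    print(get_feature_importances(model))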
|
dc52837e4caa86a48431babd27464a4aed1b41c0
|
fad4aa5a174627b8930beb8f5f987dd62c88957e
|
/sky/skylet/providers/kubernetes/node_provider.py
|
b233462c2ac21c39c172290ee8a1bd02d6e68e33
|
[
"Apache-2.0"
] |
permissive
|
skypilot-org/skypilot
|
603e29ecb3ce3b25d308f018fd402488ee352ef0
|
e58f33f315ca08c6e057ab9a2d00cd27476529a1
|
refs/heads/master
| 2023-08-16T21:46:53.379586
| 2023-08-16T02:17:21
| 2023-08-16T02:17:21
| 395,140,743
| 3,416
| 220
|
Apache-2.0
| 2023-09-14T21:20:44
| 2021-08-11T23:32:15
|
Python
|
UTF-8
|
Python
| false
| false
| 13,618
|
py
|
node_provider.py
|
import copy
import logging
import time
from typing import Dict
from urllib.parse import urlparse
from uuid import uuid4
from ray.autoscaler._private.command_runner import SSHCommandRunner
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import NODE_KIND_HEAD
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME
from ray.autoscaler.tags import TAG_RAY_NODE_KIND
from sky.adaptors import kubernetes
from sky.skylet.providers.kubernetes import config
from sky.skylet.providers.kubernetes import utils
logger = logging.getLogger(__name__)
MAX_TAG_RETRIES = 3
DELAY_BEFORE_TAG_RETRY = 0.5
RAY_COMPONENT_LABEL = 'cluster.ray.io/component'
# Monkey patch SSHCommandRunner to allow specifying SSH port
def set_port(self, port):
self.ssh_options.arg_dict['Port'] = port
SSHCommandRunner.set_port = set_port
def head_service_selector(cluster_name: str) -> Dict[str, str]:
"""Selector for Operator-configured head service."""
return {RAY_COMPONENT_LABEL: f'{cluster_name}-ray-head'}
def to_label_selector(tags):
label_selector = ''
for k, v in tags.items():
if label_selector != '':
label_selector += ','
label_selector += '{}={}'.format(k, v)
return label_selector
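

# Example (illustrative, added for exposition):
#   to_label_selector({'app': 'ray', 'env': 'dev'}) -> 'app=ray,env=dev'
# i.e. the comma-separated key=value form the Kubernetes API expects.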
class KubernetesNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cluster_name = cluster_name
        # Kubernetes namespace to use
self.namespace = utils.get_current_kube_config_context_namespace()
# Timeout for resource provisioning. If it takes longer than this
# timeout, the resource provisioning will be considered failed.
# This is useful for failover. May need to be adjusted for different
# kubernetes setups.
self.timeout = provider_config['timeout']
def non_terminated_nodes(self, tag_filters):
# Match pods that are in the 'Pending' or 'Running' phase.
# Unfortunately there is no OR operator in field selectors, so we
# have to match on NOT any of the other phases.
field_selector = ','.join([
'status.phase!=Failed',
'status.phase!=Unknown',
'status.phase!=Succeeded',
'status.phase!=Terminating',
])
tag_filters[TAG_RAY_CLUSTER_NAME] = self.cluster_name
label_selector = to_label_selector(tag_filters)
pod_list = kubernetes.core_api().list_namespaced_pod(
self.namespace,
field_selector=field_selector,
label_selector=label_selector)
# Don't return pods marked for deletion,
# i.e. pods with non-null metadata.DeletionTimestamp.
return [
pod.metadata.name
for pod in pod_list.items
if pod.metadata.deletion_timestamp is None
]
def is_running(self, node_id):
pod = kubernetes.core_api().read_namespaced_pod(node_id, self.namespace)
return pod.status.phase == 'Running'
def is_terminated(self, node_id):
pod = kubernetes.core_api().read_namespaced_pod(node_id, self.namespace)
return pod.status.phase not in ['Running', 'Pending']
def node_tags(self, node_id):
pod = kubernetes.core_api().read_namespaced_pod(node_id, self.namespace)
return pod.metadata.labels
def external_ip(self, node_id):
# Return the IP address of the first node with an external IP
nodes = kubernetes.core_api().list_node().items
for node in nodes:
if node.status.addresses:
for address in node.status.addresses:
if address.type == 'ExternalIP':
return address.address
# If no external IP is found, use the API server IP
api_host = kubernetes.core_api().api_client.configuration.host
parsed_url = urlparse(api_host)
return parsed_url.hostname
def external_port(self, node_id):
# Extract the NodePort of the head node's SSH service
# Node id is str e.g., example-cluster-ray-head-v89lb
# TODO(romilb): Implement caching here for performance.
# TODO(romilb): Multi-node would need more handling here.
cluster_name = node_id.split('-ray-head')[0]
return utils.get_head_ssh_port(cluster_name, self.namespace)
def internal_ip(self, node_id):
pod = kubernetes.core_api().read_namespaced_pod(node_id, self.namespace)
return pod.status.pod_ip
def get_node_id(self, ip_address, use_internal_ip=True) -> str:
def find_node_id():
if use_internal_ip:
return self._internal_ip_cache.get(ip_address)
else:
return self._external_ip_cache.get(ip_address)
if not find_node_id():
all_nodes = self.non_terminated_nodes({})
ip_func = self.internal_ip if use_internal_ip else self.external_ip
ip_cache = (self._internal_ip_cache
if use_internal_ip else self._external_ip_cache)
for node_id in all_nodes:
ip_cache[ip_func(node_id)] = node_id
if not find_node_id():
if use_internal_ip:
known_msg = f'Worker internal IPs: {list(self._internal_ip_cache)}'
else:
                known_msg = f'Worker external IPs: {list(self._external_ip_cache)}'
raise ValueError(f'ip {ip_address} not found. ' + known_msg)
return find_node_id()
def set_node_tags(self, node_ids, tags):
for _ in range(MAX_TAG_RETRIES - 1):
try:
self._set_node_tags(node_ids, tags)
return
except kubernetes.api_exception() as e:
if e.status == 409:
logger.info(config.log_prefix +
'Caught a 409 error while setting'
' node tags. Retrying...')
time.sleep(DELAY_BEFORE_TAG_RETRY)
continue
else:
raise
# One more try
self._set_node_tags(node_ids, tags)
def _set_node_tags(self, node_id, tags):
pod = kubernetes.core_api().read_namespaced_pod(node_id, self.namespace)
pod.metadata.labels.update(tags)
kubernetes.core_api().patch_namespaced_pod(node_id, self.namespace, pod)
def create_node(self, node_config, tags, count):
conf = copy.deepcopy(node_config)
pod_spec = conf.get('pod', conf)
service_spec = conf.get('service')
node_uuid = str(uuid4())
tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
tags['ray-node-uuid'] = node_uuid
pod_spec['metadata']['namespace'] = self.namespace
if 'labels' in pod_spec['metadata']:
pod_spec['metadata']['labels'].update(tags)
else:
pod_spec['metadata']['labels'] = tags
# Allow Operator-configured service to access the head node.
if tags[TAG_RAY_NODE_KIND] == NODE_KIND_HEAD:
head_selector = head_service_selector(self.cluster_name)
pod_spec['metadata']['labels'].update(head_selector)
logger.info(config.log_prefix +
'calling create_namespaced_pod (count={}).'.format(count))
new_nodes = []
for _ in range(count):
pod = kubernetes.core_api().create_namespaced_pod(
self.namespace, pod_spec)
new_nodes.append(pod)
new_svcs = []
if service_spec is not None:
logger.info(config.log_prefix + 'calling create_namespaced_service '
'(count={}).'.format(count))
for new_node in new_nodes:
metadata = service_spec.get('metadata', {})
metadata['name'] = new_node.metadata.name
service_spec['metadata'] = metadata
service_spec['spec']['selector'] = {'ray-node-uuid': node_uuid}
svc = kubernetes.core_api().create_namespaced_service(
self.namespace, service_spec)
new_svcs.append(svc)
# Wait for all pods to be ready, and if it exceeds the timeout, raise an
# exception. If pod's container is ContainerCreating, then we can assume
# that resources have been allocated and we can exit.
start = time.time()
while True:
if time.time() - start > self.timeout:
raise config.KubernetesError(
'Timed out while waiting for nodes to start. '
'Cluster may be out of resources or '
'may be too slow to autoscale.')
all_ready = True
for node in new_nodes:
pod = kubernetes.core_api().read_namespaced_pod(
node.metadata.name, self.namespace)
if pod.status.phase == 'Pending':
# Iterate over each pod to check their status
if pod.status.container_statuses is not None:
for container_status in pod.status.container_statuses:
# Continue if container status is ContainerCreating
# This indicates this pod has been scheduled.
if container_status.state.waiting is not None and container_status.state.waiting.reason == 'ContainerCreating':
continue
else:
# If the container wasn't in creating state,
# then we know pod wasn't scheduled or had some
# other error, such as image pull error.
# See list of possible reasons for waiting here:
# https://stackoverflow.com/a/57886025
all_ready = False
else:
# If container_statuses is None, then the pod hasn't
# been scheduled yet.
all_ready = False
if all_ready:
break
time.sleep(1)
def terminate_node(self, node_id):
logger.info(config.log_prefix + 'calling delete_namespaced_pod')
try:
kubernetes.core_api().delete_namespaced_pod(
node_id,
self.namespace,
_request_timeout=config.DELETION_TIMEOUT)
except kubernetes.api_exception() as e:
if e.status == 404:
logger.warning(config.log_prefix +
f'Tried to delete pod {node_id},'
' but the pod was not found (404).')
else:
raise
try:
kubernetes.core_api().delete_namespaced_service(
node_id,
self.namespace,
_request_timeout=config.DELETION_TIMEOUT)
kubernetes.core_api().delete_namespaced_service(
f'{node_id}-ssh',
self.namespace,
_request_timeout=config.DELETION_TIMEOUT)
except kubernetes.api_exception():
pass
def terminate_nodes(self, node_ids):
        # TODO(romilb): terminate_nodes should include optimizations for
        # deletion of multiple nodes. Currently, it deletes one node at a time.
        # We should look into using deletecollection here for batch deletion.
for node_id in node_ids:
self.terminate_node(node_id)
def get_command_runner(self,
log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
docker_config=None):
"""Returns the CommandRunner class used to perform SSH commands.
Args:
log_prefix(str): stores "NodeUpdater: {}: ".format(<node_id>). Used
to print progress in the CommandRunner.
node_id(str): the node ID.
auth_config(dict): the authentication configs from the autoscaler
yaml file.
cluster_name(str): the name of the cluster.
process_runner(module): the module to use to run the commands
in the CommandRunner. E.g., subprocess.
use_internal_ip(bool): whether the node_id belongs to an internal ip
or external ip.
docker_config(dict): If set, the docker information of the docker
container that commands should be run on.
"""
common_args = {
'log_prefix': log_prefix,
'node_id': node_id,
'provider': self,
'auth_config': auth_config,
'cluster_name': cluster_name,
'process_runner': process_runner,
'use_internal_ip': use_internal_ip,
}
command_runner = SSHCommandRunner(**common_args)
if use_internal_ip:
port = 22
else:
port = self.external_port(node_id)
command_runner.set_port(port)
return command_runner
@staticmethod
def bootstrap_config(cluster_config):
return config.bootstrap_kubernetes(cluster_config)
@staticmethod
def fillout_available_node_types_resources(cluster_config):
"""Fills out missing "resources" field for available_node_types."""
return config.fillout_resources_kubernetes(cluster_config)
|
cc5027167faf26f6cfef40343e390163583d4199
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Non_Linear_Finite_Element_Analysis_of_Solids_and_Structures_Borst/pyfem-1.0/pyfem/elements/Interface.py
|
0a5cd9bb7b6b712c021e55db91290fd91396928b
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,048
|
py
|
Interface.py
|
############################################################################
# This Python file is part of PyFEM-1.0, released on Aug. 29, 2012. #
# The PyFEM code accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# Comments and suggestions can be sent to: #
# PyFEM-support@tue.nl #
# #
# The latest version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
from .Element import Element
from pyfem.util.shapeFunctions import getElemShapeData
from pyfem.util.kinematics import Kinematics
from numpy import zeros, dot, outer, ones, eye, sqrt
from scipy.linalg import norm
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class Interface( Element ):
dofTypes = [ 'u' , 'v' ]
def __init__ ( self, elnodes , props ):
self.intMethod = "NewtonCotes"
Element.__init__( self, elnodes , props )
#Initialize the history parameter
self.setHistoryParameter( 'normal' , zeros(2) )
self.commitHistory()
def __type__ ( self ):
    return "Interface"  # element type name
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def getTangentStiffness ( self, elemdat ):
rot = self.getRotation( elemdat.coords , elemdat.state )
sData = getElemShapeData( elemdat.coords[:2,:] , method = self.intMethod , elemType = "Line2" )
elemdat.outlabel.append("tractions")
elemdat.outdata = zeros( shape=(len(elemdat.nodes),2) )
kin = Kinematics(2,2)
for iData in sData:
B = self.getBmatrix( iData.h , rot )
kin.strain = dot( B , elemdat.state )
sigma,tang = self.mat.getStress( kin )
elemdat.stiff += dot ( B.transpose() , dot ( tang , B ) ) * iData.weight
elemdat.fint += dot ( B.transpose() , sigma ) * iData.weight
elemdat.outdata += outer( ones(len(elemdat.nodes)), sigma )
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def getInternalForce ( self, elemdat ):
rot = self.getRotation( elemdat.coords , elemdat.state )
sData = getElemShapeData( elemdat.coords[:2,:] , method = self.intMethod , elemType = "Line2" )
elemdat.outlabel.append("tractions")
elemdat.outdata = zeros( shape=(len(elemdat.nodes),2) )
kin = Kinematics(2,2)
for iData in sData:
B = self.getBmatrix( iData.h , rot )
kin.strain = dot( B , elemdat.state )
sigma,tang = self.mat.getStress( kin )
elemdat.fint += dot ( B.transpose() , sigma ) * iData.weight
elemdat.outdata += outer( ones(len(elemdat.nodes)), sigma )
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def getBmatrix( self , phi , rot ):
B = zeros( shape=( 2 , self.dofCount() ) )
B[:,:2] = -rot * phi[0]
B[:,2:4] = -rot * phi[1]
B[:,4:6] = rot * phi[0]
B[:,6:] = rot * phi[1]
return B
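
  # Illustrative note (added for exposition): rot maps global (x, y) components
  # into the local interface frame, so dot(B, state) evaluates the relative
  # displacement jump across the interface at an integration point, weighted by
  # the shape-function values phi[0] and phi[1].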
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def getRotation( self , coords , state ):
rot = zeros( shape=(2,2) )
midCoords = zeros( shape=(2,2) )
midCoords = 0.5 * ( coords[:2,:] + coords[2:,:] )
midCoords[0,0] += 0.5 * ( state[0] + state[4] )
midCoords[0,1] += 0.5 * ( state[1] + state[5] )
midCoords[1,0] += 0.5 * ( state[2] + state[6] )
midCoords[1,1] += 0.5 * ( state[3] + state[7] )
ds = midCoords[1,:]-midCoords[0,:]
normal = self.getHistoryParameter('normal')
if norm(normal) < 0.5:
normal[0] = ds[1]/norm(ds)
normal[1] = ds[0]/norm(ds)
else:
newnormal = zeros(2)
newnormal[0] = ds[1]/norm(ds)
newnormal[1] = ds[0]/norm(ds)
if dot(newnormal,normal) < 0 :
normal = -newnormal
else:
normal = newnormal
self.setHistoryParameter( 'normal' , normal )
rot[0,0]= normal[0]
rot[0,1]= normal[1]
rot[1,0]= normal[1]
rot[1,1]= -normal[0]
return rot
|
75f3c0df9bf1d77f9843fa178e48f068bddb17e5
|
2d6323b8ccaf08a8929dba79fb9575c436977bd4
|
/docassemble_webapp/docassemble/webapp/process_email.py
|
b85a1e712f2860b45823dca17c15e0654b94d370
|
[
"MIT"
] |
permissive
|
jhpyle/docassemble
|
f1c36e73d02807a7052b860dfceecdfa88e728c7
|
8726242cfbe3a15cad610dc2b518346be68ab142
|
refs/heads/master
| 2023-09-01T20:03:39.497473
| 2023-08-26T12:44:45
| 2023-08-26T12:44:45
| 34,148,903
| 691
| 300
|
MIT
| 2023-09-09T20:08:14
| 2015-04-18T02:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 7,721
|
py
|
process_email.py
|
import datetime
import email
import json
import mimetypes
import re
import sys
from email.utils import parseaddr, parsedate, getaddresses
from time import mktime
from sqlalchemy import select
import docassemble.base.config
docassemble.base.config.load()
import docassemble.webapp.worker # noqa: E402
import docassemble.webapp.db_object # noqa: E402
from docassemble.webapp.core.models import Shortener, Email, EmailAttachment # noqa: E402
from docassemble.webapp.file_number import get_new_file_number # noqa: E402
from docassemble.webapp.files import SavedFile # noqa: E402
from docassemble.webapp.users.models import UserModel # noqa: E402
db = docassemble.webapp.db_object.init_sqlalchemy()
def main():
fp = open("/tmp/mail.log", "a", encoding="utf-8")
# fp.write("The file is " + sys.argv[1] + "\n")
try:
with open(sys.argv[1], 'r', encoding="utf-8") as email_fp:
msg = email.message_from_file(email_fp)
except Exception as errMess:
fp.write("Failed to read e-mail message: " + str(errMess) + "\n")
sys.exit("Failed to read e-mail message")
raw_date = msg.get('Date', msg.get('Resent-Date', None))
addr_return_path = msg.get('Return-path', None)
addr_reply_to = msg.get('Reply-to', None)
addr_to = msg.get('Envelope-to', None)
addr_from = msg.get('From', msg.get('Sender', None))
subject = msg.get('Subject', None)
fp.write("Message to " + str(addr_to) + "\n")
# fp.write("From was " + str(addr_from) + "\n")
# fp.write("Subject was " + str(subject) + "\n")
to_recipients = []
for recipient in getaddresses(msg.get_all('to', []) + msg.get_all('resent-to', [])):
to_recipients.append({'name': recipient[0], 'address': recipient[1]})
cc_recipients = []
for recipient in getaddresses(msg.get_all('cc', []) + msg.get_all('resent-cc', [])):
cc_recipients.append({'name': recipient[0], 'address': recipient[1]})
recipients = []
for recipient in getaddresses(msg.get_all('to', []) + msg.get_all('cc', []) + msg.get_all('resent-to', []) + msg.get_all('resent-cc', [])):
recipients.append({'name': recipient[0], 'address': recipient[1]})
if addr_to is None and len(recipients) > 0:
addr_to = recipients[0]['address']
# fp.write("recipients are " + str(recipients) + "\n")
if addr_to is not None:
# fp.write("parsed envelope-to: " + str(parseaddr(addr_to)) + "\n")
short_code = re.sub(r'@.*', '', parseaddr(addr_to)[1])
else:
short_code = None
# fp.write("short code is " + str(short_code) + "\n")
record = db.session.execute(select(Shortener).filter_by(short=short_code)).scalar()
if record is None:
fp.write("short code not found\n")
sys.exit("short code not found")
# fp.write("short code found\n")
# file_number = get_new_file_number(record.uid, 'email', yaml_file_name=record.filename)
# #fp.write("file number is " + str(file_number) + "\n")
# saved_file_email = SavedFile(file_number, fix=True)
if addr_from is not None:
# fp.write("parsed from: " + str(parseaddr(addr_from)[1]) + "\n")
addr_from = {'name': parseaddr(addr_from)[0], 'address': parseaddr(addr_from)[1]}
else:
addr_from = {'empty': True}
if addr_return_path is not None:
# fp.write("parsed return_path: " + str(parseaddr(addr_return_path)[1]) + "\n")
addr_return_path = {'name': parseaddr(addr_return_path)[0], 'address': parseaddr(addr_return_path)[1]}
else:
addr_return_path = {'empty': True}
# fp.write("return_path is " + str(addr_return_path) + "\n")
if addr_reply_to is not None:
# fp.write("parsed reply-to: " + str(parseaddr(addr_reply_to)[1]) + "\n")
addr_reply_to = {'name': parseaddr(addr_reply_to)[0], 'address': parseaddr(addr_reply_to)[1]}
# fp.write("reply-to is " + str(addr_reply_to) + "\n")
else:
addr_reply_to = {'empty': True}
# fp.write("reply-to is " + str(addr_reply_to) + "\n")
msg_current_time = datetime.datetime.now()
if raw_date is not None:
msg_date = datetime.datetime.fromtimestamp(mktime(parsedate(raw_date)))
# fp.write("msg_date is " + str(msg_date) + "\n")
else:
msg_date = msg_current_time
# fp.write("msg_date set to current time\n")
headers = []
for item in msg.items():
headers.append([item[0], item[1]])
# fp.write("headers:\n" + json.dumps(headers) + "\n")
email_record = Email(short=short_code, to_addr=json.dumps(to_recipients), cc_addr=json.dumps(cc_recipients), from_addr=json.dumps(addr_from), reply_to_addr=json.dumps(addr_reply_to), return_path_addr=json.dumps(addr_return_path), subject=subject, datetime_message=msg_date, datetime_received=msg_current_time)
db.session.add(email_record)
db.session.commit()
save_attachment(record.uid, record.filename, 'headers.json', email_record.id, 0, 'application/json', 'json', json.dumps(headers))
counter = 1
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
filename = part.get_filename()
if part.get_content_type() == 'text/plain':
ext = '.txt'
else:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
ext = '.bin'
if filename:
filename = '%03d-%s' % (counter, secure_filename(filename))
else:
filename = '%03d-attachment%s' % (counter, ext)
# fp.write("Filename is " + str(filename) + "\n")
# fp.write("Content type is " + str(part.get_content_type()) + "\n")
real_filename = re.sub(r'[0-9][0-9][0-9]-', r'', filename)
real_ext = re.sub(r'^\.', r'', ext)
save_attachment(record.uid, record.filename, real_filename, email_record.id, counter, part.get_content_type(), real_ext, part.get_payload(decode=True))
counter += 1
fp.close()
user = None
if record.user_id is not None:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(id=record.user_id)).scalar()
if user is None:
user_info = {'email': None, 'the_user_id': 't' + str(record.temp_user_id), 'theid': record.temp_user_id, 'roles': []}
else:
role_list = [role.name for role in user.roles]
if len(role_list) == 0:
role_list = ['user']
user_info = {'email': user.email, 'roles': role_list, 'the_user_id': user.id, 'theid': user.id, 'firstname': user.first_name, 'lastname': user.last_name, 'nickname': user.nickname, 'country': user.country, 'subdivisionfirst': user.subdivisionfirst, 'subdivisionsecond': user.subdivisionsecond, 'subdivisionthird': user.subdivisionthird, 'organization': user.organization}
docassemble.webapp.worker.background_action.delay(record.filename, user_info, record.uid, None, None, None, {'action': 'incoming_email', 'arguments': {'id': email_record.id}}, extra=None)
def save_attachment(uid, yaml_filename, filename, email_id, index, content_type, extension, content):
att_file_number = get_new_file_number(uid, filename, yaml_file_name=yaml_filename)
attachment_record = EmailAttachment(email_id=email_id, index=index, content_type=content_type, extension=extension, upload=att_file_number)
db.session.add(attachment_record)
db.session.commit()
saved_file_attachment = SavedFile(att_file_number, extension=extension, fix=True, should_not_exist=True)
saved_file_attachment.write_content(content)
saved_file_attachment.finalize()
def secure_filename(filename):
filename = re.sub(r'[^A-Za-z0-9\_\-\. ]+', r'_', filename)
return filename.strip('_')
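

# Example (illustrative, added for exposition):
#   secure_filename('a/b?.pdf') -> 'a_b_.pdf'
# Runs of characters outside [A-Za-z0-9_-. ] collapse to a single '_', and any
# leading or trailing underscores are stripped.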
main()
|
72ecde90cdac1a5fa966b8945d90a78df14125d2
|
50927fa2c786a18436526345e4aca1490aa031dc
|
/core/src/main/python/wlsdeploy/tool/discover/discoverer.py
|
a93443fdd7c72909dcb44b9f212ce4749c6d53d8
|
[
"UPL-1.0",
"LicenseRef-scancode-other-copyleft",
"MIT",
"GPL-2.0-only",
"Classpath-exception-2.0",
"Apache-2.0",
"CDDL-1.1"
] |
permissive
|
oracle/weblogic-deploy-tooling
|
c3646c297ac482fed921fb599182d557cf77d532
|
9fd74ae578a5b1353662facb0405e5672ecc5191
|
refs/heads/main
| 2023-09-01T08:40:12.305524
| 2023-08-26T13:26:37
| 2023-08-26T13:26:37
| 120,652,037
| 148
| 108
|
UPL-1.0
| 2023-09-14T21:03:06
| 2018-02-07T18:08:30
|
Python
|
UTF-8
|
Python
| false
| false
| 47,096
|
py
|
discoverer.py
|
"""
Copyright (c) 2017, 2022, Oracle Corporation and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import os
from java.net import MalformedURLException
from java.net import URI
from java.net import URISyntaxException
from oracle.weblogic.deploy.discover import DiscoverException
from oracle.weblogic.deploy.util import PyOrderedDict as OrderedDict
from oracle.weblogic.deploy.util import StringUtils
from wlsdeploy.aliases.aliases import Aliases
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.exception.expection_types import ExceptionType
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.deploy import deployer_utils
from wlsdeploy.tool.discover.custom_folder_helper import CustomFolderHelper
from wlsdeploy.tool.util.mbean_utils import MBeanUtils
from wlsdeploy.tool.util.mbean_utils import get_interface_name
from wlsdeploy.tool.util.wlst_helper import WlstHelper
from wlsdeploy.util import path_utils
import wlsdeploy.util.unicode_helper as str_helper
from wlsdeploy.util.weblogic_helper import WebLogicHelper
_DISCOVER_LOGGER_NAME = 'wlsdeploy.discover'
_class_name = 'Discoverer'
_logger = PlatformLogger(_DISCOVER_LOGGER_NAME)
remote_dict = OrderedDict()
REMOTE_TYPE = 'Type'
REMOTE_ARCHIVE_PATH = 'ArchivePath'
class Discoverer(object):
"""
Discoverer contains the private methods used to facilitate discovery of the domain information by its subclasses.
"""
def __init__(self, model_context, base_location, wlst_mode, aliases=None, credential_injector=None):
"""
:param model_context: context about the model for this instance of discover domain
:param base_location: to look for common weblogic resources. By default this is the global path or '/'
:param wlst_mode: offline or online
:param aliases: optional, aliases object to use
:param credential_injector: optional, injector to collect credentials
"""
self._model_context = model_context
self._base_location = base_location
self._wlst_mode = wlst_mode
if aliases:
self._aliases = aliases
else:
self._aliases = Aliases(self._model_context, wlst_mode=self._wlst_mode,
exception_type=ExceptionType.DISCOVER)
self._credential_injector = credential_injector
self._att_handler_map = OrderedDict()
self._custom_folder = CustomFolderHelper(self._aliases, _logger, self._model_context, ExceptionType.DISCOVER,
self._credential_injector)
self._weblogic_helper = WebLogicHelper(_logger)
self._wlst_helper = WlstHelper(ExceptionType.DISCOVER)
self._mbean_utils = MBeanUtils(self._model_context, self._aliases, ExceptionType.DISCOVER)
self._wls_version = self._weblogic_helper.get_actual_weblogic_version()
def add_to_remote_map(self, local_name, archive_name, file_type):
if not os.path.isabs(local_name):
local_name = os.path.join(self._model_context.get_domain_home(), local_name)
# we don't know the remote machine type, so automatically turn into forward
# slashes.
local_name = local_name.replace('\\', '/')
remote_dict[local_name] = OrderedDict()
remote_dict[local_name][REMOTE_TYPE] = file_type
remote_dict[local_name][REMOTE_ARCHIVE_PATH] = archive_name
if file_type == 'FILE_STORE' or file_type == 'COHERENCE_PERSISTENCE_DIR':
_logger.todo('WLSDPLY-06042', file_type, archive_name)
else:
_logger.todo('WLSDPLY-06041', file_type, local_name, archive_name)
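
    # Example (illustrative, added for exposition, using a hypothetical domain
    # home of /u01/domains/base_domain): a relative name like 'servers/store'
    # is recorded in remote_dict as '/u01/domains/base_domain/servers/store',
    # absolute and with any backslashes normalized to forward slashes.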
def discover_domain_mbean(self, model_top_folder_name):
"""
Discover the domain specific MBean and its configuration attributes.
:return: model name for domain MBean:dictionary containing the discovered Domain MBean attributes
"""
_method_name = 'discover_domain_mbean'
_logger.entering(model_top_folder_name, class_name=_class_name, method_name=_method_name)
result = OrderedDict()
location = LocationContext(self._base_location)
location.append_location(model_top_folder_name)
name = self._find_singleton_name_in_folder(location)
if name is not None:
_logger.info('WLSDPLY-06644', model_top_folder_name, class_name=_class_name, method_name=_method_name)
location.add_name_token(self._aliases.get_name_token(location), name)
self._populate_model_parameters(result, location)
# if any subfolders exist, discover
self._discover_subfolders(result, location)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return model_top_folder_name, result
# methods for use only by the subclasses
def _populate_model_parameters(self, dictionary, location):
"""
Populate the model dictionary with the attribute values discovered at the current location. Perform
any special processing for a specific attribute before storing into the model dictionary.
:param dictionary: where to store the discovered attributes
:param location: context containing current location information
:return: dictionary of model attribute name and wlst value
"""
_method_name = '_populate_model_parameters'
wlst_path = self._aliases.get_wlst_attributes_path(location)
_logger.finer('WLSDPLY-06100', wlst_path, class_name=_class_name, method_name=_method_name)
if not self.wlst_cd(wlst_path, location):
return
wlst_lsa_params = self._get_attributes_for_current_location(location)
wlst_did_get = list()
_logger.finest('WLSDPLY-06102', self._wlst_helper.get_pwd(), wlst_lsa_params, class_name=_class_name,
method_name=_method_name)
wlst_get_params = self._get_required_attributes(location)
_logger.finest('WLSDPLY-06103', str_helper.to_string(location), wlst_get_params,
class_name=_class_name, method_name=_method_name)
if wlst_lsa_params is not None:
for wlst_lsa_param in wlst_lsa_params:
if wlst_lsa_param in wlst_get_params:
success, wlst_value = self._get_attribute_value_with_get(wlst_lsa_param, wlst_path)
wlst_did_get.append(wlst_lsa_param)
if not success:
continue
else:
_logger.finer('WLSDPLY-06131', wlst_lsa_param, class_name=_class_name, method_name=_method_name)
wlst_value = wlst_lsa_params[wlst_lsa_param]
# if attribute was never set (online only), don't add to the model
try:
if self._omit_from_model(location, wlst_lsa_param):
_logger.finest('WLSDPLY-06157', wlst_lsa_param, str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
continue
except DiscoverException, de:
_logger.info("WLSDPLY-06158", wlst_lsa_param, str_helper.to_string(location),
de.getLocalizedMessage(), class_name=_class_name, method_name=_method_name)
continue
self._add_to_dictionary(dictionary, location, wlst_lsa_param, wlst_value, wlst_path)
# These will come after the lsa params in the ordered dictionary
# Find the attributes that are not in the LSA wlst map but are in the alias definitions with GET access
get_attributes = [get_param for get_param in wlst_get_params if not get_param in wlst_did_get]
for get_attribute in get_attributes:
success, wlst_value = self._get_attribute_value_with_get(get_attribute, wlst_path)
if success:
self._add_to_dictionary(dictionary, location, get_attribute, wlst_value, wlst_path)
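
    # Illustrative flow summary (added for exposition): attributes returned by
    # lsa() keep their lsa values unless the aliases mark them as GET-required,
    # in which case wlst.get() is used instead; GET-required attributes missing
    # from the lsa map are fetched afterwards, so they land after the lsa
    # attributes in the ordered dictionary.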
def _omit_from_model(self, location, wlst_lsa_param):
"""
Determine if the specified attribute should be omitted from the model.
Avoid calling wlst_helper.is_set() if possible, it slows down the online discovery process.
:param location: the location of the attribute to be examined
:param wlst_lsa_param: the name of the attribute to be examined
:return: True if attribute should be omitted, False otherwise
"""
# attributes with derived defaults need to call is_set(), since their value is dynamic.
# don't call is_set() if the -remote command-line argument is used.
if self._aliases.is_derived_default(location, wlst_lsa_param) or not self._model_context.is_remote():
# wlst_helper.is_set already checks for offline / online
return not self._wlst_helper.is_set(wlst_lsa_param)
return False
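
    # Illustrative summary (added for exposition) of the logic above:
    #   - derived default, or a local (non -remote) run: consult is_set() and
    #     omit the attribute when it was never explicitly set
    #   - plain default during a -remote run: never omitted here (avoids slow
    #     is_set() calls during online discovery)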
def _get_attribute_value_with_get(self, wlst_get_param, wlst_path):
_method_name = '_get_attribute_value_with_get'
_logger.finest('WLSDPLY-06104', wlst_get_param, class_name=_class_name, method_name=_method_name)
success = False
wlst_value = None
try:
wlst_value = self._wlst_helper.get(wlst_get_param)
success = True
except DiscoverException, pe:
_logger.info('WLSDPLY-06127', wlst_get_param, wlst_path, pe.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return success, wlst_value
def _add_to_dictionary(self, dictionary, location, wlst_param, wlst_value, wlst_path):
_method_name = '_add_to_dictionary'
_logger.finer('WLSDPLY-06105', wlst_param, wlst_value, wlst_path, class_name=_class_name,
method_name=_method_name)
try:
model_param, model_value = self._aliases.get_model_attribute_name_and_value(location,
wlst_param,
wlst_value)
except DiscoverException, de:
_logger.info('WLSDPLY-06106', wlst_param, wlst_path, de.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return
model_value = self._check_attribute(model_param, model_value, location)
if model_value is not None:
_logger.finer('WLSDPLY-06107', model_param, model_value, class_name=_class_name,
method_name=_method_name)
dictionary[model_param] = model_value
# tokenize the attribute if needed
if self._credential_injector is not None:
self._credential_injector.check_and_tokenize(dictionary, model_param, location)
elif model_param is None:
_logger.finest('WLSDPLY-06108', model_param, class_name=_class_name, method_name=_method_name)
def _get_attributes_for_current_location(self, location):
"""
Change to the mbean folder with the provided name using the current location and return
the attributes at that location.
:param location: context with the current location information
:return: list of attributes
"""
_method_name = '_get_attributes_for_current_location'
attributes = []
path = self._aliases.get_wlst_attributes_path(location)
try:
attributes = self._wlst_helper.lsa(path)
except DiscoverException, de:
name = location.get_model_folders()[-1]
_logger.fine('WLSDPLY-06109', name, str_helper.to_string(location), de.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return attributes
def _is_defined_attribute(self, location, wlst_name):
attribute = False
try:
if self._aliases.get_model_attribute_name(location, wlst_name, exclude_ignored=False):
attribute = True
except DiscoverException:
pass
return attribute
def _get_required_attributes(self, location):
"""
        Return the list of attribute names at the current location whose values
        must be retrieved with wlst.get rather than taken from the lsa map.
:param location: current location context
:return: list of attributes that require wlst.get
"""
_method_name = '_get_required_attributes'
attributes = list()
try:
attributes = self._aliases.get_wlst_get_required_attribute_names(location)
except DiscoverException, de:
name = location.get_model_folders()[-1]
_logger.warning('WLSDPLY-06109', name, location.get_folder_path(), de.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return attributes
def _get_additional_parameters(self, location):
_method_name = '_get_additional_parameters'
other_attributes = list()
try:
other_attributes = self._mbean_utils.get_attributes_not_in_lsa_map(location)
except DiscoverException, de:
name = 'DomainConfig'
folders = location.get_model_folders()
if len(folders) > 0:
name = location.get_model_folders()[-1]
_logger.info('WLSDPLY-06150', name, location.get_folder_path(), de.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return other_attributes
def _mbean_names_exist(self, location):
"""
Check to see if there are any configured MBeans for the current location
:param location: context with the current location
:return: True if MBeans of the type at the location exist
"""
path = self._aliases.get_wlst_list_path(location)
mbean_name_map = None
try:
mbean_name_map = self._wlst_helper.lsc(path)
except DiscoverException, de:
_logger.warning('WLSDPLY-06130', path, de.getLocalizedMessage())
if mbean_name_map:
return True
return False
def _check_attribute(self, model_name, model_value, location):
"""
Check to see if the attribute has special handling indicated by the discover handler map. If the
        attribute needs special processing, call the handler specified by the map.
:param model_name: model name for the attribute to check
:param model_value: value converted to model format
:param location: context containing current location information
:return: new value if modified by the handler or the original value if not a special attribute
"""
if model_value == 'null ':
new_value = None
else:
new_value = model_value
if model_name in self._att_handler_map:
type_method = self._att_handler_map[model_name]
if type_method is not None:
new_value = type_method(model_name, model_value, location)
return new_value
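
    # Illustrative example (added for exposition; the handler name below is
    # hypothetical): a subclass registers special handling with
    #     self._add_att_handler('SourcePath', self._handle_source_path)
    # and _check_attribute() then routes that attribute's value through the
    # handler before it is stored in the model.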
def _find_names_in_folder(self, location):
"""
Find the names for the top folder in the current location.
:param location: context containing the current location information
:return: list of names for the folder or None if the folder does not exist in the domain
"""
_method_name = '_find_names_in_folder'
names = None
mbean_type = self._aliases.get_wlst_mbean_type(location)
if mbean_type is None:
_logger.fine('WLSDPLY-06110', location.get_model_folders()[-1], location.get_folder_path(),
class_name=_class_name, method_name=_method_name)
else:
folder_path = self._aliases.get_wlst_list_path(location)
_logger.fine('WLSDPLY-06111', folder_path, class_name=_class_name, method_name=_method_name)
if self._wlst_helper.path_exists(folder_path):
self.wlst_cd(folder_path, location)
names = self._wlst_helper.lsc()
_logger.fine('WLSDPLY-06146', names, location, class_name=_class_name, method_name=_method_name)
else:
_logger.fine('Path {0} does not exist', folder_path, class_name=_class_name, method_name=_method_name)
return names
def _find_singleton_name_in_folder(self, location):
"""
The top folder is a singleton. Find the single name for the folder.
        :param location: context containing current location information
:return: The single name for the folder, or None if the top folder does not exist in the domain
"""
        _method_name = '_find_singleton_name_in_folder'
name = None
names = self._find_names_in_folder(location)
if names is not None:
names_len = len(names)
if names_len > 1:
ex = exception_helper.create_discover_exception('WLSDPLY-06112', location.get_model_folders(),
self._aliases.get_model_folder_path(location),
len(names))
_logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
if names_len > 0:
name = names[0]
return name
def _find_subfolders(self, location):
if self._wlst_mode == WlstModes.OFFLINE:
return self._find_subfolders_offline(location)
else:
return self._find_subfolders_online(location)
def _find_subfolders_offline(self, location):
"""
Find the subfolders of the current location.
:param location: context containing current location information
:return: list of subfolders
"""
wlst_path = self._aliases.get_wlst_subfolders_path(location)
wlst_subfolders = []
if self.wlst_cd(wlst_path, location):
wlst_subfolders = self._wlst_helper.lsc()
if wlst_subfolders:
new_subfolders = []
for wlst_subfolder in wlst_subfolders:
model_subfolder_name = self._get_model_name(location, wlst_subfolder)
if model_subfolder_name:
new_subfolders.append(wlst_subfolder)
wlst_subfolders = new_subfolders
return wlst_subfolders
def _find_subfolders_online(self, location):
wlst_path = self._aliases.get_wlst_subfolders_path(location)
wlst_subfolders = []
if self.wlst_cd(wlst_path, location):
wlst_subfolders = self._massage_online_folders(self._wlst_helper.lsc())
if wlst_subfolders:
new_subfolders = []
for wlst_subfolder in wlst_subfolders:
model_subfolder_name = self._get_model_name(location, wlst_subfolder)
if model_subfolder_name:
new_subfolders.append(wlst_subfolder)
wlst_subfolders = new_subfolders
return wlst_subfolders
def _discover_subfolder_singleton(self, model_subfolder_name, location):
"""
        Discover the subfolder from the wlst subfolder name and populate the attributes in the folder.
Return the subfolder model name and the dictionary populated from the subfolder.
The location is appended and then removed from the provided location context prior to return.
:param model_subfolder_name: subfolder name in wlst format
:param location: containing the current location information
:return: model subfolder name: subfolder result dictionary:
"""
_method_name = '_discover_subfolder_singleton'
_logger.entering(model_subfolder_name, str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
subfolder_result = OrderedDict()
# For all server subfolder names there should only be one path
if self._mbean_names_exist(location):
subfolder_path = self._aliases.get_wlst_attributes_path(location)
if self.wlst_cd(subfolder_path, location):
self._populate_model_parameters(subfolder_result, location)
self._discover_subfolders(subfolder_result, location)
_logger.finest('WLSDPLY-06111', str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return subfolder_result
def _discover_subfolder_with_single_name(self, model_subfolder_name, location, name_token):
"""
Discover a subfolder that is a singleton but has an unpredictable naming strategy. Find the name for
the singleton folder and then discover the folder contents.
:param location: context containing current location information
:param name_token: represents the single folder name token in the aliases
:return: dictionary containing discovered folder attributes
"""
_method_name = '_discover_subfolder_with_single_name'
_logger.entering(name_token, class_name=_class_name, method_name=_method_name)
name = self._find_singleton_name_in_folder(location)
result = OrderedDict()
if name:
location.add_name_token(name_token, name)
result = self._discover_subfolder_singleton(model_subfolder_name, location)
location.remove_name_token(name_token)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return result
def _discover_artificial_folder(self, model_subfolder_type, location, name_token, check_order=False):
"""
Discover the subfolder that has an artificial connection; the subfolder contains multiple different types
under one MBean. The model must contain the subfolder type, the artificial type that specifies which it is,
and the name of the subfolder. This folder is only one layer deep. No need to continue to discover
additional subfolders
:param model_subfolder_type: type of the model subfolder
:param location: context containing the current location information
:param name_token: for use in the location to contain the folder name
:param check_order: if true, check the subfolders for order
:return: dictionary containing the discovered folder attributes
"""
        _method_name = '_discover_artificial_folder'
_logger.entering(model_subfolder_type, str_helper.to_string(location), name_token,
class_name=_class_name, method_name=_method_name)
subfolder_result = OrderedDict()
names = self._find_names_in_folder(location)
required_order = self._aliases.get_subfolders_in_order(location)
attr_map = dict()
default_list = list()
if names is not None:
for name in names:
location.add_name_token(name_token, name)
massaged = self._inspect_artificial_folder_name(name, location)
location.add_name_token(name_token, massaged)
# circumventing problems if the trust identity asserter schematype jar
# is not in the oracle home. Force it to have the correct name.
if name == 'Trust Service Identity Asserter':
artificial = 'TrustServiceIdentityAsserter'
else:
artificial = self._get_artificial_type(location)
if artificial is None:
if self._aliases.is_custom_folder_allowed(location):
_logger.fine('WLSDPLY-06148', model_subfolder_type, massaged, location.get_folder_path(),
class_name=_class_name, method_name=_method_name)
# doesn't matter how many parameters, it is automatically a non-default name
default_list.append(massaged)
attr_map[massaged] = 0
subfolder_result.update(
self._custom_folder.discover_custom_mbean(location, model_subfolder_type, massaged))
else:
_logger.warning('WLSDPLY-06123', self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
else:
_logger.finer('WLSDPLY-06120', artificial, massaged, model_subfolder_type, class_name=_class_name,
method_name=_method_name)
location.append_location(artificial)
subfolder_result[massaged] = OrderedDict()
subfolder_result[massaged][artificial] = OrderedDict()
self._populate_model_parameters(subfolder_result[massaged][artificial], location)
default_list.append(artificial)
attr_map[artificial] = len(subfolder_result[massaged][artificial])
location.pop_location()
location.remove_name_token(name_token)
# check to see if the order and number of the subfolder list is same as required order
is_default = False
if check_order and len(required_order) == len(default_list):
is_default = True
idx = 0
while idx < len(required_order):
if required_order[idx] != default_list[idx] or attr_map[default_list[idx]] > 0:
is_default = False
break
idx += 1
if is_default:
subfolder_result = None
_logger.exiting(class_name=_class_name, method_name=_method_name, result=subfolder_result)
return subfolder_result
def _discover_subfolder_with_names(self, model_subfolder_name, location, name_token):
"""
Discover the subfolders from the wlst subfolder name. The subfolder may contain 0 to n instances, each
with a unique name. Create an entry for each name in the subfolder. Populate the attributes of the subfolder.
Return the subfolder model name and the populated dictionary.
:param model_subfolder_name: model name of the wlst subfolder
:param location: context of the current location
:param name_token: aliases token for the type of model folder name
:return: model subfolder name: dictionary results:
"""
_method_name = '_discover_subfolder_with_names'
_logger.entering(model_subfolder_name, str_helper.to_string(location), name_token,
class_name=_class_name, method_name=_method_name)
subfolder_result = OrderedDict()
names = self._find_names_in_folder(location)
if names is not None:
typedef = self._model_context.get_domain_typedef()
for name in names:
if typedef.is_filtered(location, name):
_logger.info('WLSDPLY-06159', typedef.get_domain_type(), location.get_current_model_folder(), name,
class_name=_class_name, method_name=_method_name)
else:
_logger.finer('WLSDPLY-06113', name, self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
subfolder_result[name] = OrderedDict()
location.add_name_token(name_token, name)
subfolder_path = self._aliases.get_wlst_attributes_path(location)
if self.wlst_cd(subfolder_path, location):
self._populate_model_parameters(subfolder_result[name], location)
self._discover_subfolders(subfolder_result[name], location)
location.remove_name_token(name_token)
_logger.finest('WLSDPLY-06114', str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return subfolder_result
def _discover_subfolder(self, model_subfolder_name, location, result=None, check_order=False):
"""
Discover the subfolder indicated by the model subfolder name. Append the model subfolder to the
current location context, and pop that location before return
:param model_subfolder_name: Name of the model subfolder
:param location: context containing the current subfolder information
:param check_order: does the folder need to be checked for order
:return: discovered dictionary
"""
_method_name = '_discover_subfolder'
_logger.entering(model_subfolder_name, location.get_folder_path(), class_name=_class_name,
method_name=_method_name)
location.append_location(model_subfolder_name)
deployer_utils.set_flattened_folder_token(location, self._aliases)
_logger.finer('WLSDPLY-06115', model_subfolder_name, self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
        # handle a null model_subfolder_name, which should never happen in discover; throw an exception about the version
if result is None:
result = OrderedDict()
name_token = self._aliases.get_name_token(location)
_logger.finest('WLSDPLY-06116', model_subfolder_name, self._aliases.get_model_folder_path(location),
name_token, class_name=_class_name, method_name=_method_name)
if name_token is not None:
if self._aliases.requires_unpredictable_single_name_handling(location):
subfolder_result = self._discover_subfolder_with_single_name(model_subfolder_name, location,
name_token)
elif self._aliases.requires_artificial_type_subfolder_handling(location):
subfolder_result = self._discover_artificial_folder(
model_subfolder_name, location, name_token, check_order)
else:
subfolder_result = self._discover_subfolder_with_names(model_subfolder_name, location,
name_token)
else:
subfolder_result = self._discover_subfolder_singleton(model_subfolder_name, location)
        # Special case for artificial type subfolders: a None result means the folder
        # matched the defaults and is skipped, while an empty dictionary is still added.
if self._aliases.requires_artificial_type_subfolder_handling(location):
if subfolder_result is not None:
add_to_model(result, model_subfolder_name, subfolder_result)
else:
add_to_model_if_not_empty(result, model_subfolder_name, subfolder_result)
location.pop_location()
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return result
def _discover_subfolders(self, result, location, check_order=False):
"""
Discover the rest of the mbean hierarchy at the current location.
:param result: dictionary where to store the discovered subfolders
:param location: context containing current location information
:param check_order: True if artificial folder has an order to check
:return: populated dictionary
"""
_method_name = '_discover_subfolders'
_logger.entering(str_helper.to_string(location), method_name=_method_name, class_name=_class_name)
wlst_subfolders = self._find_subfolders(location)
if wlst_subfolders is not None:
for wlst_subfolder in wlst_subfolders:
model_subfolder_name = self._get_model_name(location, wlst_subfolder)
# will return a None if subfolder not in current wls version
if model_subfolder_name is not None:
result = self._discover_subfolder(model_subfolder_name, location, result, check_order)
_logger.finest('WLSDPLY-06114', str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=result)
return result
def _discover_single_folder(self, location):
"""
Discover the attributes in the single folder at current location and allow the
        caller to continue the discovery of any of its child folders. This is required
        for certain folders that need to be handled differently.
:param location: containing the current location information
:return: folder result dictionary:
"""
_method_name = '_discover_single_folder'
_logger.entering(str_helper.to_string(location), class_name=_class_name, method_name=_method_name)
result = OrderedDict()
subfolder_path = self._aliases.get_wlst_attributes_path(location)
if self.wlst_cd(subfolder_path, location):
self._populate_model_parameters(result, location)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return result
def _get_model_name(self, location, wlst_name):
"""
Get the model folder name for the provided wlst mbean name. Throw an exception if the model name is
not found in the aliases.
:param location: context containing the current location information
:param wlst_name: for which to locate the mbean name
:return: model name for the folder
        :raises: DiscoverException: if the mbean name is not in the alias folders
"""
_method_name = '_get_model_name'
_logger.finer('WLSDPLY-06117', wlst_name, self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
model_name = None
# get_wlst_mbean_type() throws an exception if the folder does not exist in the aliases, while
# get_model_subfolder_name() simply returns None when the alias does not exist. A folder that is
# missing from the aliases (for example, not available in this WLS version) should not stop
# discovery, so catch the exception and log a warning instead.
# Update 05/21/20 - it does not make sense to stop discovery because of a missing alias definition.
try:
mbean_type = self._aliases.get_wlst_mbean_type(location)
except DiscoverException:
_logger.warning('WLSDPLY-06156', str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
mbean_type = None
if mbean_type:
model_name = self._aliases.get_model_subfolder_name(location, wlst_name)
_logger.finest('WLSDPLY-06118', model_name, wlst_name, class_name=_class_name, method_name=_method_name)
if model_name is None:
_logger.fine('WLSDPLY-06119', wlst_name, self._get_wlst_mode_string(), self._wls_version,
class_name=_class_name, method_name=_method_name)
return model_name
def _topfolder_exists(self, model_top_folder_name):
"""
Check to see if the folder represented by the top folder name exists at the current location.
There is no way to check the wlst_type for top folders, so the top folder name and the WLST
name must be the same.
:param model_top_folder_name: model name to check for at the top level
:return: True if the folder exists at the current location in the domain
"""
result = self._wlst_helper.lsc('/', log_throwing=False)
return model_top_folder_name in result
def _subfolder_exists(self, model_folder_name, location):
"""
Check to see if the folder represented by the model folder name exists at the current location.
:param model_folder_name: to check for at location
:param location: context containing the current location information
:return: True if the folder exists at the current location in the domain
"""
temp_location = LocationContext(location)
subfolders = self._find_subfolders(temp_location)
temp_location.append_location(model_folder_name)
wlst_mbean_type = self._aliases.get_wlst_mbean_type(temp_location)
if subfolders:
return wlst_mbean_type in subfolders
return False
def _add_att_handler(self, attribute_key, method):
self._att_handler_map[attribute_key] = method
def _convert_path(self, file_name):
file_name_resolved = self._model_context.replace_token_string(file_name)
if path_utils.is_relative_path(file_name_resolved):
return convert_to_absolute_path(self._model_context.get_domain_home(), file_name_resolved)
return file_name_resolved
def _is_oracle_home_file(self, file_name):
"""
Determine whether the absolute file name is located under the Oracle home or WebLogic home.
Files located under the domain home are disregarded, even when the domain home is nested
inside the Oracle home.
:param file_name: file name to check against the Oracle home and WebLogic home
:return: True if the file is in an Oracle home (or WebLogic home) location
"""
py_str = path_utils.fixup_path(str_helper.to_string(file_name))
return (not py_str.startswith(self._model_context.get_domain_home())) and \
(py_str.startswith(self._model_context.get_oracle_home()) or
py_str.startswith(self._model_context.get_wl_home()))
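# A quick sketch of the check above (the path categories are illustrative):
#   file under the domain home                   -> False (domain files are disregarded)
#   file under the Oracle home or WebLogic home  -> True
#   anywhere else                                -> False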
def _get_wlst_mode_string(self):
"""
Helper method to return the string representation for the online/offline mode of discovery.
:return: String representation of mode
"""
return WlstModes.from_value(self._wlst_mode)
def _get_artificial_type(self, location):
"""
Return the short model name for the MBean interface found for the location object
:param location: context containing the current location information
:return: short artificial name for the model
"""
_method_name = '_get_artificial_type'
_logger.entering(str_helper.to_string(location), class_name=_class_name, method_name=_method_name)
mbean_name = None
subfolder_path = self._aliases.get_wlst_attributes_path(location)
if subfolder_path:
location_object = self.wlst_cd(subfolder_path, location)
if location_object is None:
_logger.fine('WLSDPLY-06121', self._aliases.get_wlst_attributes_path(location),
class_name=_class_name, method_name=_method_name)
else:
interfaces = location_object.getClass().getInterfaces()
if not interfaces:
_logger.info('WLSDPLY-06124', str_helper.to_string(location),
str_helper.to_string(location_object),
class_name=_class_name, method_name=_method_name)
else:
mbean_name = self._find_mbean_interface(location, interfaces)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=mbean_name)
return mbean_name
def _find_mbean_interface(self, location, interfaces):
_method_name = '_find_mbean_interface'
mbean_name = None
for interface in interfaces:
interface_name = get_interface_name(interface)
if 'MBean' in interface_name:
_logger.finer('WLSDPLY-06126', interface_name, self._aliases.get_model_folder_path(location),
class_name=_class_name, method_name=_method_name)
try:
mbean_name = self._aliases.get_model_subfolder_name(location, interface_name)
except DiscoverException, ae:
_logger.fine('WLSDPLY-06122', interface_name, ae.getLocalizedMessage(), class_name=_class_name,
method_name=_method_name)
if mbean_name is None:
_logger.fine('WLSDPLY-06125', interface_name, str_helper.to_string(location),
class_name=_class_name, method_name=_method_name)
break
return mbean_name
def _get_wlst_attributes(self, location):
wlst_attributes = []
model_attributes = self._aliases.get_model_attribute_names(location)
if model_attributes:
for model_attribute in model_attributes:
try:
wlst_attribute = self._aliases.get_wlst_attribute_name(location, model_attribute)
if wlst_attribute:
wlst_attributes.append(wlst_attribute)
except DiscoverException:
continue
return wlst_attributes
def wlst_cd(self, path, location):
"""
Change to the directory specified in the path. If the wlst.cd() fails, assume something is wrong with the
construction of the path tokens: log a message and return an indication to the caller that it should
not continue down this path.
:param path: where to change directory
:param location: context containing the current location information used to determine the path
:return: the mbean instance if the wlst.cd() was successful, or None
"""
_method_name = 'wlst_cd'
result = None
try:
result = self._wlst_helper.cd(path)
except DiscoverException, pe:
_logger.warning('WLSDPLY-06140', path, str_helper.to_string(location), pe.getLocalizedMessage(),
class_name=_class_name, method_name=_method_name)
return result
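# Typical call pattern within this class (a sketch mirroring _discover_single_folder):
#   path = self._aliases.get_wlst_attributes_path(location)
#   mbean = self.wlst_cd(path, location)
#   if mbean is None:
#       ...  # skip this subtree; the failure has already been logged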
def _inspect_artificial_folder_name(self, folder_name, location):
"""
Perform any special handling for the folder or folder names.
:param folder_name: name of the folder to inspect
:param location: context containing the current location information
:return: original name or processed name value
"""
return self._inspect_security_folder_name(folder_name, location)
def _inspect_security_folder_name(self, folder_name, location):
# This is clunky - Some security providers in 11g offline have the name "Provider", and cannot be discovered.
# If found, throw an exception here so that the SecurityConfiguration is omitted from the model.
if (not self._weblogic_helper.is_version_in_12c()) and self._wlst_mode == WlstModes.OFFLINE and \
self._aliases.is_security_provider_type(location) and 'Provider' == folder_name:
raise exception_helper.create_discover_exception('WLSDPLY-06201', folder_name, location.get_folder_path())
return folder_name
def _get_credential_injector(self):
"""
The credential injector is a specialized injector that collects credentials during the discovery process.
It is later used to create the properties file, or Kubernetes secrets.
:return: the credential injector
"""
return self._credential_injector
def _massage_online_folders(self, lsc_folders):
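"""
Filter the folder names returned by WLST lsc() down to those that the current MBean's
MBeanInfo reports as containment relationships, i.e. real child folders.
:param lsc_folders: list of folder names returned by lsc() at the current location
:return: list of folder names that correspond to containment attributes
"""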
_method_name = '_massage_online_folders'
location = self._wlst_helper.get_pwd()
folder_list = []
mbi_folder_list = []
for mbean_attribute_info in self._wlst_helper.get_mbi(location).getAttributes():
if _is_containment(mbean_attribute_info):
mbi_folder_list.append(mbean_attribute_info.getName())
for lsc_folder in lsc_folders:
if lsc_folder in mbi_folder_list:
folder_list.append(lsc_folder)
else:
_logger.finer('WLSDPLY-06144', lsc_folder, location, class_name=_class_name, method_name=_method_name)
if len(folder_list) != len(mbi_folder_list):
_logger.fine('WLSDPLY-06145', folder_list, location, mbi_folder_list, class_name=_class_name,
method_name=_method_name)
return folder_list
def _get_from_url(self, owner_name, file_name):
"""
Determine whether the provided file name is a URL location where the file is hosted. If it is a URL,
return a URL handle that can be used to retrieve the file from the hosted location.
:param owner_name: name of the owner of the file being discovered (used for logging)
:param file_name: file name to be tested as a URL
:return: tuple of (success, url, path); success is False if the name could not be parsed as a URI,
    url is the URL handle when the file is hosted at an http location (otherwise None), and
    path is the local path when the name is a file URI or a plain path (otherwise None)
"""
url = None
path = None
try:
uri = URI(file_name)
if 'http' == uri.getScheme():
url = uri.toURL()
elif 'file' == uri.getScheme() or uri.getScheme() is None:
path = uri.getPath()
except (URISyntaxException, MalformedURLException), e:
_logger.warning('WLSDPLY-06321', owner_name, file_name, e.getLocalizedMessage())
return False, None, None
return True, url, path
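# Example of consuming the tuple result (a sketch; the variable names are illustrative):
#   success, url, path = self._get_from_url(application_name, source_name)
#   if not success:
#       ...  # the name could not be parsed as a URI; a warning was already logged
#   elif url is not None:
#       ...  # retrieve the file from the remote http location
#   else:
#       ...  # treat path as a local file location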
def add_to_model_if_not_empty(dictionary, entry_name, entry_value):
"""
Helper method for discover to add a non-empty value to the dictionary with the provided entry-name
:param dictionary: to add the value
:param entry_name: key to the value
:param entry_value: to add to dictionary
:return: True if the value was not empty and added to the dictionary
"""
if entry_value and len(entry_value):
dictionary[entry_name] = entry_value
return True
return False
def add_to_model(dictionary, entry_name, entry_value):
"""
Add the entry to the model dictionary even if the value is empty.
:param dictionary: to add the value
:param entry_name: name of the key
:param entry_value: dictionary to add
"""
dictionary[entry_name] = entry_value
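# Example contrasting the two helpers above (sketch):
#   result = OrderedDict()
#   add_to_model_if_not_empty(result, 'JDBCSystemResource', OrderedDict())  # returns False, adds nothing
#   add_to_model(result, 'SecurityConfiguration', OrderedDict())            # always adds, even when empty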
def convert_to_absolute_path(relative_to, file_name):
"""
Transform the path by joining the relative_to before the file_name and converting the resulting path name to
an absolute path name.
:param relative_to: prefix of the path
:param file_name: name of the file
:return: absolute path of the relative_to and file_name
"""
if not StringUtils.isEmpty(relative_to) and not StringUtils.isEmpty(file_name):
file_name = os.path.join(relative_to, file_name)
return file_name
def _is_containment(mbean_attribute_info):
return mbean_attribute_info.getDescriptor().getFieldValue('com.bea.relationship') == 'containment'
def get_discover_logger_name():
"""
Return the common logger used for all discover logging.
:return: logger name
"""
return _DISCOVER_LOGGER_NAME
|
f7916188e6252ec889ff0737625f8fa7cf276aef
|
1df65ca19dd7cf303ba8c9481c0ed851197675ec
|
/src/tekore/_model/chapter/full.py
|
5013c4338aff26b215c877422c452ea4f7af5b30
|
[
"MIT"
] |
permissive
|
felix-hilden/tekore
|
068a178cdf66a94bf9a043f36f3e702c30f70849
|
b04e43eca81ac39f8d5559d88317435b287fd290
|
refs/heads/master
| 2023-07-24T14:44:59.401888
| 2023-07-07T18:34:29
| 2023-07-07T18:34:29
| 201,959,999
| 180
| 30
|
MIT
| 2023-06-18T18:28:13
| 2019-08-12T15:41:18
|
Python
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
full.py
|
from ..audiobook import SimpleAudiobook
from .base import Chapter
class FullChapter(Chapter):
"""Complete chapter object."""
audiobook: SimpleAudiobook
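# Usage sketch (the client call below is illustrative, not part of this module):
#   chapter = spotify.chapter(chapter_id)  # hypothetical call returning a FullChapter
#   chapter.audiobook                      # -> the parent SimpleAudiobook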
|
75ae393c6265c5b83f4ff9259a88346bfc653e5c
|
5bf29ab4578ab2daaaff48cd5db2bbbe2840a5b0
|
/collectors/PassiveTotal.py
|
2573757e1879a5143dd44b71d93b252d1f2fccd7
|
[
"BSD-3-Clause"
] |
permissive
|
gfek/Lepus
|
f98b0e2f165650bd759613e1ca21f948bd45da86
|
28de99babcf9e71190075a20f7d7fee9c1e5dd91
|
refs/heads/master
| 2023-07-21T02:20:41.541182
| 2022-11-14T16:07:52
| 2022-11-14T16:07:52
| 130,077,053
| 335
| 41
|
BSD-3-Clause
| 2023-07-17T12:19:34
| 2018-04-18T14:42:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
PassiveTotal.py
|
import requests
from termcolor import colored
from configparser import RawConfigParser
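# Expected config.ini layout (assumed from the parser.get() calls below):
#   [PassiveTotal]
#   PT_KEY = <your PassiveTotal API key>
#   PT_SECRET = <your PassiveTotal API secret>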
def init(domain):
PT = []
print(colored("[*]-Searching PassiveTotal...", "yellow"))
parser = RawConfigParser()
parser.read("config.ini")
PT_KEY = parser.get("PassiveTotal", "PT_KEY")
PT_SECRET = parser.get("PassiveTotal", "PT_SECRET")
if PT_KEY == "" or PT_SECRET == "":
print(" \__", colored("No PassiveTotal API credentials configured", "red"))
return []
else:
auth = (PT_KEY, PT_SECRET)
url = "https://api.passivetotal.org/v2/enrichment/subdomains"
data = {"query": domain}
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0"}
try:
response = requests.get(url, auth=auth, json=data, headers=headers)
if response.status_code == 402:
print(" \__", colored("Quota exceeded.", "red"))
return []
try:
for subdomain in response.json()["subdomains"]:
PT.append("%s.%s" % (subdomain, domain))
PT = set(PT)
print(" \__ {0}: {1}".format(colored("Subdomains found", "cyan"), colored(len(PT), "yellow")))
return PT
except KeyError as errk:
print(" \__", colored(errk, "red"))
return []
# Catch the specific request exceptions before the RequestException base class;
# otherwise the more specific handlers would never be reached.
except requests.exceptions.HTTPError as errh:
print(" \__", colored(errh, "red"))
return []
except requests.exceptions.ConnectionError as errc:
print(" \__", colored(errc, "red"))
return []
except requests.exceptions.Timeout as errt:
print(" \__", colored(errt, "red"))
return []
except requests.exceptions.RequestException as err:
print(" \__", colored(err, "red"))
return []
except Exception:
print(" \__", colored("Something went wrong!", "red"))
return []
|
cd55e827b95b21e978cfde99c17d5a88222d625d
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/components/test/data/password_manager/form_classification_tests/signup_forms_test.py
|
fd3847824fb77f8fff4ddbce90cfe1d85f93e036
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 25,015
|
py
|
signup_forms_test.py
|
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import unittest
from form_classification_test import FormClassificationTest
"""Unittest class for testing signup forms.
The test methods were generated by the form annotation extension
(components/test/data/password_manager/form_annotation_extension)
"""
class SignUpFormsTest(FormClassificationTest):
def test_yandex_ru(self):
self.GoTo("https://passport.yandex.ru/registration/mail")
self.CheckPwdField("#password")
def test_mail_ru(self):
self.GoTo("https://e.mail.ru/signup")
self.CheckPwdField("span.sig2 > input[type='password']")
def test_linkedin_com(self):
self.GoTo("https://www.linkedin.com/")
self.CheckPwdField("#join-password")
def test_yahoo_com(self):
self.GoTo("https://edit.yahoo.com/registration")
self.CheckPwdField("#usernamereg-password")
def test_amazon_com(self):
self.GoTo(
"https://www.amazon.com/ap/register?_encoding=UTF8&openid.assoc_handle"
"=usflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0"
"%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2F"
"auth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns="
"http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.ns.pape=http%3A%2F%"
"2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age"
"=0&openid.return_to=https%3A%2F%2Fwww.amazon.com%2Fgp%2Fyourstore%2F"
"home%3Fie%3DUTF8%26ref_%3Dnav_newcust")
self.CheckPwdField("#ap_password")
def test_google_com(self):
self.GoTo("https://accounts.google.com/signup")
self.CheckPwdField("#Passwd")
def test_baidu_com(self):
self.GoTo("https://passport.baidu.com/v2/?reg")
self.CheckPwdField("#TANGRAM__PSP_4__password")
def test_wikipedia_org(self):
self.GoTo(
"https://en.wikipedia.org/w/index.php?"
"title=Special:UserLogin&signup&type=signup")
self.CheckPwdField("#wpPassword2")
def test_qq_com(self):
self.GoTo("http://zc.qq.com/en/index.html")
self.CheckPwdField("#password")
def test_twitter_com(self):
self.GoTo("https://twitter.com/")
self.CheckPwdField("input[name='user[user_password]']")
def test_live_com(self):
self.GoTo("https://signup.live.com/")
self.CheckPwdField("#Password")
def test_sina_com(self):
self.GoTo("https://login.sina.com.cn/signup/signup")
self.CheckPwdField("input[class='reg_ipt'][type='password']")
def test_weibo_com(self):
self.GoTo("http://weibo.com/signup/signup.php")
self.CheckPwdField("input[name='passwd']")
def test_instagram_com(self):
self.GoTo("https://www.instagram.com/")
self.CheckPwdField("input[name='password']")
def test_reddit_com(self):
self.GoTo("https://www.reddit.com/")
self.Click("span.user > a")
self.CheckPwdField("#passwd_reg")
def test_360_cn(self):
self.GoTo("http://i.360.cn/reg/")
self.CheckPwdField("input[class~='quc-input-password']")
def test_pinterest_com(self):
self.GoTo("https://www.pinterest.com/")
self.CheckPwdField("#userPassword")
def test_netflix_com(self):
self.GoTo(
"https://www.netflix.com/getstarted?locale=en-GB&action=startAction")
FormClassificationTest.driver.delete_all_cookies()
self.Click("button")
self.CheckPwdField("input[name='password'][type='password']")
def test_apple_com(self):
self.GoTo("https://appleid.apple.com/account")
self.CheckPwdField("#password")
def test_stackoverflow_com(self):
self.GoTo("https://stackoverflow.com/users/signup")
self.CheckPwdField("#password")
def test_paypal_com(self):
self.GoTo("https://www.paypal.com/de/signup/account")
time.sleep(2)
self.Click("#personalSignUpForm")
self.CheckPwdField("#password")
def test_sohu_com(self):
self.GoTo("https://passport.sohu.com/signup")
self.CheckPwdField("input[name='password']")
def test_tumblr_com(self):
self.GoTo("https://www.tumblr.com/")
self.Click("span[class~='signup_get_started_btn']")
self.CheckPwdField(
"#signup_password",
"We shouldn't skip disabled elements "
"if ignore_invisible_usernames==false")
def test_www_linkedin_com(self):
self.GoTo("https://www.linkedin.com/")
self.CheckPwdField(
"INPUT#join-password[name=password][type=password]"
".cell-body-textinput")
def test_imgur_com(self):
self.GoTo("https://imgur.com")
self.Click("LI#register > A")
self.SwitchTo("IFRAME.cboxIframe")
self.SwitchTo("IFRAME#f[name='f']")
self.CheckPwdField("DIV#part-one > INPUT[name='password']"
"[type='password']")
def test_naver_com(self):
self.GoTo("https://nid.naver.com/user2/joinGlobal.nhn?m=init&lang=en_US")
self.Click("LABEL#chk_allLb")
self.Click("A.btn_agree")
self.CheckPwdField("INPUT#pswd1[name='pswd1'][type='password'].int")
self.CheckPwdField("INPUT#pswd1[name='pswd1'][type='password'].int")
def test_imdb_com(self):
self.GoTo("https://secure.imdb.com/register-imdb/form-v2")
self.CheckPwdField(
"INPUT#password1[name='password1'][type='password'].reg_thick")
def test_github_com(self):
self.GoTo("https://github.com/")
self.CheckPwdField(
"INPUT[name='user[password]'][type='password'].form-control-lg")
def test_rakuten_co_jp(self):
self.GoTo("http://www.rakuten.co.jp/")
self.Click("A.mr-new-entry-btn")
self.CheckPwdField("INPUT#p_id[name='p'][type='password']")
def test_signup_live_com(self):
self.GoTo("https://signup.live.com/signup")
self.CheckPwdField(
"INPUT#Password[name='Password'][type='password'].form-control")
def test_www_tianya_cn(self):
self.GoTo("http://www.tianya.cn/")
self.CheckPwdField(
"INPUT#password1[name='vpassword'][type='password'].text-ipt")
def test_pixnet_cc(self):
self.GoTo("https://www.pixnet.net/")
self.Click("A#switch-openid.switch-openid")
self.Click("DIV#login-1.login > DIV.box-title > A")
self.CheckPwdField(
"INPUT#user_password[name='user_password'][type='password'].text_field")
def test_jd_com(self):
self.GoTo("http://www.jd.com/")
self.Click("A.link-regist")
self.Click("IMG#imgAuthCode.img-code")
self.CheckPwdField("INPUT#form-pwd[name='pwd'][type='password'].field")
def test_kat_cr(self):
self.GoTo("https://kat.cr/")
self.Click("DIV.land-login > A.ajaxLink > SPAN")
self.Click("A#register_link.darkButton > SPAN")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].botmarg5px")
def test_my_outbrain_com(self):
self.GoTo("http://www.outbrain.com/")
self.Click("LI.register > A")
self.CheckPwdField(
"INPUT[name='password'][type='password']._am_core-input."
"ng-valid-password-rules")
def test_www_dropbox_com(self):
self.GoTo("https://www.dropbox.com/register")
self.CheckPwdField("INPUT[name='password'][type='password']"
".password-input")
def test_i_360_cn(self):
self.GoTo("http://i.360.cn/reg/?src=pcw_home")
self.CheckPwdField(
"INPUT[name='password'][type='password'].quc-input-password")
def test_adobe_com(self):
self.GoTo("http://www.adobe.com/")
self.Click("I.ui-close")
self.Click(
"LI.globalnav__js__menu-bar__item__signin > "
"BUTTON.globalnav__menu-bar__button > "
"SPAN.globalnav__menu-bar__item__title")
self.Click("A#create_account.nowrap")
self.CheckPwdField("INPUT#password[name='password'][type='password']",
"fake visible password field outside of screen")
def test_email_163_com(self):
self.GoTo("http://reg.email.163.com/unireg/call.do?cmd=register.entrance")
self.CheckPwdField(
"INPUT#mobilePwdIpt[name='mobilePassword'][type='password']"
".norWidthIpt")
def test_passport_china_com(self):
self.GoTo("http://passport.china.com/logon.do?processID=engRegister")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_adf_ly(self):
self.GoTo("http://adf.ly/")
self.Click("A#joinNow.button")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_www_booking_com(self):
self.GoTo("http://www.booking.com/index.ru.html")
self.Click(
"A.remove_padding_register_btn_right > DIV.sign_in_wrapper > SPAN")
self.CheckPwdField(
"INPUT[name='password'][type='password'].user_signup_password")
def test_passport_twitch_tv(self):
self.GoTo("https://www.twitch.tv/")
self.Click("A#header_signup.button.last > SPAN")
self.SwitchTo("IFRAME[name='passport']")
self.CheckPwdField("INPUT#password[name='password'][type='password'].text")
def test_nytimes_com(self):
self.GoTo("https://myaccount.nytimes.com/register")
self.CheckPwdField(
"INPUT[id='password1'][name='password1'][type='password'].password")
def test_www_quora_com(self):
self.GoTo("https://www.quora.com/")
self.Click("A.signup_email_link")
self.CheckPwdField(
"INPUT[name='password'][type='password'][id*=password].text")
def test_member_livedoor_com(self):
self.GoTo("http://www.livedoor.com/")
self.Click("DIV#member > UL > LI > A")
self.CheckPwdField("INPUT#password[name='password'][type='password'].pw")
def test_www_popads_net(self):
self.GoTo("https://www.popads.net/")
self.Click("IMG.right")
self.Click(
"FORM#UserRegisterForm > DIV.pad > DIV > DIV.input_wrapper > "
"DIV.password > LABEL")
self.CheckPwdField(
"INPUT#UserPass1[name='data[User][pass1]'][type='password']")
def test_ebay_com(self):
self.GoTo("https://signin.ebay.com/ws/eBayISAPI.dll?SignIn&pos=1")
self.SwitchTo("#regFrame")
self.CheckPwdField("#PASSWORD")
def test_ssl_bbc_com(self):
self.GoTo("http://www.bbc.com/")
self.Click("A#idcta-link")
self.Click("P.bbcid-pre-form > A")
self.CheckPwdField(
"INPUT#bbcid_createpassword[name='confirmpassword'][type='password']"
".password")
def test_passport_bilibili_com(self):
self.GoTo("https://passport.bilibili.com/register/phone")
self.CheckPwdField(
"INPUT#password[name='userpwd'][type='password'].user_id_password")
def test_tudou_com(self):
self.GoTo("http://login.tudou.com/reg.do")
self.CheckPwdField("INPUT#passwd[name='passwd'][type='password'].passwd")
def test_www_buzzfeed_com(self):
self.GoTo("http://www.buzzfeed.com/")
self.Click(
"DIV.page-nav__utilities > DIV#nav-signin.nav-signin > "
"DIV#usernav-signin > A#header-signin.nav-signin-icon")
self.Click("DIV.modal__tab--right")
self.CheckPwdField("INPUT[type='password'].js-user-password")
def test_www_dailymail_co_uk(self):
self.GoTo("http://www.dailymail.co.uk/home/index.html")
self.Click("A.js-login")
self.Click("A.reg-btn-join")
self.CheckPwdField(
"INPUT#reg-password[name='password'][type='password'].js-val")
def test_www_zillow_com(self):
self.GoTo("http://www.zillow.com/")
self.Click("DIV.nav-top-auth-links > A#register_opener.register")
self.SwitchTo("DIV#register_content > iframe")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_soundcloud_com(self):
self.GoTo(
"https://soundcloud.com/connect?client_id=02gUJC0hH2ct1EGOcYXQIz"
"RFU91c72Ea&response_type=token&scope=non-expiring%20fast-connect"
"%20purchase%20upload&display=next&redirect_uri=https%3A//"
"soundcloud.com/soundcloud-callback.html&highlight=signup")
self.CheckPwdField(
"INPUT#user_password[name='user[password]'][type='password'].sc-input")
def test_indeed_com(self):
self.GoTo("http://de.indeed.com/")
self.Click("A#userOptionsLabel")
self.Click("P.sign_up_prompt > A")
self.CheckPwdField(
"INPUT#register_password[name='password'][type='password']"
".input_password")
def test_www_so_com(self):
self.GoTo("https://www.so.com/")
self.Click("A#user-reg")
self.CheckPwdField(
"INPUT[name='password'][type='password'].quc-input-password")
def test_www_etsy_com(self):
self.GoTo("https://www.etsy.com/")
self.Click("A#register.register-header-action")
self.CheckPwdField("INPUT#password[name='password'][type='password'].text")
def test_www_avito_ru(self):
self.GoTo("https://www.avito.ru/registration")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].password-field")
def test_www_yelp_com(self):
self.GoTo("http://www.yelp.com/m%C3%BCnchen")
self.Click("A#header-sign-up.header-nav_button--sign-up")
self.CheckPwdField(
"FORM#signup-form.signup-form > "
"INPUT#password[name='password'][type='password']")
def test_globo_com(self):
self.GoTo("https://login.globo.com/cadastro/4728")
self.CheckPwdField(
"INPUT#senha[name='senha'][type='password'].ng-valid-maxlength")
def test_www_slideshare_net(self):
self.GoTo("http://www.slideshare.net/")
self.Click("A#signup.void_redirect_link")
self.Click("A.alternate-cta")
self.CheckPwdField(
"INPUT#j-user-password[name='user_password'][type='password']")
def test_detik_com(self):
self.GoTo("http://www.detik.com/")
self.Click("A.daftar")
self.SwitchTo("#box_login_iframe")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].input1")
def test_aol_com(self):
self.GoTo("http://www.aol.com/")
self.Click("UL.m-side-nav__list > LI > A.m-side-nav__link")
self.Click("A#getSn")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_www_mediafire_com(self):
self.GoTo("https://www.mediafire.com/upgrade/registration.php?pid=66")
self.CheckPwdField(
"INPUT#reg_pass[name='reg_pass'][type='password'].inspectletIgnore")
def test_cnet_com(self):
self.GoTo("http://www.cnet.com/uk/")
self.Click("DIV.menuHead > DIV.user-avatar")
self.Click("UL.logged-out > LI:nth-child(3)")
self.SwitchTo("iframe[id*=easyXDM_fly_]")
self.CheckPwdField(
"INPUT#user_password[name='user[password]'][type='password']")
def test_stackexchange_com(self):
self.GoTo("http://stackexchange.com/")
self.Click("SPAN.topbar-menu-links > A")
self.SwitchTo("#affiliate-iframe")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].signup-text-field")
def test_theguardian_com(self):
self.GoTo("http://www.theguardian.com/international")
self.Click("DIV.brand-bar__item--profile > A.popup__toggle")
self.Click("A#register_cta.signin__cta-link--signup")
self.CheckPwdField(
"INPUT#register_field_password[name='password'][type='password']"
".register-form__field--password")
def test_www_wittyfeed_com(self):
self.GoTo("http://www.wittyfeed.com/")
self.Click("A.express")
self.Click("A#signUp_btn.btn.btn-default")
self.CheckPwdField(
"DIV.signUp_passwrdDiv > "
"INPUT[name='password'][type='password'].form-control")
def test_www_tribunnews_com(self):
SignUpFormsTest.driver.maximize_window()
self.GoTo("http://www.tribunnews.com/")
self.Click("A#login.blue")
self.Click("A.fbo")
self.CheckPwdField(
"DIV.al.pa10 > DIV.mb10 > INPUT[name='password'][type='password']"
".input")
def test_naver_jp(self):
self.GoTo("http://matome.naver.jp/")
self.Click("UL.MdHeadUtil01 > LI.mdHeadUtil01Li:nth-child(2) > A")
self.CheckPwdField(
"INPUT#_passwd[name='kodamaUser.password'][type='password']"
".mdInputTxt01Input")
def test_steampowered_com(self):
self.GoTo("https://store.steampowered.com/join/")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_kakaku_com(self):
self.GoTo("http://kakaku.com/auth/inc_idcore_top_v2.asp")
self.Click("A.register")
self.CheckPwdField("INPUT[name='Password1'][type='password'].secPwFrm")
def test_gfycat_com(self):
self.GoTo("https://gfycat.com/signup")
self.CheckPwdField("INPUT[name='password'][type='password']")
def test_www_tripadvisor_com(self):
self.GoTo("https://www.tripadvisor.com/")
self.Click("LI#register")
self.SwitchTo("IFRAME#overlayRegFrame.overlayRegFrame")
self.Click("SPAN.regTaEmail")
self.CheckPwdField(
"DIV#regSignUp.regSignUp > INPUT[type='password'].text.regInputText")
def test_flickr_com(self):
self.GoTo("https://www.flickr.com/")
self.Click("A.gn-title.butt")
self.CheckPwdField(
"INPUT#usernamereg-password[name='password'][type='password']")
def test_feedly_com(self):
self.GoTo("https://feedly.com/v3/auth/login?newUser=true")
self.CheckPwdField("INPUT[name='password'][type='password'].input-bottom")
def test_zol_com_cn(self):
self.GoTo("http://service.zol.com.cn/user/login.php?type=reg")
self.CheckPwdField(
"INPUT#J_RegistPsw[name='J_RegistPsw'][type='password'].txt.tabInput")
def test_www_iqiyi_com(self):
self.GoTo("http://www.iqiyi.com/")
self.Click("A.register0201")
self.CheckPwdField("DIV.acountIn > INPUT[type='password'].in-txt")
def test_vimeo_com(self):
self.GoTo("https://vimeo.com/")
self.CheckPwdField(
"INPUT#signup_password[name='password'][type='password']"
".js-join_password")
def test_torrentz_eu(self):
self.GoTo("http://torrentz.eu/profile")
self.CheckPwdField("INPUT#rpass[name='pass'][type='password'].i")
def test_9gag_com(self):
self.GoTo("http://9gag.com/")
self.Click("A.badge-signup-button")
self.Click("A.badge-signup-email-link")
self.CheckPwdField(
"INPUT#signup-email-password[name='password'][type='password']")
def test_blogs_forbes_com(self):
self.GoTo("http://blogs.forbes.com/account/register")
self.CheckPwdField(
"INPUT#signup_form_password[name='password'][type='password']")
def test_en_softonic_com(self):
self.GoTo("http://en.softonic.com/linux")
self.Click("A#login_link.nav-item-icons")
self.Click("A#standard-register.button-standard-m")
self.CheckPwdField(
"INPUT#reg-password[name='password'][type='password'].field-m")
def test_store_steampowered_com(self):
self.GoTo("http://store.steampowered.com/")
self.Click("A.global_action_link")
self.Click("A.btnv6_blue_hoverfade.btn_medium")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_godaddy_com(self):
self.GoTo("https://uk.godaddy.com/")
self.Click("DIV#sign-in.pc-menu-item.ux-tray > A.ux-tray-toggle.menu-title")
self.Click("A#ux-ub-create-account.btn-primary.btn-sm")
self.CheckPwdField(
"INPUT#create_password[name='create_password'][type='password']"
".sf-tipper-target")
def test_web_de(self):
self.GoTo("http://web.de/")
self.Click("A.icon-freemail")
self.Click(
"FORM#formFreemailLogin[name='fm'].form-login > FIELDSET > "
"DIV.login-username > SPAN > A")
self.CheckPwdField(
"INPUT#id1f[name='passwordPanel:password-form:password:textfield']"
"[type='password'].password")
def test_eui_orange_fr(self):
self.GoTo(
"http://www.orange.fr/bin/frame.cgi?u=https%3A//eui.orange.fr/signup/"
"bin/signup.cgi%3F")
self.SwitchTo("FRAME[name~=w_contenu]")
self.CheckPwdField("INPUT#f_password")
def test_www_foxnews_com(self):
self.GoTo("http://www.foxnews.com/")
self.Click("A.login")
self.Click("A.capture_fox_marginTop")
self.CheckPwdField(
"INPUT#capture_traditionalRegistration_traditionalRegistration_password"
"[name='traditionalRegistration_password'][type='password']"
".capture_traditionalRegistration_password")
def test_www_homedepot_com(self):
self.GoTo("http://www.homedepot.com/")
self.Click("A#headerMyAccount.headerMyAccount__button")
self.Click("A.button.dual-sign-in-pop-up")
self.CheckPwdField(
"INPUT#dualSignIn-registrationPassword[name='logonPassword']"
"[type='password'].width_295px")
def test_www_livejournal_com(self):
self.GoTo("http://www.livejournal.com/")
self.Click("LI.s-nav-item-signup > A.b-flatbutton-simple")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].b-passview-realpass")
def test_www_babytree_com(self):
self.GoTo("http://www.babytree.com/reg/register.php")
self.CheckPwdField(
"INPUT[name='password'][type='password'].login-input-text")
def test_www_wikihow_com(self):
self.GoTo(
"https://www.wikihow.com/index.php?title=Special:UserLogin&type=signup"
"&fromhttp=1")
self.CheckPwdField(
"INPUT#wpPassword2[name='wpPassword'][type='password'].mw-input"
".loginPassword")
def test_www_facebook_com(self):
self.GoTo("https://www.facebook.com/")
self.CheckPwdField(
"INPUT#u_0_b[name='reg_passwd__'][type='password']._58mg")
def test_youku_com(self):
self.GoTo("http://login.youku.com/user_signup?from=header")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].form_input")
def test_www_wikia_com(self):
self.GoTo("https://www.wikia.com/register")
self.CheckPwdField(
"INPUT#signupPassword[name='password'][type='password']"
".input-password")
def test_www_walmart_com(self):
self.GoTo("https://www.walmart.com/account/signup")
self.CheckPwdField(
"INPUT#password[name='signup-password'][type='password']"
".js-signup-password")
def test_uptodown_com(self):
SignUpFormsTest.driver.maximize_window()
self.GoTo("http://en.uptodown.com/ubuntu")
self.Click("A.button > SPAN")
self.Click("NAV > UL > LI:nth-child(2) > A")
self.CheckPwdField("INPUT#reg_password[type='password']")
def test_www_dmm_com(self):
self.GoTo("https://www.dmm.com/en/my/-/register/")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_id_ifeng_com(self):
self.GoTo("https://id.ifeng.com/user/register")
self.CheckPwdField("INPUT#js-email-password[type='password'].txt_270")
def test_udn_com(self):
self.GoTo("https://member.udn.com/member/rule.jsp")
self.Click("INPUT#accept[type='button']")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_thepiratebay_se(self):
self.GoTo("https://thepiratebay.se/register")
self.CheckPwdField("INPUT#password[name='password'][type='password']")
def test_www_sunmaker_com(self):
self.GoTo("https://www.sunmaker.com/de/register")
self.CheckPwdField(
"INPUT[name='password'][type='password'].form-control.ng-valid-pattern")
def test_csdn_net(self):
self.GoTo("http://passport.csdn.net/account/register")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].main-password")
def test_gmx_net(self):
self.GoTo("https://registrierung.gmx.net/")
self.CheckPwdField(
"INPUT[type='password'][name='passwordPanel:password-form:"
"password:textfield'].password")
def test_www_deviantart_com(self):
self.GoTo("https://www.deviantart.com/join/")
self.CheckPwdField(
"INPUT#password1[name='password1'][type='password'].text")
def test_bet365_com(self):
self.GoTo("https://members.bet365.com/Members/OpenAccount/")
self.CheckPwdField(
"INPUT#ctl00_main_OA_tPwd[name='ctl00$main$OA$tPwd']"
"[type='password'].tbx")
def test_www_douyu_com(self):
SignUpFormsTest.driver.maximize_window()
self.GoTo("http://www.douyu.com/")
self.Click("A.u-reg")
self.CheckPwdField(
"DIV.login-pop-cont > DIV.c-item[data-type~='reg'] > FORM > P > "
"INPUT[name='password'][type='password'].ipt")
def test_allegro_pl(self):
SignUpFormsTest.driver.maximize_window()
self.GoTo("http://allegro.pl/")
self.Click(
"DIV.header-namespace > DIV.user-links-wrapper > DIV.wrapper-fluid > "
"UL.user-nav > LI.register:nth-child(8) > A")
self.CheckPwdField(
"INPUT#signup-password[name='password'][type='password']")
def test_mega_nz(self):
self.GoTo("https://mega.nz/")
self.Click("A.create-account-button")
self.CheckPwdField(
"INPUT#register-password[name='login-password'][type='password']")
def test_skype_com(self):
self.GoTo("https://login.skype.com/registration")
self.CheckPwdField(
"INPUT#password[name='password'][type='password'].password")
def test_adsterra_com(self):
self.GoTo("http://publishers.adsterra.com/signup/")
self.CheckPwdField("INPUT[name='password'][type='password']")
if __name__ == "__main__":
unittest.main()
|
cda735a979334774c7752eb151f6e3e9ee9d57d9
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/functional/modules/test_saltutil.py
|
f9e72a9f73e6cb3471727ab1e3ca56de7fd4e9fb
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
test_saltutil.py
|
import pytest
import salt.config
import salt.loader
import salt.modules.saltutil
import salt.state
from tests.support.mock import patch
@pytest.fixture
def opts(salt_master_factory):
config_overrides = {"master_uri": "tcp://127.0.0.1:11111"}
factory = salt_master_factory.salt_minion_daemon(
"get-tops-minion",
overrides=config_overrides,
)
yield factory.config.copy()
@pytest.fixture
def modules(opts):
yield salt.loader.minion_mods(opts, context={})
@pytest.fixture
def configure_mocks(opts):
with patch("salt.utils.extmods.sync", return_value=(None, None)):
with patch.object(salt.state.HighState, "top_matches", return_value={}):
# Mock the __gen_opts method of HighState so it doesn't try to auth to master.
with patch.object(
salt.state.BaseHighState, "_BaseHighState__gen_opts", return_value=opts
):
# Mock the _gather_pillar method of State so it doesn't try to auth to master.
with patch.object(salt.state.State, "_gather_pillar", return_value={}):
yield
@pytest.fixture
def destroy(configure_mocks):
with patch.object(salt.state.HighState, "destroy") as destroy:
yield destroy
@pytest.fixture
def get_top(configure_mocks):
with patch.object(salt.state.HighState, "get_top") as get_top:
yield get_top
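# Fixture chain (sketch): `destroy` and `get_top` both depend on `configure_mocks`,
# so requesting either one in a test first patches HighState/State and then yields
# the corresponding mock for assertions.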
@pytest.mark.slow_test
def test__get_top_file_envs(modules, get_top, destroy):
"""
Ensure we clean up objects created by saltutil._get_top_file_envs (issue #60449).
"""
modules["saltutil.sync_clouds"]()
assert get_top.called
# Ensure destroy is getting called
assert destroy.called
|
7289f04c6c6707d0c7f842a37c762d45dfedcc83
|
2b7180b739df298195e35a71e20a4251f83b4813
|
/dino/db/manager/broadcast.py
|
6e5fffc1d8357718c9ecc4e7a702e24921b20e2b
|
[
"Apache-2.0"
] |
permissive
|
thenetcircle/dino
|
625f752046502a04ab9ec42b0a8c437d7123bcbb
|
f1f68954191f64cdec4b3914caf154300ccbf519
|
refs/heads/master
| 2023-08-10T09:59:07.064141
| 2023-08-03T07:56:19
| 2023-08-03T07:56:19
| 69,937,941
| 153
| 21
|
Apache-2.0
| 2023-02-15T22:53:29
| 2016-10-04T05:40:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
broadcast.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dino.db.manager.base import BaseManager
from dino.environ import GNEnvironment
from dino import utils
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
logger = logging.getLogger(__name__)
class BroadcastManager(BaseManager):
def __init__(self, env: GNEnvironment):
self.env = env
def send(self, body: str, verb: str) -> None:
data = utils.activity_for_broadcast(body, verb)
self.env.out_of_scope_emit('gn_broadcast', data, json=True, namespace='/ws', broadcast=True)
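# Usage sketch (the body and verb values are illustrative):
#   BroadcastManager(env).send('maintenance window at 22:00', 'broadcast')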
|