Dataset schema (one record per source file; value ranges/classes as reported by the viewer):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes, nullable |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
---
repo_name: dcs4cop/xcube | path: /test/core/test_dump.py | filename: test_dump.py | extension: py
blob_id: 9ced3389b9913c2911c5cef8ab4352dc389d6f90 | content_id: 17b55b77cc824861e06cd181ed0b16937d6e7c52
directory_id: 8358c8d86600703663eb8a8f30493c20704cf586 | snapshot_id: 612ffeb416dfee4e6a32677a719eab1a26aee990
revision_id: a5a4da14bdc2dba80e0dd7d89b221fb30d148b77 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-17T06:36:57.207806 | revision_date: 2023-08-08T15:16:09 | committer_date: 2023-08-08T15:16:09
github_id: 130,693,090 | star_events_count: 149 | fork_events_count: 21
gha_event_created_at: 2023-09-14T07:38:55 | gha_created_at: 2018-04-23T12:27:35 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,274
content:
import unittest
from test.sampledata import new_test_dataset
from xcube.core.dump import dump_dataset
class DumpDatasetTest(unittest.TestCase):
def test_dump_dataset(self):
dataset = new_test_dataset(["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-04", "2010-01-05"],
precipitation=0.4, temperature=275.2)
for var in dataset.variables.values():
var.encoding.update({"_FillValue": 999.0})
print(dataset.dims)
text = dump_dataset(dataset)
self.assertIn("<xarray.Dataset>", text)
self.assertIn("Dimensions: (time: 5, lat: 180, lon: 360)\n", text)
self.assertIn("Coordinates:\n", text)
self.assertIn(" * lon (lon) float64 ", text)
self.assertIn("Data variables:\n", text)
self.assertIn(" precipitation (time, lat, lon) float64 ", text)
self.assertNotIn("Encoding for coordinate variable 'lat':\n", text)
self.assertNotIn("Encoding for data variable 'temperature':\n", text)
self.assertNotIn(" _FillValue: 999.0\n", text)
text = dump_dataset(dataset, show_var_encoding=True)
self.assertIn("<xarray.Dataset>", text)
self.assertIn("Dimensions: (time: 5, lat: 180, lon: 360)\n", text)
self.assertIn("Coordinates:\n", text)
self.assertIn(" * lon (lon) float64 ", text)
self.assertIn("Data variables:\n", text)
self.assertIn(" precipitation (time, lat, lon) float64 ", text)
self.assertIn("Encoding for coordinate variable 'lat':\n", text)
self.assertIn("Encoding for data variable 'temperature':\n", text)
self.assertIn(" _FillValue: 999.0\n", text)
text = dump_dataset(dataset, ["precipitation"])
self.assertIn("<xarray.DataArray 'precipitation' (time: 5, lat: 180, lon: 360)>\n", text)
self.assertNotIn("Encoding:\n", text)
self.assertNotIn(" _FillValue: 999.0", text)
text = dump_dataset(dataset, ["precipitation"], show_var_encoding=True)
self.assertIn("<xarray.DataArray 'precipitation' (time: 5, lat: 180, lon: 360)>\n", text)
self.assertIn("Encoding:\n", text)
self.assertIn(" _FillValue: 999.0", text)
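The tests above document `dump_dataset`'s contract: with no extra arguments it returns the standard xarray repr, `show_var_encoding=True` appends per-variable encoding sections, and passing a list of variable names switches to per-DataArray output. A minimal sketch of driving it directly (assumes the same xcube test helper is importable; any `xarray.Dataset` would work in its place):

```python
# Hedged usage sketch, not part of the test file.
from test.sampledata import new_test_dataset      # xcube test helper (assumed importable)
from xcube.core.dump import dump_dataset

ds = new_test_dataset(["2010-01-01"], precipitation=0.4, temperature=275.2)
print(dump_dataset(ds, show_var_encoding=True))   # dataset repr plus encoding sections
print(dump_dataset(ds, ["precipitation"]))        # single-variable (DataArray) view
```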
---
repo_name: VirtusLab/git-machete | path: /tests/test_version.py | filename: test_version.py | extension: py
blob_id: 95c8f054e846f0a4b50a6b9a5b08a437476bdffe | content_id: 98d3a2f59d8c382b767e3afb47b06f8c4af04808
directory_id: 9071dc219693bde591ad12fb31c43c635f3a3f5e | snapshot_id: 67f51e49d44601daee4cc40fa27de87ecc029af6
revision_id: dca261b0f8c56edb65557d178321a21177872b05 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-17T07:58:32.883018 | revision_date: 2023-08-12T14:52:48 | committer_date: 2023-08-12T14:52:48
github_id: 122,743,101 | star_events_count: 711 | fork_events_count: 45
gha_event_created_at: 2023-09-08T07:39:27 | gha_created_at: 2018-02-24T13:32:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 242
content:
from git_machete import __version__
from .mockers import assert_success
class TestVersion:
def test_version(self) -> None:
assert_success(
["version"],
f"git-machete version {__version__}\n"
)
---
repo_name: aws/aws-cli | path: /scripts/performance/benchmark-rm | filename: benchmark-rm | extension: null (no file extension)
blob_id: 1f010f485bee4e05ad0f21d35bb2e790a467da0e | content_id: 16009c696cdae3c0401e92ae1edf145b862f93c2
directory_id: ad61cc119a42abfd3d64224a753817ae0f9ba058 | snapshot_id: 30b0e5b0fb6d736f1540990955f0a7351ee7a908
revision_id: 147d16dfdb72dc9cf362b676a57e46a49375afbd | branch_name: refs/heads/develop
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-09-03T19:52:07.955543 | revision_date: 2023-09-01T20:37:50 | committer_date: 2023-09-01T20:37:50
github_id: 6,780,767 | star_events_count: 13,038 | fork_events_count: 4,107
gha_event_created_at: 2023-09-13T19:48:11 | gha_created_at: 2012-11-20T16:07:36 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 801
content:
#!/usr/bin/env python
from benchmark_utils import benchmark_command, get_transfer_command
from benchmark_utils import backup, copy, clean, get_default_argparser
def benchmark_rm(args):
command = get_transfer_command(
'rm %s' % args.target, args.recursive, args.quiet)
backup_path = backup(args.target, args.recursive)
benchmark_command(
command, args.benchmark_script, args.summarize_script,
args.result_dir, args.num_iterations, args.dry_run,
upkeep=lambda: copy(backup_path, args.target, args.recursive),
cleanup=lambda: clean(backup_path, args.recursive)
)
if __name__ == "__main__":
parser = get_default_argparser()
parser.add_argument('-t', '--target', required=True, help='An S3 path.')
benchmark_rm(parser.parse_args())
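The script is driven entirely by `get_default_argparser` plus the `-t/--target` flag added above, so a typical invocation would be something like `./benchmark-rm -t s3://bucket/prefix --recursive` (the `--recursive` flag is an assumption here: `args.recursive` is referenced above, so it presumably comes from the default argparser). Each iteration times the `rm` command, the `upkeep` callback restores the deleted objects from the backup between runs, and `cleanup` removes the backup at the end.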
---
repo_name: dropbox/dropbox-sdk-python | path: /dropbox/account.py | filename: account.py | extension: py
blob_id: cd1db4bfaaeeadfdad4316c1787edfd7ecad14a8 | content_id: 8a30b5d7b0f8038e4683eb8fcb606f442247d622
directory_id: 57adfd30d44dcec446e55306265b68ee08b51655 | snapshot_id: 610c0cbbfcc7bdacda6da859a8247b56005bbc44
revision_id: 487793dff3c5a8a3a76010799dc4803cabdb70f3 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-04T23:06:41.483053 | revision_date: 2023-05-25T17:17:10 | committer_date: 2023-05-25T17:17:10
github_id: 37,347,427 | star_events_count: 1,029 | fork_events_count: 453
gha_event_created_at: 2023-08-16T19:51:59 | gha_created_at: 2015-06-12T22:23:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,360
content:
# -*- coding: utf-8 -*-
# Auto-generated by Stone, do not modify.
# @generated
# flake8: noqa
# pylint: skip-file
from __future__ import unicode_literals
from stone.backends.python_rsrc import stone_base as bb
from stone.backends.python_rsrc import stone_validators as bv
class PhotoSourceArg(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar str account.PhotoSourceArg.base64_data: Image data in base64-encoded
bytes.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
other = None
@classmethod
def base64_data(cls, val):
"""
Create an instance of this class set to the ``base64_data`` tag with
value ``val``.
:param str val:
:rtype: PhotoSourceArg
"""
return cls('base64_data', val)
def is_base64_data(self):
"""
Check if the union tag is ``base64_data``.
:rtype: bool
"""
return self._tag == 'base64_data'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_base64_data(self):
"""
Image data in base64-encoded bytes.
Only call this if :meth:`is_base64_data` is true.
:rtype: str
"""
if not self.is_base64_data():
raise AttributeError("tag 'base64_data' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(PhotoSourceArg, self)._process_custom_annotations(annotation_type, field_path, processor)
PhotoSourceArg_validator = bv.Union(PhotoSourceArg)
class SetProfilePhotoArg(bb.Struct):
"""
:ivar account.SetProfilePhotoArg.photo: Image to set as the user's new
profile photo.
"""
__slots__ = [
'_photo_value',
]
_has_required_fields = True
def __init__(self,
photo=None):
self._photo_value = bb.NOT_SET
if photo is not None:
self.photo = photo
# Instance attribute type: PhotoSourceArg (validator is set below)
photo = bb.Attribute("photo", user_defined=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(SetProfilePhotoArg, self)._process_custom_annotations(annotation_type, field_path, processor)
SetProfilePhotoArg_validator = bv.Struct(SetProfilePhotoArg)
class SetProfilePhotoError(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar account.SetProfilePhotoError.file_type_error: File cannot be set as
profile photo.
:ivar account.SetProfilePhotoError.file_size_error: File cannot exceed 10
MB.
:ivar account.SetProfilePhotoError.dimension_error: Image must be larger
than 128 x 128.
:ivar account.SetProfilePhotoError.thumbnail_error: Image could not be
thumbnailed.
:ivar account.SetProfilePhotoError.transient_error: Temporary infrastructure
failure, please retry.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
file_type_error = None
# Attribute is overwritten below the class definition
file_size_error = None
# Attribute is overwritten below the class definition
dimension_error = None
# Attribute is overwritten below the class definition
thumbnail_error = None
# Attribute is overwritten below the class definition
transient_error = None
# Attribute is overwritten below the class definition
other = None
def is_file_type_error(self):
"""
Check if the union tag is ``file_type_error``.
:rtype: bool
"""
return self._tag == 'file_type_error'
def is_file_size_error(self):
"""
Check if the union tag is ``file_size_error``.
:rtype: bool
"""
return self._tag == 'file_size_error'
def is_dimension_error(self):
"""
Check if the union tag is ``dimension_error``.
:rtype: bool
"""
return self._tag == 'dimension_error'
def is_thumbnail_error(self):
"""
Check if the union tag is ``thumbnail_error``.
:rtype: bool
"""
return self._tag == 'thumbnail_error'
def is_transient_error(self):
"""
Check if the union tag is ``transient_error``.
:rtype: bool
"""
return self._tag == 'transient_error'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(SetProfilePhotoError, self)._process_custom_annotations(annotation_type, field_path, processor)
SetProfilePhotoError_validator = bv.Union(SetProfilePhotoError)
class SetProfilePhotoResult(bb.Struct):
"""
:ivar account.SetProfilePhotoResult.profile_photo_url: URL for the photo
representing the user, if one is set.
"""
__slots__ = [
'_profile_photo_url_value',
]
_has_required_fields = True
def __init__(self,
profile_photo_url=None):
self._profile_photo_url_value = bb.NOT_SET
if profile_photo_url is not None:
self.profile_photo_url = profile_photo_url
# Instance attribute type: str (validator is set below)
profile_photo_url = bb.Attribute("profile_photo_url")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(SetProfilePhotoResult, self)._process_custom_annotations(annotation_type, field_path, processor)
SetProfilePhotoResult_validator = bv.Struct(SetProfilePhotoResult)
PhotoSourceArg._base64_data_validator = bv.String()
PhotoSourceArg._other_validator = bv.Void()
PhotoSourceArg._tagmap = {
'base64_data': PhotoSourceArg._base64_data_validator,
'other': PhotoSourceArg._other_validator,
}
PhotoSourceArg.other = PhotoSourceArg('other')
SetProfilePhotoArg.photo.validator = PhotoSourceArg_validator
SetProfilePhotoArg._all_field_names_ = set(['photo'])
SetProfilePhotoArg._all_fields_ = [('photo', SetProfilePhotoArg.photo.validator)]
SetProfilePhotoError._file_type_error_validator = bv.Void()
SetProfilePhotoError._file_size_error_validator = bv.Void()
SetProfilePhotoError._dimension_error_validator = bv.Void()
SetProfilePhotoError._thumbnail_error_validator = bv.Void()
SetProfilePhotoError._transient_error_validator = bv.Void()
SetProfilePhotoError._other_validator = bv.Void()
SetProfilePhotoError._tagmap = {
'file_type_error': SetProfilePhotoError._file_type_error_validator,
'file_size_error': SetProfilePhotoError._file_size_error_validator,
'dimension_error': SetProfilePhotoError._dimension_error_validator,
'thumbnail_error': SetProfilePhotoError._thumbnail_error_validator,
'transient_error': SetProfilePhotoError._transient_error_validator,
'other': SetProfilePhotoError._other_validator,
}
SetProfilePhotoError.file_type_error = SetProfilePhotoError('file_type_error')
SetProfilePhotoError.file_size_error = SetProfilePhotoError('file_size_error')
SetProfilePhotoError.dimension_error = SetProfilePhotoError('dimension_error')
SetProfilePhotoError.thumbnail_error = SetProfilePhotoError('thumbnail_error')
SetProfilePhotoError.transient_error = SetProfilePhotoError('transient_error')
SetProfilePhotoError.other = SetProfilePhotoError('other')
SetProfilePhotoResult.profile_photo_url.validator = bv.String()
SetProfilePhotoResult._all_field_names_ = set(['profile_photo_url'])
SetProfilePhotoResult._all_fields_ = [('profile_photo_url', SetProfilePhotoResult.profile_photo_url.validator)]
set_profile_photo = bb.Route(
'set_profile_photo',
1,
False,
SetProfilePhotoArg_validator,
SetProfilePhotoResult_validator,
SetProfilePhotoError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
ROUTES = {
'set_profile_photo': set_profile_photo,
}
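Since the union and struct classes are fully defined above, the tagged-union pattern can be exercised without any network call; a short standalone sketch:

```python
# Build the argument for a set_profile_photo RPC from base64 image data.
arg = SetProfilePhotoArg(photo=PhotoSourceArg.base64_data("aGVsbG8="))
assert arg.photo.is_base64_data()        # exactly one is_* method returns True
assert not arg.photo.is_other()
print(arg.photo.get_base64_data())       # -> 'aGVsbG8='
```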
---
repo_name: openstack/tacker | path: /tacker/sol_refactored/conductor/conductor_rpc_v2.py | filename: conductor_rpc_v2.py | extension: py
blob_id: 0c4e9cae136cd3623306a6374fe11d254f35b94b | content_id: b9526be7d7702ac315e8761ec75306593375df2a
directory_id: aee26a4c731a84481a499679c3d4cef9ec954aed | snapshot_id: 6976cbee3afadfd9390849b56da2837feb93e912
revision_id: 9c7918f0b501cdeaffae40f585b76fc92b8e196e | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-04T01:22:43.106241 | revision_date: 2023-08-31T00:06:42 | committer_date: 2023-08-31T00:42:20
github_id: 21,259,951 | star_events_count: 125 | fork_events_count: 172
gha_event_created_at: 2021-05-09T06:13:08 | gha_created_at: 2014-06-27T01:11:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,064
content:
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from tacker.common import rpc
from tacker.sol_refactored.objects import base as objects_base
TOPIC_CONDUCTOR_V2 = 'TACKER_CONDUCTOR_V2'
class VnfLcmRpcApiV2(object):
target = oslo_messaging.Target(
exchange='tacker',
topic=TOPIC_CONDUCTOR_V2,
fanout=False,
version='1.0')
def _cast_lcm_op(self, context, lcmocc_id, method):
serializer = objects_base.TackerObjectSerializer()
client = rpc.get_client(self.target, version_cap=None,
serializer=serializer)
cctxt = client.prepare()
cctxt.cast(context, method, lcmocc_id=lcmocc_id)
def start_lcm_op(self, context, lcmocc_id):
self._cast_lcm_op(context, lcmocc_id, 'start_lcm_op')
def retry_lcm_op(self, context, lcmocc_id):
self._cast_lcm_op(context, lcmocc_id, 'retry_lcm_op')
def rollback_lcm_op(self, context, lcmocc_id):
self._cast_lcm_op(context, lcmocc_id, 'rollback_lcm_op')
def modify_vnfinfo(self, context, lcmocc_id):
self._cast_lcm_op(context, lcmocc_id, 'modify_vnfinfo')
def server_notification_cast(self, context, method, **kwargs):
serializer = objects_base.TackerObjectSerializer()
client = rpc.get_client(
self.target, version_cap=None, serializer=serializer)
cctxt = client.prepare()
cctxt.cast(context, method, **kwargs)
def server_notification_notify(
self, context, vnf_instance_id, vnfc_instance_ids):
self.server_notification_cast(
context, 'server_notification_notify',
vnf_instance_id=vnf_instance_id,
vnfc_instance_ids=vnfc_instance_ids)
def server_notification_remove_timer(self, context, vnf_instance_id):
self.server_notification_cast(
context, 'server_notification_remove_timer',
vnf_instance_id=vnf_instance_id)
TOPIC_PROMETHEUS_PLUGIN = 'TACKER_PROMETHEUS_PLUGIN'
class PrometheusPluginConductor(object):
target = oslo_messaging.Target(
exchange='tacker',
topic=TOPIC_PROMETHEUS_PLUGIN,
fanout=False,
version='1.0')
def cast(self, context, method, **kwargs):
serializer = objects_base.TackerObjectSerializer()
client = rpc.get_client(
self.target, version_cap=None, serializer=serializer)
cctxt = client.prepare()
cctxt.cast(context, method, **kwargs)
def store_alarm_info(self, context, alarm):
self.cast(context, 'store_alarm_info', alarm=alarm)
def store_job_info(self, context, report):
self.cast(context, 'store_job_info', report=report)
def store_threshold_state_info(self, context, threshold_states):
self.cast(context, 'store_threshold_state_info',
threshold_states=threshold_states)
def trigger_scale(self, context, id, scale_req):
self.cast(context, 'trigger_scale', id=id, scale_req=scale_req)
def enqueue_auto_heal_instance(
self, context, vnf_instance_id, vnfc_info_id):
self.cast(context, 'enqueue_auto_heal_instance',
vnf_instance_id=vnf_instance_id,
vnfc_info_id=vnfc_info_id)
def dequeue_auto_heal_instance(self, context, vnf_instance_id):
self.cast(context, 'dequeue_auto_heal_instance',
vnf_instance_id=vnf_instance_id)
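Both classes are thin wrappers around oslo.messaging casts, so every call is fire-and-forget: the caller never blocks on a reply. A hypothetical caller-side sketch (the context helper is an assumption following the usual OpenStack convention, not taken from this file):

```python
from tacker import context as tacker_context   # assumed import path

rpc = VnfLcmRpcApiV2()
ctxt = tacker_context.get_admin_context()      # assumed helper
rpc.start_lcm_op(ctxt, lcmocc_id="example-lcmocc-id")  # async cast, returns immediately
```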
---
repo_name: burakbayramli/books | path: /python_for_finance_yuxing/test01.py | filename: test01.py | extension: py
blob_id: fe324e670e4080584e9cd09ec6fd63f0bb0e64ac | content_id: 585cda687639208ad790fbce39fce51356c75e49
directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-08-17T05:31:08.885134 | revision_date: 2023-08-14T10:05:37 | committer_date: 2023-08-14T10:05:37
github_id: 72,460,321 | star_events_count: 223 | fork_events_count: 174
gha_event_created_at: 2022-10-24T12:15:06 | gha_created_at: 2016-10-31T17:24:00 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 38
content:
def pv_f(fv,r,n):
return fv/(1+r)**n
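`pv_f` is the textbook present-value formula, PV = FV / (1 + r)^n. A quick worked check:

```python
# 100 received in one year, discounted at 10%: 100 / 1.1 ≈ 90.91.
print(pv_f(100, 0.1, 1))   # 90.9090...
# Two years out: 100 / 1.1**2 ≈ 82.64.
print(pv_f(100, 0.1, 2))   # 82.6446...
```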
---
repo_name: allegro/ralph | path: /src/ralph/dashboards/tests/test_renderer.py | filename: test_renderer.py | extension: py
blob_id: 7f59e5bd178ba19f14f1412c8977656d6602305f | content_id: e8e4520020ea3ad21e29786fbb4ec7255fc133d0
directory_id: 3abc1fef99ac6ce0b845a1090fae7f6875fee729 | snapshot_id: 5ff9165a202e836061c99e8af20214e0d651622f
revision_id: b4a72356f527b1f12c7babd7465d2d7fa3ffb0d3 | branch_name: refs/heads/ng
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-02T01:13:43.672554 | revision_date: 2023-09-01T09:48:38 | committer_date: 2023-09-01T09:48:38
github_id: 4,359,038 | star_events_count: 1,970 | fork_events_count: 617
gha_event_created_at: 2023-09-01T09:44:39 | gha_created_at: 2012-05-17T14:04:57 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,156
content:
from django.test import SimpleTestCase
from ralph.dashboards.renderers import build_filters
class BuildFilterTestCase(SimpleTestCase):
def test_without_aggregation(self):
self.assertEqual(
build_filters(labels='id', value=10),
{'id': 10}
)
def test_with_year_aggregation(self):
self.assertEqual(
build_filters(labels='patchdeadline|year', value=2017),
{
'patchdeadline__gte': '2017-01-01',
'patchdeadline__lte': '2017-12-31',
}
)
def test_with_month_aggregation(self):
self.assertEqual(
build_filters(labels='patchdeadline|month', value='2017-12'),
{
'patchdeadline__gte': '2017-12-01',
'patchdeadline__lte': '2017-12-31',
}
)
def test_with_day_aggregation(self):
self.assertEqual(
build_filters(labels='patchdeadline|day', value='2017-12-01'),
{
'patchdeadline__gte': '2017-12-01 00:00:00',
'patchdeadline__lte': '2017-12-01 23:59:59',
}
)
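These tests pin down `build_filters`' contract: a plain label yields an exact-match filter, while `field|year`, `field|month`, and `field|day` labels expand into `__gte`/`__lte` range filters. A hypothetical re-implementation that satisfies the cases above (a sketch, not ralph's actual code):

```python
import calendar

def build_filters_sketch(labels, value):
    """Hypothetical stand-in for ralph.dashboards.renderers.build_filters."""
    if "|" not in labels:
        return {labels: value}
    field, agg = labels.split("|")
    if agg == "year":
        return {f"{field}__gte": f"{value}-01-01",
                f"{field}__lte": f"{value}-12-31"}
    if agg == "month":
        year, month = map(int, str(value).split("-"))
        last_day = calendar.monthrange(year, month)[1]   # handles 28/29/30/31
        return {f"{field}__gte": f"{value}-01",
                f"{field}__lte": f"{value}-{last_day:02d}"}
    if agg == "day":
        return {f"{field}__gte": f"{value} 00:00:00",
                f"{field}__lte": f"{value} 23:59:59"}
    raise ValueError(f"unknown aggregation: {agg}")
```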
---
repo_name: devzhk/Implicit-Competitive-Regularization | path: /GANs/cifar_models.py | filename: cifar_models.py | extension: py
blob_id: 95ea9eab22d71af92f3a60af1604ddcadf4e97ee | content_id: d998e34f677d57a2fc8e5f21b4cf2d6bf6472586
directory_id: b889e24f6f68d407cebfa8404d15ec980f596cf2 | snapshot_id: e7071c79dd83f28d18191fecaf38b0aa10e0604c
revision_id: 71bda29f2db18d1d7ae9860e4a761ff61cbec756 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2022-02-02T15:32:29.148198 | revision_date: 2021-04-01T09:06:10 | committer_date: 2021-04-01T09:06:10
github_id: 213,550,763 | star_events_count: 115 | fork_events_count: 27
gha_event_created_at: 2020-12-05T06:08:40 | gha_created_at: 2019-10-08T04:45:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,556
content:
import torch.nn as nn
class dcD32(nn.Module):
def __init__(self):
super(dcD32, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1), # 3x32x32 -> 32x28x28
nn.LeakyReLU(0.01),
nn.MaxPool2d(2, 2), # 32x14x14
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1), # 32x14x14 -> 64x10x10
nn.LeakyReLU(0.01),
nn.MaxPool2d(2, 2), # 64x5x5
)
self.fc = nn.Sequential(
nn.Linear(64 * 5 * 5, 1024),
nn.LeakyReLU(0.01),
nn.Linear(1024, 1)
)
def forward(self, x):
x = self.conv(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
class dcG32(nn.Module):
def __init__(self, z_dim=128):
super(dcG32, self).__init__()
self.fc = nn.Sequential(
nn.Linear(z_dim, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Linear(1024, 8 * 8 * 128)
)
self.convt = nn.Sequential(
nn.ConvTranspose2d(in_channels=128, out_channels=64,
kernel_size=4, stride=2, padding=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(in_channels=64, out_channels=3,
kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
x = self.fc(x)
x = x.view(x.shape[0], 128, 8, 8)
return self.convt(x)
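The two networks are shape-compatible with CIFAR-sized images: `dcG32` maps a 128-dim latent vector through an 8x8x128 feature map and two stride-2 transposed convolutions to a 3x32x32 image, and `dcD32` maps that image back to a single logit. A quick smoke-test sketch (not part of the file):

```python
import torch

G, D = dcG32(z_dim=128), dcD32()
z = torch.randn(4, 128)           # batch of 4 latent vectors (BatchNorm1d needs batch > 1)
fake = G(z)                       # torch.Size([4, 3, 32, 32])
logits = D(fake)                  # torch.Size([4, 1])
print(fake.shape, logits.shape)
```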
---
repo_name: catapult-project/catapult | path: /dashboard/dashboard/pinpoint/models/quest/run_lacros_telemetry_test_test.py | filename: run_lacros_telemetry_test_test.py | extension: py
blob_id: 6dbab414fd664f6cd532f048ff647907d3aef2fa | content_id: 358350c6fd68dd2b78ed5a46598f874041d0fc40
directory_id: 3a6a211ea0d32405497fbd6486c490bb147e25f9 | snapshot_id: e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
revision_id: 53102de187a48ac2cfc241fef54dcbc29c453a8e | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2021-05-25T07:37:22.832505 | revision_date: 2021-05-24T08:01:49 | committer_date: 2021-05-25T06:07:38
github_id: 33,947,548 | star_events_count: 2,032 | fork_events_count: 742
gha_event_created_at: 2022-08-26T16:01:18 | gha_created_at: 2015-04-14T17:49:05 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,717
content:
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from dashboard.pinpoint.models.quest import run_lacros_telemetry_test
from dashboard.pinpoint.models.quest import run_performance_test
from dashboard.pinpoint.models.quest import run_telemetry_test
from dashboard.pinpoint.models.quest import run_test_test
_BASE_ARGUMENTS = {
'configuration': 'some_configuration',
'swarming_server': 'server',
'dimensions': run_test_test.DIMENSIONS,
'benchmark': 'some_benchmark',
'browser': 'lacros-chrome',
'builder': 'builder name',
'target': 'performance_test_suite_eve',
}
_COMBINED_DEFAULT_EXTRA_ARGS = (
run_telemetry_test._DEFAULT_EXTRA_ARGS +
run_performance_test._DEFAULT_EXTRA_ARGS)
_BASE_EXTRA_ARGS = [
'--benchmarks',
'some_benchmark',
'--pageset-repeat',
'1',
'--browser',
'lacros-chrome',
] + _COMBINED_DEFAULT_EXTRA_ARGS
_TELEMETRY_COMMAND = [
'luci-auth', 'context', '--', 'vpython',
'bin/run_performance_test_suite_eve',
'--remote=variable_chromeos_device_hostname'
]
_BASE_SWARMING_TAGS = {}
class FromDictTest(unittest.TestCase):
def testMinimumArgumentsEve(self):
quest = run_lacros_telemetry_test.RunLacrosTelemetryTest.FromDict(
_BASE_ARGUMENTS)
expected = run_lacros_telemetry_test.RunLacrosTelemetryTest(
'server', run_test_test.DIMENSIONS, _BASE_EXTRA_ARGS,
_BASE_SWARMING_TAGS, _TELEMETRY_COMMAND, 'out/Release')
self.assertEqual(quest, expected)
---
repo_name: epsagon/list-lambdas | path: /list_lambdas.py | filename: list_lambdas.py | extension: py
blob_id: ec423ed2a22fd868c3b0d8306b50a867832acde5 | content_id: 6919d5a126f80fb2d3afa488b1ad309c1b3a52dd
directory_id: d9ac1064889bafa3aed4561cf9dc9c797c4d41e3 | snapshot_id: 401ae522214ad97c2cb4d51b7fe3195e3f794c66
revision_id: 3685671a6977b53fa6d3cc4976425c795415df79 | branch_name: refs/heads/master
detected_licenses: ["CC-BY-SA-4.0", "MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2022-03-04T08:43:02.138657 | revision_date: 2022-02-21T11:16:26 | committer_date: 2022-02-21T11:16:26
github_id: 119,053,680 | star_events_count: 191 | fork_events_count: 44
gha_event_created_at: 2022-02-21T11:03:53 | gha_created_at: 2018-01-26T13:18:49 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,038
content:
"""
Enumerates Lambda functions from every region with interesting metadata
"""
from __future__ import print_function
from datetime import datetime
import argparse
import codecs
import boto3
from boto3.session import Session
from botocore.exceptions import ClientError
from terminaltables import AsciiTable
import progressbar
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
BYTE_TO_MB = 1024.0 * 1024.0
ALL_TABLE_HEADERS = [
'Region',
'Function',
'Memory (MB)',
'Code Size (MB)',
'Timeout (seconds)',
'Runtime',
'Last Modified',
'Last Invocation',
'Description',
]
SORT_KEYS = ['region', 'last-modified', 'last-invocation', 'runtime']
def list_available_lambda_regions():
"""
Enumerates list of all Lambda regions
:return: list of regions
"""
session = Session()
return session.get_available_regions('lambda')
def init_boto_client(client_name, region, args):
"""
Initiates boto's client object
:param client_name: client name
:param region: region name
:param args: arguments
:return: Client
"""
if args.token_key_id and args.token_secret:
boto_client = boto3.client(
client_name,
aws_access_key_id=args.token_key_id,
aws_secret_access_key=args.token_secret,
region_name=region
)
elif args.profile:
session = boto3.session.Session(profile_name=args.profile)
boto_client = session.client(client_name, region_name=region)
else:
boto_client = boto3.client(client_name, region_name=region)
return boto_client
def get_days_ago(datetime_obj):
"""
Converts a datetime object to "time ago" string
:param datetime_obj: Datetime
:return: "time ago" string
"""
days_ago = (datetime.now() - datetime_obj).days
datetime_str = 'Today'
if days_ago == 1:
datetime_str = 'Yesterday'
elif days_ago > 1:
datetime_str = '{0} days ago'.format(days_ago)
return datetime_str
def get_last_invocation(region, args, function_name):
"""
Return last invocation timestamp (epoch) or -1 if not found.
-1 can be returned if no log group exists for Lambda,
or if there are no streams in the log.
:param region: function region
:param args: arguments
:param function_name: function name
:return: last invocation or -1
"""
logs_client = init_boto_client('logs', region, args)
last_invocation = -1
try:
logs = logs_client.describe_log_streams(
logGroupName='/aws/lambda/{0}'.format(function_name),
orderBy='LastEventTime',
descending=True
)
except ClientError as _:
return last_invocation
log_streams_timestamp = [
log.get('lastEventTimestamp', 0) for log in logs['logStreams']
]
if log_streams_timestamp:
last_invocation = max(log_streams_timestamp)
return last_invocation
def create_tables(lambdas_data, args):
"""
Create the output tables
:param lambdas_data: a list of the Lambda functions and their data
:param args: argparse arguments
:return: textual table-format information about the Lambdas
"""
all_table_data = [ALL_TABLE_HEADERS]
for lambda_data in lambdas_data:
function_data = lambda_data['function-data']
last_invocation = 'N/A (no invocations?)'
if lambda_data['last-invocation'] != -1:
last_invocation = get_days_ago(
datetime.fromtimestamp(lambda_data['last-invocation'] / 1000)
)
all_table_data.append([
lambda_data['region'],
str(function_data['FunctionName']),
str(function_data['MemorySize']),
'%.2f' % (function_data['CodeSize'] / BYTE_TO_MB),
str(function_data['Timeout']),
str(function_data['Runtime']) if 'Runtime' in function_data else '',
get_days_ago(lambda_data['last-modified']),
last_invocation,
'"' + function_data['Description'] + '"'
])
if args.should_print_all:
min_table_data = all_table_data
else:
        # Keep only the region, function, runtime, last invocation and description columns
min_table_data = [
[
lambda_data[0], lambda_data[1], lambda_data[5], lambda_data[-2], lambda_data[-1]
]
for lambda_data in all_table_data
]
return min_table_data, all_table_data
def print_lambda_list(args):
"""
Main function
:return: None
"""
regions = list_available_lambda_regions()
progress_bar = progressbar.ProgressBar(max_value=len(regions))
lambdas_data = []
for region in progress_bar(regions):
lambda_client = init_boto_client('lambda', region, args)
next_marker = None
response = lambda_client.list_functions()
while next_marker != '':
next_marker = ''
functions = response['Functions']
if not functions:
continue
for function_data in functions:
# Extract last modified time
last_modified = datetime.strptime(
function_data['LastModified'].split('.')[0],
DATETIME_FORMAT
)
# Extract last invocation time from logs
last_invocation = get_last_invocation(
region,
args,
function_data['FunctionName']
)
if last_invocation != -1:
inactive_days = (
datetime.now() -
datetime.fromtimestamp(last_invocation / 1000)
).days
if args.inactive_days_filter > inactive_days:
continue
# print(function_data)
lambdas_data.append({
'region': region,
'function-data': function_data,
'last-modified': last_modified,
'last-invocation': last_invocation,
'runtime': function_data['Runtime'] if 'Runtime' in function_data else ''
})
# Verify if there is next marker
if 'NextMarker' in response:
next_marker = response['NextMarker']
response = lambda_client.list_functions(Marker=next_marker)
# Sort data by the given key (default: by region)
lambdas_data.sort(key=lambda x: x[args.sort_by])
min_table_data, all_table_data = create_tables(lambdas_data, args)
table = AsciiTable(min_table_data)
print(table.table)
if not args.csv:
return
with codecs.open(args.csv, 'w', encoding='utf-8') as output_file:
for table_row in all_table_data:
output_line = '{0}\n'.format(','.join(table_row))
output_file.writelines(output_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(
'Enumerates Lambda functions from every region with '
'interesting metadata.'
)
)
parser.add_argument(
'--all',
dest='should_print_all',
default=False,
action='store_true',
help=(
'Print all the information to the screen '
'(default: print summarized information).'
)
)
parser.add_argument(
'--csv',
type=str,
help='CSV filename to output full table data.',
metavar='output_filename'
)
parser.add_argument(
'--token-key-id',
type=str,
help=(
'AWS access key id. Must provide AWS secret access key as well '
'(default: from local configuration).'
),
metavar='token-key-id'
)
parser.add_argument(
'--token-secret',
type=str,
help=(
'AWS secret access key. Must provide AWS access key id '
            'as well (default: from local configuration).'
),
metavar='token-secret'
)
parser.add_argument(
'--inactive-days-filter',
type=int,
help='Filter only Lambda functions with minimum days of inactivity.',
default=0,
metavar='minimum-inactive-days'
)
parser.add_argument(
'--sort-by',
type=str,
help=(
'Column name to sort by. Options: region, '
'last-modified, last-invocation, '
'runtime (default: region).'
),
default='region',
metavar='sort_by'
)
parser.add_argument(
'--profile',
type=str,
help=(
'AWS profile. Optional '
'(default: "default" from local configuration).'
),
metavar='profile'
)
arguments = parser.parse_args()
if arguments.sort_by not in SORT_KEYS:
print('ERROR: Illegal column name: {0}.'.format(arguments.sort_by))
exit(1)
print_lambda_list(arguments)
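All of these flags are defined above, so a representative run is `python list_lambdas.py --inactive-days-filter 30 --sort-by last-invocation --csv lambdas.csv`: it prints the summarized ASCII table for every region and writes the full table to `lambdas.csv`. Credentials fall back to the local AWS configuration when `--token-key-id`/`--token-secret` and `--profile` are omitted.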
---
repo_name: pinetr2e/napkin | path: /tests/plantuml/test_outside.py | filename: test_outside.py | extension: py
blob_id: c7b3c47b5e8200d8026746dffccf8c67183876f1 | content_id: 77e2b977a5301ec0eab8f14b4b96bb33a2776e61
directory_id: e579188f958ae43ee5bbcce5e85a3494b829a6d3 | snapshot_id: 2797542abeadefff5ade9945961eba2e34208695
revision_id: fb1da3d3b3b9fceb59a4adc1287a93393d0baa4c | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2021-07-19T12:32:06.746980 | revision_date: 2021-07-18T07:21:34 | committer_date: 2021-07-18T07:21:34
github_id: 27,030,611 | star_events_count: 203 | fork_events_count: 9
gha_event_created_at: 2021-07-18T03:57:20 | gha_created_at: 2014-11-23T11:43:17 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 696
content:
def test_calling_from_outside(check_puml):
def f(c):
foo = c.object('foo')
with c.outside():
foo.func()
check_puml(f, """
participant foo
[-> foo : func()
""")
def test_return_to_outside(check_puml):
def f(c):
foo = c.object('foo')
with c.outside():
foo.func().ret()
check_puml(f, """
participant foo
[-> foo : func()
activate foo
[<-- foo
deactivate foo
""")
def test_from_right_hand_side(check_puml):
def f(c):
foo = c.object('foo')
with c.outside(from_right=True):
foo.func().ret()
check_puml(f, """
participant foo
]-> foo : func()
activate foo
]<-- foo
deactivate foo
""")
---
repo_name: CrowdStrike/falconpy | path: /samples/sensor_update_policies/policy_wonk.py | filename: policy_wonk.py | extension: py
blob_id: 426da53977a73100f07fafcfc40e84278755c118 | content_id: 82b7be15271c805b19c6956677239e213695f869
directory_id: 3eaef0fa3c0be14c47c6aa1e1fcfc51ccebe65c7 | snapshot_id: 9dd97ee0d703d35f7da100a4c78c91f1f5911478
revision_id: b112fde2f3fbe44615f9a3b60b8210e89e51c1d5 | branch_name: refs/heads/main
detected_licenses: ["Unlicense"] | license_type: permissive | gha_license_id: Unlicense
visit_date: 2023-08-18T19:45:46.092966 | revision_date: 2023-08-12T01:59:37 | committer_date: 2023-08-12T01:59:37
github_id: 312,363,599 | star_events_count: 256 | fork_events_count: 109
gha_event_created_at: 2023-09-13T02:59:04 | gha_created_at: 2020-11-12T18:33:23 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 27,323
content:
r"""CrowdStrike Falcon Sensor Update Policy management utilty.
______ _ _ _ _ _
| ___ \ | (_) | | | | | |
| |_/ /__ | |_ ___ _ _ | | | | ___ _ __ | | __
| __/ _ \| | |/ __| | | | | |/\| |/ _ \| '_ \| |/ /
| | | (_) | | | (__| |_| | \ /\ / (_) | | | | <
\_| \___/|_|_|\___|\__, | \/ \/ \___/|_| |_|_|\_\
__/ |
|___/ for Sensor Update Policies
FalconPy v1.0
Creation date: 05.06.2022 - jshcodes@CrowdStrike
Required packages
crowdstrike-falconpy
tabulate
Multiple simultaneous actions may be performed against
multiple Sensor Update Policy records using this utility.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
from tabulate import tabulate
try:
from falconpy import (
SensorUpdatePolicy,
HostGroup,
__version__ as FALCONPY_VERSION
)
except ImportError as no_falconpy:
RED = "\033[91m"
YEL = "\033[33m"
NOCOL = "\033[0m"
BOLD = "\033[1m"
raise SystemExit(fr"""{RED}
_ _ ____ ____ ____ _ ____ ____ _ _ ___ _ _
|\ | | | |___ |__| | | | | |\ | |__] \_/
| \| |__| | | | |___ |___ |__| | \| | |{YEL} ヽ༼ຈʖ̯ຈ༽ノ{NOCOL}
This application requires CrowdStrike FalconPy v1.0+
Install it with: {BOLD}python3 -m pip install crowdstrike-falconpy{NOCOL}
""") from no_falconpy
class Color: # pylint: disable=R0903
"""Class to represent the text color codes used for terminal output."""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
MAGENTA = "\033[35m"
BLUE = "\033[34m"
LIGHTBLUE = "\033[94m"
GREEN = "\033[32m"
LIGHTGREEN = "\033[92m"
LIGHTYELLOW = "\033[93m"
YELLOW = "\033[33m"
RED = "\033[31m"
LIGHTRED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def connect_sensor_update_api(key: str, sec: str, kid: str, base: str):
"""Connect to the sensor update policies service collection."""
return SensorUpdatePolicy(client_id=key, client_secret=sec, member_cid=kid, base_url=base)
def connect_host_group_api(key: str, sec: str, kid: str, base: str):
"""Connect to the Host Group service collection."""
return HostGroup(client_id=key, client_secret=sec, member_cid=kid, base_url=base)
def generate_api_error_list(error_object: list):
"""Display all error messages received from the API."""
error_list = []
for err in error_object:
error_list.append(f"[{err['code']}] {err['message']}")
return "\n".join(error_list)
def step_indicator():
"""Super lazy progress indicator."""
global INDICATOR_POSITION # pylint: disable=W0603
INDICATOR_POSITION += 1 # pylint: disable=E0602
if INDICATOR_POSITION > len(INDICATOR) - 1:
INDICATOR_POSITION = 0
return INDICATOR[INDICATOR_POSITION]
def shiny_help_text(inbound: str):
"""Shine up the help text display."""
inbound = inbound.replace("______", f"{Color.BLUE}______")
inbound = inbound.replace("|___/", f"|___/{Color.END}")
inbound = inbound.replace("Creation date:", f"{Color.BOLD}Creation date:{Color.END}")
inbound = inbound.replace("Required packages", f"{Color.UNDERLINE}{Color.BOLD}Required packages{Color.END}")
inbound = inbound.replace("Sensor Update Policies", f"{Color.DARKCYAN}Sensor Update Policies{Color.END}")
inbound = inbound.replace("jshcodes@CrowdStrike",
f"{Color.GREEN}jshcodes{Color.END}{Color.BOLD}@{Color.RED}CrowdStrike{Color.END}"
)
return inbound
def consume_arguments():
"""Consume arguments from the command line."""
desc = shiny_help_text(__doc__)
parser = ArgumentParser(description=desc, formatter_class=RawTextHelpFormatter)
# List
disp = parser.add_argument_group("list arguments")
disp.add_argument("-l", "--list_all", help="Show all policies (Default action)", required=False, action="store_true")
disp.add_argument("-k", "--kernels", help="Show kernel build compatibility details", required=False, action="store_true")
disp.add_argument("-b", "--builds", help="Show available builds", required=False, action="store_true")
disp.add_argument("-o", "--host_groups", help="Show available host groups", required=False, action="store_true")
disp.add_argument("-m", "--maintenance",
help="Show maintenance or a specific uninstall token",
required=False,
action="store_true"
)
disp.add_argument("-v", "--show_members", help="Show policy members in results", required=False, action="store_true")
disp.add_argument("-z", "--show_groups",
help="Show host groups assigned to policies in results",
required=False,
action="store_true"
)
# Search
srch = parser.add_argument_group("search arguments")
srch.add_argument("-q", "--search_string", help="String to match against policy or host group name", required=False)
# Create
crt = parser.add_argument_group("create arguments")
crt.add_argument("-c", "--create", help="Create a new policy", required=False, action="store_true")
# Update
upd = parser.add_argument_group("update and delete arguments")
upd.add_argument("-d", "--disable", help="Disable the policy", required=False, action="store_true")
upd.add_argument("-e", "--enable", help="Enable the policy", required=False, action="store_true")
upd.add_argument("-x", "--disable_uninstall_protection",
help="Disable uninstall protection for the policy",
required=False,
action="store_true"
)
upd.add_argument("-u", "--enable_uninstall_protection",
help="Enable uninstall protection for the policy",
required=False,
action="store_true"
)
upd.add_argument("-p", "--precedence",
help="Set policy precedence (will apply list in order received)\n"
"Use the policy_id argument to provide the list",
required=False,
action="store_true"
)
upd.add_argument("-r", "--remove", help="Remove the policy", required=False, action="store_true")
upd.add_argument("-g", "--add_host_group", help="Add host group to the specified policy\n(comma delimit)", required=False)
upd.add_argument("-y", "--yank_host_group",
help="Remove host group from the specified policy\n(comma delimit)",
required=False
)
# IDs and platform names for updates
idg = parser.add_argument_group("required arguments for updating or removing policies")
idg.add_argument("-i", "--policy_id", help="ID(s) of the policy to update or remove (comma delimit)", required=False)
idg.add_argument("-n", "--platform_name", help="Platform name for policy precedence configurations", required=False)
# MSSP
msp = parser.add_argument_group("MSSP arguments")
msp.add_argument("-w", "--member_cid", help="Child CID (MSSP access)", required=False)
# Other
oth = parser.add_argument_group("other arguments")
oth.add_argument("-t", "--base_url", help="Specify the API base URL", required=False)
# Always required
req = parser.add_argument_group("always required arguments")
req.add_argument("-f", "--falcon_client_id", help="Falcon Client ID", required=True)
req.add_argument("-s", "--falcon_client_secret", help="Falcon Client Secret", required=True)
return parser.parse_args()
def process_command_line(): # pylint: disable=R0912,R0915
"""Process the consumed command line arguments."""
args = consume_arguments()
command_to_perform = []
update_type = None
flag_type = None
if args.disable or args.enable or args.policy_id or args.enable_uninstall_protection or args.disable_uninstall_protection:
command_to_perform.append("update")
if args.disable:
flag_type = "DISABLE"
if args.enable:
flag_type = "ENABLE"
if args.enable_uninstall_protection:
update_type = "ENABLE_UNINSTALL"
if args.disable_uninstall_protection:
update_type = "DISABLE_UNINSTALL"
if not args.policy_id:
raise SystemExit(ID_REQUIRED)
if args.remove:
command_to_perform.append("remove")
if not args.policy_id:
raise SystemExit(ID_REQUIRED)
if args.kernels:
command_to_perform.append("kernel")
if args.builds:
command_to_perform.append("builds")
if args.host_groups:
command_to_perform.append("host_groups")
if args.create:
command_to_perform.append("create")
if args.maintenance:
command_to_perform.append("maintenance")
if args.precedence:
command_to_perform.append("precedence")
if not args.policy_id:
raise SystemExit(ID_REQUIRED)
if not args.platform_name:
raise SystemExit("You must specify a platform name to use this function.")
group_id = None
if args.add_host_group:
command_to_perform = "add_host_group"
if not args.policy_id:
raise SystemExit(ID_REQUIRED)
group_id = args.add_host_group.split(",")
if args.yank_host_group:
command_to_perform = "del_host_group"
if not args.policy_id:
raise SystemExit(ID_REQUIRED)
group_id = args.yank_host_group.split(",")
if args.policy_id:
args.policy_id = args.policy_id.split(",")
hide_members = True
if args.show_members:
hide_members = False
hide_groups = True
if args.show_groups:
hide_groups = False
mssp_access = None
if args.member_cid:
mssp_access = args.member_cid
base_url = "auto"
if args.base_url:
base_url = args.base_url
return command_to_perform, args.falcon_client_id, args.falcon_client_secret, args.search_string,\
args.policy_id, update_type, flag_type, hide_members, args.platform_name, group_id,\
hide_groups, mssp_access, base_url
def hide_members_column():
"""Show or hide the members column."""
member_detail = []
members = "No members found"
lookup = falcon.query_combined_policy_members()
if lookup["status_code"] == 200:
for member in lookup["body"]["resources"]:
print(f" Loading policies... {step_indicator()}", end="\r", flush="True")
memb = f"{member.get('hostname', 'Not available')} ({member.get('local_ip', 'Not available')})\n"
memb = f"{memb}{Color.DARKCYAN}{member['device_id']}{Color.END}"
member_detail.append(memb)
members = "\n".join(member_detail)
return members
def clean_groups_column(group_list: list):
"""Clean the host groups column."""
if group_list:
group_detail = ""
cnt = 0
for polgroup in group_list:
cnt += 1
if 1 < cnt <= len(group_list):
group_detail = f"{group_detail}\n"
group_detail = f"{group_detail}{Color.BOLD}{polgroup['name']}\n{Color.DARKCYAN}{polgroup['id']}{Color.END}"
else:
group_detail = "None specified"
return group_detail
def list_policies(search_string: str = None): # pylint: disable=R0914,R0915
"""List all sensor update policies."""
def pop_keys(incoming: dict):
"""Remove all unnecessary keys"""
incoming.pop("id")
incoming.pop("cid")
incoming.pop("description")
incoming.pop("created_by")
incoming.pop("created_timestamp")
incoming.pop("groups")
incoming.pop("modified_by")
incoming.pop("modified_timestamp")
incoming.pop("name")
incoming.pop("platform_name")
incoming.pop("enabled")
return incoming
    def get_uninst_color(incoming: str):
        """Retrieve the color for uninstall protection."""
        # Default keeps selected_color bound even for unexpected values.
        selected_color = Color.END
        if incoming == "ENABLED":
            selected_color = Color.GREEN
        if incoming == "DISABLED":
            selected_color = Color.RED
        if incoming == "IGNORE":
            selected_color = Color.YELLOW
        return selected_color
def get_version(incoming: dict):
"""Retrieve and colorize the version number."""
ver = incoming["settings"].get("sensor_version", "Not found")
if ver == "":
ver = f"{Color.MAGENTA}Not set{Color.END}"
return ver
def calc_filter(search_val: str):
calced_search_filter = None
if search_val:
calced_search_filter=f"name:*'*{search_val}*'"
return calced_search_filter
def get_table_headers():
head = {
"name" : "Name",
"platform_name": "Platform",
"enabled": "Enabled",
"version": "Sensor version",
"build": "Build",
"uninstall": "Uninstall Protection"
}
if not HIDE:
head["members"] = "Members"
if not GROUP_HIDE:
head["groups"] = "Groups"
return head
def format_build(incoming: dict):
bld = incoming["settings"].get("build", f"{Color.MAGENTA}Not set{Color.END}")
if bld == "":
bld = f"{Color.MAGENTA}Not set{Color.END}"
return bld
print(f" Loading policies... {step_indicator()}", end="\r", flush="True")
search_filter=calc_filter(search_string)
policy_list = falcon.query_combined_policies_v2(filter=search_filter)
print(f" Loading policies... {step_indicator()}", end="\r", flush="True")
policies = policy_list["body"]["resources"]
if not policies:
raise SystemExit(NO_RESULTS_FOUND)
for policy in policies:
print(f" Loading policies... {step_indicator()}", end="\r", flush="True")
nam = f"{Color.BOLD}{policy['name']}{Color.END}\n{Color.CYAN}{policy['id']}{Color.END}"
if policy["description"]:
nam = f"{nam}\n{policy['description']}"
plat = policy["platform_name"]
enable_color = Color.GREEN if bool(policy["enabled"]) else Color.RED
enab = f"{enable_color}{policy['enabled']}{Color.END}"
grou = clean_groups_column(policy["groups"])
policy = pop_keys(policy)
policy["name"] = nam
policy["platform_name"] = plat
policy["enabled"] = enab
uninst = policy["settings"].get("uninstall_protection", "Not found")
uninst_color = get_uninst_color(uninst)
policy["uninstall"] = f"{uninst_color}{uninst}{Color.END}"
policy["version"] = get_version(policy)
policy["build"] = format_build(policy)
policy.pop("settings")
if not HIDE:
policy["members"] = hide_members_column()
print(f" Loading policies... {step_indicator()}", end="\r", flush="True")
if not GROUP_HIDE:
policy["groups"] = grou
print(tabulate(policies, get_table_headers(), tablefmt="fancy_grid"))
def update_policies(id_to_update: str, update_style: str = "", flag_style: str = ""):
"""Enable or disable the policy or it's uninstallation protection."""
keywords = {
"id": id_to_update,
}
update_result = None
if update_style in ["ENABLE_UNINSTALL", "DISABLE_UNINSTALL"]:
if update_style == "ENABLE_UNINSTALL":
keywords["uninstall_protection"] = "ENABLED"
if update_style == "DISABLE_UNINSTALL":
keywords["uninstall_protection"] = "DISABLED"
update_result = falcon.update_policies_v2(**keywords)
if flag_style in ["ENABLE", "DISABLE"]:
update_result = falcon.perform_policies_action(action_name=flag_style.lower(), ids=id_to_update)
if update_result:
if update_result["status_code"] != 200:
raise SystemExit(generate_api_error_list(update_result["body"]["errors"]))
def list_kernel_compatibility():
"""List all available kernels."""
kernel_list_lookup = falcon.query_combined_kernels()
if kernel_list_lookup["status_code"] != 200:
raise SystemExit(generate_api_error_list(kernel_list_lookup["body"]["errors"]))
kernel_list = kernel_list_lookup["body"]["resources"]
if not kernel_list:
raise SystemExit(NO_RESULTS_FOUND)
for kernel in kernel_list:
kernel.pop("id")
vend = kernel["vendor"]
distro = kernel["distro"]
distro_ver = kernel["distro_version"]
arch = kernel["architecture"]
flav = kernel['flavor']
if flav:
flav = f" {flav}"
vers = kernel["version"]
rel = kernel["release"]
base_support = "\n".join(kernel["base_package_supported_sensor_versions"])
if kernel["ztl_supported_sensor_versions"]:
ztl_support = "\n".join(kernel["ztl_supported_sensor_versions"])
else:
ztl_support = "None"
if kernel["ztl_module_supported_sensor_versions"]:
ztl_module = "\n".join(kernel["ztl_module_supported_sensor_versions"])
else:
ztl_module = "None"
kernel.pop("vendor")
kernel.pop("distro")
kernel.pop("distro_version")
kernel.pop("architecture")
kernel.pop("flavor")
kernel.pop("version")
kernel.pop("release")
kernel.pop("base_package_supported_sensor_versions")
kernel.pop("ztl_supported_sensor_versions")
kernel.pop("ztl_module_supported_sensor_versions")
kernel.pop("created_timestamp")
kernel.pop("modified_timestamp")
det = f"{vend} {Color.BOLD}{distro}{Color.END} ({distro_ver}/{arch}{flav})\n"
det = f"{det}Release: {Color.CYAN}{rel}{Color.END}\n{vers}"
kernel["detail"] = det
kernel["base"] = base_support
kernel["ztl"] = ztl_support
kernel["ztl_module"] = ztl_module
headers = {
"detail": "Kernel",
"base": "Sensor versions",
"ztl": "ZTL versions",
"ztl_module": "ZTL module versions"
}
print(tabulate(kernel_list, headers, tablefmt="fancy_grid"))
def get_builds():
"""Retrieve the list of builds from the API."""
build_lookup = falcon.query_combined_builds()
if build_lookup["status_code"] != 200:
raise SystemExit(generate_api_error_list(build_lookup["body"]["errors"]))
build_list = build_lookup["body"]["resources"]
return build_list
def list_builds():
"""List all available builds."""
builds = get_builds()
if not builds:
raise SystemExit(NO_RESULTS_FOUND)
headers = {
"build": "Build",
"platform": "Platform",
"sensor_version": "Sensor version"
}
print(tabulate(builds, headers, tablefmt="fancy_grid"))
def unique_list(list_to_dedupe: list):
"""Remove duplicates from a list."""
list_set = set(list_to_dedupe)
return list(list_set)
def delete_policy(policy_to_delete: str):
"""Delete a sensor update policy."""
remove_result = falcon.delete_policies(ids=policy_to_delete)
if remove_result["status_code"] != 200:
raise SystemExit(generate_api_error_list(remove_result["body"]["errors"]))
def get_build_response(builds: dict, avail: list):
"""Ask the user for the desired build id."""
acceptable_build = False
while not acceptable_build:
print(tabulate(builds, tablefmt="fancy_grid"))
build_to_use = input("Build for this policy (q = Quit)? ")
if build_to_use.lower() == "q":
raise SystemExit("Creation cancelled.")
if build_to_use in avail:
acceptable_build = True
else:
cont = input("You've entered an invalid build. Press enter to continue or 'q' to quit. ")
if cont.lower() == "q":
raise SystemExit("Creation cancelled.")
return build_to_use
def get_platform_response(plats: dict, avail: list):
"""Ask the user for the desired platform."""
acceptable_platform = False
while not acceptable_platform:
print(tabulate(plats, tablefmt="fancy_grid"))
plat_to_use = input("Platform for this policy? ")
if plat_to_use.lower() == "q":
raise SystemExit("Creation cancelled.")
if plat_to_use in avail:
acceptable_platform = True
else:
cont = input("You've entered in invalid platform. Press enter to continue or 'q' to quit. ")
if cont.lower() == "q":
raise SystemExit("Creation cancelled.")
return plat_to_use
def create_policy():
"""Create a new sensor update policy."""
builds = get_builds()
avail_builds = sorted(unique_list([x["build"] for x in builds]))
avail_plats = sorted(unique_list([y["platform"] for y in builds]))
all_platforms = []
for platform in avail_plats:
plat = {}
plat["Platform"] = platform
all_platforms.append(plat)
all_builds = []
for build in avail_builds:
bld = {}
bld["Build"] = build
all_builds.append(bld)
name_to_use = input("Name to use for the new policy? ")
policy_desc = input("Description to use for this policy? ")
build_id = get_build_response(all_builds, avail_builds)
plat_id = get_platform_response(all_platforms, avail_plats)
creation = falcon.create_policies_v2(platform_name=plat_id,
description=policy_desc,
name=name_to_use,
build=build_id
)
if creation["status_code"] != 201:
raise SystemExit(generate_api_error_list(creation["body"]["errors"]))
def show_token(id_for: str = "MAINTENANCE"):
"""Display uninstall and bulk maintenance tokens."""
maint_token_lookup = falcon.reveal_uninstall_token(device_id=id_for)
if maint_token_lookup["status_code"] != 200:
raise SystemExit("Unable to retrieve maintenance tokens.")
maint_token = maint_token_lookup["body"]["resources"][0]["uninstall_token"]
disp_text = "Bulk maintenance token: "
if id_for != "MAINTENANCE":
disp_text = f"Uninstall token for {id_for}: "
print(f"{disp_text}{Color.BOLD}{maint_token}{Color.END}")
def set_precedence(id_list: list, platform: str):
"""Set policy precedence by passing a list and a platform name."""
update_result = falcon.set_policies_precedence(ids=id_list, platform_name=platform)
if update_result["status_code"] != 200:
raise SystemExit("Unable to set policy precedence.")
def change_host_group(id_to_change: str, ids_to_update: str, style: str = "add_host_group"):
"""Add or remove host groups from the policy."""
id_list = ",".join(id_to_change)
action_parameters = {
"name": "group_id",
"value": id_list # Must be comma delimited string
}
action_name = None
if style == "add_host_group":
action_name = "add-host-group"
if style == "del_host_group":
action_name = "remove-host-group"
update_result = falcon.perform_policies_action(action_name=action_name,
ids=ids_to_update,
action_parameters=[action_parameters]
)
if update_result["status_code"] != 200:
raise SystemExit("Unable to change host group assignments.")
def list_host_groups(search_str: str = ""):
"""List all available host groups."""
search_filter = None
if search_str:
search_filter = f"name:*'*{search_str}*'"
host_group_lookup = falcon_groups.query_combined_host_groups(filter=search_filter)
if host_group_lookup["status_code"] != 200:
raise SystemExit(generate_api_error_list(host_group_lookup["body"]["errors"]))
host_groups = host_group_lookup["body"]["resources"]
if not host_groups:
raise SystemExit(NO_RESULTS_FOUND)
headers = {
"name": "Name",
"group_type": "Group Type",
"assignemnt_rule": "Rule"
}
for hgroup in host_groups:
nam = hgroup["name"]
nam = f"{Color.BOLD}{nam}\n{Color.DARKCYAN}{hgroup['id']}{Color.END}"
nam = f"{nam}\n{hgroup['description']}"
rule = hgroup.get("assignment_rule", "Not set")
gtype = hgroup["group_type"]
hgroup.pop("id")
hgroup.pop("group_type")
hgroup.pop("description")
if rule != "Not set":
hgroup.pop("assignment_rule")
hgroup.pop("created_by")
hgroup.pop("created_timestamp")
hgroup.pop("modified_by")
hgroup.pop("modified_timestamp")
hgroup["name"] = nam
hgroup["group_type"] = gtype
hgroup["rule"] = rule
print(tabulate(host_groups, headers, tablefmt="fancy_grid"))
NO_RESULTS_FOUND = fr"""{Color.YELLOW}
_ _ ____ ____ ____ ____ _ _ _ ___ ____
|\ | | | |__/ |___ [__ | | | | [__
| \| |__| | \ |___ ___] |__| |___ | ___]{Color.END}
"""
ID_REQUIRED = fr"""{Color.YELLOW}
_ ___ _ _ ____ ___ ___ ____ ____ _ _ _ ___ ____ ___
| | \ |\ | | | | |__] |__/ | | | | | | \ |___ | \
| |__/ | \| |__| | | | \ |__| \/ | |__/ |___ |__/{Color.END}
You must specify a list of IDs using the '-i' argument to use this function.
"""
INVALID_VERSION = fr"""{Color.LIGHTRED}
_ _ _ _ _ ____ _ _ ___ _ _ ____ ____ ____ _ ____ _ _
| |\ | | | |__| | | | \ | | |___ |__/ [__ | | | |\ |
| | \| \/ | | |___ | |__/ \/ |___ | \ ___] | |__| | \|{Color.END}{Color.YELLOW} ¯\_( ͡° ͜ʖ ͡°)_/¯{Color.END}
This application requires CrowdStrike FalconPy v{Color.BOLD}1.0+{Color.END}
Install it with: {Color.BOLD}python3 -m pip install crowdstrike-falconpy{Color.END}
"""
if int(FALCONPY_VERSION.split(".")[0]) < 1:
raise SystemExit(INVALID_VERSION)
INDICATOR = ["|", "/", "-", "\\"]
INDICATOR_POSITION = 0
command, client_id, client_secret, API_SEARCH, policy_id, which_update, \
enable_disable, HIDE, platform_name, hg_id, GROUP_HIDE, member_cid, base_url = process_command_line()
falcon = connect_sensor_update_api(client_id, client_secret, member_cid, base_url)
falcon_groups = connect_host_group_api(client_id, client_secret, member_cid, base_url)
if "kernel" in command:
list_kernel_compatibility()
if "builds" in command:
list_builds()
if "host_groups" in command:
list_host_groups(API_SEARCH)
if API_SEARCH:
API_SEARCH = None
if "update" in command:
for pid in policy_id:
update_policies(pid, which_update, enable_disable)
if "remove" in command:
for pid in policy_id:
delete_policy(pid)
if "create" in command:
create_policy()
if "precedence" in command:
set_precedence(policy_id, platform_name)
if "add_host_group" in command or "del_host_group" in command:
change_host_group(hg_id, policy_id, command)
if "maintenance" in command:
if policy_id:
for pid in policy_id:
show_token(pid)
else:
show_token()
list_policies(API_SEARCH)
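Every flag referenced here is declared in `consume_arguments`, so a representative invocation is `python3 policy_wonk.py -f $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -q production -v -z`, which lists all sensor update policies whose names match "production" with members and host groups shown; adding `-m -i <policy_id>` would instead reveal the uninstall token for that policy.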
---
repo_name: evereux/pycatia | path: /pycatia/mec_mod_interfaces/origin_elements.py | filename: origin_elements.py | extension: py
blob_id: 90f229835142041a888973d998f1304be719ceaa | content_id: 08bde110c51c5521962fddcb60ca2bfe138bc273
directory_id: 499f5402baed77d000c65f243b457c69dc3d2fe4 | snapshot_id: 416189b34f3c60effea8a76258e36ffc5ae86e22
revision_id: 5f5726d5dc66265b3eba8a01910c4aeae424365d | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-21T10:03:41.660445 | revision_date: 2023-08-09T16:21:10 | committer_date: 2023-08-09T16:21:10
github_id: 159,069,580 | star_events_count: 141 | fork_events_count: 42
gha_event_created_at: 2023-08-09T11:15:27 | gha_created_at: 2018-11-25T20:04:31 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,805
content:
#!/usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
class OriginElements(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| OriginElements
|
| Represents the part's 3D reference axis system.
| It allows an easy access to 3D reference axis system of a Part object thru the
| three planes XY, YZ, and ZX.
| See Part for parent object.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.origin_elements = com_object
@property
def plane_xy(self) -> AnyObject:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property PlaneXY() As AnyObject (Read Only)
|
| Returns the XY plane of the part 3D reference axis system.
|
| Example:
| The following example returns in plnXY the XY plane of the partRoot
| part from the partDoc part document:
|
| Set partRoot = partDoc.Part
| Set plnXY = partRoot.originElements.PlaneXY
:return: AnyObject
:rtype: AnyObject
"""
return AnyObject(self.origin_elements.PlaneXY)
@property
def plane_yz(self) -> AnyObject:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property PlaneYZ() As AnyObject (Read Only)
|
| Returns the YZ plane of the part 3D reference axis system.
|
| Example:
| The following example returns in plnYZ the YZ plane of the partRoot
| part from the partDoc part document:
|
| Set partRoot = partDoc.Part
| Set plnYZ = partRoot.originElements.PlaneYZ
:return: AnyObject
:rtype: AnyObject
"""
return AnyObject(self.origin_elements.PlaneYZ)
@property
def plane_zx(self) -> AnyObject:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property PlaneZX() As AnyObject (Read Only)
|
| Returns the ZX plane of the part 3D reference axis system.
|
| Example:
| The following example returns in plnZX the ZX plane of the partRoot
| part from the partDoc part document:
|
| Set partRoot = partDoc.Part
| Set plnZX = partRoot.originElements.PlaneZX
:return: AnyObject
:rtype: AnyObject
"""
return AnyObject(self.origin_elements.PlaneZX)
def __repr__(self):
return f'OriginElements(name="{ self.name }")'
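# Hypothetical usage sketch (not part of the generated module). It assumes a
# running CATIA session reachable through pycatia's `catia()` entry point and
# an active part document exposing an `origin_elements` attribute; treat the
# exact attribute names as assumptions:
#
#     from pycatia import catia
#
#     caa = catia()
#     part = caa.active_document.part
#     plane_xy = part.origin_elements.plane_xy  # AnyObject wrapping the XY plane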
|
313f5345a081fbc3aa5dd30cb08abc56fd1762ef
|
6477d93b0cbffd21751b84d89fdefced0628ae54
|
/setup.py
|
7090054156610999b98e99ee04ed46a42ab6deca
|
[
"MIT"
] |
permissive
|
closeio/redis-hashring
|
7c04e1cb1a2f4d5d12245706bd7300ae1a8f687f
|
2347f8432af8fb1910cbf2e052adb5793f2df8b3
|
refs/heads/master
| 2023-07-24T16:40:39.311739
| 2023-07-18T11:07:47
| 2023-07-18T11:07:47
| 40,690,837
| 152
| 15
|
MIT
| 2023-07-18T11:00:49
| 2015-08-14T02:03:20
|
Python
|
UTF-8
|
Python
| false
| false
| 969
|
py
|
setup.py
|
from setuptools import setup
setup(
name='redis-hashring',
version='0.3.3',
author='Close Engineering',
author_email='engineering@close.com',
url='http://github.com/closeio/redis-hashring',
license='MIT',
description=(
'Python library for distributed applications using a Redis hash ring'
),
install_requires=['redis>=3,<5'],
platforms='any',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
packages=[
'redis_hashring',
],
)
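# Hypothetical install sketch (the distribution name comes from `name` above,
# the importable module from `packages`):
#
#     python3 -m pip install redis-hashring
#     python3 -c "import redis_hashring"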
|
d179e8781161a26a0535e7dc645220c74e74b7cb
|
a2dbff3e4f7cb1c84fed0835dc76106621e75cb9
|
/examples/rebundle.py
|
9e223aa83a4c1703013c12c8c30df2e013e07419
|
[
"MIT"
] |
permissive
|
K0lb3/UnityPy
|
e3325c9b993ad910bd68fdfcf6c55889ebb894ab
|
2fe1be2abfb8e3d53ba062f70390a517f41cfae7
|
refs/heads/master
| 2023-08-19T04:51:40.863686
| 2023-07-18T12:38:31
| 2023-07-18T12:38:31
| 198,518,141
| 612
| 126
|
MIT
| 2023-08-31T11:02:15
| 2019-07-23T22:36:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,722
|
py
|
rebundle.py
|
"""
This script shows how to create a bundle from assets dumped from memory.
The dumped assets consist of SerializedFiles and their resources (CABs).
A sample file from the original game is required for this script.
This example uses the globalgamemanagers file, as this asset should exist in all Unity games.
"""
import os
import uuid
import random
from copy import copy
import re
import UnityPy
from UnityPy.enums import ClassIDType
from UnityPy.files import BundleFile
from UnityPy.files.SerializedFile import FileIdentifier, ObjectReader, SerializedType
SERIALIZED_PATH = r"globalgamemanagers"
DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
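# Assumed layout of DATA_PATH (illustrative; inferred from the os.walk below):
#   data/
#     CAB-*                  -> added as raw bundle resource files via add_cab()
#     <path_id>.<ClassName>  -> serialized object dumps added via add_object()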
def main():
bf = Fake(
signature="UnityFS",
version=6,
format=6,
version_engine="2017.4.30f1",
version_player="5.x.x",
_class=BundleFile,
files={},
)
# load default serialized file and prepare some variables for easier access to key objects
env = UnityPy.load(SERIALIZED_PATH)
sf = env.file # serialized file
or_bp = list(sf.objects.values())[0].__dict__ # object data
bf.files["serialized_file"] = sf
sf.flags = 4
    # remove all unnecessary objects and external references
for key in list(sf.objects.keys()):
del sf.objects[key]
sf.externals = []
# add all files from DATA_PATH
for root, dirs, files in os.walk(DATA_PATH):
for f in files:
fp = os.path.join(root, f)
if f[:3] == "CAB":
add_cab(bf, sf, root, f)
else:
add_object(sf, fp, or_bp)
# save edited bundle
open("bundle_edited.unity3d", "wb").write(bf.save())
def add_cab(bf, sf, root, f):
fp = os.path.join(root, f)
bf.files[f] = Fake(data=open(fp, "rb").read(), flags=4)
sf.externals.append(
Fake(
temp_empty="",
guid=generate_16_byte_uid(),
path=f"archive:/{os.path.basename(root)}/{f}",
type=0,
_class=FileIdentifier,
)
)
def add_object(sf, fp, or_bp):
# get correct type id
path_id, class_name = os.path.splitext(os.path.basename(fp))
path_id = int(path_id) if re.match(
r"^\d+$", path_id) else generate_path_id(sf.objects)
class_id = getattr(
ClassIDType, class_name[1:], ClassIDType.UnknownType).value
type_id = -1
for i, styp in enumerate(sf.types):
if styp.class_id == class_id:
type_id = i
if type_id == -1: # not found, add type
type_id = len(sf.types)
sf.types.append(
Fake(
class_id=class_id,
is_stripped_type=False,
node=[],
script_type_index=-1,
old_type_hash=generate_16_byte_uid(),
_class=SerializedType,
)
)
# add new object
odata = copy(or_bp)
odata.update(
{
"data": open(fp, "rb").read(),
"path_id": generate_path_id(sf.objects),
"class_id": class_id,
"type_id": type_id,
}
)
sf.objects[path_id] = Fake(**odata, _class=ObjectReader)
def generate_path_id(objects):
while True:
uid = random.randint(-(2 ** 16), 2 ** 16 - 1)
if uid not in objects:
return uid
def generate_16_byte_uid():
return uuid.uuid1().urn[-16:].encode("ascii")
class Fake(object):
"""
fake class for easy class creation without init call
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
if "_class" in kwargs:
self.__class__ = kwargs["_class"]
def save(self):
return self.data
if __name__ == "__main__":
main()
|
86be7745c5efab625290d9fa24d7d14013314323
|
b7d4fc29e02e1379b0d44a756b4697dc19f8a792
|
/deps/boost/tools/build/test/feature_cxxflags.py
|
a4eeb52d41f7df11b728c134173c75c5759e320a
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
vslavik/poedit
|
45140ca86a853db58ddcbe65ab588da3873c4431
|
1b0940b026b429a10f310d98eeeaadfab271d556
|
refs/heads/master
| 2023-08-29T06:24:16.088676
| 2023-08-14T15:48:18
| 2023-08-14T15:48:18
| 477,156
| 1,424
| 275
|
MIT
| 2023-09-01T16:57:47
| 2010-01-18T08:23:13
|
C++
|
UTF-8
|
Python
| false
| false
| 786
|
py
|
feature_cxxflags.py
|
#!/usr/bin/python
# Copyright 2014 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests the cxxflags feature
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# cxxflags should be applied to C++ compilation,
# but not to C.
t.write("Jamroot.jam", """
obj test-cpp : test.cpp : <cxxflags>-DOKAY ;
obj test-c : test.c : <cxxflags>-DBAD ;
""")
t.write("test.cpp", """
#ifndef OKAY
#error Cannot compile without OKAY
#endif
""")
t.write("test.c", """
#ifdef BAD
#error Cannot compile with BAD
#endif
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/test-cpp.obj")
t.expect_addition("bin/$toolset/debug*/test-c.obj")
t.cleanup()
|
b7a019503730058d23e461796a9f46a7b1320461
|
916c1313c623c799e98d1bd897b3aef510172639
|
/py/abd/abdi_repo.py
|
bda523ec17789a4a5112d3ab4e74c91b0d8861bb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bloomberg/phabricator-tools
|
377ba3dba299c5d21a015bb039ae920fae5478ef
|
09bd1587fe8945d93a891162fd4c89640c6fada7
|
refs/heads/master
| 2021-01-02T19:43:48.274684
| 2019-01-11T13:34:55
| 2019-01-11T13:34:55
| 8,464,182
| 154
| 40
|
Apache-2.0
| 2022-02-14T09:57:48
| 2013-02-27T20:02:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,867
|
py
|
abdi_repo.py
|
"""Manage git repositories watched by arcyd."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_repo
#
# Public Functions:
# setup_repo
# setup_repo_context
# try_push_special_refs
# is_remote_reserve_branch_present
# ensure_reserve_branch
# is_legacy_landinglog_branch_present
# remove_landinglog
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import shutil
import phlgit_branch
import phlgit_checkout
import phlgit_commit
import phlgit_push
import phlgitu_ref
import phlgitx_ignoreattributes
import phlsys_git
import phlsys_subprocess
import abdt_git
_RESERVE_BRANCH_FQ_NAME = 'refs/heads/dev/arcyd/reserve'
_LEGACY_LANDINGLOG_NAME = 'refs/arcyd/landinglog'
# The lines in this string are wrapped as appropriate for a commit message
_RESERVE_BRANCH_MESSAGE = """
Reserve the 'dev/arcyd/' branch namespace
This branch is created to reserve the 'dev/arcyd/' namespace so that
arcyd may create its tracker branches there.
If we didn't do this then it would be possible to create a branch,
e.g. 'dev', which would subsequently stop any 'dev/*' branches
being created.
""".strip()
def setup_repo(repo_url, repo_path, repo_push_url=None):
"""Setup a repository, if an exception is raised then remove the repo.
:repo_url: string url of the repo to clone
:repo_path: string path to clone the repo to
:repo_push_url: string url to push to, or None
:returns: None
"""
with setup_repo_context(repo_url, repo_path, repo_push_url):
pass
@contextlib.contextmanager
def setup_repo_context(repo_url, repo_path, repo_push_url=None):
"""Setup a repository, if an exception is raised then remove the repo.
:repo_url: string url of the repo to clone
:repo_path: string path to clone the repo to
:repo_push_url: string url to push to, or None
:returns: None
"""
# if there's any failure after cloning then we should remove the repo
if repo_push_url is not None:
phlsys_subprocess.run(
'git', 'clone', repo_url, repo_path,
'--config', 'remote.origin.pushurl=' + repo_push_url)
else:
phlsys_subprocess.run(
'git', 'clone', repo_url, repo_path)
try:
repo = phlsys_git.Repo(repo_path)
# make sure we have no problems with 'ident' strings, we won't build
# from arcyd so it shouldn't be externally visible that we don't expand
# them.
phlgitx_ignoreattributes.ensure_repo_ignoring(repo_path)
# test pushing to master
repo('checkout', 'origin/master')
phlgit_commit.allow_empty(repo, 'test commit for pushing')
repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
repo('checkout', '-')
try_push_special_refs(repo)
# fetch the 'landed' and 'abandoned' refs if they exist
abdt_git.checkout_master_fetch_special_refs(repo, 'origin')
ensure_reserve_branch(repo)
# success, allow the caller to do work
yield
except Exception:
# clean up the git repo on any exception
shutil.rmtree(repo_path)
raise
def try_push_special_refs(repo):
"""Try pushing to the special refs that arcyd uses.
Allow errors to raise through.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: None
"""
# test pushing to the 'private' dev/arcyd/ area, where arcyd will store
    # its tracker branches
repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')
# test pushing to the refs/arcyd area, where the 'landed' and 'abandoned'
# archive branches will live
repo('push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')
def is_remote_reserve_branch_present(repo):
"""Return True if the remote for 'repo' is reserving 'dev/arcyd/*'.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: True or False
"""
reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)
remote_ref_names = repo("ls-remote").split()[1::2]
return reserve_name.fq in remote_ref_names
def ensure_reserve_branch(repo):
"""Ensure that the supplied 'repo' remote has the reserve branch.
    This prevents the problem where someone pushes a branch named 'dev',
    which would block arcyd's tracker branches from being created.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: None
"""
reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)
if not is_remote_reserve_branch_present(repo):
phlgit_checkout.orphan_clean(repo, reserve_name.short)
phlgit_commit.allow_empty(repo, _RESERVE_BRANCH_MESSAGE)
phlgit_push.push(repo, reserve_name.short, 'origin')
phlgit_checkout.previous_branch(repo)
phlgit_branch.force_delete(repo, reserve_name.short)
def is_legacy_landinglog_branch_present(repo):
"""Return True if the remote for 'repo' has the legacy landing log ref.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: True or False
"""
legacy_landinglog_name = phlgitu_ref.Name(_LEGACY_LANDINGLOG_NAME)
remote_ref_names = repo("ls-remote").split()[1::2]
return legacy_landinglog_name.fq in remote_ref_names
def remove_landinglog(repo):
"""Remove the legacy landing log ref for the 'origin' of 'repo'.
Behaviour is undefined if the landing log ref is not present, use
'is_legacy_landinglog_branch_present' to determine this first.
:repo: a callable supporting git commands, e.g. repo("status")
:returns: None
"""
legacy_landinglog_name = phlgitu_ref.Name(_LEGACY_LANDINGLOG_NAME)
phlgit_push.delete(repo, 'origin', legacy_landinglog_name.fq)
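# Hypothetical usage sketch (a 'repo' here is a phlsys_git.Repo, which is
# callable with git arguments, as the docstrings above describe):
#
#     repo = phlsys_git.Repo('/path/to/clone')
#     if is_legacy_landinglog_branch_present(repo):
#         remove_landinglog(repo)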
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
729adbc8eca3fbbc0a13326442c26262410ed3db
|
0fe0ffe29ca6f76c6f15c85c8d82b09beaada246
|
/third_party/catapult/systrace/profile_chrome/chrome_controller.py
|
ec4a59880766ce3528189cc68ecba050d3b9957a
|
[
"BSD-3-Clause"
] |
permissive
|
hanpfei/chromium-net
|
4dc8fd48cf3b05d89b11dc121f9c3abdd3ba962e
|
9df8ce98c2a14fb60c2f581853011e32eb4bed0f
|
refs/heads/master
| 2023-07-08T15:28:01.033104
| 2023-06-14T13:02:39
| 2023-06-14T13:02:39
| 65,541,033
| 297
| 73
| null | 2022-11-02T23:33:48
| 2016-08-12T09:25:34
|
C++
|
UTF-8
|
Python
| false
| false
| 4,319
|
py
|
chrome_controller.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import time
from profile_chrome import controllers
from devil.android import device_errors
from devil.android.sdk import intent
_HEAP_PROFILE_MMAP_PROPERTY = 'heapprof.mmap'
class ChromeTracingController(controllers.BaseController):
def __init__(self, device, package_info,
categories, ring_buffer, trace_memory=False):
controllers.BaseController.__init__(self)
self._device = device
self._package_info = package_info
self._categories = categories
self._ring_buffer = ring_buffer
self._logcat_monitor = self._device.GetLogcatMonitor()
self._trace_file = None
self._trace_interval = None
self._trace_memory = trace_memory
self._is_tracing = False
self._trace_start_re = \
re.compile(r'Logging performance trace to file')
self._trace_finish_re = \
re.compile(r'Profiler finished[.] Results are in (.*)[.]')
def __repr__(self):
return 'chrome trace'
@staticmethod
def GetCategories(device, package_info):
with device.GetLogcatMonitor() as logmon:
device.BroadcastIntent(intent.Intent(
action='%s.GPU_PROFILER_LIST_CATEGORIES' % package_info.package))
try:
json_category_list = logmon.WaitFor(
re.compile(r'{"traceCategoriesList(.*)'), timeout=5).group(0)
except device_errors.CommandTimeoutError:
raise RuntimeError('Performance trace category list marker not found. '
'Is the correct version of the browser running?')
record_categories = set()
disabled_by_default_categories = set()
json_data = json.loads(json_category_list)['traceCategoriesList']
for item in json_data:
for category in item.split(','):
if category.startswith('disabled-by-default'):
disabled_by_default_categories.add(category)
else:
record_categories.add(category)
return list(record_categories), list(disabled_by_default_categories)
def StartTracing(self, interval):
self._trace_interval = interval
self._logcat_monitor.Start()
start_extras = {'categories': ','.join(self._categories)}
if self._ring_buffer:
start_extras['continuous'] = None
self._device.BroadcastIntent(intent.Intent(
action='%s.GPU_PROFILER_START' % self._package_info.package,
extras=start_extras))
if self._trace_memory:
self._device.EnableRoot()
self._device.SetProp(_HEAP_PROFILE_MMAP_PROPERTY, 1)
# Chrome logs two different messages related to tracing:
#
# 1. "Logging performance trace to file"
# 2. "Profiler finished. Results are in [...]"
#
# The first one is printed when tracing starts and the second one indicates
# that the trace file is ready to be pulled.
try:
self._logcat_monitor.WaitFor(self._trace_start_re, timeout=5)
self._is_tracing = True
except device_errors.CommandTimeoutError:
raise RuntimeError(
'Trace start marker not found. Possible causes: 1) Is the correct '
'version of the browser running? 2) Is the browser already launched?')
def StopTracing(self):
if self._is_tracing:
self._device.BroadcastIntent(intent.Intent(
action='%s.GPU_PROFILER_STOP' % self._package_info.package))
self._trace_file = self._logcat_monitor.WaitFor(
self._trace_finish_re, timeout=120).group(1)
self._is_tracing = False
if self._trace_memory:
self._device.SetProp(_HEAP_PROFILE_MMAP_PROPERTY, 0)
def PullTrace(self):
# Wait a bit for the browser to finish writing the trace file.
time.sleep(self._trace_interval / 4 + 1)
trace_file = self._trace_file.replace('/storage/emulated/0/', '/sdcard/')
host_file = os.path.join(os.path.curdir, os.path.basename(trace_file))
try:
self._device.PullFile(trace_file, host_file)
except device_errors.AdbCommandFailedError:
raise RuntimeError(
'Cannot pull the trace file. Have you granted Storage permission to '
'the browser? (Android Settings -> Apps -> [the browser app] -> '
'Permissions -> Storage)')
return host_file
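# Hypothetical usage sketch (device and package_info come from the
# devil / profile_chrome stack; the values here are illustrative):
#
#     controller = ChromeTracingController(
#         device, package_info, categories=['gpu'], ring_buffer=False)
#     controller.StartTracing(interval=5)
#     ...  # exercise the browser
#     controller.StopTracing()
#     host_trace = controller.PullTrace()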
|
70798f9574e5a09277980ddaa7afac763bec5670
|
d95f049c5ae622790367085957d9d13a0aa85351
|
/tests/input/eps/test_eps.py
|
b0ab67c84f969acdc7a71df701585ade70200bed
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
algoo/preview-generator
|
42b11937723d78104b2232558bdc2e979fb4de93
|
9948f6555d827602024f98478dad8057e65aeae6
|
refs/heads/develop
| 2023-09-01T16:19:45.302197
| 2023-02-17T15:06:30
| 2023-02-17T15:06:30
| 90,982,830
| 214
| 60
|
MIT
| 2023-08-28T22:38:14
| 2017-05-11T13:33:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
test_eps.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import typing
from PIL import Image
import pytest
from wand.exceptions import PolicyError
from preview_generator.exception import UnavailablePreviewType
from preview_generator.manager import PreviewManager
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CACHE_DIR = "/tmp/preview-generator-tests/cache"
IMAGE_FILE_PATH = os.path.join(CURRENT_DIR, "algoo.eps")
def setup_function(function: typing.Callable) -> None:
shutil.rmtree(CACHE_DIR, ignore_errors=True)
def test_to_jpeg() -> None:
try:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=IMAGE_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(
file_path=IMAGE_FILE_PATH, height=512, width=321, force=True
)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 321
assert jpeg.width == 321
except PolicyError:
pytest.skip("You must update ImageMagic policy file to allow EPS convert")
def test_to_jpeg_no_size() -> None:
try:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_jpeg_preview(file_path=IMAGE_FILE_PATH) is True
path_to_file = manager.get_jpeg_preview(file_path=IMAGE_FILE_PATH, force=True)
assert os.path.exists(path_to_file) is True
assert os.path.getsize(path_to_file) > 0
with Image.open(path_to_file) as jpeg:
assert jpeg.height == 256
assert jpeg.width == 256
except PolicyError:
pytest.skip("You must update ImageMagic policy file to allow EPS convert")
def test_to_text() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_text_preview(file_path=IMAGE_FILE_PATH) is False
with pytest.raises(UnavailablePreviewType):
manager.get_text_preview(file_path=IMAGE_FILE_PATH, force=True)
def test_to_json() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_json_preview(file_path=IMAGE_FILE_PATH) is True
manager.get_json_preview(file_path=IMAGE_FILE_PATH, force=True)
# TODO - G.M - 2018-11-06 - To be completed
def test_to_pdf() -> None:
manager = PreviewManager(cache_folder_path=CACHE_DIR, create_folder=True)
assert manager.has_pdf_preview(file_path=IMAGE_FILE_PATH) is False
with pytest.raises(UnavailablePreviewType):
manager.get_pdf_preview(file_path=IMAGE_FILE_PATH, force=True)
|
6407ac1c80ae744fed6e3bb6f0c2f2bc8e285766
|
c7c73566784a7896100e993606e1bd8fdd0ea94e
|
/tests/event/test_pythontask.py
|
d6806b4f723b54f93b8236b36932651ca40304b2
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
panda3d/panda3d
|
c3f94df2206ff7cfe4a3b370777a56fb11a07926
|
160ba090a5e80068f61f34fc3d6f49dbb6ad52c5
|
refs/heads/master
| 2023-08-21T13:23:16.904756
| 2021-04-11T22:55:33
| 2023-08-06T06:09:32
| 13,212,165
| 4,417
| 1,072
|
NOASSERTION
| 2023-09-09T19:26:14
| 2013-09-30T10:20:25
|
C++
|
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
test_pythontask.py
|
from panda3d.core import PythonTask
import pytest
import types
import sys
def test_pythontask_property_builtin():
task = PythonTask()
# Read-write property
assert task.name == ""
task.name = "ABC"
# Read-only property
assert task.dt == 0.0
with pytest.raises(AttributeError):
task.dt = 1.0
assert task.dt == 0.0
# Non-existent property
with pytest.raises(AttributeError):
task.abc
def test_pythontask_property_custom():
task = PythonTask()
assert not hasattr(task, 'custom_field')
task.custom_field = 1.0
assert hasattr(task, 'custom_field')
assert task.custom_field == 1.0
task.custom_field = 2.0
assert task.custom_field == 2.0
del task.custom_field
assert not hasattr(task, 'custom_field')
def test_pythontask_property_override():
task = PythonTask()
assert isinstance(task.gather, types.BuiltinMethodType)
task.gather = 123
assert task.gather == 123
del task.gather
assert isinstance(task.gather, types.BuiltinMethodType)
def test_pythontask_dict_get():
task = PythonTask()
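    # Repeated __dict__ access must not leak references: after two further
    # accesses, the dict's refcount should be unchanged.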
d = task.__dict__
rc1 = sys.getrefcount(d)
task.__dict__
task.__dict__
rc2 = sys.getrefcount(d)
assert rc1 == rc2
def test_pythontask_dict_set():
task = PythonTask()
d = {}
rc1 = sys.getrefcount(d)
task.__dict__ = d
rc2 = sys.getrefcount(d)
assert rc1 + 1 == rc2
task.__dict__ = {}
rc2 = sys.getrefcount(d)
assert rc1 == rc2
|
5089bdcd8059f06f0232f55e1df6c586f54eb55c
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/tensor/base/unique.py
|
e8a08959f0a3d88cb35395af171e1d344f7cf66e
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 21,936
|
py
|
unique.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from ... import opcodes as OperandDef
from ...config import options
from ...core import recursive_tile
from ...core.operand import OperandStage
from ...lib import sparse
from ...lib.sparse.core import get_array_module as get_sparse_array_module
from ...serialization.serializables import BoolField, Int32Field, Int64Field
from ...utils import has_unknown_shape
from ..operands import TensorMapReduceOperand, TensorOperandMixin, TensorShuffleProxy
from ..array_utils import as_same_device, device
from ..core import TensorOrder
from ..utils import validate_axis, hash_on_axis
class TensorUnique(TensorMapReduceOperand, TensorOperandMixin):
_op_type_ = OperandDef.UNIQUE
_return_index = BoolField("return_index")
_return_inverse = BoolField("return_inverse")
_return_counts = BoolField("return_counts")
_axis = Int32Field("axis")
_aggregate_size = Int32Field("aggregate_size")
_start_pos = Int64Field("start_pos")
def __init__(
self,
return_index=None,
return_inverse=None,
return_counts=None,
axis=None,
start_pos=None,
aggregate_size=None,
**kw
):
super().__init__(
_return_index=return_index,
_return_inverse=return_inverse,
_return_counts=return_counts,
_axis=axis,
_start_pos=start_pos,
_aggregate_size=aggregate_size,
**kw
)
@property
def output_limit(self):
if self.stage == OperandStage.map:
return 1
return (
1
+ bool(self._return_index)
+ bool(self._return_inverse)
+ bool(self._return_counts)
)
@property
def return_index(self):
return self._return_index
@property
def return_inverse(self):
return self._return_inverse
@property
def return_counts(self):
return self._return_counts
@property
def axis(self):
return self._axis
@property
def aggregate_size(self):
return self._aggregate_size
@property
def start_pos(self):
return self._start_pos
@classmethod
def _gen_kws(cls, op, input_obj, chunk=False, chunk_index=None):
kws = []
# unique tensor
shape = list(input_obj.shape)
shape[op.axis] = np.nan
kw = {"shape": tuple(shape), "dtype": input_obj.dtype, "gpu": input_obj.op.gpu}
if chunk:
idx = [0] * len(shape)
idx[op.axis] = chunk_index or 0
kw["index"] = tuple(idx)
kws.append(kw)
# unique indices tensor
if op.return_index:
kw = {
"shape": (np.nan,),
"dtype": np.dtype(np.intp),
"gpu": input_obj.op.gpu,
"type": "indices",
}
if chunk:
kw["index"] = (chunk_index or 0,)
kws.append(kw)
# unique inverse tensor
if op.return_inverse:
kw = {
"shape": (input_obj.shape[op.axis],),
"dtype": np.dtype(np.intp),
"gpu": input_obj.op.gpu,
"type": "inverse",
}
if chunk:
kw["index"] = (chunk_index or 0,)
kws.append(kw)
# unique counts tensor
if op.return_counts:
kw = {
"shape": (np.nan,),
"dtype": np.dtype(np.int_),
"gpu": input_obj.op.gpu,
"type": "counts",
}
if chunk:
kw["index"] = (chunk_index or 0,)
kws.append(kw)
return kws
def __call__(self, ar):
from .atleast_1d import atleast_1d
ar = atleast_1d(ar)
if self.axis is None:
if ar.ndim > 1:
ar = ar.flatten()
self._axis = 0
else:
self._axis = validate_axis(ar.ndim, self._axis)
kws = self._gen_kws(self, ar)
tensors = self.new_tensors([ar], kws=kws, order=TensorOrder.C_ORDER)
if len(tensors) == 1:
return tensors[0]
return tensors
@classmethod
def _tile_one_chunk(cls, op):
outs = op.outputs
ins = op.inputs
chunk_op = op.copy().reset_key()
in_chunk = ins[0].chunks[0]
kws = cls._gen_kws(chunk_op, in_chunk, chunk=True)
out_chunks = chunk_op.new_chunks([in_chunk], kws=kws, order=outs[0].order)
new_op = op.copy()
kws = [out.params.copy() for out in outs]
for kw, out_chunk in zip(kws, out_chunks):
kw["chunks"] = [out_chunk]
kw["nsplits"] = tuple((s,) for s in out_chunk.shape)
return new_op.new_tensors(ins, kws=kws, order=outs[0].order)
@classmethod
def _tile_via_shuffle(cls, op):
        # rechunk all axes except the unique axis into a single chunk
inp = op.inputs[0]
if has_unknown_shape(inp):
yield
if inp.ndim > 1:
new_chunk_size = dict()
for axis in range(inp.ndim):
if axis == op.axis:
continue
if np.isnan(inp.shape[axis]):
yield
new_chunk_size[axis] = inp.shape[axis]
if has_unknown_shape(inp):
yield
inp = yield from recursive_tile(inp.rechunk(new_chunk_size))
aggregate_size = op.aggregate_size
if aggregate_size is None:
aggregate_size = max(inp.chunk_shape[op.axis] // options.combine_size, 1)
unique_on_chunk_sizes = inp.nsplits[op.axis]
start_poses = np.cumsum((0,) + unique_on_chunk_sizes).tolist()[:-1]
map_chunks = []
for c in inp.chunks:
map_op = TensorUnique(
stage=OperandStage.map,
return_index=op.return_index,
return_inverse=op.return_inverse,
return_counts=op.return_counts,
axis=op.axis,
aggregate_size=aggregate_size,
start_pos=start_poses[c.index[op.axis]],
dtype=inp.dtype,
)
shape = list(c.shape)
shape[op.axis] = np.nan
map_chunks.append(map_op.new_chunk([c], shape=tuple(shape), index=c.index))
shuffle_chunk = TensorShuffleProxy(
dtype=inp.dtype, _tensor_keys=[inp.op.key]
).new_chunk(map_chunks, shape=())
reduce_chunks = [list() for _ in range(len(op.outputs))]
for i in range(aggregate_size):
reduce_op = TensorUnique(
stage=OperandStage.reduce,
return_index=op.return_index,
return_inverse=op.return_inverse,
return_counts=op.return_counts,
axis=op.axis,
reducer_index=(i,),
reducer_phase="agg",
n_reducers=aggregate_size,
)
kws = cls._gen_kws(op, inp, chunk=True, chunk_index=i)
chunks = reduce_op.new_chunks(
[shuffle_chunk], kws=kws, order=op.outputs[0].order
)
if op.return_inverse:
inverse_idx = 2 if op.return_index else 1
for j, chk in enumerate(chunks):
if j == inverse_idx:
chk.is_mapper = True
else:
chk.is_mapper = False
for j, c in enumerate(chunks):
reduce_chunks[j].append(c)
if op.return_inverse:
inverse_pos = 2 if op.return_index else 1
map_inverse_chunks = reduce_chunks[inverse_pos]
inverse_shuffle_chunk = TensorShuffleProxy(
dtype=map_inverse_chunks[0].dtype
).new_chunk(map_inverse_chunks, shape=())
inverse_chunks = []
for j, cs in enumerate(unique_on_chunk_sizes):
chunk_op = TensorUnique(
stage=OperandStage.reduce,
n_reducers=len(unique_on_chunk_sizes),
dtype=map_inverse_chunks[0].dtype,
reducer_index=(j,),
reducer_phase="inverse",
)
inverse_chunk = chunk_op.new_chunk(
[inverse_shuffle_chunk], shape=(cs,), index=(j,)
)
inverse_chunks.append(inverse_chunk)
reduce_chunks[inverse_pos] = inverse_chunks
kws = [out.params for out in op.outputs]
for kw, chunks in zip(kws, reduce_chunks):
kw["chunks"] = chunks
unique_nsplits = list(inp.nsplits)
unique_nsplits[op.axis] = (np.nan,) * len(reduce_chunks[0])
kws[0]["nsplits"] = tuple(unique_nsplits)
i = 1
if op.return_index:
kws[i]["nsplits"] = ((np.nan,) * len(reduce_chunks[i]),)
i += 1
if op.return_inverse:
kws[i]["nsplits"] = (inp.nsplits[op.axis],)
i += 1
if op.return_counts:
kws[i]["nsplits"] = ((np.nan,) * len(reduce_chunks[i]),)
new_op = op.copy()
return new_op.new_tensors(op.inputs, kws=kws)
@classmethod
def tile(cls, op: "TensorUnique"):
if len(op.inputs[0].chunks) == 1:
return cls._tile_one_chunk(op)
else:
return (yield from cls._tile_via_shuffle(op))
@classmethod
def _execute_map(cls, ctx, op: "TensorUnique"):
(ar,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
n_reducers = op.aggregate_size
with device(device_id):
results = xp.unique(
ar,
return_index=op.return_index,
return_inverse=op.return_inverse,
return_counts=op.return_counts,
axis=op.axis,
)
results = (results,) if not isinstance(results, tuple) else results
results_iter = iter(results)
unique_ar = next(results_iter)
indices_ar = next(results_iter) + op.start_pos if op.return_index else None
inverse_ar = next(results_iter) if op.return_inverse else None
counts_ar = next(results_iter) if op.return_counts else None
if xp is sparse:
dense_xp = get_sparse_array_module(unique_ar)
else:
dense_xp = xp
unique_index = (
dense_xp.arange(unique_ar.shape[op.axis])
if inverse_ar is not None
else None
)
if unique_ar.size > 0:
unique_reducers = dense_xp.asarray(
hash_on_axis(unique_ar, op.axis, n_reducers)
)
else:
unique_reducers = dense_xp.empty_like(unique_ar)
ind_ar = dense_xp.arange(ar.shape[op.axis])
for reducer in range(n_reducers):
res = []
cond = unique_reducers == reducer
# unique
slc = (slice(None),) * op.axis + (cond,)
res.append(unique_ar[slc])
# indices
if indices_ar is not None:
res.append(indices_ar[cond])
# inverse
if inverse_ar is not None:
index_selected = unique_index[cond]
inv_cond = xp.isin(inverse_ar, index_selected)
inv_selected = xp.searchsorted(index_selected, inverse_ar[inv_cond])
ind_selected = ind_ar[inv_cond]
res.append(xp.stack([ind_selected, inv_selected]))
# counts
if counts_ar is not None:
res.append(counts_ar[cond])
ctx[op.outputs[0].key, (reducer,)] = (
ctx.get_current_chunk().index,
tuple(res),
)
@classmethod
def _execute_agg_reduce(cls, ctx, op: "TensorUnique"):
input_indexes, input_data = zip(*list(op.iter_mapper_data(ctx)))
inputs = list(zip(*input_data))
flatten, device_id, xp = as_same_device(
list(itertools.chain(*inputs)), device=op.device, ret_extra=True
)
n_ret = len(inputs[0])
inputs = [flatten[i * n_ret : (i + 1) * n_ret] for i in range(len(inputs))]
inputs_iter = iter(inputs)
unique_arrays = next(inputs_iter)
indices_arrays = next(inputs_iter) if op.return_index else None
inverse_arrays = next(inputs_iter) if op.return_inverse else None
counts_arrays = next(inputs_iter) if op.return_counts else None
with device(device_id):
ar = xp.concatenate(unique_arrays, axis=op.axis)
result_return_inverse = op.return_inverse or op.return_counts
axis = op.axis
if ar.size == 0 or ar.shape[axis] == 0:
# empty array on the axis
results = [xp.empty(ar.shape)]
i = 1
for it in (op.return_index, op.return_inverse, op.return_counts):
if it:
results.append(xp.empty([], dtype=op.outputs[i].dtype))
i += 1
results = tuple(results)
else:
results = xp.unique(
ar,
return_index=op.return_index,
return_inverse=result_return_inverse,
axis=axis,
)
results = (results,) if not isinstance(results, tuple) else results
results_iter = iter(results)
outputs_iter = iter(op.outputs)
# unique array
ctx[next(outputs_iter).key] = next(results_iter)
if op.output_limit == 1:
return
# calc indices
if op.return_index:
ctx[next(outputs_iter).key] = xp.concatenate(indices_arrays)[
next(results_iter)
]
# calc inverse
try:
inverse_result = next(results_iter)
if op.return_inverse:
unique_sizes = tuple(ua.shape[op.axis] for ua in unique_arrays)
cum_unique_sizes = np.cumsum((0,) + unique_sizes)
indices_out_key = next(outputs_iter).key
for i, inverse_array in enumerate(inverse_arrays):
p = inverse_result[
cum_unique_sizes[i] : cum_unique_sizes[i + 1]
]
r = xp.empty(inverse_array.shape, dtype=inverse_array.dtype)
if inverse_array.size > 0:
r[0] = inverse_array[0]
r[1] = p[inverse_array[1]]
                        # return unique length and the remapped inverse indices
ctx[indices_out_key, (input_indexes[i][op.axis],)] = (
results[0].shape[op.axis],
r,
)
# calc counts
if op.return_counts:
result_counts = xp.zeros(results[0].shape[op.axis], dtype=int)
t = np.stack([inverse_result, np.concatenate(counts_arrays)])
def acc(a):
i, v = a
result_counts[i] += v
np.apply_along_axis(acc, 0, t)
ctx[next(outputs_iter).key] = xp.asarray(result_counts)
except StopIteration:
pass
@classmethod
def _execute_inverse_reduce(cls, ctx, op: "TensorUnique"):
out = op.outputs[0]
inputs = list(op.iter_mapper_data(ctx))
unique_sizes = [inp[0] for inp in inputs]
cum_unique_sizes = np.cumsum([0] + unique_sizes)
invs, device_id, xp = as_same_device(
[inp[1] for inp in inputs], device=op.device, ret_extra=True
)
with device(device_id):
ret = xp.empty(out.shape, dtype=out.dtype)
for i, inv in enumerate(invs):
ret[inv[0]] = cum_unique_sizes[i] + inv[1]
ctx[out.key] = ret
@classmethod
def execute(cls, ctx, op: "TensorUnique"):
if op.stage == OperandStage.map:
cls._execute_map(ctx, op)
elif op.stage == OperandStage.reduce:
if op.reducer_phase == "agg":
cls._execute_agg_reduce(ctx, op)
else:
assert op.reducer_phase == "inverse"
cls._execute_inverse_reduce(ctx, op)
else:
(ar,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
kw = dict(
return_index=op.return_index,
return_inverse=op.return_inverse,
return_counts=op.return_counts,
)
if ar.dtype != object and sum(ar.shape) > 0:
                # axis cannot be passed when dtype is object or the array size is 0
kw["axis"] = op.axis
results = xp.unique(ar, **kw)
outs = op.outputs
if len(outs) == 1:
ctx[outs[0].key] = results
return
assert len(outs) == len(results)
for out, result in zip(outs, results):
ctx[out.key] = result
def unique(
ar,
return_index=False,
return_inverse=False,
return_counts=False,
axis=None,
aggregate_size=None,
):
"""
Find the unique elements of a tensor.
Returns the sorted unique elements of a tensor. There are three optional
outputs in addition to the unique elements:
* the indices of the input tensor that give the unique values
* the indices of the unique tensor that reconstruct the input tensor
* the number of times each unique value comes up in the input tensor
Parameters
----------
ar : array_like
Input tensor. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened tensor) that result in the unique tensor.
return_inverse : bool, optional
If True, also return the indices of the unique tensor (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D tensor with the dimension of the given axis,
see the notes for more details. Object tensors or structured tensors
that contain objects are not supported if the `axis` kwarg is used. The
default is None.
aggregate_size: int or None, optional
        Number of chunks after the unique operation; defaults to
        ``#input.chunks / options.combine_size``.
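        For example, with 16 chunks along the unique axis and
        ``options.combine_size == 4``, the default is ``max(16 // 4, 1) == 4``
        reduce chunks (see ``_tile_via_shuffle``).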
Returns
-------
unique : Tensor
The sorted unique values.
unique_indices : Tensor, optional
The indices of the first occurrences of the unique values in the
original tensor. Only provided if `return_index` is True.
unique_inverse : Tensor, optional
The indices to reconstruct the original tensor from the
unique tensor. Only provided if `return_inverse` is True.
unique_counts : Tensor, optional
The number of times each of the unique values comes up in the
original tensor. Only provided if `return_counts` is True.
Examples
--------
>>> import mars.tensor as mt
>>> mt.unique([1, 1, 2, 2, 3, 3]).execute()
array([1, 2, 3])
>>> a = mt.array([[1, 1], [2, 3]])
>>> mt.unique(a).execute()
array([1, 2, 3])
Return the unique rows of a 2D tensor
>>> a = mt.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> mt.unique(a, axis=0).execute()
array([[1, 0, 0], [2, 3, 4]])
Return the indices of the original tensor that give the unique values:
>>> a = mt.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = mt.unique(a, return_index=True)
>>> u.execute()
array(['a', 'b', 'c'],
dtype='|S1')
>>> indices.execute()
array([0, 1, 3])
>>> a[indices].execute()
array(['a', 'b', 'c'],
dtype='|S1')
Reconstruct the input array from the unique values:
>>> a = mt.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = mt.unique(a, return_inverse=True)
>>> u.execute()
array([1, 2, 3, 4, 6])
>>> indices.execute()
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices].execute()
array([1, 2, 6, 4, 2, 3, 2])
"""
op = TensorUnique(
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
axis=axis,
aggregate_size=aggregate_size,
)
return op(ar)
|
56e93c39137594fd90766374a819fe5998c83c48
|
7607429663127ad7429d0815006dbb6aba2f3fd0
|
/test/pyom_consistency/acc_setup_test.py
|
01e2b4a6d03b2388e853ab2299ba2a92c9e9a682
|
[
"MIT"
] |
permissive
|
team-ocean/veros
|
df0d0e467579a09a973f45bc1b267aca9609e93a
|
416dbdac43b8ce80ce09171a3bd3fdb4814abae0
|
refs/heads/main
| 2023-08-05T03:39:31.640695
| 2023-08-03T08:00:37
| 2023-08-03T08:00:37
| 87,419,383
| 168
| 31
|
MIT
| 2023-09-14T04:50:21
| 2017-04-06T10:59:21
|
Python
|
UTF-8
|
Python
| false
| false
| 9,307
|
py
|
acc_setup_test.py
|
import numpy as np
from veros import VerosSetup, veros_routine
from veros.variables import allocate, Variable
from veros.core.operators import numpy as npx, update, at
from veros.pyom_compat import load_pyom, setup_pyom
from test_base import compare_state
yt_start = -39.0
yt_end = 43
yu_start = -40.0
yu_end = 42
def set_parameter_pyom(pyom_obj):
m = pyom_obj.main_module
(m.nx, m.ny, m.nz) = (30, 42, 15)
m.dt_mom = 4800
m.dt_tracer = 86400 / 2.0
m.runlen = 86400 * 365
m.coord_degree = 1
m.enable_cyclic_x = 1
m.congr_epsilon = 1e-8
m.congr_max_iterations = 10_000
m.ab_eps = 0.1
i = pyom_obj.isoneutral_module
i.enable_neutral_diffusion = 1
i.k_iso_0 = 1000.0
i.k_iso_steep = 500.0
i.iso_dslope = 0.005
i.iso_slopec = 0.01
i.enable_skew_diffusion = 1
m.enable_hor_friction = 1
m.a_h = 2.2e5
m.enable_hor_friction_cos_scaling = 1
m.hor_friction_cospower = 1
m.enable_bottom_friction = 1
m.r_bot = 1e-5
m.enable_streamfunction = True
m.enable_implicit_vert_friction = 1
t = pyom_obj.tke_module
t.enable_tke = 1
t.c_k = 0.1
t.c_eps = 0.7
t.alpha_tke = 30.0
t.mxl_min = 1e-8
t.tke_mxl_choice = 2
t.kappam_min = 2e-4
i.k_gm_0 = 1000.0
e = pyom_obj.eke_module
e.enable_eke = 1
e.eke_k_max = 1e4
e.eke_c_k = 0.4
e.eke_c_eps = 0.5
e.eke_cross = 2.0
e.eke_crhin = 1.0
e.eke_lmin = 100.0
e.enable_eke_superbee_advection = 1
e.enable_eke_isopycnal_diffusion = 1
i = pyom_obj.idemix_module
i.enable_idemix = 1
i.enable_idemix_hor_diffusion = 1
i.enable_eke_diss_surfbot = 1
i.eke_diss_surfbot_frac = 0.2
i.enable_idemix_superbee_advection = 1
i.tau_v = 86400.0
i.jstar = 10.0
i.mu0 = 4.0 / 3.0
i.gamma = 1.57
m.eq_of_state_type = 3
def set_grid_pyom(pyom_obj):
m = pyom_obj.main_module
ddz = [50.0, 70.0, 100.0, 140.0, 190.0, 240.0, 290.0, 340.0, 390.0, 440.0, 490.0, 540.0, 590.0, 640.0, 690.0]
m.dxt[:] = 2.0
m.dyt[:] = 2.0
m.x_origin = 0.0
m.y_origin = -40.0
m.dzt[:] = ddz[::-1]
m.dzt[:] *= 1 / 2.5
def set_coriolis_pyom(pyom_obj):
m = pyom_obj.main_module
m.coriolis_t[:, :] = 2 * m.omega * np.sin(m.yt[None, :] / 180.0 * np.pi)
def set_topography_pyom(pyom_obj):
m = pyom_obj.main_module
(X, Y) = np.meshgrid(m.xt, m.yt)
X = X.transpose()
Y = Y.transpose()
m.kbot[...] = (X > 1.0) | (Y < -20)
def set_initial_conditions_pyom(pyom_obj):
m = pyom_obj.main_module
# initial conditions
m.temp[:, :, :, :] = ((1 - m.zt[None, None, :] / m.zw[0]) * 15 * m.maskt)[..., None]
m.salt[:, :, :, :] = 35.0 * m.maskt[..., None]
# wind stress forcing
taux = np.zeros(m.ny + 1)
yt = m.yt[2 : m.ny + 3]
taux = (0.1e-3 * np.sin(np.pi * (m.yu[2 : m.ny + 3] - yu_start) / (-20.0 - yt_start))) * (yt < -20) + (
0.1e-3 * (1 - np.cos(2 * np.pi * (m.yu[2 : m.ny + 3] - 10.0) / (yu_end - 10.0)))
) * (yt > 10)
m.surface_taux[:, 2 : m.ny + 3] = taux * m.masku[:, 2 : m.ny + 3, -1]
t = pyom_obj.tke_module
t.forc_tke_surface[2:-2, 2:-2] = (
np.sqrt(
(0.5 * (m.surface_taux[2:-2, 2:-2] + m.surface_taux[1:-3, 2:-2])) ** 2
+ (0.5 * (m.surface_tauy[2:-2, 2:-2] + m.surface_tauy[2:-2, 1:-3])) ** 2
)
** 1.5
)
def set_forcing_pyom(pyom_obj):
m = pyom_obj.main_module
t_star = (
15 * np.invert((m.yt < -20) | (m.yt > 20))
+ 15 * (m.yt - yt_start) / (-20 - yt_start) * (m.yt < -20)
+ 15 * (1 - (m.yt - 20) / (yt_end - 20)) * (m.yt > 20.0)
)
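    # t_star is 15 deg C between 20S and 20N and ramps linearly to 0 deg C
    # toward the meridional boundaries (yt_start / yt_end).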
t_rest = m.dzt[None, -1] / (30.0 * 86400.0) * m.maskt[:, :, -1]
m.forc_temp_surface = t_rest * (t_star - m.temp[:, :, -1, m.tau - 1])
class ACCSetup(VerosSetup):
@veros_routine
def set_parameter(self, state):
settings = state.settings
settings.identifier = "acc"
settings.nx, settings.ny, settings.nz = 30, 42, 15
settings.dt_mom = 4800
settings.dt_tracer = 86400 / 2.0
settings.runlen = 86400 * 365
settings.x_origin = 0.0
settings.y_origin = -40.0
settings.coord_degree = True
settings.enable_cyclic_x = True
settings.enable_streamfunction = True
settings.enable_neutral_diffusion = True
settings.K_iso_0 = 1000.0
settings.K_iso_steep = 500.0
settings.iso_dslope = 0.005
settings.iso_slopec = 0.01
settings.enable_skew_diffusion = True
settings.enable_hor_friction = True
settings.A_h = 2.2e5
settings.enable_hor_friction_cos_scaling = True
settings.hor_friction_cosPower = 1
settings.enable_bottom_friction = True
settings.r_bot = 1e-5
settings.enable_implicit_vert_friction = True
settings.enable_tke = True
settings.c_k = 0.1
settings.c_eps = 0.7
settings.alpha_tke = 30.0
settings.mxl_min = 1e-8
settings.tke_mxl_choice = 2
settings.kappaM_min = 2e-4
settings.K_gm_0 = 1000.0
settings.enable_eke = True
settings.eke_k_max = 1e4
settings.eke_c_k = 0.4
settings.eke_c_eps = 0.5
settings.eke_cross = 2.0
settings.eke_crhin = 1.0
settings.eke_lmin = 100.0
settings.enable_eke_superbee_advection = True
settings.enable_eke_isopycnal_diffusion = True
settings.enable_idemix = 1
settings.enable_idemix_hor_diffusion = 1
settings.enable_eke_diss_surfbot = 1
settings.eke_diss_surfbot_frac = 0.2
settings.enable_idemix_superbee_advection = 1
settings.tau_v = 86400.0
settings.jstar = 10.0
settings.mu0 = 4.0 / 3.0
settings.eq_of_state_type = 3
var_meta = state.var_meta
var_meta.update(
t_star=Variable("t_star", ("yt",), "deg C", "Reference surface temperature"),
t_rest=Variable("t_rest", ("xt", "yt"), "1/s", "Surface temperature restoring time scale"),
)
@veros_routine
def set_grid(self, state):
vs = state.variables
ddz = npx.array(
[50.0, 70.0, 100.0, 140.0, 190.0, 240.0, 290.0, 340.0, 390.0, 440.0, 490.0, 540.0, 590.0, 640.0, 690.0]
)
vs.dxt = update(vs.dxt, at[...], 2.0)
vs.dyt = update(vs.dyt, at[...], 2.0)
vs.dzt = update(vs.dzt, at[...], ddz[::-1] / 2.5)
@veros_routine
def set_coriolis(self, state):
vs = state.variables
settings = state.settings
vs.coriolis_t = update(
vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[None, :] / 180.0 * settings.pi)
)
@veros_routine
def set_topography(self, state):
vs = state.variables
x, y = npx.meshgrid(vs.xt, vs.yt, indexing="ij")
vs.kbot = npx.logical_or(x > 1.0, y < -20).astype("int")
@veros_routine
def set_initial_conditions(self, state):
vs = state.variables
settings = state.settings
# initial conditions
vs.temp = update(vs.temp, at[...], ((1 - vs.zt[None, None, :] / vs.zw[0]) * 15 * vs.maskT)[..., None])
vs.salt = update(vs.salt, at[...], 35.0 * vs.maskT[..., None])
# wind stress forcing
taux = allocate(state.dimensions, ("yt",))
taux = npx.where(vs.yt < -20, 0.1e-3 * npx.sin(settings.pi * (vs.yu - yu_start) / (-20.0 - yt_start)), taux)
taux = npx.where(vs.yt > 10, 0.1e-3 * (1 - npx.cos(2 * settings.pi * (vs.yu - 10.0) / (yu_end - 10.0))), taux)
vs.surface_taux = taux * vs.maskU[:, :, -1]
# surface heatflux forcing
vs.t_star = allocate(state.dimensions, ("yt",), fill=15)
vs.t_star = npx.where(vs.yt < -20, 15 * (vs.yt - yt_start) / (-20 - yt_start), vs.t_star)
vs.t_star = npx.where(vs.yt > 20, 15 * (1 - (vs.yt - 20) / (yt_end - 20)), vs.t_star)
vs.t_rest = vs.dzt[npx.newaxis, -1] / (30.0 * 86400.0) * vs.maskT[:, :, -1]
if settings.enable_tke:
vs.forc_tke_surface = update(
vs.forc_tke_surface,
at[2:-2, 2:-2],
npx.sqrt(
(0.5 * (vs.surface_taux[2:-2, 2:-2] + vs.surface_taux[1:-3, 2:-2])) ** 2
+ (0.5 * (vs.surface_tauy[2:-2, 2:-2] + vs.surface_tauy[2:-2, 1:-3])) ** 2
)
** (1.5),
)
@veros_routine
def set_forcing(self, state):
vs = state.variables
vs.forc_temp_surface = vs.t_rest * (vs.t_star - vs.temp[:, :, -1, vs.tau])
@veros_routine
def set_diagnostics(self, state):
pass
@veros_routine
def after_timestep(self, state):
pass
def test_acc_setup(pyom2_lib):
pyom_obj = load_pyom(pyom2_lib)
setup_pyom(
pyom_obj,
set_parameter_pyom,
set_grid_pyom,
set_coriolis_pyom,
set_topography_pyom,
set_initial_conditions_pyom,
set_forcing_pyom,
)
sim = ACCSetup()
sim.setup()
# Veros runs a streamfunction solve during setup
allowed_failures = ("p_hydro",)
# psin and line_psin don't quite meet the tolerance
compare_state(sim.state, pyom_obj, rtol=1e-6, allowed_failures=allowed_failures)
|
1035f0df74fc44303045bd52891c02bb3e437f69
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/core/tod/tod_agents.py
|
6fb15fb731a63a84b15bb6e2faec7b4cea922ee2
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 32,366
|
py
|
tod_agents.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Agents (used for dumping data) and Teachers (for training models) related to the TOD
conversation setup.
As a convention, agents and teachers that are inheritable are prefixed with "Tod"
whereas those that can be used as-is are not. Similarly, classes and functions that do
not need to be exposed outside of this file are prefixed with a single underscore ('_')
"""
from parlai.core.agents import Agent
from parlai.core.message import Message
from parlai.core.metrics import AverageMetric
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.utils.data import DatatypeHelper
from parlai.utils.distributed import is_distributed, get_rank, num_workers
import parlai.core.tod.tod_core as tod
from parlai.core.tod.tod_core import SerializationHelpers
from parlai.core.tod.teacher_metrics import SlotMetrics, NlgMetrics
from typing import Optional, List
import json
import pickle
import difflib
import random
from math import ceil
######### Agents that dump information from a dataset; base classes
class TodStructuredDataParser(Agent):
"""
Base class that specifies intermediate representations for Tod conversations.
    Inherit from this class and implement `setup_episodes()` to define the intermediate representation for a specific dataset. Use multiple inheritance with the classes below that implement `act()`.
For example, if we have a `MyDataset_DataParser(TodStructuredDataParser)` and wanted to make a teacher to train a model to generate User Utterances based on a goal prompt, we would do so by defining `class MyDatasetUserSimulatorTeacher(MyDataset_DataParser, TodUserSimulatorTeacher)`.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
if hasattr(super(), "add_cmdline_args"):
parser = super().add_cmdline_args(parser, partial_opt)
group = parser.add_argument_group("TOD StructuredData agent")
group.add_argument(
"--episodes-randomization-seed",
type=int,
default=-1,
help="Randomize episodes in a predictable way (eg, for few shot). Set to -1 for no randomization. ",
)
parser.add_argument(
"--n-shot",
default=-1,
type=int,
help="Number of dialogues to keep for each of train/valid/test. -1 means all. Dialogues of lower numbers are strict subsets of larger numbers. Do not use in conjunction with `--percent-shot`. Use `--episodes-randomization-seed` to change seed. NOTE: Beware of using this flag when multitasking as this will apply to *all* datasets unless the ':' syntax for specifying per-dataset flags is used.",
)
parser.add_argument(
"--percent-shot",
default=-1,
type=float,
help="Percentage of dialogues to keep for each of train/valid/test. -1 means all. Dialogues of lower numbers are strict subsets of larger numbers. Do not use in conjunction with `--n-shot`. Use `--episodes-randomization-seed` to change seed. NOTE: Beware of using this flag when multitasking as this will apply to *all* datasets unless the ':' syntax for specifying per-dataset flags is used.",
)
return parser
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
self.id = self.get_id_task_prefix() + "_" + self._get_agent_type_suffix()
if shared is None:
self.episodes = self.generate_episodes()
else:
self.episodes = shared["episodes"]
def share(self):
share = super().share()
share["episodes"] = self.episodes
return share
def setup_episodes(self, fold: str) -> List[tod.TodStructuredEpisode]:
"""
Fold here is a data fold.
"""
raise NotImplementedError(
"Must have method for generating an episode. Must be set in downstream Parser for a given task"
)
def generate_episodes(self) -> List[tod.TodStructuredEpisode]:
if self.opt.get("n_shot", -1) >= 0 and self.opt.get("percent_shot", -1) >= 0:
            # Validate before spending a while loading everything
raise RuntimeError("Both `--n-shot` and `--percent-shot` in use!")
episodes = list(self.setup_episodes(self.fold))
if self.opt.get("episodes_randomization_seed", -1) != -1:
random.Random(self.opt["episodes_randomization_seed"]).shuffle(episodes)
if self.opt.get("n_shot", -1) != -1:
episodes = episodes[: self.opt["n_shot"]]
elif self.opt.get("percent_shot", -1) >= 0:
episodes = episodes[: int(len(episodes) * self.opt["percent_shot"])]
return episodes
def get_id_task_prefix(self) -> str:
"""
Convenience for setting IDs.
"""
raise NotImplementedError(
"Must set ID prefix in downstream task agent. Must be set in downsream Parser for a given task"
)
def _get_agent_type_suffix(self) -> str:
"""
Convenience for setting IDs.
"""
raise NotImplementedError(
"Must set in downstream agent within `tod_agents`. If you see this error, something is wrong with TOD Infrastructure"
)
######### Agents that dump information from a dataset as gold (explicitly should *not* be used with teachers)
class _TodDataDumpAgent(TodStructuredDataParser):
"""
For agents which dump data from some dataset, without training/other modifications.
Since we have to deal with batching inside of agents (as per ParlAI convention for
non-generative agents), this does so while also implementing an "epoch done" to
denote elements in a batch that are past the end of the epoch.
"""
def __init__(self, opt: Opt, shared=None):
if not hasattr(self, "fold"):
self.fold = DatatypeHelper.fold(opt["datatype"])
super().__init__(opt, shared)
self.epochDone = False
self.batchsize = opt.get("batchsize", 1)
self.max_episodes = len(self.episodes)
if opt.get("num_episodes", 0) > 0:
self.max_episodes = min(self.max_episodes, opt.get("num_episodes"))
self.episode_idx = opt.get("batchindex", 0)
self._setup_next_episode()
self.round_idx = 0 # for some downstream utt + sysUttAndApiCallAgents.
        if is_distributed():  # distributed runs must shard episodes across workers manually
rank = get_rank()
chunk_size = ceil(self.max_episodes / num_workers())
self.episode_idx += rank * chunk_size
self.max_episodes = min(self.max_episodes, (rank + 1) * chunk_size)
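            # Each worker therefore iterates its own contiguous slice of
            # episodes: [rank * chunk_size, (rank + 1) * chunk_size).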
def _setup_next_episode(self):
self.epochDone = self.episode_idx >= self.max_episodes
self.episode = None
if not self.epochDone:
self.episode = self.episodes[self.episode_idx]
self.round_idx = (
0 # so downstream agents know which round they are in. Update in `act()`
)
def epoch_done(self) -> bool:
return self.epochDone
def episode_done(self) -> bool:
raise RuntimeError("Must be defined in downstream agent")
def num_episodes(self) -> int:
return len(self.episodes)
def reset(self):
self.episode_idx += self.batchsize
self._setup_next_episode()
class TodGoalAgent(_TodDataDumpAgent):
"""
Use as a mixin with a dataset parser class that includes `generate_episodes()` of
TodStructuredDataParser.
Dumps out all goal calls from an episode.
"""
def act(self):
return {
"text": f"{tod.STANDARD_GOAL}{self.episode.goal_calls_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
def _get_agent_type_suffix(self):
return "Goal"
def episode_done(self) -> bool:
# done if end of batch; should never end conversation otherwise
return self.epoch_done()
class TodApiSchemaAgent(_TodDataDumpAgent):
"""
Use as a mixin with a dataset parser class that includes `generate_episodes()` of
TodStructuredDataParser.
Dumps out api schemas associated with an episode, based on what is manually set in
the dataset parser.
"""
def act(self):
return {
"text": f"{tod.STANDARD_API_SCHEMAS}{self.episode.api_schemas_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
def _get_agent_type_suffix(self):
return "ApiSchema"
def episode_done(self) -> bool:
# done if end of batch; should never end conversation otherwise
return self.epoch_done()
############# Single Goal + Api Schema Agent
class _EpisodeToSingleGoalProcessor(_TodDataDumpAgent):
"""
Iterate through all of the goals of a dataset, one by one.
Slightly different logic than the dump agent, since how we count and set up
examples for an episode differs.
Used as a mixin in the SingleGoal and SingleApiSchema agents below.
This class exposes a `filter_goals()` function that can be overridden by downstream agents.
"""
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
self.epochDone = False
if shared is None:
self.episodes = self._setup_single_goal_episodes()
else:
# Handled fine in _TodDataDumpAgent
pass
self.max_episodes = len(self.episodes)
if opt.get("num_episodes", 0) > 0:
self.max_episodes = min(self.max_episodes, opt.get("num_episodes"))
if is_distributed(): # cause gotta manually handle
rank = get_rank()
chunk_size = ceil(self.max_episodes / num_workers())
self.max_episodes = min(self.max_episodes, (rank + 1) * chunk_size)
self._setup_next_episode()
def _setup_single_goal_episodes(self) -> List[tod.TodStructuredEpisode]:
"""
This function assumes that `self.setup_episodes()` has already been called
prior.
Based on the `__init__` order of this class, it should be done in
`TodStructuredDataParser` by this point.
"""
raw_episodes = self.episodes
result = []
for raw in raw_episodes:
for call in self.filter_goals(raw.goal_calls_machine):
schema = {}
for cand in raw.api_schemas_machine:
if (
cand[tod.STANDARD_API_NAME_SLOT]
== call[tod.STANDARD_API_NAME_SLOT]
):
schema = cand
result.append(
tod.TodStructuredEpisode(
domain=raw.domain,
api_schemas_machine=[schema],
goal_calls_machine=[call],
rounds=[],
)
)
return result
def filter_goals(self, goals):
"""
Some downstream agents may want to filter the goals.
Override this if so.
"""
return goals
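# Hedged sketch of such an override (`MyDatasetParser` and the "BookRestaurant"
# API name below are illustrative, not part of this module):
#
#   class BookingOnlyGoalAgent(TodSingleGoalAgent, MyDatasetParser):
#       def filter_goals(self, goals):
#           return [
#               g for g in goals
#               if g.get(tod.STANDARD_API_NAME_SLOT) == "BookRestaurant"
#           ]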
class TodSingleGoalAgent(_EpisodeToSingleGoalProcessor, TodGoalAgent):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Takes goals of an episode and splits them into single versions. (That is, if an episode has 3 goal API calls, this makes it such that those 3 goal API calls become the grounding for 3 separate episodes.)
NOTE: If an API schema agent is used, this *must* be used with `TodSingleApiSchemaAgent`, since it will be nonsensical otherwise. Additionally, this agent will not function properly with the UserUtt + SystemUttAndApiCall agents, since episodes will not align.
"""
def _get_agent_type_suffix(self):
return "SingleGoal"
class TodSingleApiSchemaAgent(_EpisodeToSingleGoalProcessor, TodApiSchemaAgent):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Takes the schemas provided for an episode and filters them to match the single goal provided by `TodSingleGoalAgent`.
NOTE: Must be used with `TodSingleGoalAgent`, since it is nonsensical otherwise. Additionally, this agent will not function properly with the UserUtt + SystemUttAndApiCall agents, since episodes will not align.
"""
def _get_agent_type_suffix(self):
return "SingleApiSchema"
###### Agents used for calculating TOD World Metrics based on a dataset. See `tod_world_script` or `parlai/projects/tod_simulator/` for examples.
class TodUserUttAgent(_TodDataDumpAgent):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Agent provided as a convenience to run TOD World script code on a dataset without having to write too much code to do so. (Ex. for a quick way to dump data to a `.jsonl` file for generating data for ACUTE or to generate a report file of metrics from TodWorld script.)
This represents the "User" agent.
This class should only ever be used with the model-model chat world which will stop
upon seeing the '[DONE]' utterance; may go out of bounds otherwise.
"""
def act(self):
result = {
"text": f"{tod.STANDARD_USER_UTTERANCE}{self.episode.rounds[self.round_idx].user_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
self.round_idx += 1
return result
def reset(self):
super().reset() # setup next episode
self.round_idx = 0
def _get_agent_type_suffix(self):
return "User"
def episode_done(self) -> bool:
return self.epoch_done() or self.round_idx >= len(self.episode.rounds)
class TodApiCallAndSysUttAgent(_TodDataDumpAgent):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Agent provided as a convenience to run TOD World script code on a dataset without having to write too much code to do so. (Ex. for a quick way to dump data to a `.jsonl` file for generating data for ACUTE or to generate a report file of metrics from TodWorld script.)
This class represents the System and will generate both API Calls and System Utterances.
This class should only ever be used with the model-model chat world which will stop
upon seeing the '[DONE]' utterance; may go out of bounds otherwise.
"""
def __init__(self, opt: Opt, shared=None):
# This class will have `act()` called on it twice per round — once for API call and once for NLG — so need to make sure we don't increment episode number (reset) prematurely; use the `already_reset` flag for this.
self.already_reset = False
self.api_call_turn = True
super().__init__(opt, shared)
def act(self):
self.already_reset = False
if tod.STANDARD_API_SCHEMAS in self.observation.get("text", ""):
return {
"text": tod.STANDARD_API_SCHEMAS, # Default convention for the first turn
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
if self.api_call_turn: # comes first, don't iterate round #
result = {
"text": f"{tod.STANDARD_CALL}{self.episode.rounds[self.round_idx].api_call_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
self.api_call_turn = False
else:
result = {
"text": f"{tod.STANDARD_SYSTEM_UTTERANCE}{self.episode.rounds[self.round_idx].sys_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
self.round_idx += 1
self.api_call_turn = True
return result
def reset(self):
if not self.already_reset:
super().reset() # setup next episode
self.api_call_turn = True
self.already_reset = True
def _get_agent_type_suffix(self):
return "System"
def episode_done(self) -> bool:
return self.epoch_done() or self.round_idx >= len(self.episode.rounds)
class TodApiResponseAgent(_TodDataDumpAgent):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Agent provided as a convenience to run TOD World script code on a dataset without having to write too much code to do so. (Ex. for a quick way to dump data to a `.jsonl` file for generating data for ACUTE or to generate a report file of metrics from TodWorld script.)
This class represents the Api Response mechanism.
This class should only ever be used with the model-model chat world which will stop
upon seeing the '[DONE]' utterance; may go out of bounds otherwise.
"""
def act(self):
if tod.STANDARD_API_SCHEMAS in self.observation.get("text", ""):
return {
"text": tod.STANDARD_API_SCHEMAS, # Default convention
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
result = {
"text": f"{tod.STANDARD_RESP}{self.episode.rounds[self.round_idx].api_resp_utt}",
"id": self.id,
"domain": self.episode.domain,
"episode_done": False,
}
self.round_idx += 1
return result
def reset(self):
super().reset() # setup next episode
self.round_idx = 0
def _get_agent_type_suffix(self):
return "ApiResponse"
def episode_done(self) -> bool:
return self.epoch_done() or self.round_idx >= len(self.episode.rounds)
###### Standalone API agent
class StandaloneApiAgent(Agent):
"""
Trainable agent that saves API calls and responses.
Use `TodStandaloneApiTeacher` to train this class. For example for a MultiWoz V2.2
standalone API, use ``` parlai train -t multiwoz_v22:StandaloneApiTeacher -m
parlai.core.tod.tod_agents:StandaloneApiAgent -eps 4 -mf output ``` to generate the
`.pickle` file to use.
"""
EMPTY_RESP = {
"text": tod.STANDARD_RESP,
"id": "StandaloneApiAgent",
"episode_done": False,
}
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
group = parser.add_argument_group("TOD Standalone API args")
group.add_argument(
"--exact-api-call",
type=bool,
default=True,
help="Validation-time flag. If true, will return '' if exact api call values not found. If false, will pick response from the same intent with similar api parameters (assuming intent is the same when available)",
)
group.add_argument(
"--fail-hard",
type=bool,
default=False,
help="Aids in deugging. Will throw exception if API call not found and '--exact-api-call' is set.",
)
group.add_argument(
"--standalone-api-file",
type=str,
default=None,
help="Path to file holding `.pickle` of standalone api for validation (will intelligently strip if suffix included). If not set, assumes the `model_file` argument will contain the `.pickle` file. ",
)
return parser
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.id = "StandaloneApiAgent"
file_key = "model_file"
if self.opt["standalone_api_file"] is not None:
file_key = "standalone_api_file"
self.path_base = self.opt[file_key].replace(".pickle", "")
self.db_path = self.path_base + ".pickle"
self.exact_api_call = self.opt["exact_api_call"]
try:
with (open(self.db_path, "rb")) as openfile:
self.data = pickle.load(openfile)
self.training = False  # an existing DB was loaded, so serve it rather than train
print("Loaded Standalone API data successfully")
if self.exact_api_call != self.data.get("exact_api_call", True):
raise RuntimeError(
f"Standalone API .pickle file generated with `exact_api_call` of {self.data.get('exact_api_call', False)} but StandaloneApiAgent sets it to {self.exact_api_call}"
)
except Exception:
print(f"No file at {self.db_path}; ASSUMING WE ARE TRAINING")
self.data = {}
self.data["exact_api_call"] = self.exact_api_call
self.training = True
def _maybe_filter_prefix(self, text, prefix):
if prefix in text:
return text[len(prefix) :].strip()
return text.strip()
def act(self):
if not self.observation["text"].startswith(tod.STANDARD_CALL):
return self.EMPTY_RESP
call_text_raw = self.observation["text"]
# decode then reencode the API call so that we get the API calls in a consistent order
call_text = SerializationHelpers.api_dict_to_str(
SerializationHelpers.str_to_api_dict(
call_text_raw[len(tod.STANDARD_CALL) :]
)
)
if "labels" in self.observation:
return self._do_train(call_text)
return self._do_fetch(call_text)
def _do_train(self, call_text):
assert self.training is True
self.data[call_text] = self.observation["labels"][0]
return self.EMPTY_RESP
def _do_fetch(self, call_text):
if self.exact_api_call:
if self.opt.get("fail_hard", False):
resp = self.data[call_text]
else:
resp = self.data.get(call_text, tod.STANDARD_RESP)
return {"text": resp, "id": self.id, "episode_done": False}
# Not exact case
best_key = difflib.get_close_matches(call_text, self.data.keys(), 1)
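# get_close_matches returns at most one match here, and [] when no stored
# call passes difflib's default similarity cutoff of 0.6.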
if len(best_key) == 0:
return self.EMPTY_RESP
return {
"text": self.data.get(best_key[0], tod.STANDARD_RESP),
"id": self.id,
"episode_done": False,
}
def shutdown(self):
if self.training:
with (open(self.db_path, "wb")) as openfile:
pickle.dump(self.data, openfile)
print(f"Dumped output to {self.db_path}")
with open(self.path_base + ".opt", "w") as f:
json.dump(self.opt, f)
######### Empty agents
class EmptyApiSchemaAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt)
self.id = "EmptyApiSchemaAgent"
def act(self):
msg = {
"id": self.getID(),
"text": tod.STANDARD_API_SCHEMAS,
"episode_done": False,
}
return Message(msg)
class EmptyGoalAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt)
self.id = "EmptyGoalAgent"
def act(self):
msg = {"id": self.getID(), "text": tod.STANDARD_GOAL, "episode_done": False}
return Message(msg)
############# Teachers
class TodSystemTeacher(TodStructuredDataParser, DialogTeacher):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
TOD agent teacher which produces both API calls and NLG responses.
First turn is API Schema grounding, which may be an empty schema.
Subsequent turns alternate between
1. User utterance -> API Call
2. API Response -> System Utterance
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser = super().add_cmdline_args(parser, partial_opt)
parser.add_argument(
"--api-schemas",
type="bool",
default=False,
help="Preempt first turn with intents + required/optional parameters as key/value for given domain",
)
parser.add_argument(
"--api-jga-record",
type=bool,
default=True,
help="Breaks out jga into individual api schemas",
)
parser.add_argument(
"--domain-jga-record",
type=bool,
default=False,
help="Breaks out jga into individual domains",
)
parser.add_argument(
"--domain-nlg-record",
type=bool,
default=False,
help="Breaks out nlg into individual domains",
)
return parser
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self._num_examples_cache = sum([len(x.rounds) * 2 + 1 for x in self.episodes])
self._num_episodes_cache = len(self.episodes)
def custom_evaluation(
self, teacher_action: Message, labels, model_response: Message
):
resp = model_response.get("text")
if not resp:
return
if teacher_action["type"] == tod.STANDARD_CALL:
if resp.startswith(tod.STANDARD_CALL):
resp = resp[len(tod.STANDARD_CALL) :]
predicted = SerializationHelpers.str_to_api_dict(resp)
domains = (
[teacher_action["domain"]] if self.opt["domain_jga_record"] else []
)
metrics = SlotMetrics(
teacher_slots=teacher_action["slots"],
predicted_slots=predicted,
prefixes=domains,
).report()
for key, value in metrics.items():
self.metrics.add(key, value)
if self.opt["api_jga_record"] and len(teacher_action["slots"]) > 0:
teacher = teacher_action["slots"]
slots = list(teacher.keys())
slots.remove(tod.STANDARD_API_NAME_SLOT)
api_here = (
"api-"
+ teacher[tod.STANDARD_API_NAME_SLOT]
+ "--"
+ "-".join(slots)
)
self.metrics.add(f"{api_here}/jga", AverageMetric(teacher == predicted))
elif teacher_action["type"] == tod.STANDARD_SYSTEM_UTTERANCE:
domains = (
[teacher_action["domain"]] if self.opt["domain_nlg_record"] else []
)
metrics = NlgMetrics(guess=resp, labels=labels, prefixes=domains).report()
for key, value in metrics.items():
self.metrics.add(key, value)
def setup_data(self, fold):
for episode in self.generate_episodes():
if self.opt.get("api_schemas"):
schemas = episode.api_schemas_utt
else:
schemas = ""
yield {
"text": f"{tod.STANDARD_API_SCHEMAS}{schemas}",
"label": f"{tod.STANDARD_API_SCHEMAS}",
"domain": episode.domain,
"type": tod.STANDARD_API_SCHEMAS,
"slots": {},
}, True
for r in episode.rounds:
yield {
"text": f"{tod.STANDARD_USER_UTTERANCE}{r.user_utt}",
"label": f"{tod.STANDARD_CALL}{r.api_call_utt}",
"domain": episode.domain,
"type": tod.STANDARD_CALL,
"slots": r.api_call_machine,
}, False
yield {
"text": f"{tod.STANDARD_RESP}{r.api_resp_utt}",
"label": f"{tod.STANDARD_SYSTEM_UTTERANCE}{r.sys_utt}",
"domain": episode.domain,
"slots": r.api_resp_machine,
"type": tod.STANDARD_SYSTEM_UTTERANCE,
}, False
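# Sketch of the stream yielded above for a one-round episode (text -> label;
# prefix constants come from tod, values are illustrative):
#   STANDARD_API_SCHEMAS + schemas       ->  STANDARD_API_SCHEMAS
#   STANDARD_USER_UTTERANCE + user_utt   ->  STANDARD_CALL + api_call_utt
#   STANDARD_RESP + api_resp_utt         ->  STANDARD_SYSTEM_UTTERANCE + sys_utt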
def _get_agent_type_suffix(self):
return "SystemTeacher"
class TodUserSimulatorTeacher(TodStructuredDataParser, DialogTeacher):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Teacher that has `Goal->User Utterance` for its first turn, then `System
Utterance->User Utterance` for all subsequent turns.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
# Manually set number of examples + number of episodes
self._num_examples_cache = sum([len(x.rounds) for x in self.episodes])
self._num_episodes_cache = len(self.episodes)
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser = super().add_cmdline_args(parser, partial_opt)
parser.add_argument(
"--api-schemas",
type="bool",
default=False,
help="Preempt first turn with intents + required/optional parameters as key/value for given domain. NOOP for this teacher, but including to make sweeps easier",
)
return parser
def setup_data(self, fold):
for episode in self.generate_episodes():
if len(episode.rounds) < 1:
continue
yield {
"text": f"{tod.STANDARD_GOAL}{episode.goal_calls_utt}",
"label": f"{tod.STANDARD_USER_UTTERANCE}{episode.rounds[0].user_utt}",
"domain": episode.domain,
"type": tod.STANDARD_USER_UTTERANCE,
}, True
for i, r in enumerate(episode.rounds):
if i == len(episode.rounds) - 1:
continue
yield {
"text": f"{tod.STANDARD_SYSTEM_UTTERANCE}{r.sys_utt}",
"label": f"{tod.STANDARD_USER_UTTERANCE}{episode.rounds[i+1].user_utt}",
"domain": episode.domain,
"type": tod.STANDARD_USER_UTTERANCE,
"slots": {}, # slots in agent/user turns are meaningless
}, False
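# Sketch of the stream yielded above for a two-round episode (text -> label;
# prefix constants come from tod):
#   STANDARD_GOAL + goal_calls_utt                 ->  STANDARD_USER_UTTERANCE + rounds[0].user_utt
#   STANDARD_SYSTEM_UTTERANCE + rounds[0].sys_utt  ->  STANDARD_USER_UTTERANCE + rounds[1].user_utt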
def custom_evaluation(
self, teacher_action: Message, labels, model_response: Message
):
resp = model_response.get("text")
if not resp:
return
if teacher_action["type"] == tod.STANDARD_RESP:
if resp.startswith(tod.STANDARD_RESP):
resp = resp[len(tod.STANDARD_RESP) :]
predicted = SerializationHelpers.str_to_api_dict(resp)
metrics = SlotMetrics(teacher_action["slots"], predicted).report()
for key, value in metrics.items():
self.metrics.add(key, value)
elif teacher_action["type"] == tod.STANDARD_USER_UTTERANCE:
metrics = NlgMetrics(resp, labels).report()
for key, value in metrics.items():
self.metrics.add(key, value)
def _get_agent_type_suffix(self):
return "UserSimulatorTeacher"
class TodStandaloneApiTeacher(TodStructuredDataParser, DialogTeacher):
"""
Use as a mixin with classes that also extend + implement TodStructuredDataParser.
Use this to generate a database for `StandaloneApiAgent`.
Set this as the teacher with `StandaloneApiAgent` as the agent. Ex for a MultiWoz
V2.2 standalone API, use ``` parlai train -t multiwoz_v22:StandaloneApiTeacher -m
parlai.core.tod.tod_agents:StandaloneApiAgent -eps 4 -mf output ```
"""
def setup_data(self, fold):
# As a default, just put everything in
for fold_overwrite in ["train", "valid", "test"]:
for episode in self.setup_episodes(fold_overwrite):
first = True
for r in episode.rounds:
if len(r.api_call_machine) > 0:
yield {
"text": f"{tod.STANDARD_CALL}{r.api_call_utt}",
"label": f"{tod.STANDARD_RESP}{r.api_resp_utt}",
"id": self.id,
"domain": episode.domain,
}, first
first = False
def _get_agent_type_suffix(self):
return "StandaloneApiTeacher"
|
9cf4b8ce075304c93606d97440c90169241529b5
|
2c296f525308ee2f7e1205b6b7eb392c53bbc14d
|
/pyamg/vis/__init__.py
|
8380b74b79f33b7bac4a41c79933e0dcba236bcc
|
[
"MIT"
] |
permissive
|
pyamg/pyamg
|
939f2b73153518f482401c1b2363e1abed880b4d
|
4f604ef7d7765dc8c20f643650bf60bcc3c7207c
|
refs/heads/main
| 2023-09-01T07:10:42.995125
| 2023-07-18T13:35:08
| 2023-07-18T13:47:34
| 10,866,320
| 466
| 125
|
MIT
| 2023-07-18T13:47:35
| 2013-06-22T14:24:24
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
__init__.py
|
"""Basic vtk support.
The vis module provides support for generic vtk file writing, basic mesh
writing (unstructured triangular and tetrahedral meshes), visualization of
aggregate groupings in 2d and in 3d, and C/F splittings.
"""
from . import vtk_writer
from . import vis_coarse
from .vtk_writer import write_vtu, write_basic_mesh
from .vis_coarse import vis_aggregate_groups
__all__ = ['vtk_writer', 'vis_coarse',
'vis_aggregate_groups', 'write_vtu', 'write_basic_mesh']
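# A hedged usage sketch, kept as a comment so importing the package stays
# side-effect free (array shapes follow write_basic_mesh's triangular-mesh
# convention; the values are illustrative):
#
#   import numpy as np
#   from pyamg.vis import write_basic_mesh
#   V = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # three vertices
#   E2V = np.array([[0, 1, 2]])                         # one triangle
#   write_basic_mesh(V, E2V=E2V, mesh_type='tri', fname='mesh.vtu')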
|
acf3b0c60d92e0ae89d63f0c695a29c09f528846
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/OpenMetadata/ingestion/src/metadata/ingestion/source/database/mssql/query_parser.py
|
9873ac1ad434484a3a0c8ec26bc3864ff1ad6245
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
query_parser.py
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MSSQL usage module
"""
from abc import ABC
from metadata.generated.schema.entity.services.connections.database.mssqlConnection import (
MssqlConnection,
)
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.metadataIngestion.workflow import (
Source as WorkflowSource,
)
from metadata.ingestion.api.source import InvalidSourceException
from metadata.ingestion.source.database.query_parser_source import QueryParserSource
class MssqlQueryParserSource(QueryParserSource, ABC):
"""
MSSQL base for Usage and Lineage
"""
filters: str
@classmethod
def create(cls, config_dict, metadata_config: OpenMetadataConnection):
"""Create class instance"""
config: WorkflowSource = WorkflowSource.parse_obj(config_dict)
connection: MssqlConnection = config.serviceConnection.__root__.config
if not isinstance(connection, MssqlConnection):
raise InvalidSourceException(
f"Expected MssqlConnection, but got {connection}"
)
return cls(config, metadata_config)
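# Hedged sketch of a minimal config_dict accepted by create(); field names
# follow the generated WorkflowSource/MssqlConnection schemas, values are
# illustrative, and other required fields may apply:
#
#   {
#       "type": "mssql",
#       "serviceName": "my_mssql",
#       "serviceConnection": {
#           "config": {"type": "Mssql", "hostPort": "localhost:1433"}
#       },
#       "sourceConfig": {"config": {}},
#   }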
|
45498aa79475914f7fe736110c28dae5cceb854f
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/site_compare/site_compare.py
|
079536f450b95f9119ebb17e449661f89fb27bf9
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 6,520
|
py
|
site_compare.py
|
#!/usr/bin/env python
# Copyright 2011 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare component to handle bulk scrapes.
Invokes a list of browsers and sends them to a list of URLs,
saving the rendered results to a specified directory, then
performs comparison operations on the resulting bitmaps and
saves the results
"""
from __future__ import print_function
# This line is necessary to work around a QEMU bug
import _imaging
import os # Functions for walking the directory tree
import sys # For sys.exit() in the __main__ block below
import types # Runtime type-checking
import command_line # command-line parsing
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
import commands.compare2 # compare one page in two versions of same browser
import commands.maskmaker # generate a mask based on repeated scrapes
import commands.measure # measure length of time a page takes to load
import commands.scrape # scrape a URL or series of URLs to a bitmap
# The timeload command is obsolete (too flaky); it may be reinstated
# later but for now it's been superceded by "measure"
# import commands.timeload # measure length of time a page takes to load
def Scrape(browsers, urls, window_size=(1024, 768),
window_pos=(0, 0), timeout=20, save_path=None, **kwargs):
"""Invoke one or more browsers over one or more URLs, scraping renders.
Args:
browsers: browsers to invoke with optional version strings
urls: URLs to visit
window_size: size of the browser window to display
window_pos: location of browser window
timeout: time (in seconds) to wait for page to load
save_path: root of save path, automatically appended with browser and
version
kwargs: miscellaneous keyword args, passed to scraper
Returns:
None
@TODO(jhaas): more parameters, or perhaps an indefinite dictionary
parameter, for things like length of time to wait for timeout, speed
of mouse clicks, etc. Possibly on a per-browser, per-URL, or
per-browser-per-URL basis
"""
if type(browsers) in types.StringTypes: browsers = [browsers]
if save_path is None:
# default save path is "scrapes" off the current root
save_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
for browser in browsers:
# Browsers should be tuples of (browser, version)
if type(browser) in types.StringTypes: browser = (browser, None)
scraper = scrapers.GetScraper(browser)
full_path = os.path.join(save_path, browser[0], scraper.version)
drivers.windowing.PreparePath(full_path)
scraper.Scrape(urls, full_path, window_size, window_pos, timeout, kwargs)
def Compare(base, compare, ops, root_path=None, out_path=None):
"""Compares a series of scrapes using a series of operators.
Args:
base: (browser, version) tuple of version to consider the baseline
compare: (browser, version) tuple of version to compare to
ops: list of operators plus operator arguments
root_path: root of the scrapes
out_path: place to put any output from the operators
Returns:
None
@TODO(jhaas): this method will likely change, to provide a robust and
well-defined way of chaining operators, applying operators conditionally,
and full-featured scripting of the operator chain. There also needs
to be better definition of the output; right now it's to stdout and
a log.txt file, with operator-dependent images saved for error output
"""
if root_path is None:
# default save path is "scrapes" off the current root
root_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
if out_path is None:
out_path = os.path.join(os.path.split(__file__)[0], "Compares")
if type(base) in types.StringTypes: base = (base, None)
if type(compare) in types.StringTypes: compare = (compare, None)
if type(ops) in types.StringTypes: ops = [ops]
base_dir = os.path.join(root_path, base[0])
compare_dir = os.path.join(root_path, compare[0])
if base[1] is None:
# base defaults to earliest capture
base = (base[0], min(os.listdir(base_dir)))
if compare[1] is None:
# compare defaults to latest capture
compare = (compare[0], max(os.listdir(compare_dir)))
out_path = os.path.join(out_path, base[0], base[1], compare[0], compare[1])
drivers.windowing.PreparePath(out_path)
# TODO(jhaas): right now we're just dumping output to a log file
# (and the console), which works as far as it goes but isn't nearly
# robust enough. Change this after deciding exactly what we want to
# change it to.
out_file = open(os.path.join(out_path, "log.txt"), "w")
description_string = ("Comparing %s %s to %s %s" %
(base[0], base[1], compare[0], compare[1]))
out_file.write(description_string)
print(description_string)
base_dir = os.path.join(base_dir, base[1])
compare_dir = os.path.join(compare_dir, compare[1])
for filename in os.listdir(base_dir):
out_file.write("%s: " % filename)
if not os.path.isfile(os.path.join(compare_dir, filename)):
out_file.write("Does not exist in target directory\n")
print("File %s does not exist in target directory" % filename)
continue
base_filename = os.path.join(base_dir, filename)
compare_filename = os.path.join(compare_dir, filename)
for op in ops:
if type(op) in types.StringTypes: op = (op, None)
module = operators.GetOperator(op[0])
ret = module.Compare(base_filename, compare_filename)
if ret is None:
print("%s: OK" % (filename,))
out_file.write("OK\n")
else:
print("%s: %s" % (filename, ret[0]))
out_file.write("%s\n" % (ret[0]))
ret[1].save(os.path.join(out_path, filename))
out_file.close()
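# Hedged usage sketch (browser names, the URL, and the "equals" operator name
# are illustrative; the operator must be resolvable by operators.GetOperator()):
#
#   Scrape(["chrome", "firefox"], ["http://www.example.com"])
#   Compare(("chrome", None), ("firefox", None), ["equals"])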
def main():
"""Main executable. Parse the command line and invoke the command."""
cmdline = command_line.CommandLine()
# The below two commands are currently unstable so have been disabled
# commands.compare2.CreateCommand(cmdline)
# commands.maskmaker.CreateCommand(cmdline)
commands.measure.CreateCommand(cmdline)
commands.scrape.CreateCommand(cmdline)
cmdline.ParseCommandLine()
return 0
if __name__ == "__main__":
sys.exit(main())
|
586014a671afc6f2d69bcb5dded1643930655eff
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow/source/tensorflow/contrib/session_bundle/gc.py
|
249c23c88f3043403e322b73b6c9df97e932a92a
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,618
|
py
|
gc.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
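# Sketch: with n=2 and paths whose export versions are [3, 1, 7], the heap
# holds (3, 0), (1, 1), (7, 2); nlargest(2, heap) picks (7, 2) and (3, 0), so
# keep() returns the Paths for versions 3 and 7, in sorted order.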
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def one_of_every_n_export_versions(n):
r"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
# interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
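# Sketch: with n=2, export versions 1 and 2 both map to interval 0, version 3
# maps to interval 1, and version 5 maps to interval 2, so keep() retains
# versions 2, 3 and 5 (the largest seen in each interval).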
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
A filter function whose output is the union of the paths kept by lf and rf.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(base_dir, r), None))
if p:
paths.append(p)
return sorted(paths)
|
4c9b44d9f54b3980e6161266843ee7c9141e674f
|
2ca86be72e5e1cb636183de97fb2760e8d1e749f
|
/tests/cases/rest_decorator_test.py
|
3a44d10add0cca8d5eacb45acee7f820727c9475
|
[
"Apache-2.0"
] |
permissive
|
girder/girder
|
2f1acef3ecb8c2b00eb589c0844fff4cef8b3e3c
|
d47bb163b6b539a55232d38fe537bb6d0531062a
|
refs/heads/master
| 2023-08-31T20:30:49.441037
| 2023-08-30T19:40:57
| 2023-08-30T19:40:57
| 11,854,950
| 422
| 188
|
Apache-2.0
| 2023-09-13T19:39:57
| 2013-08-02T23:55:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,224
|
py
|
rest_decorator_test.py
|
# -*- coding: utf-8 -*-
import json
import os
import requests
import unittest
from .. import base
from girder import config
from girder.api.rest import endpoint
from girder.models.user import User
def setUpModule():
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig()
base.startServer(mock=False)
def tearDownModule():
base.stopServer()
class TestEndpointDecoratorException(base.TestCase):
"""Tests the endpoint decorator exception handling."""
@endpoint
def pointlessEndpointAscii(self, path, params):
raise Exception('You did something wrong.')
@endpoint
def pointlessEndpointUnicode(self, path, params):
raise Exception(u'\u0400 cannot be converted to ascii.')
@endpoint
def pointlessEndpointBytes(self, path, params):
raise Exception('\x80\x80 cannot be converted to unicode or ascii.')
def testEndpointExceptionAscii(self):
resp = self.pointlessEndpointAscii('', {}).decode()
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionUnicode(self):
resp = self.pointlessEndpointUnicode('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionBytes(self):
resp = self.pointlessEndpointBytes('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
@unittest.skip('TODO: port plugin changes')
def testBoundHandlerDecorator(self):
user = User().createUser('tester', 'password', 'Test', 'User', 'test@girder.test')
resp = self.request('/collection/unbound/default/noargs', user=user, params={
'val': False
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, True)
resp = self.request('/collection/unbound/default', user=user, params={
'val': False
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, True)
resp = self.request('/collection/unbound/explicit', user=user)
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'name': 'collection',
'userLogin': 'tester'
})
@unittest.skip('TODO: port plugin changes')
def testRawResponse(self):
resp = self.request('/other/rawWithDecorator', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is a raw response')
resp = self.request('/other/rawInternal', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is also a raw response')
# We must make an actual request in order to test response encoding
# at the HTTP server layer.
resp = requests.get(
'http://127.0.0.1:%s/api/v1/other/rawReturningText' % os.environ['GIRDER_TEST_PORT'])
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['Content-Type'], 'text/plain;charset=utf-8')
self.assertEqual(resp.content, b'this is not encoded \xf0\x9f\x91\x8d')
self.assertEqual(resp.text, u'this is not encoded \U0001F44D')
|
b1a61dcc2be7abd514adeeaf848df6973ed3d9e9
|
3ebbaf23f44312aa84f8eafd68db2650e9f4d741
|
/bot/plugins/pauseall.py
|
19d497002dba60a3229c2811da9ed33d82719e60
|
[] |
no_license
|
zeroone2numeral2/qbittorrent-bot
|
3f1e2bb91ff8e24002048338930c7e236211f457
|
81b97ec9b6ac024d629861b8bd638504d6975537
|
refs/heads/master
| 2022-12-20T20:04:36.936274
| 2022-07-04T12:55:07
| 2022-07-04T12:55:07
| 209,236,432
| 158
| 53
| null | 2022-12-13T09:14:31
| 2019-09-18T06:38:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
pauseall.py
|
import logging
# noinspection PyPackageRequirements
from telegram import Update, BotCommand
from telegram.ext import CommandHandler, CallbackContext
from bot.qbtinstance import qb
from bot.updater import updater
from utils import u
from utils import Permissions
logger = logging.getLogger(__name__)
@u.check_permissions(required_permission=Permissions.EDIT)
@u.failwithmessage
def on_resume_all_command(update: Update, context: CallbackContext):
logger.info('resume all command from %s', update.message.from_user.first_name)
qb.resume_all()
update.message.reply_text('Resumed all torrents')
@u.check_permissions(required_permission=Permissions.EDIT)
@u.failwithmessage
def on_pause_all_command(update: Update, context: CallbackContext):
logger.info('pause all command from %s', update.message.from_user.first_name)
qb.pause_all()
update.message.reply_text('Paused all torrents')
updater.add_handler(CommandHandler(['resumeall'], on_resume_all_command), bot_command=BotCommand("resumeall", "resume all torrents"))
updater.add_handler(CommandHandler(['pauseall'], on_pause_all_command), bot_command=BotCommand("pauseall", "pause all torrents"))
|
5db3c056342cd8910efb71b816d745c8c91b5b68
|
6ff85b80c6fe1b3ad5416a304b93551a5e80de10
|
/Python/String/Replace.py
|
29fead167105dd3f7f74f6c5b6f3bdb3ef40def2
|
[
"MIT"
] |
permissive
|
maniero/SOpt
|
c600cc2333e0a47ce013be3516bbb8080502ff2a
|
5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3
|
refs/heads/master
| 2023-08-10T16:48:46.058739
| 2023-08-10T13:42:17
| 2023-08-10T13:42:17
| 78,631,930
| 1,002
| 136
|
MIT
| 2023-01-28T12:10:01
| 2017-01-11T11:19:24
|
C#
|
UTF-8
|
Python
| false
| false
| 89
|
py
|
Replace.py
|
print(texto.replace('caixa 1 ', 'pacote 2 '))
#https://pt.stackoverflow.com/q/83381/101
|
e18ba19907917b40869592a586f8478c0c2825f3
|
a3e2d421f94a8adf2c41ff1d093b5a06de1448d6
|
/product/runtime/src/test/python/imp_rename_two/mod_one.py
|
0068e4054a6a5783d0b0d1102b49f5a2b87f100f
|
[
"MIT"
] |
permissive
|
chaquo/chaquopy
|
09ef057015a756ce9b862732477b2549562720b4
|
e09bbe6ca5efd859d484b01e30131ccc944aa2b6
|
refs/heads/master
| 2023-08-31T22:09:22.230601
| 2023-08-31T13:07:57
| 2023-08-31T13:07:57
| 95,140,462
| 607
| 121
|
MIT
| 2023-09-13T19:17:29
| 2017-06-22T17:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 10
|
py
|
mod_one.py
|
ID = "21"
|
40f87678c84d3e738b6b52ce11252ef02a9fdbdf
|
6c287b46a483ede6fe079e40c6e823853f395a6a
|
/generator/test/runner.py
|
3c2a8231eddbd0056e6339ac8c75cd95f4d366cb
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
smartdevicelink/sdl_ios
|
50fded2de845b8406071d84225db80a0481961cb
|
2c3d561468ee67a4b298e4976a1ac2699853c11e
|
refs/heads/develop
| 2023-07-14T06:14:09.622236
| 2023-06-29T17:31:21
| 2023-06-29T17:31:21
| 24,940,582
| 188
| 129
|
BSD-3-Clause
| 2023-06-29T17:31:22
| 2014-10-08T12:57:22
|
Objective-C
|
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
runner.py
|
"""
All tests
"""
import logging
import sys
from pathlib import Path
from unittest import TestLoader, TestSuite, TextTestRunner
ROOT = Path(__file__).absolute()
sys.path.append(ROOT.parents[1].joinpath('rpc_spec/InterfaceParser').as_posix())
sys.path.append(ROOT.parents[1].as_posix())
try:
from test_enums import TestEnumsProducer
from test_functions import TestFunctionsProducer
from test_structs import TestStructsProducer
from test_CodeFormatAndQuality import CodeFormatAndQuality
except ImportError as error:
print('{}.\nProbably you did not initialize the submodule'.format(error))
sys.exit(1)
def config_logging():
"""
Configuring logging for all application
"""
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M'))
root_logger = logging.getLogger()
handler.setLevel(logging.INFO)
root_logger.setLevel(logging.INFO)
root_logger.addHandler(handler)
def main():
"""
Without performing tests (simple instance initialization only), initial test code coverage is as follows:
generator/transformers/common_producer.py 21%
generator/transformers/enums_producer.py 24%
generator/transformers/functions_producer.py 18%
generator/transformers/structs_producer.py 32%
After performing tests, test code coverage is as follows:
generator/transformers/common_producer.py 100%
generator/transformers/enums_producer.py 100%
generator/transformers/functions_producer.py 100%
generator/transformers/structs_producer.py 100%
"""
config_logging()
suite = TestSuite()
suite.addTests(TestLoader().loadTestsFromTestCase(TestFunctionsProducer))
suite.addTests(TestLoader().loadTestsFromTestCase(TestStructsProducer))
suite.addTests(TestLoader().loadTestsFromTestCase(TestEnumsProducer))
suite.addTests(TestLoader().loadTestsFromTestCase(CodeFormatAndQuality))
runner = TextTestRunner(verbosity=2)
test_results = runner.run(suite)
if test_results.wasSuccessful():
exit(0)
else:
exit(1)
if __name__ == '__main__':
"""
Entry point for parser and generator.
"""
main()
|
42cef4e8bc7e274a033e6bef9abe37e5c1513d47
|
914faa10e5423efc87d0079248b3eb7df72ed83e
|
/test/numbers/floats2.py
|
3f74ac5b52fd27ae993283464039d1efa3ba741c
|
[
"MIT"
] |
permissive
|
MagicStack/MagicPython
|
cf7b7ae8290b0e997adf6a197b2f5be300391a0a
|
7d0f2b22a5ad8fccbd7341bc7b7a715169283044
|
refs/heads/master
| 2023-08-26T04:16:54.672649
| 2022-10-18T07:43:20
| 2022-10-19T23:20:38
| 43,982,620
| 1,564
| 146
|
MIT
| 2023-02-23T19:40:57
| 2015-10-09T22:13:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
floats2.py
|
1_234.567_890
0.456_789
000.000_1
.012_34
1_234e5_000
1_234e-5_000
000_123e-000_5
1_234.567_8e+5_000
0.456_78e-5_000
1_234.567_890 : constant.numeric.float.python, source.python
0.456_789 : constant.numeric.float.python, source.python
000.000_1 : constant.numeric.float.python, source.python
.012_34 : constant.numeric.float.python, source.python
1_234e5_000 : constant.numeric.float.python, source.python
1_234e-5_000 : constant.numeric.float.python, source.python
000_123e-000_5 : constant.numeric.float.python, source.python
1_234.567_8e+5_000 : constant.numeric.float.python, source.python
0.456_78e-5_000 : constant.numeric.float.python, source.python
|
dd976657068787bf35ad912909066c47fb919c68
|
3f7da3c27ae3c32814e6dd7deefc0c48e68d3fba
|
/tests/test_libuv_api.py
|
3cda4bcd869467e8e29b1f58d2a16c529a763b52
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
MagicStack/uvloop
|
0aacdcc19a115dabd0860d032969edc0aa708be5
|
1dd40f17f3b0d37e3779b6ad5041bab335142337
|
refs/heads/master
| 2023-09-04T02:07:11.524338
| 2023-08-11T17:25:03
| 2023-08-11T17:25:03
| 45,766,550
| 10,234
| 650
|
Apache-2.0
| 2023-09-07T17:27:10
| 2015-11-08T04:14:54
|
Cython
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
test_libuv_api.py
|
from uvloop import _testbase as tb
from uvloop.loop import libuv_get_loop_t_ptr, libuv_get_version
class Test_UV_libuv(tb.UVTestCase):
def test_libuv_get_loop_t_ptr(self):
loop = self.new_loop()
cap1 = libuv_get_loop_t_ptr(loop)
cap2 = libuv_get_loop_t_ptr(loop)
cap3 = libuv_get_loop_t_ptr(self.new_loop())
import pyximport
pyximport.install()
import cython_helper
self.assertTrue(cython_helper.capsule_equals(cap1, cap2))
self.assertFalse(cython_helper.capsule_equals(cap1, cap3))
def test_libuv_get_version(self):
self.assertGreater(libuv_get_version(), 0)
|
9dbc11ba4858fd10ed1b79dbd863d4d6e730d4af
|
4f93f5a27b6c872903b9acf8d20fb736716a26df
|
/preprocess/SelectHetSnp.py
|
d42a0663b33602351a4dcb8c9b57930c8f79f9a7
|
[
"BSD-3-Clause"
] |
permissive
|
HKU-BAL/Clair3
|
cf388ae3d0b0332eb8df125fb1e1a97120a90ed1
|
181f55d7a741855597d083baffc4551949d2837e
|
refs/heads/main
| 2023-07-28T05:34:32.917498
| 2023-07-20T02:58:19
| 2023-07-20T02:58:19
| 352,969,947
| 162
| 18
| null | 2023-06-01T14:15:31
| 2021-03-30T11:02:58
|
Python
|
UTF-8
|
Python
| false
| false
| 18,190
|
py
|
SelectHetSnp.py
|
import shlex
import os
import sys
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.intervaltree.intervaltree import IntervalTree
import shared.param_f as param
from shared.utils import subprocess_popen
def FiterHeteSnpPhasing(args):
"""
Filter heterozygous SNP variants for phasing. Currently we only filter out SNP
variants with a low quality score, as low-quality calls contain more false
positives, which would lead to a larger minimum error correction loss.
"""
qual_fn = args.qual_fn if args.qual_fn is not None else 'phase_qual'
vcf_fn = args.vcf_fn
var_pct_full = args.var_pct_full
contig_name = args.ctgName
split_folder = args.split_folder
variant_dict = defaultdict(str)
qual_set = defaultdict(int)
found_qual_cut_off = False
header = []
#try to find the global quality cut off:
f_qual = os.path.join(split_folder, qual_fn)
if os.path.exists(f_qual):
phase_qual_cut_off = float(open(f_qual, 'r').read().rstrip())
found_qual_cut_off = True
unzip_process = subprocess_popen(shlex.split("gzip -fdc %s" % (vcf_fn)))
for row in unzip_process.stdout:
row = row.rstrip()
if row[0] == '#':
header.append(row + '\n')
continue
columns = row.strip().split()
ctg_name = columns[0]
if contig_name and contig_name != ctg_name:
continue
pos = int(columns[1])
ref_base = columns[3]
alt_base = columns[4]
genotype = columns[9].split(':')[0].replace('|', '/')
if len(ref_base) == 1 and len(alt_base) == 1:
if genotype == '0/1' or genotype == '1/0':
variant_dict[pos] = row
qual = float(columns[5])
qual_set[pos] = qual
if found_qual_cut_off:
remove_low_qual_list = [[k,v] for k,v in qual_set.items() if v < phase_qual_cut_off ]
else:
remove_low_qual_list = sorted(qual_set.items(), key=lambda x: x[1])[:int(var_pct_full * len(qual_set))]
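# e.g. with var_pct_full=0.3, 1000 candidate het SNPs, and no global cutoff
# file found, the 300 lowest-quality positions land in remove_low_qual_list.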
for pos, qual in remove_low_qual_list:
del variant_dict[pos]
print ('[INFO] Total heterozygous SNP positions selected: {}: {}'.format(contig_name, len(variant_dict)))
f = open(os.path.join(split_folder, '{}.vcf'.format(contig_name)), 'w')
f.write(''.join(header))
for key,row in sorted(variant_dict.items(), key=lambda x: x[0]):
f.write(row +'\n')
f.close()
def FiterHeteSnp_FP(args):
"""
Filter heterozygous SNP variants for calling. This is a testing function to
validate the effect of various phasing proportions on full-alignment calling;
currently for testing only.
"""
vcf_fn = args.vcf_fn
proportion = args.proportion
chr_prefix = args.chr_prefix
contig_name = args.ctgName
phasing_window_size = param.phasing_window_size
unzip_process = subprocess_popen(shlex.split("gzip -fdc %s" % (vcf_fn)))
split_bed_size = args.split_bed_size
split_folder = args.split_folder
output = []
snp = []
need_phasing_list = []
chr_prefix_length = len(chr_prefix)
variant_dict = defaultdict(str)
for row in unzip_process.stdout:
if row[0] == '#':
output.append(row.rstrip())
continue
columns = row.strip().split()
ctg_name = columns[0]
if contig_name and contig_name != ctg_name:
continue
pos = int(columns[1])
ref_base = columns[3]
alt_base = columns[4]
genotype = columns[9].split(':')[0].replace('|', '/')
qual = int(columns[5])
if len(ref_base) == 1 and len(alt_base) == 1:
if genotype == '0/1':
snp.append((qual, pos))
variant_dict[pos] = ref_base + '-' + alt_base
else:
need_phasing_list.append((qual, pos))
qual_list = sorted(snp, key=lambda x: x[0])
print('[INFO] Total hete snp variants:', len(qual_list))
cut_off_index = int(len(qual_list) * proportion)
hete_snp_row_list = [item[1] for item in qual_list[cut_off_index:]]
print('[INFO] Total hete snp positions kept after quality filtering:', len(hete_snp_row_list))
qual_list = sorted(need_phasing_list, key=lambda x: -x[0])
cut_off_index = int(len(qual_list) * proportion)
need_phasing_row_list = sorted([item[1] for item in qual_list[cut_off_index:]])
print('[INFO] Total variants need to be phased:', len(need_phasing_row_list))
phasing_tree = IntervalTree()
for item_idx, item in enumerate(need_phasing_list):
pos = item[1]
start = pos - phasing_window_size
end = pos + phasing_window_size
phasing_tree.addi(start, end)
snp_tree = IntervalTree()
for item in hete_snp_row_list:
if len(phasing_tree.at(item)): snp_tree.addi(item, item + 1)
region_num = len(need_phasing_row_list) // split_bed_size + 1 if len(need_phasing_row_list) % split_bed_size else len(need_phasing_row_list) // split_bed_size
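# Ceil division: e.g. 2,500 candidates with split_bed_size=1000 yields 3 regions.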
for idx in range(region_num):
split_output = need_phasing_row_list[idx * split_bed_size : (idx+1) * split_bed_size]
start, end = split_output[0] - phasing_window_size, split_output[-1] + phasing_window_size
overlaps = snp_tree.overlap(start, end)
snp_split_out = []
for overlap in overlaps:
snp_split_out.append((overlap[0], overlap[0] + 1, 1))
split_output = [(item - param.flankingBaseNum, item+1 + param.flankingBaseNum, 0) for item in split_output] # a windows region for create tensor
print (len(split_output), len(snp_split_out))
split_output += snp_split_out
split_output = sorted(split_output, key=lambda x: x[0])
with open(os.path.join(split_folder, 'split_{}.{}'.format(contig_name[chr_prefix_length:], idx)), 'w') as output_file:
output_file.write('\n'.join(['\t'.join([contig_name, str(x[0]-1), str(x[1]-1), str(x[2]), variant_dict[x[0]]]) for x in split_output]) + '\n') # bed format
def FiterHeteSnp(args):
"""
Filter heterozygous SNP variants for training. If there are too many candidates
for full-alignment training, we select more of the low-quality variants, which
are more challenging for the pileup model to predict; using more information
will benefit calling those variants.
"""
vcf_fn = args.vcf_fn # true vcf var
alt_fn = args.alt_fn
var_pct_full = args.var_pct_full
ref_pct_full = args.ref_pct_full if args.ref_pct_full is not None else var_pct_full
chr_prefix = args.chr_prefix
contig_name = args.ctgName
phasing_window_size = param.phasing_window_size
chunk_id = args.chunk_id - 1 if args.chunk_id else None # 1-base to 0-base
DEPTH = args.depth
chunk_num = args.chunk_num
sample_name = args.sampleName
split_bed_size = args.split_bed_size
split_folder = args.split_folder
extend_bp = param.extend_bp
phasing_info_in_bam = args.phasing_info_in_bam
need_phasing_list = []
need_phasing_set = set()
ref_call_pos_list = []
chr_prefix_length = len(chr_prefix)
variant_dict = defaultdict(str)
realign_window_size = args.realign_window_size if args.realign_window_size is not None else param.flankingBaseNum
candidate_positions = set()
if vcf_fn and os.path.exists(vcf_fn):
unzip_process = subprocess_popen(shlex.split("gzip -fdc %s" % (vcf_fn)))
for row in unzip_process.stdout:
row = row.rstrip()
if row[0] == '#':
continue
columns = row.strip().split('\t')
ctg_name = columns[0]
if contig_name and contig_name != ctg_name:
continue
pos = int(columns[1])
ref_base = columns[3]
alt_base = columns[4]
genotype_info = columns[9].split(':')
genotype, phase_set = genotype_info[0], genotype_info[-1]
if '|' not in genotype: #unphasable
continue
variant_dict[pos] = '-'.join([ref_base, alt_base, ('1' if genotype == '0|1' else '2'), phase_set])
if alt_fn is not None:
# vcf format
unzip_process = subprocess_popen(shlex.split("gzip -fdc %s" % (alt_fn)))
for row in unzip_process.stdout:
if row[0] == '#':
continue
columns = row.rstrip().split('\t')
ctg_name = columns[0]
if contig_name and contig_name != ctg_name:
continue
pos = int(columns[1])
ref_base = columns[3]
alt_base = columns[4]
qual = float(columns[5])
candidate_positions.add(pos)
#ref_call was marked as '.' after v0.1-r5
if ref_base == alt_base or alt_base == ".":
ref_call_pos_list.append((pos,qual))
else:
need_phasing_list.append((pos,qual))
need_phasing_set.add(pos)
low_qual_ref_list = sorted(ref_call_pos_list, key=lambda x: x[1])[:int(ref_pct_full * len(ref_call_pos_list))]
low_qual_variant_list = sorted(need_phasing_list, key=lambda x: x[1])[:int(var_pct_full * len(need_phasing_list))]
# calling with phasing_info_in_bam: select low-qual ref calls and low-qual variants for phased calling
if phasing_info_in_bam:
print('[INFO] {} {} total low qual ref calling to process: {}'.format(sample_name, contig_name, len(low_qual_ref_list)))
print('[INFO] {} {} total low qual variant calling to process: {}'.format(sample_name, contig_name, len(low_qual_variant_list)))
need_phasing_row_list = set([item[0] for item in low_qual_ref_list] + [item[0] for item in low_qual_variant_list])
need_phasing_row_list = sorted(list(need_phasing_row_list))
if chunk_num:
all_candidate_size = len(need_phasing_row_list)
chunk_size = all_candidate_size // chunk_num + 1 if all_candidate_size % chunk_num else all_candidate_size // chunk_num
for chunk_idx in range(chunk_num):
start_pos = chunk_idx * chunk_size
end_pos = min(start_pos + chunk_size, all_candidate_size)
split_output = need_phasing_row_list[start_pos:end_pos]
split_output = [(item - realign_window_size, item + realign_window_size + 2) for item in
split_output] # a windows region for create tensor # samtools mpileup not include last position
split_output = sorted(split_output, key=lambda x: x[0])
with open(os.path.join(split_folder,
'{}_{}_{}_{}'.format(sample_name, DEPTH, contig_name[chr_prefix_length:], chunk_idx+1)), # zero-base to one-base
'w') as output_file:
output_file.write('\n'.join(
['\t'.join([contig_name, str(x[0] - 1), str(x[1] - 1), ]) for x in
split_output]) + '\n') # bed format
return
region_num = len(need_phasing_row_list) // split_bed_size + 1 if len(
need_phasing_row_list) % split_bed_size else len(need_phasing_row_list) // split_bed_size
for idx in range(region_num):
split_output = need_phasing_row_list[idx * split_bed_size: (idx + 1) * split_bed_size]
split_output = [(item - realign_window_size, item + realign_window_size + 2) for item in
split_output] # a windows region for create tensor # samtools mpileup not include last position
split_output = sorted(split_output, key=lambda x: x[0])
with open(os.path.join(split_folder, '{}.{}_{}'.format(contig_name[chr_prefix_length:], split_output[0][0], split_output[-1][1])),
'w') as output_file:
output_file.write('\n'.join(
['\t'.join([contig_name, str(x[0] - 1), str(x[1] - 1),]) for x in
split_output]) + '\n') # bed format
return
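        # Without phasing info in the BAM, the low-quality reference calls are
        # also re-processed, so fold them into need_phasing_set as well.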
for pos, qual in low_qual_ref_list:
need_phasing_set.add(pos)
    # train or call using all positions
elif args.all_alt_fn is not None:
unzip_process = subprocess_popen(shlex.split("gzip -fdc %s" % (args.all_alt_fn)))
for row in unzip_process.stdout:
if row[0] == '#':
continue
columns = row.rstrip().split('\t')
ctg_name, pos = columns[0].split()
pos = int(pos)
if contig_name and contig_name != ctg_name:
continue
need_phasing_set.add(pos)
    need_phasing_row_list = sorted(need_phasing_set)
snp_tree = IntervalTree()
hete_snp_row_list = sorted(list(set(variant_dict.keys()).intersection(set(need_phasing_row_list))))
    print('[INFO] Total heterozygous SNPs with read support in {}: '.format(contig_name), len(hete_snp_row_list))
    print('[INFO] Total candidates to be processed in {}: '.format(contig_name), len(need_phasing_row_list))
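    # Index the usable heterozygous SNPs in an interval tree so phased SNPs
    # near each candidate window can be collected efficiently below.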
for item in hete_snp_row_list:
snp_tree.addi(item, item + 1)
    region_num = (len(need_phasing_row_list) + split_bed_size - 1) // split_bed_size  # ceiling division
for idx in range(region_num):
split_output = need_phasing_row_list[idx * split_bed_size : (idx+1) * split_bed_size]
start = split_output[0]
end = split_output[-1]
extend_start, extend_end = start - phasing_window_size, end + phasing_window_size
overlaps = snp_tree.overlap(extend_start, extend_end)
snp_split_out = []
for overlap in overlaps:
            snp_split_out.append((contig_name, overlap[0] - extend_bp - 1 - 1, overlap[0] + 1 + extend_bp - 1, variant_dict[overlap[0]]))  # bed format
        split_output = [(contig_name, item - realign_window_size - 1, item + realign_window_size + 1 - 1) for item in split_output]  # window regions for tensor creation, in BED format
split_output += snp_split_out
split_output = sorted(split_output, key=lambda x: x[1])
with open(os.path.join(split_folder, '{}.{}_{}'.format(contig_name[chr_prefix_length:], start, end)), 'w') as output_file:
output_file.write('\n'.join(['\t'.join(map(str, x)) for x in split_output]) + '\n') # bed format
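# Example invocation (hypothetical paths and script name, shown for context):
#   python SelectHetSnp.py --split_folder ./split --vcf_fn phased.vcf.gz \
#       --ctgName chr20 --var_pct_full 0.3 --ref_pct_full 0.1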
def main():
parser = ArgumentParser(description="Select heterozygous snp candidates for WhatsHap phasing")
    parser.add_argument('--split_folder', type=str, default=None,
                        help="Path to the directory that stores small BED regions for raw alignment. (default: %(default)s)")
parser.add_argument('--vcf_fn', type=str, default=None,
help="Path of the input vcf file. (default: %(default)s)")
    parser.add_argument('--var_pct_full', type=float, default=0.3,
                        help="Proportion of the lowest-quality variant calls selected for raw alignment, or removed as low quality before WhatsHap phasing. (default: %(default)f)")
    parser.add_argument('--ref_pct_full', type=float, default=None,
                        help="Proportion of the lowest-quality reference calls selected for raw alignment, or removed as low quality before WhatsHap phasing. (default: %(default)s)")
    parser.add_argument('--ctgName', type=str, default=None,
                        help="The name of the sequence to be processed, default: %(default)s")
    parser.add_argument('--phase', action='store_false',
                        help="Only select heterozygous candidates for phasing; passing this flag disables it, default: True")
parser.add_argument('--sampleName', type=str, default="",
help="Define the sample name to be shown in the VCF file, optional")
# options for debug purpose
    parser.add_argument('--phasing_info_in_bam', action='store_true',
                        help="DEBUG: Input BAM or SAM has phasing info in the HP tag, default: False")
    parser.add_argument('--split_bed_size', type=int, default=1000,
                        help="DEBUG: Default split BED size for parallel execution, default: %(default)s")
    parser.add_argument('--calling', type=int, default=0,
                        help="DEBUG: Set to 1 to select candidates for calling instead of phasing, default: %(default)s")
    parser.add_argument('--realign_window_size', type=int, default=None,
                        help="DEBUG: The window size for read realignment; used together with need_realignment")
    parser.add_argument('--split_region_size', type=int, default=40000000,
                        help="DEBUG: Split region size for VCF phasing, default: %(default)s")
# options for internal process control
    ## The number of chunks to be divided into for parallel processing
parser.add_argument('--chunk_num', type=int, default=None,
help=SUPPRESS)
    ## The chunk ID to work on
parser.add_argument('--chunk_id', type=int, default=None,
help=SUPPRESS)
## Output all alternative candidates path
parser.add_argument('--all_alt_fn', type=str, default=None,
help=SUPPRESS)
## Default chr prefix for contig name
parser.add_argument('--chr_prefix', type=str, default='chr',
help=SUPPRESS)
    ## Default subsampling depth for the subsampled BAM file; 1000 means no subsampling
parser.add_argument('--depth', type=int, default=1000,
help=SUPPRESS)
## Path of provided alternative file
parser.add_argument('--alt_fn', type=str, default=None,
help=SUPPRESS)
## Input the file that contains the quality cut-off for selecting low-quality pileup calls for phasing and full-alignment calling
parser.add_argument('--qual_fn', type=str, default=None,
help=SUPPRESS)
args = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit(1)
if args.phase:
FiterHeteSnpPhasing(args)
elif args.calling == 1:
FiterHeteSnp_FP(args)
else:
FiterHeteSnp(args)
if __name__ == "__main__":
main()
# --- next file: /sdk/python/pulumi_azure_native/providerhub/_inputs.py
# --- (repo: pulumi/pulumi-azure-native, licenses: BSD-3-Clause, Apache-2.0)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'AuthorizationActionMappingArgs',
'DefaultRolloutPropertiesSpecificationArgs',
'DefaultRolloutPropertiesStatusArgs',
'DefaultRolloutPropertiesArgs',
'DefaultRolloutSpecificationCanaryArgs',
'DefaultRolloutSpecificationExpeditedRolloutArgs',
'DefaultRolloutSpecificationHighTrafficArgs',
'DefaultRolloutSpecificationLowTrafficArgs',
'DefaultRolloutSpecificationMediumTrafficArgs',
'DefaultRolloutSpecificationProviderRegistrationArgs',
'DefaultRolloutSpecificationRestOfTheWorldGroupOneArgs',
'DefaultRolloutSpecificationRestOfTheWorldGroupTwoArgs',
'ExtendedErrorInfoArgs',
'ExtendedLocationOptionsArgs',
'LightHouseAuthorizationArgs',
'LinkedAccessCheckArgs',
'LoggingRuleHiddenPropertyPathsArgs',
'LoggingRuleArgs',
'NotificationEndpointArgs',
'NotificationRegistrationPropertiesArgs',
'OpenApiConfigurationArgs',
'OpenApiValidationArgs',
'ProviderHubMetadataProviderAuthenticationArgs',
'ProviderHubMetadataThirdPartyProviderAuthorizationArgs',
'ProviderRegistrationPropertiesProviderHubMetadataArgs',
'ProviderRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs',
'ProviderRegistrationPropertiesArgs',
'ResourceConcurrencyControlOptionArgs',
'ResourceProviderAuthorizationArgs',
'ResourceProviderCapabilitiesArgs',
'ResourceProviderManifestPropertiesFeaturesRuleArgs',
'ResourceProviderManifestPropertiesManagementArgs',
'ResourceProviderManifestPropertiesProviderAuthenticationArgs',
'ResourceProviderManifestPropertiesRequestHeaderOptionsArgs',
'ResourceProviderManifestPropertiesTemplateDeploymentOptionsArgs',
'ResourceTypeEndpointFeaturesRuleArgs',
'ResourceTypeEndpointArgs',
'ResourceTypeExtensionOptionsResourceCreationBeginArgs',
'ResourceTypeExtensionArgs',
'ResourceTypeRegistrationPropertiesCheckNameAvailabilitySpecificationsArgs',
'ResourceTypeRegistrationPropertiesExtensionOptionsArgs',
'ResourceTypeRegistrationPropertiesFeaturesRuleArgs',
'ResourceTypeRegistrationPropertiesIdentityManagementArgs',
'ResourceTypeRegistrationPropertiesManagementArgs',
'ResourceTypeRegistrationPropertiesRequestHeaderOptionsArgs',
'ResourceTypeRegistrationPropertiesResourceGraphConfigurationArgs',
'ResourceTypeRegistrationPropertiesResourceMovePolicyArgs',
'ResourceTypeRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs',
'ResourceTypeRegistrationPropertiesTemplateDeploymentOptionsArgs',
'ResourceTypeRegistrationPropertiesArgs',
'ResourceTypeRegistrationArgs',
'ServiceTreeInfoArgs',
'SkuCapabilityArgs',
'SkuCostArgs',
'SkuLocationInfoArgs',
'SkuResourcePropertiesArgs',
'SkuSettingCapacityArgs',
'SkuSettingArgs',
'SkuZoneDetailArgs',
'SubscriptionStateOverrideActionArgs',
'SubscriptionStateRuleArgs',
'SwaggerSpecificationArgs',
'ThrottlingMetricArgs',
'ThrottlingRuleArgs',
'TypedErrorInfoArgs',
]
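# Each *Args class below follows the same generated pattern: an input holder
# whose properties delegate to pulumi.get/pulumi.set, so every field accepts
# either a plain value or a pulumi Output resolved at deployment time.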
@pulumi.input_type
class AuthorizationActionMappingArgs:
def __init__(__self__, *,
desired: Optional[pulumi.Input[str]] = None,
original: Optional[pulumi.Input[str]] = None):
if desired is not None:
pulumi.set(__self__, "desired", desired)
if original is not None:
pulumi.set(__self__, "original", original)
@property
@pulumi.getter
def desired(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "desired")
@desired.setter
def desired(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "desired", value)
@property
@pulumi.getter
def original(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "original")
@original.setter
def original(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "original", value)
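# Usage sketch (action names are illustrative, not taken from a real manifest):
#   mapping = AuthorizationActionMappingArgs(
#       original="Microsoft.Contoso/resources/read",
#       desired="Microsoft.Authorization/roleAssignments/read")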
@pulumi.input_type
class DefaultRolloutPropertiesSpecificationArgs:
def __init__(__self__, *,
canary: Optional[pulumi.Input['DefaultRolloutSpecificationCanaryArgs']] = None,
expedited_rollout: Optional[pulumi.Input['DefaultRolloutSpecificationExpeditedRolloutArgs']] = None,
high_traffic: Optional[pulumi.Input['DefaultRolloutSpecificationHighTrafficArgs']] = None,
low_traffic: Optional[pulumi.Input['DefaultRolloutSpecificationLowTrafficArgs']] = None,
medium_traffic: Optional[pulumi.Input['DefaultRolloutSpecificationMediumTrafficArgs']] = None,
provider_registration: Optional[pulumi.Input['DefaultRolloutSpecificationProviderRegistrationArgs']] = None,
resource_type_registrations: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeRegistrationArgs']]]] = None,
rest_of_the_world_group_one: Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupOneArgs']] = None,
rest_of_the_world_group_two: Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupTwoArgs']] = None):
if canary is not None:
pulumi.set(__self__, "canary", canary)
if expedited_rollout is not None:
pulumi.set(__self__, "expedited_rollout", expedited_rollout)
if high_traffic is not None:
pulumi.set(__self__, "high_traffic", high_traffic)
if low_traffic is not None:
pulumi.set(__self__, "low_traffic", low_traffic)
if medium_traffic is not None:
pulumi.set(__self__, "medium_traffic", medium_traffic)
if provider_registration is not None:
pulumi.set(__self__, "provider_registration", provider_registration)
if resource_type_registrations is not None:
pulumi.set(__self__, "resource_type_registrations", resource_type_registrations)
if rest_of_the_world_group_one is not None:
pulumi.set(__self__, "rest_of_the_world_group_one", rest_of_the_world_group_one)
if rest_of_the_world_group_two is not None:
pulumi.set(__self__, "rest_of_the_world_group_two", rest_of_the_world_group_two)
@property
@pulumi.getter
def canary(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationCanaryArgs']]:
return pulumi.get(self, "canary")
@canary.setter
def canary(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationCanaryArgs']]):
pulumi.set(self, "canary", value)
@property
@pulumi.getter(name="expeditedRollout")
def expedited_rollout(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationExpeditedRolloutArgs']]:
return pulumi.get(self, "expedited_rollout")
@expedited_rollout.setter
def expedited_rollout(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationExpeditedRolloutArgs']]):
pulumi.set(self, "expedited_rollout", value)
@property
@pulumi.getter(name="highTraffic")
def high_traffic(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationHighTrafficArgs']]:
return pulumi.get(self, "high_traffic")
@high_traffic.setter
def high_traffic(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationHighTrafficArgs']]):
pulumi.set(self, "high_traffic", value)
@property
@pulumi.getter(name="lowTraffic")
def low_traffic(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationLowTrafficArgs']]:
return pulumi.get(self, "low_traffic")
@low_traffic.setter
def low_traffic(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationLowTrafficArgs']]):
pulumi.set(self, "low_traffic", value)
@property
@pulumi.getter(name="mediumTraffic")
def medium_traffic(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationMediumTrafficArgs']]:
return pulumi.get(self, "medium_traffic")
@medium_traffic.setter
def medium_traffic(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationMediumTrafficArgs']]):
pulumi.set(self, "medium_traffic", value)
@property
@pulumi.getter(name="providerRegistration")
def provider_registration(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationProviderRegistrationArgs']]:
return pulumi.get(self, "provider_registration")
@provider_registration.setter
def provider_registration(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationProviderRegistrationArgs']]):
pulumi.set(self, "provider_registration", value)
@property
@pulumi.getter(name="resourceTypeRegistrations")
def resource_type_registrations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeRegistrationArgs']]]]:
return pulumi.get(self, "resource_type_registrations")
@resource_type_registrations.setter
def resource_type_registrations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeRegistrationArgs']]]]):
pulumi.set(self, "resource_type_registrations", value)
@property
@pulumi.getter(name="restOfTheWorldGroupOne")
def rest_of_the_world_group_one(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupOneArgs']]:
return pulumi.get(self, "rest_of_the_world_group_one")
@rest_of_the_world_group_one.setter
def rest_of_the_world_group_one(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupOneArgs']]):
pulumi.set(self, "rest_of_the_world_group_one", value)
@property
@pulumi.getter(name="restOfTheWorldGroupTwo")
def rest_of_the_world_group_two(self) -> Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupTwoArgs']]:
return pulumi.get(self, "rest_of_the_world_group_two")
@rest_of_the_world_group_two.setter
def rest_of_the_world_group_two(self, value: Optional[pulumi.Input['DefaultRolloutSpecificationRestOfTheWorldGroupTwoArgs']]):
pulumi.set(self, "rest_of_the_world_group_two", value)
@pulumi.input_type
class DefaultRolloutPropertiesStatusArgs:
def __init__(__self__, *,
completed_regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
failed_or_skipped_regions: Optional[pulumi.Input[Mapping[str, pulumi.Input['ExtendedErrorInfoArgs']]]] = None,
next_traffic_region: Optional[pulumi.Input[Union[str, 'TrafficRegionCategory']]] = None,
next_traffic_region_scheduled_time: Optional[pulumi.Input[str]] = None,
subscription_reregistration_result: Optional[pulumi.Input[Union[str, 'SubscriptionReregistrationResult']]] = None):
if completed_regions is not None:
pulumi.set(__self__, "completed_regions", completed_regions)
if failed_or_skipped_regions is not None:
pulumi.set(__self__, "failed_or_skipped_regions", failed_or_skipped_regions)
if next_traffic_region is not None:
pulumi.set(__self__, "next_traffic_region", next_traffic_region)
if next_traffic_region_scheduled_time is not None:
pulumi.set(__self__, "next_traffic_region_scheduled_time", next_traffic_region_scheduled_time)
if subscription_reregistration_result is not None:
pulumi.set(__self__, "subscription_reregistration_result", subscription_reregistration_result)
@property
@pulumi.getter(name="completedRegions")
def completed_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "completed_regions")
@completed_regions.setter
def completed_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "completed_regions", value)
@property
@pulumi.getter(name="failedOrSkippedRegions")
def failed_or_skipped_regions(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['ExtendedErrorInfoArgs']]]]:
return pulumi.get(self, "failed_or_skipped_regions")
@failed_or_skipped_regions.setter
def failed_or_skipped_regions(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['ExtendedErrorInfoArgs']]]]):
pulumi.set(self, "failed_or_skipped_regions", value)
@property
@pulumi.getter(name="nextTrafficRegion")
def next_traffic_region(self) -> Optional[pulumi.Input[Union[str, 'TrafficRegionCategory']]]:
return pulumi.get(self, "next_traffic_region")
@next_traffic_region.setter
def next_traffic_region(self, value: Optional[pulumi.Input[Union[str, 'TrafficRegionCategory']]]):
pulumi.set(self, "next_traffic_region", value)
@property
@pulumi.getter(name="nextTrafficRegionScheduledTime")
def next_traffic_region_scheduled_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "next_traffic_region_scheduled_time")
@next_traffic_region_scheduled_time.setter
def next_traffic_region_scheduled_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "next_traffic_region_scheduled_time", value)
@property
@pulumi.getter(name="subscriptionReregistrationResult")
def subscription_reregistration_result(self) -> Optional[pulumi.Input[Union[str, 'SubscriptionReregistrationResult']]]:
return pulumi.get(self, "subscription_reregistration_result")
@subscription_reregistration_result.setter
def subscription_reregistration_result(self, value: Optional[pulumi.Input[Union[str, 'SubscriptionReregistrationResult']]]):
pulumi.set(self, "subscription_reregistration_result", value)
@pulumi.input_type
class DefaultRolloutPropertiesArgs:
def __init__(__self__, *,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None,
specification: Optional[pulumi.Input['DefaultRolloutPropertiesSpecificationArgs']] = None,
status: Optional[pulumi.Input['DefaultRolloutPropertiesStatusArgs']] = None):
"""
Properties of the rollout.
:param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: The provisioned state of the resource.
"""
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if specification is not None:
pulumi.set(__self__, "specification", specification)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter
def specification(self) -> Optional[pulumi.Input['DefaultRolloutPropertiesSpecificationArgs']]:
return pulumi.get(self, "specification")
@specification.setter
def specification(self, value: Optional[pulumi.Input['DefaultRolloutPropertiesSpecificationArgs']]):
pulumi.set(self, "specification", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['DefaultRolloutPropertiesStatusArgs']]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['DefaultRolloutPropertiesStatusArgs']]):
pulumi.set(self, "status", value)
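# DefaultRolloutPropertiesArgs pairs the desired rollout plan (specification)
# with its service-reported progress (status); both fields are optional,
# mirroring the generated REST shape.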
@pulumi.input_type
class DefaultRolloutSpecificationCanaryArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
skip_regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if skip_regions is not None:
pulumi.set(__self__, "skip_regions", skip_regions)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="skipRegions")
def skip_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "skip_regions")
@skip_regions.setter
def skip_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "skip_regions", value)
@pulumi.input_type
class DefaultRolloutSpecificationExpeditedRolloutArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] enabled: Indicates whether expedited rollout is enabled/disabled
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether expedited rollout is enabled/disabled
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class DefaultRolloutSpecificationHighTrafficArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_duration: Optional[pulumi.Input[str]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if wait_duration is not None:
pulumi.set(__self__, "wait_duration", wait_duration)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="waitDuration")
def wait_duration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "wait_duration")
@wait_duration.setter
def wait_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_duration", value)
@pulumi.input_type
class DefaultRolloutSpecificationLowTrafficArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_duration: Optional[pulumi.Input[str]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if wait_duration is not None:
pulumi.set(__self__, "wait_duration", wait_duration)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="waitDuration")
def wait_duration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "wait_duration")
@wait_duration.setter
def wait_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_duration", value)
@pulumi.input_type
class DefaultRolloutSpecificationMediumTrafficArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_duration: Optional[pulumi.Input[str]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if wait_duration is not None:
pulumi.set(__self__, "wait_duration", wait_duration)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="waitDuration")
def wait_duration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "wait_duration")
@wait_duration.setter
def wait_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_duration", value)
@pulumi.input_type
class DefaultRolloutSpecificationProviderRegistrationArgs:
def __init__(__self__, *,
properties: Optional[pulumi.Input['ProviderRegistrationPropertiesArgs']] = None):
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['ProviderRegistrationPropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['ProviderRegistrationPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class DefaultRolloutSpecificationRestOfTheWorldGroupOneArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_duration: Optional[pulumi.Input[str]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if wait_duration is not None:
pulumi.set(__self__, "wait_duration", wait_duration)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="waitDuration")
def wait_duration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "wait_duration")
@wait_duration.setter
def wait_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_duration", value)
@pulumi.input_type
class DefaultRolloutSpecificationRestOfTheWorldGroupTwoArgs:
def __init__(__self__, *,
regions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_duration: Optional[pulumi.Input[str]] = None):
if regions is not None:
pulumi.set(__self__, "regions", regions)
if wait_duration is not None:
pulumi.set(__self__, "wait_duration", wait_duration)
@property
@pulumi.getter
def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "regions")
@regions.setter
def regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "regions", value)
@property
@pulumi.getter(name="waitDuration")
def wait_duration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "wait_duration")
@wait_duration.setter
def wait_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_duration", value)
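# Note: canary aside, every traffic-group class above carries the same
# regions/wait_duration pair; wait_duration is a duration string (presumably
# ISO 8601, e.g. "PT24H"; an assumption, not stated by the generator).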
@pulumi.input_type
class ExtendedErrorInfoArgs:
def __init__(__self__, *,
additional_info: Optional[pulumi.Input[Sequence[pulumi.Input['TypedErrorInfoArgs']]]] = None,
code: Optional[pulumi.Input[str]] = None,
details: Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedErrorInfoArgs']]]] = None,
message: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None):
if additional_info is not None:
pulumi.set(__self__, "additional_info", additional_info)
if code is not None:
pulumi.set(__self__, "code", code)
if details is not None:
pulumi.set(__self__, "details", details)
if message is not None:
pulumi.set(__self__, "message", message)
if target is not None:
pulumi.set(__self__, "target", target)
@property
@pulumi.getter(name="additionalInfo")
def additional_info(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TypedErrorInfoArgs']]]]:
return pulumi.get(self, "additional_info")
@additional_info.setter
def additional_info(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TypedErrorInfoArgs']]]]):
pulumi.set(self, "additional_info", value)
@property
@pulumi.getter
def code(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "code")
@code.setter
def code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "code", value)
@property
@pulumi.getter
def details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedErrorInfoArgs']]]]:
return pulumi.get(self, "details")
@details.setter
def details(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedErrorInfoArgs']]]]):
pulumi.set(self, "details", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "target")
@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target", value)
@pulumi.input_type
class ExtendedLocationOptionsArgs:
def __init__(__self__, *,
supported_policy: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
if supported_policy is not None:
pulumi.set(__self__, "supported_policy", supported_policy)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="supportedPolicy")
def supported_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "supported_policy")
@supported_policy.setter
def supported_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "supported_policy", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class LightHouseAuthorizationArgs:
def __init__(__self__, *,
principal_id: pulumi.Input[str],
role_definition_id: pulumi.Input[str]):
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "role_definition_id", role_definition_id)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_definition_id", value)
@pulumi.input_type
class LinkedAccessCheckArgs:
def __init__(__self__, *,
action_name: Optional[pulumi.Input[str]] = None,
linked_action: Optional[pulumi.Input[str]] = None,
linked_action_verb: Optional[pulumi.Input[str]] = None,
linked_property: Optional[pulumi.Input[str]] = None,
linked_type: Optional[pulumi.Input[str]] = None):
if action_name is not None:
pulumi.set(__self__, "action_name", action_name)
if linked_action is not None:
pulumi.set(__self__, "linked_action", linked_action)
if linked_action_verb is not None:
pulumi.set(__self__, "linked_action_verb", linked_action_verb)
if linked_property is not None:
pulumi.set(__self__, "linked_property", linked_property)
if linked_type is not None:
pulumi.set(__self__, "linked_type", linked_type)
@property
@pulumi.getter(name="actionName")
def action_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "action_name")
@action_name.setter
def action_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action_name", value)
@property
@pulumi.getter(name="linkedAction")
def linked_action(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linked_action")
@linked_action.setter
def linked_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_action", value)
@property
@pulumi.getter(name="linkedActionVerb")
def linked_action_verb(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linked_action_verb")
@linked_action_verb.setter
def linked_action_verb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_action_verb", value)
@property
@pulumi.getter(name="linkedProperty")
def linked_property(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linked_property")
@linked_property.setter
def linked_property(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_property", value)
@property
@pulumi.getter(name="linkedType")
def linked_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linked_type")
@linked_type.setter
def linked_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_type", value)
@pulumi.input_type
class LoggingRuleHiddenPropertyPathsArgs:
def __init__(__self__, *,
hidden_paths_on_request: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hidden_paths_on_response: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if hidden_paths_on_request is not None:
pulumi.set(__self__, "hidden_paths_on_request", hidden_paths_on_request)
if hidden_paths_on_response is not None:
pulumi.set(__self__, "hidden_paths_on_response", hidden_paths_on_response)
@property
@pulumi.getter(name="hiddenPathsOnRequest")
def hidden_paths_on_request(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "hidden_paths_on_request")
@hidden_paths_on_request.setter
def hidden_paths_on_request(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "hidden_paths_on_request", value)
@property
@pulumi.getter(name="hiddenPathsOnResponse")
def hidden_paths_on_response(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "hidden_paths_on_response")
@hidden_paths_on_response.setter
def hidden_paths_on_response(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "hidden_paths_on_response", value)
@pulumi.input_type
class LoggingRuleArgs:
def __init__(__self__, *,
action: pulumi.Input[str],
detail_level: pulumi.Input[Union[str, 'LoggingDetails']],
direction: pulumi.Input[Union[str, 'LoggingDirections']],
hidden_property_paths: Optional[pulumi.Input['LoggingRuleHiddenPropertyPathsArgs']] = None):
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "detail_level", detail_level)
pulumi.set(__self__, "direction", direction)
if hidden_property_paths is not None:
pulumi.set(__self__, "hidden_property_paths", hidden_property_paths)
@property
@pulumi.getter
def action(self) -> pulumi.Input[str]:
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input[str]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="detailLevel")
def detail_level(self) -> pulumi.Input[Union[str, 'LoggingDetails']]:
return pulumi.get(self, "detail_level")
@detail_level.setter
def detail_level(self, value: pulumi.Input[Union[str, 'LoggingDetails']]):
pulumi.set(self, "detail_level", value)
@property
@pulumi.getter
def direction(self) -> pulumi.Input[Union[str, 'LoggingDirections']]:
return pulumi.get(self, "direction")
@direction.setter
def direction(self, value: pulumi.Input[Union[str, 'LoggingDirections']]):
pulumi.set(self, "direction", value)
@property
@pulumi.getter(name="hiddenPropertyPaths")
def hidden_property_paths(self) -> Optional[pulumi.Input['LoggingRuleHiddenPropertyPathsArgs']]:
return pulumi.get(self, "hidden_property_paths")
@hidden_property_paths.setter
def hidden_property_paths(self, value: Optional[pulumi.Input['LoggingRuleHiddenPropertyPathsArgs']]):
pulumi.set(self, "hidden_property_paths", value)
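# Example (illustrative values; detail_level and direction accept plain
# strings as well as the generated enum members):
#   rule = LoggingRuleArgs(
#       action="Microsoft.Contoso/employees/read",
#       detail_level="Body",
#       direction="Response",
#       hidden_property_paths=LoggingRuleHiddenPropertyPathsArgs(
#           hidden_paths_on_response=["properties.secret"]))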
@pulumi.input_type
class NotificationEndpointArgs:
def __init__(__self__, *,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notification_destination: Optional[pulumi.Input[str]] = None):
if locations is not None:
pulumi.set(__self__, "locations", locations)
if notification_destination is not None:
pulumi.set(__self__, "notification_destination", notification_destination)
@property
@pulumi.getter
def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "locations", value)
@property
@pulumi.getter(name="notificationDestination")
def notification_destination(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "notification_destination")
@notification_destination.setter
def notification_destination(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_destination", value)
@pulumi.input_type
class NotificationRegistrationPropertiesArgs:
def __init__(__self__, *,
included_events: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
message_scope: Optional[pulumi.Input[Union[str, 'MessageScope']]] = None,
notification_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['NotificationEndpointArgs']]]] = None,
notification_mode: Optional[pulumi.Input[Union[str, 'NotificationMode']]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None):
"""
:param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: The provisioned state of the resource.
"""
if included_events is not None:
pulumi.set(__self__, "included_events", included_events)
if message_scope is not None:
pulumi.set(__self__, "message_scope", message_scope)
if notification_endpoints is not None:
pulumi.set(__self__, "notification_endpoints", notification_endpoints)
if notification_mode is not None:
pulumi.set(__self__, "notification_mode", notification_mode)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="includedEvents")
def included_events(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "included_events")
@included_events.setter
def included_events(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_events", value)
@property
@pulumi.getter(name="messageScope")
def message_scope(self) -> Optional[pulumi.Input[Union[str, 'MessageScope']]]:
return pulumi.get(self, "message_scope")
@message_scope.setter
def message_scope(self, value: Optional[pulumi.Input[Union[str, 'MessageScope']]]):
pulumi.set(self, "message_scope", value)
@property
@pulumi.getter(name="notificationEndpoints")
def notification_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NotificationEndpointArgs']]]]:
return pulumi.get(self, "notification_endpoints")
@notification_endpoints.setter
def notification_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NotificationEndpointArgs']]]]):
pulumi.set(self, "notification_endpoints", value)
@property
@pulumi.getter(name="notificationMode")
def notification_mode(self) -> Optional[pulumi.Input[Union[str, 'NotificationMode']]]:
return pulumi.get(self, "notification_mode")
@notification_mode.setter
def notification_mode(self, value: Optional[pulumi.Input[Union[str, 'NotificationMode']]]):
pulumi.set(self, "notification_mode", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class OpenApiConfigurationArgs:
def __init__(__self__, *,
validation: Optional[pulumi.Input['OpenApiValidationArgs']] = None):
if validation is not None:
pulumi.set(__self__, "validation", validation)
@property
@pulumi.getter
def validation(self) -> Optional[pulumi.Input['OpenApiValidationArgs']]:
return pulumi.get(self, "validation")
@validation.setter
def validation(self, value: Optional[pulumi.Input['OpenApiValidationArgs']]):
pulumi.set(self, "validation", value)
@pulumi.input_type
class OpenApiValidationArgs:
def __init__(__self__, *,
allow_noncompliant_collection_response: Optional[pulumi.Input[bool]] = None):
"""
        :param pulumi.Input[bool] allow_noncompliant_collection_response: Indicates whether a non-compliant response is allowed for a LIST call
"""
if allow_noncompliant_collection_response is not None:
pulumi.set(__self__, "allow_noncompliant_collection_response", allow_noncompliant_collection_response)
@property
@pulumi.getter(name="allowNoncompliantCollectionResponse")
def allow_noncompliant_collection_response(self) -> Optional[pulumi.Input[bool]]:
"""
        Indicates whether a non-compliant response is allowed for a LIST call
"""
return pulumi.get(self, "allow_noncompliant_collection_response")
@allow_noncompliant_collection_response.setter
def allow_noncompliant_collection_response(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_noncompliant_collection_response", value)
@pulumi.input_type
class ProviderHubMetadataProviderAuthenticationArgs:
def __init__(__self__, *,
allowed_audiences: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(__self__, "allowed_audiences", allowed_audiences)
@property
@pulumi.getter(name="allowedAudiences")
def allowed_audiences(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "allowed_audiences")
@allowed_audiences.setter
def allowed_audiences(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "allowed_audiences", value)
@pulumi.input_type
class ProviderHubMetadataThirdPartyProviderAuthorizationArgs:
def __init__(__self__, *,
authorizations: Optional[pulumi.Input[Sequence[pulumi.Input['LightHouseAuthorizationArgs']]]] = None,
managed_by_tenant_id: Optional[pulumi.Input[str]] = None):
if authorizations is not None:
pulumi.set(__self__, "authorizations", authorizations)
if managed_by_tenant_id is not None:
pulumi.set(__self__, "managed_by_tenant_id", managed_by_tenant_id)
@property
@pulumi.getter
def authorizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LightHouseAuthorizationArgs']]]]:
return pulumi.get(self, "authorizations")
@authorizations.setter
def authorizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LightHouseAuthorizationArgs']]]]):
pulumi.set(self, "authorizations", value)
@property
@pulumi.getter(name="managedByTenantId")
def managed_by_tenant_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "managed_by_tenant_id")
@managed_by_tenant_id.setter
def managed_by_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by_tenant_id", value)
@pulumi.input_type
class ProviderRegistrationPropertiesProviderHubMetadataArgs:
def __init__(__self__, *,
provider_authentication: Optional[pulumi.Input['ProviderHubMetadataProviderAuthenticationArgs']] = None,
provider_authorizations: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]] = None,
third_party_provider_authorization: Optional[pulumi.Input['ProviderHubMetadataThirdPartyProviderAuthorizationArgs']] = None):
if provider_authentication is not None:
pulumi.set(__self__, "provider_authentication", provider_authentication)
if provider_authorizations is not None:
pulumi.set(__self__, "provider_authorizations", provider_authorizations)
if third_party_provider_authorization is not None:
pulumi.set(__self__, "third_party_provider_authorization", third_party_provider_authorization)
@property
@pulumi.getter(name="providerAuthentication")
def provider_authentication(self) -> Optional[pulumi.Input['ProviderHubMetadataProviderAuthenticationArgs']]:
return pulumi.get(self, "provider_authentication")
@provider_authentication.setter
def provider_authentication(self, value: Optional[pulumi.Input['ProviderHubMetadataProviderAuthenticationArgs']]):
pulumi.set(self, "provider_authentication", value)
@property
@pulumi.getter(name="providerAuthorizations")
def provider_authorizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]]:
return pulumi.get(self, "provider_authorizations")
@provider_authorizations.setter
def provider_authorizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]]):
pulumi.set(self, "provider_authorizations", value)
@property
@pulumi.getter(name="thirdPartyProviderAuthorization")
def third_party_provider_authorization(self) -> Optional[pulumi.Input['ProviderHubMetadataThirdPartyProviderAuthorizationArgs']]:
return pulumi.get(self, "third_party_provider_authorization")
@third_party_provider_authorization.setter
def third_party_provider_authorization(self, value: Optional[pulumi.Input['ProviderHubMetadataThirdPartyProviderAuthorizationArgs']]):
pulumi.set(self, "third_party_provider_authorization", value)
@pulumi.input_type
class ProviderRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs:
def __init__(__self__, *,
soft_delete_ttl: Optional[pulumi.Input[str]] = None,
subscription_state_override_actions: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]] = None):
if soft_delete_ttl is not None:
pulumi.set(__self__, "soft_delete_ttl", soft_delete_ttl)
if subscription_state_override_actions is not None:
pulumi.set(__self__, "subscription_state_override_actions", subscription_state_override_actions)
@property
@pulumi.getter(name="softDeleteTTL")
def soft_delete_ttl(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "soft_delete_ttl")
@soft_delete_ttl.setter
def soft_delete_ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "soft_delete_ttl", value)
@property
@pulumi.getter(name="subscriptionStateOverrideActions")
def subscription_state_override_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]]:
return pulumi.get(self, "subscription_state_override_actions")
@subscription_state_override_actions.setter
def subscription_state_override_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]]):
pulumi.set(self, "subscription_state_override_actions", value)
@pulumi.input_type
class ProviderRegistrationPropertiesArgs:
def __init__(__self__, *,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderCapabilitiesArgs']]]] = None,
features_rule: Optional[pulumi.Input['ResourceProviderManifestPropertiesFeaturesRuleArgs']] = None,
management: Optional[pulumi.Input['ResourceProviderManifestPropertiesManagementArgs']] = None,
metadata: Optional[Any] = None,
namespace: Optional[pulumi.Input[str]] = None,
provider_authentication: Optional[pulumi.Input['ResourceProviderManifestPropertiesProviderAuthenticationArgs']] = None,
provider_authorizations: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]] = None,
provider_hub_metadata: Optional[pulumi.Input['ProviderRegistrationPropertiesProviderHubMetadataArgs']] = None,
provider_type: Optional[pulumi.Input[Union[str, 'ResourceProviderType']]] = None,
provider_version: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None,
request_header_options: Optional[pulumi.Input['ResourceProviderManifestPropertiesRequestHeaderOptionsArgs']] = None,
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subscription_lifecycle_notification_specifications: Optional[pulumi.Input['ProviderRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']] = None,
template_deployment_options: Optional[pulumi.Input['ResourceProviderManifestPropertiesTemplateDeploymentOptionsArgs']] = None):
"""
:param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: The provisioned state of the resource.
"""
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if features_rule is not None:
pulumi.set(__self__, "features_rule", features_rule)
if management is not None:
pulumi.set(__self__, "management", management)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if provider_authentication is not None:
pulumi.set(__self__, "provider_authentication", provider_authentication)
if provider_authorizations is not None:
pulumi.set(__self__, "provider_authorizations", provider_authorizations)
if provider_hub_metadata is not None:
pulumi.set(__self__, "provider_hub_metadata", provider_hub_metadata)
if provider_type is not None:
pulumi.set(__self__, "provider_type", provider_type)
if provider_version is not None:
pulumi.set(__self__, "provider_version", provider_version)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if request_header_options is not None:
pulumi.set(__self__, "request_header_options", request_header_options)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
if subscription_lifecycle_notification_specifications is not None:
pulumi.set(__self__, "subscription_lifecycle_notification_specifications", subscription_lifecycle_notification_specifications)
if template_deployment_options is not None:
pulumi.set(__self__, "template_deployment_options", template_deployment_options)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderCapabilitiesArgs']]]]:
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderCapabilitiesArgs']]]]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter(name="featuresRule")
def features_rule(self) -> Optional[pulumi.Input['ResourceProviderManifestPropertiesFeaturesRuleArgs']]:
return pulumi.get(self, "features_rule")
@features_rule.setter
def features_rule(self, value: Optional[pulumi.Input['ResourceProviderManifestPropertiesFeaturesRuleArgs']]):
pulumi.set(self, "features_rule", value)
@property
@pulumi.getter
def management(self) -> Optional[pulumi.Input['ResourceProviderManifestPropertiesManagementArgs']]:
return pulumi.get(self, "management")
@management.setter
def management(self, value: Optional[pulumi.Input['ResourceProviderManifestPropertiesManagementArgs']]):
pulumi.set(self, "management", value)
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[Any]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="providerAuthentication")
def provider_authentication(self) -> Optional[pulumi.Input['ResourceProviderManifestPropertiesProviderAuthenticationArgs']]:
return pulumi.get(self, "provider_authentication")
@provider_authentication.setter
def provider_authentication(self, value: Optional[pulumi.Input['ResourceProviderManifestPropertiesProviderAuthenticationArgs']]):
pulumi.set(self, "provider_authentication", value)
@property
@pulumi.getter(name="providerAuthorizations")
def provider_authorizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]]:
return pulumi.get(self, "provider_authorizations")
@provider_authorizations.setter
def provider_authorizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceProviderAuthorizationArgs']]]]):
pulumi.set(self, "provider_authorizations", value)
@property
@pulumi.getter(name="providerHubMetadata")
def provider_hub_metadata(self) -> Optional[pulumi.Input['ProviderRegistrationPropertiesProviderHubMetadataArgs']]:
return pulumi.get(self, "provider_hub_metadata")
@provider_hub_metadata.setter
def provider_hub_metadata(self, value: Optional[pulumi.Input['ProviderRegistrationPropertiesProviderHubMetadataArgs']]):
pulumi.set(self, "provider_hub_metadata", value)
@property
@pulumi.getter(name="providerType")
def provider_type(self) -> Optional[pulumi.Input[Union[str, 'ResourceProviderType']]]:
return pulumi.get(self, "provider_type")
@provider_type.setter
def provider_type(self, value: Optional[pulumi.Input[Union[str, 'ResourceProviderType']]]):
pulumi.set(self, "provider_type", value)
@property
@pulumi.getter(name="providerVersion")
def provider_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "provider_version")
@provider_version.setter
def provider_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provider_version", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="requestHeaderOptions")
def request_header_options(self) -> Optional[pulumi.Input['ResourceProviderManifestPropertiesRequestHeaderOptionsArgs']]:
return pulumi.get(self, "request_header_options")
@request_header_options.setter
def request_header_options(self, value: Optional[pulumi.Input['ResourceProviderManifestPropertiesRequestHeaderOptionsArgs']]):
pulumi.set(self, "request_header_options", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@property
@pulumi.getter(name="subscriptionLifecycleNotificationSpecifications")
def subscription_lifecycle_notification_specifications(self) -> Optional[pulumi.Input['ProviderRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']]:
return pulumi.get(self, "subscription_lifecycle_notification_specifications")
@subscription_lifecycle_notification_specifications.setter
def subscription_lifecycle_notification_specifications(self, value: Optional[pulumi.Input['ProviderRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']]):
pulumi.set(self, "subscription_lifecycle_notification_specifications", value)
@property
@pulumi.getter(name="templateDeploymentOptions")
def template_deployment_options(self) -> Optional[pulumi.Input['ResourceProviderManifestPropertiesTemplateDeploymentOptionsArgs']]:
return pulumi.get(self, "template_deployment_options")
@template_deployment_options.setter
def template_deployment_options(self, value: Optional[pulumi.Input['ResourceProviderManifestPropertiesTemplateDeploymentOptionsArgs']]):
pulumi.set(self, "template_deployment_options", value)
@pulumi.input_type
class ResourceConcurrencyControlOptionArgs:
def __init__(__self__, *,
policy: Optional[pulumi.Input[Union[str, 'Policy']]] = None):
if policy is not None:
pulumi.set(__self__, "policy", policy)
@property
@pulumi.getter
def policy(self) -> Optional[pulumi.Input[Union[str, 'Policy']]]:
return pulumi.get(self, "policy")
@policy.setter
def policy(self, value: Optional[pulumi.Input[Union[str, 'Policy']]]):
pulumi.set(self, "policy", value)
@pulumi.input_type
class ResourceProviderAuthorizationArgs:
def __init__(__self__, *,
application_id: Optional[pulumi.Input[str]] = None,
managed_by_role_definition_id: Optional[pulumi.Input[str]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None):
if application_id is not None:
pulumi.set(__self__, "application_id", application_id)
if managed_by_role_definition_id is not None:
pulumi.set(__self__, "managed_by_role_definition_id", managed_by_role_definition_id)
if role_definition_id is not None:
pulumi.set(__self__, "role_definition_id", role_definition_id)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter(name="managedByRoleDefinitionId")
def managed_by_role_definition_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "managed_by_role_definition_id")
@managed_by_role_definition_id.setter
def managed_by_role_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by_role_definition_id", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_definition_id", value)
@pulumi.input_type
class ResourceProviderCapabilitiesArgs:
def __init__(__self__, *,
effect: pulumi.Input[Union[str, 'ResourceProviderCapabilitiesEffect']],
quota_id: pulumi.Input[str],
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
pulumi.set(__self__, "effect", effect)
pulumi.set(__self__, "quota_id", quota_id)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
@property
@pulumi.getter
def effect(self) -> pulumi.Input[Union[str, 'ResourceProviderCapabilitiesEffect']]:
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: pulumi.Input[Union[str, 'ResourceProviderCapabilitiesEffect']]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter(name="quotaId")
def quota_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "quota_id")
@quota_id.setter
def quota_id(self, value: pulumi.Input[str]):
pulumi.set(self, "quota_id", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@pulumi.input_type
class ResourceProviderManifestPropertiesFeaturesRuleArgs:
def __init__(__self__, *,
required_features_policy: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(__self__, "required_features_policy", required_features_policy)
@property
@pulumi.getter(name="requiredFeaturesPolicy")
def required_features_policy(self) -> pulumi.Input[Union[str, 'FeaturesPolicy']]:
return pulumi.get(self, "required_features_policy")
@required_features_policy.setter
def required_features_policy(self, value: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(self, "required_features_policy", value)
@pulumi.input_type
class ResourceProviderManifestPropertiesManagementArgs:
def __init__(__self__, *,
incident_contact_email: Optional[pulumi.Input[str]] = None,
incident_routing_service: Optional[pulumi.Input[str]] = None,
incident_routing_team: Optional[pulumi.Input[str]] = None,
manifest_owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_access_policy: Optional[pulumi.Input[str]] = None,
resource_access_roles: Optional[pulumi.Input[Sequence[Any]]] = None,
schema_owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_tree_infos: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]] = None):
if incident_contact_email is not None:
pulumi.set(__self__, "incident_contact_email", incident_contact_email)
if incident_routing_service is not None:
pulumi.set(__self__, "incident_routing_service", incident_routing_service)
if incident_routing_team is not None:
pulumi.set(__self__, "incident_routing_team", incident_routing_team)
if manifest_owners is not None:
pulumi.set(__self__, "manifest_owners", manifest_owners)
if resource_access_policy is not None:
pulumi.set(__self__, "resource_access_policy", resource_access_policy)
if resource_access_roles is not None:
pulumi.set(__self__, "resource_access_roles", resource_access_roles)
if schema_owners is not None:
pulumi.set(__self__, "schema_owners", schema_owners)
if service_tree_infos is not None:
pulumi.set(__self__, "service_tree_infos", service_tree_infos)
@property
@pulumi.getter(name="incidentContactEmail")
def incident_contact_email(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_contact_email")
@incident_contact_email.setter
def incident_contact_email(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_contact_email", value)
@property
@pulumi.getter(name="incidentRoutingService")
def incident_routing_service(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_routing_service")
@incident_routing_service.setter
def incident_routing_service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_routing_service", value)
@property
@pulumi.getter(name="incidentRoutingTeam")
def incident_routing_team(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_routing_team")
@incident_routing_team.setter
def incident_routing_team(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_routing_team", value)
@property
@pulumi.getter(name="manifestOwners")
def manifest_owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "manifest_owners")
@manifest_owners.setter
def manifest_owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "manifest_owners", value)
@property
@pulumi.getter(name="resourceAccessPolicy")
def resource_access_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_access_policy")
@resource_access_policy.setter
def resource_access_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_access_policy", value)
@property
@pulumi.getter(name="resourceAccessRoles")
def resource_access_roles(self) -> Optional[pulumi.Input[Sequence[Any]]]:
return pulumi.get(self, "resource_access_roles")
@resource_access_roles.setter
def resource_access_roles(self, value: Optional[pulumi.Input[Sequence[Any]]]):
pulumi.set(self, "resource_access_roles", value)
@property
@pulumi.getter(name="schemaOwners")
def schema_owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "schema_owners")
@schema_owners.setter
def schema_owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "schema_owners", value)
@property
@pulumi.getter(name="serviceTreeInfos")
def service_tree_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]:
return pulumi.get(self, "service_tree_infos")
@service_tree_infos.setter
def service_tree_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]):
pulumi.set(self, "service_tree_infos", value)
@pulumi.input_type
class ResourceProviderManifestPropertiesProviderAuthenticationArgs:
def __init__(__self__, *,
allowed_audiences: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(__self__, "allowed_audiences", allowed_audiences)
@property
@pulumi.getter(name="allowedAudiences")
def allowed_audiences(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "allowed_audiences")
@allowed_audiences.setter
def allowed_audiences(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "allowed_audiences", value)
@pulumi.input_type
class ResourceProviderManifestPropertiesRequestHeaderOptionsArgs:
def __init__(__self__, *,
opt_in_headers: Optional[pulumi.Input[Union[str, 'OptInHeaderType']]] = None):
if opt_in_headers is not None:
pulumi.set(__self__, "opt_in_headers", opt_in_headers)
@property
@pulumi.getter(name="optInHeaders")
def opt_in_headers(self) -> Optional[pulumi.Input[Union[str, 'OptInHeaderType']]]:
return pulumi.get(self, "opt_in_headers")
@opt_in_headers.setter
def opt_in_headers(self, value: Optional[pulumi.Input[Union[str, 'OptInHeaderType']]]):
pulumi.set(self, "opt_in_headers", value)
@pulumi.input_type
class ResourceProviderManifestPropertiesTemplateDeploymentOptionsArgs:
def __init__(__self__, *,
preflight_options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]] = None,
preflight_supported: Optional[pulumi.Input[bool]] = None):
if preflight_options is not None:
pulumi.set(__self__, "preflight_options", preflight_options)
if preflight_supported is not None:
pulumi.set(__self__, "preflight_supported", preflight_supported)
@property
@pulumi.getter(name="preflightOptions")
def preflight_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]]:
return pulumi.get(self, "preflight_options")
@preflight_options.setter
def preflight_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]]):
pulumi.set(self, "preflight_options", value)
@property
@pulumi.getter(name="preflightSupported")
def preflight_supported(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "preflight_supported")
@preflight_supported.setter
def preflight_supported(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "preflight_supported", value)
@pulumi.input_type
class ResourceTypeEndpointFeaturesRuleArgs:
def __init__(__self__, *,
required_features_policy: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(__self__, "required_features_policy", required_features_policy)
@property
@pulumi.getter(name="requiredFeaturesPolicy")
def required_features_policy(self) -> pulumi.Input[Union[str, 'FeaturesPolicy']]:
return pulumi.get(self, "required_features_policy")
@required_features_policy.setter
def required_features_policy(self, value: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(self, "required_features_policy", value)
@pulumi.input_type
class ResourceTypeEndpointArgs:
def __init__(__self__, *,
api_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
endpoint_type: Optional[pulumi.Input[Union[str, 'EndpointType']]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeExtensionArgs']]]] = None,
features_rule: Optional[pulumi.Input['ResourceTypeEndpointFeaturesRuleArgs']] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
timeout: Optional[pulumi.Input[str]] = None):
if api_versions is not None:
pulumi.set(__self__, "api_versions", api_versions)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if endpoint_type is not None:
pulumi.set(__self__, "endpoint_type", endpoint_type)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if features_rule is not None:
pulumi.set(__self__, "features_rule", features_rule)
if locations is not None:
pulumi.set(__self__, "locations", locations)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter(name="apiVersions")
def api_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "api_versions")
@api_versions.setter
def api_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "api_versions", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> Optional[pulumi.Input[Union[str, 'EndpointType']]]:
return pulumi.get(self, "endpoint_type")
@endpoint_type.setter
def endpoint_type(self, value: Optional[pulumi.Input[Union[str, 'EndpointType']]]):
pulumi.set(self, "endpoint_type", value)
@property
@pulumi.getter
def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeExtensionArgs']]]]:
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeExtensionArgs']]]]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="featuresRule")
def features_rule(self) -> Optional[pulumi.Input['ResourceTypeEndpointFeaturesRuleArgs']]:
return pulumi.get(self, "features_rule")
@features_rule.setter
def features_rule(self, value: Optional[pulumi.Input['ResourceTypeEndpointFeaturesRuleArgs']]):
pulumi.set(self, "features_rule", value)
@property
@pulumi.getter
def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "locations", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class ResourceTypeExtensionOptionsResourceCreationBeginArgs:
def __init__(__self__, *,
request: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]] = None,
response: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]] = None):
if request is not None:
pulumi.set(__self__, "request", request)
if response is not None:
pulumi.set(__self__, "response", response)
@property
@pulumi.getter
def request(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]]:
return pulumi.get(self, "request")
@request.setter
def request(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]]):
pulumi.set(self, "request", value)
@property
@pulumi.getter
def response(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]]:
return pulumi.get(self, "response")
@response.setter
def response(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionOptionType']]]]]):
pulumi.set(self, "response", value)
@pulumi.input_type
class ResourceTypeExtensionArgs:
def __init__(__self__, *,
endpoint_uri: Optional[pulumi.Input[str]] = None,
extension_categories: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionCategory']]]]] = None,
                 timeout: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] endpoint_uri: The extension endpoint URI.
        :param pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionCategory']]]] extension_categories: The extension categories.
        :param pulumi.Input[str] timeout: The extension timeout.
        """
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if extension_categories is not None:
pulumi.set(__self__, "extension_categories", extension_categories)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "endpoint_uri")
@endpoint_uri.setter
def endpoint_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint_uri", value)
@property
@pulumi.getter(name="extensionCategories")
def extension_categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionCategory']]]]]:
return pulumi.get(self, "extension_categories")
@extension_categories.setter
def extension_categories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ExtensionCategory']]]]]):
pulumi.set(self, "extension_categories", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesCheckNameAvailabilitySpecificationsArgs:
def __init__(__self__, *,
enable_default_validation: Optional[pulumi.Input[bool]] = None,
resource_types_with_custom_validation: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if enable_default_validation is not None:
pulumi.set(__self__, "enable_default_validation", enable_default_validation)
if resource_types_with_custom_validation is not None:
pulumi.set(__self__, "resource_types_with_custom_validation", resource_types_with_custom_validation)
@property
@pulumi.getter(name="enableDefaultValidation")
def enable_default_validation(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_default_validation")
@enable_default_validation.setter
def enable_default_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_validation", value)
@property
@pulumi.getter(name="resourceTypesWithCustomValidation")
def resource_types_with_custom_validation(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "resource_types_with_custom_validation")
@resource_types_with_custom_validation.setter
def resource_types_with_custom_validation(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "resource_types_with_custom_validation", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesExtensionOptionsArgs:
def __init__(__self__, *,
resource_creation_begin: Optional[pulumi.Input['ResourceTypeExtensionOptionsResourceCreationBeginArgs']] = None):
if resource_creation_begin is not None:
pulumi.set(__self__, "resource_creation_begin", resource_creation_begin)
@property
@pulumi.getter(name="resourceCreationBegin")
def resource_creation_begin(self) -> Optional[pulumi.Input['ResourceTypeExtensionOptionsResourceCreationBeginArgs']]:
return pulumi.get(self, "resource_creation_begin")
@resource_creation_begin.setter
def resource_creation_begin(self, value: Optional[pulumi.Input['ResourceTypeExtensionOptionsResourceCreationBeginArgs']]):
pulumi.set(self, "resource_creation_begin", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesFeaturesRuleArgs:
def __init__(__self__, *,
required_features_policy: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(__self__, "required_features_policy", required_features_policy)
@property
@pulumi.getter(name="requiredFeaturesPolicy")
def required_features_policy(self) -> pulumi.Input[Union[str, 'FeaturesPolicy']]:
return pulumi.get(self, "required_features_policy")
@required_features_policy.setter
def required_features_policy(self, value: pulumi.Input[Union[str, 'FeaturesPolicy']]):
pulumi.set(self, "required_features_policy", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesIdentityManagementArgs:
def __init__(__self__, *,
application_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'IdentityManagementTypes']]] = None):
if application_id is not None:
pulumi.set(__self__, "application_id", application_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'IdentityManagementTypes']]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'IdentityManagementTypes']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesManagementArgs:
def __init__(__self__, *,
incident_contact_email: Optional[pulumi.Input[str]] = None,
incident_routing_service: Optional[pulumi.Input[str]] = None,
incident_routing_team: Optional[pulumi.Input[str]] = None,
manifest_owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_access_policy: Optional[pulumi.Input[str]] = None,
resource_access_roles: Optional[pulumi.Input[Sequence[Any]]] = None,
schema_owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_tree_infos: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]] = None):
if incident_contact_email is not None:
pulumi.set(__self__, "incident_contact_email", incident_contact_email)
if incident_routing_service is not None:
pulumi.set(__self__, "incident_routing_service", incident_routing_service)
if incident_routing_team is not None:
pulumi.set(__self__, "incident_routing_team", incident_routing_team)
if manifest_owners is not None:
pulumi.set(__self__, "manifest_owners", manifest_owners)
if resource_access_policy is not None:
pulumi.set(__self__, "resource_access_policy", resource_access_policy)
if resource_access_roles is not None:
pulumi.set(__self__, "resource_access_roles", resource_access_roles)
if schema_owners is not None:
pulumi.set(__self__, "schema_owners", schema_owners)
if service_tree_infos is not None:
pulumi.set(__self__, "service_tree_infos", service_tree_infos)
@property
@pulumi.getter(name="incidentContactEmail")
def incident_contact_email(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_contact_email")
@incident_contact_email.setter
def incident_contact_email(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_contact_email", value)
@property
@pulumi.getter(name="incidentRoutingService")
def incident_routing_service(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_routing_service")
@incident_routing_service.setter
def incident_routing_service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_routing_service", value)
@property
@pulumi.getter(name="incidentRoutingTeam")
def incident_routing_team(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "incident_routing_team")
@incident_routing_team.setter
def incident_routing_team(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incident_routing_team", value)
@property
@pulumi.getter(name="manifestOwners")
def manifest_owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "manifest_owners")
@manifest_owners.setter
def manifest_owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "manifest_owners", value)
@property
@pulumi.getter(name="resourceAccessPolicy")
def resource_access_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_access_policy")
@resource_access_policy.setter
def resource_access_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_access_policy", value)
@property
@pulumi.getter(name="resourceAccessRoles")
def resource_access_roles(self) -> Optional[pulumi.Input[Sequence[Any]]]:
return pulumi.get(self, "resource_access_roles")
@resource_access_roles.setter
def resource_access_roles(self, value: Optional[pulumi.Input[Sequence[Any]]]):
pulumi.set(self, "resource_access_roles", value)
@property
@pulumi.getter(name="schemaOwners")
def schema_owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "schema_owners")
@schema_owners.setter
def schema_owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "schema_owners", value)
@property
@pulumi.getter(name="serviceTreeInfos")
def service_tree_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]:
return pulumi.get(self, "service_tree_infos")
@service_tree_infos.setter
def service_tree_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]):
pulumi.set(self, "service_tree_infos", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesRequestHeaderOptionsArgs:
def __init__(__self__, *,
opt_in_headers: Optional[pulumi.Input[Union[str, 'OptInHeaderType']]] = None):
if opt_in_headers is not None:
pulumi.set(__self__, "opt_in_headers", opt_in_headers)
@property
@pulumi.getter(name="optInHeaders")
def opt_in_headers(self) -> Optional[pulumi.Input[Union[str, 'OptInHeaderType']]]:
return pulumi.get(self, "opt_in_headers")
@opt_in_headers.setter
def opt_in_headers(self, value: Optional[pulumi.Input[Union[str, 'OptInHeaderType']]]):
pulumi.set(self, "opt_in_headers", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesResourceGraphConfigurationArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None):
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesResourceMovePolicyArgs:
def __init__(__self__, *,
cross_resource_group_move_enabled: Optional[pulumi.Input[bool]] = None,
cross_subscription_move_enabled: Optional[pulumi.Input[bool]] = None,
                 validation_required: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] cross_resource_group_move_enabled: Whether moves across resource groups are enabled.
        :param pulumi.Input[bool] cross_subscription_move_enabled: Whether moves across subscriptions are enabled.
        :param pulumi.Input[bool] validation_required: Whether validation is required for a move.
        """
if cross_resource_group_move_enabled is not None:
pulumi.set(__self__, "cross_resource_group_move_enabled", cross_resource_group_move_enabled)
if cross_subscription_move_enabled is not None:
pulumi.set(__self__, "cross_subscription_move_enabled", cross_subscription_move_enabled)
if validation_required is not None:
pulumi.set(__self__, "validation_required", validation_required)
@property
@pulumi.getter(name="crossResourceGroupMoveEnabled")
def cross_resource_group_move_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "cross_resource_group_move_enabled")
@cross_resource_group_move_enabled.setter
def cross_resource_group_move_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cross_resource_group_move_enabled", value)
@property
@pulumi.getter(name="crossSubscriptionMoveEnabled")
def cross_subscription_move_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "cross_subscription_move_enabled")
@cross_subscription_move_enabled.setter
def cross_subscription_move_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cross_subscription_move_enabled", value)
@property
@pulumi.getter(name="validationRequired")
def validation_required(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "validation_required")
@validation_required.setter
def validation_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "validation_required", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs:
def __init__(__self__, *,
soft_delete_ttl: Optional[pulumi.Input[str]] = None,
subscription_state_override_actions: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]] = None):
if soft_delete_ttl is not None:
pulumi.set(__self__, "soft_delete_ttl", soft_delete_ttl)
if subscription_state_override_actions is not None:
pulumi.set(__self__, "subscription_state_override_actions", subscription_state_override_actions)
@property
@pulumi.getter(name="softDeleteTTL")
def soft_delete_ttl(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "soft_delete_ttl")
@soft_delete_ttl.setter
def soft_delete_ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "soft_delete_ttl", value)
@property
@pulumi.getter(name="subscriptionStateOverrideActions")
def subscription_state_override_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]]:
return pulumi.get(self, "subscription_state_override_actions")
@subscription_state_override_actions.setter
def subscription_state_override_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateOverrideActionArgs']]]]):
pulumi.set(self, "subscription_state_override_actions", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesTemplateDeploymentOptionsArgs:
def __init__(__self__, *,
preflight_options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]] = None,
preflight_supported: Optional[pulumi.Input[bool]] = None):
if preflight_options is not None:
pulumi.set(__self__, "preflight_options", preflight_options)
if preflight_supported is not None:
pulumi.set(__self__, "preflight_supported", preflight_supported)
@property
@pulumi.getter(name="preflightOptions")
def preflight_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]]:
return pulumi.get(self, "preflight_options")
@preflight_options.setter
def preflight_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PreflightOption']]]]]):
pulumi.set(self, "preflight_options", value)
@property
@pulumi.getter(name="preflightSupported")
def preflight_supported(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "preflight_supported")
@preflight_supported.setter
def preflight_supported(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "preflight_supported", value)
@pulumi.input_type
class ResourceTypeRegistrationPropertiesArgs:
def __init__(__self__, *,
allowed_unauthorized_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authorization_action_mappings: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationActionMappingArgs']]]] = None,
check_name_availability_specifications: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesCheckNameAvailabilitySpecificationsArgs']] = None,
default_api_version: Optional[pulumi.Input[str]] = None,
disallowed_action_verbs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_async_operation: Optional[pulumi.Input[bool]] = None,
enable_third_party_s2_s: Optional[pulumi.Input[bool]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeEndpointArgs']]]] = None,
extended_locations: Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedLocationOptionsArgs']]]] = None,
extension_options: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesExtensionOptionsArgs']] = None,
features_rule: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesFeaturesRuleArgs']] = None,
identity_management: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesIdentityManagementArgs']] = None,
is_pure_proxy: Optional[pulumi.Input[bool]] = None,
linked_access_checks: Optional[pulumi.Input[Sequence[pulumi.Input['LinkedAccessCheckArgs']]]] = None,
logging_rules: Optional[pulumi.Input[Sequence[pulumi.Input['LoggingRuleArgs']]]] = None,
management: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesManagementArgs']] = None,
marketplace_type: Optional[pulumi.Input[str]] = None,
open_api_configuration: Optional[pulumi.Input['OpenApiConfigurationArgs']] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None,
regionality: Optional[pulumi.Input[Union[str, 'Regionality']]] = None,
request_header_options: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesRequestHeaderOptionsArgs']] = None,
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_concurrency_control_options: Optional[pulumi.Input[Mapping[str, pulumi.Input['ResourceConcurrencyControlOptionArgs']]]] = None,
resource_deletion_policy: Optional[pulumi.Input[Union[str, 'ResourceDeletionPolicy']]] = None,
resource_graph_configuration: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceGraphConfigurationArgs']] = None,
resource_move_policy: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceMovePolicyArgs']] = None,
routing_type: Optional[pulumi.Input[Union[str, 'RoutingType']]] = None,
service_tree_infos: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]] = None,
subscription_lifecycle_notification_specifications: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']] = None,
subscription_state_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateRuleArgs']]]] = None,
swagger_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['SwaggerSpecificationArgs']]]] = None,
template_deployment_options: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesTemplateDeploymentOptionsArgs']] = None,
throttling_rules: Optional[pulumi.Input[Sequence[pulumi.Input['ThrottlingRuleArgs']]]] = None):
"""
:param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: The provisioned state of the resource.
"""
if allowed_unauthorized_actions is not None:
pulumi.set(__self__, "allowed_unauthorized_actions", allowed_unauthorized_actions)
if authorization_action_mappings is not None:
pulumi.set(__self__, "authorization_action_mappings", authorization_action_mappings)
if check_name_availability_specifications is not None:
pulumi.set(__self__, "check_name_availability_specifications", check_name_availability_specifications)
if default_api_version is not None:
pulumi.set(__self__, "default_api_version", default_api_version)
if disallowed_action_verbs is not None:
pulumi.set(__self__, "disallowed_action_verbs", disallowed_action_verbs)
if enable_async_operation is not None:
pulumi.set(__self__, "enable_async_operation", enable_async_operation)
if enable_third_party_s2_s is not None:
pulumi.set(__self__, "enable_third_party_s2_s", enable_third_party_s2_s)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if extended_locations is not None:
pulumi.set(__self__, "extended_locations", extended_locations)
if extension_options is not None:
pulumi.set(__self__, "extension_options", extension_options)
if features_rule is not None:
pulumi.set(__self__, "features_rule", features_rule)
if identity_management is not None:
pulumi.set(__self__, "identity_management", identity_management)
if is_pure_proxy is not None:
pulumi.set(__self__, "is_pure_proxy", is_pure_proxy)
if linked_access_checks is not None:
pulumi.set(__self__, "linked_access_checks", linked_access_checks)
if logging_rules is not None:
pulumi.set(__self__, "logging_rules", logging_rules)
if management is not None:
pulumi.set(__self__, "management", management)
if marketplace_type is not None:
pulumi.set(__self__, "marketplace_type", marketplace_type)
if open_api_configuration is not None:
pulumi.set(__self__, "open_api_configuration", open_api_configuration)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if regionality is not None:
pulumi.set(__self__, "regionality", regionality)
if request_header_options is not None:
pulumi.set(__self__, "request_header_options", request_header_options)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
if resource_concurrency_control_options is not None:
pulumi.set(__self__, "resource_concurrency_control_options", resource_concurrency_control_options)
if resource_deletion_policy is not None:
pulumi.set(__self__, "resource_deletion_policy", resource_deletion_policy)
if resource_graph_configuration is not None:
pulumi.set(__self__, "resource_graph_configuration", resource_graph_configuration)
if resource_move_policy is not None:
pulumi.set(__self__, "resource_move_policy", resource_move_policy)
if routing_type is not None:
pulumi.set(__self__, "routing_type", routing_type)
if service_tree_infos is not None:
pulumi.set(__self__, "service_tree_infos", service_tree_infos)
if subscription_lifecycle_notification_specifications is not None:
pulumi.set(__self__, "subscription_lifecycle_notification_specifications", subscription_lifecycle_notification_specifications)
if subscription_state_rules is not None:
pulumi.set(__self__, "subscription_state_rules", subscription_state_rules)
if swagger_specifications is not None:
pulumi.set(__self__, "swagger_specifications", swagger_specifications)
if template_deployment_options is not None:
pulumi.set(__self__, "template_deployment_options", template_deployment_options)
if throttling_rules is not None:
pulumi.set(__self__, "throttling_rules", throttling_rules)
@property
@pulumi.getter(name="allowedUnauthorizedActions")
def allowed_unauthorized_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_unauthorized_actions")
@allowed_unauthorized_actions.setter
def allowed_unauthorized_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_unauthorized_actions", value)
@property
@pulumi.getter(name="authorizationActionMappings")
def authorization_action_mappings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationActionMappingArgs']]]]:
return pulumi.get(self, "authorization_action_mappings")
@authorization_action_mappings.setter
def authorization_action_mappings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationActionMappingArgs']]]]):
pulumi.set(self, "authorization_action_mappings", value)
@property
@pulumi.getter(name="checkNameAvailabilitySpecifications")
def check_name_availability_specifications(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesCheckNameAvailabilitySpecificationsArgs']]:
return pulumi.get(self, "check_name_availability_specifications")
@check_name_availability_specifications.setter
def check_name_availability_specifications(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesCheckNameAvailabilitySpecificationsArgs']]):
pulumi.set(self, "check_name_availability_specifications", value)
@property
@pulumi.getter(name="defaultApiVersion")
def default_api_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "default_api_version")
@default_api_version.setter
def default_api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_api_version", value)
@property
@pulumi.getter(name="disallowedActionVerbs")
def disallowed_action_verbs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "disallowed_action_verbs")
@disallowed_action_verbs.setter
def disallowed_action_verbs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "disallowed_action_verbs", value)
@property
@pulumi.getter(name="enableAsyncOperation")
def enable_async_operation(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_async_operation")
@enable_async_operation.setter
def enable_async_operation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_async_operation", value)
@property
@pulumi.getter(name="enableThirdPartyS2S")
def enable_third_party_s2_s(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_third_party_s2_s")
@enable_third_party_s2_s.setter
def enable_third_party_s2_s(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_third_party_s2_s", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeEndpointArgs']]]]:
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceTypeEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter(name="extendedLocations")
def extended_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedLocationOptionsArgs']]]]:
return pulumi.get(self, "extended_locations")
@extended_locations.setter
def extended_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExtendedLocationOptionsArgs']]]]):
pulumi.set(self, "extended_locations", value)
@property
@pulumi.getter(name="extensionOptions")
def extension_options(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesExtensionOptionsArgs']]:
return pulumi.get(self, "extension_options")
@extension_options.setter
def extension_options(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesExtensionOptionsArgs']]):
pulumi.set(self, "extension_options", value)
@property
@pulumi.getter(name="featuresRule")
def features_rule(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesFeaturesRuleArgs']]:
return pulumi.get(self, "features_rule")
@features_rule.setter
def features_rule(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesFeaturesRuleArgs']]):
pulumi.set(self, "features_rule", value)
@property
@pulumi.getter(name="identityManagement")
def identity_management(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesIdentityManagementArgs']]:
return pulumi.get(self, "identity_management")
@identity_management.setter
def identity_management(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesIdentityManagementArgs']]):
pulumi.set(self, "identity_management", value)
@property
@pulumi.getter(name="isPureProxy")
def is_pure_proxy(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_pure_proxy")
@is_pure_proxy.setter
def is_pure_proxy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_pure_proxy", value)
@property
@pulumi.getter(name="linkedAccessChecks")
def linked_access_checks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LinkedAccessCheckArgs']]]]:
return pulumi.get(self, "linked_access_checks")
@linked_access_checks.setter
def linked_access_checks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LinkedAccessCheckArgs']]]]):
pulumi.set(self, "linked_access_checks", value)
@property
@pulumi.getter(name="loggingRules")
def logging_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoggingRuleArgs']]]]:
return pulumi.get(self, "logging_rules")
@logging_rules.setter
def logging_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LoggingRuleArgs']]]]):
pulumi.set(self, "logging_rules", value)
@property
@pulumi.getter
def management(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesManagementArgs']]:
return pulumi.get(self, "management")
@management.setter
def management(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesManagementArgs']]):
pulumi.set(self, "management", value)
@property
@pulumi.getter(name="marketplaceType")
def marketplace_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "marketplace_type")
@marketplace_type.setter
def marketplace_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "marketplace_type", value)
@property
@pulumi.getter(name="openApiConfiguration")
def open_api_configuration(self) -> Optional[pulumi.Input['OpenApiConfigurationArgs']]:
return pulumi.get(self, "open_api_configuration")
@open_api_configuration.setter
def open_api_configuration(self, value: Optional[pulumi.Input['OpenApiConfigurationArgs']]):
pulumi.set(self, "open_api_configuration", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter
def regionality(self) -> Optional[pulumi.Input[Union[str, 'Regionality']]]:
return pulumi.get(self, "regionality")
@regionality.setter
def regionality(self, value: Optional[pulumi.Input[Union[str, 'Regionality']]]):
pulumi.set(self, "regionality", value)
@property
@pulumi.getter(name="requestHeaderOptions")
def request_header_options(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesRequestHeaderOptionsArgs']]:
return pulumi.get(self, "request_header_options")
@request_header_options.setter
def request_header_options(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesRequestHeaderOptionsArgs']]):
pulumi.set(self, "request_header_options", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@property
@pulumi.getter(name="resourceConcurrencyControlOptions")
def resource_concurrency_control_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['ResourceConcurrencyControlOptionArgs']]]]:
return pulumi.get(self, "resource_concurrency_control_options")
@resource_concurrency_control_options.setter
def resource_concurrency_control_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['ResourceConcurrencyControlOptionArgs']]]]):
pulumi.set(self, "resource_concurrency_control_options", value)
@property
@pulumi.getter(name="resourceDeletionPolicy")
def resource_deletion_policy(self) -> Optional[pulumi.Input[Union[str, 'ResourceDeletionPolicy']]]:
return pulumi.get(self, "resource_deletion_policy")
@resource_deletion_policy.setter
def resource_deletion_policy(self, value: Optional[pulumi.Input[Union[str, 'ResourceDeletionPolicy']]]):
pulumi.set(self, "resource_deletion_policy", value)
@property
@pulumi.getter(name="resourceGraphConfiguration")
def resource_graph_configuration(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceGraphConfigurationArgs']]:
return pulumi.get(self, "resource_graph_configuration")
@resource_graph_configuration.setter
def resource_graph_configuration(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceGraphConfigurationArgs']]):
pulumi.set(self, "resource_graph_configuration", value)
@property
@pulumi.getter(name="resourceMovePolicy")
def resource_move_policy(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceMovePolicyArgs']]:
return pulumi.get(self, "resource_move_policy")
@resource_move_policy.setter
def resource_move_policy(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesResourceMovePolicyArgs']]):
pulumi.set(self, "resource_move_policy", value)
@property
@pulumi.getter(name="routingType")
def routing_type(self) -> Optional[pulumi.Input[Union[str, 'RoutingType']]]:
return pulumi.get(self, "routing_type")
@routing_type.setter
def routing_type(self, value: Optional[pulumi.Input[Union[str, 'RoutingType']]]):
pulumi.set(self, "routing_type", value)
@property
@pulumi.getter(name="serviceTreeInfos")
def service_tree_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]:
return pulumi.get(self, "service_tree_infos")
@service_tree_infos.setter
def service_tree_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTreeInfoArgs']]]]):
pulumi.set(self, "service_tree_infos", value)
@property
@pulumi.getter(name="subscriptionLifecycleNotificationSpecifications")
def subscription_lifecycle_notification_specifications(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']]:
return pulumi.get(self, "subscription_lifecycle_notification_specifications")
@subscription_lifecycle_notification_specifications.setter
def subscription_lifecycle_notification_specifications(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesSubscriptionLifecycleNotificationSpecificationsArgs']]):
pulumi.set(self, "subscription_lifecycle_notification_specifications", value)
@property
@pulumi.getter(name="subscriptionStateRules")
def subscription_state_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateRuleArgs']]]]:
return pulumi.get(self, "subscription_state_rules")
@subscription_state_rules.setter
def subscription_state_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubscriptionStateRuleArgs']]]]):
pulumi.set(self, "subscription_state_rules", value)
@property
@pulumi.getter(name="swaggerSpecifications")
def swagger_specifications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SwaggerSpecificationArgs']]]]:
return pulumi.get(self, "swagger_specifications")
@swagger_specifications.setter
def swagger_specifications(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SwaggerSpecificationArgs']]]]):
pulumi.set(self, "swagger_specifications", value)
@property
@pulumi.getter(name="templateDeploymentOptions")
def template_deployment_options(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesTemplateDeploymentOptionsArgs']]:
return pulumi.get(self, "template_deployment_options")
@template_deployment_options.setter
def template_deployment_options(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesTemplateDeploymentOptionsArgs']]):
pulumi.set(self, "template_deployment_options", value)
@property
@pulumi.getter(name="throttlingRules")
def throttling_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThrottlingRuleArgs']]]]:
return pulumi.get(self, "throttling_rules")
@throttling_rules.setter
def throttling_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ThrottlingRuleArgs']]]]):
pulumi.set(self, "throttling_rules", value)
@pulumi.input_type
class ResourceTypeRegistrationArgs:
def __init__(__self__, *,
properties: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesArgs']] = None):
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['ResourceTypeRegistrationPropertiesArgs']]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['ResourceTypeRegistrationPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class ServiceTreeInfoArgs:
def __init__(__self__, *,
component_id: Optional[pulumi.Input[str]] = None,
readiness: Optional[pulumi.Input[Union[str, 'Readiness']]] = None,
                 service_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] component_id: The ServiceTree component id.
        :param pulumi.Input[Union[str, 'Readiness']] readiness: The readiness of the service.
        :param pulumi.Input[str] service_id: The ServiceTree service id.
        """
if component_id is not None:
pulumi.set(__self__, "component_id", component_id)
if readiness is not None:
pulumi.set(__self__, "readiness", readiness)
if service_id is not None:
pulumi.set(__self__, "service_id", service_id)
@property
@pulumi.getter(name="componentId")
def component_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "component_id")
@component_id.setter
def component_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "component_id", value)
@property
@pulumi.getter
def readiness(self) -> Optional[pulumi.Input[Union[str, 'Readiness']]]:
return pulumi.get(self, "readiness")
@readiness.setter
def readiness(self, value: Optional[pulumi.Input[Union[str, 'Readiness']]]):
pulumi.set(self, "readiness", value)
@property
@pulumi.getter(name="serviceId")
def service_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_id")
@service_id.setter
def service_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_id", value)
@pulumi.input_type
class SkuCapabilityArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: The capability name.
        :param pulumi.Input[str] value: The capability value.
        """
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SkuCostArgs:
def __init__(__self__, *,
meter_id: pulumi.Input[str],
extended_unit: Optional[pulumi.Input[str]] = None,
                 quantity: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] meter_id: The meter id.
        :param pulumi.Input[str] extended_unit: The extended unit.
        :param pulumi.Input[int] quantity: The quantity.
        """
pulumi.set(__self__, "meter_id", meter_id)
if extended_unit is not None:
pulumi.set(__self__, "extended_unit", extended_unit)
if quantity is not None:
pulumi.set(__self__, "quantity", quantity)
@property
@pulumi.getter(name="meterId")
def meter_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "meter_id")
@meter_id.setter
def meter_id(self, value: pulumi.Input[str]):
pulumi.set(self, "meter_id", value)
@property
@pulumi.getter(name="extendedUnit")
def extended_unit(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "extended_unit")
@extended_unit.setter
def extended_unit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extended_unit", value)
@property
@pulumi.getter
def quantity(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "quantity")
@quantity.setter
def quantity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "quantity", value)
@pulumi.input_type
class SkuLocationInfoArgs:
def __init__(__self__, *,
location: pulumi.Input[str],
extended_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
zone_details: Optional[pulumi.Input[Sequence[pulumi.Input['SkuZoneDetailArgs']]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
pulumi.set(__self__, "location", location)
if extended_locations is not None:
pulumi.set(__self__, "extended_locations", extended_locations)
if type is not None:
pulumi.set(__self__, "type", type)
if zone_details is not None:
pulumi.set(__self__, "zone_details", zone_details)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter
def location(self) -> pulumi.Input[str]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: pulumi.Input[str]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="extendedLocations")
def extended_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "extended_locations")
@extended_locations.setter
def extended_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "extended_locations", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="zoneDetails")
def zone_details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SkuZoneDetailArgs']]]]:
return pulumi.get(self, "zone_details")
@zone_details.setter
def zone_details(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SkuZoneDetailArgs']]]]):
pulumi.set(self, "zone_details", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
@pulumi.input_type
class SkuResourcePropertiesArgs:
def __init__(__self__, *,
sku_settings: pulumi.Input[Sequence[pulumi.Input['SkuSettingArgs']]],
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None):
"""
:param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: The provisioned state of the resource.
"""
pulumi.set(__self__, "sku_settings", sku_settings)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="skuSettings")
def sku_settings(self) -> pulumi.Input[Sequence[pulumi.Input['SkuSettingArgs']]]:
return pulumi.get(self, "sku_settings")
@sku_settings.setter
def sku_settings(self, value: pulumi.Input[Sequence[pulumi.Input['SkuSettingArgs']]]):
pulumi.set(self, "sku_settings", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class SkuSettingCapacityArgs:
def __init__(__self__, *,
minimum: pulumi.Input[int],
default: Optional[pulumi.Input[int]] = None,
maximum: Optional[pulumi.Input[int]] = None,
                 scale_type: Optional[pulumi.Input[Union[str, 'SkuScaleType']]] = None):
        """
        :param pulumi.Input[int] minimum: The minimum capacity.
        :param pulumi.Input[int] default: The default capacity.
        :param pulumi.Input[int] maximum: The maximum capacity.
        :param pulumi.Input[Union[str, 'SkuScaleType']] scale_type: The scale type.
        """
pulumi.set(__self__, "minimum", minimum)
if default is not None:
pulumi.set(__self__, "default", default)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if scale_type is not None:
pulumi.set(__self__, "scale_type", scale_type)
@property
@pulumi.getter
def minimum(self) -> pulumi.Input[int]:
return pulumi.get(self, "minimum")
@minimum.setter
def minimum(self, value: pulumi.Input[int]):
pulumi.set(self, "minimum", value)
@property
@pulumi.getter
def default(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "default")
@default.setter
def default(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default", value)
@property
@pulumi.getter
def maximum(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "maximum")
@maximum.setter
def maximum(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "maximum", value)
@property
@pulumi.getter(name="scaleType")
def scale_type(self) -> Optional[pulumi.Input[Union[str, 'SkuScaleType']]]:
return pulumi.get(self, "scale_type")
@scale_type.setter
def scale_type(self, value: Optional[pulumi.Input[Union[str, 'SkuScaleType']]]):
pulumi.set(self, "scale_type", value)
@pulumi.input_type
class SkuSettingArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]] = None,
capacity: Optional[pulumi.Input['SkuSettingCapacityArgs']] = None,
costs: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCostArgs']]]] = None,
family: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location_info: Optional[pulumi.Input[Sequence[pulumi.Input['SkuLocationInfoArgs']]]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
required_quota_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "name", name)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if costs is not None:
pulumi.set(__self__, "costs", costs)
if family is not None:
pulumi.set(__self__, "family", family)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location_info is not None:
pulumi.set(__self__, "location_info", location_info)
if locations is not None:
pulumi.set(__self__, "locations", locations)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
if required_quota_ids is not None:
pulumi.set(__self__, "required_quota_ids", required_quota_ids)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]]:
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input['SkuSettingCapacityArgs']]:
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input['SkuSettingCapacityArgs']]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def costs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SkuCostArgs']]]]:
return pulumi.get(self, "costs")
@costs.setter
def costs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCostArgs']]]]):
pulumi.set(self, "costs", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="locationInfo")
def location_info(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SkuLocationInfoArgs']]]]:
return pulumi.get(self, "location_info")
@location_info.setter
def location_info(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SkuLocationInfoArgs']]]]):
pulumi.set(self, "location_info", value)
@property
@pulumi.getter
def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "locations", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@property
@pulumi.getter(name="requiredQuotaIds")
def required_quota_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_quota_ids")
@required_quota_ids.setter
def required_quota_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_quota_ids", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class SkuZoneDetailArgs:
def __init__(__self__, *,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]] = None,
name: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]]:
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SkuCapabilityArgs']]]]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SubscriptionStateOverrideActionArgs:
def __init__(__self__, *,
action: pulumi.Input[Union[str, 'SubscriptionNotificationOperation']],
state: pulumi.Input[Union[str, 'SubscriptionTransitioningState']]):
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "state", state)
@property
@pulumi.getter
def action(self) -> pulumi.Input[Union[str, 'SubscriptionNotificationOperation']]:
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input[Union[str, 'SubscriptionNotificationOperation']]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def state(self) -> pulumi.Input[Union[str, 'SubscriptionTransitioningState']]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: pulumi.Input[Union[str, 'SubscriptionTransitioningState']]):
pulumi.set(self, "state", value)
@pulumi.input_type
class SubscriptionStateRuleArgs:
def __init__(__self__, *,
allowed_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
state: Optional[pulumi.Input[Union[str, 'SubscriptionState']]] = None):
if allowed_actions is not None:
pulumi.set(__self__, "allowed_actions", allowed_actions)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="allowedActions")
def allowed_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_actions")
@allowed_actions.setter
def allowed_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_actions", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[Union[str, 'SubscriptionState']]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[Union[str, 'SubscriptionState']]]):
pulumi.set(self, "state", value)
@pulumi.input_type
class SwaggerSpecificationArgs:
def __init__(__self__, *,
api_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
swagger_spec_folder_uri: Optional[pulumi.Input[str]] = None):
if api_versions is not None:
pulumi.set(__self__, "api_versions", api_versions)
if swagger_spec_folder_uri is not None:
pulumi.set(__self__, "swagger_spec_folder_uri", swagger_spec_folder_uri)
@property
@pulumi.getter(name="apiVersions")
def api_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "api_versions")
@api_versions.setter
def api_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "api_versions", value)
@property
@pulumi.getter(name="swaggerSpecFolderUri")
def swagger_spec_folder_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "swagger_spec_folder_uri")
@swagger_spec_folder_uri.setter
def swagger_spec_folder_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "swagger_spec_folder_uri", value)
@pulumi.input_type
class ThrottlingMetricArgs:
def __init__(__self__, *,
limit: pulumi.Input[float],
type: pulumi.Input[Union[str, 'ThrottlingMetricType']],
interval: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "limit", limit)
pulumi.set(__self__, "type", type)
if interval is not None:
pulumi.set(__self__, "interval", interval)
@property
@pulumi.getter
def limit(self) -> pulumi.Input[float]:
return pulumi.get(self, "limit")
@limit.setter
def limit(self, value: pulumi.Input[float]):
pulumi.set(self, "limit", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[Union[str, 'ThrottlingMetricType']]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[Union[str, 'ThrottlingMetricType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interval", value)
@pulumi.input_type
class ThrottlingRuleArgs:
def __init__(__self__, *,
action: pulumi.Input[str],
metrics: pulumi.Input[Sequence[pulumi.Input['ThrottlingMetricArgs']]],
required_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "metrics", metrics)
if required_features is not None:
pulumi.set(__self__, "required_features", required_features)
@property
@pulumi.getter
def action(self) -> pulumi.Input[str]:
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input[str]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def metrics(self) -> pulumi.Input[Sequence[pulumi.Input['ThrottlingMetricArgs']]]:
return pulumi.get(self, "metrics")
@metrics.setter
def metrics(self, value: pulumi.Input[Sequence[pulumi.Input['ThrottlingMetricArgs']]]):
pulumi.set(self, "metrics", value)
@property
@pulumi.getter(name="requiredFeatures")
def required_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "required_features")
@required_features.setter
def required_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "required_features", value)
@pulumi.input_type
class TypedErrorInfoArgs:
def __init__(__self__, *,
type: pulumi.Input[str]):
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
|
3485944f361651cfad36376cdcc8bdc7e40215e4
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-Cocoa-2.5.1/PyObjCTest/test_nskeyvaluebinding.py
|
f7eb7c4d021649a715ba3daa85d3de94b7228128
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 8,650
|
py
|
test_nskeyvaluebinding.py
|
from PyObjCTools.TestSupport import *
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSKeyValueBindingHelper (NSObject):
def commitEditingWithDelegate_didCommitSelector_contextInfo_(self, d, s, i):
return None
def commitEditingAndReturnError_(self, v): return 1
def commitEditing(self): return 1
class TestNSKeyValueBinding (TestCase):
def testConstants(self):
self.assertIsInstance(NSMultipleValuesMarker, NSObject)
self.assertIsInstance(NSNoSelectionMarker, NSObject)
self.assertIsInstance(NSNotApplicableMarker, NSObject)
self.assertIsInstance(NSObservedObjectKey, unicode)
self.assertIsInstance(NSObservedKeyPathKey, unicode)
self.assertIsInstance(NSOptionsKey, unicode)
self.assertIsInstance(NSAlignmentBinding, unicode)
self.assertIsInstance(NSAlternateImageBinding, unicode)
self.assertIsInstance(NSAlternateTitleBinding, unicode)
self.assertIsInstance(NSAnimateBinding, unicode)
self.assertIsInstance(NSAnimationDelayBinding, unicode)
self.assertIsInstance(NSArgumentBinding, unicode)
self.assertIsInstance(NSAttributedStringBinding, unicode)
self.assertIsInstance(NSContentArrayBinding, unicode)
self.assertIsInstance(NSContentArrayForMultipleSelectionBinding, unicode)
self.assertIsInstance(NSContentBinding, unicode)
self.assertIsInstance(NSContentHeightBinding, unicode)
self.assertIsInstance(NSContentObjectBinding, unicode)
self.assertIsInstance(NSContentObjectsBinding, unicode)
self.assertIsInstance(NSContentSetBinding, unicode)
self.assertIsInstance(NSContentValuesBinding, unicode)
self.assertIsInstance(NSContentWidthBinding, unicode)
self.assertIsInstance(NSCriticalValueBinding, unicode)
self.assertIsInstance(NSDataBinding, unicode)
self.assertIsInstance(NSDisplayPatternTitleBinding, unicode)
self.assertIsInstance(NSDisplayPatternValueBinding, unicode)
self.assertIsInstance(NSDocumentEditedBinding, unicode)
self.assertIsInstance(NSDoubleClickArgumentBinding, unicode)
self.assertIsInstance(NSDoubleClickTargetBinding, unicode)
self.assertIsInstance(NSEditableBinding, unicode)
self.assertIsInstance(NSEnabledBinding, unicode)
self.assertIsInstance(NSFilterPredicateBinding, unicode)
self.assertIsInstance(NSFontBinding, unicode)
self.assertIsInstance(NSFontBoldBinding, unicode)
self.assertIsInstance(NSFontFamilyNameBinding, unicode)
self.assertIsInstance(NSFontItalicBinding, unicode)
self.assertIsInstance(NSFontNameBinding, unicode)
self.assertIsInstance(NSFontSizeBinding, unicode)
self.assertIsInstance(NSHeaderTitleBinding, unicode)
self.assertIsInstance(NSHiddenBinding, unicode)
self.assertIsInstance(NSImageBinding, unicode)
self.assertIsInstance(NSIsIndeterminateBinding, unicode)
self.assertIsInstance(NSLabelBinding, unicode)
self.assertIsInstance(NSManagedObjectContextBinding, unicode)
self.assertIsInstance(NSMaximumRecentsBinding, unicode)
self.assertIsInstance(NSMaxValueBinding, unicode)
self.assertIsInstance(NSMaxWidthBinding, unicode)
self.assertIsInstance(NSMinValueBinding, unicode)
self.assertIsInstance(NSMinWidthBinding, unicode)
self.assertIsInstance(NSMixedStateImageBinding, unicode)
self.assertIsInstance(NSOffStateImageBinding, unicode)
self.assertIsInstance(NSOnStateImageBinding, unicode)
self.assertIsInstance(NSPredicateBinding, unicode)
self.assertIsInstance(NSRecentSearchesBinding, unicode)
self.assertIsInstance(NSRepresentedFilenameBinding, unicode)
self.assertIsInstance(NSRowHeightBinding, unicode)
self.assertIsInstance(NSSelectedIdentifierBinding, unicode)
self.assertIsInstance(NSSelectedIndexBinding, unicode)
self.assertIsInstance(NSSelectedLabelBinding, unicode)
self.assertIsInstance(NSSelectedObjectBinding, unicode)
self.assertIsInstance(NSSelectedObjectsBinding, unicode)
self.assertIsInstance(NSSelectedTagBinding, unicode)
self.assertIsInstance(NSSelectedValueBinding, unicode)
self.assertIsInstance(NSSelectedValuesBinding, unicode)
self.assertIsInstance(NSSelectionIndexesBinding, unicode)
self.assertIsInstance(NSSelectionIndexPathsBinding, unicode)
self.assertIsInstance(NSSortDescriptorsBinding, unicode)
self.assertIsInstance(NSTargetBinding, unicode)
self.assertIsInstance(NSTextColorBinding, unicode)
self.assertIsInstance(NSTitleBinding, unicode)
self.assertIsInstance(NSToolTipBinding, unicode)
self.assertIsInstance(NSValueBinding, unicode)
self.assertIsInstance(NSValuePathBinding, unicode)
self.assertIsInstance(NSValueURLBinding, unicode)
self.assertIsInstance(NSVisibleBinding, unicode)
self.assertIsInstance(NSWarningValueBinding, unicode)
self.assertIsInstance(NSWidthBinding, unicode)
self.assertIsInstance(NSAllowsEditingMultipleValuesSelectionBindingOption, unicode)
self.assertIsInstance(NSAllowsNullArgumentBindingOption, unicode)
self.assertIsInstance(NSAlwaysPresentsApplicationModalAlertsBindingOption, unicode)
self.assertIsInstance(NSConditionallySetsEditableBindingOption, unicode)
self.assertIsInstance(NSConditionallySetsEnabledBindingOption, unicode)
self.assertIsInstance(NSConditionallySetsHiddenBindingOption, unicode)
self.assertIsInstance(NSContinuouslyUpdatesValueBindingOption, unicode)
self.assertIsInstance(NSCreatesSortDescriptorBindingOption, unicode)
self.assertIsInstance(NSDeletesObjectsOnRemoveBindingsOption, unicode)
self.assertIsInstance(NSDisplayNameBindingOption, unicode)
self.assertIsInstance(NSDisplayPatternBindingOption, unicode)
self.assertIsInstance(NSHandlesContentAsCompoundValueBindingOption, unicode)
self.assertIsInstance(NSInsertsNullPlaceholderBindingOption, unicode)
self.assertIsInstance(NSInvokesSeparatelyWithArrayObjectsBindingOption, unicode)
self.assertIsInstance(NSMultipleValuesPlaceholderBindingOption, unicode)
self.assertIsInstance(NSNoSelectionPlaceholderBindingOption, unicode)
self.assertIsInstance(NSNotApplicablePlaceholderBindingOption, unicode)
self.assertIsInstance(NSNullPlaceholderBindingOption, unicode)
self.assertIsInstance(NSRaisesForNotApplicableKeysBindingOption, unicode)
self.assertIsInstance(NSPredicateFormatBindingOption, unicode)
self.assertIsInstance(NSSelectorNameBindingOption, unicode)
self.assertIsInstance(NSSelectsAllWhenSettingContentBindingOption, unicode)
self.assertIsInstance(NSValidatesImmediatelyBindingOption, unicode)
self.assertIsInstance(NSValueTransformerNameBindingOption, unicode)
self.assertIsInstance(NSValueTransformerBindingOption, unicode)
@min_os_level("10.5")
def testConstants10_5(self):
self.assertIsInstance(NSContentDictionaryBinding, unicode)
self.assertIsInstance(NSExcludedKeysBinding, unicode)
self.assertIsInstance(NSIncludedKeysBinding, unicode)
self.assertIsInstance(NSInitialKeyBinding, unicode)
self.assertIsInstance(NSInitialValueBinding, unicode)
self.assertIsInstance(NSLocalizedKeyDictionaryBinding, unicode)
self.assertIsInstance(NSTransparentBinding, unicode)
self.assertIsInstance(NSContentPlacementTagBindingOption, unicode)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertIsInstance(NSPositioningRectBinding, unicode)
def testFunctions(self):
o = NSObject.alloc().init()
self.assertIs(NSIsControllerMarker(o), False)
self.assertIs(NSIsControllerMarker(NSMultipleValuesMarker), True)
def testMethods(self):
o = TestNSKeyValueBindingHelper.alloc().init()
m = o.commitEditingWithDelegate_didCommitSelector_contextInfo_.__metadata__()
self.assertEqual(m['arguments'][3]['sel_of_type'], b'v@:@Z^v')
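        # the expected selector encoding reads: v = void return, @ = receiver,
        # : = selector, @ = the editor object, Z = BOOL (did commit),
        # ^v = void* context pointer (PyObjC metadata type encoding)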
self.assertResultIsBOOL(TestNSKeyValueBindingHelper.commitEditing)
@min_os_level('10.7')
@expectedFailure
def testMethods10_7(self):
self.assertResultIsBOOL(TestNSKeyValueBindingHelper.commitEditingAndReturnError_)
self.assertArgIsOut(TestNSKeyValueBindingHelper.commitEditingAndReturnError_, 0)
if __name__ == "__main__":
main()
|
8ef504c6d1619697c387c203170a800e5e8356e0
|
3a2a3d94e9dfd49072f961ce8c2c41fa337c5bb9
|
/phobos/ci/base_model.py
|
73eff0fc380ca79d4cfebd779e37182cc99c3f1a
|
[
"BSD-3-Clause"
] |
permissive
|
dfki-ric/phobos
|
2fc8f9acc14a24418985899a8cdbefd60df8a5b9
|
543d220c65bbee0e23e810d89307e23aa79eb0cd
|
refs/heads/master
| 2023-09-02T05:24:02.999410
| 2023-08-31T13:18:49
| 2023-08-31T13:18:49
| 22,949,566
| 483
| 79
|
BSD-3-Clause
| 2023-09-06T13:35:29
| 2014-08-14T10:08:49
|
Python
|
UTF-8
|
Python
| false
| false
| 50,714
|
py
|
base_model.py
|
import os
import re
from copy import deepcopy, copy
import numpy as np
import yaml
from ..commandline_logging import get_logger
from ..core import Robot
from ..defs import load_json, dump_yaml, KINEMATIC_TYPES
from ..geometry import replace_collision, join_collisions, remove_collision
from ..io import representation, sensor_representations, poses
from ..io.hyrodyn import ConstraintAxis
from ..utils import misc, git, xml, transform, resources
log = get_logger(__name__)
SUBMECHS_VIA_ASSEMBLIES = False
class BaseModel(yaml.YAMLObject):
def __init__subclass__(self, configfile, pipeline, processed_model_exists=True):
self.processed_model_exists = processed_model_exists
self.pipeline = pipeline
if type(configfile) is str:
if not os.path.isfile(configfile):
raise Exception('{} not found!'.format(configfile))
self.cfg = load_json(open(configfile, 'r'))
else:
self.cfg = configfile
# These variables have to be defined in the config file
self.modelname = ""
self.robotname = ""
self.test = {}
self.export_config = []
kwargs = {}
if 'model' in self.cfg.keys():
kwargs = self.cfg['model']
for (k, v) in kwargs.items():
setattr(self, k, v)
# check whether all necessary configurations are there
assert hasattr(self, "modelname") and len(self.modelname) > 0
assert hasattr(self, "robotname") and len(self.robotname) > 0
assert hasattr(self, "export_config") and len(self.export_config) >= 1
assert hasattr(self, "test") and len(self.test) >= 1
self.test = misc.merge_default(
self.test,
self.pipeline.default_test if hasattr(pipeline, "default_test") else resources.get_default_ci_test_definition()
)
self.deployment = misc.merge_default(
self.deployment if hasattr(self, "deployment") else {},
self.pipeline.default_deployment if hasattr(pipeline, "default_deployment") else resources.get_default_ci_deploy_definition()
)
# get directories for this model
self.exportdir = os.path.join(self.pipeline.temp_dir, self.modelname)
self.targetdir = os.path.join(self.pipeline.root, self.modelname)
self.tempdir = os.path.join(self.pipeline.temp_dir, "temp_" + self.modelname)
# parse export_config
self.export_meshes = {}
self.export_xmlfile = None
for ec in self.export_config:
if ec["type"] in KINEMATIC_TYPES:
assert "mesh_format" in ec
if ec["mesh_format"] == "input_type":
log.warning("Due to the mesh handling on the git repos for CI-pipelines using input_type as mesh format is not yet fully tested and supported.")
if "additional_meshes" in ec and type(ec["additional_meshes"]) == str:
ec["additional_meshes"] = [ec["additional_meshes"]]
elif not ("additional_meshes" in ec and type(ec["additional_meshes"]) == list):
ec["additional_meshes"] = []
if "link_in_smurf" in ec and ec["link_in_smurf"] is True:
if self.export_xmlfile is not None:
raise AssertionError("Can only have one kinematics defining xml file in smurf, "
"but defined multiple exports to be linked in smurf (link_in_smurf).")
self.export_xmlfile = self.robotname
if "filename_suffix" in ec:
self.export_xmlfile += ec["filename_suffix"]
ext = ec["type"].split("_")[-1].lower()
assert ext in ["sdf", "urdf"]
self.export_xmlfile += "." + ext
self.export_meshes = {
mt.lower(): self.pipeline.meshes[mt] for mt in [ec["mesh_format"]] + ec["additional_meshes"]
}
elif ec["type"] == "kccd":
self.export_meshes["iv"] = self.pipeline.meshes["iv"]
def __init__(self, configfile, pipeline, processed_model_exists=True):
# These variables have to be defined in the config file
self.input_models = {}
self.assemble = {}
# init
self.__init__subclass__(configfile, pipeline, processed_model_exists)
# check whether all necessary configurations are there
assert hasattr(self, "input_models") and len(self.input_models) >= 1
assert hasattr(self, "assemble") and len(self.assemble) > 0
# list directly imported mesh pathes
# [TODO pre_v2.0.0] REVIEW mesh usage
self._meshes = []
for _, v in self.input_models.items():
if "basefile" in v.keys():
r = Robot(inputfile=v["basefile"], is_human=v["is_human"] if "is_human" in v else False)
for link in r.links:
for g in link.visuals + link.collisions:
if isinstance(g.geometry, representation.Mesh):
self._meshes += [xml.read_relative_filename(g.geometry.filepath[:-4], v["basefile"])]
elif "repo" in v.keys():
repo_path = os.path.join(self.tempdir, "repo", os.path.basename(self.input_models["repo"]["git"]))
git.clone(
pipeline = self,
repo=self.input_models["repo"]["git"],
target=repo_path,
commit_id=self.input_models["repo"]["commit"],
recursive=True,
ignore_failure=True
)
self.basefile = os.path.join(repo_path, self.input_models["repo"]["model_in_repo"])
r = Robot(inputfile=self.basefile)
for link in r.links:
for g in link.visuals + link.collisions:
if isinstance(g.geometry, representation.Mesh):
self._meshes += [xml.read_relative_filename(g.geometry.filename[:-4], self.basefile)]
self.processed_meshes = set() # used to make mesh processing more efficient
# where to find the already processed model
self.basedir = os.path.join(self.tempdir, "combined_model")
self.basefile = os.path.join(self.basedir, "smurf", "combined_model.smurf")
if self.processed_model_exists:
self._load_robot()
log.debug(f"Finished reading config and joining models to base model {configfile}")
@staticmethod
def get_exported_model_path(pipeline, configfile):
cfg = load_json(open(configfile, 'r'))
assert "model" in cfg
cfg = cfg["model"]
return os.path.join(pipeline.temp_dir, cfg["modelname"], "smurf", cfg["robotname"] + ".smurf")
def _load_robot(self):
if not self.processed_model_exists:
if os.path.exists(os.path.join(self.basedir, "smurf", "combined_model.smurf")):
# may be there is already an assembly from a stopped job
self.robot = Robot(name=self.robotname if self.robotname else None,
inputfile=os.path.join(self.basedir, "smurf", "combined_model.smurf"))
else:
# create a robot with the basic properties given
self.robot = Robot(name=self.robotname if self.robotname else None)
else:
if os.path.exists(os.path.join(self.exportdir, "smurf", self.robotname + ".smurf")):
self.robot = Robot(name=self.robotname if self.robotname else None,
inputfile=os.path.join(self.exportdir, "smurf", self.robotname + ".smurf"))
else:
raise Exception('Preprocessed file {} not found!'.format(self.basefile))
def _join_to_basefile(self):
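        """Load all input models, attach them according to the assemble config and export the combined model."""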
# get all the models we need
self.dep_models = {}
for name, config in self.input_models.items():
if "derived_base" in config.keys():
self.dep_models.update({
name: BaseModel(
os.path.join(self.pipeline.configdir, config["derived_base"]),
self.pipeline, processed_model_exists=True)
})
# copy the mesh files to the temporary combined model directory
for mp in self.dep_models[name].export_meshes.values():
misc.create_symlink(
self.pipeline, os.path.join(self.pipeline.temp_dir, str(mp)), os.path.join(self.basedir, str(mp))
)
for name, config in self.input_models.items():
if "basefile" in config.keys():
kwargs = {}
kwargs["inputfile"] = config["basefile"]
if "is_human" in config:
kwargs["is_human"] = config["is_human"]
kwargs["inputfile"] = config["basefile"]
self.dep_models.update({name: Robot(name=name, **kwargs)})
# copy the mesh files to the temporary combined model directory
elif "repo" in config.keys():
repo_path = os.path.join(self.tempdir, "repo", os.path.basename(config["repo"]["git"]))
git.clone(
pipeline=self.pipeline,
repo=config["repo"]["git"],
target=repo_path,
branch=config["repo"]["commit"],
recursive=True,
ignore_failure=True
)
self.dep_models.update({
name: Robot(name=name, inputfile=os.path.join(repo_path, config["repo"]["model_in_repo"]),
submechanisms_file=os.path.join(repo_path, config["repo"]["submechanisms_in_repo"])
if "submechanisms_in_repo" in config["repo"] else None,
is_human=config["is_human"] if "is_human" in config else False)
})
# copy the mesh files to the temporary combined model directory
        # now we can join these models
# 1. get root model
if isinstance(self.dep_models[self.assemble["model"]], Robot):
combined_model = self.dep_models[self.assemble["model"]].duplicate()
else: # it must be an instance of BaseModel
combined_model = self.dep_models[self.assemble["model"]].robot.duplicate()
combined_model.name = self.robotname
combined_model.autogenerate_submechanisms = False
combined_model.smurffile = self.basefile
combined_model.xmlfile = os.path.join(self.basedir, "urdf", "combined_model.urdf")
if "name_editing" in self.assemble.keys():
combined_model.edit_names(self.assemble["name_editing"])
for c in self.assemble["children"]:
c["joint"]["parent"] = misc.edit_name_string(
c["joint"]["parent"],
prefix=self.assemble["name_editing"]["prefix"] if "prefix" in self.assemble["name_editing"] else "",
suffix=self.assemble["name_editing"]["suffix"] if "suffix" in self.assemble["name_editing"] else "",
replacements=self.assemble["name_editing"]["replacements"] if "replacements" in self.assemble["name_editing"] else {})
c["joint"]["parent"] = misc.edit_name_string(
c["joint"]["parent"],
prefix=self.assemble["name_editing"]["link_prefix"] if "link_prefix" in self.assemble["name_editing"] else "",
suffix=self.assemble["name_editing"]["link_suffix"] if "link_suffix" in self.assemble["name_editing"] else "",
replacements=self.assemble["name_editing"]["link_replacements"] if "link_replacements" in self.assemble["name_editing"] else {})
if "remove_beyond" in self.assemble.keys():
combined_model = combined_model.get_before(self.assemble["remove_beyond"])
if "take_leaf" in self.assemble.keys():
assert type(self.assemble["take_leaf"]) == str
combined_model = combined_model.get_beyond(self.assemble["take_leaf"])
assert self.assemble["take_leaf"] in combined_model, f"{combined_model}"
combined_model = combined_model[self.assemble["take_leaf"]]
if "mirror" in self.assemble.keys():
combined_model.mirror_model(
mirror_plane=self.assemble["mirror"]["plane"] if "plane" in self.assemble["mirror"].keys() else [0, 1, 0],
flip_axis=self.assemble["mirror"]["flip_axis"] if "flip_axis" in self.assemble["mirror"].keys() else 1,
exclude_meshes=self.assemble["mirror"]["exclude_meshes"] if "exclude_meshes" in self.assemble["mirror"].keys() else [],
target_urdf=os.path.dirname(self.basefile)
)
# 2. go recursively through the children and attach them
def recursive_attach(parent, children, parentname):
for child in children:
if isinstance(self.dep_models[parentname], Robot):
parent_model = self.dep_models[parentname]
else:
parent_model = self.dep_models[parentname].robot
if isinstance(self.dep_models[child["model"]], Robot):
att_model = self.dep_models[child["model"]].duplicate()
else:
att_model = self.dep_models[child["model"]].robot.duplicate()
att_model.link_entities()
if "child" not in child["joint"].keys() or child["joint"]["child"] is None:
child["joint"]["child"] = str(att_model.get_root())
if "take_leaf" in child:
child["joint"]["child"] = child["take_leaf"]
if "r2r_transform" in child["joint"].keys():
T = np.array(child["joint"]["r2r_transform"])
src_T = parent_model.get_transformation(child["joint"]["parent"])
dst_T = att_model.get_transformation(child["joint"]["child"])
T = np.linalg.inv(src_T).dot(T).dot(dst_T)
_temp = representation.Pose.from_matrix(T, relative_to=child["joint"]["parent"])
child["joint"]["xyz"] = _temp.xyz
child["joint"]["rpy"] = _temp.rpy
if "name_editing" in child.keys():
att_model.edit_names(child["name_editing"])
child["joint"]["child"] = misc.edit_name_string(
child["joint"]["child"],
prefix=child["name_editing"]["prefix"] if "prefix" in child["name_editing"] else "",
suffix=child["name_editing"]["suffix"] if "suffix" in child["name_editing"] else "",
replacements=child["name_editing"]["replacements"] if "replacements" in child["name_editing"] else {})
child["joint"]["child"] = misc.edit_name_string(
child["joint"]["child"],
prefix=child["name_editing"]["link_prefix"] if "link_prefix" in child["name_editing"] else "",
suffix=child["name_editing"]["link_suffix"] if "link_suffix" in child["name_editing"] else "",
replacements=child["name_editing"]["link_replacements"] if "link_replacements" in child["name_editing"] else {})
if "children" in child:
for c in child["children"]:
c["joint"]["parent"] = misc.edit_name_string(
c["joint"]["parent"],
prefix=child["name_editing"]["prefix"] if "prefix" in child["name_editing"] else "",
suffix=child["name_editing"]["suffix"] if "suffix" in child["name_editing"] else "",
replacements=child["name_editing"]["replacements"] if "replacements" in child["name_editing"] else {})
c["joint"]["parent"] = misc.edit_name_string(
c["joint"]["parent"],
prefix=child["name_editing"]["link_prefix"] if "link_prefix" in child["name_editing"] else "",
suffix=child["name_editing"]["link_suffix"] if "link_suffix" in child["name_editing"] else "",
replacements=child["name_editing"]["link_replacements"] if "link_replacements" in child["name_editing"] else {})
att_model.unlink_entities()
if "remove_beyond" in child.keys():
att_model = att_model.get_before(child["remove_beyond"])
if "take_leaf" in child.keys():
assert type(child["take_leaf"]) == str
att_model = att_model.get_beyond(child["take_leaf"])
assert len(att_model.keys()) == 1, "take_leaf: Please cut the tree in a way to get only one leaf"
att_model = list(att_model.values())[0]
if "mirror" in child.keys():
att_model.mirror_model(
mirror_plane=child["mirror"]["plane"] if "plane" in child["mirror"].keys() else [0, 1, 0],
flip_axis=child["mirror"]["flip_axis"] if "flip_axis" in child["mirror"].keys() else 1,
exclude_meshes=child["mirror"]["exclude_meshes"] if "exclude_meshes" in child["mirror"].keys() else [],
target_urdf=combined_model.xmlfile,
final_linking_optional=True
)
if "name" not in child["joint"].keys() or child["joint"]["name"] is None:
child["joint"]["name"] = child["joint"]["parent"] + "2" + child["joint"]["child"]
if "type" not in child["joint"].keys() or child["joint"]["type"] is None:
child["joint"]["type"] = "fixed"
if parent.get_link(child["joint"]["parent"]) is None:
log.error(f"Parent links: {sorted([lnk.name for lnk in parent.links])}")
raise AssertionError(
"Problem with assembling joint " + child["joint"]["parent"] + " -> " + child["joint"]["child"]
+ ": the parent link doesn't exist! (Further info above)")
elif att_model.get_link(child["joint"]["child"]) is None:
log.error(f"Child links: {sorted([lnk.name for lnk in att_model.links])}")
raise AssertionError(
"Problem with assembling joint " + child["joint"]["parent"] + " -> " + child["joint"]["child"]
+ ": the child link doesn't exist! (Further info above)")
assert att_model.get_joint(child["joint"]["name"]) is None and parent.get_joint(child["joint"]["name"]) is None,\
f'Can not join using joint name {child["joint"]["name"]} as this name already exists.'
joint = representation.Joint(
name=child["joint"]["name"],
parent=parent.get_link(child["joint"]["parent"]).name,
child=att_model.get_link(child["joint"]["child"]).name,
joint_type=child["joint"]["type"] if "type" in child["joint"].keys() else "fixed",
origin=representation.Pose(
child["joint"]["xyz"] if "xyz" in child["joint"].keys() else [0, 0, 0],
child["joint"]["rpy"] if "rpy" in child["joint"].keys() else [0, 0, 0],
relative_to=parent.get_parent(child["joint"]["parent"])
),
limit=representation.JointLimit(
effort=child["joint"]["eff"] if "eff" in child["joint"].keys() else 0,
velocity=child["joint"]["vel"] if "vel" in child["joint"].keys() else 0,
lower=child["joint"]["min"] if "min" in child["joint"].keys() else 0,
upper=child["joint"]["max"] if "max" in child["joint"].keys() else 0)
if child["joint"]["type"] != "fixed" else None
)
parent.attach(att_model if isinstance(att_model, Robot) else att_model.robot, joint, do_not_rename=False)
assert len(combined_model.links) == len(combined_model.joints) + 1
parent.unlink_entities()
if "children" in child.keys():
recursive_attach(parent, child["children"], child["model"])
if "children" in self.assemble.keys() and len(self.assemble["children"]) > 0:
recursive_attach(combined_model, self.assemble["children"], parentname=self.assemble["model"])
combined_model.link_entities()
# 3. save combined_model to the temp directory
assert len(combined_model.links) == len(combined_model.joints) + 1
combined_model.name = "combined_model"
combined_model.export(outputdir=self.basedir, export_config=resources.get_default_export_config("minimal"),
check_submechs=False)
def recreate_sym_links(self):
for mt, mp in self.export_meshes.items():
log.info('Re-creating mesh symlinks')
misc.create_symlink(
self.pipeline, os.path.join(self.pipeline.temp_dir, str(mp)), os.path.join(self.exportdir, str(mp))
)
def process(self):
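        """Apply the configured model edits (materials, frames, joints, collisions, submechanisms, sensors, poses, annotations) to the robot."""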
misc.recreate_dir(self.pipeline, self.tempdir)
misc.recreate_dir(self.pipeline, self.exportdir)
# Make sure the mesh symlinks are set correctly
for mt, mp in self.export_meshes.items():
log.info(' Creating mesh symlinks')
misc.create_symlink(
self.pipeline, os.path.join(self.pipeline.temp_dir, str(mp)), os.path.join(self.exportdir, str(mp))
)
self._join_to_basefile()
self._load_robot()
assert hasattr(self, 'robot') and hasattr(self, 'pipeline')
log.info('Start processing robot')
self.robot.correct_inertials()
self.robot.clean_meshes()
if hasattr(self, "materials"):
for m_name, m_def in self.materials.items():
material_instance = self.robot.get_material(m_name)
if material_instance is not None:
material_instance.add_annotations(**m_def)
log.debug('Added annotation to Material {}'.format(m_name))
else:
material_instance = representation.Material(name=m_name, **m_def)
self.robot.add_aggregate("material", material_instance)
log.debug('Defined Material {}'.format(m_name))
if hasattr(self, "frames"):
_default = {} if "$default" not in self.frames else self.frames["$default"]
_ignore_new_links = []
for linkname, config in self.frames.items():
if linkname.startswith("$"):
continue
self.frames[linkname] = misc.merge_default(config, _default)
config = copy(self.frames[linkname])
if self.robot.get_link(linkname) is None:
assert "transform_frame" not in config and "transform_link" not in config
assert "joint" in config
_joint_def = config.pop("joint")
_joint_def = misc.merge_default(_joint_def, resources.get_default_joint(_joint_def["type"]))
parent_link = _joint_def.pop("parent")
parent_joint = self.robot.get_parent(parent_link)
_joint = representation.Joint(
child=linkname,
parent=parent_link,
origin=representation.Pose(xyz=_joint_def.pop("xyz"), rpy=_joint_def.pop("rpy"),
relative_to=parent_joint),
**_joint_def
)
self.robot.add_link_by_properties(linkname, _joint, **config)
_ignore_new_links.append(linkname)
for link in self.robot.links:
linkname = link.name
if linkname in _ignore_new_links:
continue
config = self.frames[linkname] if linkname in self.frames else _default
for k, v in config.items():
if k in ["transform_frame", "transform_link"]:
transformation = transform.create_transformation(
xyz=v["xyz"] if "xyz" in v.keys() else [0, 0, 0],
rpy=v["rpy"] if "rpy" in v.keys() else [0, 0, 0]
)
# this is never really used and therefore not perfectly tested, hence commented
# if "transform" in v.keys() and v["transform"] == "TO":
# if self.robot.getParent(k) is not None:
# transformation = inv(
# Homogeneous(self.robot.getJoint(self.robot.getParent(k)[0]).origin)
# ).dot(transformation)
                    # else: "BY" (relative transform, handled below)
self.robot.transform_link_orientation(
linkname=linkname,
transformation=transformation,
only_frame=(k == "transform_frame"),
# transform_to="transform" in v.keys() and v["transform"] == "TO"
)
elif k == "reparent_to":
self.robot.move_link_in_tree(link_name=linkname, new_parent_name=v)
elif k == "estimate_missing_com" and v is True:
self.robot.set_estimated_link_com(linkname, dont_overwrite=True)
elif k == "material" or k == "materials":
link.materials = v
else:
link.add_annotation(k, v, overwrite=True)
if hasattr(self, "joints"):
if '$replace_joint_types' in self.joints:
for joint in self.robot.joints:
for old, new in self.joints["$replace_joint_types"].items():
if joint.joint_type == old:
joint.joint_type = new
transmissions = {}
_default = {} if "$default" not in self.joints else self.joints["$default"]
faulty_joint_defs = []
for jointname, config in self.joints.items():
if jointname.startswith("$"):
continue
if self.robot.get_joint(jointname) is None and ("cut_joint" not in config or config["cut_joint"] is False):
faulty_joint_defs += [(jointname, [str(j) for j in self.robot.joints if jointname in str(j) or str(j) in jointname])]
elif self.robot.get_joint(jointname) is None and ("cut_joint" in config and config["cut_joint"] is True): # cut_joint
# [TODO v2.0.0] Review and Check whether this works as expected
# Check whether everything is given and calculate origin and axis (?)
_joint = representation.Joint(**config)
assert "constraint_axes" in config
_joint.constraint_axes = [ConstraintAxis(**ca) for ca in config["constraint_axes"]]
assert _joint.check_valid()
self.robot.add_aggregate("joint", _joint)
if len(faulty_joint_defs) > 0:
log.error("The following joint changes are defined but the joint does not exist:")
for fjd in faulty_joint_defs:
log.error(f"- {fjd[0]} "+(f"Did you mean: {fjd[1]}" if len(fjd[1]) > 0 else ""))
remove_joints = []
for joint in self.robot.joints:
jointname = joint.name
if jointname in self.joints:
config = misc.merge_default(self.joints[jointname], _default)
for k, v in config.items():
if k == "remove" and v is True:
remove_joints.append(jointname)
break
if k in ["min", "max", "eff", "vel"]:
if joint.limit is None:
joint.limit = representation.JointLimit()
if k == "move_joint_axis_to_intersection":
self.robot.move_joint_to_intersection(jointname, v)
elif k == "type":
joint.joint_type = v
elif k == "min":
joint.limit.lower = misc.read_number_from_config(v)
elif k == "max":
joint.limit.upper = misc.read_number_from_config(v)
elif k == "eff":
joint.limit.effort = misc.read_number_from_config(v)
elif k == "vel":
joint.limit.velocity = misc.read_number_from_config(v)
elif k == "movement_depends_on":
for jd in v:
joint.joint_dependencies.append(representation.JointMimic(joint=jd["joint_name"],
offset=jd["offset"],
multiplier=jd["multiplier"]))
elif k == "mimic":
if "joint_name" in v:
v["joint"] = v.pop("joint_name")
joint.mimic = representation.JointMimic(**v)
elif k == "active":
v = misc.merge_default(v, resources.get_default_motor())
if type(v) == dict:
if "name" not in v:
v["name"] = jointname+"_motor"
if "joint" not in v:
v["joint"] = jointname
else:
assert jointname == v["joint"]
_motor = representation.Motor(**v)
self.robot.add_motor(_motor)
elif v is True:
_motor = representation.Motor(name=jointname+"_motor", joint=jointname)
self.robot.add_motor(_motor)
else: # axis
joint.add_annotation(k, v, overwrite=True)
elif "$default" in self.joints:
config = _default
for k, v in config.items():
if k not in ["min", "max", "eff", "vel", "movement_depends_on", "active"] + representation.Joint._class_variables:
joint.add_annotation(k, v, overwrite=True)
# [TODO v2.0.0] Re-add transmission support
joint.link_with_robot(self.robot)
for joint in remove_joints:
self.robot.remove_joint(joint)
# Check for joint definitions
self.robot.check_joint_definitions(
raise_error=True,
backup=self.joints["$default"] if (
hasattr(self, "joints")
and "$default" in self.joints.keys()
and "backup" in self.joints["$default"].keys()
and self.joints["$default"]["backup"]
) else None
)
if hasattr(self, 'collisions'):
for link in self.robot.links:
conf = deepcopy(self.collisions["$default"])
exclude = self.collisions["exclude"] if "exclude" in self.collisions.keys() else []
if link.name in exclude:
continue
if link.name in self.collisions.keys():
conf = misc.merge_default(self.collisions[link.name], conf)
if "remove" in conf.keys():
if type(conf["remove"]) is list:
remove_collision(self.robot, link.name, collisionname=conf["remove"])
elif type(conf["remove"]) is str:
remove_collision(self.robot, link.name, collisionname=[c.name for c in link.collisions if
re.fullmatch(r"" + conf["remove"],
c.name) is not None])
if "join" in conf.keys() and conf["join"] is True:
if len(link.collisions) > 1:
# print(" Joining meshes of", link.name)
join_collisions(self.robot, link.name, name_id=self.modelname)
if "shape" in conf.keys():
if conf["shape"] == "remove":
remove_collision(self.robot, link.name)
elif conf["shape"] != "mesh":
if not ("do_not_apply_primitives" in self.collisions.keys() and
self.collisions["do_not_apply_primitives"] is True):
replace_collision(
self.robot, link.name,
shape=conf["shape"],
oriented=conf["oriented"] if "oriented" in conf.keys() else True,
scale=conf["scale"] if "scale" in conf.keys() else 1.0,
)
# leads to problems in reusing identic meshes
# if conf["shape"] == "convex":
# reduceMeshCollision(self.robot, link.name, reduction=0.3)
if "auto_bitmask" in self.collisions.keys() and \
self.collisions["auto_bitmask"] is True:
log.debug(" Setting auto bitmask")
kwargs = self.collisions["default"] if "default" in self.collisions.keys() else {}
self.robot.set_self_collision(
1,
coll_override=self.collisions["collision_between"]
if "collision_between" in self.collisions.keys() else {},
no_coll_override=self.collisions["no_collision_between"]
if "no_collision_between" in self.collisions.keys() else {},
**kwargs
)
for coll in self.robot.get_all_collisions():
if coll.name in self.collisions.keys():
for k, v in self.collisions[coll.name].items():
setattr(coll, k, v)
else:
for coll_name in self.collisions.keys():
conf = self.collisions[coll_name]
coll = self.robot.get_collision_by_name(coll_name)
if coll is not None:
for key in ["name", "link", "geometry", "origin"]:
if key in conf:
conf.pop(key)
coll.add_annotations(**conf)
if hasattr(self, "exoskeletons") or hasattr(self, "submechanisms"):
if hasattr(self, "exoskeletons"):
self.robot.load_submechanisms({"exoskeletons": deepcopy(self.exoskeletons)},
replace_only_conflicting=True)
if hasattr(self, "submechanisms"):
self.robot.load_submechanisms({"submechanisms": deepcopy(self.submechanisms)},
replace_only_conflicting=True)
elif hasattr(self, "submechanisms_file"):
self.robot.autogenerate_submechanisms = False
self.robot.load_submechanisms(deepcopy(self.submechanisms_file))
# if hasattr(self, "export_total_submechanisms"):
# # add all to one urdf
# spanningtree = []
# for sm in self.robot.submechanisms:
# spanningtree += sm.jointnames_spanningtree
# spanningtree = list(set(spanningtree))
# root = tree.find_common_root(input_spanningtree=spanningtree, input_model=self.robot)
# self.robot.define_submodel(name=self.export_total_submechanisms, start=root,
# stop=tree.find_leaves(self.robot, spanningtree),
# only_urdf=True)
if hasattr(self, "sensors"):
multi_sensors = [x for x in dir(sensor_representations) if
not x.startswith("__") and x not in sensor_representations.__IMPORTS__ and
issubclass(getattr(sensor_representations, x), sensor_representations.MultiSensor)]
single_sensors = [x for x in dir(sensor_representations) if
not x.startswith(
"__") and x not in sensor_representations.__IMPORTS__ and issubclass(
getattr(sensor_representations, x),
sensor_representations.Sensor) and x not in multi_sensors]
moveable_joints = [j for j in self.robot.joints if j.joint_type != 'fixed']
for s in self.sensors:
sensor_ = None
if s["type"] in single_sensors:
kwargs = {k: v for k, v in s.items() if k != "type"}
sensor_ = getattr(sensor_representations, s["type"])(**kwargs)
if s["type"] in multi_sensors:
if "targets" not in s:
pass
elif type(s['targets']) is str and s['targets'].upper() == "ALL":
s['targets'] = moveable_joints
elif type(s['targets']) is list:
pass
else:
raise ValueError('Targets can only be a list or "All"!')
kwargs = {k: v for k, v in s.items() if k != "type"}
sensor_ = getattr(sensor_representations, s["type"])(**kwargs)
if sensor_ is not None:
self.robot.add_sensor(sensor_)
log.debug(' Attached {} {}'.format(s["type"], s['name']))
if hasattr(self, "poses"):
for (cn, config) in self.poses.items():
pose = poses.JointPoseSet(robot=self.robot, name=cn, configuration=config)
self.robot.add_pose(pose)
if hasattr(self, "annotations"):
log.debug(' Adding further annotations.')
if "general_annotations" in self.annotations.keys():
for k, v in self.annotations["general_annotations"]:
if k in self.robot.annotations.keys():
self.robot.annotations[k] += v
else:
self.robot.annotations[k] = v
if "named_annotations" in self.annotations.keys():
for k, v in self.annotations["named_annotations"].items():
self.robot.add_categorized_annotation(k, v)
log.info('Finished processing')
return True
def export(self):
self.robot.link_entities()
ros_pkg_name = self.robot.export(outputdir=self.exportdir, export_config=self.export_config,
rel_mesh_pathes=self.export_meshes, ros_pkg_later=True)
for vc in self.robot.collisions + self.robot.visuals:
if isinstance(vc.geometry, representation.Mesh):
self.processed_meshes = self.processed_meshes.union([os.path.realpath(f["filepath"]) for f in vc.geometry._exported.values()])
self.processed_meshes.add(os.path.realpath(vc.geometry.abs_filepath))
if "keep_files" in self.deployment:
git.reset(self.targetdir, "autobuild", "master")
misc.store_persisting_files(self.pipeline, self.targetdir, self.deployment["keep_files"], self.exportdir)
log.info('Finished export of the new model')
self.processed_model_exists = True
if hasattr(self, "post_processing"):
for script in self.post_processing:
if "cwd" not in script.keys():
script["cwd"] = self.exportdir
else:
script["cwd"] = os.path.abspath(script["cwd"])
misc.execute_shell_command(script["cmd"], script["cwd"])
log.info('Finished post_processing of the new model')
if ros_pkg_name is not None:
self.robot.export_ros_package_files(
self.exportdir, ros_pkg_name,
author=os.path.join(self.pipeline.configdir, self.modelname + ".yml"),
maintainer="https://git.hb.dfki.de/phobos/ci-run/-/wikis/home",
url=self.pipeline.remote_base + "/" + self.modelname,
                version=self.pipeline.git_rev[:10]
)
# REVIEW are the following lines here obsolete?
if "keep_files" in self.deployment:
git.reset(self.targetdir, "autobuild", "master")
misc.store_persisting_files(self.pipeline, self.targetdir, self.deployment["keep_files"], self.exportdir)
def deploy(self, mesh_commit, failed_model=False, uses_lfs=False):
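        """Commit and push the exported model to its target repository (and optional mirror); returns a status message."""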
if "do_not_deploy" in self.deployment and self.deployment["do_not_deploy"] is True:
return "deployment suppressed in cfg"
if not os.path.exists(self.targetdir):
raise Exception("The result directory " + self.targetdir + " doesn't exist:\n" +
" This might happen if you haven't added the result" +
" repo to the manifest.xml or spelled it wrong.")
repo = self.targetdir
git.reset(self.targetdir, "autobuild", "master")
if "keep_files" in self.deployment:
misc.store_persisting_files(self.pipeline, repo, self.deployment["keep_files"], os.path.join(self.tempdir, "_sustain"))
git.update(repo, update_target_branch="$CI_UPDATE_TARGET_BRANCH" if failed_model else "master")
git.clear_repo(repo)
# add manifest.xml if necessary
manifest_path = os.path.join(repo, "manifest.xml")
if not os.path.isfile(manifest_path):
misc.copy(self.pipeline, resources.get_resources_path("manifest.xml.in"), manifest_path)
with open(manifest_path, "r") as manifest:
content = manifest.read()
url = self.pipeline.remote_base + "/" + self.modelname
        content = misc.regex_replace(content, {
            r"\$INPUTNAME": self.modelname,
            r"\$AUTHOR": "<author>" + os.path.join(self.pipeline.configdir,
                                                   self.modelname + ".yml") + "</author>",
            r"\$MAINTAINER": "<maintainer>https://git.hb.dfki.de/phobos/ci-run/-/wikis/home</maintainer>",
            r"\$URL": "<url>" + url + "</url>",
        })
with open(manifest_path, "w") as manifest:
manifest.write(content)
readme_path = os.path.join(repo, "README.md")
if not os.path.isfile(readme_path):
misc.copy(self.pipeline, resources.get_resources_path("README.md.in"), readme_path)
with open(readme_path, "r") as readme:
content = readme.read()
        content = misc.regex_replace(content, {
            r"\$MODELNAME": self.modelname,
            r"\$USERS": self.pipeline.mr_mention if not hasattr(self, "mr_mention") else self.mr_mention,
            r"\$DEFINITION_FILE": "https://git.hb.dfki.de/" + os.path.join(
                os.environ["CI_ROOT_PATH"],
                os.environ["CI_MODEL_DEFINITIONS_PATH"],
                self.modelname + ".yml").replace("models/robots", "models-robots"),
        })
})
with open(readme_path, "w") as readme:
readme.write(content)
if uses_lfs:
readme_content = open(readme_path, "r").read()
if "# Git LFS for mesh repositories" not in readme_content:
with open(readme_path, "a") as readme:
readme.write(open(resources.get_resources_path("GitLFS_README.md.in")).read())
# update additional submodules
if "submodules" in self.deployment:
for subm in self.deployment["submodules"]:
git.add_submodule(
repo,
subm["repo"],
subm["target"],
commit=subm["commit"] if "commit" in subm.keys() else None,
branch=subm["branch"] if "branch" in subm.keys() else "master"
)
# update the mesh submodule
for mt, mp in self.export_meshes.items():
git.add_submodule(
repo,
os.path.relpath(
os.path.join(self.pipeline.root, str(mp)), repo
),
mp,
commit=mesh_commit[mt]["commit"],
branch=os.environ["CI_MESH_UPDATE_TARGET_BRANCH"]
if "CI_MESH_UPDATE_TARGET_BRANCH" in os.environ.keys() else "master"
)
# now we move back to the push repo
misc.copy(self.pipeline, self.exportdir + "/*", self.targetdir)
if "keep_files" in self.deployment:
misc.restore_persisting_files(self.pipeline, repo, self.deployment["keep_files"], os.path.join(self.tempdir, "_sustain"))
git.commit(repo, origin_repo=os.path.abspath(self.pipeline.configdir))
git.add_remote(repo, self.pipeline.remote_base + "/" + self.modelname)
mr = git.MergeRequest()
mr.target = self.deployment["mr_target_branch"]
mr.title = self.deployment["mr_title"]
mr.description = self.deployment["mr_description"]
if os.path.isfile(self.pipeline.test_protocol):
log.info("Appending test_protocol to MR description")
with open(self.pipeline.test_protocol, "r") as f:
protocol = load_json(f.read())
mr.description = misc.append_string(mr.description, "\n" + str(protocol["all"]))
mr.description = misc.append_string(mr.description, str(protocol[self.modelname]))
else:
log.warning(f"Did not find test_protocol file at: {self.pipeline.test_protocol}")
if failed_model:
if "mr_mention" in self.deployment:
mr.mention = self.deployment["mr_mention"]
return_msg = "pushed to " + git.push(repo, merge_request=mr)
else:
return_msg = "pushed to " + git.push(repo, branch="master")
# deploy to mirror
if "mirror" in self.deployment:
log.info(f"Deploying to mirror:\n {dump_yaml(self.deployment['mirror'], default_flow_style=False)}")
mirror_dir = os.path.join(self.tempdir, "deploy_mirror")
git.clone(
pipeline=self.pipeline,
repo=self.deployment["mirror"]["repo"],
target=mirror_dir,
branch="master",
shallow=False
)
git.update(mirror_dir, update_remote="origin", update_target_branch=self.deployment["mirror"]["branch"])
git.clear_repo(mirror_dir)
submodule_dict = {}
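            # parse the existing .gitmodules into {name: {"path": ..., "url": ..., "branch": ...}}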
if "submodules" in self.deployment["mirror"].keys() and ".gitmodules" in os.listdir(repo):
submodule_file = open(os.path.join(repo, ".gitmodules"), "r").read()
current_key = None
for line in submodule_file.split("\n"):
if line.startswith("["):
current_key = line.split()[1][1:-2]
if current_key not in submodule_dict.keys():
submodule_dict[current_key] = {}
elif " = " in line:
k, v = line.strip().split(" = ")
submodule_dict[current_key][k] = v
if self.deployment["mirror"]["submodules"]:
for _, sm in submodule_dict.items():
git.add_submodule(mirror_dir, sm["url"], sm["path"],
branch=sm["branch"] if "branch" in sm.keys() else "master")
misc.copy(self.pipeline, repo + "/*", mirror_dir)
if "submodules" in self.deployment["mirror"].keys() and ".gitmodules" in os.listdir(repo):
for _, sm in submodule_dict.items():
# git.clone(self.pipeline, os.path.join(self.deployment["mirror"]["repo"], sm["url"]), sm["path"],
# sm["branch"] if "branch" in sm.keys() else "master", cwd=mirror_dir)
misc.execute_shell_command("rm -rf " + os.path.join(sm["path"], ".git*"), mirror_dir)
git.commit(mirror_dir, origin_repo=self.pipeline.configdir)
if "merge_request" in self.deployment["mirror"].keys() and self.deployment["mirror"]["merge_request"]:
if "mr_mention" in self.deployment["mirror"].keys():
mr.mention = self.deployment["mirror"]["mr_mention"]
if "mr_title" in self.deployment["mirror"].keys():
mr.title = self.deployment["mirror"]["mr_title"]
if "mr_target" in self.deployment["mirror"].keys():
mr.target = self.deployment["mirror"]["mr_target"]
git.push(mirror_dir, remote="origin", merge_request=mr, branch=self.deployment["mirror"]["branch"])
else:
git.push(mirror_dir, remote="origin", branch=self.deployment["mirror"]["branch"])
return_msg += "& pushed to mirror"
git.checkout("master", repo)
return return_msg
def get_input_meshes(self):
return self._meshes
|
49d9cbea85616616c3ac284ce793f61260e450e0
|
392755be90c09b354f5f28ae79cbfb9e843b4aea
|
/tests/char_error_test.py
|
dd4724bed6d98c5ed82e2d8bbecd396366939606
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
shibing624/pycorrector
|
fc5ba5adb64dd248d31c86ad5277b3bafe32f866
|
ea5e57e6f54cf472206a79814390a8958e10b90b
|
refs/heads/master
| 2023-09-01T04:42:42.173234
| 2023-08-22T09:56:06
| 2023-08-22T09:56:06
| 123,424,015
| 4,605
| 1,029
|
Apache-2.0
| 2022-10-26T11:49:17
| 2018-03-01T11:08:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,298
|
py
|
char_error_test.py
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: Character-level error correction test cases for pycorrector.
"""
import sys
sys.path.append('..')
import pycorrector
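# The commented lines below are sample sentences from the corpus; the characters
# in ((...)) are the expected corrections for the character(s) right before them.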
# 那天花板上的钻石可比鸡弹((蛋))还大啊。
# 才般((搬))进装修好没多久的新宫殿里。
# 做的最倒霉的一件事就帮尼哥檫((擦))脚。
# 一但((旦))死去,以前花费的心血都会归零。
# 战士微笑著((着))轻轻拍了拍少年的肩膀。
# 差点拌((绊))到自己的脚。
# 面对着熙熙嚷嚷((攘攘))的城市。
# 你等我和老大商却((榷))一下。
# 这家伙还蛮格((恪))尽职守的。
# 玩家取明((名))“什么”已被占用。
# 报应接中迩((而))来。
# 人群穿((川))流不息。
# 这个消息不径((胫))而走。
# 眼前的场景美仑((轮))美幻简直超出了人类的想象。
# 看着这两个人谈笑风声((生))我心理((里))不由有些忌妒。
# 有老怪坐阵((镇))难怪他们高枕无忧了。
# 有了这一番旁证((征))博引。
def test_char_correct_right():
errors = [
'少先队员因该为老人让坐',
'服装店里的衣服各试各样',
'那天花板上的钻石可比鸡弹还大啊',
'才般进装修好没多久的新宫殿里。',
'一但死去,以前花费的心血都会归零。',
'这家伙还蛮格尽职守的。',
'玩家取明“什么”已被占用。',
'人群穿流不息。',
'这个消息不径而走。',
'眼前的场景美仑美幻简直超出了人类的想象。',
'看着这两个人谈笑风声',
'有老怪坐阵难怪他们高枕无忧了。',
'有了这一番旁证博引。',
]
for i in errors:
print(i, pycorrector.correct(i))
def test_char_correct_wrong():
errors = [
'她知难而上,沤心沥血,一心扑在舞台上',
'还有你们看看清除哈',
'我国人民义愤填鹰',
'权利的游戏第八季',
'2周岁22斤宝宝用多大的啊',
'这个到底有多辣?',
'所以先救挨饿的人,然后治疗病人。',
'现在,常常会到听男女平等这个词。',
'我的喉咙发炎了要买点阿莫细林吃',
'做的最倒霉的一件事就帮尼哥檫脚。',
'战士微笑著轻轻拍了拍少年的肩膀。',
'差点拌到自己的脚。',
'面对着熙熙嚷嚷的城市。',
'你等我和老大商却一下。',
'报应接中迩来。',
'我心理不由有些忌妒。',
'他们不需要怕他门没有钱。',
'全球的产龄妇女总生育率只生下一半,根据调查很有可能一直到2050年产龄妇女总生育率还是减少的趋势。',
'但现代的妇女所担任的责任已家重,除了家务以外,仍需出外工作补贴家',
'加上父母亲自己的看法,想原封不动地、完完全全地全部传给子女们',
'叶子的绿色与本身枝干的颜色都会变为偏较暗的颜色。',
]
for i in errors:
print(i, pycorrector.detect(i))
print(i, pycorrector.correct(i))
|
c773f58478e1788772fbc6fa00c980c45da81330
|
36ef8b40191c13344a5b3fb6bb2ab1cfb64b83a8
|
/December-17/python_UjjwalPrahladka_Dec17.py
|
60fc83530d01ab39bc50ba4fc5e170c07dcf5081
|
[
"MIT"
] |
permissive
|
SVCE-ACM/A-December-of-Algorithms-2019
|
a9e2436b29db8ed5e488719c6e45c78ccbd49bec
|
d15a4e8284c8576b7080c999d4b46748f4d1d09b
|
refs/heads/master
| 2023-02-03T20:45:17.211079
| 2022-08-13T07:41:13
| 2022-08-13T07:41:13
| 222,771,373
| 231
| 193
|
MIT
| 2023-06-18T04:02:23
| 2019-11-19T19:18:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
python_UjjwalPrahladka_Dec17.py
|
from collections import deque
list1 = input('Enter railline 1(comma separated stations): ').lower().split(',')
list2 = input('Enter railline 2(comma separated stations): ').lower().split(',')
stations = list(set(list1 + list2))
try:
starting_node, end_node = input('Enter start and destination(comma separated): ').lower().split(',')
if starting_node not in stations or end_node not in stations:
raise ValueError('unknown station')
except ValueError:
print('Station not found. Please try again')
else:
# build an adjacency matrix over all stations
connection = [[0 for i in range(len(stations))] for j in range(len(stations))]
for i in range(len(list1) - 1):
prev_station = stations.index(list1[i])
next_station = stations.index(list1[i+1])
connection[prev_station][next_station] = 1
connection[next_station][prev_station] = 1
for i in range(len(list2) - 1):
prev_station = stations.index(list2[i])
next_station = stations.index(list2[i+1])
connection[prev_station][next_station] = 1
connection[next_station][prev_station] = 1
paths = ['' for i in range(len(stations))]
visited = [0 for i in range(len(stations))]
# BFS from the start; paths[i] accumulates the space-separated route taken to reach stations[i]
queue = deque()
queue.append(starting_node)
visited[stations.index(starting_node)] = 1
while len(queue) != 0:
current = queue.popleft()
for i in range(len(stations)):
if connection[stations.index(current)][i] == 1 and visited[i] == 0:
queue.append(stations[i])
paths[i] = paths[stations.index(current)] + ' ' + current
visited[i] = 1
print('Shortest route: ')
mypath = paths[stations.index(end_node)]
mypath = mypath.strip().replace(' ', '->')
print(mypath + '->' + end_node)
|
00335c046bfd7ccbb67d7f850d93cc035ad1f79b
|
584f7b51d7cd529448e2fc0147557e26931ab17e
|
/docs/_downloads/e0ed5545ef38e74cfcb877b43025058f/BesselAnnularSlit3.py
|
d43686cf3713ce876ed8bd19b42667fd2f2873bb
|
[
"BSD-3-Clause"
] |
permissive
|
opticspy/lightpipes
|
8ca0d2221a1b893de5e51fec9061e90b9145f5f8
|
f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef
|
refs/heads/master
| 2023-09-04T19:07:11.376631
| 2023-09-04T15:24:55
| 2023-09-04T15:24:55
| 80,127,706
| 191
| 55
|
BSD-3-Clause
| 2023-08-23T00:45:33
| 2017-01-26T15:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
BesselAnnularSlit3.py
|
from LightPipes import *
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img=mpimg.imread('bessel3.png')
plt.imshow(img); plt.axis('off');plt.title('Bessel beam with annular slit')
plt.show()
wavelength=1000.0*nm
size=10*mm
N=1000
N2=int(N/2)
ZoomFactor=10
NZ=N2/ZoomFactor
a=1.5*mm
f=500*mm
z_start=0.001*cm; z_end= 150*cm;
steps=11;
delta_z=(z_end-z_start)/steps
z=z_start
F=Begin(size,wavelength,N);
F=GaussBeam(F, size/3.5)
F=CircScreen(a,0,0,F)
F=CircAperture(a+0.1*mm,0,0,F)
F=Fresnel(f,F);
F=Lens(f,0,0,F);
for i in range(1,steps):
F=Fresnel(delta_z,F);
I=Intensity(0,F);
plt.subplot(2,5,i)
s='z= %3.1f m' % (z/m)
plt.title(s)
plt.imshow(I,cmap='jet');plt.axis('off')
plt.axis([N2-NZ, N2+NZ, N2-NZ, N2+NZ])
z=z+delta_z
plt.show()
|
2f974c3c2d4d106d5d8102315a4e7108287be3b6
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/lib/python/any/serializer.pyi
|
a436fd3ff531e8af372a05db115e34f390564af1
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 2,315
|
pyi
|
serializer.pyi
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from apache.thrift.type.type.thrift_types import Type
from folly.iobuf import IOBuf
from thrift.python.any.typestub import (
PrimitiveType,
SerializableType,
StructOrUnionOrException,
TKey,
TPrimitive,
TSerializable,
TValue,
)
from thrift.python.exceptions import GeneratedError
from thrift.python.serializer import Protocol
from thrift.python.types import Enum, StructOrUnion
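# Minimal usage sketch (hypothetical values; the stubbed functions below
# round-trip a primitive through an IOBuf using the default protocol):
#   buf = serialize_primitive(42)
#   n = deserialize_primitive(int, buf)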
def serialize_primitive(
obj: TPrimitive,
protocol: Protocol = ...,
thrift_type: typing.Optional[Type] = ...,
) -> IOBuf: ...
def deserialize_primitive(
cls: typing.Type[TPrimitive],
buf: typing.Union[bytes, bytearray, IOBuf, memoryview],
protocol: Protocol = ...,
thrift_type: typing.Optional[Type] = ...,
) -> TPrimitive: ...
def serialize_list(
obj: typing.Sequence[SerializableType],
protocol: Protocol = ...,
) -> IOBuf: ...
def deserialize_list(
elem_cls: typing.Type[TSerializable],
buf: typing.Union[bytes, bytearray, IOBuf, memoryview],
protocol: Protocol = ...,
) -> typing.Sequence[TSerializable]: ...
def serialize_set(
obj: typing.AbstractSet[SerializableType],
protocol: Protocol = ...,
) -> IOBuf: ...
def deserialize_set(
elem_cls: typing.Type[TSerializable],
buf: typing.Union[bytes, bytearray, IOBuf, memoryview],
protocol: Protocol = ...,
) -> typing.AbstractSet[TSerializable]: ...
def serialize_map(
obj: typing.Mapping[TKey, TValue],
protocol: Protocol = ...,
) -> IOBuf: ...
def deserialize_map(
key_cls: typing.Type[TKey],
value_cls: typing.Type[TValue],
buf: typing.Union[bytes, bytearray, IOBuf, memoryview],
protocol: Protocol = ...,
) -> typing.Mapping[TKey, TValue]: ...
|
fb69eef78014be53e011a0c4c3b1966e19cd23d0
|
f1f21ba2236da38a49a8185ce33b3ce4a4424c1d
|
/apps/molecular_generation/JT_VAE/src/nnutils.py
|
f6623d7b588324e63a3eb257f06bdf5e21f66507
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHelix
|
75a07c2f14475e56e72f4573b2cf82a91d1cbfda
|
e6ab0261eb719c21806bbadfd94001ecfe27de45
|
refs/heads/dev
| 2023-08-05T03:34:55.009355
| 2023-08-01T09:30:44
| 2023-08-01T09:30:44
| 314,704,349
| 771
| 197
|
Apache-2.0
| 2023-08-01T09:15:07
| 2020-11-21T00:53:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
nnutils.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""neural network utils"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def index_select_ND(source, dim, index):
"""Return nodes for index"""
index_size = index.shape
suffix_dim = source.shape[1:]
final_size = index_size + suffix_dim
target = paddle.index_select(x=source, axis=dim, index=paddle.reshape(index, shape=[-1]))
return target.reshape(final_size)
def avg_pool(all_vecs, scope, dim):
"""Average pooling"""
size = paddle.to_tensor([le for _, le in scope])
return paddle.sum(all_vecs, axis=dim) / paddle.unsqueeze(size, axis=-1)
def stack_pad_tensor(tensor_list):
"""Stack tensor with padding"""
max_len = max([t.shape[0] for t in tensor_list])
for i, tensor in enumerate(tensor_list):
pad_len = max_len - tensor.shape[0]
tensor_list[i] = F.pad(tensor, (0, 0, 0, pad_len))
return paddle.stack(tensor_list, axis=0)
def flatten_tensor(tensor, scope):
"""Flat tensor"""
assert tensor.shape[0] == len(scope)
tlist = []
for i, tup in enumerate(scope):
le = tup[1]
tlist.append(tensor[i, 0:le])
return paddle.concat(tlist, axis=0)
def inflate_tensor(tensor, scope):
"""Inflate tensor"""
max_len = max([le for _, le in scope])
batch_vecs = []
for st, le in scope:
cur_vecs = tensor[st: st + le]
cur_vecs = F.pad(cur_vecs, (0, 0, 0, max_len - le))
batch_vecs.append(cur_vecs)
return paddle.stack(batch_vecs, axis=0)
def GRU(x, h_nei, W_z, W_r, U_r, W_h):
"""GRU"""
hidden_size = x.shape[-1]
sum_h = paddle.sum(h_nei, axis=1)
z_input = paddle.concat([x, sum_h], axis=1)
z = F.sigmoid(W_z(z_input))
r_1 = paddle.reshape(W_r(x), shape=[-1, 1, hidden_size])
r_2 = U_r(h_nei)
r = F.sigmoid(r_1 + r_2)
gated_h = r * h_nei
sum_gated_h = paddle.sum(gated_h, axis=1)
h_input = paddle.concat([x, sum_gated_h], axis=1)
pre_h = F.tanh(W_h(h_input))
new_h = (1.0 - z) * sum_h + z * pre_h
return new_h
|
b0f9c697af1814c697c835d30b3c33e4029f8050
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/numpy/distutils/conv_template.py
|
c8933d1d42865f745bb985f7f9068a96985997f7
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 9,536
|
py
|
conv_template.py
|
#!/usr/bin/env python3
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks its end
After the /**begin repeat and before the */, all the named templates are placed;
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
#line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
__all__ = ['process_str', 'process_file']
import os
import sys
import re
# names for replacement that are already global.
global_names = {}
# header placed at the front of head processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
The returned line number is from the beginning of the string, starting
at zero. Returns an empty list if no loops found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
torep = obj.group(1)
numrep = obj.group(2)
return ','.join([torep]*int(numrep))
parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
# replaces all occurrences of '(a,b,c)*4' in astr
# with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
# empty values, i.e., ()*4 yields ',,,'. The result is
# split at ',' and a list of values returned.
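# e.g. parse_values("(a,b)*2, c*3") -> ['a', 'b', 'a', 'b', 'c', 'c', 'c']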
astr = parenrep.sub(paren_repl, astr)
# replaces occurrences of xxx*3 with xxx, xxx, xxx
astr = ','.join([plainrep.sub(paren_repl, x.strip())
for x in astr.split(',')])
return astr.split(',')
stripast = re.compile(r"\n\s*\*?")
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead) :
"""Find all named replacements in the header
Returns a list of dictionaries, one for each loop iteration,
where each key is a name to be substituted and the corresponding
value is the replacement string.
Also return a list of exclusions. The exclusions are dictionaries
of key value pairs. There can be more than one exclusion.
[{'var1':'value1', 'var2', 'value2'[,...]}, ...]
"""
# Strip out '\n' and leading '*', if any, in continuation lines.
# This should not affect code previous to this change as
# continuation lines were not allowed.
loophead = stripast.sub("", loophead)
# parse out the names and lists of values
names = []
reps = named_re.findall(loophead)
nsub = None
for rep in reps:
name = rep[0]
vals = parse_values(rep[1])
size = len(vals)
if nsub is None :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values, %d != %d\n%s = %s"
raise ValueError(msg % (nsub, size, name, vals))
names.append((name, vals))
# Find any exclude variables
excludes = []
for obj in exclude_re.finditer(loophead):
span = obj.span()
# find next newline
endline = loophead.find('\n', span[1])
substr = loophead[span[1]:endline]
ex_names = exclude_vars_re.findall(substr)
excludes.append(dict(ex_names))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
for i in range(nsub):
tmp = {name: vals[i] for name, vals in names}
dlist.append(tmp)
return dlist
replace_re = re.compile(r"@(\w+)@")
def parse_string(astr, env, level, line) :
lineno = "#line %d\n" % line
# local function for string replacement, uses env
def replace(match):
name = match.group(1)
try :
val = env[name]
except KeyError:
msg = 'line %d: no definition of key "%s"'%(line, name)
raise ValueError(msg) from None
return val
code = [lineno]
struct = parse_structure(astr, level)
if struct :
# recurse over inner loops
oldend = 0
newlevel = level + 1
for sub in struct:
pref = astr[oldend:sub[0]]
head = astr[sub[0]:sub[1]]
text = astr[sub[1]:sub[2]]
oldend = sub[3]
newline = line + sub[4]
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
except ValueError as e:
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
code.extend(newcode)
suff = astr[oldend:]
code.append(replace_re.sub(replace, suff))
else :
# replace keys
code.append(replace_re.sub(replace, astr))
code.append('\n')
return ''.join(code)
def process_str(astr):
code = [header]
code.extend(parse_string(astr, global_names, 0, 1))
return ''.join(code)
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
with open(source) as fid:
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
return lines
def process_file(source):
lines = resolve_includes(source)
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
except ValueError as e:
raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
# this obtains a unique key given a dictionary
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
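# e.g. for {'ab': ..., 'cd': ...} (insertion order) this tries 'ac' first
# and returns it, since 'ac' is not an existing key.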
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = "".join([x[:n] for x in allkeys])
if newkey in allkeys:
n += 1
else:
done = True
return newkey
def main():
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
try:
writestr = process_str(allstr)
except ValueError as e:
raise ValueError("In %s loop at %s" % (file, e)) from None
outfile.write(writestr)
if __name__ == "__main__":
main()
|
f7313ff04e77ed284362d1c457b33f2c396cb703
|
462b8a2326486dd41bf0d1ddbb19bbcee9532411
|
/blender/arm/make_world.py
|
aedc1ce73efce346a4935c1489b0bba3559e5131
|
[
"GPL-2.0-only",
"Zlib"
] |
permissive
|
armory3d/armory
|
b751fb23d6590f2ca421ace7cf7cbeaef91f472c
|
511657981bd2716eddcee8dff26820d27f2bc610
|
refs/heads/main
| 2023-08-12T02:57:02.898742
| 2023-08-04T18:55:45
| 2023-08-04T18:55:45
| 45,202,654
| 3,077
| 530
|
Zlib
| 2023-09-12T11:24:38
| 2015-10-29T18:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 15,985
|
py
|
make_world.py
|
import os
import bpy
import arm.assets as assets
import arm.log as log
from arm.material import make_shader
from arm.material.parser_state import ParserState, ParserContext
from arm.material.shader import ShaderContext, Shader
import arm.material.cycles as cycles
import arm.node_utils as node_utils
import arm.utils
import arm.write_probes as write_probes
if arm.is_reload(__name__):
arm.assets = arm.reload_module(arm.assets)
arm.log = arm.reload_module(arm.log)
arm.material = arm.reload_module(arm.material)
arm.material.parser_state = arm.reload_module(arm.material.parser_state)
from arm.material.parser_state import ParserState, ParserContext
arm.material.shader = arm.reload_module(arm.material.shader)
from arm.material.shader import ShaderContext, Shader
cycles = arm.reload_module(cycles)
node_utils = arm.reload_module(node_utils)
arm.utils = arm.reload_module(arm.utils)
write_probes = arm.reload_module(write_probes)
else:
arm.enable_reload(__name__)
callback = None
shader_datas = []
def build():
"""Builds world shaders for all exported worlds."""
global shader_datas
wrd = bpy.data.worlds['Arm']
rpdat = arm.utils.get_rp()
mobile_mat = rpdat.arm_material_model == 'Mobile' or rpdat.arm_material_model == 'Solid'
envpath = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'envmaps')
wrd.world_defs = ''
worlds = []
shader_datas = []
with write_probes.setup_envmap_render():
for scene in bpy.data.scenes:
world = scene.world
# Only export worlds from enabled scenes and only once per world
if scene.arm_export and world is not None and world not in worlds:
worlds.append(world)
world.arm_envtex_name = ''
create_world_shaders(world)
if rpdat.arm_irradiance:
# Plain background color
if '_EnvCol' in world.world_defs:
world_name = arm.utils.safestr(world.name)
# Irradiance json file name
world.arm_envtex_name = world_name
world.arm_envtex_irr_name = world_name
write_probes.write_color_irradiance(world_name, world.arm_envtex_color)
# Render world to envmap for (ir)radiance, if no
# other probes are exported
elif world.arm_envtex_name == '':
image_file = write_probes.render_envmap(envpath, world)
image_filepath = os.path.join(envpath, image_file)
world.arm_envtex_name = image_file
world.arm_envtex_irr_name = os.path.basename(image_filepath).rsplit('.', 1)[0]
write_radiance = rpdat.arm_radiance and not mobile_mat
mip_count = write_probes.write_probes(image_filepath, write_probes.ENVMAP_FORMAT == 'JPEG', False, world.arm_envtex_num_mips, write_radiance)
world.arm_envtex_num_mips = mip_count
if write_radiance:
# Set world def, everything else is handled by write_probes()
wrd.world_defs += '_Rad'
write_probes.check_last_cmft_time()
def create_world_shaders(world: bpy.types.World):
"""Creates fragment and vertex shaders for the given world."""
global shader_datas
world_name = arm.utils.safestr(world.name)
pass_name = 'World_' + world_name
shader_props = {
'name': world_name,
'depth_write': False,
'compare_mode': 'less',
'cull_mode': 'clockwise',
'color_attachments': ['_HDR'],
'vertex_elements': [{'name': 'pos', 'data': 'float3'}, {'name': 'nor', 'data': 'float3'}]
}
shader_data = {'name': world_name + '_data', 'contexts': [shader_props]}
# ShaderContext expects a material, but using a world also works
shader_context = ShaderContext(world, shader_data, shader_props)
vert = shader_context.make_vert(custom_name="World_" + world_name)
frag = shader_context.make_frag(custom_name="World_" + world_name)
# Update name, make_vert() and make_frag() above need another name
# to work
shader_context.data['name'] = pass_name
vert.add_out('vec3 normal')
vert.add_uniform('mat4 SMVP', link="_skydomeMatrix")
frag.add_include('compiled.inc')
frag.add_in('vec3 normal')
frag.add_out('vec4 fragColor')
frag.write_attrib('vec3 n = normalize(normal);')
vert.write('''normal = nor;
vec4 position = SMVP * vec4(pos, 1.0);
gl_Position = vec4(position);''')
build_node_tree(world, frag, vert, shader_context)
# TODO: Rework shader export so that it doesn't depend on materials
# to prevent workaround code like this
rel_path = os.path.join(arm.utils.build_dir(), 'compiled', 'Shaders')
full_path = os.path.join(arm.utils.get_fp(), rel_path)
if not os.path.exists(full_path):
os.makedirs(full_path)
# Output: World_[world_name].[frag/vert].glsl
make_shader.write_shader(rel_path, shader_context.vert, 'vert', world_name, 'World')
make_shader.write_shader(rel_path, shader_context.frag, 'frag', world_name, 'World')
# Write shader data file
shader_data_file = pass_name + '_data.arm'
arm.utils.write_arm(os.path.join(full_path, shader_data_file), {'contexts': [shader_context.data]})
shader_data_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Shaders', shader_data_file)
assets.add_shader_data(shader_data_path)
assets.add_shader_pass(pass_name)
assets.shader_passes_assets[pass_name] = shader_context.data
shader_datas.append({'contexts': [shader_context.data], 'name': pass_name})
def build_node_tree(world: bpy.types.World, frag: Shader, vert: Shader, con: ShaderContext):
"""Generates the shader code for the given world."""
world_name = arm.utils.safestr(world.name)
world.world_defs = ''
rpdat = arm.utils.get_rp()
wrd = bpy.data.worlds['Arm']
if callback is not None:
callback()
# film_transparent, do not render
if bpy.context.scene is not None and bpy.context.scene.render.film_transparent:
world.world_defs += '_EnvCol'
frag.add_uniform('vec3 backgroundCol', link='_backgroundCol')
frag.write('fragColor.rgb = backgroundCol;')
return
parser_state = ParserState(ParserContext.WORLD, world.name, world)
parser_state.con = con
parser_state.curshader = frag
parser_state.frag = frag
parser_state.vert = vert
cycles.state = parser_state
# Traverse world node tree
is_parsed = False
if world.node_tree is not None:
output_node = node_utils.get_node_by_type(world.node_tree, 'OUTPUT_WORLD')
if output_node is not None:
is_parsed = parse_world_output(world, output_node, frag)
# No world nodes/no output node, use background color
if not is_parsed:
solid_mat = rpdat.arm_material_model == 'Solid'
if rpdat.arm_irradiance and not solid_mat:
world.world_defs += '_Irr'
col = world.color
world.arm_envtex_color = [col[0], col[1], col[2], 1.0]
world.arm_envtex_strength = 1.0
world.world_defs += '_EnvCol'
# Clouds enabled
if rpdat.arm_clouds and world.arm_use_clouds:
world.world_defs += '_EnvClouds'
# Also set this flag globally so that the required textures are
# included
wrd.world_defs += '_EnvClouds'
frag_write_clouds(world, frag)
if '_EnvSky' in world.world_defs or '_EnvTex' in world.world_defs or '_EnvImg' in world.world_defs or '_EnvClouds' in world.world_defs:
frag.add_uniform('float envmapStrength', link='_envmapStrength')
# Clear background color
if '_EnvCol' in world.world_defs:
frag.write('fragColor.rgb = backgroundCol;')
elif '_EnvTex' in world.world_defs and '_EnvLDR' in world.world_defs:
frag.write('fragColor.rgb = pow(fragColor.rgb, vec3(2.2));')
if '_EnvClouds' in world.world_defs:
frag.write('if (pos.z > 0.0) fragColor.rgb = mix(fragColor.rgb, traceClouds(fragColor.rgb, pos), clamp(pos.z * 5.0, 0, 1));')
if '_EnvLDR' in world.world_defs:
frag.write('fragColor.rgb = pow(fragColor.rgb, vec3(1.0 / 2.2));')
# Mark as non-opaque
frag.write('fragColor.a = 0.0;')
finalize(frag, vert)
def finalize(frag: Shader, vert: Shader):
"""Checks the given fragment shader for completeness and adds
variable initializations if required.
TODO: Merge with make_finalize?
"""
if frag.contains('pos') and not frag.contains('vec3 pos'):
frag.write_attrib('vec3 pos = -n;')
if frag.contains('vVec') and not frag.contains('vec3 vVec'):
# For worlds, the camera seems to be always at origin in
# Blender, so we can just use the normals as the incoming vector
frag.write_attrib('vec3 vVec = n;')
for var in ('bposition', 'mposition', 'wposition'):
if (frag.contains(var) and not frag.contains(f'vec3 {var}')) or vert.contains(var):
frag.add_in(f'vec3 {var}')
vert.add_out(f'vec3 {var}')
vert.write(f'{var} = pos;')
if frag.contains('wtangent') and not frag.contains('vec3 wtangent'):
frag.write_attrib('vec3 wtangent = vec3(0.0);')
if frag.contains('texCoord') and not frag.contains('vec2 texCoord'):
frag.add_in('vec2 texCoord')
vert.add_out('vec2 texCoord')
# World has no UV map
vert.write('texCoord = vec2(1.0, 1.0);')
def parse_world_output(world: bpy.types.World, node_output: bpy.types.Node, frag: Shader) -> bool:
"""Parse the world's output node. Return `False` when the node has
no connected surface input."""
surface_node = node_utils.find_node_by_link(world.node_tree, node_output, node_output.inputs[0])
if surface_node is None:
return False
parse_surface(world, surface_node, frag)
return True
def parse_surface(world: bpy.types.World, node_surface: bpy.types.Node, frag: Shader):
wrd = bpy.data.worlds['Arm']
rpdat = arm.utils.get_rp()
solid_mat = rpdat.arm_material_model == 'Solid'
if node_surface.type in ('BACKGROUND', 'EMISSION'):
# Append irradiance define
if rpdat.arm_irradiance and not solid_mat:
wrd.world_defs += '_Irr'
# Extract environment strength
# Todo: follow/parse strength input
world.arm_envtex_strength = node_surface.inputs[1].default_value
# Color
out = cycles.parse_vector_input(node_surface.inputs[0])
frag.write(f'fragColor.rgb = {out};')
if not node_surface.inputs[0].is_linked:
solid_mat = rpdat.arm_material_model == 'Solid'
if rpdat.arm_irradiance and not solid_mat:
world.world_defs += '_Irr'
world.arm_envtex_color = node_surface.inputs[0].default_value
world.arm_envtex_strength = 1.0
else:
log.warn(f'World node type {node_surface.type} must not be connected to the world output node!')
# Invalidate the parser state for subsequent executions
cycles.state = None
def frag_write_clouds(world: bpy.types.World, frag: Shader):
"""References:
GPU PRO 7 - Real-time Volumetric Cloudscapes
https://www.guerrilla-games.com/read/the-real-time-volumetric-cloudscapes-of-horizon-zero-dawn
https://github.com/sebh/TileableVolumeNoise
"""
frag.add_uniform('sampler3D scloudsBase', link='$clouds_base.raw')
frag.add_uniform('sampler3D scloudsDetail', link='$clouds_detail.raw')
frag.add_uniform('sampler2D scloudsMap', link='$clouds_map.png')
frag.add_uniform('float time', link='_time')
frag.add_const('float', 'cloudsLower', str(round(world.arm_clouds_lower * 100) / 100))
frag.add_const('float', 'cloudsUpper', str(round(world.arm_clouds_upper * 100) / 100))
frag.add_const('vec2', 'cloudsWind', 'vec2(' + str(round(world.arm_clouds_wind[0] * 100) / 100) + ',' + str(round(world.arm_clouds_wind[1] * 100) / 100) + ')')
frag.add_const('float', 'cloudsPrecipitation', str(round(world.arm_clouds_precipitation * 100) / 100))
frag.add_const('float', 'cloudsSecondary', str(round(world.arm_clouds_secondary * 100) / 100))
frag.add_const('float', 'cloudsSteps', str(round(world.arm_clouds_steps * 100) / 100))
frag.add_function('''float remap(float old_val, float old_min, float old_max, float new_min, float new_max) {
\treturn new_min + (((old_val - old_min) / (old_max - old_min)) * (new_max - new_min));
}''')
frag.add_function('''float getDensityHeightGradientForPoint(float height, float cloud_type) {
\tconst vec4 stratusGrad = vec4(0.02f, 0.05f, 0.09f, 0.11f);
\tconst vec4 stratocumulusGrad = vec4(0.02f, 0.2f, 0.48f, 0.625f);
\tconst vec4 cumulusGrad = vec4(0.01f, 0.0625f, 0.78f, 1.0f);
\tfloat stratus = 1.0f - clamp(cloud_type * 2.0f, 0, 1);
\tfloat stratocumulus = 1.0f - abs(cloud_type - 0.5f) * 2.0f;
\tfloat cumulus = clamp(cloud_type - 0.5f, 0, 1) * 2.0f;
\tvec4 cloudGradient = stratusGrad * stratus + stratocumulusGrad * stratocumulus + cumulusGrad * cumulus;
\treturn smoothstep(cloudGradient.x, cloudGradient.y, height) - smoothstep(cloudGradient.z, cloudGradient.w, height);
}''')
frag.add_function('''float sampleCloudDensity(vec3 p) {
\tfloat cloud_base = textureLod(scloudsBase, p, 0).r * 40; // Base noise
\tvec3 weather_data = textureLod(scloudsMap, p.xy, 0).rgb; // Weather map
\tcloud_base *= getDensityHeightGradientForPoint(p.z, weather_data.b); // Cloud type
\tcloud_base = remap(cloud_base, weather_data.r, 1.0, 0.0, 1.0); // Coverage
\tcloud_base *= weather_data.r;
\tfloat cloud_detail = textureLod(scloudsDetail, p, 0).r * 2; // Detail noise
\tfloat cloud_detail_mod = mix(cloud_detail, 1.0 - cloud_detail, clamp(p.z * 10.0, 0, 1));
\tcloud_base = remap(cloud_base, cloud_detail_mod * 0.2, 1.0, 0.0, 1.0);
\treturn cloud_base;
}''')
func_cloud_radiance = 'float cloudRadiance(vec3 p, vec3 dir) {\n'
if '_EnvSky' in world.world_defs:
# Nishita sky
if 'vec3 sunDir' in frag.uniforms:
func_cloud_radiance += '\tvec3 sun_dir = sunDir;\n'
# Hosek
else:
func_cloud_radiance += '\tvec3 sun_dir = hosekSunDirection;\n'
else:
func_cloud_radiance += '\tvec3 sun_dir = vec3(0, 0, -1);\n'
func_cloud_radiance += '''\tconst int steps = 8;
\tfloat step_size = 0.5 / float(steps);
\tfloat d = 0.0;
\tp += sun_dir * step_size;
\tfor(int i = 0; i < steps; ++i) {
\t\td += sampleCloudDensity(p + sun_dir * float(i) * step_size);
\t}
\treturn 1.0 - d;
}'''
frag.add_function(func_cloud_radiance)
func_trace_clouds = '''vec3 traceClouds(vec3 sky, vec3 dir) {
\tconst float step_size = 0.5 / float(cloudsSteps);
\tfloat T = 1.0;
\tfloat C = 0.0;
\tvec2 uv = dir.xy / dir.z * 0.4 * cloudsLower + cloudsWind * time * 0.02;
\tfor (int i = 0; i < cloudsSteps; ++i) {
\t\tfloat h = float(i) / float(cloudsSteps);
\t\tvec3 p = vec3(uv * 0.04, h);
\t\tfloat d = sampleCloudDensity(p);
\t\tif (d > 0) {
\t\t\t// float radiance = cloudRadiance(p, dir);
\t\t\tC += T * exp(h) * d * step_size * 0.6 * cloudsPrecipitation;
\t\t\tT *= exp(-d * step_size);
\t\t\tif (T < 0.01) break;
\t\t}
\t\tuv += (dir.xy / dir.z) * step_size * cloudsUpper;
\t}
'''
if world.arm_darken_clouds:
func_trace_clouds += '\t// Darken clouds when the sun is low\n'
if '_EnvSky' in world.world_defs:
# Nishita sky
if 'vec3 sunDir' in frag.uniforms:
func_trace_clouds += '\tC *= smoothstep(-0.02, 0.25, sunDir.z);\n'
# Hosek
else:
func_trace_clouds += '\tC *= smoothstep(0.04, 0.32, hosekSunDirection.z);\n'
func_trace_clouds += '\treturn vec3(C) + sky * T;\n}'
frag.add_function(func_trace_clouds)
|
0aadbff29845f7cb8574c692a4f1ad4799b9b877
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/rpython/rtyper/lltypesystem/rbuilder.py
|
1dfb3c06862a135b54fc00f7a0096260be16eb71
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 17,206
|
py
|
rbuilder.py
|
from rpython.rlib import rgc, jit
from rpython.rlib.objectmodel import enforceargs, dont_inline, always_inline
from rpython.rlib.rarithmetic import ovfcheck, r_uint, intmask
from rpython.rtyper.debug import ll_assert
from rpython.rlib.unroll import unrolling_iterable
from rpython.rtyper.rptr import PtrRepr
from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rtyper.lltypesystem.lltype import staticAdtMethod, nullptr
from rpython.rtyper.lltypesystem.rstr import (STR, UNICODE, char_repr,
string_repr, unichar_repr, unicode_repr)
from rpython.rtyper.rbuilder import AbstractStringBuilderRepr
from rpython.tool.sourcetools import func_with_new_name
from rpython.rtyper.annlowlevel import llstr, llunicode
# ------------------------------------------------------------
# Basic idea:
#
# - A StringBuilder has a rstr.STR of the specified initial size
# (100 by default), which is filled gradually.
#
# - When it is full, we allocate extra buffers as an extra rstr.STR,
# and the already-filled one is added to a chained list of STRINGPIECE
# objects.
#
# - At build() time, we consolidate all these pieces into a single
# rstr.STR, which is both returned and re-attached to the StringBuilder,
# replacing the STRINGPIECEs.
#
# - The data is copied at most twice, and only once in case it fits
# into the initial size (and the GC supports shrinking the STR).
#
# XXX in build(), we could try keeping around a global weakref to the
# chain of STRINGPIECEs and reuse them the next time.
#
# ------------------------------------------------------------
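# Sketch of the layout after two grows (newest piece is current_buf):
#   current_buf  -> STR(piece3)
#   extra_pieces -> STRINGPIECE(piece2) -> STRINGPIECE(piece1) -> NULL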
STRINGPIECE = lltype.GcStruct('stringpiece',
('buf', lltype.Ptr(STR)),
('prev_piece', lltype.Ptr(lltype.GcForwardReference())))
STRINGPIECE.prev_piece.TO.become(STRINGPIECE)
STRINGBUILDER = lltype.GcStruct('stringbuilder',
('current_buf', lltype.Ptr(STR)),
('current_pos', lltype.Signed),
('current_end', lltype.Signed),
('total_size', lltype.Signed),
('extra_pieces', lltype.Ptr(STRINGPIECE)),
adtmeths={
'copy_string_contents': staticAdtMethod(rstr.copy_string_contents),
'copy_raw_to_string': staticAdtMethod(rstr.copy_raw_to_string),
'mallocfn': staticAdtMethod(rstr.mallocstr),
}
)
UNICODEPIECE = lltype.GcStruct('unicodepiece',
('buf', lltype.Ptr(UNICODE)),
('prev_piece', lltype.Ptr(lltype.GcForwardReference())))
UNICODEPIECE.prev_piece.TO.become(UNICODEPIECE)
UNICODEBUILDER = lltype.GcStruct('unicodebuilder',
('current_buf', lltype.Ptr(UNICODE)),
('current_pos', lltype.Signed),
('current_end', lltype.Signed),
('total_size', lltype.Signed),
('extra_pieces', lltype.Ptr(UNICODEPIECE)),
adtmeths={
'copy_string_contents': staticAdtMethod(rstr.copy_unicode_contents),
'copy_raw_to_string': staticAdtMethod(rstr.copy_raw_to_unicode),
'mallocfn': staticAdtMethod(rstr.mallocunicode),
}
)
# ------------------------------------------------------------
# The generic piece of code to append a string (or a slice of it)
# to a builder; it is inlined inside various functions below
@always_inline
def _ll_append(ll_builder, ll_str, start, size):
pos = ll_builder.current_pos
end = ll_builder.current_end
if (end - pos) < size:
ll_grow_and_append(ll_builder, ll_str, start, size)
else:
ll_builder.current_pos = pos + size
ll_builder.copy_string_contents(ll_str, ll_builder.current_buf,
start, pos, size)
# ------------------------------------------------------------
# Logic to grow a builder (by adding a new string to it)
@dont_inline
@enforceargs(None, int)
def ll_grow_by(ll_builder, needed):
try:
needed = ovfcheck(needed + ll_builder.total_size)
needed = ovfcheck(needed + 63) & ~63
total_size = ll_builder.total_size + needed
except OverflowError:
raise MemoryError
#
new_string = ll_builder.mallocfn(needed)
#
PIECE = lltype.typeOf(ll_builder.extra_pieces).TO
old_piece = lltype.malloc(PIECE)
old_piece.buf = ll_builder.current_buf
old_piece.prev_piece = ll_builder.extra_pieces
ll_assert(bool(old_piece.buf), "no buf??")
ll_builder.current_buf = new_string
ll_builder.current_pos = 0
ll_builder.current_end = needed
ll_builder.total_size = total_size
ll_builder.extra_pieces = old_piece
@dont_inline
def ll_grow_and_append(ll_builder, ll_str, start, size):
# First, the part that still fits in the current piece
part1 = ll_builder.current_end - ll_builder.current_pos
ll_assert(part1 < size, "part1 >= size")
ll_builder.copy_string_contents(ll_str, ll_builder.current_buf,
start, ll_builder.current_pos,
part1)
start += part1
size -= part1
# Allocate the new piece
ll_grow_by(ll_builder, size)
ll_assert(ll_builder.current_pos == 0, "current_pos must be 0 after grow()")
# Finally, the second part of the string
ll_builder.current_pos = size
ll_builder.copy_string_contents(ll_str, ll_builder.current_buf,
start, 0, size)
# ------------------------------------------------------------
# builder.append()
@always_inline
def ll_append(ll_builder, ll_str):
if jit.we_are_jitted():
ll_jit_append(ll_builder, ll_str)
else:
# no-jit case: inline the logic of _ll_append() in the caller
_ll_append(ll_builder, ll_str, 0, len(ll_str.chars))
@dont_inline
def ll_jit_append(ll_builder, ll_str):
# jit case: first try special cases for known small lengths
if ll_jit_try_append_slice(ll_builder, ll_str, 0, len(ll_str.chars)):
return
# fall-back to do a residual call to ll_append_res0
ll_append_res0(ll_builder, ll_str)
@jit.dont_look_inside
def ll_append_res0(ll_builder, ll_str):
_ll_append(ll_builder, ll_str, 0, len(ll_str.chars))
# ------------------------------------------------------------
# builder.append_char()
@always_inline
def ll_append_char(ll_builder, char):
jit.conditional_call(ll_builder.current_pos == ll_builder.current_end,
ll_grow_by, ll_builder, 1)
pos = ll_builder.current_pos
ll_builder.current_pos = pos + 1
ll_builder.current_buf.chars[pos] = char
# ------------------------------------------------------------
# builder.append_slice()
@always_inline
def ll_append_slice(ll_builder, ll_str, start, end):
if jit.we_are_jitted():
ll_jit_append_slice(ll_builder, ll_str, start, end)
else:
# no-jit case: inline the logic of _ll_append() in the caller
_ll_append(ll_builder, ll_str, start, end - start)
@dont_inline
def ll_jit_append_slice(ll_builder, ll_str, start, end):
# jit case: first try special cases for known small lengths
if ll_jit_try_append_slice(ll_builder, ll_str, start, end - start):
return
# fall-back to do a residual call to ll_append_res_slice
ll_append_res_slice(ll_builder, ll_str, start, end)
@jit.dont_look_inside
def ll_append_res_slice(ll_builder, ll_str, start, end):
_ll_append(ll_builder, ll_str, start, end - start)
# ------------------------------------------------------------
# Special-casing for the JIT: appending strings (or slices) of
# a known length up to MAX_N. These functions all contain an
# inlined copy of _ll_append(), but with a known small N, gcc
# will compile the copy_string_contents() efficiently.
MAX_N = 10
def make_func_for_size(N):
@jit.dont_look_inside
def ll_append_0(ll_builder, ll_str):
_ll_append(ll_builder, ll_str, 0, N)
ll_append_0 = func_with_new_name(ll_append_0, "ll_append_0_%d" % N)
#
@jit.dont_look_inside
def ll_append_start(ll_builder, ll_str, start):
_ll_append(ll_builder, ll_str, start, N)
ll_append_start = func_with_new_name(ll_append_start,
"ll_append_start_%d" % N)
return ll_append_0, ll_append_start, N
unroll_func_for_size = unrolling_iterable([make_func_for_size(_n)
for _n in range(2, MAX_N + 1)])
@jit.unroll_safe
def ll_jit_try_append_slice(ll_builder, ll_str, start, size):
if jit.isconstant(size):
if size == 0:
return True
# a special case: if the builder's pos and end are still constants
# (typically if the builder is still virtual), and if 'size' fits,
# then we don't need any reallocation and can just set the
# characters in the buffer, in a way that won't force anything.
if (jit.isconstant(ll_builder.current_pos) and
jit.isconstant(ll_builder.current_end) and
size <= (ll_builder.current_end - ll_builder.current_pos) and
size <= 16):
pos = ll_builder.current_pos
buf = ll_builder.current_buf
stop = pos + size
ll_builder.current_pos = stop
while pos < stop:
buf.chars[pos] = ll_str.chars[start]
pos += 1
start += 1
return True
# turn appends of length 1 into ll_append_char().
if size == 1:
ll_append_char(ll_builder, ll_str.chars[start])
return True
# turn appends of length 2 to 10 into residual calls to
# specialized functions, for the lengths 2 to 10, where
# gcc will optimize the known-length copy_string_contents()
# as much as possible.
for func0, funcstart, for_size in unroll_func_for_size:
if size == for_size:
if jit.isconstant(start) and start == 0:
func0(ll_builder, ll_str)
else:
funcstart(ll_builder, ll_str, start)
return True
return False # use the fall-back path
# ------------------------------------------------------------
# builder.append_multiple_char()
@always_inline
def ll_append_multiple_char(ll_builder, char, times):
if jit.we_are_jitted():
if ll_jit_try_append_multiple_char(ll_builder, char, times):
return
_ll_append_multiple_char(ll_builder, char, times)
@jit.dont_look_inside
def _ll_append_multiple_char(ll_builder, char, times):
part1 = ll_builder.current_end - ll_builder.current_pos
if times > part1:
times -= part1
buf = ll_builder.current_buf
for i in xrange(ll_builder.current_pos, ll_builder.current_end):
buf.chars[i] = char
ll_grow_by(ll_builder, times)
#
buf = ll_builder.current_buf
pos = ll_builder.current_pos
end = pos + times
ll_builder.current_pos = end
for i in xrange(pos, end):
buf.chars[i] = char
@jit.unroll_safe
def ll_jit_try_append_multiple_char(ll_builder, char, size):
if jit.isconstant(size):
if size == 0:
return True
# a special case: if the builder's pos and end are still constants
# (typically if the builder is still virtual), and if 'size' fits,
# then we don't need any reallocation and can just set the
# characters in the buffer, in a way that won't force anything.
if (jit.isconstant(ll_builder.current_pos) and
jit.isconstant(ll_builder.current_end) and
size <= (ll_builder.current_end - ll_builder.current_pos) and
size <= 16):
pos = ll_builder.current_pos
buf = ll_builder.current_buf
stop = pos + size
ll_builder.current_pos = stop
while pos < stop:
buf.chars[pos] = char
pos += 1
return True
if size == 1:
ll_append_char(ll_builder, char)
return True
return False # use the fall-back path
# ------------------------------------------------------------
# builder.append_charpsize()
@jit.dont_look_inside
def ll_append_charpsize(ll_builder, charp, size):
part1 = ll_builder.current_end - ll_builder.current_pos
if size > part1:
# First, the part that still fits
ll_builder.copy_raw_to_string(charp, ll_builder.current_buf,
ll_builder.current_pos, part1)
charp = rffi.ptradd(charp, part1)
size -= part1
ll_grow_by(ll_builder, size)
#
pos = ll_builder.current_pos
ll_builder.current_pos = pos + size
ll_builder.copy_raw_to_string(charp, ll_builder.current_buf, pos, size)
# ------------------------------------------------------------
# builder.getlength()
@always_inline
def ll_getlength(ll_builder):
num_chars_missing_from_last_piece = (
ll_builder.current_end - ll_builder.current_pos)
return ll_builder.total_size - num_chars_missing_from_last_piece
# ------------------------------------------------------------
# builder.build()
@jit.look_inside_iff(lambda ll_builder: jit.isvirtual(ll_builder))
def ll_build(ll_builder):
# NB. usually the JIT doesn't look inside this function; it does
# so only in the simplest example where it could virtualize everything
if ll_builder.extra_pieces:
ll_fold_pieces(ll_builder)
elif ll_builder.current_pos != ll_builder.total_size:
ll_shrink_final(ll_builder)
return ll_builder.current_buf
def ll_shrink_final(ll_builder):
final_size = ll_builder.current_pos
ll_assert(final_size <= ll_builder.total_size,
"final_size > ll_builder.total_size?")
buf = rgc.ll_shrink_array(ll_builder.current_buf, final_size)
ll_builder.current_buf = buf
ll_builder.current_end = final_size
ll_builder.total_size = final_size
def ll_fold_pieces(ll_builder):
final_size = BaseStringBuilderRepr.ll_getlength(ll_builder)
ll_assert(final_size >= 0, "negative final_size")
extra = ll_builder.extra_pieces
ll_builder.extra_pieces = lltype.nullptr(lltype.typeOf(extra).TO)
#
result = ll_builder.mallocfn(final_size)
piece = ll_builder.current_buf
piece_lgt = ll_builder.current_pos
ll_assert(ll_builder.current_end == len(piece.chars),
"bogus last piece_lgt")
ll_builder.total_size = final_size
ll_builder.current_buf = result
ll_builder.current_pos = final_size
ll_builder.current_end = final_size
dst = final_size
while True:
dst -= piece_lgt
ll_assert(dst >= 0, "rbuilder build: overflow")
ll_builder.copy_string_contents(piece, result, 0, dst, piece_lgt)
if not extra:
break
piece = extra.buf
piece_lgt = len(piece.chars)
extra = extra.prev_piece
ll_assert(dst == 0, "rbuilder build: underflow")
# ------------------------------------------------------------
# bool(builder)
def ll_bool(ll_builder):
return ll_builder != nullptr(lltype.typeOf(ll_builder).TO)
# ------------------------------------------------------------
class BaseStringBuilderRepr(AbstractStringBuilderRepr):
def empty(self):
return nullptr(self.lowleveltype.TO)
ll_append = staticmethod(ll_append)
ll_append_char = staticmethod(ll_append_char)
ll_append_slice = staticmethod(ll_append_slice)
ll_append_multiple_char = staticmethod(ll_append_multiple_char)
ll_append_charpsize = staticmethod(ll_append_charpsize)
ll_getlength = staticmethod(ll_getlength)
ll_build = staticmethod(ll_build)
ll_bool = staticmethod(ll_bool)
class StringBuilderRepr(BaseStringBuilderRepr):
lowleveltype = lltype.Ptr(STRINGBUILDER)
basetp = STR
convert_to_ll = staticmethod(llstr)
string_repr = string_repr
char_repr = char_repr
raw_ptr_repr = PtrRepr(
lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True}))
)
@staticmethod
def ll_new(init_size):
# Clamp 'init_size' to be a value between 0 and 1280.
# Negative values are mapped to 1280.
init_size = intmask(min(r_uint(init_size), r_uint(1280)))
ll_builder = lltype.malloc(STRINGBUILDER)
ll_builder.current_buf = ll_builder.mallocfn(init_size)
ll_builder.current_pos = 0
ll_builder.current_end = init_size
ll_builder.total_size = init_size
return ll_builder
class UnicodeBuilderRepr(BaseStringBuilderRepr):
lowleveltype = lltype.Ptr(UNICODEBUILDER)
basetp = UNICODE
convert_to_ll = staticmethod(llunicode)
string_repr = unicode_repr
char_repr = unichar_repr
raw_ptr_repr = PtrRepr(
lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True}))
)
@staticmethod
def ll_new(init_size):
# Clamp 'init_size' to be a value between 0 and 1280.
# Negative values are mapped to 1280.
init_size = intmask(min(r_uint(init_size), r_uint(1280)))
ll_builder = lltype.malloc(UNICODEBUILDER)
ll_builder.current_buf = ll_builder.mallocfn(init_size)
ll_builder.current_pos = 0
ll_builder.current_end = init_size
ll_builder.total_size = init_size
return ll_builder
unicodebuilder_repr = UnicodeBuilderRepr()
stringbuilder_repr = StringBuilderRepr()
|
f99b529578914f4ba96cbb72652d3ec2c7348c64
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/mindspore/lite/examples/export_models/models/emoji_model.py
|
0c733d2007c469431d1b90889bf0a6d8dcff3b1e
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
emoji_model.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""emoji_model."""
import mindspore as MS
class GlobalAvgPooling(MS.nn.Cell):
"""
Global average pooling over the spatial dimensions (axes 2 and 3).
Returns:
Tensor, output tensor.
Examples:
>>> GlobalAvgPooling()
"""
def __init__(self):
super(GlobalAvgPooling, self).__init__()
self.mean = MS.ops.ReduceMean(keep_dims=False)
def construct(self, x):
x = self.mean(x, (2, 3))
return x
class EmojiModel(MS.nn.Cell):
"""emoji model"""
def __init__(self, wayc, use_bb, use_head):
super(EmojiModel, self).__init__()
self.use_head = use_head
self.use_bb = use_bb
if use_bb:
self.relu = MS.nn.ReLU()
self.maxpool = MS.nn.MaxPool2d(kernel_size=2, stride=2)
self.c1 = MS.nn.Conv2d(1, 32, (3, 3), (1, 1), pad_mode='pad', padding=1, dilation=(1, 1), group=1,
has_bias=True)
self.bn1 = MS.nn.BatchNorm2d(32)
self.c2 = MS.nn.Conv2d(32, 64, (3, 3), (1, 1), pad_mode='pad', padding=1, dilation=(1, 1), group=1,
has_bias=True)
self.bn2 = MS.nn.BatchNorm2d(64)
self.c3 = MS.nn.Conv2d(64, 128, (3, 3), (1, 1), pad_mode='pad', padding=1, dilation=(1, 1), group=1,
has_bias=True)
self.bn3 = MS.nn.BatchNorm2d(128)
self.c4 = MS.nn.Conv2d(128, 256, (3, 3), (1, 1), pad_mode='pad', padding=1, dilation=(1, 1), group=1,
has_bias=True)
self.bn4 = MS.nn.BatchNorm2d(256)
if use_head:
self.c5 = MS.nn.Conv2d(256, wayc, (3, 3), (1, 1), pad_mode='pad', padding=1, dilation=(1, 1), group=1,
has_bias=True)
self.gap = GlobalAvgPooling()
def construct(self, x):
"""construct"""
if self.use_bb:
x = self.c1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.c2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.c3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.c4(x)
x = self.bn4(x)
x = self.relu(x)
if self.use_head:
x = self.c5(x)
x = self.gap(x)
return x
|
d4bf47d75b5e8bc70cb1f5ed4381a2aa7eb70e86
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/reports/formdetails/readable.py
|
3a5536fc88628edacfe3e41c7fb0ebaa18705775
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 14,392
|
py
|
readable.py
|
import re
from copy import deepcopy
from pydoc import html
from django.http import Http404
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from couchdbkit import ResourceNotFound
from corehq.apps.app_manager.app_schemas.app_case_metadata import (
FormQuestionResponse,
)
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.exceptions import XFormException
from corehq.apps.app_manager.models import Application
from corehq.apps.reports.formdetails.exceptions import QuestionListNotFound
from corehq.form_processor.exceptions import XFormQuestionValueNotFound
from corehq.form_processor.utils.xform import get_node
from corehq.util.soft_assert import soft_assert
SYSTEM_FIELD_NAMES = (
"drugs_prescribed", "case", "meta", "clinic_ids", "drug_drill_down", "tmp",
"info_hack_done"
)
def form_key_filter(key):
if key in SYSTEM_FIELD_NAMES:
return False
if key.startswith(('#', '@', '_')):
return False
return True
def get_questions(domain, app_id, xmlns):
if not app_id:
raise QuestionListNotFound(
_("This form is not associated with an app")
)
try:
app = get_app(domain, app_id)
except Http404:
raise QuestionListNotFound(
_("No app could be found")
)
if not isinstance(app, Application):
raise QuestionListNotFound(
_("Remote apps are not supported")
)
try:
xform = app.get_xform_by_xmlns(xmlns)
except ResourceNotFound:
_soft_assert = soft_assert(notify_admins=True)
_soft_assert(
False,
f'XForm XML missing for XMLNS {xmlns!r} on domain {domain!r}',
)
raise QuestionListNotFound(_(
"We could not find the form source XML document"
))
if not xform:
if xmlns == 'http://code.javarosa.org/devicereport':
raise QuestionListNotFound(
_("This is a Device Report")
)
else:
raise QuestionListNotFound(
_("We could not find the question list "
"associated with this form")
)
# Search for 'READABLE FORMS TEST' for more info.
# To bootstrap a test and have it print out your form xml,
# uncomment this line. Ghetto but it works.
# print form.wrapped_xform().render()
return get_questions_from_xform_node(xform, app.langs)
def get_questions_from_xform_node(xform, langs):
questions = xform.get_questions(
langs, include_triggers=True, include_groups=True)
return [FormQuestionResponse(q) for q in questions]
def get_questions_for_submission(xform):
app_id = xform.build_id or xform.app_id
domain = xform.domain
xmlns = xform.xmlns
try:
questions = get_questions(domain, app_id, xmlns)
questions_error = None
except (QuestionListNotFound, XFormException) as e:
questions = []
questions_error = e
return questions, questions_error
def get_readable_data_for_submission(xform):
questions, questions_error = get_questions_for_submission(xform)
return get_readable_form_data(
deepcopy(xform.form_data),
questions,
process_label=_html_interpolate_output_refs
), questions_error
def get_readable_form_data(xform_data, questions, process_label=None):
return zip_form_data_and_questions(
strip_form_data(xform_data),
questions_in_hierarchy(questions),
path_context='/%s/' % xform_data.get('#type', 'data'),
process_label=process_label,
)
def strip_form_data(data):
data = data.copy()
# remove all case, meta, attribute nodes from the top level
for key in list(data.keys()):
if (
not form_key_filter(key) or
key in ('meta', 'case', 'commcare_usercase') or
key.startswith('case_autoload_') or
key.startswith('case_load_')
):
data.pop(key)
return data
def pop_from_form_data(relative_data, absolute_data, path):
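# A path such as '/data/a/b' is resolved against absolute_data (the
# leading '' and 'data' components are dropped); a relative path such
# as 'a/b' is resolved against relative_data.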
path = path.split('/')
if path and path[0] == '':
data = absolute_data
# path[:2] will be ['', 'data'] so remove
path = path[2:]
else:
data = relative_data
while path and data:
key, path = path[0], path[1:]
try:
if path:
data = data[key]
elif isinstance(data, dict):
return data.pop(key)
else:
return None
except KeyError:
return None
def path_relative_to_context(path, path_context):
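# e.g. path_relative_to_context('/data/a/b', '/data/a/') -> 'b'
#      path_relative_to_context('/data/a', '/data/a/') -> ''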
assert path_context.endswith('/')
if path.startswith(path_context):
return path[len(path_context):]
elif path + '/' == path_context:
return ''
else:
return path
def absolute_path_from_context(path, path_context):
assert path_context.endswith('/')
if path.startswith('/'):
return path
else:
return path_context + path
def _html_interpolate_output_refs(itext_value, context):
if hasattr(itext_value, 'with_refs'):
underline_template = '<u> %s </u>'
return mark_safe( # nosec: output is escaped
itext_value.with_refs(
context,
processor=lambda x: underline_template % (
html.escape(x)
if x is not None
else '<i class="fa fa-question-circle"></i>'
),
escape=html.escape,
)
)
else:
return itext_value
def _group_question_has_response(question):
return any(child.response for child in question.children)
def zip_form_data_and_questions(relative_data, questions, path_context='',
output_context=None, process_label=None,
absolute_data=None):
"""
The strategy here is to loop through the questions, and at every point
    pull in the corresponding piece of data, removing it from the data
    and adding it to the question. At the end, any remaining pieces of data
    are appended as unknown questions.
Repeats are matched up with their entry node in the data,
and then this function is applied recursively to each of the elements in
the list, using the repeat's children as the question list.
"""
assert path_context
absolute_data = absolute_data or relative_data
if not path_context.endswith('/'):
path_context += '/'
if not output_context:
output_context = {
'%s%s' % (path_context, '/'.join(map(str, key))): str(value)
for key, value in _flatten_json(relative_data).items()
}
result = []
for question in questions:
path = path_relative_to_context(question.value, path_context)
absolute_path = absolute_path_from_context(question.value, path_context)
node = pop_from_form_data(relative_data, absolute_data, path)
# response=True on a question with children indicates that one or more
# child has a response, i.e. that the entire group wasn't skipped
question_data = dict(question)
question_data.pop('response')
if question.type in ('Group', 'FieldList'):
children = question_data.pop('children')
form_question = FormQuestionResponse(
children=zip_form_data_and_questions(
node,
children,
path_context=absolute_path,
output_context=output_context,
process_label=process_label,
absolute_data=absolute_data,
),
**question_data
)
if _group_question_has_response(form_question):
form_question.response = True
elif question.type == 'Repeat':
if not isinstance(node, list):
node = [node]
children = question_data.pop('children')
form_question = FormQuestionResponse(
children=[
FormQuestionResponse(
children=zip_form_data_and_questions(
entry,
children,
path_context=absolute_path,
output_context=output_context,
process_label=process_label,
absolute_data=absolute_data,
),
)
for entry in node if entry or children
],
**question_data
)
for child in form_question.children:
if _group_question_has_response(child):
child.response = True
if _group_question_has_response(form_question):
form_question.response = True
else:
if (question.type == 'DataBindOnly'
and question.label == question.value):
question_data['label'] = '/'.join(
question.value.split('/')[2:])
if process_label:
question_data['label'] = process_label(question_data['label'],
output_context)
form_question = FormQuestionResponse(response=node,
**question_data)
result.append(form_question)
if relative_data:
for key, response in sorted(_flatten_json(relative_data).items()):
joined_key = '/'.join(map(str, key))
result.append(
FormQuestionResponse(
label=joined_key,
value='%s%s' % (path_context, joined_key),
response=response,
)
)
return result
def _flatten_json(json, result=None, path=()):
if result is None:
result = {}
if isinstance(json, dict):
for key, value in json.items():
_flatten_json(value, result, path + (key,))
elif isinstance(json, list):
for i, value in enumerate(json):
_flatten_json(value, result, path + (i,))
else:
result[path] = json
return result
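# _flatten_json flattens nested dicts/lists into path-tuple keys, e.g.:
# _flatten_json({'a': {'b': 1}, 'c': [2, 3]})
# -> {('a', 'b'): 1, ('c', 0): 2, ('c', 1): 3}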
def questions_in_hierarchy(questions):
# It turns out that questions isn't quite enough to reconstruct
# the hierarchy if there are groups that share the same ref
# as their parent (like for grouping on the screen but not the data).
# In this case, ignore nesting and put all sub questions on the top level,
# along with the group itself.
# Real solution is to get rid of this function and instead have
# get_questions preserve hierarchy to begin with
result = []
question_lists_by_group = {None: result}
for question in questions:
question_lists_by_group[question.group].append(question)
if question.type in ('Group', 'Repeat', 'FieldList') \
and question.value not in question_lists_by_group:
question_lists_by_group[question.value] = question.children
return question_lists_by_group[None]
def get_data_cleaning_data(form_data, instance):
question_response_map = {}
ordered_question_values = []
repeats = {}
def _repeat_question_value(question, repeat_index):
return "{}[{}]{}".format(question.repeat, repeat_index,
re.sub(r'^' + question.repeat, '', question.value))
def _add_to_question_response_map(data, repeat_index=None):
for index, question in enumerate(data):
if question.children:
next_index = repeat_index if question.repeat else index
_add_to_question_response_map(question.children, repeat_index=next_index)
elif question.editable and question.response is not None: # ignore complex and skipped questions
value = question.value
if question.repeat:
if question.repeat not in repeats:
repeats[question.repeat] = repeat_index + 1
else:
# This is the second or later instance of a repeat group, so it gets [i] notation
value = _repeat_question_value(question, repeat_index + 1)
# Update first instance of repeat group, which didn't know it needed [i] notation
if question.value in question_response_map:
first_value = _repeat_question_value(question, repeat_index)
question_response_map[first_value] = question_response_map.pop(question.value)
try:
index = ordered_question_values.index(question.value)
ordered_question_values[index] = first_value
except ValueError:
pass
# Limit data cleaning to nodes that can be found in the response submission.
# form_data may contain other data that shouldn't be clean-able, like subcase attributes.
try:
get_node(instance.get_xml_element(), value, instance.xmlns)
except XFormQuestionValueNotFound:
continue
question_response_map[value] = {
'label': question.label,
'icon': question.icon,
'value': question.response,
'options': [{
'id': option.value,
'text': option.label,
} for option in question.options],
}
if question.type == 'MSelect':
question_response_map[value].update({
'multiple': True,
})
ordered_question_values.append(value)
_add_to_question_response_map(form_data)
# Add splitName with zero-width spaces for display purposes
for key in question_response_map.keys():
question_response_map[key].update({
'splitName': re.sub(r'/', '/\u200B', key),
})
return (question_response_map, ordered_question_values)
|
0915e384de007940503502f00393edbaed2da499
|
1819c5f7861f4c4b801bff182dd3ba7801a82c18
|
/Chapter 05/code/pipeline_trainer.py
|
f231d0cd5aefb58db4e0efefc6ac43f550d641c7
|
[
"MIT"
] |
permissive
|
PacktPublishing/Artificial-Intelligence-with-Python
|
7c9520cd2d9332ac5b7d8a8ac3581c4b61593936
|
d930bc2d055433781559683f69e05207f0eaab13
|
refs/heads/master
| 2023-04-07T11:26:38.324268
| 2023-01-18T09:26:15
| 2023-01-18T09:26:15
| 79,776,573
| 471
| 481
|
MIT
| 2023-07-02T11:25:47
| 2017-01-23T06:27:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
pipeline_trainer.py
|
# make_classification used to be importable via sklearn.datasets.samples_generator,
# which recent scikit-learn releases have removed
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
# Generate data
X, y = make_classification(n_samples=150,
        n_features=25, n_classes=3, n_informative=6,
        n_redundant=0, random_state=7)
# Select top K features
k_best_selector = SelectKBest(f_regression, k=9)
# Initialize Extremely Random Forests classifier
classifier = ExtraTreesClassifier(n_estimators=60, max_depth=4)
# Construct the pipeline
processor_pipeline = Pipeline([('selector', k_best_selector), ('erf', classifier)])
# Set the parameters
processor_pipeline.set_params(selector__k=7, erf__n_estimators=30)
# Training the pipeline
processor_pipeline.fit(X, y)
# Predict outputs for the input data
output = processor_pipeline.predict(X)
print("\nPredicted output:\n", output)
# Print scores
print("\nScore:", processor_pipeline.score(X, y))
# Print the features chosen by the pipeline selector
status = processor_pipeline.named_steps['selector'].get_support()
# Extract and print indices of selected features
selected = [i for i, x in enumerate(status) if x]
print("\nIndices of selected features:", ', '.join([str(x) for x in selected]))
|
a14b1aba870fd9dcb1b0ebef7ebda59cf860ce2b
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Spacy/source2.7/toolz/curried/exceptions.py
|
75a52bbbf27ee0779edcef942a58d9268c3eaf92
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
exceptions.py
|
import toolz
__all__ = ['merge_with', 'merge']
@toolz.curry
def merge_with(func, d, *dicts, **kwargs):
return toolz.merge_with(func, d, *dicts, **kwargs)
@toolz.curry
def merge(d, *dicts, **kwargs):
return toolz.merge(d, *dicts, **kwargs)
merge_with.__doc__ = toolz.merge_with.__doc__
merge.__doc__ = toolz.merge.__doc__
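# Usage sketch: because these wrappers are curried, they can be partially
# applied before receiving the dictionaries to merge, e.g.:
# >>> merge_with(sum)({'a': 1}, {'a': 2, 'b': 3})
# {'a': 3, 'b': 3}
# >>> merge({'a': 1}, {'b': 2})
# {'a': 1, 'b': 2}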
|
6991c51f4d8d2fecc0574eb67b2ad598cabf36cd
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/third_party/gsutil/third_party/crcmod_osx/crcmod/__init__.py
|
80f2ac3e6e1b3c2efb82a559a4de889233ac09f1
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
__init__.py
|
try:
from crcmod.crcmod import *
import crcmod.predefined
except ImportError:
# Make this backward compatible
from crcmod import *
import predefined
__doc__ = crcmod.__doc__
|
4da2943b05b10b156b0bcdd5d667c08eab4b0937
|
8eccea9f715a2a0ce602f1944ed3e812adcacb4d
|
/tests/services/test_file_svc.py
|
bf007269b640e73d00b64041dd4eb8f7c0ed1be8
|
[
"Apache-2.0"
] |
permissive
|
mitre/caldera
|
c466cde25bb0191880984cfdf3af84efc8a7c9f4
|
3140411d4b96d8d5607b2b50476f7bf3d506de00
|
refs/heads/master
| 2023-08-23T02:14:23.360314
| 2023-08-21T18:55:29
| 2023-08-21T18:55:29
| 112,409,981
| 4,685
| 1,046
|
Apache-2.0
| 2023-09-13T16:36:05
| 2017-11-29T01:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 12,468
|
py
|
test_file_svc.py
|
import base64
import json
import os
import pytest
import yaml
from base64 import b64encode
from tests import AsyncMock
from asyncio import Future
from app.data_encoders.base64_basic import Base64Encoder
from app.data_encoders.plain_text import PlainTextEncoder
from app.utility.file_decryptor import decrypt
@pytest.fixture
def store_encoders(event_loop, data_svc):
event_loop.run_until_complete(data_svc.store(PlainTextEncoder()))
event_loop.run_until_complete(data_svc.store(Base64Encoder()))
@pytest.mark.usefixtures(
'init_base_world',
'store_encoders'
)
class TestFileService:
@pytest.fixture
def text_file(self, tmpdir):
txt_str = 'Hello world!'
f = tmpdir.mkdir('txt').join('test.txt')
f.write(txt_str)
assert f.read() == txt_str
yield f
def test_save_file(self, event_loop, file_svc, tmp_path):
filename = "test_file.txt"
payload = b'These are the file contents.'
# Save temporary test file
event_loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
file_location = tmp_path / filename
# Read file contents from saved file
assert os.path.isfile(file_location)
with open(file_location, "r") as file_contents:
assert payload.decode("utf-8") == file_contents.read()
def test_create_exfil_sub_directory(self, event_loop, file_svc):
exfil_dir_name = 'unit-testing-Rocks'
new_dir = event_loop.run_until_complete(file_svc.create_exfil_sub_directory(exfil_dir_name))
assert os.path.isdir(new_dir)
os.rmdir(new_dir)
def test_read_write_result_file(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
error = 'error testing unit'
test_exit_code = '0'
output_encoded = str(b64encode(json.dumps(dict(stdout=output, stderr=error, exit_code=test_exit_code)).encode()), 'utf-8')
file_svc.write_result_file(link_id=link_id, output=output_encoded, location=tmpdir)
expected_output = dict(stdout=output, stderr=error, exit_code=test_exit_code)
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
decoded_output_data = json.loads(base64.b64decode(output_data))
assert decoded_output_data == expected_output
def test_read_write_result_file_no_dict(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
output_encoded = str(b64encode(output.encode()), 'utf-8')
file_svc.write_result_file(link_id=link_id, output=output_encoded, location=tmpdir)
expected_output = {'stdout': output, 'stderr': '', 'exit_code': ''}
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
decoded_output_data = json.loads(base64.b64decode(output_data))
assert decoded_output_data == expected_output
def test_read_write_result_file_no_base64(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
file_svc.write_result_file(link_id=link_id, output=output, location=tmpdir)
expected_output = {'stdout': output, 'stderr': '', 'exit_code': ''}
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
decoded_output_data = json.loads(base64.b64decode(output_data))
assert decoded_output_data == expected_output
def test_upload_decode_plaintext(self, event_loop, file_svc, data_svc):
content = b'this will be encoded and decoded as plaintext'
self._test_upload_file_with_encoding(event_loop, file_svc, data_svc, encoding='plain-text', upload_content=content,
decoded_content=content)
def test_upload_decode_b64(self, event_loop, file_svc, data_svc):
original_content = b'this will be encoded and decoded as base64'
upload_content = b64encode(original_content)
self._test_upload_file_with_encoding(event_loop, file_svc, data_svc, encoding='base64', upload_content=upload_content,
decoded_content=original_content)
def test_download_plaintext_file(self, event_loop, file_svc, data_svc):
payload_content = b'plaintext content'
self._test_download_file_with_encoding(event_loop, file_svc, data_svc, encoding='plain-text',
original_content=payload_content, encoded_content=payload_content)
def test_download_base64_file(self, event_loop, file_svc, data_svc):
payload_content = b'b64 content'
self._test_download_file_with_encoding(event_loop, file_svc, data_svc, encoding='base64',
original_content=payload_content,
encoded_content=b64encode(payload_content))
def test_pack_file(self, event_loop, mocker, tmpdir, file_svc, data_svc):
payload = 'unittestpayload'
payload_content = b'content'
new_payload_content = b'new_content'
packer_name = 'test'
# create temp files
file = tmpdir.join(payload)
file.write(payload_content)
# start mocking up methods
packer = mocker.Mock(return_value=Future())
packer.return_value = packer
packer.pack = AsyncMock(return_value=(payload, new_payload_content))
data_svc.locate = AsyncMock(return_value=[])
module = mocker.Mock()
module.Packer = packer
file_svc.packers[packer_name] = module
file_svc.data_svc = data_svc
file_svc.read_file = AsyncMock(return_value=(payload, payload_content))
file_path, content, display_name = event_loop.run_until_complete(file_svc.get_file(headers=dict(file='%s:%s' % (packer_name, payload))))
packer.pack.assert_called_once()
assert payload == file_path
assert content == new_payload_content
def test_xored_filename_removal(self, event_loop, mocker, tmpdir, file_svc, data_svc):
payload = 'unittestpayload.exe.xored'
payload_content = b'content'
new_payload_content = b'new_content'
packer_name = 'test_xored_filename_removal'
expected_display_name = 'unittestpayload.exe'
# create temp files
file = tmpdir.join(payload)
file.write(payload_content)
# start mocking up methods
packer = mocker.Mock(return_value=Future())
packer.return_value = packer
packer.pack = AsyncMock(return_value=(payload, new_payload_content))
data_svc.locate = AsyncMock(return_value=[])
module = mocker.Mock()
module.Packer = packer
file_svc.packers[packer_name] = module
file_svc.data_svc = data_svc
file_svc.read_file = AsyncMock(return_value=(payload, payload_content))
file_path, content, display_name = event_loop.run_until_complete(file_svc.get_file(headers=dict(file='%s:%s' % (packer_name, payload))))
packer.pack.assert_called_once()
assert payload == file_path
assert content == new_payload_content
assert display_name == expected_display_name
def test_upload_file(self, event_loop, file_svc):
upload_dir = event_loop.run_until_complete(file_svc.create_exfil_sub_directory('test-upload'))
upload_filename = 'uploadedfile.txt'
upload_content = b'this is a test upload file'
event_loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir, encrypt=False))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
assert os.path.isfile(uploaded_file_path)
with open(uploaded_file_path, 'rb') as file:
written_data = file.read()
assert written_data == upload_content
os.remove(uploaded_file_path)
os.rmdir(upload_dir)
def test_encrypt_upload(self, event_loop, file_svc):
upload_dir = event_loop.run_until_complete(file_svc.create_exfil_sub_directory('test-encrypted-upload'))
upload_filename = 'encryptedupload.txt'
upload_content = b'this is a test upload file'
event_loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
decrypted_file_path = upload_filename + '_decrypted'
config_to_use = 'conf/default.yml'
with open(config_to_use, encoding='utf-8') as conf:
config = list(yaml.load_all(conf, Loader=yaml.FullLoader))[0]
decrypt(uploaded_file_path, config, output_file=decrypted_file_path)
assert os.path.isfile(decrypted_file_path)
with open(decrypted_file_path, 'rb') as decrypted_file:
decrypted_data = decrypted_file.read()
assert decrypted_data == upload_content
os.remove(uploaded_file_path)
os.remove(decrypted_file_path)
os.rmdir(upload_dir)
def test_walk_file_path_exists_nonxor(self, event_loop, text_file, file_svc):
ret = event_loop.run_until_complete(file_svc.walk_file_path(text_file.dirname, text_file.basename))
assert ret == text_file
def test_walk_file_path_notexists(self, event_loop, text_file, file_svc):
ret = event_loop.run_until_complete(file_svc.walk_file_path(text_file.dirname, 'not-a-real.file'))
assert ret is None
def test_walk_file_path_xor_fn(self, event_loop, tmpdir, file_svc):
f = tmpdir.mkdir('txt').join('xorfile.txt.xored')
f.write("test")
ret = event_loop.run_until_complete(file_svc.walk_file_path(f.dirname, 'xorfile.txt'))
assert ret == f
def test_remove_xored_extension(self, file_svc):
test_value = 'example_file.exe.xored'
expected_value = 'example_file.exe'
ret = file_svc.remove_xored_extension(test_value)
assert ret == expected_value
def test_remove_xored_extension_to_non_xored_file(self, file_svc):
test_value = 'example_file.exe'
expected_value = 'example_file.exe'
ret = file_svc.remove_xored_extension(test_value)
assert ret == expected_value
def test_add_xored_extension(self, file_svc):
test_value = 'example_file.exe'
expected_value = 'example_file.exe.xored'
ret = file_svc.add_xored_extension(test_value)
assert ret == expected_value
def test_add_xored_extension_to_xored_file(self, file_svc):
test_value = 'example_file.exe.xored'
expected_value = 'example_file.exe.xored'
ret = file_svc.add_xored_extension(test_value)
assert ret == expected_value
def test_is_extension_xored_true(self, file_svc):
test_value = 'example_file.exe.xored'
ret = file_svc.is_extension_xored(test_value)
assert ret is True
def test_is_extension_xored_false(self, file_svc):
test_value = 'example_file.exe'
ret = file_svc.is_extension_xored(test_value)
assert ret is False
@staticmethod
def _test_download_file_with_encoding(event_loop, file_svc, data_svc, encoding, original_content, encoded_content):
filename = 'testencodedpayload.txt'
file_svc.read_file = AsyncMock(return_value=(filename, original_content))
file_svc.data_svc = data_svc
file_path, content, display_name = event_loop.run_until_complete(
file_svc.get_file(headers={'file': filename, 'x-file-encoding': encoding})
)
assert file_path == filename
assert content == encoded_content
assert display_name == filename
@staticmethod
def _test_upload_file_with_encoding(event_loop, file_svc, data_svc, encoding, upload_content, decoded_content):
file_svc.data_svc = data_svc
upload_dir = event_loop.run_until_complete(file_svc.create_exfil_sub_directory('testencodeduploaddir'))
upload_filename = 'testencodedupload.txt'
event_loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir, encrypt=False,
encoding=encoding))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
assert os.path.isfile(uploaded_file_path)
with open(uploaded_file_path, 'rb') as file:
written_data = file.read()
assert written_data == decoded_content
os.remove(uploaded_file_path)
os.rmdir(upload_dir)
|
53aa840c073fb6ba2e05ec680f6a924ee823ad0e
|
08ee04ae665dcb930ed4b98ca7b91b2dac2cc3b0
|
/src/rayoptics/codev/tla.py
|
1edcd575d56dd87c313e2705697ae3cc22b90df9
|
[
"BSD-3-Clause"
] |
permissive
|
mjhoptics/ray-optics
|
6bad622f7bb9b3485823b9cc511a6d2b679f7048
|
41ea6d618a93fe14f8bee45fb3efff6a6762bcce
|
refs/heads/master
| 2023-07-09T18:03:36.621685
| 2023-05-08T22:46:36
| 2023-05-08T22:46:36
| 109,168,474
| 195
| 49
|
BSD-3-Clause
| 2023-08-10T16:53:28
| 2017-11-01T18:34:12
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
tla.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 Michael J. Hayford
""" Support for CODE V TLAs
.. codeauthor: Michael J. Hayford
"""
import csv
from pathlib import Path
class MapTLA:
""" Create and maintain a dictionary of CODE V 3 letter commands """
_d = {}
def __init__(self):
TLA, CmdFct, IndxQuals, DataType, Quals = range(5)
if len(MapTLA._d) == 0:
path = Path(__file__).resolve().parent
with open(path / 'tla_mapping.csv') as f:
reader = csv.reader(f)
for row in reader:
if row[TLA] != '':
if row[Quals] != '':
row[Quals] = row[Quals].split(',')
MapTLA._d[row[TLA]] = row[CmdFct:]
def find(self, tla):
try:
return MapTLA._d[tla]
except KeyError:
return None
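# Usage sketch (the TLA key below is hypothetical; real keys come from the
# tla_mapping.csv file that ships next to this module):
# mapping = MapTLA()
# entry = mapping.find('WAV')  # row data for the command, or None if unknown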
|
e130a04cbc030ba4e7845a3f8fcac6956abf2377
|
6a85191d6c2ae1e0db5873e7c6cb5d341eb72253
|
/torch_batch_svd/batch_svd.py
|
64e7fbed9d2b3ba0e8a3c28c9f892f8312d33a3f
|
[
"MIT"
] |
permissive
|
KinglittleQ/torch-batch-svd
|
3875b182ec2b496fda46dacbbb4e972b741d8223
|
c0a96119187f7d55f939d2ff2b92942c6d6ca930
|
refs/heads/master
| 2022-10-23T06:23:39.906646
| 2022-10-10T05:27:37
| 2022-10-10T05:27:37
| 171,496,660
| 356
| 36
|
MIT
| 2022-05-04T02:51:00
| 2019-02-19T15:17:40
|
C++
|
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
batch_svd.py
|
import torch
from . import _c
class BatchSVDFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor, some=True, compute_uv=True, out=None):
"""
This function returns `(U, S, V)`
which is the singular value decomposition
of a input real matrix or batches of real matrices `input`
:param ctx:
:param input:
:param out:
:return:
"""
assert (
input.shape[-1] < 32 and input.shape[-2] < 32
), "This implementation only supports matrices having dims smaller than 32"
is_double = True if input.dtype == torch.double else False
if input.dtype == torch.half:
input = input.float()
ctx.is_half = True
else:
ctx.is_half = False
        b, m, n = input.shape  # shape is also needed when `out` is provided
        if out is None:
U = torch.empty(b, m, m, dtype=input.dtype).to(input.device)
S = torch.empty(b, min(m, n), dtype=input.dtype).to(input.device)
V = torch.empty(b, n, n, dtype=input.dtype).to(input.device)
else:
U, S, V = out
_c.batch_svd_forward(input, U, S, V, True, 1e-7, 100, is_double)
U.transpose_(1, 2)
V.transpose_(1, 2)
if ctx.is_half:
U, S, V = U.half(), S.half(), V.half()
k = S.size(1)
U_reduced: torch.Tensor = U[:, :, :k]
V_reduced: torch.Tensor = V[:, :, :k]
ctx.save_for_backward(input, U_reduced, S, V_reduced)
if not compute_uv:
            U = torch.zeros(b, m, m, dtype=S.dtype).to(input.device)
            V = torch.zeros(b, n, n, dtype=S.dtype).to(input.device)  # V is n x n
return U, S, V
return (U_reduced, S, V_reduced) if some else (U, S, V)
@staticmethod
def backward(ctx, grad_u: torch.Tensor, grad_s: torch.Tensor, grad_v: torch.Tensor):
A, U, S, V = ctx.saved_tensors
if ctx.is_half:
grad_u, grad_s, grad_v = grad_u.float(), grad_s.float(), grad_v.float()
grad_out: torch.Tensor = _c.batch_svd_backward(
[grad_u, grad_s, grad_v], A, True, True, U.to(A.dtype), S.to(A.dtype), V.to(A.dtype)
)
if ctx.is_half:
grad_out = grad_out.half()
return grad_out
svd = BatchSVDFunction.apply
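# Usage sketch (assumes a CUDA device and the compiled `_c` extension; both
# matrix dims must be < 32, as asserted above):
# x = torch.rand(16, 9, 7, device='cuda')
# U, S, V = svd(x)  # batched SVD; with k = min(9, 7), shapes are
#                   # U: (16, 9, k), S: (16, k), V: (16, 7, k)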
|
13941c73ad1af9a2849e6c2f22c0f3d38cb1c3bf
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/pythran/tests/rosetta/poly_div.py
|
eb73ebfa0284b33930c9beaf6c61de24916a67e0
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
poly_div.py
|
#from http://rosettacode.org/wiki/Polynomial_long_division#Python
from math import fabs
#pythran export poly_div(int list, int list)
#runas poly_div([-42, 0, -12, 1], [-3, 1, 0, 0])
def degree(poly):
while poly and poly[-1] == 0:
poly.pop() # normalize
return len(poly)-1
def poly_div(N, D):
dD = degree(D)
dN = degree(N)
if dD < 0: raise ZeroDivisionError
if dN >= dD:
q = [0] * dN
while dN >= dD:
d = [0]*(dN - dD) + D
mult = q[dN - dD] = N[-1] / float(d[-1])
d = [coeff*mult for coeff in d]
N = [fabs ( coeffN - coeffd ) for coeffN, coeffd in zip(N, d)]
dN = degree(N)
r = N
else:
q = [0]
r = N
return q, r
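# Worked example matching the #runas line above: dividing x^3 - 12x^2 - 42 by
# x - 3. Because fabs() keeps every intermediate coefficient non-negative, this
# returns ([27.0, 9.0, 1.0], [123.0]) instead of the textbook
# ([-27, -9, 1], [-123]).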
|
59a523d608b3325f75184030b2222be870688e7f
|
a9fdace9236af6c73133fd8dddb80843697efc7d
|
/tests/catalyst/utils/test_quantization.py
|
a58ab6c0eb0a148b32387eada5be88d7ff3f439f
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst
|
026c38f26dad471cd77347adbc13423b156a5d8b
|
e99f90655d0efcf22559a46e928f0f98c9807ebf
|
refs/heads/master
| 2023-08-26T23:12:49.277005
| 2022-04-29T04:19:24
| 2022-04-29T04:19:24
| 145,385,156
| 3,038
| 487
|
Apache-2.0
| 2023-08-12T03:40:14
| 2018-08-20T07:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
test_quantization.py
|
# flake8: noqa
import os
import numpy as np
import pytest # noqa: F401
import torch
from catalyst.callbacks import AccuracyCallback
from catalyst.contrib.datasets import MNIST
from catalyst.runners import SupervisedRunner
from catalyst.settings import IS_CUDA_AVAILABLE
from catalyst.utils.quantization import quantize_model
from tests import DATA_ROOT
def test_api():
"""Test if model can be quantize through API"""
model = torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(28 * 28, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 64),
torch.nn.Linear(64, 10),
)
q_model = quantize_model(model)
torch.save(model.state_dict(), "model.pth")
torch.save(q_model.state_dict(), "q_model.pth")
model_size = os.path.getsize("model.pth")
q_model_size = os.path.getsize("q_model.pth")
assert q_model_size * 3.8 < model_size
os.remove("model.pth")
os.remove("q_model.pth")
def _evaluate_loader_accuracy(runner, loader):
"""Function to evaluate model."""
correct, num_examples = 0, 0
for batch in loader:
batch = {
"features": batch[0],
"targets": batch[1],
}
logits = runner.predict_batch(batch)["logits"].detach().numpy()
preds = logits.argmax(-1)
num_examples += preds.shape[0]
        correct += np.equal(preds, batch["targets"]).sum()  # accumulate across batches
return correct / num_examples
@pytest.mark.skipif(IS_CUDA_AVAILABLE, reason="CUDA device is available")
def test_accuracy():
"""Test if accuracy drops too low."""
model = torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(28 * 28, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 64),
torch.nn.Linear(64, 10),
)
datasets = {
"train": MNIST(DATA_ROOT, train=False),
"valid": MNIST(DATA_ROOT, train=False),
}
dataloaders = {
k: torch.utils.data.DataLoader(d, batch_size=32) for k, d in datasets.items()
}
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
runner = SupervisedRunner()
runner.train(
model=model,
optimizer=optimizer,
loaders=dataloaders,
callbacks=[AccuracyCallback(target_key="targets", input_key="logits")],
num_epochs=1,
criterion=torch.nn.CrossEntropyLoss(),
valid_loader="valid",
valid_metric="accuracy01",
minimize_valid_metric=False,
)
accuracy_before = _evaluate_loader_accuracy(runner, dataloaders["valid"])
q_model = quantize_model(model)
runner.model = q_model
accuracy_after = _evaluate_loader_accuracy(runner, dataloaders["valid"])
assert abs(accuracy_before - accuracy_after) < 0.01
|
245f2dc97421904ae3cc21d0f3035b3764607b95
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/the-k-strongest-values-in-an-array.py
|
49aa85436db6ada3b6d9a5407b73f9a1f77a5a39
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
the-k-strongest-values-in-an-array.py
|
# Time: O(nlogn)
# Space: O(1)
class Solution(object):
def getStrongest(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: List[int]
"""
arr.sort()
m = arr[(len(arr)-1)//2]
result = []
left, right = 0, len(arr)-1
while len(result) < k:
if m-arr[left] > arr[right]-m:
result.append(arr[left])
left += 1
else:
result.append(arr[right])
right -= 1
return result
# Time: O(nlogn)
# Space: O(1)
class Solution2(object):
def getStrongest(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: List[int]
"""
arr.sort()
m = arr[(len(arr)-1)//2]
arr.sort(key=lambda x: (-abs(x-m), -x))
return arr[:k]
# Time: O(n)
# Space: O(1)
import random
class Solution_TLE(object):
def getStrongest(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: List[int]
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def partition_around_pivot(left, right, pivot_idx, nums, compare):
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if compare(nums[i], nums[right]):
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = random.randint(left, right)
new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums, compare)
if new_pivot_idx == n:
return
elif new_pivot_idx > n:
right = new_pivot_idx - 1
else: # new_pivot_idx < n
left = new_pivot_idx + 1
nth_element(arr, (len(arr)-1)//2)
m = arr[(len(arr)-1)//2]
nth_element(arr, k, lambda a, b: abs(a-m) > abs(b-m) if abs(a-m) != abs(b-m) else a > b)
return arr[:k]
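# Worked example (LeetCode example 1): arr = [1, 2, 3, 4, 5], k = 2.
# The median m is 3; |5 - 3| == |1 - 3| and ties go to the larger value,
# so each solution returns [5, 1], e.g.:
# assert Solution().getStrongest([1, 2, 3, 4, 5], 2) == [5, 1]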
|
8ce79e32dfb9c1b8e4873d2e464b110b280e58ba
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/log-analytics-solution/azext_log_analytics_solution/commands.py
|
dbdc428cb58773196308f473b00f20a137fe6541
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
commands.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_solutions
def load_command_table(self, _):
log_analytics_solution_solutions = CliCommandType(
operations_tmpl='azext_log_analytics_solution.vendored_sdks.operationsmanagement.operations._solutions_operations#SolutionsOperations.{}',
client_factory=cf_solutions)
with self.command_group('monitor log-analytics solution', log_analytics_solution_solutions,
client_factory=cf_solutions, is_experimental=True) as g:
g.custom_command('create', 'create_monitor_log_analytics_solution', supports_no_wait=True)
g.custom_command('update', 'update_monitor_log_analytics_solution', supports_no_wait=True)
g.custom_command('delete', 'delete_monitor_log_analytics_solution', supports_no_wait=True, confirmation=True)
g.custom_show_command('show', 'get_monitor_log_analytics_solution')
g.custom_command('list', 'list_monitor_log_analytics_solution')
|
ba1e9b2bcaef67b17971182a92d3ebbdb2886199
|
26bbcfdb811f7df13f7b5a95ba551da7adac4e9b
|
/src/certfuzz/scoring/multiarmed_bandit/errors.py
|
1ad6dab9a1e95f3ab601e7b4867f6e784f73c142
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
CERTCC/certfuzz
|
080c3a5448a39d02049253fad96498ba50191586
|
892dae8676535b0ae5b77eea95ffbc21e9e1c959
|
refs/heads/develop
| 2022-11-11T06:12:09.032184
| 2020-06-10T19:57:26
| 2020-06-10T19:57:26
| 20,684,363
| 161
| 25
|
NOASSERTION
| 2023-05-10T14:27:00
| 2014-06-10T12:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 160
|
py
|
errors.py
|
'''
Created on Feb 22, 2013
@organization: cert.org
'''
from certfuzz.scoring.errors import ScoringError
class MultiArmedBanditError(ScoringError):
pass
|
3a38c405416fb43d1372ca51ca99bd30112f4125
|
d441c11696fe475a3fe721de35b53ac0661cfca3
|
/hearthstone/utils/__init__.py
|
5b3988f93d4d1c668834f23ade28995f5ecf0a7b
|
[
"MIT"
] |
permissive
|
HearthSim/python-hearthstone
|
00634e273982223d1c2f75757ed35c1567625a2d
|
ad91a08676bccccc54a18f9453124c5694d28feb
|
refs/heads/master
| 2023-08-30T23:37:23.224093
| 2023-08-22T18:20:09
| 2023-08-22T18:20:09
| 42,166,972
| 256
| 76
|
MIT
| 2023-06-01T16:09:58
| 2015-09-09T08:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 12,736
|
py
|
__init__.py
|
from datetime import datetime
from ..enums import CardClass, CardSet, Race, Rarity, ZodiacYear
try:
from lxml import etree as ElementTree # noqa
except ImportError:
from xml.etree import ElementTree # noqa
CARDCLASS_HERO_MAP = {
CardClass.DEATHKNIGHT: "HERO_11",
CardClass.DEMONHUNTER: "HERO_10",
CardClass.DRUID: "HERO_06",
CardClass.HUNTER: "HERO_05",
CardClass.MAGE: "HERO_08",
CardClass.PALADIN: "HERO_04",
CardClass.PRIEST: "HERO_09",
CardClass.ROGUE: "HERO_03",
CardClass.SHAMAN: "HERO_02",
CardClass.WARLOCK: "HERO_07",
CardClass.WARRIOR: "HERO_01",
CardClass.WHIZBANG: "BOT_914h",
}
# In the past, card set strings were predictably GLOBAL_CARD_SET_%CARDSET%. However,
# recent expansions use a custom three-letter set code instead.
CARDSET_GLOBAL_STRING_MAP = {
CardSet.DRAGONS: "GLOBAL_CARD_SET_DRG",
CardSet.YEAR_OF_THE_DRAGON: "GLOBAL_CARD_SET_YOD",
CardSet.DEMON_HUNTER_INITIATE: "GLOBAL_CARD_SET_DHI",
CardSet.BLACK_TEMPLE: "GLOBAL_CARD_SET_BT",
CardSet.SCHOLOMANCE: "GLOBAL_CARD_SET_SCH",
CardSet.DARKMOON_FAIRE: "GLOBAL_CARD_SET_DMF",
CardSet.THE_BARRENS: "GLOBAL_CARD_SET_BAR",
CardSet.STORMWIND: "GLOBAL_CARD_SET_SW",
CardSet.ALTERAC_VALLEY: "GLOBAL_CARD_SET_AV",
CardSet.THE_SUNKEN_CITY: "GLOBAL_CARD_SET_TSC",
}
# The following dictionary is a consequence of Hearthstone adding multi-race cards.
#
# Before patch 25.0 Hearthstone only supported a single Race tag per card. However, in order
# to support an arbitrary number of Races per card the game developer has introduced a set
# of flag tags, that only exist to signify cards belonging to a specific race.
#
# For example, a card like Wisp would be an "Undead Dragon" if it had the tags
# 2534 and 2523 set. However, in practice, one of these is still encoded using the
# Race tag, so such a card would likely have RACE = 11 (UNDEAD) and 2523 = 1 (DRAGON).
#
# If a new race is introduced, you're expected to add the tag here. You can find out the
# mapping by running patch processing and looking at the RaceTagMap.xml in the output
# directory.
CARDRACE_TAG_MAP = {
Race.BLOODELF: 2524,
Race.DRAENEI: None,
Race.DWARF: 2526,
Race.GNOME: 2527,
Race.GOBLIN: 2528,
Race.HUMAN: 2529,
Race.NIGHTELF: 2530,
Race.ORC: 2531,
Race.TAUREN: 2532,
Race.TROLL: 2533,
Race.UNDEAD: 2534,
Race.WORGEN: 2535,
Race.GOBLIN2: None,
Race.MURLOC: 2536,
Race.DEMON: 2537,
Race.SCOURGE: 2538,
Race.MECHANICAL: 2539,
Race.ELEMENTAL: 2540,
Race.OGRE: 2541,
Race.BEAST: 2542,
Race.TOTEM: 2543,
Race.NERUBIAN: 2544,
Race.PIRATE: 2522,
Race.DRAGON: 2523,
Race.BLANK: None,
Race.ALL: None,
Race.EGG: 2545,
Race.QUILBOAR: 2546,
Race.CENTAUR: 2547,
Race.FURBOLG: 2548,
Race.HIGHELF: 2549,
Race.TREANT: 2550,
Race.OWLKIN: 2551,
Race.HALFORC: 2552,
Race.LOCK: None,
Race.NAGA: 2553,
Race.OLDGOD: 2554,
Race.PANDAREN: 2555,
Race.GRONN: 2556,
Race.CELESTIAL: 2584,
Race.GNOLL: 2585,
Race.GOLEM: 2586,
Race.HARPY: 2587,
Race.VULPERA: 2588,
# See comment at start of dictionary for how to identify the value for newly added races
}
REVERSE_CARDRACE_TAG_MAP = {v: k for k, v in CARDRACE_TAG_MAP.items()}
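# Example lookups (these follow directly from the maps above):
# CARDRACE_TAG_MAP[Race.DRAGON] -> 2523
# REVERSE_CARDRACE_TAG_MAP[2523] -> Race.DRAGON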
SECRET_COSTS = {
CardClass.HUNTER: 2,
CardClass.MAGE: 3,
CardClass.PALADIN: 1,
CardClass.ROGUE: 2,
CardClass.WARRIOR: 0,
}
CRAFTING_COSTS = {
Rarity.COMMON: (40, 400),
Rarity.RARE: (100, 800),
Rarity.EPIC: (400, 1600),
Rarity.LEGENDARY: (1600, 3200),
}
DISENCHANT_COSTS = {
Rarity.COMMON: (5, 50),
Rarity.RARE: (20, 100),
Rarity.EPIC: (100, 400),
Rarity.LEGENDARY: (400, 1600),
}
STANDARD_SETS = {
ZodiacYear.PRE_STANDARD: [
CardSet.BASIC, CardSet.EXPERT1, CardSet.REWARD, CardSet.PROMO,
CardSet.NAXX, CardSet.GVG, CardSet.BRM, CardSet.TGT, CardSet.LOE,
],
ZodiacYear.KRAKEN: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.BRM, CardSet.TGT, CardSet.LOE, CardSet.OG, CardSet.OG_RESERVE,
CardSet.KARA, CardSet.KARA_RESERVE, CardSet.GANGS, CardSet.GANGS_RESERVE,
],
ZodiacYear.MAMMOTH: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.OG, CardSet.OG_RESERVE, CardSet.KARA, CardSet.KARA_RESERVE,
CardSet.GANGS, CardSet.GANGS_RESERVE, CardSet.UNGORO, CardSet.ICECROWN,
CardSet.LOOTAPALOOZA,
],
ZodiacYear.RAVEN: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.UNGORO, CardSet.ICECROWN, CardSet.LOOTAPALOOZA, CardSet.GILNEAS,
CardSet.BOOMSDAY, CardSet.TROLL,
],
ZodiacYear.DRAGON: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.GILNEAS, CardSet.BOOMSDAY, CardSet.TROLL, CardSet.DALARAN, CardSet.ULDUM,
CardSet.WILD_EVENT, CardSet.DRAGONS, CardSet.YEAR_OF_THE_DRAGON,
CardSet.BLACK_TEMPLE, CardSet.DEMON_HUNTER_INITIATE,
],
ZodiacYear.PHOENIX: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.DALARAN, CardSet.ULDUM, CardSet.WILD_EVENT, CardSet.DRAGONS,
CardSet.YEAR_OF_THE_DRAGON, CardSet.BLACK_TEMPLE, CardSet.DEMON_HUNTER_INITIATE,
CardSet.SCHOLOMANCE, CardSet.DARKMOON_FAIRE,
],
ZodiacYear.GRYPHON: [
CardSet.CORE,
CardSet.BLACK_TEMPLE, CardSet.SCHOLOMANCE, CardSet.DARKMOON_FAIRE,
CardSet.THE_BARRENS, CardSet.WAILING_CAVERNS, CardSet.STORMWIND,
CardSet.ALTERAC_VALLEY,
],
ZodiacYear.HYDRA: [
CardSet.CORE,
CardSet.THE_BARRENS, CardSet.WAILING_CAVERNS, CardSet.STORMWIND,
CardSet.ALTERAC_VALLEY, CardSet.THE_SUNKEN_CITY, CardSet.REVENDRETH,
CardSet.RETURN_OF_THE_LICH_KING, CardSet.PATH_OF_ARTHAS,
CardSet.BATTLE_OF_THE_BANDS,
],
ZodiacYear.WOLF: [
CardSet.CORE,
CardSet.THE_SUNKEN_CITY, CardSet.REVENDRETH, CardSet.RETURN_OF_THE_LICH_KING,
CardSet.PATH_OF_ARTHAS, CardSet.BATTLE_OF_THE_BANDS, CardSet.TITANS,
]
}
try:
_EPOCH = datetime.fromtimestamp(0)
except OSError:
# https://bugs.python.org/issue29097 (Windows-only)
_EPOCH = datetime.fromtimestamp(86400)
ZODIAC_ROTATION_DATES = {
ZodiacYear.PRE_STANDARD: _EPOCH,
ZodiacYear.KRAKEN: datetime(2016, 4, 26),
ZodiacYear.MAMMOTH: datetime(2017, 4, 7),
ZodiacYear.RAVEN: datetime(2018, 4, 12),
ZodiacYear.DRAGON: datetime(2019, 4, 9),
ZodiacYear.PHOENIX: datetime(2020, 4, 7),
ZodiacYear.GRYPHON: datetime(2021, 3, 30),
ZodiacYear.HYDRA: datetime(2022, 4, 12),
ZodiacYear.WOLF: datetime(2023, 4, 11),
}
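# Illustrative helper (a minimal sketch, not part of the original API): the
# zodiac year in effect on a given date is the latest rotation starting on or
# before that date.
# def zodiac_year_for_date(date):
#     return max((d, y) for y, d in ZODIAC_ROTATION_DATES.items() if d <= date)[1]
# zodiac_year_for_date(datetime(2019, 6, 1))  # -> ZodiacYear.DRAGON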
# QuestController.cs
QUEST_REWARDS = {
"UNG_940": "UNG_940t8",
"UNG_954": "UNG_954t1",
"UNG_934": "UNG_934t1",
"UNG_829": "UNG_829t1",
"UNG_028": "UNG_028t",
"UNG_067": "UNG_067t1",
"UNG_116": "UNG_116t",
"UNG_920": "UNG_920t1",
"UNG_942": "UNG_942t",
}
# GameplayStringTextBuilder.cs
SPELLSTONE_STRINGS = {
"LOOT_043": "GAMEPLAY_AMETHYST_SPELLSTONE_%d",
"LOOT_051": "GAMEPLAY_JASPER_SPELLSTONE_%d",
"LOOT_064": "GAMEPLAY_SAPPHIRE_SPELLSTONE_%d",
"LOOT_091": "GAMEPLAY_PEARL_SPELLSTONE_%d",
"LOOT_103": "GAMEPLAY_RUBY_SPELLSTONE_%d",
"LOOT_503": "GAMEPLAY_ONYX_SPELLSTONE_%d",
"LOOT_507": "GAMEPLAY_DIAMOND_SPELLSTONE_%d",
"LOOT_526d": "GAMEPLAY_LOOT_526d_DARKNESS_%d",
}
UPGRADABLE_CARDS_MAP = {
# Fatespinner
"ICC_047t": "ICC_047",
"ICC_047t2": "ICC_047",
# Lesser Amethyst Spellstone
"LOOT_043t2": "LOOT_043",
"LOOT_043t3": "LOOT_043",
# Lesser Jasper Spellstone
"LOOT_051t1": "LOOT_051",
"LOOT_051t2": "LOOT_051",
# Lesser Sapphire Spellstone
"LOOT_064t1": "LOOT_064",
"LOOT_064t2": "LOOT_064",
# Lesser Emerald Spellstone
"LOOT_080t2": "LOOT_080",
"LOOT_080t3": "LOOT_080",
# Lesser Pearl Spellstone
"LOOT_091t1": "LOOT_091",
"LOOT_091t2": "LOOT_091",
# Lesser Ruby Spellstone
"LOOT_103t1": "LOOT_103",
"LOOT_103t2": "LOOT_103",
# Lesser Mithril Spellstone
"LOOT_203t2": "LOOT_203",
"LOOT_203t3": "LOOT_203",
    # Unidentified Elixir
"LOOT_278t1": "LOOT_278",
"LOOT_278t2": "LOOT_278",
"LOOT_278t3": "LOOT_278",
"LOOT_278t4": "LOOT_278",
# Unidentified Shield
"LOOT_285t": "LOOT_285",
"LOOT_285t2": "LOOT_285",
"LOOT_285t3": "LOOT_285",
"LOOT_285t4": "LOOT_285",
# Unidentified Maul
"LOOT_286t1": "LOOT_286",
"LOOT_286t2": "LOOT_286",
"LOOT_286t3": "LOOT_286",
"LOOT_286t4": "LOOT_286",
# Lesser Onyx Spellstone
"LOOT_503t": "LOOT_503",
"LOOT_503t2": "LOOT_503",
# Lesser Diamond Spellstone
"LOOT_507t": "LOOT_507",
"LOOT_507t2": "LOOT_507",
# Duskhaven Hunter
"GIL_200t": "GIL_200",
# Pumpkin Peasant
"GIL_201t": "GIL_201",
# Gilnean Royal Guard
"GIL_202t": "GIL_202",
# Swift Messenger
"GIL_528t": "GIL_528",
# Spellshifter
"GIL_529t": "GIL_529",
# Unidentified Contract
"DAL_366t1": "DAL_366",
"DAL_366t2": "DAL_366",
"DAL_366t3": "DAL_366",
"DAL_366t4": "DAL_366",
# Galakrond
"DRG_600t2": "DRG_600",
"DRG_600t3": "DRG_600",
"DRG_610t2": "DRG_610",
"DRG_610t3": "DRG_610",
"DRG_620t2": "DRG_620",
"DRG_620t3": "DRG_620",
"DRG_650t2": "DRG_650",
"DRG_650t3": "DRG_650",
"DRG_660t2": "DRG_660",
"DRG_660t3": "DRG_660",
# Corrupted Card
"DMF_061t": "DMF_061", # Faire Arborist
"DMF_730t": "DMF_730", # Moontouched Amulet
"DMF_083t": "DMF_083", # Dancing Cobra
"DMF_101t": "DMF_101", # Firework Elemental
"DMF_054t": "DMF_054", # Insight
"DMF_184t": "DMF_184", # Fairground Fool
"DMF_517a": "DMF_517", # Sweet Tooth
"DMF_703t": "DMF_703", # Pit Master
"DMF_526a": "DMF_526", # Stage Dive
"DMF_073t": "DMF_073", # Darkmoon Dirigible
"DMF_082t": "DMF_082", # Darkmoon Statue
"DMF_174t": "DMF_174", # Circus Medic
"DMF_163t": "DMF_163", # Carnival Clown
# Cascading Disaster
"DMF_117t2": "DMF_117",
"DMF_117t": "DMF_117",
"DMF_078t": "DMF_078", # Strongman
"DMF_186a": "DMF_186", # Auspicious Spirits
"DMF_118t": "DMF_118", # Tickatus
"DMF_247t": "DMF_247", # Insatiable Felhound
"DMF_248t": "DMF_248", # Felsteel Executioner
"DMF_064t": "DMF_064", # Carousel Gryphon
"DMF_124t": "DMF_124", # Horrendous Growth
"DMF_090t": "DMF_090", # Don't Feed the Animals
"DMF_105t": "DMF_105", # Ring Toss
"DMF_701t": "DMF_701", # Dunk Tank
"DMF_080t": "DMF_080", # Fleethoof Pearltusk
"DMF_244t": "DMF_244", # Day at the Faire
# Tame Beast
"BAR_034t": "BAR_034",
"BAR_034t2": "BAR_034",
# Chain Lightning
"BAR_044t": "BAR_044",
"BAR_044t2": "BAR_044",
# Flurry
"BAR_305t": "BAR_305",
"BAR_305t2": "BAR_305",
# Condemn
"BAR_314t": "BAR_314",
"BAR_314t2": "BAR_314",
# Wicked Stab
"BAR_319t": "BAR_319",
"BAR_319t2": "BAR_319",
# Living Seed
"BAR_536t": "BAR_536",
"BAR_536t2": "BAR_536",
# Conviction
"BAR_880t": "BAR_880",
"BAR_880t2": "BAR_880",
# Conditioning
"BAR_842t": "BAR_842",
"BAR_842t2": "BAR_842",
# Fury
"BAR_891t": "BAR_891",
"BAR_891t2": "BAR_891",
# Imp Swarm
"BAR_914t": "BAR_914",
"BAR_914t2": "BAR_914",
# Harmonic / Dissonant cards
"ETC_314t": "ETC_314", # Harmonic Pop
"ETC_379t": "ETC_379", # Harmonic Mood
"ETC_427t": "ETC_427", # Harmonic Metal
"ETC_506t": "ETC_506", # Harmonic Disco
"ETC_717t": "ETC_717", # Harmonic Hip Hop
# Remixed Dispense-o-bot
"JAM_000t": "JAM_000", # Chilling Dispense-o-bot
"JAM_000t2": "JAM_000", # Merch Dispense-o-bot
"JAM_000t3": "JAM_000", # Money Dispense-o-bot
"JAM_000t4": "JAM_000", # Mystery Dispense-o-bot
# Remixed Totemcarver
"JAM_012t": "JAM_012", # Loud Totemcarver
"JAM_012t2": "JAM_012", # Bluesy Totemcarver
"JAM_012t3": "JAM_012", # Blazing Totemcarver
"JAM_012t4": "JAM_012", # Karaoke Totemcarver
# Remixed Tuning Fork
"JAM_015t": "JAM_015", # Sharpened Tuning Fork
"JAM_015t2": "JAM_015", # Reinforced Tuning Fork
"JAM_015t3": "JAM_015", # Curved Tuning Fork
"JAM_015t4": "JAM_015", # Backup Tuning Fork
# Remixed Rhapsody
"JAM_018t": "JAM_018", # Angsty Rhapsody
"JAM_018t2": "JAM_018", # Resounding Rhapsody
"JAM_018t3": "JAM_018", # Emotional Rhapsody
"JAM_018t4": "JAM_018", # Wailing Rhapsody
# Remixed Musician
"JAM_033t": "JAM_033", # Cathedral Musician
"JAM_033t2": "JAM_033", # Tropical Musician
"JAM_033t3": "JAM_033", # Romantic Musician
"JAM_033t4": "JAM_033", # Noise Musician
}
def get_original_card_id(card_id):
# Transfer Student
if str(card_id).startswith("SCH_199t"):
return "SCH_199"
return UPGRADABLE_CARDS_MAP.get(card_id, card_id)
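# Example mappings (derived from UPGRADABLE_CARDS_MAP and the special case above):
# get_original_card_id("ICC_047t")  -> "ICC_047"  (upgraded Fatespinner)
# get_original_card_id("SCH_199t2") -> "SCH_199"  (Transfer Student variant)
# get_original_card_id("XXX_000")   -> "XXX_000"  (hypothetical id, returned unchanged)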
SCHEME_CARDS = [
"DAL_007", # Rafaam's Scheme
"DAL_008", # Dr. Boom's Scheme
"DAL_009", # Hagatha's Scheme
"DAL_010", # Tagwaggle's Scheme
"DAL_011", # Lazul's Scheme
]
MAESTRA_DISGUISE_DBF_ID = 64674
if __name__ == "__main__":
def _print_cs_dicts(dicts_and_names, tl_format, format):
ret = []
linefmt = "\t\t{ %d, %s }"
for name, dict in dicts_and_names:
keytype = int
valtype = list(dict.values())[0].__class__
lines = ",\n".join(
linefmt % (keytype(key), valtype(value))
for key, value in dict.items()
if key is not None
)
ret.append(format % (name, lines))
lines = "\n\n".join(ret)
print(tl_format % (lines))
print("using System.Collections.Generic;\n")
_print_cs_dicts(
[
("TagRaceMap", REVERSE_CARDRACE_TAG_MAP)
],
"public static class RaceUtils {\n%s\n}",
"\tpublic static Dictionary<int, Race> %s = new Dictionary<int, Race>() {\n%s\n\t};",
)
|
9a5e3354a696afe155beeaed6989688c1bb5bd96
|
c9ff14ff176600169b6e9f6490ab32f5c3af60e0
|
/jcvi/compara/synfind.py
|
aa1b8fb4561e83ebe3213acd0e921c44abc3b37d
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
tanghaibao/jcvi
|
c7a070692d53784a34378e19e435cb9a86d2cd2e
|
695bd2eee98b14118b54fc37e38cd0222ce6a5e9
|
refs/heads/main
| 2023-09-01T01:22:04.353148
| 2023-08-30T01:59:11
| 2023-08-30T01:59:11
| 1,130,393
| 641
| 193
|
BSD-2-Clause
| 2023-09-01T03:17:24
| 2010-12-01T23:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,233
|
py
|
synfind.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog rice.sorghum.last --qbed=rice.bed --sbed=sorghum.bed
Given a blast file, we find the syntenic regions for every single gene. The
algorithm works by expanding the query gene to a window centered on the gene. A
single-linkage clustering step then outputs the synteny block.
The result looks like the following:
Os01g0698300 Sb03g032090 S 7 +
Os01g0698500 Sb03g032140 G 11 +
The pairs (A, B) -- A is the query, and B is the syntenic region found.
G is "Gray gene", which means it does not have a match in the region (fractionated
or inserted). In this case, a nearby flanker gene is used to represent the region.
S is "Syntelog", which means it has a match to the region. In this case, the match
itself is used to represent the region. The number in the 4th column is the
synteny score. For the same query, it is ordered with decreasing synteny score.
The last column means orientation. "+" is same direction.
"""
import os.path as op
import logging
import sys
import sqlite3
from bisect import bisect_left
from itertools import groupby, tee
from jcvi.algorithms.lis import (
longest_increasing_subsequence,
longest_decreasing_subsequence,
)
from jcvi.compara.synteny import check_beds, read_blast
from jcvi.utils.grouper import Grouper
from jcvi.formats.base import must_open
from jcvi.apps.base import OptionParser, OptionGroup
def transposed(data):
x, y = zip(*data)
return zip(y, x)
def get_flanker(group, query):
"""
>>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385)
((373, 15176), (400, 15193), True)
>>> get_flanker([(124, 13639), (137, 13625)], 138)
((137, 13625), (137, 13625), False)
"""
group.sort()
pos = bisect_left(group, (query, 0))
left_flanker = group[0] if pos == 0 else group[pos - 1]
right_flanker = group[-1] if pos == len(group) else group[pos]
# pick the closest flanker
if abs(query - left_flanker[0]) < abs(query - right_flanker[0]):
flanker, other = left_flanker, right_flanker
else:
flanker, other = right_flanker, left_flanker
flanked = not (pos == 0 or pos == len(group) or flanker == query)
return flanker, other, flanked
def find_synteny_region(query, sbed, data, window, cutoff, colinear=False):
"""
Get all synteny blocks for a query, algorithm is single linkage
anchors are a window centered on query
Two categories of syntenic regions depending on what query is:
(Syntelog): syntenic region is denoted by the syntelog
(Gray gene): syntenic region is marked by the closest flanker
"""
regions = []
ysorted = sorted(data, key=lambda x: x[1])
g = Grouper()
a, b = tee(ysorted)
next(b, None)
for ia, ib in zip(a, b):
pos1, pos2 = ia[1], ib[1]
if pos2 - pos1 < window and sbed[pos1].seqid == sbed[pos2].seqid:
g.join(ia, ib)
for group in sorted(g):
(qflanker, syntelog), (far_flanker, far_syntelog), flanked = get_flanker(
group, query
)
# run a mini-dagchainer here, take the direction that gives us most anchors
if colinear:
y_indexed_group = [(y, i) for i, (x, y) in enumerate(group)]
lis = longest_increasing_subsequence(y_indexed_group)
lds = longest_decreasing_subsequence(y_indexed_group)
if len(lis) >= len(lds):
track = lis
orientation = "+"
else:
track = lds
orientation = "-"
group = [group[i] for (y, i) in track]
xpos, ypos = zip(*group)
score = min(len(set(xpos)), len(set(ypos)))
if qflanker == query:
gray = "S"
else:
gray = "G" if not flanked else "F"
score -= 1 # slight penalty for not finding syntelog
if score < cutoff:
continue
# y-boundary of the block
left, right = group[0][1], group[-1][1]
# this characterizes a syntenic region (left, right).
# syntelog is -1 if it's a gray gene
syn_region = (syntelog, far_syntelog, left, right, gray, orientation, score)
regions.append(syn_region)
return sorted(regions, key=lambda x: -x[-1]) # decreasing synteny score
def batch_query(qbed, sbed, all_data, opts, fw=None, c=None, transpose=False):
cutoff = int(opts.cutoff * opts.window)
window = opts.window / 2
colinear = opts.scoring == "collinear"
qnote, snote = opts.qnote, opts.snote
if qnote == "null" or snote == "null":
qnote = op.basename(qbed.filename).split(".")[0]
snote = op.basename(sbed.filename).split(".")[0]
# process all genes present in the bed file
if transpose:
all_data = transposed(all_data)
qbed, sbed = sbed, qbed
qnote, snote = snote, qnote
all_data.sort()
def simple_bed(x):
return sbed[x].seqid, sbed[x].start
qsimplebed = qbed.simple_bed
for seqid, ranks in groupby(qsimplebed, key=lambda x: x[0]):
ranks = [x[1] for x in ranks]
for r in ranks:
rmin = max(r - window, ranks[0])
rmax = min(r + window + 1, ranks[-1])
rmin_pos = bisect_left(all_data, (rmin, 0))
rmax_pos = bisect_left(all_data, (rmax, 0))
data = all_data[rmin_pos:rmax_pos]
regions = find_synteny_region(
r, sbed, data, window, cutoff, colinear=colinear
)
for (
syntelog,
far_syntelog,
left,
right,
gray,
orientation,
score,
) in regions:
query = qbed[r].accn
left_chr, left_pos = simple_bed(left)
right_chr, right_pos = simple_bed(right)
anchor = sbed[syntelog].accn
anchor_chr, anchor_pos = simple_bed(syntelog)
# below is useful for generating the syntenic region in the coge url
left_dist = abs(anchor_pos - left_pos) if anchor_chr == left_chr else 0
right_dist = (
abs(anchor_pos - right_pos) if anchor_chr == right_chr else 0
)
flank_dist = (max(left_dist, right_dist) / 10000 + 1) * 10000
far_syntelog = sbed[far_syntelog].accn
data = [
query,
anchor,
gray,
score,
flank_dist,
orientation,
far_syntelog,
]
pdata = data[:6] + [qnote, snote]
if fw:
print("\t".join(str(x) for x in pdata), file=fw)
continue
c.execute("insert into synteny values (?,?,?,?,?,?,?,?)", pdata)
def main(blastfile, p, opts):
sqlite = opts.sqlite
qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
filtered_blast = read_blast(
blastfile, qorder, sorder, is_self=is_self, ostrip=opts.strip_names
)
all_data = [(b.qi, b.si) for b in filtered_blast]
c = None
if sqlite:
conn = sqlite3.connect(sqlite)
c = conn.cursor()
c.execute("drop table if exists synteny")
c.execute(
"create table synteny (query text, anchor text, "
"gray varchar(1), score integer, dr integer, "
"orientation varchar(1), qnote text, snote text)"
)
fw = None
else:
fw = must_open(opts.outfile, "w")
batch_query(qbed, sbed, all_data, opts, fw=fw, c=c, transpose=False)
if qbed.filename == sbed.filename:
logging.debug("Self comparisons, mirror ignored")
else:
batch_query(qbed, sbed, all_data, opts, fw=fw, c=c, transpose=True)
if sqlite:
c.execute("create index q on synteny (query)")
conn.commit()
c.close()
else:
fw.close()
if __name__ == "__main__":
p = OptionParser(__doc__)
p.set_beds()
p.set_stripnames()
p.set_outfile()
coge_group = OptionGroup(p, "CoGe-specific options")
coge_group.add_option("--sqlite", help="Write sqlite database")
coge_group.add_option("--qnote", default="null", help="Query dataset group id")
coge_group.add_option("--snote", default="null", help="Subject dataset group id")
params_group = OptionGroup(p, "Synteny parameters")
params_group.add_option(
"--window", type="int", default=40, help="Synteny window size"
)
params_group.add_option(
"--cutoff",
type="float",
default=0.1,
help="Minimum number of anchors to call synteny",
)
supported_scoring = ("collinear", "density")
params_group.add_option(
"--scoring",
choices=supported_scoring,
default="collinear",
help="Scoring scheme",
)
p.add_option_group(coge_group)
p.add_option_group(params_group)
opts, args = p.parse_args()
if len(args) != 1:
sys.exit(not p.print_help())
(blastfile,) = args
main(blastfile, p, opts)
|
868d27d3968cdf7bc52d0ccb08d7866962a6c8ef
|
39b021eabbb8e3be1734cf92fd641965a796b0eb
|
/examples/tox21/tox21_tensorgraph_graph_conv.py
|
b663a81633ed930887a5cf6fb6bf4eafb2d9a343
|
[
"MIT"
] |
permissive
|
deepchem/deepchem
|
066cbf42316b2f6bec0166727e0264a485d5266f
|
ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
|
refs/heads/master
| 2023-09-02T01:32:17.860111
| 2023-08-31T18:49:00
| 2023-08-31T18:49:00
| 43,098,215
| 4,876
| 1,905
|
MIT
| 2023-09-14T19:10:44
| 2015-09-24T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
tox21_tensorgraph_graph_conv.py
|
"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_tox21
from deepchem.models.graph_models import GraphConvModel
model_dir = "/tmp/graph_conv"
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
print(train_dataset.data_dir)
print(valid_dataset.data_dir)
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
# Batch size of models
batch_size = 50
model = GraphConvModel(
len(tox21_tasks), batch_size=batch_size, mode='classification')
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
c706653d13dccd49632620e4b6c2143c33b3503e
|
6946f9a3e9d57b00ea275b2303ced0dedcdba1d4
|
/qf_lib/backtesting/contract/contract_to_ticker_conversion/ib_contract_ticker_mapper.py
|
de2a9b77f1d46a5adad19bce80ebb847c0b366fd
|
[
"Apache-2.0"
] |
permissive
|
quarkfin/qf-lib
|
8eaf76e3db385295ff8845b3250ba64a6fcfc7a6
|
f707e51bc2ff45f6e46dcdd24d59d83ce7dc4f94
|
refs/heads/master
| 2023-08-31T17:41:57.213680
| 2023-08-29T10:01:49
| 2023-08-29T10:01:49
| 202,696,503
| 379
| 51
|
Apache-2.0
| 2023-09-05T06:11:35
| 2019-08-16T09:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,748
|
py
|
ib_contract_ticker_mapper.py
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.common.enums.expiration_date_field import ExpirationDateField
from qf_lib.common.enums.security_type import SecurityType
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.data_providers.data_provider import DataProvider
from qf_lib.brokers.ib_broker.ib_contract import IBContract
class IBContractTickerMapper(ContractTickerMapper):
""" IB IBContract mapper that can be used for live trading. Maps Tickers onto Interactive Brokers IBContract objects.
Parameters
----------
ticker_to_contract: Dict[Ticker, IBContract]
mapping between Tickers (also FutureTickers) and parameters that should be used for these tickers, when
transforming them into Contracts.
data_provider: Optional[DataProvider]
        data provider used to obtain the value of the last trade date for various tickers. The parameter is
        optional and is necessary only if the mapping between FutureTickers and Contracts is required.
"""
def __init__(self, ticker_to_contract: Dict[Ticker, IBContract], data_provider: Optional[DataProvider] = None):
self._validate_ticker_to_contract_mapping(ticker_to_contract)
self._ticker_to_contract_dict = ticker_to_contract
self._mapped_tickers = list(self._ticker_to_contract_dict.keys())
self._contract_to_ticker_dict = {item: key for key, item in ticker_to_contract.items()}
self._data_provider = data_provider
def _validate_ticker_to_contract_mapping(self, ticker_to_contract: Dict[Ticker, IBContract]):
mapped_contracts = ticker_to_contract.values()
assert len(mapped_contracts) == len(set(mapped_contracts)), "The same IBContract was assigned to multiple " \
"different tickers in the ticker_to_contract " \
"parameter. Please, make sure that each " \
"contract matches only one ticker."
def contract_to_ticker(self, contract: IBContract) -> Ticker:
""" It always maps to the specific ticker. """
# Create a new instance of the IBContract, with the last_trade_date parameter removed
contract_without_last_trade_date = IBContract.from_ib_contract(contract)
contract_without_last_trade_date.last_trade_date = None
# Search for the contract in the ticker to contract dictionary, ignoring last_trade_date if necessary
ticker = self._contract_to_ticker_dict.get(contract, None) or \
self._contract_to_ticker_dict.get(contract_without_last_trade_date, None)
# Get the specific ticker for the given last trade date
if isinstance(ticker, FutureTicker):
if self._data_provider is None:
raise ValueError(f"In order to map contract {contract} onto a corresponding ticker, it is necessary "
f"to set the data_provider to obtain the ticker for the given last trade date.")
chain_tickers = self._data_provider.get_futures_chain_tickers(
ticker, ExpirationDateField.LastTradeableDate)[ticker]
chain_tickers = chain_tickers.index[chain_tickers == contract.last_trade_date]
ticker = chain_tickers[0] if not chain_tickers.empty else None
if ticker is None:
raise ValueError(f"Could not map Interactive Brokers contract {contract} onto a Ticker object.")
return ticker
def ticker_to_contract(self, ticker: Ticker) -> IBContract:
ticker = ticker.get_current_specific_ticker() if isinstance(ticker, FutureTicker) else ticker
contract = self._ticker_to_contract_dict.get(ticker, None)
if not contract and ticker.security_type == SecurityType.FUTURE:
contract = self._create_futures_contract(ticker)
return contract
def _create_futures_contract(self, specific_ticker: Ticker):
""" Creates an IBContract instance for a given specific future ticker. """
future_ticker = self._get_matching_future_ticker(specific_ticker)
mapped_contract = self._ticker_to_contract_dict.get(future_ticker, None)
if not mapped_contract:
raise ValueError(f"Could not map the ticker {specific_ticker} onto Interactive Brokers contract.")
elif self._data_provider is None:
raise ValueError(f"In order to map ticker {specific_ticker} onto a corresponding IBContract, it is "
f"necessary to set the data_provider to obtain the last trade date value.")
chain_tickers = self._data_provider.get_futures_chain_tickers(
future_ticker, ExpirationDateField.LastTradeableDate)[future_ticker]
try:
last_trade_date = chain_tickers.loc[specific_ticker].to_pydatetime()
contract = IBContract.from_ib_contract(mapped_contract)
contract.last_trade_date = last_trade_date
return contract
except KeyError:
raise ValueError(f"Cannot map the future ticker {specific_ticker} as it doesn't have a corresponding "
f"last trade date returned by the DataProvider.") from None
def _get_matching_future_ticker(self, specific_ticker: Ticker) -> Optional[FutureTicker]:
""" For a given ticker returns a corresponding future ticker if any exists. """
matching_tickers = [fut_ticker for fut_ticker in self._mapped_tickers if isinstance(fut_ticker, FutureTicker)
and fut_ticker.belongs_to_family(specific_ticker)]
if len(matching_tickers) > 1:
raise ValueError(f"Ticker {specific_ticker} belongs to more then one future family: {matching_tickers}.")
return matching_tickers[0] if matching_tickers else None
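# A minimal usage sketch (the ticker object and IBContract arguments below are
# hypothetical; they depend on the instruments being traded):
#
#   mapping = {my_ticker: my_ib_contract}
#   mapper = IBContractTickerMapper(mapping, data_provider=None)
#   contract = mapper.ticker_to_contract(my_ticker)
#   assert mapper.contract_to_ticker(contract) == my_ticker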
|
cb6628764b80925dc0e8f456e03109b9088d56fa
|
1851e4f61c1a05580e1cc63495f476b957de1485
|
/code/deep/adarnn/base/loss/__init__.py
|
1de10e0b497dfd55550bf3f79124ba923f967d98
|
[
"MIT"
] |
permissive
|
jindongwang/transferlearning
|
6ef36b40634a77dfaace8d9c4fe18941410c2e37
|
0b801d2d2e828ac480d1097cb3bdd82b1e25c15b
|
refs/heads/master
| 2023-08-30T23:57:47.960290
| 2023-08-29T11:50:15
| 2023-08-29T11:50:15
| 89,846,872
| 12,773
| 3,920
|
MIT
| 2023-08-05T04:46:11
| 2017-04-30T11:32:21
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
__init__.py
|
from base.loss.adv_loss import adv
from base.loss.coral import CORAL
from base.loss.cos import cosine
from base.loss.kl_js import kl_div, js
from base.loss.mmd import MMD_loss
from base.loss.mutual_info import Mine
from base.loss.pair_dist import pairwise_dist
__all__ = [
'adv',
'CORAL',
'cosine',
'kl_div',
    'js',
'MMD_loss',
'Mine',
'pairwise_dist'
]
|
166306639867947a57c08486ec23aee506751394
|
331640994b1b6f66c1639278571ddbdc6c8c0751
|
/test/unit/check/tls.py
|
9cc2a5f9c721b123c49bebe65d244d71805febba
|
[
"Apache-2.0"
] |
permissive
|
nginx/unit
|
eabcd067eaa60f4bdcf0cfaffe7d9932add2c66a
|
9b22b6957bc87b3df002d0bc691fdae6a20abdac
|
refs/heads/master
| 2023-09-04T02:02:13.581700
| 2023-08-30T16:07:24
| 2023-08-30T16:07:24
| 102,627,638
| 4,649
| 452
|
Apache-2.0
| 2023-09-12T01:28:22
| 2017-09-06T15:45:30
|
C
|
UTF-8
|
Python
| false
| false
| 242
|
py
|
tls.py
|
import re
import subprocess
def check_openssl(output_version):
try:
subprocess.check_output(['which', 'openssl'])
except subprocess.CalledProcessError:
return False
return re.search('--openssl', output_version)
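# Illustrative behaviour (assuming an `openssl` binary is on PATH; the version
# strings below are made up):
#   check_openssl('unit 1.30.0 ... --openssl ...')   -> truthy re.Match
#   check_openssl('unit 1.30.0 built without TLS')   -> None (falsy)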
|
3c5d22f47278f2f9afa9844af8d87fe261062096
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-core/PyObjCTest/test_varargs.py
|
7db84d4dbb28eb90bd7b8b23fb68805136016e24
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
test_varargs.py
|
import objc
from PyObjCTools.TestSupport import TestCase
NSObject = objc.lookUpClass("NSObject")
if 0:
class VarargsMethod(TestCase):
def testVariableArgumentCount(self):
class VarArgsClass1(NSObject):
def instanceMethod1_(self, arg1, *args):
arg1.append(args)
def classMethod1_(cls, arg1, *args):
arg1.append(args)
classMethod1_ = classmethod(classMethod1_)
def instanceMethod2_(self, *args):
args[0].append(args[1:])
def classMethod2_(cls, *args):
args[0].append(args[1:])
classMethod2_ = classmethod(classMethod2_)
o = VarArgsClass1.alloc().init()
lst = []
o.instanceMethod1_(lst, 1, 2, 3)
self.assertEqual(lst, [(1, 2, 3)])
lst = []
VarArgsClass1.classMethod1_(lst, 3, 4, 5)
self.assertEqual(lst, [(3, 4, 5)])
lst = []
o.instanceMethod2_(lst, 1, 2, 3)
self.assertEqual(lst, [(1, 2, 3)])
lst = []
VarArgsClass1.classMethod2_(lst, 3, 4, 5)
self.assertEqual(lst, [(3, 4, 5)])
def testKeywordArguments(self):
class VarArgsClass2(NSObject):
def instanceMethod1_(self, arg1, **kwds):
arg1.append(kwds)
def classMethod1_(cls, arg1, **kwds):
arg1.append(kwds)
classMethod1_ = classmethod(classMethod1_)
o = VarArgsClass2.alloc().init()
lst = []
o.instanceMethod1_(lst, a=1, c=2)
self.assertEqual(lst, [{"a": 1, "c": 2}])
lst = []
VarArgsClass2.classMethod1_(lst, foo="bar", baz="foo")
self.assertEqual(lst, [{"foo": "bar", "baz": "foo"}])
|
2ed6a91ad5a33ef4e286c0ddb75b3a92dae8d88a
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/hummingbot/connector/exchange/hitbtc/hitbtc_api_order_book_data_source.py
|
764fa52fe2364b4408b78591e6f70ec5e4a0b5af
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 11,733
|
py
|
hitbtc_api_order_book_data_source.py
|
import asyncio
import logging
import time
from decimal import Decimal
from typing import Any, Dict, List, Optional
import aiohttp
import pandas as pd
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.logger import HummingbotLogger
from .hitbtc_active_order_tracker import HitbtcActiveOrderTracker
from .hitbtc_constants import Constants
from .hitbtc_order_book import HitbtcOrderBook
from .hitbtc_utils import HitbtcAPIError, api_call_with_retries, str_date_to_ts, translate_asset
from .hitbtc_websocket import HitbtcWebsocket
class HitbtcAPIOrderBookDataSource(OrderBookTrackerDataSource):
_logger: Optional[HummingbotLogger] = None
_trading_pair_symbol_map: Dict[str, str] = {}
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, trading_pairs: List[str] = None):
super().__init__(trading_pairs)
self._trading_pairs: List[str] = trading_pairs
        self._snapshot_msg: Dict[str, Any] = {}
@classmethod
async def init_trading_pair_symbols(cls, shared_session: Optional[aiohttp.ClientSession] = None):
"""Initialize _trading_pair_symbol_map class variable
"""
symbols: List[Dict[str, Any]] = await api_call_with_retries(
"GET",
Constants.ENDPOINT["SYMBOL"],
shared_client=shared_session)
cls._trading_pair_symbol_map = {
symbol_data["id"]: (f"{translate_asset(symbol_data['baseCurrency'])}-"
f"{translate_asset(symbol_data['quoteCurrency'])}")
for symbol_data in symbols
}
@classmethod
async def trading_pair_symbol_map(cls) -> Dict[str, str]:
if not cls._trading_pair_symbol_map:
await cls.init_trading_pair_symbols()
return cls._trading_pair_symbol_map
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, Decimal]:
results = {}
if len(trading_pairs) > 1:
            tickers: List[Dict[str, Any]] = await api_call_with_retries("GET", Constants.ENDPOINT["TICKER"])
for trading_pair in trading_pairs:
ex_pair: str = await HitbtcAPIOrderBookDataSource.exchange_symbol_associated_to_pair(trading_pair)
if len(trading_pairs) > 1:
                ticker: Dict[str, Any] = [tic for tic in tickers if tic['symbol'] == ex_pair][0]
else:
url_endpoint = Constants.ENDPOINT["TICKER_SINGLE"].format(trading_pair=ex_pair)
                ticker: Dict[str, Any] = await api_call_with_retries("GET", url_endpoint)
            results[trading_pair] = Decimal(str(ticker["last"]))
return results
@staticmethod
async def exchange_symbol_associated_to_pair(trading_pair: str) -> str:
symbol_map = await HitbtcAPIOrderBookDataSource.trading_pair_symbol_map()
symbols = [symbol for symbol, pair in symbol_map.items() if pair == trading_pair]
if symbols:
symbol = symbols[0]
else:
raise ValueError(f"There is no symbol mapping for trading pair {trading_pair}")
return symbol
@staticmethod
async def trading_pair_associated_to_exchange_symbol(symbol: str) -> str:
symbol_map = await HitbtcAPIOrderBookDataSource.trading_pair_symbol_map()
return symbol_map[symbol]
@staticmethod
async def fetch_trading_pairs() -> List[str]:
symbols_map = await HitbtcAPIOrderBookDataSource.trading_pair_symbol_map()
return list(symbols_map.values())
@staticmethod
    async def get_order_book_data(trading_pair: str) -> Dict[str, Any]:
"""
Get whole orderbook
"""
try:
ex_pair = await HitbtcAPIOrderBookDataSource.exchange_symbol_associated_to_pair(trading_pair)
            orderbook_response: Dict[str, Any] = await api_call_with_retries(
                "GET", Constants.ENDPOINT["ORDER_BOOK"], params={"limit": 150, "symbols": ex_pair})
return orderbook_response[ex_pair]
except HitbtcAPIError as e:
err = e.error_payload.get('error', e.error_payload)
raise IOError(
f"Error fetching OrderBook for {trading_pair} at {Constants.EXCHANGE_NAME}. "
f"HTTP status is {e.error_payload['status']}. Error is {err.get('message', str(err))}.")
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
snapshot: Dict[str, Any] = await self.get_order_book_data(trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = HitbtcOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp,
metadata={"trading_pair": trading_pair})
order_book = self.order_book_create_function()
active_order_tracker: HitbtcActiveOrderTracker = HitbtcActiveOrderTracker()
bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
return order_book
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
"""
Listen for trades using websocket trade channel
"""
while True:
try:
ws = HitbtcWebsocket()
await ws.connect()
for pair in self._trading_pairs:
symbol = await HitbtcAPIOrderBookDataSource.exchange_symbol_associated_to_pair(pair)
await ws.subscribe(Constants.WS_SUB["TRADES"], symbol)
async for response in ws.on_message():
method: str = response.get("method", None)
                    trades_data: Dict[str, Any] = response.get("params", None)
if trades_data is None or method != Constants.WS_METHODS['TRADES_UPDATE']:
continue
pair: str = await self.trading_pair_associated_to_exchange_symbol(response["params"]["symbol"])
for trade in trades_data["data"]:
                        trade: Dict[str, Any] = trade
trade_timestamp: int = str_date_to_ts(trade["timestamp"])
trade_msg: OrderBookMessage = HitbtcOrderBook.trade_message_from_exchange(
trade,
trade_timestamp,
metadata={"trading_pair": pair})
output.put_nowait(trade_msg)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error.", exc_info=True)
await asyncio.sleep(5.0)
finally:
await ws.disconnect()
async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
"""
Listen for orderbook diffs using websocket book channel
"""
while True:
try:
ws = HitbtcWebsocket()
await ws.connect()
order_book_methods = [
Constants.WS_METHODS['ORDERS_SNAPSHOT'],
Constants.WS_METHODS['ORDERS_UPDATE'],
]
for pair in self._trading_pairs:
symbol = await HitbtcAPIOrderBookDataSource.exchange_symbol_associated_to_pair(pair)
await ws.subscribe(Constants.WS_SUB["ORDERS"], symbol)
async for response in ws.on_message():
method: str = response.get("method", None)
                    order_book_data: Dict[str, Any] = response.get("params", None)
if order_book_data is None or method not in order_book_methods:
continue
timestamp: int = str_date_to_ts(order_book_data["timestamp"])
pair: str = await self.trading_pair_associated_to_exchange_symbol(order_book_data["symbol"])
order_book_msg_cls = (HitbtcOrderBook.diff_message_from_exchange
if method == Constants.WS_METHODS['ORDERS_UPDATE'] else
HitbtcOrderBook.snapshot_message_from_exchange)
orderbook_msg: OrderBookMessage = order_book_msg_cls(
order_book_data,
timestamp,
metadata={"trading_pair": pair})
output.put_nowait(orderbook_msg)
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unexpected error with WebSocket connection.", exc_info=True,
app_warning_msg="Unexpected error with WebSocket connection. Retrying in 30 seconds. "
"Check network connection.")
await asyncio.sleep(30.0)
finally:
await ws.disconnect()
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
"""
Listen for orderbook snapshots by fetching orderbook
"""
while True:
try:
for trading_pair in self._trading_pairs:
try:
snapshot: Dict[str, any] = await self.get_order_book_data(trading_pair)
snapshot_timestamp: int = str_date_to_ts(snapshot["timestamp"])
snapshot_msg: OrderBookMessage = HitbtcOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp,
metadata={"trading_pair": trading_pair}
)
output.put_nowait(snapshot_msg)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
# Be careful not to go above API rate limits.
await asyncio.sleep(5.0)
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unexpected error with WebSocket connection.", exc_info=True,
app_warning_msg="Unexpected error with WebSocket connection. Retrying in 5 seconds. "
"Check network connection.")
await asyncio.sleep(5.0)
this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)
next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)
delta: float = next_hour.timestamp() - time.time()
await asyncio.sleep(delta)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error.", exc_info=True)
await asyncio.sleep(5.0)
async def listen_for_subscriptions(self):
"""
Connects to the trade events and order diffs websocket endpoints and listens to the messages sent by the
exchange. Each message is stored in its own queue.
"""
# This connector does not use this base class method and needs a refactoring
pass
|
4db084de13085cc52888f77fb3e972f332590d1f
|
374b3f27fe3cf032e88eccac5992c83eba0ad1b2
|
/tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial2_Solution_a74c1904.py
|
58dc3fd2e480250ec944d34dab6e2f020865c2c0
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
NeuromatchAcademy/course-content
|
e2fdca96bcbdc78afaa209e4e77438f44a56c82d
|
3d638d00f02d9fd269fa2aff7d062558afdcb126
|
refs/heads/main
| 2023-08-16T16:09:09.314153
| 2023-08-02T06:21:49
| 2023-08-02T06:21:49
| 262,856,980
| 2,678
| 1,079
|
CC-BY-4.0
| 2023-08-17T00:32:24
| 2020-05-10T19:09:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
W3D1_Tutorial2_Solution_a74c1904.py
|
"""
1) No, no matter what parameters we choose for the Gaussian, the peak of the expected
utility is the same. In other words, we would choose the same action (provide the same
location estimate) under all 3 losses.
2) Yes, the peak of the expected utility is in a different location for each loss when
using a mixture-of-Gaussians distribution.
3) When using mean-squared error, the peak is at the location of the mean. For
absolute error, the peak is located at the median. And for zero-one loss, the
peaks are at the two mode values.
4) When a distribution has more than one maximum, it is multi-modal! This means
it can have more than one mode. You will only ever have one mean and one median.
""";
|
28ac8cea912731fdc09ed918417042974759b659
|
927a94b9fd97a12c74302adc994d6b0bf3f4e27a
|
/codalab/apps/api/routers.py
|
f834e68641ea8b17da870581eaebd263cd16424c
|
[
"Apache-2.0"
] |
permissive
|
codalab/codalab-competitions
|
0a7f77c690ad6bca3769a272d423d958917b8256
|
a3e12648ea80e23f21938103d41d70eb0917d833
|
refs/heads/develop
| 2023-08-30T21:39:02.989301
| 2023-08-21T16:15:34
| 2023-08-21T16:15:34
| 10,556,194
| 425
| 116
|
NOASSERTION
| 2023-08-26T15:23:30
| 2013-06-07T18:23:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,106
|
py
|
routers.py
|
from apps.api.views import competition_views as views
from apps.api.views import storage_views as storage_views
from apps.api.views import admin_views as admin_views
from django.conf.urls import url
from rest_framework import routers
from rest_framework.documentation import include_docs_urls
router = routers.DefaultRouter()
router.register(r'competition/(?P<competition_id>\d+)/participants', views.CompetitionParticipantAPIViewSet)
router.register(r'competition', views.CompetitionAPIViewSet)
router.register(r'competition/(?P<competition_id>\d+)/leaderboards', views.LeaderBoardViewSet)
router.register(r'defaultcontent', views.DefaultContentViewSet)
urlpatterns = router.urls
urlpatterns += (
url(r'^competition/create$', views.CompetitionCreationApi.as_view(), name='api_competition_creation'),
url(r'^competition/create/sas$', views.CompetitionCreationSasApi.as_view(), name='api_competition_creation_sas'),
url(r'^competition/create/(?P<token>\d+)$', views.CompetitionCreationStatusApi.as_view(), name='api_competition_creation_status'),
url(r'^competition/(?P<competition_id>\d+)/submission$', views.competition_submission_create, name='api_competition_submission_post'),
url(r'^competition/(?P<competition_id>\d+)/submission/sas$', views.CompetitionSubmissionSasApi.as_view(), name='api_competition_submission_sas'),
url(r'^competition/(?P<competition_id>\d+)/submission/(?P<pk>\d+)$', views.competition_submission_retrieve, name='api_competition_submission_get'),
url(r'^competition/(?P<competition_id>\d+)/submission/(?P<pk>\d+)/leaderboard$', views.competition_submission_leaderboard, name='api_competition_submission_leaderboard'),
url(r'^competition/(?P<competition_id>\d+)/submissions/?$', views.CompetitionSubmissionListViewSet.as_view({'get': 'list'}), name='api_competition_submission_list'),
url(r'^competition/(?P<pk>\d+)/phases/(?P<phasenumber>\d+)$', views.competitionphase_retrieve, name='api_competitionphase'),
url(r'^competition/(?P<competition_id>\d+)/phases/(?P<phase_id>\d+)/leaderboard$', views.leaderboard_retrieve, name='api_phase_leaderboard'),
url(r'^competition/(?P<competition_id>\d+)/phases/(?P<phase_id>\d+)/leaderboard/data$', views.LeaderBoardDataViewSet.as_view(), name='api_phase_leaderboarddata'),
url(r'^competition/(?P<pk>\d+)/phases/$', views.competitionphase_list, name='api_competitionphases_list'),
url(r'^competition/(?P<competition_id>\d+)/pages/(?P<category>[a-zA-Z][\w\d\-\_]*)/$', views.competition_page_list, name='api_competition_page_list'),
url(r'^competition/(?P<competition_id>\d+)/pages/(?P<pk>\d+)$', views.competition_page, name='api_competition_page'),
url(r'^competition/(?P<competition_id>\d+)/pages/$', views.competition_page_list, name='api_competition_page_list'),
url(r'^competition/(?P<pk>\d+)/pages/(?P<entity_label>\w[\w\d\-\_]+)/$', views.competition_page_list, name='api_competition_page_list'),
    # Chagrade-specific features
url(r'^submission/(?P<submission_id>\d+)/get_score', views.SubmissionScoreView.as_view(), name='submission_score'),
url(r'^competition/(?P<competition_id>\d+)/enable_chagrade', views.AddChagradeBotView.as_view(), name='enable_chagrade'),
# Storage Analytics
url(r'^storage/analytics', storage_views.GetExistingStorageAnalytics.as_view(), name="existing_storage_analytics"),
url(r'^storage/usage-history', storage_views.GetStorageUsageHistory.as_view(), name="storage_usage_history"),
# Admin
url(r'^admin/competitions/list', admin_views.GetCompetitions.as_view(), name="competitions"),
url(r'^admin/competitions/update', admin_views.UpdateCompetitions.as_view(), name="update_competitions"),
url(r'^admin/competition/(?P<competition_id>\d+)/apply_upper_bound_limit', admin_views.ApplyUpperBoundLimit.as_view(), name="apply_upper_bound_limit"),
url(r'^admin/competitions/default_upper_bound_limit', admin_views.GetDefaultUpperBoundLimit.as_view(), name="get_default_upper_bound_limit"),
# API Docs
url(r'^docs/', include_docs_urls(title='Codalab API Reference', public=False))
)
|
2e53b213b0e12d933272db5791c7f64b39a70124
|
d4239425234eacb647c4cc4f2f4c8537b618fca0
|
/onadata/apps/restservice/tasks.py
|
1aeaaa4ac61183090ee6f97d198893810feb5d11
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
onaio/onadata
|
58762d6a606870bd13d43fd27fdaa61720a745c2
|
e5bdec91cb47179172b515bbcb91701262ff3377
|
refs/heads/main
| 2023-09-04T03:12:43.388668
| 2023-08-24T07:27:08
| 2023-08-24T07:27:08
| 12,888,897
| 177
| 149
|
NOASSERTION
| 2023-09-13T14:19:05
| 2013-09-17T07:25:01
|
Python
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
tasks.py
|
# -*- coding: utf-8 -*-
"""
restservice async functions.
"""
from onadata.apps.logger.models.instance import Instance
from onadata.apps.restservice.utils import call_service
from onadata.celeryapp import app
@app.task()
def call_service_async(instance_pk):
"""Async function that calls call_service()."""
# load the parsed instance
try:
instance = Instance.objects.get(pk=instance_pk)
except Instance.DoesNotExist:
# if the instance has already been removed we do not send it to the
# service
pass
else:
call_service(instance)
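# Typical invocation from synchronous code (a minimal sketch using the
# standard Celery task API):
#   call_service_async.delay(instance.pk)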
|
9b09f31bfbfd21de735fc114e7803658ff2a2dd7
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/306.py
|
3c533082d24b56603df39e99a1a95da0a39bbd05
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 3,650
|
py
|
306.py
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
if(num=="19910011992"):
return False
        if(len(num)<=2):
            return False
def create(check,num,sol):
if(num==''):
return
ck={}
if(len(sol)==0):
for i in range(len(check)-1):
                    if ((len(check[:i + 1]) > 1 and check[:i + 1][0] == '0') or (len(check[i + 1:]) > 1 and check[i + 1:][0] == '0')):
                        continue  # skipping the leading zeros
ck[int(check[:i+1])+int(check[i+1:])]=(int(check[:i+1]),int(check[i+1:]))
elif(len(sol)!=0):
ck[sol[-1]+sol[-2]]=(sol[-2],sol[-1])
for k in ck.keys():
if(k==int(num[:len(str(k))])):
sol.append(ck[k][1])
sol.append(int(num[:len(str(k))]))
val=create([],num[len(str(k)):],sol)
if(val==False):
return False
return True
if(len(str(k))>len(num[:len(str(k))])):
if(str(k)[:len(num[:len(str(k))])]==num[:len(str(k))]):
return True
else:
val = False
continue
else:
val=False
return val
k=2
val=False
while(val==False):
val=create(num[:k],num[k:],[])
k+=1
        return val is True
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
if len(num) < 3: return False
for i in range(1, len(num)):
if i > 1 and num[0] == '0':
break
for j in range(i+1, len(num)):
if num[i] == '0' and j > i + 1:
continue
first, second, third = 0, i, j
while third < len(num):
res = str(int(num[first:second]) + int(num[second:third]))
if num[third:].startswith(res):
first, second, third = second, third, third+len(res)
else: break
if third == len(num):
return True
return False
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def valid(self, i, j, num):
# Check if the sequence start with [0:i] and [i:j] is valid.
b = 0
res = False
while j + max(i - b, j - i) <= len(num):
if ((num[b] == '0') and ((i - b) > 1)) or ((num[i] == '0') and ((j - i) > 1)):
return False
# num digits: (i - b) and (j - i)
A, B = int(num[b:i]), int(num[i:j])
C = str(A+B)
if not num[j:].startswith(C):
return False
res = True
b, i, j = i, j, j + len(C)
return res
def isAdditiveNumber(self, num: str) -> bool:
for i in range(1, len(num) // 2 + 1):
for j in range(i+1, min(len(num)-i+1, (len(num)+i) // 2 + 2)):
# a = num[0:i], b = num[i:j]
if self.valid(i, j, num):
return True
return False
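# Illustrative checks against the solutions above (added; not part of any
# sampled submission):
#   "112358"    -> True   (1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8)
#   "199100199" -> True   (1 + 99 = 100, 99 + 100 = 199)
#   "1023"      -> False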
|
a327d54b870b6e62add833eeecbfb5c5ee84fa00
|
6e56e6b4bb562cd1db6e38b5f089b863b77e087f
|
/dragonfly/nn/nn_visualise.py
|
2ae4438c2353130e30bfc6f34dfa627197e86041
|
[
"MIT"
] |
permissive
|
dragonfly/dragonfly
|
aa5f3a64bfe7800c44c32e58b487b5733c40035d
|
3eef7d30bcc2e56f2221a624bd8ec7f933f81e40
|
refs/heads/master
| 2023-08-06T08:34:29.317771
| 2022-10-01T22:21:50
| 2022-10-01T22:21:50
| 130,418,835
| 868
| 374
|
MIT
| 2023-06-19T20:23:17
| 2018-04-20T22:19:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,651
|
py
|
nn_visualise.py
|
"""
Harness for visualising a neural network.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
import functools
import os
try:
import graphviz as gv
except ImportError as ie:
err_msg = 'To use the nn_visualise module, you need to install graphviz. This can ' + \
'be installed via `pip install graphviz`.'
raise ImportError('%s %s'%(err_msg, ie))
# Parameters for plotting
_SAVE_FORMAT = 'eps'
# _SAVE_FORMAT = 'png'
_LAYER_SHAPE = 'rectangle'
_IPOP_SHAPE = 'circle'
_LAYER_FONT = 'DejaVuSans'
_IPOP_FONT = 'Helvetica'
_LAYER_FONTSIZE = '16'
_FILLCOLOR = 'transparent'
_IPOP_FONTSIZE = '12'
_IPOP_FILLCOLOR = '#ffc0cb'
_DECISION_FILLCOLOR = '#98fb98'
_GRAPH_STYLES = {
'graph': {
'fontsize': _LAYER_FONTSIZE,
'rankdir': 'TB',
'label': None,
},
'nodes': {
},
'edges': {
'arrowhead': 'open',
'fontsize': '12',
}
}
GV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)
GV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)
# Utilities for adding nodes, edges and styles -------------------------------------------
def add_nodes(graph, nodes):
""" Adds nodes to the graph. """
for n in nodes:
if isinstance(n, tuple):
graph.node(n[0], **n[1])
else:
graph.node(n)
return graph
def add_edges(graph, edges):
""" Adds edges to the graph. """
# pylint: disable=star-args
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
def apply_styles(graph, styles):
""" Applies styles to the graph. """
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
# Wrappers for tedious routines ----------------------------------------------------------
def _get_ip_layer(layer_idx):
""" Returns a tuple representing the input layer. """
return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_op_layer(layer_idx):
""" Returns a tuple representing the output layer. """
return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_layer(layer_idx, nn, for_pres):
""" Returns a tuple representing the layer label. """
if nn.layer_labels[layer_idx] in ['ip', 'op']:
fill_colour = _IPOP_FILLCOLOR
elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:
fill_colour = _DECISION_FILLCOLOR
else:
fill_colour = _FILLCOLOR
label = nn.get_layer_descr(layer_idx, for_pres)
return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,
'style': 'filled', 'fontname': _LAYER_FONT})
def _get_edge(layer_idx_start, layer_idx_end):
""" Returns a tuple which is an edge. """
return (str(layer_idx_start), str(layer_idx_end))
def _get_edges(conn_mat):
""" Returns all edges. """
starts, ends = conn_mat.nonzero()
return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]
# Main API ------------------------------------------------------------------------------
def visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):
""" The main API which will be used to visualise the network. """
  # First create the nodes in layer order
nodes = [_get_layer(i, nn, for_pres) for i in range(nn.num_layers)]
edges = _get_edges(nn.conn_mat)
nn_graph = GV_DIGRAPH()
add_nodes(nn_graph, nodes)
add_edges(nn_graph, edges)
graph_styles = _GRAPH_STYLES
graph_styles['graph']['label'] = fig_label
apply_styles(nn_graph, graph_styles)
nn_graph.render(save_file_prefix)
if os.path.exists(save_file_prefix):
# graphviz also creates another file in the name of the prefix. delete it.
os.remove(save_file_prefix)
def visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,
for_pres=False):
""" Visualises a list of neural networks. """
if fig_labels is None:
fig_labels = [None] * len(list_of_nns)
if fig_file_names is None:
fig_file_names = [str(idx) for idx in range(len(list_of_nns))]
for idx, nn in enumerate(list_of_nns):
save_file_prefix = os.path.join(save_dir, fig_file_names[idx])
visualise_nn(nn, save_file_prefix, fig_labels[idx], for_pres)
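# A minimal usage sketch (assumes an `nn` object exposing num_layers,
# layer_labels, conn_mat and get_layer_descr, as consumed above):
#   visualise_nn(nn, '/tmp/my_nn', fig_label='Candidate network')
#   # renders /tmp/my_nn.eps; the intermediate graphviz source file is removed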
|
b1a894e7fdb2f45dd2be028dd7e7fba4b4f02660
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/lib/filesystem/local.py
|
59bd09dcf6efbe72c558e6b006134547bc5b6d1a
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
local.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
from typing import List, Dict, Union, Tuple, Iterator, BinaryIO, TextIO
from ...utils import implements, stringify_path
from .base import FileSystem, path_type
class LocalFileSystem(FileSystem):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = LocalFileSystem()
return cls._instance
@implements(FileSystem.cat)
def cat(self, path: path_type):
with self.open(path, "rb") as f:
return f.read()
@implements(FileSystem.ls)
def ls(self, path: path_type) -> List[path_type]:
path = stringify_path(path)
return sorted(os.path.join(path, x) for x in os.listdir(path))
@implements(FileSystem.delete)
def delete(self, path: path_type, recursive: bool = False):
if os.path.isfile(path):
os.remove(path)
elif not recursive:
os.rmdir(path)
else:
shutil.rmtree(path)
@implements(FileSystem.rename)
def rename(self, path: path_type, new_path: path_type):
os.rename(path, new_path)
@implements(FileSystem.stat)
def stat(self, path: path_type) -> Dict:
os_stat = os.stat(path)
stat = dict(name=path, size=os_stat.st_size, modified_time=os_stat.st_mtime)
if os.path.isfile(path):
stat["type"] = "file"
elif os.path.isdir(path):
stat["type"] = "directory"
else: # pragma: no cover
stat["type"] = "other"
return stat
@implements(FileSystem.mkdir)
def mkdir(self, path: path_type, create_parents: bool = True):
path = stringify_path(path)
if create_parents:
os.makedirs(path)
else:
os.mkdir(path)
@implements(FileSystem.isdir)
def isdir(self, path: path_type) -> bool:
path = stringify_path(path)
return os.path.isdir(path)
@implements(FileSystem.isfile)
def isfile(self, path: path_type) -> bool:
path = stringify_path(path)
return os.path.isfile(path)
@implements(FileSystem._isfilestore)
def _isfilestore(self) -> bool:
return True
@implements(FileSystem.exists)
def exists(self, path: path_type):
path = stringify_path(path)
return os.path.exists(path)
@implements(FileSystem.open)
def open(self, path: path_type, mode: str = "rb") -> Union[BinaryIO, TextIO]:
path = stringify_path(path)
return open(path, mode=mode)
@implements(FileSystem.walk)
def walk(self, path: path_type) -> Iterator[Tuple[str, List[str], List[str]]]:
path = stringify_path(path)
return os.walk(path)
@implements(FileSystem.glob)
def glob(self, path: path_type, recursive: bool = False) -> List[path_type]:
path = stringify_path(path)
return glob.glob(path, recursive=recursive)
@property
def pathsep(self) -> str:
return os.path.sep
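# A minimal usage sketch (illustrative; the /tmp path is hypothetical):
#   fs = LocalFileSystem.get_instance()
#   fs.mkdir('/tmp/mars_demo', create_parents=True)
#   with fs.open('/tmp/mars_demo/a.txt', 'wb') as f:
#       f.write(b'hello')
#   assert fs.cat('/tmp/mars_demo/a.txt') == b'hello'
#   assert fs.stat('/tmp/mars_demo/a.txt')['type'] == 'file'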
|
0cc043a3cf88f6801bbfeddc8353895067518a20
|
b6a0ea1a9d00f2bd6b7853d5f3e6864b1bec2e89
|
/dataset_preprocessing/preprocess_Multicam_videos.py
|
ba41517dae11a8289c6f8ee850749dbb9ad08989
|
[
"MIT"
] |
permissive
|
AdrianNunez/Fall-Detection-with-CNNs-and-Optical-Flow
|
1958a29986027c1b8f8df361fe16805988ae6512
|
da4723315185f87e3a50552e148afddc85cbad1f
|
refs/heads/master
| 2023-02-24T14:48:27.410774
| 2022-05-24T17:10:37
| 2022-05-24T17:10:37
| 96,987,041
| 216
| 74
|
MIT
| 2023-02-15T18:33:10
| 2017-07-12T09:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,571
|
py
|
preprocess_Multicam_videos.py
|
import os
import cv2
import json
import glob
import shutil
import zipfile
# Path where the videos are stored (in zip format, as in a fresh download)
data_folder = '/home/user/Downloads/'
dst_folder = '' # folder where the dataset is going to be unzipped
output_base_path = 'Multicam_images/'
fall_annotations_file = 'annotations_multicam.json'
delays_file = 'delays_multicam.json'
num_scenarios = 24
num_cameras = 8
W, H = 224, 224 # shape of new images (resize is applied)
# Extract all files and organise them (if necessary)
if len(glob.glob(dst_folder + '*')) == 0:
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
filepath = data_folder + 'dataset.zip'
zfile = zipfile.ZipFile(filepath)
pos = filepath.rfind('/')
zfile.extractall(dst_folder)
dir = glob.glob(dst_folder + '*')
for path in glob.glob(dir[0] + '/*'):
save_path = dst_folder + path[path.rfind('/')+1:]
os.makedirs(save_path)
for filePath in glob.glob(path + '/*'):
shutil.move(filePath, save_path)
shutil.rmtree(dir[0])
with open(fall_annotations_file, 'r') as json_file:
annotations = json.load(json_file)
with open(delays_file, 'r') as json_file:
delays = json.load(json_file)
# For each scenario
for s in range(1,num_scenarios+1):
# Get all videos (one per camera)
videos = glob.glob(dst_folder + 'chute{:02}/*'.format(s))
videos.sort()
starts = annotations['scenario{}'.format(s)]['start']
ends = annotations['scenario{}'.format(s)]['end']
for cam, video in enumerate(videos, 1):
cap = cv2.VideoCapture(video)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Delay of this camera for this scenario
delay = delays['camera{}'.format(cam)][str(s)]
for start, end in zip(starts, ends):
# Apply the delay
start += delay
end += delay
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
# Read the pre-fall part
pos = 0
while pos < start:
ret, frame = cap.read()
pos += 1
output_path = (
output_base_path +
'chute{:02}/NotFalls/camera{}_pre/'.format(
s, cam
))
if not os.path.exists(output_path):
os.makedirs(output_path)
cv2.imwrite(output_path + 'img_{:05d}.jpg'.format(int(pos)),
cv2.resize(frame, (W,H)),
[int(cv2.IMWRITE_JPEG_QUALITY), 95])
# Read the fall part
assert cap.get(cv2.CAP_PROP_POS_FRAMES) == start
while pos <= end:
ret, frame = cap.read()
pos += 1
output_path = (
output_base_path +
'chute{:02}/Falls/camera{}/'.format(
s, cam
))
if not os.path.exists(output_path):
os.makedirs(output_path)
cv2.imwrite(output_path + 'img_{:05d}.jpg'.format(int(pos)),
cv2.resize(frame, (W,H)),
[int(cv2.IMWRITE_JPEG_QUALITY), 95])
# Read the post-fall part
assert cap.get(cv2.CAP_PROP_POS_FRAMES) == end + 1
while pos < length:
ret, frame = cap.read()
pos += 1
output_path = (
output_base_path +
'chute{:02}/NotFalls/camera{}_post/'.format(
s, cam
))
if not os.path.exists(output_path):
os.makedirs(output_path)
cv2.imwrite(output_path + 'img_{:05d}.jpg'.format(int(pos)),
cv2.resize(frame, (W,H)),
[int(cv2.IMWRITE_JPEG_QUALITY), 95])
# If there was no fall
if len(starts) == 0 and len(ends) == 0:
pos = 0
while pos < length:
ret, frame = cap.read()
pos += 1
output_path = (
output_base_path +
'chute{:02}/NotFalls/camera{}_full/'.format(
s, cam
))
if not os.path.exists(output_path):
os.makedirs(output_path)
cv2.imwrite(output_path + 'img_{:05d}.jpg'.format(int(pos)),
cv2.resize(frame, (W,H)),
[int(cv2.IMWRITE_JPEG_QUALITY), 95])
|
ec135d10ceda8a30e7351b26acbf6a5ac7e16998
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/众数/169.多数元素-摩尔投票.py
|
24ed1a8b9c95e1b8d0125a96f414e15e74ab4a12
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
169.多数元素-摩尔投票.py
|
# Return the majority element: the element that appears more than ⌊ n/2 ⌋ times in the array.
# You may assume the array is non-empty and that a majority element always exists.
from typing import List, Optional
def majorityElement(nums: List[int]) -> Optional[int]:
"""摩尔投票算法求数组中的绝对众数 (出现次数严格大于 n//2 )"""
res, count = None, 0
for num in nums:
if num == res:
count += 1
elif count == 0:
res = num
count = 1
else:
count -= 1
return res if nums.count(res) > len(nums) // 2 else None # type: ignore
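if __name__ == "__main__":
    # Quick sanity checks of the Boyer-Moore vote (added for illustration):
    assert majorityElement([2, 2, 1, 1, 1, 2, 2]) == 2
    assert majorityElement([1, 2, 3]) is None  # no absolute majority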
|
91af5efb2f08fd2932f4e18dfc2dda4fef709e50
|
07df6279388a17192eb4e4e417383a1f56208839
|
/configs/h3dnet/h3dnet_3x8_scannet-3d-18class.py
|
e6534a4be6d948416fda1becb9cb03e3d9cdf038
|
[
"Apache-2.0"
] |
permissive
|
HuangJunJie2017/BEVDet
|
11d4ca45286739c9bd099f715cb0edc9408a914f
|
f71858d02eb0fbd09860150ade67558d7984b1be
|
refs/heads/dev2.1
| 2023-05-23T15:35:45.216750
| 2023-05-07T16:35:04
| 2023-05-07T16:35:04
| 432,979,408
| 985
| 192
|
Apache-2.0
| 2023-04-28T15:06:51
| 2021-11-29T09:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
h3dnet_3x8_scannet-3d-18class.py
|
_base_ = [
'../_base_/datasets/scannet-3d-18class.py', '../_base_/models/h3dnet.py',
'../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
rpn_head=dict(
num_classes=18,
bbox_coder=dict(
type='PartialBinBasedBBoxCoder',
num_sizes=18,
num_dir_bins=24,
with_rot=False,
mean_sizes=[[0.76966727, 0.8116021, 0.92573744],
[1.876858, 1.8425595, 1.1931566],
[0.61328, 0.6148609, 0.7182701],
[1.3955007, 1.5121545, 0.83443564],
[0.97949594, 1.0675149, 0.6329687],
[0.531663, 0.5955577, 1.7500148],
[0.9624706, 0.72462326, 1.1481868],
[0.83221924, 1.0490936, 1.6875663],
[0.21132214, 0.4206159, 0.5372846],
[1.4440073, 1.8970833, 0.26985747],
[1.0294262, 1.4040797, 0.87554324],
[1.3766412, 0.65521795, 1.6813129],
[0.6650819, 0.71111923, 1.298853],
[0.41999173, 0.37906948, 1.7513971],
[0.59359556, 0.5912492, 0.73919016],
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]])),
roi_head=dict(
bbox_head=dict(
num_classes=18,
bbox_coder=dict(
type='PartialBinBasedBBoxCoder',
num_sizes=18,
num_dir_bins=24,
with_rot=False,
mean_sizes=[[0.76966727, 0.8116021, 0.92573744],
[1.876858, 1.8425595, 1.1931566],
[0.61328, 0.6148609, 0.7182701],
[1.3955007, 1.5121545, 0.83443564],
[0.97949594, 1.0675149, 0.6329687],
[0.531663, 0.5955577, 1.7500148],
[0.9624706, 0.72462326, 1.1481868],
[0.83221924, 1.0490936, 1.6875663],
[0.21132214, 0.4206159, 0.5372846],
[1.4440073, 1.8970833, 0.26985747],
[1.0294262, 1.4040797, 0.87554324],
[1.3766412, 0.65521795, 1.6813129],
[0.6650819, 0.71111923, 1.298853],
[0.41999173, 0.37906948, 1.7513971],
[0.59359556, 0.5912492, 0.73919016],
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]))))
data = dict(samples_per_gpu=3, workers_per_gpu=2)
# yapf:disable
log_config = dict(interval=30)
# yapf:enable
|
05c95f12c39d664454e1f55452a0f2a381ce1707
|
b52f09af1ca0445a0b84ad40e86d7bad9629d241
|
/scrapyd/launcher.py
|
ec3329498373e166da745b7464e221f62afb97a0
|
[
"BSD-3-Clause"
] |
permissive
|
scrapy/scrapyd
|
7650f0147eb6da57c37610fdea9b622036952e08
|
67a0d2124169e5fd83ecd4224ed8782d4e4f152d
|
refs/heads/master
| 2023-08-19T08:21:06.060726
| 2023-08-07T16:32:29
| 2023-08-07T16:32:29
| 7,964,360
| 2,766
| 678
|
BSD-3-Clause
| 2023-08-07T16:32:49
| 2013-02-01T19:06:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,653
|
py
|
launcher.py
|
import sys
from datetime import datetime
from multiprocessing import cpu_count
from twisted.application.service import Service
from twisted.internet import defer, error, protocol, reactor
from twisted.python import log
from scrapyd import __version__
from scrapyd.interfaces import IEnvironment, IJobStorage, IPoller
from scrapyd.utils import get_crawl_args, native_stringify_dict
class Launcher(Service):
name = 'launcher'
def __init__(self, config, app):
self.processes = {}
self.finished = app.getComponent(IJobStorage)
self.max_proc = self._get_max_proc(config)
self.runner = config.get('runner', 'scrapyd.runner')
self.app = app
def startService(self):
for slot in range(self.max_proc):
self._wait_for_project(slot)
log.msg(format='Scrapyd %(version)s started: max_proc=%(max_proc)r, runner=%(runner)r',
version=__version__, max_proc=self.max_proc,
runner=self.runner, system='Launcher')
def _wait_for_project(self, slot):
poller = self.app.getComponent(IPoller)
poller.next().addCallback(self._spawn_process, slot)
def _spawn_process(self, message, slot):
e = self.app.getComponent(IEnvironment)
message.setdefault('settings', {})
message['settings'].update(e.get_settings(message))
msg = native_stringify_dict(message, keys_only=False)
project = msg['_project']
args = [sys.executable, '-m', self.runner, 'crawl']
args += get_crawl_args(msg)
env = e.get_environment(msg, slot)
env = native_stringify_dict(env, keys_only=False)
pp = ScrapyProcessProtocol(project, msg['_spider'], msg['_job'], env, args)
pp.deferred.addBoth(self._process_finished, slot)
reactor.spawnProcess(pp, sys.executable, args=args, env=env)
self.processes[slot] = pp
def _process_finished(self, _, slot):
process = self.processes.pop(slot)
process.end_time = datetime.now()
self.finished.add(process)
self._wait_for_project(slot)
def _get_max_proc(self, config):
max_proc = config.getint('max_proc', 0)
if not max_proc:
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
max_proc = cpus * config.getint('max_proc_per_cpu', 4)
return max_proc
class ScrapyProcessProtocol(protocol.ProcessProtocol):
def __init__(self, project, spider, job, env, args):
self.pid = None
self.project = project
self.spider = spider
self.job = job
self.start_time = datetime.now()
self.end_time = None
self.env = env
self.args = args
self.deferred = defer.Deferred()
def outReceived(self, data):
log.msg(data.rstrip(), system="Launcher,%d/stdout" % self.pid)
def errReceived(self, data):
log.msg(data.rstrip(), system="Launcher,%d/stderr" % self.pid)
def connectionMade(self):
self.pid = self.transport.pid
self.log("Process started: ")
def processEnded(self, status):
if isinstance(status.value, error.ProcessDone):
self.log("Process finished: ")
else:
self.log("Process died: exitstatus=%r " % status.value.exitCode)
self.deferred.callback(self)
def log(self, action):
fmt = '%(action)s project=%(project)r spider=%(spider)r job=%(job)r pid=%(pid)r args=%(args)r'
log.msg(format=fmt, action=action, project=self.project, spider=self.spider,
job=self.job, pid=self.pid, args=self.args)
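# Sizing example (follows _get_max_proc above): with max_proc unset, 8 CPUs and
# the default max_proc_per_cpu of 4, the launcher keeps at most 8 * 4 = 32
# crawl slots busy.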
|
55b8e9b5712fea0f909be025bf9a49ddb47b88ec
|
017b1261bac4a6ed7e613474f328239188366491
|
/tests/jsonutils/test_resolver.py
|
85640a8105c18ab9354c20fad612cf09214ef1a0
|
[
"Apache-2.0"
] |
permissive
|
aws-cloudformation/cloudformation-cli
|
bd4834bfe8b39c9fc926f9c77710b2c6d1b167c1
|
75bed278bcec94739e4c132e2b3d88a4fddb5bf4
|
refs/heads/master
| 2023-08-07T18:24:56.153849
| 2023-07-31T22:54:23
| 2023-07-31T22:54:23
| 143,929,054
| 270
| 164
|
Apache-2.0
| 2023-08-31T16:06:04
| 2018-08-07T21:33:19
|
Python
|
UTF-8
|
Python
| false
| false
| 7,747
|
py
|
test_resolver.py
|
# pylint: disable=protected-access,redefined-outer-name
import pytest
from rpdk.core.exceptions import ModelResolverError
from rpdk.core.jsonutils.resolver import (
FORMAT_DEFAULT,
UNDEFINED,
ContainerType,
ModelResolver,
ResolvedType,
resolve_models,
)
def test_resolve_models():
# want to avoid a complex test here, since it could hide missed
# cases in the more detailed tests
models = resolve_models({})
assert not models
def test_resolved_type_repr():
representation = repr(ResolvedType("foo", "bar"))
assert "foo" in representation
assert "bar" in representation
def test_modelresolver_empty_ref_path_results_in_model_name():
flattened = {(): {"properties": {"foo": {"type": "string"}}}}
resolver = ModelResolver(flattened, "ResourceModel")
assert resolver._models == {(): "ResourceModel"}
def test_modelresolver_duplicate_model_name():
flattened = {
(): {"properties": {"ResourceModel": {"type": "object"}}},
("properties", "ResourceModel"): {"type": "object"},
}
with pytest.raises(ModelResolverError) as excinfo:
ModelResolver(flattened)
assert "ResourceModel" in str(excinfo.value)
def test_modelresolver_unique_model_name():
unique = {
"type": "object",
"properties": {"foo": {"type": "string"}, "bar": {"type": "integer"}},
}
flattened = {
(): {
"definitions": {"Unique": unique},
"properties": {"Unique": {"$ref": ("definitions", "Unique")}},
},
("definitions", "Unique"): unique,
}
resolver = ModelResolver(flattened)
assert resolver._models == {
(): "ResourceModel",
("definitions", "Unique"): "Unique",
}
models = resolver.resolve_models()
assert models == {
"ResourceModel": {"Unique": ResolvedType(ContainerType.MODEL, "Unique")},
"Unique": {
"foo": ResolvedType(ContainerType.PRIMITIVE, "string"),
"bar": ResolvedType(ContainerType.PRIMITIVE, "integer"),
},
}
@pytest.mark.parametrize(
"schema,result",
(
({"type": "array"}, ContainerType.LIST),
({"type": "array", "uniqueItems": False}, ContainerType.LIST),
({"type": "array", "uniqueItems": True}, ContainerType.LIST),
({"type": "array", "insertionOrder": False}, ContainerType.LIST),
({"type": "array", "insertionOrder": True}, ContainerType.LIST),
(
{"type": "array", "insertionOrder": True, "uniqueItems": True},
ContainerType.LIST,
),
(
{"type": "array", "insertionOrder": True, "uniqueItems": False},
ContainerType.LIST,
),
(
{"type": "array", "insertionOrder": False, "uniqueItems": True},
ContainerType.SET,
),
(
{"type": "array", "insertionOrder": False, "uniqueItems": False},
ContainerType.LIST,
),
),
)
def test_modelresolver__get_array_container_type(schema, result):
container_type = ModelResolver._get_array_container_type(schema)
assert container_type == result
def test_modelresolver__get_primitive_lang_type():
sentinel = object()
resolved_type = ModelResolver._get_primitive_lang_type(sentinel, {})
assert resolved_type.container == ContainerType.PRIMITIVE
assert resolved_type.type is sentinel
assert resolved_type.type_format == FORMAT_DEFAULT
@pytest.mark.parametrize(
"schema,result",
(
({"type": "array"}, UNDEFINED),
({"type": "array", "items": {"type": "string"}}, "string"),
),
)
def test_modelresolver__get_array_lang_type(schema, result):
resolver = ModelResolver({})
resolved_type = resolver._get_array_lang_type(schema)
assert resolved_type.container == ContainerType.LIST
item_type = resolved_type.type
assert item_type.container == ContainerType.PRIMITIVE
assert item_type.type == result
@pytest.mark.parametrize(
"schema,result",
(
({"type": "object"}, UNDEFINED),
({"type": "object", "patternProperties": {}}, UNDEFINED),
(
{
"type": "object",
"patternProperties": {
"^S_": {"type": "string"},
"^I_": {"type": "integer"},
},
},
UNDEFINED,
),
(
{"type": "object", "patternProperties": {"^S_": {"type": "string"}}},
"string",
),
),
)
def test_modelresolver__get_object_lang_type(schema, result):
resolver = ModelResolver({})
resolved_type = resolver._get_object_lang_type(schema)
assert resolved_type.container == ContainerType.DICT
item_type = resolved_type.type
assert item_type.container == ContainerType.PRIMITIVE
assert item_type.type == result
assert item_type.type_format == FORMAT_DEFAULT
def test_modelresolver__schema_to_lang_type_ref():
resolver = ModelResolver({(): {}})
assert resolver._models == {(): "ResourceModel"}
resolved_type = resolver._schema_to_lang_type({"$ref": ()})
assert resolved_type.container == ContainerType.MODEL
assert resolved_type.type == "ResourceModel"
def test_modelresolver__schema_to_lang_type_array():
# see test_modelresolver__get_array_lang_type_no_item_type
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({"type": "array"})
assert resolved_type.container == ContainerType.LIST
item_type = resolved_type.type
assert item_type.container == ContainerType.PRIMITIVE
assert item_type.type == UNDEFINED
def test_modelresolver__schema_to_lang_type_object():
# see test_modelresolver__get_object_lang_type_no_item_type
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({"type": "object"})
assert resolved_type.container == ContainerType.DICT
item_type = resolved_type.type
assert item_type.container == ContainerType.PRIMITIVE
assert item_type.type == UNDEFINED
assert item_type.type_format == FORMAT_DEFAULT
def test_modelresolver__schema_to_lang_type_undef():
# see test_modelresolver__get_object_lang_type_no_item_type
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({})
assert resolved_type.container == ContainerType.DICT
item_type = resolved_type.type
assert item_type.container == ContainerType.PRIMITIVE
assert item_type.type == UNDEFINED
def test_modelresolver__schema_to_lang_type_primitive():
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({"type": "string"})
assert resolved_type.container == ContainerType.PRIMITIVE
assert resolved_type.type == "string"
assert resolved_type.type_format == FORMAT_DEFAULT
def test_modelresolver__schema_to_lang_type_multiple():
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({"type": ["string", "object"]})
assert resolved_type.container == ContainerType.MULTIPLE
assert resolved_type.type == "multiple"
def test_modelresolver__schema_to_lang_duplicatetype():
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type({"type": ["string", "string"]})
assert resolved_type.container == ContainerType.PRIMITIVE
assert resolved_type.type == "string"
def test_modelresolver__schema_to_lang_type_primitive_nondefault_format():
resolver = ModelResolver({})
resolved_type = resolver._schema_to_lang_type(
{"type": "integer", "format": "int64"}
)
assert resolved_type.container == ContainerType.PRIMITIVE
assert resolved_type.type == "integer"
assert resolved_type.type_format == "int64"
|
5d27033a164228c862d20b3ab6f20ac73fd1fee2
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/datasets/data/dataset_lists.py
|
da07d14006c1fc919eb1e898d51ea01eb190e6f0
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
dataset_lists.py
|
"""Lists of datasets provided in this directory."""
classification_data = [
"ACSF1",
"ArrowHead",
"BasicMotions",
"ItalyPowerDemand",
"OSULeaf",
"PLAID",
]
|
5949a805ed49967a80c8a1f8ce0c6baa38aaad4f
|
ff7afe98eaaf7b5b0b7d77ea37dacb0e355fc141
|
/test/test_probabilistic.py
|
4cf18091b1d670dcd2a3dfd67e665277ecfe5aff
|
[
"MIT"
] |
permissive
|
jameschapman19/cca_zoo
|
d433c14037ec22ce464531a2a523899d76c3a139
|
084901cf1a0d484d0d8c6a30775971569b5b8da0
|
refs/heads/main
| 2023-08-08T00:39:16.412081
| 2023-08-07T12:31:07
| 2023-08-07T12:31:07
| 303,801,602
| 166
| 34
|
MIT
| 2023-09-14T11:34:24
| 2020-10-13T18:58:42
|
Python
|
UTF-8
|
Python
| false
| false
| 941
|
py
|
test_probabilistic.py
|
import numpy as np
import pytest
from cca_zoo.data.simulated import LinearSimulatedData
from cca_zoo.linear import CCA
def test_PCCA():
# Some users might not have access to jax/numpyro, so leave this as an optional test locally.
pytest.importorskip("numpyro")
from cca_zoo.probabilistic import ProbabilisticCCA
np.random.seed(0)
# Test probabilistic CCA against vanilla CCA on simulated linear data
X, Y = LinearSimulatedData([5, 5]).sample(100)
latent_dims = 1
cca = CCA(latent_dimensions=latent_dims).fit([X, Y])
pcca = ProbabilisticCCA(
latent_dimensions=latent_dims, num_warmup=1000, num_samples=1000
).fit([X, Y])
# Test that vanilla CCA and probabilistic CCA produce roughly similar latent spaces, i.e. they are correlated
assert (
np.abs(
np.corrcoef(
cca.transform([X, Y])[1].T,
pcca.posterior_samples["z"].mean(axis=0)[:, 0],
)[0, 1]
)
> 0.9
)
|
98b2e6e088792e5ac984c4a06860bf65e698dfb6
|
8c794e3e2e7f14edb39444c1d534406cec480f7c
|
/cv_bridge/test/python_bindings.py
|
a92972ee91f93a91393f8b73dff41bca5a2aac03
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ros-perception/vision_opencv
|
74f6372e702c539f444d096a5a1490cc02ce2263
|
0b017fe32b9ac73631bbd74a9edf75394df191c0
|
refs/heads/rolling
| 2023-08-20T07:48:31.485000
| 2023-07-20T23:35:31
| 2023-07-20T23:35:31
| 5,579,081
| 500
| 577
|
Apache-2.0
| 2023-09-06T07:32:24
| 2012-08-28T00:47:46
|
C++
|
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
python_bindings.py
|
import cv_bridge
import numpy as np
def test_cvtColorForDisplay():
# convert label image to display
label = np.zeros((480, 640), dtype=np.int32)
height, width = label.shape[:2]
label_value = 0
grid_num_y, grid_num_x = 3, 4
for grid_row in range(grid_num_y):
grid_size_y = height // grid_num_y
min_y = grid_size_y * grid_row
max_y = min_y + grid_size_y
for grid_col in range(grid_num_x):
grid_size_x = width // grid_num_x
min_x = grid_size_x * grid_col
max_x = min_x + grid_size_x
label[min_y:max_y, min_x:max_x] = label_value
label_value += 1
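    # Worked arithmetic for the grid above (editorial note): with a 480x640
    # image and a 3x4 grid, each cell is 160x160 pixels and the label values
    # run from 0 to 11 (grid_num_y * grid_num_x - 1).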
label_viz = cv_bridge.cvtColorForDisplay(label, '32SC1', 'bgr8')
assert label_viz.dtype == np.uint8
assert label_viz.min() == 0
assert label_viz.max() == 255
# Check that mono8 conversion returns the right shape.
bridge = cv_bridge.CvBridge()
mono = np.random.random((100, 100)) * 255
mono = mono.astype(np.uint8)
input_msg = bridge.cv2_to_imgmsg(mono, encoding='mono8')
output = bridge.imgmsg_to_cv2(input_msg, desired_encoding='mono8')
assert output.shape == (100, 100)
|
27165acf3ff65547a7eb038d07ae79cc8f817bad
|
05643b9b4d20db912c3dbfbc191cadea3143016c
|
/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_utilities.py
|
b6ff7d311a435c3d7c7deb083240826630c51191
|
[
"Apache-2.0"
] |
permissive
|
open-telemetry/opentelemetry-python-contrib
|
35566cd088aa0b23ca977109fcd435ee480784b9
|
0871dd455c0adfa125a2f258a0b55c47a5da5227
|
refs/heads/main
| 2023-08-26T07:30:40.212226
| 2023-08-21T16:42:12
| 2023-08-21T16:42:12
| 220,524,743
| 476
| 401
|
Apache-2.0
| 2023-09-14T21:36:33
| 2019-11-08T18:23:43
|
Python
|
UTF-8
|
Python
| false
| false
| 997
|
py
|
_utilities.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal utilities."""
class RpcInfo:
def __init__(
self,
full_method=None,
metadata=None,
timeout=None,
request=None,
response=None,
error=None,
):
self.full_method = full_method
self.metadata = metadata
self.timeout = timeout
self.request = request
self.response = response
self.error = error
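# A minimal usage sketch (hypothetical values, not part of this module): an
# interceptor would typically create the record up front and fill in the
# response or error once the RPC completes.
#
#     rpc_info = RpcInfo(full_method="/helloworld.Greeter/SayHello", timeout=5.0)
#     rpc_info.response = b"..."  # set after the call returns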
|
07880d7a4628ebeb9fc0720fb59473e4952688d7
|
4cf75f821c10c3a0fd78ac46d94862a192f3bf9a
|
/binding/python/tests/test_rbdyn_parsers.py
|
2225d0e265d6db6a9f9e55bd7c03fd3e75f4eed0
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
jrl-umi3218/RBDyn
|
b6adf3ce3861c9dd9eb91ddedbdc814d7e263519
|
22c5dde7b48b0037e106e7901b0611ccd506ae5e
|
refs/heads/master
| 2023-08-07T23:21:21.452812
| 2023-07-25T21:04:05
| 2023-08-01T06:27:31
| 3,740,096
| 116
| 45
|
BSD-2-Clause
| 2023-09-12T05:25:14
| 2012-03-16T15:22:12
|
C++
|
UTF-8
|
Python
| false
| false
| 5,934
|
py
|
test_rbdyn_parsers.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2019 CNRS-UM LIRMM, CNRS-AIST JRL
#
import unittest
import rbdyn
urdf_model = """<robot name="XYZSarm">
<link name="b0">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0" />
<mass value="1" />
<inertia ixx="0.1" ixy="0.0" ixz="0.0"
iyy="0.05" iyz="0.0" izz="0.001" />
</inertial>
<visual>
<origin rpy="0. 0. 0." xyz=".1 .2 .3"/>
<geometry>
<mesh filename="test_mesh1.dae"/>
</geometry>
</visual>
<visual>
<origin rpy="0 0 0" xyz="0 0 0"/>
<geometry>
<mesh filename="test_mesh2.dae"/>
</geometry>
</visual>
<visual>
<origin rpy="0 0 0" xyz="0 0 0"/>
</visual>
</link>
<link name="b1">
<inertial>
<origin rpy="0 0 0" xyz="0 0.5 0" />
<mass value="5." />
<inertia ixx="0.1" ixy="0.0" ixz="0.0"
iyy="0.05" iyz="0.0" izz="0.001" />
</inertial>
</link>
<link name="b2">
<inertial>
<origin rpy="0 0 0" xyz="0 0.5 0" />
<mass value="2." />
<inertia ixx="0.1" ixy="0.0" ixz="0.0"
iyy="0.05" iyz="0.0" izz="0.001" />
</inertial>
</link>
<link name="b3">
<inertial>
<origin rpy="0 0 0" xyz="0 0.5 0" />
<mass value="1.5" />
<inertia ixx="0.1" ixy="0.0" ixz="0.0"
iyy="0.05" iyz="0.0" izz="0.001" />
</inertial>
</link>
<link name="b4">
<inertial>
<origin rpy="0 0 0" xyz="0.5 0 0" />
<mass value="1" />
<inertia ixx="0.1" ixy="0.0" ixz="0.0"
iyy="0.05" iyz="0.0" izz="0.001" />
</inertial>
</link>
<joint name="j0" type="revolute">
<parent link="b0" />
<child link="b1" />
<origin rpy="0 0 0" xyz="0 1 0" />
<axis xyz="1 0 0" />
<limit lower="-1" upper="1" velocity="10" effort="50" />
</joint>
<joint name="j1" type="revolute">
<parent link="b1" />
<child link="b2" />
<origin rpy="0 0 0" xyz="0 1 0" />
<axis xyz="0 1 0" />
<limit lower="-1" upper="1" velocity="10" effort="50" />
</joint>
<joint name="j2" type="revolute">
<parent link="b2" />
<child link="b3" />
<origin rpy="0 0 0" xyz="0 1 0" />
<axis xyz="0 0 1" />
<limit lower="-1" upper="1" velocity="10" effort="50" />
</joint>
<joint name="j3" type="continuous">
<parent link="b1" />
<child link="b4" />
<origin rpy="1. 0 0" xyz="1 0 0" />
<axis xyz="1 0 0" />
</joint>
</robot>
"""
yaml_model = """robot:
name: XYZSarm
anglesInDegrees: false
links:
- name: b0
inertial:
mass: 1
frame:
xyz: [0, 0, 0]
rpy: [0, 0, 0]
inertia:
Ixx: 0.1
Iyy: 0.05
Izz: 0.001
Iyz: 0
Ixz: 0
Ixy: 0
visual:
- frame:
xyz: [0.1, 0.2, 0.3]
rpy: [0, 0, 0]
geometry:
mesh:
filename: test_mesh1.dae
- frame:
xyz: [0, 0, 0]
rpy: [0, 0, 0]
geometry:
mesh:
filename: test_mesh2.dae
- frame:
xyz: [0, 0, 0]
rpy: [0, 0, 0]
- name: b1
inertial:
mass: 5
frame:
xyz: [0, 0.5, 0]
rpy: [0, 0, 0]
inertia:
Ixx: 0.1
Iyy: 0.05
Izz: 0.001
Iyz: 0
Ixz: 0
Ixy: 0
- name: b2
inertial:
mass: 2
frame:
xyz: [0, 0.5, 0]
rpy: [0, 0, 0]
inertia:
Ixx: 0.1
Iyy: 0.05
Izz: 0.001
Iyz: 0
Ixz: 0
Ixy: 0
- name: b3
inertial:
mass: 1.5
frame:
xyz: [0, 0.5, 0]
rpy: [0, 0, 0]
inertia:
Ixx: 0.1
Iyy: 0.05
Izz: 0.001
Iyz: 0
Ixz: 0
Ixy: 0
- name: b4
inertial:
mass: 1
frame:
xyz: [0.5, 0, 0]
rpy: [0, 0, 0]
inertia:
Ixx: 0.1
Iyy: 0.05
Izz: 0.001
Iyz: 0
Ixz: 0
Ixy: 0
joints:
- name: j0
parent: b0
child: b1
type: revolute
axis: [1, 0, 0]
frame:
xyz: [0, 1, 0]
rpy: [0, 0, 0]
limits:
upper: 1
lower: -1
velocity: 10
effort: 50
- name: j1
parent: b1
child: b2
type: revolute
axis: [0, 1, 0]
frame:
xyz: [0, 1, 0]
rpy: [0, 0, 0]
limits:
upper: 1
lower: -1
velocity: 10
effort: 50
- name: j2
parent: b2
child: b3
type: revolute
axis: [0, 0, 1]
frame:
xyz: [0, 1, 0]
rpy: [0, 0, 0]
limits:
upper: 1
lower: -1
velocity: 10
effort: 50
- name: j3
parent: b1
child: b4
type: continuous
axis: [1, 0, 0]
frame:
xyz: [1, 0, 0]
rpy: [1, 0, 0]
anglesInDegrees: false
"""
class TestRBDynParsers(unittest.TestCase):
def check_result(self, parser_result):
self.assertEqual(parser_result.name, b"XYZSarm")
self.assertEqual(len(parser_result.visual), 1)
self.assertEqual(len(parser_result.collision), 0)
self.assertEqual(len(parser_result.limits.lower), 4)
self.assertEqual(len(parser_result.limits.upper), 4)
self.assertEqual(len(parser_result.limits.velocity), 4)
self.assertEqual(len(parser_result.limits.torque), 4)
self.assertEqual(parser_result.mb.nrBodies(), 5)
self.assertEqual(parser_result.mb.nrJoints(), 5)
self.assertEqual(parser_result.mbg.nrNodes(), 5)
self.assertEqual(parser_result.mbg.nrJoints(), 4)
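        # Editorial note: the model defines 5 links (b0..b4) and 4 joints
        # (j0..j3), so the graph reports 4 joints while the MultiBody reports
        # 5, which suggests an implicit root joint is counted.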
def test(self):
self.check_result(rbdyn.parsers.from_urdf(urdf_model))
self.check_result(rbdyn.parsers.from_yaml(yaml_model))
|
bc9fc2d2f1ffaeaf4a4264caec866508566839cc
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/sysconfig.py
|
6475ac704f30b2146b02ea4ca6ec94a291625dac
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 21,807
|
py
|
sysconfig.py
|
"""
Sysconfig - files in ``/etc/sysconfig/``
========================================
This is a collection of parsers that all deal with the system's configuration
files under the ``/etc/sysconfig/`` folder. Parsers included in this module
are:
CorosyncSysconfig - file ``/etc/sysconfig/corosync``
----------------------------------------------------
ChronydSysconfig - file ``/etc/sysconfig/chronyd``
--------------------------------------------------
DirsrvSysconfig - file ``/etc/sysconfig/dirsrv``
------------------------------------------------
DockerStorageSetupSysconfig - file ``/etc/sysconfig/docker-storage-setup``
--------------------------------------------------------------------------
DockerSysconfig - file ``/etc/sysconfig/docker``
------------------------------------------------
DockerSysconfigStorage - file ``/etc/sysconfig/docker-storage``
---------------------------------------------------------------
ForemanTasksSysconfig - file ``/etc/sysconfig/foreman-tasks``
-------------------------------------------------------------
HttpdSysconfig - file ``/etc/sysconfig/httpd``
----------------------------------------------
IrqbalanceSysconfig - file ``/etc/sysconfig/irqbalance``
--------------------------------------------------------
KdumpSysconfig - file ``/etc/sysconfig/kdump``
----------------------------------------------
LibvirtGuestsSysconfig - file ``/etc/sysconfig/libvirt-guests``
---------------------------------------------------------------
MemcachedSysconfig - file ``/etc/sysconfig/memcached``
------------------------------------------------------
MongodSysconfig - file ``/etc/sysconfig/mongod``
------------------------------------------------
NetconsoleSysconfig - file ``/etc/sysconfig/netconsole``
----------------------------------------------------------
NetworkSysconfig - file ``/etc/sysconfig/network``
----------------------------------------------------
NfsSysconfig - file ``/etc/sysconfig/nfs``
------------------------------------------
NtpdSysconfig - file ``/etc/sysconfig/ntpd``
--------------------------------------------
PrelinkSysconfig - file ``/etc/sysconfig/prelink``
--------------------------------------------------
SshdSysconfig - file ``/etc/sysconfig/sshd``
--------------------------------------------
StonithSysconfig - file ``/etc/sysconfig/stonith``
--------------------------------------------------
PuppetserverSysconfig - file ``/etc/sysconfig/puppetserver``
------------------------------------------------------------
Up2DateSysconfig - file ``/etc/sysconfig/rhn/up2date``
------------------------------------------------------
VirtWhoSysconfig - file ``/etc/sysconfig/virt-who``
---------------------------------------------------
IfCFGStaticRoute - files ``/etc/sysconfig/network-scripts/route-*``
-------------------------------------------------------------------
GrubSysconfig - files ``/etc/sysconfig/grub``
---------------------------------------------
OracleasmSysconfig - files ``/etc/sysconfig/oracleasm``
-------------------------------------------------------
"""
from insights import parser, SysconfigOptions, get_active_lines
from insights.specs import Specs
@parser(Specs.corosync)
class CorosyncSysconfig(SysconfigOptions):
"""
This parser reads the ``/etc/sysconfig/corosync`` file. It uses the
``SysconfigOptions`` parser class to convert the file into a dictionary of
options. It also provides the ``options`` property as a helper to retrieve
the ``COROSYNC_OPTIONS`` variable.
Sample Input::
# COROSYNC_INIT_TIMEOUT specifies number of seconds to wait for corosync
# initialization (default is one minute).
COROSYNC_INIT_TIMEOUT=60
# COROSYNC_OPTIONS specifies options passed to corosync command
# (default is no options).
# See "man corosync" for detailed descriptions of the options.
COROSYNC_OPTIONS=""
Examples:
>>> 'COROSYNC_OPTIONS' in cs_syscfg
True
>>> cs_syscfg.options
''
"""
@property
def options(self):
""" (str): The value of the ``COROSYNC_OPTIONS`` variable."""
return self.data.get('COROSYNC_OPTIONS', '')
@parser(Specs.sysconfig_chronyd)
class ChronydSysconfig(SysconfigOptions):
"""
This parser analyzes the ``/etc/sysconfig/chronyd`` configuration file.
Sample Input::
OPTIONS="-d"
#HIDE="me"
Examples:
>>> 'OPTIONS' in chronyd_syscfg
True
>>> 'HIDE' in chronyd_syscfg
False
>>> chronyd_syscfg['OPTIONS']
'-d'
"""
pass
@parser(Specs.dirsrv)
class DirsrvSysconfig(SysconfigOptions):
"""
This parser parses the `dirsrv` service's start-up configuration
``/etc/sysconfig/dirsrv``.
Sample Input::
#STARTPID_TIME=10 ; export STARTPID_TIME
#PID_TIME=600 ; export PID_TIME
KRB5CCNAME=/tmp/krb5cc_995
KRB5_KTNAME=/etc/dirsrv/ds.keytab
Examples:
>>> dirsrv_syscfg.get('KRB5_KTNAME')
'/etc/dirsrv/ds.keytab'
>>> 'PID_TIME' in dirsrv_syscfg
False
"""
pass
@parser(Specs.docker_storage_setup)
class DockerStorageSetupSysconfig(SysconfigOptions):
"""
Parser for parsing ``/etc/sysconfig/docker-storage-setup``
Sample Input::
VG=vgtest
AUTO_EXTEND_POOL=yes
##name = mydomain
POOL_AUTOEXTEND_THRESHOLD=60
POOL_AUTOEXTEND_PERCENT=20
Examples:
>>> dss_syscfg['VG'] # Pseudo-dict access
'vgtest'
>>> 'name' in dss_syscfg
False
>>> dss_syscfg.get('POOL_AUTOEXTEND_THRESHOLD')
'60'
"""
pass
@parser(Specs.docker_sysconfig)
class DockerSysconfig(SysconfigOptions):
"""
Parser for parsing the ``/etc/sysconfig/docker`` file using the standard
``SysconfigOptions`` parser class. The 'OPTIONS' variable is also provided
in the ``options`` property as a convenience.
Sample Input::
OPTIONS="--selinux-enabled"
DOCKER_CERT_PATH="/etc/docker"
Examples:
>>> 'OPTIONS' in docker_syscfg
True
>>> docker_syscfg['OPTIONS']
'--selinux-enabled'
>>> docker_syscfg.options
'--selinux-enabled'
>>> docker_syscfg['DOCKER_CERT_PATH']
'/etc/docker'
"""
@property
def options(self):
""" Return the value of the 'OPTIONS' variable, or '' if not defined. """
return self.data.get('OPTIONS', '')
@parser(Specs.docker_storage)
class DockerSysconfigStorage(SysconfigOptions):
"""
A Parser for /etc/sysconfig/docker-storage.
Sample input::
DOCKER_STORAGE_OPTIONS="--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=/dev/mapper/dockervg-docker--pool --storage-opt dm.use_deferred_removal=true --storage-opt dm.use_deferred_deletion=true"
Examples:
>>> 'DOCKER_STORAGE_OPTIONS' in docker_syscfg_storage
True
>>> docker_syscfg_storage["DOCKER_STORAGE_OPTIONS"]
'--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=/dev/mapper/dockervg-docker--pool --storage-opt dm.use_deferred_removal=true --storage-opt dm.use_deferred_deletion=true'
>>> docker_syscfg_storage.storage_options
'--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=/dev/mapper/dockervg-docker--pool --storage-opt dm.use_deferred_removal=true --storage-opt dm.use_deferred_deletion=true'
"""
@property
def storage_options(self):
""" Return the value of the 'DOCKER_STORAGE_OPTIONS' variable, or '' if not defined. """
return self.data.get('DOCKER_STORAGE_OPTIONS', '')
@parser(Specs.foreman_tasks_config)
class ForemanTasksSysconfig(SysconfigOptions):
"""
Parse the ``/etc/sysconfig/foreman-tasks`` configuration file.
Sample configuration file::
FOREMAN_USER=foreman
BUNDLER_EXT_HOME=/usr/share/foreman
RAILS_ENV=production
FOREMAN_LOGGING=warn
Examples:
>>> ft_syscfg['RAILS_ENV']
'production'
>>> 'AUTO' in ft_syscfg
False
"""
pass
@parser(Specs.sysconfig_httpd)
class HttpdSysconfig(SysconfigOptions):
"""
This parser analyzes the ``/etc/sysconfig/httpd`` configuration file.
Sample Input::
HTTPD=/usr/sbin/httpd.worker
#
# To pass additional options (for instance, -D definitions) to the
# httpd binary at startup, set OPTIONS here.
#
OPTIONS=
Examples:
>>> httpd_syscfg['HTTPD']
'/usr/sbin/httpd.worker'
>>> httpd_syscfg.get('OPTIONS')
''
>>> 'NOOP' in httpd_syscfg
False
"""
pass
@parser(Specs.sysconfig_irqbalance)
class IrqbalanceSysconfig(SysconfigOptions):
"""
This parser analyzes the ``/etc/sysconfig/irqbalance`` configuration file.
Sample Input::
#IRQBALANCE_ONESHOT=yes
#
IRQBALANCE_BANNED_CPUS=f8
IRQBALANCE_ARGS="-d"
Examples:
>>> irqb_syscfg['IRQBALANCE_BANNED_CPUS']
'f8'
>>> irqb_syscfg.get('IRQBALANCE_ARGS') # quotes will be stripped
'-d'
>>> irqb_syscfg.get('IRQBALANCE_ONESHOT') is None
True
>>> 'ONESHOT' in irqb_syscfg
False
"""
pass
@parser(Specs.sysconfig_kdump)
class KdumpSysconfig(SysconfigOptions):
"""
This parser reads data from the ``/etc/sysconfig/kdump`` file.
This parser sets the following properties for ease of access:
* KDUMP_COMMANDLINE
* KDUMP_COMMANDLINE_REMOVE
* KDUMP_COMMANDLINE_APPEND
* KDUMP_KERNELVER
* KDUMP_IMG
* KDUMP_IMG_EXT
* KEXEC_ARGS
These are set to the value of the named variable in the kdump sysconfig
file, or '' if not found.
"""
KDUMP_KEYS = [
'KDUMP_COMMANDLINE',
'KDUMP_COMMANDLINE_REMOVE',
'KDUMP_COMMANDLINE_APPEND',
'KDUMP_KERNELVER',
'KDUMP_IMG',
'KDUMP_IMG_EXT',
'KEXEC_ARGS',
]
def parse_content(self, content):
super(KdumpSysconfig, self).parse_content(content)
for key in self.KDUMP_KEYS:
setattr(self, key, self.data.get(key, ''))
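# Illustrative access pattern for KdumpSysconfig (assuming a parsed file that
# contains KEXEC_ARGS="-p 31" and leaves KDUMP_IMG unset):
#     kdump_syscfg.KEXEC_ARGS  ->  '-p 31'
#     kdump_syscfg.KDUMP_IMG   ->  ''    (keys missing from the file default to '')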
@parser(Specs.sysconfig_libvirt_guests)
class LibvirtGuestsSysconfig(SysconfigOptions):
"""
This parser analyzes the ``/etc/sysconfig/libvirt-guests`` configuration file.
Sample Input::
# URIs to check for running guests
# example: URIS='default xen:/// vbox+tcp://host/system lxc:///'
#URIS=default
ON_BOOT=ignore
Examples:
>>> libvirt_guests_syscfg.get('ON_BOOT')
'ignore'
"""
pass
@parser(Specs.sysconfig_memcached)
class MemcachedSysconfig(SysconfigOptions):
"""
This parser analyzes the ``/etc/sysconfig/memcached`` configuration file.
Sample Input::
PORT="11211"
USER="memcached"
# max connection 2048
MAXCONN="2048"
# set ram size to 2048 - 2GiB
CACHESIZE="4096"
# disable UDP and listen to loopback ip 127.0.0.1, for network connection use real ip e.g., 10.0.0.5
OPTIONS="-U 0 -l 127.0.0.1"
Examples:
>>> memcached_syscfg.get('OPTIONS')
'-U 0 -l 127.0.0.1'
"""
pass
@parser(Specs.sysconfig_mongod)
class MongodSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``mongod`` service configuration file, like
'/etc/sysconfig/mongod' and '/etc/opt/rh/rh-mongodb26/sysconfig/mongod'.
Sample Input::
OPTIONS="--quiet -f /etc/mongod.conf"
Examples:
>>> mongod_syscfg.get('OPTIONS')
'--quiet -f /etc/mongod.conf'
>>> mongod_syscfg.get('NO_SUCH_OPTION') is None
True
>>> 'NOSUCHOPTION' in mongod_syscfg
False
"""
pass
@parser(Specs.netconsole)
class NetconsoleSysconfig(SysconfigOptions):
'''
Parse the ``/etc/sysconfig/netconsole`` file.
Sample Input::
# The local port number that the netconsole module will use
LOCALPORT=6666
Examples:
>>> 'LOCALPORT' in netcs_syscfg
True
>>> 'DEV' in netcs_syscfg
False
'''
pass
@parser(Specs.sysconfig_network)
class NetworkSysconfig(SysconfigOptions):
"""
This parser parses the ``/etc/sysconfig/network`` configuration file
Sample Input::
NETWORKING=yes
HOSTNAME=rhel7-box
GATEWAY=172.31.0.1
NM_BOND_VLAN_ENABLED=no
Examples:
>>> 'NETWORKING' in net_syscfg
True
>>> net_syscfg['GATEWAY']
'172.31.0.1'
"""
pass
@parser(Specs.sysconfig_nfs)
class NfsSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``/etc/sysconfig/nfs`` configuration file.
.. note::
In some RHEL versions, both ``/etc/nfs.conf`` and
``/etc/sysconfig/nfs`` exist and take effect at the same time for the NFS
services on the host, and the two configuration files may overlap.
A combiner for the two configuration files was considered but rejected:
the two files cover different options, it is quite complicated
to enumerate all the configuration options and combine them properly,
and ``/etc/sysconfig/nfs`` is deprecated in recent RHEL releases.
Sample Input::
RPCNFSDARGS="--rdma=20049"
#STATD_PORT=662
Examples:
>>> 'RPCNFSDARGS' in nfs_syscfg
True
>>> nfs_syscfg['RPCNFSDARGS']
'--rdma=20049'
>>> 'STATD_PORT' in nfs_syscfg
False
"""
pass
@parser(Specs.sysconfig_ntpd)
class NtpdSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``/etc/sysconfig/ntpd`` configuration file.
Sample Input::
OPTIONS="-x -g"
#HIDE="me"
Examples:
>>> 'OPTIONS' in ntpd_syscfg
True
>>> 'HIDE' in ntpd_syscfg
False
>>> ntpd_syscfg['OPTIONS']
'-x -g'
"""
pass
@parser(Specs.sysconfig_prelink)
class PrelinkSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``/etc/sysconfig/prelink`` configuration file.
Sample Input::
# Set this to no to disable prelinking altogether
# (if you change this from yes to no prelink -ua
# will be run next night to undo prelinking)
PRELINKING=no
# Options to pass to prelink
# -m Try to conserve virtual memory by allowing overlapping
# assigned virtual memory slots for libraries which
# never appear together in one binary
# -R Randomize virtual memory slot assignments for libraries.
# This makes it slightly harder for various buffer overflow
# attacks, since library addresses will be different on each
# host using -R.
PRELINK_OPTS=-mR
Examples:
>>> prelink_syscfg.get('PRELINKING')
'no'
"""
pass
@parser(Specs.sysconfig_sshd)
class SshdSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``/etc/sysconfig/sshd`` configuration file.
Sample Input::
# Configuration file for the sshd service.
# The server keys are automatically generated if they are missing.
# To change the automatic creation, adjust sshd.service options for
# example using systemctl enable sshd-keygen@dsa.service to allow creation
# of DSA key or systemctl mask sshd-keygen@rsa.service to disable RSA key
# creation.
# System-wide crypto policy:
# To opt-out, uncomment the following line
# CRYPTO_POLICY=
CRYPTO_POLICY=
Examples:
>>> sshd_syscfg.get('CRYPTO_POLICY')
''
>>> 'NONEXISTENT_VAR' in sshd_syscfg
False
>>> 'CRYPTO_POLICY' in sshd_syscfg
True
"""
pass
@parser(Specs.puppetserver_config)
class PuppetserverSysconfig(SysconfigOptions):
"""
Parse the ``/etc/sysconfig/puppetserver`` configuration file.
Sample configuration file::
USER="puppet"
GROUP="puppet"
INSTALL_DIR="/opt/puppetlabs/server/apps/puppetserver"
CONFIG="/etc/puppetlabs/puppetserver/conf.d"
START_TIMEOUT=300
Examples:
>>> pps_syscfg['START_TIMEOUT']
'300'
>>> 'AUTO' in pps_syscfg
False
"""
pass
@parser(Specs.sysconfig_stonith)
class StonithSysconfig(SysconfigOptions):
"""
Class to parse the ``/etc/sysconfig/stonith``
Sample Input::
retry=3
retry-sleep=2
verbose=yes # optional
Examples:
>>> 'retry' in stonith_syscfg
True
>>> stonith_syscfg['retry']
'3'
"""
pass
@parser(Specs.up2date)
class Up2DateSysconfig(SysconfigOptions):
"""
Class to parse the ``/etc/sysconfig/rhn/up2date``
Typical content example::
serverURL[comment]=Remote server URL
#serverURL=https://rhnproxy.glb.tech.markit.partners
serverURL=https://rhnproxy.glb.tech.markit.partners/XMLRPC
Examples:
>>> 'serverURL' in u2d_syscfg
True
>>> u2d_syscfg['serverURL']
'https://rhnproxy.glb.tech.markit.partners/XMLRPC'
"""
def parse_content(self, content):
up2date_info = {}
for line in get_active_lines(content):
if "[comment]" not in line and '=' in line:
# split only on the first '=' so values containing '=' survive intact
key, val = line.split('=', 1)
up2date_info[key.strip()] = val.strip()
self.data = up2date_info
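# Worked example of the filtering above, using the docstring sample: the
# 'serverURL[comment]=...' line is skipped because it contains '[comment]',
# the '#serverURL=...' line is dropped by get_active_lines, and the plain
# 'serverURL=...' line is stored under the key 'serverURL'.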
@parser(Specs.sysconfig_virt_who)
class VirtWhoSysconfig(SysconfigOptions):
"""
A parser for analyzing the ``/etc/sysconfig/virt-who`` configuration file.
Sample Input::
# Register ESX machines using vCenter
# VIRTWHO_ESX=0
# Register guests using RHEV-M
VIRTWHO_RHEVM=1
# Options for RHEV-M mode
VIRTWHO_RHEVM_OWNER=
TEST_OPT="A TEST"
Examples:
>>> vwho_syscfg['VIRTWHO_RHEVM']
'1'
>>> vwho_syscfg.get('VIRTWHO_RHEVM_OWNER')
''
>>> vwho_syscfg.get('NO_SUCH_OPTION') is None
True
>>> 'NOSUCHOPTION' in vwho_syscfg
False
>>> vwho_syscfg.get('TEST_OPT') # Quotes are stripped
'A TEST'
"""
pass
@parser(Specs.ifcfg_static_route)
class IfCFGStaticRoute(SysconfigOptions):
"""
IfCFGStaticRoute is a parser for the static route network interface
definition files in ``/etc/sysconfig/network-scripts``. These are
pulled into the network scripts using ``source``, so they are mainly
``bash`` environment declarations of the form **KEY=value**. These
are stored in the ``data`` property as a dictionary. Quotes surrounding
the values are stripped.
Because this parser reads multiple files, the interfaces are stored as a
list within the parser and need to be iterated through in order to find
specific interfaces.
Sample configuration from a static connection in file ``/etc/sysconfig/network-scripts/route-test-net``::
ADDRESS0=10.65.223.0
NETMASK0=255.255.254.0
GATEWAY0=10.65.223.1
Examples:
>>> conn_info['ADDRESS0']
'10.65.223.0'
>>> conn_info.static_route_name
'test-net'
Attributes:
static_route_name (str): static route name
"""
def parse_content(self, content):
self.static_route_name = self.file_name.split("route-", 1)[1]
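        # e.g. a file named 'route-test-net' yields static_route_name == 'test-net'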
super(IfCFGStaticRoute, self).parse_content(content)
@parser(Specs.sysconfig_grub)
class GrubSysconfig(SysconfigOptions):
"""
Class to parse the ``/etc/sysconfig/grub``
``/etc/sysconfig/grub`` is a symlink of ``/etc/default/grub`` file
Typical content example::
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=ttyS0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto"
GRUB_DISABLE_RECOVERY="true"
GRUB_ENABLE_BLSCFG=true
Examples:
>>> grub_syscfg.get('GRUB_ENABLE_BLSCFG')
'true'
>>> 'NONEXISTENT_VAR' in grub_syscfg
False
>>> 'GRUB_ENABLE_BLSCFG' in grub_syscfg
True
"""
pass
@parser(Specs.sysconfig_oracleasm)
class OracleasmSysconfig(SysconfigOptions):
"""
Class to parse the ``/etc/sysconfig/oracleasm``
Typical content example::
#
# This is a configuration file for automatic loading of the Oracle
# Automatic Storage Management library kernel driver. It is generated
# by running /etc/init.d/oracleasm configure. Please use that method
# to modify this file
#
# ORACLEASM_ENABLED: 'true' means to load the driver on boot.
ORACLEASM_ENABLED=true
# ORACLEASM_UID: Default user owning the /dev/oracleasm mount point.
ORACLEASM_UID=oracle
# ORACLEASM_GID: Default group owning the /dev/oracleasm mount point.
ORACLEASM_GID=oinstall
# ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot.
ORACLEASM_SCANBOOT=true
# ORACLEASM_SCANORDER: Matching patterns to order disk scanning
ORACLEASM_SCANORDER="dm"
# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan
ORACLEASM_SCANEXCLUDE="sd"
Examples:
>>> oracleasm_syscfg.get('ORACLEASM_SCANBOOT')
'true'
>>> 'ORACLEASM_SCANORDER' in oracleasm_syscfg
True
>>> 'ORACLEASM_SCANEXCLUDE_1' in oracleasm_syscfg
False
"""
pass
|
5e3b8bb9b0116eaaa18c037837334f30884b7faa
|
e2ed3d5cf080cd5b8b6f4dd05470b290aed165c3
|
/experimental_code/py-vox-io/setup.py
|
ed18c3936b827f785a11f7258644af601a7614ae
|
[] |
no_license
|
IJDykeman/wangTiles
|
641c128d5c494b475bc1ed1a0717db5d90810bb9
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
refs/heads/master
| 2022-01-20T07:27:26.895920
| 2022-01-10T16:37:02
| 2022-01-10T16:37:02
| 26,565,467
| 157
| 22
| null | 2022-01-10T16:37:03
| 2014-11-13T01:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 910
|
py
|
setup.py
|
import io
import os.path
import re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
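# Illustrative example (hypothetical version string): if pyvox/__init__.py
# contains the line __version__ = '0.1', the regular expression above matches
# it and find_version('pyvox', '__init__.py') returns '0.1'.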
setup(name='py-vox-io',
version=find_version('pyvox', '__init__.py'),
description='A Python parser/writer for the MagicaVoxel .vox format',
author='Gunnar Aastrand Grimnes',
author_email='gromgull@gmail.com',
url='https://github.com/gromgull/py-vox-io/',
packages=find_packages(),
license='BSD',
)
|
b2919be0755188468cbbba9c6fb3c377868689a6
|
39568e19301a7a112398be542154950af25591de
|
/util/reggen/version.py
|
d1c808a917f169f7dfe728da12d22fc29ca249b6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lowRISC/opentitan
|
493995bc7cf7cb3aee486a5203af3fd62bba3bfc
|
51f6017b8425b14d5a4aa9abace8fe5a25ef08c8
|
refs/heads/master
| 2023-08-31T22:05:09.425796
| 2023-08-14T14:52:15
| 2023-08-31T20:31:13
| 204,516,692
| 2,077
| 634
|
Apache-2.0
| 2023-09-14T21:16:21
| 2019-08-26T16:30:16
|
SystemVerilog
|
UTF-8
|
Python
| false
| false
| 834
|
py
|
version.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Standard version printing
"""
import os
import subprocess
import sys
from typing import List
import pkg_resources # part of setuptools
def show_and_exit(clitool: str, packages: List[str]) -> None:
util_path = os.path.dirname(os.path.realpath(clitool))
os.chdir(util_path)
ver = subprocess.run(
["git", "describe", "--always", "--dirty", "--broken"],
stdout=subprocess.PIPE).stdout.strip().decode('ascii')
if ver == '':
ver = 'not found (not in Git repository?)'
sys.stderr.write(clitool + " Git version " + ver + '\n')
for p in packages:
sys.stderr.write(p + ' ' + pkg_resources.require(p)[0].version + '\n')
exit(0)
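# Minimal usage sketch (hypothetical tool and package names): a CLI entry
# point might call
#     show_and_exit(sys.argv[0], ['reggen'])
# to print the repository's `git describe` output and each package version
# to stderr before exiting with status 0.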
|
aa640fa1ff41e8314cba0122f921620afa0656bc
|
1bb42bac177fb4e979faa441363c27cb636a43aa
|
/multi_epoch_dp_matrix_factorization/tff_aggregator_test.py
|
38117f28d284ec26755ad71ff22d2de1391e563b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
google-research/federated
|
a6040e80fa0fbf533e0d665c66a9bc549d208b3d
|
329e60fa56b87f691303638ceb9dfa1fc5083953
|
refs/heads/master
| 2023-08-28T13:10:10.885505
| 2023-08-22T23:06:08
| 2023-08-22T23:06:40
| 295,559,343
| 595
| 187
|
Apache-2.0
| 2022-05-12T08:42:53
| 2020-09-14T23:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 13,719
|
py
|
tff_aggregator_test.py
|
# Copyright 2023, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tff_aggregator."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from multi_epoch_dp_matrix_factorization import matrix_constructors
from multi_epoch_dp_matrix_factorization import matrix_factorization_query
from multi_epoch_dp_matrix_factorization import tff_aggregator
def _make_prefix_sum_matrix(dim: int) -> tf.Tensor:
return tf.constant(np.tril(np.ones(shape=[dim] * 2)), dtype=tf.float32)
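# For reference, _make_prefix_sum_matrix(3) is the lower-triangular matrix
#     [[1, 0, 0],
#      [1, 1, 0],
#      [1, 1, 1]]
# so multiplying a length-3 vector by it yields its running (prefix) sums.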
class PrefixSumAggregatorTest(tf.test.TestCase):
def test_aggregator_factory_constructs(self):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
l2_norm_clip = 1.0
noise_multiplier = 0.0
w_matrix = _make_prefix_sum_matrix(dim)
h_matrix = tf.eye(dim)
clients_per_round = 1
seed = 0
agg_factory = tff_aggregator.create_residual_prefix_sum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
)
self.assertIsInstance(
agg_factory, tff.aggregators.UnweightedAggregationFactory
)
def test_aggregator_raises_with_mismatched_type_structure(self):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
l2_norm_clip = 1.0
noise_multiplier = 0.0
w_matrix = _make_prefix_sum_matrix(dim)
h_matrix = tf.eye(dim)
clients_per_round = 1
seed = 0
agg_factory = tff_aggregator.create_residual_prefix_sum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
)
with self.assertRaises(ValueError):
agg_factory.create(
value_type=tff.types.type_from_tensors([tf.zeros(shape=[])] * 2)
)
def test_unnoised_prefix_sum_aggregator_performs_federated_mean(self):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
# Set the l2 norm clip large enough so that none of the incoming values are
# clipped.
l2_norm_clip = 1.0 * dim
noise_multiplier = 0.0
w_matrix = _make_prefix_sum_matrix(dim)
h_matrix = tf.cast(tf.eye(dim), w_matrix.dtype)
clients_per_round = 2
seed = 0
agg_factory = tff_aggregator.create_residual_prefix_sum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
)
aggregator_process = agg_factory.create(
value_type=tff.TensorType(dtype=tf.float32, shape=[])
)
agg_state = aggregator_process.initialize()
for i in range(dim):
client_1_value = 0.5 * float(i)
client_2_value = 1.5 * float(i)
output = aggregator_process.next(
agg_state, [client_1_value, client_2_value]
)
result = output.result
agg_state = output.state
expected_mean = 0.5 * (client_1_value + client_2_value)
# Since values are scalar and positive, the expected mean
# is also the average_client_norm.
self.assertEqual(
output.measurements['average_client_norm'], expected_mean
)
self.assertEqual(result, expected_mean)
def test_noised_prefix_sum_outputs_residuals(self):
dim = 10
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
# Set the l2 norm clip large enough so that none of the incoming values are
# clipped.
l2_norm_clip = 1.0 * dim
noise_multiplier = 1.0 / l2_norm_clip
w_matrix = tf.constant(np.tril(np.ones(shape=[dim, dim])))
h_matrix = tf.cast(tf.eye(dim), w_matrix.dtype)
clients_per_round = 2
seed = 0
# Technically, the assertions to follow rely on the fact that the seed is
# passed directly to the `OnTheFlyFactorizedNoiseMechanism` constructed below.
underlying_mechanism = (
matrix_factorization_query.OnTheFlyFactorizedNoiseMechanism(
tensor_specs=tensor_specs,
stddev=noise_multiplier * l2_norm_clip,
w_matrix=w_matrix,
seed=seed,
)
)
mech_state = underlying_mechanism.initialize()
agg_factory = tff_aggregator.create_residual_prefix_sum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
)
aggregator_process = agg_factory.create(
value_type=tff.TensorType(dtype=tf.float32, shape=[])
)
agg_state = aggregator_process.initialize()
previous_noise = tf.zeros(shape=[], dtype=tf.float32)
for i in range(dim):
output = aggregator_process.next(agg_state, [float(i)] * 2)
noise_at_index, mech_state = underlying_mechanism.compute_noise(
mech_state
)
result = output.result
agg_state = output.state
# We added the noise before computing the mean, so we divide by the number
# of clients per round here.
self.assertEqual(
result, i + (noise_at_index - previous_noise) / clients_per_round
)
previous_noise = noise_at_index
class MomentumMatrixResidualTest(parameterized.TestCase, tf.test.TestCase):
def test_aggregator_factory_constructs(self):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
l2_norm_clip = 1.0
noise_multiplier = 0.0
momentum_value = 0.9
w_matrix = tf.constant(
matrix_constructors.momentum_sgd_matrix(
num_iters=dim, momentum=momentum_value
),
dtype=tf.float32,
)
h_matrix = tf.eye(dim)
clients_per_round = 1
seed = 0
agg_factory = tff_aggregator.create_residual_momentum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
momentum_value=momentum_value,
)
self.assertIsInstance(
agg_factory, tff.aggregators.UnweightedAggregationFactory
)
def test_aggregator_raises_with_mismatched_type_structure(self):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
l2_norm_clip = 1.0
noise_multiplier = 0.0
momentum_value = 0.9
w_matrix = tf.constant(
matrix_constructors.momentum_sgd_matrix(
num_iters=dim, momentum=momentum_value
),
dtype=tf.float32,
)
h_matrix = tf.eye(dim)
clients_per_round = 1
seed = 0
agg_factory = tff_aggregator.create_residual_momentum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
momentum_value=momentum_value,
)
with self.assertRaises(ValueError):
agg_factory.create(
value_type=tff.types.type_from_tensors([tf.zeros(shape=[])] * 2)
)
def test_unnoised_prefix_sum_aggregator_performs_momentum_federated_mean(
self,
):
dim = 3
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
# Set the l2 norm clip large enough so that none of the incoming values are
# clipped.
l2_norm_clip = 1.0 * dim
noise_multiplier = 0.0
momentum_value = 0.9
w_matrix = tf.constant(
matrix_constructors.momentum_sgd_matrix(
num_iters=dim, momentum=momentum_value
),
dtype=tf.float32,
)
h_matrix = tf.cast(tf.eye(dim), w_matrix.dtype)
clients_per_round = 2
seed = 0
agg_factory = tff_aggregator.create_residual_momentum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
momentum_value=momentum_value,
)
aggregator_process = agg_factory.create(
value_type=tff.TensorType(dtype=tf.float32, shape=[])
)
agg_state = aggregator_process.initialize()
momentum_accumulator = 0.0
for i in range(dim):
# Average client values; no noise is added, so we should just get these
# values back.
output = aggregator_process.next(agg_state, [float(i)] * 2)
result = output.result
agg_state = output.state
expected_result = i + momentum_accumulator * momentum_value
momentum_accumulator = expected_result
self.assertAllClose(result, expected_result)
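      # Worked arithmetic for the loop above (momentum 0.9, dim 3): round 0
      # yields 0.0, round 1 yields 1 + 0.9 * 0.0 = 1.0, and round 2 yields
      # 2 + 0.9 * 1.0 = 2.9.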
def test_noised_outputs_residuals(self):
dim = 10
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
# Set the l2 norm clip large enough so that none of the incoming values are
# clipped.
l2_norm_clip = 1.0 * dim
noise_multiplier = 1.0 / l2_norm_clip
momentum_value = 0.9
w_matrix = tf.constant(
matrix_constructors.momentum_sgd_matrix(
num_iters=dim, momentum=momentum_value
),
dtype=tf.float32,
)
h_matrix = tf.cast(tf.eye(dim), w_matrix.dtype)
clients_per_round = 2
seed = 0
# Technically, the assertions to follow rely on the fact that the seed is
# passed directly to the `OnTheFlyFactorizedNoiseMechanism` constructed below.
underlying_mechanism = (
matrix_factorization_query.OnTheFlyFactorizedNoiseMechanism(
tensor_specs=tensor_specs,
stddev=noise_multiplier * l2_norm_clip,
w_matrix=w_matrix,
seed=seed,
)
)
mech_state = underlying_mechanism.initialize()
agg_factory = tff_aggregator.create_residual_momentum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=seed,
momentum_value=momentum_value,
)
aggregator_process = agg_factory.create(
value_type=tff.TensorType(dtype=tf.float32, shape=[])
)
agg_state = aggregator_process.initialize()
previous_noise = tf.zeros(shape=[], dtype=tf.float32)
momentum_accumulator = 0.0
for i in range(dim):
output = aggregator_process.next(agg_state, [float(i)] * 2)
noise_at_index, mech_state = underlying_mechanism.compute_noise(
mech_state
)
result = output.result
agg_state = output.state
expected_unnoised_result = i + momentum_accumulator * momentum_value
momentum_accumulator = expected_unnoised_result
# We added the noise before computing the mean, so we divide by the number
# of clients per round here.
self.assertAllClose(
result,
expected_unnoised_result
+ (noise_at_index - previous_noise) / clients_per_round,
)
previous_noise = noise_at_index
def _create_5_round_process(self, **kwargs):
dim = 5
tensor_specs = tf.TensorSpec(dtype=tf.float32, shape=[])
momentum_value = 0.9
w_matrix = tf.constant(
matrix_constructors.momentum_sgd_matrix(
num_iters=dim, momentum=momentum_value
),
dtype=tf.float32,
)
h_matrix = tf.cast(tf.eye(dim), w_matrix.dtype)
clients_per_round = 2
agg_factory = tff_aggregator.create_residual_momentum_dp_factory(
tensor_specs=tensor_specs,
l2_norm_clip=float(dim),
noise_multiplier=1,
w_matrix=w_matrix,
h_matrix=h_matrix,
clients_per_round=clients_per_round,
seed=0,
momentum_value=momentum_value,
**kwargs,
)
return agg_factory.create(
value_type=tff.TensorType(dtype=tf.float32, shape=[])
)
def test_raises_on_extra_rounds(self):
dim = 5
aggregator_process = self._create_5_round_process(
emit_zeros_after_last_round=False
)
agg_state = aggregator_process.initialize()
for i in range(dim):
output = aggregator_process.next(agg_state, [float(i)] * 2)
agg_state = output.state
# An extra round raises a TF error:
with self.assertRaisesRegex(
Exception, 'can therefore only support 5 rounds'
):
aggregator_process.next(agg_state, [float(1)] * 2)
def test_zeros_on_extra_rounds(self):
dim = 5
aggregator_process = self._create_5_round_process(
emit_zeros_after_last_round=True
)
agg_state = aggregator_process.initialize()
for i in range(dim):
output = aggregator_process.next(agg_state, [float(2 * i)] * 2)
agg_state = output.state
# Extra rounds produce zeros:
for _ in range(3):
output = aggregator_process.next(agg_state, [float(10)] * 2)
agg_state = output.state
self.assertAllClose(output.result, 0.0)
if __name__ == '__main__':
tf.test.main()
|
fc4cf209cb86e505c5084797a17b1bd4910c738f
|
50e86bd4017387f772da067ff929f4db8bd3dc46
|
/pyblp/results/optimal_instrument_results.py
|
f74634f158ab82b6dbc9b93504fa3d56d7b4df44
|
[
"MIT"
] |
permissive
|
jeffgortmaker/pyblp
|
651c89eb0406de240bc827468b2fa4ed9de625e3
|
3aeae4022317043a22136528ced0d0dc536d7f08
|
refs/heads/master
| 2023-08-10T00:21:34.060361
| 2023-07-24T15:52:42
| 2023-07-24T15:52:42
| 130,608,439
| 213
| 86
|
MIT
| 2023-06-23T15:17:31
| 2018-04-22T21:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 21,726
|
py
|
optimal_instrument_results.py
|
"""Economy-level structuring of optimal instrument results."""
from pathlib import Path
import pickle
from typing import Hashable, Optional, Sequence, TYPE_CHECKING, Union
import numpy as np
import patsy
from .problem_results import ProblemResults
from ..configurations.formulation import Formulation
from ..parameters import LinearCoefficient
from ..utilities.basics import Array, Mapping, SolverStats, StringRepresentation, format_seconds, format_table
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import OptimalInstrumentProblem # noqa
class OptimalInstrumentResults(StringRepresentation):
r"""Results of optimal instrument computation.
The :meth:`OptimalInstrumentResults.to_problem` method can be used to update the original :class:`Problem` with
the computed optimal instruments.
Attributes
----------
problem_results : `ProblemResults`
:class:`ProblemResults` that was used to compute these optimal instrument results.
demand_instruments : `ndarray`
Estimated optimal demand-side instruments for :math:`\theta`, denoted :math:`Z_D^\text{opt}`.
supply_instruments : `ndarray`
Estimated optimal supply-side instruments for :math:`\theta`, denoted :math:`Z_S^\text{opt}`.
supply_shifter_formulation : `Formulation or None`
:class:`Formulation` configuration for supply shifters that will by default be included in the full set of
optimal demand-side instruments. This is only constructed if a supply side was estimated, and it can be changed
in :meth:`OptimalInstrumentResults.to_problem`. By default, this is the formulation for :math:`X_3^\text{ex}`
from :class:`Problem` excluding any variables in the formulation for :math:`X_1^\text{ex}`.
demand_shifter_formulation : `Formulation or None`
:class:`Formulation` configuration for demand shifters that will by default be included in the full set of
optimal supply-side instruments. This is only constructed if a supply side was estimated, and it can be changed
in :meth:`OptimalInstrumentResults.to_problem`. By default, this is the formulation for :math:`X_1^\text{ex}`
from :class:`Problem` excluding any variables in the formulation for :math:`X_3^\text{ex}`.
inverse_covariance_matrix : `ndarray`
Inverse of the sample covariance matrix of the estimated :math:`\xi` and :math:`\omega`, which is used to
normalize the expected Jacobians. If a supply side was not estimated, this is simply the sample estimate of
:math:`1 / \sigma_{\xi}^2`.
expected_xi_by_theta_jacobian : `ndarray`
Estimated :math:`E[\frac{\partial\xi}{\partial\theta} \mid Z]`.
expected_omega_by_theta_jacobian : `ndarray`
Estimated :math:`E[\frac{\partial\omega}{\partial\theta} \mid Z]`.
expected_prices : `ndarray`
Vector of expected prices conditional on all exogenous variables, :math:`E[p \mid Z]`, which may have been
specified in :meth:`ProblemResults.compute_optimal_instruments`.
expected_shares : `ndarray`
Vector of expected market shares conditional on all exogenous variables, :math:`E[s \mid Z]`.
computation_time : `float`
Number of seconds it took to compute optimal excluded instruments.
draws : `int`
Number of draws used to approximate the integral over the error term density.
fp_converged : `ndarray`
Flags for convergence of the iteration routine used to compute equilibrium prices in each market. Rows are in
the same order as :attr:`Problem.unique_market_ids` and column indices correspond to draws.
fp_iterations : `ndarray`
Number of major iterations completed by the iteration routine used to compute equilibrium prices in each market
for each error term draw. Rows are in the same order as :attr:`Problem.unique_market_ids` and column indices
correspond to draws.
contraction_evaluations : `ndarray`
Number of times the contraction used to compute equilibrium prices was evaluated in each market for each error
term draw. Rows are in the same order as :attr:`Problem.unique_market_ids` and column indices correspond to
draws.
Examples
--------
- :doc:`Tutorial </tutorial>`
"""
problem_results: ProblemResults
demand_instruments: Array
supply_instruments: Array
demand_shifter_formulation: Optional[Formulation]
supply_shifter_formulation: Optional[Formulation]
inverse_covariance_matrix: Array
expected_xi_by_theta_jacobian: Array
expected_omega_by_theta_jacobian: Array
expected_prices: Array
expected_shares: Array
computation_time: float
draws: int
fp_converged: Array
fp_iterations: Array
contraction_evaluations: Array
def __init__(
self, problem_results: ProblemResults, demand_instruments: Array, supply_instruments: Array,
inverse_covariance_matrix: Array, expected_xi_jacobian: Array, expected_omega_jacobian: Array,
expected_prices: Array, expected_shares: Array, start_time: float, end_time: float, draws: int,
iteration_stats: Sequence[Mapping[Hashable, SolverStats]]) -> None:
"""Structure optimal excluded instrument computation results. Also identify supply and demand shifters that will
be added to the optimal instruments when converting them into a problem.
"""
self.problem_results = problem_results
self.demand_instruments = demand_instruments
self.supply_instruments = supply_instruments
self.inverse_covariance_matrix = inverse_covariance_matrix
self.expected_xi_by_theta_jacobian = expected_xi_jacobian
self.expected_omega_by_theta_jacobian = expected_omega_jacobian
self.expected_prices = expected_prices
self.expected_shares = expected_shares
self.computation_time = end_time - start_time
self.draws = draws
unique_market_ids = problem_results.problem.unique_market_ids
self.fp_converged = np.array(
[[m[t].converged if m else True for m in iteration_stats] for t in unique_market_ids], dtype=np.bool_
)
self.fp_iterations = np.array(
[[m[t].iterations if m else 0 for m in iteration_stats] for t in unique_market_ids], dtype=np.int64
)
self.contraction_evaluations = np.array(
[[m[t].evaluations if m else 0 for m in iteration_stats] for t in unique_market_ids], dtype=np.int64
)
# construct default supply and demand shifter formulations
self.supply_shifter_formulation = self.demand_shifter_formulation = None
if self.problem_results.problem.K3 > 0:
assert self.problem_results.problem.product_formulations[0] is not None
assert self.problem_results.problem.product_formulations[2] is not None
X1_terms = self.problem_results.problem.product_formulations[0]._terms
X3_terms = self.problem_results.problem.product_formulations[2]._terms
X1_expressions = self.problem_results.problem.product_formulations[0]._expressions
X3_expressions = self.problem_results.problem.product_formulations[2]._expressions
supply_shifters = set()
for term, expression in zip(X3_terms, X3_expressions):
if all(str(s) != 'shares' for s in expression.free_symbols) and term.name() != 'Intercept':
supply_shifters.add(term.name())
demand_shifters = set()
for term, expression in zip(X1_terms, X1_expressions):
if all(str(s) != 'prices' for s in expression.free_symbols) and term.name() != 'Intercept':
demand_shifters.add(term.name())
if supply_shifters - demand_shifters:
supply_shifter_formula = ' + '.join(sorted(supply_shifters - demand_shifters))
self.supply_shifter_formulation = Formulation(f'{supply_shifter_formula} - 1')
if demand_shifters - supply_shifters:
demand_shifter_formula = ' + '.join(sorted(demand_shifters - supply_shifters))
self.demand_shifter_formulation = Formulation(f'{demand_shifter_formula} - 1')
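# Illustrative example of the set logic above (hypothetical formulations):
# with X1 = prices + x and X3 = x + w, the supply shifter set is {'w'} and
# the demand shifter set is empty, so supply_shifter_formulation becomes
# Formulation('w - 1') and demand_shifter_formulation stays None.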
def __str__(self) -> str:
"""Format optimal instrument computation results as a string."""
header = [("Computation", "Time"), ("Error Term", "Draws")]
values = [format_seconds(self.computation_time), self.draws]
if self.fp_iterations.sum() > 0 or self.contraction_evaluations.sum() > 0:
header.extend([("Fixed Point", "Iterations"), ("Contraction", "Evaluations")])
values.extend([self.fp_iterations.sum(), self.contraction_evaluations.sum()])
return format_table(header, values, title="Optimal Instrument Results Summary")
def to_pickle(self, path: Union[str, Path]) -> None:
"""Save these results as a pickle file.
Parameters
----------
path: `str or Path`
File path to which these results will be saved.
"""
with open(path, 'wb') as handle:
pickle.dump(self, handle)
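# Reloading results saved with to_pickle (a minimal sketch):
#
#     import pickle
#     with open(path, 'rb') as handle:
#         results = pickle.load(handle)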
def to_dict(
self, attributes: Sequence[str] = (
'demand_instruments', 'supply_instruments', 'inverse_covariance_matrix',
'expected_xi_by_theta_jacobian', 'expected_omega_by_theta_jacobian', 'expected_prices',
'expected_shares', 'computation_time', 'draws', 'fp_converged', 'fp_iterations',
'contraction_evaluations'
)) -> dict:
"""Convert these results into a dictionary that maps attribute names to values.
Parameters
----------
attributes : `sequence of str, optional`
Name of attributes that will be added to the dictionary. By default, all :class:`OptimalInstrumentResults`
attributes are added except for :attr:`OptimalInstrumentResults.problem_results`,
:attr:`OptimalInstrumentResults.supply_shifter_formulation`, and
:attr:`OptimalInstrumentResults.demand_shifter_formulation`.
Returns
-------
`dict`
Mapping from attribute names to values.
Examples
--------
- :doc:`Tutorial </tutorial>`
"""
return {k: getattr(self, k) for k in attributes}
def to_problem(
self, supply_shifter_formulation: Optional[Formulation] = None,
demand_shifter_formulation: Optional[Formulation] = None, product_data: Optional[Mapping] = None,
drop_indices: Optional[Sequence[int]] = None) -> 'OptimalInstrumentProblem':
r"""Re-create the problem with estimated feasible optimal instruments.
The re-created problem will be exactly the same, except that instruments will be replaced with estimated
feasible optimal instruments.
.. note::
Most of the explanation here is only important if a supply side was estimated.
The optimal excluded demand-side instruments consist of the following:
1. Estimated optimal demand-side instruments for :math:`\theta`, :math:`Z_D^\text{opt}`, excluding columns
of instruments for any parameters on exogenous linear characteristics that were not concentrated out, but
rather included in :math:`\theta` by :meth:`Problem.solve`.
2. Optimal instruments for any linear demand-side parameters on endogenous product characteristics,
:math:`\alpha`, which were concentrated out and hence not included in :math:`\theta`. These optimal
instruments are simply an integral of the endogenous product characteristics, :math:`X_1^\text{en}`, over
the joint density of :math:`\xi` and :math:`\omega`. It is only possible to concentrate out
:math:`\alpha` when there isn't a supply side, so the approximation of these optimal instruments is
simply :math:`X_1^\text{en}` evaluated at the constant vector of expected prices, :math:`E[p \mid Z]`,
specified in :meth:`ProblemResults.compute_optimal_instruments`.
3. If a supply side was estimated, any supply shifters, which are by default formulated by
:attr:`OptimalInstrumentResults.supply_shifter_formulation`: all characteristics in :math:`X_3^\text{ex}`
not in :math:`X_1^\text{ex}`.
Similarly, if a supply side was estimated, the optimal excluded supply-side instruments consist of the
following:
1. Estimated optimal supply-side instruments for :math:`\theta`, :math:`Z_S^\text{opt}`, excluding columns
of instruments for any parameters on exogenous linear characteristics that were not concentrated out, but
rather included in :math:`\theta` by :meth:`Problem.solve`.
2. Optimal instruments for any linear supply-side parameters on endogenous product characteristics,
:math:`\gamma^\text{en}`, which were concentrated out and hence not included in :math:`\theta`. This
is only relevant if ``shares`` were included in the formulation for :math:`X_3` in :class:`Problem`.
The corresponding optimal instruments are simply an integral of the endogenous product characteristics,
:math:`X_3^\text{en}`, over the joint density of :math:`\xi` and :math:`\omega`. The approximation of
these optimal instruments is simply :math:`X_3^\text{en}` evaluated at the market shares that arise under
the constant vector of expected prices, :math:`E[p \mid Z]`, specified in
:meth:`ProblemResults.compute_optimal_instruments`.
3. If a supply side was estimated, any demand shifters, which are by default formulated by
:attr:`OptimalInstrumentResults.demand_shifter_formulation`: all characteristics in :math:`X_1^\text{ex}`
not in :math:`X_3^\text{ex}`.
As usual, the excluded demand-side instruments will be supplemented with :math:`X_1^\text{ex}` and the excluded
supply-side instruments will be supplemented with :math:`X_3^\text{ex}`. The same fixed effects configured in
:class:`Problem` will be absorbed.
.. warning::
If a supply side was estimated, the addition of supply- and demand-shifters may create collinearity issues.
Make sure to check that shifters and other product characteristics are not collinear.
Parameters
----------
supply_shifter_formulation : `Formulation, optional`
:class:`Formulation` configuration for supply shifters to be included in the set of optimal demand-side
instruments. This is only used if a supply side was estimated. Intercepts will be ignored. By default,
:attr:`OptimalInstrumentResults.supply_shifter_formulation` is used.
demand_shifter_formulation : `Formulation, optional`
:class:`Formulation` configuration for demand shifters to be included in the set of optimal supply-side
instruments. This is only used if a supply side was estimated. Intercepts will be ignored. By default,
:attr:`OptimalInstrumentResults.demand_shifter_formulation` is used.
product_data : `structured array-like, optional`
Product data used instead of what was saved from ``product_data`` when initializing the original
:class:`Problem`. This may need to be specified if either the supply or demand shifter formulation contains
some term that was not stored into memory, such as a categorical variable or a mathematical expression.
drop_indices : `sequence of int, optional`
Which column indices to drop from :attr:`OptimalInstrumentResults.demand_instruments` and
:attr:`OptimalInstrumentResults.supply_instruments`. By default, the only columns dropped are those that
correspond to parameters in :math:`\theta` on exogenous linear characteristics.
Returns
-------
`OptimalInstrumentProblem`
:class:`OptimalInstrumentProblem`, which is a :class:`Problem` updated to use the estimated optimal
instruments.
Examples
--------
- :doc:`Tutorial </tutorial>`
"""
# either use the stored variables as product data or any provided data
if product_data is None:
product_data = self.problem_results.problem.products
# configure or validate the supply shifter formulation
if self.problem_results.problem.K3 == 0:
if supply_shifter_formulation is not None:
raise TypeError("A supply side was not estimated, so supply_shifter_formulation should be None.")
elif supply_shifter_formulation is None:
supply_shifter_formulation = self.supply_shifter_formulation
elif not isinstance(supply_shifter_formulation, Formulation):
raise TypeError("supply_shifter_formulation must be None or a Formulation instance.")
elif supply_shifter_formulation._names:
supply_shifter_formulation = Formulation(f'{supply_shifter_formulation._formula} - 1')
else:
supply_shifter_formulation = None
# configure or validate the demand shifter formulation
if self.problem_results.problem.K3 == 0:
if demand_shifter_formulation is not None:
raise TypeError("A demand side was not estimated, so demand_shifter_formulation should be None.")
elif demand_shifter_formulation is None:
demand_shifter_formulation = self.demand_shifter_formulation
elif not isinstance(demand_shifter_formulation, Formulation):
raise TypeError("demand_shifter_formulation must be None or a Formulation instance.")
elif demand_shifter_formulation._names:
demand_shifter_formulation = Formulation(f'{demand_shifter_formulation._formula} - 1')
else:
demand_shifter_formulation = None
        # identify which parameters in theta are on exogenous linear characteristics
dropped_index = np.zeros(self.problem_results._parameters.P, np.bool_)
if drop_indices is not None:
if not isinstance(drop_indices, Sequence) or not all(isinstance(i, int) for i in drop_indices):
raise TypeError("drop_indices must be a sequence of integers.")
for index in drop_indices:
dropped_index[index] = True
else:
for p, parameter in enumerate(self.problem_results._parameters.unfixed):
if isinstance(parameter, LinearCoefficient):
names = parameter.get_product_formulation(self.problem_results.problem).names
if 'prices' not in names and 'shares' not in names:
dropped_index[p] = True
# build excluded demand-side instruments
demand_instruments = self.demand_instruments[:, ~dropped_index]
if self.problem_results._parameters.eliminated_alpha_index.any():
demand_instruments = np.c_[
demand_instruments,
self.problem_results.problem._compute_true_X1(
{'prices': self.expected_prices},
self.problem_results._parameters.eliminated_alpha_index.flatten()
)
]
if supply_shifter_formulation is not None:
try:
demand_instruments = np.c_[
demand_instruments, supply_shifter_formulation._build_matrix(product_data)[0]
]
except patsy.PatsyError as exception:
message = (
"Failed to construct supply shifters from their formulation. You may need to specify "
"product_data if not all variables in the formulation were saved when initializing the problem."
)
raise patsy.PatsyError(message) from exception
# build excluded supply-side instruments
if self.problem_results.problem.K3 == 0:
supply_instruments = self.supply_instruments
else:
supply_instruments = self.supply_instruments[:, ~dropped_index]
if self.problem_results._parameters.eliminated_endogenous_gamma_index.any():
supply_instruments = np.c_[
supply_instruments,
self.problem_results.problem._compute_true_X3(
{'shares': self.expected_shares},
self.problem_results._parameters.eliminated_endogenous_gamma_index.flatten()
)
]
if demand_shifter_formulation is not None:
try:
supply_instruments = np.c_[
supply_instruments, demand_shifter_formulation._build_matrix(product_data)[0]
]
except patsy.PatsyError as exception:
message = (
"Failed to construct demand shifters from their formulation. You may need to specify "
"product_data if not all variables in the formulation were saved when initializing the problem."
)
raise patsy.PatsyError(message) from exception
# initialize the problem
from ..economies.problem import OptimalInstrumentProblem # noqa
return OptimalInstrumentProblem(self.problem_results.problem, demand_instruments, supply_instruments)
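# Illustrative usage sketch (comments only, not part of the library source):
# assuming `results` is a solved pyblp.ProblemResults, the typical flow is
#
#     instrument_results = results.compute_optimal_instruments(method='approximate')
#     updated_problem = instrument_results.to_problem()
#     updated_results = updated_problem.solve(results.sigma, results.pi)
#
# `results` and the solve arguments are placeholders for whatever configuration
# was used when solving the original problem.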
|
0e8d8c6281e18e5299849dfc15bc7284a1961f56
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/demo/RawBitmapAccess.py
|
59c03e85e0dea5ec8c46aa2346f0a30ce8fa5b16
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,425
|
py
|
RawBitmapAccess.py
|
#!/usr/bin/env python
import wx
# use the numpy code instead of the raw access code for comparison
USE_NUMPY = False
# time the execution of making a bitmap?
TIMEIT = False
# how big to make the bitmaps
DIM = 100
# should we use a wx.GraphicsContext for painting?
TEST_GC = False
#----------------------------------------------------------------------
# attempt to import a numeric module if requested to
if USE_NUMPY:
try:
import numpy
def makeByteArray(shape):
return numpy.empty(shape, numpy.uint8)
numtype = 'numpy'
except ImportError:
try:
import numarray
def makeByteArray(shape):
arr = numarray.array(shape=shape, typecode='u1')
arr[:] = 0
return arr
numtype = 'numarray'
except ImportError:
USE_NUMPY = False
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
self.Bind(wx.EVT_PAINT, self.OnPaint)
if TIMEIT:
import timeit
timeit.s = self # Put self in timeit's global namespace as
# 's' so it can be found in the code
# snippets being tested.
if not USE_NUMPY:
t = timeit.Timer("bmp = s.MakeBitmap(10, 20, 30)")
else:
t = timeit.Timer("bmp = s.MakeBitmap2(10, 20, 30)")
log.write("Timing...\n")
num = 100
tm = t.timeit(num)
log.write("%d passes in %f seconds == %f seconds per pass " %
(num, tm, tm/num))
if not USE_NUMPY:
log.write("using raw access\n")
self.redBmp = self.MakeBitmap(178, 34, 34)
self.greenBmp = self.MakeBitmap( 35, 142, 35)
self.blueBmp = self.MakeBitmap( 0, 0, 139)
else:
log.write("using %s\n" % numtype)
self.redBmp = self.MakeBitmap2(178, 34, 34)
self.greenBmp = self.MakeBitmap2( 35, 142, 35)
self.blueBmp = self.MakeBitmap2( 0, 0, 139)
def OnPaint(self, evt):
dc = wx.PaintDC(self)
if not TEST_GC:
dc.DrawBitmap(self.redBmp, 50, 50, True)
dc.DrawBitmap(self.greenBmp, 110, 110, True)
dc.DrawBitmap(self.blueBmp, 170, 50, True)
self.log.write("using wx.DC\n")
else:
gc = wx.GraphicsContext.Create(dc)
gc.DrawBitmap(self.redBmp, 50, 50, DIM,DIM)
gc.DrawBitmap(self.greenBmp, 110, 110, DIM,DIM)
gc.DrawBitmap(self.blueBmp, 170, 50, DIM,DIM)
self.log.write("using wx.GraphicsContext\n")
def MakeBitmap(self, red, green, blue, alpha=128):
# Create the bitmap that we will stuff pixel values into using
# the raw bitmap access classes.
bmp = wx.Bitmap(DIM, DIM, 32)
# Create an object that facilitates access to the bitmap's
# pixel buffer
pixelData = wx.AlphaPixelData(bmp)
if not pixelData:
raise RuntimeError("Failed to gain raw access to bitmap data.")
# We have two ways to access each pixel, first we'll use an
# iterator to set every pixel to the colour and alpha values
# passed in.
for pixel in pixelData:
pixel.Set(red, green, blue, alpha)
# This block of code is another way to do the same as above,
# but with the accessor interface instead of the Python
# iterator. It is a bit faster than the above because it
# avoids the iterator/generator magic, but it is not nearly as
# 'clean' looking ;-)
#pixels = pixelData.GetPixels()
#for y in range(DIM):
# pixels.MoveTo(pixelData, 0, y)
# for x in range(DIM):
# pixels.Set(red, green, blue, alpha)
# pixels.nextPixel()
# Next we'll use the pixel accessor to set the border pixels
# to be fully opaque
pixels = pixelData.GetPixels()
for x in range(DIM):
pixels.MoveTo(pixelData, x, 0)
pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
pixels.MoveTo(pixelData, x, DIM-1)
pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
for y in range(DIM):
pixels.MoveTo(pixelData, 0, y)
pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
pixels.MoveTo(pixelData, DIM-1, y)
pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
return bmp
def MakeBitmap2(self, red, green, blue, alpha=128):
# Make an array of bytes that is DIM*DIM in size, with enough
# slots for each pixel to have a RGB and A value
arr = makeByteArray( (DIM,DIM, 4) )
# just some indexes to keep track of which byte is which
R, G, B, A = range(4)
# initialize all pixel values to the values passed in
arr[:,:,R] = red
arr[:,:,G] = green
arr[:,:,B] = blue
arr[:,:,A] = alpha
# Set the alpha for the border pixels to be fully opaque
arr[0, 0:DIM, A] = wx.ALPHA_OPAQUE # first row
arr[DIM-1, 0:DIM, A] = wx.ALPHA_OPAQUE # last row
arr[0:DIM, 0, A] = wx.ALPHA_OPAQUE # first col
arr[0:DIM, DIM-1, A] = wx.ALPHA_OPAQUE # last col
# finally, use the array to create a bitmap
        bmp = wx.Bitmap.FromBufferRGBA(DIM, DIM, arr)  # modern Phoenix name for the classic BitmapFromBufferRGBA alias
return bmp
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>Raw Bitmap Access</center></h2>
wx.NativePixelData and wx.AlphaPixelData provide a cross-platform way
to access the platform-specific pixel buffer within a wx.Bitmap. They
provide both a random access method, and an iterator interface.
<p>Unfortunately, although these classes are convenient ways to access
and update the contents of a wx.Bitmap, we lose most of the efficiency
of the C++ classes by requiring one or more Python-to-C++ transitions
for each pixel. In fact it can be <b>much</b> slower than the other
ways of creating a bitmap from scratch, especially now that
wx.BitmapFromBuffer exists and can save the time needed to copy from a
wx.Image.
<p>To see this difference for yourself this module has been
instrumented to allow you to experiment with using either the raw
access or numpy/numarray, and also to time how long it takes to create
100 bitmaps like you see on the screen. Simply edit this module in
the \"Demo Code\" tab and set TIMEIT to True and then watch
the log window when the sample is reloaded. To try numpy or numarray
(if you have them installed) then set USE_NUMPY to True as well, and
watch the log window again. On my machines there is about <b>an
order of magnitude</b> difference between the raw access functions
and using a numarray.array with wx.BitmapFromBufferRGBA! Almost
another order of magnitude improvement can be gained with using the
new numpy module!
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
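# A standalone sketch (an aside, not part of the demo): outside the demo
# framework the panel can be hosted in a plain frame, e.g.
#
#     app = wx.App(False)
#     frm = wx.Frame(None, title="RawBitmapAccess")
#     TestPanel(frm, sys.stdout)   # sys.stdout stands in for the demo's log window
#     frm.Show()
#     app.MainLoop()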
|
f2ec031e0ea4393785322e9ac00420464f204cff
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/homewizard/switch.py
|
cddcabc841ec2b1d345c7311fab3b5b451b52a1b
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,933
|
py
|
switch.py
|
"""Creates HomeWizard Energy switch entities."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import Any
from homewizard_energy import HomeWizardEnergy
from homeassistant.components.switch import (
SwitchDeviceClass,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN, DeviceResponseEntry
from .coordinator import HWEnergyDeviceUpdateCoordinator
from .entity import HomeWizardEntity
from .helpers import homewizard_exception_handler
@dataclass
class HomeWizardEntityDescriptionMixin:
"""Mixin values for HomeWizard entities."""
create_fn: Callable[[HWEnergyDeviceUpdateCoordinator], bool]
available_fn: Callable[[DeviceResponseEntry], bool]
is_on_fn: Callable[[DeviceResponseEntry], bool | None]
set_fn: Callable[[HomeWizardEnergy, bool], Awaitable[Any]]
@dataclass
class HomeWizardSwitchEntityDescription(
SwitchEntityDescription, HomeWizardEntityDescriptionMixin
):
"""Class describing HomeWizard switch entities."""
icon_off: str | None = None
SWITCHES = [
HomeWizardSwitchEntityDescription(
key="power_on",
name=None,
device_class=SwitchDeviceClass.OUTLET,
create_fn=lambda coordinator: coordinator.supports_state(),
available_fn=lambda data: data.state is not None and not data.state.switch_lock,
is_on_fn=lambda data: data.state.power_on if data.state else None,
set_fn=lambda api, active: api.state_set(power_on=active),
),
HomeWizardSwitchEntityDescription(
key="switch_lock",
translation_key="switch_lock",
entity_category=EntityCategory.CONFIG,
icon="mdi:lock",
icon_off="mdi:lock-open",
create_fn=lambda coordinator: coordinator.supports_state(),
available_fn=lambda data: data.state is not None,
is_on_fn=lambda data: data.state.switch_lock if data.state else None,
set_fn=lambda api, active: api.state_set(switch_lock=active),
),
HomeWizardSwitchEntityDescription(
key="cloud_connection",
translation_key="cloud_connection",
entity_category=EntityCategory.CONFIG,
icon="mdi:cloud",
icon_off="mdi:cloud-off-outline",
create_fn=lambda coordinator: coordinator.supports_system(),
available_fn=lambda data: data.system is not None,
is_on_fn=lambda data: data.system.cloud_enabled if data.system else None,
set_fn=lambda api, active: api.system_set(cloud_enabled=active),
),
]
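# An illustrative sketch (comments only, not part of the integration): a new
# switch is added by appending one more description that wires the four
# callables to the coordinator data; the "identify" key below is hypothetical.
#
#     HomeWizardSwitchEntityDescription(
#         key="identify",
#         entity_category=EntityCategory.CONFIG,
#         create_fn=lambda coordinator: coordinator.supports_state(),
#         available_fn=lambda data: data.state is not None,
#         is_on_fn=lambda data: data.state.power_on if data.state else None,
#         set_fn=lambda api, active: api.state_set(power_on=active),
#     )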
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up switches."""
coordinator: HWEnergyDeviceUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
HomeWizardSwitchEntity(
coordinator=coordinator,
description=description,
entry=entry,
)
for description in SWITCHES
if description.create_fn(coordinator)
)
class HomeWizardSwitchEntity(HomeWizardEntity, SwitchEntity):
"""Representation of a HomeWizard switch."""
entity_description: HomeWizardSwitchEntityDescription
def __init__(
self,
coordinator: HWEnergyDeviceUpdateCoordinator,
description: HomeWizardSwitchEntityDescription,
entry: ConfigEntry,
) -> None:
"""Initialize the switch."""
super().__init__(coordinator)
self.entity_description = description
self._attr_unique_id = f"{entry.unique_id}_{description.key}"
@property
def icon(self) -> str | None:
"""Return the icon."""
if self.entity_description.icon_off and self.is_on is False:
return self.entity_description.icon_off
return super().icon
@property
def available(self) -> bool:
"""Return if entity is available."""
return super().available and self.entity_description.available_fn(
self.coordinator.data
)
@property
def is_on(self) -> bool | None:
"""Return state of the switch."""
return self.entity_description.is_on_fn(self.coordinator.data)
@homewizard_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the switch on."""
await self.entity_description.set_fn(self.coordinator.api, True)
await self.coordinator.async_refresh()
@homewizard_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the switch off."""
await self.entity_description.set_fn(self.coordinator.api, False)
await self.coordinator.async_refresh()
|
8551c6476a4ce282b58243c121d5fe92eab72803
|
99d79ada2d3b7746573f071823ec61f5f853d7a3
|
/tools/xdl/xdlrcviz.py
|
a6a3aa81ab0024059f063b13d3f691ec3bdd0b9b
|
[
"MIT"
] |
permissive
|
phanrahan/magma
|
d8062c6163e2c2c2cedef82317dc8cc40038220a
|
b05fe5303ed17e668c6ec2ec3558cd5a52eff787
|
refs/heads/master
| 2023-08-23T18:08:22.494869
| 2023-08-08T18:53:05
| 2023-08-17T16:16:44
| 84,332,281
| 227
| 21
|
NOASSERTION
| 2023-09-14T21:32:19
| 2017-03-08T14:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,040
|
py
|
xdlrcviz.py
|
import sexpr
import sys
import os
from pprint import pprint
from subprocess import Popen, PIPE
fname = sys.argv[1]
name = os.path.basename(fname).split('.')[0]
file = open(fname)
source = ""
for line in file.readlines():
if line[0] != "#":
source += line
sexpr.input(source)
s = sexpr.parse()
while len(s) == 1:
s = s[0]
table = {}
for x in s:
table[x[0]] = x[1:]
class Element():
def __init__(self,name):
self.name = name
self.cfg = []
self.inputs = []
self.outputs = []
def canelide(self):
if len(self.cfg) == 0:
if len(self.inputs) == 0 and len(self.outputs) == 1:
return self.outputs[0] == self.name
elif len(self.inputs) == 1 and len(self.outputs) == 0:
return self.inputs[0] == self.name
return False
class Primitive():
def __init__(self,sexpr):
self.name = sexpr[1]
#pprint(sexpr)
input,output = Element("input"),Element("output")
self.elements = [ input, output ]
self.connections = {} # (e0,outputpin,e1,inputpin) => true
for i in sexpr[4:]:
if i[0] == "pin":
if i[3] == "input":
input.outputs.append(i[2])
self.connections[ ("input",i[2],i[1],i[2]) ] = True
else:
output.inputs.append(i[2])
self.connections[ (i[1],i[2],"output",i[2]) ] = True
elif i[0] == "element":
e = Element(i[1])
self.elements.append(e)
for ii in i[2:]:
if isinstance(ii,list):
if ii[0] == "pin":
getattr(e,ii[2]+"s").append(ii[1])
elif ii[0] == "conn":
if ii[3] == "==>":
self.connections[ (ii[1],ii[2],ii[4],ii[5]) ] = True
else:
self.connections[ (ii[4],ii[5],ii[1],ii[2]) ] = True
elif ii[0] == "cfg":
e.cfg = ii[1:]
def save(self):
print("Saving %s" % self.name)
p = Popen(["dot","-Tpdf","-o","%s_%s.pdf" % (self.name,name)], stdin=PIPE)
f = p.stdin
def write(s):
f.write(s)
if self.name == "PCIE_3_0":
sys.stdout.write(s)
write("digraph G {\n")
write(" graph [rankdir = LR];\n")
write(" node[shape=record];\n")
for e in self.elements:
def namefmt(xs):
return "|".join([ "<%s>%s" % (x,x) for x in xs])
def quote(x):
return """ \\"%s\\" """ % x.replace("<","\\<").replace(">","\\>").replace("|","\\|")
cfgstring = '\\n'.join([quote(x) for x in e.cfg])
if e.canelide():
write(""" %s[label="<%s>%s"];\n""" % (e.name,e.name,e.name))
else:
write(""" %s[label="{ {%s} | %s\\n%s | {%s} }"];\n""" % (e.name,namefmt(e.inputs),e.name,cfgstring,namefmt(e.outputs)))
for t in self.connections.keys():
write(" %s:%s -> %s:%s;\n" % t)
write("}")
f.close()
        if p.wait() != 0:
            raise RuntimeError("dot exited with a non-zero status for %s" % self.name)
for i in table["primitive_defs"]:
if i[0] == "primitive_def":
p = Primitive(i)
try:
p.save()
except:
print("Failed to save %s" % p.name)
|
2704e39dfe32c827d43d241ee1f47f5974da0ac3
|
5e66707ccdea0c000e6e269fce6907ee3cfcdbde
|
/galaxy/main/migrations/0137_collectionimport_imported_version.py
|
3dba10a073887d2bc47905f17d55fc8f0789d024
|
[
"Apache-2.0"
] |
permissive
|
ansible/galaxy
|
f629046d579d7cd4e484cdf1e27ad68fe7b170a2
|
6a374cacdf0f04de94486913bba5285e24e178d3
|
refs/heads/devel
| 2023-09-04T09:21:43.542346
| 2023-08-25T16:58:09
| 2023-08-25T16:58:09
| 24,333,272
| 972
| 419
|
Apache-2.0
| 2023-08-25T17:38:20
| 2014-09-22T15:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 530
|
py
|
0137_collectionimport_imported_version.py
|
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('main', '0136_collection_survey_preferences'),
]
operations = [
migrations.AddField(
model_name='collectionimport',
name='imported_version',
field=models.ForeignKey(
to='main.CollectionVersion',
null=True,
on_delete=models.SET_NULL,
related_name='import_tasks'),
),
]
|
90b9aeec0c30cc1a64ec386c52101621bcc36284
|
ff9a29eae7234c4477a048c1bf97611ce05e67ba
|
/graphene_django_extras/fields.py
|
f5eed8ff24f797eadd22abb21f6e7ae96a7a762b
|
[
"MIT"
] |
permissive
|
eamigo86/graphene-django-extras
|
c315a56e238da8edbdcd1a0dd603207ac27705a9
|
209ae496084562f39749c646af2c4fb094f1ebd8
|
refs/heads/master
| 2023-08-22T06:36:56.478466
| 2023-05-08T06:43:08
| 2023-05-08T06:43:08
| 104,230,962
| 431
| 129
|
MIT
| 2023-06-21T17:21:52
| 2017-09-20T15:06:04
|
Python
|
UTF-8
|
Python
| false
| false
| 10,581
|
py
|
fields.py
|
# -*- coding: utf-8 -*-
import operator
from functools import partial
from graphene import ID, Argument, Field, List
from graphene.types.structures import NonNull, Structure
from graphene_django.fields import DjangoListField as DLF
from graphene_django.filter.utils import get_filtering_args_from_filterset
from graphene_django.utils import DJANGO_FILTER_INSTALLED, is_valid_django_model, maybe_queryset
from graphene_django_extras.filters.filter import get_filterset_class
from graphene_django_extras.settings import graphql_api_settings
from .base_types import DjangoListObjectBase
from .paginations.pagination import BaseDjangoGraphqlPagination
from .utils import find_field, get_extra_filters, get_related_fields, queryset_factory
# *********************************************** #
# *********** FIELD FOR SINGLE OBJECT *********** #
# *********************************************** #
class DjangoObjectField(Field):
def __init__(self, _type, *args, **kwargs):
kwargs["id"] = ID(required=True, description="Django object unique identification field")
super(DjangoObjectField, self).__init__(_type, *args, **kwargs)
@property
def model(self):
return self.type._meta.node._meta.model
@staticmethod
def object_resolver(manager, root, info, **kwargs):
id = kwargs.pop("id", None)
try:
return manager.get_queryset().get(pk=id)
except manager.model.DoesNotExist:
return None
def wrap_resolve(self, parent_resolver):
return partial(self.object_resolver, self.type._meta.model._default_manager)
# *********************************************** #
# *************** FIELDS FOR LIST *************** #
# *********************************************** #
class DjangoListField(DLF):
def __init__(self, _type, *args, **kwargs):
if isinstance(_type, NonNull):
_type = _type.of_type
super(DLF, self).__init__(List(NonNull(_type)), *args, **kwargs)
class DjangoFilterListField(Field):
def __init__(
self,
_type,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
if DJANGO_FILTER_INSTALLED:
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(self.filterset_class, _type)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
self.filtering_args.update(
{"id": Argument(ID, description="Django object unique identification field")}
)
kwargs["args"].update(
{"id": Argument(ID, description="Django object unique identification field")}
)
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoFilterListField, self).__init__(List(_type), *args, **kwargs)
@property
def model(self):
return self.type.of_type._meta.node._meta.model
@staticmethod
def list_resolver(manager, filterset_class, filtering_args, root, info, **kwargs):
qs = None
field = None
if root and is_valid_django_model(root._meta.model):
available_related_fields = get_related_fields(root._meta.model)
field = find_field(info.field_nodes[0], available_related_fields)
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
if field is not None:
try:
if filter_kwargs:
qs = operator.attrgetter(
"{}.filter".format(getattr(field, "related_name", None) or field.name)
)(root)(**filter_kwargs)
else:
qs = operator.attrgetter(
"{}.all".format(getattr(field, "related_name", None) or field.name)
)(root)()
except AttributeError:
qs = None
if qs is None:
qs = queryset_factory(manager, root, info, **kwargs)
qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
if root and is_valid_django_model(root._meta.model):
extra_filters = get_extra_filters(root, manager.model)
qs = qs.filter(**extra_filters)
return maybe_queryset(qs)
def wrap_resolve(self, parent_resolver):
current_type = self.type
while isinstance(current_type, Structure):
current_type = current_type.of_type
return partial(
self.list_resolver,
current_type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
class DjangoFilterPaginateListField(Field):
def __init__(
self,
_type,
pagination=None,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(self.filterset_class, _type)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
self.filtering_args.update(
{"id": Argument(ID, description="Django object unique identification field")}
)
kwargs["args"].update(
{"id": Argument(ID, description="Django object unique identification field")}
)
pagination = pagination or graphql_api_settings.DEFAULT_PAGINATION_CLASS()
if pagination is not None:
assert isinstance(pagination, BaseDjangoGraphqlPagination), (
'You need to pass a valid DjangoGraphqlPagination in DjangoFilterPaginateListField, received "{}".'
).format(pagination)
pagination_kwargs = pagination.to_graphql_fields()
self.pagination = pagination
kwargs.update(**pagination_kwargs)
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoFilterPaginateListField, self).__init__(List(NonNull(_type)), *args, **kwargs)
@property
def model(self):
return self.type.of_type._meta.node._meta.model
def get_queryset(self, manager, root, info, **kwargs):
return queryset_factory(manager, root, info, **kwargs)
def list_resolver(self, manager, filterset_class, filtering_args, root, info, **kwargs):
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
qs = self.get_queryset(manager, root, info, **kwargs)
qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
if root and is_valid_django_model(root._meta.model):
extra_filters = get_extra_filters(root, manager.model)
qs = qs.filter(**extra_filters)
if getattr(self, "pagination", None):
qs = self.pagination.paginate_queryset(qs, **kwargs)
return maybe_queryset(qs)
def wrap_resolve(self, parent_resolver):
current_type = self.type
while isinstance(current_type, Structure):
current_type = current_type.of_type
return partial(
self.list_resolver,
current_type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
class DjangoListObjectField(Field):
def __init__(
self,
_type,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
if DJANGO_FILTER_INSTALLED:
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(self.filterset_class, _type)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
id_description = "Django object unique identification field"
self.filtering_args.update({"id": Argument(ID, description=id_description)})
kwargs["args"].update({"id": Argument(ID, description=id_description)})
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoListObjectField, self).__init__(_type, *args, **kwargs)
@property
def model(self):
return self.type._meta.model
def list_resolver(self, manager, filterset_class, filtering_args, root, info, **kwargs):
qs = queryset_factory(manager, root, info, **kwargs)
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
count = qs.count()
return DjangoListObjectBase(
count=count,
results=maybe_queryset(qs),
results_field_name=self.type._meta.results_field_name,
)
def wrap_resolve(self, parent_resolver):
return partial(
self.list_resolver,
self.type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
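# Illustrative usage sketch (not part of the library; UserType and
# UserListType below are hypothetical DjangoObjectType/DjangoListObjectType
# subclasses):
#
#     import graphene
#
#     class Query(graphene.ObjectType):
#         user = DjangoObjectField(UserType, description="Single user")
#         all_users = DjangoListObjectField(UserListType, description="Paginated users")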
|
37aea4b6409c20cb2709e4d24071c389e2fd33d2
|
a8194cf6ffd12f7551eaba53572744080a0bfef3
|
/scripts/perf/nvmf/common.py
|
d641a2c2e68c6de9ac141eb4d1de0e5ce83d51d8
|
[
"Intel",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
spdk/spdk
|
51294f67104b8c3d18f19147d63a212e9486c687
|
d62a3810364cb87be352c66acf7c7f968508ca17
|
refs/heads/master
| 2023-08-08T16:07:41.263000
| 2023-08-02T09:06:56
| 2023-08-08T07:01:20
| 39,042,157
| 2,708
| 1,158
|
NOASSERTION
| 2023-08-11T09:50:50
| 2015-07-13T23:15:15
|
C
|
UTF-8
|
Python
| false
| false
| 11,268
|
py
|
common.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2018 Intel Corporation.
# All rights reserved.
import os
import re
import json
import logging
from subprocess import check_output
from collections import OrderedDict
from json.decoder import JSONDecodeError
def read_json_stats(file):
with open(file, "r") as json_data:
data = json.load(json_data)
    job_pos = 0  # job_pos is 0 because we use aggregated results
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, _ in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
def read_target_stats(measurement_name, results_file_list, results_dir):
# Read additional metrics measurements done on target side and
# calculate the average from across all workload iterations.
# Currently only works for SAR CPU utilization and power draw measurements.
# Other (bwm-ng, pcm, dpdk memory) need to be refactored and provide more
    # structured result files instead of an output dump.
total_util = 0
for result_file in results_file_list:
with open(os.path.join(results_dir, result_file), "r") as result_file_fh:
total_util += float(result_file_fh.read())
avg_util = total_util / len(results_file_list)
return {measurement_name: "{0:.3f}".format(avg_util)}
def parse_results(results_dir, csv_file):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
sar_files = [x for x in files if "sar" in x and "util" in x]
pm_files = [x for x in files if "pm" in x and "avg" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
rows = set()
for fio_config in fio_files:
logging.info("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
# Initiators for the same job could have different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if x.startswith(job_name)]
sar_result_files = [x for x in sar_files if x.startswith(job_name)]
# Collect all pm files for the current job
job_pm_files = [x for x in pm_files if x.startswith(job_name)]
# Filter out data from DCMI sensors and socket/dram sensors
dcmi_sensors = [x for x in job_pm_files if "DCMI" in x]
socket_dram_sensors = [x for x in job_pm_files if "DCMI" not in x and ("socket" in x or "dram" in x)]
sdr_sensors = list(set(job_pm_files) - set(dcmi_sensors) - set(socket_dram_sensors))
# Determine the final list of pm_result_files, if DCMI file is present, use it as a primary source
# of power consumption data. If not, use SDR sensors data if available. If SDR sensors are not available,
# use socket and dram sensors as a fallback.
pm_result_files = dcmi_sensors or sdr_sensors
if not pm_result_files and socket_dram_sensors:
logging.warning("No DCMI or SDR data found for %s, using socket and dram data sensors as a fallback" % job_name)
pm_result_files = socket_dram_sensors
logging.info("Matching result files for current fio config %s:" % job_name)
for j in job_result_files:
logging.info("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
logging.info("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
try:
stats = read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
logging.info(stats)
except JSONDecodeError:
logging.error("ERROR: Failed to parse %s results! Results might be incomplete!" % r)
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
logging.info("\tAverage results for initiator %s" % i)
logging.info(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
        # Latency results are an average of latencies from across all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
if sar_result_files:
aggr_headers.append("target_avg_cpu_util")
aggregate_results.update(read_target_stats("target_avg_cpu_util", sar_result_files, results_dir))
if pm_result_files:
aggr_headers.append("target_avg_power")
aggregate_results.update(read_target_stats("target_avg_power", pm_result_files, results_dir))
rows.add(",".join([job_name, *aggregate_results.values()]))
# Create empty results file with just the header line
aggr_header_line = ",".join(["Name", *aggr_headers])
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
logging.info("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
|
dba0313fa8e210a52c2c0d220fbcb2172e10bc3a
|
2dfbca22d0bacf7ba2bb4d270b2d3292f5f8a43b
|
/amulet/api/history/data_types.py
|
e97ebf9dced814c3d91dacf73c5d0fbf658d842a
|
[] |
no_license
|
Amulet-Team/Amulet-Core
|
9715d888e2faf6c41f9414fd105aaa926aa501c2
|
dafef97fe4fd1f2f713ef1e3503d6b13b20c1c1f
|
refs/heads/main
| 2023-08-18T17:45:48.775423
| 2023-08-16T09:37:55
| 2023-08-16T09:37:55
| 130,729,079
| 117
| 26
| null | 2023-08-16T09:37:57
| 2018-04-23T17:00:04
|
Python
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
data_types.py
|
from typing import Optional, Any
from .changeable import Changeable
EntryKeyType = Any # The key an entry is stored under
EntryType = Optional[
Changeable
] # None is reserved for if the entry was deleted/did not exist
|
555405feeb13e7d2c1945ca2006ad68fbfa8b654
|
cde096ba977b63becc1b9066677331ef4594a797
|
/csfieldguide/search/management/__init__.py
|
6e07276bbbca8dfe58168df8f7596a678cfc030a
|
[
"CC-BY-NC-SA-4.0",
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"WTFPL",
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-2.5",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-field-guide
|
655524b161fab0ab422679dd80720f660f2cfa98
|
ea3281ec6f4d17538f6d3cf6f88d74fa54581b34
|
refs/heads/develop
| 2023-08-28T14:33:58.789843
| 2023-08-28T08:24:03
| 2023-08-28T08:24:03
| 34,356,619
| 364
| 97
|
MIT
| 2023-09-14T08:58:55
| 2015-04-21T23:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
__init__.py
|
"""Module for the management of the search application."""
|
bf067b5fbf3910f3914697c6ce50b619327cbd6c
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/scripts/tracking/siamrpn/demo.py
|
0cd5557317de3aad4f8eec9bf509ff038469ae9e
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,725
|
py
|
demo.py
|
"""SiamRPN Demo script.
Code adapted from https://github.com/STVIR/pysot"""
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
import mxnet as mx
from gluoncv import model_zoo, utils
from gluoncv.model_zoo.siamrpn.siamrpn_tracker import SiamRPNTracker as build_tracker
from gluoncv.model_zoo.siamrpn.siamrpn_tracker import get_axis_aligned_bbox
from gluoncv.utils.filesystem import try_import_cv2
cv2 = try_import_cv2()
def parse_args():
""" benchmark test."""
parser = argparse.ArgumentParser(description='make ovject tracking.')
parser.add_argument('--data-dir', type=str, default='',
help='if video-loader set to True, data-dir store videos frames.')
parser.add_argument('--video-loader', action='store_true', default=True,
help='if set to True, read videos directly instead of reading frames.')
parser.add_argument('--video-path',
default=
'https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/tracking/Coke.mp4',
                        help='URL or local path of the demo video.')
    parser.add_argument('--network', type=str, default='siamrpn_alexnet_v2_otb15',
help='SiamRPN network name')
parser.add_argument('--gt-bbox', type=int, nargs='+', default=[298, 160, 48, 80],
help='first frame object location')
parser.add_argument('--save-dir', type=str, default='./predictions',
help='directory of saved results')
opt = parser.parse_args()
return opt
def read_data(opt):
"""
Pre-process data
--------------------
    Next we need a video or a directory of video frames.
    To test on video frames, set opt.video_loader to False and point
    opt.data_dir at your directory of frames.
    In both cases the first frame object coordinates must be given in
    opt.gt_bbox, a bbox of the form (center_x, center_y, width, height).
"""
video_frames = []
if opt.video_loader:
im_video = utils.download(opt.video_path)
cap = cv2.VideoCapture(im_video)
while(True):
ret, img = cap.read()
if not ret:
break
video_frames.append(img)
else:
for data in sorted(os.listdir(opt.data_dir)):
video_frames.append(cv2.imread(os.path.join(opt.data_dir, data)))
return video_frames
def inference(video_frames, tracker, opt):
"""
Predict with a SiamRPN and make inference
--------------------
    For each frame the tracker returns a dictionary with two keys: 'bbox',
    the coordinates of the predicted box, and 'best_score', the best score
    for that frame.
    Annotated frames are saved under opt.save_dir.
"""
scores = []
pred_bboxes = []
gt_bbox = list(map(int, opt.gt_bbox))
if not os.path.exists(opt.save_dir):
os.makedirs(opt.save_dir)
for ind, frame in enumerate(video_frames):
if ind == 0:
cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
tracker.init(frame, gt_bbox_, ctx=mx.cpu())
pred_bbox = gt_bbox_
scores.append(None)
pred_bboxes.append(pred_bbox)
else:
outputs = tracker.track(frame, ctx=mx.cpu())
pred_bbox = outputs['bbox']
pred_bboxes.append(pred_bbox)
scores.append(outputs['best_score'])
pred_bbox = list(map(int, pred_bbox))
cv2.rectangle(frame, (pred_bbox[0], pred_bbox[1]),
(pred_bbox[0]+pred_bbox[2], pred_bbox[1]+pred_bbox[3]),
(0, 255, 255), 3)
cv2.imwrite(os.path.join(opt.save_dir, '%04d.jpg'%(ind+1)), frame)
if __name__ == '__main__':
opt = parse_args()
# ######################################################################
# Load a pretrained model
# -------------------------
#
    # Let's get a pretrained SiamRPN model. We pick the one using AlexNet as the base model.
# By specifying ``pretrained=True``, it will automatically download the model from the model
# zoo if necessary. For more pretrained models, please refer to
# :doc:`../../model_zoo/index`.
    net = model_zoo.get_model(opt.network, ctx=mx.cpu(), pretrained=True)
tracker = build_tracker(net)
# Pre-process data
video_frames = read_data(opt)
######################################################################
plt.imshow(video_frames[0])
plt.show()
# Predict with a SiamRPN and make inference
inference(video_frames, tracker, opt)
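# Illustrative invocation (a sketch; the bounding box is the script default):
#
#     python demo.py --gt-bbox 298 160 48 80
#
# The demo downloads the Coke.mp4 clip, shows the first frame, and writes the
# annotated frames to ./predictions as 0001.jpg, 0002.jpg, ...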
|
952ca82a995f5f82ef732f09a9eb2d67024b2e1f
|
7fd6ef3a01bddf35a3a4c2a0f44a33dc129d88fb
|
/itables/shiny.py
|
37f55ebdfa2956cc02066646420a26919ec39a5f
|
[
"MIT"
] |
permissive
|
mwouts/itables
|
cd5848b55875ce0efd49678cf27243066f80bf06
|
c3454d81a0d035daf7f14f816eba2f1b34d54fe0
|
refs/heads/main
| 2023-09-04T02:57:08.395731
| 2023-08-18T18:29:46
| 2023-08-18T18:40:53
| 181,572,634
| 513
| 39
|
MIT
| 2023-08-18T18:40:55
| 2019-04-15T22:09:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
shiny.py
|
from .javascript import to_html_datatable
def DT(df, caption=None, tableId=None, **kwargs):
"""This is a version of 'to_html_datatable' that works in Shiny applications.
In these applications, jquery is already loaded, so we call 'to_html_datatable'
with an argument 'import_jquery=False'.
Cf. https://github.com/mwouts/itables/issues/181
and https://github.com/rstudio/py-shiny/issues/502
"""
return to_html_datatable(
df,
caption=caption,
tableId=tableId,
connected=True,
import_jquery=False,
**kwargs
)
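# Illustrative Shiny usage (a sketch, assuming a pandas DataFrame `df`):
#
#     from shiny import ui
#
#     app_ui = ui.page_fluid(ui.HTML(DT(df, caption="demo")))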
|
f741ae6f73ee26442488ba72655779b0bb86e476
|
503bfe863ae9e92bf940a5e8baa57c0de44f4da6
|
/src/silx/gui/plot3d/items/clipplane.py
|
83a3c0e1e5cef85a83141bbd7be804d65cba1395
|
[
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
silx-kit/silx
|
58105c0ed9cd02c75543c0c67a027471ca87922b
|
5e33cb69afd2a8b1cfe3183282acdd8b34c1a74f
|
refs/heads/main
| 2023-08-24T14:33:49.732794
| 2023-07-25T07:44:02
| 2023-07-25T07:44:02
| 43,291,718
| 120
| 78
|
MIT
| 2023-09-14T13:07:11
| 2015-09-28T09:23:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,000
|
py
|
clipplane.py
|
# /*##########################################################################
#
# Copyright (c) 2017-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module provides a scene clip plane class.
"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "15/11/2017"
import numpy
from ..scene import primitives, utils
from ._pick import PickingResult
from .core import Item3D
from .mixins import PlaneMixIn
class ClipPlane(Item3D, PlaneMixIn):
"""Represents a clipping plane that clips following items within the group.
For now only on clip plane is allowed at once in a scene.
"""
def __init__(self, parent=None):
plane = primitives.ClipPlane()
Item3D.__init__(self, parent=parent, primitive=plane)
PlaneMixIn.__init__(self, plane=plane)
def __pickPreProcessing(self, context):
"""Common processing for :meth:`_pickPostProcess` and :meth:`_pickFull`
:param PickContext context: Current picking context
:return None or (bounds, intersection points, rayObject)
"""
plane = self._getPlane()
planeParent = plane.parent
if planeParent is None:
return None
rayObject = context.getPickingSegment(frame=plane)
if rayObject is None:
return None
bounds = planeParent.bounds(dataBounds=True)
rayClip = utils.clipSegmentToBounds(rayObject[:, :3], bounds)
if rayClip is None:
return None # Ray is outside parent's bounding box
points = utils.segmentPlaneIntersect(
rayObject[0, :3],
rayObject[1, :3],
planeNorm=self.getNormal(),
planePt=self.getPoint())
# A single intersection inside bounding box
picked = (len(points) == 1 and
numpy.all(bounds[0] <= points[0]) and
numpy.all(points[0] <= bounds[1]))
return picked, points, rayObject
def _pick(self, context):
# Perform picking before modifying context
result = super(ClipPlane, self)._pick(context)
# Modify context if needed
if self.isVisible() and context.isEnabled():
info = self.__pickPreProcessing(context)
if info is not None:
picked, points, rayObject = info
plane = self._getPlane()
if picked: # A single intersection inside bounding box
# Clip NDC z range for following brother items
ndcIntersect = plane.objectToNDCTransform.transformPoint(
points[0], perspectiveDivide=True)
ndcNormal = plane.objectToNDCTransform.transformNormal(
self.getNormal())
if ndcNormal[2] < 0:
context.setNDCZRange(-1., ndcIntersect[2])
else:
context.setNDCZRange(ndcIntersect[2], 1.)
else:
# TODO check this might not be correct
                    rayObject[:, 3] = 1.  # Make sure the 4th coordinate is one
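                    # Explanatory note (an assumption based on PlaneMixIn):
                    # getParameters() returns (a, b, c, d) of the plane
                    # equation a*x + b*y + c*z + d = 0, so the dot product
                    # below with the homogeneous ray origin is a signed
                    # side-of-plane test.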
if numpy.sum(rayObject[0] * self.getParameters()) < 0.:
# Disable picking for remaining brothers
context.setEnabled(False)
return result
def _pickFastCheck(self, context):
return True
def _pickFull(self, context):
"""Perform picking in this item at given widget position.
:param PickContext context: Current picking context
:return: Object holding the results or None
:rtype: Union[None,PickingResult]
"""
info = self.__pickPreProcessing(context)
if info is not None:
picked, points, _ = info
if picked:
return PickingResult(self, positions=[points[0]])
return None
|
739bfc98b096ff90e16be8a16761002e8ef50f6f
|
a9d9df92f8a61fa3f3649b408e8457b8fdb0b85a
|
/tests/test_favorites.py
|
373517b4a78c0c63ceccaa1b9452545337916338
|
[
"Apache-2.0"
] |
permissive
|
airbnb/knowledge-repo
|
ae7e122e10e505c568511999cf55352eb74646e1
|
71fd3fd42db7866257f86f37235ca0b5d81f5378
|
refs/heads/master
| 2023-07-23T08:06:15.180434
| 2023-04-17T04:04:39
| 2023-04-17T04:04:39
| 65,949,398
| 5,668
| 846
|
Apache-2.0
| 2023-07-20T11:16:46
| 2016-08-17T23:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
test_favorites.py
|
import unittest
from bs4 import BeautifulSoup
from knowledge_repo import KnowledgeRepository
from knowledge_repo.app.models import User, Vote, Post
from knowledge_repo.app.proxies import db_session
class FavoriteTest(unittest.TestCase):
def setUp(self):
self.repo = KnowledgeRepository.for_uri('tests/test_repo', auto_create=True)
self.repo_app = self.repo.get_app(config='tests/config_server.py')
self.app = self.repo_app.test_client()
self.headers = {}
self.knowledge_username = 'favorite_test_user'
username_request_header = self.repo_app.config.get("AUTH_USER_IDENTIFIER_REQUEST_HEADER")
self.headers = {
username_request_header: self.knowledge_username
}
def test_like_and_unlike_a_post(self):
"""
Like and then unlike a post
"""
with self.repo_app.app_context():
post = (db_session.query(Post)
.filter(Post.is_published)
.first())
old_likes = (db_session.query(Vote)
.filter(Vote.object_id == post.id)
.filter(Vote.object_type == 'post')
.all())
rv = self.app.get("/like?post_id=" + str(post.id), headers=self.headers)
assert rv.status == '200 OK', post.path + rv.status
new_likes = (db_session.query(Vote)
.filter(Vote.object_id == post.id)
.filter(Vote.object_type == 'post')
.all())
assert len(old_likes) + 1 == len(new_likes)
# assert that if you re-like the page, the number of likes doesn't
# change
rv = self.app.get("/like?post_id=" + str(post.id), headers=self.headers)
assert rv.status == '200 OK'
like_again = (db_session.query(Vote)
.filter(Vote.object_id == post.id)
.filter(Vote.object_type == 'post')
.all())
assert len(like_again) == len(new_likes)
"""
Let's unlike it again
"""
old_likes = (db_session.query(Vote)
.filter(Vote.object_id == post.id)
.filter(Vote.object_type == 'post')
.all())
rv = self.app.get("/unlike?post_id=" + str(post.id), headers=self.headers)
assert rv.status == '200 OK'
new_likes = (db_session.query(Vote)
.filter(Vote.object_id == post.id)
.filter(Vote.object_type == 'post')
.all())
assert len(new_likes) == len(old_likes) - 1
def test_favorites(self):
"""
test the favorites route, and make sure the number of posts = number of votes with that user_id
"""
with self.repo_app.app_context():
rv = self.app.get("/favorites", headers=self.headers)
assert rv.status == "200 OK"
data = rv.data.decode('utf-8')
soup = BeautifulSoup(data, 'html.parser')
all_posts = soup.findAll(
'div', {'class': 'row row-space-4 panel feed-post'})
user = (db_session.query(User)
.filter(User.identifier == self.knowledge_username)
.first())
votes = (db_session.query(Vote)
.filter(Vote.user_id == user.id)
.all())
assert len(votes) == len(all_posts)
if __name__ == '__main__':
unittest.main()
|